content
stringlengths
1
1.05M
input_ids
listlengths
1
883k
ratio_char_token
float64
1
22.9
token_count
int64
1
883k
import logging from common.licenses import get_license_info from common.loader import provider_details as prov from common.requester import DelayedRequester from common.storage.image import ImageStore logging.basicConfig( format="%(asctime)s - %(name)s - %(levelname)s: %(message)s", level=logging.INFO ) logger = logging.getLogger(__name__) LIMIT = 100 DELAY = 5.0 RETRIES = 3 PROVIDER = prov.SCIENCE_DEFAULT_PROVIDER ENDPOINT = "https://collection.sciencemuseumgroup.org.uk/search/" delay_request = DelayedRequester(delay=DELAY) image_store = ImageStore(provider=PROVIDER) HEADERS = {"Accept": "application/json"} DEFAULT_QUERY_PARAMS = { "has_image": 1, "image_license": "CC", "page[size]": LIMIT, "page[number]": 0, "date[from]": 0, "date[to]": 1500, } YEAR_RANGE = [ (0, 1500), (1500, 1750), (1750, 1825), (1825, 1850), (1850, 1875), (1875, 1900), (1900, 1915), (1915, 1940), (1940, 1965), (1965, 1990), (1990, 2020), ] # global variable to keep track of records pulled RECORD_IDS = [] if __name__ == "__main__": main()
[ 11748, 18931, 198, 198, 6738, 2219, 13, 677, 4541, 1330, 651, 62, 43085, 62, 10951, 198, 6738, 2219, 13, 29356, 1330, 10131, 62, 36604, 355, 899, 198, 6738, 2219, 13, 8897, 7834, 1330, 4216, 16548, 16844, 7834, 198, 6738, 2219, 13, 35...
2.445652
460
# tape variables TS_MAX=1000 # the digital tape model
[ 2, 9154, 9633, 198, 4694, 62, 22921, 28, 12825, 198, 198, 2, 262, 4875, 9154, 2746, 628, 198 ]
3.166667
18
import json # Output must be returned in the format mentioned below: # https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-output-format lambda_response = { "isBase64Encoded": False, "statusCode": 200, "headers": { "Content-Type": "application/json", }, "body": json.dumps({ "Status": "OK" }) }
[ 11748, 33918, 628, 198, 2, 25235, 1276, 307, 4504, 287, 262, 5794, 4750, 2174, 25, 198, 2, 220, 220, 3740, 1378, 31628, 13, 8356, 13, 33103, 13, 785, 14, 499, 10055, 1014, 14, 42861, 14, 16244, 263, 41311, 14, 2617, 12, 929, 12, 5...
2.604938
162
import pygame import time import numpy as np import sys gray = (150, 150, 150) white = (255, 255, 255) black = (0, 0, 0, ) red_block = (255, 0, 0) red_border = (76, 0, 19) block_color = (255, 128, 0) border_color = (165,42,42) screen = None SIDE = 50 BORDER = 5 MARGIN = 5 LINE = 1 h_switch = True ## Render function for the unblockme_class if __name__ == "__main__": from unblockme_class import * matrix, goal = get_example() game = unblock_me(matrix, goal) render_unblockme(game)
[ 11748, 12972, 6057, 198, 11748, 640, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 25064, 198, 198, 44605, 796, 220, 357, 8628, 11, 6640, 11, 6640, 8, 198, 11186, 796, 357, 13381, 11, 14280, 11, 14280, 8, 198, 13424, 796, 357, 15, ...
2.425837
209
# API from pyramid.scaffolds import PyramidTemplate import os import re import logging
[ 2, 7824, 198, 6738, 27944, 13, 1416, 2001, 10119, 1330, 41450, 30800, 198, 11748, 28686, 198, 11748, 302, 198, 11748, 18931, 628, 628, 628, 628, 628 ]
3.692308
26
# -*- coding: utf-8 -*- """Test GUI component.""" #------------------------------------------------------------------------------ # Imports #------------------------------------------------------------------------------ #from contextlib import contextmanager from pytest import yield_fixture, fixture, raises import numpy as np from numpy.testing import assert_array_equal as ae from .. import supervisor as _supervisor from ..supervisor import (Supervisor, TaskLogger, ClusterView, SimilarityView, ActionCreator, ) from phy.gui import GUI from phy.gui.widgets import Barrier from phy.gui.qt import qInstallMessageHandler from phy.gui.tests.test_widgets import _assert, _wait_until_table_ready from phy.utils.context import Context from phylib.utils import connect, Bunch, emit qInstallMessageHandler(handler) #------------------------------------------------------------------------------ # Fixtures #------------------------------------------------------------------------------ #------------------------------------------------------------------------------ # Test tasks #------------------------------------------------------------------------------ def test_task_1(tl): assert tl.last_state(None) is None def test_task_2(tl): tl.enqueue(tl.cluster_view, 'select', [0]) tl.process() assert tl.last_state() == ([0], 1, None, None) def test_task_3(tl): tl.enqueue(tl.cluster_view, 'select', [0]) tl.enqueue(tl.similarity_view, 'select', [100]) tl.process() assert tl.last_state() == ([0], 1, [100], 101) def test_task_merge(tl): tl.enqueue(tl.cluster_view, 'select', [0]) tl.enqueue(tl.similarity_view, 'select', [100]) tl.enqueue(tl.supervisor, 'merge', [0, 100], 1000) tl.process() assert tl.last_state() == ([1000], 1001, None, None) tl.enqueue(tl.supervisor, 'undo') tl.process() assert tl.last_state() == ([0], 1, [100], 101) tl.enqueue(tl.supervisor, 'redo') tl.process() assert tl.last_state() == ([1000], 1001, None, None) def test_task_split(tl): tl.enqueue(tl.cluster_view, 'select', 
[0]) tl.enqueue(tl.similarity_view, 'select', [100]) tl.enqueue(tl.supervisor, 'split', [0, 100], [1000, 1001]) tl.process() assert tl.last_state() == ([1000, 1001], 1002, None, None) def test_task_move_1(tl): tl.enqueue(tl.cluster_view, 'select', [0]) tl.enqueue(tl.supervisor, 'move', [0], 'good') tl.process() assert tl.last_state() == ([1], 2, None, None) def test_task_move_best(tl): tl.enqueue(tl.cluster_view, 'select', [0]) tl.enqueue(tl.similarity_view, 'select', [100]) tl.enqueue(tl.supervisor, 'move', 'best', 'good') tl.process() assert tl.last_state() == ([1], 2, None, None) def test_task_move_similar(tl): tl.enqueue(tl.cluster_view, 'select', [0]) tl.enqueue(tl.similarity_view, 'select', [100]) tl.enqueue(tl.supervisor, 'move', 'similar', 'good') tl.process() assert tl.last_state() == ([0], 1, [101], 102) def test_task_move_all(tl): tl.enqueue(tl.cluster_view, 'select', [0]) tl.enqueue(tl.similarity_view, 'select', [100]) tl.enqueue(tl.supervisor, 'move', 'all', 'good') tl.process() assert tl.last_state() == ([1], 2, [101], 102) #------------------------------------------------------------------------------ # Test cluster and similarity views #------------------------------------------------------------------------------ #------------------------------------------------------------------------------ # Test ActionCreator #------------------------------------------------------------------------------ #------------------------------------------------------------------------------ # Test GUI component #------------------------------------------------------------------------------ def test_supervisor_merge_move(qtbot, supervisor): """Check that merge then move selects the next cluster in the original cluster view, not the updated cluster view.""" _select(supervisor, [20, 11], []) _assert_selected(supervisor, [20, 11]) supervisor.actions.merge() supervisor.block() _assert_selected(supervisor, [31]) supervisor.actions.move('good', 'all') supervisor.block() 
_assert_selected(supervisor, [30]) supervisor.actions.move('good', 'all') supervisor.block() _assert_selected(supervisor, [2])
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 37811, 14402, 25757, 7515, 526, 15931, 198, 198, 2, 10097, 26171, 198, 2, 1846, 3742, 198, 2, 10097, 26171, 198, 198, 2, 6738, 4732, 8019, 1330, 4732, 37153, 198, ...
2.900637
1,570
# # CommandManager.py # Botpy # # Created by Ashish Ahuja on 4th September 2017. # # import threading import chatexchange as ce
[ 2, 198, 2, 9455, 13511, 13, 9078, 198, 2, 18579, 9078, 198, 2, 198, 2, 15622, 416, 7844, 680, 7900, 84, 6592, 319, 604, 400, 2693, 2177, 13, 198, 2, 198, 2, 198, 198, 11748, 4704, 278, 198, 11748, 442, 378, 87, 3803, 355, 2906, ...
2.804348
46
import os import glob subdirs = glob.glob("tests/periodicities/*"); subdirs = ['tests/periodicities/Month', 'tests/periodicities/Minute', 'tests/periodicities/Week', 'tests/periodicities/Business_Hour', 'tests/periodicities/Business_Day', 'tests/periodicities/Second', 'tests/periodicities/Semi_Month', 'tests/periodicities/Hour', 'tests/periodicities/Day'] #print(subdirs) print("PYTHON=python3\n\n"); lAllTarget = ""; for subdir1 in sorted(subdirs): lBase = os.path.basename(subdir1); test_target = ""; for filename in sorted(glob.glob(subdir1 + "/*.py")): bn = os.path.basename(filename); logfile = bn.replace("/" , "_"); logfile = "logs/periodicities_" + logfile.replace(".py" , ".log"); print("#PROCESSING FILE : " , filename, bn , logfile); print(bn , " : " , "\n\t", "-$(PYTHON) " , filename , " > " , logfile , " 2>&1"); test_target = bn + " " + test_target; lAllTarget = lAllTarget + " " + lBase; print("\n\n", lBase , ": ", test_target, "\n" , "\n"); print("\n# ********************************************** \n"); print("all: " , lAllTarget , "\n\t\n");
[ 11748, 28686, 198, 11748, 15095, 198, 198, 7266, 15908, 82, 796, 15095, 13, 4743, 672, 7203, 41989, 14, 41007, 291, 871, 15211, 15341, 198, 7266, 15908, 82, 796, 37250, 41989, 14, 41007, 291, 871, 14, 31948, 3256, 198, 220, 220, 220, ...
2.216846
558
# # Copyright (C) 2018 Neal Digre. # # This software may be modified and distributed under the terms # of the MIT license. See the LICENSE file for details. # # # Portions of this module are copied or lightly modified from the # Tensor2Tensor registry_test module, so here is their license: # # Copyright 2018 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for utils.registry References: Slight modification of `Tensor2Tensor registry_test`_. .. _Tensor2Tensor registry_test: https://github.com/tensorflow/ tensor2tensor/blob/master/tensor2tensor/utils/registry_test.py """ import unittest from carpedm.util import registry from carpedm.models.generic import Model from carpedm.models.baseline import SingleCharBaseline if __name__ == '__main__': unittest.main()
[ 2, 198, 2, 15069, 357, 34, 8, 2864, 29189, 7367, 260, 13, 198, 2, 198, 2, 770, 3788, 743, 307, 9518, 290, 9387, 739, 262, 2846, 198, 2, 286, 262, 17168, 5964, 13, 4091, 262, 38559, 24290, 2393, 329, 3307, 13, 198, 2, 198, 2, 1...
3.404639
388
"""empty message Revision ID: 8f7565cf50c1 Revises: 872760122cc9, 8652bf9c03ff Create Date: 2020-10-02 11:11:49.823678 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '8f7565cf50c1' down_revision = ('872760122cc9', '8652bf9c03ff') branch_labels = None depends_on = None
[ 37811, 28920, 3275, 198, 198, 18009, 1166, 4522, 25, 807, 69, 2425, 2996, 12993, 1120, 66, 16, 198, 18009, 2696, 25, 10083, 27988, 486, 1828, 535, 24, 11, 807, 43193, 19881, 24, 66, 3070, 487, 198, 16447, 7536, 25, 12131, 12, 940, 1...
2.433824
136
# Copyright 2017 MDSLAB - University of Messina # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import asyncio import json import subprocess import time import txaio from iotronic.common import exception from iotronic.common.i18n import _ from iotronic.common.i18n import _LI from iotronic.common.i18n import _LW from iotronic.db import api as dbapi from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_messaging.rpc import dispatcher import importlib from threading import Thread import ssl import os import signal from autobahn.asyncio.component import Component LOG = logging.getLogger(__name__) service_opts = [ cfg.StrOpt('notification_level', choices=[('debug', _('"debug" level')), ('info', _('"info" level')), ('warning', _('"warning" level')), ('error', _('"error" level')), ('critical', _('"critical" level'))], help=_('Specifies the minimum level for which to send ' 'notifications. If not set, no notifications will ' 'be sent. 
The default is for this option to be unset.')), ] wamp_opts = [ cfg.StrOpt('wamp_transport_url', default='ws://localhost:8181/', help=('URL of wamp broker')), cfg.StrOpt('wamp_realm', default='s4t', help=('realm broker')), cfg.BoolOpt('register_agent', default=False, help=('Flag for marking this agent as a registration agent')), cfg.BoolOpt('skip_cert_verify', default=False, help=( 'Flag for skipping the verification of the server cert ' '(for the auto-signed ones)')), cfg.IntOpt('autoPingInterval', default=2, help=('autoPingInterval parameter for wamp')), cfg.IntOpt('autoPingTimeout', default=2, help=('autoPingInterval parameter for wamp')), cfg.BoolOpt('service_allow_list', default=False, help='Enable service allow list checks.'), cfg.StrOpt('service_allow_list_path', default="(/var/lib/wstun/allowlist)", help='Path of allowlist.json file.'), ] proxy_opts = [ cfg.StrOpt('proxy', choices=[('nginx', _('nginx proxy')), ], help=_('Proxy for webservices')), ] CONF = cfg.CONF cfg.CONF.register_opts(service_opts) cfg.CONF.register_opts(proxy_opts) CONF.register_opts(wamp_opts, 'wamp') txaio.start_logging(level="info") wamp_session_caller = None AGENT_HOST = None LOOP = None connected = False # OSLO ENDPOINT class WampEndpoint(object): def read_allowlist(): try: with open(CONF.wamp.service_allow_list_path, "r") as allow_file: allow_list_str = allow_file.read() allow_list = json.loads(allow_list_str) #LOG.debug(allow_list) return allow_list except Exception as err: LOG.error(err) class AgentEndpoint(object): # used for testing
[ 2, 15069, 2177, 337, 5258, 48780, 532, 2059, 286, 10626, 1437, 198, 2, 1439, 6923, 33876, 13, 198, 2, 198, 2, 220, 220, 220, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 345, 743, 198, 2, 22...
2.31207
1,599
#!/usr/bin/env python3 import argparse import logging import sys import numpy as np import util if __name__ == '__main__': main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 11748, 1822, 29572, 198, 11748, 18931, 198, 11748, 25064, 198, 198, 11748, 299, 32152, 355, 45941, 198, 198, 11748, 7736, 628, 628, 198, 361, 11593, 3672, 834, 6624, 705, 834, ...
2.764706
51
# The MIT License (MIT) # Copyright (c) 2021 by the xcube development team and contributors # # Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the "Software"), to deal in # the Software without restriction, including without limitation the rights to # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies # of the Software, and to permit persons to whom the Software is furnished to do # so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import numpy as np import xarray as xr
[ 2, 383, 17168, 13789, 357, 36393, 8, 198, 2, 15069, 357, 66, 8, 33448, 416, 262, 2124, 40296, 2478, 1074, 290, 20420, 198, 2, 198, 2, 2448, 3411, 318, 29376, 7520, 11, 1479, 286, 3877, 11, 284, 597, 1048, 16727, 257, 4866, 286, 19...
3.841935
310
"""Simulation logs""" from __future__ import annotations # default once 3.10 import sys from enum import Enum from typing import Type, TYPE_CHECKING if TYPE_CHECKING: from core.timeline import Timeline
[ 37811, 8890, 1741, 17259, 37811, 198, 6738, 11593, 37443, 834, 1330, 37647, 220, 1303, 4277, 1752, 513, 13, 940, 198, 11748, 25064, 198, 6738, 33829, 1330, 2039, 388, 198, 6738, 19720, 1330, 5994, 11, 41876, 62, 50084, 2751, 198, 198, 3...
3.533333
60
# # # # Copyright 2015 Quantopian, Inc. # # # # Licensed under the Apache License, Version 2.0 (the "License"); # # you may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # # # http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable law or agreed to in writing, software # # distributed under the License is distributed on an "AS IS" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # # See the License for the specific language governing permissions and # # limitations under the License. # from distutils.version import StrictVersion # import os # import numpy as np # # # This is *not* a place to dump arbitrary classes/modules for convenience, # # it is a place to expose the public interfaces. # from trading_calendars import get_calendar # # from . import data # from . import finance # from . import gens # from . import utils # from .utils.numpy_utils import numpy_version # from .utils.pandas_utils import new_pandas # from .utils.run_algo import run_algorithm # from ._version import get_versions # # # These need to happen after the other imports. # from . algorithm import TradingAlgorithm # from . import api # from zipline import extensions as ext # from zipline.finance.blotter import Blotter # # # PERF: Fire a warning if calendars were instantiated during zipline import. # # Having calendars doesn't break anything per-se, but it makes zipline imports # # noticeably slower, which becomes particularly noticeable in the Zipline CLI. 
# from trading_calendars.calendar_utils import global_calendar_dispatcher # if global_calendar_dispatcher._calendars: # import warnings # warnings.warn( # "Found TradingCalendar instances after zipline import.\n" # "Zipline startup will be much slower until this is fixed!", # ) # del warnings # del global_calendar_dispatcher # # # __version__ = get_versions()['version'] # del get_versions # # extension_args = ext.Namespace() # # # def load_ipython_extension(ipython): # from .__main__ import zipline_magic # ipython.register_magic_function(zipline_magic, 'line_cell', 'zipline') # # # if os.name == 'nt': # # we need to be able to write to our temp directoy on windows so we # # create a subdir in %TMP% that has write access and use that as %TMP% # def _(): # import atexit # import tempfile # # tempfile.tempdir = tempdir = tempfile.mkdtemp() # # @atexit.register # def cleanup_tempdir(): # import shutil # shutil.rmtree(tempdir) # _() # del _ # # __all__ = [ # 'Blotter', # 'TradingAlgorithm', # 'api', # 'data', # 'finance', # 'get_calendar', # 'gens', # 'run_algorithm', # 'utils', # 'extension_args' # ] # # # def setup(self, # np=np, # numpy_version=numpy_version, # StrictVersion=StrictVersion, # new_pandas=new_pandas): # """Lives in zipline.__init__ for doctests.""" # # if numpy_version >= StrictVersion('1.14'): # self.old_opts = np.get_printoptions() # np.set_printoptions(legacy='1.13') # else: # self.old_opts = None # # if new_pandas: # self.old_err = np.geterr() # # old pandas has numpy compat that sets this # np.seterr(all='ignore') # else: # self.old_err = None # # # def teardown(self, np=np): # """Lives in zipline.__init__ for doctests.""" # # if self.old_err is not None: # np.seterr(**self.old_err) # # if self.old_opts is not None: # np.set_printoptions(**self.old_opts) # # # del os # del np # del numpy_version # del StrictVersion # del new_pandas
[ 2, 1303, 198, 2, 1303, 15069, 1853, 16972, 37548, 11, 3457, 13, 198, 2, 1303, 198, 2, 1303, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 1303, 345, 743, 407, 779, 428, 2393, 2845, 28...
2.554348
1,472
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. import logging import os import pickle from dataclasses import dataclass from multiprocessing import Pipe, Process, Queue import ijson import smarts.core.scenario as scenario def fetch_history_at_timestep(self, timestep: str): if timestep not in self._all_timesteps: return {} elif timestep in self.traffic_history: return self.traffic_history[timestep] # ask child process to prepare the next batch: self._prepare_next_batch() self._prev_batch_history = self._current_traffic_history # receives the previous batch child process prepared self._current_traffic_history = self._receive_data_conn.recv() if timestep in self._current_traffic_history: return self._current_traffic_history[timestep] # no history exists at requested timestamp return {}
[ 2, 15069, 357, 34, 8, 12131, 13, 43208, 21852, 1766, 1539, 12052, 13, 1439, 2489, 10395, 13, 198, 2, 198, 2, 2448, 3411, 318, 29376, 7520, 11, 1479, 286, 3877, 11, 284, 597, 1048, 16727, 257, 4866, 198, 2, 286, 428, 3788, 290, 391...
3.225443
621
import sqlite3 connect = sqlite3.connect("production.db") cursor = connect.cursor() cursor.execute("UPDATE PERSON SET edad = 19 WHERE nombre = 'Conker'") connect.commit() connect.close()
[ 11748, 44161, 578, 18, 201, 198, 201, 198, 8443, 796, 44161, 578, 18, 13, 8443, 7203, 25493, 13, 9945, 4943, 201, 198, 66, 21471, 796, 2018, 13, 66, 21471, 3419, 201, 198, 66, 21471, 13, 41049, 7203, 16977, 46740, 25823, 1225, 324, ...
2.867647
68
import pygame from StringIO import StringIO import cv2 import os import numpy
[ 11748, 12972, 6057, 198, 6738, 10903, 9399, 1330, 10903, 9399, 198, 11748, 269, 85, 17, 198, 11748, 28686, 198, 11748, 299, 32152, 628 ]
3.434783
23
import binascii from os import urandom import humanize from django.conf import settings from django.db import models from django.utils import timezone from django.utils.translation import ugettext_lazy as _ from durin.settings import durin_settings from durin.signals import token_renewed User = settings.AUTH_USER_MODEL def __str__(self) -> str: return self.token
[ 11748, 9874, 292, 979, 72, 198, 6738, 28686, 1330, 2956, 3749, 198, 198, 11748, 1692, 1096, 198, 6738, 42625, 14208, 13, 10414, 1330, 6460, 198, 6738, 42625, 14208, 13, 9945, 1330, 4981, 198, 6738, 42625, 14208, 13, 26791, 1330, 640, 11...
3.08
125
# Copyright (c) 2013 Aparajita Fishman # Change for CudaLint: Alexey T. # License: MIT import os from cuda_lint import Linter, util if os.name=='nt': _exe = os.path.join(os.path.dirname(__file__), 'tidy_win32', 'tidy') else: _exe = 'tidy'
[ 2, 15069, 357, 66, 8, 2211, 317, 1845, 1228, 5350, 13388, 805, 198, 2, 9794, 329, 327, 15339, 43, 600, 25, 4422, 2959, 309, 13, 198, 2, 13789, 25, 17168, 198, 198, 11748, 28686, 198, 6738, 269, 15339, 62, 75, 600, 1330, 406, 3849,...
2.349057
106
"set operations for multiple sequences"
[ 1, 2617, 4560, 329, 3294, 16311, 1, 198 ]
5
8
msg_dict = { 'resource_not_found': 'The resource you specified was not found', 'invalid_gender': "The gender you specified is invalid!!", 'many_invalid_fields': 'Some errors occured while validating some fields. Please check and try again', 'unique': 'The {} you inputted already exists', 'user_not_found': 'The user with that username/email and password combination was not found', 'email_not_found': 'A user with email `{}` does not exist', 'user_already_verified': 'The user with that email has already been verified', 'invalid_flight_type': 'Flight type must be either international or local', 'invalid_flight_schedule': 'Flight schedule must be at least 12 hours before it is created', 'resource_id_not_found': 'The {} with that id was not found', 'user_book_flight_twice': 'You had previously booked for this Flight and thus cannot do it again', 'flight_booking_expired': 'You cannot book for a flight less than 24 hours before the flight', 'flight_schedule_expired': 'The schedule of this flight has already passed and thus you cannot book it', 'missing_field': 'You forgot to include this field', 'value_not_a_file': 'The value you inputted is not a file', 'not_an_image': 'The file you uploaded is not a valid image', 'image_too_large': 'Image must not be more than 2MB', 'payment_link_error': 'An error occurred while creating payment link', 'booking_already_paid': 'You have already paid for this flight', 'booking_expired': 'Your booking has expired, thus you cannot pay for this ticket', 'invalid_url': 'The `{}` field must be a valid URL with protocols `http` or `https`', "invalid_url_field": 'This field must be a valid URL with protocols `http` or `https`', 'paystack_threw_error': "There was an unexpected error while processing request. 
" "Please raise this as an issue in at " "https://github.com/chidioguejiofor/airtech-api/issues", 'empty_request': 'You did not specify any `{}` data in your request', 'paid_booking_cannot_be_deleted': 'You cannot delete this Booking because you have already paid for it', 'cannot_delete_expired_booking': 'You cannot delete an expired booking', 'cannot_delete_flight_with_bookings': 'You cannot delete this flight because users have started booking it', 'cannot_delete_flight_that_has_flown': 'You cannot delete this flight because the schedule date has been passed', 'cannot_update_flight_field_with_bookings': 'You cannot update the `{}` of this flight because it has already been booked', 'cannot_update_field': 'You cannot update a {} {}', 'regular_user_only': 'This endpoint is for only regular users', 'profile_not_updated': 'You need to update your profile picture before you can do this', 'only_alpha_and_numbers': 'This field can contain only alphabets and numbers' }
[ 19662, 62, 11600, 796, 1391, 198, 220, 220, 220, 705, 31092, 62, 1662, 62, 9275, 10354, 198, 220, 220, 220, 705, 464, 8271, 345, 7368, 373, 407, 1043, 3256, 198, 220, 220, 220, 705, 259, 12102, 62, 8388, 10354, 198, 220, 220, 220, ...
2.991045
1,005
from .marshmallow import ma from .schemas import ArticleSchema __all__ = [ 'ma', 'ArticleSchema' ]
[ 6738, 764, 76, 5406, 42725, 1330, 17266, 198, 198, 6738, 764, 1416, 4411, 292, 1330, 10172, 27054, 2611, 198, 198, 834, 439, 834, 796, 685, 198, 220, 220, 220, 705, 2611, 3256, 198, 220, 220, 220, 705, 14906, 27054, 2611, 6, 198, 60...
2.477273
44
import argparse from time import sleep, time from collections import defaultdict from sqlalchemy import orm, text, insert, delete from sqlalchemy.orm import selectinload import models from app import db from app import logger from scripts.queue import JsonWorks, JsonAuthors, JsonConcepts, JsonInstitutions, JsonVenues from util import elapsed # python -m scripts.fast_queue --entity=work --method=add_everything --limit=3 if __name__ == "__main__": parser = argparse.ArgumentParser(description="Run fast queue.") parser.add_argument('--entity', type=str, help="the entity type to run") parser.add_argument('--method', type=str, help="the method to run") parser.add_argument('--id', nargs="?", type=str, help="id of the one thing you want to update (case sensitive)") parser.add_argument('--limit', "-l", nargs="?", type=int, help="how many objects to work on") parser.add_argument( '--chunk', "-ch", nargs="?", default=100, type=int, help="how many objects to take off the queue at once" ) parsed_args = parser.parse_args() run(**vars(parsed_args))
[ 11748, 1822, 29572, 198, 6738, 640, 1330, 3993, 11, 640, 198, 6738, 17268, 1330, 4277, 11600, 198, 198, 6738, 44161, 282, 26599, 1330, 393, 76, 11, 2420, 11, 7550, 11, 12233, 198, 6738, 44161, 282, 26599, 13, 579, 1330, 2922, 259, 222...
3.103933
356
import json import discord from discord.ext import commands from assets import internet_funcs from assets.list_funcs import chunks
[ 11748, 33918, 198, 198, 11748, 36446, 198, 6738, 36446, 13, 2302, 1330, 9729, 198, 198, 6738, 6798, 1330, 5230, 62, 12543, 6359, 198, 6738, 6798, 13, 4868, 62, 12543, 6359, 1330, 22716, 628, 198 ]
3.970588
34
from forest_fire.server import server server.launch()
[ 6738, 8222, 62, 6495, 13, 15388, 1330, 4382, 198, 198, 15388, 13, 35681, 3419, 198 ]
3.666667
15
# Prep import json, configparser, pickle, csv, logging, os import pandas as pd from tweepy import AppAuthHandler, API, Cursor # Reading in configuation params = configparser.ConfigParser() params.read('config.ini') # Functions # Takes config file and returns authenticated api object # Get relevant user ids # takes user ids, and writes out a txt file wiith each user's status jsons # Running script # Setting up logger logging.basicConfig(filename, filemode = 'a', format = '(%(asctime)s) %(levelname)s: %(message)s', level = logging.INFO) # Authenticating api api = twitter_auth(params) # Get users from pre-parsed data # csv file with: # user, subset # ..., ... # subset is just a way to subset users from the csv file # if subset == None, then no subsetting is performed users = get_ids(path, subset) # Getting timelines get_timelines(users, api, outpath) # Double checking errors retry_missed_users(logfile, api, outpath)
[ 2, 19141, 198, 11748, 33918, 11, 4566, 48610, 11, 2298, 293, 11, 269, 21370, 11, 18931, 11, 28686, 198, 11748, 19798, 292, 355, 279, 67, 198, 6738, 4184, 538, 88, 1330, 2034, 30515, 25060, 11, 7824, 11, 327, 21471, 198, 198, 2, 1172...
3.231034
290
# # Copyright 2021 Ocean Protocol Foundation # SPDX-License-Identifier: Apache-2.0 # from eth_utils import is_address from web3 import Web3 def compare_eth_addresses(address, checker, logger): """ Compare two addresses and return TRUE if there is a match :param str address: Address :param str checker: Address to compare with :param logger: instance of logging :return: boolean """ logger.debug("compare_eth_addresses address: %s" % address) logger.debug("compare_eth_addresses checker: %s" % checker) if not is_address(address): logger.debug("Address is not web3 valid") return False if not is_address(checker): logger.debug("Checker is not web3 valid") return False return Web3.toChecksumAddress(address) == Web3.toChecksumAddress(checker)
[ 2, 198, 2, 15069, 33448, 10692, 20497, 5693, 198, 2, 30628, 55, 12, 34156, 12, 33234, 7483, 25, 24843, 12, 17, 13, 15, 198, 2, 198, 6738, 4555, 62, 26791, 1330, 318, 62, 21975, 198, 6738, 3992, 18, 1330, 5313, 18, 628, 198, 198, ...
2.858131
289
"""Auto-generated file, do not edit by hand. DJ metadata""" from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata PHONE_METADATA_DJ = PhoneMetadata(id='DJ', country_code=253, international_prefix='00', general_desc=PhoneNumberDesc(national_number_pattern='[27]\\d{7}', possible_length=(8,)), fixed_line=PhoneNumberDesc(national_number_pattern='2(?:1[2-5]|7[45])\\d{5}', example_number='21360003', possible_length=(8,)), mobile=PhoneNumberDesc(national_number_pattern='77\\d{6}', example_number='77831001', possible_length=(8,)), number_format=[NumberFormat(pattern='(\\d{2})(\\d{2})(\\d{2})(\\d{2})', format='\\1 \\2 \\3 \\4')])
[ 37811, 27722, 12, 27568, 2393, 11, 466, 407, 4370, 416, 1021, 13, 13004, 20150, 37811, 201, 198, 6738, 11485, 746, 261, 19261, 14706, 1330, 7913, 26227, 11, 14484, 15057, 24564, 11, 14484, 9171, 14706, 201, 198, 201, 198, 11909, 11651, ...
2.702811
249
#!/usr/bin/env python """ Unicode case folding database conversion utility Parses the database and generates a C++ function which implements the case folding algorithm. The database entries are of the form: <code>; <status>; <mapping>; # <name> <status> can be one of four characters: C - Common mappings S - mappings for Simple case folding F - mappings for Full case folding T - special case for Turkish I characters Right now this generates a function which implements simple case folding (C+S entries). """ import sys import re import urllib2 # This variable will body of the mappings function body = "" # Reads file line-by-line, extracts Common and Simple case fold mappings and # returns a (from_char, to_char, from_name) tuple. # Computes the shift (to_char - from_char) in a mapping. def shift(mapping): return mapping[1] - mapping[0] # Computes the stride (from_char2 - from_char1) of two mappings. # Computes the stride of a list of mappings. The list should have at least two # mappings. All mappings in the list are assumed to have the same stride. # b is a list of mappings. All the mappings are assumed to have the same # shift and the stride between adjecant mappings (if any) is constant. current_block = [] f = urllib2.urlopen(sys.argv[1]) for m in mappings(f): if len(current_block) == 0: current_block.append(m) continue if shift(current_block[0]) != shift(m): # Incompatible shift, start a new block. dump_block(current_block) current_block = [m] continue if len(current_block) == 1 or stride(current_block) == stride2(current_block[-1], m): current_block.append(m) continue # Incompatible stride, start a new block. 
dump_block(current_block) current_block = [m] f.close() dump_block(current_block) print '//===---------- Support/UnicodeCaseFold.cpp -------------------------------===//' print '//' print '// This file was generated by utils/unicode-case-fold.py from the Unicode' print '// case folding database at' print '// ', sys.argv[1] print '//' print '// To regenerate this file, run:' print '// utils/unicode-case-fold.py \\' print '// "{}" \\'.format(sys.argv[1]) print '// > lib/Support/UnicodeCaseFold.cpp' print '//' print '//===----------------------------------------------------------------------===//' print '' print '#include "llvm/Support/Unicode.h"' print '' print "int llvm::sys::unicode::foldCharSimple(int C) {" print body print " return C;" print "}"
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 37811, 198, 3118, 291, 1098, 1339, 29909, 6831, 11315, 10361, 198, 198, 47, 945, 274, 262, 6831, 290, 18616, 257, 327, 4880, 2163, 543, 23986, 262, 1339, 198, 11379, 278, 11862, 13, 38...
3.021505
837
import itertools import json import logging import re from django.views.generic import TemplateView from django.http import HttpResponse from django.views import View from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin from django.views.decorators.csrf import csrf_exempt from django.shortcuts import render, redirect from django.urls import reverse from django.utils import timezone from django.utils.decorators import method_decorator from django_chunk_upload_handlers.clam_av import VirusFoundInFileException from core.base import GroupRequiredMixin from core.utils import ( deep_index_items_by, deep_index_items_by_exists, get, key_by, index_users_by_group, compact_list, submission_contact, public_login_url, parse_notify_template, parse_api_datetime, pluck, to_json, from_json, deep_update, internal_redirect, is_date, notify_footer, notify_contact_email, ) from django_countries import countries from django.conf import settings from cases.submissions import SUBMISSION_TYPE_HELPERS, get_submission_deadline from cases.utils import decorate_orgs from core.constants import ( ALL_REGION_ALLOWED_TYPE_IDS, SECURITY_GROUP_TRA_HEAD_OF_INVESTIGATION, SECURITY_GROUP_TRA_LEAD_INVESTIGATOR, SECURITY_GROUPS_TRA, SECURITY_GROUP_TRA_ADMINISTRATOR, SECURITY_GROUPS_TRA_ADMINS, SECURITY_GROUP_ORGANISATION_OWNER, SUBMISSION_TYPE_QUESTIONNAIRE, SUBMISSION_TYPE_APPLICATION, SUBMISSION_NOTICE_TYPE_INVITE, SUBMISSION_NOTICE_TYPE_DEFICIENCY, SUBMISSION_TYPE_THIRD_PARTY, CASE_ROLE_AWAITING_APPROVAL, CASE_ROLE_REJECTED, CASE_ROLE_APPLICANT, CASE_ROLE_PREPARING, DIRECTION_TRA_TO_PUBLIC, ) from trade_remedies_client.mixins import TradeRemediesAPIClientMixin from trade_remedies_client.exceptions import APIException logger = logging.getLogger(__name__) org_fields = json.dumps( { "Organisation": { "id": 0, "has_non_draft_subs": 0, "gov_body": 0, "has_roi": 0, } } )
[ 11748, 340, 861, 10141, 198, 11748, 33918, 198, 11748, 18931, 198, 11748, 302, 198, 6738, 42625, 14208, 13, 33571, 13, 41357, 1330, 37350, 7680, 198, 6738, 42625, 14208, 13, 4023, 1330, 367, 29281, 31077, 198, 6738, 42625, 14208, 13, 3357...
2.417978
890
#!/usr/bin/env python3 import numpy as np from sklearn.neighbors import KernelDensity # Relative tolerance (in percent) for some comparisons on measured data. TOLERANCE = 25 # Lower tolerance for comparison of measured data LTOL = 100 - TOLERANCE # Upper tolerance for comparison of measured data UTOL = 100 + TOLERANCE # Resolution of the raw input buffer data. Corresponds to 2 pulses of each 26.3 at 38 kHz. MICROS_PER_TICK = 50 # Value is subtracted from all marks and added to all spaces before decoding, to compensate for the signal forming of different IR receiver modules. MARK_EXCESS_MICROS = 20 NEC_ADDRESS_BITS = 16 # 16 bit address or 8 bit address and 8 bit inverted address NEC_COMMAND_BITS = 16 # Command and inverted command NEC_BITS = (NEC_ADDRESS_BITS + NEC_COMMAND_BITS) NEC_UNIT = 560 NEC_HEADER_MARK = (16 * NEC_UNIT) # 9000 NEC_HEADER_SPACE = (8 * NEC_UNIT) # 4500 NEC_BIT_MARK = NEC_UNIT NEC_ONE_SPACE = (3 * NEC_UNIT) # 1690 NEC_ZERO_SPACE = NEC_UNIT PROTOCOL_IS_LSB_FIRST = False PROTOCOL_IS_MSB_FIRST = True #C8E880=? 
#131780 data_onoff=[3016,1561,344,1186,343,1189,343,425,341,421,348,1185,348,425,341,424,346,419,345,1189,343,1187,342,1188,341,428,342,1184,347,425,344,439,328,423,351,415,351,414,348,428,342,1188,341,436,330,424,348,421,343,422,348,8272,3011,1563,343,1185,344,1183,346,422,346,422,349,1182,346,422,344,425,345,421,349,1185,342,1187,348,1184,342,422,348,1183,346,423,346,419,351,419,348,424,344,427,340,445,323,1190,342,442,325,423,345,422,347,419,348,8272,3014,1559,348,1201,326,1206,326,419,348,425,343,1183,347,419,349,424,343,427,340,1189,343,1186,343,1187,342,422,348,1184,344,436,330,422,351,423,344,424,341,422,348,438,329,1205,324,425,343,422,351,419,348,425,344] #131720 data_30=[3015,1558,345,1183,346,1201,329,419,347,426,344,1185,347,419,345,444,325,442,328,1201,328,1204,325,1204,330,418,345,1186,343,422,348,422,348,425,342,445,322,1205,325,425,342,426,346,419,345,424,345,440,327,426,345,8262,3012,1562,341,1189,344,1186,341,424,345,424,343,1186,343,429,341,425,343,425,345,1185,344,1186,343,1187,346,425,341,1187,340,440,305,448,354,404,357,419,350,1185,341,426,341,440,329,419,348,426,320,448,345,421,345,8260,3013,1563,343,1187,352,1166,328,450,342,439,328,1208,323,425,343,421,347,422,349,1187,342,1186,352,1165,353,439,330,1204,300,453,341,424,342,428,318,462,333,1203,299,446,346,424,344,428,349,431,301,446,348,424,342] #131750 
data_50=[3020,1555,345,1188,344,1188,342,419,347,424,346,1183,345,419,351,424,342,441,326,1204,328,1201,326,1205,324,422,356,1176,348,419,344,422,350,425,342,1186,343,416,350,1185,345,423,347,419,347,421,348,442,331,418,348,8258,3016,1558,345,1184,346,1183,346,425,342,421,348,1188,343,424,344,417,349,422,348,1185,344,1184,348,1181,346,419,350,1187,342,424,345,424,343,422,350,1183,343,442,328,1201,327,419,349,423,346,424,343,421,349,435,331,8258,3018,1557,346,1184,348,1181,346,422,347,423,344,1186,346,419,354,415,355,407,350,1186,343,1190,343,1183,346,419,348,1203,326,419,350,419,348,424,344,1184,344,422,346,1187,343,417,350,421,348,419,347,442,325,425,346] #131760 data_100=[3023,1556,346,1183,346,1183,346,419,349,422,348,1184,348,417,352,419,350,420,347,1183,348,1201,328,1185,346,422,345,1185,358,412,345,422,348,419,347,441,328,1187,345,1183,346,425,343,424,346,422,344,440,329,439,332,8267,3017,1558,346,1185,344,1185,344,423,346,423,347,1184,345,420,349,423,346,438,328,1190,342,1184,346,1180,355,416,349,1199,330,419,351,419,351,418,345,422,348,1186,345,1202,327,422,350,414,353,421,348,420,346,419,350,8268,3017,1560,343,1187,345,1202,330,414,352,417,356,1178,348,418,350,417,352,417,350,1184,347,1182,348,1185,344,438,331,1189,343,441,325,417,353,419,362,397,357,1184,346,1184,348,424,342,425,345,419,349,436,331,425,345] # 070710 
data_brighten=[3021,1557,348,1183,348,1183,345,1184,344,423,347,437,330,421,348,421,349,418,348,1184,348,1182,351,1197,329,421,348,421,348,417,350,422,347,423,347,1185,343,419,350,440,327,441,329,421,348,421,353,418,344,423,346,8252,3019,1559,347,1184,343,1184,345,1184,348,417,350,419,350,424,345,439,328,435,335,1201,328,1188,341,1186,347,438,328,441,330,420,347,419,349,423,345,1185,345,427,344,438,328,420,349,416,354,418,348,425,344,424,346,8252,3017,1576,329,1202,327,1186,349,1180,346,419,360,429,326,442,328,422,345,419,350,1184,346,1184,347,1203,330,418,345,422,348,422,351,435,331,437,330,1200,329,419,350,421,345,425,345,419,351,434,332,442,327,421,346,8255,3017,1558,345,1185,348,1199,331,1200,327,418,353,420,347,425,343,422,348,423,344,1189,342,1181,348,1184,345,424,346,436,331,437,332,421,349,420,346,1185,347,425,342,424,346,439,327,419,350,419,348,423,347,422,347,8251,3018,1561,342,1187,342,1185,347,1203,327,417,352,421,348,424,343,419,349,438,349,1155,354,1186,346,1186,344,422,347,419,347,436,333,420,349,424,343,1184,348,421,346,423,346,419,351,438,328,423,346,422,348,421,345] # 070730 data_darken=[3018,1562,345,1189,372,1147,353,1184,348,423,347,420,346,423,346,424,345,422,347,1185,345,1183,346,1184,348,417,352,419,348,424,346,424,344,423,347,1185,344,1184,345,419,350,424,345,427,343,439,331,414,353,419,349,8261,3018,1558,351,1182,343,1190,350,1176,346,425,345,425,340,422,348,436,333,422,347,1201,328,1186,343,1190,343,424,341,443,327,421,348,417,351,419,352,1181,345,1186,348,415,353,421,345,424,346,419,347,428,341,441,329,8263,3018,1556,347,1188,342,1189,341,1186,346,423,347,440,336,398,361,421,346,428,341,1184,345,1187,342,1185,347,421,349,423,343,423,347,424,344,426,341,1186,343,1189,344,423,343,441,328,417,353,417,349,424,346,423,346] # 131740 
data_nightlight=[3020,1557,348,1185,350,1180,350,419,349,421,348,1185,346,419,348,441,327,442,331,1180,348,1184,348,1188,343,424,346,1186,343,419,350,424,345,424,346,419,350,421,350,1201,329,421,347,417,352,419,350,421,348,421,349,8260,3019,1557,351,1183,346,1184,348,422,347,419,347,1204,327,441,328,441,328,437,335,1184,344,1184,349,1185,346,438,331,1185,348,419,346,426,343,420,350,418,350,420,350,1183,352,418,347,417,354,423,344,422,347,417,354,8259,3019,1557,348,1189,343,1184,346,421,345,440,331,1184,346,422,345,424,345,438,331,1186,346,1186,344,1184,345,419,350,1184,347,421,348,417,353,419,347,424,345,424,346,1184,345,423,346,423,346,427,342,419,353,419,345] data_hitachi=[8917,4558,525,590,525,1725,526,1728,527,590,524,1727,526,606,513,1726,521,592,527,1725,525,594,528,605,509,1727,526,588,531,1723,527,588,532,1736,511,1726,526,588,531,583,533,593,529,1720,529,1723,526,1730,523,588,530,588,528,1722,528,1727,535,1703,537,586,534,603,512,594,525,1728,522,39873] #a = np.array(data_nightlight) a = np.array(data_hitachi) #show_aeha(a) decode_nec(a)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 1341, 35720, 13, 710, 394, 32289, 1330, 32169, 35, 6377, 198, 198, 2, 45344, 15621, 357, 259, 1411, 8, 329, 617, 17909, 319, 8630, 1366, ...
2
3,319
import torch from torch import nn from mmcv.cnn.utils import constant_init, kaiming_init
[ 11748, 28034, 198, 6738, 28034, 1330, 299, 77, 198, 6738, 8085, 33967, 13, 66, 20471, 13, 26791, 1330, 6937, 62, 15003, 11, 479, 1385, 278, 62, 15003, 198 ]
3.178571
28
import json import requests from datetime import datetime from playsound import playsound tday=datetime.today().strftime('%Y-%m-%d') right_now=datetime.today().strftime('%I-%M-%p') response = requests.get("https://www.londonprayertimes.com/api/times/?format=json&key=0239f686-4423-408e-9a0c-7968a403d197&year=&month=") data=response.json() for key,value in data.items(): if value >= '03:30' and value < '06:00': print('It is asr time') #playsound('/home/danish/Downloads/adan.mp3')
[ 11748, 33918, 198, 198, 11748, 7007, 198, 198, 6738, 4818, 8079, 1330, 4818, 8079, 198, 198, 6738, 5341, 633, 1330, 5341, 633, 198, 198, 83, 820, 28, 19608, 8079, 13, 40838, 22446, 2536, 31387, 10786, 4, 56, 12, 4, 76, 12, 4, 67, ...
2.380734
218
import pytest from django.test import TestCase from rest_framework import serializers as drf_serializers from pokemon import models, serializers
[ 11748, 12972, 9288, 198, 6738, 42625, 14208, 13, 9288, 1330, 6208, 20448, 198, 6738, 1334, 62, 30604, 1330, 11389, 11341, 355, 1553, 69, 62, 46911, 11341, 198, 198, 6738, 43962, 1330, 4981, 11, 11389, 11341, 628 ]
4.083333
36
from sqlpuzzle.exceptions import InvalidArgumentException __all__ = ('parse_args',) # pylint: disable=dangerous-default-value,keyword-arg-before-vararg def parse_args(options={}, *args, **kwds): """ Parser of arguments. dict options { int min_items: Min of required items to fold one tuple. (default: 1) int max_items: Count of items in one tuple. Last `max_items-min_items` items is by default set to None. (default: 1) bool allow_dict: Flag allowing dictionary as first (and only one) argument or dictinary as **kwds. (default: False) bool allow_list: Flag allowing list as first (and only one) argument. (default: False) } Examples: calling with min_items=1, max_items=2, allow_dict=False: arg1, arg2 => ((arg1, None), (arg2, None)) (arg1a, arg1b), arg2 => ((arg1a, arg1b), arg2, None)) arg1=val1 => FAIL {key1: val1} => FAIL calling with min_items=2, max_items=3, allow_dict=True: arg1, arg2 => ((arg1, arg2, None),) arg1, arg2, arg3 => ((arg1, arg2, arg3),) (arg1a, arg1b, arg1c) => ((arg1a, arg1b, arg1c),) arg1=val1, arg2=val2 => ((arg1, val1, None), (arg2, val2, None)) {key1: val1, key2: val2} => ((key1, val1, None), (key2, val2, None)) (arg1a, arg1b), arg2a, arg2b => FAIL """ parser_options = ParserOptions(options) parser_input = ParserInput(args, kwds) parser = Parser(parser_options, parser_input) parser.parse() return parser.output_data # pylint: disable=too-few-public-methods class Parser: def __init__(self, options, input_data): self.options = options self.input_data = input_data self.output_data = []
[ 6738, 44161, 79, 9625, 13, 1069, 11755, 1330, 17665, 28100, 1713, 16922, 198, 198, 834, 439, 834, 796, 19203, 29572, 62, 22046, 3256, 8, 628, 198, 2, 279, 2645, 600, 25, 15560, 28, 38537, 516, 12, 12286, 12, 8367, 11, 2539, 4775, 12...
2.206024
830
from __future__ import unicode_literals from django.contrib.auth.models import User from djblets.webapi.errors import PERMISSION_DENIED from reviewboard.reviews.models import ScreenshotComment from reviewboard.webapi.resources import resources from reviewboard.webapi.tests.base import BaseWebAPITestCase from reviewboard.webapi.tests.mimetypes import ( screenshot_comment_item_mimetype, screenshot_comment_list_mimetype) from reviewboard.webapi.tests.mixins import ( BasicTestsMetaclass, ReviewRequestChildItemMixin, ReviewRequestChildListMixin) from reviewboard.webapi.tests.mixins_comment import ( CommentItemMixin, CommentListMixin) from reviewboard.webapi.tests.urls import ( get_review_screenshot_comment_item_url, get_review_screenshot_comment_list_url)
[ 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 27530, 1330, 11787, 198, 6738, 42625, 903, 912, 13, 12384, 15042, 13, 48277, 1330, 19878, 44, 40373, 62, 41819, 1976...
3.072797
261
""" Command line tool """ import asyncio from qbapi.request import create_request from qbapi.services.clients import Producer, Consumer
[ 37811, 198, 21575, 1627, 2891, 198, 37811, 198, 11748, 30351, 952, 198, 198, 6738, 10662, 65, 15042, 13, 25927, 1330, 2251, 62, 25927, 198, 6738, 10662, 65, 15042, 13, 30416, 13, 565, 2334, 1330, 30436, 11, 18110, 628 ]
3.631579
38
import unittest import datetime import rdflib # needed for eval(repr(...)) below from rdflib.term import Literal, URIRef, _XSD_DOUBLE, bind, _XSD_BOOLEAN from rdflib.namespace import XSD if __name__ == "__main__": unittest.main()
[ 11748, 555, 715, 395, 198, 11748, 4818, 8079, 198, 198, 11748, 374, 67, 2704, 571, 220, 1303, 2622, 329, 5418, 7, 260, 1050, 7, 986, 4008, 2174, 198, 6738, 374, 67, 2704, 571, 13, 4354, 1330, 25659, 1691, 11, 37902, 4663, 891, 11, ...
2.46
100
import telegram from emoji import emojize from .base import TextMessageBase
[ 11748, 573, 30536, 198, 6738, 44805, 1330, 795, 13210, 1096, 198, 198, 6738, 764, 8692, 1330, 8255, 12837, 14881, 628 ]
3.9
20
import os os.environ['CUDA_VISIBLE_DEVICES']='0' from common import * from dataset import * from model import * #------------------------------------ # main ################################################################# if __name__ == '__main__': print( '%s: calling main function ... ' % os.path.basename(__file__)) run_train()
[ 11748, 28686, 198, 418, 13, 268, 2268, 17816, 43633, 5631, 62, 29817, 34563, 62, 39345, 34444, 20520, 11639, 15, 6, 198, 198, 6738, 2219, 220, 1330, 1635, 198, 6738, 27039, 1330, 1635, 198, 6738, 2746, 220, 220, 1330, 1635, 628, 628, ...
3.50495
101
import requests import os get_json('/Users/yuchenli/Downloads/city_geojson-master', 100000)
[ 11748, 7007, 198, 11748, 28686, 198, 1136, 62, 17752, 10786, 14, 14490, 14, 88, 1229, 831, 4528, 14, 10002, 82, 14, 19205, 62, 469, 13210, 1559, 12, 9866, 3256, 1802, 830, 8 ]
2.84375
32
# from sklearn.cluster import DBSCAN,KMeans # # # def run(data,radius=300): # res={} # # epsilon=0.001, min_samples=200 # epsilon = radius / 100000 # # epsilon = 0.003 # min_samples = 100 # db = DBSCAN(eps=epsilon, min_samples=min_samples) # # eps # # min_samples,, # y_pred = db.fit_predict(data) # # print(y_pred) # # df_user_info['label'] = y_pred # n_clusters_ = len(set(y_pred)) - (1 if -1 in y_pred else 0) # # if n_clusters_<1: # model = KMeans(n_clusters=1, random_state=0) # model.fit(data) # centroid = model.cluster_centers_ # res['point']=
[ 2, 422, 1341, 35720, 13, 565, 5819, 1330, 360, 4462, 44565, 11, 42, 5308, 504, 198, 2, 198, 2, 198, 2, 825, 1057, 7, 7890, 11, 42172, 28, 6200, 2599, 198, 2, 220, 220, 220, 220, 581, 34758, 92, 198, 2, 220, 220, 220, 220, 1303...
1.915916
333
# (c) Copyright [2018-2021] Micro Focus or one of its affiliates. # Licensed under the Apache License, Version 2.0 (the "License"); # You may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest, warnings from verticapy import vDataFrame, drop_table from verticapy import set_option set_option("print_info", False)
[ 2, 357, 66, 8, 15069, 685, 7908, 12, 1238, 2481, 60, 4527, 17061, 393, 530, 286, 663, 29116, 13, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 921, 743, 407, 779, 428, 2393, ...
3.743719
199
import re from collections import Counter
[ 11748, 302, 198, 6738, 17268, 1330, 15034, 198 ]
5.25
8
''' - Leetcode problem: 23 - Difficulty: Hard - Brief problem description: Merge k sorted linked lists and return it as one sorted list. Analyze and describe its complexity. Example: Input: [ 1->4->5, 1->3->4, 2->6 ] Output: 1->1->2->3->4->4->5->6 - Solution Summary: - Used Resources: --- Bo Zhou ''' # Definition for singly-linked list. # class ListNode: # def __init__(self, val=0, next=None): # self.val = val # self.next = next
[ 7061, 6, 198, 12, 1004, 316, 8189, 1917, 25, 2242, 198, 198, 12, 27419, 25, 6912, 198, 198, 12, 22821, 1917, 6764, 25, 198, 198, 13102, 469, 479, 23243, 6692, 8341, 290, 1441, 340, 355, 530, 23243, 1351, 13, 16213, 2736, 290, 6901, ...
2.521505
186
from django.views import generic
[ 6738, 42625, 14208, 13, 33571, 1330, 14276 ]
4.571429
7
# Written by Ivan Sapozhkov and Denis Chagin <denis.chagin@emlid.com> # # Copyright (c) 2016, Emlid Limited # All rights reserved. # # Redistribution and use in source and binary forms, # with or without modification, # are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND # FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS # BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, # OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED # AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. if __name__ == '__main__': network = {'ssid': "MySSID", 'password': "NewPassword", 'security': "wpaeap", 'identity': "alex@example.com"} conv = convert_to_wpas_network(network) reconv = convert_to_wificontrol_network(conv) print(conv, reconv)
[ 2, 22503, 416, 21798, 35980, 8590, 71, 21862, 290, 33089, 609, 23183, 1279, 6559, 271, 13, 354, 23183, 31, 368, 75, 312, 13, 785, 29, 198, 2, 198, 2, 15069, 357, 66, 8, 1584, 11, 2295, 75, 312, 15302, 198, 2, 1439, 2489, 10395, ...
3.355357
560
# -*- coding: utf-8 -*- """ This module is distributed as part of the Laminaria Core (Python Version). Get the Source Code in GitHub: https://github.com/MrKelpy/LaminariaCore The LaminariaCore is Open Source and distributed under the MIT License """ # Built-in Imports import datetime import random import asyncio import os # Third Party Imports import screeninfo from discord.ext import commands import discord from fpdf import FPDF # Local Application Imports ############################################################################### ### DATE & TIME ### ############################################################################### def twochars(arg): """ Formats a string of two characters into the format of (0X), useful for date formatting. :param arg: The string :return: String """ if len(arg) == 1: return f"0{arg}" return arg def get_formatted_date(date: datetime, include_seconds: bool = False): """ Returns a given date in the handy DD/MM/YY - HH:MM:SS format. :param date: The date to be formatted -> datetime.datetime :param include_seconds: If set to True, include seconds in the format. :return: String """ date_string = f"{twochars(str(date.day))}/{twochars(str(date.month))}/{twochars(str(date.year))} - " \ f"{twochars(str(date.hour))}:{twochars(str(date.minute))}" if include_seconds: date_string += f":{twochars(str(date.second))}" return date_string def get_formatted_date_now(include_seconds: bool = False, formatting: int = 1): """ Returns the current date in the handy DD/MM/YY - HH:MM:SS format (default) or in the specified one. :param formatting: Format type -> int :param include_seconds: If set to True, include seconds in the format. 
:return: String """ now = datetime.datetime.now() if formatting == 1: date_string = f"{twochars(str(now.day))}/{twochars(str(now.month))}/{twochars(str(now.year))} - " \ f"{twochars(str(now.hour))}:{twochars(str(now.minute))}" elif formatting == 2: date_string = f"{twochars(str(now.day))}.{twochars(str(now.month))}.{twochars(str(now.year))}_" \ f"{twochars(str(now.hour))}.{twochars(str(now.minute))}" else: date_string = f"{twochars(str(now.day))}/{twochars(str(now.month))}/{twochars(str(now.year))} - " \ f"{twochars(str(now.hour))}:{twochars(str(now.minute))}" if include_seconds: date_string += f":{twochars(str(now.second))}" return date_string def time_until_midnight(): """ Get seconds left until midnight """ tomorrow = datetime.date.today() + datetime.timedelta(days=1) timedelta_until_midnight = datetime.datetime.combine(tomorrow, datetime.time.min) - datetime.datetime.now() return timedelta_until_midnight.seconds ############################################################################### ### GENERAL ### ############################################################################### def get_absolute_screen_coords(relx, rely): """ Returns absolute screen coordinates based off the given relative coordinates. For instance, in a 1920x720 screen, the x50, y50 input would be x960, y360. :param relx: Relative X Coordinate :param rely: Relative Y Coordinate :return: Absolute Coordinates """ monitor = screeninfo.get_monitors()[0] x = (relx*monitor.width)/100 y = (rely*monitor.height)/100 return x, y def get_relative_screen_coords(x, y): """ Returns relative screen coordinates based off the given absolute coordinates. The relative coordinates are percentage-based values calculates relatively to the monitor specs and the given coords. 
:param x: Absolute X :param y: Absolute Y :return: """ monitor = screeninfo.get_monitors()[0] relx = (x*100)/monitor.width rely = (y*100)/monitor.height return relx, rely ############################################################################### ### PLACEHOLDERS ### ############################################################################### ############################################################################### ### DISCORD.PY ### ###############################################################################
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 1212, 8265, 318, 9387, 355, 636, 286, 262, 406, 5669, 10312, 7231, 357, 37906, 10628, 737, 198, 3855, 262, 8090, 6127, 287, 21722, 25, 198, 5450, 1378, 12567...
2.68195
1,723
# -------------------------------------------------------------------------------------- # Copyright (c) 2013-2021, Nucleic Development Team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file LICENSE, distributed with this software. # -------------------------------------------------------------------------------------- """ Demonstrate all the ways to initialize a value 1. Pass the value directly 2. Assign the default value explicitly 3. Provide the value during initialization of the object 4. Provide factory callable that returns a value 5. Use a _default_* static method """ import sys from atom.api import Atom, Int, Str def get_last_name(): """Return a last name based on the system byteorder.""" return sys.byteorder.capitalize() if __name__ == "__main__": bob = Person(address="101 Main") print((bob.first_name, bob.last_name, bob.age)) print(bob.mother)
[ 2, 16529, 19351, 438, 198, 2, 15069, 357, 66, 8, 2211, 12, 1238, 2481, 11, 399, 14913, 291, 7712, 4816, 13, 198, 2, 198, 2, 4307, 6169, 739, 262, 2846, 286, 262, 40499, 347, 10305, 13789, 13, 198, 2, 198, 2, 383, 1336, 5964, 318...
3.912863
241
#!/usr/bin/env python # # Copyright (C) 2007 British Broadcasting Corporation and Kamaelia Contributors(1) # All Rights Reserved. # # You may only modify and redistribute this under the terms of any of the # following licenses(2): Mozilla Public License, V1.1, GNU General # Public License, V2.0, GNU Lesser General Public License, V2.1 # # (1) Kamaelia Contributors are listed in the AUTHORS file and at # http://kamaelia.sourceforge.net/AUTHORS - please extend this file, # not this notice. # (2) Reproduced in the COPYING file, and at: # http://kamaelia.sourceforge.net/COPYING # Under section 3.5 of the MPL, we are using this text since we deem the MPL # notice inappropriate for this file. As per MPL/GPL/LGPL removal of this # notice is prohibited. # # Please contact us via: kamaelia-list-owner@lists.sourceforge.net # to discuss alternative licensing. # ------------------------------------------------------------------------- # """\ ============================================= Parsing and Creation of YUV4MPEG format files ============================================= YUV4MPEGToFrame parses YUV4MPEG format data sent to its "inbox" inbox and sends video fram data structures to its "outbox" outbox. FrameToYUV4MPEG does the reverse - taking frame data structures sent to its "inbox" inbox and outputting YUV4MPEG format data to its "outbox" outbox." The YUV4MPEG file format is supported by many tools, such as mjpegtools, mplayer/mencoder, and ffmpeg. 
Example Usage ------------- Playback a YUV4MPEG format file:: Pipeline( RateControlledFileReader("video.yuv4mpeg",readmode="bytes", ...), YUV4MPEGToFrame(), VideoOverlay() ).run() Decode a dirac encoded video file to a YUV4MPEG format file:: Pipeline( RateControlledFileReader("video.dirac",readmode="bytes", ...), DiracDecoder(), FrameToYUV4MPEG(), SimpleFileWriter("output.yuv4mpeg") ).run() YUV4MPEGToFrame Behaviour ------------------------- Send binary data as strings containing YUV4MPEG format data to the "inbox" inbox and frame data structures will be sent out of the "outbox" outbox as soon as they are parsed. See below for a description of the uncompressed frame data structure format. This component supports sending data out of its outbox to a size limited inbox. If the size limited inbox is full, this component will pause until it is able to send out the data. Data will not be consumed from the inbox if this component is waiting to send to the outbox. If a producerFinished message is received on the "control" inbox, this component will complete parsing any data pending in its inbox, and finish sending any resulting data to its outbox. It will then send the producerFinished message on out of its "signal" outbox and terminate. If a shutdownMicroprocess message is received on the "control" inbox, this component will immediately send it on out of its "signal" outbox and immediately terminate. It will not complete processing, or sending on any pending data. FrameToYUV4MPEG Behaviour ------------------------- Send frame data structures to the "inbox" inbox of this component. YUV4MPEG format binary string data will be sent out of the "outbox" outbox. See below for a description of the uncompressed frame data structure format. The header data for the YUV4MPEG file is determined from the first frame. All frames sent to this component must therefore be in the same pixel format and size, otherwise the output data will not be valid YUV4MPEG. 
This component supports sending data out of its outbox to a size limited inbox. If the size limited inbox is full, this component will pause until it is able to send out the data. Data will not be consumed from the inbox if this component is waiting to send to the outbox. If a producerFinished message is received on the "control" inbox, this component will complete parsing any data pending in its inbox, and finish sending any resulting data to its outbox. It will then send the producerFinished message on out of its "signal" outbox and terminate. If a shutdownMicroprocess message is received on the "control" inbox, this component will immediately send it on out of its "signal" outbox and immediately terminate. It will not complete processing, or sending on any pending data. ========================= UNCOMPRESSED FRAME FORMAT ========================= A frame is a dictionary data structure. It must, at minimum contain the first 3 ("yuv", "size" and "pixformat"):: { "yuv" : (y_data, u_data, v_data) # a tuple of strings "size" : (width, height) # in pixels "pixformat" : pixelformat # format of raw video data "frame_rate" : fps # frames per second "interlaced" : 0 or not 0 # non-zero if the frame is two interlaced fields "topfieldfirst" : 0 or not 0 # non-zero the first field comes first in the data "pixel_aspect" : fraction # aspect ratio of pixels "sequence_meta" : metadata # string containing extended metadata # (no whitespace or control characters) } All other fields are optional when providing frames to FrameToYUV4MPEG. YUV4MPEGToFrame only guarantees to fill inthe YUV data itself. All other fields will be filled in if the relevant header data is detected in the file. 
The pixel formats recognised (and therefore supported) are:: "YUV420_planar" "YUV411_planar" "YUV422_planar" "YUV444_planar" "YUV4444_planar" "Y_planar" """ from Axon.Component import component #from Axon.Ipc import WaitComplete from Axon.Ipc import shutdownMicroprocess, producerFinished from Axon.AxonExceptions import noSpaceInBox import re from Kamaelia.Support.Data.Rationals import rational def parse_seq_tags(fields): """Parses YUV4MPEG header tags""" params = {} tags = {} while fields: m = re.match("^ (.)(\S*)(.*)$", fields) (tag,value,fields) = m.groups() tags[tag] = value if "W" in tags and "H" in tags: params['size'] = (int(tags["W"]), int(tags["H"])) else: raise if "C" in tags: C = tags["C"] if C == "420jpeg": # 4:2:0 with JPEG/MPEG-1 siting (default) params['pixformat'] = "YUV420_planar" params['chroma_size'] = (params['size'][0]/2, params['size'][1]/2) elif C == "420mpeg2": # 4:2:0 with MPEG-2 siting params['pixformat'] = "YUV420_planar" params['chroma_size'] = (params['size'][0]/2, params['size'][1]/2) elif C == "420paldv": # 4:2:0 with PAL-DV siting params['pixformat'] = "YUV420_planar" params['chroma_size'] = (params['size'][0]/2, params['size'][1]/2) elif C == "411": # 4:1:1, cosited params['pixformat'] = "YUV411_planar" params['chroma_size'] = (params['size'][0]/4, params['size'][1]) elif C == "422": # 4:2:2, cosited params['pixformat'] = "YUV422_planar" params['chroma_size'] = (params['size'][0]/2, params['size'][1]) elif C == "444": # 4:4:4 (no subsampling) params['pixformat'] = "YUV444_planar" params['chroma_size'] = (params['size'][0], params['size'][1]) elif C == "444alpha": # 4:4:4 with an alpha channel params['pixformat'] = "YUV4444_planar" params['chroma_size'] = (params['size'][0], params['size'][1]) elif C == "mono": # luma (Y') plane only params['pixformat'] = "Y_planar" params['chroma_size'] = (0,0) else: params['pixformat'] = "YUV420_planar" params['chroma_size'] = (params['size'][0]/2, params['size'][1]/2) if "I" in tags: I = tags["I"] 
if I == "?": # unknown (default) pass elif I == "p": # progressive/none params["interlaced"] = False elif I == "t": # top-field-first params["interlaced"] = True params["topfieldfirst"] = True elif I == "b": # bottom-field-first params["interlaced"] = True params["topfieldfirst"] = False elif I == "m": # mixed-mode: refer to 'I' tag in frame header pass if "F" in tags: m = re.match("^(\d+):(\d+)$",tags["F"]) num, denom = float(m.groups()[0]), float(m.groups()[1]) if denom > 0: params["frame_rate"] = num/denom if "A" in tags: m = re.match("^(\d+):(\d+)$",tags["A"]) num, denom = float(m.groups()[0]), float(m.groups()[1]) if denom > 0: params["pixel_aspect"] = num/denom if "X" in tags: params["sequence_meta"] = tags["X"] return params def parse_frame_tags(fields): """\ Parses YUV4MPEG frame tags. """ params = {} tags = {} while fields: m = re.match("^ (.)(\S*)(.*)$", fields) (tag,value,fields) = m.groups() tags[tag] = value if "I" in tags: x,y,z = tags["I"][0], tags["I"][1], tags["I"][2] if x == "t": # top-field-first params["interlaced"] = True params["topfieldfirst"] = True elif x == "T": # top-field-first and repeat params["interlaced"] = True params["topfieldfirst"] = True elif x == "b": # bottom-field-first params["interlaced"] = True params["topfieldfirst"] = False elif x == "B": # bottom-field-first and repeat params["interlaced"] = True params["topfieldfirst"] = False elif x == "1": # single progressive frame params["interlaced"] = False elif x == "2": # double progressive frame (repeat) params["interlaced"] = False elif x == "3": # triple progressive frame (repeat) params["interlaced"] = False if y == "p": # fields sampled at same time params["interlaced"] = False elif y == "i": # fields sampled at different times params["interlaced"] = True if z == "p": # progressive (subsampling over whole frame) pass elif z == "i": # interlaced (each field subsampled independently) pass elif z == "?": # unknown (allowed only for non-4:2:0 subsampling) pass if "X" in tags: 
params["meta"] = tags["X"] return params __kamaelia_components__ = ( YUV4MPEGToFrame, FrameToYUV4MPEG, ) if __name__ == "__main__": from Kamaelia.Chassis.Pipeline import Pipeline from Kamaelia.File.Reading import RateControlledFileReader from Kamaelia.UI.Pygame.VideoOverlay import VideoOverlay Pipeline( RateControlledFileReader("/data/stream.yuv",readmode="bytes",rate=25*(608256+128)), YUV4MPEGToFrame(), FrameToYUV4MPEG(), YUV4MPEGToFrame(), VideoOverlay(), ).run()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 198, 2, 15069, 357, 34, 8, 4343, 3517, 32250, 10501, 290, 509, 1689, 25418, 25767, 669, 7, 16, 8, 198, 2, 220, 220, 220, 220, 1439, 6923, 33876, 13, 198, 2, 198, 2, 921, 743, ...
2.458706
4,589
import click from flask.cli import AppGroup from project import app, db from project.dateutils import berlin_tz from project.services.event import ( get_recurring_events, update_event_dates_with_recurrence_rule, ) event_cli = AppGroup("event") app.cli.add_command(event_cli)
[ 11748, 3904, 198, 6738, 42903, 13, 44506, 1330, 2034, 13247, 198, 198, 6738, 1628, 1330, 598, 11, 20613, 198, 6738, 1628, 13, 4475, 26791, 1330, 18157, 2815, 62, 22877, 198, 6738, 1628, 13, 30416, 13, 15596, 1330, 357, 198, 220, 220, ...
3.031579
95
import os import re import sys import subprocess import pytest from testplan.common.utils.path import change_directory import platform ON_WINDOWS = platform.system() == 'Windows' KNOWN_EXCEPTIONS = [ "TclError: Can't find a usable init\.tcl in the following directories:", # Matplotlib module improperly installed. Will skip Data Science example. "ImportError: lib.*\.so\..+: cannot open shared object file: No such file or directory", # Matplotlib module improperly installed. Will skip Data Science example. "ImportError: No module named sklearn.*", # Missing module sklearn. Will skip Data Science example. "ImportError: No module named Tkinter", # Missing module Tkinter. Will skip Data Science example. "ImportError: No module named _tkinter.*", # Missing module Tkinter. Will skip Data Science example. "RuntimeError: Download pyfixmsg library .*", # Missing module pyfixmsg. Will skip FIX example. "No spec file set\. You should download .*", # Missing FIX spec file. Will skip FIX example. "AttributeError: 'module' object has no attribute 'poll'", "RuntimeError: You need to compile test binary first." # Need to compile cpp binary first. Will skip GTest example. ] SKIP_ON_WINDOWS = [ os.path.join('Cpp', 'GTest', 'test_plan.py'), ] ROOT_DIR_CONTENTS = [ "setup.py", "requirements.txt", "README.rst", "LICENSE.md" ]
[ 11748, 28686, 198, 11748, 302, 198, 11748, 25064, 198, 11748, 850, 14681, 198, 11748, 12972, 9288, 198, 198, 6738, 1332, 11578, 13, 11321, 13, 26791, 13, 6978, 1330, 1487, 62, 34945, 198, 198, 11748, 3859, 198, 198, 1340, 62, 33207, 796...
3.298578
422
from io import StringIO
[ 198, 6738, 33245, 1330, 10903, 9399, 628 ]
3.714286
7
from core import getattr_path from rest_framework import status from credit.tests.test_views.test_credit_list.test_security_credit_list import SecurityCreditListTestCase
[ 6738, 4755, 1330, 651, 35226, 62, 6978, 198, 6738, 1334, 62, 30604, 1330, 3722, 198, 198, 6738, 3884, 13, 41989, 13, 9288, 62, 33571, 13, 9288, 62, 43082, 62, 4868, 13, 9288, 62, 12961, 62, 43082, 62, 4868, 1330, 4765, 23690, 8053, ...
3.822222
45
""" Check for Office file types ViperMonkey is a specialized engine to parse, analyze and interpret Microsoft VBA macros (Visual Basic for Applications), mainly for malware analysis. Author: Philippe Lagadec - http://www.decalage.info License: BSD, see source code or documentation Project Repository: https://github.com/decalage2/ViperMonkey """ # === LICENSE ================================================================== # ViperMonkey is copyright (c) 2015-2016 Philippe Lagadec (http://www.decalage.info) # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Office magic numbers. magic_nums = { "office97" : "D0 CF 11 E0 A1 B1 1A E1", # Office 97 "office2007" : "50 4B 3 4", # Office 2007+ (PKZip) } # PE magic number. 
pe_magic_num = "4D 5A" def is_pe_file(fname, is_data): """ Check to see if the given file is a PE executable. return - True if it is a PE file, False if not. """ # Read the 1st 8 bytes of the file. curr_magic = get_1st_8_bytes(fname, is_data) # See if we the known magic #. return (curr_magic.startswith(pe_magic_num)) def is_office_file(fname, is_data): """ Check to see if the given file is a MS Office file format. return - True if it is an Office file, False if not. """ # Read the 1st 8 bytes of the file. curr_magic = get_1st_8_bytes(fname, is_data) # See if we have 1 of the known magic #s. for typ in magic_nums.keys(): magic = magic_nums[typ] if (curr_magic.startswith(magic)): return True return False
[ 37811, 198, 9787, 329, 4452, 2393, 3858, 198, 198, 53, 9346, 9069, 2539, 318, 257, 16976, 3113, 284, 21136, 11, 16602, 290, 6179, 5413, 198, 53, 4339, 34749, 357, 36259, 14392, 329, 26622, 828, 8384, 329, 18953, 3781, 13, 198, 198, 13...
3.029979
934
from st2tests.base import BaseSensorTestCase from third_party_resource import ThirdPartyResource
[ 6738, 336, 17, 41989, 13, 8692, 1330, 7308, 47864, 14402, 20448, 198, 198, 6738, 2368, 62, 10608, 62, 31092, 1330, 10467, 33553, 26198, 628 ]
4.125
24
import datetime import getpass import logging import os import pathlib import platform import re import smtplib import sys from contextlib import contextmanager from email.message import EmailMessage from functools import wraps import azure.functions as func import click import gspread import pandas as pd from apscheduler.schedulers.background import BlockingScheduler from oauth2client.service_account import ServiceAccountCredentials from selenium import webdriver from selenium.common.exceptions import TimeoutException from selenium.webdriver.chrome.options import Options from selenium.webdriver.common.by import By from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.support.ui import WebDriverWait log = logging.getLogger(__name__) log.setLevel(logging.DEBUG) handler = logging.StreamHandler(sys.stdout) handler.setLevel(logging.DEBUG) formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') handler.setFormatter(formatter) log.addHandler(handler) def run(email, username, email_to, password, gsheet, doc_key): log.info('In run') content = [] for link in os.environ["searchLinks"].split(): content += get_prometheus_apartments(link) formatted_content = format_email(content) if gsheet: log.info('Updating gsheet') update_historical_data(doc_key, content) formatted_content += f'For historical data click the link below:\nhttps://docs.google.com/spreadsheets/d/1XZocxmyQ91e1exBvwDAaSR8Rhavy9WPnwLSz0Z5SKsM/edit?usp=sharing' if email: log.info('Sending email') send_email(username, password, email_to, formatted_content) log.info(content) if __name__ == '__main__': cli()
[ 11748, 4818, 8079, 198, 11748, 651, 6603, 198, 11748, 18931, 198, 11748, 28686, 198, 11748, 3108, 8019, 198, 11748, 3859, 198, 11748, 302, 198, 11748, 895, 83, 489, 571, 198, 11748, 25064, 198, 6738, 4732, 8019, 1330, 4732, 37153, 198, ...
2.947368
589
from PyQt6.QtWidgets import QApplication, QWidget, QFileDialog import PyQt6.QtCore as QtCore import PyQt6.QtGui as QtGui import sys, time, json, requests, traceback, configparser, os import MrWorldwideUI, ConfigurationUI, UpdateManagerUI version = "v1.0.0" def readConfigurationFile(config): try: configFile = open("config.ini") configFile.close() return config.read("config.ini") except: config['general'] = {} config['general']['libretranslate_mirror'] = 'https://translate.astian.org/translate' config['defaults'] = {} config['defaults']['default_source_language'] = LangTypes.ENGLISH config['defaults']['default_target_language'] = LangTypes.SPANISH with open('config.ini', 'w') as configFile: config.write(configFile) configFile.close() return config def main(): global app app = QApplication(sys.argv) app.setQuitOnLastWindowClosed(False) app.setStyle("Fusion") form = MrWorldwide() form.show() app.exec() if __name__ == '__main__': main()
[ 6738, 9485, 48, 83, 21, 13, 48, 83, 54, 312, 11407, 1330, 1195, 23416, 11, 1195, 38300, 11, 1195, 8979, 44204, 198, 11748, 9485, 48, 83, 21, 13, 48, 83, 14055, 355, 33734, 14055, 198, 11748, 9485, 48, 83, 21, 13, 48, 83, 8205, 7...
2.720548
365
# -*- coding: utf-8 -*- # Copyright 2020 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Demo CLI tool for Azure.""" import os from datetime import datetime from typing import TYPE_CHECKING from Crypto.PublicKey import RSA from libcloudforensics import logging_utils from libcloudforensics.providers.azure.internal import account from libcloudforensics.providers.azure.internal import monitoring from libcloudforensics.providers.azure import forensics logging_utils.SetUpLogger(__name__) logger = logging_utils.GetLogger(__name__) if TYPE_CHECKING: import argparse def ListInstances(args: 'argparse.Namespace') -> None: """List instances in Azure subscription. Args: args (argparse.Namespace): Arguments from ArgumentParser. """ az_account = account.AZAccount(args.default_resource_group_name) instances = az_account.compute.ListInstances( resource_group_name=args.resource_group_name) logger.info('Instances found:') for instance in instances.values(): boot_disk = instance.GetBootDisk() logger.info( 'Name: {0:s}, Boot disk: {1:s}'.format(instance.name, boot_disk.name)) def ListDisks(args: 'argparse.Namespace') -> None: """List disks in Azure subscription. Args: args (argparse.Namespace): Arguments from ArgumentParser. 
""" az_account = account.AZAccount(args.default_resource_group_name) disks = az_account.compute.ListDisks( resource_group_name=args.resource_group_name) logger.info('Disks found:') for disk_name, disk in disks.items(): logger.info('Name: {0:s}, Region: {1:s}'.format(disk_name, disk.region)) def CreateDiskCopy(args: 'argparse.Namespace') -> None: """Create an Azure disk copy. Args: args (argparse.Namespace): Arguments from ArgumentParser. """ logger.info('Starting disk copy...') disk_copy = forensics.CreateDiskCopy(args.default_resource_group_name, instance_name=args.instance_name, disk_name=args.disk_name, disk_type=args.disk_type, region=args.region, src_profile=args.src_profile, dst_profile=args.dst_profile) logger.info( 'Done! Disk {0:s} successfully created. You will find it in ' 'your Azure subscription under the name {1:s}.'.format( disk_copy.resource_id, disk_copy.name)) def StartAnalysisVm(args: 'argparse.Namespace') -> None: """Start forensic analysis VM. Args: args (argparse.Namespace): Arguments from ArgumentParser. """ attach_disks = [] if args.attach_disks: attach_disks = args.attach_disks.split(',') # Check if attach_disks parameter exists and if there # are any empty entries. if not (attach_disks and all(elements for elements in attach_disks)): logger.error('error: parameter --attach_disks: {0:s}'.format( args.attach_disks)) return ssh_public_key = args.ssh_public_key if not ssh_public_key: # According to https://docs.microsoft.com/cs-cz/samples/azure-samples/ # resource-manager-python-template-deployment/resource-manager-python- # template-deployment/ there's no API to generate a new SSH key pair in # Azure, so we do this manually... 
ssh_public_key = _GenerateSSHKeyPair(args.instance_name) logger.info('Starting analysis VM...') vm = forensics.StartAnalysisVm(args.default_resource_group_name, args.instance_name, int(args.disk_size), ssh_public_key, cpu_cores=int(args.cpu_cores), memory_in_mb=int(args.memory_in_mb), region=args.region, attach_disks=attach_disks, dst_profile=args.dst_profile) logger.info('Analysis VM started.') logger.info('Name: {0:s}, Started: {1:s}'.format(vm[0].name, str(vm[1]))) def _GenerateSSHKeyPair(vm_name: str) -> str: """Generate a SSH key pair and returns its public key. Both public and private keys will be saved in the current directory. Args: vm_name (str): The VM name for which to generate the key pair. Returns: str: The public key for the generated SSH key pair. Raises: ValueError: If vm_name is None. """ if not vm_name: raise ValueError('Parameter vm_name must not be None.') logger.info('Generating a new SSH key pair for VM: {0:s}'.format(vm_name)) key = RSA.generate(2048) key_name = '{0:s}-ssh'.format(vm_name) public_key = key.publickey().exportKey('OpenSSH') path_public_key = os.path.join(os.getcwd(), key_name + '.pub') private_key = key.exportKey('PEM') path_private_key = os.path.join(os.getcwd(), key_name + '.pem') with open(path_private_key, 'wb') as f: f.write(private_key) with open(path_public_key, 'wb') as f: f.write(public_key) logger.info('SSH key pair generated. Public key saved in {0:s}, private key ' 'saved in {1:s}'.format(path_public_key, path_private_key)) return public_key.decode('utf-8') def ListMetrics(args: 'argparse.Namespace') -> None: """List Azure Monitoring metrics for a resource. Args: args (argparse.Namespace): Arguments from ArgumentParser. 
""" az_account = account.AZAccount(args.default_resource_group_name) az_monitoring = monitoring.AZMonitoring(az_account) metrics = az_monitoring.ListAvailableMetricsForResource(args.resource_id) for metric in metrics: logger.info('Available metric: {0:s}'.format(metric)) def QueryMetrics(args: 'argparse.Namespace') -> None: """Query Azure Monitoring metrics for a resource. Args: args (argparse.Namespace): Arguments from ArgumentParser. Raises: RuntimeError: If from_date or to_date could not be parsed. """ az_account = account.AZAccount(args.default_resource_group_name) az_monitoring = monitoring.AZMonitoring(az_account) from_date, to_date = args.from_date, args.to_date if from_date and to_date: try: from_date = datetime.strptime(from_date, '%Y-%m-%dT%H:%M:%SZ') to_date = datetime.strptime(to_date, '%Y-%m-%dT%H:%M:%SZ') except ValueError as exception: raise RuntimeError( 'Cannot parse date: {0!s}'.format(exception)) from exception metrics = az_monitoring.GetMetricsForResource( args.resource_id, metrics=args.metrics, from_date=from_date, to_date=to_date, interval=args.interval, aggregation=args.aggregation or 'Total', qfilter=args.qfilter) for metric, metric_value in metrics.items(): logger.info('Metric: {0:s}'.format(metric)) for timestamp, value in metric_value.items(): logger.info(' Timestamp: {0:s}, value: {1:s}'.format(timestamp, value))
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 15069, 12131, 3012, 3457, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428...
2.542437
2,922
""" PyBBIO - bbio.py Copyright (c) 2012-2015 - Alexander Hiam <alex@graycat.io> Released under the MIT license https://github.com/graycatlabs/PyBBIO """ import sys, atexit from .platform import platform_init, platform_cleanup from .common import ADDITIONAL_CLEANUP, util_init def bbio_init(): """ Pre-run initialization, i.e. starting module clocks, etc. """ util_init() platform_init() def bbio_cleanup(): """ Post-run cleanup, i.e. stopping module clocks, etc. """ # Run user cleanup routines: for cleanup in ADDITIONAL_CLEANUP: try: cleanup() except Exception as e: # Something went wrong with one of the cleanup routines, but we # want to keep going; just print the error and continue print "*Exception raised trying to call cleanup routine '%s':\n %s" %\ (cleanup, e) platform_cleanup() # The following code detects if Python is running interactively, # and if so initializes PyBBIO on import and registers PyBBIO's # cleanup to be called at exit, otherwise it defines the run() and # stop() methods for the file based control flow: import __main__ if not hasattr(__main__, '__file__'): # We're in the interpreter, see: # http://stackoverflow.com/questions/2356399/tell-if-python-is-in-interactive-mode bbio_init() print "PyBBIO initialized" atexit.register(interactive_cleanup) else: bbio_init() atexit.register(bbio_cleanup) # Imported in a Python file, define run() and stop(): def run(setup, loop): """ The main loop; must be passed a setup and a loop function. First the setup function will be called once, then the loop function wil be called continuously until a stop signal is raised, e.g. CTRL-C or a call to the stop() function from within the loop. """ try: setup() while (True): loop() except KeyboardInterrupt: # Manual exit signal, clean up and exit happy exit(0) def stop(): """ Preferred way for a program to stop itself. """ raise KeyboardInterrupt # Expected happy stop condition in run()
[ 37811, 198, 9485, 33, 3483, 46, 532, 275, 65, 952, 13, 9078, 198, 15069, 357, 66, 8, 2321, 12, 4626, 532, 10009, 367, 1789, 1279, 1000, 87, 31, 44605, 9246, 13, 952, 29, 198, 28728, 739, 262, 17168, 5964, 198, 3740, 1378, 12567, 1...
2.920613
718
""" The Endeavors asset collection has a number of irregular assets. Be careful writing any custom code here. """ from app.assets import endeavors from app import models
[ 37811, 198, 220, 220, 220, 383, 5268, 68, 615, 669, 11171, 4947, 468, 257, 1271, 286, 21388, 6798, 13, 1355, 8161, 198, 220, 220, 220, 3597, 597, 2183, 2438, 994, 13, 198, 198, 37811, 628, 198, 6738, 598, 13, 19668, 1330, 40273, 198...
3.734694
49
import sys from PyQt5 import QtCore, QtWidgets from PyQt5.QtWidgets import QMainWindow, QLabel, QGridLayout, QWidget from PyQt5.QtCore import QSize if __name__ == "__main__": app = QtWidgets.QApplication(sys.argv) mainWin = HelloWindow() mainWin.show() sys.exit( app.exec_() )
[ 11748, 25064, 198, 6738, 9485, 48, 83, 20, 1330, 33734, 14055, 11, 33734, 54, 312, 11407, 198, 6738, 9485, 48, 83, 20, 13, 48, 83, 54, 312, 11407, 1330, 1195, 13383, 27703, 11, 1195, 33986, 11, 1195, 41339, 32517, 11, 1195, 38300, 1...
2.446281
121
#!/usr/bin/env python import os import sys import numpy from setuptools import setup, Extension #include markdown description in pip page this_directory = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(this_directory, 'README.md'), encoding='utf-8') as f: long_description = f.read() # https://github.com/pypa/packaging-problems/issues/84 # no sensible way to include header files by default headers = ['scipybiteopt/biteopt.h', 'scipybiteopt/biteoptort.h', 'scipybiteopt/spheropt.h', 'scipybiteopt/biteaux.h', 'scipybiteopt/nmsopt.h'] module1 = Extension('scipybiteopt.biteopt', sources=get_c_sources(['scipybiteopt/biteopt_py_ext.cpp'], include_headers=(sys.argv[1] == "sdist")), language="c++", include_dirs=[numpy.get_include()], extra_compile_args=['-std=c++11', '-O3'] if os.name != 'nt' else ['-O3']) setup(name='scipybiteopt', version='1.1.1', description="Scipy style wrapper for Aleksey Vaneev's BiteOpt", author='dschmitz89', author_email='danielschmitzsiegen@gmail.com', license='MIT', long_description=long_description, long_description_content_type='text/markdown', url = 'https://github.com/dschmitz89/scipybiteopt', packages = ['scipybiteopt'], ext_modules = [module1], install_requires=[ 'numpy'] )
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 11748, 28686, 198, 11748, 25064, 198, 11748, 299, 32152, 198, 6738, 900, 37623, 10141, 1330, 9058, 11, 27995, 198, 198, 2, 17256, 1317, 2902, 6764, 287, 7347, 2443, 198, 5661, 62, 34945,...
2.270833
624
# This code is part of Qiskit. # # (C) Copyright IBM 2021. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """ =========================================================== Data Processing (:mod:`qiskit_experiments.data_processing`) =========================================================== .. currentmodule:: qiskit_experiments.data_processing Data processing is the act of taking the data returned by the backend and converting it into a format that can be analyzed. It is implemented as a chain of data processing steps that transform various input data, e.g. IQ data, into a desired format, e.g. population, which can be analyzed. These data transformations may consist of multiple steps, such as kerneling and discrimination. Each step is implemented by a :class:`~qiskit_experiments.data_processing.data_action.DataAction` also called a `node`. The data processor implements the :meth:`__call__` method. Once initialized, it can thus be used as a standard python function: .. code-block:: python processor = DataProcessor(input_key="memory", [Node1(), Node2(), ...]) out_data = processor(in_data) The data input to the processor is a sequence of dictionaries each representing the result of a single circuit. The output of the processor is a numpy array whose shape and data type depend on the combination of the nodes in the data processor. Uncertainties that arise from quantum measurements or finite sampling can be taken into account in the nodes: a standard error can be generated in a node and can be propagated through the subsequent nodes in the data processor. 
Correlation between computed values is also considered. Classes ======= .. autosummary:: :toctree: ../stubs/ DataProcessor DataAction TrainableDataAction Data Processing Nodes ===================== .. autosummary:: :toctree: ../stubs/ Probability MarginalizeCounts ToImag ToReal SVD AverageData BasisExpectationValue MinMaxNormalize """ from .data_action import DataAction, TrainableDataAction from .nodes import ( Probability, MarginalizeCounts, ToImag, ToReal, SVD, AverageData, BasisExpectationValue, MinMaxNormalize, ) from .data_processor import DataProcessor
[ 2, 770, 2438, 318, 636, 286, 1195, 1984, 270, 13, 198, 2, 198, 2, 357, 34, 8, 15069, 19764, 33448, 13, 198, 2, 198, 2, 770, 2438, 318, 11971, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 13, 921, 743, 198, 2, 7330, 257, 486...
3.597527
728
import torch.nn as nn import torch.nn.functional as F from haptic.Pointnet_Pointnet2_pytorch.models.pointnet2_utils import PointNetSetAbstractionMsg,PointNetFeaturePropagation if __name__ == '__main__': import torch model = get_model(13) xyz = torch.rand(6, 9, 2048) (model(xyz))
[ 11748, 28034, 13, 20471, 355, 299, 77, 198, 11748, 28034, 13, 20471, 13, 45124, 355, 376, 198, 6738, 387, 17459, 13, 12727, 3262, 62, 12727, 3262, 17, 62, 9078, 13165, 354, 13, 27530, 13, 4122, 3262, 17, 62, 26791, 1330, 6252, 7934, ...
2.646018
113
from django.contrib import admin from .models import Notification admin.site.register(Notification)
[ 6738, 42625, 14208, 13, 3642, 822, 1330, 13169, 198, 6738, 764, 27530, 1330, 42808, 198, 198, 28482, 13, 15654, 13, 30238, 7, 3673, 2649, 8 ]
4
25
import krpc import time import math from simple_pid import PID conn = krpc.connect(name="UI Test") vessel = conn.space_center.active_vessel kerbin_frame = vessel.orbit.body.reference_frame orb_frame = vessel.orbital_reference_frame srf_frame = vessel.surface_reference_frame surface_gravity = vessel.orbit.body.surface_gravity current_met = conn.add_stream(getattr, vessel, 'met') current_roll = conn.add_stream(getattr, vessel.flight(), 'roll') current_pitch = conn.add_stream(getattr, vessel.flight(), 'pitch') current_heading = conn.add_stream(getattr, vessel.flight(), 'heading') current_alt = conn.add_stream(getattr, vessel.flight(), 'surface_altitude') lowest = conn.add_stream(vessel.bounding_box, srf_frame) current_drag = conn.add_stream(getattr, vessel.flight(), 'drag') current_aero = conn.add_stream(getattr, vessel.flight(), 'aerodynamic_force') current_speed = conn.add_stream(getattr, vessel.flight(kerbin_frame), 'speed') vessel.control.activate_next_stage() vessel.control.sas = True time.sleep(.2) vessel.control.sas_mode = conn.space_center.SASMode.retrograde for engine in vessel.parts.engines: engine.gimbal_locked = True while True: aero_amp = math.sqrt(current_aero()[0] ** 2 + current_aero()[1] ** 2 + current_aero()[2] ** 2) time_to_zero = current_speed() / ((((vessel.max_thrust * .9) + aero_amp) / vessel.mass) + vessel.orbit.body.surface_gravity) if (time_to_zero * current_speed()) >= bottom_altitude() - current_speed(): print(current_speed()) print(f"Start Hover Slam Burn") vessel.control.throttle = .9 break while current_speed() > 50: print(current_speed()) time.sleep(.01) pass print(f"Switch to Stab") for leg in vessel.parts.legs: leg.deployed = True pid1 = PID(.15, 0, .5, setpoint=0) pid1.output_limits = (0, 1) pid1.sample_time = 0.01 while bottom_altitude() > 1: vessel.control.throttle = pid1(bottom_altitude()) # pid1.setpoint *= .98 time.sleep(.01) vessel.control.sas_mode = conn.space_center.SASMode.radial vessel.control.throttle = 0
[ 11748, 479, 81, 14751, 198, 11748, 640, 198, 11748, 10688, 198, 6738, 2829, 62, 35317, 1330, 37022, 198, 198, 37043, 796, 479, 81, 14751, 13, 8443, 7, 3672, 2625, 10080, 6208, 4943, 198, 1158, 741, 796, 48260, 13, 13200, 62, 16159, 13...
2.502904
861
from cx_core import integration as integration_module from cx_core.controller import Controller
[ 6738, 43213, 62, 7295, 1330, 11812, 355, 11812, 62, 21412, 198, 6738, 43213, 62, 7295, 13, 36500, 1330, 22741, 628 ]
4.85
20
############################################################################### # # Python script utilities as included from the cloudera-framework-assembly, # do not edit directly # ############################################################################### import os import re
[ 29113, 29113, 7804, 4242, 21017, 198, 2, 198, 2, 11361, 4226, 20081, 355, 3017, 422, 262, 537, 280, 1082, 64, 12, 30604, 12, 41873, 11, 198, 2, 466, 407, 4370, 3264, 198, 2, 198, 29113, 29113, 7804, 4242, 21017, 198, 198, 11748, 286...
6.06383
47
import datetime import os from io import BytesIO import logging from functools import wraps from copy import deepcopy from collections import Counter import slugify import yaml import mistune import requests from flask import \ Blueprint, Flask, render_template, abort, send_file, make_response from flask_cors import CORS from flask_jsonpify import jsonify from flask_basicauth import BasicAuth from datapackage_pipelines.status import status_mgr from datapackage_pipelines.utilities.stat_utils import user_facing_stats YAML_DUMPER = yaml.CDumper if 'CDumper' in yaml.__dict__ else yaml.Dumper markdown = mistune.Markdown(hard_wrap=True) status = status_mgr() def basic_auth_required(view_func): """ A decorator that can be used to protect specific views with HTTP basic access authentication. Conditional on having BASIC_AUTH_USERNAME and BASIC_AUTH_PASSWORD set as env vars. """ return wrapper blueprint = Blueprint('dpp', 'dpp') def _make_badge_response(subject, text, colour): image_url = 'https://img.shields.io/badge/{}-{}-{}.svg'.format( subject, text, colour) r = requests.get(image_url) buffer_image = BytesIO(r.content) buffer_image.seek(0) res = make_response(send_file(buffer_image, mimetype='image/svg+xml')) res.headers['Cache-Control'] = \ 'max-age=0, no-cache, no-store, must-revalidate' res.headers['Expires'] = '0' return res app = Flask(__name__) app.config['JSONIFY_PRETTYPRINT_REGULAR'] = True if os.environ.get('DPP_BASIC_AUTH_USERNAME', False) \ and os.environ.get('DPP_BASIC_AUTH_PASSWORD', False): app.config['BASIC_AUTH_USERNAME'] = os.environ['DPP_BASIC_AUTH_USERNAME'] app.config['BASIC_AUTH_PASSWORD'] = os.environ['DPP_BASIC_AUTH_PASSWORD'] app.config['BASIC_AUTH_ACTIVE'] = True basic_auth = BasicAuth(app) CORS(app) url_prefix = os.environ.get('DPP_BASE_PATH', '/') if not url_prefix.endswith('/'): url_prefix += '/' logging.info('Serving on path %s', url_prefix) app.register_blueprint(blueprint, url_prefix=url_prefix)
[ 11748, 4818, 8079, 198, 11748, 28686, 198, 6738, 33245, 1330, 2750, 4879, 9399, 198, 11748, 18931, 198, 6738, 1257, 310, 10141, 1330, 27521, 198, 6738, 4866, 1330, 2769, 30073, 198, 6738, 17268, 1330, 15034, 198, 198, 11748, 31065, 1958, ...
2.620429
793
# coding: utf-8 import numpy as np import torch.nn as nn
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 28034, 13, 20471, 355, 299, 77, 628 ]
2.565217
23
import pygame as pg from pygame.locals import * import sys import board.chess_board as board w = 60 * 8 h = 60 * 8 if __name__ == "__main__": # Launch main-function if running this script game = Game() game.run()
[ 11748, 12972, 6057, 355, 23241, 198, 6738, 12972, 6057, 13, 17946, 874, 1330, 1635, 198, 11748, 25064, 198, 11748, 3096, 13, 2395, 824, 62, 3526, 355, 3096, 628, 198, 86, 796, 3126, 1635, 807, 198, 71, 796, 3126, 1635, 807, 628, 198, ...
2.771084
83
#importing libraries import torch import torch.utils.data as data import os import random from PIL import Image
[ 2, 11748, 278, 12782, 201, 198, 201, 198, 11748, 28034, 201, 198, 11748, 28034, 13, 26791, 13, 7890, 355, 1366, 201, 198, 11748, 28686, 201, 198, 11748, 4738, 201, 198, 6738, 350, 4146, 1330, 7412, 201, 198, 201, 198, 220, 220, 220, ...
2.509091
55
# @Time : 2022/1/1 # @Author : Yuanhang Zhou # @email : sdzyh002@gmail.com import os from math import floor import torch from loguru import logger from typing import List, Dict from copy import copy, deepcopy import pickle import os import numpy import ipdb from crslab.config import PRETRAIN_PATH, SAVE_PATH from crslab.data import get_dataloader, dataset_language_map from crslab.evaluator.metrics.base import AverageMetric from crslab.evaluator.metrics.gen import PPLMetric from crslab.system.base import BaseSystem from crslab.system.utils.functions import ind2txt, ind2txt2 import random from tqdm import tqdm
[ 2, 2488, 7575, 220, 220, 220, 1058, 220, 220, 33160, 14, 16, 14, 16, 198, 2, 2488, 13838, 220, 1058, 220, 220, 34071, 33255, 32222, 198, 2, 2488, 12888, 220, 220, 1058, 220, 220, 45647, 7357, 71, 21601, 31, 14816, 13, 785, 198, 19...
2.930233
215
# flake8: noqa """This is the main public API of Morepath. Additional public APIs can be imported from the :mod:`morepath.error` and :mod:`morepath.pdbsupport` modules. For custom directive implementations that interact with core directives for grouping or subclassing purposes, or that need to use one of the Morepath registries, you may need to import from :mod:`morepath.directive`. The other submodules are considered private. If you find yourself needing to import from them in application or extension code, please report an issue about it on the Morepath issue tracker. """ from dectate import commit from .app import App, dispatch_method from .core import ( excview_tween_factory as EXCVIEW, poisoned_host_header_protection_tween_factory as HOST_HEADER_PROTECTION, model_predicate, name_predicate, request_method_predicate, ) from .core import request_method_predicate as LAST_VIEW_PREDICATE from .view import render_json, render_html, redirect from .request import Request, Response from .autosetup import scan, autoscan from .authentication import Identity, IdentityPolicy, NO_IDENTITY from .converter import Converter from .reify import reify from .run import run
[ 2, 781, 539, 23, 25, 645, 20402, 198, 37811, 1212, 318, 262, 1388, 1171, 7824, 286, 3125, 6978, 13, 198, 198, 17699, 1171, 23113, 460, 307, 17392, 422, 262, 1058, 4666, 25, 63, 3549, 6978, 13, 18224, 63, 198, 392, 1058, 4666, 25, ...
3.513196
341
EMAIL_AND_FAX_FORM_CONSTANTS = { }
[ 27630, 4146, 62, 6981, 62, 7708, 55, 62, 21389, 62, 10943, 2257, 1565, 4694, 796, 1391, 198, 220, 220, 198, 92 ]
1.761905
21
#Prediction model using an instance of the Monte Carlo simulation and Brownian Motion equation #import of libraries import numpy as np import pandas as pd from pandas_datareader import data as wb import matplotlib.pyplot as plt from scipy.stats import norm #ticker selection
[ 2, 39156, 2867, 2746, 1262, 281, 4554, 286, 262, 22489, 40089, 18640, 290, 4373, 666, 20843, 16022, 628, 198, 2, 11748, 286, 12782, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292, 355, 279, 67, 198, 6738, 19798, 292, 62, 1...
3.657895
76
""" Modified version of train_bert.py that adds DeepSpeed """ import os import datetime import json import pathlib import re import string from functools import partial from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, TypeVar, Union import random import datasets import fire import logging import loguru import numpy as np import pytz import sh import torch import torch.nn as nn import deepspeed from torch.utils.data import DataLoader, Dataset from torch.utils.tensorboard import SummaryWriter from transformers import AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast from transformers.models.roberta import RobertaConfig, RobertaModel from transformers.models.roberta.modeling_roberta import ( RobertaLMHead, RobertaPreTrainedModel, ) ###################################################################### ####################### Logging Functions ############################ ###################################################################### logger = loguru.logger def log_dist(message: str, ranks: List[int] = [], level: int = logging.INFO) -> None: """Log messages for specified ranks only""" my_rank = int(os.environ.get("RANK", "0")) if my_rank in ranks: if level == logging.INFO: logger.info(f'[Rank {my_rank}] {message}') if level == logging.ERROR: logger.error(f'[Rank {my_rank}] {message}') if level == logging.DEBUG: logger.debug(f'[Rank {my_rank}] {message}') ###################################################################### ############### Dataset Creation Related Functions ################### ###################################################################### TokenizerType = Union[PreTrainedTokenizer, PreTrainedTokenizerFast] def collate_function(batch: List[Tuple[List[int], List[int]]], pad_token_id: int) -> Dict[str, torch.Tensor]: """Collect a list of masked token indices, and labels, and batch them, padding to max length in the batch. 
""" max_length = max(len(token_ids) for token_ids, _ in batch) padded_token_ids = [ token_ids + [pad_token_id for _ in range(0, max_length - len(token_ids))] for token_ids, _ in batch ] padded_labels = [ labels + [pad_token_id for _ in range(0, max_length - len(labels))] for _, labels in batch ] src_tokens = torch.LongTensor(padded_token_ids) tgt_tokens = torch.LongTensor(padded_labels) attention_mask = src_tokens.ne(pad_token_id).type_as(src_tokens) return { "src_tokens": src_tokens, "tgt_tokens": tgt_tokens, "attention_mask": attention_mask, } def masking_function( text: str, tokenizer: TokenizerType, mask_prob: float, random_replace_prob: float, unmask_replace_prob: float, max_length: int, ) -> Tuple[List[int], List[int]]: """Given a text string, randomly mask wordpieces for Bert MLM training. Args: text (str): The input text tokenizer (TokenizerType): The tokenizer for tokenization mask_prob (float): What fraction of tokens to mask random_replace_prob (float): Of the masked tokens, how many should be replaced with random tokens (improves performance) unmask_replace_prob (float): Of the masked tokens, how many should be replaced with the original token (improves performance) max_length (int): The maximum sequence length to consider. 
Note that for Bert style models, this is a function of the number of positional embeddings you learn Returns: Tuple[List[int], List[int]]: The masked token ids (based on the tokenizer passed), and the output labels (padded with `tokenizer.pad_token_id`) """ # Note: By default, encode does add the BOS and EOS token # Disabling that behaviour to make this more clear tokenized_ids = ([tokenizer.bos_token_id] + tokenizer.encode(text, add_special_tokens=False, truncation=True, max_length=max_length - 2) + [tokenizer.eos_token_id]) seq_len = len(tokenized_ids) tokenized_ids = np.array(tokenized_ids) subword_mask = np.full(len(tokenized_ids), False) # Masking the BOS and EOS token leads to slightly worse performance low = 1 high = len(subword_mask) - 1 mask_choices = np.arange(low, high) num_subwords_to_mask = max( int((mask_prob * (high - low)) + np.random.rand()), 1) subword_mask[np.random.choice(mask_choices, num_subwords_to_mask, replace=False)] = True # Create the labels first labels = np.full(seq_len, tokenizer.pad_token_id) labels[subword_mask] = tokenized_ids[subword_mask] tokenized_ids[subword_mask] = tokenizer.mask_token_id # Now of the masked tokens, choose how many to replace with random and how many to unmask rand_or_unmask_prob = random_replace_prob + unmask_replace_prob if rand_or_unmask_prob > 0: rand_or_unmask = subword_mask & (np.random.rand(len(tokenized_ids)) < rand_or_unmask_prob) if random_replace_prob == 0: unmask = rand_or_unmask rand_mask = None elif unmask_replace_prob == 0: unmask = None rand_mask = rand_or_unmask else: unmask_prob = unmask_replace_prob / rand_or_unmask_prob decision = np.random.rand(len(tokenized_ids)) < unmask_prob unmask = rand_or_unmask & decision rand_mask = rand_or_unmask & (~decision) if unmask is not None: tokenized_ids[unmask] = labels[unmask] if rand_mask is not None: weights = np.ones(tokenizer.vocab_size) weights[tokenizer.all_special_ids] = 0 probs = weights / weights.sum() num_rand = rand_mask.sum() 
tokenized_ids[rand_mask] = np.random.choice(tokenizer.vocab_size, num_rand, p=probs) return tokenized_ids.tolist(), labels.tolist() T = TypeVar("T") def create_data_iterator( mask_prob: float, random_replace_prob: float, unmask_replace_prob: float, batch_size: int, max_seq_length: int = 512, tokenizer: str = "roberta-base", ) -> InfiniteIterator: """Create the dataloader. Args: mask_prob (float): Fraction of tokens to mask random_replace_prob (float): Fraction of masked tokens to replace with random token unmask_replace_prob (float): Fraction of masked tokens to replace with the actual token batch_size (int): The batch size of the generated tensors max_seq_length (int, optional): The maximum sequence length for the MLM task. Defaults to 512. tokenizer (str, optional): The tokenizer to use. Defaults to "roberta-base". Returns: InfiniteIterator: The torch DataLoader, wrapped in an InfiniteIterator class, to be able to continuously generate samples """ wikitext_dataset = datasets.load_dataset("wikitext", "wikitext-2-v1", split="train") wikitext_dataset = wikitext_dataset.filter( lambda record: record["text"] != "").map( lambda record: {"text": record["text"].rstrip("\n")}) tokenizer = AutoTokenizer.from_pretrained(tokenizer) masking_function_partial = partial( masking_function, tokenizer=tokenizer, mask_prob=mask_prob, random_replace_prob=random_replace_prob, unmask_replace_prob=unmask_replace_prob, max_length=max_seq_length, ) dataset = WikiTextMLMDataset(wikitext_dataset, masking_function_partial) collate_fn_partial = partial(collate_function, pad_token_id=tokenizer.pad_token_id) dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn_partial) return InfiniteIterator(dataloader) ###################################################################### ############### Model Creation Related Functions ##################### ###################################################################### def create_model(num_layers: int, 
num_heads: int, ff_dim: int, h_dim: int, dropout: float) -> RobertaMLMModel: """Create a Bert model with the specified `num_heads`, `ff_dim`, `h_dim` and `dropout` Args: num_layers (int): The number of layers num_heads (int): The number of attention heads ff_dim (int): The intermediate hidden size of the feed forward block of the transformer h_dim (int): The hidden dim of the intermediate representations of the transformer dropout (float): The value of dropout to be used. Note that we apply the same dropout to both the attention layers and the FF layers Returns: RobertaMLMModel: A Roberta model for MLM task """ roberta_config_dict = { "attention_probs_dropout_prob": dropout, "bos_token_id": 0, "eos_token_id": 2, "hidden_act": "gelu", "hidden_dropout_prob": dropout, "hidden_size": h_dim, "initializer_range": 0.02, "intermediate_size": ff_dim, "layer_norm_eps": 1e-05, "max_position_embeddings": 514, "model_type": "roberta", "num_attention_heads": num_heads, "num_hidden_layers": num_layers, "pad_token_id": 1, "type_vocab_size": 1, "vocab_size": 50265, } roberta_config = RobertaConfig.from_dict(roberta_config_dict) roberta_encoder = RobertaModel(roberta_config) roberta_model = RobertaMLMModel(roberta_config, roberta_encoder) return roberta_model ###################################################################### ########### Experiment Management Related Functions ################## ###################################################################### def get_unique_identifier(length: int = 8) -> str: """Create a unique identifier by choosing `length` random characters from list of ascii characters and numbers """ alphabet = string.ascii_lowercase + string.digits uuid = "".join(alphabet[ix] for ix in np.random.choice(len(alphabet), length)) return uuid def create_experiment_dir(checkpoint_dir: pathlib.Path, all_arguments: Dict[str, Any]) -> pathlib.Path: """Create an experiment directory and save all arguments in it. Additionally, also store the githash and gitdiff. 
Finally create a directory for `Tensorboard` logs. The structure would look something like checkpoint_dir `-experiment-name |- hparams.json |- githash.log |- gitdiff.log `- tb_dir/ Args: checkpoint_dir (pathlib.Path): The base checkpoint directory all_arguments (Dict[str, Any]): The arguments to save Returns: pathlib.Path: The experiment directory """ # experiment name follows the following convention # {exp_type}.{YYYY}.{MM}.{DD}.{HH}.{MM}.{SS}.{uuid} current_time = datetime.datetime.now(pytz.timezone("US/Pacific")) expname = "bert_pretrain.{0}.{1}.{2}.{3}.{4}.{5}.{6}".format( current_time.year, current_time.month, current_time.day, current_time.hour, current_time.minute, current_time.second, get_unique_identifier(), ) exp_dir = checkpoint_dir / expname if not is_rank_0(): return exp_dir exp_dir.mkdir(exist_ok=False) hparams_file = exp_dir / "hparams.json" with hparams_file.open("w") as handle: json.dump(obj=all_arguments, fp=handle, indent=2) # Save the git hash try: gitlog = sh.git.log("-1", format="%H", _tty_out=False, _fg=False) with (exp_dir / "githash.log").open("w") as handle: handle.write(gitlog.stdout.decode("utf-8")) except sh.ErrorReturnCode_128: log_dist( "Seems like the code is not running from" " within a git repo, so hash will" " not be stored. However, it" " is strongly advised to use" " version control.", ranks=[0], level=logging.INFO) # And the git diff try: gitdiff = sh.git.diff(_fg=False, _tty_out=False) with (exp_dir / "gitdiff.log").open("w") as handle: handle.write(gitdiff.stdout.decode("utf-8")) except sh.ErrorReturnCode_129: log_dist( "Seems like the code is not running from" " within a git repo, so diff will" " not be stored. 
However, it" " is strongly advised to use" " version control.", ranks=[0], level=logging.INFO) # Finally create the Tensorboard Dir tb_dir = exp_dir / "tb_dir" tb_dir.mkdir(exist_ok=False) return exp_dir ###################################################################### ################ Checkpoint Related Functions ######################## ###################################################################### def load_model_checkpoint( load_checkpoint_dir: pathlib.Path, model: torch.nn.Module, optimizer: torch.optim.Optimizer, ) -> Tuple[int, torch.nn.Module, torch.optim.Optimizer]: """Loads the optimizer state dict and model state dict from the load_checkpoint_dir into the passed model and optimizer. Searches for the most recent checkpoint to load from Args: load_checkpoint_dir (pathlib.Path): The base checkpoint directory to load from model (torch.nn.Module): The model to load the checkpoint weights into optimizer (torch.optim.Optimizer): The optimizer to load the checkpoint weigths into Returns: Tuple[int, torch.nn.Module, torch.optim.Optimizer]: The checkpoint step, model with state_dict loaded and optimizer with state_dict loaded """ log_dist( f"Loading model and optimizer checkpoint from {load_checkpoint_dir}", ranks=[0], level=logging.INFO) checkpoint_files = list( filter( lambda path: re.search(r"iter_(?P<iter_no>\d+)\.pt", path.name) is not None, load_checkpoint_dir.glob("*.pt"), )) assert len(checkpoint_files) > 0, "No checkpoints found in directory" checkpoint_files = sorted( checkpoint_files, key=lambda path: int( re.search(r"iter_(?P<iter_no>\d+)\.pt", path.name).group("iter_no") ), ) latest_checkpoint_path = checkpoint_files[-1] checkpoint_step = int( re.search(r"iter_(?P<iter_no>\d+)\.pt", latest_checkpoint_path.name).group("iter_no")) state_dict = torch.load(latest_checkpoint_path) model.load_state_dict(state_dict["model"], strict=True) optimizer.load_state_dict(state_dict["optimizer"]) log_dist( f"Loading model and optimizer checkpoints done. 
Loaded from {latest_checkpoint_path}", ranks=[0], level=logging.INFO) return checkpoint_step, model, optimizer ###################################################################### ######################## Driver Functions ############################ ###################################################################### def train( checkpoint_dir: str = None, load_checkpoint_dir: str = None, # Dataset Parameters mask_prob: float = 0.15, random_replace_prob: float = 0.1, unmask_replace_prob: float = 0.1, max_seq_length: int = 512, tokenizer: str = "roberta-base", # Model Parameters num_layers: int = 6, num_heads: int = 8, ff_dim: int = 512, h_dim: int = 256, dropout: float = 0.1, # Training Parameters batch_size: int = 8, num_iterations: int = 10000, checkpoint_every: int = 1000, log_every: int = 10, local_rank: int = -1, ) -> pathlib.Path: """Trains a [Bert style](https://arxiv.org/pdf/1810.04805.pdf) (transformer encoder only) model for MLM Task Args: checkpoint_dir (str): The base experiment directory to save experiments to mask_prob (float, optional): The fraction of tokens to mask. Defaults to 0.15. random_replace_prob (float, optional): The fraction of masked tokens to replace with random token. Defaults to 0.1. unmask_replace_prob (float, optional): The fraction of masked tokens to leave unchanged. Defaults to 0.1. max_seq_length (int, optional): The maximum sequence length of the examples. Defaults to 512. tokenizer (str, optional): The tokenizer to use. Defaults to "roberta-base". num_layers (int, optional): The number of layers in the Bert model. Defaults to 6. num_heads (int, optional): Number of attention heads to use. Defaults to 8. ff_dim (int, optional): Size of the intermediate dimension in the FF layer. Defaults to 512. h_dim (int, optional): Size of intermediate representations. Defaults to 256. dropout (float, optional): Amout of Dropout to use. Defaults to 0.1. batch_size (int, optional): The minibatch size. Defaults to 8. 
num_iterations (int, optional): Total number of iterations to run the model for. Defaults to 10000. checkpoint_every (int, optional): Save checkpoint after these many steps. ..note :: You want this to be frequent enough that you can resume training in case it crashes, but not so much that you fill up your entire storage ! Defaults to 1000. log_every (int, optional): Print logs after these many steps. Defaults to 10. local_rank (int, optional): Which GPU to run on (-1 for CPU). Defaults to -1. Returns: pathlib.Path: The final experiment directory """ device = (torch.device("cuda", local_rank) if (local_rank > -1) and torch.cuda.is_available() else torch.device("cpu")) ################################ ###### Create Exp. Dir ######### ################################ if checkpoint_dir is None and load_checkpoint_dir is None: log_dist( "Need to specify one of checkpoint_dir" " or load_checkpoint_dir", ranks=[0], level=logging.ERROR) return if checkpoint_dir is not None and load_checkpoint_dir is not None: log_dist( "Cannot specify both checkpoint_dir" " and load_checkpoint_dir", ranks=[0], level=logging.ERROR) return if checkpoint_dir: log_dist("Creating Experiment Directory", ranks=[0], level=logging.INFO) checkpoint_dir = pathlib.Path(checkpoint_dir) checkpoint_dir.mkdir(exist_ok=True) all_arguments = { # Dataset Params "mask_prob": mask_prob, "random_replace_prob": random_replace_prob, "unmask_replace_prob": unmask_replace_prob, "max_seq_length": max_seq_length, "tokenizer": tokenizer, # Model Params "num_layers": num_layers, "num_heads": num_heads, "ff_dim": ff_dim, "h_dim": h_dim, "dropout": dropout, # Training Params "batch_size": batch_size, "num_iterations": num_iterations, "checkpoint_every": checkpoint_every, } exp_dir = create_experiment_dir(checkpoint_dir, all_arguments) log_dist(f"Experiment Directory created at {exp_dir}", ranks=[0], level=logging.INFO) else: log_dist("Loading from Experiment Directory", ranks=[0], level=logging.INFO) load_checkpoint_dir 
= pathlib.Path(load_checkpoint_dir) assert load_checkpoint_dir.exists() with (load_checkpoint_dir / "hparams.json").open("r") as handle: hparams = json.load(handle) # Set the hparams # Dataset Params mask_prob = hparams.get("mask_prob", mask_prob) tokenizer = hparams.get("tokenizer", tokenizer) random_replace_prob = hparams.get("random_replace_prob", random_replace_prob) unmask_replace_prob = hparams.get("unmask_replace_prob", unmask_replace_prob) max_seq_length = hparams.get("max_seq_length", max_seq_length) # Model Params ff_dim = hparams.get("ff_dim", ff_dim) h_dim = hparams.get("h_dim", h_dim) dropout = hparams.get("dropout", dropout) num_layers = hparams.get("num_layers", num_layers) num_heads = hparams.get("num_heads", num_heads) # Training Params batch_size = hparams.get("batch_size", batch_size) _num_iterations = hparams.get("num_iterations", num_iterations) num_iterations = max(num_iterations, _num_iterations) checkpoint_every = hparams.get("checkpoint_every", checkpoint_every) exp_dir = load_checkpoint_dir # Tensorboard writer if is_rank_0(): tb_dir = exp_dir / "tb_dir" assert tb_dir.exists() summary_writer = SummaryWriter(log_dir=tb_dir) ################################ ###### Create Datasets ######### ################################ log_dist("Creating Datasets", ranks=[0], level=logging.INFO) data_iterator = create_data_iterator( mask_prob=mask_prob, random_replace_prob=random_replace_prob, unmask_replace_prob=unmask_replace_prob, tokenizer=tokenizer, max_seq_length=max_seq_length, batch_size=batch_size, ) log_dist("Dataset Creation Done", ranks=[0], level=logging.INFO) ################################ ###### Create Model ############ ################################ log_dist("Creating Model", ranks=[0], level=logging.INFO) model = create_model( num_layers=num_layers, num_heads=num_heads, ff_dim=ff_dim, h_dim=h_dim, dropout=dropout, ) log_dist("Model Creation Done", ranks=[0], level=logging.INFO) ################################ ###### DeepSpeed engine 
######## ################################ log_dist("Creating DeepSpeed engine", ranks=[0], level=logging.INFO) ds_config = { "train_micro_batch_size_per_gpu": batch_size, "optimizer": { "type": "Adam", "params": { "lr": 1e-4 } }, "fp16": { "enabled": True }, "zero_optimization": { "stage": 1, "offload_optimizer": { "device": "cpu" } } } model, _, _, _ = deepspeed.initialize(model=model, model_parameters=model.parameters(), config=ds_config) log_dist("DeepSpeed engine created", ranks=[0], level=logging.INFO) ################################ #### Load Model checkpoint ##### ################################ start_step = 1 if load_checkpoint_dir is not None: _, client_state = model.load_checkpoint(load_dir=load_checkpoint_dir) checkpoint_step = client_state['checkpoint_step'] start_step = checkpoint_step + 1 ################################ ####### The Training Loop ###### ################################ log_dist( f"Total number of model parameters: {sum([p.numel() for p in model.parameters()]):,d}", ranks=[0], level=logging.INFO) model.train() losses = [] for step, batch in enumerate(data_iterator, start=start_step): if step >= num_iterations: break # Move the tensors to device for key, value in batch.items(): batch[key] = value.to(device) # Forward pass loss = model(**batch) # Backward pass model.backward(loss) # Optimizer Step model.step() losses.append(loss.item()) if step % log_every == 0: log_dist("Loss: {0:.4f}".format(np.mean(losses)), ranks=[0], level=logging.INFO) if is_rank_0(): summary_writer.add_scalar(f"Train/loss", np.mean(losses), step) if step % checkpoint_every == 0: model.save_checkpoint(save_dir=exp_dir, client_state={'checkpoint_step': step}) log_dist("Saved model to {0}".format(exp_dir), ranks=[0], level=logging.INFO) # Save the last checkpoint if not saved yet if step % checkpoint_every != 0: model.save_checkpoint(save_dir=exp_dir, client_state={'checkpoint_step': step}) log_dist("Saved model to {0}".format(exp_dir), ranks=[0], 
level=logging.INFO) return exp_dir if __name__ == "__main__": torch.manual_seed(42) np.random.seed(0) random.seed(0) fire.Fire(train)
[ 37811, 198, 5841, 1431, 2196, 286, 4512, 62, 4835, 13, 9078, 326, 6673, 10766, 22785, 198, 37811, 198, 198, 11748, 28686, 198, 11748, 4818, 8079, 198, 11748, 33918, 198, 11748, 3108, 8019, 198, 11748, 302, 198, 11748, 4731, 198, 6738, 1...
2.248513
11,766
# Given a singly linked list, determine if it is a palindrome. # Definition for singly-linked list.
[ 2, 11259, 257, 1702, 306, 6692, 1351, 11, 5004, 611, 340, 318, 257, 6340, 521, 5998, 13, 198, 198, 2, 30396, 329, 1702, 306, 12, 25614, 1351, 13, 628, 628, 628, 198 ]
3.34375
32
""" Rocky is a CLI based provisioning and management tool for CloudCIX Cloud software. Rocky is designed to operate in an out of band (OOB) network, serarated from other CloudCIX networks. Rocky's purpose is to facilitate monitoring, testing, debug and recovery """ __version__ = '0.3.5'
[ 37811, 198, 19665, 88, 318, 257, 43749, 1912, 8287, 278, 290, 4542, 2891, 329, 10130, 34, 10426, 10130, 3788, 13, 198, 198, 19665, 88, 318, 3562, 284, 8076, 287, 281, 503, 286, 4097, 357, 6684, 33, 8, 3127, 11, 1055, 283, 515, 422, ...
3.625
80
from tkinter import * import time root=Tk() root.title('Calculator') root.config(bg='wheat') s='' text=StringVar() f=Frame(root,bg='#dcdde1') e=Entry(f,textvariable=text,bg='#f5f6fa',fg='#353b48',font='roboto 34 bold',justify='right',relief=RAISED) e.pack(side=LEFT,padx=10,pady=10,expand=YES,fill=BOTH) f.pack(side=TOP,padx=10,pady=10,expand=YES,fill=BOTH) l=['#aabbcc','#bbccdd','#ccddee','#ddeeff'] for i in ['789/','456*','123+','.0-=']: f=Frame(root,bg=l.pop()) for j in i: b=Button(f,text=j,bg='#00a8ff',fg='#353b48',font='roboto 34 italic',command=(lambda x=j:display(x)) if j!='=' else solve) b.pack(side=LEFT,padx=10,pady=10,expand=YES,fill=BOTH) f.pack(side=TOP,padx=10,pady=10,expand=YES,fill=BOTH) f1=Frame(root,bg='#dcdde1') clear=Button(f1,text='C',bg='#00a8ff',fg='#353b48',font='Roboto 34',command=clear) clear.pack(side=LEFT,padx=10,pady=10,expand=YES,fill=BOTH) clear1=Button(f1,text='CE',bg='#00a8ff',fg='#353b48',font='Roboto 34',command=clear1) clear1.pack(side=LEFT,padx=10,pady=10,expand=YES,fill=BOTH) f1.pack(side=TOP,padx=10,pady=10,expand=YES,fill=BOTH) f2=Frame(root,bg='#dcdde1') label=Label(f2,bg='#00a8ff',fg='#353b48',font='roboto 34') label.pack(padx=10,pady=10,expand=YES,fill=BOTH) f2.pack(padx=10,pady=10,expand=YES,fill=BOTH) con() root.mainloop()
[ 6738, 256, 74, 3849, 1330, 1635, 198, 11748, 640, 198, 15763, 28, 51, 74, 3419, 198, 15763, 13, 7839, 10786, 9771, 3129, 1352, 11537, 198, 15763, 13, 11250, 7, 35904, 11639, 12491, 265, 11537, 198, 198, 82, 28, 7061, 198, 5239, 28, ...
2.034321
641
import glob import os import keras import tensorflow as tf from keras.models import load_model from keras.callbacks import ModelCheckpoint import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import src.util.Files as Files from src.util.ImageLoader import load_images_generator, resize_image, load_images_generator_with_filename import numpy as np import logging as log import random from src.util.Arguments import anomaly_arguments, get_model_choice import src.util.Arguments as Arguments from scipy.stats import norm from PIL import Image from src.train.Models import autoencoder, conv_autoencoder, vae_autoencoder, vae_loss, get_dummy_loss, from_argument_choice import src.train.Models as Models import src.util.Filenames as Filenames import math os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' def train_on_images(epochs, max_x, max_y, path, model_type, model_name, arg_steps, validation_path, color_mode="RGB"): sess = tf.Session() keras.backend.set_session(sess) # max_x = max([i.shape[0] for i in images]) # max_y = max([i.shape[1] for i in images]) # max_x, max_y = find_max_min_image_size(path = 'detected_images/*.png') # print(max_x, max_y) # 304, 298 epochs = epochs shape = (max_y, max_x, 3) model = Models.from_argument_choice(model_type, shape) steps = len(glob.glob(path)) if arg_steps != 0: steps = arg_steps model.summary() # define the checkpoint checkpoint = ModelCheckpoint(model_name, monitor='loss', verbose=1, save_best_only=True, mode='min') callbacks_list = [checkpoint] log.info('Fitting model...') if validation_path: history = model.fit_generator(generator=centered_image_generator(path, max_x, max_y, color_mode=color_mode), validation_data=centered_image_generator(validation_path, max_x, max_y, color_mode=color_mode), validation_steps=100, epochs=epochs, steps_per_epoch=steps, callbacks=callbacks_list) else: history = model.fit_generator(generator=centered_image_generator(path, max_x, max_y, color_mode=color_mode), epochs=epochs, 
steps_per_epoch=steps, callbacks=callbacks_list) model.save(model_name) loss = history.history['loss'] try: plt.plot(loss) if validation_path: val_loss = history.history['val_loss'] plt.plot(val_loss, color='g') plt.title(model_name) plt.ylabel("Loss") plt.xlabel("Epoch") plt.savefig(f'training_loss_{model_name}.png') except: log.info("Failed to create loss graph") log.info('Finished fitting model') return model def load_model_and_predict(model_path, num_predictions, path, max_x, max_y, model_type, model=None, color_mode="RGB", template_only=False): # vae_loss(image_shape=(max_x, max_y, 3), log_var=0.5, mu=0.5) im_shape = (max_x, max_y, 3) if model_type == get_model_choice(Arguments.VAE) and not model: model = load_model(model_path, compile=False)#custom_objects={'custom_vae_loss': vae_loss(im_shape, log_var, mu)}) mu = model.get_layer('mu').output log_var = model.get_layer('log').output model.summary() print(mu, log_var) model.compile(optimizer='rmsprop', loss=vae_loss(im_shape, log_var, mu)) if model_type == get_model_choice(Arguments.CONVVAE) and not model: model = load_model(model_path, compile=False)#custom_objects={'custom_vae_loss': vae_loss(im_shape, log_var, mu)}) encoder = model.get_layer('encoder') decoder = model.get_layer('decoder') mu = encoder.get_layer('mu').output log_var = encoder.get_layer('log').output model.compile(optimizer='adam', loss=vae_loss(im_shape, log_var, mu)) if model_type != get_model_choice(Arguments.VAE) and not model: model = load_model(model_path) model.summary() print("Loaded Model", model, model.input_shape) max_x = model.input_shape[1] max_y = model.input_shape[2] images = list(image_generator_with_filename(path, max_x, max_y, color_mode=color_mode)) random.shuffle(images) index = 0 print(f'Loaded {len(images)} images') model_name = model_path.split('.')[0] save_dir = Files.makedir_else_cleardir(f'./predictions/{model_name}_{Filenames.remove_path(Filenames.strip_path_modifier(path))}') for i, filename in images: # 
centered_image_generator(path, max_x, max_y): hashed = Filenames.md5hash(filename) anomaly = "anomaly" in filename extra = "_anomaly_" if anomaly else "_normal_" pred = model.predict(i) print(pred.shape) for ii in i: if color_mode == 'HSV': ii = Image.fromarray((ii * 255).astype(np.uint8), 'HSV') ii = ii.convert("RGB") ii = np.array(ii) plt.imsave(str(save_dir / f'orig{extra}{hashed}_{index}.png'), ii) #plt.imsave(str(save_dir / f'temp.png'), pred[0], vmin=0, vmax=1) print("input shape",i.shape) evaluate = model.evaluate(i, i) if type(evaluate) is list: evaluate = evaluate[0] print(index, evaluate) for p in pred: #print("prediction",p) p = p / np.max(p) if color_mode == 'HSV': p = Image.fromarray((p * 255).astype(np.uint8), 'HSV') p = p.convert('RGB') p = np.array(p) if template_only: # Hacky solution, oh well template_path = './src/sign_detection/image_generation/images/signs/png/362.50/362_5.png' im = Image.open(template_path) im = im.convert('RGB') im = im.resize(size=(64,64)) im = np.array(im) score = image_mse(i[0], im) plt.imsave(str(save_dir / f'pred{extra}{index}_{hashed}_{score}.png'), im) else: plt.imsave(str(save_dir / f'pred{extra}{index}_{hashed}_{str(evaluate)}.png'), p) index += 1 if index == num_predictions: break if __name__ == '__main__': args = anomaly_arguments() log.info('Arguments', args) print("Arguments", args) model = None if args.do_training: model = train_on_images( epochs=args.epochs, path=args.path, max_x=args.max_x, max_y=args.max_y, model_type=args.model_type, model_name=args.model, arg_steps=args.steps, color_mode=args.color, validation_path=args.validation_path ) if args.do_predict: load_model_and_predict( model_path=args.model, num_predictions=args.num_predictions, max_x=args.max_x, max_y=args.max_y, path=args.pred_path if args.pred_path else args.path, model_type=args.model_type, model=model, color_mode=args.color, template_only=args.template )
[ 11748, 15095, 198, 11748, 28686, 198, 198, 11748, 41927, 292, 198, 11748, 11192, 273, 11125, 355, 48700, 198, 198, 6738, 41927, 292, 13, 27530, 1330, 3440, 62, 19849, 198, 6738, 41927, 292, 13, 13345, 10146, 1330, 9104, 9787, 4122, 198, ...
2.085511
3,520
from .utils import _get_return_type def windowed(data, size, step=1, ret_type=None): ''' dp.windowed applies a window function to a collection of data items. Parameters ----------- :param data: an iterable collection of data :param size: the window size :param step: the window step :param ret_type: if provided the used return type, otherwise ret_type(data) :return: the windowed data list Examples ----------- >>> import daproli as dp >>> numbers = range(10) >>> dp.windowed(numbers, 2, step=2) [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]] ''' if ret_type is None: ret_type = _get_return_type(data) return [ret_type(data[i:i+size]) for i in range(0, len(data)-(size-1), step)] def flatten(data, ret_type=None): ''' dp.flatten applies a flatten function to a collection of data items. Parameters ----------- :param data: an iterable collection of data :param ret_type: if provided the used return type, otherwise ret_type(data) :return: the flattened data collection Examples ----------- >>> import daproli as dp >>> dp.flatten([[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]) [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] ''' if ret_type is None: ret_type = _get_return_type(data) return ret_type([item for sub in data for item in sub])
[ 6738, 764, 26791, 1330, 4808, 1136, 62, 7783, 62, 4906, 628, 198, 4299, 4324, 276, 7, 7890, 11, 2546, 11, 2239, 28, 16, 11, 1005, 62, 4906, 28, 14202, 2599, 198, 220, 220, 220, 705, 7061, 198, 220, 220, 220, 288, 79, 13, 7972, 6...
2.572243
526
#!/usr/bin/env python ''' Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' from mock.mock import MagicMock, call, patch from stacks.utils.RMFTestCase import * import json import sys from only_for_platform import not_for_platform, PLATFORM_WINDOWS
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 7061, 6, 198, 26656, 15385, 284, 262, 24843, 10442, 5693, 357, 1921, 37, 8, 739, 530, 198, 273, 517, 18920, 5964, 11704, 13, 220, 4091, 262, 28536, 2393, 198, 17080, 6169, 351, ...
3.844
250
import os
import csv
import numpy as np
from sklearn.utils import shuffle

## Read in frame data
samples = []
with open('/../opt/carnd_p3/data/driving_log.csv') as csvfile:  # open the log file
    reader = csv.reader(csvfile)  # as a readable csv
    for line in reader:
        samples.append(line)  # add each line of the log file to samples

samples = samples[1:]  # remove the CSV table header row

# shuffle entire sample set before splitting into training and validation so
# that training isn't biased by the recording order
samples = shuffle(samples)

from sklearn.model_selection import train_test_split
train_samples, validation_samples = train_test_split(samples, test_size=0.2)  # 80% training / 20% validation

from scipy import ndimage  # because cv2.imread() imports the image as BGR, and we want RGB

## Training hyper-parameters — defined up front so every use below stays consistent
## (the original hard-coded 32 and 5 at the call sites while also defining these names)
batch_size = 32
epochs = 5

## Define generator to handle small portions of images at a time so that
## training is not as memory-heavy.
# NOTE(review): `generator` is called below but is never defined in this file —
# the batching generator definition appears to be missing; restore it before running.
train_generator = generator(train_samples, batch_size=batch_size)
validation_generator = generator(validation_samples, batch_size=batch_size)

ch, row, col = 3, 160, 320  # full input image format (channels, height, width)

# import Keras model layers
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten, Dropout, Lambda
from keras.layers.convolutional import Conv2D, Cropping2D
from keras.layers.pooling import MaxPooling2D

# BUILD MODEL (similar to the NVIDIA end-to-end driving architecture)
model = Sequential()

# Preprocess incoming data, centered around zero with small standard deviation
model.add(Lambda(lambda x: x/127.5 - 1.0, input_shape=(row, col, ch)))

# Crop incoming data (training, validation, and autonomous, so that everything
# is consistent); cropping first would reduce the memory used by the Lambda
# layer and speed up training.
model.add(Cropping2D(cropping=((60, 20), (0, 0))))

# Convolution layers 1-3, kernel size 5 with stride of 2
model.add(Conv2D(24, (5, 5), strides=(2, 2), activation='relu'))
model.add(Conv2D(36, (5, 5), strides=(2, 2), activation='relu'))
model.add(Conv2D(48, (5, 5), strides=(2, 2), activation='relu'))

# Convolution layers 4-5, kernel size 3 with stride of 1
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))

# Flatten convolution output to yield a single feature vector
model.add(Flatten())

# Fully connected layers, gradually decreasing in size until the final
# single steering value
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
model.add(Dense(1))

## Compile and train the model: Mean Squared Error to measure loss (this is a
## regression, not classification), Adam optimizer for tuning.
model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])

# NOTE(review): steps_per_epoch/validation_steps are floats here; Keras expects
# integers — consider math.ceil(len(train_samples) / batch_size).
model.fit_generator(train_generator,
                    steps_per_epoch=len(train_samples) / batch_size,
                    validation_data=validation_generator,
                    validation_steps=len(validation_samples) / batch_size,
                    epochs=epochs,  # was hard-coded 5; now follows the variable above
                    verbose=1)

# save the trained model
model.save('model.h5')
[ 11748, 28686, 198, 11748, 269, 21370, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 1341, 35720, 13, 26791, 1330, 36273, 198, 198, 2235, 4149, 287, 5739, 1366, 198, 82, 12629, 796, 17635, 198, 4480, 1280, 10786, 14, 40720, 8738, 14, 77...
3.092453
1,060
#!/usr/bin/env python3

""" A script containing the basic principles of the extraction primitive inner
workings"""

from __future__ import division, print_function
from ghostdr import polyfit
import numpy as np  # was `import numpy as pn`, but the code below uses `np.`

# NOTE(review): `pyfits` is used throughout but never imported — add
# `import pyfits` (or `import astropy.io.fits as pyfits`) before running.

# Firstly, let's find all the needed files
fitsdir = '/Users/mireland/data/ghost/cal_frames/'

# Define the files in use (NB xmod.txt and wavemod.txt should be correct)
arc_file = fitsdir + "arc_extracted.fits"
# load the extracted arc in now:
extracted_flux, extracted_vars = pyfits.getdata(arc_file)

# Where is the default location for the model? By default it is a parameter
# in the ghost class. If this needs to be overwritten, go ahead.
# This is the xmod file. Wherever it is saved from the flat reduction.
xmodel_file = fitsdir + 'GHOST_1_1_blue_std_xmodPolyfit.fits'

# All the other models... which are currently in the "test" directory.
# NOTE(review): `test_files_dir` is not defined in this script — point it at
# the directory holding the model FITS files.
wmodel_file = test_files_dir + 'wparams_blue_std.fits'
spatmod_file = test_files_dir + 'spatmod.fits'
specmod_file = test_files_dir + 'specmod.fits'
rotmod_file = test_files_dir + 'rotmod2.fits'

# Find the arc line list file
arclinefile = '/home/jbento/code/ghostdr/ghostdr/ADCONFIG_GHOST/lookups/GHOST/Polyfit/mnras0378-0221-SD1.txt'
arcwaves, arcfluxes = np.loadtxt(arclinefile, usecols=[1, 2]).T

# Get the initial default model parameters from the lookup location.
# (Moved *before* spectral_format_with_matrix, which consumes them — the
# original called that method first and would raise NameError.)
xpars = pyfits.getdata(xmodel_file)
wpars = pyfits.getdata(wmodel_file)
spatpars = pyfits.getdata(spatmod_file)
specpars = pyfits.getdata(specmod_file)
rotpars = pyfits.getdata(rotmod_file)

# Instantiate the ghost arm and apply the model parameters
arm = polyfit.GhostArm('blue', mode='std')
arm.spectral_format_with_matrix(xpars, wpars, spatpars, specpars, rotpars)

# NOTE(review): `image_array` / `flat_image_array` are not defined in this
# script — load the slit-viewer frames before this line.
slitview = polyfit.SlitView(image_array, flat_image_array, mode='std')

# The extractor is given the polyfit "arm" object, and a slitview object which
# has been instantiated with the slit viewer data.
extractor = polyfit.Extractor(arm, slitview)

# Now find the arc lines, after first re-loading into the extractor.
# The inspect parameter is a verbose option for visualising the line
# finding results.
lines_out = extractor.find_lines(extracted_flux, arcwaves, inspect=False)

# Now finally do the wavelength fit!  Optionally show residuals?
fitted_params, wave_and_resid = arm.read_lines_and_fit(wpars, lines_out, ydeg=3, xdeg=3)

# Now write the output to a file, in whatever format suits the recipe system best.
pyfits.writeto('outputs.fits', fitted_params)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 37811, 317, 4226, 7268, 262, 4096, 7811, 286, 262, 22236, 20049, 8434, 220, 198, 1818, 654, 37811, 198, 198, 6738, 11593, 37443, 834, 1330, 7297, 11, 3601, 62, 8818, 198, 6738...
2.917079
808
import unittest
import datetime
import kronos

# strftime/strptime pattern for the timestamps used in these tests,
# e.g. "2020-07-19 18:14:21".
string_format_time = "%Y-%m-%d %H:%M:%S"
# Sample timestamp fixture matching string_format_time.
# NOTE(review): the unittest.TestCase classes exercising kronos appear to live
# outside this excerpt; these constants are presumably shared test fixtures.
date_time_str = "2020-07-19 18:14:21"
[ 11748, 555, 715, 395, 198, 11748, 4818, 8079, 198, 11748, 479, 1313, 418, 198, 198, 8841, 62, 18982, 62, 2435, 796, 36521, 56, 12, 4, 76, 12, 4, 67, 4064, 39, 25, 4, 44, 25, 4, 50, 1, 198, 4475, 62, 2435, 62, 2536, 796, 366, ...
2.189655
58
##!/usr/bin/env python3

# Mass Flow Controller Arduino driver

# Copyright (C) 2015  Simon Howroyd, Jason James
#
#     This program is free software: you can redistribute it and/or modify
#     it under the terms of the GNU General Public License as published by
#     the Free Software Foundation, either version 3 of the License, or
#     (at your option) any later version.
#
#     This program is distributed in the hope that it will be useful,
#     but WITHOUT ANY WARRANTY; without even the implied warranty of
#     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#     GNU General Public License for more details.
#
#     You should have received a copy of the GNU General Public License
#     along with this program.  If not, see <http://www.gnu.org/licenses/>.

#############################################################################

# Import libraries
from time import sleep
#from quick2wire.i2c import I2CMaster, reading

# Define class


# External getter
def getMoles(self, fun, ch):
    """Return the controller reading for channel *ch*, scaled to a molar rate.

    :param self: controller object exposing a ``get(fun, ch)`` raw reading
    :param fun:  selector forwarded unchanged to ``self.get``
    :param ch:   channel number forwarded unchanged to ``self.get``
    :return:     the raw reading multiplied by 7.0/6280.0
    """
    # TODO should be *125.718/134.82 (density H2 at 1.5bar)
    raw_reading = self.get(fun, ch)
    return raw_reading * (7.0 / 6280.0)
[ 2235, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 2, 5674, 27782, 22741, 27634, 4639, 198, 198, 2, 15069, 357, 34, 8, 1853, 220, 11288, 1374, 3287, 67, 11, 8982, 3700, 198, 2, 220, 198, 2, 220, 220, 220, 220, 770, 1430...
3.164384
365
"""Greenlet-aware wait callback that makes psycopg2 cooperative.

Registering :func:`psycopg2_wait_callback` with psycopg2 means that a
connection waiting for socket I/O suspends the *current* greenlet (handing
control back to its parent) instead of blocking the whole thread.
"""
from asyncio import Future

from greenlet import getcurrent
import psycopg2
from psycopg2 import *  # noqa
from psycopg2 import extensions, OperationalError


# Re-export the wrapped driver's version string.
__version__ = psycopg2.__version__


def psycopg2_wait_callback(conn):
    """A wait callback to allow greenlet to work with Psycopg.

    The caller must be from a greenlet other than the main one.

    :param conn: psycopg2 connection or file number

    This function must be invoked from a coroutine with parent, therefore
    invoking it from the main greenlet will raise an exception.
    """
    # Poll until the connection reports it is done; on READ/WRITE states,
    # yield to the parent greenlet until the socket is ready again.
    while True:
        state = conn.poll()
        if state == extensions.POLL_OK:
            # Done with waiting
            break
        elif state == extensions.POLL_READ:
            _wait_fd(conn)
        elif state == extensions.POLL_WRITE:
            _wait_fd(conn, read=False)
        else:   # pragma    nocover
            raise OperationalError("Bad result from poll: %r" % state)


# INTERNALS
def _wait_fd(conn, read=True):
    '''Wait for an event on file descriptor ``fd``.

    :param conn: file descriptor
    :param read: wait for a read event if ``True``, otherwise a wait
        for write event.

    This function must be invoked from a coroutine with parent, therefore
    invoking it from the main greenlet will raise an exception.
    '''
    current = getcurrent()
    parent = current.parent
    assert parent, '"_wait_fd" must be called by greenlet with a parent'
    try:
        fileno = conn.fileno()
    except AttributeError:
        # ``conn`` was already a raw file descriptor, not a connection object.
        fileno = conn
    future = Future()
    # When the event on fd occurs switch back to the current greenlet.
    # NOTE(review): ``future._loop`` is a private asyncio.Future attribute —
    # this relies on CPython implementation details; verify on upgrades.
    # NOTE(review): ``_done_wait_fd`` is referenced here but not defined in
    # this excerpt — presumably provided elsewhere in the package; confirm.
    if read:
        future._loop.add_reader(fileno, _done_wait_fd, fileno, future, read)
    else:
        future._loop.add_writer(fileno, _done_wait_fd, fileno, future, read)
    # switch back to parent greenlet
    parent.switch(future)
    # Back on the child greenlet. Raise error if there is one
    future.result()


# Fail fast at import time if the installed psycopg2 is too old to support
# asynchronous connections (POLL_OK appeared in 2.2.0).
try:
    extensions.POLL_OK
except AttributeError:  # pragma    nocover
    from pulsar import ImproperlyConfigured
    raise ImproperlyConfigured(
        'Psycopg2 does not have support for asynchronous connections. '
        'You need at least version 2.2.0 of Psycopg2.')


extensions.set_wait_callback(psycopg2_wait_callback)
[ 6738, 30351, 952, 1330, 10898, 198, 198, 6738, 4077, 1616, 1330, 651, 14421, 198, 11748, 17331, 22163, 70, 17, 198, 6738, 17331, 22163, 70, 17, 1330, 1635, 220, 1303, 645, 20402, 198, 6738, 17331, 22163, 70, 17, 1330, 18366, 11, 6564, ...
2.725
840
# Copyright 2011-2012 10gen, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test the replica_set_connection module.""" import copy import datetime import os import signal import socket import sys import time import thread import traceback import unittest sys.path[0:0] = [""] from nose.plugins.skip import SkipTest from bson.son import SON from bson.tz_util import utc from pymongo.connection import Connection from pymongo.read_preferences import ReadPreference from pymongo.replica_set_connection import ReplicaSetConnection from pymongo.replica_set_connection import _partition_node from pymongo.database import Database from pymongo.errors import (AutoReconnect, ConfigurationError, ConnectionFailure, InvalidName, OperationFailure) from test import version from test.utils import delay, assertReadFrom, assertReadFromAll, read_from_which_host host = os.environ.get("DB_IP", 'localhost') port = int(os.environ.get("DB_PORT", 27017)) pair = '%s:%d' % (host, port) if __name__ == "__main__": unittest.main()
[ 2, 15069, 2813, 12, 6999, 838, 5235, 11, 3457, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, ...
2.963834
553
import argparse_helper as argparse import config_dir import sys from .editor import Editor if __name__ == '__main__': main("-f", "foo", "/tmp/x")
[ 11748, 1822, 29572, 62, 2978, 525, 355, 1822, 29572, 198, 11748, 4566, 62, 15908, 198, 11748, 25064, 198, 198, 6738, 764, 35352, 1330, 12058, 628, 628, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, ...
2.818182
55
# Bootstrap setuptools if it is not already installed, then describe the package.
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, find_packages

# Imported so version and long_description below stay in sync with the package.
import import_utils

setup(
    name = "import-utils",
    version = import_utils.__version__,
    description = 'A module that supports simple programmatic module imports',
    packages = find_packages(),
    author = 'Evgeny.Fadeev',
    author_email = 'evgeny.fadeev@gmail.com',
    license = 'BSD',
    keywords = 'import, module',
    url = 'http://askbot.org',
    include_package_data = True,
    classifiers = [
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.5',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
    ],
    # Reuse the module docstring as the PyPI long description.
    long_description = import_utils.__doc__
)
[ 11748, 304, 89, 62, 40406, 198, 8471, 62, 40406, 13, 1904, 62, 2617, 37623, 10141, 3419, 198, 6738, 900, 37623, 10141, 1330, 9058, 11, 1064, 62, 43789, 198, 198, 11748, 1330, 62, 26791, 198, 198, 40406, 7, 198, 220, 220, 220, 1438, ...
2.715596
327
import numpy as np from visual_dynamics.policies import CameraTargetPolicy
[ 11748, 299, 32152, 355, 45941, 198, 198, 6738, 5874, 62, 67, 4989, 873, 13, 79, 4160, 444, 1330, 20432, 21745, 36727, 628 ]
3.5
22
def _count_bits(report, pos):
    """Return (zeros, ones): how many entries carry '0'/'1' at position *pos*."""
    ones = sum(1 for entry in report if entry[pos] == '1')
    return len(report) - ones, ones


def _filter_rating(report, keep_most_common):
    """Keep entries matching the most (or least) common bit, left to right,
    until a single entry remains; return it as an integer."""
    pos = 0
    while len(report) > 1:
        zeros, ones = _count_bits(report, pos)
        if keep_most_common:
            wanted = '0' if zeros > ones else '1'   # tie keeps '1'
        else:
            wanted = '1' if ones < zeros else '0'   # tie keeps '0'
        report = [entry for entry in report if entry[pos] == wanted]
        pos += 1
    return int(report[0], 2)


with open("inputday3.txt") as handle:
    report = handle.read().split()

# Part 1: gamma takes the majority bit of each column, epsilon the minority.
gamma_bits = []
epsilon_bits = []
for pos in range(len(report[0])):
    zeros, ones = _count_bits(report, pos)
    if zeros > ones:
        gamma_bits.append('0')
        epsilon_bits.append('1')
    else:
        gamma_bits.append('1')
        epsilon_bits.append('0')

print("PART 1", int(''.join(gamma_bits), 2) * int(''.join(epsilon_bits), 2))

# Part 2: oxygen keeps the most common bit per column, CO2 the least common.
oxygen = _filter_rating(report, True)
co2 = _filter_rating(report, False)

print("PART 2", oxygen * co2)
[ 4480, 1280, 7203, 15414, 820, 18, 13, 14116, 4943, 355, 277, 25, 201, 198, 220, 220, 220, 1366, 796, 685, 87, 329, 2124, 287, 277, 13, 961, 22446, 35312, 3419, 60, 201, 198, 201, 198, 28483, 2611, 796, 13538, 201, 198, 538, 18217, ...
1.749064
801
from __future__ import print_function
from PIL import Image
import os
import os.path
import numpy as np
import sys
from misc import AverageMeter
from eval_accuracy import simple_accuracy

if sys.version_info[0] == 2:
    import cPickle as pickle
else:
    import pickle

import torch.utils.data as data
import torch
from multiprocessing import Value


def train_reorganized(trainloader, model, criterion, optimizer, epochs):
    """Run a training loop over *trainloader* for *epochs* passes.

    :param trainloader: iterable yielding input tensors; the reshape/tile below
        implies a shape of (batch, k, C, H, W) — TODO confirm with the caller
    :param model: network returning a tuple whose first element is the logits
        (only the first element is used below)
    :param criterion: loss comparing logits against the integer targets
    :param optimizer: optimizer stepping the model parameters
    :param epochs: number of full passes over the loader
    """
    # train the model
    model.train()
    top1 = AverageMeter()    # running accuracy (semantics per misc.AverageMeter)
    losses = AverageMeter()  # running loss
    for epoch in range(epochs):
        for batch_idx, (inputs) in enumerate(trainloader):
            # Targets are just the sub-image indices 0..k-1, tiled once per
            # sample — presumably the model predicts each sub-image's position;
            # verify against the data pipeline.
            targets = torch.LongTensor(np.tile(np.arange(inputs.size(1)), inputs.size(0)))
            # Collapse (batch, k, C, H, W) -> (batch*k, C, H, W) so every
            # sub-image is scored independently.
            inputs = inputs.reshape(-1, inputs.size(-3), inputs.size(-2), inputs.size(-1))
            # NOTE(review): torch.autograd.Variable is a deprecated no-op on
            # modern PyTorch, and .cuda() assumes a GPU is present.
            inputs, targets = torch.autograd.Variable(inputs.cuda()), torch.autograd.Variable(targets.cuda())

            outputs, _ = model(inputs)
            loss = criterion(outputs, targets)

            prec1 = simple_accuracy(outputs.data.cpu(), targets.data.cpu())

            top1.update(prec1, inputs.size(0))
            losses.update(loss.data.cpu(), inputs.size(0))

            # compute gradient and do SGD step
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # periodic progress report every 10 batches
            if batch_idx % 10 == 0:
                print('Epoch: [{} | {}], batch: {}, loss: {}, Accuracy: {}'.format(epoch + 1, epochs, batch_idx + 1, losses.avg, top1.avg))
[ 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 6738, 350, 4146, 1330, 7412, 198, 11748, 28686, 198, 11748, 28686, 13, 6978, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 25064, 198, 6738, 12747, 1330, 13475, 44, 2357, 198, 6738, ...
2.402576
621
import json import logging import os from typing import Optional from mir import scm from mir.tools import mir_storage
[ 11748, 33918, 198, 11748, 18931, 198, 11748, 28686, 198, 6738, 19720, 1330, 32233, 198, 198, 6738, 5720, 1330, 629, 76, 198, 6738, 5720, 13, 31391, 1330, 5720, 62, 35350, 628, 628, 628 ]
3.90625
32
import argparse import math import os import pickle from typing import List import cv2 import numpy as np import torch from PIL import Image, ImageDraw, ImageFont import configs.paths_config from configs import paths_config from training.networks import SynthesisBlock
[ 11748, 1822, 29572, 198, 11748, 10688, 198, 11748, 28686, 198, 11748, 2298, 293, 198, 6738, 19720, 1330, 7343, 198, 198, 11748, 269, 85, 17, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 28034, 198, 6738, 350, 4146, 1330, 7412, 11, 74...
3.61039
77
import pandas as pd # UD 1.0 CONLL_COLUMNS = ['i', 'w', 'l', 'p', 'n', 'm', 'g', 'f', 'd', 'c'] # UD 2.0 CONLL_COLUMNS_V2 = ['i', 'w', 'l', 'x', 'p', 'm', 'g', 'f', 'e', 'o'] # possible morphological attributes MORPH_ATTS = ['type', 'animacy', #'gender', 'number' "Abbr", "Animacy", "Aspect", "Case", "Definite", "Degree", "Evident", "Foreign", "Gender", "Mood", "NumType", "Number", "Person", "Polarity", "Polite", "Poss", "PronType", "Reflex", "Tense", "VerbForm", "Voice", "Type"] def _make_sent_csv(sentstring, fname, meta, splitter, i, skip_meta=False): """ Take one CONLL-U sentence and add all metadata to each row Return: str (CSV data) and dict (sent level metadata) """ fixed_lines = [] raw_lines = sentstring.splitlines() for line in raw_lines: if not line: continue if line.startswith('#'): if not skip_meta: try: k, v = line.lstrip('# ').split(splitter, 1) except ValueError: k, v = line.lstrip('# ').split(splitter.strip(), 1) meta[k.lower().strip()] = v.strip() else: line = '%s\t%s\t%s' % (fname, i, line) fixed_lines.append(line) return '\n'.join(fixed_lines), meta def _add_governors_to_df(df): """ Add governor info to a DF. Increases memory usage quite a bit. 
""" # save the original index i = df.index.get_level_values('i') # add g dfg = df.set_index('g', append=True) # remove i dfg = dfg.reset_index('i') dfg = df.loc[dfg.index] dfg = dfg[['w', 'l', 'p', 'f']] dfg['i'] = i dfg = dfg.set_index('i', append=True) dfg.index.names = ['file', 's', 'g', 'i'] dfg = dfg.reset_index('g', drop=True) for c in list(dfg.columns): try: dfg[c] = dfg[c].cat.add_categories(['ROOT']) except (AttributeError, ValueError): pass dfg = dfg.fillna('ROOT') dfg.columns = ['gw', 'gl', 'gp', 'gf'] dfg = df.join(dfg, how="inner") return dfg def conll_df(path, corpus_name=False, corp_folder=False, v2="auto", skip_morph=False, skip_meta=False, add_gov=False, drop=['text', 'newdoc id'], file_index=True, categories=True, extra_fields='auto', drop_redundant=True, **kwargs): """ Optimised CONLL-U reader for v2.0 data Args: path (str): the file to prepare Returns: pd.DataFrame: 2d array representation of file data """ import os import re try: from io import StringIO except ImportError: from StringIO import StringIO splitter = ' = ' if v2 else '=' with open(path, 'r') as fo: data = fo.read().strip('\n') if v2 == 'auto': v2 = 'sent_id = ' in data[:9999] fname = os.path.basename(path) # metadata that applies filewide # a little bonus for those with annual data basedict = {} if not skip_meta: year = re.search(r'[12][0-9][0-9][0-9]', fname) if year: basedict['year'] = year.group(0) sents = data.split('\n\n') sents_meta = [_make_sent_csv(sstring, fname, dict(basedict), splitter, i, skip_meta=skip_meta) \ for i, sstring in enumerate(sents, start=1)] sents, metadata = zip(*sents_meta) # make the sent df sents = '\n\n'.join(sents) sents = StringIO(sents) if v2: cols = ['file', 's'] + CONLL_COLUMNS_V2 else: cols = ['file', 's'] + CONLL_COLUMNS df = pd.read_csv(sents, sep="\t", header=None, names=cols, quoting=kwargs.pop('quoting', 3), index_col=[0, 1, 2], engine='c', na_filter=False, **kwargs) if v2 and not skip_morph: df['m'] = df['m'].fillna('') df['o'] = 
df['o'].fillna('') if extra_fields == 'auto': # evil line to get all possible keys in the final column extra_fields = list(df['o'].str.extractall(r'(?:^|\|)([^=]+?)=')[0].unique()) cats = MORPH_ATTS + extra_fields if 'SpaceAfter' not in cats: cats.append('SpaceAfter') cats = list(set(cats)) om = df['o'].str.cat(df['m'], sep='|').str.strip('|_') # this is a very slow list comp, but i can't think of a better way to do it. # the 'extractall' solution makes columns for not just the value, but the key... extra = [om.str.extract('%s=([^|$]+)' % cat.title(), expand=True) for cat in cats] extra = pd.concat(extra, axis=1) extra.columns = cats df = pd.concat([df, extra], axis=1) # make and join the meta df if not skip_meta: metadata = {i: d for i, d in enumerate(metadata, start=1)} metadata = pd.DataFrame(metadata).T metadata.index.name = 's' df = metadata.join(df, how='inner') # we never want these to show up as a dataframe column badcols = ['sent_id', 's', 'i', 'file'] # if we aren't parsing morph and extra columns, we should at least keep them if not skip_morph: badcols += ['o', 'm'] if drop: badcols = badcols + drop df = df.drop(badcols, axis=1, errors='ignore') # some evil code to handle conll-u files where g col could be a string if 'g' in df.columns: df['g'] = df['g'].fillna(0) if df['g'].dtype in [object, str]: df['g'] = df['g'].str.replace('_', '0').astype(int) df['g'] = df['g'].astype(int) df = df.fillna('_') # attempt to categorise data if categories: for c in list(df.columns): if c in ['g', 'date']: continue try: df[c] = df[c].astype('category') except: pass if add_gov: df = _add_governors_to_df(df) if not file_index: df.index = df.index.droplevel('file') if drop_redundant: empty_cols = [] for c in df.columns: if len(df[c].unique()) == 1: empty_cols.append(c) df = df.drop(empty_cols, axis=1) #reorder columns so that important things are first firsts = CONLL_COLUMNS_V2 if v2 else CONLL_COLUMNS firsts = [i for i in firsts if i in list(df.columns)] lasts = [i for i 
in list(df.columns) if i not in firsts] df = df[firsts + lasts] return df
[ 11748, 19798, 292, 355, 279, 67, 198, 198, 2, 43700, 352, 13, 15, 198, 10943, 3069, 62, 25154, 5883, 8035, 796, 37250, 72, 3256, 705, 86, 3256, 705, 75, 3256, 705, 79, 3256, 705, 77, 3256, 705, 76, 3256, 705, 70, 3256, 705, 69, ...
1.947368
3,515
#!/bin/python3

# Export hourly bar data from a PostgreSQL cache into LMDB, optionally
# applying split/dividend adjustments, skipping periods already cached.

import argparse
import datetime
import functools
import logging
import os

import psycopg2
from dateutil.relativedelta import relativedelta

from atpy.data.cache.lmdb_cache import *
from atpy.data.cache.postgres_cache import BarsInPeriodProvider
from atpy.data.cache.postgres_cache import request_adjustments
from atpy.data.splits_dividends import adjust_df

if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    parser = argparse.ArgumentParser(description="PostgreSQL to LMDB configuration")
    parser.add_argument('-lmdb_path', type=str, default=None, help="LMDB Path")
    parser.add_argument('-delta_back', type=int, default=8, help="Default number of years to look back")
    # NOTE(review): action='store_true' combined with default=True means this
    # flag can never be switched off from the command line — verify intent.
    parser.add_argument('-adjust_splits', action='store_true', default=True, help="Adjust splits before saving")
    parser.add_argument('-adjust_dividends', action='store_true', default=False, help="Adjust dividends before saving")

    args = parser.parse_args()

    # fall back to environment variables for the LMDB path and DB connection
    lmdb_path = args.lmdb_path if args.lmdb_path is not None else os.environ['ATPY_LMDB_PATH']
    con = psycopg2.connect(os.environ['POSTGRESQL_CACHE'])

    # fetch the requested adjustment set (both, splits only, or dividends only)
    adjustments = None
    if args.adjust_splits and args.adjust_dividends:
        adjustments = request_adjustments(conn=con, table_name='splits_dividends')
    elif args.adjust_splits:
        adjustments = request_adjustments(conn=con, table_name='splits_dividends', adj_type='split')
    elif args.adjust_dividends:
        adjustments = request_adjustments(conn=con, table_name='splits_dividends', adj_type='dividend')

    # start delta_back years ago, aligned to the following Monday
    now = datetime.datetime.now()
    bgn_prd = datetime.datetime(now.year - args.delta_back, 1, 1)
    bgn_prd = bgn_prd + relativedelta(days=7 - bgn_prd.weekday())

    # read_pickle / write come from the lmdb_cache star import above —
    # presumably (key, lmdb_path) keyed pickle storage; verify there.
    cache_read = functools.partial(read_pickle, lmdb_path=lmdb_path)
    # iterate 60-minute bars one week at a time, with a tiny overlap so period
    # boundaries are not double-counted
    bars_in_period = BarsInPeriodProvider(conn=con, interval_len=3600, interval_type='s', bars_table='bars_60m', bgn_prd=bgn_prd, delta=relativedelta(days=7), overlap=relativedelta(microseconds=-1), cache=cache_read)

    for i, df in enumerate(bars_in_period):
        if cache_read(bars_in_period.current_cache_key()) is None:
            # not cached yet: adjust (if requested) and persist
            if adjustments is not None:
                adjust_df(df, adjustments)

            write(bars_in_period.current_cache_key(), df, lmdb_path)
            logging.info('Saving ' + bars_in_period.current_cache_key())
        else:
            logging.info('Cache hit on ' + bars_in_period.current_cache_key())
[ 2, 48443, 8800, 14, 29412, 18, 198, 198, 11748, 1822, 29572, 198, 11748, 4818, 8079, 198, 11748, 1257, 310, 10141, 198, 11748, 18931, 198, 11748, 28686, 198, 198, 11748, 17331, 22163, 70, 17, 198, 6738, 3128, 22602, 13, 2411, 265, 1572,...
2.562757
972