Column            Type           Min  Max
content           string length  1    1.05M
input_ids         list length    1    883k
ratio_char_token  float64        1    22.9
token_count       int64          1    883k
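The two derived columns follow from the raw pair of fields: token_count is the length of input_ids, and ratio_char_token is the character length of content divided by that count (so the 22.9 maximum means roughly 23 characters per token on average for the most compressible row). A minimal sketch of that derivation, assuming exactly this formula; the function name and dict layout are illustrative, not part of the dataset tooling:

# Illustrative sketch only: recomputes the two derived columns for one row.
# Assumes ratio_char_token = len(content) / len(input_ids), which is
# consistent with the column ranges above; not taken from the dump itself.
def derive_row_stats(content, input_ids):
    token_count = len(input_ids)
    return {
        "token_count": token_count,
        "ratio_char_token": len(content) / token_count,
    }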
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import itertools as it

from django.db import models, migrations


def convert_status(apps, schema_editor):
    ''' Migrate Visit.skipped and ScheduledPhoneCall.skipped -> status
        (pending, missed, deleted, attended) '''
    Visit = apps.get_model("contacts", "Visit")
    ScheduledPhoneCall = apps.get_model("contacts", "ScheduledPhoneCall")
    for obj in it.chain(Visit.objects.all(), ScheduledPhoneCall.objects.all()):
        if obj.skipped is None:
            obj.status = 'pending'
        elif obj.skipped == False:
            obj.status = 'attended'
        elif obj.skipped == True:
            obj.status = 'missed'
        obj.save()


def unconvert_status(apps, schema_editor):
    ''' Reverse function sets skipped based on status '''
    Visit = apps.get_model("contacts", "Visit")
    ScheduledPhoneCall = apps.get_model("contacts", "ScheduledPhoneCall")
    for obj in it.chain(Visit.objects.all(), ScheduledPhoneCall.objects.all()):
        if obj.status == 'pending':
            obj.skipped = None
        elif obj.status == 'attended':
            obj.skipped = False
        elif obj.status == 'missed':
            obj.skipped = True
        obj.save()
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 11748, 340, 861, 10141, 355, 340, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 4981, 11, 15720, 602, 19...
2.435547
512
import os
import sys
from datetime import time

import unittest

sys.path.append(
    os.path.dirname(
        # was os.path.dirname("__file__"): the quoted literal resolves to ''
        os.path.dirname(os.path.join("..", "..", "..", os.path.dirname(__file__)))
    )
)

from core.controller import BaseTimeRangeController

if __name__ == "__main__":
    unittest.main()
[ 11748, 28686, 198, 11748, 25064, 198, 6738, 4818, 8079, 1330, 640, 198, 11748, 555, 715, 395, 198, 198, 17597, 13, 6978, 13, 33295, 7, 198, 220, 220, 220, 28686, 13, 6978, 13, 15908, 3672, 7, 198, 220, 220, 220, 220, 220, 220, 220, ...
2.538462
117
from gtts import gTTS as ttos
from pydub import AudioSegment
import os
[ 6738, 308, 83, 912, 1330, 308, 51, 4694, 355, 256, 83, 418, 198, 6738, 279, 5173, 549, 1330, 13491, 41030, 434, 198, 11748, 28686, 198 ]
2.84
25
from __future__ import absolute_import

from relaax.server.parameter_server import parameter_server_base
from relaax.server.common import session

from . import ddpg_model
[ 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 198, 6738, 823, 64, 897, 13, 15388, 13, 17143, 2357, 62, 15388, 1330, 11507, 62, 15388, 62, 8692, 198, 6738, 823, 64, 897, 13, 15388, 13, 11321, 1330, 6246, 198, 198, 6738, 764, 1330, ...
3.659574
47
# -*- coding: utf-8 -*-

import json
import os

import pyminifier

try:
    import io as StringIO
except ImportError:
    import cStringIO as StringIO  # lint:ok

# Check to see if slimit or some other minification library is installed and
# Set minify equal to slimit's minify function.
try:
    import slimit
    js_minify = slimit.minify
except ImportError as error:
    print(error)
    js_minify = slimit = None

###############################################################################


def process_unittest(filename):
    """Process a VFS filename for Brython."""
    print("Generating {}".format(filename))
    nb = 0
    nb_err = 0
    _main_root = os.path.dirname(filename)
    _VFS = {}
    for _mydir in ("Lib",):
        for _root, _dir, _files in os.walk(os.path.join(_main_root, _mydir)):
            if 'unittest' not in _root:
                continue
            if '__pycache__' in _root:
                continue
            for _file in _files:
                _ext = os.path.splitext(_file)[1]
                if _ext not in ('.py',):  # was ('.py'), a plain string, not a tuple
                    continue
                nb += 1
                file_name = os.path.join(_root, _file)
                try:
                    # python 3
                    with open(file_name, encoding="utf-8") as file_with_data:
                        _data = file_with_data.read()
                except Exception as reason:
                    # python 2
                    with open(file_name, "r") as file_with_data:
                        _data = str(file_with_data.read()).decode("utf-8")
                if not len(_data):
                    print("No data for {} ({}).".format(_file, type(_data)))
                if _ext.lower() == '.py' and _data:
                    try:
                        _data = pyminifier.remove_comments_and_docstrings(_data)
                        _data = pyminifier.dedent(_data)
                    except Exception as error:
                        print(error)
                        nb_err += 1
                _vfs_filename = os.path.join(_root, _file).replace(_main_root, '')
                _vfs_filename = _vfs_filename.replace("\\", "/")
                mod_name = _vfs_filename[len(_mydir) + 2:].replace('/', '.')
                mod_name, ext = os.path.splitext(mod_name)
                is_package = mod_name.endswith('__init__')
                if is_package:
                    mod_name = mod_name[:-9]
                    _VFS[mod_name] = [_data, 1]
                else:
                    _VFS[mod_name] = [_data]
                print(("Adding %s %s" % (mod_name, _vfs_filename)))
    print('%s files, %s errors' % (nb, nb_err))
    with open(filename, "w") as file_to_write_VFS:
        file_to_write_VFS.write('__BRYTHON__.libs = __BRYTHON__.libs || {};\n')
        # was "__BRYTHON__.=libs['unittest']=%s" in the damaged copy
        file_to_write_VFS.write("__BRYTHON__.libs['unittest']=%s;\n\n" % json.dumps(_VFS))
        file_to_write_VFS.write("""
__BRYTHON__.import_from_unittest = function(mod_name){
    var stored = __BRYTHON__.libs['unittest'][mod_name]
    if(stored!==undefined){
        var module_contents = stored[0]
        var is_package = stored[1]
        var path = 'py_unittest'
        var module = {name:mod_name,__class__:$B.$ModuleDict,is_package:is_package}
        if(is_package){var package=mod_name}
        else{
            var elts = mod_name.split('.')
            elts.pop()
            var package = elts.join('.')
        }
        $B.modules[mod_name].$package = is_package
        $B.modules[mod_name].__package__ = package
        run_py(module,path,module_contents)
        return true
    }
    return null
}
// add this import function to brython by doing the following:
// <body onload="brython({custom_import_funcs:[__BRYTHON__.import_from_unittest]})">
// this will allow us to import unittest modules.
""")


def process(filename, exclude_dirs=['unittest', ]):
    """Process a VFS filename for Brython."""
    print("Generating {}".format(filename))
    nb = 0
    nb_err = 0
    _main_root = os.path.dirname(filename)
    _VFS = {}
    for _mydir in ("libs", "Lib"):
        for _root, _dir, _files in os.walk(os.path.join(_main_root, _mydir)):
            # if _root.endswith('lib_migration'):
            _flag = False
            for _exclude in exclude_dirs:
                if _exclude in _root:  # _root.endswith(_exclude):
                    _flag = True
                    continue
            if _flag:
                continue  # skip these modules
            if '__pycache__' in _root:
                continue
            nb += 1
            for _file in _files:
                _ext = os.path.splitext(_file)[1]
                if _ext not in ('.js', '.py'):
                    continue
                nb += 1
                with open(os.path.join(_root, _file), "r") as file_with_data:
                    _data = file_with_data.read()
                if len(_data) == 0:
                    print('no data for %s' % _file)
                    _data = unicode('')
                    print(_data, type(_data))
                else:
                    _data = _data.decode('utf-8')
                if _ext == '.js':  # was "_ext in '.js'", a substring test
                    if js_minify is not None:
                        try:
                            _data = js_minify(_data)
                        except Exception as error:
                            print(error)
                elif _ext == '.py' and len(_data) > 0:
                    try:
                        _data = pyminifier.remove_comments_and_docstrings(_data)
                        _data = pyminifier.dedent(_data)
                    except Exception as error:
                        print(error)
                        nb_err += 1
                _vfs_filename = os.path.join(_root, _file).replace(_main_root, '')
                _vfs_filename = _vfs_filename.replace("\\", "/")
                if _vfs_filename.startswith('/libs/crypto_js/rollups/'):
                    if _file not in ('md5.js', 'sha1.js', 'sha3.js',
                                     'sha224.js', 'sha384.js', 'sha512.js'):
                        continue
                mod_name = _vfs_filename[len(_mydir) + 2:].replace('/', '.')
                mod_name, ext = os.path.splitext(mod_name)
                is_package = mod_name.endswith('__init__')
                if is_package:
                    mod_name = mod_name[:-9]
                    _VFS[mod_name] = [ext, _data, 1]
                else:
                    _VFS[mod_name] = [ext, _data]
                print(("adding %s %s" % (mod_name, _vfs_filename)))
    print('%s files, %s errors' % (nb, nb_err))
    with open(filename, "w") as file_to_write_VFS:
        file_to_write_VFS.write('__BRYTHON__.use_VFS = true;\n')
        file_to_write_VFS.write('__BRYTHON__.VFS=%s;\n\n' % json.dumps(_VFS))

###############################################################################
if __name__ == '__main__':
    _main_root = os.path.join(os.getcwd(), '../src')
    process(os.path.join(_main_root, "py_VFS.js"))
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 628, 198, 11748, 33918, 198, 11748, 28686, 198, 198, 11748, 12972, 1084, 7483, 198, 198, 28311, 25, 198, 220, 220, 220, 1330, 33245, 355, 10903, 9399, 198, 16341, 17267, 12331,...
1.867706
3,719
from rtlsdr import RtlSdr
from contextlib import closing
from matplotlib import pyplot as plt
import numpy as np
from scipy.signal import spectrogram, windows
from scipy import signal
from skimage.io import imsave, imread
from datetime import datetime
import json
import os
from tqdm import tqdm
import time
from queue import Queue
import asyncio
from pathlib import Path

import warnings
for cat in [RuntimeWarning, UserWarning, FutureWarning]:
    warnings.filterwarnings("ignore", category=cat)

# y   -- spectrogram, nf by nt array
# dbf -- Dynamic range of the spectrum

from sklearn.preprocessing import MinMaxScaler, StandardScaler

# string_to_linspace('24M:28M:3M')

if __name__ == "__main__":
    # split_images()
    # plot_one()
    scan(repeats=3, target_hpb=1500)
    split_images()
    # plot_one()
[ 6738, 374, 83, 7278, 7109, 1330, 371, 28781, 50, 7109, 198, 6738, 4732, 8019, 1330, 9605, 198, 6738, 2603, 29487, 8019, 1330, 12972, 29487, 355, 458, 83, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 629, 541, 88, 13, 12683, 282, 133...
3.018382
272
#
import os

TEST_DIR = os.path.abspath(os.path.dirname(__file__))
[ 2, 198, 11748, 28686, 198, 198, 51, 6465, 62, 34720, 796, 28686, 13, 6978, 13, 397, 2777, 776, 7, 418, 13, 6978, 13, 15908, 3672, 7, 834, 7753, 834, 4008, 198 ]
2.16129
31
from __future__ import print_function

import argparse
import os

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import datasets, transforms
from tqdm import tqdm

from pyromancy import pyromq
from pyromancy.losses import LossGroup, NegativeLogLikelihood
from pyromancy.metrics import MetricGroup, Accuracy
from pyromancy.subscribers import LogSubscriber


# noinspection PyCallingNonCallable,PyCallingNonCallable
def run_once(args, train_loader, test_loader):
    broker = pyromq.Broker()

    model = Net()
    if args.cuda:
        model.cuda()

    training_events = pyromq.TrainingEventPublisher(broker=broker)
    broker.add_subscriber(
        LogSubscriber(experiment_uid=args.experiment_name,
                      log_file=os.path.join('logs', args.experiment_name),
                      to_console=args.log_to_console))

    opt = torch.optim.SGD(params=model.parameters(),
                          lr=args.lr,
                          weight_decay=args.weight_decay,
                          momentum=args.momentum)

    losses = LossGroup(optimizer=opt,
                       grad_clip_norm=args.grad_clip_norm,
                       name='losses',
                       channel_name=pyromq.channels.METRIC_EVENTS,
                       broker=broker)
    losses.add(NegativeLogLikelihood(name='nll',
                                     target_name='y_target',
                                     output_name='y_pred'),
               data_target='train')

    # Metrics
    metrics = MetricGroup(name='metrics',
                          channel_name=pyromq.channels.METRIC_EVENTS,
                          broker=broker)
    metrics.add(Accuracy(name='acc',
                         target_name='y_target',
                         output_name='y_pred'),
                data_target='*')
    metrics.add(NegativeLogLikelihood(name='nll',
                                      target_name='y_target',
                                      output_name='y_pred'),
                data_target='val')

    training_events.training_start()
    for _ in tqdm(range(args.epochs), total=args.epochs):
        training_events.epoch_start()

        model.train(True)
        for data, target in train_loader:
            # From the original example
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            data, target = Variable(data), Variable(target)

            # put the incoming batch data into a dictionary
            batch_dict = {'x_data': data, 'y_target': target}

            # Training Event
            training_events.batch_start()

            # Get model outputs
            predictions = {'y_pred': model(batch_dict['x_data'])}

            # Compute Metrics
            metrics.compute(in_dict=batch_dict, out_dict=predictions,
                            data_type='train')

            # Compute Losses
            losses.compute(in_dict=batch_dict, out_dict=predictions,
                           data_type='train')
            losses.step()

            # Training Event
            training_events.batch_end()

        model.train(False)
        for data, target in test_loader:
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            data, target = Variable(data, volatile=True), Variable(target)

            batch_dict = {'x_data': data, 'y_target': target}

            # Training Event
            training_events.batch_start()

            predictions = {'y_pred': model(batch_dict['x_data'])}

            metrics.compute(in_dict=batch_dict, out_dict=predictions,
                            data_type='val')

            training_events.batch_end()

        training_events.epoch_end()


if __name__ == "__main__":
    main()
[ 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 198, 11748, 1822, 29572, 198, 11748, 28686, 198, 198, 11748, 28034, 198, 11748, 28034, 13, 20471, 355, 299, 77, 198, 11748, 28034, 13, 20471, 13, 45124, 355, 376, 198, 6738, 28034, 13,...
1.971646
1,975
#!/usr/bin/env python
import os
import copy
import json
import numpy as np

import selfdrive.messaging as messaging
from selfdrive.locationd.calibration_helpers import Calibration
from selfdrive.swaglog import cloudlog
from common.params import Params
from common.transformations.model import model_height
from common.transformations.camera import view_frame_from_device_frame, get_view_frame_from_road_frame, \
                                          eon_intrinsics, get_calib_from_vp, H, W

MPH_TO_MS = 0.44704
MIN_SPEED_FILTER = 15 * MPH_TO_MS
MAX_YAW_RATE_FILTER = np.radians(2)  # per second
INPUTS_NEEDED = 300   # allow to update VP every so many frames
INPUTS_WANTED = 600   # We want a little bit more than we need for stability
WRITE_CYCLES = 400    # write every 400 cycles
VP_INIT = np.array([W/2., H/2.])

# These validity corners were chosen by looking at 1000
# and taking most extreme cases with some margin.
VP_VALIDITY_CORNERS = np.array([[W//2 - 150, 280], [W//2 + 150, 540]])
DEBUG = os.getenv("DEBUG") is not None


if __name__ == "__main__":
    main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 11748, 28686, 198, 11748, 4866, 198, 11748, 33918, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 2116, 19472, 13, 37348, 3039, 355, 19925, 198, 6738, 2116, 19472, 13, 24886, 67, 13, 99...
2.717172
396
import argparse
import copy
import logging
import sys
from dataclasses import dataclass
from datetime import datetime, timedelta
from slack_sdk import WebClient
from typing import Dict, Optional, List

import pytz

from hunter import config
from hunter.attributes import get_back_links
from hunter.config import ConfigError, Config
from hunter.data_selector import DataSelector
from hunter.grafana import GrafanaError, Grafana, Annotation
from hunter.graphite import GraphiteError
from hunter.importer import DataImportError, Importers
from hunter.report import Report
from hunter.series import (
    AnalysisOptions,
    ChangePointGroup,
    SeriesComparison,
    compare,
    AnalyzedSeries,
)
from hunter.slack import SlackNotifier, NotificationError
from hunter.test_config import TestConfigError, TestConfig, GraphiteTestConfig
from hunter.util import parse_datetime, DateFormatError, interpolate


def setup_data_selector_parser(parser: argparse.ArgumentParser):
    parser.add_argument(
        "--branch", metavar="STRING", dest="branch", help="name of the branch", nargs="?"
    )
    parser.add_argument(
        "--metrics",
        metavar="LIST",
        dest="metrics",
        help="a comma-separated list of metrics to analyze",
    )
    parser.add_argument(
        "--attrs",
        metavar="LIST",
        dest="attributes",
        help="a comma-separated list of attribute names associated with the runs "
        "(e.g. commit, branch, version); "
        "if not specified, it will be automatically filled based on available information",
    )
    since_group = parser.add_mutually_exclusive_group()
    since_group.add_argument(
        "--since-commit",
        metavar="STRING",
        dest="since_commit",
        help="the commit at the start of the time span to analyze",
    )
    since_group.add_argument(
        "--since-version",
        metavar="STRING",
        dest="since_version",
        help="the version at the start of the time span to analyze",
    )
    since_group.add_argument(
        "--since",
        metavar="DATE",
        dest="since_time",
        help="the start of the time span to analyze; "
        "accepts ISO, and human-readable dates like '10 weeks ago'",
    )
    until_group = parser.add_mutually_exclusive_group()
    until_group.add_argument(
        "--until-commit",
        metavar="STRING",
        dest="until_commit",
        help="the commit at the end of the time span to analyze",
    )
    until_group.add_argument(
        "--until-version",
        metavar="STRING",
        dest="until_version",
        help="the version at the end of the time span to analyze",
    )
    until_group.add_argument(
        "--until",
        metavar="DATE",
        dest="until_time",
        help="the end of the time span to analyze; same syntax as --since",
    )
    parser.add_argument(
        "--last",
        type=int,
        metavar="COUNT",
        dest="last_n_points",
        help="the number of data points to take from the end of the series",
    )


def data_selector_from_args(args: argparse.Namespace) -> DataSelector:
    data_selector = DataSelector()
    if args.branch:
        data_selector.branch = args.branch
    if args.metrics is not None:
        data_selector.metrics = list(args.metrics.split(","))
    if args.attributes is not None:
        data_selector.attributes = list(args.attributes.split(","))
    if args.since_commit is not None:
        data_selector.since_commit = args.since_commit
    if args.since_version is not None:
        data_selector.since_version = args.since_version
    if args.since_time is not None:
        data_selector.since_time = parse_datetime(args.since_time)
    if args.until_commit is not None:
        data_selector.until_commit = args.until_commit
    if args.until_version is not None:
        data_selector.until_version = args.until_version
    if args.until_time is not None:
        data_selector.until_time = parse_datetime(args.until_time)
    if args.last_n_points is not None:
        data_selector.last_n_points = args.last_n_points
    return data_selector


def setup_analysis_options_parser(parser: argparse.ArgumentParser):
    # was the single string "-P, --p-value", which argparse rejects
    parser.add_argument(
        "-P",
        "--p-value",
        dest="pvalue",
        type=float,
        default=0.001,
        help="maximum accepted P-value of a change-point; "
        "P denotes the probability that the change-point has "
        "been found by a random coincidence, rather than a real "
        "difference between the data distributions",
    )
    parser.add_argument(
        "-M",
        "--magnitude",
        dest="magnitude",
        type=float,
        default=0.0,
        help="minimum accepted magnitude of a change-point "
        "computed as abs(new_mean / old_mean - 1.0); use it "
        "to filter out stupidly small changes like < 0.01",
    )
    parser.add_argument(
        "--window",
        default=50,
        type=int,
        dest="window",
        help="the number of data points analyzed at once; "
        "the window size affects the discriminative "
        "power of the change point detection algorithm; "
        "large windows are less susceptible to noise; "
        "however, a very large window may cause dismissing short regressions "
        "as noise so it is best to keep it short enough to include not more "
        "than a few change points (optimally at most 1)",
    )


def analysis_options_from_args(args: argparse.Namespace) -> AnalysisOptions:
    conf = AnalysisOptions()
    if args.pvalue is not None:
        conf.max_pvalue = args.pvalue
    if args.magnitude is not None:
        conf.min_magnitude = args.magnitude
    if args.window is not None:
        conf.window_len = args.window
    return conf


def main():
    logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.INFO)

    parser = argparse.ArgumentParser(description="Hunts performance regressions in Fallout results")

    subparsers = parser.add_subparsers(dest="command")
    list_tests_parser = subparsers.add_parser("list-tests", help="list available tests")
    list_tests_parser.add_argument("group", help="name of the group of the tests", nargs="*")

    list_metrics_parser = subparsers.add_parser(
        "list-metrics", help="list available metrics for a test"
    )
    list_metrics_parser.add_argument("test", help="name of the test")

    subparsers.add_parser("list-groups", help="list available groups of tests")

    analyze_parser = subparsers.add_parser(
        "analyze",
        help="analyze performance test results",
        formatter_class=argparse.RawTextHelpFormatter,
    )
    analyze_parser.add_argument("tests", help="name of the test or group of the tests", nargs="+")
    analyze_parser.add_argument(
        "--update-grafana",
        help="Update Grafana dashboards with appropriate annotations of change points",
        action="store_true",
    )
    analyze_parser.add_argument(
        "--notify-slack",
        help="Send notification containing a summary of change points to given Slack channels",
        nargs="+",
    )
    analyze_parser.add_argument(
        "--cph-report-since",
        help="Sets a limit on the date range of the Change Point History reported to Slack. "
        "Same syntax as --since.",
        metavar="DATE",
        dest="cph_report_since",
    )
    setup_data_selector_parser(analyze_parser)
    setup_analysis_options_parser(analyze_parser)

    regressions_parser = subparsers.add_parser("regressions", help="find performance regressions")
    regressions_parser.add_argument(
        "tests", help="name of the test or group of the tests", nargs="+"
    )
    setup_data_selector_parser(regressions_parser)
    setup_analysis_options_parser(regressions_parser)

    remove_annotations_parser = subparsers.add_parser("remove-annotations")
    remove_annotations_parser.add_argument(
        "tests", help="name of the test or test group", nargs="*"
    )
    remove_annotations_parser.add_argument(
        "--force", help="don't ask questions, just do it", dest="force", action="store_true"
    )

    validate_parser = subparsers.add_parser("validate", help="validates the tests and metrics defined in the configuration")

    try:
        args = parser.parse_args()
        conf = config.load_config()
        hunter = Hunter(conf)

        if args.command == "list-groups":
            hunter.list_test_groups()

        if args.command == "list-tests":
            group_names = args.group if args.group else None
            hunter.list_tests(group_names)

        if args.command == "list-metrics":
            test = hunter.get_test(args.test)
            hunter.list_metrics(test)

        if args.command == "analyze":
            update_grafana_flag = args.update_grafana
            slack_notification_channels = args.notify_slack
            slack_cph_since = parse_datetime(args.cph_report_since)
            data_selector = data_selector_from_args(args)
            options = analysis_options_from_args(args)
            tests = hunter.get_tests(*args.tests)
            tests_analyzed_series = {test.name: None for test in tests}
            for test in tests:
                try:
                    analyzed_series = hunter.analyze(test, selector=data_selector, options=options)
                    if update_grafana_flag:
                        if not isinstance(test, GraphiteTestConfig):
                            raise GrafanaError("Not a Graphite test")
                        hunter.update_grafana_annotations(test, analyzed_series)
                    if slack_notification_channels:
                        tests_analyzed_series[test.name] = analyzed_series
                except DataImportError as err:
                    logging.error(err.message)
                except GrafanaError as err:
                    logging.error(
                        f"Failed to update grafana dashboards for {test.name}: {err.message}"
                    )
            if slack_notification_channels:
                hunter.notify_slack(
                    tests_analyzed_series,
                    selector=data_selector,
                    channels=slack_notification_channels,
                    since=slack_cph_since,
                )

        if args.command == "regressions":
            data_selector = data_selector_from_args(args)
            options = analysis_options_from_args(args)
            tests = hunter.get_tests(*args.tests)
            regressing_test_count = 0
            errors = 0
            for test in tests:
                try:
                    regressions = hunter.regressions(
                        test, selector=data_selector, options=options
                    )
                    if regressions:
                        regressing_test_count += 1
                except HunterError as err:
                    logging.error(err.message)
                    errors += 1
                except DataImportError as err:
                    logging.error(err.message)
                    errors += 1
            if regressing_test_count == 0:
                print("No regressions found!")
            elif regressing_test_count == 1:
                print("Regressions in 1 test found")
            else:
                print(f"Regressions in {regressing_test_count} tests found")
            if errors > 0:
                print("Some tests were skipped due to import / analyze errors. Consult error log.")

        if args.command == "remove-annotations":
            if args.tests:
                tests = hunter.get_tests(*args.tests)
                for test in tests:
                    hunter.remove_grafana_annotations(test, args.force)
            else:
                hunter.remove_grafana_annotations(None, args.force)

        if args.command == "validate":
            hunter.validate()

        if args.command is None:
            parser.print_usage()

    except ConfigError as err:
        logging.error(err.message)
        exit(1)
    except TestConfigError as err:
        logging.error(err.message)
        exit(1)
    except GraphiteError as err:
        logging.error(err.message)
        exit(1)
    except GrafanaError as err:
        logging.error(err.message)
        exit(1)
    except DataImportError as err:
        logging.error(err.message)
        exit(1)
    except HunterError as err:
        logging.error(err.message)
        exit(1)
    except DateFormatError as err:
        logging.error(err.message)
        exit(1)
    except NotificationError as err:
        logging.error(err.message)
        exit(1)


if __name__ == "__main__":
    main()
[ 11748, 1822, 29572, 198, 11748, 4866, 198, 11748, 18931, 198, 11748, 25064, 198, 6738, 4818, 330, 28958, 1330, 4818, 330, 31172, 198, 6738, 4818, 8079, 1330, 4818, 8079, 11, 28805, 12514, 198, 6738, 30740, 62, 21282, 74, 1330, 5313, 11792...
2.334926
5,443
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.

# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.

import os
import sys
import shutil

# Check these extensions were installed.
import sphinx_gallery.gen_gallery

# The package should be installed in a virtual environment.
import onnxruntime

# The documentation requires two extensions available at:
# https://github.com/xadupre/sphinx-docfx-yaml
# https://github.com/xadupre/sphinx-docfx-markdown
import sphinx_modern_theme

# -- Project information -----------------------------------------------------

project = 'ONNX Runtime'
copyright = '2018, Microsoft'
author = 'Microsoft'
version = onnxruntime.__version__
release = version

# -- General configuration ---------------------------------------------------

extensions = [
    'sphinx.ext.intersphinx',
    'sphinx.ext.imgmath',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    "sphinx.ext.autodoc",
    'sphinx.ext.githubpages',
    "sphinx_gallery.gen_gallery",
    'sphinx.ext.autodoc',
    "docfx_yaml.extension",
    "docfx_markdown",
    "pyquickhelper.sphinxext.sphinx_runpython_extension",
]

templates_path = ['_templates']

source_parsers = {
    '.md': 'recommonmark.parser.CommonMarkParser',
}

source_suffix = ['.rst', '.md']

master_doc = 'intro'
language = "en"
exclude_patterns = []
pygments_style = 'sphinx'

# -- Options for HTML output -------------------------------------------------

html_theme = "sphinx_modern_theme"
html_theme_path = [sphinx_modern_theme.get_html_theme_path()]
html_logo = "../MSFT-Onnx-Runtime-11282019-Logo.png"
html_static_path = ['_static']

# -- Options for intersphinx extension ---------------------------------------

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}

# -- Options for Sphinx Gallery ----------------------------------------------

sphinx_gallery_conf = {
    'examples_dirs': 'examples',
    'gallery_dirs': 'auto_examples',
}

# -- markdown options ---------------------------------------------------------

md_image_dest = "media"
md_link_replace = {
    '#onnxruntimesessionoptionsenable-profiling)': '#class-onnxruntimesessionoptions)',
}

# -- Setup actions -----------------------------------------------------------
[ 2, 15069, 357, 66, 8, 5413, 10501, 13, 1439, 2489, 10395, 13, 198, 2, 49962, 739, 262, 17168, 13789, 13, 198, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 198, 2, 28373, 2393, 329, 262, 45368, 28413, 10...
3.155378
753
"""Traffic simulator code.""" import sys from os import path from traffic_sim.analysis import TrafficExperiment from traffic_sim.console import console if not __package__: _path = path.realpath(path.abspath(__file__)) sys.path.insert(0, path.dirname(path.dirname(_path))) def main(): """Run code from CLI.""" console.log('traffic sim') num_trials = 30 ex = TrafficExperiment( experiments=100, trials=num_trials, rows=10, cols=10, epochs=10, ) ex.run() ex.analyze() if __name__ == '__main__': main()
[ 37811, 15721, 2108, 35375, 2438, 526, 15931, 198, 198, 11748, 25064, 198, 6738, 28686, 1330, 3108, 198, 198, 6738, 4979, 62, 14323, 13, 20930, 1330, 23624, 20468, 3681, 198, 6738, 4979, 62, 14323, 13, 41947, 1330, 8624, 198, 198, 361, 4...
2.391837
245
#! /usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import division, print_function, absolute_import, unicode_literals
import sys
import os
os.chdir(sys.path[0])
sys.path.append('/mnt/sda2/github/TSF1KEV/TSFpy')
from TSF_io import *
# from TSF_Forth import *
from TSF_shuffle import *
from TSF_match import *
from TSF_calc import *
from TSF_time import *

TSF_Forth_init(TSF_io_argvs(), [TSF_shuffle_Initwords, TSF_match_Initwords, TSF_calc_Initwords, TSF_time_Initwords])

TSF_Forth_setTSF("TSF_Tab-Separated-Forth:",
                 "\t".join(["UTF-8", "#TSF_encoding", "200", "#TSF_calcPR", "N-Fibonacci:", "#TSF_this", "0", "#TSF_fin."]),
                 TSF_style="T")
TSF_Forth_setTSF("N-Fibonacci:",
                 "\t".join(["TSF_argvs:", "#TSF_cloneargvs", "TSF_argvs:", "#TSF_lenthe", "[0]Z[Fibcount:0]~[TSF_argvs:0]", "#TSF_calcDC", "Fibcount:", "0", "#TSF_pokethe", "Fibonacci:", "#TSF_this"]),
                 TSF_style="T")
TSF_Forth_setTSF("Fibonacci:",
                 "\t".join(["[Fibcount:1]Z1~[Fibcount:1]", "#TSF_calcDC", "((2&(([0]+3)*[0]+2)^)/((2&(2*[0]+2)^)-(2&([0]+1)^)-1)\\1)#(2&([0]+1)^)", "#TSF_calcDC", "1", "#TSF_echoN", "[Fibcount:1]+1", "#TSF_calcDC", "Fibcount:", "1", "#TSF_pokethe", "Fibjump:", "[Fibcount:0]-([Fibcount:1]+1)o0~1", "#TSF_calcDC", "#TSF_peekthe", "#TSF_this"]),
                 TSF_style="T")
TSF_Forth_setTSF("Fibcount:",
                 "\t".join(["20", "-1"]),
                 TSF_style="T")
TSF_Forth_setTSF("Fibjump:",
                 "\t".join(["Fibonacci:", "#exit"]),
                 TSF_style="T")

TSF_Forth_addfin(TSF_io_argvs())
TSF_Forth_argvsleftcut(TSF_io_argvs(), 1)
TSF_Forth_run()
[ 2, 0, 1220, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 41002, 12, 23, 532, 9, 12, 198, 6738, 11593, 37443, 834, 1330, 7297, 11, 4798, 62, 8818, 11, 48546, 62, 11748, 11, 46903, 1098, 62, 17201, 874, 198, 1...
1.940492
773
#!/usr/bin/python
# -*- coding: UTF-8 -*-

import re
import sys, getopt
import glob
import os


if __name__ == "__main__":
    main(sys.argv[1:])
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 2, 532, 9, 12, 19617, 25, 41002, 12, 23, 532, 9, 12, 198, 198, 11748, 302, 198, 11748, 25064, 11, 651, 8738, 198, 11748, 15095, 198, 11748, 28686, 628, 628, 628, 628, 628, 628, 628, 198, ...
2.304348
69
from pyatool import PYAToolkit

# toolkit

# adb
PYAToolkit.bind_cmd(func_name='test_a', command='shell pm list package | grep google')
#
PYAToolkit.bind_func(real_func=test_b)

# log
PYAToolkit.switch_logger(True)

#
d = PYAToolkit('123456F')
assert d.is_connected()
#
# d = PYAToolkit('123456F', mode='remote')

#
result = d.test_a()
# package:com.google.android.webview
result = d.test_b()
# i am test_b, running on 123456F

# `std` `standard_func`
# d.std.get_current_activity(toolkit=d)

#
all_functions = d.current_function()
print(all_functions)

# id
d.hello_world()
#
installed_package = d.show_package()
#
current_activity_name = d.get_current_activity()

# apkurlpathgithub
d.install_from(url=r'https://github.com/williamfzc/simhand2/releases/download/v0.1.2/app-debug.apk')
#
d.install_from(path=r'/Users/admin/some_path/some_apk.apk')

#
target_package_name = 'com.github.williamfzc.simhand2'
is_installed = d.is_installed(package_name=target_package_name)
#
d.clean_cache(target_package_name)
if is_installed:
    d.uninstall(target_package_name)

# ip
local_address = d.get_ip_address()
print(local_address)

# wifi
d.switch_wifi(False)
#
d.switch_airplane(True)
d.switch_airplane(False)
d.switch_wifi(True)

#
d.set_ime('com.sohu.inputmethod.sogouoem/.SogouIME')

# push and pull
d.push('./README.md', '/sdcard/')
d.pull('/sdcard/README.md', './haha.md')

# send keyevent
d.input_key_event(26)
d.input_key_event(26)

# swipe
d.swipe(500, 1200, 500, 200)

# click
d.click(200, 200)
[ 6738, 12972, 265, 970, 1330, 350, 56, 1404, 970, 15813, 628, 198, 2, 2891, 15813, 628, 198, 2, 512, 65, 198, 47, 56, 1404, 970, 15813, 13, 21653, 62, 28758, 7, 20786, 62, 3672, 11639, 9288, 62, 64, 3256, 3141, 11639, 29149, 9114, ...
2.292793
666
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import absolute_import, print_function, division
from six.moves import xrange, zip

import tensorflow as tf

from .tensor import Tensor
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 11, 3601, 62, 8818, 11, 7297, 198, 6738, 2237, 13, 76, 5241, 133...
2.450549
91
""" Here we're going to code for the local rotations. We're doing an object oriented approach Left and right are in reference to the origin """ __version__ = 1.0 __author__ = 'Katie Kruzan' import string # just to get the alphabet easily iterable import sys # This just helps us in our printing from typing import Dict # This helps us in our documentation # Getting the structure for the classes we're putting together def standardCircle(num_verts: int) -> (Dict[str, Segment], Dict[str, Outer], Dict[str, Inner]): """ This will go through and initialize our standard starting circle :param num_verts: the number of outer nodes we will have :returns: tuple(segs, outs, inns) -segs - dictionary of str: Segment objects in the circle \\ -outs - dictionary of str: Outer objects in the circle \\ -inns - dictionary of str: Inner objects in the circle """ # Initializing our dictionaries segs = dict() outs = dict() inns = dict() # Running through the number of vertices we will be edning up with for i in range(num_verts): # start with an inner node - labeling with lowercase letters inn = Inner(string.ascii_letters[i]) # If we aren't on the first one, connect it to the previous one. if i != 0: inn.setLeftInner(inns[string.ascii_letters[i - 1]]) # If we've hit the end of the line, go ahead and close up the circle. if i == num_verts - 1: inn.setRightInner(inns[string.ascii_letters[0]]) # then make the outer out = Outer(str(i + 1)) # Go ahead and connect the inner we just made with this outer node out.setAdjInner(inn) # If we aren't on the first one, go ahead and connect it to the previous segment if i != 0: out.setLeftSegment(segs[str(-i)]) # Now time to make the segment seg = Segment(str(-i - 1)) # Go ahead and connect the outer node we just made with this segment seg.setLeftOuter(out) # If we're at the end of the circle, then we close it up. Otherwise, move on if i == num_verts - 1: seg.setRightOuter(outs[str(1)]) # add them to our dictionaries segs[seg.getName()] = seg outs[out.getName()] = out inns[inn.getName()] = inn # If we've made it here, then we've made the full circle and are ready to return it return segs, outs, inns def findTheFace(source_in: Inner) -> list: """ This will take an inner node and use the algorithm to walk the face that it is on. The order of the face will be i, o, s, o, i repeat :param source_in: Inner node object we are starting from. :return: face: a list representing the face. This list is of inner, outer, and segment objects in the order i, o, s, o, i, repeat. """ # initialize the list face = list() # starting the face with the source inner node. face.append(source_in) # initialize the ending inner node we will be using for comparison end_in = None # As long as we haven't looped back around, go through the following process. while source_in != end_in: # inner: find adjacent outer face.append(face[-1].getAdjOuter()) # outer: go to right seg face.append(face[-1].getRightSegment()) # segment: go to right outer face.append(face[-1].getRightOuter()) # outer: then adj inner face.append(face[-1].getAdjInner()) # then left inner and repeat. # set this inner node as our node to compare to our starting node. end_in = face[-1].getLeftInner() face.append(end_in) return face def faceCannonOrder(face: list) -> list: """ Just list the face with the face elements in order. We will do it with the first numerical face, and then go right before it for an order that will be consistent. :param face: a list representing the face. 
This list is of inner, outer, and segment objects in the order i, o, s, o, i, repeat. :return: ordered face in canonical order """ # find the first numerical face then go right before it # initialize face num as a relatively high number we won't encounter facenum = 333 # initialize the int for where we will split the list start_ind = 0 # loop through and find the face we want to find for i in range(len(face)): try: if int(face[i].getName()) < facenum: # To get here, we must have found a lower face # keep track of where this is located in the list start_ind = i - 1 # make our current lowest face the new lowest face to keep comparing to. facenum = int(face[i].getName()) # if we try casting a letter to a number, python will get upset, but that also means we're looking at # an inner node, which we don't want for this anyways. except ValueError: continue # make our ordered face getting from the starting index to the end, then wrapping around and getting the rest of # the face ord_face = face[start_ind:] + face[:start_ind] # go through and make sure we don't have any duplicate elements right by each other. If we do, then drop them. for i in range(len(ord_face) - 1): if ord_face[i].toString() == ord_face[i + 1].toString(): ord_face.pop(i) break # return the ordered face return ord_face def grabAllTheFaces(inns: Dict[str, Inner]) -> list: """ Function to get the list of unique faces for our circle. :param inns: dictionary of Inner objects. We will loop through these to get the faces :return: faces: List of distinct faces in canonical order. """ # initialize the list of faces faces = list() # a set of all the elements we have covered by the faces. Will use this for a completeness check covered = set() # run through every inner node we've been given for inn in inns: # Generate the face that inner node lies on face = findTheFace(inns[inn]) # put the face we've gotten in canonical order face = faceCannonOrder(face) # Check if we've already captured it. if face not in faces: # If not, then add it to our list of faces faces.append(face) # Go ahead and add the elements in this face to our covered set covered.update(face) # check we've gotten all the elements if len(covered) == (3 * len(inns)): print('We got em!!!') # Now return a list of all the faces we have. return faces def printCircleStatus(segs: Dict[str, Segment], outs: Dict[str, Outer], inns: Dict[str, Inner]): """ Helper function that prints the status of the circle to the console :param segs: dictionary of str: Segment objects in the circle :param outs: dictionary of str: Outer objects in the circle :param inns: dictionary of str: Inner objects in the circle :return: None """ # Run through the segments print('\nSegments:') for k in segs: print() print(k) print(segs[k].toString()) # Run through the Outer nodes print('\nOuters:') for k in outs: print() print(k) print(outs[k].toString()) # Run through the Inner nodes print('\nInners:') for k in inns: print() print(k) print(inns[k].toString()) if __name__ == '__main__': # This is where you change the variables. # must be a positive integer > 2 verts = 12 # Must be a string with spaces between each element. 
If you want to denote multiple cycles, you must add a | switch_txt = '2 3 4 5 | 12 7' # we're going to make a list of all the switches and all the cycles switches = list() # first, we get the cycles, split by '|' cycles = switch_txt.split('|') for c in cycles: # We're going to split the switch into a list split by the whitespace s = c.strip().split() # Then we're going to append the switches in the cycle to the new list switches.append(s) # Go ahead and make the standard circle given the number of vertices we want to use. segments, outers, inners = standardCircle(verts) # Go through and grab the faces for our standard circle facs = grabAllTheFaces(inners) print('\nPrinting the faces') for f in facs: print() for p in f: sys.stdout.write(p.getName() + ' ') # Go through and do the switches for each cycle for switch in switches: for num in range(len(switch)): # store the current part of the switch we're working on cs = switch[num] # store the next part of the switch we're working on, looping to the beginning if we're at the end ns = switch[(num + 1) % len(switch)] # Do the actual switch # Getting the new inner and outer validly switched up inners[string.ascii_letters[int(cs) - 1]].setAdjOuter(outers[ns]) outers[ns].setAdjInner(inners[string.ascii_letters[int(cs) - 1]]) # print how the final rotation sits printCircleStatus(segments, outers, inners) # Go through and generate and print the new faces new_facs = grabAllTheFaces(inners) print('\nPrinting the new faces') for f in new_facs: print() for p in f: sys.stdout.write(p.getName() + ' ')
[ 37811, 198, 4342, 356, 821, 1016, 284, 2438, 329, 262, 1957, 5724, 602, 13, 775, 821, 1804, 281, 2134, 25921, 3164, 198, 18819, 290, 826, 389, 287, 4941, 284, 262, 8159, 198, 37811, 198, 198, 834, 9641, 834, 796, 352, 13, 15, 198, ...
2.638797
3,624
import random
import argparse
import numpy as np
import pandas as pd
import os
import time
import string

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm

from model import WideResnet
from cifar import get_train_loader, get_val_loader
from label_guessor import LabelGuessor
from lr_scheduler import WarmupCosineLrScheduler
from ema import EMA
import utils

## args
parser = argparse.ArgumentParser(description=' FixMatch Training')
parser.add_argument('--wresnet-k', default=2, type=int, help='width factor of wide resnet')
parser.add_argument('--wresnet-n', default=28, type=int, help='depth of wide resnet')
parser.add_argument('--n-classes', type=int, default=10, help='number of classes in dataset')
parser.add_argument('--n-labeled', type=int, default=10, help='number of labeled samples for training')
parser.add_argument('--n-epochs', type=int, default=256, help='number of training epochs')
parser.add_argument('--batchsize', type=int, default=64, help='train batch size of labeled samples')
parser.add_argument('--mu', type=int, default=7, help='factor of train batch size of unlabeled samples')
parser.add_argument('--mu-c', type=int, default=1, help='factor of train batch size of contrastive learning samples')
parser.add_argument('--thr', type=float, default=0.95, help='pseudo label threshold')
parser.add_argument('--n-imgs-per-epoch', type=int, default=50000, help='number of training images for each epoch')
parser.add_argument('--lam-x', type=float, default=1., help='coefficient of labeled loss')
parser.add_argument('--lam-u', type=float, default=1., help='coefficient of unlabeled loss')
parser.add_argument('--lam-clr', type=float, default=1., help='coefficient of contrastive loss')
parser.add_argument('--ema-alpha', type=float, default=0.999, help='decay rate for ema module')
parser.add_argument('--lr', type=float, default=0.03, help='learning rate for training')
parser.add_argument('--weight-decay', type=float, default=5e-4, help='weight decay')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum for optimizer')
parser.add_argument('--seed', type=int, default=-1, help='seed for random behaviors, no seed if negative')
parser.add_argument('--feature_dim', default=128, type=int, help='Feature dim for latent vector')
parser.add_argument('--temperature', default=0.5, type=float, help='Temperature used in softmax')
parser.add_argument('--k', default=200, type=int, help='Top k most similar images used to predict the label')
parser.add_argument('--test', default=0, type=int, help='0 is softmax test function, 1 is similarity test function')
parser.add_argument('--bootstrap', type=int, default=16, help='Bootstrapping factor (default=16)')
parser.add_argument('--boot-schedule', type=int, default=1, help='Bootstrapping schedule (default=1)')
parser.add_argument('--balance', type=int, default=0, help='Balance class methods to use (default=0 None)')
parser.add_argument('--delT', type=float, default=0.2, help='Class balance threshold delta (default=0.2)')
args = parser.parse_args()
print(args)

# save results
save_name_pre = '{}_E{}_B{}_LX{}_LU{}_LCLR{}_THR{}_LR{}_WD{}'.format(
    args.n_labeled, args.n_epochs, args.batchsize, args.lam_x, args.lam_u,
    args.lam_clr, args.thr, args.lr, args.weight_decay)
ticks = time.time()
result_dir = 'results/' + save_name_pre + '.' + str(ticks)
if not os.path.exists(result_dir):
    os.mkdir(result_dir)

if __name__ == '__main__':
    train()
[ 11748, 4738, 198, 11748, 1822, 29572, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292, 355, 279, 67, 198, 11748, 28686, 198, 11748, 640, 198, 11748, 4731, 198, 198, 11748, 28034, 198, 11748, 28034, 13, 20471, 355, 299, 77, 19...
3.048443
1,156
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt

data = pd.read_csv("data.csv")
data.info()
"""
Data columns (total 33 columns):
 #   Column       Non-Null Count  Dtype
---  ------       --------------  -----
 0   id           569 non-null    int64
 .
 .
 .
 32  Unnamed: 32  0 non-null      float64
"""
data.drop(["Unnamed: 32", "id"], axis=1, inplace=True)
# data.head(10)
data.diagnosis = [1 if each == "M" else 0 for each in data.diagnosis]
y = data.diagnosis.values
x_data = data.drop(["diagnosis"], axis=1)

# %% Normalization
x_normalized = (x_data - np.min(x_data)) / (np.max(x_data) - np.min(x_data)).values
x_data.head()
"""
x_data.head()
Out[9]:
   radius_mean  texture_mean  ...  symmetry_worst  fractal_dimension_worst
0        17.99         10.38  ...          0.4601                  0.11890
1        20.57         17.77  ...          0.2750                  0.08902
2        19.69         21.25  ...          0.3613                  0.08758
3        11.42         20.38  ...          0.6638                  0.17300
4        20.29         14.34  ...          0.2364                  0.07678
"""
x_normalized.head()
"""
x_normalized.head()
Out[10]:
   radius_mean  texture_mean  ...  symmetry_worst  fractal_dimension_worst
0     0.521037      0.022658  ...        0.598462                 0.418864
1     0.643144      0.272574  ...        0.233590                 0.222878
2     0.601496      0.390260  ...        0.403706                 0.213433
3     0.210090      0.360839  ...        1.000000                 0.773711
4     0.629893      0.156578  ...        0.157500                 0.142595
"""

# %% train test split
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x_normalized, y, test_size=0.25, random_state=42)
# test size & random state can be changed, test size can be chosen as 0.2 or 0.18
# sklearn randomly splits, with given state data will be split with same random pattern.

# rows as features
x_train = x_train.T
x_test = x_test.T
y_train = y_train.T
y_test = y_test.T

# %% Parameter Initialize
"""
If all the weights were initialized to zero, backpropagation will not work as expected
because the gradient for the intermediate neurons and starting neurons will die out
(become zero) and will not update ever.
"""

# %%
# Updating(learning) parameters

# prediction

# implementing logistic regression

# %% Hyperparameter tuning
logistic_regression(x_train, y_train, x_test, y_test, learning_rate=3, num_iterations=1500)
"""
Cost after iteration 0: 0.693035
Cost after iteration 100: 0.153169
Cost after iteration 200: 0.121662
Cost after iteration 300: 0.107146
Cost after iteration 400: 0.098404
Cost after iteration 500: 0.092401
Cost after iteration 600: 0.087937
Cost after iteration 700: 0.084435
Cost after iteration 800: 0.081582
Cost after iteration 900: 0.079191
Cost after iteration 1000: 0.077143
Cost after iteration 1100: 0.075359
Cost after iteration 1200: 0.073784
Cost after iteration 1300: 0.072378
Cost after iteration 1400: 0.071111
No handles with labels found to put in legend.
test accuracy: 98.6013986013986 %
train accuracy: 98.35680751173709 %
"""

logistic_regression(x_train, y_train, x_test, y_test, learning_rate=1, num_iterations=1500)
"""
Cost after iteration 0: 0.693035
Cost after iteration 100: 0.226383
Cost after iteration 200: 0.176670
Cost after iteration 300: 0.153585
Cost after iteration 400: 0.139306
Cost after iteration 500: 0.129319
Cost after iteration 600: 0.121835
Cost after iteration 700: 0.115963
Cost after iteration 800: 0.111204
Cost after iteration 900: 0.107248
No handles with labels found to put in legend.
Cost after iteration 1000: 0.103893
Cost after iteration 1100: 0.101001
Cost after iteration 1200: 0.098474
Cost after iteration 1300: 0.096240
Cost after iteration 1400: 0.094247
test accuracy: 97.9020979020979 %
train accuracy: 98.12206572769954 %
"""

logistic_regression(x_train, y_train, x_test, y_test, learning_rate=0.3, num_iterations=1500)
"""
Cost after iteration 0: 0.693035
Cost after iteration 100: 0.357455
Cost after iteration 200: 0.274917
Cost after iteration 300: 0.235865
Cost after iteration 400: 0.212165
Cost after iteration 500: 0.195780
Cost after iteration 600: 0.183524
Cost after iteration 700: 0.173868
Cost after iteration 800: 0.165980
Cost after iteration 900: 0.159363
Cost after iteration 1000: 0.153700
Cost after iteration 1100: 0.148775
Cost after iteration 1200: 0.144439
Cost after iteration 1300: 0.140581
Cost after iteration 1400: 0.137119
No handles with labels found to put in legend.
test accuracy: 97.9020979020979 %
train accuracy: 96.94835680751174 %
"""

# %% Sklearn
from sklearn.linear_model import LogisticRegression

x_train = x_train.T
x_test = x_test.T
y_train = y_train.T
y_test = y_test.T

logreg = LogisticRegression(random_state=42, max_iter=1500)
print("test accuracy: {} ".format(logreg.fit(x_train, y_train).score(x_test, y_test)))
print("train accuracy: {} ".format(logreg.fit(x_train, y_train).score(x_train, y_train)))
"""
test accuracy: 0.986013986013986
train accuracy: 0.9671361502347418
"""

# %%
[ 11748, 19798, 292, 355, 279, 67, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 384, 397, 1211, 355, 3013, 82, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 198, 7890, 796, 279, 67, 13, 961, 62, 40664, 7203, 78...
2.447393
2,148
import os
import numpy as np
import torch
import argparse

from hparams import create_hparams
from model import lcm
from train import load_model
from torch.utils.data import DataLoader
from reader import TextMelIDLoader, TextMelIDCollate, id2sp
from inference_utils import plot_data

parser = argparse.ArgumentParser()
parser.add_argument('-c', '--checkpoint_path', type=str,
                    help='directory to save checkpoints')
parser.add_argument('--hparams', type=str, required=False,
                    help='comma separated name=value pairs')
args = parser.parse_args()

checkpoint_path = args.checkpoint_path
hparams = create_hparams(args.hparams)

model = load_model(hparams)
model.load_state_dict(torch.load(checkpoint_path)['state_dict'], strict=False)
_ = model.eval()

print('Generating embedding of %s ...' % hparams.speaker_A)
gen_embedding(hparams.speaker_A)

print('Generating embedding of %s ...' % hparams.speaker_B)
gen_embedding(hparams.speaker_B)
[ 11748, 28686, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 28034, 198, 11748, 1822, 29572, 198, 198, 6738, 289, 37266, 1330, 2251, 62, 71, 37266, 198, 6738, 2746, 1330, 300, 11215, 198, 6738, 4512, 1330, 3440, 62, 19849, 198, 6738, 2...
2.802292
349
# Generated by Django 3.1.2 on 2020-11-29 13:25

import dalme_app.models._templates
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_currentuser.middleware
import uuid
import wagtail.search.index
[ 2, 2980, 515, 416, 37770, 513, 13, 16, 13, 17, 319, 12131, 12, 1157, 12, 1959, 1511, 25, 1495, 198, 198, 11748, 288, 282, 1326, 62, 1324, 13, 27530, 13557, 11498, 17041, 198, 6738, 42625, 14208, 13, 10414, 1330, 6460, 198, 6738, 426...
3.091954
87
from django.apps import AppConfig
[ 6738, 42625, 14208, 13, 18211, 1330, 2034, 16934, 628 ]
3.888889
9
# recursive_bt_maze.py
#
# Author: Jens Gansloser
# Created On: 16 Feb 2019

import os
import random
import numpy as np
[ 2, 45115, 62, 18347, 62, 76, 6201, 13, 9078, 198, 2, 198, 2, 6434, 25, 449, 641, 402, 504, 75, 13416, 198, 2, 15622, 1550, 25, 1467, 3158, 13130, 198, 198, 11748, 28686, 198, 11748, 4738, 198, 11748, 299, 32152, 355, 45941, 628 ]
2.813953
43
import re
import math


def read_file(path):
    # path = input("Please enter the path of the KV File:")
    # path = "C:\\Steam\\steamapps\\common\\dota 2 beta\\game\\dota_addons\\heataria\\scripts\\npc\\abilities\\heataria_blaze_path.txt"
    try:
        file = open(path, "r")
        text = file.read()
    except FileNotFoundError:
        text = read_file()
    finally:
        master = KVPart("master")
        master.set_master(True)
        progress_text(text, master)
        return master


# processes a KV textfile into a KV_Part structure
[ 198, 11748, 302, 198, 11748, 10688, 198, 197, 197, 198, 198, 4299, 1100, 62, 7753, 7, 6978, 2599, 198, 197, 2, 6978, 796, 5128, 7203, 5492, 3802, 262, 3108, 286, 262, 509, 53, 9220, 25, 4943, 198, 197, 2, 6978, 796, 366, 34, 25, ...
2.643617
188
#!/usr/bin/python

#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2014-2016 Francois Beaune, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#

from __future__ import print_function
import argparse
import os
import shutil


#--------------------------------------------------------------------------------------------------
# Utility functions.
#--------------------------------------------------------------------------------------------------


#--------------------------------------------------------------------------------------------------
# Update reference images in a given test suite directory.
#--------------------------------------------------------------------------------------------------

def update_ref_images(parent_dir):
    renders_dir = os.path.join(parent_dir, "renders")
    ref_dir = os.path.join(parent_dir, "ref")

    safe_mkdir(ref_dir)

    for filename in os.listdir(renders_dir):
        if os.path.splitext(filename)[1] == ".png":
            src_path = os.path.join(renders_dir, filename)
            dst_path = os.path.join(ref_dir, filename)
            print("  copying {0} to {1}...".format(src_path, dst_path))
            shutil.copyfile(src_path, dst_path)


#--------------------------------------------------------------------------------------------------
# Entry point.
#--------------------------------------------------------------------------------------------------

if __name__ == '__main__':
    main()
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 198, 2, 198, 2, 770, 2723, 2393, 318, 636, 286, 22514, 2308, 13, 198, 2, 16440, 2638, 1378, 1324, 829, 2308, 71, 80, 13, 3262, 14, 329, 3224, 1321, 290, 4133, 13, 198, 2, 198, 2, 770, ...
3.913819
673
from typing import List, Any
import time

from discord import Embed, Reaction

from utils import uniquify

# EMOJIS regional_indicator_A to regional_indicator_T
reaction_emojies = ['\U0001F1E6', '\U0001F1E7', '\U0001F1E8', '\U0001F1E9',
                    '\U0001F1EA', '\U0001F1EB', '\U0001F1EC', '\U0001F1ED',
                    '\U0001F1EE', '\U0001F1EF', '\U0001F1F0', '\U0001F1F1',
                    '\U0001F1F2', '\U0001F1F3', '\U0001F1F4', '\U0001F1F5',
                    '\U0001F1F6', '\U0001F1F7', '\U0001F1F8', '\U0001F1F9']

number_emojies = {'rq_plus_one': 1, 'rq_plus_two': 2, 'rq_plus_three': 3, 'rq_plus_four': 4}
[ 6738, 19720, 1330, 7343, 11, 4377, 198, 11748, 640, 198, 6738, 36446, 1330, 13302, 276, 11, 39912, 198, 6738, 3384, 4487, 1330, 555, 1557, 1958, 198, 198, 2, 412, 11770, 41, 1797, 7915, 62, 521, 26407, 62, 32, 284, 7915, 62, 521, 26...
1.420973
658
import email.utils as em
import re

if __name__ == '__main__':
    obj = Main()
[ 11748, 3053, 13, 26791, 355, 795, 198, 11748, 302, 198, 220, 220, 220, 220, 220, 220, 220, 220, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 26181, 796, 8774, 3419, 198 ]
2.315789
38
# -*- coding: utf-8 -*-

import tensorflow as tf
import random
import math
import os

from config import FLAGS
from model import Seq2Seq
from dialog import Dialog


if __name__ == "__main__":
    tf.app.run()
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 11748, 11192, 273, 11125, 355, 48700, 198, 11748, 4738, 198, 11748, 10688, 198, 11748, 28686, 198, 198, 6738, 4566, 1330, 9977, 4760, 50, 198, 6738, 2746, 1330, 1001,...
2.74026
77
# Author: Alexander Fabisch <Alexander.Fabisch@dfki.de>

import numpy as np
from bolero.representation import BlackBoxBehavior
from bolero.representation import DMPBehavior as DMPBehaviorImpl
[ 2, 6434, 25, 10009, 376, 8102, 354, 1279, 38708, 13, 37, 8102, 354, 31, 7568, 4106, 13, 2934, 29, 198, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 11572, 3529, 13, 15603, 341, 1330, 2619, 14253, 25267, 15759, 198, 6738, 11572, 3529...
3.344828
58
import logging.config
import tornado
from bitstampws import Client as Websocket

import lib.configs.logging
from lib.subscribers import SimpleLoggerSubscriber

logging.config.dictConfig(lib.configs.logging.d)


if __name__ == '__main__':
    with Websocket() as client:
        with SimpleLoggerSubscriber(client):
            client.connect()
            try:
                tornado.ioloop.IOLoop.instance().start()
            except KeyboardInterrupt:
                client.close()
[ 11748, 18931, 13, 11250, 198, 11748, 33718, 198, 6738, 1643, 301, 696, 18504, 1330, 20985, 355, 47736, 5459, 198, 198, 11748, 9195, 13, 11250, 82, 13, 6404, 2667, 198, 6738, 9195, 13, 7266, 40075, 364, 1330, 17427, 11187, 1362, 7004, 14...
2.445
200
import random from pprint import pformat from copy import deepcopy from utils.logger import GP_Logger from terminal_set import TerminalSet
[ 11748, 4738, 198, 6738, 279, 4798, 1330, 279, 18982, 198, 6738, 4866, 1330, 2769, 30073, 198, 6738, 3384, 4487, 13, 6404, 1362, 1330, 14714, 62, 11187, 1362, 198, 6738, 12094, 62, 2617, 1330, 24523, 7248, 198 ]
3.861111
36
# -*- coding: utf-8 -*- """ packaging package. """ from pyrin.packaging.base import Package
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 8002, 3039, 5301, 13, 198, 37811, 198, 198, 6738, 279, 2417, 259, 13, 8002, 3039, 13, 8692, 1330, 15717, 628 ]
2.611111
36
#coding:utf-8 ''' filename:heapq_merge.py chap:7 subject:4-2 conditions:heapq.merge,sorted_list:lst1,lst2 lst3=merged_list(lst1,lst2) is sorted solution:heapq.merge ''' import heapq lst1 = [1,3,5,7,9] lst2 = [2,4,6,8] if __name__ == '__main__': lst3 = heapq.merge(lst1,lst2) print('lst3',lst3) print(list(lst3))
[ 2, 66, 7656, 25, 40477, 12, 23, 198, 198, 7061, 6, 198, 220, 220, 220, 29472, 25, 258, 499, 80, 62, 647, 469, 13, 9078, 198, 220, 220, 220, 220, 220, 220, 220, 28022, 25, 22, 198, 220, 220, 220, 2426, 25, 19, 12, 17, 198, 22...
1.719626
214
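For context on the heapq.merge recipe above: merge consumes already-sorted inputs lazily and yields a single sorted iterator without materializing the combined list. A minimal sketch of that standard-library behavior (the variable names below are illustrative):

import heapq

a = [1, 4, 9]
b = [2, 3, 10]
c = [0, 5]

merged = heapq.merge(a, b, c)   # lazy iterator; each input must already be sorted
print(next(merged))             # 0 -- values are produced on demand
print(list(merged))             # [1, 2, 3, 4, 5, 9, 10]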
"""Generated client library for serviceuser version v1.""" # NOTE: This file is autogenerated and should not be edited by hand. from apitools.base.py import base_api from googlecloudsdk.third_party.apis.serviceuser.v1 import serviceuser_v1_messages as messages
[ 37811, 8645, 515, 5456, 5888, 329, 2139, 7220, 2196, 410, 16, 526, 15931, 198, 2, 24550, 25, 770, 2393, 318, 1960, 519, 877, 515, 290, 815, 407, 307, 13012, 416, 1021, 13, 198, 6738, 2471, 270, 10141, 13, 8692, 13, 9078, 1330, 2779,...
3.447368
76
#!/usr/bin/env python3

# adapted from wav2letter/src/feature/test/MfccTest.cpp

import itertools as it
import os
import sys

from wav2letter.feature import FeatureParams, Mfcc


def load_data(filename):
    # Reconstructed helper (the original is not shown): read a file of
    # whitespace-separated floats from data_path; this is what the otherwise
    # unused itertools import above is for.
    path = os.path.join(data_path, filename)
    with open(path) as f:
        return [float(x) for x in it.chain.from_iterable(line.split() for line in f)]


if __name__ == "__main__":
    if len(sys.argv) != 2:
        print(f"usage: {sys.argv[0]} feature_test_data_path", file=sys.stderr)
        print("  (usually: <wav2letter_root>/src/feature/test/data)", file=sys.stderr)
        sys.exit(1)
    data_path = sys.argv[1]

    wavinput = load_data("sa1.dat")
    # golden features to compare
    htkfeatures = load_data("sa1-mfcc.htk")
    assert len(wavinput) > 0
    assert len(htkfeatures) > 0

    params = FeatureParams()
    # define parameters of the featurization
    params.sampling_freq = 16000
    params.low_freq_filterbank = 0
    params.high_freq_filterbank = 8000
    params.num_filterbank_chans = 20
    params.num_cepstral_coeffs = 13
    params.use_energy = False
    params.zero_mean_frame = False
    params.use_power = False

    # apply MFCC featurization
    mfcc = Mfcc(params)
    features = mfcc.apply(wavinput)

    # check that obtained features are the same as golden one
    assert len(features) == len(htkfeatures)
    assert len(features) % 39 == 0
    numframes = len(features) // 39
    featurescopy = features.copy()
    for f in range(numframes):
        for i in range(1, 39):
            features[f * 39 + i - 1] = features[f * 39 + i]
        features[f * 39 + 12] = featurescopy[f * 39 + 0]
        features[f * 39 + 25] = featurescopy[f * 39 + 13]
        features[f * 39 + 38] = featurescopy[f * 39 + 26]
    differences = [abs(x[0] - x[1]) for x in zip(features, htkfeatures)]

    print(f"max_diff={max(differences)}")
    print(f"avg_diff={sum(differences)/len(differences)}")
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 2, 16573, 422, 266, 615, 17, 9291, 14, 10677, 14, 30053, 14, 9288, 14, 44, 69, 535, 14402, 13, 20322, 198, 198, 11748, 340, 861, 10141, 355, 340, 198, 11748, 28686, 198, 1...
2.430556
720
from tkinter import * import onetimepad if __name__ == "__main__": root=Tk() Message_Decrypt(root) root.mainloop()
[ 6738, 256, 74, 3849, 1330, 1635, 198, 11748, 319, 8079, 15636, 628, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 6808, 28, 51, 74, 3419, 198, 220, 220, 220, 16000, 62, 10707, 6012, 7, 15763, 8, ...
2.433962
53
#!/usr/bin/env python3 from urdf2optcontrol import optimizer from matplotlib import pyplot as plt import pathlib # URDF options urdf_path = pathlib.Path(__file__).parent.joinpath('urdf', 'rrbot.urdf').absolute() root = "link1" end = "link3" in_cond = [0] * 4 my_constraints = [my_constraint1, my_constraint2] my_final_constraints = [my_final_constraint1, my_final_constraint2] time_horizon = 2.0 steps = 40 # Load the urdf and calculate the differential equations optimizer.load_robot(urdf_path, root, end) # Loading the problem conditions optimizer.load_problem( my_cost_func, steps, in_cond, time_horizon=time_horizon, constraints=my_constraints, final_constraints=my_final_constraints, max_iter=500 ) # Solving the non linear problem res = optimizer.solve() print('u = ', res['u'][0]) print('q = ', res['q'][0]) # Print the results! fig = optimizer.plot_result(show=True)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 6738, 220, 2799, 69, 17, 8738, 13716, 1330, 6436, 7509, 198, 6738, 2603, 29487, 8019, 1330, 12972, 29487, 355, 458, 83, 220, 198, 11748, 3108, 8019, 198, 198, 2, 37902, 8068, 3689,...
2.567039
358
""" Pooled PostgreSQL database backend for Django. Requires psycopg 2: http://initd.org/projects/psycopg2 """ from django import get_version as get_django_version from django.db.backends.postgresql_psycopg2.base import \ DatabaseWrapper as OriginalDatabaseWrapper from django.db.backends.signals import connection_created from threading import Lock import logging import sys try: import psycopg2 as Database import psycopg2.extensions except ImportError, e: from django.core.exceptions import ImproperlyConfigured raise ImproperlyConfigured("Error loading psycopg2 module: %s" % e) logger = logging.getLogger(__name__) ''' This holds our connection pool instances (for each alias in settings.DATABASES that uses our PooledDatabaseWrapper.) ''' connection_pools = {} connection_pools_lock = Lock() pool_config_defaults = { 'MIN_CONNS': None, 'MAX_CONNS': 1, 'TEST_ON_BORROW': False, 'TEST_ON_BORROW_QUERY': 'SELECT 1' } def _set_up_pool_config(self): ''' Helper to configure pool options during DatabaseWrapper initialization. ''' self._max_conns = self.settings_dict['OPTIONS'].get('MAX_CONNS', pool_config_defaults['MAX_CONNS']) self._min_conns = self.settings_dict['OPTIONS'].get('MIN_CONNS', self._max_conns) self._test_on_borrow = self.settings_dict["OPTIONS"].get('TEST_ON_BORROW', pool_config_defaults['TEST_ON_BORROW']) if self._test_on_borrow: self._test_on_borrow_query = self.settings_dict["OPTIONS"].get('TEST_ON_BORROW_QUERY', pool_config_defaults['TEST_ON_BORROW_QUERY']) else: self._test_on_borrow_query = None def _create_connection_pool(self, conn_params): ''' Helper to initialize the connection pool. ''' connection_pools_lock.acquire() try: # One more read to prevent a read/write race condition (We do this # here to avoid the overhead of locking each time we get a connection.) if (self.alias not in connection_pools or connection_pools[self.alias]['settings'] != self.settings_dict): logger.info("Creating connection pool for db alias %s" % self.alias) logger.info(" using MIN_CONNS = %s, MAX_CONNS = %s, TEST_ON_BORROW = %s" % (self._min_conns, self._max_conns, self._test_on_borrow)) from psycopg2 import pool connection_pools[self.alias] = { 'pool': pool.ThreadedConnectionPool(self._min_conns, self._max_conns, **conn_params), 'settings': dict(self.settings_dict), } finally: connection_pools_lock.release() ''' Simple Postgres pooled connection that uses psycopg2's built-in ThreadedConnectionPool implementation. In Django, use this by specifying MAX_CONNS and (optionally) MIN_CONNS in the OPTIONS dictionary for the given db entry in settings.DATABASES. MAX_CONNS should be equal to the maximum number of threads your app server is configured for. For example, if you are running Gunicorn or Apache/mod_wsgi (in a multiple *process* configuration) MAX_CONNS should be set to 1, since you'll have a dedicated python interpreter per process/worker. If you're running Apache/mod_wsgi in a multiple *thread* configuration set MAX_CONNS to the number of threads you have configured for each process. By default MIN_CONNS will be set to MAX_CONNS, which prevents connections from being closed. If your load is spikey and you want to recycle connections, set MIN_CONNS to something lower than MAX_CONNS. I suggest it should be no lower than your 95th percentile concurrency for your app server. If you wish to validate connections on each check out, specify TEST_ON_BORROW (set to True) in the OPTIONS dictionary for the given db entry. You can also provide an optional TEST_ON_BORROW_QUERY, which is "SELECT 1" by default. 
'''


'''
Choose a version of the DatabaseWrapper class to use based on the Django
version. This is a bit hacky; is there a more elegant way?
'''
django_version = get_django_version()
if django_version.startswith('1.3'):
    from django.db.backends.postgresql_psycopg2.base import CursorWrapper
elif django_version.startswith('1.4') or django_version.startswith('1.5'):
    from django.conf import settings
    from django.db.backends.postgresql_psycopg2.base import utc_tzinfo_factory, \
        CursorWrapper

    # The force_str call around the password seems to be the only change from
    # 1.4 to 1.5, so we'll use the same DatabaseWrapper class and make
    # force_str a no-op.
    try:
        from django.utils.encoding import force_str
    except ImportError:
        force_str = lambda x: x
elif django_version.startswith('1.6'):
    # The original 1.6 branch body is not shown; pass keeps the block
    # syntactically valid (assumed: 1.6 reuses the 1.4/1.5 setup above).
    pass
else:
    raise ImportError("Unsupported Django version %s" % django_version)
[ 37811, 198, 27201, 276, 2947, 47701, 6831, 30203, 329, 37770, 13, 198, 198, 39618, 17331, 22163, 70, 362, 25, 2638, 1378, 15003, 67, 13, 2398, 14, 42068, 14, 13764, 22163, 70, 17, 198, 37811, 198, 6738, 42625, 14208, 1330, 651, 62, 96...
2.496559
2,034
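A minimal sketch of how the OPTIONS described in the docstring above would look in a Django settings module; the ENGINE path, alias, and credentials are assumptions for illustration, not values taken from the snippet:

# settings.py (illustrative)
DATABASES = {
    'default': {
        'ENGINE': 'dbpool.db.backends.postgresql_psycopg2',  # assumed module path
        'NAME': 'mydb',
        'USER': 'myuser',
        'PASSWORD': 'secret',
        'HOST': 'localhost',
        'OPTIONS': {
            'MAX_CONNS': 1,                     # match threads per app-server process
            'MIN_CONNS': 1,                     # keep connections from being closed
            'TEST_ON_BORROW': True,             # validate each connection on checkout
            'TEST_ON_BORROW_QUERY': 'SELECT 1',
        },
    },
}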
# Skeleton from fastapi_skeleton.core import messages
[ 2, 19460, 10565, 198, 6738, 3049, 15042, 62, 82, 38800, 13, 7295, 1330, 6218, 628, 198 ]
3.5
16
# Copyright 2016 Cisco Systems, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import shutil TREX_OPT = '/opt/trex' TREX_UNUSED = [ '_t-rex-64-debug', '_t-rex-64-debug-o', 'bp-sim-64', 'bp-sim-64-debug', 't-rex-64-debug', 't-rex-64-debug-o', 'automation/__init__.py', 'automation/graph_template.html', 'automation/config', 'automation/h_avc.py', 'automation/phantom', 'automation/readme.txt', 'automation/regression', 'automation/report_template.html', 'automation/sshpass.exp', 'automation/trex_perf.py', 'wkhtmltopdf-amd64' ] def remove_unused_libs(path, files): """ Remove files not used by traffic generator. """ for f in files: f = os.path.join(path, f) try: if os.path.isdir(f): shutil.rmtree(f) else: os.remove(f) except OSError: print "Skipped file:" print f continue def get_dir_size(start_path='.'): """ Computes size of directory. :return: size of directory with subdirectiories """ total_size = 0 for dirpath, dirnames, filenames in os.walk(start_path): for f in filenames: try: fp = os.path.join(dirpath, f) total_size += os.path.getsize(fp) except OSError: continue return total_size if __name__ == "__main__": versions = os.listdir(TREX_OPT) for version in versions: trex_path = os.path.join(TREX_OPT, version) print 'Cleaning TRex', version try: size_before = get_dir_size(trex_path) remove_unused_libs(trex_path, TREX_UNUSED) size_after = get_dir_size(trex_path) print '==== Saved Space ====' print size_before - size_after except OSError: import traceback print traceback.print_exc() print 'Cleanup was not finished.'
[ 2, 15069, 1584, 28289, 11998, 11, 3457, 13, 220, 1439, 2489, 10395, 13, 198, 2, 198, 2, 220, 220, 220, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 345, 743, 198, 2, 220, 220, 220, 407, 779,...
2.217314
1,132
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Mean stddev box coder. This box coder use the following coding schema to encode boxes: rel_code = (box_corner - anchor_corner_mean) / anchor_corner_stddev. """ from object_detection.core import box_coder from object_detection.core import box_list
[ 2, 15069, 2177, 383, 309, 22854, 37535, 46665, 13, 1439, 6923, 33876, 13, 201, 198, 2, 201, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 201, 198, 2, 345, 743, 407, 779, 428, 2393, 2...
3.614232
267
from storage.buffer import QLearningBuffer from utils.torch_utils import ExpertTransition, augmentTransition from utils.parameters import buffer_aug_type
[ 6738, 6143, 13, 22252, 1330, 1195, 41730, 28632, 198, 6738, 3384, 4487, 13, 13165, 354, 62, 26791, 1330, 25516, 8291, 653, 11, 35016, 8291, 653, 198, 6738, 3384, 4487, 13, 17143, 7307, 1330, 11876, 62, 7493, 62, 4906, 628, 628, 628 ]
3.878049
41
import torch from typing import Any, Dict, List, OrderedDict, Tuple from hlrl.core.agents import RLAgent from hlrl.core.common.wrappers import MethodWrapper
[ 11748, 28034, 198, 198, 6738, 19720, 1330, 4377, 11, 360, 713, 11, 7343, 11, 14230, 1068, 35, 713, 11, 309, 29291, 198, 198, 6738, 289, 14050, 75, 13, 7295, 13, 49638, 1330, 371, 13534, 6783, 198, 6738, 289, 14050, 75, 13, 7295, 13,...
3.057692
52
# -*- coding: utf-8 -*- PIXIVUTIL_VERSION = '20191220-beta1' PIXIVUTIL_LINK = 'https://github.com/Nandaka/PixivUtil2/releases' PIXIVUTIL_DONATE = 'https://bit.ly/PixivUtilDonation' # Log Settings PIXIVUTIL_LOG_FILE = 'pixivutil.log' PIXIVUTIL_LOG_SIZE = 10485760 PIXIVUTIL_LOG_COUNT = 10 PIXIVUTIL_LOG_FORMAT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s" # Download Results PIXIVUTIL_NOT_OK = -1 PIXIVUTIL_OK = 0 PIXIVUTIL_SKIP_OLDER = 1 PIXIVUTIL_SKIP_BLACKLIST = 2 PIXIVUTIL_KEYBOARD_INTERRUPT = 3 PIXIVUTIL_SKIP_DUPLICATE = 4 PIXIVUTIL_SKIP_LOCAL_LARGER = 5 PIXIVUTIL_CHECK_DOWNLOAD = 6 PIXIVUTIL_ABORTED = 9999 BUFFER_SIZE = 8192
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 47, 10426, 3824, 3843, 4146, 62, 43717, 796, 705, 23344, 1065, 1238, 12, 31361, 16, 6, 198, 47, 10426, 3824, 3843, 4146, 62, 43, 17248, 796, 705, 5450, 1378, 1256...
2.015528
322
from django.http import Http404 from django.shortcuts import render, redirect, reverse from django.views.generic import ListView from django.contrib.auth.decorators import login_required from django.contrib.auth.mixins import LoginRequiredMixin from django.contrib.auth.models import User from rest_framework import status from rest_framework.response import Response from rest_framework.views import APIView from rest_framework.renderers import TemplateHTMLRenderer from .models import Schema, SchemaColumn, SchemaResponse, SchemaUrl from .forms import SchemaResponseForm, ResponseUpdateForm from .serializers import SchemaResponseSerializer from .prepare_data import getcolumns import pytz """ API Views """
[ 6738, 42625, 14208, 13, 4023, 1330, 367, 29281, 26429, 198, 6738, 42625, 14208, 13, 19509, 23779, 1330, 8543, 11, 18941, 11, 9575, 198, 6738, 42625, 14208, 13, 33571, 13, 41357, 1330, 7343, 7680, 220, 198, 6738, 42625, 14208, 13, 3642, ...
3.75
192
import os.path import types import sys
[ 198, 11748, 28686, 13, 6978, 198, 11748, 3858, 198, 11748, 25064, 198 ]
3.333333
12
from django.shortcuts import render from rest_framework import response from rest_framework.serializers import Serializer from . import serializers from rest_framework.response import Response from rest_framework.views import APIView from django.views import View from rest_framework import status from . models import SaveList, User, Lyrics, SearchHistory, VerificationCode, SubmitLyrics from rest_framework.permissions import BasePermission, IsAuthenticated, SAFE_METHODS, IsAdminUser from rest_framework.authtoken.models import Token from django.contrib.auth.hashers import make_password, check_password from django.contrib.auth import login, authenticate import requests from django.db.models import Q from bs4 import BeautifulSoup import json from datetime import datetime import random from django.core.mail import EmailMessage, EmailMultiAlternatives from django.conf import settings from django.template.loader import get_template from django.urls import reverse import jwt from django.utils.encoding import force_bytes, force_text, DjangoUnicodeDecodeError from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode from django.contrib.sites.shortcuts import get_current_site from .utils import Util from rest_framework_simplejwt.tokens import RefreshToken from django.template import Context from django.http import HttpResponse, HttpResponseNotFound import os import re import urllib from datetime import datetime import random import time now = datetime.now() import json ''' class EditLyricsView(APIView): def post(self, request, pk, *args, **kwargs ): data=request.data lyrics=Lyrics.objects.get(pk=pk) lyrics.title=request.POST['title'] lyrics.artist=request.POST['artist'] lyrics.body=request.POST['body'] Lyrics.objects.get(pk=pk) lyrics.save() lyrics_item=Lyrics.objects.get(pk=pk) serializer=serializers.LyricsSerializer(lyrics_item,many=False) response={'lyrics':serializer.data} return Response(response,status=status.HTTP_200_OK ) ''' """ class SignupView(APIView): def post(self, request, *args, **kwargs): user=User() serializer=serializers.UserSerializer(data=request.data) print(request.data) if serializer.is_valid(): password=make_password(request.data['password']) username=request.data['username'] user.username=username user.first_name=request.data['first_name'] user.last_name=request.data['last_name'] user.email=request.data['email'] user.email_username=request.data['email'] user.password=password user.save() new_user=User.objects.get(username=username) print(new_user) token=Token.objects.create(user=new_user) response={'token':token.key, 'user':serializer.data} return Response(response, status=status.HTTP_200_OK) else: return Response(serializer.errors) """ """ data = requests.get(f"https://api.lyrics.ovh/v1/{artistSlug}/{titleSlug}/") lyric = data.json() if data.status_code == 200: lyrics.title=title lyrics.artist=artist lyrics.title_slug=titleSlug lyrics.artist_slug=artistSlug lyrics.body=lyric['lyrics'] lyrics.save() lyrics_item=Lyrics.objects.get(title_slug=title_slug, artist_slug=artist_slug) searchHistory.lyrics_id = lyrics_item.id searchHistory.searcher_username = request.user.username searchHistory.moment=now.strftime('%Y-%m-%d %H:%M:%S') searchHistory.save() serializer=serializers.LyricsSerializer(lyrics_item, many=False) response={'lyrics':serializer.data} return Response(response,status=status.HTTP_200_OK ) """
[ 6738, 42625, 14208, 13, 19509, 23779, 1330, 8543, 198, 6738, 1334, 62, 30604, 1330, 2882, 198, 6738, 1334, 62, 30604, 13, 46911, 11341, 1330, 23283, 7509, 198, 6738, 764, 1330, 11389, 11341, 198, 6738, 1334, 62, 30604, 13, 26209, 1330, ...
2.304183
1,841
from selenium import webdriver from time import sleep from selenium.webdriver.common.keys import Keys from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.common.by import By from selenium.webdriver.support.wait import WebDriverWait def Dm(driver,user,message): ''' This function is used to direct message a single user/group ''' driver.get('https://www.instagram.com/direct/inbox/') send_message_button = WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, '//*[@id="react-root"]/section/div/div[2]/div/div/div[2]/div/div[3]/div/button'))).click() search_user = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, '/html/body/div[5]/div/div/div[2]/div[1]/div/div[2]/input'))) search_user.send_keys(user) selector = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, '/html/body/div[5]/div/div/div[2]/div[2]/div/div/div[3]/button/span'))).click() next_button = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, '/html/body/div[5]/div/div/div[1]/div/div[2]/div/button/div'))).click() try: text = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, '//*[@id="react-root"]/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/div[2]/textarea'))) text.send_keys(message) send = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, '//*[@id="react-root"]/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/div[3]/button'))).click() driver.get('https://www.instagram.com/direct/inbox/') except: print('No message sent to '+user) driver.get('https://www.instagram.com/direct/inbox/')
[ 6738, 384, 11925, 1505, 1330, 3992, 26230, 198, 6738, 640, 1330, 3993, 198, 6738, 384, 11925, 1505, 13, 12384, 26230, 13, 11321, 13, 13083, 1330, 26363, 198, 6738, 384, 11925, 1505, 13, 12384, 26230, 13, 11284, 1330, 2938, 62, 17561, 17...
2.346257
748
import requests from datetime import datetime import json from extras import Day, Lesson def send(self, url, method="get", params={}, files={}): """ Parameters ------------ url -> Represents the url to go to method -> Represents the method to use. Can be either `get` or `post` params -> Represents the parameters to send to the website. Only use it on `post` files -> Pretty much the same as for the params ------------ """ return getattr(self.session, str(method).strip().lower())(self.url.format(url), data=json.dumps(params), files=files) def __str__(self): return json.dumps({ "MashovAPI": { "url": self.url, "sessionH": dict(self.session.headers), "sessionC": self.session.cookies.get_dict(), "username": self.username, "password": self.password, "schoolData": self.school_data, "schoolID": self.school_ID, "currentYear": self.current_year, "loginData": self.login_data, "isLoggedIn": self.is_logged_in, "authID": self.auth_ID, "userID": self.user_ID, "uid": self.uid, "uID": self.uID, "guid": self.guid, "guID": self.guID, "schoolSite": self.school_site, "moodleSite": self.moodle_site, "schoolName": self.school_name, "lastName": self.last_name, "firstName": self.first_name, "className": self.class_name, "lastPass": self.last_pass, "lastLogin": self.last_login, "schoolYears": self.school_years, "csrfToken": self.csrf_token, "userChildren": self.user_children }}) def get_day(self, day_num: int): """ Parameters ------------ day -> Represents the day number ------------ """ day = [] timetable = [] for i in self.timetable: if i["timeTable"]["day"] == day_num: timetable.append(i) for i in range(len(timetable)): for j in range(i+1, len(timetable), 1): if timetable[i]["timeTable"]["lesson"] > timetable[j]["timeTable"]["lesson"]: temp = timetable[i] timetable[i] = timetable[j] timetable[j] = temp for i in timetable: if not "'" in i["groupDetails"]["subjectName"]: # We don't need that. It's useless. if len(day) > 0: while i["timeTable"]["lesson"] > day[-1].number + 1: day.append(Lesson( lesson="", lesson_number=day[-1].number + 1, lesson_time="", classroom="", teacher="", ) ) i["groupDetails"]["groupTeachers"][0]["teacherName"] = i["groupDetails"]["groupTeachers"][0]["teacherName"].replace("-", " ") day.append(Lesson( lesson=i["groupDetails"]["subjectName"], lesson_number=i["timeTable"]["lesson"], lesson_time="", classroom=i["timeTable"]["roomNum"], teacher=i["groupDetails"]["groupTeachers"][0]["teacherName"] ) ) return Day(day_num, day) def get_today(self): """ Parameters ------------ ------------ """ today = datetime.now().weekday() today += 2 if today > 7: today -= 7 return self.get_day(today)
[ 11748, 7007, 198, 6738, 4818, 8079, 1330, 4818, 8079, 198, 11748, 33918, 198, 6738, 33849, 1330, 3596, 11, 12892, 261, 628, 628, 198, 220, 220, 220, 825, 3758, 7, 944, 11, 19016, 11, 2446, 2625, 1136, 1600, 42287, 34758, 5512, 3696, 3...
1.738017
2,420
'''
Created on 10/11/2017

@author:   jschmid3@stevens.edu
Pledge:    I pledge my honor that I have abided by the Stevens Honor System -Joshua Schmidt

CS115 - Lab 6
'''

def isOdd(n):
    '''Returns whether or not the integer argument is odd.'''
    #question 1: base_2 of 42: 101010
    if n == 0:
        return False
    if n % 2 != 0:
        return True
    return False

#question 2: if given an odd base-10 number, the least-significant bit of its base-2 representation will be a 1.
#question 3: if given an even base-10 number, the least-significant bit of its base-2 representation will be a 0.
#This is because 2^0 = 1, and that is the only way to make an odd number, by having a 1 in the least significant bit.

#question 4: By eliminating the least significant bit, the original number decreases by a factor of 2, if the bit is a 0.
#if the least significant bit is a 1, the original number is decreased by a factor of 2, - 1.

#question 5: If N is odd, the base-2 of N is Y + "1". If N is even, the base-2 of N is Y + "0".
#This is because to get from N base-10 to N base-2 you do successive division by 2, keeping the remainder, so given
#the base-2 of all of the division except for the first, one must put that remainder in front, hence the answer given.

def numToBinary(n):
    '''Precondition: integer argument is non-negative.
    Returns the string with the binary representation of non-negative integer n.
    If n is 0, the empty string is returned.'''
    if n == 0:
        return ""
    elif isOdd(n):
        return numToBinary(n // 2) + "1"
    else:
        return numToBinary(n // 2) + "0"
#print(numToBinary(15))

def binaryToNum(s):
    '''Precondition: s is a string of 0s and 1s.
    Returns the integer corresponding to the binary representation in s.
    Note: the empty string represents 0.'''
    if s == "":
        return 0
    return int(s[0])*(2**(len(s)-1)) + binaryToNum(s[1:])
#print(binaryToNum("1111"))

def addBin(s, numAdd, carry = 0):
    """adds 2 binary numbers"""
    if s == "" or numAdd == "":
        if carry == 0:
            return s + numAdd
    place = carry
    carry = 0
    if s != "" and s[-1] == "1":
        carry = place
        place = 1 - place
    if numAdd != "" and numAdd[-1] == "1":
        carry += place
        place = 1 - place
    return addBin(s[:-1], numAdd[:-1], carry) + str(place)
#print(addBin("100", "001", 0))

def makeEightBit(a):
    """makes a binary number 8 bit"""
    #fixed: the recursive calls previously discarded their results and the
    #function always fell through to return "".
    if len(a) == 8:
        return str(a)
    elif len(a) > 8:
        #keep only the least-significant 8 bits
        return makeEightBit(a[(len(a)-8):])
    else:
        #pad with leading zeros until 8 bits long
        return makeEightBit("0" + a)

def increment(s):
    '''Precondition: s is a string of 8 bits.
    Returns the binary representation of binaryToNum(s) + 1.'''
    #numAdd = "00000001"
    dec = binaryToNum(s)
    dec += 1
    answer = numToBinary(dec)
    #print(answer)
    if len(answer) > 8:
        return answer[(len(answer)-8):]
    answer = (8-len(answer))*"0" + answer
    return answer
#print(increment("1110100000"))

def count(s, n):
    '''Precondition: s is an 8-bit string and n >= 0.
    Prints s and its n successors.'''
    if n == 0:
        print(s)
        return ""
    print(s)
    return count(increment(s), n-1)
#print(count("11111110", 5))
#print("a")

def numToTernary(n):
    '''Precondition: integer argument is non-negative.
    Returns the string with the ternary representation of non-negative integer
    n. If n is 0, the empty string is returned.'''
    if n == 0:
        return ""
    return numToTernary(n // 3) + str(n % 3)
#print(numToTernary(42))

def ternaryToNum(s):
    '''Precondition: s is a string of 0s, 1s, and 2s.
    Returns the integer corresponding to the ternary representation in s.
    Note: the empty string represents 0.'''
    if s == "":
        return 0
    return int(s[0])*(3**(len(s)-1)) + ternaryToNum(s[1:])
#print(ternaryToNum('12211010'))
[ 7061, 6, 198, 41972, 319, 838, 14, 1157, 14, 5539, 198, 31, 9800, 25, 220, 220, 474, 20601, 13602, 18, 31, 4169, 574, 82, 13, 15532, 198, 47, 2965, 25, 220, 220, 220, 314, 13995, 616, 7522, 326, 314, 423, 450, 1384, 416, 262, 20...
2.544279
1,547
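A worked example of the successive-division reasoning in the question-5 answer above: converting 13 to binary by repeatedly dividing by 2 and keeping remainders (least-significant bit first), which is exactly what numToBinary does recursively.

# 13 // 2 = 6 remainder 1   -> least-significant bit
#  6 // 2 = 3 remainder 0
#  3 // 2 = 1 remainder 1
#  1 // 2 = 0 remainder 1   -> most-significant bit
# reading the remainders from bottom to top gives 1101
n = 13
bits = ""
while n > 0:
    bits = str(n % 2) + bits
    n = n // 2
print(bits)  # 1101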
#!/usr/bin/env python from math import * import sys frame_width = 200 frame_height = 75 drill = 1.6 # 1/16 inch radius. extrusion = 15 motor_screw_grid = 31 motor_cutout_diameter = 22 motor_width = 42.2 motor_offset = 35 # Motor face to extrusion. motor_side, motor_bend = rotate(0, motor_offset + extrusion, 30) motor_side += extrusion/2 motor_side += extrusion/cos(pi/6) mc = motor_cutout_diameter/2 + drill #nema23 = 47.14 # Mounting screws center-to-center clover = 6 thickness = 0.0478 * 25.4 # 18 gauge steel. enable_perimeter = False print >> sys.stderr, 'thickness', thickness print >> sys.stderr, 'motor_bend', motor_bend print >> sys.stderr, 'motor_side', motor_side print >> sys.stderr, 'mc', mc print >> sys.stderr, 'extrusion-to-extrusion', frame_width print >> sys.stderr, 'edge-to-edge', frame_width + 2*extrusion xa = motor_side - drill # Outside wings start xb = motor_side + motor_bend + drill xs1 = xa + extrusion/2 # Extrusion screws xs2 = xb - extrusion/2 # xe = frame_width/2 # Extrusion corner xt = motor_width/2 xms = motor_screw_grid/sqrt(2) xgs = 19 ya = frame_height/2 + drill # Top without flange yb = frame_height/2 + drill - extrusion ys = frame_height/2 - extrusion/2 # Extrusion screws yt = motor_width/2 yt2 = yt + 4 yms = xms ygs = xgs s2 = sqrt(2) print 'G17 ; Select XY plane for arcs' print 'G90 ; Absolute coordinates' move('G92', x=0, y=0, z=0) linear(x=0, y=0, z=0) print '; Gasket screw holes' for x in (-xgs, xgs): for y in (-x, x): jump(x=x, y=y) # clockwise(i=1) if enable_perimeter: print '; Horizontal extrusion screw holes' for x in (xs1, xs2): jump(x=x, y=ys) for x in (xs2, xs1, -xs1, -xs2): jump(x=x, y=-ys) for x in (-xs2, -xs1): jump(x=x, y=ys) #print '; 22mm dia cutout for reference' #jump(x=0, y=11) #clockwise(j=-11) #print '; NEMA17 square for reference' #jump(x=0, y=yt*s2) #linear(x=xt*s2, y=0) #linear(x=0, y=-yt*s2) #linear(x=-xt*s2, y=0) #linear(x=0, y=yt*s2) for z in (-1, -2.5): clovercut(z) if enable_perimeter: for z in (-1, -2.5): perimeter(z) print '; All done' up()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 6738, 10688, 1330, 1635, 198, 11748, 25064, 198, 198, 14535, 62, 10394, 796, 939, 198, 14535, 62, 17015, 796, 5441, 198, 7109, 359, 796, 352, 13, 21, 220, 1303, 352, 14, 1433, 1...
2.228363
959
import numpy as np
import torch
from torchvision.datasets import MNIST

from torchvision import transforms
from torch.utils.data import DataLoader

from scripts.utils import SyntheticNoiseDataset
from models.babyunet import BabyUnet

CHECKPOINTS_PATH = '../checkpoints/'

mnist_test = MNIST('../inferred_data/MNIST', download=True,
                    transform=transforms.Compose([
                        transforms.ToTensor(),
                    ]), train=False)
noisy_mnist_test = SyntheticNoiseDataset(mnist_test, 'test')
data_loader = DataLoader(noisy_mnist_test, batch_size=256, shuffle=True)

for x in range(0, 200, 10):
    trained_model = BabyUnet()
    # load_state_dict() expects a state dict, not a file path, so load the
    # checkpoint file first.
    trained_model.load_state_dict(torch.load(CHECKPOINTS_PATH + 'model' + str(x)))
    trained_model.eval()
    for i, batch in enumerate(data_loader):
        with torch.no_grad():
            denoised = trained_model(batch)
        break  # only the first batch is needed ("break()" was a syntax error)
    # np.save takes the target file first, then the array.
    np.save('../inferred_data/model' + str(x) + '.npz', denoised.numpy())
[ 11748, 28034, 198, 6738, 28034, 10178, 13, 19608, 292, 1039, 1330, 29060, 8808, 198, 198, 6738, 28034, 10178, 1330, 31408, 198, 6738, 28034, 13, 26791, 13, 7890, 1330, 6060, 17401, 198, 198, 6738, 14750, 13, 26791, 1330, 26375, 6587, 2949...
2.459677
372
# Script for data augmentation functions

import numpy as np
from collections import deque
from PIL import Image
import cv2
import torch  # needed by image_to_tensor below (was missing)

from data.config import *


def imread_cv2(image_path):
    """
    Read image_path with cv2 format (H, W, C);
    if the image is a '.gif', the output is a numpy array of {0,1}
    """
    image_format = image_path[-3:]
    if image_format == 'jpg':
        image = cv2.imread(image_path)
    else:
        image = np.array(Image.open(image_path))
    return image


def image_to_tensor(image, mean=0, std=1.):
    """Transform image (input is numpy array, read in by cv2)
    """
    if len(image.shape) == 2:
        image = image.reshape(image.shape[0], image.shape[1], 1)
    image = image.astype(np.float32)
    image = (image-mean)/std
    image = image.transpose((2,0,1))
    tensor = torch.from_numpy(image)
    return tensor


# --- Data Augmentation functions --- #
# A lot of functions can be found here:
# https://github.com/fchollet/keras/blob/master/keras/preprocessing/image.py#L223

# transform image and label
def randomHorizontalFlip(image, mask, p=0.5):
    """Do a random horizontal flip with probability p"""
    if np.random.random() < p:
        image = np.fliplr(image)
        mask = np.fliplr(mask)
    return image, mask


def randomVerticalFlip(image, mask, p=0.5):
    """Do a random vertical flip with probability p"""
    if np.random.random() < p:
        image = np.flipud(image)
        mask = np.flipud(mask)
    return image, mask


def randomHorizontalShift(image, mask, max_shift=0.05, p=0.5):
    """Do random horizontal shift with max proportion shift and with probability p
    Elements that roll beyond the last position are re-introduced at the first."""
    max_shift_pixels = int(max_shift*image.shape[1])
    shift = np.random.choice(np.arange(-max_shift_pixels, max_shift_pixels+1))
    if np.random.random() < p:
        image = np.roll(image, shift, axis=1)
        mask = np.roll(mask, shift, axis=1)
    return image, mask


def randomVerticalShift(image, mask, max_shift=0.05, p=0.5):
    """Do random vertical shift with max proportion shift and probability p
    Elements that roll beyond the last position are re-introduced at the first."""
    max_shift_pixels = int(max_shift*image.shape[0])
    shift = np.random.choice(np.arange(-max_shift_pixels, max_shift_pixels+1))
    if np.random.random() < p:
        image = np.roll(image, shift, axis=0)
        mask = np.roll(mask, shift, axis=0)
    return image, mask


def randomInvert(image, mask, p=0.5):
    """Randomly invert image with probability p"""
    if np.random.random() < p:
        image = 255 - image
        mask = mask
    return image, mask


def randomBrightness(image, mask, p=0.75):
    """With probability p, randomly increase or decrease brightness.
    See https://stackoverflow.com/questions/37822375/python-opencv-increasing-image-brightness-without-overflowing-uint8-array"""
    if np.random.random() < p:
        max_value = np.percentile(255-image, q=25)  # avoid burning out white cars, so take image-specific maximum
        value = np.random.choice(np.arange(-max_value, max_value))
        if value > 0:
            image = np.where((255 - image) < value,255,image+value).astype(np.uint8)
        else:
            image = np.where(image < -value,0,image+value).astype(np.uint8)
    return image, mask


def randomHue(image, mask, p=0.25, max_value=75):
    """With probability p, randomly increase or decrease hue.
    See https://stackoverflow.com/questions/32609098/how-to-fast-change-image-brightness-with-python-opencv"""
    if np.random.random() < p:
        value = np.random.choice(np.arange(-max_value, max_value))
        # work in a signed dtype so a negative shift cannot wrap around in uint8
        hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV).astype(np.int16)
        hsv[:,:,0] = hsv[:,:,0] + value
        hsv = np.clip(hsv, a_min=0, a_max=255).astype(np.uint8)
        image = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    return image, mask


def GaussianBlur(image, mask, kernel=(1, 1),sigma=1, p=0.5):
    """With probability p, apply Gaussian blur"""
    # TODO
    return image, mask


def randomRotate(image, mask, max_angle, p=0.5):
    """Perform random rotation with max_angle and probability p"""
    # TODO
    return(image, mask)
[ 2, 12327, 329, 1366, 16339, 14374, 5499, 198, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 17268, 1330, 390, 4188, 198, 6738, 350, 4146, 1330, 7412, 198, 11748, 269, 85, 17, 198, 6738, 1366, 13, 11250, 1330, 1635, 198, 198, 4299, 54...
2.516438
1,673
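The shift helpers above rely on np.roll's wrap-around behavior ("elements that roll beyond the last position are re-introduced at the first"). A tiny demonstration on a 1-D array:

import numpy as np

row = np.array([1, 2, 3, 4, 5])
print(np.roll(row, 2))   # [4 5 1 2 3] -- the tail wraps to the front
print(np.roll(row, -1))  # [2 3 4 5 1] -- the head wraps to the back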
""" substitute_finder app custom templatetags module """ from django import template register = template.Library()
[ 37811, 198, 7266, 301, 3678, 62, 22805, 598, 2183, 2169, 489, 265, 316, 3775, 8265, 198, 37811, 198, 6738, 42625, 14208, 1330, 11055, 198, 198, 30238, 796, 11055, 13, 23377, 3419, 628 ]
3.65625
32
from django.shortcuts import render, redirect, get_object_or_404 from django.urls import reverse_lazy from django import views from django.views import generic as g_views from django.views.generic import base as b_views, edit as e_views from .. import forms, models
[ 6738, 42625, 14208, 13, 19509, 23779, 1330, 8543, 11, 18941, 11, 651, 62, 15252, 62, 273, 62, 26429, 198, 6738, 42625, 14208, 13, 6371, 82, 1330, 9575, 62, 75, 12582, 198, 6738, 42625, 14208, 1330, 5009, 198, 6738, 42625, 14208, 13, 3...
3.443038
79
from load_las_data import LoadLasData from altair_log_plot import AltAirLogPlot from load_shapefile_data import LoadShpData from alitair_well_location_map import WellLocationMap
[ 6738, 3440, 62, 21921, 62, 7890, 1330, 8778, 46898, 6601, 198, 6738, 5988, 958, 62, 6404, 62, 29487, 1330, 12344, 16170, 11187, 43328, 198, 6738, 3440, 62, 43358, 7753, 62, 7890, 1330, 8778, 2484, 79, 6601, 198, 6738, 435, 270, 958, 6...
3.423077
52
import pickle import pandas as pd from typing import List, Tuple
[ 11748, 2298, 293, 198, 11748, 19798, 292, 355, 279, 67, 198, 6738, 19720, 1330, 7343, 11, 309, 29291, 198 ]
3.421053
19
import xadmin
from users.models import VerifyCode
from xadmin import views


# The admin classes below are reconstructed minimal placeholders; the original
# definitions are not shown. They follow the usual xadmin registration pattern.
class VerifyCodeAdmin(object):
    list_display = ['code', 'mobile', 'add_time']  # assumed field names


class BaseSetting(object):
    # enable theme switching in the xadmin UI
    enable_themes = True
    use_bootswatch = True


class GlobalSettings(object):
    # site-wide title and footer shown by xadmin
    site_title = 'Admin'   # assumed title
    site_footer = 'Admin'  # assumed footer


xadmin.site.register(VerifyCode, VerifyCodeAdmin)
xadmin.site.register(views.BaseAdminView, BaseSetting)
xadmin.site.register(views.CommAdminView, GlobalSettings)
[ 11748, 2124, 28482, 201, 198, 6738, 2985, 13, 27530, 1330, 49899, 10669, 201, 198, 6738, 2124, 28482, 1330, 5009, 201, 198, 201, 198, 201, 198, 201, 198, 201, 198, 201, 198, 87, 28482, 13, 15654, 13, 30238, 7, 13414, 1958, 10669, 11, ...
3.02381
84
### Load necessary libraries ### import numpy as np from sklearn.model_selection import KFold from sklearn.metrics import accuracy_score import tensorflow as tf from tensorflow import keras from sklearn.metrics import ConfusionMatrixDisplay model = get_network() model.summary() ### Train and evaluate via 10-Folds cross-validation ### accuracies = [] folds = np.array(['fold1','fold2','fold3','fold4', 'fold5','fold6','fold7','fold8', 'fold9','fold10']) load_dir = "UrbanSounds8K/processed/" kf = KFold(n_splits=10) for train_index, test_index in kf.split(folds): x_train, y_train = [], [] for ind in train_index: # read features or segments of an audio file train_data = np.load("{0}/{1}.npz".format(load_dir,folds[ind]), allow_pickle=True) # for training stack all the segments so that they are treated as an example/instance features = np.concatenate(train_data["features"], axis=0) labels = np.concatenate(train_data["labels"], axis=0) x_train.append(features) y_train.append(labels) # stack x,y pairs of all training folds x_train = np.concatenate(x_train, axis = 0).astype(np.float32) y_train = np.concatenate(y_train, axis = 0).astype(np.float32) # for testing we will make predictions on each segment and average them to # produce single label for an entire sound clip. test_data = np.load("{0}/{1}.npz".format(load_dir, folds[test_index][0]), allow_pickle=True) x_test = test_data["features"] y_test = test_data["labels"] log_dir="logs/fit/" + folds[test_index][0] tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1) model = get_network() model.fit(x_train, y_train, epochs = 20, batch_size = 64, verbose = 1, validation_split=0.2, use_multiprocessing=True, workers=8, callbacks=[tensorboard_callback]) # evaluate on test set/fold y_true, y_pred = [], [] for x, y in zip(x_test, y_test): # average predictions over segments of a sound clip avg_p = np.argmax(np.mean(model.predict(x), axis = 0)) y_pred.append(avg_p) # pick single label via np.unique for a sound clip y_true.append(np.unique(y)[0]) accuracies.append(accuracy_score(y_true, y_pred)) print("Fold n accuracy: {0}".format(accuracy_score(y_true, y_pred))) cm = ConfusionMatrixDisplay.from_predictions(y_true, y_pred) cm.figure_.savefig('conf_mat_' + str(test_index) + '_acc_' + str(accuracy_score(y_true, y_pred)) + '.png',dpi=1000) print("Average 10 Folds Accuracy: {0}".format(np.mean(accuracies)))
[ 21017, 8778, 3306, 12782, 44386, 198, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 1341, 35720, 13, 19849, 62, 49283, 1330, 509, 37, 727, 198, 6738, 1341, 35720, 13, 4164, 10466, 1330, 9922, 62, 26675, 198, 198, 11748, 11192, 273, 111...
2.406554
1,129
import os
import pprint
import subprocess
import time
from typing import Dict, List

from kubernetes.client import (
    V1EnvVar,
    V1EnvVarSource,
    V1ObjectFieldSelector,
    V1ResourceFieldSelector,
)

from metaflow import FlowSpec, step, environment, resources, current


def get_env_vars(env_resource_map: Dict[str, str]) -> List[V1EnvVar]:
    # Reconstructed helper (the original is not shown): expose pod resource
    # requests/limits as environment variables via the Kubernetes downward
    # API, using the otherwise-unused V1ResourceFieldSelector import above.
    return [
        V1EnvVar(
            name=name,
            value_from=V1EnvVarSource(
                resource_field_ref=V1ResourceFieldSelector(resource=resource)
            ),
        )
        for name, resource in env_resource_map.items()
    ]


kubernetes_vars = get_env_vars(
    {
        "LOCAL_STORAGE": "requests.ephemeral-storage",
        "LOCAL_STORAGE_LIMIT": "limits.ephemeral-storage",
        "CPU": "requests.cpu",
        "CPU_LIMIT": "limits.cpu",
        "MEMORY": "requests.memory",
        "MEMORY_LIMIT": "limits.memory",
    }
)
kubernetes_vars.append(
    V1EnvVar(
        name="MY_POD_NAME",
        value_from=V1EnvVarSource(
            field_ref=V1ObjectFieldSelector(field_path="metadata.name")
        ),
    )
)

annotations = {
    "metaflow.org/flow_name": "MF_NAME",
    "metaflow.org/step": "MF_STEP",
    "metaflow.org/run_id": "MF_RUN_ID",
    "metaflow.org/experiment": "MF_EXPERIMENT",
    "metaflow.org/tag_metaflow_test": "MF_TAG_METAFLOW_TEST",
    "metaflow.org/tag_test_t1": "MF_TAG_TEST_T1",
}
for annotation, env_name in annotations.items():
    kubernetes_vars.append(
        V1EnvVar(
            name=env_name,
            value_from=V1EnvVarSource(
                field_ref=V1ObjectFieldSelector(
                    field_path=f"metadata.annotations['{annotation}']"
                )
            ),
        )
    )

labels = {
    "aip.zillowgroup.net/kfp-pod-default": "KF_POD_DEFAULT",
    "tags.ledger.zgtools.net/ai-flow-name": "AI_FLOW_NAME",
    "tags.ledger.zgtools.net/ai-step-name": "AI_STEP_NAME",
    "tags.ledger.zgtools.net/ai-experiment-name": "AI_EXPERIMENT_NAME",
}
for label, env_name in labels.items():
    kubernetes_vars.append(
        V1EnvVar(
            name=env_name,
            value_from=V1EnvVarSource(
                field_ref=V1ObjectFieldSelector(
                    field_path=f"metadata.labels['{label}']"
                )
            ),
        )
    )


class ResourcesFlow(FlowSpec):
    # Minimal placeholder flow (the original steps are not shown); they
    # presumably attach kubernetes_vars via the @environment/@resources
    # decorators imported above.
    @step
    def start(self):
        self.next(self.end)

    @step
    def end(self):
        pass


if __name__ == "__main__":
    ResourcesFlow()
[ 11748, 28686, 198, 11748, 279, 4798, 198, 11748, 850, 14681, 198, 11748, 640, 198, 6738, 19720, 1330, 360, 713, 11, 7343, 198, 198, 6738, 479, 18478, 3262, 274, 13, 16366, 1330, 357, 198, 220, 220, 220, 569, 16, 4834, 85, 19852, 11, ...
1.947267
1,043
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Functions used for data handling
"""

__author__ = "Christina Ludwig, GIScience Research Group, Heidelberg University"
__email__ = "christina.ludwig@uni-heidelberg.de"

import os
import yaml
from shapely.geometry import box
import numpy as np
import pandas as pd
import geopandas as gpd
import json

from nb_utils.utils import create_bbox, reproject_to_utm

CONTEXT_NAMES = {"area": "Area",
                 "building_density": "Building density",
                 "age": "Days since creation",
                 "n_tags": "Number of tags",
                 "changes": "Number of changes",
                 "max_version": "Version number",
                 "user_count_inner": "Inner user count",
                 "user_density_inner": "Inner user density",
                 "user_count_outer": "Outer user count",
                 "user_density_outer": "Outer user density",
                 "feature_count": "Feature count",
                 "random": "Random"}

rules_colnames = ['antecedents', 'consequents', 'antecedent support',
                  'consequent support', 'support', 'confidence', 'lift',
                  'leverage', 'conviction', "context", "context_min",
                  "context_max", "context_p_min", "context_p_max",
                  "nfeatures", "rule"]

pretty_names_units = {"area": "Area [ha]",
                      "building_density": "Building density",
                      "feature_count": "Feature count",
                      "age": "Days since creation",
                      "n_tags": "Number of tags",
                      "changes": "Number of changes",
                      "max_version": "Version number",
                      "user_count_inner": "Inner user count",
                      "user_density_inner": "Inner user density",
                      "user_count_outer": "Outer user count",
                      "user_density_outer": "Outer user density",
                      "random": "Random"}


def load_config(config_file, cities):
    """
    Load config parameters from file
    :param config_file:
    :param cities:
    :return:
    """
    if not os.path.exists(config_file):
        print("ERROR: Config file {} does not exist.".format(config_file))
        return None  # added: avoid a NameError on the unbound config below
    with open(config_file, 'r') as src:
        config = yaml.load(src, Loader=yaml.FullLoader)
    config_cities = config["locations"]
    config_cities = {city: config_cities[city] for city in cities}
    return config_cities


def load_data(cities, data_dir):
    """
    Load data into notebook from file
    :return:
    """
    loaded_tags_dfs = []
    loaded_context_dfs = []
    for city in cities:
        print("Loading {}...".format(city))

        # Check paths
        tags_file = os.path.join(data_dir, city, "{}_tags.json".format(city))
        context_file = os.path.join(data_dir, city, "{}_context.geojson".format(city))
        if (not os.path.exists(tags_file)) or (not os.path.exists(context_file)):
            print("{}: Input files not found.".format(city))
            return None, None, None

        # Read data and set index
        tags_df = pd.read_json(tags_file).set_index("@osmId")
        context_df = gpd.read_file(context_file).set_index("@osmId")

        # Calculate area (should be moved to data_extraction)
        context_df["area"] = reproject_to_utm(context_df).area #/ 10000. # conversion to ha

        # Add column holding the city name
        context_df["city"] = city

        loaded_tags_dfs.append(tags_df)
        loaded_context_dfs.append(context_df)

    # Convert list of dataframes to dataframe
    all_tags_df = pd.concat(loaded_tags_dfs, axis=0)
    all_tags_df = all_tags_df.fillna(False)
    all_context_df = pd.concat(loaded_context_dfs, axis=0)

    all_features = all_context_df.join(all_tags_df, sort=False)

    # Add dummy columns for "no antecedent" and random context variable
    all_features["none"] = True
    all_features["random"] = np.random.rand(len(all_features))

    # The park itself is always counted as an object inside of it. Therefore, subtract 1.
    all_features["feature_count"] = all_features["feature_count"] - 1

    # Delete unnecessary columns
    unnecessary_cols = list(filter(lambda x: x.startswith("gt:"), all_features.columns)) + ["leisure=park"]
    all_features.drop(unnecessary_cols, axis=1, inplace=True)

    return all_features


def create_city_bboxes(config_cities):
    """
    Create bboxes of cities
    :return:
    """
    bboxes = {c: box(*create_bbox(config_cities[c]["center"], config_cities[c]["width"])) for c in config_cities.keys()}
    bbox_df = pd.DataFrame().from_dict(bboxes, orient="index", columns=["geometry"])
    return gpd.GeoDataFrame(bbox_df)


def dump_city_rules(city_rules, interim_dir):
    """
    Write results from context based association rule analysis to file
    :param city_rules:
    :param interim_dir:
    :return:
    """
    city_rules_dir = os.path.join(interim_dir, "city_rules")
    if not os.path.exists(city_rules_dir):
        os.mkdir(city_rules_dir)
    for k, v in city_rules.items():
        print(k)
        v["heatmap"].to_json(os.path.join(city_rules_dir, "{}_heatmap.json".format(k)))
        v["valid_rules"].reset_index().to_json(os.path.join(city_rules_dir, "{}_valid_rules.json".format(k)))
        with open(os.path.join(city_rules_dir, "{}_sel_features.json".format(k)), "w") as dst:
            json.dump(list(v["sel_features"].index), dst)


def load_city_rules(cities, interim_dir, all_features):
    """
    Load results from context based association rule analysis to file
    :param cities:
    :param interim_dir:
    :param all_features:
    :return:
    """
    city_rules = {}
    for city in cities:
        with open(os.path.join(interim_dir, "city_rules", "{}_sel_features.json".format(city))) as dst:
            selected_ids = json.load(dst)
            sel_features = all_features.loc[selected_ids]
        # removed a leftover "selected_osmids = json" line that rebound the json module
        city_rules[city] = {
            "heatmap": pd.read_json(os.path.join(interim_dir, "city_rules", "{}_heatmap.json".format(city))),
            "valid_rules": pd.read_json(
                os.path.join(interim_dir, "city_rules", "{}_valid_rules.json".format(city))).set_index("index"),
            "sel_features": sel_features}
    return city_rules
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 24629, 2733, 973, 329, 1366, 9041, 198, 37811, 198, 198, 834, 9800, 834, 796, 366, 10684, 1437, 44476, 11, 402, ...
2.503576
2,377
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_config import cfg import six from keystoneclient.auth.identity.v3 import base from keystoneclient.auth.identity.v3 import token __all__ = ['FederatedBaseAuth']
[ 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 345, 743, 198, 2, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 921, 743, 7330, 198, 2, 257, 4866, 286, 262, 13789, 379, 198, 2,...
3.600985
203
from warnings import warn from tables.utilsextension import * _warnmsg = ("utilsExtension is pending deprecation, import utilsextension instead. " "You may use the pt2to3 tool to update your source code.") warn(_warnmsg, DeprecationWarning, stacklevel=2)
[ 6738, 14601, 1330, 9828, 201, 198, 6738, 8893, 13, 22602, 325, 742, 3004, 1330, 1635, 201, 198, 201, 198, 62, 40539, 19662, 796, 5855, 26791, 11627, 3004, 318, 13310, 1207, 8344, 341, 11, 1330, 7736, 325, 742, 3004, 2427, 13, 366, 201...
3.078652
89
#!/usr/bin/env python3 # The format of your own localizable method. # This is an example of '"string".localized' SUFFIX = '.localized' KEY = r'"(?:\\.|[^"\\])*"' LOCALIZABLE_RE = r'%s%s' % (KEY, SUFFIX) # Specify the path of localizable files in project. LOCALIZABLE_FILE_PATH = '' LOCALIZABLE_FILE_NAMES = ['Localizable'] LOCALIZABLE_FILE_TYPES = ['strings'] # File types of source file. SEARCH_TYPES = ['swift', 'm', 'json'] SOURCE_FILE_EXCLUSIVE_PATHS = [ 'Assets.xcassets', 'Carthage', 'ThirdParty', 'Pods', 'Media.xcassets', 'Framework', 'bin'] LOCALIZABLE_FILE_EXCLUSIVE_PATHS = ['Carthage', 'ThirdParty', 'Pods', 'Framework', 'bin'] LOCALIZABLE_FORMAT_RE = r'"(?:\\.|[^"\\])*"\s*=\s*"(?:\\.|[^"\\])*";\n' DEFAULT_TARGET_PATH = 'generated.strings'
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 2, 383, 5794, 286, 534, 898, 1957, 13821, 2446, 13, 198, 2, 770, 318, 281, 1672, 286, 705, 1, 8841, 1911, 12001, 1143, 6, 198, 12564, 5777, 10426, 796, 45302, 12001, 1143, ...
2.252809
356
from dashboard_analytics.models import AccountType, InstrumentType, Account, Transaction
[ 6738, 30415, 62, 38200, 14094, 13, 27530, 1330, 10781, 6030, 11, 42410, 6030, 11, 10781, 11, 45389 ]
5.176471
17
#!/usr/bin/env python """ Add all (potentially gigantic) histograms in a group of files. """ import dashi import tables import os, sys, operator, shutil from optparse import OptionParser parser = OptionParser(usage="%prog [OPTIONS] infiles outfile", description=__doc__) parser.add_option("--blocksize", dest="blocksize", type=int, default=2048) opts, args = parser.parse_args() if len(args) < 2: parser.error("You must specify at least one output and one input file") infiles, outfile = args[:-1], args[-1] if os.path.exists(outfile): parser.error("%s already exists!" % outfile) shutil.copy(infiles[0], outfile) from collections import defaultdict paths = defaultdict(list) for fname in infiles[1:]: with tables.openFile(fname) as hdf: for group in hdf.walkNodes(where='/', classname='Group'): if 'ndim' in group._v_attrs: # a dashi histogram path = group._v_pathname paths[path].append(fname) def histadd(sourceGroup, destGroup, blocksize=1): """ Add dashi histograms stored in HDF5 groups :param blocksize: operate on blocksize I/O chunks at a time """ for arr in '_h_bincontent', '_h_squaredweights': source = sourceGroup._v_children[arr] dest = destGroup._v_children[arr] chunksize = blocksize*reduce(operator.mul, dest.chunkshape) size = reduce(operator.mul, dest.shape) for i in range(0, size, chunksize): dest[i:i+chunksize] += source[i:i+chunksize] for prop in 'nentries', 'nans', 'nans_wgt', 'nans_sqwgt': destGroup._v_attrs[prop] += sourceGroup._v_attrs[prop] with tables.openFile(outfile, 'a') as ohdf: for path, fnames in paths.iteritems(): print(path) destGroup = ohdf.getNode(path) for fname in fnames: with tables.openFile(fname) as hdf: histadd(hdf.getNode(path), destGroup, opts.blocksize)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 37811, 198, 4550, 477, 357, 13059, 3746, 24804, 8, 1554, 26836, 287, 257, 1448, 286, 3696, 13, 198, 37811, 198, 198, 11748, 288, 12144, 198, 11748, 8893, 198, 11748, 28686, 11, 25...
2.655172
667
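The chunked accumulation inside histadd above bounds memory use by adding blocksize-sized slices instead of whole arrays. A minimal numpy illustration of the same pattern (the names and sizes are illustrative):

import numpy as np

# accumulate src into dst in fixed-size chunks to bound peak memory
dst = np.zeros(10)
src = np.arange(10.0)
chunksize = 4
for i in range(0, dst.size, chunksize):
    dst[i:i+chunksize] += src[i:i+chunksize]
print(dst)  # [0. 1. 2. 3. 4. 5. 6. 7. 8. 9.]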
import datetime
[ 11748, 4818, 8079, 628, 628, 628, 628, 628, 628 ]
3
9
from .base import * DEBUG = True EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql', 'NAME': 'SMS', 'USER': 'postgres', 'PASSWORD': 'password', 'HOST': 'localhost', 'PORT': '', } } INSTALLED_APPS += [ 'debug_toolbar.apps.DebugToolbarConfig', 'django_extensions', ] ALLOWED_HOSTS += ['.herokuapp.com'] # Loads SECRET_KEY from .env file # SECRET_KEY = get_env_variable('SECRET_KEY')
[ 6738, 764, 8692, 1330, 1635, 198, 198, 30531, 796, 6407, 198, 198, 27630, 4146, 62, 31098, 10619, 796, 705, 28241, 14208, 13, 7295, 13, 4529, 13, 1891, 2412, 13, 41947, 13, 15333, 7282, 437, 6, 198, 198, 35, 1404, 6242, 1921, 1546, ...
2.153226
248
from functions import get_df, write_df
import geopy
from geopy import distance

"""
The function question3 takes in the latitude and longitude of potential distress locations,
and returns the nearest port with essential provisions such as water, fuel_oil and diesel.
"""


def question3(table_name, latitude, longitude):
    # Reconstructed sketch; the original implementation is not shown.
    # Assumptions: get_df returns a pandas DataFrame, and the table has
    # 'latitude'/'longitude' columns plus boolean 'provisions_water',
    # 'provisions_fuel_oil' and 'provisions_diesel' flags.
    ports = get_df(table_name)
    stocked = ports[
        ports["provisions_water"]
        & ports["provisions_fuel_oil"]
        & ports["provisions_diesel"]
    ].copy()
    # geodesic distance from the distress location to each candidate port
    stocked["distance_km"] = stocked.apply(
        lambda row: distance.distance(
            (latitude, longitude), (row["latitude"], row["longitude"])
        ).km,
        axis=1,
    )
    nearest = stocked.nsmallest(1, "distance_km")
    write_df(nearest, "question3_output")  # assumed output table name
    return nearest


if __name__ == "__main__":
    question3("foodpanda_tables", 32.610982, -38.706256)
[ 6738, 5499, 1330, 651, 62, 7568, 11, 3551, 62, 7568, 198, 11748, 4903, 11081, 198, 6738, 4903, 11081, 1330, 5253, 198, 198, 37811, 198, 464, 2163, 1808, 18, 2753, 287, 262, 32477, 290, 890, 3984, 286, 2785, 17087, 7064, 11, 198, 392, ...
3.56
100
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
#   Copyright 2022 Valory AG
#   Copyright 2018-2021 Fetch.AI Limited
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
#
# ------------------------------------------------------------------------------
"""Check amount of time for acn connection communications."""
import asyncio
import logging
import os
import time
from contextlib import contextmanager
from tempfile import TemporaryDirectory
from typing import Callable, List, Tuple, Union

from aea_cli_benchmark.case_acn_communication.utils import (
    DEFAULT_DELEGATE_PORT,
    DEFAULT_MAILBOX_PORT,
    DEFAULT_NODE_PORT,
    _make_libp2p_client_connection,
    _make_libp2p_connection,
    _make_libp2p_mailbox_connection,
)

from aea.connections.base import Connection
from aea.mail.base import Envelope
from packages.fetchai.protocols.default.message import DefaultMessage


def make_envelope(from_addr: str, to_addr: str) -> Envelope:
    """Construct an envelope."""
    msg = DefaultMessage(
        dialogue_reference=("", ""),
        message_id=1,
        target=0,
        performative=DefaultMessage.Performative.BYTES,
        content=b"hello",
    )
    envelope = Envelope(
        to=to_addr,
        sender=from_addr,
        message=msg,
    )
    return envelope


def run(connection: str, run_times: int = 10) -> List[Tuple[str, Union[int, float]]]:
    """Check construction time and memory usage."""
    logging.basicConfig(level=logging.CRITICAL)
    cwd = os.getcwd()
    try:
        # Select the connection factory. The assignments below are
        # reconstructed (the original branch bodies were empty, which is a
        # SyntaxError); the three factories are imported above.
        if connection == "p2pnode":
            con_maker = _make_libp2p_connection
        elif connection == "client":
            con_maker = _make_libp2p_client_connection
        elif connection == "mailbox":
            con_maker = _make_libp2p_mailbox_connection
        else:
            raise ValueError(f"Unsupported connection: {connection}")

        with TemporaryDirectory() as tmp_dir:
            os.chdir(tmp_dir)
            # _run is defined elsewhere in the original module (not shown here)
            coro = _run(con_maker)
            first_time, second_time = asyncio.get_event_loop().run_until_complete(coro)

        return [
            ("first time (seconds)", first_time),
            ("second time (seconds)", second_time),
        ]
    finally:
        os.chdir(cwd)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 16529, 26171, 198, 2, 198, 2, 220, 220, 15069, 33160, 3254, 652, 13077, 198, 2, 220, 220, 15069, 2864, 12, 1...
2.740816
980
import pybullet as p
#p.connect(p.UDP,"192.168.86.100")
p.connect(p.SHARED_MEMORY)
p.resetSimulation()
objects = [p.loadURDF("plane.urdf", 0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,1.000000)]
objects = [p.loadURDF("samurai.urdf", 0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,1.000000)]
objects = [p.loadURDF("pr2_gripper.urdf", 0.500000,0.300006,0.700000,-0.000000,-0.000000,-0.000031,1.000000)]
pr2_gripper = objects[0]
print ("pr2_gripper=")
print (pr2_gripper)

jointPositions=[ 0.550569, 0.000000, 0.549657, 0.000000 ]
for jointIndex in range (p.getNumJoints(pr2_gripper)):
    p.resetJointState(pr2_gripper,jointIndex,jointPositions[jointIndex])

pr2_cid = p.createConstraint(pr2_gripper,-1,-1,-1,p.JOINT_FIXED,[0,0,0],[0.2,0,0],[0.500000,0.300006,0.700000])
print ("pr2_cid")
print (pr2_cid)

objects = [p.loadURDF("kuka_iiwa/model_vr_limits.urdf", 1.400000,-0.200000,0.600000,0.000000,0.000000,0.000000,1.000000)]
kuka = objects[0]
jointPositions=[ -0.000000, -0.000000, 0.000000, 1.570793, 0.000000, -1.036725, 0.000001 ]
for jointIndex in range (p.getNumJoints(kuka)):
    p.resetJointState(kuka,jointIndex,jointPositions[jointIndex])
    p.setJointMotorControl2(kuka,jointIndex,p.POSITION_CONTROL,jointPositions[jointIndex],0)

objects = [p.loadURDF("lego/lego.urdf", 1.000000,-0.200000,0.700000,0.000000,0.000000,0.000000,1.000000)]
objects = [p.loadURDF("lego/lego.urdf", 1.000000,-0.200000,0.800000,0.000000,0.000000,0.000000,1.000000)]
objects = [p.loadURDF("lego/lego.urdf", 1.000000,-0.200000,0.900000,0.000000,0.000000,0.000000,1.000000)]
objects = p.loadSDF("gripper/wsg50_one_motor_gripper_new_free_base.sdf")
kuka_gripper = objects[0]
print ("kuka gripper=")
print(kuka_gripper)

p.resetBasePositionAndOrientation(kuka_gripper,[0.923103,-0.200000,1.250036],[-0.000000,0.964531,-0.000002,-0.263970])
jointPositions=[ 0.000000, -0.011130, -0.206421, 0.205143, -0.009999, 0.000000, -0.010055, 0.000000 ]
for jointIndex in range (p.getNumJoints(kuka_gripper)):
    p.resetJointState(kuka_gripper,jointIndex,jointPositions[jointIndex])
    p.setJointMotorControl2(kuka_gripper,jointIndex,p.POSITION_CONTROL,jointPositions[jointIndex],0)

kuka_cid = p.createConstraint(kuka, 6, kuka_gripper,0,p.JOINT_FIXED, [0,0,0], [0,0,0.05],[0,0,0])

objects = [p.loadURDF("jenga/jenga.urdf", 1.300000,-0.700000,0.750000,0.000000,0.707107,0.000000,0.707107)]
objects = [p.loadURDF("jenga/jenga.urdf", 1.200000,-0.700000,0.750000,0.000000,0.707107,0.000000,0.707107)]
objects = [p.loadURDF("jenga/jenga.urdf", 1.100000,-0.700000,0.750000,0.000000,0.707107,0.000000,0.707107)]
objects = [p.loadURDF("jenga/jenga.urdf", 1.000000,-0.700000,0.750000,0.000000,0.707107,0.000000,0.707107)]
objects = [p.loadURDF("jenga/jenga.urdf", 0.900000,-0.700000,0.750000,0.000000,0.707107,0.000000,0.707107)]
objects = [p.loadURDF("jenga/jenga.urdf", 0.800000,-0.700000,0.750000,0.000000,0.707107,0.000000,0.707107)]
objects = [p.loadURDF("table/table.urdf", 1.000000,-0.200000,0.000000,0.000000,0.000000,0.707107,0.707107)]
objects = [p.loadURDF("teddy_vhacd.urdf", 1.050000,-0.500000,0.700000,0.000000,0.000000,0.707107,0.707107)]
objects = [p.loadURDF("cube_small.urdf", 0.950000,-0.100000,0.700000,0.000000,0.000000,0.707107,0.707107)]
objects = [p.loadURDF("sphere_small.urdf", 0.850000,-0.400000,0.700000,0.000000,0.000000,0.707107,0.707107)]
objects = [p.loadURDF("duck_vhacd.urdf", 0.850000,-0.400000,0.900000,0.000000,0.000000,0.707107,0.707107)]
objects = p.loadSDF("kiva_shelf/model.sdf")
ob = objects[0]
p.resetBasePositionAndOrientation(ob,[0.000000,1.000000,1.204500],[0.000000,0.000000,0.000000,1.000000])
objects = [p.loadURDF("teddy_vhacd.urdf", -0.100000,0.600000,0.850000,0.000000,0.000000,0.000000,1.000000)]
objects = [p.loadURDF("sphere_small.urdf", -0.100000,0.955006,1.169706,0.633232,-0.000000,-0.000000,0.773962)]
objects = [p.loadURDF("cube_small.urdf", 0.300000,0.600000,0.850000,0.000000,0.000000,0.000000,1.000000)]
objects = [p.loadURDF("table_square/table_square.urdf", -1.000000,0.000000,0.000000,0.000000,0.000000,0.000000,1.000000)]
ob = objects[0]
jointPositions=[ 0.000000 ]
for jointIndex in range (p.getNumJoints(ob)):
    p.resetJointState(ob,jointIndex,jointPositions[jointIndex])

objects = [p.loadURDF("husky/husky.urdf", 2.000000,-5.000000,1.000000,0.000000,0.000000,0.000000,1.000000)]
ob = objects[0]
jointPositions=[ 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 ]
for jointIndex in range (p.getNumJoints(ob)):
    p.resetJointState(ob,jointIndex,jointPositions[jointIndex])

p.setGravity(0.000000,0.000000,0.000000)
p.setGravity(0,0,-10)
p.stepSimulation()
p.disconnect()
[ 11748, 12972, 15065, 1616, 355, 279, 198, 2, 79, 13, 8443, 7, 79, 13, 52, 6322, 553, 17477, 13, 14656, 13, 4521, 13, 3064, 4943, 198, 79, 13, 8443, 7, 79, 13, 9693, 1503, 1961, 62, 44, 3620, 15513, 8, 198, 79, 13, 42503, 8890, ...
2.170857
2,148
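The sample above builds a VR grasping scene and wires fixed constraints (world-to-gripper and arm-to-gripper), but it steps the simulation only once. A minimal sketch of how such a scene is typically driven; the URDF path, joint index, and timing are illustrative assumptions, not taken from the sample:

import time
import pybullet as p
import pybullet_data

p.connect(p.DIRECT)  # headless; use p.GUI for the visual debugger
p.setAdditionalSearchPath(pybullet_data.getDataPath())  # so bundled URDFs resolve
p.setGravity(0, 0, -10)
kuka = p.loadURDF("kuka_iiwa/model.urdf")

# Drive joint 1 toward 0.5 rad while advancing the simulation at 240 Hz.
for _ in range(240):
    p.setJointMotorControl2(kuka, 1, p.POSITION_CONTROL, targetPosition=0.5)
    p.stepSimulation()
    time.sleep(1.0 / 240.0)
p.disconnect()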
def longest_common_prefix(s1: str, s2: str) -> str:
    """
    Finds the longest common prefix (substring) given two strings

    s1: First string to compare
    s2: Second string to compare

    Returns:
    Longest common prefix between s1 and s2

    >>> longest_common_prefix("ACTA", "GCCT")
    ''
    >>> longest_common_prefix("ACTA", "ACT")
    'ACT'
    >>> longest_common_prefix("ACT", "ACTA")
    'ACT'
    >>> longest_common_prefix("GATA", "GAAT")
    'GA'
    >>> longest_common_prefix("ATGA", "")
    ''
    >>> longest_common_prefix("", "GCCT")
    ''
    >>> longest_common_prefix("GCCT", "GCCT")
    'GCCT'
    """
    i = 0
    while i < min(len(s1), len(s2)):
        if s1[i] != s2[i]:
            break
        i += 1
    return s1[:i]


def longest_common_suffix(s1: str, s2: str) -> str:
    """
    Finds the longest common suffix (substring) given two strings

    s1: First string to compare
    s2: Second string to compare

    Returns:
    Longest common suffix between s1 and s2

    >>> longest_common_suffix("ACTA", "GCCT")
    ''
    >>> longest_common_suffix("ACTA", "CTA")
    'CTA'
    >>> longest_common_suffix("CTA", "ACTA")
    'CTA'
    >>> longest_common_suffix("GATAT", "GAATAT")
    'ATAT'
    >>> longest_common_suffix("ACTA", "")
    ''
    >>> longest_common_suffix("", "GCCT")
    ''
    >>> longest_common_suffix("GCCT", "GCCT")
    'GCCT'
    """
    return longest_common_prefix(s1[::-1], s2[::-1])[::-1]


def find_hamming_distance(s1: str, s2: str) -> int:
    """Compute the Hamming distance between two strings of equal length

    >>> find_hamming_distance("ATG", "ATC")
    1
    >>> find_hamming_distance("ATG", "TGA")
    3
    >>> find_hamming_distance("A", "A")
    0
    >>> find_hamming_distance("ATG", "ATG")
    0
    >>> find_hamming_distance("", "")
    0
    >>> find_hamming_distance("GAGGTAGCGGCGTTTAAC", "GTGGTAACGGGGTTTAAC")
    3
    """
    assert len(s1) == len(s2)
    return sum(1 for i in range(len(s1)) if s1[i] != s2[i])


def find_levenshtein_distance(s1: str, s2: str) -> int:
    """Compute the Levenshtein distance between two strings (i.e., minimum number
    of edits including substitution, insertion and deletion needed in a string to
    turn it into another)

    >>> find_levenshtein_distance("AT", "")
    2
    >>> find_levenshtein_distance("AT", "ATC")
    1
    >>> find_levenshtein_distance("ATG", "ATC")
    1
    >>> find_levenshtein_distance("ATG", "TGA")
    2
    >>> find_levenshtein_distance("ATG", "ATG")
    0
    >>> find_levenshtein_distance("", "")
    0
    >>> find_levenshtein_distance("GAGGTAGCGGCGTTTAAC", "GTGGTAACGGGGTTTAAC")
    3
    >>> find_levenshtein_distance("TGGCCGCGCAAAAACAGC", "TGACCGCGCAAAACAGC")
    2
    >>> find_levenshtein_distance("GCGTATGCGGCTAACGC", "GCTATGCGGCTATACGC")
    2
    """
    # initializing a matrix with `len(s1) + 1` rows and `len(s2) + 1` columns
    D = [[0 for x in range(len(s2) + 1)] for y in range(len(s1) + 1)]

    # fill first column
    for i in range(len(s1) + 1):
        D[i][0] = i

    # fill first row
    for j in range(len(s2) + 1):
        D[0][j] = j

    # fill rest of the matrix
    for i in range(1, len(s1) + 1):
        for j in range(1, len(s2) + 1):
            distance_left = D[i][j - 1] + 1  # deletion in pattern
            distance_above = D[i - 1][j] + 1  # insertion in pattern
            distance_diagonal = D[i - 1][j - 1] + (
                s1[i - 1] != s2[j - 1]
            )  # substitution
            D[i][j] = min(distance_left, distance_above, distance_diagonal)

    # return the last value (i.e., right most bottom value)
    return D[-1][-1]
[ 4299, 14069, 62, 11321, 62, 40290, 7, 82, 16, 25, 965, 11, 264, 17, 25, 965, 8, 4613, 965, 25, 198, 220, 220, 220, 37227, 198, 220, 220, 220, 9938, 82, 262, 14069, 2219, 21231, 357, 7266, 8841, 8, 1813, 734, 13042, 628, 220, 220...
2.216758
1,647
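The doctests embedded in the sample above double as an executable specification, so verifying the four functions is a one-liner. A usage sketch; the module name string_distance is an assumption:

# Assuming the functions above live in string_distance.py:
import doctest
import string_distance

# Runs every `>>>` example in the module's docstrings and reports failures.
results = doctest.testmod(string_distance, verbose=False)
print(results.attempted, results.failed)  # e.g. "31 0" when everything passes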
from __future__ import annotations
from typing import Optional, Dict, List, Union, Type, TYPE_CHECKING
from datetime import date, datetime
import pandas as pd
import numpy as np
import re
import locale

try:
    locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
except locale.Error:
    # Readthedocs has a problem, but difficult to replicate
    locale.setlocale(locale.LC_ALL, "")

from . import CoreScript
from ..models import ColumnModel
from ..types import MimeType

if TYPE_CHECKING:
    from ..schema import Schema
    from ..models import DataSourceModel
[ 6738, 11593, 37443, 834, 1330, 37647, 198, 6738, 19720, 1330, 32233, 11, 360, 713, 11, 7343, 11, 4479, 11, 5994, 11, 41876, 62, 50084, 2751, 198, 6738, 4818, 8079, 1330, 3128, 11, 4818, 8079, 198, 11748, 19798, 292, 355, 279, 67, 198,...
3.134078
179
#!/usr/bin/python3

if __name__ == "__main__":
    import sys
    args(sys.argv)
[ 2, 48443, 14629, 14, 8800, 14, 29412, 18, 198, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 1330, 25064, 198, 220, 220, 220, 26498, 7, 17597, 13, 853, 85, 8, 198 ]
2.131579
38
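As written, the entry above raises NameError: `args` is neither defined nor imported, so it presumably lives in a part of the file this sample dropped. A hedged stub that makes the snippet runnable; the behavior is invented for illustration:

#!/usr/bin/python3

def args(argv):
    # Hypothetical implementation: echo the command-line arguments.
    for i, arg in enumerate(argv):
        print(i, arg)

if __name__ == "__main__":
    import sys
    args(sys.argv)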
""" These modules contain sub-modules related to defining various profiles in a model """
[ 37811, 198, 4711, 13103, 3994, 850, 12, 18170, 3519, 284, 16215, 2972, 16545, 287, 257, 2746, 198, 37811 ]
4.944444
18
#!/usr/bin/env python3

import re


lines = get_input()
count = 0

for line in lines:
    lower, upper, char, password = re.split(r'-|: | ', line)
    lower, upper = int(lower) - 1, int(upper) - 1
    try:
        if (password[lower] == char) ^ (password[upper] == char):
            count += 1
    except:
        # don't care about boundaries
        pass

print(count)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 11748, 302, 628, 198, 198, 6615, 796, 651, 62, 15414, 3419, 198, 9127, 796, 657, 198, 198, 1640, 1627, 287, 3951, 25, 198, 220, 220, 220, 2793, 11, 6727, 11, 1149, 11, 920...
2.31677
161
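The snippet relies on a `get_input()` helper that is not defined in the sample. A plausible stand-in (the filename is an assumption), for context: the XOR is true exactly when one of the two 1-indexed positions holds the character, which is the Advent of Code 2020 day 2 "toboggan policy", and the `- 1` converts to 0-based indexing.

def get_input(path="input.txt"):
    # Assumed helper: one password policy per line, e.g. "1-3 a: abcde".
    with open(path) as f:
        return f.read().splitlines()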
from __future__ import print_function
import json
from os.path import join, dirname
from watson_developer_cloud import ToneAnalyzerV3
from watson_developer_cloud.tone_analyzer_v3 import ToneInput
from pprint import pprint

# If service instance provides API key authentication
# service = ToneAnalyzerV3(
#     ## url is optional, and defaults to the URL below. Use the correct URL for your region.
#     url='https://gateway.watsonplatform.net/tone-analyzer/api',
#     version='2017-09-21',
#     iam_apikey='your_apikey')

service = ToneAnalyzerV3(
    ## url is optional, and defaults to the URL below. Use the correct URL for your region.
    # url='https://gateway.watsonplatform.net/tone-analyzer/api',
    username='f0ec47cc-5191-4421-8fca-2395917e1640',
    password='q7JOpjOabiY5',
    version='2017-09-21')

# print("\ntone_chat() example 1:\n")
# utterances = [{
#     'text': 'I am very happy.',
#     'user': 'glenn'
# }, {
#     'text': 'It is a good day.',
#     'user': 'glenn'
# }]
# tone_chat = service.tone_chat(utterances).get_result()
# print(json.dumps(tone_chat, indent=2))

# print("\ntone() example 1:\n")
# print(
#     json.dumps(
#         service.tone(
#             tone_input='I am very happy. It is a good day.',
#             content_type="text/plain").get_result(),
#         indent=2))

# print("\ntone() example 2:\n")
# with open(join(dirname(__file__),
#                '../resources/tone-example.json')) as tone_json:
#     tone = service.tone(json.load(tone_json)['text'], "text/plain").get_result()
# print(json.dumps(tone, indent=2))

# print("\ntone() example 3:\n")
# with open(join(dirname(__file__),
#                '../resources/tone-example.json')) as tone_json:
#     tone = service.tone(
#         tone_input=json.load(tone_json)['text'],
#         content_type='text/plain',
#         sentences=True).get_result()
# print(json.dumps(tone, indent=2))

# print("\ntone() example 4:\n")
# with open(join(dirname(__file__),
#                '../resources/tone-example.json')) as tone_json:
#     tone = service.tone(
#         tone_input=json.load(tone_json),
#         content_type='application/json').get_result()
# print(json.dumps(tone, indent=2))

# print("\ntone() example 5:\n")
# with open(join(dirname(__file__),
#                '../resources/tone-example-html.json')) as tone_html:
#     tone = service.tone(
#         json.load(tone_html)['text'], content_type='text/html').get_result()
# print(json.dumps(tone, indent=2))

# print("\ntone() example 6 with GDPR support:\n")
# service.set_detailed_response(True)
# with open(join(dirname(__file__),
#                '../resources/tone-example-html.json')) as tone_html:
#     tone = service.tone(
#         json.load(tone_html)['text'],
#         content_type='text/html',
#         headers={
#             'Custom-Header': 'custom_value'
#         })
# print(tone)
# print(tone.get_headers())
# print(tone.get_result())
# print(tone.get_status_code())
# service.set_detailed_response(False)

# print("\ntone() example 7:\n")
test_tone = "Hi Team, The times are difficult! Our sales have been disappointing for the past three quarters for our data analytics product suite. We have a competitive data analytics product suite in the industry. However, we are not doing a good job at selling it, and this is really frustrating.We are missing critical sales opportunities. We cannot blame the economy for our lack of execution. Our clients need analytical tools to change their current business outcomes. In fact, it is in times such as this, our clients want to get the insights they need to turn their businesses around. It is disheartening to see that we are failing at closing deals, in such a hungry market. Let's buckle up and execute.Jennifer BakerSales Leader, North-East region"

tone_input = ToneInput(test_tone)
result = service.tone(tone_input=tone_input, content_type="application/json").get_result()
# print(type(json.dumps(tone, indent=2)))
pprint(result)
[ 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 11748, 33918, 198, 6738, 28686, 13, 6978, 1330, 4654, 11, 26672, 3672, 198, 6738, 266, 13506, 62, 16244, 263, 62, 17721, 1330, 45362, 37702, 9107, 53, 18, 198, 6738, 266, 13506, 62, ...
2.649567
1,501
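`get_result()` returns a plain dict; for Tone Analyzer v3 the document-level scores conventionally live under a `document_tone` key. A sketch of extracting them; the exact payload shape is per the service docs of that era and should be treated as an assumption:

# `result` is the dict produced by service.tone(...).get_result() above.
for tone in result.get("document_tone", {}).get("tones", []):
    # e.g. "Sadness 0.6165"
    print(tone["tone_name"], tone["score"])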
# -*- coding: utf-8 -*-
from hcloud.core.domain import BaseDomain

from hcloud.helpers.descriptors import ISODateTime
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 6738, 289, 17721, 13, 7295, 13, 27830, 1330, 7308, 43961, 198, 198, 6738, 289, 17721, 13, 16794, 364, 13, 20147, 1968, 669, 1330, 3180, 3727, 378, 7575, 628, 628, 628, ...
2.73913
46
import pytest
from AutomationFramework.page_objects.interfaces.interfaces import Interfaces
from AutomationFramework.tests.base_test import BaseTest
[ 11748, 12972, 9288, 198, 6738, 17406, 341, 21055, 6433, 13, 7700, 62, 48205, 13, 3849, 32186, 13, 3849, 32186, 1330, 4225, 32186, 198, 6738, 17406, 341, 21055, 6433, 13, 41989, 13, 8692, 62, 9288, 1330, 7308, 14402, 628 ]
3.947368
38
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import sqlalchemy as sql
[ 2, 15069, 2321, 4946, 25896, 5693, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 345, 743, 198, 2, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 921, 743, 7330, ...
3.855346
159
from typing import Any, List, Union

from boa3.builtin import NeoMetadata, metadata, public
from boa3.builtin.contract import Nep17TransferEvent
from boa3.builtin.interop.blockchain import get_contract
from boa3.builtin.interop.contract import GAS, NEO, call_contract
from boa3.builtin.interop.runtime import calling_script_hash, check_witness
from boa3.builtin.interop.storage import delete, get, put
from boa3.builtin.type import UInt160


# -------------------------------------------
# METADATA
# -------------------------------------------

# -------------------------------------------
# Storage Key Prefixes
# -------------------------------------------

KYC_WHITELIST_PREFIX = b'KYCWhitelistApproved'
TOKEN_TOTAL_SUPPLY_PREFIX = b'TokenTotalSupply'
TRANSFER_ALLOWANCE_PREFIX = b'TransferAllowancePrefix_'

# -------------------------------------------
# TOKEN SETTINGS
# -------------------------------------------

# Script hash of the contract owner
TOKEN_OWNER = UInt160()

# Symbol of the Token
TOKEN_SYMBOL = 'ICO'

# Number of decimal places
TOKEN_DECIMALS = 8

# Initial Supply of tokens in the system
TOKEN_INITIAL_SUPPLY = 10_000_000 * 100_000_000  # 10m total supply * 10^8 (decimals)

# -------------------------------------------
# Events
# -------------------------------------------

on_transfer = Nep17TransferEvent

# -------------------------------------------
# Methods
# -------------------------------------------


def is_administrator() -> bool:
    """
    Validates if the invoker has administrative rights

    :return: whether the contract's invoker is an administrator
    """
    return check_witness(TOKEN_OWNER)


def is_valid_address(address: UInt160) -> bool:
    """
    Validates if the address passed through the kyc.

    :return: whether the given address is validated by kyc
    """
    return get(KYC_WHITELIST_PREFIX + address).to_int() > 0

# -------------------------------------------
# Public methods from NEP5.1
# -------------------------------------------


def post_transfer(from_address: Union[UInt160, None], to_address: Union[UInt160, None], amount: int, data: Any):
    """
    Checks if the one receiving NEP17 tokens is a smart contract and if it's one the onPayment method will be called

    :param from_address: the address of the sender
    :type from_address: UInt160
    :param to_address: the address of the receiver
    :type to_address: UInt160
    :param amount: the amount of cryptocurrency that is being sent
    :type amount: int
    :param data: any pertinent data that might validate the transaction
    :type data: Any
    """
    if not isinstance(to_address, None):  # TODO: change to 'is not None' when `is` semantic is implemented
        contract = get_contract(to_address)
        if not isinstance(contract, None):  # TODO: change to 'is not None' when `is` semantic is implemented
            call_contract(to_address, 'onPayment', [from_address, amount, data])

# -------------------------------------------
# Public methods from KYC
# -------------------------------------------
[ 6738, 19720, 1330, 4377, 11, 7343, 11, 4479, 198, 198, 6738, 1489, 64, 18, 13, 18780, 259, 1330, 21227, 9171, 14706, 11, 20150, 11, 1171, 198, 6738, 1489, 64, 18, 13, 18780, 259, 13, 28484, 1330, 15310, 1558, 43260, 9237, 198, 6738, ...
3.444939
899
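`is_valid_address` reads whitelist entries that some administrator-only method must have written. A hedged sketch of that counterpart, modeled on Neo's public ICO template; the method name and return convention are assumptions, not taken from this sample:

@public
def kyc_register(addresses: List[UInt160]) -> int:
    """
    Adds the given addresses to the KYC whitelist; only the administrator may call it.
    Returns how many addresses were newly approved.
    """
    approved = 0
    if is_administrator():
        for address in addresses:
            if len(address) == 20:  # only well-formed script hashes
                put(KYC_WHITELIST_PREFIX + address, True)
                approved += 1
    return approved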
import os
# Restrict the script to run on CPU
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = ""
# Import Keras TensorFlow Backend
# from keras import backend as K
import tensorflow as tf

# Configure it to use only specific CPU Cores
config = tf.ConfigProto(intra_op_parallelism_threads=4,
                        inter_op_parallelism_threads=4,
                        device_count={"CPU": 1, "GPU": 0},
                        allow_soft_placement=True)

# import tensorflow as tf
import numpy as np
from IEOMAP_dataset_AC import dataset, IeomapSentenceIterator
from sklearn.metrics import confusion_matrix
from models_AC import SentenceModel
import json
import os
[ 11748, 28686, 198, 2, 37163, 262, 4226, 284, 1057, 319, 9135, 198, 418, 13, 268, 2268, 14631, 43633, 5631, 62, 7206, 27389, 62, 12532, 1137, 8973, 796, 366, 5662, 40, 62, 45346, 62, 2389, 1, 198, 418, 13, 268, 2268, 14692, 43633, 56...
2.465035
286
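Building the ConfigProto alone does not apply it; under TF 1.x graph mode it has to be attached to the session that Keras actually uses. A minimal sketch under that assumption:

import tensorflow as tf
from keras import backend as K

config = tf.ConfigProto(intra_op_parallelism_threads=4,
                        inter_op_parallelism_threads=4,
                        device_count={"CPU": 1, "GPU": 0},
                        allow_soft_placement=True)
# Hand the configured session to Keras so subsequent model code honors it.
session = tf.Session(config=config)
K.set_session(session)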
import discord
from redbot.core.bot import Red
from redbot.core.commands import commands
from redbot.core.utils.chat_formatting import humanize_list

from .utils import permcheck, rpccheck
[ 11748, 36446, 201, 198, 6738, 2266, 13645, 13, 7295, 13, 13645, 1330, 2297, 201, 198, 6738, 2266, 13645, 13, 7295, 13, 9503, 1746, 1330, 9729, 201, 198, 6738, 2266, 13645, 13, 7295, 13, 26791, 13, 17006, 62, 18982, 889, 1330, 1692, 10...
3.078125
64
import torch
DEVICE = 'cuda'
import math
import torch.optim as optim
from model import *
import os
import copy, gzip, pickle, time

data_dir = './drive/MyDrive/music_classification/Data'
classes = os.listdir(data_dir + '/images_original')
[ 11748, 28034, 198, 7206, 27389, 796, 705, 66, 15339, 6, 198, 11748, 10688, 198, 11748, 28034, 13, 40085, 355, 6436, 198, 6738, 2746, 1330, 1635, 198, 11748, 28686, 198, 11748, 4866, 11, 308, 13344, 11, 2298, 293, 11, 640, 198, 7890, 6...
3.105263
76
# Copyright 2021 cstsunfu. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""schedulers"""

import importlib
import os
from dlk.utils.register import Register
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
import math

scheduler_config_register = Register("Schedule config register.")
scheduler_register = Register("Schedule register.")

# automatically import any Python files in the schedulers directory
schedulers_dir = os.path.dirname(__file__)
import_schedulers(schedulers_dir, "dlk.core.schedulers")
[ 2, 15069, 33448, 269, 6448, 403, 20942, 13, 1439, 2489, 10395, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, ...
3.579125
297
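The module calls `import_schedulers(...)`, which is not defined in the excerpt. In registry-based codebases this helper usually walks the package directory and imports every module so that the `@scheduler_register(...)` decorators execute at import time. A sketch of what it plausibly looks like, reconstructed from the common fairseq-style pattern rather than the project's verbatim code:

def import_schedulers(schedulers_dir, namespace):
    # Import every non-private .py file (or subpackage) under schedulers_dir
    # into `namespace` so registration decorators run as a side effect.
    for file in os.listdir(schedulers_dir):
        path = os.path.join(schedulers_dir, file)
        if (not file.startswith("_")
                and not file.startswith(".")
                and (file.endswith(".py") or os.path.isdir(path))):
            scheduler_name = file[: file.find(".py")] if file.endswith(".py") else file
            importlib.import_module(namespace + "." + scheduler_name)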
#   Copyright 2017 ProjectQ-Framework (www.projectq.ch)
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.

"""
Tests for projectq.backends._sim._simulator.py, using both the Python
and the C++ simulator as backends.
"""

import copy
import math
import cmath
import numpy
import pytest
import random
import scipy
import scipy.sparse
import scipy.sparse.linalg

from projectq import MainEngine
from projectq.cengines import (BasicEngine, BasicMapperEngine, DummyEngine,
                               LocalOptimizer, NotYetMeasuredError)
from projectq.ops import (All, Allocate, BasicGate, BasicMathGate, CNOT, C,
                          Command, H, Measure, QubitOperator, Rx, Ry, Rz, S,
                          TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap,
                          UniformlyControlledRy, UniformlyControlledRz)
from projectq.libs.math import (AddConstant, AddConstantModN, SubConstant,
                                SubConstantModN, MultiplyByConstantModN)
from projectq.meta import Compute, Uncompute, Control, Dagger, LogicalQubitIDTag
from projectq.types import WeakQubitRef
from projectq.backends import Simulator

tolerance = 1e-6


# NOTE: the mock gate bodies (and the Mock6QubitGate, MockNoMatrixGate and
# KQubitGate helpers used below) are elided in this sample.
class Mock1QubitGate(BasicGate):
    pass


def test_simulator_is_available(sim):
    backend = DummyEngine(save_commands=True)
    eng = MainEngine(backend, [])
    qubit = eng.allocate_qubit()
    Measure | qubit
    qubit[0].__del__()
    assert len(backend.received_commands) == 3

    # Test that allocate, measure, basic math, and deallocate are available.
    for cmd in backend.received_commands:
        assert sim.is_available(cmd)

    new_cmd = backend.received_commands[-1]

    new_cmd.gate = Mock6QubitGate()
    assert not sim.is_available(new_cmd)

    new_cmd.gate = MockNoMatrixGate()
    assert not sim.is_available(new_cmd)

    new_cmd.gate = Mock1QubitGate()
    assert sim.is_available(new_cmd)

    new_cmd = backend.received_commands[-2]
    assert len(new_cmd.qubits) == 1
    new_cmd.gate = AddConstantModN(1, 2)
    assert sim.is_available(new_cmd)

    new_cmd.gate = MultiplyByConstantModN(1, 2)
    assert sim.is_available(new_cmd)

    #new_cmd.gate = DivideByConstantModN(1, 2)
    #assert sim.is_available(new_cmd)


def test_simulator_cheat(sim):
    # cheat function should return a tuple
    assert isinstance(sim.cheat(), tuple)
    # first entry is the qubit mapping.
    # should be empty:
    assert len(sim.cheat()[0]) == 0
    # state vector should only have 1 entry:
    assert len(sim.cheat()[1]) == 1

    eng = MainEngine(sim, [])
    qubit = eng.allocate_qubit()

    # one qubit has been allocated
    assert len(sim.cheat()[0]) == 1
    assert sim.cheat()[0][0] == 0
    assert len(sim.cheat()[1]) == 2
    assert 1. == pytest.approx(abs(sim.cheat()[1][0]))

    qubit[0].__del__()
    # should be empty:
    assert len(sim.cheat()[0]) == 0
    # state vector should only have 1 entry:
    assert len(sim.cheat()[1]) == 1


def test_simulator_functional_measurement(sim):
    eng = MainEngine(sim, [])
    qubits = eng.allocate_qureg(5)
    # entangle all qubits:
    H | qubits[0]
    for qb in qubits[1:]:
        CNOT | (qubits[0], qb)

    All(Measure) | qubits

    bit_value_sum = sum([int(qubit) for qubit in qubits])
    assert bit_value_sum == 0 or bit_value_sum == 5


def test_simulator_measure_mapped_qubit(sim):
    eng = MainEngine(sim, [])
    qb1 = WeakQubitRef(engine=eng, idx=1)
    qb2 = WeakQubitRef(engine=eng, idx=2)
    cmd0 = Command(engine=eng, gate=Allocate, qubits=([qb1],))
    cmd1 = Command(engine=eng, gate=X, qubits=([qb1],))
    cmd2 = Command(engine=eng, gate=Measure, qubits=([qb1],), controls=[],
                   tags=[LogicalQubitIDTag(2)])
    with pytest.raises(NotYetMeasuredError):
        int(qb1)
    with pytest.raises(NotYetMeasuredError):
        int(qb2)
    eng.send([cmd0, cmd1, cmd2])
    eng.flush()
    with pytest.raises(NotYetMeasuredError):
        int(qb1)
    assert int(qb2) == 1


def test_simulator_kqubit_exception(sim):
    m1 = Rx(0.3).matrix
    m2 = Rx(0.8).matrix
    m3 = Ry(0.1).matrix
    m4 = Rz(0.9).matrix.dot(Ry(-0.1).matrix)
    m = numpy.kron(m4, numpy.kron(m3, numpy.kron(m2, m1)))

    # NOTE: the KQubitGate mock built from `m` is elided in this sample.
    eng = MainEngine(sim, [])
    qureg = eng.allocate_qureg(3)
    with pytest.raises(Exception):
        KQubitGate() | qureg
    with pytest.raises(Exception):
        H | qureg


def test_simulator_swap(sim):
    eng = MainEngine(sim, [])
    qubits1 = eng.allocate_qureg(1)
    qubits2 = eng.allocate_qureg(1)
    X | qubits1
    Swap | (qubits1, qubits2)
    All(Measure) | qubits1
    All(Measure) | qubits2
    assert (int(qubits1[0]) == 0) and (int(qubits2[0]) == 1)
    SqrtSwap | (qubits1, qubits2)
    SqrtSwap | (qubits1, qubits2)
    All(Measure) | qubits1
    All(Measure) | qubits2
    assert (int(qubits1[0]) == 1) and (int(qubits2[0]) == 0)


def test_simulator_math(sim):
    eng = MainEngine(sim, [])
    qubits = eng.allocate_qureg(8)
    AddConstant(1) | qubits;
    All(Measure) | qubits
    value = 0
    for i in range(len(qubits)):
        value += int(qubits[i]) << i
    assert value == 1

    AddConstantModN(10, 256) | qubits;
    All(Measure) | qubits
    value = 0
    for i in range(len(qubits)):
        value += int(qubits[i]) << i
    assert value == 11

    controls = eng.allocate_qureg(1)
    # Control is off
    C(AddConstantModN(10, 256)) | (controls, qubits)
    All(Measure) | qubits
    value = 0
    for i in range(len(qubits)):
        value += int(qubits[i]) << i
    assert value == 11
    # Turn control on
    X | controls
    C(AddConstantModN(10, 256)) | (controls, qubits)
    All(Measure) | qubits
    value = 0
    for i in range(len(qubits)):
        value += int(qubits[i]) << i
    assert value == 21

    SubConstant(5) | qubits;
    All(Measure) | qubits
    value = 0
    for i in range(len(qubits)):
        value += int(qubits[i]) << i
    assert value == 16

    C(SubConstantModN(10, 256)) | (controls, qubits)
    All(Measure) | qubits
    value = 0
    for i in range(len(qubits)):
        value += int(qubits[i]) << i
    assert value == 6
    # Turn control off
    X | controls
    C(SubConstantModN(10, 256)) | (controls, qubits)
    All(Measure) | qubits
    value = 0
    for i in range(len(qubits)):
        value += int(qubits[i]) << i
    assert value == 6

    MultiplyByConstantModN(2, 256) | qubits;
    All(Measure) | qubits
    value = 0
    for i in range(len(qubits)):
        value += int(qubits[i]) << i
    assert value == 12

    # Control is off
    C(MultiplyByConstantModN(2, 256)) | (controls, qubits)
    All(Measure) | qubits
    value = 0
    for i in range(len(qubits)):
        value += int(qubits[i]) << i
    assert value == 12
    # Turn control on
    X | controls
    C(MultiplyByConstantModN(10, 256)) | (controls, qubits)
    All(Measure) | qubits
    value = 0
    for i in range(len(qubits)):
        value += int(qubits[i]) << i
    assert value == 120


def test_simulator_probability(sim, mapper):
    engine_list = [LocalOptimizer()]
    if mapper is not None:
        engine_list.append(mapper)
    eng = MainEngine(sim, engine_list=engine_list)
    qubits = eng.allocate_qureg(6)
    All(H) | qubits
    eng.flush()
    bits = [0, 0, 1, 0, 1, 0]
    for i in range(6):
        assert (eng.backend.get_probability(bits[:i], qubits[:i]) ==
                pytest.approx(0.5**i))
    extra_qubit = eng.allocate_qubit()
    with pytest.raises(RuntimeError):
        eng.backend.get_probability([0], extra_qubit)
    del extra_qubit
    All(H) | qubits
    Ry(2 * math.acos(math.sqrt(0.3))) | qubits[0]
    eng.flush()
    assert eng.backend.get_probability([0], [qubits[0]]) == pytest.approx(0.3)
    Ry(2 * math.acos(math.sqrt(0.4))) | qubits[2]
    eng.flush()
    assert eng.backend.get_probability([0], [qubits[2]]) == pytest.approx(0.4)
    assert (numpy.isclose(0.12, eng.backend.get_probability([0, 0], qubits[:3:2]),
                          rtol=tolerance, atol=tolerance))
    assert (numpy.isclose(0.18, eng.backend.get_probability([0, 1], qubits[:3:2]),
                          rtol=tolerance, atol=tolerance))
    assert (numpy.isclose(0.28, eng.backend.get_probability([1, 0], qubits[:3:2]),
                          rtol=tolerance, atol=tolerance))
    All(Measure) | qubits


def test_simulator_amplitude(sim, mapper):
    engine_list = [LocalOptimizer()]
    if mapper is not None:
        engine_list.append(mapper)
    eng = MainEngine(sim, engine_list=engine_list)
    qubits = eng.allocate_qureg(6)
    All(X) | qubits
    All(H) | qubits
    eng.flush()
    bits = [0, 0, 1, 0, 1, 0]
    polR, polPhi = cmath.polar(eng.backend.get_amplitude(bits, qubits))
    while polPhi < 0:
        polPhi += 2 * math.pi
    assert polR == pytest.approx(1. / 8.)
    bits = [0, 0, 0, 0, 1, 0]
    polR2, polPhi2 = cmath.polar(eng.backend.get_amplitude(bits, qubits))
    while polPhi2 < math.pi:
        polPhi2 += 2 * math.pi
    assert polR2 == pytest.approx(polR)
    assert (polPhi2 - math.pi) == pytest.approx(polPhi)
    bits = [0, 1, 1, 0, 1, 0]
    polR3, polPhi3 = cmath.polar(eng.backend.get_amplitude(bits, qubits))
    while polPhi3 < math.pi:
        polPhi3 += 2 * math.pi
    assert polR3 == pytest.approx(polR)
    assert (polPhi3 - math.pi) == pytest.approx(polPhi)
    All(H) | qubits
    All(X) | qubits
    Ry(2 * math.acos(0.3)) | qubits[0]
    eng.flush()
    bits = [0] * 6
    polR, polPhi = cmath.polar(eng.backend.get_amplitude(bits, qubits))
    assert polR == pytest.approx(0.3)
    bits[0] = 1
    polR, polPhi = cmath.polar(eng.backend.get_amplitude(bits, qubits))
    assert (polR == pytest.approx(math.sqrt(0.91)))
    All(Measure) | qubits
    # raises if not all qubits are in the list:
    with pytest.raises(RuntimeError):
        eng.backend.get_amplitude(bits, qubits[:-1])
    # doesn't just check for length:
    with pytest.raises(RuntimeError):
        eng.backend.get_amplitude(bits, qubits[:-1] + [qubits[0]])
    extra_qubit = eng.allocate_qubit()
    eng.flush()
    # there is a new qubit now!
    with pytest.raises(RuntimeError):
        eng.backend.get_amplitude(bits, qubits)


def test_simulator_set_wavefunction(sim, mapper):
    engine_list = [LocalOptimizer()]
    if mapper is not None:
        engine_list.append(mapper)
    eng = MainEngine(sim, engine_list=engine_list)
    qubits = eng.allocate_qureg(2)
    wf = [0., 0., math.sqrt(0.2), math.sqrt(0.8)]
    with pytest.raises(RuntimeError):
        eng.backend.set_wavefunction(wf, qubits)
    eng.flush()
    eng.backend.set_wavefunction(wf, qubits)
    assert pytest.approx(eng.backend.get_probability('1', [qubits[0]])) == .8
    assert pytest.approx(eng.backend.get_probability('01', qubits)) == .2
    assert pytest.approx(eng.backend.get_probability('1', [qubits[1]])) == 1.
    All(Measure) | qubits


def test_simulator_set_wavefunction_always_complex(sim):
    """ Checks that wavefunction is always complex """
    eng = MainEngine(sim)
    qubit = eng.allocate_qubit()
    eng.flush()
    wf = [1., 0]
    eng.backend.set_wavefunction(wf, qubit)
    Y | qubit
    eng.flush()
    amplitude = eng.backend.get_amplitude('1', qubit)
    assert amplitude == pytest.approx(1j) or amplitude == pytest.approx(-1j)


def test_simulator_collapse_wavefunction(sim, mapper):
    engine_list = [LocalOptimizer()]
    if mapper is not None:
        engine_list.append(mapper)
    eng = MainEngine(sim, engine_list=engine_list)
    qubits = eng.allocate_qureg(4)
    # unknown qubits: raises
    with pytest.raises(RuntimeError):
        eng.backend.collapse_wavefunction(qubits, [0] * 4)
    eng.flush()
    eng.backend.collapse_wavefunction(qubits, [0] * 4)
    assert pytest.approx(eng.backend.get_probability([0] * 4, qubits)) == 1.
    All(H) | qubits[1:]
    eng.flush()
    assert pytest.approx(eng.backend.get_probability([0] * 4, qubits)) == .125
    # impossible outcome: raises
    with pytest.raises(RuntimeError):
        eng.backend.collapse_wavefunction(qubits, [1] + [0] * 3)
    eng.backend.collapse_wavefunction(qubits[:-1], [0, 1, 0])
    probability = eng.backend.get_probability([0, 1, 0, 1], qubits)
    assert probability == pytest.approx(.5)
    eng.backend.set_wavefunction([1.] + [0.] * 15, qubits)
    H | qubits[0]
    CNOT | (qubits[0], qubits[1])
    eng.flush()
    eng.backend.collapse_wavefunction([qubits[0]], [1])
    probability = eng.backend.get_probability([1, 1], qubits[0:2])
    assert probability == pytest.approx(1.)


def test_simulator_no_uncompute_exception(sim):
    eng = MainEngine(sim, [])
    qubit = eng.allocate_qubit()
    H | qubit
    with pytest.raises(RuntimeError):
        qubit[0].__del__()
    # If you wanted to keep using the qubit, you shouldn't have deleted it.
    assert qubit[0].id == -1


def test_simulator_functional_entangle(sim):
    eng = MainEngine(sim, [])
    qubits = eng.allocate_qureg(5)
    # entangle all qubits:
    H | qubits[0]
    for qb in qubits[1:]:
        CNOT | (qubits[0], qb)

    # check the state vector:
    assert .5 == pytest.approx(abs(sim.cheat()[1][0])**2, rel=tolerance,
                               abs=tolerance)
    assert .5 == pytest.approx(abs(sim.cheat()[1][31])**2, rel=tolerance,
                               abs=tolerance)
    for i in range(1, 31):
        assert 0. == pytest.approx(abs(sim.cheat()[1][i]), rel=tolerance,
                                   abs=tolerance)

    # unentangle all except the first 2
    for qb in qubits[2:]:
        CNOT | (qubits[0], qb)

    # entangle using Toffolis
    for qb in qubits[2:]:
        Toffoli | (qubits[0], qubits[1], qb)

    # check the state vector:
    assert .5 == pytest.approx(abs(sim.cheat()[1][0])**2, rel=tolerance,
                               abs=tolerance)
    assert .5 == pytest.approx(abs(sim.cheat()[1][31])**2, rel=tolerance,
                               abs=tolerance)
    for i in range(1, 31):
        assert 0. == pytest.approx(abs(sim.cheat()[1][i]), rel=tolerance,
                                   abs=tolerance)

    # uncompute using multi-controlled NOTs
    with Control(eng, qubits[0:-1]):
        X | qubits[-1]
    with Control(eng, qubits[0:-2]):
        X | qubits[-2]
    with Control(eng, qubits[0:-3]):
        X | qubits[-3]
    CNOT | (qubits[0], qubits[1])
    H | qubits[0]

    # check the state vector:
    assert 1. == pytest.approx(abs(sim.cheat()[1][0])**2, rel=tolerance,
                               abs=tolerance)
    for i in range(1, 32):
        assert 0. == pytest.approx(abs(sim.cheat()[1][i]), rel=tolerance,
                                   abs=tolerance)

    All(Measure) | qubits


def test_simulator_convert_logical_to_mapped_qubits(sim):
    mapper = BasicMapperEngine()

    def receive(command_list):
        # no-op receive (the original helper is elided in this sample)
        pass

    mapper.receive = receive
    eng = MainEngine(sim, [mapper])
    qubit0 = eng.allocate_qubit()
    qubit1 = eng.allocate_qubit()
    mapper.current_mapping = {qubit0[0].id: qubit1[0].id,
                              qubit1[0].id: qubit0[0].id}
    assert (sim._convert_logical_to_mapped_qureg(qubit0 + qubit1) ==
            qubit1 + qubit0)


def slow_implementation(angles, control_qubits, target_qubit, eng, gate_class):
    """
    Assumption is that control_qubits[0] is lowest order bit
    We apply angles[0] to state |0>
    """
    assert len(angles) == 2**len(control_qubits)
    for index in range(2**len(control_qubits)):
        with Compute(eng):
            for bit_pos in range(len(control_qubits)):
                if not (index >> bit_pos) & 1:
                    X | control_qubits[bit_pos]
        with Control(eng, control_qubits):
            gate_class(angles[index]) | target_qubit
        Uncompute(eng)
[ 2, 220, 220, 15069, 2177, 4935, 48, 12, 21055, 6433, 357, 2503, 13, 16302, 80, 13, 354, 8, 198, 2, 198, 2, 220, 220, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 220, 220, 345, 743...
2.249788
7,090
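The test functions above take `sim` and `mapper` parameters, but the pytest fixtures providing them are elided from this sample. Minimal stand-ins consistent with the signatures; the real file parametrizes over both the Python and C++ simulators and over mapper variants, so this is a reduced sketch:

import pytest
from projectq.backends import Simulator

@pytest.fixture
def sim():
    # Python simulator only; the original also exercises the C++ backend.
    return Simulator()

@pytest.fixture
def mapper():
    # The tests accept None and simply skip appending a mapper engine.
    return None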
""" app.deps ======== Register dependencies that are not part of a ``Flask`` extension. """ from flask import Flask from redis import Redis from rq import Queue def init_app(app: Flask) -> None: """Register application helpers that are not ``Flask-`` extensions. As these are not ``Flask`` extensions they do not have an ``init_app`` method, and so can be attached to the app by declaring them as instance attributes. .. todo:: These are not declared in ``__init__`` and are a bit of a code-smell. Using ``flask.g`` may be more appropriate... :param app: Application factory object. """ app.redis = Redis.from_url(app.config["REDIS_URL"]) # type: ignore app.task_queue = Queue("jss-tasks", connection=app.redis) # type: ignore
[ 37811, 198, 1324, 13, 10378, 82, 198, 2559, 198, 198, 38804, 20086, 326, 389, 407, 636, 286, 257, 7559, 7414, 2093, 15506, 7552, 13, 198, 37811, 198, 6738, 42903, 1330, 46947, 198, 6738, 2266, 271, 1330, 2297, 271, 198, 6738, 374, 80,...
3.066929
254
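With the queue attached in `init_app`, any view or CLI command can hand work to an RQ worker through the instance attribute. A usage sketch; the task's dotted path is an assumption:

from flask import current_app

def schedule_sync(job_id: str) -> None:
    # Enqueue a background job on the "jss-tasks" queue registered in init_app.
    # RQ accepts a dotted-path string so the worker imports the function itself.
    current_app.task_queue.enqueue("app.tasks.sync_device", job_id)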
from util.args import *
from util.logger import Logger
[ 6738, 7736, 13, 22046, 1330, 1635, 198, 6738, 7736, 13, 6404, 1362, 1330, 5972, 1362, 198 ]
3.4375
16
# from wx.lib.pubsub import pub
from pubsub import pub
import serial
import threading
import queue
import time
[ 2, 422, 266, 87, 13, 8019, 13, 12984, 7266, 1330, 2240, 198, 6738, 2240, 7266, 1330, 2240, 198, 11748, 11389, 198, 11748, 4704, 278, 198, 11748, 16834, 198, 11748, 640, 628 ]
3.612903
31
from flask import Flask
from datadog import statsd
import logging

import os

# This is a small example application
# It uses tracing and dogstatsd on a sample flask application

log = logging.getLogger("app")

app = Flask(__name__)

# The app has two routes, a basic endpoint and an exception endpoint

# This is meant to be run directly, instead of executed through flask run
if __name__ == '__main__':
    # It grabs the host and port from the environment
    port = 5001
    host = '0.0.0.0'
    if os.environ.get('HOST'):
        host = os.environ.get('HOST')
    if os.environ.get('PORT'):
        port = os.environ.get('PORT')
    app.run(debug=True, host=host, port=port)
[ 6738, 42903, 1330, 46947, 198, 6738, 4818, 324, 519, 1330, 9756, 67, 198, 11748, 18931, 198, 198, 11748, 28686, 198, 198, 2, 770, 318, 257, 1402, 1672, 3586, 198, 2, 632, 3544, 35328, 290, 3290, 34242, 67, 319, 257, 6291, 42903, 3586,...
2.881356
236
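The comments promise two routes, but they are missing from the sample. A sketch of what they plausibly look like, with a DogStatsD counter added since `statsd` is imported; the route paths and metric name are assumptions:

@app.route("/")
def index():
    # Count hits so they show up in Datadog alongside the traces.
    statsd.increment("example_app.index.requests")
    return "hello world"


@app.route("/error")
def error():
    # Deliberately raises so error tracking can be exercised.
    raise Exception("a sample exception endpoint")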
# Let's create a linked list that has the following elements
'''
1. FE
2. SE
3. TE
4. BE
'''
# Creating a Node class to create individual Nodes

number_list = LinkedList()
number_list.add("FE")
number_list.add("SE")
number_list.add("TE")
number_list.add("BE")
[ 2, 38257, 2251, 257, 6692, 1351, 326, 468, 262, 1708, 4847, 220, 201, 198, 7061, 6, 201, 198, 16, 13, 18630, 201, 198, 17, 13, 7946, 201, 198, 18, 13, 13368, 201, 198, 19, 13, 9348, 201, 198, 7061, 6, 201, 198, 201, 198, 2, 30...
2.346774
124
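The comment announces a Node class, but neither `Node` nor `LinkedList` appears in the sample, so it fails with NameError as written. A minimal pair of classes consistent with the `add` calls; the implementation details are assumed, and `add` prepends at the head here for O(1) inserts:

class Node:
    def __init__(self, data):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def add(self, data):
        # Prepend: the most recently added element becomes the new head.
        node = Node(data)
        node.next = self.head
        self.head = node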
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch
import torch.nn as nn

from monai.utils import optional_import

Rearrange, _ = optional_import("einops.layers.torch", name="Rearrange")
[ 2, 15069, 357, 66, 8, 25000, 20185, 42727, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 198, 2, 921, 743...
3.693878
196
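`optional_import` defers the ImportError until the symbol is actually used, and the imported `Rearrange` is an `nn.Module` wrapper around an einops pattern. A sketch of how such a layer is typically used in a ViT-style patch embedding; the shapes are illustrative:

import torch
import torch.nn as nn
from einops.layers.torch import Rearrange

# Flatten 2x2 patches of a 1-channel image into a token sequence.
to_tokens = nn.Sequential(
    Rearrange("b c (h p1) (w p2) -> b (h w) (p1 p2 c)", p1=2, p2=2),
)
tokens = to_tokens(torch.randn(1, 1, 4, 4))
print(tokens.shape)  # torch.Size([1, 4, 4]): 4 patches, 4 values each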
import pytest

from opentrons.commands import protocol_commands


def test_delay_with_message():
    """It should allow a message to be appended to the delay text."""
    command = protocol_commands.delay(seconds=1, minutes=1, msg="Waiting...")
    assert command["payload"]["text"] == (
        "Delaying for 1 minutes and 1.0 seconds. Waiting..."
    )
[ 11748, 12972, 9288, 198, 6738, 1034, 298, 12212, 13, 9503, 1746, 1330, 8435, 62, 9503, 1746, 628, 198, 198, 4299, 1332, 62, 40850, 62, 4480, 62, 20500, 33529, 198, 220, 220, 220, 37227, 1026, 815, 1249, 257, 3275, 284, 307, 598, 1631,...
2.991597
119
from unittest.mock import patch

import pytest

from just_bin_it.endpoints.sources import HistogramSource
from tests.doubles.consumer import StubConsumer

TEST_MESSAGE = b"this is a byte message"
INVALID_FB = b"this is an invalid fb message"
[ 6738, 555, 715, 395, 13, 76, 735, 1330, 8529, 198, 198, 11748, 12972, 9288, 198, 198, 6738, 655, 62, 8800, 62, 270, 13, 437, 13033, 13, 82, 2203, 1330, 5590, 21857, 7416, 198, 6738, 5254, 13, 67, 280, 7689, 13, 49827, 1330, 41135, ...
3.0375
80
import getopt
import sys

from libcloud.compute.types import NodeState

from lc import get_lc
from printer import Printer


def lister_main(what, resource=None, extension=False, supports_location=False,
                **kwargs):
    """Shortcut for main() routine for lister tools, e.g. lc-SOMETHING-list

    @param what: what we are listing, e.g. 'nodes'
    @param extension: is it an extension of core libcloud functionality?
    @param kwargs: additional arguments for the call
    @type what: C{string}
    @param supports_location: tells that objects we listing could be filtered
                              by location
    @type supports_location: C{bool}
    """
    list_method = "%slist_%s" % ({True: 'ex_', False: ''}[extension], what)
    profile = "default"
    format = location = None
    options = "f:p:"
    if supports_location:
        options += "l:"
    try:
        opts, args = getopt.getopt(sys.argv[1:], options)
    except getopt.GetoptError, err:
        sys.stderr.write("%s\n" % str(err))
        sys.exit(1)
    for o, a in opts:
        if o == "-f":
            format = a
        if o == "-p":
            profile = a
        if o == "-l":
            location = a
    try:
        conn = get_lc(profile, resource=resource)
        list_kwargs = kwargs
        if supports_location and location is not None:
            nodelocation = filter(lambda loc: str(loc.id) == location,
                                  conn.list_locations())[0]
            list_kwargs["location"] = nodelocation
        for node in getattr(conn, list_method)(**list_kwargs):
            Printer.do(node, format)
    except Exception, err:
        sys.stderr.write("Error: %s\n" % str(err))


def save_image_main():
    """Shortcut for main() routine for provider specific image save tools.
    """
    profile = 'default'
    name = node_id = None
    try:
        opts, args = getopt.getopt(sys.argv[1:], "i:n:p:")
    except getopt.GetoptError, err:
        sys.stderr.write("%s\n" % str(err))
        sys.exit(1)
    for o, a in opts:
        if o == "-i":
            node_id = a
        if o == "-n":
            name = a
        if o == "-p":
            profile = a
    if node_id is None or name is None:
        usage(sys.argv[0])
        sys.exit(1)
    conn = get_lc(profile)
    node = get_node_or_fail(conn, node_id, print_error_and_exit,
                            ("Error: cannot find node with id '%s'." % node_id,))
    Printer.do(conn.ex_save_image(node, name))


def get_node_or_fail(conn, node_id, coroutine=None, cargs=(), ckwargs={}):
    """Shortcut to get a single node by its id. In case when
    such node could not be found, coroutine could be called
    to handle such case. Typically coroutine will output an
    error message and exit from application.

    @param conn: libcloud connection handle
    @param node_id: id of the node to search for
    @param coroutine: a callable object to handle case
                      when node cannot be found
    @param cargs: positional arguments for coroutine
    @param kwargs: keyword arguments for coroutine
    @return: node object if found, None otherwise"""
    try:
        node = [node for node in conn.list_nodes()
                if str(node.id) == str(node_id)][0]
        return node
    except IndexError:
        if callable(coroutine):
            coroutine(*cargs, **ckwargs)
        return None
[ 11748, 651, 8738, 198, 11748, 25064, 198, 198, 6738, 9195, 17721, 13, 5589, 1133, 13, 19199, 1330, 19081, 9012, 198, 198, 6738, 300, 66, 1330, 651, 62, 44601, 198, 6738, 20632, 1330, 1736, 3849, 628, 198, 4299, 300, 1694, 62, 12417, 7...
2.345696
1,429
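`lister_main` is designed so that each `lc-*-list` command reduces to a couple of lines. A sketch of such an entry point; the script name and module path are assumptions, and the sketch stays in the module's Python 2 dialect:

#!/usr/bin/env python
# Hypothetical lc-nodes-list tool built on lister_main.
from shortcuts import lister_main

lister_main("nodes", supports_location=True)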
# -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2020, Caleb Bell <Caleb.Andrew.Bell@gmail.com>

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''

import pytest
from fluids.core import C2K
import thermo
from chemicals.utils import *
from thermo import *
from fluids.numerics import *
from math import *
import json
import os
import numpy as np
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 7061, 6, 41829, 605, 14044, 8495, 10074, 357, 1925, 1961, 43, 737, 41086, 329, 1429, 21128, 13, 198, 15269, 357, 34, 8, 12131, 11, 40777, 7459, 1279, 34, 32100, 13, 20...
3.720745
376
ten_things = "Apples Oranges cows Telephone Light Sugar" print ("Wait there are not 10 things in that list. Let's fix") stuff = ten_things.split(' ') more_stuff = {"Day", "Night", "Song", "Firebee", "Corn", "Banana", "Girl", "Boy"} while len(stuff) !=10: next_one = more_stuff.pop() print("Adding: ", next_one) stuff.append(next_one) print (f"There are {len(stuff)} items n ow.") print ("There we go : ", stuff) print ("Let's do some things with stuff.") print (stuff[1]) print (stuff[-1]) # whoa! cool! print (stuff.pop()) print (' '.join(stuff)) # what? cool ! print ('#'.join(stuff[3:5])) #super stealler!
[ 1452, 62, 27971, 796, 366, 4677, 829, 1471, 6231, 22575, 44735, 4401, 20874, 1, 198, 198, 4798, 5855, 21321, 612, 389, 407, 838, 1243, 287, 326, 1351, 13, 3914, 338, 4259, 4943, 198, 198, 41094, 796, 3478, 62, 27971, 13, 35312, 10786,...
2.748899
227
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *
[ 2, 15069, 2211, 12, 1238, 1828, 13914, 45036, 3549, 2351, 4765, 11, 11419, 290, 584, 198, 2, 1338, 441, 4935, 34152, 13, 4091, 262, 1353, 12, 5715, 27975, 38162, 9947, 2393, 329, 3307, 13, 198, 2, 198, 2, 30628, 55, 12, 34156, 12, ...
3.421875
64
#!/usr/bin/env python
"""
Copyright 2010-2019 University Of Southern California

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

 http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import division, print_function

# Import Python modules
import os
import sys
import math
import shutil

# Import Broadband modules
import plot_srf
import bband_utils
from irikura_gen_srf_cfg import IrikuraGenSrfCfg
from install_cfg import InstallCfg


if __name__ == "__main__":
    print("Testing Module: %s" % os.path.basename((sys.argv[0])))
    # NOTE: the IrikuraGenSrf class body is elided in this sample; only its
    # configuration counterpart (IrikuraGenSrfCfg) is imported above.
    ME = IrikuraGenSrf(sys.argv[1], sys.argv[2], sys.argv[3],
                       sys.argv[4], sim_id=int(sys.argv[5]))
    ME.run()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 37811, 198, 15269, 3050, 12, 23344, 2059, 3226, 8050, 3442, 198, 198, 26656, 15385, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 5832, 743, 407, 7...
3.089636
357
"""test_models.py: runs tests on the models for digit.""" import pytest from core.models import (Grade, Subject, Question, Comment, Option, Topic, Block, Syllabus, StateException, ) from django.test import TestCase from django.contrib.auth.models import User
[ 37811, 9288, 62, 27530, 13, 9078, 25, 4539, 5254, 319, 262, 4981, 329, 16839, 526, 15931, 198, 11748, 12972, 9288, 198, 6738, 4755, 13, 27530, 1330, 357, 42233, 11, 198, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 22...
1.630872
298