Dataset columns (per-row fields and their value ranges):

content           string    lengths 1 to 1.05M characters
input_ids         list      lengths 1 to 883k token ids
ratio_char_token  float64   values 1 to 22.9 (characters per token)
token_count       int64     values 1 to 883k
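Each row pairs a source file (content) with its tokenization (input_ids); ratio_char_token and token_count are derived from those two fields. A minimal sketch of how the derived columns can be recomputed, assuming a GPT-2 style tokenizer (ids such as 11748 for "import" and 198 for a newline in the rows below are consistent with the GPT-2 vocabulary, but the exact tokenizer is an assumption):

# Sketch: recompute the derived columns for one row.
# Assumption: GPT-2 tokenizer; the dump does not state which tokenizer produced input_ids.
from transformers import GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")

def derive_row(content: str) -> dict:
    input_ids = tokenizer(content)["input_ids"]
    return {
        "content": content,
        "input_ids": input_ids,
        "token_count": len(input_ids),
        # characters per token; low values indicate text that tokenizes poorly
        "ratio_char_token": len(content) / len(input_ids),
    }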
import googlemaps

gmaps = googlemaps.Client(key='google_key')
[ 11748, 23645, 31803, 198, 198, 70, 31803, 796, 23645, 31803, 13, 11792, 7, 2539, 11639, 13297, 62, 2539, 11537, 628 ]
3.2
20
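The input_ids of a row decode back to its content. A quick round-trip check on the first row, under the same GPT-2 tokenizer assumption as above:

# Decode the first row's ids back to source text (GPT-2 vocabulary assumed).
from transformers import GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
ids = [11748, 23645, 31803, 198, 198, 70, 31803, 796, 23645, 31803, 13,
       11792, 7, 2539, 11639, 13297, 62, 2539, 11537, 628]
print(tokenizer.decode(ids))  # should print the content field of the row above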
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# Imports
import os
import numpy as np
import tensorflow as tf


def run(model, X, Y, optimizer=None, nb_epochs=30, nb_batches=128):
    """ Run the estimator """
    if optimizer is None:
        # was tf.keras.estimators.SGD, which does not exist
        optimizer = tf.keras.optimizers.SGD(
            lr=0.0009, decay=1e-5, momentum=0.9, nesterov=True)

    # 1. Compile the model
    model.compile(
        optimizer=optimizer,
        loss='categorical_crossentropy',
        metrics=['accuracy'])

    # 2. Create an estimator
    model_est = tf.keras.estimator.model_to_estimator(
        keras_model=model, model_dir='./lenet')

    # Training
    # 3a. Create the training function
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={model.input_names[0]: X['train'].astype(np.float32)},
        y=Y['train'].astype(np.float32),
        batch_size=nb_batches,
        num_epochs=nb_epochs,
        shuffle=True
    )

    # 3b. Train the model
    model_est.train(input_fn=train_input_fn, steps=nb_epochs*nb_batches)

    # Evaluate
    # 4a. Create the evaluation function
    eval_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={model.input_names[0]: X['test'].astype(np.float32)},
        y=Y['test'].astype(np.float32),
        batch_size=nb_batches,
        num_epochs=nb_epochs,
        shuffle=True
    )

    # 4b. Evaluate the model
    model_eval = model_est.evaluate(input_fn=eval_input_fn)
    print(model_eval)
    return model_est, model_eval


def run_from_generator(
        model, input_func=None, input_func_dict=None, eval_func_dict=None,
        nb_epochs=10, optimizer=None, model_dir=None):
    """
    Overloaded function to create an estimator using tf.data.Dataset
    :param model : uncompiled keras model
    :param input_func : input function providing tf.data.Dataset to the estimator
    :param input_func_dict : dictionary containing input params for input_func
    :param eval_func_dict : dictionary containing params for eval input_func
    :param model_dir : directory to store the trained model
    """
    # 1. Create optimizer and compile model if optimizer is None
    if optimizer is None:
        optimizer = tf.keras.optimizers.SGD(
            lr=1e-3, decay=1e-5, momentum=0.9, nesterov=True)

    # 2. Compile the model
    model.compile(
        optimizer=optimizer,
        loss='categorical_crossentropy',
        metrics=['accuracy'])

    # 3. Create the estimator
    dir_path = os.path.join(os.getcwd(), model_dir)
    print("Model path chosen : ", dir_path)
    if not os.path.exists(dir_path):
        os.mkdir(dir_path)

    print("Creating estimator...")
    est = tf.keras.estimator.model_to_estimator(
        keras_model=model, model_dir=dir_path)

    # 4. Train and evaluate the model
    print("Training...")
    # training spec
    train_spec = tf.estimator.TrainSpec(
        input_fn=lambda: input_func(input_func_dict), max_steps=500)
    # evaluation spec
    eval_spec = tf.estimator.EvalSpec(
        input_fn=lambda: input_func(eval_func_dict))

    # Run the training
    model_est = tf.estimator.train_and_evaluate(est, train_spec, eval_spec)

    # est.train(input_fn=lambda: input_func(input_func_dict), steps=None)
    # est.evaluate(input_fn=lambda: input_func(eval_func_dict))

    return est
[ 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 198, 6738, 11593, 37443, 834, 1330, 7297, 198, 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 198, 2, 1846, 3742, 198, 11748, 28686, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 11...
2.280106
1,503
""" Represents an app archive. This is an app at rest, whether it's a naked app bundle in a directory, or a zipped app bundle, or an IPA. We have a common interface to extract these apps to a temp file, then resign them, and create an archive of the same type """ import abc import biplist from bundle import App, Bundle, is_info_plist_native from exceptions import MissingHelpers, NotSignable, NotMatched from distutils import spawn import logging import os from os.path import abspath, dirname, exists, isdir, isfile, join, normpath import tempfile import re from subprocess import call from signer import Signer import shutil import zipfile REMOVE_WATCHKIT = True helper_paths = {} log = logging.getLogger(__name__) def get_helper(helper_name): """ find paths to executables. Cached in helper_paths """ if helper_name not in helper_paths or helper_paths[helper_name] is None: # note, find_executable returns None is not found # in other words, we keep retrying until found helper_paths[helper_name] = spawn.find_executable(helper_name) log.debug("got executable {} for {}".format(helper_paths[helper_name], helper_name)) return helper_paths[helper_name] def get_watchkit_paths(root_bundle_path): """ collect sub-bundles of this bundle that have watchkit """ # typical structure: # # app_bundle # ... # some_directory # watchkit_extension <-- this is the watchkit bundle # Info.plist # watchkit_bundle <-- this is the part that runs on the Watch # Info.plist <-- WKWatchKitApp=True # watchkit_paths = [] for path, _, _ in os.walk(root_bundle_path): if path == root_bundle_path: continue try: bundle = Bundle(path) except NotMatched: # this directory is not a bundle continue if bundle.info.get('WKWatchKitApp') is True: # get the *containing* bundle watchkit_paths.append(dirname(path)) return watchkit_paths def process_watchkit(root_bundle_path, should_remove=False): """ Unfortunately, we currently can't sign WatchKit. If you don't care about watchkit functionality, it is generally harmless to remove it, so that's the default. Remove when https://github.com/saucelabs/isign/issues/20 is fixed """ watchkit_paths = get_watchkit_paths(root_bundle_path) if len(watchkit_paths) > 0: if should_remove: for path in watchkit_paths: log.warning("Removing WatchKit bundle {}".format(path)) shutil.rmtree(path) else: raise NotSignable("Cannot yet sign WatchKit bundles") def unarchive_to_temp(self): containing_dir = make_temp_dir() log.debug("unarchiving to temp... %s -> %s", self.path, containing_dir) shutil.rmtree(containing_dir) # quirk of copytree, top dir can't exist already shutil.copytree(self.path, containing_dir) process_watchkit(containing_dir, REMOVE_WATCHKIT) return UncompressedArchive(containing_dir, '.', self.__class__) class AppZipArchive(Archive): """ Just like an app, except it's zipped up, and when repackaged, should be re-zipped. 
""" app_dir_pattern = r'^([^/]+\.app/).*$' extensions = ['.zip'] helpers = ['zip', 'unzip'] def __init__(self, path): self.path = path zipfile_obj = zipfile.ZipFile(path) self.relative_bundle_dir = self.find_bundle_dir(zipfile_obj) self.bundle_info = self.get_info(self.relative_bundle_dir, zipfile_obj) def unarchive_to_temp(self): containing_dir = make_temp_dir() call([get_helper('unzip'), "-qu", self.path, "-d", containing_dir]) app_dir = abspath(join(containing_dir, self.relative_bundle_dir)) process_watchkit(app_dir, REMOVE_WATCHKIT) return UncompressedArchive(containing_dir, self.relative_bundle_dir, self.__class__) def archive_factory(path): """ Guess what kind of archive we are dealing with, return an archive object. Returns None if path did not match any archive type """ archive = None for cls in [IpaArchive, AppZipArchive, AppArchive]: if cls.precheck(path): archive = cls(path) log.debug("File %s matched as %s", path, cls.__name__) break return archive def view(input_path): if not exists(input_path): raise IOError("{0} not found".format(input_path)) ua = None bundle_info = None try: archive = archive_factory(input_path) if archive is None: raise NotMatched('No matching archive type found') ua = archive.unarchive_to_temp() bundle_info = ua.bundle.info finally: if ua is not None: ua.remove() return bundle_info def resign(input_path, certificate, key, apple_cert, provisioning_profile, output_path, info_props=None, alternate_entitlements_path=None): """ Unified interface to extract any kind of archive from a temporary file, resign it with these credentials, and create a similar archive for that resigned app """ if not exists(input_path): raise IOError("{0} not found".format(input_path)) log.debug('Signing with apple_cert: {}'.format(apple_cert)) log.debug('Signing with key: {}'.format(key)) log.debug('Signing with certificate: {}'.format(certificate)) log.debug('Signing with provisioning_profile: {}'.format(provisioning_profile)) signer = Signer(signer_cert_file=certificate, signer_key_file=key, apple_cert_file=apple_cert) ua = None bundle_info = None try: archive = archive_factory(input_path) if archive is None: raise NotSignable('No matching archive type found') ua = archive.unarchive_to_temp() if info_props: # Override info.plist props of the parent bundle ua.bundle.update_info_props(info_props) ua.bundle.resign(signer, provisioning_profile, alternate_entitlements_path) bundle_info = ua.bundle.info ua.archive(output_path) except NotSignable as e: msg = "Not signable: <{0}>: {1}\n".format(input_path, e) log.info(msg) raise finally: if ua is not None: ua.remove() return bundle_info
[ 37811, 1432, 6629, 281, 598, 15424, 13, 770, 318, 281, 598, 379, 1334, 11, 1771, 340, 338, 257, 12105, 198, 220, 220, 220, 598, 18537, 287, 257, 8619, 11, 393, 257, 1976, 3949, 598, 18537, 11, 393, 281, 27966, 13, 775, 423, 257, 1...
2.378505
2,782
from conan.tools.env import Environment


def runenv_from_cpp_info(conanfile, cpp_info):
    """ return an Environment deducing the runtime information from a cpp_info
    """
    dyn_runenv = Environment(conanfile)
    if cpp_info is None:  # This happens when the dependency is a private one = BINARY_SKIP
        return dyn_runenv
    if cpp_info.bin_paths:  # cpp_info.exes is not defined yet
        dyn_runenv.prepend_path("PATH", cpp_info.bin_paths)
    # If it is a build_require this will be the build-os, otherwise it will be the host-os
    if cpp_info.lib_paths:
        dyn_runenv.prepend_path("LD_LIBRARY_PATH", cpp_info.lib_paths)
        dyn_runenv.prepend_path("DYLD_LIBRARY_PATH", cpp_info.lib_paths)
    if cpp_info.framework_paths:
        dyn_runenv.prepend_path("DYLD_FRAMEWORK_PATH", cpp_info.framework_paths)
    return dyn_runenv
[ 6738, 369, 272, 13, 31391, 13, 24330, 1330, 9344, 628, 198, 4299, 1057, 24330, 62, 6738, 62, 20322, 62, 10951, 7, 1102, 272, 7753, 11, 269, 381, 62, 10951, 2599, 198, 220, 220, 220, 37227, 1441, 281, 9344, 4648, 25648, 262, 19124, 1...
2.438746
351
""" ********************************************************************************** List model ********************************************************************************** """ from enum import Enum from dataclasses import dataclass from uuid import UUID from datetime import datetime
[ 37811, 198, 17174, 17174, 8412, 1174, 198, 8053, 2746, 198, 17174, 17174, 8412, 1174, 198, 37811, 198, 198, 6738, 33829, 1330, 2039, 388, 198, 6738, 4818, 330, 28958, 1330, 4818, 330, 31172, 198, 6738, 334, 27112, 1330, 471, 27586, 198, ...
6.276596
47
# -*- coding: utf-8 -*-
"""
Automation task as an AppDaemon App for Home Assistant -
current meter PEAK POWER notifications
"""
import datetime as dt
from enum import IntEnum

import appdaemon.plugins.hass.hassapi as hass

LOG_LEVEL = "INFO"
LOG_LEVEL_ALERT = "WARNING"
LOGGER = "special_event_log"

COEF_CRITICAL_LIMIT = 1.1  # 10% over limit
MIN_TIME_TURN_OFF_AC = 60  # secs

# Big power consumers
BIG_CONSUMER_1_CLIMATE = "switch.ac_dry_contact"
BIG_CONSUMER_1_LABEL = "aire acondicionado"
BIG_CONSUMER_2 = "switch.calentador"
BIG_CONSUMER_2_LABEL = "calentador"

_IOS_SOUND_POWER_PEAK = "US-EN-Morgan-Freeman-Vacate-The-Premises.wav"


# noinspection PyClassHasNoInit
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 38062, 341, 4876, 355, 257, 2034, 26531, 7966, 2034, 329, 5995, 15286, 532, 198, 14421, 16430, 18468, 10206, 40295, 19605, 198, 37811, 198, 11748, 4818, 8079, ...
2.415771
279
# -*- test-case-name: twisted.internet.test -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
This module provides support for Twisted to interact with the glib/gtk2
mainloop.

In order to use this support, simply do the following::

    |  from twisted.internet import gtk2reactor
    |  gtk2reactor.install()

Then use twisted.internet APIs as usual.  The other methods here are not
intended to be called directly.

When installing the reactor, you can choose whether to use the glib
event loop or the GTK+ event loop which is based on it but adds GUI
integration.
"""

# System Imports
import sys, signal

from zope.interface import implements

try:
    if not hasattr(sys, 'frozen'):
        # Don't want to check this for py2exe
        import pygtk
        pygtk.require('2.0')
except (ImportError, AttributeError):
    pass  # maybe we're using pygtk before this hack existed.

import gobject
if hasattr(gobject, "threads_init"):
    # recent versions of python-gtk expose this. python-gtk=2.4.1
    # (wrapping glib-2.4.7) does. python-gtk=2.0.0 (wrapping
    # glib-2.2.3) does not.
    gobject.threads_init()

# Twisted Imports
from twisted.python import log, runtime, failure
from twisted.python.compat import set
from twisted.internet.interfaces import IReactorFDSet
from twisted.internet import main, base, posixbase, error, selectreactor

POLL_DISCONNECTED = gobject.IO_HUP | gobject.IO_ERR | gobject.IO_NVAL

# glib's iochannel sources won't tell us about any events that we haven't
# asked for, even if those events aren't sensible inputs to the poll()
# call.
INFLAGS = gobject.IO_IN | POLL_DISCONNECTED
OUTFLAGS = gobject.IO_OUT | POLL_DISCONNECTED


def install(useGtk=True):
    """
    Configure the twisted mainloop to be run inside the gtk mainloop.

    @param useGtk: should glib rather than GTK+ event loop be
        used (this will be slightly faster but does not support GUI).
    """
    reactor = Gtk2Reactor(useGtk)
    from twisted.internet.main import installReactor
    installReactor(reactor)
    return reactor


def portableInstall(useGtk=True):
    """
    Configure the twisted mainloop to be run inside the gtk mainloop.
    """
    reactor = PortableGtkReactor()
    from twisted.internet.main import installReactor
    installReactor(reactor)
    return reactor


if runtime.platform.getType() != 'posix':
    install = portableInstall

__all__ = ['install']
[ 2, 532, 9, 12, 1332, 12, 7442, 12, 3672, 25, 19074, 13, 37675, 13, 9288, 532, 9, 12, 198, 2, 15069, 357, 66, 8, 40006, 24936, 46779, 13, 198, 2, 4091, 38559, 24290, 329, 3307, 13, 628, 198, 37811, 198, 1212, 8265, 3769, 1104, 32...
2.979268
820
""" Setup: - Import Libraries - Setup tf on multiple cores - Import Data """ import pandas as pd import numpy as np import tensorflow as tf import seaborn as sns from time import time import multiprocessing import random import os from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, LSTM, ConvLSTM2D, Flatten from sklearn.preprocessing import StandardScaler from sklearn.linear_model import LinearRegression from sklearn.ensemble import RandomForestRegressor from joblib import dump, load from mod.prep import log_return, log_return_np, preprocess from mod.model import return_pred from mod.eval import evaluate_regression, evaluate_up_down cores = multiprocessing.cpu_count() tf.config.threading.set_inter_op_parallelism_threads(cores-1) root_folder = "data" wide_close = pd.read_csv(root_folder + "/working/wide_close.csv") wide_target = pd.read_csv(root_folder + "/working/wide_target.csv") asset_details = pd.read_csv(root_folder + "/asset_details.csv") assets = [str(i) for i in asset_details["Asset_ID"]] """ Preprocess """ close_returns = wide_close[assets].apply(log_return) close_returns["time"] = wide_close["time"] close_returns[assets] = close_returns[assets].replace([np.inf,-np.inf],np.nan) """ Linear Regression """ x_steps, y_steps = 60, [1, 15] col_in, col_out = "1", "1" train_x, test_x, train_y, test_y, time_d = preprocess(data_in = wide_close, col_in, col_out, time_col="time", x_steps, y_steps) # 1 step lr_1 = LinearRegression() lr_1.fit(train_x.reshape(-1, x_steps), train_y[:,0,:].reshape(-1, 1)) true, pred = return_pred(test_x, test_y[:,0,:], lr_1) evaluate_regression(true, pred) evaluate_up_down(true, pred) # 15 step lr_15 = LinearRegression() lr_15.fit(train_x.reshape(-1, x_steps), train_y[:,1,:].reshape(-1, 1)) true, pred = return_pred(test_x, test_y[:,1,:], lr_1) evaluate_regression(true, pred) evaluate_up_down(true, pred) """ calculate and store components seperately process: - first, get rolling values for each timestamp - then, predict 1 and 15 gaps and store in array """ # Production """ Steps: - Get train, val test and test indices. Importantly, this needs to cover all assets (even though not all assets exist) for the whole time period. 
- Build models """ assets = list(asset_details["Asset_ID"].astype(str)) # Get indexes i = np.select( [ (wide_close.index >= 0) & (wide_close.index <= (len(wide_close)*0.7)), (wide_close.index > (len(wide_close)*0.7)) & (wide_close.index <= (len(wide_close)*0.8)) ], ["train", "val"], default = "test") indexes = pd.DataFrame({"time":wide_close["time"], "set":i}) for a in assets: print("asset", a) filt = indexes["set"][~pd.isna(wide_close[a])] counts = filt.value_counts() df = pd.DataFrame({"counts":counts, "pct":counts/np.sum(counts)}) print(df, "\n\n") indexes_d = {} for s in indexes["set"].unique(): indexes_d[s] = indexes["time"][indexes["set"] == s] mkdir "model_files" mkdir "model_files/linear_regression" for a in assets: print("Asset", a) x_steps, y_steps = 60, [1, 16] cols_in, cols_out = a, a train_x, test_x, train_y, test_y, time_d = preprocess(wide_close, cols_in, cols_out, "time", x_steps, y_steps) # 1 step lr_1 = LinearRegression() lr_1.fit(train_x.reshape(-1, x_steps), train_y[:,0,:].reshape(-1, 1)) true, pred = return_pred(test_x, test_y[:,0,:], lr_1) print("Model 1 Metrics") evaluate_regression(true, pred) evaluate_up_down(true, pred) # 16 step lr_16 = LinearRegression() lr_16.fit(train_x.reshape(-1, x_steps), train_y[:,1,:].reshape(-1, 1)) true, pred = return_pred(test_x, test_y[:,1,:], lr_16) print("Model 16 Metrics") evaluate_regression(true, pred) evaluate_up_down(true, pred) dump(lr_1, f"model_files/linear_regression/lr_{a}_1") dump(lr_16, f"model_files/linear_regression/lr_{a}_16") dump(time_d, "model_files/linear_regression/lr_times") """ Random Forest """ rf = RandomForestRegressor(n_jobs=-1) # start = time.time() rf.fit(train_x.reshape(-1, x_steps), train_y.reshape(-1)) # print("Took:", round(start-time.time()))
[ 37811, 198, 40786, 25, 198, 220, 220, 220, 532, 17267, 46267, 198, 220, 220, 220, 532, 31122, 48700, 319, 3294, 21758, 198, 220, 220, 220, 532, 17267, 6060, 198, 37811, 198, 198, 11748, 19798, 292, 355, 279, 67, 198, 11748, 299, 32152...
2.360976
1,845
# This example shows how to use inline keyboards and process button presses
import telebot
import time
from telebot.types import InlineKeyboardMarkup, InlineKeyboardButton
import os, sys
from PIL import Image, ImageDraw, ImageFont
import random

TELEGRAM_TOKEN = '1425859530:AAF5MQE87Zg_bv3B2RLe3Vl2A5rMz6vYpsA'
bot = telebot.TeleBot(TELEGRAM_TOKEN)
channelId = -1001390673326
user_dict = {}

bot.polling(none_stop=True)
[ 2, 770, 1672, 905, 703, 284, 779, 26098, 34512, 290, 1429, 4936, 31048, 201, 198, 11748, 5735, 13645, 201, 198, 11748, 640, 201, 198, 6738, 5735, 13645, 13, 19199, 1330, 554, 1370, 9218, 3526, 9704, 929, 11, 554, 1370, 9218, 3526, 218...
2.525714
175
from rectangle2 import rectangle_area
[ 6738, 35991, 17, 1330, 35991, 62, 20337, 198 ]
4.75
8
# -*- coding: utf-8 -*-

from unittest import TestCase, TestLoader

from radio import (Radio, ListenerNotFound, ReplyHandlerAlreadyBound,
                   HandlerAlreadyBound)

suite = TestLoader().loadTestsFromTestCase(TestRadioRequestReplyMethods)
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 6738, 555, 715, 395, 1330, 6208, 20448, 11, 6208, 17401, 198, 198, 6738, 5243, 1330, 357, 26093, 11, 7343, 877, 3673, 21077, 11, 14883, 25060, 37447, 49646, 11, 198...
2.865169
89
import sys
from typing import Sequence

import pytest

from jina import Request, QueryLang, Document
from jina.clients.request import request_generator
from jina.proto import jina_pb2
from jina.proto.jina_pb2 import EnvelopeProto
from jina.types.message import Message
from jina.types.request import _trigger_fields
from tests import random_docs
[ 11748, 25064, 198, 6738, 19720, 1330, 45835, 198, 198, 11748, 12972, 9288, 198, 198, 6738, 474, 1437, 1330, 19390, 11, 43301, 43, 648, 11, 16854, 198, 6738, 474, 1437, 13, 565, 2334, 13, 25927, 1330, 2581, 62, 8612, 1352, 198, 6738, 4...
3.324074
108
import os
import sys

DIR_OF_THIS_SCRIPT = os.path.abspath( os.path.dirname( __file__ ) )
[ 11748, 28686, 198, 11748, 25064, 198, 198, 34720, 62, 19238, 62, 43559, 62, 6173, 46023, 796, 28686, 13, 6978, 13, 397, 2777, 776, 7, 28686, 13, 6978, 13, 15908, 3672, 7, 11593, 7753, 834, 1267, 1267, 628 ]
2.459459
37
from distutils.core import setup

setup(
    name='utils',
    version='1.0.0',
    author='Mirco Tracolli',
    author_email='mirco.tracolli@pg.infn.it',
    packages=[
        'utils',
    ],
    scripts=[],
    url='https://github.com/Cloud-PG/smart-cache',
    license='Apache 2.0 License',
    description='Utils for the SmartCache project',
    long_description="To do...",
    install_requires=open("requirements.txt").read(),
    classifiers=[
        "Operating System :: POSIX :: Linux",
        "License :: OSI Approved :: Apache 2.0 License",
        "Programming Language :: Python :: 3 :: Only"
    ]
)
[ 6738, 1233, 26791, 13, 7295, 1330, 9058, 198, 198, 40406, 7, 198, 220, 220, 220, 1438, 11639, 26791, 3256, 198, 220, 220, 220, 2196, 11639, 16, 13, 15, 13, 15, 3256, 198, 220, 220, 220, 1772, 11639, 27453, 1073, 833, 330, 692, 72, ...
2.504065
246
"""Support for Eight Sleep binary sensors.""" from __future__ import annotations import logging from pyeight.eight import EightSleep from homeassistant.components.binary_sensor import ( BinarySensorDeviceClass, BinarySensorEntity, ) from homeassistant.core import HomeAssistant from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType from homeassistant.helpers.update_coordinator import DataUpdateCoordinator from . import EightSleepBaseEntity from .const import DATA_API, DATA_HEAT, DOMAIN _LOGGER = logging.getLogger(__name__)
[ 37811, 15514, 329, 18087, 17376, 13934, 15736, 526, 15931, 198, 6738, 11593, 37443, 834, 1330, 37647, 198, 198, 11748, 18931, 198, 198, 6738, 279, 5948, 432, 13, 26022, 1330, 18087, 40555, 198, 198, 6738, 1363, 562, 10167, 13, 5589, 3906,...
3.635838
173
# This file is part of the CERN Indico plugins.
# Copyright (C) 2014 - 2022 CERN
#
# The CERN Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License; see
# the LICENSE file for more details.

from unittest.mock import MagicMock

import pytest
from requests.exceptions import HTTPError, Timeout

from indico.testing.util import extract_logs
from indico_ravem.plugin import RavemPlugin
from indico_ravem.util import has_access, ravem_api_call
[ 2, 770, 2393, 318, 636, 286, 262, 327, 28778, 1423, 3713, 20652, 13, 198, 2, 15069, 357, 34, 8, 1946, 532, 33160, 327, 28778, 198, 2, 198, 2, 383, 327, 28778, 1423, 3713, 20652, 389, 1479, 3788, 26, 345, 460, 17678, 4163, 198, 2, ...
3.322581
155
# _*_ coding: utf-8 _*_
# ---------------------------
__author__ = 'StormSha'
__date__ = '2018/3/28 18:01'
# ---------------------------

# -------------------------django----------------------
from django.conf.urls import url
from .views import OrgView, AddUserAskView, OrgHomeView, OrgCourseView, OrgDescView, OrgTeacherView, AddFavView
from .views import TeacherListView, TeacherDetailView

urlpatterns = [
    url(r'^list/$', OrgView.as_view(), name="org_list"),
    url(r'^add_ask/$', AddUserAskView.as_view(), name="add_ask"),
    url(r'^home/(?P<org_id>\d+)/$', OrgHomeView.as_view(), name="org_home"),
    url(r'^course/(?P<org_id>\d+)/$', OrgCourseView.as_view(), name="org_course"),
    url(r'^desc/(?P<org_id>\d+)/$', OrgDescView.as_view(), name="org_desc"),
    url(r'^org_teacher/(?P<org_id>\d+)/$', OrgTeacherView.as_view(), name="org_teacher"),

    # ---------------------------------------
    url(r'^add_fav/$', AddFavView.as_view(), name="add_fav"),

    # -----------------------teacher------------------------------
    url(r'^teacher/list/$', TeacherListView.as_view(), name="teacher_list"),
    url(r'^teacher/detail/(?P<teacher_id>\d+)/$', TeacherDetailView.as_view(), name="teacher_detail")
]
[ 2, 4808, 9, 62, 19617, 25, 3384, 69, 12, 23, 4808, 9, 62, 198, 2, 220, 22369, 6329, 198, 834, 9800, 834, 796, 705, 32173, 2484, 64, 6, 198, 834, 4475, 834, 796, 705, 7908, 14, 18, 14, 2078, 1248, 25, 486, 6, 198, 2, 220, 223...
2.592751
469
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 198, 11748, 42625, 14208, 13, 9945, 13, 2...
2.857143
49
import FWCore.ParameterSet.Config as cms

# handle normal mixing or premixing

# change assumptions about lumi rate

# turnon = True enables default, False disables
# recalibration and darkening always together

# needs lumi to set proper ZS thresholds (tbd)
[ 11748, 48849, 14055, 13, 36301, 7248, 13, 16934, 355, 269, 907, 198, 198, 2, 5412, 3487, 17090, 393, 4199, 844, 278, 198, 198, 2, 1487, 14895, 546, 300, 12994, 2494, 198, 198, 2, 1210, 261, 796, 6407, 13536, 4277, 11, 10352, 595, 29...
3.7
70
import numpy as np
import xml.etree.ElementTree as ET


if __name__ == '__main__':
    robot = MuJoCoXmlRobot('mujoco_assets/hopper.xml')
    params = list(1.0 * np.array(robot.get_params()))
    robot.update(params, 'mujoco_assets/hopper_test.xml')
    assert robot.get_params() == params
    #assert robot.get_height() == 1.31
    print(robot.get_param_limits())
    print(robot.get_param_names())

    robot = MuJoCoXmlRobot('mujoco_assets/walker2d.xml')
    params = [.4, .04, .5, .05, .55, .055, .6, .06, .5, .05, .55, .055, .6, .06]
    robot.update(params, 'mujoco_assets/walker2d_test.xml')
    assert robot.get_params() == params
    assert robot.get_height() == 1.31
    print(robot.get_param_limits())
    print(robot.get_param_names())

    robot = MuJoCoXmlRobot('mujoco_assets/ant.xml')
    params = [.2, .2, .06, .2, .06, .4, .06,
              .2, .06, .2, .06, .4, .06,
              .2, .06, .2, .06, .4, .06,
              .2, .06, .2, .06, .4, .06]
    robot.update(params, 'mujoco_assets/ant_test.xml')
    assert robot.get_params() == params
    assert robot.get_height() == .2
    print(robot.get_param_limits())
    print(robot.get_param_names())

    robot = MuJoCoXmlRobot('mujoco_assets/humanoid.xml')
    params = list(.8 * np.array(robot.get_params()))
    robot.update(params, 'mujoco_assets/humanoid_test.xml')
    assert robot.get_params() == params
    print(robot.get_height())
    #assert robot.get_height() == .6085
    print(robot.get_param_limits())
    print(robot.get_param_names())

    import gym, roboschool
    env = gym.make("RoboschoolHopper-v1")
    env.unwrapped.model_xml = 'mujoco_assets/hopper_test.xml'
    env.reset()
    #env.render()

    import os
    from scipy.misc import imsave
    import subprocess as sp
    outdir = 'xml_vid'
    os.makedirs(outdir, exist_ok=True)
    i = 0
    for _ in range(10):
        env.reset()
        for _ in range(100):
            env.step(env.action_space.sample())
            rgb = env.render('rgb_array')
            imsave(os.path.join(outdir, '{:05d}.png'.format(i)), rgb)
            i += 1
    sp.call(['ffmpeg', '-r', '60', '-f', 'image2', '-i',
             os.path.join(outdir, '%05d.png'), '-vcodec', 'libx264',
             '-pix_fmt', 'yuv420p', os.path.join(outdir, 'out.mp4')])
    env.close()
[ 11748, 299, 32152, 355, 45941, 198, 11748, 35555, 13, 316, 631, 13, 20180, 27660, 355, 12152, 628, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 9379, 796, 8252, 9908, 7222, 55, 4029, 14350, 313, 1...
2.220202
990
''' Load User Middleware'''
from masonite.facades.Auth import Auth
[ 7061, 6, 8778, 11787, 6046, 1574, 7061, 6, 198, 6738, 285, 888, 578, 13, 38942, 2367, 13, 30515, 1330, 26828, 198 ]
3.190476
21
from minqlx_plugin_test import *

import logging
import unittest

from mockito import *
from mockito.matchers import *
from hamcrest import *
from redis import Redis

from merciful_elo_limit import *
[ 6738, 949, 13976, 87, 62, 33803, 62, 9288, 1330, 1635, 198, 198, 11748, 18931, 198, 11748, 555, 715, 395, 198, 198, 6738, 15290, 10094, 1330, 1635, 198, 6738, 15290, 10094, 13, 6759, 3533, 1330, 1635, 198, 6738, 8891, 66, 2118, 1330, ...
3.258065
62
from ronglian_sms_sdk import SmsSDK
from celery_tasks.main import app

#
# celery task
# celery(main)
[ 6738, 374, 506, 75, 666, 62, 82, 907, 62, 21282, 74, 1330, 311, 907, 10305, 42, 198, 6738, 18725, 1924, 62, 83, 6791, 13, 12417, 1330, 598, 198, 2, 220, 198, 2, 18725, 1924, 35943, 198, 2, 18725, 1924, 7, 12417, 8 ]
2.380952
42
import sublime_plugin
[ 11748, 41674, 62, 33803, 628, 628 ]
4.166667
6
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Test the Python API and shell binary of the tensorflowjs pip package."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import glob
import json
import os
import shutil
import subprocess
import sys
import tempfile
import unittest

import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import variables
from tensorflow.python.training.tracking import tracking
from tensorflow.python.saved_model.save import save
import tensorflow_hub as hub

import tensorflowjs as tfjs


def _createKerasModel(layer_name_prefix, h5_path=None):
  """Create a Keras model for testing.

  Args:
    layer_name_prefix: A prefix string for layer names. This helps avoid
      clashes in layer names between different test methods.
    h5_path: Optional string path for a HDF5 (.h5) file to save the model
      in.

  Returns:
    An instance of keras.Model.
  """
  input_tensor = keras.layers.Input((3, ))
  dense1 = keras.layers.Dense(
      4,
      use_bias=True,
      kernel_initializer='ones',
      bias_initializer='zeros',
      name=layer_name_prefix + '1')(input_tensor)
  output = keras.layers.Dense(
      2,
      use_bias=False,
      kernel_initializer='ones',
      name=layer_name_prefix + '2')(dense1)
  model = keras.models.Model(inputs=[input_tensor], outputs=[output])
  if h5_path:
    model.save(h5_path)
  return model


def _createTensorFlowSavedModelV1(name_scope, save_path):
  """Create a TensorFlow SavedModel for testing.

  Args:
    name_scope: Name scope to create the model under. This helps avoid
      op and variable name clashes between different test methods.
    save_path: The directory path in which to save the model.
  """
  graph = tf.Graph()
  with graph.as_default():
    with tf.compat.v1.name_scope(name_scope):
      x = tf.compat.v1.constant([[37.0, -23.0], [1.0, 4.0]])
      w = tf.compat.v1.get_variable('w', shape=[2, 2])
      y = tf.compat.v1.matmul(x, w)
      output = tf.compat.v1.nn.softmax(y)
      init_op = w.initializer

      # Create a builder.
      builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(save_path)

      with tf.compat.v1.Session() as sess:
        # Run the initializer on `w`.
        sess.run(init_op)

        builder.add_meta_graph_and_variables(
            sess, [tf.compat.v1.saved_model.tag_constants.SERVING],
            signature_def_map={
                "serving_default":
                    tf.compat.v1.saved_model.signature_def_utils
                    .predict_signature_def(
                        inputs={"x": x}, outputs={"output": output})
            },
            assets_collection=None)

      builder.save()


def _createTensorFlowSavedModel(name_scope, save_path):
  """Create a TensorFlow SavedModel for testing.

  Args:
    name_scope: Name scope to create the model under. This helps avoid
      op and variable name clashes between different test methods.
    save_path: The directory path in which to save the model.
  """
  input_data = constant_op.constant(1., shape=[1])
  root = tracking.AutoTrackable()
  root.v1 = variables.Variable(3.)
  root.v2 = variables.Variable(2.)
  root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
  to_save = root.f.get_concrete_function(input_data)

  save(root, save_path, to_save)


def _create_hub_module(save_path):
  """Create a TensorFlow Hub module for testing.

  Args:
    save_path: The directory path in which to save the model.
  """
  # Module function that doubles its input.
  graph = tf.Graph()
  with graph.as_default():
    spec = hub.create_module_spec(double_module_fn)
    m = hub.Module(spec)
  # Export the module.
  with tf.compat.v1.Session(graph=graph) as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    m.export(save_path, sess)


class ConvertTfKerasSavedModelTest(tf.test.TestCase):
  pass  # test methods omitted in this excerpt


if __name__ == '__main__':
  tf.test.main()
[ 2, 15069, 2864, 3012, 11419, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 198, 2, 921, 743, 7330...
2.740023
1,754
# -*- coding: UTF-8 -*-

import os, re, shutil, time, xbmc
from resources.lib.modules import control

try:
    import json as simplejson
except:
    import simplejson

ADDONS = os.path.join(control.HOMEPATH, 'addons')
[ 2, 532, 9, 12, 19617, 25, 41002, 12, 23, 532, 9, 12, 198, 198, 11748, 28686, 11, 302, 11, 4423, 346, 11, 640, 11, 2124, 20475, 66, 198, 6738, 4133, 13, 8019, 13, 18170, 1330, 1630, 198, 28311, 25, 1330, 33918, 355, 2829, 17752, ...
2.857143
77
""" All things for a HAP characteristic. A Characteristic is the smallest unit of the smart home, e.g. a temperature measuring or a device status. """ import logging from pyhap.const import ( HAP_PERMISSION_READ, HAP_REPR_DESC, HAP_REPR_FORMAT, HAP_REPR_IID, HAP_REPR_MAX_LEN, HAP_REPR_PERM, HAP_REPR_TYPE, HAP_REPR_VALID_VALUES, HAP_REPR_VALUE, ) from .util import hap_type_to_uuid, uuid_to_hap_type logger = logging.getLogger(__name__) # ### HAP Format ### HAP_FORMAT_BOOL = "bool" HAP_FORMAT_INT = "int" HAP_FORMAT_FLOAT = "float" HAP_FORMAT_STRING = "string" HAP_FORMAT_ARRAY = "array" HAP_FORMAT_DICTIONARY = "dictionary" HAP_FORMAT_UINT8 = "uint8" HAP_FORMAT_UINT16 = "uint16" HAP_FORMAT_UINT32 = "uint32" HAP_FORMAT_UINT64 = "uint64" HAP_FORMAT_DATA = "data" HAP_FORMAT_TLV8 = "tlv8" HAP_FORMAT_DEFAULTS = { HAP_FORMAT_BOOL: False, HAP_FORMAT_INT: 0, HAP_FORMAT_FLOAT: 0.0, HAP_FORMAT_STRING: "", HAP_FORMAT_ARRAY: "", HAP_FORMAT_DICTIONARY: "", HAP_FORMAT_UINT8: 0, HAP_FORMAT_UINT16: 0, HAP_FORMAT_UINT32: 0, HAP_FORMAT_UINT64: 0, HAP_FORMAT_DATA: "", HAP_FORMAT_TLV8: "", } HAP_FORMAT_NUMERICS = ( HAP_FORMAT_INT, HAP_FORMAT_FLOAT, HAP_FORMAT_UINT8, HAP_FORMAT_UINT16, HAP_FORMAT_UINT32, HAP_FORMAT_UINT64, ) # ### HAP Units ### HAP_UNIT_ARC_DEGREE = "arcdegrees" HAP_UNIT_CELSIUS = "celsius" HAP_UNIT_LUX = "lux" HAP_UNIT_PERCENTAGE = "percentage" HAP_UNIT_SECONDS = "seconds" # ### Properties ### PROP_FORMAT = "Format" PROP_MAX_VALUE = "maxValue" PROP_MIN_STEP = "minStep" PROP_MIN_VALUE = "minValue" PROP_PERMISSIONS = "Permissions" PROP_UNIT = "unit" PROP_VALID_VALUES = "ValidValues" PROP_NUMERIC = (PROP_MAX_VALUE, PROP_MIN_VALUE, PROP_MIN_STEP, PROP_UNIT)
[ 37811, 198, 3237, 1243, 329, 257, 367, 2969, 16704, 13, 198, 198, 32, 15684, 2569, 318, 262, 18197, 4326, 286, 262, 4451, 1363, 11, 304, 13, 70, 13, 198, 64, 5951, 15964, 393, 257, 3335, 3722, 13, 198, 37811, 198, 11748, 18931, 198,...
1.988914
902
dataset_type = 'UNITER_VqaDataset'
data_root = '/home/datasets/mix_data/UNITER/VQA/'

train_datasets = ['train']
test_datasets = ['minival']  # name not in use, but have defined one to run

vqa_cfg = dict(
    train_txt_dbs=[
        data_root + 'vqa_train.db',
        data_root + 'vqa_trainval.db',
        data_root + 'vqa_vg.db',
    ],
    train_img_dbs=[
        data_root + 'coco_train2014/',
        data_root + 'coco_val2014',
        data_root + 'vg/',
    ],
    val_txt_db=data_root + 'vqa_devval.db',
    val_img_db=data_root + 'coco_val2014/',
    ans2label_file=data_root + 'ans2label.json',
    max_txt_len=60,
    conf_th=0.2,
    max_bb=100,
    min_bb=10,
    num_bb=36,
    train_batch_size=20480,  # 5120,
    val_batch_size=40960,  # 10240,
)

BUCKET_SIZE = 8192

train_data = dict(
    samples_per_gpu=vqa_cfg['train_batch_size'],
    workers_per_gpu=4,
    pin_memory=True,
    batch_sampler=dict(
        type='TokenBucketSampler',
        bucket_size=BUCKET_SIZE,
        batch_size=vqa_cfg['train_batch_size'],
        drop_last=True,
        size_multiple=8,
    ),
    data=dict(
        type=dataset_type,
        datacfg=vqa_cfg,
        train_or_val=True,
    ),
)

test_data = dict(
    samples_per_gpu=vqa_cfg['val_batch_size'],
    workers_per_gpu=4,
    batch_sampler=dict(
        type='TokenBucketSampler',
        bucket_size=BUCKET_SIZE,
        batch_size=vqa_cfg['val_batch_size'],
        drop_last=False,
        size_multiple=8,
    ),
    pin_memory=True,
    data=dict(
        type=dataset_type,
        datacfg=vqa_cfg,
        train_or_val=False,
    ),
)

post_processor = dict(
    type='Evaluator',
    metrics=[dict(type='UNITER_AccuracyMetric')],
    dataset_converters=[dict(type='UNITER_DatasetConverter')],
)
[ 19608, 292, 316, 62, 4906, 796, 705, 4944, 2043, 1137, 62, 53, 20402, 27354, 292, 316, 6, 198, 7890, 62, 15763, 796, 31051, 11195, 14, 19608, 292, 1039, 14, 19816, 62, 7890, 14, 4944, 2043, 1137, 14, 53, 48, 32, 14, 6, 198, 198, ...
1.950331
906
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic import TemplateView

urlpatterns = [
    path('api-auth/', include('rest_framework.urls')),
    path('rest-auth/', include('rest_auth.urls')),
    path('rest-auth/registration/', include('rest_auth.registration.urls')),
    path('admin/', admin.site.urls),
    path('api/', include('core.api.urls')),
]

if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

if not settings.DEBUG:
    urlpatterns += [re_path(r'^.*', TemplateView.as_view(template_name='index.html'))]
[ 6738, 42625, 14208, 13, 10414, 1330, 6460, 198, 6738, 42625, 14208, 13, 10414, 13, 6371, 82, 13, 12708, 1330, 9037, 198, 6738, 42625, 14208, 13, 3642, 822, 1330, 13169, 198, 6738, 42625, 14208, 13, 6371, 82, 1330, 3108, 11, 2291, 11, ...
2.490066
302
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
from PyQt5.QtWebEngineWidgets import QWebEngineView
from PyQt5.QtWidgets import (QApplication, QMainWindow, QWidget, QGridLayout,
                             QMessageBox, QFileDialog, QLabel, QLineEdit,
                             QPushButton, QComboBox, QCheckBox, QDateTimeEdit,
                             QTextEdit, QTabWidget, QTableWidget,
                             QTableWidgetItem, QHeaderView)
from PyQt5.QtGui import QPalette, QColor, QBrush
from PyQt5.QtCore import Qt, QDateTime
from pyqtgraph import GraphicsLayoutWidget, setConfigOption, setConfigOptions

import qdarkstyle, sys
import mylibrary.genmail as gm
from GenAndSendMail import insert_send_mail
from server.database import Database
from server.sendmail import Smtp
from server.client import Client

from email import generator
from pandas import DataFrame
from copy import deepcopy


def main():
    app = QApplication(sys.argv)
    gui = MailserverUi()
    gui.show()
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()
[ 2, 48443, 8800, 14, 41757, 198, 2, 532, 9, 12, 19617, 25, 41002, 12, 23, 532, 9, 12, 198, 2, 220, 198, 6738, 9485, 48, 83, 20, 13, 48, 83, 13908, 13798, 54, 312, 11407, 1330, 1195, 13908, 13798, 7680, 198, 6738, 9485, 48, 83, ...
2.620053
379
""" GpuCorrMM-based convolutional layers """ import numpy as np import theano import theano.tensor as T from theano.sandbox.cuda.basic_ops import gpu_contiguous from theano.sandbox.cuda.blas import GpuCorrMM from .. import init from .. import nonlinearities from . import base # base class for all layers that rely on GpuCorrMM directly
[ 37811, 198, 38, 19944, 10606, 81, 12038, 12, 3106, 3063, 2122, 282, 11685, 198, 37811, 198, 198, 11748, 299, 32152, 355, 45941, 198, 198, 11748, 262, 5733, 198, 11748, 262, 5733, 13, 83, 22854, 355, 309, 198, 6738, 262, 5733, 13, 3814...
3.071429
112
#! /usr/bin/env python
#
# ===============================================================
#    Description:  Sanity check for fresh install.
#
#        Created:  2014-08-12 16:42:52
#
#         Author:  Ayush Dubey, dubey@cs.cornell.edu
#
# Copyright (C) 2013, Cornell University, see the LICENSE file
#                     for licensing agreement
# ===============================================================
#

import sys
try:
    import weaver.client as client
except ImportError:
    import client

config_file = ''
if len(sys.argv) > 1:
    config_file = sys.argv[1]

# create client object
c = client.Client('128.84.167.220', 2002, config_file)

# check aux index
assert c.aux_index()

# 1. create node for user ayush
c.begin_tx()
c.create_node('ayush')
c.set_node_properties({'type': 'user', 'age': '25'}, 'ayush')
c.end_tx()

# 2. create node for user egs
c.begin_tx()
c.create_node('egs')
c.set_node_property('type', 'user', 'egs')
c.end_tx()

# 3. ayush follows egs
c.begin_tx()
c.create_edge('ayush', 'egs', 'e1')
c.set_edge_property(edge='e1', key='type', value='follows')
c.create_edge('egs', 'ayush', 'e2')
c.set_edge_property(edge='e2', key='type', value='followed_by')
c.end_tx()

# 4. add a post and restrict visibility to followers only
c.begin_tx()
c.create_node('post')
c.set_node_property('type', 'post', 'post')
c.set_node_property('visibility', 'followers', 'post')
e3 = c.create_edge('egs', 'post')
c.set_edge_property(edge=e3, key='type', value='posted')
c.end_tx()

# 5. 'like' the post
c.begin_tx()
e4 = c.create_edge('post', 'ayush')
c.set_edge_property(edge=e4, key='type', value='liked_by')
c.end_tx()

# 6. list all the people who like egs's post
return_nodes = c.traverse('egs', {'type': 'user'}).out_edge({'type': 'posted'}).node({'type': 'post'}).out_edge({'type': 'liked_by'}).node({'type': 'user'}).execute()
assert len(return_nodes) == 1, 'traversal returned incorrect #nodes'
assert 'ayush' in return_nodes, 'traversal returned bad node handle'

# 7. try to create node with same handle as before
c.begin_tx()
c.create_node('ayush')
try:
    c.end_tx()
    assert False, 'create node passed'
except client.WeaverError:
    pass

# 8. try to create edge with same handle as before
c.begin_tx()
c.create_edge('ayush', 'egs', 'e1')
try:
    c.end_tx()
    assert False, 'create edge passed'
except client.WeaverError:
    pass

# 9. add auxiliary handles to nodes
c.begin_tx()
c.add_alias('ad688', 'ayush')
c.add_alias('el33th4x0r', 'egs')
c.end_tx()

# 10. list all the people who like egs's post
# this time with aliases instead of handles
return_nodes = c.traverse('el33th4x0r', {'type': 'user'}).out_edge({'type': 'posted'}).node({'type': 'post'}).out_edge({'type': 'liked_by'}).node({'type': 'user'}).execute()
assert len(return_nodes) == 1, 'traversal returned incorrect #nodes'
assert 'ayush' in return_nodes, 'traversal returned bad node handle'

# 11. get node and check it is valid
ad = c.get_node('ayush')
assert 'ad688' in ad.aliases
assert 'type' in ad.properties
assert 'user' in ad.properties['type']
assert 'age' in ad.properties
assert '25' in ad.properties['age']
assert 'e1' in ad.out_edges

print 'Correctly executed 11 transactions of varying complexity, pass simple_test.'
print 'Success, you have a working Weaver setup!'
[ 2, 0, 1220, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 220, 198, 2, 46111, 4770, 25609, 855, 198, 2, 220, 220, 220, 12489, 25, 220, 2986, 414, 2198, 329, 4713, 2721, 13, 220, 198, 2, 220, 198, 2, 220, 220, 220, 220, 220, 220, ...
2.646302
1,244
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals

try:
    unicode
except NameError:
    basestring = unicode = str  # Python 3

import logging

import rdflib
from rdflib import compare

logger = logging.getLogger("ldtools")

RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"

# The background is set with 40 plus the number of the color, and
# the foreground with 30

# These are the sequences needed to get colored output
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
COL = {
    'DEBUG': BLUE, 'INFO': MAGENTA,
    'WARNING': YELLOW, 'CRITICAL': YELLOW, 'ERROR': RED}


def my_graph_diff(graph1, graph2):
    """Compares graph2 to graph1 and highlights everything that changed.
    Colored if pygments available"""

    # quick fix for wrong type
    if not type(graph1) == type(graph2) == rdflib.Graph:
        if type(graph1) == rdflib.ConjunctiveGraph:
            g1contexts = list(graph1.contexts())
            assert len(g1contexts) == 1
            graph1 = g1contexts[0]
        if type(graph2) == rdflib.ConjunctiveGraph:
            g2contexts = list(graph2.contexts())
            assert len(g2contexts) == 1
            graph2 = g2contexts[0]

    # Return if both graphs are isomorphic
    iso1 = compare.to_isomorphic(graph1)
    iso2 = compare.to_isomorphic(graph2)

    if graph1.identifier == graph2.identifier:
        str_bit = u"The 2 '%s' Graphs" % graph1.identifier
    else:
        str_bit = (u"Graphs '%s' and '%s'"
                   % (graph1.identifier, graph2.identifier))

    if iso1 == iso2:
        logger.debug(u"%s are isomorphic" % str_bit)
        return

    print(u"Differences between %s." % str_bit)

    in_both, in_first, in_second = compare.graph_diff(iso1, iso2)

    sorted_first = dump_nt_sorted(in_first)
    sorted_second = dump_nt_sorted(in_second)

    import difflib

    diff = difflib.unified_diff(
        sorted_first,
        sorted_second,
        u'Original',
        u'Current',
        lineterm=''
    )

    try:
        from pygments import highlight
        from pygments.formatters import terminal
        from pygments.lexers import web
        lexer = web.XmlLexer()
        formatter = terminal.TerminalFormatter()
        print(highlight(u'\n'.join(diff), lexer, formatter))
    except ImportError:
        logger.info("Install pygments for colored diffs")
        print(u'\n'.join(diff))
    except UnicodeDecodeError:
        print(u"Only in first", unicode(sorted_first))
        print(u"Only in second", unicode(sorted_second))
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 11, 28000, 1098, 62, 17201, 874, 198, 198, 28311, 25, 198, 220, 220, 220, 28000, 1098, 198, 16341, 6530, 12331, 25, 198, ...
2.326993
1,104
# Debug print levels for fine-grained debug trace output control
DNFQUEUE = (1 << 0)     # netfilterqueue
DGENPKT = (1 << 1)      # Generic packet handling
DGENPKTV = (1 << 2)     # Generic packet handling with TCP analysis
DCB = (1 << 3)          # Packet handling callbacks
DPROCFS = (1 << 4)      # procfs
DIPTBLS = (1 << 5)      # iptables
DNONLOC = (1 << 6)      # Nonlocal-destined datagrams
DDPF = (1 << 7)         # DPF (Dynamic Port Forwarding)
DDPFV = (1 << 8)        # DPF (Dynamic Port Forwarding) Verbose
DIPNAT = (1 << 9)       # IP redirection for nonlocal-destined datagrams
DMANGLE = (1 << 10)     # Packet mangling
DPCAP = (1 << 11)       # Pcap write logic
DIGN = (1 << 12)        # Packet redirect ignore conditions
DFTP = (1 << 13)        # FTP checks
DMISC = (1 << 27)       # Miscellaneous

DCOMP = 0x0fffffff      # Component mask
DFLAG = 0xf0000000      # Flag mask
DEVERY = 0x0fffffff     # Log everything, low verbosity
DEVERY2 = 0x8fffffff    # Log everything, complete verbosity

DLABELS = {
    DNFQUEUE: 'NFQUEUE',
    DGENPKT: 'GENPKT',
    DGENPKTV: 'GENPKTV',
    DCB: 'CB',
    DPROCFS: 'PROCFS',
    DIPTBLS: 'IPTABLES',
    DNONLOC: 'NONLOC',
    DDPF: 'DPF',
    DDPFV: 'DPFV',
    DIPNAT: 'IPNAT',
    DMANGLE: 'MANGLE',
    DPCAP: 'PCAP',
    DIGN: 'IGN',
    DFTP: 'FTP',
    DIGN | DFTP: 'IGN-FTP',
    DMISC: 'MISC',
}

DLABELS_INV = {v.upper(): k for k, v in DLABELS.items()}
[ 2, 31687, 3601, 2974, 329, 3734, 12, 2164, 1328, 14257, 12854, 5072, 1630, 198, 35, 21870, 48, 8924, 8924, 796, 357, 16, 9959, 657, 8, 220, 220, 220, 220, 1303, 2010, 24455, 36560, 198, 35, 35353, 40492, 51, 796, 357, 16, 9959, 352,...
2.125749
668
'''
Script for training the model
'''
import tensorflow as tf
import numpy as np
from input import BatchGenerator
from model import MultiRnn
import time
from datetime import datetime
import os
import matplotlib as mpl
mpl.use('Agg')
from matplotlib import pyplot as plt

sum_dir = 'sum'  # dir to write summary
train_dir = 'ckpt'  # dir to store the model
data_dir = 'train.pkl'  # dir of the data set

NEFF = 129  # effective FFT points
batch_size = 128
num_steps = 20
epochs = 2000
cell_type = 'NL_LSTM'
state_size = 256
output_size = 129
num_layer = 3
learning_rate = 0.0001

# build the model
rnn_model = MultiRnn(
    cell_type, state_size, output_size,
    batch_size, num_layer, learning_rate, num_steps)

# input data and reference data placeholder
in_data = tf.placeholder(
    tf.float32, [batch_size, num_steps, 2 * NEFF])
ref_data = tf.placeholder(
    tf.float32, [batch_size, num_steps, NEFF])

# make inference
init_state, final_state, inf_data = rnn_model.inference(in_data)

# compute loss
loss = rnn_model.loss(inf_data, ref_data)

saver = tf.train.Saver(tf.all_variables())

summary_op = tf.merge_all_summaries()

train_op = rnn_model.train(loss)

batch_gen = BatchGenerator(data_dir, batch_size, num_steps, epochs)

with tf.Session() as sess:
    summary_writer = tf.train.SummaryWriter(
        sum_dir, sess.graph)
    sess.run(tf.initialize_all_variables())
    steps = 0
    # generator for epoch data
    for idx, epoch in enumerate(batch_gen.gen_epochs()):
        training_state = None
        # generator for batch data
        for f_data, b_data, r_data, v_data in epoch:
            start_time = time.time()
            steps += 1
            in_data_np = np.concatenate((f_data, b_data), axis=2)
            if steps % 100 == 0:
                feed_dict = {in_data: in_data_np, ref_data: r_data}
                if training_state is not None:
                    feed_dict[init_state] = training_state
                # training the net
                loss_value, training_state, _, summary_str, test_inf = sess.run(
                    [loss, final_state, train_op, summary_op, inf_data],
                    feed_dict)
                duration = time.time() - start_time
                sec_per_batch = float(duration)
                examples_per_sec = batch_size / duration
                format_str = (
                    '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                    'sec/batch, epoch %d)')
                print(format_str % (datetime.now(), steps, loss_value,
                                    examples_per_sec, sec_per_batch, idx))
                summary_writer.add_summary(summary_str, steps)
            else:
                feed_dict = {in_data: in_data_np, ref_data: r_data}
                if training_state is not None:
                    feed_dict[init_state] = training_state
                loss_value, training_state, _ = sess.run(
                    [loss, final_state, train_op], feed_dict)
            if steps % 10000 == 0:
                checkpoint_path = os.path.join(train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=steps)
[ 7061, 6, 198, 7391, 329, 3047, 262, 2746, 198, 7061, 6, 198, 11748, 11192, 273, 11125, 355, 48700, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 5128, 1330, 347, 963, 8645, 1352, 198, 6738, 2746, 1330, 15237, 49, 20471, 198, 11748, 6...
2.132304
1,489
import os
import sys
import threading
import time
import warnings
from contextlib import ExitStack

import click
import pendulum

from dagster import __version__
from dagster.core.instance import DagsterInstance
from dagster.daemon.controller import (
    DEFAULT_DAEMON_HEARTBEAT_TOLERANCE_SECONDS,
    DagsterDaemonController,
    all_daemons_healthy,
    all_daemons_live,
    daemon_controller_from_instance,
    debug_daemon_heartbeats,
    get_daemon_status,
)
from dagster.utils.interrupts import capture_interrupts, raise_interrupts_as


def create_dagster_daemon_cli():
    commands = {
        "run": run_command,
        "health-check": health_check_command,
        "liveness-check": liveness_check_command,
        "wipe": wipe_command,
        "debug": debug_group,
    }
    return group


cli = create_dagster_daemon_cli()
[ 11748, 28686, 198, 11748, 25064, 198, 11748, 4704, 278, 198, 11748, 640, 198, 11748, 14601, 198, 6738, 4732, 8019, 1330, 29739, 25896, 198, 198, 11748, 3904, 198, 11748, 44017, 14452, 198, 6738, 48924, 1706, 1330, 11593, 9641, 834, 198, 6...
2.65
320
import sys, os
sys.path.append(os.path.dirname(os.path.dirname(sys.path[0])))

from sportsreference.nfl.teams import Teams

for team in Teams():
    print(team.name)
    for player in team.roster.players:
        print(player.name)
    for game in team.schedule:
        print(game.dataframe)
        print(game.dataframe_extended)
[ 11748, 25064, 11, 28686, 198, 17597, 13, 6978, 13, 33295, 7, 418, 13, 6978, 13, 15908, 3672, 7, 418, 13, 6978, 13, 15908, 3672, 7, 17597, 13, 6978, 58, 15, 60, 22305, 198, 6738, 5701, 35790, 13, 77, 2704, 13, 660, 4105, 1330, 2469...
2.451852
135
import numpy as np
import matplotlib
import matplotlib.pyplot as plt

import sys
sys.path.append("../")
from quelea import *

nx = 217
ny = 133

x0 = 0
x1 = 30  # lambdas
y0 = 0
y1 = 20  # lambdas

xs = np.linspace(x0, x1, nx)
ys = np.linspace(y0, y1, ny)

# 2d array of (x, y, z, t) coords
coords = np.array([[x, y, 0, 0] for x in xs for y in ys])
# for map_fields function this should be converted from 2D to 1D array
coords = coords.reshape((4 * nx * ny,))

ftype = 1  # plane wave
a0 = 1  # normalized field amplitude
omega = 1  # frequency
fparam = [a0, 1, 0, 0, 0, 1, 0, 0, omega]  # parameters of the plane wave

ex, ey, ez, bx, by, bz = map_fields(coords, ftype, fparam)

# now convert to 2d arrays
ex = ex.reshape((nx, ny))
ey = ey.reshape((nx, ny))
ez = ez.reshape((nx, ny))
bx = bx.reshape((nx, ny))
by = by.reshape((nx, ny))
bz = bz.reshape((nx, ny))

ex = ex.transpose()
ey = ey.transpose()
ez = ez.transpose()
bx = bx.transpose()
by = by.transpose()
bz = bz.transpose()

plt.imshow(ey, cmap='RdYlBu', origin='lower', extent=[x0, x1, y0, y1])
plt.colorbar()
plt.clim(-a0, a0)

plt.savefig("map_fields.pdf")
[ 11748, 299, 32152, 355, 45941, 198, 11748, 2603, 29487, 8019, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 198, 11748, 25064, 198, 17597, 13, 6978, 13, 33295, 7203, 40720, 4943, 198, 6738, 8358, 293, 64, 1330, 1635...
2.19685
508
import os

from absl import app
from absl import flags
import numpy as np
import tqdm
from tensorflow.keras import Model
from albumentations import (
    Compose, HorizontalFlip, RandomBrightness, RandomContrast,
    ShiftScaleRotate, ToFloat, VerticalFlip)

from utils import reset_tf
from eval_utils import calc_score_variance
from models import build_seg_model, build_pixel_mlp_class_model
from VegetableSequence import VegetableDataset, VegetableSequence
from temporal_random_seed import TemporalRandomSeed
import myFlags

FLAGS = flags.FLAGS


if __name__ == "__main__":
    app.run(main)
[ 11748, 28686, 198, 6738, 2352, 75, 1330, 598, 198, 6738, 2352, 75, 1330, 9701, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 256, 80, 36020, 198, 198, 6738, 11192, 273, 11125, 13, 6122, 292, 1330, 9104, 198, 6738, 435, 65, 1713, 602...
3.160428
187
from django.conf import settings

from suit import apps
from suit.apps import DjangoSuitConfig
from suit.menu import ParentItem, ChildItem

APP_NAME = settings.APP_NAME
WIKI_URL = settings.WIKI_URL
[ 6738, 42625, 14208, 13, 10414, 1330, 6460, 198, 198, 6738, 6050, 1330, 6725, 198, 6738, 6050, 13, 18211, 1330, 37770, 50, 5013, 16934, 198, 6738, 6050, 13, 26272, 1330, 16774, 7449, 11, 5932, 7449, 198, 198, 24805, 62, 20608, 796, 6460,...
3.3
60
import h5py
import numpy as np

np.set_printoptions(threshold=np.nan)

from shutil import copyfile

copyfile("dummy_lutnet.h5", "pretrained_bin.h5") # create pretrained.h5 using datastructure from dummy.h5

bl = h5py.File("baseline_pruned.h5", 'r')
#dummy = h5py.File("dummy.h5", 'r')
pretrained = h5py.File("pretrained_bin.h5", 'r+')

# dense layer 1
bl_w1 = bl["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable_1:0"]
bl_pruning_mask = bl["model_weights"]["binary_dense_1"]["binary_dense_1"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable:0"]
zero_fill = np.zeros(np.shape(np.array(bl_w1)))
pret_w1 = pretrained["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable_1:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_dense_1"]["binary_dense_1"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable:0"]

pret_w1[...] = np.array(bl_w1)
p_gamma[...] = np.array(bl_gamma)
pret_pruning_mask[...] = np.array(bl_pruning_mask)

print(np.sum(np.array(bl_pruning_mask)), np.prod(np.shape(np.array(bl_pruning_mask))))

# dense layer 2
bl_w1 = bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_1:0"]
bl_rand_map_0 = bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_0:0"]
bl_pruning_mask = bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable:0"]
bl_means = bl["model_weights"]["residual_sign_1"]["residual_sign_1"]["means:0"]
pret_rand_map_0 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_0:0"]
pret_rand_map_1 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_1:0"]
pret_rand_map_2 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_2:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable:0"]
pret_means = pretrained["model_weights"]["residual_sign_1"]["residual_sign_1"]["means:0"]
pret_c1 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_1:0"]
pret_c2 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_2:0"]
pret_c3 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_3:0"]
pret_c4 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_4:0"]
pret_c5 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_5:0"]
pret_c6 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_6:0"]
pret_c7 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_7:0"]
pret_c8 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_8:0"]
pret_c9 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_9:0"]
pret_c10= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_10:0"]
pret_c11= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_11:0"]
pret_c12= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_12:0"]
pret_c13= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_13:0"]
pret_c14= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_14:0"]
pret_c15= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_15:0"]
pret_c16= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_16:0"]
pret_c17= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_17:0"]
pret_c18= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_18:0"]
pret_c19= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_19:0"]
pret_c20= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_20:0"]
pret_c21= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_21:0"]
pret_c22= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_22:0"]
pret_c23= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_23:0"]
pret_c24= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_24:0"]
pret_c25= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_25:0"]
pret_c26= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_26:0"]
pret_c27= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_27:0"]
pret_c28= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_28:0"]
pret_c29= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_29:0"]
pret_c30= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_30:0"]
pret_c31= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_31:0"]
pret_c32= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_32:0"]
pret_w1 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_33:0"]
pret_rand_map_exp_0 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_exp_0:0"]
pret_rand_map_exp_1 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_exp_1:0"]
pret_rand_map_exp_2 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_exp_2:0"]

weight_shape = np.shape(bl_w1)
tile_shape = np.shape(pret_c1)
zero_fill = np.zeros(tile_shape)
one_fill = np.ones(tile_shape)
neg_one_fill = -np.ones(tile_shape)

# randomisation and pruning recovery
bl_w1_unroll = np.array(bl_w1)
bl_w1 = np.array(bl_w1)

rand_map_0 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_0)
rand_map_1 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_1)
rand_map_2 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_2)

pruning_mask = np.array(bl_pruning_mask).astype(bool)
init_mask = np.logical_not(pruning_mask[rand_map_0])
pruning_mask_recover = np.logical_and(pruning_mask, init_mask)[np.argsort(rand_map_0)]
pruning_mask = np.logical_or(pruning_mask, pruning_mask_recover)
init_mask = np.reshape(init_mask, tile_shape)

# expand randomisation map across tiles
rand_map_0_expand = np.tile(rand_map_0,[weight_shape[0]/tile_shape[0]])
rand_map_1_expand = np.tile(rand_map_1,[weight_shape[0]/tile_shape[0]])
rand_map_2_expand = np.tile(rand_map_2,[weight_shape[0]/tile_shape[0]])
for i in range(weight_shape[0]):
    rand_map_0_expand[i] = rand_map_0_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_0_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
    rand_map_1_expand[i] = rand_map_1_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_1_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
    rand_map_2_expand[i] = rand_map_2_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_2_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])

bl_w1_rand_0 = bl_w1_unroll[rand_map_0_expand]
bl_w1_rand_0 = np.reshape(bl_w1_rand_0, weight_shape)

w1 = bl_w1

# connect1 only
c1 = one_fill
c2 = neg_one_fill
c3 = one_fill
c4 = neg_one_fill
c5 = one_fill
c6 = neg_one_fill
c7 = one_fill
c8 = neg_one_fill
c9 = one_fill
c10 = neg_one_fill
c11 = one_fill
c12 = neg_one_fill
c13 = one_fill
c14 = neg_one_fill
c15 = one_fill
c16 = neg_one_fill
c17 = neg_one_fill
c18 = one_fill
c19 = neg_one_fill
c20 = one_fill
c21 = neg_one_fill
c22 = one_fill
c23 = neg_one_fill
c24 = one_fill
c25 = neg_one_fill
c26 = one_fill
c27 = neg_one_fill
c28 = one_fill
c29 = neg_one_fill
c30 = one_fill
c31 = neg_one_fill
c32 = one_fill

pret_w1 [...] = w1
pret_c1 [...] = c1
pret_c2 [...] = c2
pret_c3 [...] = c3
pret_c4 [...] = c4
pret_c5 [...] = c5
pret_c6 [...] = c6
pret_c7 [...] = c7
pret_c8 [...] = c8
pret_c9 [...] = c9
pret_c10[...] = c10
pret_c11[...] = c11
pret_c12[...] = c12
pret_c13[...] = c13
pret_c14[...] = c14
pret_c15[...] = c15
pret_c16[...] = c16
pret_c17[...] = c17
pret_c18[...] = c18
pret_c19[...] = c19
pret_c20[...] = c20
pret_c21[...] = c21
pret_c22[...] = c22
pret_c23[...] = c23
pret_c24[...] = c24
pret_c25[...] = c25
pret_c26[...] = c26
pret_c27[...] = c27
pret_c28[...] = c28
pret_c29[...] = c29
pret_c30[...] = c30
pret_c31[...] = c31
pret_c32[...] = c32

pret_rand_map_0[...] = np.reshape(rand_map_0, (-1,1)).astype(float)
pret_rand_map_1[...] = np.reshape(rand_map_1, (-1,1)).astype(float)
pret_rand_map_2[...] = np.reshape(rand_map_2, (-1,1)).astype(float)
p_gamma[...] = np.array(bl_gamma)
pret_means[...] = np.array(bl_means)
pret_pruning_mask[...] = np.array(bl_pruning_mask)

rand_map_0_expand = np.reshape(rand_map_0_expand, [-1,1]).astype(float)
pret_rand_map_exp_0[...] = rand_map_0_expand
rand_map_1_expand = np.reshape(rand_map_1_expand, [-1,1]).astype(float)
pret_rand_map_exp_1[...] = rand_map_1_expand
rand_map_2_expand = np.reshape(rand_map_2_expand, [-1,1]).astype(float)
pret_rand_map_exp_2[...] = rand_map_2_expand

print(np.sum(np.array(bl_pruning_mask)), np.prod(np.shape(np.array(bl_pruning_mask))))

# dense layer 3
bl_w1 = bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_1:0"]
bl_rand_map_0 = bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_0:0"]
bl_pruning_mask = bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable:0"]
bl_means = bl["model_weights"]["residual_sign_2"]["residual_sign_2"]["means:0"]
pret_rand_map_0 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_0:0"]
pret_rand_map_1 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_1:0"]
pret_rand_map_2 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_2:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable:0"]
pret_means = pretrained["model_weights"]["residual_sign_2"]["residual_sign_2"]["means:0"]
pret_c1 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_1:0"]
pret_c2 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_2:0"]
pret_c3 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_3:0"]
pret_c4 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_4:0"]
pret_c5 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_5:0"]
pret_c6 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_6:0"]
pret_c7 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_7:0"]
pret_c8 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_8:0"]
pret_c9 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_9:0"]
pret_c10= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_10:0"]
pret_c11= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_11:0"]
pret_c12= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_12:0"]
pret_c13= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_13:0"]
pret_c14= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_14:0"]
pret_c15= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_15:0"]
pret_c16= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_16:0"]
pret_c17= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_17:0"]
pret_c18= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_18:0"]
pret_c19= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_19:0"]
pret_c20= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_20:0"]
pret_c21= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_21:0"]
pret_c22= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_22:0"]
pret_c23= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_23:0"]
pret_c24= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_24:0"]
pret_c25= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_25:0"]
pret_c26= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_26:0"]
pret_c27= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_27:0"]
pret_c28= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_28:0"]
pret_c29= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_29:0"]
pret_c30= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_30:0"]
pret_c31= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_31:0"]
pret_c32= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_32:0"]
pret_w1 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_33:0"]
pret_rand_map_exp_0 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_exp_0:0"]
pret_rand_map_exp_1 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_exp_1:0"]
pret_rand_map_exp_2 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_exp_2:0"]

weight_shape = np.shape(bl_w1)
tile_shape = np.shape(pret_c1)
zero_fill = np.zeros(tile_shape)
one_fill = np.ones(tile_shape)
neg_one_fill = -np.ones(tile_shape)

# randomisation and pruning recovery
bl_w1_unroll = np.array(bl_w1)
bl_w1 = np.array(bl_w1)

rand_map_0 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_0)
rand_map_1 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_1)
rand_map_2 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_2)

pruning_mask = np.array(bl_pruning_mask).astype(bool)
init_mask = np.logical_not(pruning_mask[rand_map_0])
pruning_mask_recover = np.logical_and(pruning_mask, init_mask)[np.argsort(rand_map_0)]
pruning_mask = np.logical_or(pruning_mask, pruning_mask_recover)
init_mask = np.reshape(init_mask, tile_shape)

# expand randomisation map across tiles
rand_map_0_expand = np.tile(rand_map_0,[weight_shape[0]/tile_shape[0]])
rand_map_1_expand = np.tile(rand_map_1,[weight_shape[0]/tile_shape[0]])
rand_map_2_expand = np.tile(rand_map_2,[weight_shape[0]/tile_shape[0]])
for i in range(weight_shape[0]):
    rand_map_0_expand[i] = rand_map_0_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_0_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
    rand_map_1_expand[i] = rand_map_1_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_1_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
    rand_map_2_expand[i] = rand_map_2_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_2_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])

bl_w1_rand_0 = bl_w1_unroll[rand_map_0_expand]
bl_w1_rand_0 = np.reshape(bl_w1_rand_0, weight_shape)

w1 = bl_w1

# connect1 only
c1 = one_fill
c2 = neg_one_fill
c3 = one_fill
c4 = neg_one_fill
c5 = one_fill
c6 = neg_one_fill
c7 = one_fill
c8 = neg_one_fill
c9 = one_fill
c10 = neg_one_fill
c11 = one_fill
c12 = neg_one_fill
c13 = one_fill
c14 = neg_one_fill
c15 = one_fill
c16 = neg_one_fill
c17 = neg_one_fill
c18 = one_fill
c19 = neg_one_fill
c20 = one_fill
c21 = neg_one_fill
c22 = one_fill
c23 = neg_one_fill
c24 = one_fill
c25 = neg_one_fill
c26 = one_fill
c27 = neg_one_fill
c28 = one_fill
c29 = neg_one_fill
c30 = one_fill
c31 = neg_one_fill
c32 = one_fill

pret_w1 [...] = w1
pret_c1 [...] = c1
pret_c2 [...] = c2
pret_c3 [...] = c3
pret_c4 [...] = c4
pret_c5 [...] = c5
pret_c6 [...] = c6
pret_c7 [...] = c7
pret_c8 [...] = c8
pret_c9 [...] = c9
pret_c10[...] = c10
pret_c11[...] = c11
pret_c12[...] = c12
pret_c13[...] = c13
pret_c14[...] = c14
pret_c15[...] = c15
pret_c16[...] = c16
pret_c17[...] = c17
pret_c18[...] = c18
pret_c19[...] = c19
pret_c20[...] = c20
pret_c21[...] = c21
pret_c22[...] = c22
pret_c23[...] = c23
pret_c24[...] = c24
pret_c25[...] = c25
pret_c26[...] = c26
pret_c27[...] = c27
pret_c28[...] = c28
pret_c29[...] = c29
pret_c30[...] = c30
pret_c31[...] = c31
pret_c32[...] = c32

pret_rand_map_0[...] = np.reshape(rand_map_0, (-1,1)).astype(float)
pret_rand_map_1[...] = np.reshape(rand_map_1, (-1,1)).astype(float)
pret_rand_map_2[...] = np.reshape(rand_map_2, (-1,1)).astype(float)
p_gamma[...] = np.array(bl_gamma)
pret_means[...] = np.array(bl_means)
pret_pruning_mask[...] = np.array(bl_pruning_mask)

rand_map_0_expand = np.reshape(rand_map_0_expand, [-1,1]).astype(float)
pret_rand_map_exp_0[...] = rand_map_0_expand
rand_map_1_expand = np.reshape(rand_map_1_expand, [-1,1]).astype(float)
pret_rand_map_exp_1[...] = rand_map_1_expand
rand_map_2_expand = np.reshape(rand_map_2_expand, [-1,1]).astype(float)
pret_rand_map_exp_2[...] = rand_map_2_expand

print(np.sum(np.array(bl_pruning_mask)), np.prod(np.shape(np.array(bl_pruning_mask))))

# dense layer 4
bl_w1 = bl["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_1:0"]
bl_rand_map_0 = bl["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_0:0"]
bl_pruning_mask = bl["model_weights"]["binary_dense_4"]["binary_dense_4"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable:0"]
bl_means = bl["model_weights"]["residual_sign_3"]["residual_sign_3"]["means:0"]
pret_rand_map_0 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_0:0"]
pret_rand_map_1 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_1:0"]
pret_rand_map_2 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_2:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable:0"]
pret_means = pretrained["model_weights"]["residual_sign_3"]["residual_sign_3"]["means:0"]
pret_c1 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_1:0"]
pret_c2 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_2:0"]
pret_c3 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_3:0"]
pret_c4 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_4:0"]
pret_c5 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_5:0"]
pret_c6 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_6:0"]
pret_c7 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_7:0"]
pret_c8 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_8:0"]
pret_c9 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_9:0"]
pret_c10= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_10:0"]
pret_c11= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_11:0"]
pret_c12= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_12:0"]
pret_c13= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_13:0"]
pret_c14= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_14:0"]
pret_c15= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_15:0"]
pret_c16= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_16:0"]
pret_c17= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_17:0"]
pret_c18= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_18:0"]
pret_c19= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_19:0"]
pret_c20= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_20:0"]
pret_c21= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_21:0"]
pret_c22= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_22:0"]
pret_c23= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_23:0"]
pret_c24= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_24:0"]
pret_c25= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_25:0"]
pret_c26= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_26:0"]
pret_c27= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_27:0"]
pret_c28= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_28:0"]
pret_c29= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_29:0"]
pret_c30= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_30:0"]
pret_c31= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_31:0"]
pret_c32= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_32:0"]
pret_w1 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_33:0"]
pret_rand_map_exp_0 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_exp_0:0"]
pret_rand_map_exp_1 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_exp_1:0"]
pret_rand_map_exp_2 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_exp_2:0"]

weight_shape = np.shape(bl_w1)
tile_shape = np.shape(pret_c1)
zero_fill = np.zeros(tile_shape)
one_fill = np.ones(tile_shape)
neg_one_fill = -np.ones(tile_shape)

# randomisation and pruning recovery
bl_w1_unroll = np.array(bl_w1)
bl_w1 = np.array(bl_w1)

rand_map_0 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_0)
rand_map_1 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_1)
rand_map_2 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_2)

pruning_mask = np.array(bl_pruning_mask).astype(bool)
init_mask = np.logical_not(pruning_mask[rand_map_0])
pruning_mask_recover = np.logical_and(pruning_mask, init_mask)[np.argsort(rand_map_0)]
pruning_mask = np.logical_or(pruning_mask, pruning_mask_recover)
init_mask = np.reshape(init_mask, tile_shape)

# expand randomisation map across tiles
rand_map_0_expand = np.tile(rand_map_0,[weight_shape[0]/tile_shape[0]])
rand_map_1_expand = np.tile(rand_map_1,[weight_shape[0]/tile_shape[0]])
rand_map_2_expand = np.tile(rand_map_2,[weight_shape[0]/tile_shape[0]])
for i in range(weight_shape[0]):
    rand_map_0_expand[i] = rand_map_0_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_0_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
    rand_map_1_expand[i] = rand_map_1_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_1_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
    rand_map_2_expand[i] = rand_map_2_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_2_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])

bl_w1_rand_0 = bl_w1_unroll[rand_map_0_expand]
bl_w1_rand_0 = np.reshape(bl_w1_rand_0, weight_shape)

w1 = bl_w1

# connect1 only
c1 = one_fill
c2 = neg_one_fill
c3 = one_fill
c4 = neg_one_fill
c5 = one_fill
c6 = neg_one_fill
c7 = one_fill
c8 = neg_one_fill
c9 = one_fill
c10 = neg_one_fill
c11 = one_fill
c12 = neg_one_fill
c13 = one_fill
c14 = neg_one_fill
c15 = one_fill
c16 = neg_one_fill
c17 = neg_one_fill
c18 = one_fill
c19 = neg_one_fill
c20 = one_fill
c21 = neg_one_fill
c22 = one_fill
c23 = neg_one_fill
c24 = one_fill
c25 = neg_one_fill
c26 = one_fill
c27 = neg_one_fill
c28 = one_fill
c29 = neg_one_fill
c30 = one_fill
c31 = neg_one_fill
c32 = one_fill

pret_w1 [...] = w1
pret_c1 [...] = c1
pret_c2 [...] = c2
pret_c3 [...] = c3
pret_c4 [...] = c4
pret_c5 [...] = c5
pret_c6 [...] = c6
pret_c7 [...] = c7
pret_c8 [...] = c8
pret_c9 [...] = c9
pret_c10[...] = c10
pret_c11[...] = c11
pret_c12[...] = c12
pret_c13[...] = c13
pret_c14[...] = c14
pret_c15[...] = c15
pret_c16[...] = c16
pret_c17[...] = c17
pret_c18[...] = c18
pret_c19[...] = c19
pret_c20[...] = c20
pret_c21[...] = c21
pret_c22[...] = c22
pret_c23[...] = c23
pret_c24[...] = c24
pret_c25[...] = c25
pret_c26[...] = c26
pret_c27[...] = c27
pret_c28[...] = c28
pret_c29[...] = c29
pret_c30[...] = c30
pret_c31[...] = c31
pret_c32[...] = c32

pret_rand_map_0[...] = np.reshape(rand_map_0, (-1,1)).astype(float)
pret_rand_map_1[...] = np.reshape(rand_map_1, (-1,1)).astype(float)
pret_rand_map_2[...] = np.reshape(rand_map_2, (-1,1)).astype(float)
p_gamma[...] = np.array(bl_gamma)
pret_means[...] = np.array(bl_means)
pret_pruning_mask[...] = np.array(bl_pruning_mask)

rand_map_0_expand = np.reshape(rand_map_0_expand, [-1,1]).astype(float)
pret_rand_map_exp_0[...] = rand_map_0_expand
rand_map_1_expand = np.reshape(rand_map_1_expand, [-1,1]).astype(float)
pret_rand_map_exp_1[...] = rand_map_1_expand
rand_map_2_expand = np.reshape(rand_map_2_expand, [-1,1]).astype(float)
pret_rand_map_exp_2[...] = rand_map_2_expand

print(np.sum(np.array(bl_pruning_mask)), np.prod(np.shape(np.array(bl_pruning_mask))))

# dense layer 5
bl_w1 = bl["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_1:0"]
bl_rand_map_0 = bl["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_0:0"]
bl_pruning_mask = bl["model_weights"]["binary_dense_5"]["binary_dense_5"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable:0"]
bl_means = bl["model_weights"]["residual_sign_4"]["residual_sign_4"]["means:0"]
pret_rand_map_0 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_0:0"]
pret_rand_map_1 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_1:0"]
pret_rand_map_2 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_2:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable:0"]
pret_means = pretrained["model_weights"]["residual_sign_4"]["residual_sign_4"]["means:0"]
pret_c1 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_1:0"]
pret_c2 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_2:0"]
pret_c3 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_3:0"]
pret_c4 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_4:0"]
pret_c5 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_5:0"]
pret_c6 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_6:0"]
pret_c7 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_7:0"]
pret_c8 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_8:0"]
pret_c9 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_9:0"]
pret_c10= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_10:0"]
pret_c11= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_11:0"]
pret_c12= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_12:0"]
pret_c13= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_13:0"]
pret_c14= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_14:0"]
pret_c15= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_15:0"]
pret_c16= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_16:0"]
pret_c17= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_17:0"]
pret_c18= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_18:0"]
pret_c19= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_19:0"]
pret_c20= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_20:0"]
pret_c21= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_21:0"]
pret_c22= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_22:0"]
pret_c23= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_23:0"]
pret_c24= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_24:0"]
pret_c25= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_25:0"]
pret_c26= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_26:0"]
pret_c27= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_27:0"]
pret_c28= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_28:0"]
pret_c29= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_29:0"]
pret_c30= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_30:0"]
pret_c31= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_31:0"]
pret_c32= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_32:0"]
pret_w1 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_33:0"]
pret_rand_map_exp_0 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_exp_0:0"]
pret_rand_map_exp_1 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_exp_1:0"]
pret_rand_map_exp_2 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_exp_2:0"]

weight_shape = np.shape(bl_w1)
tile_shape = np.shape(pret_c1)
zero_fill = np.zeros(tile_shape)
one_fill = np.ones(tile_shape)
neg_one_fill = -np.ones(tile_shape)

# randomisation and pruning recovery
bl_w1_unroll = np.array(bl_w1)
bl_w1 = np.array(bl_w1)

rand_map_0 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_0)
rand_map_1 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_1)
rand_map_2 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_2)

pruning_mask = np.array(bl_pruning_mask).astype(bool)
init_mask = np.logical_not(pruning_mask[rand_map_0])
pruning_mask_recover = np.logical_and(pruning_mask, init_mask)[np.argsort(rand_map_0)]
pruning_mask = np.logical_or(pruning_mask, pruning_mask_recover)
init_mask = np.reshape(init_mask, tile_shape)

# expand randomisation map across tiles
rand_map_0_expand = np.tile(rand_map_0,[weight_shape[0]/tile_shape[0]])
rand_map_1_expand = np.tile(rand_map_1,[weight_shape[0]/tile_shape[0]])
rand_map_2_expand = np.tile(rand_map_2,[weight_shape[0]/tile_shape[0]])
for i in range(weight_shape[0]):
    rand_map_0_expand[i] = rand_map_0_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_0_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
    rand_map_1_expand[i] = rand_map_1_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_1_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
    rand_map_2_expand[i] = rand_map_2_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_2_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])

bl_w1_rand_0 = bl_w1_unroll[rand_map_0_expand]
bl_w1_rand_0 = np.reshape(bl_w1_rand_0, weight_shape)

w1 = bl_w1

# connect1 only
c1 = one_fill
c2 = neg_one_fill
c3 = one_fill
c4 = neg_one_fill
c5 = one_fill
c6 = neg_one_fill
c7 = one_fill
c8 = neg_one_fill
c9 = one_fill
c10 = neg_one_fill
c11 = one_fill
c12 = neg_one_fill
c13 = one_fill
c14 = neg_one_fill
c15 = one_fill
c16 = neg_one_fill
c17 = neg_one_fill
c18 = one_fill
c19 = neg_one_fill
c20 = one_fill
c21 = neg_one_fill
c22 = one_fill
c23 = neg_one_fill
c24 = one_fill
c25 = neg_one_fill
c26 = one_fill
c27 = neg_one_fill
c28 = one_fill
c29 = neg_one_fill
c30 = one_fill
c31 = neg_one_fill
c32 = one_fill

pret_w1 [...] = w1
pret_c1 [...] = c1
pret_c2 [...] = c2
pret_c3 [...] = c3
pret_c4 [...] = c4
pret_c5 [...] = c5
pret_c6 [...] = c6
pret_c7 [...] = c7
pret_c8 [...] = c8
pret_c9 [...] = c9
pret_c10[...] = c10
pret_c11[...] = c11
pret_c12[...] = c12
pret_c13[...] = c13
pret_c14[...] = c14
pret_c15[...] = c15
pret_c16[...] = c16
pret_c17[...] = c17
pret_c18[...] = c18
pret_c19[...] = c19
pret_c20[...] = c20
pret_c21[...] = c21
pret_c22[...] = c22
pret_c23[...] = c23
pret_c24[...] = c24
pret_c25[...] = c25
pret_c26[...] = c26
pret_c27[...] = c27
pret_c28[...] = c28
pret_c29[...] = c29
pret_c30[...] = c30
pret_c31[...] = c31
pret_c32[...] = c32

pret_rand_map_0[...] = np.reshape(rand_map_0, (-1,1)).astype(float)
pret_rand_map_1[...] = np.reshape(rand_map_1, (-1,1)).astype(float)
pret_rand_map_2[...] = np.reshape(rand_map_2, (-1,1)).astype(float)
p_gamma[...] = np.array(bl_gamma)
pret_means[...] = np.array(bl_means)
pret_pruning_mask[...] = np.array(bl_pruning_mask)

rand_map_0_expand = np.reshape(rand_map_0_expand, [-1,1]).astype(float)
pret_rand_map_exp_0[...] = rand_map_0_expand
rand_map_1_expand = np.reshape(rand_map_1_expand, [-1,1]).astype(float)
pret_rand_map_exp_1[...] = rand_map_1_expand
rand_map_2_expand = np.reshape(rand_map_2_expand, [-1,1]).astype(float)
pret_rand_map_exp_2[...] = rand_map_2_expand

print(np.sum(np.array(bl_pruning_mask)), np.prod(np.shape(np.array(bl_pruning_mask))))

# bn 1
bl_beta = bl["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["beta:0"]
bl_gamma = bl["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["gamma:0"]
bl_moving_mean = bl["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["moving_mean:0"]
bl_moving_variance = bl["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["moving_variance:0"]
p_beta = pretrained["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["beta:0"]
p_gamma = pretrained["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["gamma:0"]
p_moving_mean = pretrained["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["moving_mean:0"]
p_moving_variance = pretrained["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["moving_variance:0"]

p_beta[...] = np.array(bl_beta)
p_gamma[...] = np.array(bl_gamma)
p_moving_mean[...] = np.array(bl_moving_mean)
p_moving_variance[...] = np.array(bl_moving_variance)

# bn 2
bl_beta = bl["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["beta:0"]
bl_gamma = bl["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["gamma:0"]
bl_moving_mean = bl["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["moving_mean:0"]
bl_moving_variance = bl["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["moving_variance:0"]
p_beta = pretrained["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["beta:0"]
p_gamma = pretrained["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["gamma:0"]
p_moving_mean = pretrained["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["moving_mean:0"]
p_moving_variance = pretrained["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["moving_variance:0"]

p_beta[...] = np.array(bl_beta)
p_gamma[...] = np.array(bl_gamma)
p_moving_mean[...] = np.array(bl_moving_mean)
p_moving_variance[...] = np.array(bl_moving_variance)

# bn 3
bl_beta = bl["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["beta:0"]
bl_gamma = bl["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["gamma:0"]
bl_moving_mean = bl["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["moving_mean:0"]
bl_moving_variance = bl["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["moving_variance:0"]
p_beta = pretrained["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["beta:0"]
p_gamma = pretrained["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["gamma:0"]
p_moving_mean = pretrained["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["moving_mean:0"]
p_moving_variance = pretrained["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["moving_variance:0"]

p_beta[...] = np.array(bl_beta)
p_gamma[...] = np.array(bl_gamma)
p_moving_mean[...] = np.array(bl_moving_mean)
p_moving_variance[...] = np.array(bl_moving_variance)

# bn 4
bl_beta = bl["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["beta:0"]
bl_gamma = bl["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["gamma:0"]
bl_moving_mean = bl["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["moving_mean:0"]
bl_moving_variance = bl["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["moving_variance:0"]
p_beta = pretrained["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["beta:0"]
p_gamma = pretrained["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["gamma:0"]
p_moving_mean = pretrained["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["moving_mean:0"]
p_moving_variance = pretrained["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["moving_variance:0"]

p_beta[...] = np.array(bl_beta)
p_gamma[...] = np.array(bl_gamma)
p_moving_mean[...] = np.array(bl_moving_mean)
p_moving_variance[...] = np.array(bl_moving_variance)

# bn 5
bl_beta = bl["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["beta:0"]
bl_gamma = bl["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["gamma:0"]
bl_moving_mean = bl["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["moving_mean:0"]
bl_moving_variance = bl["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["moving_variance:0"]
p_beta = pretrained["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["beta:0"]
p_gamma = pretrained["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["gamma:0"]
p_moving_mean = pretrained["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["moving_mean:0"]
p_moving_variance = pretrained["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["moving_variance:0"]

p_beta[...] = np.array(bl_beta)
p_gamma[...] = np.array(bl_gamma)
p_moving_mean[...] = np.array(bl_moving_mean)
p_moving_variance[...] = np.array(bl_moving_variance)

pretrained.close()
[ 11748, 289, 20, 9078, 198, 11748, 299, 32152, 355, 45941, 198, 37659, 13, 2617, 62, 4798, 25811, 7, 400, 10126, 28, 37659, 13, 12647, 8, 198, 198, 6738, 4423, 346, 1330, 4866, 7753, 198, 198, 30073, 7753, 7203, 67, 13513, 62, 75, 31...
2.235448
17,231
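The trickiest part of the row above is the loop that spreads each per-tile permutation (rand_map_0 and friends) across the whole weight vector; note the script relies on Python 2 integer division (`/` on ints). Assuming weight_shape[0] is a multiple of tile_shape[0] (which the np.tile call there already requires), the arithmetic reduces to reusing the same intra-tile permutation shifted by each tile's base offset. A standalone Python 3 sketch of that equivalence:

import numpy as np

T, W = 4, 12                         # tile length and weight length, W % T == 0
rand_map = np.random.permutation(T)  # intra-tile permutation, values in [0, T)

expand = np.tile(rand_map, W // T)
for i in range(W):
    # In the original (Python 2), rand_map_expand[i]/T is 0 because values are < T,
    # and i%W/T parses as (i % W) / T, i.e. the tile index i // T.
    expand[i] = expand[i] + T * (i // T)

# Same thing in closed form: each tile reuses rand_map shifted by its base index.
assert np.array_equal(expand, np.concatenate([rand_map + T * k for k in range(W // T)]))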
# Natural Language Toolkit: Interface to Weka Classifiers
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Edward Loper <edloper@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT

"""
Classifiers that make use of the external 'Weka' package.
"""
from __future__ import print_function

import time
import tempfile
import os
import subprocess
import re
import zipfile
from sys import stdin

from nltk import compat
from nltk.probability import DictionaryProbDist
from nltk.internals import java, config_java
from nltk.classify.api import ClassifierI

_weka_classpath = None
_weka_search = ['.',
                '/usr/share/weka',
                '/usr/local/share/weka',
                '/usr/lib/weka',
                '/usr/local/lib/weka',]

if __name__ == '__main__':
    from nltk.classify.util import names_demo, binary_names_demo_features
    classifier = names_demo(make_classifier, binary_names_demo_features)
[ 2, 12068, 15417, 16984, 15813, 25, 26491, 284, 775, 4914, 5016, 82, 13350, 201, 198, 2, 201, 198, 2, 15069, 357, 34, 8, 5878, 12, 4626, 22879, 51, 42, 4935, 201, 198, 2, 6434, 25, 10443, 406, 3575, 1279, 276, 75, 3575, 31, 14816, ...
2.460199
402
import pandas as pd
import numpy as np
from src.si.util.util import label_gen

__all__ = ['Dataset']

    def hasLabel(self):
        """Returns True if the dataset contains labels (a dependent variable)"""
        return self.Y is not None

    def getNumFeatures(self):
        """Returns the number of features"""
        return self.X.shape[1]

    def getNumClasses(self):
        """Returns the number of label classes or 0 if the dataset has no dependent variable."""
        return len(np.unique(self.Y)) if self.hasLabel() else 0

    def writeDataset(self, filename, sep=","):
        """Saves the dataset to a file

        :param filename: The output file path
        :type filename: str
        :param sep: The fields separator, defaults to ","
        :type sep: str, optional
        """
        fullds = np.hstack((self.X, self.Y.reshape(len(self.Y), 1)))
        np.savetxt(filename, fullds, delimiter=sep)

    def toDataframe(self):
        """Converts the dataset into a pandas DataFrame"""
        if self.hasLabel():
            df = pd.DataFrame(np.hstack((self.X, self.Y.reshape(len(self.Y), 1))), columns=self.xnames[:]+[self.yname])
            #columns=np.hstack((self.xnames, self.yname)))
        else:
            df = pd.DataFrame(self.X.copy(), columns=self.xnames[:])
        return df

def summary(dataset, format='df'):
    """Returns the statistics of a dataset (mean, std, max, min)

    :param dataset: A Dataset object
    :type dataset: si.data.Dataset
    :param format: Output format ('df':DataFrame, 'dict':dictionary), defaults to 'df'
    :type format: str, optional
    """
    if format not in ["df", "dict"]:
        raise Exception("Invalid format. Choose between 'df' and 'dict'.")

    if dataset.hasLabel():
        data = np.hstack((dataset.X, dataset.Y.reshape(len(dataset.Y), 1)))
        #data = np.hstack([dataset.X, np.reshape(dataset.Y, (-1, 1))])
        columns = dataset.xnames[:] + [dataset.yname]
    else:
        data = dataset.X
        columns = dataset.xnames[:]

    stats = {}
    if type(dataset.Y[0]) is str:
        for i in range(data.shape[1]-1):  # iterate over the columns
            _means = np.mean(data[:, i], axis=0)
            _vars = np.var(data[:, i], axis=0)
            _maxs = np.max(data[:, i], axis=0)
            _mins = np.min(data[:, i], axis=0)
            stat = {"mean": _means,
                    "var": _vars,
                    "max": _maxs,
                    "min": _mins
                    }
            stats[columns[i]] = stat
    else:
        for i in range(data.shape[1]):  # iterate over the columns
            _means = np.mean(data[:, i], axis=0)
            _vars = np.var(data[:, i], axis=0)
            _maxs = np.max(data[:, i], axis=0)
            _mins = np.min(data[:, i], axis=0)
            stat = {"mean": _means,
                    "var": _vars,
                    "max": _maxs,
                    "min": _mins
                    }
            stats[columns[i]] = stat

    # _means = np.mean(data, axis=0)
    # _vars = np.var(data, axis=0)
    # _maxs = np.max(data, axis=0)
    # _mins = np.min(data, axis=0)
    # stats = {}
    # for i in range(data.shape[1]):
    #     stat = {"mean": _means[i],
    #             "var": _vars[i],
    #             "max": _maxs[i],
    #             "min": _mins[i]
    #             }
    #     stats[columns[i]] = stat

    if format == "dict":
        return stats
    else:
        return pd.DataFrame(stats)
[ 11748, 19798, 292, 355, 279, 67, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 12351, 13, 13396, 13, 22602, 13, 22602, 1330, 6167, 62, 5235, 198, 198, 834, 439, 834, 796, 37250, 27354, 292, 316, 20520, 628, 220, 220, 220, 825, 468, ...
2.024662
1,703
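The commented-out block at the end of summary() above hints at a vectorized version that the per-column loop reimplements; a minimal sketch of that one-pass form (the function name is illustrative, not part of the row's module):

import numpy as np
import pandas as pd

def summary_vectorized(data: np.ndarray, columns) -> pd.DataFrame:
    # One pass over all columns instead of a Python loop per column.
    stats = {
        col: {"mean": m, "var": v, "max": mx, "min": mn}
        for col, m, v, mx, mn in zip(
            columns,
            np.mean(data, axis=0),
            np.var(data, axis=0),
            np.max(data, axis=0),
            np.min(data, axis=0),
        )
    }
    return pd.DataFrame(stats)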
""" Module: 'display' on M5 FlowUI v1.4.0-beta """ # MCU: (sysname='esp32', nodename='esp32', release='1.11.0', version='v1.11-284-g5d8e1c867 on 2019-08-30', machine='ESP32 module with ESP32') # Stubber: 1.3.1
[ 37811, 198, 26796, 25, 705, 13812, 6, 319, 337, 20, 27782, 10080, 410, 16, 13, 19, 13, 15, 12, 31361, 198, 37811, 198, 2, 13122, 52, 25, 357, 17597, 3672, 11639, 9774, 2624, 3256, 18666, 12453, 11639, 9774, 2624, 3256, 2650, 11639, ...
2.244681
94
import json, requests, datetime
from cron_descriptor import get_description

from .dbclient import dbclient
from .JobsClient import JobsClient
from .ClustersClient import ClustersClient
from .WorkspaceClient import WorkspaceClient
from .ScimClient import ScimClient
from .LibraryClient import LibraryClient
from .HiveClient import HiveClient
from .parser import *
[ 11748, 33918, 11, 7007, 11, 4818, 8079, 198, 6738, 1067, 261, 62, 20147, 1968, 273, 1330, 651, 62, 11213, 198, 198, 6738, 764, 9945, 16366, 1330, 20613, 16366, 198, 6738, 764, 41, 8158, 11792, 1330, 19161, 11792, 198, 6738, 764, 2601, ...
4
91
# -*- coding: utf-8 -*-
# SkinWeights command and component editor
# Copyright (C) 2018 Trevor van Hoof
# Website: http://www.trevorius.com
#
# pyqt attribute sliders
# Copyright (C) 2018 Daniele Niero
# Website: http://danieleniero.com/
#
# neighbour finding algorithm
# Copyright (C) 2018 Jan Pijpers
# Website: http://www.janpijpers.com/
#
# skinningTools and UI
# Copyright (C) 2018 Perry Leijten
# Website: http://www.perryleijten.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# See http://www.gnu.org/licenses/gpl.html for a copy of the GNU General
# Public License.
# --------------------------------------------------------------------------------------
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 201, 198, 2, 17847, 1135, 2337, 3141, 290, 7515, 5464, 201, 198, 2, 15069, 357, 34, 8, 2864, 25389, 5719, 9544, 1659, 201, 198, 2, 15887, 25, 2638, 1378, 2503, 13, 83, 18...
3.338192
343
"""AVM FRITZ!Box binary sensors.""" from __future__ import annotations from collections.abc import Callable from dataclasses import dataclass from datetime import datetime, timedelta import logging from typing import Any, Literal from fritzconnection.core.exceptions import ( FritzActionError, FritzActionFailedError, FritzConnectionException, FritzInternalError, FritzServiceError, ) from fritzconnection.lib.fritzstatus import FritzStatus from homeassistant.components.sensor import ( STATE_CLASS_MEASUREMENT, STATE_CLASS_TOTAL_INCREASING, SensorEntity, SensorEntityDescription, ) from homeassistant.config_entries import ConfigEntry from homeassistant.const import ( DATA_GIGABYTES, DATA_RATE_KILOBITS_PER_SECOND, DATA_RATE_KILOBYTES_PER_SECOND, DEVICE_CLASS_TIMESTAMP, ENTITY_CATEGORY_DIAGNOSTIC, SIGNAL_STRENGTH_DECIBELS, ) from homeassistant.core import HomeAssistant from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.util.dt import utcnow from .common import FritzBoxBaseEntity, FritzBoxTools from .const import DOMAIN, DSL_CONNECTION, UPTIME_DEVIATION _LOGGER = logging.getLogger(__name__) def _uptime_calculation(seconds_uptime: float, last_value: datetime | None) -> datetime: """Calculate uptime with deviation.""" delta_uptime = utcnow() - timedelta(seconds=seconds_uptime) if ( not last_value or abs((delta_uptime - last_value).total_seconds()) > UPTIME_DEVIATION ): return delta_uptime return last_value def _retrieve_device_uptime_state( status: FritzStatus, last_value: datetime ) -> datetime: """Return uptime from device.""" return _uptime_calculation(status.device_uptime, last_value) def _retrieve_connection_uptime_state( status: FritzStatus, last_value: datetime | None ) -> datetime: """Return uptime from connection.""" return _uptime_calculation(status.connection_uptime, last_value) def _retrieve_external_ip_state(status: FritzStatus, last_value: str) -> str: """Return external ip from device.""" return status.external_ip # type: ignore[no-any-return] def _retrieve_kb_s_sent_state(status: FritzStatus, last_value: str) -> float: """Return upload transmission rate.""" return round(status.transmission_rate[0] / 1000, 1) # type: ignore[no-any-return] def _retrieve_kb_s_received_state(status: FritzStatus, last_value: str) -> float: """Return download transmission rate.""" return round(status.transmission_rate[1] / 1000, 1) # type: ignore[no-any-return] def _retrieve_max_kb_s_sent_state(status: FritzStatus, last_value: str) -> float: """Return upload max transmission rate.""" return round(status.max_bit_rate[0] / 1000, 1) # type: ignore[no-any-return] def _retrieve_max_kb_s_received_state(status: FritzStatus, last_value: str) -> float: """Return download max transmission rate.""" return round(status.max_bit_rate[1] / 1000, 1) # type: ignore[no-any-return] def _retrieve_gb_sent_state(status: FritzStatus, last_value: str) -> float: """Return upload total data.""" return round(status.bytes_sent / 1000 / 1000 / 1000, 1) # type: ignore[no-any-return] def _retrieve_gb_received_state(status: FritzStatus, last_value: str) -> float: """Return download total data.""" return round(status.bytes_received / 1000 / 1000 / 1000, 1) # type: ignore[no-any-return] def _retrieve_link_kb_s_sent_state(status: FritzStatus, last_value: str) -> float: """Return upload link rate.""" return round(status.max_linked_bit_rate[0] / 1000, 1) # type: ignore[no-any-return] def _retrieve_link_kb_s_received_state(status: FritzStatus, last_value: str) -> float: """Return download link rate.""" 
return round(status.max_linked_bit_rate[1] / 1000, 1) # type: ignore[no-any-return] def _retrieve_link_noise_margin_sent_state( status: FritzStatus, last_value: str ) -> float: """Return upload noise margin.""" return status.noise_margin[0] / 10 # type: ignore[no-any-return] def _retrieve_link_noise_margin_received_state( status: FritzStatus, last_value: str ) -> float: """Return download noise margin.""" return status.noise_margin[1] / 10 # type: ignore[no-any-return] def _retrieve_link_attenuation_sent_state( status: FritzStatus, last_value: str ) -> float: """Return upload line attenuation.""" return status.attenuation[0] / 10 # type: ignore[no-any-return] def _retrieve_link_attenuation_received_state( status: FritzStatus, last_value: str ) -> float: """Return download line attenuation.""" return status.attenuation[1] / 10 # type: ignore[no-any-return] SENSOR_TYPES: tuple[FritzSensorEntityDescription, ...] = ( FritzSensorEntityDescription( key="external_ip", name="External IP", icon="mdi:earth", value_fn=_retrieve_external_ip_state, ), FritzSensorEntityDescription( key="device_uptime", name="Device Uptime", device_class=DEVICE_CLASS_TIMESTAMP, entity_category=ENTITY_CATEGORY_DIAGNOSTIC, value_fn=_retrieve_device_uptime_state, ), FritzSensorEntityDescription( key="connection_uptime", name="Connection Uptime", device_class=DEVICE_CLASS_TIMESTAMP, entity_category=ENTITY_CATEGORY_DIAGNOSTIC, value_fn=_retrieve_connection_uptime_state, ), FritzSensorEntityDescription( key="kb_s_sent", name="Upload Throughput", state_class=STATE_CLASS_MEASUREMENT, native_unit_of_measurement=DATA_RATE_KILOBYTES_PER_SECOND, icon="mdi:upload", value_fn=_retrieve_kb_s_sent_state, ), FritzSensorEntityDescription( key="kb_s_received", name="Download Throughput", state_class=STATE_CLASS_MEASUREMENT, native_unit_of_measurement=DATA_RATE_KILOBYTES_PER_SECOND, icon="mdi:download", value_fn=_retrieve_kb_s_received_state, ), FritzSensorEntityDescription( key="max_kb_s_sent", name="Max Connection Upload Throughput", native_unit_of_measurement=DATA_RATE_KILOBITS_PER_SECOND, icon="mdi:upload", entity_category=ENTITY_CATEGORY_DIAGNOSTIC, value_fn=_retrieve_max_kb_s_sent_state, ), FritzSensorEntityDescription( key="max_kb_s_received", name="Max Connection Download Throughput", native_unit_of_measurement=DATA_RATE_KILOBITS_PER_SECOND, icon="mdi:download", entity_category=ENTITY_CATEGORY_DIAGNOSTIC, value_fn=_retrieve_max_kb_s_received_state, ), FritzSensorEntityDescription( key="gb_sent", name="GB sent", state_class=STATE_CLASS_TOTAL_INCREASING, native_unit_of_measurement=DATA_GIGABYTES, icon="mdi:upload", value_fn=_retrieve_gb_sent_state, ), FritzSensorEntityDescription( key="gb_received", name="GB received", state_class=STATE_CLASS_TOTAL_INCREASING, native_unit_of_measurement=DATA_GIGABYTES, icon="mdi:download", value_fn=_retrieve_gb_received_state, ), FritzSensorEntityDescription( key="link_kb_s_sent", name="Link Upload Throughput", native_unit_of_measurement=DATA_RATE_KILOBITS_PER_SECOND, icon="mdi:upload", value_fn=_retrieve_link_kb_s_sent_state, connection_type=DSL_CONNECTION, ), FritzSensorEntityDescription( key="link_kb_s_received", name="Link Download Throughput", native_unit_of_measurement=DATA_RATE_KILOBITS_PER_SECOND, icon="mdi:download", value_fn=_retrieve_link_kb_s_received_state, connection_type=DSL_CONNECTION, ), FritzSensorEntityDescription( key="link_noise_margin_sent", name="Link Upload Noise Margin", native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS, icon="mdi:upload", 
value_fn=_retrieve_link_noise_margin_sent_state, connection_type=DSL_CONNECTION, ), FritzSensorEntityDescription( key="link_noise_margin_received", name="Link Download Noise Margin", native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS, icon="mdi:download", value_fn=_retrieve_link_noise_margin_received_state, connection_type=DSL_CONNECTION, ), FritzSensorEntityDescription( key="link_attenuation_sent", name="Link Upload Power Attenuation", native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS, icon="mdi:upload", value_fn=_retrieve_link_attenuation_sent_state, connection_type=DSL_CONNECTION, ), FritzSensorEntityDescription( key="link_attenuation_received", name="Link Download Power Attenuation", native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS, icon="mdi:download", value_fn=_retrieve_link_attenuation_received_state, connection_type=DSL_CONNECTION, ), )
[ 37811, 10116, 44, 8782, 2043, 57, 0, 14253, 13934, 15736, 526, 15931, 198, 6738, 11593, 37443, 834, 1330, 37647, 198, 198, 6738, 17268, 13, 39305, 1330, 4889, 540, 198, 6738, 4818, 330, 28958, 1330, 4818, 330, 31172, 198, 6738, 4818, 80...
2.48489
3,640
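Each SENSOR_TYPES entry in the row above pairs a key with a value_fn callback taking (status, last_value). A sketch of how such descriptions are typically polled; FritzSensorEntityDescription itself (a SensorEntityDescription subclass carrying the value_fn and connection_type fields) sits in a part of the original module that this row does not show, and `status` stands in for a connected fritzconnection FritzStatus:

def poll_sensors(status, last_values: dict) -> dict:
    # Evaluate every declared sensor callback once, feeding back the last value
    # (the uptime sensors use it to suppress jitter below UPTIME_DEVIATION).
    readings = {}
    for description in SENSOR_TYPES:
        last = last_values.get(description.key)
        readings[description.key] = description.value_fn(status, last)
    return readings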
from Ifc.ClassRegistry import ifc_class, ifc_abstract_class, ifc_fallback_class


class Omitted:
    """
    Marked with '*', it states that some supertype had defined that attribute,
    but in the subtype it is a derived (calculated) value, so it no longer
    makes sense to explicitly assign a value to it.
    """
    # TODO: Haven't tried if it can be handled 'just as expected'


# class-level, enough to reference, no need to create multiple instances (doesn't hurt though)
omitted = Omitted()

# vim: set sw=4 ts=4 et:
[ 6738, 1002, 66, 13, 9487, 8081, 4592, 1330, 611, 66, 62, 4871, 11, 611, 66, 62, 397, 8709, 62, 4871, 11, 611, 66, 62, 7207, 1891, 62, 4871, 628, 628, 198, 4871, 440, 3291, 25, 198, 220, 220, 220, 37227, 198, 220, 220, 220, 2940,...
3.149701
167
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import logging
import os

from io_monitor.constants import DOMAIN
from io_monitor.utils.data_window import DataCollectionWindow

LOG = logging.getLogger(DOMAIN)
[ 2, 43907, 25, 7400, 11338, 28, 19, 6482, 10394, 28, 19, 2705, 8658, 11338, 28, 19, 198, 2, 198, 2, 15069, 357, 66, 8, 1584, 3086, 5866, 11998, 11, 3457, 13, 198, 2, 198, 2, 30628, 55, 12, 34156, 12, 33234, 7483, 25, 24843, 12, ...
3.030612
98
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, CTRL, BERT, RoBERTa, XLNet).
GPT, GPT-2 and CTRL are fine-tuned using a causal language modeling (CLM) loss. BERT and RoBERTa are fine-tuned
using a masked language modeling (MLM) loss. XLNet is fine-tuned using a permutation language modeling (PLM) loss.
"""

import logging
import math
import os
from dataclasses import dataclass, field
from typing import Optional

import torch
from transformers.optimization import AdamW, get_linear_schedule_with_warmup

from transformers import (
    CONFIG_MAPPING,
    MODEL_WITH_LM_HEAD_MAPPING,
    AutoConfig,
    AutoModelWithLMHead,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    DataCollatorForPermutationLanguageModeling,
    HfArgumentParser,
    # LineByLineTextDatasetLabels,
    LineByLineTextDataset,
    PreTrainedTokenizer,
    TextDataset,
    Trainer,
    TrainingArguments,
    set_seed,
)
import ray
from ray import tune
from transformers.file_utils import is_torch_tpu_available
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
from ray.tune.schedulers import PopulationBasedTraining
from ray.tune import CLIReporter

# if is_wandb_available():
#     import wandb

ray.shutdown()
ray.init(log_to_driver=True, ignore_reinit_error=True)

logger = logging.getLogger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    file_path = args.eval_data_file if evaluate else args.train_data_file
    if args.line_by_line:
        return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        # return LineByLineTextDatasetLabels(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
    else:
        return TextDataset(
            tokenizer=tokenizer,
            file_path=file_path,
            block_size=args.block_size,
            overwrite_cache=args.overwrite_cache,
            cache_dir=cache_dir,
        )


class TuneTransformerTrainer(Trainer):


def recover_checkpoint(tune_checkpoint_dir, model_name=None):
    if tune_checkpoint_dir is None or len(tune_checkpoint_dir) == 0:
        return model_name
    # Get subdirectory used for Huggingface.
    subdirs = [
        os.path.join(tune_checkpoint_dir, name)
        for name in os.listdir(tune_checkpoint_dir)
        if os.path.isdir(os.path.join(tune_checkpoint_dir, name))
    ]
    # There should only be 1 subdir.
    assert len(subdirs) == 1, subdirs
    return subdirs[0]


# def train_transformer(config, checkpoint_dir=None):
#     train_dataset, eval_dataset = get_datasets(config)
#
#     training_args = TrainingArguments(
#         output_dir=tune.get_trial_dir(),
#         learning_rate=config["learning_rate"],
#         do_train=True,
#         do_eval=True,
#         evaluate_during_training=True,
#         # Run eval after every epoch.
#         eval_steps=(len(train_dataset) // config["per_gpu_train_batch_size"]) + 1,
#         # We explicitly set save to 0, and do checkpointing in evaluate instead
#         save_steps=0,
#         num_train_epochs=config["num_epochs"],
#         max_steps=config["max_steps"],
#         per_device_train_batch_size=config["per_gpu_train_batch_size"],
#         per_device_eval_batch_size=config["per_gpu_val_batch_size"],
#         warmup_steps=0,
#         weight_decay=config["weight_decay"],
#         logging_dir="./logs",
#     )
#
#     model_name_or_path = recover_checkpoint(checkpoint_dir, config["model_name"])
#     # num_labels = glue_tasks_num_labels[config["task_name"]]
#
#     config = AutoConfig.from_pretrained(
#         model_name_or_path,
#         num_labels=num_labels,
#         finetuning_task=task_name,
#     )
#     model = AutoModelForSequenceClassification.from_pretrained(
#         model_name_or_path,
#         config=config,
#     )
#
#     # Use our modified TuneTransformerTrainer
#     tune_trainer = TuneTransformerTrainer(
#         model=model,
#         args=training_args,
#         train_dataset=train_dataset,
#         eval_dataset=eval_dataset,
#         compute_metrics=utils.build_compute_metrics_fn(task_name),
#     )
#     tune_trainer.train(model_name_or_path)

if __name__ == "__main__":
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    config = {
        # These 3 configs below were defined earlier
        "model_name": model_args.model_name_or_path,
        "task_name": "CLM",
        "data_dir": "",
        "per_gpu_val_batch_size": 32,
        "per_gpu_train_batch_size": tune.choice([16, 32, 64]),
        "learning_rate": tune.uniform(1e-5, 5e-5),
        "weight_decay": tune.uniform(0.0, 0.3),
        "num_epochs": tune.choice([2, 3, 4, 5]),
        "max_steps": -1,  # We use num_epochs instead.
        "wandb": {
            "project": "pbt_transformers",
            "reinit": True,
            "allow_val_change": True
        }
    }

    logger.info(config)

    scheduler = PopulationBasedTraining(
        time_attr="training_iteration",
        metric="eval_loss",
        mode="min",
        perturbation_interval=2,
        hyperparam_mutations={
            "weight_decay": lambda: tune.uniform(0.0, 0.3).func(None),
            "learning_rate": lambda: tune.uniform(1e-5, 5e-5).func(None),
            "per_gpu_train_batch_size": [16, 32, 64],
        })

    reporter = CLIReporter(
        parameter_columns={
            "weight_decay": "w_decay",
            "learning_rate": "lr",
            "per_gpu_train_batch_size": "train_bs/gpu",
            "num_epochs": "num_epochs"
        },
        metric_columns=[
            "eval_acc", "eval_loss", "epoch", "training_iteration"
        ])

    analysis = tune.run(
        train_transformer,
        resources_per_trial={
            "cpu": 1,
            "gpu": 1
        },
        config=config,
        num_samples=3,
        scheduler=scheduler,
        keep_checkpoints_num=3,
        checkpoint_score_attr="training_iteration",
        progress_reporter=reporter,
        local_dir="./ray_results/",
        name="tune_trans")

    best_config = analysis.get_best_config(metric="eval_loss", mode="min")
    print(best_config)
from net_common import * import struct import sys if __name__ == "__main__": sock = get_connected_local_socket() path = encodeString('/dev/shm/exampleDir') # path = encodeString('/dev/null') sock.sendall(bytearray(b'\x0A')) # HASH request # sock.sendall(bytearray(b'\x01')) # choose MD5 algorithm sock.sendall(bytearray(b'\x06')) # choose SHA3-224 algorithm sock.sendall(getDirHashOpts(withNames=True,ignoreUnixHiddenFiles=False)) # send dirHashOpts byte (unused for regular files) sock.sendall(struct.pack("@H", len(path))) # len of path as unsigned short sock.sendall(path) resp = sock.recv(1) # response first byte: \x00 OK or \xFF ERROR if resp != b'\x00': print("Error byte received, errno is:", struct.unpack("@i", sock.recv(4))[0]) sys.exit(0) # print(toHex(sock.recv(16))) # 128 bit (16 byte) md5 digest size print(toHex(sock.recv(28))) # 224 bit (28 byte) sha3-224 digest size sock.close()
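# The request framing above (opcode byte, algorithm byte, dirHashOpts byte, an
# unsigned-short path length, then the path itself) can also be packed up front.
# build_hash_request is not part of net_common -- it is only an illustration of
# the same wire format, and the 0x00 options byte is a placeholder for whatever
# getDirHashOpts() would return.
import struct

def build_hash_request(path: bytes, algo: bytes, dir_opts: bytes) -> bytes:
    """Pack a complete HASH request for a single send."""
    return b'\x0A' + algo + dir_opts + struct.pack("@H", len(path)) + path

payload = build_hash_request(b'/dev/shm/exampleDir', b'\x06', b'\x00')  # SHA3-224
print(payload.hex())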
# -*- coding: utf-8 -*- # Generated by Django 1.11.13 on 2018-09-11 10:05 from __future__ import unicode_literals import config.s3 from django.db import migrations, models
"""Validate that number of threads in thread pools is set to 1.""" import numexpr import blosc import threadpoolctl # APIs that return previous number of threads: assert numexpr.set_num_threads(2) == 1 assert blosc.set_nthreads(2) == 1 for d in threadpoolctl.threadpool_info(): assert d["num_threads"] == 1, d
#!/usr/bin/env python
"""A simple viewer for Stokes patterns based on two far-field pattern files.
(Possibly based on one FF pattern file if it has two requests: one for each
polarization channel.)"""
import os
import argparse
import numpy
import matplotlib.pyplot as plt
from antpat.reps.sphgridfun.tvecfun import TVecFields
from antpat.radfarfield import RadFarField
from antpat.dualpolelem import DualPolElem


FEKOsuffix = 'ffe'
GRASPsuffix = 'swe'
NECsuffix = 'out'


def Jones2Stokes(Jones):
    """Convert a Jones matrix to a Stokes vector. This assumes a dual-pol antenna
    receiving unpolarized unit-valued radiation, i.e. incoming Stokes = (1, 0, 0, 0)."""
    brightmat = numpy.matmul(Jones, numpy.swapaxes(numpy.conjugate(Jones), -1, -2))
    StokesI = numpy.real(brightmat[..., 0, 0]+brightmat[..., 1, 1])
    StokesQ = numpy.real(brightmat[..., 0, 0]-brightmat[..., 1, 1])
    StokesU = numpy.real(brightmat[..., 0, 1]+brightmat[..., 1, 0])
    StokesV = numpy.imag(brightmat[..., 0, 1]-brightmat[..., 1, 0])
    return StokesI, StokesQ, StokesU, StokesV


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("p_chan_file", help='Filename of polarization channel p')
    parser.add_argument("q_chan_file", help='Filename of polarization channel q')
    parser.add_argument("freq", nargs='?', type=float, help="Frequency in Hertz")
    args = parser.parse_args()

    if args.p_chan_file.endswith(FEKOsuffix):
        plotStokes_fromFEKOfiles(args.p_chan_file, args.q_chan_file, args.freq)
    elif args.p_chan_file.endswith(GRASPsuffix):
        print("Not implemented yet.")
    elif args.p_chan_file.endswith(NECsuffix):
        print("Not implemented yet.")
    else:
        print("Far-field pattern file type not known")
        exit(1)
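# A quick sanity check of Jones2Stokes (run in the same module): an identity Jones
# matrix, i.e. a lossless dual-pol antenna, should turn unpolarized unit-power
# radiation into Stokes (2, 0, 0, 0) -- intensity in both channels, no Q/U/V.
import numpy

jones_identity = numpy.eye(2, dtype=complex)
print(Jones2Stokes(jones_identity))  # -> (2.0, 0.0, 0.0, 0.0)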
'''Some helper functions for PyTorch, including:
    - progress_bar: progress bar that mimics xlua.progress.
    - set_lr : set the learning rate
    - clip_gradient : clip gradient
'''

import os
import sys
import time
import math

import torch
import torch.nn as nn
import torch.nn.init as init
from torch.autograd import Function

if sys.platform == 'win32':
    term_width = 80
else:
    _, term_width = os.popen('stty size', 'r').read().split()
term_width = int(term_width)

TOTAL_BAR_LENGTH = 30.
last_time = time.time()
begin_time = last_time

# [==>........ 19/225 ...........]  | Loss: 1.961 | Acc: 22.000% (537/2432)
from __future__ import absolute_import, division, print_function import logging import sys logging.basicConfig( stream=sys.stdout, level=logging.DEBUG, format='%(asctime)s %(name)s-%(levelname)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S') import numpy as np import utils logger = logging.getLogger("indexconverter")
print('\033[7;30mOlá mundo\033[m!!!')
# Copyright 2020 The Cirq Developers # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import FrozenSet, Callable, List, Sequence, Any, Union, Dict import numpy as np import networkx as nx import cirq from cirq import _compat, GridQubit, LineQubit from cirq.ops import NamedQubit from cirq_pasqal import ThreeDQubit, TwoDQubit, PasqalGateset
import os import sys import numpy as np import iotbx.phil from cctbx import uctbx from dxtbx.model.experiment_list import ExperimentListFactory from scitbx.math import five_number_summary import dials.util from dials.array_family import flex from dials.util import Sorry, tabulate help_message = """ Examples:: dials.show models.expt dials.show image_*.cbf dials.show observations.refl """ phil_scope = iotbx.phil.parse( """\ show_scan_varying = False .type = bool .help = "Whether or not to show the crystal at each scan point." show_shared_models = False .type = bool .help = "Show which models are linked to which experiments" show_all_reflection_data = False .type = bool .help = "Whether or not to print individual reflections" show_intensities = False .type = bool show_centroids = False .type = bool show_profile_fit = False .type = bool show_flags = False .type = bool .help = "Show a summary table of reflection flags" show_identifiers = False .type = bool .help = "Show experiment identifiers map if set" image_statistics{ show_corrected = False .type = bool .help = "Show statistics on the distribution of values in each corrected image" show_raw = False .type = bool .help = "Show statistics on the distribution of values in each raw image" } max_reflections = None .type = int .help = "Limit the number of reflections in the output." """, process_includes=True, ) def _create_flag_count_table(table): """Generate a summary table of flag values in a reflection table. :param table: A reflection table :returns: A string of the formatted flags table """ # Calculate the counts of entries that match each flag numpy_flags = table["flags"].as_numpy_array() flag_count = { flag: np.sum(numpy_flags & value != 0) for value, flag in table.flags.values.items() } # Work out the numeric-value order of the flags flag_order = sorted(table.flags.values.values(), key=lambda x: x.real) # Build the actual table flag_rows = [["Flag", "Count", "%"]] max_count_len = max(5, len(str(max(flag_count.values())))) last_flag = None for flag in flag_order: indent = "" # As a hint for reading, indent any 'summary' flags. # A summary flag is any flag which overlaps with the previous one. if last_flag and (last_flag.real & flag.real): indent = " " last_flag = flag # Add the row to the table we're building flag_rows.append( [ indent + flag.name, "{:{:d}d}".format(flag_count[flag], max_count_len), f"{100 * flag_count[flag] / len(table):5.01f}", ] ) # Build the array of output strings text = [] text.append("Reflection flags:") text.append(tabulate(flag_rows, headers="firstrow")) return "\n".join(text) if __name__ == "__main__": run()
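# The bitmask counting in _create_flag_count_table is easier to see on a toy
# array: each reflection's flags word is AND-ed against every flag's bit value
# (plain ints stand in here for the dials flag enum).
import numpy as np

toy_flags = np.array([0b001, 0b011, 0b110, 0b100])  # one flags word per reflection
flag_values = {0b001: "indexed", 0b010: "integrated", 0b100: "bad"}

for value, name in flag_values.items():
    print(name, np.sum(toy_flags & value != 0))
# indexed 2, integrated 2, bad 2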
# -*- coding: utf-8 -*- from enum import Enum, IntEnum, unique import os APP_NAME = "mine2farm" NETWORK_NAME = "CenterAxis" LOG_LEVEL_CONSOLE = "WARNING" LOG_LEVEL_FILE = "INFO" APP_FOLDER = os.getenv("JESA_MINE2FARM_HOME", "C:/GitRepos/mine2farm/") LOG_FOLDER = APP_FOLDER + "app/log/" LOG_FILE = "%(asctime)_" + APP_NAME + ".log" OUTPUT_FOLDER = "%s%s" % (APP_FOLDER, "outputs/") CANVAS_URL = "http://127.0.0.1/canvas.xlsm" # DB DB_NAME = None DB_HOST = "172.29.161.208" DB_PORT = 5006 DATA_SERVICE_ADD = "172.29.161.208" DATA_SERVICE_PORT = 5001 # Results DB_RESULT_NAME = "%s_results" % DB_NAME if DB_NAME is not None else None DB_DETAILED_RESULT_COLLECTION_NAME = "detailed" DB_GLOBAL_RESULT_COLLECTION_NAME = "global" DB_GLOBAL_BEST_RESULT_COLLECTION_NAME = "global_best" DB_DETAILED_BEST_RESULT_COLLECTION_NAME = "detailed_best" DB_SENSITIVITY_COLLECTION_NAME = "sensitivity" RESULT_BATCHES_SIZE = 25 HEAD_DATA_BITS = 17 DB_NAME_BITS = 20 RANDOMIZE_RESULTS = False # RabbitMQ RABBITMQ_SERVER = "localhost" RABBITMQ_SIMULATOR_QUEUE_NAME = "SIMULATE" RABBITMQ_CYCLE = 3 RABBITMQ_DETAILED_RESULT_QUEUE_NAME = "SAVE_DETAIL" RABBITMQ_GLOBAL_RESULT_QUEUE_NAME = "SAVE_GLOBAL" RABBITMQ_MAX_WORKER = RABBITMQ_CYCLE RABBITMQ_PATH = "C:\\Program Files\\RabbitMQ Server\\rabbitmq_server-3.8.1\\sbin" # Memcached MEMCACHED_SERVER = 'localhost' MEMCACHED_PORT = 11211 # Dashboard DB_LOAD_FROM_SERVICE = True # Monitoring MONITORING_APP_NAME = "mine2farm_monitor" MONITORING_SERVER = "172.29.161.208" MONITORING_PORT = 5002 MONITORING_DB_NAME = "task_history" MONITORING_COLLECTION_HISTORY_NAME = "task" MONITORING_COLLECTION_HISTORY_BEST_NAME = "best_scenarios_history" MONITORING_STEP = 1 MONITORING_NB_PAGE = 10 # Mongodb-bi MONGODB_BI_PATH = "C:\\Program Files\\MongoDB\\Connector for BI\\2.13\\bin" # Mongodb MONGO_SERVER_PATH = "C:\\Program Files\\MongoDB\\Server\\4.0\\bin" # params LOGISTICS_LP = False MODE_DEBUG = False GRANUL_RELAX = False # Model MONIKER_SEPARATOR = "/" WACC = 0.1 T0 = 2020 TMAX = 2031 PIPELINE_SCHEMA = { PipelineLayer.COMMON: { "type": PipelineType.COMMON, "dico": ["location", "opex", "unit", "currency", "output", "names", "products"] }, PipelineLayer.MINE: { "type": PipelineType.PRODUCER, "dico": ["mine.name", "mine.extraction", "mine.quality", "mine.capex"], "options": "mining_options", "production": "mining_specific_production", "opex": "mining_opex___specific_consumptions", "capex": "mining_capex", "priority_mines": "prioritymines" }, PipelineLayer.BENEFICIATION: { "type": PipelineType.PRODUCER, "dico": ["beneficiation.name", "beneficitation.process", "beneficitation.quality", "beneficitation.capex"], "options": "beneficiation_options", "production": "beneficiation_production", "opex": "beneficiation_opex___specific_consumptions", "capex": "beneficiation_capex" }, PipelineLayer.SAP: { "type": PipelineType.PRODUCER, "dico": ["sap.name", "sap.process", "sap.product", "sap.capex", "sap.capacity[kt]"], "options": "sap___power_plant_options", "production": "sap___power_plant_production", "opex": "sap___power_plant_opex___specific_consumptions", "capex": "sap___power_plant_capex", "product_type": "sap.product" }, PipelineLayer.PAP: { "type": PipelineType.PRODUCER, "dico": ["pap.name", "pap.process", "pap.product", "pap.capex", "pap.size[kt]", "pap.input"], "options": "pap_options", "production": "pap_production", "opex": "pap_opex___specific_consumptions", "capex": "pap_capex", "product_type": "pap.product" }, PipelineLayer.GRANULATION: { "type": PipelineType.PRODUCER, "dico": ["granulation.name", 
"granulation.process", "granulation.product", "granulation.capex", "granulation.input"], "options": "granulation_options", "production": "granulation_production", "opex": "granulation_opex", "capex": "granulation_capex" }, PipelineLayer.LOGISTICS: { "type": PipelineType.TRANSPORT, "dico": ["logistics.name", "logistics.process", "logistics.product", "logistics.capex"], "options": "logistics_options", "production": None, "opex": "logistics_opex", "capex": "logistics_capex" }, PipelineLayer.RAW_MATERIALS: { "type": PipelineType.PRICE, "data": "raw_materials" }, PipelineLayer.SALES_PLAN: { "type": PipelineType.SALES, "data": "sales_plan" }, PipelineLayer.UNIT_CONVERSION_MATRIX: { "type": PipelineType.COMMON, "data": "conv_matrix" }, } SUPPLY_CHAIN = "mine2port" DEPARTURE_ARRIVAL = {SUPPLY_CHAIN: (PipelineLayer.MINE), "sap2pap": (PipelineLayer.SAP, PipelineLayer.PAP)} COMBO_NODES = { PipelineLayer.MINE_BENEFICIATION: { "url": "mining_wp_connections", "upstream_layer": PipelineLayer.MINE, "downstream_layer": PipelineLayer.BENEFICIATION } } COMBO_NODES_SEPARATION = "--" SCENARIO_GEN_TYPE = ScenarioGeneratorType.FROM_OPTIONS PIPELINE_METADATA = { PipelineLayer.MINE: { "type": PipelineType.PRODUCER, "production": ["Name", "Extraction", "Quality", "Unit"], "opex": ["Name", "Extraction", "Capacity", "Item", "Unit"], "capex": ["Name", "Extraction", "Capacity", "Item", "Unit", "CAPEX"] }, PipelineLayer.BENEFICIATION: { "type": PipelineType.PRODUCER, "production": ["Process", "InputQuality", "OutputQuality", "Humidity", "Unit"], "opex": ["Process", "InputQuality", "OutputQuality", "Item", "Unit"], "capex": ["Name", "Process", "Capacity", "Item", "Unit", "CAPEX"] }, PipelineLayer.SAP: { "type": PipelineType.PRODUCER, "production": ["Location", "Process", "Product", "Unit"], "opex": ["Location", "Process", "Item", "Unit"], "capex": ["Location", "Process", "Capacity", "Item", "Unit", "CAPEX"] }, PipelineLayer.PAP: { "type": PipelineType.PRODUCER, "production": ["Process", "Input", "Product", "Unit"], "opex": ["Location", "Process", "Capacity", "Input", "Item", "Product", "Unit"], "capex": ["Location", "Process", "Capacity", "Item", "Unit", "CAPEX"] }, PipelineLayer.GRANULATION: { "type": PipelineType.PRODUCER, "production": ["Process", "Input", "Product", "Unit"], "opex": ["Location", "ProductionSite", "Process", "Capacity", "Product", "Item", "Unit"], "capex": ["Location", "ProductionSite", "Product", "Process", "Capacity", "Item", "Unit", "CAPEX"] }, PipelineLayer.LOGISTICS: { "type": PipelineType.TRANSPORT, "opex": ["Upstream", "Downstream", "Method", "Product", "Capacity", "Item", "Unit"], "capex": ["Upstream", "Downstream", "Method", "Product", "Capacity", "Item", "Unit", "CAPEX"] }, PipelineLayer.RAW_MATERIALS: { "type": PipelineType.PRICE, "columns": ["Item", "Unit"] }, PipelineLayer.SALES_PLAN: { "type": PipelineType.PRICE, "columns": ["Type", "Product", "Unit"] }, PipelineLayer.UNIT_CONVERSION_MATRIX: { "type": PipelineType.COMMON, "columns": ["Initial Unit", "Uniform Unit", "Conversion Rate"] }, } SHUFFLE_LEVELS = { PipelineLayer.MINE: ShuffleLevel.UNDEFINED, PipelineLayer.BENEFICIATION: ShuffleLevel.UNDEFINED, PipelineLayer.SAP: ShuffleLevel.SHUFFLE_WITH_UNNAMED, PipelineLayer.PAP: ShuffleLevel.SHUFFLE_WITH_UNNAMED, PipelineLayer.GRANULATION: ShuffleLevel.UNDEFINED, PipelineLayer.LOGISTICS: ShuffleLevel.UNDEFINED, PipelineLayer.MINE_BENEFICIATION: ShuffleLevel.UNDEFINED }
from django.db import models # Create your models here.
import base64
import io

import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output
import numpy as np
import tensorflow as tf
from PIL import Image

from constants import CLASSES

import yaml

with open('app.yaml') as yaml_data:
    params = yaml.safe_load(yaml_data)

IMAGE_WIDTH = params['IMAGE_WIDTH']
IMAGE_HEIGHT = params['IMAGE_HEIGHT']
PATH_MODEL = params['PATH_MODEL']

# Load DNN model
classifier = tf.keras.models.load_model(PATH_MODEL)


def classify_image(image, model, image_box=None):
    """Classify image by model

    Parameters
    ----------
    content: image content
    model: tf/keras classifier

    Returns
    -------
    class id returned by model classifier
    """
    images_list = []
    image = image.resize((IMAGE_WIDTH, IMAGE_HEIGHT), box=image_box)
    # box argument clips image to (x1, y1, x2, y2)
    image = np.array(image)
    images_list.append(image)

    return model.predict_classes(np.array(images_list))


app = dash.Dash('Traffic Signs Recognition', external_stylesheets=[dbc.themes.BOOTSTRAP])

pre_style = {
    'whiteSpace': 'normal',
    'wordBreak': 'break-all',
}

# Define application layout
navbar = dbc.NavbarSimple(
    children=[
        dbc.DropdownMenu(
            children=[
                dbc.DropdownMenuItem('Réseau de Neurones', header=True),
                dbc.DropdownMenuItem('SVM', href="#"),
            ],
            nav=True,
            in_navbar=True,
            label='Modèle',
        ),
    ],
    brand="Menu",
    brand_href="#",
    color="#d90054",
    dark=True
)

cards = html.Div(
    [
        dbc.Card(
            dbc.CardBody(
                [
                    html.H5("Présentation", className="card-title"),
                    html.P(
                        [
                            'Cette application a pour but de réaliser des modèles capables de classer des '
                            'panneaux de signalisation allemands à partir d\'une image. L\'application '
                            'fonctionne de la manière suivante : vous déposez une image à l\'emplacement '
                            'indiqué et la prédiction du modèle apparaît immédiatement en dessous. En haut '
                            'à droite vous pouvez sélectionner le modèle que vous voulez tester.',
                        ],
                        className='card-text',
                    ),
                ]
            ),
            className='w-75 mb-3',
            color='#f1cbd1',
            outline='Black',
            style={'margin-top': '75px', 'margin-left': '185px'},
        ),
    ]
)

app.layout = html.Div([
    html.Div([navbar]),
    html.Div(cards),
    dcc.Upload(
        id='bouton-chargement',
        children=html.Div([
            'Cliquer-déposer ou ',
            html.A('sélectionner une image')
        ]),
        style={
            'width': '50%',
            'height': '60px',
            'lineHeight': '60px',
            'borderWidth': '1px',
            'borderStyle': 'dashed',
            'borderRadius': '5px',
            'textAlign': 'center',
            'margin-top': '75px',
            'margin-left': '370px',
        }
    ),
    html.Div(id='mon-image'),
    html.Div(id='ma-zone-resultat')
])

# Manage interactions with callbacks

# Start the application
if __name__ == '__main__':
    app.run_server(debug=True)
import json import requests from django.conf import settings
print(int(input(""))**0.5)
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, List, Optional from azure.core.exceptions import HttpResponseError import msrest.serialization
from django.shortcuts import render, redirect, reverse
from . import forms, models
from django.db.models import Sum
from django.contrib.auth.models import Group
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required, user_passes_test
from django.core.mail import send_mail
from django.conf import settings

# for showing the signup/login buttons for teacher and student (by sumit)
# for checking whether a user is a teacher, a student or an admin (by sumit)
# for the admin dashboard (by sumit)
# for the teacher section managed by the admin (by sumit)
# for the student section managed by the admin (by sumit)
# attendance related views (by sumit)
# fee related views for the admin (by sumit)
# notice related views (by sumit)
# for the TEACHER LOGIN SECTION (by sumit)
# FOR STUDENTS AFTER THEIR LOGIN (by sumit)


# for aboutus and contact us (by sumit)
def aboutus_view(request):
    return render(request, 'school/aboutus.html')


def contactus_view(request):
    sub = forms.ContactusForm()
    if request.method == 'POST':
        sub = forms.ContactusForm(request.POST)
        if sub.is_valid():
            email = sub.cleaned_data['Email']
            name = sub.cleaned_data['Name']
            message = sub.cleaned_data['Message']
            send_mail(str(name) + ' || ' + str(email), message,
                      settings.EMAIL_HOST_USER, ['wapka1503@gmail.com'],
                      fail_silently=False)
            return render(request, 'school/contactussuccess.html')
    return render(request, 'school/contactus.html', {'form': sub})
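# The comments above mention checking whether the logged-in user is a teacher, a
# student or an admin. With django.contrib.auth groups, that check is typically a
# user_passes_test predicate; the group names, template and login_url below are
# assumptions, since the actual predicates are not shown in this file.
def is_teacher(user):
    return user.groups.filter(name='TEACHER').exists()


def is_student(user):
    return user.groups.filter(name='STUDENT').exists()


@login_required(login_url='adminlogin')
@user_passes_test(is_teacher)
def teacher_dashboard_view(request):
    return render(request, 'school/teacher_dashboard.html')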
import os import numpy as np from paddle import fluid from ltr.models.bbreg.atom import atom_resnet50, atom_resnet18 from ltr.models.siamese.siam import siamfc_alexnet from ltr.models.siam.siam import SiamRPN_AlexNet, SiamMask_ResNet50_sharp, SiamMask_ResNet50_base from pytracking.admin.environment import env_settings from pytracking.features.featurebase import MultiFeatureBase from pytracking.libs import TensorList from pytracking.libs.paddle_utils import n2p
#!/usr/bin/env python2 import sys import random import os.path import shutil import commands import types import math #gsPath = '/usr/local/bin/gs' gsPath = 'gs' logFile = '/dev/null' #logFile = 'plot.log' #--- class PsPlot(fname, pageHeader, pageSubHeader, plotsPerPage) # #--- Main # def main(): tMin = 0 tMax = 100000 stateList = [0,1,2,2,3,3,3,3,4] fname = 'sched.txt' if len(sys.argv) == 2: fname = sys.argv[1] elif len(sys.argv) == 3: tMin = int(sys.argv[1]) tMax = int(sys.argv[2]) elif len(sys.argv) == 4: tMin = int(sys.argv[1]) tMax = int(sys.argv[2]) fname = sys.argv[3] elif len(sys.argv) != 1: print 'USAGE: psPlot.py [tMin tMax] [fname]' sys.exit(1) print 'tMin,tMax: ', tMin, tMax, 'fname: ', fname p = PsPlot('./p', 'Header', 'SubHeader', 1) fromStateList = [] toStateList = [] time1List = [] time2List = [] indx = 0 oldTime = 0 fin = open(fname, 'r') for inputLine in fin: inputLine = inputLine.replace(' ','') inputLine = inputLine.replace("'", '') i1 = inputLine.find('(') i2 = inputLine.find(')') inputList = inputLine[i1+1:i2-1].split(',') s1 = stateList[int(inputList[0])] s2 = stateList[int(inputList[1])] t = int(inputList[2]) if indx != 0 and t >= tMin and t <= tMax: fromStateList.append(s1) toStateList.append(s2) time1List.append(oldTime) time2List.append(t) oldTime = t indx += 1 p.SetPlot(tMin, tMax, 0, 0, 2, 0, 'Time', 'Socket/State', 'Chavey\'s Plot') state = 0 while state <= 4: t1List = [] t2List = [] sList = [] indx = 0 for s in toStateList: if s == state: t1List.append(time1List[indx]) t2List.append(time2List[indx]) sList.append(0.10 + s*0.20) indx += 1 p.PlotData(1,t1List, t2List, sList, 'Test', '0.1 in 0 '+p.SetColor(p.colors[state])+' plotWbarsC', sys.stdout) state += 1 image = p.GetImage(sys.stdout) print 'Image file: ', image p.End() if __name__ == "__main__": main()
# -*- coding: utf-8 -*- # Generated by Django 1.10.3 on 2016-11-28 06:53 from __future__ import unicode_literals from django.db import migrations
# Copyright 2021 Northern.tech AS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import setuptools import re VERSIONFILE = "src/mender/_version.py" version_string_line = open(VERSIONFILE, "rt").read() VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]" match = re.search(VSRE, version_string_line, re.M) if match: version_string = match.group(1) else: raise RuntimeError("Unable to find version string in %s." % (VERSIONFILE,)) with open("README.md", "r", encoding="utf-8") as fh: long_description = fh.read() setuptools.setup( name="mender-python-client-mendersoftware", version=version_string, license="Apache 2.0", author="Mendersoftware", author_email="contact@mender.io", description="A Python implementation of the Mender client interface", long_description=long_description, long_description_content_type="text/markdown", url="https://github.com/mendersoftware/mender-python-client", classifiers=[ "Programming Language :: Python :: 3", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", ], keywords=["mender", "OTA", "updater"], packages=setuptools.find_packages(where="src"), install_requires=["cryptography", "requests", "msgpack", "websockets"], entry_points={"console_scripts": ["mender-python-client=mender.mender:main"]}, package_dir={"": "src"}, python_requires=">=3.6", zip_safe=False, include_package_data=True, )
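# The VSRE pattern can be exercised on its own: given a _version.py style line it
# captures just the version string (toy input, not the real file).
import re

VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
sample = '__version__ = "1.2.3"\n'
print(re.search(VSRE, sample, re.M).group(1))  # -> 1.2.3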
""" 295 find median from data stream hard """ from heapq import * sol = MedianFinder() sol.addNum(1) print(sol.findMedian()) sol.addNum(2) print(sol.findMedian())
import os import numpy as np import raisimpy as raisim import math import time raisim.World.setLicenseFile(os.path.dirname(os.path.abspath(__file__)) + "/../../rsc/activation.raisim") world = raisim.World() ground = world.addGround() world.setTimeStep(0.001) world.setMaterialPairProp("steel", "steel", 0.1, 1.0, 0.0) pin1 = world.addSphere(0.1, 0.8) pin1.setAppearance("1,0,0,0.3") pin1.setPosition(0.0, 0.0, 3.0) pin1.setBodyType(raisim.BodyType.STATIC) pin2 = world.addSphere(0.1, 0.8) pin2.setAppearance("0,1,0,0.3") pin2.setPosition(0.3, 0.0, 3.0) pin2.setBodyType(raisim.BodyType.STATIC) pin3 = world.addSphere(0.1, 0.8) pin3.setAppearance("0,0,1,0.3") pin3.setPosition(0.6, 0.0, 3.0) pin3.setBodyType(raisim.BodyType.STATIC) pin4 = world.addSphere(0.1, 0.8) pin4.setAppearance("1,0,0,0.3") pin4.setPosition(0.9, 0.0, 3.0) pin4.setBodyType(raisim.BodyType.STATIC) pin5 = world.addSphere(0.1, 0.8) pin5.setPosition(0.9, 0.0, 6.0) pin5.setBodyType(raisim.BodyType.STATIC) pin6 = world.addSphere(0.1, 0.8) pin6.setPosition(-3., 0.0, 7.0) pin6.setBodyType(raisim.BodyType.STATIC) pin7 = world.addSphere(0.1, 0.8) pin7.setPosition(-4., 0.0, 7.0) pin7.setBodyType(raisim.BodyType.STATIC) anymalB_urdf_file = os.path.dirname(os.path.abspath(__file__)) + "/../../rsc/anymal/urdf/anymal.urdf" anymalC_urdf_file = os.path.dirname(os.path.abspath(__file__)) + "/../../rsc/anymal_c/urdf/anymal.urdf" anymalC = world.addArticulatedSystem(anymalC_urdf_file) anymalB = world.addArticulatedSystem(anymalB_urdf_file) jointNominalConfig = np.array([-3, 0, 4.54, 1.0, 0.0, 0.0, 0.0, 0.03, 0.4, -0.8, -0.03, 0.4, -0.8, 0.03, -0.4, 0.8, -0.03, -0.4, 0.8]) jointVelocityTarget = np.zeros([anymalC.getDOF()]) jointPgain = np.ones(anymalC.getDOF()) * 100.0 jointDgain = np.ones(anymalC.getDOF()) * 1.0 anymalC.setGeneralizedCoordinate(jointNominalConfig) anymalC.setPdGains(jointPgain, jointDgain) anymalC.setPdTarget(jointNominalConfig, jointVelocityTarget) anymalC.setName("anymalC") jointNominalConfig[0] = -4 anymalB.setGeneralizedCoordinate(jointNominalConfig) anymalB.setPdGains(jointPgain, jointDgain) anymalB.setPdTarget(jointNominalConfig, jointVelocityTarget) anymalB.setName("anymalB") ball1 = world.addSphere(0.1498, 0.8, "steel") ball1.setPosition(0, 0.0, 1.0) ball2 = world.addSphere(0.1499, 0.8, "steel") ball2.setPosition(0.3, 0.0, 1.0) ball3 = world.addSphere(0.1499, 0.8, "steel") ball3.setPosition(0.6, 0.0, 1.0) ball4 = world.addSphere(0.1499, 0.8, "steel") ball4.setPosition(2.9, 0.0, 3.0) box = world.addBox(.1, .1, .1, 1) box.setPosition(0.9, 0.0, 4.2) world.addStiffWire(pin1, 0, np.zeros(3), ball1, 0, np.zeros(3), 2.0) world.addStiffWire(pin2, 0, np.zeros(3), ball2, 0, np.zeros(3), 2.0) world.addStiffWire(pin3, 0, np.zeros(3), ball3, 0, np.zeros(3), 2.0) world.addStiffWire(pin4, 0, np.zeros(3), ball4, 0, np.zeros(3), 2.0) wire5 = world.addCompliantWire(pin5, 0, np.zeros(3), box, 0, np.zeros(3), 2.0, 200) wire5.setStretchType(raisim.StretchType.BOTH) wire6 = world.addCompliantWire(pin6, 0, np.zeros(3), anymalC, 0, np.zeros(3), 2.0, 1000) wire6.setStretchType(raisim.StretchType.BOTH) wire7 = world.addCustomWire(pin7, 0, np.zeros(3), anymalB, 0, np.zeros(3), 2.0) wire7.setTension(310) server = raisim.RaisimServer(world) server.launchServer(8080) for i in range(500000): time.sleep(0.001) server.integrateWorldThreadSafe() if i == 5000: world.removeObject(wire7) server.killServer()
# Copyright 2012 Pedro Navarro Perez # Copyright 2013 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Management class for Storage-related functions (attach, detach, etc). """ import time from os_brick.initiator import connector from os_win import utilsfactory from oslo_log import log as logging from oslo_utils import strutils import nova.conf from nova import exception from nova.i18n import _, _LE, _LI, _LW from nova import utils from nova.virt import driver from nova.virt.hyperv import constants LOG = logging.getLogger(__name__) CONF = nova.conf.CONF class FCVolumeDriver(BaseVolumeDriver): _is_block_dev = True _protocol = constants.STORAGE_PROTOCOL_FC
# -------------- # Importing header files import numpy as np import pandas as pd from scipy.stats import mode # code starts here bank = pd.read_csv(path) categorical_var = bank.select_dtypes(include = 'object') print(categorical_var) numerical_var = bank.select_dtypes(include = 'number') print(numerical_var) banks = bank.drop(columns=['Loan_ID']) bank_mode = banks.mode() banks = banks.fillna(bank_mode.iloc[0]) print(banks.isnull().sum()) avg_loan_amount = pd.pivot_table(banks, index=['Gender', 'Married', 'Self_Employed'], values='LoanAmount', aggfunc = 'mean') print(avg_loan_amount) loan_approved_se = banks[ (banks['Self_Employed'] == "Yes") & (banks['Loan_Status'] == "Y") ] loan_approved_nse = banks[ (banks['Self_Employed'] == "No") & (banks['Loan_Status'] == "Y") ] percentage_se = (len(loan_approved_se) / 614) * 100 percentage_nse = (len(loan_approved_nse) / 614) * 100 # loan amount term loan_term = banks['Loan_Amount_Term'].apply(lambda x: int(x)/12 ) big_loan_term=len(loan_term[loan_term>=25]) print(big_loan_term) columns_to_show = ['ApplicantIncome', 'Credit_History'] loan_groupby=banks.groupby(['Loan_Status'])[columns_to_show] # Check the mean value mean_values=loan_groupby.agg([np.mean]) print(mean_values) # code ends here
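# pivot_table with aggfunc='mean' groups by the three index columns and averages
# LoanAmount within each group; a toy frame (hypothetical values) makes the shape
# of avg_loan_amount easy to see.
import pandas as pd

toy = pd.DataFrame({
    "Gender": ["Male", "Male", "Female", "Female"],
    "Married": ["Yes", "Yes", "No", "No"],
    "Self_Employed": ["No", "No", "Yes", "Yes"],
    "LoanAmount": [100, 140, 90, 110],
})
print(pd.pivot_table(toy, index=["Gender", "Married", "Self_Employed"],
                     values="LoanAmount", aggfunc="mean"))
# Female/No/Yes -> 100, Male/Yes/No -> 120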
''' Training Scropt for V2C captioning task. ''' __author__ = 'Jacob Zhiyuan Fang' import os import numpy as np from opts import * from utils.utils import * import torch.optim as optim from model.Model import Model from torch.utils.data import DataLoader from utils.dataloader import VideoDataset from model.transformer.Optim import ScheduledOptim if __name__ == '__main__': opt = parse_opt() opt = vars(opt) main(opt)
import influxdb_client
from influxdb_client import InfluxDBClient

bucket = "python-client-sandbox"
org = "Energy Monitor"
token = "miQdAvNXHiNDVVzPzV5FpkCaR_8qdQ-L1FlPCOXQPI325Kbrh1fgfhkcDUZ4FepaebDdpZ-A1gmtnnjU0_hViA=="
url = "http://localhost:9999"

client = InfluxDBClient(url=url, token=token, org=org)
write_api = client.write_api()
write_api.write(bucket, org, [{"measurement": "h2o_feet", "tags": {"location": "coyote_creek"},
                               "fields": {"water_level": 1}, "time": 1}])
# No shebang line, this module is meant to be imported # # Copyright 2014 Oliver Palmer # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from collections import namedtuple from pprint import pprint from random import randint from StringIO import StringIO from textwrap import dedent try: from unittest.mock import patch except ImportError: # pragma: no cover from mock import patch from twisted.internet.protocol import ServerFactory from twisted.cred.portal import Portal from twisted.conch.telnet import ( ITelnetProtocol, TelnetBootstrapProtocol, TelnetTransport) from pyfarm.agent.testutil import TestCase from pyfarm.agent.manhole import ( LoggingManhole, TransportProtocolFactory, TelnetRealm, manhole_factory, show) Peer = namedtuple("Peer", ("host", "port"))
# coding: utf-8
"""
"""

#----------------------------------------------------------------------
def klSigmode(self):
    """Switch the canvas between the 'deal' and 'dealOpen' signal sets."""
    if self.mode == 'deal':
        self.canvas.updateSig(self.signalsOpen)
        self.mode = 'dealOpen'
    else:
        self.canvas.updateSig(self.signals)
        self.mode = 'deal'
# 2020, BackThen Maps
# Coded by Remi Petitpierre https://github.com/RPetitpierre
# For Bibliothèque nationale de France (BnF)

import cv2, thinning, os
import numpy as np
import pandas as pd
import shapefile as shp

from skimage.measure import approximate_polygon
from PIL import Image, ImageDraw

from utils.utils import *
from utils.match import toLatLon

Image.MAX_IMAGE_PIXELS = 500000000


def skeletonize(road_network: np.ndarray, path: str = "workshop/vectorized.png", largest_component: bool = False):
    ''' Thinning/skeletonization of the road network image to a wired model.

    Input(s):
        road_network: black and white image of the road network (streets in white)
        path: path where the skeletonized image should be saved
        largest_component: if True, only the largest road network component will be kept

    Output(s):
        vectorized: skeletonized image
    '''

    assert len(road_network.shape) == 2, 'ERROR: road_network must be grayscale image'

    img = cv2.resize(road_network, (road_network.shape[1]//2, road_network.shape[0]//2))
    vectorized = thinning.guo_hall_thinning(img)
    vectorized[vectorized > 100] = 255
    vectorized[vectorized <= 100] = 0

    if largest_component:
        try:
            _, labels, stats, _ = cv2.connectedComponentsWithStats(vectorized.copy(), connectivity=8)
            stats = stats[1:]
            main_component = (np.argmax(stats[:, cv2.CC_STAT_AREA])+1).astype('int32')
            vectorized = (labels == main_component).astype('uint8')*255
        except Exception:
            print('Warning: Skeletonization failed to apply largest_component = True param. Skipping.')

    cv2.imwrite(path, vectorized)

    return vectorized


def findNodes(image: np.ndarray):
    ''' Find the nodes in the road network skeleton image.

    Input(s):
        image: skeletonized image

    Output(s):
        nodes: array of nodes coordinates (x, y)
        degree: degrees of the nodes (2=endpoint, 4=crossroads of 3 streets, 5=crossroads of 4 streets, etc.)
        addresses: directions of the crossing roads, with regard to the node
    '''

    img = image.copy()

    # Find row and column locations that are non-zero
    (rows, cols) = np.nonzero(img)
    nodes, degree, addresses = [], [], []

    for (r, c) in zip(rows, cols):
        if r > 0 and c > 0 and r < image.shape[0]-1 and c < image.shape[1]-1:
            # Extract an 8-connected neighbourhood
            (col_neigh, row_neigh) = np.meshgrid(np.array([c-1, c, c+1]), np.array([r-1, r, r+1]))

            # Cast to int to index into image
            col_neigh = col_neigh.astype('int')
            row_neigh = row_neigh.astype('int')

            # Convert into a single 1D array and check for non-zero locations
            pix_neighbourhood = img[row_neigh, col_neigh].ravel() != 0

            # If the number of non-zero locations equals 2, add this to our list of coordinates
            n_neighbours = np.sum(pix_neighbourhood)

            if (n_neighbours == 2) or (n_neighbours >= 4):
                nodes.append((r, c))
                degree.append(n_neighbours)
                direction_set = np.where(pix_neighbourhood == True)[0]
                direction_set = direction_set[direction_set != 4]
                addresses.append(direction_set)

    nodes = np.asarray(nodes)

    return nodes, degree, addresses


def explorePath(start_x: int, start_y: int, start_dir: int, image: np.ndarray, nodes_grid: np.ndarray):
    ''' Follow the path from one given start node and direction until the next node, storing the pixels on the way.
    Input(s):
        start_x: start node x-coordinate
        start_y: start node y-coordinate
        start_dir: starting direction ({0, 1, 2, 3, 5, 6, 7, 8}; 4 is the centre pixel itself)
        image: skeletonized image of the road network
        nodes_grid: grid of the nodes of the skeletonized image

    Output(s):
        way: list of pixel coordinates on the way
        direction: last direction to reach the 2nd node
        nodes_grid[x, y]: degree of the arrival node
    '''

    direction = start_dir
    x, y = start_x, start_y
    assert image[x, y] != 0, 'ERROR: start point is not white'
    end = False
    way = [(x, y)]

    # First iteration
    new_x, new_y = absoluteWay(x, y, direction)
    assert image[new_x, new_y] != 0, 'ERROR: 2nd point is not white'
    way.append((new_x, new_y))
    x, y = new_x, new_y
    wrong_paths = noTurnBack(direction)
    wrong_paths_active = True

    if nodes_grid[x, y]:
        end = True
        direction = 8-start_dir

    while not(end):
        if x > 0 and y > 0 and x < image.shape[0]-1 and y < image.shape[1]-1:
            # Extract an 8-connected neighbourhood
            (row_neigh, col_neigh) = np.meshgrid(np.array([x-1, x, x+1]), np.array([y-1, y, y+1]))

            # Cast to int to index into image
            col_neigh, row_neigh = col_neigh.astype('int'), row_neigh.astype('int')

            # Convert into a single 1D array and check for non-zero locations
            try:
                pix_neighbourhood = image[row_neigh, col_neigh].transpose().ravel() != 0
            except Exception:
                print(x, y, image.shape)
                raise AssertionError()

            # If the number of non-zero locations equals 2, add this to our list of coordinates
            n_neighbours = np.sum(pix_neighbourhood)
            direction_set = np.where(pix_neighbourhood == True)[0]
            last_ds = [wrong_paths]
            last_ds.append(direction_set)
            direction_set = direction_set[direction_set != 4]
            last_ds.append(direction_set)
            direction_set = direction_set[direction_set != (8-direction)]
            last_ds.append(direction_set)
            direction_set = np.asarray(avoidDiagonalEdges(direction_set, direction))
            last_ds.append(direction_set)

            if wrong_paths_active:
                for wrong_path in wrong_paths:
                    direction_set = direction_set[direction_set != wrong_path]
                wrong_paths_active = False

            if len(direction_set) != 1:
                end = True
                break

            direction = direction_set[0]
            new_x, new_y = absoluteWay(x, y, direction)
            way.append((new_x, new_y))
            x, y = new_x, new_y

            if nodes_grid[x, y]:
                end = True
        else:
            end = True

    return way, direction, nodes_grid[x, y]


def findSegments(df_nodes: pd.DataFrame, image: np.ndarray, min_length: int = 30,
                 return_simple_ways: bool = True):
    ''' Find all the road segments in the network. Keep the ones that are longer than a given length
        or non-terminal. Optionally, compute the Douglas-Peucker simple itinerary of each segment and return it.
    Input(s):
        df_nodes: list of nodes
        image: skeletonized image of the road network
        min_length: min segment length if the segment is terminal
        return_simple_ways: if True, compute the Douglas-Peucker simple itinerary of each segment and return it

    Output(s):
        (Optional)(simple_ways: the Douglas-Peucker simple itinerary of each segment)
        ways: list of segments, containing all the pixels on the way between each couple of nodes
        nodes_grid: image containing all the nodes found in the image and their degree
    '''

    img = image.copy()
    done, ways = [], []
    df_nodes = df_nodes.sort_values(by='degree').reset_index(drop=True)

    nodes_grid = np.zeros(image.shape)
    for ind, row in df_nodes[['x', 'y', 'degree']].iterrows():
        nodes_grid[row['x'], row['y']] = row['degree']
    nodes_grid = nodes_grid.astype('int')

    for ind, node in df_nodes.iterrows():
        for direct in node['address']:
            code = str(node['x']) + '_' + str(node['y']) + '_' + str(direct)

            if not(code in done):
                way, last_direct, degree = explorePath(start_x=node['x'], start_y=node['y'],
                                                       start_dir=direct, image=img, nodes_grid=nodes_grid)
                if not((len(way) <= min_length) and ((node['degree'] == 2) or (degree == 2))):
                    done.append(str(way[-1][0]) + '_' + str(way[-1][1]) + '_' + str(8-last_direct))
                    ways.append(way)

    if return_simple_ways:
        simple_ways = []
        for way in ways:
            inv_way = np.asarray([np.asarray(way)[:, 1], image.shape[0]-np.asarray(way)[:, 0]]).transpose()
            simple_ways.append(approximate_polygon(np.asarray(inv_way), tolerance=1.6).tolist())

        return simple_ways, ways, nodes_grid

    else:
        return ways, nodes_grid


def toPNG(segments: list, vectorized: np.ndarray, out_path: str):
    ''' Save a given set of segments as a bitmap image from the road network.

    Input(s):
        segments: list of segments, containing all the pixels on the way between each couple of nodes
        vectorized: skeletonized image of the road network
        out_path: the path where the output bitmap image should be saved
    '''

    canvas = (np.ones(vectorized.shape)*255).astype('uint8')
    cv2.imwrite('workshop/canvas.png', canvas)

    bitmap = Image.open('workshop/canvas.png')
    draw = ImageDraw.Draw(bitmap)

    for segment in segments:
        coords = []
        for point in segment:
            coords.append((point[1], point[0]))
        draw.line(coords, fill='black', width=0)

    bitmap.save(out_path)
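# findNodes classifies a skeleton pixel by the number of lit pixels in its
# 8-connected neighbourhood, centre included (2 = endpoint, 5 = crossroads of
# four streets, ...). The core test is small enough to demonstrate on a toy
# crossing.
import numpy as np

toy_skeleton = np.array([
    [0, 0, 1, 0, 0],
    [0, 0, 1, 0, 0],
    [1, 1, 1, 1, 1],
    [0, 0, 1, 0, 0],
    [0, 0, 1, 0, 0],
], dtype=np.uint8) * 255

r, c = 2, 2  # centre of the crossing
neigh = toy_skeleton[r - 1:r + 2, c - 1:c + 2].ravel() != 0
print(np.sum(neigh))  # -> 5: the centre plus its four branches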
from unittest import TestCase from unittest.mock import patch from easy2fa import cli
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler from bert_finetuning.data import GermanData """ ** FOR DEBUGGING ** if __name__ == "__main__": ## define data paths germeval_data_paths = { "train": "./datasets/hasoc_dataset/hasoc_german_train.csv", "dev": "./datasets/hasoc_dataset/hasoc_german_validation.csv", "test": "./datasets/hasoc_dataset/hasoc_german_test.csv", } hasoc_german_data_paths = { "train": "./datasets/hasoc_dataset/hasoc_german_train.csv", "dev": "./datasets/hasoc_dataset/hasoc_german_validation.csv", "test": "./datasets/hasoc_dataset/hasoc_german_test.csv", } ## create dataloaders print("creating germeval dataloaders...") germ_eval_dataloader = GermanDataLoader(germeval_data_paths) print("creating hasoc dataloaders...") hasoc_german_dataloader = GermanDataLoader(hasoc_german_data_paths) """
import torch import numpy as np import torch.utils.data as data from torch.utils.data import Subset from data.fast_mnist import create_MNIST_dataset from data.ambiguous_mnist.ambiguous_mnist_dataset import AmbiguousMNIST
# -*- coding: utf-8 -*- import logging import datetime from flask import request, render_template from flask_jwt_extended import ( create_access_token, decode_token ) from jwt.exceptions import DecodeError from flasgger import swag_from from http import HTTPStatus from pathlib import Path from sqlalchemy.orm.exc import NoResultFound from vantage6.common import logger_name from vantage6.server import db from vantage6.server.resource import ( ServicesResources ) module_name = logger_name(__name__) log = logging.getLogger(module_name) # ------------------------------------------------------------------------------ # Resources / API's # ------------------------------------------------------------------------------
import os from typing import Any, Callable, Dict import tomodachi from tomodachi import aws_sns_sqs, aws_sns_sqs_publish from tomodachi.discovery import AWSSNSRegistration from tomodachi.envelope import JsonBase
types_of_people = 10
x = f"There are {types_of_people} types of people."

binary = "binary"
do_not = "don't"
y = f"Those who know {binary} and those who {do_not}."

print(x)
print(y)

print(f"I said: {x}")
print(f"I also said: '{y}'")

hilarious = False
joke_evaluation = "Isn't that joke so funny?! {}"

print(joke_evaluation.format(hilarious))

w = "This is the left side of..."
e = "a string with a right side."

print(w + e)
import base64 from google.protobuf import json_format from importlib import import_module import json import numpy as np import os import sys from mmdnn.conversion.caffe.errors import ConversionError from mmdnn.conversion.caffe.common_graph import fetch_attr_value from mmdnn.conversion.caffe.utils import get_lower_case, get_upper_case, get_real_name
""" leetcode-85 0 1 , rows x cols , 1 , """ from typing import List
# -*- coding: utf-8 -*-

# Copyright (c) 2016-2018 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.

import numpy as np
import pytest
from pandapower.optimal_powerflow import OPFNotConverged

import pandapower as pp

try:
    import pplog as logging
except ImportError:
    import logging

logger = logging.getLogger(__name__)
logger.setLevel("DEBUG")


def test_cost_piecewise_linear_gen():
    """ Testing a very simple network for the resulting cost value constraints with OPF """
    # boundaries:
    vm_max = 1.05
    vm_min = 0.95

    # create net
    net = pp.create_empty_network()
    pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
    pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)
    pp.create_gen(net, 1, p_kw=-100, controllable=True, max_p_kw=-5, min_p_kw=-150,
                  max_q_kvar=50, min_q_kvar=-50)
    pp.create_ext_grid(net, 0)
    pp.create_load(net, 1, p_kw=20, controllable=False)
    pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876,
                                   c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,
                                   max_loading_percent=100 * 690)
    pp.create_piecewise_linear_cost(net, 0, "gen", np.array([[-150, -100], [-75, -50], [0, 0]]))

    # run OPF
    pp.runopp(net, verbose=False)

    assert net["OPF_converged"]
    assert net.res_cost - net.res_gen.p_kw.values / 1.5 < 1e-3


def test_cost_piecewise_linear_eg():
    """ Testing a very simple network for the resulting cost value constraints with OPF """
    # boundaries:
    vm_max = 1.05
    vm_min = 0.95

    # create net
    net = pp.create_empty_network()
    pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
    pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10)
    pp.create_ext_grid(net, 0, max_p_kw=0, min_p_kw=-50)
    pp.create_gen(net, 1, p_kw=-10, max_p_kw=0, min_p_kw=-50, controllable=True)
    # pp.create_ext_grid(net, 0)
    pp.create_load(net, 1, p_kw=20, controllable=False)
    pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876,
                                   c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,
                                   max_loading_percent=100 * 690)
    pp.create_piecewise_linear_cost(net, 0, "ext_grid", np.array([[-50, -500], [0, 0]]))

    # run OPF
    pp.runopp(net, verbose=False)

    # check and assert result
    assert net["OPF_converged"]
    assert net.res_cost + net.res_ext_grid.p_kw.values * 10 < 1e-3


def test_get_costs():
    """ Testing a very simple network for the resulting cost value constraints with OPF """
    # boundaries:
    vm_max = 1.05
    vm_min = 0.95

    # create net
    net = pp.create_empty_network()
    pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
    pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)
    pp.create_gen(net, 1, p_kw=-100, controllable=True, max_p_kw=-5, min_p_kw=-150,
                  max_q_kvar=50, min_q_kvar=-50)
    pp.create_ext_grid(net, 0)
    pp.create_load(net, 1, p_kw=20, controllable=False)
    pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876,
                                   c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,
                                   max_loading_percent=100 * 690)
    pp.create_piecewise_linear_cost(net, 0, "gen", np.array([[-150, -300], [0, 0]]))

    # run OPF
    pp.runopp(net, verbose=False)

    # check and assert result
    assert net["OPF_converged"]
    assert net.res_cost == 2 * net.res_gen.p_kw.values


def test_cost_piecewise_linear_sgen():
    """ Testing a very simple network for the resulting cost value constraints with OPF """
    # boundaries:
    vm_max = 1.05
    vm_min = 0.95

    # create net
    net = pp.create_empty_network()
    pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
    pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)
    pp.create_sgen(net, 1, p_kw=-100, controllable=True, max_p_kw=-5, min_p_kw=-150,
                   max_q_kvar=50, min_q_kvar=-50)
    pp.create_ext_grid(net, 0)
    pp.create_load(net, 1, p_kw=20, controllable=False)
    pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876,
                                   c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,
                                   max_loading_percent=100 * 690)
    pp.create_piecewise_linear_cost(net, 0, "sgen", np.array([[-150, -100], [-75, -50], [0, 0]]))

    # run OPF
    pp.runopp(net, verbose=False)

    assert net["OPF_converged"]
    assert net.res_cost - net.res_sgen.p_kw.values / 1.5 < 1e-3


def test_cost_piecewise_linear_load():
    """ Testing a very simple network for the resulting cost value constraints with OPF """
    # boundaries:
    vm_max = 1.05
    vm_min = 0.95

    # create net
    net = pp.create_empty_network()
    pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
    pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)
    pp.create_load(net, 1, p_kw=100, controllable=True, max_p_kw=150, min_p_kw=50,
                   max_q_kvar=0, min_q_kvar=0)
    pp.create_ext_grid(net, 0)
    pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876,
                                   c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,
                                   max_loading_percent=100 * 690)
    pp.create_piecewise_linear_cost(net, 0, "load", np.array([[0, 0], [75, 50], [150, 100]]))

    # run OPF
    pp.runopp(net, verbose=False)

    assert net["OPF_converged"]
    assert abs(net.res_cost - net.res_load.p_kw.values / 1.5) < 1e-3


def test_cost_piecewise_linear_sgen_uneven_slopes():
    """ Testing a very simple network for the resulting cost value constraints with OPF """
    # boundaries:
    vm_max = 1.05
    vm_min = 0.95

    # create net
    net = pp.create_empty_network()
    pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
    pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)
    pp.create_sgen(net, 1, p_kw=-100, controllable=True, max_p_kw=-5, min_p_kw=-150,
                   max_q_kvar=50, min_q_kvar=-50)
    pp.create_ext_grid(net, 0)
    pp.create_load(net, 1, p_kw=20, controllable=False)
    pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876,
                                   c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,
                                   max_loading_percent=100 * 690)
    pp.create_piecewise_linear_cost(net, 0, "sgen", np.array([[-150, -200], [-75, -50], [0, 0]]))

    # run OPF
    pp.runopp(net, verbose=False)

    assert net["OPF_converged"]
    assert net.res_cost - net.res_sgen.p_kw.values / 1.5 < 1e-3


def test_cost_piecewise_linear_load_uneven_slopes():
    """ Testing a very simple network for the resulting cost value constraints with OPF """
    # boundaries:
    vm_max = 1.05
    vm_min = 0.95

    # create net
    net = pp.create_empty_network()
    pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
    pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)
    pp.create_load(net, 1, p_kw=100, controllable=True, max_p_kw=150, min_p_kw=50,
                   max_q_kvar=0, min_q_kvar=0)
    pp.create_ext_grid(net, 0)
    pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876,
                                   c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,
                                   max_loading_percent=100 * 690)
    # non-convex cost curve (decreasing slope), so the solver must not converge
    pp.create_piecewise_linear_cost(net, 0, "load", np.array([[0, 0], [75, 51], [150, 101]]))

    # run OPF
    with pytest.raises(OPFNotConverged):
        pp.runopp(net, verbose=False)
    assert not net["OPF_converged"]


def test_cost_piecewise_linear_sgen_very_unsteady_slopes():
    """ Testing a very simple network for the resulting cost value constraints with OPF """
    # boundaries:
    vm_max = 1.5
    vm_min = 0.5

    # create net
    net = pp.create_empty_network()
    pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
    pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)
    pp.create_sgen(net, 1, p_kw=-1000, controllable=True, max_p_kw=0, min_p_kw=-1500,
                   max_q_kvar=50, min_q_kvar=-50)
    pp.create_ext_grid(net, 0)
    pp.create_load(net, 1, p_kw=20, controllable=False)
    pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876,
                                   c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,
                                   max_loading_percent=100 * 690)
    pp.create_piecewise_linear_cost(net, 0, "sgen", np.array([[-1500, 2], [-750, 1], [0, 2]]))

    # run OPF
    pp.runopp(net, verbose=False)

    assert net["OPF_converged"]
    # assert net.res_cost - net.res_sgen.p_kw.values / 1.5 < 1e-3


if __name__ == "__main__":
    # test_cost_piecewise_linear_sgen_very_unsteady_slopes()
    pytest.main(["test_costs_pwl.py", "-s"])
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 2, 15069, 357, 66, 8, 1584, 12, 7908, 416, 2059, 286, 15035, 741, 290, 39313, 403, 71, 30288, 5136, 329, 6682, 18963, 198, 2, 290, 6682, 4482, 8987, 357, 40, 65...
1.976528
4,729
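A quick way to see why the assertions above compare res_cost against p_kw / 1.5: the cost tables are breakpoints of a piecewise linear function of p_kw, and the "gen" table used here has a constant slope of 1/1.5. A standalone numpy sketch (not pandapower code) of evaluating such a table:

import numpy as np

# (p_kw, cost) breakpoints from the "gen" table above
points = np.array([[-150, -100], [-75, -50], [0, 0]])
xp, fp = points[:, 0], points[:, 1]  # np.interp needs xp in increasing order

def pwl_cost(p_kw):
    """Evaluate the piecewise linear cost at operating point p_kw."""
    return np.interp(p_kw, xp, fp)

# slope is 100/150 == 1/1.5 everywhere, so cost(p) == p / 1.5 on this table
assert abs(pwl_cost(-90.0) - (-90.0 / 1.5)) < 1e-9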
# -*- coding: utf-8 -*-
# @Author: GXR
# @CreateTime: 2022-01-20
# @UpdateTime: 2022-01-20

import redis

import config
import cookie_login
from cookie_api import app

red = redis.Redis(
    host=config.REDIS_HOST,
    port=config.REDIS_PORT,
    db=config.REDIS_DB,
    decode_responses=True,
)

# refresh the login cookie cache; run_cookie_refresh is assumed to live in
# cookie_login, since its definition is not part of this snippet
if __name__ == "__main__":
    cookie_login.run_cookie_refresh()
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 2488, 13838, 25, 402, 55, 49, 198, 2, 2488, 16447, 7575, 25, 33160, 12, 486, 12, 1238, 198, 2, 2488, 10260, 7575, 25, 33160, 12, 486, 12, 1238, 198, 198, 11748, ...
2.320513
156
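For context, a hypothetical shape for that refresh helper, assuming cookie_login exposes a login routine and the cookie is cached in the Redis client configured above (all names here are illustrative, not from the original project):

# Illustrative sketch only; the real cookie_login module is not shown.
def run_cookie_refresh():
    cookie = cookie_login.login()       # assumed helper that performs the login
    red.set("cookie", cookie, ex=3600)  # cache the cookie in Redis for an hour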
from feemodel.app.transient import TransientOnline
from feemodel.app.pools import PoolsOnlineEstimator
from feemodel.app.predict import Prediction
from feemodel.app.simonline import SimOnline

__all__ = [
    'TransientOnline',
    'PoolsOnlineEstimator',
    'Prediction',
    'SimOnline'
]
[ 6738, 730, 368, 375, 417, 13, 1324, 13, 7645, 1153, 1330, 3602, 1153, 14439, 198, 6738, 730, 368, 375, 417, 13, 1324, 13, 7742, 82, 1330, 350, 10141, 14439, 22362, 320, 1352, 198, 6738, 730, 368, 375, 417, 13, 1324, 13, 79, 17407, ...
2.754717
106
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

# start tutorial
from django.db import models
from djng.forms import NgModelFormMixin, NgFormValidationMixin
from djng.styling.bootstrap3.forms import Bootstrap3ModelForm
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 2, 923, 11808, 198, 6738, 42625, 14208, 13, 9945, 1330, 4981, 198, 6738, 42625, 782, 13, 23914, 1330, 34...
3.118421
76
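The imports above set up the standard django-angular pattern: an Ng-enhanced, Bootstrap-styled model form. A minimal continuation in that vein might look like the sketch below; the model and field names are illustrative, not taken from the tutorial itself.

class SubscribeUser(models.Model):
    # illustrative model; the tutorial's real model is not shown in this record
    full_name = models.CharField("Full name", max_length=99)


class SubscribeForm(NgModelFormMixin, NgFormValidationMixin, Bootstrap3ModelForm):
    # binds the form to an Angular scope and adds client-side validation
    scope_prefix = 'subscribe_data'
    form_name = 'my_form'

    class Meta:
        model = SubscribeUser
        fields = ['full_name']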
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name

"""Compute and schedule for add, multiply, subtract slice op

Please note the following assumptions made by the implementation:

1) The inputs will be multiple of crouton layout except for the axis that
   needs broadcasting."""

from tvm import te
from tvm import tir
from tvm import topi

from ..utils import get_layout_transform_fn


def add_broadcast_compute(input_a, input_b):
    """Call the add op from topi"""
    return topi.add(input_a, input_b)


def subtract_broadcast_compute(input_a, input_b):
    """Call the subtract op from topi"""
    return topi.subtract(input_a, input_b)


def multiply_broadcast_compute(input_a, input_b):
    """Call the multiply op from topi"""
    return topi.multiply(input_a, input_b)


def tir_broadcast_schedule(
    out_m,
    input_a,
    input_b,
    output_layout: str,
    input_a_layout: str,
    input_b_layout: str,
    op_name: str,
):
    """Schedule for input and output layout nhwc-8h2w32c2w-2d considering broadcast"""
    func = te.create_prim_func([input_a, input_b, out_m])

    s = tir.Schedule(func)

    block_dict = {"add": "T_add", "subtract": "T_subtract", "multiply": "T_multiply"}

    block = s.get_block(block_dict[op_name])

    if input_a_layout == "nhwc-8h2w32c2w-2d":
        input_a_transformed_layout = get_layout_transform_fn(input_a_layout)
        s.transform_layout(block, buffer=("read", 0), index_map=input_a_transformed_layout)

    if input_b_layout == "nhwc-8h2w32c2w-2d":
        input_b_transformed_layout = get_layout_transform_fn(input_b_layout)
        s.transform_layout(block, buffer=("read", 1), index_map=input_b_transformed_layout)

    output_transformed_layout = get_layout_transform_fn(output_layout)
    s.transform_layout(block, buffer=("write", 0), index_map=output_transformed_layout)

    n, h, w, c = s.get_loops(block)

    h_o, h_i = s.split(h, [None, 8])
    w_o, w_i = s.split(w, [None, 4])
    c_o, c_i = s.split(c, [None, 32])
    wio, wii = s.split(w_i, [None, 2])

    s.reorder(n, h_o, w_o, c_o, h_i, wio, c_i, wii)

    fused = s.fuse(c_i, wii)
    s.vectorize(fused)

    return s
[ 2, 49962, 284, 262, 24843, 10442, 5693, 357, 1921, 37, 8, 739, 530, 201, 198, 2, 393, 517, 18920, 5964, 11704, 13, 220, 4091, 262, 28536, 2393, 201, 198, 2, 9387, 351, 428, 670, 329, 3224, 1321, 201, 198, 2, 5115, 6634, 9238, 13, ...
2.53768
1,181
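For orientation, here is a rough, hedged usage sketch of the compute/schedule pair above. The shape, dtype, and layout strings are assumptions (they must be layouts that get_layout_transform_fn accepts); this is not an official TVM example.

from tvm import te

# two crouton-layout operands of the same (illustrative) logical shape
a = te.placeholder((1, 8, 8, 32), dtype="float16", name="A")
b = te.placeholder((1, 8, 8, 32), dtype="float16", name="B")
out = add_broadcast_compute(a, b)

# build the TIR schedule with 8h2w32c2w tiling on inputs and output
sch = tir_broadcast_schedule(
    out, a, b,
    output_layout="nhwc-8h2w32c2w-2d",
    input_a_layout="nhwc-8h2w32c2w-2d",
    input_b_layout="nhwc-8h2w32c2w-2d",
    op_name="add",
)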
import contextlib
import os
from collections import namedtuple

import yaml
from dagster import __version__ as current_dagster_version
from dagster import check

from .ecr import ecr_image, get_aws_account_id, get_aws_region
from .utils import (
    execute_docker_build,
    execute_docker_push,
    execute_docker_tag,
    python_version_image_tag,
)

# Default repository prefix used for local images
DEFAULT_LOCAL_PREFIX = "dagster"

# Location of the template assets used here
IMAGES_PATH = os.path.join(os.path.dirname(__file__), "images")
[ 11748, 4732, 8019, 198, 11748, 28686, 198, 6738, 17268, 1330, 3706, 83, 29291, 198, 198, 11748, 331, 43695, 198, 6738, 48924, 1706, 1330, 11593, 9641, 834, 355, 1459, 62, 67, 363, 1706, 62, 9641, 198, 6738, 48924, 1706, 1330, 2198, 198,...
3.061453
179
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import json
import logging
import os
import unittest

from telemetry.core import browser_finder
from telemetry.core import exceptions
from telemetry.core import extension_to_load
from telemetry.core import util
from telemetry.core.backends.chrome import cros_interface
from telemetry.unittest import options_for_unittests
[ 2, 15069, 357, 66, 8, 2321, 383, 18255, 1505, 46665, 13, 1439, 2489, 10395, 13, 198, 2, 5765, 286, 428, 2723, 2438, 318, 21825, 416, 257, 347, 10305, 12, 7635, 5964, 326, 460, 307, 198, 2, 1043, 287, 262, 38559, 24290, 2393, 13, 1...
3.782946
129
import sqlite3
[ 11748, 44161, 578, 18, 628 ]
3.2
5
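This record is just the import; for completeness, a minimal self-contained use of the stdlib sqlite3 API (an in-memory database, so nothing touches disk; the table is purely illustrative):

import sqlite3

conn = sqlite3.connect(":memory:")  # throwaway in-memory database
conn.execute("CREATE TABLE switches (name TEXT, state INTEGER)")
conn.execute("INSERT INTO switches VALUES (?, ?)", ("Switch3", 1))
conn.commit()
print(conn.execute("SELECT * FROM switches").fetchall())  # [('Switch3', 1)]
conn.close()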
from django import forms
from django.forms.utils import ErrorList

from crits.campaigns.campaign import Campaign
from crits.core.forms import add_bucketlist_to_form, add_ticket_to_form
from crits.core.handlers import get_item_names, get_source_names
from crits.core.user_tools import get_user_organization
from crits.core import form_consts
from crits.vocabulary.relationships import RelationshipTypes

relationship_choices = [(c, c) for c in RelationshipTypes.values(sort=True)]
[ 6738, 42625, 14208, 1330, 5107, 198, 6738, 42625, 14208, 13, 23914, 13, 26791, 1330, 13047, 8053, 198, 198, 6738, 1955, 82, 13, 35012, 82, 13, 35012, 1330, 13718, 198, 6738, 1955, 82, 13, 7295, 13, 23914, 1330, 751, 62, 27041, 316, 48...
3.377622
143
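relationship_choices has the standard Django (value, label) shape, so a form built on these imports can feed it straight into a ChoiceField. A minimal illustrative sketch (this form class is not part of the original snippet):

class RelationshipForm(forms.Form):
    # illustrative only; the real crits forms add many more fields
    relationship_type = forms.ChoiceField(choices=relationship_choices,
                                          widget=forms.Select(attrs={'class': 'form-control'}))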
"""Monte Carlo receding horizon control.""" from abc import ABC, abstractmethod from multiprocessing import Pipe, Process import gym from stable_baselines.common.vec_env import CloudpickleWrapper from aprl.common.mujoco import MujocoState, ResettableEnv def _worker(remote, parent_remote, dynamic_fn_wrapper, horizon, trajectories): parent_remote.close() dynamics = dynamic_fn_wrapper.var() dynamics.reset() mc = MonteCarloSingle(dynamics, horizon, trajectories) try: while True: cmd, x = remote.recv() if cmd == "seed": mc.seed(x) elif cmd == "search": best_u, best_r = mc.best_action(x) remote.send((best_u, best_r)) elif cmd == "close": remote.close() break else: raise NotImplementedError except KeyboardInterrupt: print("MonteCarloParallel worker: got KeyboardInterrupt") finally: dynamics.close()
[ 37811, 9069, 660, 40089, 664, 8228, 17810, 1630, 526, 15931, 198, 198, 6738, 450, 66, 1330, 9738, 11, 12531, 24396, 198, 6738, 18540, 305, 919, 278, 1330, 36039, 11, 10854, 198, 198, 11748, 11550, 198, 6738, 8245, 62, 12093, 20655, 13, ...
2.25885
452
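The worker above speaks a small command protocol over a multiprocessing Pipe. A hedged sketch of the parent side, mirroring what a MonteCarloParallel class would presumably do; make_dynamics, horizon, trajectories, and state are assumed names:

parent_conn, child_conn = Pipe()
proc = Process(target=_worker,
               args=(child_conn, parent_conn,
                     CloudpickleWrapper(make_dynamics), horizon, trajectories))
proc.start()
child_conn.close()                    # the child owns this end now

parent_conn.send(("seed", 42))        # "seed" gets no reply
parent_conn.send(("search", state))   # "search" replies with (best_u, best_r)
best_u, best_r = parent_conn.recv()
parent_conn.send(("close", None))     # tells the worker to shut down
proc.join()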
import numpy as np


if __name__ == '__main__':
    postinList, classVec = loadDataSet()
    myVocabList = createVocabList(postinList)
    # print(setOfWords2Vec(myVocabList, postinList[0]))
    trainMat = []
    for postinDoc in postinList:
        trainMat.append(setOfWords2Vec(myVocabList, postinDoc))
    print(trainMat)
    p0V, p1V, pAb = trainNB0(trainMat, classVec)
    print(p0V, p1V, pAb)
[ 198, 11748, 299, 32152, 355, 45941, 628, 198, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 628, 220, 220, 220, 1281, 259, 8053, 11, 1398, 53, 721, 796, 3440, 6601, 7248, 3419, 198, 220, 220, 220, 616, 53, 420, 397,...
2.141361
191
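The __main__ block above calls helpers that are not included in this record. For reference, two of them essentially as they appear in Machine Learning in Action (loadDataSet and trainNB0 are also needed but omitted here):

def createVocabList(dataSet):
    """Build the list of unique tokens across all documents."""
    vocabSet = set([])
    for document in dataSet:
        vocabSet = vocabSet | set(document)  # union with each document's tokens
    return list(vocabSet)


def setOfWords2Vec(vocabList, inputSet):
    """Convert a document into a 0/1 presence vector over the vocabulary."""
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] = 1
    return returnVec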
from .swd import SWD
from .ahb import AHB
from .debugger import Debugger, HaltError, NotHaltedError

try:
    from .dwarf import ELFDebugger
except ImportError:
    pass
[ 6738, 764, 2032, 67, 1330, 12672, 35, 198, 6738, 764, 993, 65, 1330, 28159, 33, 198, 6738, 764, 24442, 1362, 1330, 31687, 1362, 11, 367, 2501, 12331, 11, 1892, 39, 29590, 12331, 198, 28311, 25, 198, 220, 220, 220, 422, 764, 67, 5767...
2.816667
60
import subprocess

subprocess.Popen(['sh', '../Switches/Switch3_On.sh'])
[ 11748, 850, 14681, 198, 7266, 14681, 13, 47, 9654, 7, 17816, 1477, 3256, 705, 40720, 10462, 9249, 14, 38978, 18, 62, 2202, 13, 1477, 6, 12962, 198 ]
2.666667
27
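Popen as used above is fire-and-forget: it neither waits for the shell script nor notices a failure. If blocking on completion is acceptable, a safer equivalent (same assumed script path) is:

import subprocess

# waits for the script and raises CalledProcessError on a non-zero exit code
subprocess.run(['sh', '../Switches/Switch3_On.sh'], check=True)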