Dataset schema (column name, dtype, observed min/max):

    content           string    length 1 .. 1.05M
    input_ids         list      length 1 .. 883k
    ratio_char_token  float64   1 .. 22.9
    token_count       int64     1 .. 883k

Each sample below is shown as its content, followed by its (truncated) input_ids, then its ratio_char_token and token_count values.
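The numeric columns are derived from the first two: token_count appears to be len(input_ids), and ratio_char_token is len(content) / token_count (first sample below: 236 characters over 87 tokens ≈ 2.712644). A minimal row-level sanity check follows, as a sketch only: it assumes rows are plain Python dicts with the four columns above, and that the tokenizer is GPT-2's byte-level BPE (suggested by recurring ids such as 198 = "\n" and 2 = "#"; the dump itself never names the tokenizer).

# Sketch only (not part of the dataset dump). Hypothetical helper for
# sanity-checking one row; assumes dict rows and a GPT-2 tokenizer as noted above.
from transformers import GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")

def check_row(row):
    # token_count is the length of the token-id list.
    assert row["token_count"] == len(row["input_ids"])
    # ratio_char_token is characters per token.
    ratio = len(row["content"]) / row["token_count"]
    assert abs(ratio - row["ratio_char_token"]) < 1e-3
    # Under the GPT-2 assumption, re-encoding the content should reproduce
    # input_ids (modulo any leading/trailing whitespace stripped for display).
    assert tokenizer.encode(row["content"]) == row["input_ids"]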
# Copyright (C) 2014-2015 LiuLang <gsushzhsosgsu@gmail.com>
# Use of this source code is governed by GPLv3 license that can be found
# in http://www.gnu.org/licenses/gpl-3.0.html

import hashlib
import os
import zlib

CHUNK = 2 ** 20
[ 198, 2, 15069, 357, 34, 8, 1946, 12, 4626, 18258, 43, 648, 1279, 14542, 1530, 89, 11994, 418, 70, 2385, 31, 14816, 13, 785, 29, 198, 2, 5765, 286, 428, 2723, 2438, 318, 21825, 416, 38644, 85, 18, 5964, 326, 460, 307, 1043, 198, ...
2.712644
87
#!/usr/bin/python
# encoding: utf-8
'''a rich client

    1. for one server (instead of multi like in libmc.Client)
    2. encapsulate @, ?, gc ...

use is instead of libmc.Client
'''
import telnetlib
import logging
import libmc
import string
import urllib
import itertools
import warnings
from collections import defaultdict
from beansdbadmin.core.hint import parse_new_hint_body
from beansdbadmin.core.data import parse_records
from beansdbadmin.core.hash import get_khash64


def get_buckets_keys_count(store):
    """ return dict: buckets -> count """
    st = {}
    try:
        for line in (store.get('@') or '').split('\n'):
            if line:
                d, _, c = line.split(' ')
                if not d.endswith('/'):
                    continue
                st[int(d[0], 16)] = int(c)
        return st
    except IOError:
        raise Exception("cannot get @ from %s" % (store))


def get_primary_buckets(store):
    """ return possible primary buckets, might be wrong on temporary nodes,
        result is list of buckets in integer
    """
    ss = get_buckets_keys_count(store)
    bucket_list = ss.items()
    bucket_list = [x for x in bucket_list if x[1] > 0]
    if not bucket_list:
        return None
    bucket_list.sort(lambda a, b: cmp(a[1], b[1]), reverse=True)
    result = [bucket_list[0]]
    for i in bucket_list[1:]:
        if result[-1][1] / i[1] >= 2:
            break
        result.append(i)
    return [x[0] for x in result]


def get_key_info_disk(store, key):
    '''return ver, vhash, flag, vsz, ts, fid, pos'''
    info = store.get('??' + key)
    if info:
        return [int(x) for x in info.split()]


def test_new(addr, bucket):
    b = bucket
    c = DBClient(addr)
    print "stats:", c.stats()
    print 'version:', c.get_server_version()
    print "isold:", c.is_old()
    print "dir root:", c.get_dir("@")
    print "bucket key count:", c.get_bucket_keys_count(int(b))
    print "item_count:", c.item_count()
    print "primary_buckets", get_primary_buckets(c)

    leaf = c.get_dir("@" + b + "000000")
    print "a dir leaf:", leaf
    khash_str = list(leaf)[0]
    print "a khash_str", khash_str
    r = c.get_records_by_khash(khash_str)[0]
    k = r[0]
    print "key, len(value), (flag, tstamp, ver):", k, r[1], r[3:]
    print "key info mem:", c.get_key_info_mem(k)
    print "key info disk(ver, vhash, flag, vsz, ts, fid, pos):", \
        c.get_key_info_disk(k)
    print "key version:", c.get_version(k)
    print "collision_summary", c.get_collision_summary(int(b))
    print "gc status:", c.get_gc_status()


if __name__ == '__main__':
    test_new("rosa3a:7900", '3')
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 2, 21004, 25, 3384, 69, 12, 23, 198, 7061, 6, 64, 5527, 5456, 198, 220, 220, 220, 352, 13, 329, 530, 4382, 357, 38070, 286, 5021, 588, 287, 9195, 23209, 13, 11792, 8, 198, 220, 220, 220...
2.271637
1,167
import matplotlib.pyplot

__author__ = 'xiongyi'

line1 = [(200, 100), (200, 400)]
line2 = [(190, 190), (210, 210)]

if __name__ == '__main__':
    matplotlib.pyplot.plot((line1[0][0],line1[1][0]),(line1[0][1],line1[1][1]))
    matplotlib.pyplot.hold(True)
    matplotlib.pyplot.plot((line2[0][0],line2[1][0]),(line2[0][1],line2[1][1]))
    print(overlap())
    matplotlib.pyplot.show()
[ 11748, 2603, 29487, 8019, 13, 9078, 29487, 198, 834, 9800, 834, 796, 705, 87, 295, 1360, 72, 6, 198, 1370, 16, 796, 47527, 2167, 11, 1802, 828, 357, 2167, 11, 7337, 15437, 198, 1370, 17, 796, 47527, 19782, 11, 19884, 828, 357, 21536...
2.120879
182
from sqlalchemy import select
from sqlalchemy.schema import Column

from .declarative import Model


class ModelLoader(Loader):


class AliasLoader(ModelLoader):


class ColumnLoader(Loader):


class TupleLoader(Loader):


class CallableLoader(Loader):


class ValueLoader(Loader):
[ 6738, 44161, 282, 26599, 1330, 2922, 198, 6738, 44161, 282, 26599, 13, 15952, 2611, 1330, 29201, 198, 198, 6738, 764, 32446, 283, 876, 1330, 9104, 628, 198, 198, 4871, 9104, 17401, 7, 17401, 2599, 628, 198, 4871, 978, 4448, 17401, 7, ...
3.582278
79
# Standard imports
import logging
import math
import json
from uuid import UUID
from datetime import datetime, timedelta
import time

# Our imports
from emission.core.get_database import get_trip_db, get_section_db
import emission.analysis.result.carbon as carbon
import emission.core.common as common
import emission.net.api.stats as stats
from emission.core.wrapper.user import User

from emission.clients.leaderboard import leaderboard
from emission.clients.gamified import gamified
from emission.clients.recommendation import recommendation
from emission.clients.commontrips import commontrips
from emission.clients.data import data

# TODO: Consider subclassing to provide client specific user functions
# These are copy/pasted from our first client, the carshare study

# TODO: Simplify this. runBackgroundTasks are currently only invoked from the
# result precomputation code. We could change that code to pass in the day, and
# remove this interface. Extra credit: should we pass in the day, or a date
# range? Passing in the date range could make it possible for us to run the
# scripts more than once a day...
[ 2, 8997, 17944, 198, 11748, 18931, 198, 11748, 10688, 198, 11748, 33918, 198, 6738, 334, 27112, 1330, 471, 27586, 198, 6738, 4818, 8079, 1330, 4818, 8079, 11, 28805, 12514, 198, 11748, 640, 198, 198, 2, 3954, 17944, 198, 6738, 25592, 13...
3.947183
284
#!/usr/bin/env python

"""
HAR Formatter for REDbot.
"""

__author__ = "Jerome Renard <jerome.renard@gmail.com>"
__copyright__ = """\
Copyright (c) 2008-2010 Mark Nottingham

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""

import operator

import nbhttp.error as nberr
import redbot.speak as rs

from redbot.formatter import Formatter

nl = u"\n"

# TODO: errors and status on stderr with CLI?
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 37811, 198, 39, 1503, 5178, 1436, 329, 23848, 13645, 13, 198, 37811, 198, 198, 834, 9800, 834, 796, 366, 36134, 462, 7152, 446, 1279, 44009, 462, 13, 918, 446, 31, 14816, 13, 78...
3.576623
385
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 by Murray Altheim. All rights reserved. This file is part
# of the Robot Operating System project, released under the MIT License. Please
# see the LICENSE file included as part of this package.
#
# author:   Murray Altheim
# created:  2020-09-19
# modified: 2020-09-19
#

import sys, colorsys
import ioexpander as io
from colorama import init, Fore, Style
init()

from lib.logger import Logger

# ..............................................................................
#        return (( self._out_max - self._out_min ) * ( self.get_value() - self._in_min ) / ( self._in_max - self._in_min )) + self._out_min

#EOF
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 198, 2, 15069, 12131, 12, 1238, 2481, 416, 12164, 978, 1169, 320, 13, 1439, 2489, 10395, 13, 770, 2393, 318, ...
3.212963
216
""" Module: 'urequests' on esp32 1.12.0 """ # MCU: (sysname='esp32', nodename='esp32', release='1.12.0', version='v1.12 on 2019-12-20', machine='ESP32 module (spiram) with ESP32') # Stubber: 1.3.2 usocket = None
[ 37811, 198, 26796, 25, 705, 495, 421, 3558, 6, 319, 15024, 2624, 352, 13, 1065, 13, 15, 198, 37811, 198, 2, 13122, 52, 25, 357, 17597, 3672, 11639, 9774, 2624, 3256, 18666, 12453, 11639, 9774, 2624, 3256, 2650, 11639, 16, 13, 1065, ...
2.393258
89
#!/usr/bin/env python
#-*- coding=utf-8 -*-
#
# Copyright 2012 Jike Inc. All Rights Reserved.
# Author: liwei@jike.com

import re
from urlparse import urlparse

parse1()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 12, 9, 12, 19617, 28, 40477, 12, 23, 532, 9, 12, 198, 2, 198, 2, 15069, 2321, 449, 522, 3457, 13, 1439, 6923, 33876, 13, 198, 2, 6434, 25, 7649, 42990, 31, 73, 522, 13, 785,...
2.725806
62
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.garden.matplotlib.backend_kivyagg import FigureCanvasKivyAgg
from kivy.uix.anchorlayout import AnchorLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
import matplotlib.pyplot as plt
import matplotlib
import datetime
from TransactionBook.model.Filter import Filter
from datetime import datetime
from kivy.uix.popup import Popup
from kivy.properties import NumericProperty, ReferenceListProperty
from kivy.uix.checkbox import CheckBox
from kivy.core.window import Window


if __name__ == "__main__":
    from kivy.base import runTouchApp
    c = MultiSelectPopUp(title="Test", option_list=["Item1", "Item2", "Item3"], callback=cb, option_init=[True, False, True])
    runTouchApp(c)
[ 6738, 479, 452, 88, 13, 84, 844, 13, 25928, 39786, 1330, 24846, 32517, 198, 6738, 479, 452, 88, 13, 84, 844, 13, 18242, 1330, 36052, 198, 6738, 479, 452, 88, 13, 84, 844, 13, 5239, 15414, 1330, 8255, 20560, 198, 6738, 479, 452, 88...
2.985816
282
from dataclasses import dataclass, field
from typing import List
from Car2 import Car
[ 6738, 4818, 330, 28958, 1330, 4818, 330, 31172, 11, 2214, 198, 6738, 19720, 1330, 7343, 198, 6738, 1879, 17, 1330, 1879, 628 ]
3.954545
22
import subprocess
import threading
import time
import errno
import socket
import urllib
import pathlib
from io import StringIO
from http.server import BaseHTTPRequestHandler, HTTPServer

import lib.stations as stations
import lib.epg2xml as epg2xml
import lib.channels_m3u as channels_m3u
from lib.templates import templates

# with help from https://www.acmesystems.it/python_http
# and https://stackoverflow.com/questions/21631799/how-can-i-pass-parameters-to-a-requesthandler

# mostly from https://github.com/ZeWaren/python-upnp-ssdp-example
# and https://stackoverflow.com/questions/46210672/python-2-7-streaming-http-server-supporting-multiple-connections-on-one-port


def start(config, locast, location):
    serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    serverSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    serverSocket.bind((config["main"]['bind_ip'], int(config["main"]['bind_port'])))
    serverSocket.listen(int(config["main"]["concurrent_listeners"]))

    print("Now listening for requests.")
    for i in range(int(config["main"]["concurrent_listeners"])):
        PlexHttpServer(serverSocket, config, locast, location)
[ 11748, 850, 14681, 198, 11748, 4704, 278, 198, 11748, 640, 198, 11748, 11454, 3919, 198, 11748, 17802, 198, 11748, 2956, 297, 571, 198, 11748, 3108, 8019, 198, 6738, 33245, 1330, 10903, 9399, 198, 6738, 2638, 13, 15388, 1330, 7308, 40717,...
2.942643
401
from django import template
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.template import loader
[ 6738, 42625, 14208, 1330, 11055, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 12501, 273, 2024, 1330, 17594, 62, 35827, 198, 6738, 42625, 14208, 13, 4023, 1330, 367, 29281, 31077, 198, 6738, 42625, 14208, 13, 28243, 1330, 40213,...
3.809524
42
import os
from pathlib import Path

import numpy as np

AUDIO_FILENAME_ENDINGS = (".aiff", ".flac", ".m4a", ".mp3", ".ogg", ".opus", ".wav")


def get_file_paths(
    root_path, filename_endings=AUDIO_FILENAME_ENDINGS, traverse_subdirectories=True
):
    """Return a list of paths to all files with the given filename extensions in a directory.
    Also traverses subdirectories by default.
    """
    file_paths = []

    for root, dirs, filenames in os.walk(root_path):
        filenames = sorted(filenames)
        for filename in filenames:
            input_path = os.path.abspath(root)
            file_path = os.path.join(input_path, filename)

            if filename.lower().endswith(filename_endings):
                file_paths.append(Path(file_path))
        if not traverse_subdirectories:
            # prevent descending into subfolders
            break

    return file_paths


def calculate_rms(samples):
    """Given a numpy array of audio samples, return its Root Mean Square (RMS)."""
    return np.sqrt(np.mean(np.square(samples), axis=-1))


def calculate_desired_noise_rms(clean_rms, snr):
    """
    Given the Root Mean Square (RMS) of a clean sound and a desired signal-to-noise ratio (SNR),
    calculate the desired RMS of a noise sound to be mixed in.

    Based on https://github.com/Sato-Kunihiko/audio-SNR/blob/8d2c933b6c0afe6f1203251f4877e7a1068a6130/create_mixed_audio_file.py#L20

    :param clean_rms: Root Mean Square (RMS) - a value between 0.0 and 1.0
    :param snr: Signal-to-Noise (SNR) Ratio in dB - typically somewhere between -20 and 60
    :return:
    """
    a = float(snr) / 20
    noise_rms = clean_rms / (10 ** a)
    return noise_rms


def is_waveform_multichannel(samples):
    """
    Return bool that answers the question: Is the given ndarray a multichannel waveform or not?

    :param samples: numpy ndarray
    :return:
    """
    return len(samples.shape) > 1


def is_spectrogram_multichannel(spectrogram):
    """
    Return bool that answers the question: Is the given ndarray a multichannel spectrogram?

    :param samples: numpy ndarray
    :return:
    """
    return len(spectrogram.shape) > 2 and spectrogram.shape[-1] > 1


def convert_float_samples_to_int16(y):
    """Convert floating-point numpy array of audio samples to int16."""
    if not issubclass(y.dtype.type, np.floating):
        raise ValueError("input samples not floating-point")
    return (y * np.iinfo(np.int16).max).astype(np.int16)
[ 11748, 28686, 198, 6738, 3108, 8019, 1330, 10644, 198, 198, 11748, 299, 32152, 355, 45941, 198, 198, 48877, 9399, 62, 46700, 1677, 10067, 62, 10619, 20754, 796, 357, 1911, 64, 733, 1600, 27071, 2704, 330, 1600, 27071, 76, 19, 64, 1600, ...
2.547325
972
s = "([}}])"

stack = []

if len(s) % 2 == 1:
    print(False)
    exit()

for i in s:
    if i == "(":
        stack.append("(")
    elif i == "[":
        stack.append("[")
    elif i == "{":
        stack.append("{")
    elif i == ")":
        if len(stack) < 1:
            print(False)
            exit()
        if stack[-1] == "(":
            stack.pop()
        else:
            print(False)
            exit()
    elif i == "]":
        if len(stack) < 1:
            print(False)
            exit()
        if stack[-1] == "[":
            stack.pop()
        else:
            print(False)
            exit()
    elif i == "}":
        if len(stack) < 1:
            print(False)
            exit()
        if stack[-1] == "{":
            stack.pop()
        else:
            print(False)
            exit()

if len(stack) == 0:
    print(True)
else:
    print(False)
[ 82, 796, 366, 26933, 11709, 12962, 1, 198, 198, 25558, 796, 17635, 198, 198, 361, 18896, 7, 82, 8, 4064, 362, 6624, 352, 25, 198, 220, 220, 220, 3601, 7, 25101, 8, 198, 220, 220, 220, 8420, 3419, 198, 198, 1640, 1312, 287, 264, ...
1.647388
536
import random
import string
import os

from IPython.display import display, HTML
from .utils import html_loader
from .utils import get_content
from jinja2 import Template
[ 11748, 4738, 198, 11748, 4731, 198, 11748, 28686, 198, 198, 6738, 6101, 7535, 13, 13812, 1330, 3359, 11, 11532, 198, 6738, 764, 26791, 1330, 27711, 62, 29356, 198, 6738, 764, 26791, 1330, 651, 62, 11299, 198, 6738, 474, 259, 6592, 17, ...
3.909091
44
from decimal import Decimal
from fixtures import *  # noqa: F401,F403
from fixtures import TEST_NETWORK
from flaky import flaky  # noqa: F401
from pyln.client import RpcError, Millisatoshi
from utils import (
    only_one, wait_for, sync_blockheight, EXPERIMENTAL_FEATURES, COMPAT,
    VALGRIND
)

import os
import pytest
import subprocess
import time
import unittest


def test_minconf_withdraw(node_factory, bitcoind):
    """Issue 2518: ensure that ridiculous confirmation levels don't overflow

    The number of confirmations is used to compute a maximum height that is to
    be accepted. If the current height is smaller than the number of
    confirmations we wrap around and just select everything. The fix is to
    clamp the maxheight parameter to a positive small number.
    """
    amount = 1000000
    # Don't get any funds from previous runs.
    l1 = node_factory.get_node(random_hsm=True)
    addr = l1.rpc.newaddr()['bech32']

    # Add some funds to withdraw later
    for i in range(10):
        l1.bitcoin.rpc.sendtoaddress(addr, amount / 10**8 + 0.01)

    bitcoind.generate_block(1)
    wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 10)

    with pytest.raises(RpcError):
        l1.rpc.withdraw(destination=addr, satoshi=10000, feerate='normal', minconf=9999999)


def test_addfunds_from_block(node_factory, bitcoind):
    """Send funds to the daemon without telling it explicitly
    """
    # Previous runs with same bitcoind can leave funds!
    l1 = node_factory.get_node(random_hsm=True)
    addr = l1.rpc.newaddr()['bech32']

    bitcoind.rpc.sendtoaddress(addr, 0.1)
    bitcoind.generate_block(1)

    wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 1)

    outputs = l1.db_query('SELECT value FROM outputs WHERE status=0;')
    assert only_one(outputs)['value'] == 10000000

    # The address we detect must match what was paid to.
    output = only_one(l1.rpc.listfunds()['outputs'])
    assert output['address'] == addr

    # Send all our money to a P2WPKH address this time.
    addr = l1.rpc.newaddr("bech32")['bech32']
    l1.rpc.withdraw(addr, "all")
    bitcoind.generate_block(1)
    time.sleep(1)

    # The address we detect must match what was paid to.
    output = only_one(l1.rpc.listfunds()['outputs'])
    assert output['address'] == addr


# this test does a 'listtransactions' on a yet unconfirmed channel
def test_fundchannel_listtransaction(node_factory, bitcoind):
    l1, l2 = node_factory.get_nodes(2)
    l1.fundwallet(10**6)

    l1.connect(l2)
    txid = l1.rpc.fundchannel(l2.info['id'], 10**5)['txid']

    # next call warned about SQL Accessing a null column
    # and crashed the daemon for accessing random memory or null
    txs = l1.rpc.listtransactions()['transactions']

    tx = [t for t in txs if t['hash'] == txid][0]
    assert tx['blockheight'] == 0


def test_withdraw_nlocktime(node_factory):
    """
    Test that we don't set the nLockTime to 0 for withdrawal transactions.
    """
    l1 = node_factory.get_node(1)
    l1.fundwallet(10**4)

    addr = l1.rpc.newaddr()["bech32"]
    tx = l1.rpc.withdraw(addr, 10**3)["tx"]
    nlocktime = node_factory.bitcoind.rpc.decoderawtransaction(tx)["locktime"]
    tip = node_factory.bitcoind.rpc.getblockcount()

    assert nlocktime > 0 and nlocktime <= tip
[ 6738, 32465, 1330, 4280, 4402, 198, 6738, 34609, 1330, 1635, 220, 1303, 645, 20402, 25, 376, 21844, 11, 37, 31552, 198, 6738, 34609, 1330, 43001, 62, 12884, 33249, 198, 6738, 781, 15492, 1330, 781, 15492, 220, 1303, 645, 20402, 25, 376,...
2.637161
1,254
"""Provides plots of mutations for Isolates and Lines.""" from microbepy.common import constants as cn from microbepy.common.dataframe_sorter import DataframeSorter from microbepy.common.isolate import Isolate from microbepy.common import util from microbepy.correlation import genome_correlation from microbepy.data.model_data_provider import ModelDataProvider from microbepy.data import util_data from microbepy.plot.mutation_cofraction import MutationCofraction from microbepy.plot.util_plot import PlotParms import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns COLORS = ['red', 'green', 'blue'] SPECIES = {cn.SPECIES_MIX_DVH: "DVH", cn.SPECIES_MIX_MMP: "MMP", None: "both"} FONTSIZE_TITLE = 16 FONTSIZE_LABEL = 8 MAX_LINES = 9 MIN_FRACTION = 0.25 THRESHOLD_FRAC = 0.2 MAX_SIGLVL = 0.01 COLORBAR_MIN = 1.0 COLORBAR_MAX = 4.0
[ 37811, 15946, 1460, 21528, 286, 220, 23005, 329, 1148, 349, 689, 290, 26299, 526, 15931, 198, 198, 6738, 4580, 65, 538, 88, 13, 11321, 1330, 38491, 355, 269, 77, 198, 6738, 4580, 65, 538, 88, 13, 11321, 13, 7890, 14535, 62, 82, 4337...
2.656627
332
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import os
from scipy.misc import imread
from args import get_args
import matplotlib.pyplot as plt


def encode_label(label):
    '''
    Converting pixel values to corresponding class numbers. Assuming that the input
    label in 3-dim(h,w,c) and in BGR fromat read from cv2
    '''
    h, w, c = label.shape
    new_label = np.zeros((h, w, 1), dtype=np.int32)
    cls_to_clr_map = get_color()
    for i in range(cls_to_clr_map.shape[0]):
        #new_label[(label == cls_to_clr_map[i])[:,:,0]] = i
        #new_label[np.argwhere((label.astype(np.int32) == cls_to_clr_map[i]).all(axis=2))]=i
        print(np.where((label.astype(np.int32) == [120, 0, 128]).all(axis=2)))
        if i == 21:
            new_label[np.where(
                (label.astype(np.int32) == cls_to_clr_map[i]).all(axis=2))] = 255
        else:
            new_label[np.where(
                (label.astype(np.int32) == cls_to_clr_map[i]).all(axis=2))] = i
    return new_label


# this method should generate train-image.txt and train-label.txt
def main():
    '''
    Arguments:
    train-file = txt file containing randomly selected image filenames to be taken as training set.
    val-file = txt file containing randomly selected image filenames to be taken as validation set.
    data-dir = dataset directory
    Usage: python dataset_utils.py --train-file="" --val-file="" --data_dir=""
    '''
    args = get_args()
    data_dir = args.data_dir
    if not os.path.exists(data_dir+'SegmentationClass/' + 'encoded/'):
        os.makedirs(data_dir+'SegmentationClass/' + 'encoded/')
    for filename in os.listdir(data_dir+'SegmentationClass/'):
        if os.path.isdir(data_dir+'SegmentationClass/' + filename):
            continue
        label = imread(data_dir+'SegmentationClass/' + filename).astype('float32')
        label = encode_label(label)
        np.save(data_dir+'SegmentationClass/' + 'encoded/' + filename.split('.')[0] + '.npy', label)

    generate_path_files(args.data_dir, args.train_file, args.val_file)


if __name__ == '__main__':
    main()
[ 2, 15069, 357, 66, 8, 2177, 10184, 10501, 13, 1439, 6923, 33876, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351...
2.506925
1,083
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#

import collections
import os
import torch
import math

from fairseq import bleu, data, options, utils
from fairseq.meters import AverageMeter, StopwatchMeter, TimeMeter
from fairseq.multiprocessing_trainer import MultiprocessingTrainer
from fairseq.progress_bar import progress_bar
from fairseq.sequence_generator import SequenceGenerator


def train(args, epoch, batch_offset, trainer, criterion, dataset, num_gpus):
    """Train the model for one epoch."""

    itr = dataset.dataloader(args.train_subset, batch_size=args.batch_size,
                             test_batch_size=args.test_batch_size,
                             valid_batch_size=args.valid_batch_size,
                             num_workers=args.workers,
                             max_tokens=args.max_tokens, seed=args.seed, epoch=epoch,
                             max_positions=args.max_positions,
                             sample_without_replacement=args.sample_without_replacement)
    loss_meter = AverageMeter()
    bsz_meter = AverageMeter()    # sentences per batch
    wpb_meter = AverageMeter()    # words per batch
    wps_meter = TimeMeter()       # words per second
    clip_meter = AverageMeter()   # % of updates clipped
    gnorm_meter = AverageMeter()  # gradient norm

    desc = '| epoch {:03d}'.format(epoch)
    lr = trainer.get_lr()
    with progress_bar(itr, desc, leave=False) as t:
        for i, sample in data.skip_group_enumerator(t, num_gpus, batch_offset):
            loss, grad_norm = trainer.train_step(sample, criterion)

            ntokens = sum(s['ntokens'] for s in sample)
            src_size = sum(s['src_tokens'].size(0) for s in sample)
            loss_meter.update(loss, ntokens)
            bsz_meter.update(src_size)
            wpb_meter.update(ntokens)
            wps_meter.update(ntokens)
            clip_meter.update(1 if grad_norm > args.clip_norm else 0)
            gnorm_meter.update(grad_norm)

            t.set_postfix(collections.OrderedDict([
                ('loss', '{:.2f} ({:.2f})'.format(loss, loss_meter.avg)),
                ('wps', '{:5d}'.format(round(wps_meter.avg))),
                ('wpb', '{:5d}'.format(round(wpb_meter.avg))),
                ('bsz', '{:5d}'.format(round(bsz_meter.avg))),
                ('lr', lr),
                ('clip', '{:3.0f}%'.format(clip_meter.avg * 100)),
                ('gnorm', '{:.4f}'.format(gnorm_meter.avg)),
            ]))

            if i == 0:
                # ignore the first mini-batch in words-per-second calculation
                wps_meter.reset()
            if args.save_interval > 0 and (i + 1) % args.save_interval == 0:
                trainer.save_checkpoint(args, epoch, i + 1)

        fmt = desc + ' | train loss {:2.2f} | train ppl {:3.2f}'
        fmt += ' | s/checkpoint {:7d} | words/s {:6d} | words/batch {:6d}'
        fmt += ' | bsz {:5d} | lr {:0.6f} | clip {:3.0f}% | gnorm {:.4f}'
        t.write(fmt.format(loss_meter.avg, math.pow(2, loss_meter.avg),
                           round(wps_meter.elapsed_time),
                           round(wps_meter.avg),
                           round(wpb_meter.avg),
                           round(bsz_meter.avg),
                           lr, clip_meter.avg * 100,
                           gnorm_meter.avg))


def validate(args, epoch, trainer, criterion, dataset, subset, ngpus):
    """Evaluate the model on the validation set and return the average loss."""

    itr = dataset.dataloader(subset, batch_size=None,
                             max_tokens=args.max_tokens,
                             max_positions=args.max_positions)
    loss_meter = AverageMeter()

    desc = '| epoch {:03d} | valid on \'{}\' subset'.format(epoch, subset)
    with progress_bar(itr, desc, leave=False) as t:
        for _, sample in data.skip_group_enumerator(t, ngpus):
            ntokens = sum(s['ntokens'] for s in sample)
            loss = trainer.valid_step(sample, criterion)
            loss_meter.update(loss, ntokens)
            t.set_postfix(loss='{:.2f}'.format(loss_meter.avg))

        val_loss = loss_meter.avg
        t.write(desc + ' | valid loss {:2.2f} | valid ppl {:3.2f}'
                .format(val_loss, math.pow(2, val_loss)))

    # update and return the learning rate
    return val_loss


def score_test(args, model, dataset, subset, beam, cuda_device):
    """Evaluate the model on the test set and return the BLEU scorer."""

    translator = SequenceGenerator([model], dataset.dst_dict, beam_size=beam)
    if torch.cuda.is_available():
        translator.cuda()

    scorer = bleu.Scorer(dataset.dst_dict.pad(), dataset.dst_dict.eos(), dataset.dst_dict.unk())
    itr = dataset.dataloader(subset, batch_size=4, max_positions=args.max_positions)
    for _, _, ref, hypos in translator.generate_batched_itr(itr, cuda_device=cuda_device):
        scorer.add(ref.int().cpu(), hypos[0]['tokens'].int().cpu())
    return scorer


if __name__ == '__main__':
    main()
[ 2, 15069, 357, 66, 8, 2177, 12, 25579, 11, 3203, 11, 3457, 13, 198, 2, 1439, 2489, 10395, 13, 198, 2, 198, 2, 770, 2723, 2438, 318, 11971, 739, 262, 5964, 1043, 287, 262, 38559, 24290, 2393, 287, 198, 2, 262, 6808, 8619, 286, 42...
2.104292
2,493
#!/usr/bin/env python
"""
This parses a log file series (i.e. log, log.1, log.2, etc..) and
outputs timing and call frequency information for HAL messages.

Hazen 5/18
"""
from datetime import datetime
import os

pattern = '%Y-%m-%d %H:%M:%S,%f'


def getIterable(dict_or_list):
    """
    Returns an iterable given a dictionary of a list.
    """
    if isinstance(dict_or_list, dict):
        iterable = list(dict_or_list.values())
    elif isinstance(dict_or_list, list):
        iterable = dict_or_list
    else:
        raise Exception("Unknown type '" + str(type(dict_or_list)) + "'")
    return iterable


def groupByMsgType(messages):
    """
    Returns a dictionary keyed by message type, with a list of one or
    more message objects per message type.
    """
    return groupByX(lambda x : x.getType(), messages)


def groupBySource(messages):
    """
    Returns a dictionary keyed by message source, with a list of one or
    more message objects per message source.
    """
    return groupByX(lambda x : x.getSource(), messages)


def groupByX(grp_fn, messages):
    """
    Returns a dictionary keyed by the requested group.
    """
    m_grp = {}

    for msg in getIterable(messages):

        # Ignore messages that we don't have all the timing for.
        if msg.isComplete() or not ignore_incomplete:
            m_type = grp_fn(msg)
            if m_type in m_grp:
                m_grp[m_type].append(msg)
            else:
                m_grp[m_type] = [msg]

    return m_grp


def logTiming(basename, ignore_incomplete = False):
    """
    Returns a dictionary of Message objects keyed by their ID number.
    """
    zero_time = None
    messages = {}
    for ext in [".5", ".4", ".3", ".2", ".1", ""]:
        fname = basename + ".out" + ext
        if not os.path.exists(fname):
            print(fname, "not found.")
            continue

        with open(fname) as fp:
            for line in fp:
                try:
                    [time, command] = map(lambda x: x.strip(), line.split(":hal4000:INFO:"))
                except ValueError:
                    continue

                if zero_time is None:
                    zero_time = time

                # Message queued.
                if (command.startswith("queued,")):
                    [m_id, source, m_type] = command.split(",")[1:]
                    messages[m_id] = Message(m_type = m_type,
                                             source = source,
                                             time = time,
                                             zero_time = zero_time)

                # Message sent.
                elif (command.startswith("sent,")):
                    m_id = command.split(",")[1]
                    messages[m_id].sent(time)

                # Message processed.
                elif (command.startswith("processed,")):
                    m_id = command.split(",")[1]
                    messages[m_id].processed(time)

                elif (command.startswith("worker done,")):
                    m_id = command.split(",")[1]
                    messages[m_id].incNWorkers()

    # Ignore messages that we don't have all the timing for.
    if not ignore_incomplete:
        temp = {}
        for m_id in messages:
            msg = messages[m_id]
            if msg.isComplete():
                temp[m_id] = msg
        return temp
    else:
        return messages


def processingTime(messages):
    """
    Returns the total processing time for a collection of messages.
    """
    accum_time = 0
    for msg in getIterable(messages):
        if isinstance(msg, list):
            for elt in msg:
                accum_time += elt.getProcessingTime()
        else:
            accum_time += msg.getProcessingTime()
    return accum_time


def queuedTime(messages):
    """
    Returns the total queued time for a a collection of messages.
    """
    accum_time = 0
    for msg in getIterable(messages):
        if isinstance(msg, list):
            for elt in msg:
                accum_time += elt.getQueuedTime()
        else:
            accum_time += msg.getQueuedTime()
    return accum_time


if (__name__ == "__main__"):
    import sys

    if (len(sys.argv) != 2):
        print("usage: <log file>")
        exit()

    messages = logTiming(sys.argv[1])

    groups = groupByMsgType(messages)
    print()
    print("All messages:")
    for key in sorted(groups):
        grp = groups[key]
        print(key + ", {0:0d} counts, {1:.3f} seconds".format(len(grp), processingTime(grp)))
    print("Total queued time {0:.3f} seconds".format(queuedTime(groups)))
    print("Total processing time {0:.3f} seconds".format(processingTime(groups)))
    print()

    print("Film messages:")
    groups = groupByMsgType(groupBySource(messages)["film"])
    for key in sorted(groups):
        grp = groups[key]
        print(key + ", {0:0d} counts, {1:.3f} seconds".format(len(grp), processingTime(grp)))
    print("Total processing time {0:.3f} seconds".format(processingTime(groups)))
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 37811, 198, 1212, 13544, 274, 257, 2604, 2393, 2168, 357, 72, 13, 68, 13, 2604, 11, 2604, 13, 16, 11, 2604, 13, 17, 11, 3503, 492, 8, 290, 198, 22915, 82, 10576, 290, 869, 8373, ...
2.05309
2,524
from django.core.management.base import BaseCommand
from django.utils import termcolors
from jsonschema import Draft4Validator
from jsonschema.exceptions import SchemaError

import json
[ 6738, 42625, 14208, 13, 7295, 13, 27604, 13, 8692, 1330, 7308, 21575, 198, 6738, 42625, 14208, 13, 26791, 1330, 3381, 4033, 669, 198, 6738, 44804, 684, 2395, 2611, 1330, 13650, 19, 47139, 1352, 198, 6738, 44804, 684, 2395, 2611, 13, 106...
3.72
50
import cv2, time
import numpy as np
import Tkinter

"""
Wraps up some interfaces to opencv user interface methods (displaying
image frames, event handling, etc).

If desired, an alternative UI could be built and imported into get_pulse.py
instead. Opencv is used to perform much of the data analysis, but there is no
reason it has to be used to handle the UI as well. It just happens to be very
effective for our purposes.
"""

"""
The rest of this file defines some GUI plotting functionality. There are plenty
of other ways to do simple x-y data plots in python, but this application uses
cv2.imshow to do real-time data plotting and handle user interaction.

This is entirely independent of the data calculation functions, so it can be
replaced in the get_pulse.py application easily.
"""


def combine(left, right):
    """Stack images horizontally.
    """
    h = max(left.shape[0], right.shape[0])
    w = left.shape[1] + right.shape[1]
    hoff = left.shape[0]

    shape = list(left.shape)
    shape[0] = h
    shape[1] = w

    comb = np.zeros(tuple(shape),left.dtype)

    # left will be on left, aligned top, with right on right
    comb[:left.shape[0],:left.shape[1]] = left
    comb[:right.shape[0],left.shape[1]:] = right

    return comb
[ 11748, 269, 85, 17, 11, 640, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 309, 74, 3849, 198, 198, 37811, 198, 36918, 862, 510, 617, 20314, 284, 1280, 33967, 2836, 7071, 5050, 357, 13812, 278, 198, 9060, 13431, 11, 1785, 9041, 11, ...
3.033254
421
# -*- coding: utf-8 -*-
# Natural Language Toolkit: Transformation-based learning
#
# Copyright (C) 2001-2018 NLTK Project
# Author: Marcus Uneson <marcus.uneson@gmail.com>
#   based on previous (nltk2) version by
#   Christopher Maloof, Edward Loper, Steven Bird
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT

from __future__ import print_function, division

from collections import defaultdict, Counter

from nltk.tag import TaggerI
from nltk.tbl import Feature, Template
from nltk import jsontags


######################################################################
# Brill Templates
######################################################################


def nltkdemo18():
    """
    Return 18 templates, from the original nltk demo, in multi-feature syntax
    """
    return [
        Template(Pos([-1])),
        Template(Pos([1])),
        Template(Pos([-2])),
        Template(Pos([2])),
        Template(Pos([-2, -1])),
        Template(Pos([1, 2])),
        Template(Pos([-3, -2, -1])),
        Template(Pos([1, 2, 3])),
        Template(Pos([-1]), Pos([1])),
        Template(Word([-1])),
        Template(Word([1])),
        Template(Word([-2])),
        Template(Word([2])),
        Template(Word([-2, -1])),
        Template(Word([1, 2])),
        Template(Word([-3, -2, -1])),
        Template(Word([1, 2, 3])),
        Template(Word([-1]), Word([1])),
    ]


def nltkdemo18plus():
    """
    Return 18 templates, from the original nltk demo, and additionally a few
    multi-feature ones (the motivation is easy comparison with nltkdemo18)
    """
    return nltkdemo18() + [
        Template(Word([-1]), Pos([1])),
        Template(Pos([-1]), Word([1])),
        Template(Word([-1]), Word([0]), Pos([1])),
        Template(Pos([-1]), Word([0]), Word([1])),
        Template(Pos([-1]), Word([0]), Pos([1])),
    ]


def fntbl37():
    """
    Return 37 templates taken from the postagging task of the fntbl
    distribution http://www.cs.jhu.edu/~rflorian/fntbl/
    (37 is after excluding a handful which do not condition on Pos[0];
    fntbl can do that but the current nltk implementation cannot.)
    """
    return [
        Template(Word([0]), Word([1]), Word([2])),
        Template(Word([-1]), Word([0]), Word([1])),
        Template(Word([0]), Word([-1])),
        Template(Word([0]), Word([1])),
        Template(Word([0]), Word([2])),
        Template(Word([0]), Word([-2])),
        Template(Word([1, 2])),
        Template(Word([-2, -1])),
        Template(Word([1, 2, 3])),
        Template(Word([-3, -2, -1])),
        Template(Word([0]), Pos([2])),
        Template(Word([0]), Pos([-2])),
        Template(Word([0]), Pos([1])),
        Template(Word([0]), Pos([-1])),
        Template(Word([0])),
        Template(Word([-2])),
        Template(Word([2])),
        Template(Word([1])),
        Template(Word([-1])),
        Template(Pos([-1]), Pos([1])),
        Template(Pos([1]), Pos([2])),
        Template(Pos([-1]), Pos([-2])),
        Template(Pos([1])),
        Template(Pos([-1])),
        Template(Pos([-2])),
        Template(Pos([2])),
        Template(Pos([1, 2, 3])),
        Template(Pos([1, 2])),
        Template(Pos([-3, -2, -1])),
        Template(Pos([-2, -1])),
        Template(Pos([1]), Word([0]), Word([1])),
        Template(Pos([1]), Word([0]), Word([-1])),
        Template(Pos([-1]), Word([-1]), Word([0])),
        Template(Pos([-1]), Word([0]), Word([1])),
        Template(Pos([-2]), Pos([-1])),
        Template(Pos([1]), Pos([2])),
        Template(Pos([1]), Pos([2]), Word([1]))
    ]


def brill24():
    """
    Return 24 templates of the seminal TBL paper, Brill (1995)
    """
    return [
        Template(Pos([-1])),
        Template(Pos([1])),
        Template(Pos([-2])),
        Template(Pos([2])),
        Template(Pos([-2, -1])),
        Template(Pos([1, 2])),
        Template(Pos([-3, -2, -1])),
        Template(Pos([1, 2, 3])),
        Template(Pos([-1]), Pos([1])),
        Template(Pos([-2]), Pos([-1])),
        Template(Pos([1]), Pos([2])),
        Template(Word([-1])),
        Template(Word([1])),
        Template(Word([-2])),
        Template(Word([2])),
        Template(Word([-2, -1])),
        Template(Word([1, 2])),
        Template(Word([-1, 0])),
        Template(Word([0, 1])),
        Template(Word([0])),
        Template(Word([-1]), Pos([-1])),
        Template(Word([1]), Pos([1])),
        Template(Word([0]), Word([-1]), Pos([-1])),
        Template(Word([0]), Word([1]), Pos([1])),
    ]


def describe_template_sets():
    """
    Print the available template sets in this demo, with a short description"
    """
    import inspect
    import sys

    # a bit of magic to get all functions in this module
    templatesets = inspect.getmembers(sys.modules[__name__], inspect.isfunction)
    for (name, obj) in templatesets:
        if name == "describe_template_sets":
            continue
        print(name, obj.__doc__, "\n")


######################################################################
# The Brill Tagger
######################################################################
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 12068, 15417, 16984, 15813, 25, 49127, 12, 3106, 4673, 198, 2, 198, 2, 15069, 357, 34, 8, 5878, 12, 7908, 22879, 51, 42, 4935, 198, 2, 6434, 25, 17068, 791, 42038...
2.301616
2,228
import json
import logging
import sys

import numpy as np
import torch
from task_config import SuperGLUE_LABEL_MAPPING

from snorkel.mtl.data import MultitaskDataset

sys.path.append("..")  # Adds higher directory to python modules path.


logger = logging.getLogger(__name__)

TASK_NAME = "WSC"
[ 11748, 33918, 198, 11748, 18931, 198, 11748, 25064, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 28034, 198, 6738, 4876, 62, 11250, 1330, 3115, 8763, 8924, 62, 48780, 3698, 62, 44, 24805, 2751, 198, 198, 6738, 3013, 273, 7750, 1...
2.950495
101
import re
import json

__all__ = ["Simplimental"]
[ 11748, 302, 198, 11748, 33918, 198, 198, 834, 439, 834, 796, 14631, 8890, 489, 9134, 8973, 198 ]
2.941176
17
# This example shows how to read or modify the Axes Optimization settings using the RoboDK API and a JSON string.
# You can select "Axes optimization" in a robot machining menu or the robot parameters to view the axes optimization settings.
# It is possible to update the axes optimization settings attached to a robot or a robot machining project manually or using the API.
#
# More information about the RoboDK API here:
# https://robodk.com/doc/en/RoboDK-API.html
# For more information visit:
# https://robodk.com/doc/en/PythonAPI/robolink.html

from robolink import *  # RoboDK API

# JSON tools
import json

# Start the RoboDK API
RDK = Robolink()

# Ask the user to select a robot arm (6 axis robot wich can have external axes)
robot = RDK.ItemUserPick("Select a robot arm", ITEM_TYPE_ROBOT_ARM)

# Default optimization settings test template
AxesOptimSettings = {
    # Optimization parameters:
    "Active": 1,  # Use generic axes optimization: 0=Disabled or 1=Enabled
    "Algorithm": 2,  # Optimization algorithm to use: 1=Nelder Mead, 2=Samples, 3=Samples+Nelder Mead
    "MaxIter": 650,  # Max. number of iterations
    "Tol": 0.0016,  # Tolerance to stop iterations

    # Absolute Reference joints (double):
    "AbsJnt_1": 104.17,
    "AbsJnt_2": 11.22,
    "AbsJnt_3": 15.97,
    "AbsJnt_4": -87.48,
    "AbsJnt_5": -75.36,
    "AbsJnt_6": 63.03,
    "AbsJnt_7": 174.13,
    "AbsJnt_8": 173.60,
    "AbsJnt_9": 0,

    # Using Absolute reference joints (0: No, 1: Yes):
    "AbsOn_1": 1,
    "AbsOn_2": 1,
    "AbsOn_3": 1,
    "AbsOn_4": 1,
    "AbsOn_5": 1,
    "AbsOn_6": 1,
    "AbsOn_7": 1,
    "AbsOn_8": 1,
    "AbsOn_9": 1,

    # Weight for absolute reference joints (double):
    "AbsW_1": 100,
    "AbsW_2": 100,
    "AbsW_3": 100,
    "AbsW_4": 89,
    "AbsW_5": 90,
    "AbsW_6": 92,
    "AbsW_7": 92,
    "AbsW_8": 96,
    "AbsW_9": 50,

    # Using for relative joint motion smoothing (0: No, 1: Yes):
    "RelOn_1": 1,
    "RelOn_2": 1,
    "RelOn_3": 1,
    "RelOn_4": 1,
    "RelOn_5": 1,
    "RelOn_6": 1,
    "RelOn_7": 1,
    "RelOn_8": 1,
    "RelOn_9": 1,

    # Weight for relative joint motion (double):
    "RelW_1": 5,
    "RelW_2": 47,
    "RelW_3": 44,
    "RelW_4": 43,
    "RelW_5": 36,
    "RelW_6": 47,
    "RelW_7": 53,
    "RelW_8": 59,
    "RelW_9": 0,
}

# Update one value, for example, make it active:
ToUpdate = {}
ToUpdate["Active"] = 1
json_str = json.dumps(json.dumps(ToUpdate))
status = robot.setParam("OptimAxes", json_str)
print(status)

# Example to make a partial or full update
count = 1
while True:
    for i in range(7):
        # Partial update
        ToUpdate = {}
        ToUpdate["AbsJnt_" + str(i+1)] = (count+i)*4
        ToUpdate["AbsOn_" + str(i+1)] = count % 2
        ToUpdate["AbsW_" + str(i+1)] = (count+i)
        json_str = json.dumps(json.dumps(ToUpdate))
        status = robot.setParam("OptimAxes", json_str)
        print(status)

        # Full update
        #OptimAxes_TEST["RefJoint_" + str(i+1)] = (count+i)*4
        #OptimAxes_TEST["RefWeight_" + str(i+1)] = (count+i)
        #OptimAxes_TEST["RefOn_" + str(i+1)] = count % 2

    # Full update
    #print(robot.setParam("OptimAxes", str(AxesOptimSettings)))
    count = count + 1

    # Read settings
    json_data = robot.setParam("OptimAxes")
    json_object = json.loads(json_data)
    print(json.dumps(json_object, indent=4))
    pause(0.2)

# Example to read the current axes optimization settings:
while True:
    json_data = robot.setParam("OptimAxes")
    json_object = json.loads(json_data)
    print(json.dumps(json_object, indent=4))
    pause(0.2)
[ 2, 770, 1672, 2523, 703, 284, 1100, 393, 13096, 262, 12176, 274, 30011, 1634, 6460, 1262, 262, 39702, 48510, 7824, 290, 257, 19449, 4731, 13, 198, 2, 921, 460, 2922, 366, 31554, 274, 23989, 1, 287, 257, 9379, 3235, 3191, 6859, 393, ...
2.274547
1,599
from slr_parser.grammar import Grammar
import unittest


if __name__ == '__main__':
    unittest.main()
[ 6738, 1017, 81, 62, 48610, 13, 4546, 3876, 1330, 20159, 3876, 198, 11748, 555, 715, 395, 628, 198, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 555, 715, 395, 13, 12417, 3419, 198 ]
2.625
40
# Generated by Django 3.1 on 2020-09-08 07:43

from django.db import migrations, models
import django.db.models.deletion
[ 2, 2980, 515, 416, 37770, 513, 13, 16, 319, 12131, 12, 2931, 12, 2919, 8753, 25, 3559, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 198, 11748, 42625, 14208, 13, 9945, 13, 27530, 13, 2934, 1616, 295, 628 ]
2.904762
42
import torch
import numpy as np
from mpi4py import MPI

from parallel_pytorch.ops import tensor_merge
from parallel_pytorch.utils import abort_on_exception


def run_all():
    test_1()
    test_2()


if __name__ == '__main__':
    run_all()
[ 11748, 28034, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 285, 14415, 19, 9078, 1330, 4904, 40, 198, 198, 6738, 10730, 62, 9078, 13165, 354, 13, 2840, 1330, 11192, 273, 62, 647, 469, 198, 6738, 10730, 62, 9078, 13165, 354, 13, 2679...
2.595745
94
"""Day 07""" if __name__ == '__main__': process('test.txt') process('input.txt')
[ 37811, 12393, 8753, 37811, 198, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 1429, 10786, 9288, 13, 14116, 11537, 198, 220, 220, 220, 1429, 10786, 15414, 13, 14116, 11537, 198 ]
2.432432
37
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
utilities

@author: boyangzhao
"""

import pandas as pd
import re
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 315, 2410, 198, 31, 9800, 25, 2933, 648, 89, 23778, 198, 37811, 198, 198, 11748, 19798, 292, 355, 279...
2.32
50
#!/usr/bin/python3.7
#   Copyright 2020 Aragubas
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
#

# -- Imports -- #
from ENGINE import APPDATA as reg
from ENGINE import UTILS as utils
import ENGINE as tge
from Fogoso.MAIN import ClassesUtils as gameObjs
from Fogoso import MAIN as gameMain
import pygame, sys
import importlib
import time
from random import randint

OptionsScreen_DebugModeEnabled = gameObjs.UpDownButton
OptionsScreen_RandomWindowTitle = gameObjs.UpDownButton
OptionsScreen_NumberFormatting = gameObjs.UpDownButton
ElementsX = 0
ElementsY = 0
[ 2, 48443, 14629, 14, 8800, 14, 29412, 18, 13, 22, 198, 2, 220, 220, 15069, 12131, 943, 363, 549, 292, 198, 2, 198, 2, 220, 220, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 220, 22...
3.328221
326
from robotpy_ext.control.toggle import Toggle
from robotpy_ext.misc.precise_delay import NotifierDelay
[ 6738, 9379, 9078, 62, 2302, 13, 13716, 13, 44256, 1330, 34098, 198, 6738, 9379, 9078, 62, 2302, 13, 44374, 13, 3866, 37561, 62, 40850, 1330, 1892, 7483, 13856, 323, 628, 628 ]
3.419355
31
'''
    This file contains test cases for tflearn
'''

import tensorflow.compat.v1 as tf
import tflearn
import unittest

if __name__ == "__main__":
    unittest.main()
[ 7061, 6, 198, 220, 220, 220, 770, 2393, 4909, 1332, 2663, 329, 256, 27919, 1501, 198, 7061, 6, 198, 198, 11748, 11192, 273, 11125, 13, 5589, 265, 13, 85, 16, 355, 48700, 198, 11748, 256, 27919, 1501, 198, 11748, 555, 715, 395, 198, ...
2.609375
64
#!/usr/bin/python3

# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************

import json
import os
import sys
import subprocess

if __name__ == "__main__":
    success = True
    try:
        subprocess.run('cd /root; fab install-libs', shell=True, check=True)
    except:
        success = False

    reply = dict()
    reply['request_id'] = os.environ['request_id']
    if success:
        reply['status'] = 'ok'
    else:
        reply['status'] = 'err'

    reply['response'] = dict()
    try:
        with open("/root/result.json") as f:
            reply['response']['result'] = json.loads(f.read())
    except:
        reply['response']['result'] = {"error": "Failed to open result.json"}

    reply['response']['log'] = "/var/log/datalab/{0}/{0}_{1}_{2}.log".format(os.environ['conf_resource'],
                                                                             os.environ['project_name'],
                                                                             os.environ['request_id'])

    with open("/response/{}_{}_{}.json".format(os.environ['conf_resource'], os.environ['project_name'],
                                               os.environ['request_id']), 'w') as response_file:
        response_file.write(json.dumps(reply))

    try:
        subprocess.run('chmod 666 /response/*', shell=True, check=True)
    except:
        success = False

    if not success:
        sys.exit(1)
[ 2, 48443, 14629, 14, 8800, 14, 29412, 18, 198, 198, 2, 41906, 17174, 4557, 35625, 198, 2, 198, 2, 49962, 284, 262, 24843, 10442, 5693, 357, 1921, 37, 8, 739, 530, 198, 2, 393, 517, 18920, 5964, 11704, 13, 220, 4091, 262, 28536, 23...
2.567148
901
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
config settings, will be used in finetune.py
"""

from easydict import EasyDict as edict
import mindspore.common.dtype as mstype
from .bert_model import BertConfig

cfg = edict({
    'task': 'NER',
    'num_labels': 41,
    'data_file': '',
    'schema_file': None,
    'finetune_ckpt': '',
    'use_crf': False,
    'clue_benchmark': False,
})

bert_net_cfg = BertConfig(
    batch_size=8 if not cfg.clue_benchmark else 1,
    seq_length=512,
    vocab_size=30522,
    hidden_size=1024,
    num_hidden_layers=24,
    num_attention_heads=16,
    intermediate_size=4096,
    hidden_act="gelu",
    hidden_dropout_prob=0.0,
    attention_probs_dropout_prob=0.0,
    max_position_embeddings=512,
    type_vocab_size=2,
    initializer_range=0.02,
    use_relative_positions=False,
    input_mask_from_dataset=True,
    token_type_ids_from_dataset=True,
    dtype=mstype.float32,
    compute_type=mstype.float16,
)
[ 2, 15069, 12131, 43208, 21852, 1766, 1539, 12052, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 198...
2.80354
565
# -*- coding: utf-8 -*-
# utopia-cms 2020. Aníbal Pacheco.

from django.core.management import BaseCommand
from django.db.utils import IntegrityError

from apps import core_articleviewedby_mdb
from core.models import ArticleViewedBy
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 3384, 24464, 12, 46406, 12131, 13, 1052, 6893, 350, 4891, 1073, 13, 198, 198, 6738, 42625, 14208, 13, 7295, 13, 27604, 1330, 7308, 21575, 198, 6738, 42625, 14208, 13,...
3.106667
75
import torch
import torch.nn as nn
from torch.optim import SGD

import MinkowskiEngine as ME

from MinkowskiEngine.modules.resnet_block import BasicBlock, Bottleneck

from examples.common import data_loader
from examples.resnet import ResNetBase


if __name__ == '__main__':
    # loss and network
    criterion = nn.CrossEntropyLoss()
    net = MinkUNet14A(in_channels=3, out_channels=5, D=2)
    print(net)

    # a data loader must return a tuple of coords, features, and labels.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    net = net.to(device)
    optimizer = SGD(net.parameters(), lr=1e-2)

    for i in range(10):
        optimizer.zero_grad()

        # Get new data
        coords, feat, label = data_loader(is_classification=False)
        input = ME.SparseTensor(feat, coords=coords).to(device)
        label = label.to(device)

        # Forward
        output = net(input)

        # Loss
        loss = criterion(output.F, label)
        print('Iteration: ', i, ', Loss: ', loss.item())

        # Gradient
        loss.backward()
        optimizer.step()

    # Saving and loading a network
    torch.save(net.state_dict(), 'test.pth')
    net.load_state_dict(torch.load('test.pth'))
[ 11748, 28034, 198, 11748, 28034, 13, 20471, 355, 299, 77, 198, 6738, 28034, 13, 40085, 1330, 26147, 35, 198, 198, 11748, 337, 676, 12079, 13798, 355, 11948, 198, 198, 6738, 337, 676, 12079, 13798, 13, 18170, 13, 411, 3262, 62, 9967, 1...
2.458661
508
"""TODO.""" from setuptools import setup setup( name='nginx-access-tailer', version='0.1', author='swfrench', url='https://github.com/swfrench/nginx-tailer', packages=['nginx_access_tailer',], license='BSD three-clause license', entry_points={ 'console_scripts': ['nginx-access-tailer = nginx_access_tailer.__main__:main'], }, install_requires=[ 'python-gflags >= 3.1.1', 'google-cloud-monitoring >= 0.25.0', ], test_suite='nose.collector', tests_require=['nose', 'mock'], )
[ 37811, 51, 3727, 46, 526, 15931, 198, 198, 6738, 900, 37623, 10141, 1330, 9058, 198, 198, 40406, 7, 198, 220, 220, 220, 1438, 11639, 782, 28413, 12, 15526, 12, 13199, 263, 3256, 198, 220, 220, 220, 2196, 11639, 15, 13, 16, 3256, 198...
2.259259
243
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .. import helpers
from . import integration
[ 2, 15069, 357, 66, 8, 2177, 8180, 10501, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 198, 2, ...
3.890244
164
#!/usr/bin/env python
# -*- coding: utf-8 -*

import os

from setuptools import find_packages, setup

# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

with open('requirements.txt') as f:
    install_requires = f.read().splitlines()

setup(
    name='persistent-celery-beat-scheduler',
    version='0.1.1.dev0',
    packages=find_packages('src', exclude=('tests',)),
    package_dir={'': 'src'},
    include_package_data=True,
    zip_safe=False,
    description=(
        'Celery Beat Scheduler that stores the scheduler data in Redis.'
    ),
    author='Richard O\'Dwyer',
    author_email='richard@richard.do',
    license='Apache 2',
    long_description='https://github.com/richardasaurus/persistent-celery-beat-scheduler',
    install_requires=install_requires,
    classifiers=[
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Internet :: WWW/HTTP',
    ],
)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 198, 198, 11748, 28686, 198, 198, 6738, 900, 37623, 10141, 1330, 1064, 62, 43789, 11, 9058, 198, 198, 2, 1249, 9058, 13, 9078, ...
2.584466
515
import collections
import unittest

import driver
from driver.protocol import *

_server = ('localhost', 11211)
_dead_retry = 30
_socket_timeout = 3
_max_receive_size = 4096


def _raise_exception(message):
    raise Exception(message)
[ 11748, 17268, 198, 11748, 555, 715, 395, 198, 11748, 4639, 198, 6738, 4639, 13, 11235, 4668, 1330, 1635, 198, 198, 62, 15388, 796, 19203, 36750, 3256, 13539, 1157, 8, 198, 62, 25124, 62, 1186, 563, 796, 1542, 198, 62, 44971, 62, 48678...
3.118421
76
# --------------
# Import packages
import numpy as np
import pandas as pd
from scipy.stats import mode

path

# code starts here
bank = pd.read_csv(path)
categorical_var = bank.select_dtypes(include = 'object')
print(categorical_var)
numerical_var = bank.select_dtypes(include = 'number')
print(numerical_var)
# code ends here


# --------------
# code starts here
banks = bank.drop('Loan_ID',axis = 1)
print(banks)
print(banks.isnull().sum())
bank_mode = banks.mode().iloc[0]
banks = banks.fillna(bank_mode)
#code ends here


# --------------
# Code starts here
avg_loan_amount = banks.pivot_table(index=['Gender','Married','Self_Employed'],values = 'LoanAmount')
# code ends here


# --------------
# code starts here
loan_approved_se = ((banks['Self_Employed']=='Yes') & (banks['Loan_Status']=='Y')).value_counts()
#print(loan_approved_se)
loan_approved_nse = ((banks['Self_Employed']=='No') & (banks['Loan_Status']=='Y')).value_counts()
print(loan_approved_nse)
Loan_Status = 614
percentage_se = (56/Loan_Status)*100
percentage_nse = (366/Loan_Status)*100
# code ends here


# --------------
# code starts here
loan_term = banks['Loan_Amount_Term'].apply (lambda x : int(x)/12)
print(loan_term.value_counts())
big_loan = [i for i in loan_term if i >= 25]
big_loan_term = len(big_loan)
print(big_loan_term)
#[loan_term.value_counts()[i] for i in range(len(loan_terms)) if loan_term.value_counts().index[i] >= 25]
# code ends here


# --------------
# code starts here
loan_groupby = banks.groupby('Loan_Status')
loan_groupby = loan_groupby['ApplicantIncome','Credit_History']
mean_values = loan_groupby.mean()
# code ends here
[ 2, 220, 26171, 198, 2, 17267, 10392, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292, 355, 279, 67, 198, 6738, 629, 541, 88, 13, 34242, 1330, 4235, 220, 198, 220, 198, 198, 6978, 198, 198, 2, 2438, 4940, 994, 198, 17796, ...
2.680713
617
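The sample above leans on three pandas idioms: select_dtypes to split columns by kind, mode-based imputation through fillna, and pivot_table for grouped means. A minimal, self-contained sketch of the same moves on synthetic data (the frame below is invented for illustration):

import pandas as pd

# Tiny stand-in for the bank data; column names are illustrative only.
df = pd.DataFrame({
    'Gender': ['Male', 'Female', 'Male', None],
    'Married': ['Yes', 'No', 'Yes', 'Yes'],
    'LoanAmount': [120.0, 80.0, None, 150.0],
})

categorical = df.select_dtypes(include='object')  # object columns
numerical = df.select_dtypes(include='number')    # numeric columns

# Impute every column with its mode; .iloc[0] picks one value per column.
df = df.fillna(df.mode().iloc[0])

# Mean LoanAmount per (Gender, Married) group, mirroring the pivot_table step.
print(df.pivot_table(index=['Gender', 'Married'], values='LoanAmount'))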
# This file is part of Patsy # Copyright (C) 2013 Nathaniel Smith <njs@pobox.com> # See file LICENSE.txt for license information. # Regression tests for fixed bugs (when not otherwise better covered somewhere # else) from patsy import (EvalEnvironment, dmatrix, build_design_matrices, PatsyError, Origin)
[ 2, 770, 2393, 318, 636, 286, 47216, 88, 198, 2, 15069, 357, 34, 8, 2211, 49536, 4176, 1279, 77, 8457, 31, 79, 672, 1140, 13, 785, 29, 198, 2, 4091, 2393, 38559, 24290, 13, 14116, 329, 5964, 1321, 13, 198, 198, 2, 3310, 2234, 525...
3.018519
108
__all__ = ['imread', 'imsave'] import numpy as np from PIL import Image from ...util import img_as_ubyte, img_as_uint def imread(fname, dtype=None, img_num=None, **kwargs): """Load an image from file. Parameters ---------- fname : str or file File name or file-like-object. dtype : numpy dtype object or string specifier Specifies data type of array elements. img_num : int, optional Specifies which image to read in a file with multiple images (zero-indexed). kwargs : keyword pairs, optional Addition keyword arguments to pass through. Notes ----- Files are read using the Python Imaging Library. See PIL docs [1]_ for a list of supported formats. References ---------- .. [1] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html """ if isinstance(fname, str): with open(fname, 'rb') as f: im = Image.open(f) return pil_to_ndarray(im, dtype=dtype, img_num=img_num) else: im = Image.open(fname) return pil_to_ndarray(im, dtype=dtype, img_num=img_num) def pil_to_ndarray(image, dtype=None, img_num=None): """Import a PIL Image object to an ndarray, in memory. Parameters ---------- Refer to ``imread``. """ try: # this will raise an IOError if the file is not readable image.getdata()[0] except IOError as e: site = "http://pillow.readthedocs.org/en/latest/installation.html#external-libraries" pillow_error_message = str(e) error_message = ('Could not load "%s" \n' 'Reason: "%s"\n' 'Please see documentation at: %s' % (image.filename, pillow_error_message, site)) raise ValueError(error_message) frames = [] grayscale = None i = 0 while 1: try: image.seek(i) except EOFError: break frame = image if img_num is not None and img_num != i: image.getdata()[0] i += 1 continue if image.format == 'PNG' and image.mode == 'I' and dtype is None: dtype = 'uint16' if image.mode == 'P': if grayscale is None: grayscale = _palette_is_grayscale(image) if grayscale: frame = image.convert('L') else: if image.format == 'PNG' and 'transparency' in image.info: frame = image.convert('RGBA') else: frame = image.convert('RGB') elif image.mode == '1': frame = image.convert('L') elif 'A' in image.mode: frame = image.convert('RGBA') elif image.mode == 'CMYK': frame = image.convert('RGB') if image.mode.startswith('I;16'): shape = image.size dtype = '>u2' if image.mode.endswith('B') else '<u2' if 'S' in image.mode: dtype = dtype.replace('u', 'i') frame = np.fromstring(frame.tobytes(), dtype) frame.shape = shape[::-1] else: frame = np.array(frame, dtype=dtype) frames.append(frame) i += 1 if img_num is not None: break if hasattr(image, 'fp') and image.fp: image.fp.close() if img_num is None and len(frames) > 1: return np.array(frames) elif frames: return frames[0] elif img_num: raise IndexError('Could not find image #%s' % img_num) def _palette_is_grayscale(pil_image): """Return True if PIL image in palette mode is grayscale. Parameters ---------- pil_image : PIL image PIL Image that is in Palette mode. Returns ------- is_grayscale : bool True if all colors in image palette are gray. """ assert pil_image.mode == 'P' # get palette as an array with R, G, B columns palette = np.asarray(pil_image.getpalette()).reshape((256, 3)) # Not all palette colors are used; unused colors have junk values. start, stop = pil_image.getextrema() valid_palette = palette[start:stop + 1] # Image is grayscale if channel differences (R - G and G - B) # are all zero. return np.allclose(np.diff(valid_palette), 0) def ndarray_to_pil(arr, format_str=None): """Export an ndarray to a PIL object. Parameters ---------- Refer to ``imsave``. 
""" if arr.ndim == 3: arr = img_as_ubyte(arr) mode = {3: 'RGB', 4: 'RGBA'}[arr.shape[2]] elif format_str in ['png', 'PNG']: mode = 'I;16' mode_base = 'I' if arr.dtype.kind == 'f': arr = img_as_uint(arr) elif arr.max() < 256 and arr.min() >= 0: arr = arr.astype(np.uint8) mode = mode_base = 'L' else: arr = img_as_uint(arr) else: arr = img_as_ubyte(arr) mode = 'L' mode_base = 'L' try: array_buffer = arr.tobytes() except AttributeError: array_buffer = arr.tostring() # Numpy < 1.9 if arr.ndim == 2: im = Image.new(mode_base, arr.T.shape) try: im.frombytes(array_buffer, 'raw', mode) except AttributeError: im.fromstring(array_buffer, 'raw', mode) # PIL 1.1.7 else: image_shape = (arr.shape[1], arr.shape[0]) try: im = Image.frombytes(mode, image_shape, array_buffer) except AttributeError: im = Image.fromstring(mode, image_shape, array_buffer) # PIL 1.1.7 return im def imsave(fname, arr, format_str=None, **kwargs): """Save an image to disk. Parameters ---------- fname : str or file-like object Name of destination file. arr : ndarray of uint8 or float Array (image) to save. Arrays of data-type uint8 should have values in [0, 255], whereas floating-point arrays must be in [0, 1]. format_str: str Format to save as, this is defaulted to PNG if using a file-like object; this will be derived from the extension if fname is a string kwargs: dict Keyword arguments to the Pillow save function (or tifffile save function, for Tiff files). These are format dependent. For example, Pillow's JPEG save function supports an integer ``quality`` argument with values in [1, 95], while TIFFFile supports a ``compress`` integer argument with values in [0, 9]. Notes ----- Use the Python Imaging Library. See PIL docs [1]_ for a list of other supported formats. All images besides single channel PNGs are converted using `img_as_uint8`. Single Channel PNGs have the following behavior: - Integer values in [0, 255] and Boolean types -> img_as_uint8 - Floating point and other integers -> img_as_uint16 References ---------- .. [1] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html """ # default to PNG if file-like object if not isinstance(fname, str) and format_str is None: format_str = "PNG" # Check for png in filename if (isinstance(fname, str) and fname.lower().endswith(".png")): format_str = "PNG" arr = np.asanyarray(arr) if arr.dtype.kind == 'b': arr = arr.astype(np.uint8) if arr.ndim not in (2, 3): raise ValueError("Invalid shape for image array: %s" % (arr.shape, )) if arr.ndim == 3: if arr.shape[2] not in (3, 4): raise ValueError("Invalid number of channels in image array.") img = ndarray_to_pil(arr, format_str=format_str) img.save(fname, format=format_str, **kwargs)
[ 834, 439, 834, 796, 37250, 320, 961, 3256, 705, 12078, 1015, 20520, 198, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 350, 4146, 1330, 7412, 198, 198, 6738, 2644, 22602, 1330, 33705, 62, 292, 62, 549, 88, 660, 11, 33705, 62, 292, ...
2.218286
3,500
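A round trip through the two helpers above, assuming they live in a module imported as pil_plugin (that module name is invented here for the sketch); any small uint8 array works:

import numpy as np
from pil_plugin import imread, imsave  # hypothetical module name for the code above

# Save a tiny 8-bit grayscale gradient, then read it back.
img = np.linspace(0, 255, 64, dtype=np.uint8).reshape(8, 8)
imsave('gradient.png', img)
restored = imread('gradient.png')
assert restored.shape == (8, 8)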
# -*- coding: utf-8 -*- """ Linear chain of reactions. """ from __future__ import print_function, division import tellurium as te model = ''' model feedback() // Reactions: J0: $X0 -> S1; (VM1 * (X0 - S1/Keq1))/(1 + X0 + S1 + S4^h); J1: S1 -> S2; (10 * S1 - 2 * S2) / (1 + S1 + S2); J2: S2 -> S3; (10 * S2 - 2 * S3) / (1 + S2 + S3); J3: S3 -> S4; (10 * S3 - 2 * S4) / (1 + S3 + S4); J4: S4 -> $X1; (V4 * S4) / (KS4 + S4); // Species initializations: S1 = 0; S2 = 0; S3 = 0; S4 = 0; X0 = 10; X1 = 0; // Variable initialization: VM1 = 10; Keq1 = 10; h = 10; V4 = 2.5; KS4 = 0.5; end''' r = te.loada(model) result = r.simulate(0, 40, 500) r.plotWithLegend(result)
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 14993, 451, 6333, 286, 12737, 13, 198, 37811, 198, 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 11, 7297, 198, 11748, 1560, 333, 1505, 355, 573, 198, 198, ...
2.002882
347
from .users import User, UserCreate, UserUpdate from .transactions import Transaction, TransactionCreate, TransactionUpdate from .accounts import Account, AccountList, AccountSingle, AccountCreate, AccountUpdate from .categories import Category, CategoryCreate, CategoryUpdate
[ 6738, 764, 18417, 1330, 11787, 11, 11787, 16447, 11, 11787, 10260, 198, 6738, 764, 7645, 4658, 1330, 45389, 11, 45389, 16447, 11, 45389, 10260, 198, 6738, 764, 23317, 82, 1330, 10781, 11, 10781, 8053, 11, 10781, 28008, 11, 10781, 16447, ...
4.928571
56
def method_accepting_cls(cls, self): # Using plain `super()` is not valid here, since there's no `__class__` cell found # (Exact exception would be 'RuntimeError: super(): __class__ cell not found') # Instead, we expect to *not* see a warning about `super-with-arguments`. # Explicitly passing `cls`, and `self` to `super()` is what's required. super(cls, self).__init__()
[ 628, 628, 628, 198, 4299, 2446, 62, 13635, 278, 62, 565, 82, 7, 565, 82, 11, 2116, 2599, 198, 220, 220, 220, 1303, 8554, 8631, 4600, 16668, 3419, 63, 318, 407, 4938, 994, 11, 1201, 612, 338, 645, 4600, 834, 4871, 834, 63, 2685, ...
2.941176
136
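For contrast, zero-argument super() only works where the compiler can create a __class__ cell, i.e. in a method defined inside a class body; outside one, the explicit two-argument form used above is the only option. A small sketch of both forms:

class Base:
    def __init__(self):
        self.ready = True


class Child(Base):
    def __init__(self):
        super().__init__()  # fine: a __class__ cell exists inside a class body


def init_from_free_function(cls, self):
    # No class body here, so no __class__ cell; the arguments are mandatory.
    super(cls, self).__init__()


obj = Child.__new__(Child)  # allocate without running __init__
init_from_free_function(Child, obj)
assert obj.ready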
import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import accuracy_score from flask import flash import numpy as np
[ 11748, 19798, 292, 355, 279, 67, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 11748, 384, 397, 1211, 355, 3013, 82, 198, 6738, 1341, 35720, 13, 19849, 62, 49283, 1330, 4512, 62, 9288, 62, 35312, 198, 6738, 1341, ...
3.662791
86
# This version of the bitcoin experiment imports data preprocessed in Matlab, and uses the GCN baseline # The point of this script is to do link prediction # Imports and aliases import pickle import torch as t import torch.nn as nn import torch.nn.functional as F import torchvision import torchvision.datasets as datasets import numpy as np import matplotlib.pyplot as plt import cProfile import pandas as pd import datetime from scipy.sparse import csr_matrix import os.path import embedding_help_functions as ehf import scipy.io as sio unsq = t.unsqueeze sq = t.squeeze # Settings alpha_vec = [.75, .76, .77, .78, .79, .80, .81, .82, .83, .84, .85, .86, .87, .88, .89, .90, .91, .92, .93, .94, .95] no_layers = 1 dataset = "OTC" # OTC or Alpha no_epochs = 1000 mat_f_name = "saved_content_bitcoin_otc.mat" no_trials = 1 beta1 = 19 beta2 = 19 cutoff = 95 eval_type = "MAP-MRR" # "MAP-MRR" or "F1" data_loc = "data/Bitcoin_" + dataset + "/" S_train, S_val, S_test = 95, 20, 20 lr = 0.01 momentum = 0.9 # Load and return relevant data A, A_labels, C_train, C_val, C_test, N = ehf.load_data(data_loc, mat_f_name, S_train, S_val, S_test, transformed=False) # Create features for the nodes X_train, X_val, X_test = ehf.create_node_features(A, S_train, S_val, S_test, same_block_size=False) # Extract edges and labels from A_labels, and augment with nonexisting edges # edges, beta edges = A_labels._indices() edges_aug, labels = ehf.augment_edges(edges, N, beta1, beta2, cutoff) # Divide adjacency matrices and labels into training, validation and testing sets edges_train, target_train, e_train, edges_val, target_val, e_val, edges_test, target_test, e_test = ehf.split_data(edges_aug, labels, S_train, S_val, S_test, same_block_size = False) if no_trials > 1: ep_acc_loss_vec = [] for tr in range(no_trials): for alpha in alpha_vec: class_weights = t.tensor([alpha, 1.0-alpha]) save_res_fname = "results_BASELINE_layers" + str(no_layers) + "_w" + str(round(float(class_weights[0])*100)) + "_" + dataset + "_link_prediction" # Create gcn for training if no_layers == 2: gcn = ehf.EmbeddingKWGCN(C_train[:-1], X_train[:-1], e_train, [6,6,2], nonlin2="selu") elif no_layers == 1: gcn = ehf.EmbeddingKWGCN(C_train[:-1], X_train[:-1], e_train, [6,2]) # Train optimizer = t.optim.SGD(gcn.parameters(), lr=lr, momentum=momentum) criterion = nn.CrossEntropyLoss(weight=class_weights) # Takes arguments (output, target) if eval_type == "F1": ep_acc_loss = np.zeros((no_epochs,12)) # (precision_train, recall_train, f1_train, loss_train, precision_val, recall_val, f1_val, loss_val, precision_test, recall_test, f1_test, loss_test) elif eval_type == "MAP-MRR": ep_acc_loss = np.zeros((no_epochs,9)) # (MAP_train, MRR_train, loss_train, MAP_val, MRR_val, loss_val, MAP_test, MRR_test, loss_test) for ep in range(no_epochs): # Compute loss and take step optimizer.zero_grad() output_train = gcn() loss_train = criterion(output_train, target_train[edges_train[0]!=0]) loss_train.backward() optimizer.step() # Things that don't require gradient with t.no_grad(): if ep % 100 == 0: # Compute stats for training data; no point in doing more often than this guess_train = t.argmax(output_train, dim=1) if eval_type == "F1": precision_train, recall_train, f1_train = ehf.compute_f1(guess_train, target_train[edges_train[0]!=0]) elif eval_type == "MAP-MRR": MAP_train, MRR_train = ehf.compute_MAP_MRR(output_train, target_train[edges_train[0]!=0], edges_train[:, edges_train[0]!=0]) # Compute stats for validation data output_val = gcn(C_val[:-1], X_val[:-1], e_val) 
guess_val = t.argmax(output_val, dim=1) if eval_type == "F1": precision_val, recall_val, f1_val = ehf.compute_f1(guess_val, target_val[edges_val[0]!=0]) elif eval_type == "MAP-MRR": MAP_val, MRR_val = ehf.compute_MAP_MRR(output_val, target_val[edges_val[0]!=0], edges_val[:, edges_val[0]!=0]) loss_val = criterion(output_val, target_val[edges_val[0]!=0]) # Compute stats for test data output_test = gcn(C_test[:-1], X_test[:-1], e_test) guess_test = t.argmax(output_test, dim=1) if eval_type == "F1": precision_test, recall_test, f1_test = ehf.compute_f1(guess_test, target_test[edges_test[0]!=0]) elif eval_type == "MAP-MRR": MAP_test, MRR_test = ehf.compute_MAP_MRR(output_test, target_test[edges_test[0]!=0], edges_test[:, edges_test[0]!=0]) loss_test = criterion(output_test, target_test[edges_test[0]!=0]) # Print if eval_type == "F1": ehf.print_f1(precision_train, recall_train, f1_train, loss_train, precision_val, recall_val, f1_val, loss_val, precision_test, recall_test, f1_test, loss_test, alpha, tr, ep) elif eval_type == "MAP-MRR": print("alpha/Tr/Ep %.2f/%d/%d. Train MAP/MRR %.16f/%.16f. Train loss %.16f." % (alpha, tr, ep, MAP_train, MRR_train, loss_train)) print("alpha/Tr/Ep %.2f/%d/%d. Val MAP/MRR %.16f/%.16f. Val loss %.16f." % (alpha, tr, ep, MAP_val, MRR_val, loss_val)) print("alpha/Tr/Ep %.2f/%d/%d. Test MAP/MRR %.16f/%.16f. Test loss %.16f.\n" % (alpha, tr, ep, MAP_test, MRR_test, loss_test)) # Store values with results if eval_type == "F1": ep_acc_loss[ep] = [precision_train, recall_train, f1_train, loss_train, precision_val, recall_val, f1_val, loss_val, precision_test, recall_test, f1_test, loss_test] elif eval_type == "MAP-MRR": ep_acc_loss[ep] = [MAP_train, MRR_train, loss_train, MAP_val, MRR_val, loss_val, MAP_test, MRR_test, loss_test] if eval_type == "F1": ehf.print_f1(precision_train, recall_train, f1_train, loss_train, precision_val, recall_val, f1_val, loss_val, precision_test, recall_test, f1_test, loss_test, is_final=True) elif eval_type == "MAP-MRR": print("FINAL: Train MAP/MRR %.16f/%.16f. Train loss %.16f." % (MAP_train, MRR_train, loss_train)) print("FINAL: Val MAP/MRR %.16f/%.16f. Val loss %.16f." % (MAP_val, MRR_val, loss_val)) print("FINAL: Test MAP/MRR %.16f/%.16f. Test loss %.16f.\n" % (MAP_test, MRR_test, loss_test)) if no_trials == 1: pickle.dump(ep_acc_loss, open(save_res_fname, "wb")) print("Results saved for single trial") else: ep_acc_loss_vec.append(ep_acc_loss) if no_trials > 1: pickle.dump(ep_acc_loss_vec, open(save_res_fname + "_no_trials" + str(no_trials), "wb")) print("Results saved for all trials")
[ 2, 770, 2196, 286, 262, 8550, 6306, 17944, 1366, 662, 14681, 276, 287, 6550, 23912, 11, 290, 3544, 262, 20145, 45, 14805, 198, 2, 383, 966, 286, 428, 4226, 318, 284, 466, 2792, 17724, 198, 198, 2, 1846, 3742, 290, 47217, 198, 11748,...
2.290851
2,809
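The script delegates its MAP/MRR numbers to ehf.compute_MAP_MRR, whose body is not shown. As a point of reference, a generic mean-reciprocal-rank over per-query candidate scores looks roughly like the sketch below (the per-query grouping is an assumption, not the ehf implementation):

import numpy as np

def mean_reciprocal_rank(scores, is_positive):
    """scores: one 1-D array per query; is_positive: parallel boolean arrays
    marking the true edges. Returns the MRR over all queries."""
    rr = []
    for s, pos in zip(scores, is_positive):
        order = np.argsort(-s)                 # best-scored candidate first
        ranks = np.nonzero(pos[order])[0] + 1  # 1-based ranks of true edges
        rr.append(1.0 / ranks[0] if len(ranks) else 0.0)
    return float(np.mean(rr))

print(mean_reciprocal_rank([np.array([0.1, 0.9, 0.3])],
                           [np.array([False, True, False])]))  # 1.0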
import cPickle import numpy as np from elm import ELMClassifier from sklearn import linear_model if __name__ == '__main__': # Load data sets train_x, train_y, val_x, val_y, test_x, test_y = get_datasets(load_mnist()) # Build ELM cls = ELMClassifier(n_hidden=7000, alpha=0.93, activation_func='multiquadric', regressor=linear_model.Ridge(), random_state=21398023) cls.fit(train_x, train_y) # Evaluate model print 'Validation error:', cls.score(val_x, val_y) print 'Test error:', cls.score(test_x, test_y)
[ 11748, 269, 31686, 293, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 1288, 76, 1330, 17852, 44, 9487, 7483, 198, 6738, 1341, 35720, 1330, 14174, 62, 19849, 628, 628, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, ...
2.006309
317
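ELMClassifier comes from an external elm package; the underlying idea, a fixed random hidden layer with only a linear readout being trained, fits in a few lines of numpy plus the same Ridge regressor the sample passes in. A sketch of the technique, not the package's implementation:

import numpy as np
from sklearn.linear_model import Ridge

rng = np.random.RandomState(0)
X = rng.randn(200, 10)
y = (X[:, 0] + X[:, 1] > 0).astype(float)

# Random, never-trained hidden layer: project, shift, squash.
W, b = rng.randn(10, 300), rng.randn(300)
H = np.tanh(X @ W + b)

# Only the readout is fit -- this is what makes ELM training a single solve.
readout = Ridge(alpha=1.0).fit(H, y)
print(readout.score(H, y))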
# -*- coding: utf-8 -*- #!/usr/bin/env python3 from PKC_Classes import NetworkUser, KDC from DES import DES from RSA_Class import RSA import socket import os import sys import threading import time if sys.version_info[0] < 3: raise Exception("Must be using Python 3") bob = NetworkUser('Alice', DES(), RSA(9973, 97), 200) print('bob:', bob.uid) # socket communication kdc_host, kdc_port = 'localhost', 9999 bob_host, bob_port = 'localhost', 9200 # talk to kdc for sess key try: sock_with_kdc = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock_with_kdc.connect((kdc_host, kdc_port)) print(sock_with_kdc.recv(1024)) # send cipher_key bob_cipher_key_packet = bob.send_cipher_key() sock_with_kdc.send(bob_cipher_key_packet.encode()) kdc_bob_cipher_key_packet = sock_with_kdc.recv(1024).decode() print(kdc_bob_cipher_key_packet) bob.process_packet(kdc_bob_cipher_key_packet) except socket.error as msg: print(msg); sys.exit(1) # sock_with_kdc.shutdown(socket.SHUT_WR) # talk to bob try: sock_self = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock_self.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock_self.bind((bob_host, bob_port)) sock_self.listen(10) except socket.error as msg: print(msg); sys.exit(1) while 1: conn, addr = sock_self.accept() thread = threading.Thread(target=reply_conn, args=(conn, addr)) thread.start() # sock_self.close()
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 6738, 29673, 34, 62, 9487, 274, 1330, 7311, 12982, 11, 509, 9697, 198, 6738, 22196, 1330, 22196, 198, 6738, ...
2.368506
616
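The accept loop above hands each connection to a reply_conn that the excerpt never defines (and which must exist before the loop runs). A minimal placeholder consistent with the surrounding code follows; the read-one-packet behaviour is an assumption:

def reply_conn(conn, addr):
    # Hypothetical handler: receive one packet, let bob process it, close.
    try:
        packet = conn.recv(1024).decode()
        if packet:
            bob.process_packet(packet)
    finally:
        conn.close()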
from __future__ import print_function import argparse import itertools import os import pickle import sys from datetime import datetime import matplotlib import numpy as np import torch matplotlib.use('Agg') import matplotlib.pyplot as plt import proj.archs as archs from proj.utils.cluster.general import config_to_str, get_opt, update_lr from proj.utils.cluster.baselines.triplets import make_triplets_data, \ triplets_eval, triplets_loss """ Triplets. Makes output distribution same as that of attractor, and different to that of repeller. Greyscale version (no sobel). """ # Options ---------------------------------------------------------------------- parser = argparse.ArgumentParser() parser.add_argument("--model_ind", type=int, required=True) parser.add_argument("--arch", type=str, required=True) parser.add_argument("--opt", type=str, default="Adam") parser.add_argument("--dataset", type=str, required=True) parser.add_argument("--dataset_root", type=str, required=True) parser.add_argument("--gt_k", type=int, required=True) parser.add_argument("--output_k", type=int, required=True) parser.add_argument("--lr", type=float, default=0.01) parser.add_argument("--lr_schedule", type=int, nargs="+", default=[]) parser.add_argument("--lr_mult", type=float, default=0.1) parser.add_argument("--num_epochs", type=int, default=1000) parser.add_argument("--batch_sz", type=int, required=True) # num pairs parser.add_argument("--out_root", type=str, default="/scratch/shared/slow/xuji/iid_private") parser.add_argument("--restart", dest="restart", default=False, action="store_true") parser.add_argument("--test_code", dest="test_code", default=False, action="store_true") parser.add_argument("--save_freq", type=int, default=10) parser.add_argument("--kmeans_on_features", default=False, action="store_true") # transforms # used for "positive" sample parser.add_argument("--demean", dest="demean", default=False, action="store_true") parser.add_argument("--per_img_demean", dest="per_img_demean", default=False, action="store_true") parser.add_argument("--data_mean", type=float, nargs="+", default=[0.5, 0.5, 0.5]) parser.add_argument("--data_std", type=float, nargs="+", default=[0.5, 0.5, 0.5]) parser.add_argument("--crop_orig", dest="crop_orig", default=False, action="store_true") parser.add_argument("--crop_other", dest="crop_other", default=False, action="store_true") parser.add_argument("--tf1_crop", type=str, default="random") # type name parser.add_argument("--tf2_crop", type=str, default="random") parser.add_argument("--tf1_crop_sz", type=int, default=84) parser.add_argument("--tf2_crop_szs", type=int, nargs="+", default=[84]) # allow diff crop for imgs_tf parser.add_argument("--tf3_crop_diff", dest="tf3_crop_diff", default=False, action="store_true") parser.add_argument("--tf3_crop_sz", type=int, default=0) parser.add_argument("--input_sz", type=int, default=96) parser.add_argument("--rot_val", type=float, default=0.) 
parser.add_argument("--always_rot", dest="always_rot", default=False, action="store_true") parser.add_argument("--no_jitter", dest="no_jitter", default=False, action="store_true") parser.add_argument("--no_flip", dest="no_flip", default=False, action="store_true") config = parser.parse_args() # Fixed settings and checks ---------------------------------------------------- config.in_channels = 1 if config.output_k != config.gt_k: assert (config.output_k > config.gt_k) assert (config.kmeans_on_features) config.out_dir = os.path.join(config.out_root, str(config.model_ind)) config.dataloader_batch_sz = config.batch_sz config.num_dataloaders = 1 if not os.path.exists(config.out_dir): os.makedirs(config.out_dir) if config.restart: given_config = config reloaded_config_path = os.path.join(given_config.out_dir, "config.pickle") print("Loading restarting config from: %s" % reloaded_config_path) with open(reloaded_config_path, "rb") as config_f: config = pickle.load(config_f) assert (config.model_ind == given_config.model_ind) config.restart = True # copy over new num_epochs and lr schedule config.num_epochs = given_config.num_epochs config.lr_schedule = given_config.lr_schedule if not hasattr(config, "kmeans_on_features"): config.kmeans_on_features = False else: print("Config: %s" % config_to_str(config)) # Data, nets, optimisers ------------------------------------------------------- dataloader_original, dataloader_positive, dataloader_negative, \ dataloader_test = make_triplets_data(config) train_dataloaders = [dataloader_original, dataloader_positive, dataloader_negative] net = archs.__dict__[config.arch](config) if config.restart: model_path = os.path.join(config.out_dir, "latest_net.pytorch") taking_best = not os.path.exists(model_path) if taking_best: print("using best instead of latest") model_path = os.path.join(config.out_dir, "best_net.pytorch") net.load_state_dict( torch.load(model_path, map_location=lambda storage, loc: storage)) net.cuda() net = torch.nn.DataParallel(net) net.train() optimiser = get_opt(config.opt)(net.module.parameters(), lr=config.lr) if config.restart: opt_path = os.path.join(config.out_dir, "latest_optimiser.pytorch") if taking_best: opt_path = os.path.join(config.out_dir, "best_optimiser.pytorch") optimiser.load_state_dict(torch.load(opt_path)) # Results storage -------------------------------------------------------------- if config.restart: if not taking_best: next_epoch = config.last_epoch + 1 # corresponds to last saved model else: next_epoch = np.argmax(np.array(config.epoch_acc)) + 1 print("starting from epoch %d" % next_epoch) config.epoch_acc = config.epoch_acc[:next_epoch] # in case we overshot config.epoch_loss = config.epoch_loss[:next_epoch] config.masses = config.masses[:next_epoch, :] config.per_class_acc = config.per_class_acc[:next_epoch, :] else: config.epoch_acc = [] config.epoch_loss = [] config.masses = None config.per_class_acc = None _ = triplets_eval(config, net, dataloader_test=dataloader_test, sobel=False) print("Pre: time %s: \n %s" % (datetime.now(), config.epoch_acc[-1])) sys.stdout.flush() next_epoch = 1 fig, axarr = plt.subplots(4, sharex=False, figsize=(20, 20)) # Train ------------------------------------------------------------------------ for e_i in xrange(next_epoch, config.num_epochs): print("Starting e_i: %d" % (e_i)) if e_i in config.lr_schedule: optimiser = update_lr(optimiser, lr_mult=config.lr_mult) avg_loss = 0. 
# over heads and head_epochs (and sub_heads) avg_loss_count = 0 sys.stdout.flush() iterators = (d for d in train_dataloaders) b_i = 0 for tup in itertools.izip(*iterators): net.module.zero_grad() imgs_orig = tup[0][0].cuda() imgs_pos = tup[1][0].cuda() imgs_neg = tup[2][0].cuda() outs_orig = net(imgs_orig) outs_pos = net(imgs_pos) outs_neg = net(imgs_neg) curr_loss = triplets_loss(outs_orig, outs_pos, outs_neg) if ((b_i % 100) == 0) or (e_i == next_epoch and b_i < 10): print("Model ind %d epoch %d batch %d " "loss %f time %s" % \ (config.model_ind, e_i, b_i, curr_loss.item(), datetime.now())) sys.stdout.flush() if not np.isfinite(float(curr_loss.item())): print("Loss is not finite... %s:" % str(curr_loss.item())) exit(1) avg_loss += curr_loss.item() avg_loss_count += 1 curr_loss.backward() optimiser.step() b_i += 1 if b_i == 2 and config.test_code: break avg_loss = float(avg_loss / avg_loss_count) config.epoch_loss.append(avg_loss) # Eval and storage ----------------------------------------------------------- # when epoch over both heads is finished is_best = triplets_eval(config, net, dataloader_test=dataloader_test, sobel=False) print("Time %s, acc %s" % (datetime.now(), config.epoch_acc[-1])) sys.stdout.flush() axarr[0].clear() axarr[0].plot(config.epoch_acc) axarr[0].set_title("acc, top: %f" % max(config.epoch_acc)) axarr[1].clear() axarr[1].plot(config.epoch_loss) axarr[1].set_title("Loss") axarr[2].clear() for c in xrange(config.gt_k): axarr[2].plot(config.masses[:, c]) axarr[2].set_title("masses") axarr[3].clear() for c in xrange(config.gt_k): axarr[3].plot(config.per_class_acc[:, c]) axarr[3].set_title("per_class_acc") fig.tight_layout() fig.canvas.draw_idle() fig.savefig(os.path.join(config.out_dir, "plots.png")) if is_best or (e_i % config.save_freq == 0): net.module.cpu() if is_best: torch.save(net.module.state_dict(), os.path.join(config.out_dir, "best_net.pytorch")) torch.save(optimiser.state_dict(), os.path.join(config.out_dir, "best_optimiser.pytorch")) if e_i % config.save_freq == 0: torch.save(net.module.state_dict(), os.path.join(config.out_dir, "latest_net.pytorch")) torch.save(optimiser.state_dict(), os.path.join(config.out_dir, "latest_optimiser.pytorch")) config.last_epoch = e_i # for last saved version net.module.cuda() with open(os.path.join(config.out_dir, "config.pickle"), 'wb') as outfile: pickle.dump(config, outfile) with open(os.path.join(config.out_dir, "config.txt"), "w") as text_file: text_file.write("%s" % config) if config.test_code: exit(0)
[ 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 198, 11748, 1822, 29572, 198, 11748, 340, 861, 10141, 198, 11748, 28686, 198, 11748, 2298, 293, 198, 11748, 25064, 198, 6738, 4818, 8079, 1330, 4818, 8079, 198, 198, 11748, 2603, 29487, ...
2.323216
4,415
"""empty message Revision ID: 0084_add_job_stats Revises: 0083_add_perm_types_and_svc_perm Create Date: 2017-05-12 13:16:14.147368 """ # revision identifiers, used by Alembic. revision = "0084_add_job_stats" down_revision = "0083_add_perm_types_and_svc_perm" import sqlalchemy as sa from alembic import op from sqlalchemy.dialects import postgresql
[ 37811, 28920, 3275, 198, 198, 18009, 1166, 4522, 25, 3571, 5705, 62, 2860, 62, 21858, 62, 34242, 198, 18009, 2696, 25, 3571, 5999, 62, 2860, 62, 16321, 62, 19199, 62, 392, 62, 21370, 66, 62, 16321, 198, 16447, 7536, 25, 2177, 12, 27...
2.62963
135
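The revision above declares identifiers and imports but no operations; an Alembic migration normally pairs them with upgrade() and downgrade() functions. A hypothetical shape is sketched below — the table and columns are invented, since the real schema is not shown:

def upgrade():
    op.create_table(
        "job_statistics",  # illustrative name only
        sa.Column("id", postgresql.UUID(as_uuid=True), primary_key=True),
        sa.Column("job_id", postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column("emails_sent", sa.BigInteger(), nullable=False, server_default="0"),
    )


def downgrade():
    op.drop_table("job_statistics")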
import unittest from future.moves.urllib.parse import urlparse, urljoin, parse_qs import pytest from addons.twofactor.tests.utils import _valid_code from nose.tools import (assert_equal, assert_false, assert_is_none, assert_is_not_none, assert_true) from osf_tests.factories import UserFactory pytestmark = pytest.mark.django_db
[ 11748, 555, 715, 395, 198, 6738, 2003, 13, 76, 5241, 13, 333, 297, 571, 13, 29572, 1330, 19016, 29572, 11, 19016, 22179, 11, 21136, 62, 48382, 198, 198, 11748, 12972, 9288, 198, 6738, 751, 684, 13, 4246, 1659, 11218, 13, 41989, 13, ...
2.664179
134
import numpy as np from torchvision import transforms np.random.seed(1)
[ 11748, 299, 32152, 355, 45941, 198, 6738, 28034, 10178, 1330, 31408, 198, 37659, 13, 25120, 13, 28826, 7, 16, 8 ]
3.55
20
import torch import os from torch import nn import numpy as np import torch.nn.functional from termcolor import colored from .logger import get_logger
[ 11748, 28034, 198, 11748, 28686, 198, 6738, 28034, 1330, 299, 77, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 28034, 13, 20471, 13, 45124, 198, 6738, 3381, 8043, 1330, 16396, 198, 6738, 764, 6404, 1362, 1330, 651, 62, 6404, 1362, 62...
3.642857
42
from django.apps import AppConfig #pragma: no cover
[ 6738, 42625, 14208, 13, 18211, 1330, 2034, 16934, 1303, 1050, 363, 2611, 25, 645, 3002, 628 ]
3.3125
16
from __future__ import division from timeit import default_timer as timer import csv import numpy as np import itertools from munkres import Munkres, print_matrix, make_cost_matrix import sys from classes import * from functions import * from math import sqrt import Tkinter as tk import tkFileDialog as filedialog root = tk.Tk() root.withdraw() p_file = filedialog.askopenfilename(title='Please select the posting file') c_file = filedialog.askopenfilename(title='Please select the candidate file') """for use with /users/java_jonathan/postings_lge.csv and /Users/java_jonathan/candidates_lge.csv""" # p_file = raw_input("Please enter the path for the postings file: ") # p_file = p_file.strip() # c_file = raw_input("Please enter the path for the candidate file: ") # c_file = c_file.strip() start = timer() with open(p_file,'r') as f: #with open('/Users/Jonathan/Google Drive/CPD/Python/postings.csv','r') as f: reader = csv.reader(f) postingsAll = list(reader) with open(c_file,'r') as f: reader = csv.reader(f) candidatesAll = list(reader) """create empty lists to fill with lists of lists output by iterating function below""" names = [] totalMatrix = [] for list in candidatesAll: candidate = Candidate(*list) names.append(candidate.name) n = 0 for list in postingsAll: posting = Posting(*list) totalMatrix.append(matchDept(posting,candidate) + matchAnchor(posting,candidate) +matchLocation(posting,candidate) + matchCompetency(posting,candidate) + matchSkill(posting,candidate)+matchCohort(posting,candidate)) n += 1 l = len(names) names.extend([0] * (n-l)) totalMatrix.extend([0] * (n**2 - len(totalMatrix))) totalMatrix = np.asarray(totalMatrix) totalMatrix = np.reshape(totalMatrix,(n,-1)) #at this point the matrix is structured as candidates down and jobs across totalMatrix = np.transpose(totalMatrix) #now it's switched! 
totalMatrix = np.subtract(np.amax(totalMatrix),totalMatrix) totalMatrix = np.array(totalMatrix) minSuitability = 18 check = [] result = [] m = Munkres() indexes = m.compute(totalMatrix) #print_matrix(totalMatrix, msg='Lowest cost through this matrix:') total = 0.0 unhappy_candidates = 0 medium_candidates = 0 tenpc_candidates = 0 qs_candidates = 0 vs_candidates = 0 f = open('output.txt', 'w') for row, column in indexes: if column < l: value = totalMatrix[row][column] if value > minSuitability*0.9: tenpc_candidates += 1 elif value > minSuitability*0.75: medium_candidates += 1 elif value > minSuitability/2: unhappy_candidates += 1 elif value > minSuitability*0.25: qs_candidates += 1 elif value > minSuitability*0.1: vs_candidates += 1 total += value check.append(column+1) result.append((row,column)) f.write('For candidate %s: \nOptimal position: %d (score %s)\n' % (names[column], column+1, value)) else: pass globalSatisfaction = 100*(1-(total/(l*minSuitability))) print('Global satisfaction: %.2f%%' % globalSatisfaction) print('Candidates who are more than 90%% suitable: %d' % vs_candidates) print('Candidates who are more than 75%% suitable: %d' % qs_candidates) print('Candidates who are more than 50%% suitable: %d' % (l-unhappy_candidates)) print('Candidates who are more than 75%% unsuitable: %d' % medium_candidates) print('Candidates who are more than 90%% unsuitable: %d' % tenpc_candidates) #output from excel: correct = [1,3,5,9,10,2,4,8,6,7] #this function tests output above against Excel: #test(correct,check) topMatrix = topFive(names,totalMatrix) #print(topMatrix) np.savetxt('/Users/java_jonathan/test.csv',topMatrix, fmt='%s', delimiter=',', newline='\n', header='', footer='', comments='# ') np.savetxt('/Users/java_jonathan/test2.csv',totalMatrix, fmt='%s', delimiter=',', newline='\n', header='', footer='', comments='# ') end = timer() print(end-start) """ #posting = [Posting(*postingsAll)] #print(posting[0].anchor) #print(posting) #print(candidatesAll) #print(postingsAll) #print(postingsAll[0].name) #print(preferences) #print(postings) #split up files into relative blocks postCode = [lists[0] for lists in postings] postDept = [lists[1] for lists in postings] postAnchor = [lists[2] for lists in postings] postSkills = [lists[3:5] for lists in postings] postLocation = [lists[5] for lists in postings] postCompetencies = [lists[7:10] for lists in postings] postSecurity = [lists[10] for lists in postings] #with open('/Users/Jonathan/Google Drive/CPD/Python/candidates.csv','r') as f: #gives first column ie candidate a a=totalMatrix[:,[0]] #b = totalMatrix[:,[0]] #print(a) #converts 1D matrix to list for ease a = np.array(a).tolist() #print(a) #creates list called output containing rank of score output = [0] * len(a) for i, x in enumerate(sorted(range(len(a)), key=lambda y: a[y])): output[x] = i print(output) #creates tuples of rank, job and appends to list jobRank = [] # for rank, b in zip(output, postCode): # jobScore = (rank,b) # list(jobScore) # jobRank.append(jobScore) # print(jobRank) output = [0] * len(a) for i, x in enumerate(sorted(range(len(a)), key=lambda y: a[y])): output[x] = i print(output) # #print(a) # jobRank = sorted(jobRank, reverse=False) # print(jobRank) # print('For candidate a, the best position is %s') % (jobRank[0][1]) # print(candidate[0].skills) """
[ 6738, 11593, 37443, 834, 1330, 7297, 198, 6738, 640, 270, 1330, 4277, 62, 45016, 355, 19781, 198, 11748, 269, 21370, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 340, 861, 10141, 198, 6738, 285, 2954, 411, 1330, 337, 2954, 411, 11, ...
2.662274
2,049
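The Munkres (Hungarian) step is the heart of the matching script: compute() returns the row/column assignment with minimum total cost, which is why the suitability scores are first flipped with np.subtract(np.amax(...), ...). A tiny self-contained run:

from munkres import Munkres

cost = [[4, 1, 3],
        [2, 0, 5],
        [3, 2, 2]]
indexes = Munkres().compute(cost)           # [(0, 1), (1, 0), (2, 2)]
print(sum(cost[r][c] for r, c in indexes))  # minimal total cost: 5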
"""Setup script for PySyReNN. Adapted from: https://hynek.me/articles/sharing-your-labor-of-love-pypi-quick-and-dirty/ """ import codecs import os import re from setuptools import setup, find_packages ################################################################### NAME = "pysyrenn" PACKAGES = [ "syrenn_proto", "pysyrenn", "pysyrenn.frontend", "pysyrenn.helpers", ] META_PATH = "__metadata__.py" KEYWORDS = ["class", "attribute", "boilerplate"] CLASSIFIERS = [ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Natural Language :: English", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Software Development :: Libraries :: Python Modules", ] INSTALL_REQUIRES = ["torch"] with open("requirements.txt") as requirements: reading = False for line in requirements.readlines(): if line.startswith("# PYSYRENN"): reading = True elif line.startswith("# END"): reading = False elif line.startswith("#"): pass elif reading: INSTALL_REQUIRES.append(line.strip().split("==")[0]) ################################################################### HERE = os.path.abspath(os.path.dirname(__file__)) def read(*parts): """ Build an absolute path from *parts* and and return the contents of the resulting file. Assume UTF-8 encoding. """ with codecs.open(os.path.join(HERE, *parts), "rb", "utf-8") as f: return f.read() META_FILE = read(META_PATH) def find_meta(meta): """Extract __*meta*__ from META_FILE. """ meta_match = re.search( r"^__{meta}__ = ['\"]([^'\"]*)['\"]".format(meta=meta), META_FILE, re.M ) if meta_match: return meta_match.group(1) raise RuntimeError("Unable to find __{meta}__ string.".format(meta=meta)) if __name__ == "__main__": setup( name=NAME, description=find_meta("description"), license=find_meta("license"), url=find_meta("uri"), version=find_meta("version"), author=find_meta("author"), author_email=find_meta("email"), maintainer=find_meta("author"), maintainer_email=find_meta("email"), keywords=KEYWORDS, long_description=read("README.md"), long_description_content_type="text/markdown", packages=PACKAGES, package_dir={"": "."}, package_data={"": ["pysyrenn/**/*.py"]}, zip_safe=False, classifiers=CLASSIFIERS, install_requires=INSTALL_REQUIRES, )
[ 37811, 40786, 4226, 329, 9485, 13940, 3041, 6144, 13, 198, 198, 48003, 276, 422, 25, 198, 5450, 1378, 12114, 710, 74, 13, 1326, 14, 26845, 14, 21987, 12, 14108, 12, 75, 4820, 12, 1659, 12, 23205, 12, 79, 4464, 72, 12, 24209, 12, 3...
2.533871
1,240
REST_PATH = u"" WS_PATH = u"/api/notifications/v1"
[ 49, 6465, 62, 34219, 796, 334, 15931, 198, 19416, 62, 34219, 796, 334, 1, 14, 15042, 14, 1662, 6637, 14, 85, 16, 1, 198 ]
2.125
24
__all__ = ["load"] import imp import importlib def load(name, path): """Load and initialize a module implemented as a Python source file and return its module object""" if hasattr(importlib, "machinery"): loader = importlib.machinery.SourceFileLoader(name, path) return loader.load_module() return imp.load_source(name, path)
[ 834, 439, 834, 796, 14631, 2220, 8973, 628, 198, 11748, 848, 198, 11748, 1330, 8019, 628, 198, 4299, 3440, 7, 3672, 11, 3108, 2599, 198, 220, 220, 220, 37227, 8912, 290, 41216, 257, 8265, 9177, 355, 257, 11361, 2723, 2393, 290, 1441, ...
3.033898
118
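Both branches above are now deprecated territory: the imp module was removed in Python 3.12, and SourceFileLoader.load_module() has been deprecated for years. The supported spelling of the same operation goes through importlib.util; an equivalent sketch:

import importlib.util


def load(name, path):
    """Load and initialize a module from a source file, importlib style."""
    spec = importlib.util.spec_from_file_location(name, path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module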
import itertools from pygears.common.sieve import sieve from pygears.svgen.inst import SVGenInstPlugin from pygears.svgen.svmod import SVModuleGen from functools import partial from pygears.svgen.svgen import SVGenPlugin from pygears.svgen.util import svgen_visitor from pygears.core.hier_node import HierVisitorBase from pygears.svgen.inst import svgen_inst from pygears.rtl.gear import RTLGearHierVisitor, is_gear_instance class SVGenSievePlugin(SVGenInstPlugin, SVGenPlugin):
[ 11748, 340, 861, 10141, 198, 198, 6738, 12972, 70, 4127, 13, 11321, 13, 82, 12311, 1330, 264, 12311, 198, 6738, 12972, 70, 4127, 13, 21370, 5235, 13, 8625, 1330, 20546, 13746, 6310, 37233, 198, 6738, 12972, 70, 4127, 13, 21370, 5235, ...
2.963415
164
#encoding=utf-8 import qlib import pandas as pd import pickle import xgboost as xgb import numpy as np import re from qlib.constant import REG_US from qlib.utils import exists_qlib_data, init_instance_by_config from qlib.workflow import R from qlib.workflow.record_temp import SignalRecord, PortAnaRecord from qlib.utils import flatten_dict from qlib.data import LocalExpressionProvider from qlib.data.ops import Operators, OpsList from qlib.data.base import Feature from pyecharts import options as opts from pyecharts.charts import Kline, Line, Grid from my_data_handler import MyAlphaHandler # model_file = r'.\mlruns\1\d6536b056ba84a74be6b33971f443cf6\artifacts\trained_model' model_file = r'.\mlruns\1\148ef1cd7acd48deac3eadc339ad3008\artifacts\trained_model' with open(model_file, 'rb') as fi: model = pickle.load(fi) exprs, columns = MyAlphaHandler.get_custom_config() raw_data = pd.read_csv('../stock_data/TSLA.csv', parse_dates=['time']) raw_data['data_time'] = raw_data['time'].dt.strftime("%Y-%m-%d %H:%M:00") raw_data.set_index('time', inplace=True) raw_data["vwap"] = np.nan raw_data.sort_index(inplace=True) # print(raw_data) Operators.register(OpsList + [MyFeature]) obj = dict() for field in exprs: expression = eval(my_parse_field(field)) series = expression.load('TSLA', "2022-01-02", "2022-02-28", "1min") series = series.astype(np.float32) obj[field] = series data = pd.DataFrame(obj) data.columns = columns view_time_start = '2022-02-11' view_time_end = '2022-02-12' pre_data = raw_data.loc[view_time_start:view_time_end].copy() pred=model.model.predict(xgb.DMatrix(data.loc[view_time_start:view_time_end])) pre_data['pred_score'] = pred records = pre_data.to_dict("records") cash = 50000 position = {} hold_thresh = 5 score_thresh = 0.001 x_axises, y_axises, mark_points, money = [], [], [], [] for record in records: x_axises.append(record['data_time']) y_axises.append([ record['open'], record['close'], record['low'], record['high'] ]) if 'hold_cnt' in position: position['hold_cnt'] += 1 if position and (record['open'] >= position['close'] * 1.01 or record['open'] < position['close'] * 0.995 or record['pred_score'] < -score_thresh or position['hold_cnt'] >= hold_thresh): cash += position['amount'] * record['open'] position = {} #print("sell") mark_points.append(opts.MarkPointItem( coord=[record['data_time'], record['high']], symbol='triangle', symbol_size=7, itemstyle_opts=opts.ItemStyleOpts(color="green") )) elif record['pred_score'] > score_thresh and not position: position = dict(record) position['amount'] = int(cash / position['open']) cash -= position['amount'] * position['open'] # buy #print("buy") position['hold_cnt'] = 0 mark_points.append(opts.MarkPointItem( coord=[record['data_time'], record['high']], symbol='arrow', symbol_size=7, itemstyle_opts=opts.ItemStyleOpts(color="yellow") )) cur_money = cash if position: cur_money += position['amount'] * record['close'] money.append(cur_money) if position: cash += position['amount'] * records[-1]['close'] print("cash:", cash) kline_graph = ( Kline() .add_xaxis(x_axises) .add_yaxis( "kline", y_axises, markpoint_opts=opts.MarkPointOpts( data=mark_points ), ) .set_global_opts( xaxis_opts=opts.AxisOpts(is_scale=True), yaxis_opts=opts.AxisOpts( is_scale=True, splitarea_opts=opts.SplitAreaOpts( is_show=True, areastyle_opts=opts.AreaStyleOpts(opacity=1) ), ), title_opts=opts.TitleOpts(title="%s_%s" % (view_time_start, view_time_end)), datazoom_opts=[opts.DataZoomOpts(type_="inside", xaxis_index=[0, 1],)], ) ) kline_line = ( Line() 
.add_xaxis(xaxis_data=x_axises) .add_yaxis( series_name="cur_money", y_axis=money, is_smooth=True, linestyle_opts=opts.LineStyleOpts(opacity=0.5), label_opts=opts.LabelOpts(is_show=False), markline_opts=opts.MarkLineOpts( data=[opts.MarkLineItem(y=50000)] ), ) .set_global_opts( xaxis_opts=opts.AxisOpts( type_="category", grid_index=2, axislabel_opts=opts.LabelOpts(is_show=False), ), yaxis_opts=opts.AxisOpts( min_='dataMin' ) ) ) grid_chart = Grid(init_opts=opts.InitOpts(width='2000px', height='900px')) grid_chart.add( kline_graph, grid_opts=opts.GridOpts(pos_left="3%", pos_right="10%", height="50%"), ) grid_chart.add( kline_line, grid_opts=opts.GridOpts( pos_left="3%", pos_right="10%", pos_top="60%", height="30%" ), ) grid_chart.render("kline_markline.html")
[ 2, 12685, 7656, 28, 40477, 12, 23, 198, 11748, 10662, 8019, 198, 11748, 19798, 292, 355, 279, 67, 198, 11748, 2298, 293, 198, 11748, 2124, 70, 39521, 355, 2124, 22296, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 302, 198, 6738, 10...
2.186313
2,265
from fastapi import APIRouter router = APIRouter()
[ 6738, 3049, 15042, 1330, 3486, 4663, 39605, 198, 198, 472, 353, 796, 3486, 4663, 39605, 3419, 628 ]
3.117647
17
import numpy as np import scipy.sparse as sp import torch import torch.nn as nn import networkx as nx import time from embed_methods.dgi.models import DGI, LogReg from embed_methods.dgi.utils import process
[ 11748, 299, 32152, 355, 45941, 198, 11748, 629, 541, 88, 13, 82, 29572, 355, 599, 198, 11748, 28034, 198, 11748, 28034, 13, 20471, 355, 299, 77, 198, 11748, 3127, 87, 355, 299, 87, 198, 11748, 640, 198, 198, 6738, 11525, 62, 24396, ...
3.058824
68
"""Deploys binaries to a GitHub release given the specified tag name.""" import argparse import os import time from github import Github THIS_FILE_DIRECTORY = os.path.dirname(os.path.realpath(__file__)) GH_REPO_IDENT = "ETCLabs/RDMnet" GH_USERNAME = "svc-etclabs" GH_API_TOKEN = os.getenv("SVC_ETCLABS_REPO_TOKEN") def deploy_binaries(version: str): """Deploys staged binaries to a new GitHub Release.""" g = Github(login_or_token=GH_USERNAME, password=GH_API_TOKEN) repo = g.get_repo(GH_REPO_IDENT) print(f"Waiting for the correct GitHub tag v{version} to become available...") keep_trying = True while keep_trying: for tag in repo.get_tags(): if tag.name == f"v{version}": keep_trying = False # Tag now exists break if keep_trying: time.sleep(5) print(f"Tag v{version} available. Creating release...") new_release = repo.create_git_release( tag=f"v{version}", name=f"RDMnet v{version}", message=f"Automated release of RDMnet for v{version}", ) new_release.upload_asset("RDMnetSetup_x86.msi") new_release.upload_asset("RDMnetSetup_x64.msi") new_release.upload_asset("RDMnet.pkg") if __name__ == "__main__": main()
[ 37811, 49322, 82, 38640, 284, 257, 21722, 2650, 1813, 262, 7368, 7621, 1438, 526, 15931, 198, 11748, 1822, 29572, 198, 11748, 28686, 198, 11748, 640, 198, 198, 6738, 33084, 1330, 38994, 198, 198, 43559, 62, 25664, 62, 17931, 23988, 15513,...
2.350554
542
import logging import numpy from ..Fragments import Fragments from ..typing import SpectrumType logger = logging.getLogger("matchms") def add_losses(spectrum_in: SpectrumType, loss_mz_from=0.0, loss_mz_to=1000.0) -> SpectrumType: """Derive losses based on precursor mass. Parameters ---------- spectrum_in: Input spectrum. loss_mz_from: Minimum allowed m/z value for losses. Default is 0.0. loss_mz_to: Maximum allowed m/z value for losses. Default is 1000.0. """ if spectrum_in is None: return None spectrum = spectrum_in.clone() precursor_mz = spectrum.get("precursor_mz", None) if precursor_mz: assert isinstance(precursor_mz, (float, int)), ("Expected 'precursor_mz' to be a scalar number.", "Consider applying 'add_precursor_mz' filter first.") peaks_mz, peaks_intensities = spectrum.peaks.mz, spectrum.peaks.intensities losses_mz = (precursor_mz - peaks_mz)[::-1] losses_intensities = peaks_intensities[::-1] # Add losses which are within given boundaries mask = numpy.where((losses_mz >= loss_mz_from) & (losses_mz <= loss_mz_to)) spectrum.losses = Fragments(mz=losses_mz[mask], intensities=losses_intensities[mask]) else: logger.warning("No precursor_mz found. Consider applying 'add_precursor_mz' filter first.") return spectrum
[ 11748, 18931, 198, 11748, 299, 32152, 198, 6738, 11485, 42974, 902, 1330, 24229, 902, 198, 6738, 11485, 774, 13886, 1330, 27217, 6030, 628, 198, 6404, 1362, 796, 18931, 13, 1136, 11187, 1362, 7203, 15699, 907, 4943, 628, 198, 4299, 751, ...
2.258209
670
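A short usage sketch for the filter above, building a two-peak spectrum by hand (the m/z and metadata values are made up):

import numpy
from matchms import Spectrum

spectrum = Spectrum(mz=numpy.array([100.0, 150.0]),
                    intensities=numpy.array([0.7, 0.2]),
                    metadata={"precursor_mz": 445.0})
spectrum = add_losses(spectrum, loss_mz_from=10.0, loss_mz_to=400.0)
print(spectrum.losses.mz)  # [295. 345.] -- precursor minus each peak, ascending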
import re from .dict_functions import gen_schema, ParameterSchema, sort_dict from cornflow_client.constants import JSON_TYPES, DATASCHEMA
[ 11748, 302, 198, 6738, 764, 11600, 62, 12543, 2733, 1330, 2429, 62, 15952, 2611, 11, 25139, 2357, 27054, 2611, 11, 3297, 62, 11600, 198, 6738, 11676, 11125, 62, 16366, 13, 9979, 1187, 1330, 19449, 62, 9936, 47, 1546, 11, 360, 1404, 19...
3.088889
45
#! /usr/bin/env python3 # -*- coding: utf-8 -*- import os import platform import unittest import rspub.util.resourcefilter as rf
[ 2, 0, 1220, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 11748, 28686, 198, 11748, 3859, 198, 11748, 555, 715, 395, 198, 198, 11748, 374, 2777, 549, 13, 22602, 13, 3109...
2.545455
55
import os import testinfra.utils.ansible_runner testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') def test_package(host): """ check if packages are installed """ assert host.package('grafana').is_installed def test_service(host): """ Testing whether the service is running and enabled """ assert host.service('grafana-server').is_enabled assert host.service('grafana-server').is_running
[ 11748, 28686, 198, 198, 11748, 1332, 10745, 430, 13, 26791, 13, 504, 856, 62, 16737, 198, 198, 9288, 10745, 430, 62, 4774, 82, 796, 1332, 10745, 430, 13, 26791, 13, 504, 856, 62, 16737, 13, 2025, 82, 856, 49493, 7, 198, 220, 220, ...
2.861272
173
"""Class representations of heatsinks.""" import math from scipy import constants as const from materials import Aluminium_6063 as aluminium
[ 37811, 9487, 24612, 286, 37876, 2973, 526, 15931, 198, 198, 11748, 10688, 198, 6738, 629, 541, 88, 1330, 38491, 355, 1500, 198, 198, 6738, 5696, 1330, 978, 35241, 62, 1899, 5066, 355, 40412, 628, 198 ]
4.142857
35
import inspect


def foo():
    # Minimal body so the call below actually runs; the original snippet
    # called foo() without defining it. Report the current function name
    # via the inspect module the snippet imports.
    print(inspect.currentframe().f_code.co_name)


foo()
[ 11748, 10104, 220, 198, 220, 198, 21943, 3419 ]
2.875
8
import base58 from plenum.common.signer_did import DidSigner from plenum.common.verifier import DidVerifier from plenum.common.eventually import eventually from plenum.test.helper import assertEquality from sovrin.common.identity import Identity MsgForSigning = {'sender': 'Mario', 'msg': 'Lorem ipsum'}
[ 11748, 2779, 3365, 198, 6738, 458, 44709, 13, 11321, 13, 12683, 263, 62, 20839, 1330, 7731, 11712, 263, 198, 6738, 458, 44709, 13, 11321, 13, 332, 7483, 1330, 7731, 13414, 7483, 198, 6738, 458, 44709, 13, 11321, 13, 15596, 935, 1330, ...
3.181818
99
import astropy import datetime import numpy as np from edibles.utils.edibles_spectrum import EdiblesSpectrum if __name__ == "__main__": filename = "HD170740_w860_redl_20140915_O12.fits" testEdiblesSpectrum(filename=filename)
[ 11748, 6468, 28338, 198, 11748, 4818, 8079, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 1225, 18764, 13, 26791, 13, 276, 18764, 62, 4443, 6582, 1330, 1717, 18764, 49738, 6582, 628, 198, 198, 361, 11593, 3672, 834, 6624, 366, 834, 124...
2.788235
85
# Copyright (c) 2010-2013 OpenStack, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Methods & Attributes for shared 'on-disk' data layouts.""" import os import sys import errno from hashlib import md5 from random import shuffle from ConfigParser import ConfigParser, NoSectionError, NoOptionError from swift import gettext_ as _ from swift.common.utils import listdir, quote # Used by hash_path to offer a bit more security when generating hashes for # paths. It simply appends this value to all paths; guessing the hash a path # will end up with would also require knowing this suffix. _hash_conf = ConfigParser() HASH_PATH_SUFFIX = '' HASH_PATH_PREFIX = '' if _hash_conf.read('/etc/swift/swift.conf'): try: HASH_PATH_SUFFIX = _hash_conf.get('swift-hash', 'swift_hash_path_suffix') except (NoSectionError, NoOptionError): pass try: HASH_PATH_PREFIX = _hash_conf.get('swift-hash', 'swift_hash_path_prefix') except (NoSectionError, NoOptionError): pass def hash_path(account, container=None, object=None, raw_digest=False): """ Get the canonical hash for an account/container/object :param account: Account :param container: Container :param object: Object :param raw_digest: If True, return the raw version rather than a hex digest :returns: hash string """ if object and not container: raise ValueError('container is required if object is provided') paths = [account] if container: paths.append(container) if object: paths.append(object) if raw_digest: return md5(HASH_PATH_PREFIX + '/' + '/'.join(paths) + HASH_PATH_SUFFIX).digest() else: return md5(HASH_PATH_PREFIX + '/' + '/'.join(paths) + HASH_PATH_SUFFIX).hexdigest() def normalize_timestamp(timestamp): """ Format a timestamp (string or numeric) into a standardized xxxxxxxxxx.xxxxx (10.5) format. Note that timestamps using values greater than or equal to November 20th, 2286 at 17:46 UTC will use 11 digits to represent the number of seconds. :param timestamp: unix timestamp :returns: normalized timestamp as a string """ return "%016.05f" % (float(timestamp)) def validate_device_partition(device, partition): """ Validate that a device and a partition are valid and won't lead to directory traversal when used. :param device: device to validate :param partition: partition to validate :raises: ValueError if given an invalid device or partition """ invalid_device = False invalid_partition = False if not device or '/' in device or device in ['.', '..']: invalid_device = True if not partition or '/' in partition or partition in ['.', '..']: invalid_partition = True if invalid_device: raise ValueError('Invalid device: %s' % quote(device or '')) elif invalid_partition: raise ValueError('Invalid partition: %s' % quote(partition or '')) def storage_directory(datadir, partition, name_hash): """ Get the storage directory :param datadir: Base data directory :param partition: Partition :param name_hash: Account, container or object name hash :returns: Storage directory """ return os.path.join(datadir, str(partition), name_hash[-3:], name_hash)
[ 2, 15069, 357, 66, 8, 3050, 12, 6390, 4946, 25896, 11, 11419, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, ...
2.821937
1,404
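How the pieces above combine: hash_path yields the name hash and storage_directory maps it to an on-disk location. With the empty prefix/suffix defaults (no /etc/swift/swift.conf present):

name_hash = hash_path('AUTH_test', 'photos', 'cat.jpg')
print(name_hash)  # a 32-character md5 hex digest
print(storage_directory('/srv/node/sda1/objects', 1023, name_hash))
# -> /srv/node/sda1/objects/1023/<last 3 hash chars>/<name_hash>
print(normalize_timestamp(1400000000))  # '1400000000.00000'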
# uncompyle6 version 2.11.3 # Python bytecode 2.7 (62211) # Decompiled from: Python 2.7.10 (default, May 23 2015, 09:40:32) [MSC v.1500 32 bit (Intel)] # Embedded file name: scripts/common/dossiers2/custom/cache.py import nations from items import vehicles _g_cache = {}
[ 2, 34318, 2349, 21, 2196, 362, 13, 1157, 13, 18, 198, 2, 11361, 18022, 8189, 362, 13, 22, 357, 21, 1828, 1157, 8, 198, 2, 4280, 3361, 3902, 422, 25, 11361, 362, 13, 22, 13, 940, 357, 12286, 11, 1737, 2242, 1853, 11, 7769, 25, ...
2.785714
98
#!/usr/bin/python

from code import TreeNode
from code import ThreeAddressCode
from lexer import tokens
from random import *
from symbol_table import SymbolTable
from symbol_table import SymbolTableNode
import logging
import ply.lex as lex
import ply.yacc as yacc
import sys
from codegen import convert_tac
from code import Code
from codegen import generate_assembly

three_addr_code = ThreeAddressCode()
assembly_code = Code()
parsed = []
symbol_table = SymbolTable()
var_list = []
generated = {'temp': [], 'scope': ['scope_0'], 'label': [], 'str_list': []}

precedence = (
    ('left','IDENTIFIER'),
    ('right','ASSIGN_OP'),
    ('left','COMMA'),
    ('left','LSQUARE'),
    ('left','RSQUARE'),
    ('left','LCURLY'),
    ('left','RCURLY'),
    ('left','DDD'),
    ('left','DOT'),
    ('left','SEMICOLON'),
    ('left','COLON'),
    ('left','SINGLE_QUOTES'),
    ('left','DOUBLE_QUOTES'),
    ('left','DECIMAL_LIT'),
    ('left','OCTAL_LIT'),
    ('left','HEX_LIT'),
    ('left','FLOAT_LIT'),
    ('left','STRING_LIT'),
    ('left','NEWLINE'),
    ('left','BREAK'),
    ('left','CONTINUE'),
    ('left','RETURN'),
    ('left','RROUND'),
    ('left','LROUND'),
    ('left', 'OR_OR'),
    ('left', 'AMP_AMP'),
    ('left', 'EQ_EQ', 'NOT_EQ','LT','LT_EQ','GT','GT_EQ'),
    ('left', 'PLUS', 'MINUS','OR','CARET'),
    ('left', 'STAR', 'DIVIDE','MODULO','AMP','AND_OR','LS','RS'),
)

def p_SourceFile(p):
    '''SourceFile : PACKAGE IDENTIFIER SEMICOLON ImportDeclList TopLevelDeclList
    '''
    parsed.append(p.slice)
    # TODO: Ignoring package name and Imports for now
    p[0] = p[5]
    var_list = symbol_table.make_var_list()
    three_addr_code = convert_tac(p[0].TAC)
    symbol_table.fill_next_use(three_addr_code)
    assembly_code = generate_assembly(three_addr_code,var_list,symbol_table)
    # p[0].TAC.print_code()
    # three_addr_code.print_code()
    assembly_code.print_code()
    # symbol_table.print_symbol_table()
    return

def p_ImportDeclList(p):
    '''ImportDeclList : ImportDecl SEMICOLON ImportDeclList
                      | empty
    '''
    parsed.append(p.slice)
    # TODO: Ignoring Imports for now
    return

def p_TopLevelDeclList(p):
    '''TopLevelDeclList : TopLevelDecl SEMICOLON TopLevelDeclList
                        | empty
    '''
    parsed.append(p.slice)
    if len(p) == 4:
        if p[3] != None:
            p[0] = TreeNode('TopLevelDeclList', 0, 'INT', 0, [p[1]] + p[3].children, p[1].TAC)
            p[0].TAC.append_TAC(p[3].TAC)
        else:
            p[0] = TreeNode('TopLevelDeclList', 0, 'INT', 0, [p[1]], p[1].TAC)
    return

def p_TopLevelDecl(p):
    '''TopLevelDecl : Declaration
                    | FunctionDecl
    '''
    parsed.append(p.slice)
    p[0] = p[1]
    return

def p_ImportDecl(p):
    '''ImportDecl : IMPORT LROUND ImportSpecList RROUND
                  | IMPORT ImportSpec
    '''
    parsed.append(p.slice)
    # TODO: Ignoring Imports for now
    return

def p_ImportSpecList(p):
    '''ImportSpecList : ImportSpec SEMICOLON ImportSpecList
                      | empty
    '''
    parsed.append(p.slice)
    # TODO: Ignoring Imports for now
    return

def p_ImportSpec(p):
    '''ImportSpec : DOT string_lit
                  | IDENTIFIER string_lit
                  | empty string_lit
    '''
    parsed.append(p.slice)
    # TODO: Ignoring Imports for now
    return

def p_Block(p):
    '''Block : LCURLY ScopeStart StatementList ScopeEnd RCURLY
    '''
    parsed.append(p.slice)
    p[0] = p[3]
    p[0].data = p[2].data
    p[0].name = 'Block'
    return

def p_ScopeStart(p):
    '''ScopeStart : empty
    '''
    parsed.append(p.slice)
    symbol_table.add_scope(gen('scope'))
    p[0] = TreeNode('ScopeStart', symbol_table.current_scope, 'None')
    return

def p_ScopeEnd(p):
    '''ScopeEnd : empty
    '''
    parsed.append(p.slice)
    symbol_table.end_scope()
    return

def p_StatementList(p):
    '''StatementList : Statement SEMICOLON StatementList
                     | empty
    '''
    parsed.append(p.slice)
    if len(p) == 4:
        p[0] = TreeNode('StatementList', 0, 'INT', 0, [p[1].data] + p[3].children, p[1].TAC)
        p[0].TAC.append_TAC(p[3].TAC)
    else:
        p[0] = TreeNode('StatementList', 0, 'INT')
    return

def p_Statement(p):
    '''Statement : Declaration
                 | SimpleStmt
                 | ReturnStmt
                 | Block
                 | IfStmt
                 | SwitchStmt
                 | ForStmt
                 | BreakStmt
                 | ContinueStmt
                 | GotoStmt
                 | PrintIntStmt
                 | PrintStrStmt
    '''
    parsed.append(p.slice)
    p[0] = p[1]
    p[0].name = 'Statement'
    return

def p_PrintIntStmt(p):
    '''PrintIntStmt : PRINTLN LROUND IDENTIFIER RROUND
                    | PRINTLN LROUND int_lit RROUND
    '''
    if hasattr(p[3], 'name') and p[3].name == 'int_lit':
        p[0] = p[3]
        # p[0].isLvalue = 0
    else:
        p[0] = TreeNode('IDENTIFIER', p[3], 'INT', 1, [])
    p[0].TAC.add_line(['print_int', check_variable(p[0]), '', ''])
    p[0].name = 'PrintIntStmt'
    return

def p_PrintStrStmt(p):
    '''PrintStrStmt : PRINTLN LROUND string_lit RROUND
    '''
    p[0] = p[3]
    name = symbol_table.current_scope + '_' + gen('str_list')
    parametersNode = SymbolTableNode(p[3].data, p[3].input_type)
    newNode = SymbolTableNode(name, p[3].input_type, parameters = [parametersNode])
    symbol_table.add_var(newNode)
    p[0].TAC.add_line(['print_str', name, '', ''])
    p[0].name = 'PrintStrStmt'
    return

def p_Declaration(p):
    '''Declaration : ConstDecl
                   | TypeDecl
                   | VarDecl
    '''
    parsed.append(p.slice)
    p[0] = p[1]
    p[0].name = 'Declaration'
    return

def p_ConstDecl(p):
    '''ConstDecl : CONST LROUND ConstSpecList RROUND
                 | CONST ConstSpec
    '''
    parsed.append(p.slice)
    return

def p_ConstSpecList(p):
    '''ConstSpecList : empty
                     | ConstSpecList ConstSpec SEMICOLON
    '''
    parsed.append(p.slice)
    return

def p_ConstSpec(p):
    '''ConstSpec : IDENTIFIER
                 | IdentifierList
                 | IDENTIFIER EQ Expression
                 | IdentifierList EQ ExpressionList
                 | IDENTIFIER Type EQ Expression
                 | IdentifierList Type EQ ExpressionList
    '''
    parsed.append(p.slice)
    return

def p_IdentifierList(p):
    '''IdentifierList : IDENTIFIER COMMA IdentifierBotList
    '''
    parsed.append(p.slice)
    node = TreeNode('IDENTIFIER', p[1], 'INT', 1)
    p[0] = TreeNode('IdentifierList', 0, 'None', 0, [node] + p[3].children, p[3].TAC)
    return

def p_IdentifierBotList(p):
    '''IdentifierBotList : IDENTIFIER COMMA IdentifierBotList
                         | IDENTIFIER
    '''
    parsed.append(p.slice)
    if len(p) == 2:
        node = TreeNode('IDENTIFIER', p[1], 'INT', 1)
        p[0] = TreeNode('IdentifierBotList', 0, 'None', 0, [node])
    elif len(p) == 4:
        node = TreeNode('IDENTIFIER', p[1], 'INT', 1)
        p[0] = TreeNode('IdentifierBotList', 0, 'None', 0, [node] + p[3].children, p[3].TAC)
    return

def p_ExpressionList(p):
    '''ExpressionList : Expression COMMA ExpressionBotList
    '''
    parsed.append(p.slice)
    p[0] = TreeNode('ExpressionList', 0, 'INT', 0, [p[1]] + p[3].children, p[1].TAC)
    p[0].TAC.append_TAC(p[3].TAC)
    return

def p_ExpressionBotList(p):
    '''ExpressionBotList : Expression COMMA ExpressionBotList
                         | Expression
    '''
    parsed.append(p.slice)
    if len(p) == 2:
        p[0] = TreeNode('ExpressionBotList', 0, 'INT', 0, [p[1]], p[1].TAC)
    elif len(p) == 4:
        p[0] = TreeNode('ExpressionBotList', 0, 'INT', 0, [p[1]] + p[3].children, p[1].TAC)
        p[0].TAC.append_TAC(p[3].TAC)
    return

def p_TypeDecl(p):
    '''TypeDecl : TYPE TypeSpecTopList
    '''
    parsed.append(p.slice)
    return

def p_TypeSpecTopList(p):
    '''TypeSpecTopList : TypeSpec
                       | LROUND TypeSpecList RROUND
    '''
    parsed.append(p.slice)
    return

def p_TypeSpecList(p):
    '''TypeSpecList : empty
                    | TypeSpecList TypeSpec SEMICOLON
    '''
    parsed.append(p.slice)
    return

def p_TypeSpec(p):
    '''TypeSpec : AliasDecl
                | TypeDef
    '''
    parsed.append(p.slice)
    return

def p_AliasDecl(p):
    '''AliasDecl : IDENTIFIER EQ Type
    '''
    parsed.append(p.slice)
    return

def p_TypeDef(p):
    '''TypeDef : IDENTIFIER Type
    '''
    parsed.append(p.slice)
    return

def p_Type(p):
    '''Type : TypeLit
            | StandardTypes
            | LROUND Type RROUND
    '''
    parsed.append(p.slice)
    if len(p) == 2:
        p[0] = p[1]
    else:
        p[0] = p[2]
    p[0].name = 'Type'
    return

def p_StandardTypes(p):
    '''StandardTypes : PREDEFINED_TYPES
    '''
    parsed.append(p.slice)
    p[0] = TreeNode('StandardTypes', p[1], 'NONE')
    return

def p_TypeLit(p):
    '''TypeLit : ArrayType
               | StructType
               | FunctionType
               | PointerType
    '''
    parsed.append(p.slice)
    p[0] = p[1]
    p[0].name = 'TypeLit'
    return

def p_PointerType(p):
    '''PointerType : STAR Type
    '''
    parsed.append(p.slice)
    return

def p_ArrayType(p):
    '''ArrayType : LSQUARE ArrayLength RSQUARE Type
    '''
    parsed.append(p.slice)
    p[0] = TreeNode('ArrayType', p[2].data, p[4].data)
    return

def p_ArrayLength(p):
    '''ArrayLength : Expression
    '''
    parsed.append(p.slice)
    p[0] = p[1]
    p[0].name = 'ArrayLength'
    return

def p_StructType(p):
    '''StructType : STRUCT LCURLY FieldDeclList RCURLY
    '''
    parsed.append(p.slice)
    return

def p_FieldDeclList(p):
    '''FieldDeclList : empty
                     | FieldDeclList FieldDecl SEMICOLON
    '''
    parsed.append(p.slice)
    return

def p_FieldDecl(p):
    '''FieldDecl : IdentifierList Type TagTop
                 | IDENTIFIER Type TagTop
    '''
    parsed.append(p.slice)
    return

def p_TagTop(p):
    '''TagTop : empty
              | Tag
    '''
    parsed.append(p.slice)
    return

def p_Tag(p):
    '''Tag : string_lit
    '''
    parsed.append(p.slice)
    return

def p_FunctionType(p):
    '''FunctionType : FUNC Signature
    '''
    parsed.append(p.slice)
    return

def p_Signature(p):
    '''Signature : Parameters
                 | Parameters Result
    '''
    parsed.append(p.slice)
    p[0] = p[1]
    p[0].name = 'Signature'
    s = 'scope_' + str(len(generated['scope']))
    symbol_table.new_scope(s)
    for child in p[1].children:
        symbol_table.add_identifier(child, s)
        newNode = SymbolTableNode(s + '_' + child.data, child.input_type)
        symbol_table.add_var(newNode, s)
    # symbol_table.print_symbol_table()
    if len(p) == 2:
        p[0].input_type = TreeNode('Result', 0, 'None')
    else:
        p[0].input_type = p[2]
    return

def p_Result(p):
    '''Result : Parameters
              | Type
    '''
    parsed.append(p.slice)
    if p[1].name == 'Type':
        p[0] = TreeNode('Result', 1, 'None', 0, [p[1]])
    else:
        p[0] = p[1]
    p[0].name = 'Result'
    return

def p_Parameters(p):
    '''Parameters : LROUND RROUND
                  | LROUND ParameterList RROUND
    '''
    parsed.append(p.slice)
    if len(p) == 3:
        p[0] = TreeNode('Parameters', 0, 'None')
    else:
        p[0] = p[2]
    p[0].name = 'Parameters'
    return

def p_ParameterList(p):
    '''ParameterList : ParameterDecl
                     | ParameterList COMMA ParameterDecl
    '''
    parsed.append(p.slice)
    if len(p) == 2:
        p[0] = p[1]
        p[0].name = 'ParameterList'
    elif len(p) == 4:
        p[0] = TreeNode('ParameterList', p[1].data + p[3].data, 'None', 0, p[1].children + p[3].children, p[1].TAC)
        p[0].TAC.append_TAC(p[3].TAC)
    return

def p_ParameterDecl(p):
    '''ParameterDecl : IdentifierList Type
                     | IDENTIFIER Type
                     | Type
    '''
    parsed.append(p.slice)
    p[0] = TreeNode('ParameterDecl', 0, 'None')
    if len(p) == 3:
        if hasattr(p[1], 'name') and p[1].name == 'IdentifierList':
            for node in p[1].children:
                p[0].data += 1
                node.input_type = p[2].data
                p[0].children += [node]
        else:
            node = TreeNode('IDENTIFIER', p[1], p[2].data, 1)
            p[0].data += 1
            p[0].children += [node]
    else:
        p[0].data += 1
        p[0].children += [p[1]]
    return

def p_VarDecl(p):
    '''VarDecl : VAR VarSpecTopList
    '''
    parsed.append(p.slice)
    p[0] = p[2]
    p[0].name = 'VarDecl'
    return

def p_VarSpecTopList(p):
    '''VarSpecTopList : VarSpec
                      | LROUND VarSpecList RROUND
    '''
    parsed.append(p.slice)
    if len(p) == 2:
        p[0] = p[1]
    else:
        p[0] = p[2]
    p[0].name = 'VarSpecTopList'
    return

def p_VarSpecList(p):
    '''VarSpecList : empty
                   | VarSpecList VarSpec SEMICOLON
    '''
    return

def p_VarSpec(p):
    '''VarSpec : IDENTIFIER Type
               | IDENTIFIER EQ Expression
               | IDENTIFIER Type EQ Expression
               | IdentifierList Type
               | IdentifierList EQ ExpressionList
               | IdentifierList Type EQ ExpressionList
    '''
    # Insert into symbol table
    p[0] = TreeNode('VarSpec', 0, 'NONE')
    if hasattr(p[1], 'name') and p[1].name == 'IdentifierList':
        zero_val = TreeNode('decimal_lit', 0, 'INT')
        # l1 = len(p[1].children)
        # if len(p) == 3:
        #     expr_list = TreeNode('Expr_List', 0, 'NONE', 0, [zero_val] * l1)
        # elif len(p) == 4:
        #     expr_list = p[3]
        # elif len(p) == 5:
        #     expr_list = p[4]
        # l2 = len(expr_list.children)
        # p[0].TAC.append_TAC(expr_list.TAC)
        # p[0].TAC.append_TAC(p[1].TAC)
        # if l1 == l2:
        #     for i in range(l1):
        #         p[0].TAC.add_line(['=', p[1].children[i], expr_list.children[i].data, ''])
        # else:
        #     print_error("Variable Declaration mismatch: " + str(l1) + " identifier(s) but " + str(l2) + " value(s)")
    else:
        p[1] = TreeNode('IDENTIFIER',p[1],'INT',1)
        if p[2].input_type != 'NONE':
            # array case
            # p[2].print_node()
            if symbol_table.add_identifier(p[1], size = p[2].data) == False:
                print_error("Unable to add to SymbolTable")
                return
            name = symbol_table.search_identifier(p[1].data)
            newNode = SymbolTableNode(name, p[1].input_type,size = p[2].data)
            symbol_table.add_var(newNode)
            p[0] = TreeNode('VarSpec',p[1].data,'INT')
        # expr = TreeNode('Expr', 0, 'NONE')
        # if len(p) == 4:
        #     expr = p[3]
        #     p[0].TAC.append_TAC(p[3].TAC)
        #     p[0].TAC.add_line(['=', check_variable(p[1]), check_variable(expr), ''])
        # elif len(p) == 5:
        #     expr = p[4]
        #     p[0].TAC.append_TAC(p[4].TAC)
        #     p[0].TAC.add_line(['=', check_variable(p[1]), check_variable(expr), ''])
    return

def p_FunctionDecl(p):
    '''FunctionDecl : FUNC FunctionName Signature
                    | FUNC FunctionName Signature FunctionBody
    '''
    parsed.append(p.slice)
    # symbol_table.print_symbol_table()
    p[0] = TreeNode('FunctionDecl', 0, 'INT')
    # print symbol_table.current_scope
    # p[4].TAC.print_code()
    symbol_table.add_function(p[2].data, p[3].input_type, p[3].children)
    if len(p) == 5:
        noOfParams = 0
        for f in symbol_table.symbol_table[symbol_table.current_scope]['functions']:
            if f.name == p[2].data:
                noOfParams = len(f.parameters)
        p[0].TAC.add_line(['func', check_variable(p[2]), str(noOfParams), ''])
        for child in reversed(p[3].children):
            p[0].TAC.add_line(['getparam', p[4].data + '_' + child.data, '', ''])
        p[0].TAC.add_line(['stack_push', '', '', ''])
        p[0].TAC.append_TAC(p[4].TAC)
    return

def p_FunctionName(p):
    '''FunctionName : IDENTIFIER
    '''
    parsed.append(p.slice)
    p[0] = TreeNode('FunctionName', p[1], 'INT', 1)
    return

def p_FunctionBody(p):
    '''FunctionBody : Block
    '''
    parsed.append(p.slice)
    p[0] = p[1]
    p[0].name = 'FunctionBody'
    return

def p_SimpleStmt(p):
    '''SimpleStmt : Expression
                  | Assignment
                  | ShortVarDecl
                  | IncDecStmt
    '''
    parsed.append(p.slice)
    p[0] = p[1]
    p[0].name = 'SimpleStmt'
    return

def p_IncDecStmt(p):
    '''IncDecStmt : Expression PLUS_PLUS
                  | Expression MINUS_MINUS
    '''
    parsed.append(p.slice)
    one_val = TreeNode('IncDecStmt', '1', 'INT')
    p[0] = p[1]
    if p[1].isLvalue == 1:
        if p[2] == '++':
            p[0].TAC.add_line(['+', check_variable(p[1]), check_variable(p[1]), one_val.data])
        else:
            p[0].TAC.add_line(['-', check_variable(p[1]), check_variable(p[1]), one_val.data])
    else:
        print_error("Lvalue required")
    p[0].name = 'IncDecStmt'
    return

def p_ShortVarDecl(p):
    '''ShortVarDecl : ExpressionList ASSIGN_OP ExpressionList
                    | Expression ASSIGN_OP Expression
    '''
    parsed.append(p.slice)
    # TODO: Add in symbol table
    p[0] = TreeNode('ShortVarDecl', 0, 'INT')
    if p[1].name == 'ExpressionList':
        l1 = len(p[1].children)
        l2 = len(p[3].children)
        p[0].TAC.append_TAC(p[3].TAC)
        p[0].TAC.append_TAC(p[1].TAC)
        if l1 == l2:
            for i in range(l1):
                if p[1].children[i].isLvalue == 0:
                    print_error("Lvalue required")
                    return
                else:
                    if symbol_table.add_identifier(p[1].children[i]) == False:
                        print_error("Unable to add to SymbolTable")
                        return
                    p[0].TAC.add_line([p[2], check_variable(p[1].children[i]), check_variable(p[3].children[i]), ''])
        else:
            print_error("Variable Declaration mismatch: " + str(l1) + " identifier(s) but " + str(l2) + " value(s)")
    elif p[1].name == 'Expression':
        if p[1].isLvalue == 0:
            print_error("Lvalue required")
            return
        else:
            if symbol_table.add_identifier(p[1]) == False:
                print_error("Unable to add to SymbolTable")
                return
            p[0].TAC.append_TAC(p[3].TAC)
            p[0].TAC.append_TAC(p[1].TAC)
            p[0].TAC.add_line([p[2], check_variable(p[1]), check_variable(p[3]), ''])
    return

def p_Assignment(p):
    '''Assignment : ExpressionList assign_op ExpressionList
                  | Expression assign_op Expression
    '''
    parsed.append(p.slice)
    p[0] = TreeNode('Assignment', 0, 'INT')
    if p[1].name == 'ExpressionList':
        l1 = len(p[1].children)
        l2 = len(p[3].children)
        p[0].TAC.append_TAC(p[3].TAC)
        p[0].TAC.append_TAC(p[1].TAC)
        if l1 == l2:
            for i in range(l1):
                if p[1].children[i].isLvalue == 0:
                    print_error("Lvalue required")
                    return
                else:
                    if symbol_table.search_identifier(p[1].children[i].data) == False and p[1].children[i].data not in generated['temp']:
                        print_error("Variable " + p[1].children[i].data + " is undefined")
                        return
                    if p[3].children[i].isLvalue == 1 and symbol_table.search_identifier(p[3].children[i].data) == False and p[3].children[i].data not in generated['temp']:
                        print_error("Variable " + p[3].children[i].data + " is undefined")
                        return
                    p[0].TAC.add_line([p[2].data, check_variable(p[1].children[i]), check_variable(p[3].children[i]), ''])
        else:
            print_error("Variable Declaration mismatch: " + str(l1) + " identifier(s) but " + str(l2) + " value(s)")
    elif p[1].name == 'Expression':
        if p[1].isLvalue == 0:
            print_error("Lvalue required")
            return
        else:
            if symbol_table.search_identifier(p[1].data) == False and p[1].data not in generated['temp']:
                print_error("Variable " + p[1].data + " is undefined")
                return
            if p[3].isLvalue == 1 and symbol_table.search_identifier(p[3].data) == False and p[3].data not in generated['temp']:
                print_error("Variable " + p[3].data + " is undefined")
                return
            # print symbol_table.current_scope
            p[0].TAC.append_TAC(p[3].TAC)
            p[0].TAC.append_TAC(p[1].TAC)
            p[0].TAC.add_line([p[2].data, check_variable(p[1]), check_variable(p[3]), ''])
    return

def p_assign_op(p):
    '''assign_op : EQ
                 | PLUS_EQ
                 | MINUS_EQ
                 | OR_EQ
                 | CARET_EQ
                 | STAR_EQ
                 | DIVIDE_EQ
                 | MODULO_EQ
                 | LS_EQ
                 | RS_EQ
                 | AMP_EQ
                 | AND_OR_EQ
    '''
    parsed.append(p.slice)
    p[0] = TreeNode('assign_op', p[1], 'OPERATOR')
    return

def p_IfStmt(p):
    '''IfStmt : IF Expression Block
              | IF Expression Block ELSE elseTail
    '''
    parsed.append(p.slice)
    if len(p) == 4:
        l1 = gen('label')
        p[0] = TreeNode('IfStmt', 0, 'INT')
        p[0].TAC.append_TAC(p[2].TAC)
        p[0].TAC.add_line(['ifgotoeq', check_variable(p[2]), '0', l1])
        p[0].TAC.append_TAC(p[3].TAC)
        p[0].TAC.add_line(['label', l1, '', ''])
    if len(p) == 6:
        l1 = gen('label')
        l2 = gen('label')
        p[0] = TreeNode('IfStmt', 0, 'INT')
        p[0].TAC.append_TAC(p[2].TAC)
        p[0].TAC.add_line(['ifgotoeq', check_variable(p[2]), '0', l1])
        p[0].TAC.append_TAC(p[3].TAC)
        p[0].TAC.add_line(['goto', l2, '', ''])
        p[0].TAC.add_line(['label', l1, '', ''])
        p[0].TAC.append_TAC(p[5].TAC)
        p[0].TAC.add_line(['label', l2, '', ''])
    return

def p_elseTail(p):
    '''elseTail : IfStmt
                | Block
    '''
    parsed.append(p.slice)
    p[0] = p[1]
    p[0].name = 'elseTail'
    return

def p_SwitchStmt(p):
    '''SwitchStmt : ExprSwitchStmt
    '''
    parsed.append(p.slice)
    p[0] = TreeNode('SwitchStmt', 0, 'INT', 0, [], p[1].TAC)
    return

def p_ExprSwitchStmt(p):
    '''ExprSwitchStmt : SWITCH SimpleStmt SEMICOLON LCURLY ScopeStart ExprCaseClauseList ScopeEnd RCURLY
                      | SWITCH SimpleStmt SEMICOLON Expression LCURLY ScopeStart ExprCaseClauseList ScopeEnd RCURLY
                      | SWITCH LCURLY ScopeStart ExprCaseClauseList ScopeEnd RCURLY
                      | SWITCH Expression LCURLY ScopeStart ExprCaseClauseList ScopeEnd RCURLY
    '''
    parsed.append(p.slice)
    if len(p) == 8:
        l1 = gen('label')
        l2 = gen('label')
        p[0] = TreeNode('ExprSwitchStmt', 0, 'INT')
        p[0].TAC.append_TAC(p[2].TAC)
        t1 = TreeNode('IDENTIFIER', gen('temp'), 'INT', 1)
        p[0].TAC.add_line(['=', check_variable(t1) , check_variable(p[2]), ''])
        p[0].TAC.append_TAC(p[5].data)
        for i in range(len(p[5].children)):
            p[0].TAC.add_line(['ifgotoeq', check_variable(t1), p[5].children[i][0], p[5].children[i][1]])
        p[0].TAC.add_line(['goto', l2, '', ''])
        for i in range(p[5].TAC.length()):
            if i in p[5].TAC.leaders[1:]:
                p[0].TAC.add_line(['goto', l2, '', ''])
            p[0].TAC.add_line(p[5].TAC.code[i])
        p[0].TAC.add_line(['label', l2, '', ''])
    return

def p_ExprCaseClauseList(p):
    '''ExprCaseClauseList : empty
                          | ExprCaseClauseList ExprCaseClause
    '''
    parsed.append(p.slice)
    TAC1 = ThreeAddressCode()
    TAC2 = ThreeAddressCode()
    if len(p) == 3:
        TAC1 = p[1].data
        TAC2 = p[2].data
        p[0] = TreeNode('ExprCaseClauseList', TAC1, 'INT', 0, p[1].children + p[2].children, p[1].TAC)
        p[0].TAC.add_leader(p[0].TAC.length())
        p[0].TAC.append_TAC(p[2].TAC)
        p[0].data.append_TAC(TAC2)
    else:
        p[0] = TreeNode('ExprCaseClauseList', TAC1, 'INT')
    return

def p_ExprCaseClause(p):
    '''ExprCaseClause : ExprSwitchCase COLON StatementList
    '''
    parsed.append(p.slice)
    l1 = gen('label')
    p[0] = TreeNode('ExprCaseClause', 0, 'INT')
    # p[0].TAC.append_TAC(p[1].TAC)
    p[0].TAC.add_line(['label', l1, '', ''])
    # p[0].TAC.add_line(['ifgotoneq', p[1].children, p[1].children, l1])
    p[0].TAC.append_TAC(p[3].TAC)
    p[0].children = [[p[1].data,l1]]
    p[0].data = p[1].TAC
    return

def p_ExprSwitchCase(p):
    '''ExprSwitchCase : CASE ExpressionList
                      | DEFAULT
                      | CASE Expression
    '''
    parsed.append(p.slice)
    p[0] = TreeNode('ExprSwitchCase', 0, 'INT')
    if len(p) == 3:
        p[0].data = p[2].data
        p[0].TAC = p[2].TAC
    return

def p_ForStmt(p):
    '''ForStmt : FOR Expression Block
               | FOR Block
    '''
    parsed.append(p.slice)
    p[0] = TreeNode('ForStmt', 0, 'INT')
    if len(p) == 4:
        l1 = gen('label')
        l2 = gen('label')
        p[0].TAC.add_line(['label', l1, '', ''])
        p[0].TAC.append_TAC(p[2].TAC)
        p[0].TAC.add_line(['ifgotoeq',check_variable(p[2]), '0', l2])
        p[0].TAC.append_TAC(p[3].TAC)
        p[0].TAC.add_line(['goto', l1, '', ''])
        p[0].TAC.add_line(['label', l2, '', ''])
    if len(p) == 3:
        l1 = gen('label')
        # l2 = gen('label')
        p[0].TAC.add_line(['label', l1, '', ''])
        p[0].TAC.append_TAC(p[2].TAC)
        p[0].TAC.add_line(['goto', l1, '', ''])
        # p[0].TAC.add_line([l2])
    return

def p_ReturnStmt(p):
    '''ReturnStmt : RETURN
                  | RETURN Expression
                  | RETURN ExpressionList
    '''
    parsed.append(p.slice)
    if len(p) == 2:
        p[0] = TreeNode('ReturnStmt', 0, 'None')
        p[0].TAC.add_line(['return', '', '', ''])
    if len(p) == 3:
        if p[2].name == 'Expression':
            p[0] = p[2]
            p[0].name = 'ReturnStmt'
            p[0].TAC.add_line(['return', check_variable(p[2]), '', ''])
    return

def p_BreakStmt(p):
    '''BreakStmt : BREAK IDENTIFIER
    '''
    parsed.append(p.slice)
    return

def p_ContinueStmt(p):
    '''ContinueStmt : CONTINUE IDENTIFIER
    '''
    parsed.append(p.slice)
    return

def p_GotoStmt(p):
    '''GotoStmt : GOTO IDENTIFIER
    '''
    parsed.append(p.slice)
    return

def p_Expression(p):
    '''Expression : UnaryExpr
                  | Expression OR_OR Expression
                  | Expression AMP_AMP Expression
                  | Expression EQ_EQ Expression
                  | Expression NOT_EQ Expression
                  | Expression LT Expression
                  | Expression LT_EQ Expression
                  | Expression GT Expression
                  | Expression GT_EQ Expression
                  | Expression PLUS Expression
                  | Expression MINUS Expression
                  | Expression OR Expression
                  | Expression CARET Expression
                  | Expression STAR Expression
                  | Expression DIVIDE Expression
                  | Expression MODULO Expression
                  | Expression LS Expression
                  | Expression RS Expression
                  | Expression AMP Expression
                  | Expression AND_OR Expression
    '''
    parsed.append(p.slice)
    if len(p) == 2:
        p[0] = p[1]
    elif len(p) == 4:
        p[0] = TreeNode('IDENTIFIER', gen('temp'), 'INT', 1, [], p[1].TAC)
        p[0].TAC.append_TAC(p[3].TAC)
        p[0].TAC.add_line([p[2],check_variable(p[0]), check_variable(p[1]), check_variable(p[3])])
    p[0].name = 'Expression'
    return

def p_UnaryExpr(p):
    '''UnaryExpr : PrimaryExpr
                 | unary_op UnaryExpr
    '''
    parsed.append(p.slice)
    if len(p) == 2:
        p[0] = p[1]
    elif len(p) == 3:
        p[0] = TreeNode('IDENTIFIER', gen('temp'), 'INT', 1)
        p[0].TAC.add_line([check_variable(p[1]), check_variable(p[0]), check_variable(p[2]), ''])
    p[0].name = 'UnaryExpr'
    return

def p_unary_op(p):
    '''unary_op : PLUS
                | MINUS
                | NOT
                | CARET
                | STAR
                | AMP
                | LT_MINUS
    '''
    parsed.append(p.slice)
    p[0] = TreeNode('unary_op', p[1], 'OPERATOR')
    return

def p_PrimaryExpr(p):
    '''PrimaryExpr : Operand
                   | IDENTIFIER
                   | PrimaryExpr Selector
                   | PrimaryExpr Index
                   | PrimaryExpr Arguments
    '''
    parsed.append(p.slice)
    if len(p) == 2:
        if p.slice[1].type == 'IDENTIFIER':
            p[0] = TreeNode('IDENTIFIER', p[1], 'INT', 1)
        elif p[1].name == 'Operand':
            p[0] = p[1]
    elif len(p) == 3:
        if p[2].name == 'Index':
            p[0] = TreeNode('IDENTIFIER', p[1].data, 'INT', 1, p[2].data)
        elif p[2].name == 'Arguments':
            p[0] = TreeNode('IDENTIFIER', gen('temp'), 'INT', 1)
            p[0].TAC.append_TAC(p[1].TAC)
            p[0].TAC.append_TAC(p[2].TAC)
            # p[1].print_node()
            func = check_variable(p[1]).split("_")
            scope, funcName = "_".join(func[:2]), "_".join(func[2:])
            temp = 0
            for f in symbol_table.symbol_table[scope]['functions']:
                if f.name == funcName:
                    temp = len(f.parameters)
            # p[2].print_node()
            for child in p[2].children:
                p[0].TAC.add_line(['putparam', check_variable(child), '', ''])
            if temp != p[2].data:
                print_error('Function ' + funcName + ' requires ' + str(temp) + ' parameters but ' + str(p[2].data) + ' supplied')
            p[0].TAC.add_line(['call', check_variable(p[1]), str(p[2].data), ''])
            p[0].TAC.add_line(['return_value', check_variable(p[0]), '', ''])
    p[0].name = 'PrimaryExpr'
    return

def p_Operand(p):
    '''Operand : Literal
               | LROUND Expression RROUND
    '''
    parsed.append(p.slice)
    if len(p) == 2:
        p[0] = p[1]
    else:
        p[0] = p[2]
    p[0].name = 'Operand'
    return

def p_Literal(p):
    '''Literal : BasicLit
               | FunctionLit
    '''
    parsed.append(p.slice)
    p[0] = p[1]
    p[0].name = 'Literal'
    return

def p_BasicLit(p):
    '''BasicLit : int_lit
                | float_lit
                | string_lit
                | rune_lit
    '''
    parsed.append(p.slice)
    p[0] = p[1]
    p[0].name = 'BasicLit'
    return

def p_int_lit(p):
    '''int_lit : decimal_lit
               | octal_lit
               | hex_lit
    '''
    parsed.append(p.slice)
    p[0] = p[1]
    p[0].name = 'int_lit'
    return

def p_decimal_lit(p):
    '''decimal_lit : DECIMAL_LIT
    '''
    parsed.append(p.slice)
    p[0] = TreeNode('decimal_lit', p[1], 'INT')
    return

def p_octal_lit(p):
    '''octal_lit : OCTAL_LIT
    '''
    parsed.append(p.slice)
    p[0] = TreeNode('octal_lit', p[1], 'OCT')
    return

def p_hex_lit(p):
    '''hex_lit : HEX_LIT
    '''
    parsed.append(p.slice)
    p[0] = TreeNode('hex_lit', p[1], 'HEX')
    return

def p_float_lit(p):
    '''float_lit : FLOAT_LIT
    '''
    parsed.append(p.slice)
    p[0] = TreeNode('float_lit', p[1], 'FLOAT')
    return

def p_FunctionLit(p):
    '''FunctionLit : FUNC Signature FunctionBody
    '''
    parsed.append(p.slice)
    # Anonymous Function
    # Not implemented yet
    return

def p_Selector(p):
    '''Selector : DOT IDENTIFIER
    '''
    parsed.append(p.slice)
    return

def p_Index(p):
    '''Index : LSQUARE Expression RSQUARE
    '''
    parsed.append(p.slice)
    p[0] = p[2]
    p[0].name = 'Index'
    return

def p_Arguments(p):
    '''Arguments : LROUND RROUND
                 | LROUND ExpressionList RROUND
                 | LROUND Expression RROUND
                 | LROUND Type RROUND
                 | LROUND Type COMMA ExpressionList RROUND
                 | LROUND Type COMMA Expression RROUND
    '''
    # print p.slice
    parsed.append(p.slice)
    if len(p) == 3:
        p[0] = TreeNode('Arguments', 0, 'None')
    if len(p) == 4:
        if p[2].name == 'Expression':
            p[0] = TreeNode('Arguments', 1, 'None', 0, [p[2]], p[2].TAC)
        if p[2].name == 'ExpressionList':
            p[0] = p[2]
            p[0].name = 'Arguments'
            p[0].data = len(p[2].children)
    return

def p_string_lit(p):
    '''string_lit : STRING_LIT
    '''
    parsed.append(p.slice)
    p[0] = TreeNode('string_lit', p[1], 'STRING')
    return

def p_rune_lit(p):
    '''rune_lit : RUNE_LIT
    '''
    parsed.append(p.slice)
    p[0] = TreeNode('rune_lit', p[1], 'RUNE')
    return

def p_empty(p):
    'empty :'
    pass

# Standard Logger
logging.basicConfig(
    level = logging.DEBUG,
    filename = "parselog.txt",
    filemode = "w",
    format = "%(filename)10s:%(lineno)4d:%(message)s"
)
log = logging.getLogger()

yacc.yacc(debug=True, debuglog=log)

input_file = sys.argv[1]
import os
if os.path.isfile(input_file) is False:
    print('Input file ' + input_file + ' does not exist')
    sys.exit(1)

input_code = open(input_file, 'r').read()
if input_code[len(input_code)-1] != '\n':
    input_code += '\n'

yacc.parse(input_code, debug=log, tracking=True)
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 198, 6738, 2438, 1330, 12200, 19667, 198, 6738, 2438, 1330, 7683, 20231, 10669, 198, 6738, 31191, 263, 1330, 16326, 198, 6738, 4738, 1330, 1635, 198, 6738, 6194, 62, 11487, 1330, 38357, 10962, ...
1.925113
17,533
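The record above follows the standard PLY pattern: a token list imported from a lexer module, a precedence table, and one p_<Rule> function per production whose docstring holds the grammar. A minimal, self-contained sketch of that pattern (the expression grammar and token names here are illustrative, not taken from the record):

import ply.lex as lex
import ply.yacc as yacc

tokens = ('NUMBER', 'PLUS', 'TIMES')
t_PLUS = r'\+'
t_TIMES = r'\*'
t_ignore = ' \t'

def t_NUMBER(t):
    r'\d+'
    t.value = int(t.value)
    return t

def t_error(t):
    # Skip characters the lexer does not recognize
    t.lexer.skip(1)

# Later entries bind tighter, so TIMES outranks PLUS
precedence = (
    ('left', 'PLUS'),
    ('left', 'TIMES'),
)

def p_expr_binop(p):
    '''expr : expr PLUS expr
            | expr TIMES expr
    '''
    # p[1], p[2], p[3] mirror the right-hand-side symbols
    p[0] = p[1] + p[3] if p[2] == '+' else p[1] * p[3]

def p_expr_number(p):
    'expr : NUMBER'
    p[0] = p[1]

def p_error(p):
    print("syntax error")

lexer = lex.lex()
parser = yacc.yacc()
print(parser.parse("2+3*4"))  # -> 14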
import argparse
import imageio
import progressbar
from _routines import ffi, lib
from pylab import *
from random import Random

RESOLUTIONS = {
    "2160p": (3840, 2160),
    "1440p": (2560, 1440),
    "1080p": (1920, 1080),
    "720p": (1280, 720),
    "480p": (854, 480),
    "360p": (640, 360),
    "240p": (426, 240),
    "160p": (284, 160),
    "80p": (142, 80),
    "40p": (71, 40),
}

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Render audio samples')
    parser.add_argument('outfile', type=str, help='Output file name')
    parser.add_argument('--params', type=str, help='Parameter YAML file name')
    parser.add_argument('--resolution', choices=RESOLUTIONS.keys(), help='Video and simulation grid resolution')
    parser.add_argument('--width', type=int, help='Video and simulation grid width', metavar='W')
    parser.add_argument('--height', type=int, help='Video and simulation grid height', metavar='H')
    parser.add_argument('--framerate', type=int, help='Video frame rate')
    parser.add_argument('--video-quality', type=int, help='Video quality factor')
    parser.add_argument('--video-duration', type=float, help='Duration of video to render in seconds')
    args = parser.parse_args()

    if not args.framerate:
        args.framerate = 24
    if not args.video_quality:
        args.video_quality = 10

    writer = imageio.get_writer(args.outfile, fps=args.framerate, quality=args.video_quality, macro_block_size=1)

    # Compute derived parameters
    if args.resolution:
        width, height = RESOLUTIONS[args.resolution]
        if not args.width:
            args.width = width
        if not args.height:
            args.height = height
    if (not args.width) or (not args.height):
        raise ValueError("Invalid or missing resolution")
    if not args.video_duration:
        raise ValueError("Missing video duration")
    args.aspect = args.width / args.height
    args.num_frames = int(args.video_duration * args.framerate)
    args.dt = 1.0 / args.num_frames

    do_render(args, writer)
    writer.close()
[ 11748, 1822, 29572, 198, 11748, 2939, 952, 198, 11748, 4371, 5657, 198, 6738, 4808, 81, 448, 1127, 1330, 277, 12463, 11, 9195, 198, 6738, 279, 2645, 397, 1330, 1635, 198, 6738, 4738, 1330, 14534, 198, 198, 19535, 3535, 3843, 11053, 796,...
2.682519
778
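do_render is not defined in the record, but the writer it is handed follows the usual imageio loop: produce one uint8 RGB frame per step and append it. A minimal sketch under that assumption (the frame contents are placeholder noise; the 160x284 shape matches the record's "160p" resolution):

import imageio
import numpy as np

writer = imageio.get_writer("out.mp4", fps=24, quality=10, macro_block_size=1)
for _ in range(48):  # two seconds at 24 fps
    frame = np.random.randint(0, 256, size=(160, 284, 3), dtype=np.uint8)
    writer.append_data(frame)  # one call per video frame
writer.close()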
# Copyright (c) 2015 Nicolas JOUANIN
#
# See the file license.txt for copying permission.
import anyio
import unittest

from hbmqtt.mqtt.subscribe import SubscribePacket, SubscribePayload
from hbmqtt.mqtt.packet import PacketIdVariableHeader
from hbmqtt.mqtt.constants import QOS_1, QOS_2
from hbmqtt.adapters import BufferAdapter
[ 2, 15069, 357, 66, 8, 1853, 29737, 449, 2606, 1565, 1268, 198, 2, 198, 2, 4091, 262, 2393, 5964, 13, 14116, 329, 23345, 7170, 13, 198, 11748, 597, 952, 198, 11748, 555, 715, 395, 198, 198, 6738, 289, 20475, 80, 926, 13, 76, 80, ...
2.990991
111
import logging

logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
    datefmt='%Y/%m/%d %H:%M:%S',
    level=logging.INFO,
)
logger = logging.getLogger("Main")

import os,random
import numpy as np
import torch
from processing import convert_examples_to_features, read_squad_examples
from processing import ChineseFullTokenizer
from pytorch_pretrained_bert.my_modeling import BertConfig
from optimization import BERTAdam
import config
from utils import read_and_convert, divide_parameters
from modeling import BertForQASimple, BertForQASimpleAdaptorTraining
from textbrewer import DistillationConfig, TrainingConfig, BasicTrainer
from torch.utils.data import TensorDataset, DataLoader, RandomSampler
from functools import partial
from train_eval import predict

if __name__ == "__main__":
    main()
[ 11748, 18931, 198, 6404, 2667, 13, 35487, 16934, 7, 198, 220, 220, 220, 5794, 11639, 4, 7, 292, 310, 524, 8, 82, 532, 4064, 7, 5715, 3672, 8, 82, 532, 4064, 7, 3672, 8, 82, 532, 220, 4064, 7, 20500, 8, 82, 3256, 198, 220, 220,...
3.083942
274
# EXPERIMENTAL: all may be removed soon

from gym.benchmarks import scoring
from gym.benchmarks.registration import benchmark_spec, register_benchmark, registry, register_benchmark_view  # imports used elsewhere

register_benchmark(
    id='Atari200M',
    scorer=scoring.TotalReward(),
    name='Atari200M',
    view_group="Atari",
    description='7 Atari games, with pixel observations',
    tasks=[
        {'env_id': 'BeamRiderNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(2e8), 'reward_floor': 363.9, 'reward_ceiling': 60000.0},
        {'env_id': 'BreakoutNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(2e8), 'reward_floor': 1.7, 'reward_ceiling': 800.0},
        {'env_id': 'EnduroNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(2e8), 'reward_floor': 0.0, 'reward_ceiling': 5000.0},
        {'env_id': 'PongNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(2e8), 'reward_floor': -20.7, 'reward_ceiling': 21.0},
        {'env_id': 'QbertNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(2e8), 'reward_floor': 163.9, 'reward_ceiling': 40000.0},
        {'env_id': 'SeaquestNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(2e8), 'reward_floor': 68.4, 'reward_ceiling': 100000.0},
        {'env_id': 'SpaceInvadersNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(2e8), 'reward_floor': 148.0, 'reward_ceiling': 30000.0},
    ])

register_benchmark(
    id='Atari40M',
    scorer=scoring.TotalReward(),
    name='Atari40M',
    view_group="Atari",
    description='7 Atari games, with pixel observations',
    tasks=[
        {'env_id': 'BeamRiderNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(4e7), 'reward_floor': 363.9, 'reward_ceiling': 60000.0},
        {'env_id': 'BreakoutNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(4e7), 'reward_floor': 1.7, 'reward_ceiling': 800.0},
        {'env_id': 'EnduroNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(4e7), 'reward_floor': 0.0, 'reward_ceiling': 5000.0},
        {'env_id': 'PongNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(4e7), 'reward_floor': -20.7, 'reward_ceiling': 21.0},
        {'env_id': 'QbertNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(4e7), 'reward_floor': 163.9, 'reward_ceiling': 40000.0},
        {'env_id': 'SeaquestNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(4e7), 'reward_floor': 68.4, 'reward_ceiling': 100000.0},
        {'env_id': 'SpaceInvadersNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(4e7), 'reward_floor': 148.0, 'reward_ceiling': 30000.0},
    ])

register_benchmark(
    id='AtariExploration40M',
    scorer=scoring.TotalReward(),
    name='AtariExploration40M',
    view_group="Atari",
    description='7 Atari games, with pixel observations',
    tasks=[
        {'env_id': 'FreewayNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(4e7), 'reward_floor': 0.1, 'reward_ceiling': 31.0},
        {'env_id': 'GravitarNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(4e7), 'reward_floor': 245.5, 'reward_ceiling': 1000.0},
        {'env_id': 'MontezumaRevengeNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(4e7), 'reward_floor': 25.0, 'reward_ceiling': 10000.0},
        {'env_id': 'PitfallNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(4e7), 'reward_floor': -348.8, 'reward_ceiling': 1000.0},
        {'env_id': 'PrivateEyeNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(4e7), 'reward_floor': 662.8, 'reward_ceiling': 100.0},
        {'env_id': 'SolarisNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(4e7), 'reward_floor': 2047.2, 'reward_ceiling': 5000.0},
        {'env_id': 'VentureNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(4e7), 'reward_floor': 18.0, 'reward_ceiling': 100.0},
    ])

register_benchmark(
    id='ClassicControl2-v0',
    name='ClassicControl2',
    view_group="Control",
    description='Simple classic control benchmark',
    scorer=scoring.ClipTo01ThenAverage(),
    tasks=[
        {'env_id': 'CartPole-v0', 'trials': 1, 'max_timesteps': 2000},
        {'env_id': 'Pendulum-v0', 'trials': 1, 'max_timesteps': 1000},
    ])

register_benchmark(
    id='ClassicControl-v0',
    name='ClassicControl',
    view_group="Control",
    description='Simple classic control benchmark',
    scorer=scoring.ClipTo01ThenAverage(),
    tasks=[
        {'env_id': 'CartPole-v1', 'trials': 3, 'max_timesteps': 100000, 'reward_floor': 0.0, 'reward_ceiling': 500.0},
        {'env_id': 'Acrobot-v1', 'trials': 3, 'max_timesteps': 100000, 'reward_floor': -500.0, 'reward_ceiling': 0.0},
        {'env_id': 'MountainCar-v0', 'trials': 3, 'max_timesteps': 100000, 'reward_floor': -200.0, 'reward_ceiling': -100.0},
        {'env_id': 'Pendulum-v0', 'trials': 3, 'max_timesteps': 200000, 'reward_floor': -1400.0, 'reward_ceiling': 0.0},
    ])

### Autogenerated by tinkerbell.benchmark.convert_benchmark.py

register_benchmark(
    id='Mujoco10M-v0',
    name='Mujoco10M',
    view_group="Control",
    description='Mujoco benchmark with 10M steps',
    scorer=scoring.ClipTo01ThenAverage(),
    tasks=[
        {'env_id': 'Ant-v1', 'trials': 1, 'max_timesteps': 1000000},
        {'env_id': 'Hopper-v1', 'trials': 1, 'max_timesteps': 1000000},
        {'env_id': 'Humanoid-v1', 'trials': 1, 'max_timesteps': 1000000},
        {'env_id': 'HumanoidStandup-v1', 'trials': 1, 'max_timesteps': 1000000},
        {'env_id': 'Walker2d-v1', 'trials': 1, 'max_timesteps': 1000000},
    ])

register_benchmark(
    id='Mujoco1M-v0',
    name='Mujoco1M',
    view_group="Control",
    description='Mujoco benchmark with 1M steps',
    scorer=scoring.ClipTo01ThenAverage(),
    tasks=[
        {'env_id': 'HalfCheetah-v1', 'trials': 3, 'max_timesteps': 1000000, 'reward_floor': -280.0, 'reward_ceiling': 4000.0},
        {'env_id': 'Hopper-v1', 'trials': 3, 'max_timesteps': 1000000, 'reward_floor': 16.0, 'reward_ceiling': 4000.0},
        {'env_id': 'InvertedDoublePendulum-v1', 'trials': 3, 'max_timesteps': 1000000, 'reward_floor': 53.0, 'reward_ceiling': 10000.0},
        {'env_id': 'InvertedPendulum-v1', 'trials': 3, 'max_timesteps': 1000000, 'reward_floor': 5.6, 'reward_ceiling': 1000.0},
        {'env_id': 'Reacher-v1', 'trials': 3, 'max_timesteps': 1000000, 'reward_floor': -43.0, 'reward_ceiling': -0.5},
        {'env_id': 'Swimmer-v1', 'trials': 3, 'max_timesteps': 1000000, 'reward_floor': 0.23, 'reward_ceiling': 500.0},
        {'env_id': 'Walker2d-v1', 'trials': 3, 'max_timesteps': 1000000, 'reward_floor': 1.6, 'reward_ceiling': 5500.0},
    ])

register_benchmark(
    id='MinecraftEasy-v0',
    name='MinecraftEasy',
    view_group="Minecraft",
    description='Minecraft easy benchmark',
    scorer=scoring.ClipTo01ThenAverage(),
    tasks=[
        {'env_id': 'MinecraftBasic-v0', 'trials': 2, 'max_timesteps': 600000, 'reward_floor': -2200.0, 'reward_ceiling': 1000.0},
        {'env_id': 'MinecraftDefaultFlat1-v0', 'trials': 2, 'max_timesteps': 2000000, 'reward_floor': -500.0, 'reward_ceiling': 0.0},
        {'env_id': 'MinecraftTrickyArena1-v0', 'trials': 2, 'max_timesteps': 300000, 'reward_floor': -1000.0, 'reward_ceiling': 2800.0},
        {'env_id': 'MinecraftEating1-v0', 'trials': 2, 'max_timesteps': 300000, 'reward_floor': -300.0, 'reward_ceiling': 300.0},
    ])

register_benchmark(
    id='MinecraftMedium-v0',
    name='MinecraftMedium',
    view_group="Minecraft",
    description='Minecraft medium benchmark',
    scorer=scoring.ClipTo01ThenAverage(),
    tasks=[
        {'env_id': 'MinecraftCliffWalking1-v0', 'trials': 2, 'max_timesteps': 400000, 'reward_floor': -100.0, 'reward_ceiling': 100.0},
        {'env_id': 'MinecraftVertical-v0', 'trials': 2, 'max_timesteps': 900000, 'reward_floor': -1000.0, 'reward_ceiling': 8040.0},
        {'env_id': 'MinecraftMaze1-v0', 'trials': 2, 'max_timesteps': 600000, 'reward_floor': -1000.0, 'reward_ceiling': 1000.0},
        {'env_id': 'MinecraftMaze2-v0', 'trials': 2, 'max_timesteps': 2000000, 'reward_floor': -1000.0, 'reward_ceiling': 1000.0},
    ])

register_benchmark(
    id='MinecraftHard-v0',
    name='MinecraftHard',
    view_group="Minecraft",
    description='Minecraft hard benchmark',
    scorer=scoring.ClipTo01ThenAverage(),
    tasks=[
        {'env_id': 'MinecraftObstacles-v0', 'trials': 1, 'max_timesteps': 900000, 'reward_floor': -1000.0, 'reward_ceiling': 2080.0},
        {'env_id': 'MinecraftSimpleRoomMaze-v0', 'trials': 1, 'max_timesteps': 900000, 'reward_floor': -1000.0, 'reward_ceiling': 4160.0},
        {'env_id': 'MinecraftAttic-v0', 'trials': 1, 'max_timesteps': 600000, 'reward_floor': -1000.0, 'reward_ceiling': 1040.0},
        {'env_id': 'MinecraftComplexityUsage-v0', 'trials': 1, 'max_timesteps': 600000, 'reward_floor': -1000.0, 'reward_ceiling': 1000.0},
    ])

register_benchmark(
    id='MinecraftVeryHard-v0',
    name='MinecraftVeryHard',
    view_group="Minecraft",
    description='Minecraft very hard benchmark',
    scorer=scoring.ClipTo01ThenAverage(),
    tasks=[
        {'env_id': 'MinecraftMedium-v0', 'trials': 2, 'max_timesteps': 1800000, 'reward_floor': -10000.0, 'reward_ceiling': 16280.0},
        {'env_id': 'MinecraftHard-v0', 'trials': 2, 'max_timesteps': 2400000, 'reward_floor': -10000.0, 'reward_ceiling': 32640.0},
    ])

register_benchmark(
    id='MinecraftImpossible-v0',
    name='MinecraftImpossible',
    view_group="Minecraft",
    description='Minecraft impossible benchmark',
    scorer=scoring.ClipTo01ThenAverage(),
    tasks=[
        {'env_id': 'MinecraftDefaultWorld1-v0', 'trials': 2, 'max_timesteps': 6000000, 'reward_floor': -1000.0, 'reward_ceiling': 1000.0},
    ])

bandit_tasks = []
for n_arms in [5, 10, 50]:
    for n_episodes in [10, 100, 500]:
        bandit_tasks.append({
            'env_id': 'BernoulliBandit-{k}.arms-{n}.episodes-v0'.format(k=n_arms, n=n_episodes),
            'trials': 1,
            'max_timesteps': 10 ** 9,
            'reward_floor': 0,
            'reward_ceiling': n_episodes,
        })

register_benchmark(
    id='BernoulliBandit-v0',
    name='BernoulliBandit',
    description='Multi-armed Bernoulli bandits',
    scorer=scoring.ClipTo01ThenAverage(num_episodes=1000),
    tasks=bandit_tasks
)

tabular_mdp_tasks = []
for n_states in [10]:
    for n_actions in [5]:
        for episode_length in [10]:
            for n_episodes in [10, 25, 50, 75, 100]:
                tabular_mdp_tasks.append({
                    'env_id': 'RandomTabularMDP-{s}.states-{a}.actions-{t}.timesteps-{n}.episodes-v0'.format(
                        s=n_states, a=n_actions, t=episode_length, n=n_episodes,
                    ),
                    'trials': 1,
                    'max_timesteps': 10 ** 9,
                    'reward_floor': 0,
                    'reward_ceiling': episode_length * n_episodes * 2,
                })

register_benchmark(
    id='RandomTabularMDP-v0',
    name='RandomTabularMDP',
    description='Random tabular MDPs',
    scorer=scoring.ClipTo01ThenAverage(num_episodes=1000),
    tasks=tabular_mdp_tasks
)
[ 2, 7788, 18973, 3955, 3525, 1847, 25, 477, 743, 307, 4615, 2582, 198, 198, 6738, 11550, 13, 26968, 14306, 1330, 9689, 198, 6738, 11550, 13, 26968, 14306, 13, 2301, 33397, 1330, 18335, 62, 16684, 11, 7881, 62, 26968, 4102, 11, 20478, 1...
1.764511
7,839
#!/usr/bin/env python3
# Copyright 2020 Benjamin Ehret
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# title          :data/timeseries/preprocess_audioset.py
# author         :be
# contact        :behret@ethz.ch
# created        :31/03/2020
# version        :1.0
# python_version :3.7
"""
Script to structure the audioset dataset, which can then be used via
:class:`data.timeseries.audioset_data.AudiosetData`.

The result of this script is available at
https://www.dropbox.com/s/07dfeeuf5aq4w1h/audioset_data_balanced?dl=0

If you want to recreate or modify this dataset, download the audioset data from
https://research.google.com/audioset/download.html and extract the tar.gz into
the following folder: ``datasets/sequential/audioset/audioset_download``.
Subsequently executing this script will create a pickle file containing the
100 class subset of audioset used in this study.

The dataset is stored in tensorflow files. Since we work with pytorch and
there is no utility to read tensorflow files, we extract the data and safe
them as numpy arrays in a pickle file.

Furthermore the data are preprocessed to fit our continual learning
experiments. The original dataset provides three subsets with different
compositions of samples and classes. Since we only work with a subset of
classes and samples, we load all available data and then filter and structure
them according to our criteria. We use the same criteria as Kemker et al.

Classes and samples are restricted in the following way:

Classes:
    - no restriction according to ontology file (parsed from ontology.json)
    - no parent / child relationship (parsed from ontology.json)
    - confidence level > 70% (data was copied from website into txt file)
    - number of samples: we only take classes that have more samples than a
      certain threshold

Samples:
    - since samples can have multiple labels, we only use samples which only
      belong to one of the classes we use
    - we exclude samples that don't have the full length of 10 seconds

The chosen classes and samples are then split into train and test data and
saved to a pickle file.
"""
import numpy as np
import pickle
import tensorflow as tf
import os
import json
from warnings import warn

warn('The script was created for one time usage and has to be adapted when ' +
     'reusing it. All paths specified here are absolute.')

# Tensorflow eager mode needs to be enabled for dataset mapping to work!
tf.enable_eager_execution()

# Set paths and parameters
data_dir = '../../datasets/sequential/audioset/'
download_dir = os.path.join(data_dir,'audioset_download')
fpath_conf_data = os.path.join(data_dir, 'confidence_data.csv')
fpath_label_inds = os.path.join(data_dir, 'class_labels_indices.csv')
fpath_ontology = os.path.join(data_dir, 'ontology.json')
target_path = os.path.join(data_dir, 'audioset_data_balanced.pickle')

n_classes = 100
n_sample = 1000
test_frac = 0.20

### Load data by serializing files and applying decode function.
def decode(serialized_example):
    """Decode data from TFRecord files.

    Args:
        serialized_example: serialized_example as created by
            tf.data.TFRecordDataset

    Returns:
        (tuple): Tuple containing:

        - **audio** (numpy.ndarray): Array of shape (10,128) representing one
          sample with 10 timesteps and 128 features
        - **label** (numpy.ndarray): Array of shape (1,) containing the class
          of the corresponding sample
    """
    sequence_features = {
        'audio_embedding': tf.FixedLenSequenceFeature([], tf.string),
    }
    context_features = {
        'start_time_seconds': tf.FixedLenFeature([], tf.float32),
        'labels': tf.VarLenFeature(dtype=tf.int64),
    }

    context_parsed, sequence_parsed = tf.parse_single_sequence_example(
        serialized_example,
        sequence_features=sequence_features,
        context_features=context_features
    )

    audio = tf.decode_raw(sequence_parsed['audio_embedding'], tf.uint8)
    label = tf.cast(context_parsed['labels'], tf.int64)

    return audio, label

# Apply decode function to all dataset entries using map function.
# Take files from all three data sets since we repartition anyway.
fpaths = []
for path, subdirs, files in os.walk(download_dir):
    for name in files:
        if 'tfrecord' in name:
            fpaths.append(os.path.join(path, name))

# Create dataset and decode
dataset = tf.data.TFRecordDataset(fpaths)
dataset = dataset.map(decode)

# Extract data to lists
x = []
y = []
for d in dataset:
    x.append(d[0].numpy())
    y.append(tf.sparse.to_dense(tf.sparse.reorder(d[1])).numpy())

### Filter classes as described above.
# Parse confidence values
conf_data = {}
with open(fpath_conf_data) as f:
    for line in f:
        tokens = line.split()
        # parse confidence
        c = 0
        for t in tokens:
            if t.find('%') is not -1:
                c = int(t[:-1])
        # parse class name
        n = ''
        for t in tokens:
            if t.find('%') == -1 and t != '-':
                if n == '':
                    n = t
                else:
                    n = n+' '+t
            else:
                break
        conf_data.update({n:c})

# Parse class numbers from label csv file
l = -1
csv_data = {}
with open(fpath_label_inds) as f:
    for line in f:
        if l == -1:
            l += 1
            continue
        tokens = line.split('"')
        n = tokens[1]
        csv_data.update({n:l})
        l +=1

# Parse ontology info from json file
with open(fpath_ontology, 'r') as f:
    json_data = json.load(f)

# Put all data into a single list.
all_data = []
for j in json_data:
    if j['name'] in conf_data.keys():
        class_info = {
            'name' : j['name'],
            'restricted' : j['restrictions'] != [],
            'has_child' : j['child_ids'] != [],
            'conf' : conf_data[j['name']],
            'id' : csv_data[j['name']]
        }
        all_data.append(class_info)

# Filter classes
classes = []
for c in all_data:
    if not c['restricted'] and not c['has_child'] and c['conf'] >= 70:
        classes.append(c['id'])

### Filter the samples.
# Find samples that belong to only one of the potential classes.
# We also exclude some samples that don't have data for the full 10 seconds.

# First discard labels that are not in the set of potential classes
y_fil = []
for i in range(len(y)):
    y_fil.append( np.intersect1d(y[i],classes))

# Find samples with one label
n_labels = np.asarray([len(y) for y in y_fil])
single_label_idx = np.where(n_labels == 1)[0]

# Find samples that are shorter than 10 seconds (to be excluded)
too_short = np.where(np.asarray([x.shape[0] for x in x]) != 10)[0]

# Construct the set of valid samples
valid_idx = np.setdiff1d(single_label_idx,too_short)

# Count number of valid samples for potential classes
y_single = np.asarray([y_fil[i][0] for i in valid_idx])
num_samples = [len(np.where(y_single == i)[0]) for i in classes]

# Take the n classes with the highest number of samples
n_sample_cutoff = np.sort(num_samples)[-n_classes]
class_idx = np.where(np.asarray(num_samples) >= n_sample_cutoff)[0]
our_classes = [classes[i] for i in class_idx]

### Filter the data again according the the chosen classes
y_fil = []
for i in range(len(y)):
    y_fil.append( np.intersect1d(y[i],our_classes))

# Find samples that belong to only one of the potential classes
n_labels = np.asarray([len(y) for y in y_fil])
single_label_idx = np.where(n_labels == 1)[0]

# Find samples that dont are shorter than 10 seconds
too_short = np.where(np.asarray([x.shape[0] for x in x]) != 10)[0]

# Construct the set of valid samples
valid_idx = np.setdiff1d(single_label_idx,too_short)

# Restructure data and relabel the classes to be between 0 and n_classes
y_data = [y_fil[i][0] for i in valid_idx]
y_data = [np.where(np.asarray(our_classes) == i)[0][0] for i in y_data]
y_data = np.asarray(y_data)
x_data = [x[i] for i in valid_idx]
x_data = np.stack(x_data)

### Split into test and train and restrict the number of samples per class
np.random.seed(42)
n_train = int(n_sample * (1-test_frac))
n_test = int(n_sample * test_frac)

train_ind = []
test_ind = []
for i in range(n_classes):
    sample_idx = np.where(y_data == i)[0]
    n_sample_class = len(sample_idx)
    rand_idx = np.arange(n_sample_class)
    np.random.shuffle(rand_idx)
    train_ind.extend(sample_idx[rand_idx[0:n_train]])
    test_ind.extend(sample_idx[rand_idx[n_train:n_sample]])

train_ind = np.asarray(train_ind)
test_ind = np.asarray(test_ind)

sub_sample_idx = np.hstack((train_ind,test_ind))
x_data_sub = x_data[sub_sample_idx,:,:]
y_data_sub = y_data[sub_sample_idx]
train_ind = np.arange(0,len(train_ind))
test_ind = np.arange(len(train_ind),len(train_ind)+len(test_ind))

### Save data
with open(target_path, 'wb') as f:
    pickle.dump([x_data_sub, y_data_sub, train_ind, test_ind], f)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 15069, 12131, 14533, 31480, 1186, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, ...
2.600327
3,668
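The label filtering in the script above reduces to a few numpy set operations: intersect each sample's label list with the allowed classes, keep samples left with exactly one label, and subtract the too-short clips. A toy illustration of that logic (all arrays here are made up):

import numpy as np

classes = [3, 7, 9]                       # allowed class ids
labels = [[3], [3, 7], [1], [9], [7]]     # per-sample label lists
lengths = np.array([10, 10, 10, 7, 10])   # clip lengths in seconds

filtered = [np.intersect1d(l, classes) for l in labels]
single = np.where(np.array([len(f) for f in filtered]) == 1)[0]  # exactly one label
too_short = np.where(lengths != 10)[0]                           # incomplete clips
valid = np.setdiff1d(single, too_short)
print(valid)  # -> [0 4]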
from django.conf import settings
from django.core import serializers
from django.utils import timezone
import requests
from Posts.commentModel import Comments
#from Posts.commentView import add_Comment
from rest_framework import status
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.response import Response
from django.shortcuts import HttpResponse, render
from requests import get
from .serializers import CommentSerializer, PostSerializer
from Author.serializers import LikeSerializer
from Author.models import Like
from Author.views import updateForeignAuthors, GetForeignAuthors
from .models import Post, Author
from .form import PostForm
from Posts.commentForm import CommentForm
import json
import uuid
import re
import base64
from django.db.models import Q
import django.core
from permissions import CustomAuthentication, AccessPermission
from django.core.paginator import Paginator
import traceback
[ 6738, 42625, 14208, 13, 10414, 1330, 6460, 198, 6738, 42625, 14208, 13, 7295, 1330, 11389, 11341, 198, 6738, 42625, 14208, 13, 26791, 1330, 640, 11340, 198, 11748, 7007, 198, 6738, 12043, 13, 23893, 17633, 1330, 19502, 198, 2, 6738, 12043...
4.061728
243
import pytest
import sys, os
import xarray as xr
import numpy as np

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import process
from process._common import ProcessArgumentInvalid, ProcessArgumentRequired

###################################
# tests:
###################################

def test_with_xarray_out_bounds(execute_array_element_process, generate_data):
    """
    Test array_element process with xarray.DataArrays with out of bounds index
    """
    with pytest.raises(ProcessArgumentInvalid) as ex:
        result = execute_array_element_process(index=5)

    assert ex.value.args[0] == "The argument 'index' in process 'array_element' is invalid: Index out of bounds."
[ 11748, 12972, 9288, 198, 11748, 25064, 11, 28686, 198, 11748, 2124, 18747, 355, 2124, 81, 198, 11748, 299, 32152, 355, 45941, 198, 198, 17597, 13, 6978, 13, 33295, 7, 418, 13, 6978, 13, 15908, 3672, 7, 418, 13, 6978, 13, 15908, 3672, ...
3.235556
225
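The test relies on pytest.raises capturing the exception object so its message can be asserted afterwards; the same pattern in isolation (divide is a made-up function for illustration):

import pytest

def divide(a, b):
    if b == 0:
        raise ValueError("division by zero is invalid")
    return a / b

def test_divide_by_zero():
    with pytest.raises(ValueError) as ex:
        divide(1, 0)
    # ex.value is the raised exception instance
    assert "invalid" in str(ex.value)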
#!/usr/bin/env python
#
# Copyright 2016 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# Generate Android.bp for Skia from GN configuration.

import json
import os
import pprint
import string
import subprocess
import tempfile

import gn_to_bp_utils

# First we start off with a template for Android.bp,
# with holes for source lists and include directories.
bp = string.Template('''// This file is autogenerated by gn_to_bp.py.

cc_library_static {
    name: "libskia",
    cflags: [
        $cflags
    ],
    cppflags:[
        $cflags_cc
    ],

    export_include_dirs: [
        $export_includes
    ],

    local_include_dirs: [
        $local_includes
    ],

    srcs: [
        $srcs
    ],

    arch: {
        arm: {
            srcs: [
                $arm_srcs
            ],
            neon: {
                srcs: [
                    $arm_neon_srcs
                ],
            },
        },
        arm64: {
            srcs: [
                $arm64_srcs
            ],
        },
        mips: {
            srcs: [
                $none_srcs
            ],
        },
        mips64: {
            srcs: [
                $none_srcs
            ],
        },
        x86: {
            srcs: [
                $x86_srcs
            ],
            cflags: [
                // Clang seems to think new/malloc will only be 4-byte aligned
                // on x86 Android. We're pretty sure it's actually 8-byte
                // alignment. tests/OverAlignedTest.cpp has more information,
                // and should fail if we're wrong.
                "-Wno-over-aligned"
            ],
        },
        x86_64: {
            srcs: [
                $x86_srcs
            ],
        },
    },

    defaults: ["skia_deps",
               "skia_pgo",
    ],
}

// Build libskia with PGO by default.
// Location of PGO profile data is defined in build/soong/cc/pgo.go
// and is separate from skia.
// To turn it off, set ANDROID_PGO_NO_PROFILE_USE environment variable
// or set enable_profile_use property to false.
cc_defaults {
    name: "skia_pgo",
    pgo: {
        instrumentation: true,
        profile_file: "hwui/hwui.profdata",
        benchmarks: ["hwui", "skia"],
        enable_profile_use: true,
    },
}

// "defaults" property to disable profile use for Skia tools and benchmarks.
cc_defaults {
    name: "skia_pgo_no_profile_use",
    defaults: [
        "skia_pgo",
    ],
    pgo: {
        enable_profile_use: false,
    },
}

cc_defaults {
    name: "skia_deps",
    shared_libs: [
        "libEGL",
        "libGLESv2",
        "libdng_sdk",
        "libexpat",
        "libft2",
        "libheif",
        "libicui18n",
        "libicuuc",
        "libjpeg",
        "liblog",
        "libpiex",
        "libpng",
        "libvulkan",
        "libz",
        "libcutils",
        "libnativewindow",
    ],
    static_libs: [
        "libarect",
        "libsfntly",
        "libwebp-decode",
        "libwebp-encode",
    ],
    group_static_libs: true,
}

cc_defaults {
    name: "skia_tool_deps",
    defaults: [
        "skia_deps",
        "skia_pgo_no_profile_use"
    ],
    static_libs: [
        "libjsoncpp",
        "libskia",
    ],
    cflags: [
        "-Wno-unused-parameter",
        "-Wno-unused-variable",
    ],
}

cc_test {
    name: "skia_dm",

    defaults: [
        "skia_tool_deps"
    ],

    local_include_dirs: [
        $dm_includes
    ],

    srcs: [
        $dm_srcs
    ],

    shared_libs: [
        "libbinder",
        "libutils",
    ],
}

cc_test {
    name: "skia_nanobench",

    defaults: [
        "skia_tool_deps"
    ],

    local_include_dirs: [
        $nanobench_includes
    ],

    srcs: [
        $nanobench_srcs
    ],

    data: [
        "resources/*",
    ],
}''')

# We'll run GN to get the main source lists and include directories for Skia.
gn_args = {
    'is_official_build': 'true',
    'skia_enable_tools': 'true',
    'skia_enable_skottie': 'false',  # requires rapidjson third-party
    'skia_use_libheif': 'true',
    'skia_use_vulkan': 'true',
    'target_cpu': '"none"',
    'target_os': '"android"',
    'skia_vulkan_header': '"Skia_Vulkan_Android.h"',
}

js = gn_to_bp_utils.GenerateJSONFromGN(gn_args)

srcs = strip_slashes(js['targets']['//:skia']['sources'])
cflags = strip_slashes(js['targets']['//:skia']['cflags'])
cflags_cc = strip_slashes(js['targets']['//:skia']['cflags_cc'])
local_includes = strip_slashes(js['targets']['//:skia']['include_dirs'])
export_includes = strip_slashes(js['targets']['//:public']['include_dirs'])

defines = [str(d) for d in js['targets']['//:skia']['defines']]

dm_srcs = strip_slashes(js['targets']['//:dm']['sources'])
dm_includes = strip_slashes(js['targets']['//:dm']['include_dirs'])

nanobench_target = js['targets']['//:nanobench']
nanobench_srcs = strip_slashes(nanobench_target['sources'])
nanobench_includes = strip_slashes(nanobench_target['include_dirs'])

gn_to_bp_utils.GrabDependentValues(js, '//:skia', 'sources', srcs, None)
gn_to_bp_utils.GrabDependentValues(js, '//:dm', 'sources', dm_srcs, 'skia')
gn_to_bp_utils.GrabDependentValues(js, '//:nanobench', 'sources',
                                   nanobench_srcs, 'skia')

# skcms is a little special, kind of a second-party library.
srcs.add("third_party/skcms/skcms.c")
local_includes.add("third_party/skcms")
dm_includes.add("third_party/skcms")

# No need to list headers.
srcs = {s for s in srcs if not s.endswith('.h')}
dm_srcs = {s for s in dm_srcs if not s.endswith('.h')}
nanobench_srcs = {s for s in nanobench_srcs if not s.endswith('.h')}

cflags = gn_to_bp_utils.CleanupCFlags(cflags)
cflags_cc = gn_to_bp_utils.CleanupCCFlags(cflags_cc)

# We need to add the include path to the vulkan defines and header file set in
# then skia_vulkan_header gn arg that is used for framework builds.
local_includes.add("platform_tools/android/vulkan")
export_includes.add("platform_tools/android/vulkan")

here = os.path.dirname(__file__)
defs = gn_to_bp_utils.GetArchSources(os.path.join(here, 'opts.gni'))

gn_to_bp_utils.WriteUserConfig('include/config/SkUserConfig.h', defines)

# Turn a list of strings into the style bpfmt outputs.

# OK! We have everything to fill in Android.bp...
with open('Android.bp', 'w') as f:
    print >>f, bp.substitute({
        'export_includes': bpfmt(8, export_includes),
        'local_includes': bpfmt(8, local_includes),
        'srcs': bpfmt(8, srcs),
        'cflags': bpfmt(8, cflags, False),
        'cflags_cc': bpfmt(8, cflags_cc),

        'arm_srcs': bpfmt(16, defs['armv7']),
        'arm_neon_srcs': bpfmt(20, defs['neon']),
        'arm64_srcs': bpfmt(16, defs['arm64'] + defs['crc32']),
        'none_srcs': bpfmt(16, defs['none']),
        'x86_srcs': bpfmt(16, defs['sse2'] + defs['ssse3'] + defs['sse41'] + defs['sse42'] + defs['avx'] + defs['hsw']),

        'dm_includes': bpfmt(8, dm_includes),
        'dm_srcs': bpfmt(8, dm_srcs),

        'nanobench_includes': bpfmt(8, nanobench_includes),
        'nanobench_srcs': bpfmt(8, nanobench_srcs),
    })
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 198, 2, 15069, 1584, 3012, 3457, 13, 198, 2, 198, 2, 5765, 286, 428, 2723, 2438, 318, 21825, 416, 257, 347, 10305, 12, 7635, 5964, 326, 460, 307, 198, 2, 1043, 287, 262, 38559, ...
1.904896
3,901
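strip_slashes and bpfmt are referenced but not defined in the record above; the substitution itself is plain string.Template. A small sketch with a hypothetical bpfmt-style formatter (indent each entry, quote it, one per line), just to show the mechanism:

import string

def bpfmt(indent, values, quote=True):
    # Hypothetical helper: render a sorted collection as indented .bp entries.
    fmt = '%s"%s",' if quote else '%s%s,'
    return '\n'.join(fmt % (' ' * indent, v) for v in sorted(values))

bp = string.Template('''cc_library_static {
    name: "libskia",
    srcs: [
$srcs
    ],
}''')

print(bp.substitute({'srcs': bpfmt(8, {'src/core/SkCanvas.cpp', 'src/core/SkPaint.cpp'})}))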
"""The Ray autoscaler uses tags/labels to associate metadata with instances.""" # Tag for the name of the node TAG_RAY_NODE_NAME = "ray-node-name" # Tag for the kind of node (e.g. Head, Worker). For legacy reasons, the tag # value says 'type' instead of 'kind'. TAG_RAY_NODE_KIND = "ray-node-type" NODE_KIND_HEAD = "head" NODE_KIND_WORKER = "worker" NODE_KIND_UNMANAGED = "unmanaged" # Tag for user defined node types (e.g., m4xl_spot). This is used for multi # node type clusters. TAG_RAY_USER_NODE_TYPE = "ray-user-node-type" # Tag for autofilled node types for legacy cluster yamls without multi # node type defined in the cluster configs. NODE_TYPE_LEGACY_HEAD = "ray-legacy-head-node-type" NODE_TYPE_LEGACY_WORKER = "ray-legacy-worker-node-type" # Tag that reports the current state of the node (e.g. Updating, Up-to-date) TAG_RAY_NODE_STATUS = "ray-node-status" STATUS_UNINITIALIZED = "uninitialized" STATUS_WAITING_FOR_SSH = "waiting-for-ssh" STATUS_SYNCING_FILES = "syncing-files" STATUS_SETTING_UP = "setting-up" STATUS_UPDATE_FAILED = "update-failed" STATUS_UP_TO_DATE = "up-to-date" # Tag uniquely identifying all nodes of a cluster TAG_RAY_CLUSTER_NAME = "ray-cluster-name" # Hash of the node launch config, used to identify out-of-date nodes TAG_RAY_LAUNCH_CONFIG = "ray-launch-config" # Hash of the node runtime config, used to determine if updates are needed TAG_RAY_RUNTIME_CONFIG = "ray-runtime-config" # Hash of the contents of the directories specified by the file_mounts config # if the node is a worker, this also hashes content of the directories # specified by the cluster_synced_files config TAG_RAY_FILE_MOUNTS_CONTENTS = "ray-file-mounts-contents"
[ 37811, 464, 7760, 44619, 9948, 263, 3544, 15940, 14, 23912, 1424, 284, 11602, 20150, 351, 10245, 526, 15931, 198, 198, 2, 17467, 329, 262, 1438, 286, 262, 10139, 198, 42197, 62, 30631, 62, 45, 16820, 62, 20608, 796, 366, 2433, 12, 174...
2.858844
588
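In practice these constants end up as a plain tag dictionary attached to each node; a sketch of what a freshly launched head node's tags could look like (all values are illustrative, not taken from Ray itself):

head_node_tags = {
    TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
    TAG_RAY_NODE_NAME: "ray-demo-head",          # illustrative node name
    TAG_RAY_CLUSTER_NAME: "demo-cluster",        # illustrative cluster name
    TAG_RAY_NODE_STATUS: STATUS_UNINITIALIZED,   # set before setup starts
    TAG_RAY_LAUNCH_CONFIG: "f3a9c1",             # hash of the launch config
}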
import unittest
from worldengine.plates import Step, center_land, world_gen
from worldengine.world import World

from tests.draw_test import TestBase


if __name__ == '__main__':
    unittest.main()
[ 11748, 555, 715, 395, 198, 6738, 995, 18392, 13, 17041, 1330, 5012, 11, 3641, 62, 1044, 11, 995, 62, 5235, 198, 6738, 995, 18392, 13, 6894, 1330, 2159, 198, 198, 6738, 5254, 13, 19334, 62, 9288, 1330, 6208, 14881, 628, 198, 198, 361...
3.174603
63
import pytest
import torch

from mmedit.models.builder import build_component
from mmedit.models.components.discriminators.light_cnn import MaxFeature
[ 11748, 12972, 9288, 198, 11748, 28034, 198, 198, 6738, 285, 1150, 270, 13, 27530, 13, 38272, 1330, 1382, 62, 42895, 198, 6738, 285, 1150, 270, 13, 27530, 13, 5589, 3906, 13, 15410, 3036, 47721, 13, 2971, 62, 66, 20471, 1330, 5436, 388...
3.477273
44
import json import logging from typing import Iterable from kafka import KafkaConsumer log = logging.getLogger(__name__) log.addHandler(logging.NullHandler()) # I've used this example: # https://github.com/aiven/aiven-examples/blob/master/kafka/python/consumer_example.py # as well as Aiven Kafka tutorials
[ 11748, 33918, 198, 11748, 18931, 198, 198, 6738, 19720, 1330, 40806, 540, 198, 198, 6738, 479, 1878, 4914, 1330, 46906, 49106, 628, 198, 6404, 796, 18931, 13, 1136, 11187, 1362, 7, 834, 3672, 834, 8, 198, 6404, 13, 2860, 25060, 7, 640...
3.161616
99
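The consumer itself is elided from this sample. A hedged sketch of the loop the cited Aiven example builds, using kafka-python's KafkaConsumer; the topic, broker address, and group id are placeholders:

# Hedged sketch of a consumer loop in the style of the Aiven example
# referenced above; connection details are placeholders.
def consume(topic: str = "demo-topic") -> Iterable[dict]:
    consumer = KafkaConsumer(
        topic,
        bootstrap_servers="my-kafka:9092",   # placeholder
        group_id="demo-group",
        auto_offset_reset="earliest",
        value_deserializer=lambda raw: json.loads(raw.decode("utf-8")),
    )
    for message in consumer:
        log.debug("offset=%s key=%s", message.offset, message.key)
        yield message.value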
from tensorflow.keras import * import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers, Sequential,regularizers from tensorflow.keras.layers import Dropout # from tensorflow.keras import * # 3x3kernel_initializer='he_normal','glorot_normal' from tensorflow.python.keras.layers import Concatenate ############################### ############################### ############################### ############################### ###################################### def build_resblock(self, filter_num, blocks, stride=1): res_blocks = Sequential() # may down sample res_blocks.add(BasicBlock(filter_num, stride)) for _ in range(1, blocks): res_blocks.add(BasicBlock(filter_num, stride=1)) return res_blocks ###################################### ########################### pp2 ######################################## def network_up(input_layer_up,filters_num,dropout_rate,Block_res): # input_layer = Input(input_shape) # conv1 = layers.Conv3D(filters_num[0], kernel_size=(3, 3, 7), padding='same')(input_layer) # filters_num = 8 # conv1 = layers.Conv3D(filters_num[0], kernel_size=(3, 3, 3),padding='same',kernel_initializer='he_normal',kernel_regularizer=regularizers.l2(0.0001))(input_layer_up) # filters_num = 8 conv1 = layers.Conv3D(filters_num[0], kernel_size=(3, 3, 3), padding='same', kernel_regularizer=regularizers.l2(0.0001))(input_layer_up) #kernel_initializer='he_normal', # conv_layer1m = tf.keras.layers.MaxPooling3D(pool_size=(1, 1, 1),padding='same')(conv1) # conv_layer1g = tf.keras.layers.GlobalMaxPooling3D()(conv1) conv1_bn = layers.BatchNormalization()(conv1) conv1_relu = layers.Activation('relu')(conv1_bn) # conv1_relu = Dropout(0.5)(conv1_relu) # conv1_relu = tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same')(conv1_relu) # conv2 = layers.Conv3D(filters_num[1], kernel_size=(3, 3, 5), padding='same')(conv1_relu) # filters_num = 16 conv2 = layers.Conv3D(filters_num[1], kernel_size=(3, 3, 3),padding='same',kernel_regularizer=regularizers.l2(0.0001))(conv1_relu) # filters_num = 16 conv2_bn = layers.BatchNormalization()(conv2) conv2_relu = layers.Activation('relu')(conv2_bn) # conv2_relu = Dropout(0.5)(conv2_relu) # conv2_relu = tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same')(conv2_relu) conv3 = layers.Conv3D(filters_num[2], kernel_size=(3, 3, 3),padding='same',kernel_regularizer=regularizers.l2(0.0001))(conv2_relu) # filters_num = 32 conv3_bn = layers.BatchNormalization()(conv3) conv3_relu = layers.Activation('relu')(conv3_bn) # conv3_relu = Dropout(0.5)(conv3_relu) # conv3_relu = tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same')(conv3_relu) conv3_relu_reshape = layers.Reshape((conv3_relu.shape[1],conv3_relu.shape[2],conv3_relu.shape[3]*conv3_relu.shape[4]))(conv3_relu) conv3_relu_reshape = Dropout(0.5)(conv3_relu_reshape) ########################################### # conv11 = layers.Conv3D(filters_num[0], kernel_size=(5, 5, 3), padding='same', # kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(0.0001))(input_layer_up) # conv11_bn = layers.BatchNormalization()(conv11) # conv11_relu = layers.Activation('relu')(conv11_bn) # # # conv2 = layers.Conv3D(filters_num[1], kernel_size=(3, 3, 5), padding='same')(conv1_relu) # filters_num = 16 # conv22 = layers.Conv3D(filters_num[1], kernel_size=(5, 5, 3), padding='same', kernel_initializer='he_normal', # kernel_regularizer=regularizers.l2(0.0001))(conv11_relu) # filters_num = 16 # conv22_bn = 
layers.BatchNormalization()(conv22) # conv22_relu = layers.Activation('relu')(conv22_bn) # # conv33 = layers.Conv3D(filters_num[2], kernel_size=(5, 5, 3), padding='same', kernel_initializer='he_normal', # kernel_regularizer=regularizers.l2(0.0001))(conv22_relu) # filters_num = 32 # conv33_bn = layers.BatchNormalization()(conv33) # conv33_relu = layers.Activation('relu')(conv33_bn) # # conv33_relu_reshape = layers.Reshape( # (conv3_relu.shape[1], conv3_relu.shape[2], conv3_relu.shape[3] * conv3_relu.shape[4]))(conv33_relu) #################################################### # conv111 = layers.Conv3D(filters_num[0], kernel_size=(7, 7, 3), padding='same', # kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(0.0001))(input_layer_up) # conv111_bn = layers.BatchNormalization()(conv111) # conv111_relu = layers.Activation('relu')(conv111_bn) # # # conv2 = layers.Conv3D(filters_num[1], kernel_size=(3, 3, 5), padding='same')(conv1_relu) # filters_num = 16 # conv222 = layers.Conv3D(filters_num[1], kernel_size=(7, 7, 3), padding='same', kernel_initializer='he_normal', # kernel_regularizer=regularizers.l2(0.0001))(conv111_relu) # filters_num = 16 # conv222_bn = layers.BatchNormalization()(conv222) # conv222_relu = layers.Activation('relu')(conv222_bn) # # conv333 = layers.Conv3D(filters_num[2], kernel_size=(7, 7, 3), padding='same', kernel_initializer='he_normal', # kernel_regularizer=regularizers.l2(0.0001))(conv222_relu) # filters_num = 32 # conv333_bn = layers.BatchNormalization()(conv333) # conv333_relu = layers.Activation('relu')(conv333_bn) # # conv333_relu_reshape = layers.Reshape( # (conv3_relu.shape[1], conv3_relu.shape[2], conv3_relu.shape[3] * conv3_relu.shape[4]))(conv333_relu) #################concatenate######################## # conv33333_relu_reshape = Concatenate(axis=-1)([conv3_relu_reshape, conv33_relu_reshape]) ######################################### conv4 = layers.Conv2D(filters_num[3], kernel_size=(3, 3), padding='same',kernel_regularizer=regularizers.l2(0.0001))(conv3_relu_reshape) # filters_num = 64 conv4_bn = layers.BatchNormalization()(conv4) conv4_relu = layers.Activation('relu')(conv4_bn) # conv4_relu = Dropout(0.5)(conv4_relu) # conv4_relu = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same')(conv4_relu) # conv4_relu = tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same')(conv4_relu) conv5 = layers.Conv2D(filters_num[4], kernel_size=(3, 3), padding='same',kernel_regularizer=regularizers.l2(0.0001))(conv4_relu) # filters_num = ** conv5_bn = layers.BatchNormalization()(conv5) conv5_relu = layers.Activation('relu')(conv5_bn) # conv5_relu = Dropout(0.5)(conv5_relu) # conv5_relu = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same')(conv5_relu) # conv5_relu = tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same')(conv5_relu) # conv5_dpout = layers.Dropout(dropout_rate)(conv5) # conv5_reshape = layers.Reshape((conv5_dpout.shape[1],conv5_dpout.shape[2],conv5_dpout.shape[3]))(conv5_dpout) outputs2,outputs4 = Block_res(conv5_relu) return conv5,outputs2,outputs4 # layer1 = build_resblock(filters_num[5], layer_dims[0]) # filters_num = 64 # layer2 = build_resblock(filters_num[6], layer_dims[1], stride=2) # filters_num = 128 # layer3 = build_resblock(filters_num[7], layer_dims[2], stride=2) # filters_num = 256 # layer4 = build_resblock(filters_num[8], layer_dims[3], stride=2) # filters_num = 512
[ 201, 198, 6738, 11192, 273, 11125, 13, 6122, 292, 1330, 1635, 201, 198, 11748, 220, 11192, 273, 11125, 355, 48700, 201, 198, 6738, 220, 220, 220, 11192, 273, 11125, 1330, 41927, 292, 201, 198, 6738, 220, 220, 220, 11192, 273, 11125, 1...
2.277826
3,441
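A hedged sketch of how network_up() might be invoked; the input shape, filter counts, and the Block_res stand-in below are assumptions, not the author's configuration:

# Hedged usage sketch for network_up(); shapes and filters are assumed.
import tensorflow as tf
from tensorflow import keras

def _identity_block_res(x):
    # Stand-in for the residual head: the real Block_res returns two
    # outputs (see "outputs2, outputs4" above); here both are x itself.
    return x, x

inputs = keras.Input(shape=(9, 9, 30, 1))       # H, W, bands, channels
filters_num = [8, 16, 32, 64, 128]
conv5, out2, out4 = network_up(inputs, filters_num, dropout_rate=0.5,
                               Block_res=_identity_block_res)
model = keras.Model(inputs, out4)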
""" A universal module with functions / classes without dependencies. """ import sys import contextlib import functools import re import os from medi._compatibility import reraise _sep = os.path.sep if os.path.altsep is not None: _sep += os.path.altsep _path_re = re.compile(r'(?:\.[^{0}]+|[{0}]__init__\.py)$'.format(re.escape(_sep))) del _sep def unite(iterable): """Turns a two dimensional array into a one dimensional.""" return set(typ for types in iterable for typ in types) def reraise_uncaught(func): """ Re-throw uncaught `AttributeError`. Usage: Put ``@rethrow_uncaught`` in front of the function which does **not** suppose to raise `AttributeError`. AttributeError is easily get caught by `hasattr` and another ``except AttributeError`` clause. This becomes problem when you use a lot of "dynamic" attributes (e.g., using ``@property``) because you can't distinguish if the property does not exist for real or some code inside of the "dynamic" attribute through that error. In a well written code, such error should not exist but getting there is very difficult. This decorator is to help us getting there by changing `AttributeError` to `UncaughtAttributeError` to avoid unexpected catch. This helps us noticing bugs earlier and facilitates debugging. .. note:: Treating StopIteration here is easy. Add that feature when needed. """ return wrapper
[ 37811, 317, 10112, 8265, 351, 5499, 1220, 6097, 1231, 20086, 13, 37227, 198, 11748, 25064, 198, 11748, 4732, 8019, 198, 11748, 1257, 310, 10141, 198, 11748, 302, 198, 11748, 28686, 198, 198, 6738, 16957, 13557, 5589, 25901, 1330, 302, 402...
3.202614
459
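A usage sketch for the decorator above, with a hypothetical class:

# Hypothetical example: decorate code that should never raise
# AttributeError so that real bugs are not swallowed by hasattr().
class _Example:
    @reraise_uncaught
    def description(self):
        return self.name.upper()   # bug: no 'name' attribute

# _Example().description() now raises UncaughtAttributeError instead of
# an AttributeError that an outer hasattr() would silently eat.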
"""Bioconductor run git transition code. This module assembles the classes for the SVN --> Git transition can be run in a sequential manner. It runs the following aspects fo the Bioconductor transition. Note: Update the SVN dump 1. Run Bioconductor Software package transition 2. Run Bioconductor Experiment Data package transition 3. Run Workflow package transition 4. Run Manifest file transition 5. Run Rapid update of master (trunk) and RELEASE_3_5 branches on software packages Manual tasks which need to be done: 1. Copy over bare repos to repositories/packages 2. Copy manifest bare git repo to repositories/admin """ import src.run_transition as rt import src.svn_dump_update as sdu import logging import time logging.basicConfig(filename='transition.log', format='%(levelname)s %(asctime)s %(message)s', level=logging.DEBUG) if __name__ == '__main__': start_time = time.time() config_file = "./settings.ini" svn_dump_update(config_file) run(config_file) # TODO: Run updates after dump update svn_dump_update(config_file) rt.run_updates(config_file) logging.info("--- %s seconds ---" % (time.time() - start_time))
[ 37811, 23286, 420, 40990, 1057, 17606, 6801, 2438, 13, 198, 198, 1212, 8265, 11156, 829, 262, 6097, 329, 262, 20546, 45, 14610, 15151, 6801, 198, 5171, 307, 1057, 287, 257, 35582, 5642, 13, 198, 198, 1026, 4539, 262, 1708, 7612, 11511, ...
3.015038
399
from __future__ import with_statement from .. import Lock, NeedRegenerationException from ..util import NameRegistry from . import exception from ..util import PluginLoader, memoized_property, coerce_string_conf from .util import function_key_generator, function_multi_key_generator from .api import NO_VALUE, CachedValue from .proxy import ProxyBackend from ..util import compat import time import datetime from numbers import Number from functools import wraps import threading _backend_loader = PluginLoader("dogpile.cache") register_backend = _backend_loader.register from . import backends # noqa value_version = 1 """An integer placed in the :class:`.CachedValue` so that new versions of dogpile.cache can detect cached values from a previous, backwards-incompatible version. """ def _unexpired_value_fn(self, expiration_time, ignore_expiration): if ignore_expiration: return lambda value: value else: if expiration_time is None: expiration_time = self.expiration_time current_time = time.time() return value_fn def get_multi(self, keys, expiration_time=None, ignore_expiration=False): """Return multiple values from the cache, based on the given keys. Returns values as a list matching the keys given. E.g.:: values = region.get_multi(["one", "two", "three"]) To convert values to a dictionary, use ``zip()``:: keys = ["one", "two", "three"] values = region.get_multi(keys) dictionary = dict(zip(keys, values)) Keys which aren't present in the list are returned as the ``NO_VALUE`` token. ``NO_VALUE`` evaluates to False, but is separate from ``None`` to distinguish between a cached value of ``None``. By default, the configured expiration time of the :class:`.CacheRegion`, or alternatively the expiration time supplied by the ``expiration_time`` argument, is tested against the creation time of the retrieved value versus the current time (as reported by ``time.time()``). If stale, the cached value is ignored and the ``NO_VALUE`` token is returned. Passing the flag ``ignore_expiration=True`` bypasses the expiration time check. .. versionadded:: 0.5.0 """ if not keys: return [] if self.key_mangler: keys = list(map(lambda key: self.key_mangler(key), keys)) backend_values = self.backend.get_multi(keys) _unexpired_value_fn = self._unexpired_value_fn( expiration_time, ignore_expiration) return [ value.payload if value is not NO_VALUE else value for value in ( _unexpired_value_fn(value) for value in backend_values ) ] def get_or_create( self, key, creator, expiration_time=None, should_cache_fn=None): """Return a cached value based on the given key. If the value does not exist or is considered to be expired based on its creation time, the given creation function may or may not be used to recreate the value and persist the newly generated value in the cache. Whether or not the function is used depends on if the *dogpile lock* can be acquired or not. If it can't, it means a different thread or process is already running a creation function for this key against the cache. When the dogpile lock cannot be acquired, the method will block if no previous value is available, until the lock is released and a new value available. If a previous value is available, that value is returned immediately without blocking. If the :meth:`.invalidate` method has been called, and the retrieved value's timestamp is older than the invalidation timestamp, the value is unconditionally prevented from being returned. 
The method will attempt to acquire the dogpile lock to generate a new value, or will wait until the lock is released to return the new value. .. versionchanged:: 0.3.0 The value is unconditionally regenerated if the creation time is older than the last call to :meth:`.invalidate`. :param key: Key to be retrieved. While it's typical for a key to be a string, it is ultimately passed directly down to the cache backend, before being optionally processed by the key_mangler function, so can be of any type recognized by the backend or by the key_mangler function, if present. :param creator: function which creates a new value. :param expiration_time: optional expiration time which will overide the expiration time already configured on this :class:`.CacheRegion` if not None. To set no expiration, use the value -1. :param should_cache_fn: optional callable function which will receive the value returned by the "creator", and will then return True or False, indicating if the value should actually be cached or not. If it returns False, the value is still returned, but isn't cached. E.g.:: def dont_cache_none(value): return value is not None value = region.get_or_create("some key", create_value, should_cache_fn=dont_cache_none) Above, the function returns the value of create_value() if the cache is invalid, however if the return value is None, it won't be cached. .. versionadded:: 0.4.3 .. seealso:: :meth:`.CacheRegion.cache_on_arguments` - applies :meth:`.get_or_create` to any function using a decorator. :meth:`.CacheRegion.get_or_create_multi` - multiple key/value version """ orig_key = key if self.key_mangler: key = self.key_mangler(key) if expiration_time is None: expiration_time = self.expiration_time if (expiration_time is None and self.region_invalidator.was_soft_invalidated()): raise exception.DogpileCacheException( "Non-None expiration time required " "for soft invalidation") if expiration_time == -1: expiration_time = None if self.async_creation_runner: else: async_creator = None with Lock( self._mutex(key), gen_value, get_value, expiration_time, async_creator) as value: return value def get_or_create_multi( self, keys, creator, expiration_time=None, should_cache_fn=None): """Return a sequence of cached values based on a sequence of keys. The behavior for generation of values based on keys corresponds to that of :meth:`.Region.get_or_create`, with the exception that the ``creator()`` function may be asked to generate any subset of the given keys. The list of keys to be generated is passed to ``creator()``, and ``creator()`` should return the generated values as a sequence corresponding to the order of the keys. The method uses the same approach as :meth:`.Region.get_multi` and :meth:`.Region.set_multi` to get and set values from the backend. If you are using a :class:`.CacheBackend` or :class:`.ProxyBackend` that modifies values, take note this function invokes ``.set_multi()`` for newly generated values using the same values it returns to the calling function. A correct implementation of ``.set_multi()`` will not modify values in-place on the submitted ``mapping`` dict. :param keys: Sequence of keys to be retrieved. :param creator: function which accepts a sequence of keys and returns a sequence of new values. :param expiration_time: optional expiration time which will overide the expiration time already configured on this :class:`.CacheRegion` if not None. To set no expiration, use the value -1. 
:param should_cache_fn: optional callable function which will receive each value returned by the "creator", and will then return True or False, indicating if the value should actually be cached or not. If it returns False, the value is still returned, but isn't cached. .. versionadded:: 0.5.0 .. seealso:: :meth:`.CacheRegion.cache_multi_on_arguments` :meth:`.CacheRegion.get_or_create` """ if expiration_time is None: expiration_time = self.expiration_time if (expiration_time is None and self.region_invalidator.was_soft_invalidated()): raise exception.DogpileCacheException( "Non-None expiration time required " "for soft invalidation") if expiration_time == -1: expiration_time = None mutexes = {} sorted_unique_keys = sorted(set(keys)) if self.key_mangler: mangled_keys = [self.key_mangler(k) for k in sorted_unique_keys] else: mangled_keys = sorted_unique_keys orig_to_mangled = dict(zip(sorted_unique_keys, mangled_keys)) values = dict(zip(mangled_keys, self.backend.get_multi(mangled_keys))) for orig_key, mangled_key in orig_to_mangled.items(): with Lock( self._mutex(mangled_key), gen_value, lambda: get_value(mangled_key), expiration_time, async_creator=lambda mutex: async_creator(orig_key, mutex) ): pass try: if mutexes: # sort the keys, the idea is to prevent deadlocks. # though haven't been able to simulate one anyway. keys_to_get = sorted(mutexes) new_values = creator(*keys_to_get) values_w_created = dict( (orig_to_mangled[k], self._value(v)) for k, v in zip(keys_to_get, new_values) ) if not should_cache_fn: self.backend.set_multi(values_w_created) else: self.backend.set_multi(dict( (k, v) for k, v in values_w_created.items() if should_cache_fn(v[0]) )) values.update(values_w_created) return [values[orig_to_mangled[k]].payload for k in keys] finally: for mutex in mutexes.values(): mutex.release() def _value(self, value): """Return a :class:`.CachedValue` given a value.""" return CachedValue( value, { "ct": time.time(), "v": value_version }) def set(self, key, value): """Place a new value in the cache under the given key.""" if self.key_mangler: key = self.key_mangler(key) self.backend.set(key, self._value(value)) def set_multi(self, mapping): """Place new values in the cache under the given keys. .. versionadded:: 0.5.0 """ if not mapping: return if self.key_mangler: mapping = dict(( self.key_mangler(k), self._value(v)) for k, v in mapping.items()) else: mapping = dict((k, self._value(v)) for k, v in mapping.items()) self.backend.set_multi(mapping) def delete(self, key): """Remove a value from the cache. This operation is idempotent (can be called multiple times, or on a non-existent key, safely) """ if self.key_mangler: key = self.key_mangler(key) self.backend.delete(key) def delete_multi(self, keys): """Remove multiple values from the cache. This operation is idempotent (can be called multiple times, or on a non-existent key, safely) .. versionadded:: 0.5.0 """ if self.key_mangler: keys = list(map(lambda key: self.key_mangler(key), keys)) self.backend.delete_multi(keys) def cache_on_arguments( self, namespace=None, expiration_time=None, should_cache_fn=None, to_str=compat.string_type, function_key_generator=None): """A function decorator that will cache the return value of the function using a key derived from the function itself and its arguments. The decorator internally makes use of the :meth:`.CacheRegion.get_or_create` method to access the cache and conditionally call the function. See that method for additional behavioral details. 
E.g.:: @someregion.cache_on_arguments() def generate_something(x, y): return somedatabase.query(x, y) The decorated function can then be called normally, where data will be pulled from the cache region unless a new value is needed:: result = generate_something(5, 6) The function is also given an attribute ``invalidate()``, which provides for invalidation of the value. Pass to ``invalidate()`` the same arguments you'd pass to the function itself to represent a particular value:: generate_something.invalidate(5, 6) Another attribute ``set()`` is added to provide extra caching possibilities relative to the function. This is a convenience method for :meth:`.CacheRegion.set` which will store a given value directly without calling the decorated function. The value to be cached is passed as the first argument, and the arguments which would normally be passed to the function should follow:: generate_something.set(3, 5, 6) The above example is equivalent to calling ``generate_something(5, 6)``, if the function were to produce the value ``3`` as the value to be cached. .. versionadded:: 0.4.1 Added ``set()`` method to decorated function. Similar to ``set()`` is ``refresh()``. This attribute will invoke the decorated function and populate a new value into the cache with the new value, as well as returning that value:: newvalue = generate_something.refresh(5, 6) .. versionadded:: 0.5.0 Added ``refresh()`` method to decorated function. Lastly, the ``get()`` method returns either the value cached for the given key, or the token ``NO_VALUE`` if no such key exists:: value = generate_something.get(5, 6) .. versionadded:: 0.5.3 Added ``get()`` method to decorated function. The default key generation will use the name of the function, the module name for the function, the arguments passed, as well as an optional "namespace" parameter in order to generate a cache key. Given a function ``one`` inside the module ``myapp.tools``:: @region.cache_on_arguments(namespace="foo") def one(a, b): return a + b Above, calling ``one(3, 4)`` will produce a cache key as follows:: myapp.tools:one|foo|3 4 The key generator will ignore an initial argument of ``self`` or ``cls``, making the decorator suitable (with caveats) for use with instance or class methods. Given the example:: class MyClass(object): @region.cache_on_arguments(namespace="foo") def one(self, a, b): return a + b The cache key above for ``MyClass().one(3, 4)`` will again produce the same cache key of ``myapp.tools:one|foo|3 4`` - the name ``self`` is skipped. The ``namespace`` parameter is optional, and is used normally to disambiguate two functions of the same name within the same module, as can occur when decorating instance or class methods as below:: class MyClass(object): @region.cache_on_arguments(namespace='MC') def somemethod(self, x, y): "" class MyOtherClass(object): @region.cache_on_arguments(namespace='MOC') def somemethod(self, x, y): "" Above, the ``namespace`` parameter disambiguates between ``somemethod`` on ``MyClass`` and ``MyOtherClass``. Python class declaration mechanics otherwise prevent the decorator from having awareness of the ``MyClass`` and ``MyOtherClass`` names, as the function is received by the decorator before it becomes an instance method. The function key generation can be entirely replaced on a per-region basis using the ``function_key_generator`` argument present on :func:`.make_region` and :class:`.CacheRegion`. If defaults to :func:`.function_key_generator`. 
:param namespace: optional string argument which will be established as part of the cache key. This may be needed to disambiguate functions of the same name within the same source file, such as those associated with classes - note that the decorator itself can't see the parent class on a function as the class is being declared. :param expiration_time: if not None, will override the normal expiration time. May be specified as a callable, taking no arguments, that returns a value to be used as the ``expiration_time``. This callable will be called whenever the decorated function itself is called, in caching or retrieving. Thus, this can be used to determine a *dynamic* expiration time for the cached function result. Example use cases include "cache the result until the end of the day, week or time period" and "cache until a certain date or time passes". .. versionchanged:: 0.5.0 ``expiration_time`` may be passed as a callable to :meth:`.CacheRegion.cache_on_arguments`. :param should_cache_fn: passed to :meth:`.CacheRegion.get_or_create`. .. versionadded:: 0.4.3 :param to_str: callable, will be called on each function argument in order to convert to a string. Defaults to ``str()``. If the function accepts non-ascii unicode arguments on Python 2.x, the ``unicode()`` builtin can be substituted, but note this will produce unicode cache keys which may require key mangling before reaching the cache. .. versionadded:: 0.5.0 :param function_key_generator: a function that will produce a "cache key". This function will supersede the one configured on the :class:`.CacheRegion` itself. .. versionadded:: 0.5.5 .. seealso:: :meth:`.CacheRegion.cache_multi_on_arguments` :meth:`.CacheRegion.get_or_create` """ expiration_time_is_callable = compat.callable(expiration_time) if function_key_generator is None: function_key_generator = self.function_key_generator return decorator def cache_multi_on_arguments( self, namespace=None, expiration_time=None, should_cache_fn=None, asdict=False, to_str=compat.string_type, function_multi_key_generator=None): """A function decorator that will cache multiple return values from the function using a sequence of keys derived from the function itself and the arguments passed to it. This method is the "multiple key" analogue to the :meth:`.CacheRegion.cache_on_arguments` method. Example:: @someregion.cache_multi_on_arguments() def generate_something(*keys): return [ somedatabase.query(key) for key in keys ] The decorated function can be called normally. The decorator will produce a list of cache keys using a mechanism similar to that of :meth:`.CacheRegion.cache_on_arguments`, combining the name of the function with the optional namespace and with the string form of each key. It will then consult the cache using the same mechanism as that of :meth:`.CacheRegion.get_multi` to retrieve all current values; the originally passed keys corresponding to those values which aren't generated or need regeneration will be assembled into a new argument list, and the decorated function is then called with that subset of arguments. The returned result is a list:: result = generate_something("key1", "key2", "key3") The decorator internally makes use of the :meth:`.CacheRegion.get_or_create_multi` method to access the cache and conditionally call the function. See that method for additional behavioral details. 
Unlike the :meth:`.CacheRegion.cache_on_arguments` method, :meth:`.CacheRegion.cache_multi_on_arguments` works only with a single function signature, one which takes a simple list of keys as arguments. Like :meth:`.CacheRegion.cache_on_arguments`, the decorated function is also provided with a ``set()`` method, which here accepts a mapping of keys and values to set in the cache:: generate_something.set({"k1": "value1", "k2": "value2", "k3": "value3"}) ...an ``invalidate()`` method, which has the effect of deleting the given sequence of keys using the same mechanism as that of :meth:`.CacheRegion.delete_multi`:: generate_something.invalidate("k1", "k2", "k3") ...a ``refresh()`` method, which will call the creation function, cache the new values, and return them:: values = generate_something.refresh("k1", "k2", "k3") ...and a ``get()`` method, which will return values based on the given arguments:: values = generate_something.get("k1", "k2", "k3") .. versionadded:: 0.5.3 Added ``get()`` method to decorated function. Parameters passed to :meth:`.CacheRegion.cache_multi_on_arguments` have the same meaning as those passed to :meth:`.CacheRegion.cache_on_arguments`. :param namespace: optional string argument which will be established as part of each cache key. :param expiration_time: if not None, will override the normal expiration time. May be passed as an integer or a callable. :param should_cache_fn: passed to :meth:`.CacheRegion.get_or_create_multi`. This function is given a value as returned by the creator, and only if it returns True will that value be placed in the cache. :param asdict: if ``True``, the decorated function should return its result as a dictionary of keys->values, and the final result of calling the decorated function will also be a dictionary. If left at its default value of ``False``, the decorated function should return its result as a list of values, and the final result of calling the decorated function will also be a list. When ``asdict==True`` if the dictionary returned by the decorated function is missing keys, those keys will not be cached. :param to_str: callable, will be called on each function argument in order to convert to a string. Defaults to ``str()``. If the function accepts non-ascii unicode arguments on Python 2.x, the ``unicode()`` builtin can be substituted, but note this will produce unicode cache keys which may require key mangling before reaching the cache. .. versionadded:: 0.5.0 :param function_multi_key_generator: a function that will produce a list of keys. This function will supersede the one configured on the :class:`.CacheRegion` itself. .. versionadded:: 0.5.5 .. seealso:: :meth:`.CacheRegion.cache_on_arguments` :meth:`.CacheRegion.get_or_create_multi` """ expiration_time_is_callable = compat.callable(expiration_time) if function_multi_key_generator is None: function_multi_key_generator = self.function_multi_key_generator return decorator def make_region(*arg, **kw): """Instantiate a new :class:`.CacheRegion`. Currently, :func:`.make_region` is a passthrough to :class:`.CacheRegion`. See that class for constructor arguments. """ return CacheRegion(*arg, **kw)
[ 6738, 11593, 37443, 834, 1330, 351, 62, 26090, 198, 6738, 11485, 1330, 13656, 11, 10664, 8081, 877, 341, 16922, 198, 6738, 11485, 22602, 1330, 6530, 8081, 4592, 198, 6738, 764, 1330, 6631, 198, 6738, 11485, 22602, 1330, 42636, 17401, 11, ...
2.491484
10,216
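The docstrings above describe the region API in detail; a short usage example following dogpile.cache's documented interface (the memory backend is chosen only for brevity, any configured backend behaves the same way):

# Usage example for the CacheRegion API documented above.
from dogpile.cache import make_region

region = make_region().configure(
    "dogpile.cache.memory",
    expiration_time=300,
)

@region.cache_on_arguments(namespace="users")
def load_user(user_id):
    return {"id": user_id}          # stand-in for a database query

load_user(12)                       # computed and cached
load_user(12)                       # served from the cache
load_user.invalidate(12)            # drops the cached entry
load_user.set({"id": 12}, 12)       # primes the cache directly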
# -*- coding: utf-8 -*-

"""
Module projectparallelprogrammeren.codesimulatie
=================================================================

This module simulates everything.
"""

import projectparallelprogrammeren

def simulatie():
    """
    This function runs all versions so that they can be compared in terms of timing.
    """
    from importlib import import_module

    for i in range(4):
        # import all versions of the simulation and run them one after another.
        version = f"montecarlo_v{i}"
        montecarlo = import_module(version)
        montecarlo.simulatie(100,50) # these values serve only as a test

if __name__ == "__main__":
    simulatie()

#eof
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 37811, 198, 26796, 1628, 1845, 29363, 23065, 76, 14226, 13, 40148, 320, 377, 265, 494, 220, 198, 23926, 28, 198, 198, 5005, 2736, 8265, 985, 2261, 861, 477, 274, ...
2.780172
232
#!/usr/bin/env python

from __future__ import unicode_literals

# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from haruhi_dl.aes import aes_decrypt, aes_encrypt, aes_cbc_decrypt, aes_cbc_encrypt, aes_decrypt_text
from haruhi_dl.utils import bytes_to_intlist, intlist_to_bytes
import base64

# the encrypted data can be generated with 'devscripts/generate_aes_testdata.py'

if __name__ == '__main__':
    unittest.main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 198, 2, 22507, 1277, 9706, 198, 11748, 28686, 198, 11748, 25064, 198, 11748, 555, 715, 395, 198, 17597, 13, 6978, ...
2.629442
197
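The test class itself is elided from this sample. A hedged sketch of the kind of round-trip check it would contain, assuming the int-list based signatures and 8-byte block padding used by youtube-dl-style codebases:

# Hedged sketch of an elided test; signatures and padding are assumed.
class TestAES(unittest.TestCase):
    def setUp(self):
        self.key = self.iv = [0x20, 0x15] + 14 * [0]
        self.secret_msg = b'Secret message goes here'

    def test_cbc_roundtrip(self):
        data = bytes_to_intlist(self.secret_msg)
        encrypted = aes_cbc_encrypt(data, self.key, self.iv)
        decrypted = intlist_to_bytes(
            aes_cbc_decrypt(encrypted, self.key, self.iv))
        # strip the assumed block padding before comparing
        self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg)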
import dash_core_components as dcc import dash_html_components as html import dash_bootstrap_components as dbc def generate_dropdown_selection(): "return a Div containing the dropdown selection box" return dcc.Dropdown( id='dropdown_select_process', style={"display": "none"}, options=[ {'label': 'Homogeneous Poisson process', 'value': 1}, {'label': 'Inhomogeneous Poisson process', 'value': 2}, {'label': 'Cluster process', 'value': 3}, # {'label': 'Strauss process', 'value': 4} ], # set the initial value=0 to hide the user input interface value=0) def generate_user_input(): "return a Div containing users' input interface" input_n_toolkits = html.Div(html.Div([html.A('Number of transects:', className='col-sm-4'), dcc.Input( type='number', placeholder=2, value = 2, id='input_n_toolkits', className='col-sm-4' ) ], className='row'), id='input_n_toolkits_container', style={'display': 'none'}) # slider # input_n_toolkits = html.Div(html.Div([ # html.A("Number of transects",className='col-sm-4'), # dcc.Slider(min=1, # max=5, # step=1, # value=2, # marks={i: '{}'.format(i) for i in range(1, 6)}, # id='input_n_toolkits', # className='col-sm-4') # ], className='row'), id='input_n_toolkits_container', # className='row', # style={'display': 'none'}) input_disease_prevalence = html.Div(html.Div([html.A('disease prevalence: ', id='input_disease_prevalence_tooltip', className='col-sm-4'), dcc.Input( type='number', placeholder=0.1, value = 0.1, step=0.1, min=0, max=1, id='input_disease_prevalence', className='col-sm-4' ) ], className='row'), id='input_disease_prevalence_container', style={'display': 'none'}) input_disease_prevalence_tooltip = dbc.Tooltip('the proportion of corals which get infected by a disease', target='input_disease_prevalence_tooltip') # text or number input input_fun_lambda = html.Div(html.Div([html.A('proportion cover function:', className='col-sm-4'), dcc.Input( id="input_fun_lambda", type='text', placeholder="1000 * np.exp(-(((x - 50) / 50) ** 2 + ((y - 50) / 50) ** 2) / 0.5 ** 2)", value="1000 * np.exp(-(((x - 50) / 50) ** 2 + ((y - 50) / 50) ** 2) / 0.5 ** 2)", className='col-sm-4' )],className='row'),id='show_input_fun_lambda',style={'display':'none'}) input_parent_prop = html.Div(html.Div([html.A('parent corals / total corals:', className='col-sm-4'), dcc.Input( id="input_parent_prop", type='number', placeholder=0.01, value=0.01, step=0.01, className='col-sm-4' )],className='row'),id='show_input_parent_prop',style={'display':'none'}) input_parent_range = html.Div(html.Div([html.A('parent range:', className='col-sm-4'), dcc.Input( id="input_parent_range", type='number', placeholder=5, value=5, className='col-sm-4' )],className='row'),id='show_input_parent_range',style={'display':'none'}) input_strauss_beta = dcc.Input( id="input_strauss_beta", type='number', placeholder="strauss_beta", style={'display': 'none'} ) input_strauss_gamma = dcc.Input( id="input_strauss_gamma", type='number', placeholder="strauss_gamma", style={'display': 'none'} ) input_strauss_R = dcc.Input( id="input_strauss_R", type='number', placeholder="strauss_R", style={'display': 'none'} ) input_transect_length = html.Div(html.Div([html.A('transect width (m): ', className='col-sm-4'), dcc.Input( type='number', placeholder=25, value=25, id='dcc_input_transect_length', className='col-sm-4' ) ], className='row'), id='input_transect_length', style={'display': 'none'}) input_transect_width = html.Div(html.Div([html.A('transect length (m): ', className='col-sm-4'), dcc.Input( type='number', placeholder=6, value = 6, 
id='dcc_input_transect_width', className='col-sm-4' ) ], className='row'), id='input_transect_width', style={'display': 'none'}) line_intercept_ratio = html.Div(html.Div([html.A('transect width / plot width', className='col-sm-4'), dcc.Input( type='number', placeholder=1/5, value = 1/5, step=0.1, id='dcc_line_intercept_ratio', className='col-sm-4') ],className='row'), id='line_intercept_ratio', style={'display': 'none'}) coral_size = html.Div(html.Div([html.A('coral size (m^2): ', id='coral_size_tooltip',className='col-sm-4'), dcc.Input( type='number', placeholder=0.0068, value = 0.0068, step=0.0001, id='coral_size', className='col-sm-4' ) ],className='row' ), id='coral_size_input', style={'display': 'none'}) coral_size_tooltip = dbc.Tooltip('the average size of an individual coral, measured in m^3', target='coral_size_tooltip') coral_size_std = html.Div(html.Div([html.A('coral size standard error: ', id='coral_size_std_tooltip', className='col-sm-4'), dcc.Input( type='number', placeholder=0.001, value = 0.001, step=0.001, id='coral_size_std', className='col-sm-4' )], className='row') , id='coral_size_std_input', style={'display': 'none'}) coral_size_std_tooltip = dbc.Tooltip('the standard deviation of the average size of an individual coral', target='coral_size_std_tooltip') prop_cover = html.Div(html.Div([html.A('proportion cover: ', className='col-sm-4', id='prop_cover_tooltip'), dcc.Input( type='number', placeholder=0, value = 0, step=0.1, min=0, max=1, id='prop_cover', className='col-sm-4' ) ],className='row'), id='prop_cover_input', style={'display': 'none'}) prop_cover_tooltip = dbc.Tooltip('Proportion cover of coral. If it equals 0, its estimation based on the historical data will be used in the simulation', target='prop_cover_tooltip') num_of_replications = html.Div(html.Div([html.A('number of replications', className='col-sm-4'), dcc.Input( type='number', placeholder=10, value = 10, step=1, min=1, id='num_of_replications', className='col-sm-4' ) ],className='row'), id='number_of_replications_input', style={'display': 'none'}) return html.Div([ input_n_toolkits, prop_cover, prop_cover_tooltip, input_fun_lambda, coral_size, coral_size_tooltip, coral_size_std, coral_size_std_tooltip, input_disease_prevalence, input_disease_prevalence_tooltip, input_parent_prop, input_parent_range, input_strauss_beta, input_strauss_gamma, input_strauss_R, input_transect_length, input_transect_width, line_intercept_ratio, num_of_replications ], id='input_process_parameters')
[ 11748, 14470, 62, 7295, 62, 5589, 3906, 355, 288, 535, 198, 11748, 14470, 62, 6494, 62, 5589, 3906, 355, 27711, 198, 11748, 14470, 62, 18769, 26418, 62, 5589, 3906, 355, 288, 15630, 628, 198, 4299, 7716, 62, 14781, 2902, 62, 49283, 33...
1.63074
6,272
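A minimal wiring sketch showing how the two generated components might be mounted in an app; the app setup below is an assumption, not part of the original module:

# Hedged sketch: mount the generated Divs so callbacks can target
# their ids. App name and debug flag are placeholders.
import dash

app = dash.Dash(__name__)
app.layout = html.Div([
    generate_dropdown_selection(),
    generate_user_input(),
])

if __name__ == '__main__':
    app.run_server(debug=True)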
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. # This test is based on the test suite implemented for Recommenders project # https://github.com/Microsoft/Recommenders/tree/master/tests import papermill as pm import pytest import scrapbook as sb from utils_cv.common.data import unzip_url from utils_cv.detection.data import Urls # Unless manually modified, python3 should be # the name of the current jupyter kernel # that runs on the activated conda environment KERNEL_NAME = "python3" OUTPUT_NOTEBOOK = "output.ipynb"
[ 2, 15069, 357, 66, 8, 5413, 10501, 13, 1439, 2489, 10395, 13, 198, 2, 49962, 739, 262, 17168, 13789, 13, 198, 198, 2, 770, 1332, 318, 1912, 319, 262, 1332, 18389, 9177, 329, 19237, 7338, 1628, 198, 2, 3740, 1378, 12567, 13, 785, 1...
3.512195
164
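A hedged sketch of the papermill/scrapbook pattern these imports set up, mirroring Recommenders-style notebook tests; the notebook key, parameters, and scrap name are examples, not the suite's actual values:

# Hedged sketch of a notebook smoke test; fixture keys and the scrap
# name below are illustrative.
def test_notebook_runs(notebooks):
    notebook_path = notebooks["01_training_introduction"]  # example key
    pm.execute_notebook(
        notebook_path,
        OUTPUT_NOTEBOOK,
        kernel_name=KERNEL_NAME,
        parameters=dict(
            EPOCHS=1,
            DATA_PATH=unzip_url(Urls.fridge_objects_path),  # example URL
        ),
    )
    nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
    assert nb_output.scraps["training_losses"].data is not None  # example scrap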
_base_ = [ '../retinanet_r50_fpn_1x_coco.py', '../../_base_/datasets/hdr_detection_minmax_glob_gamma.py', ] # optimizer # lr is set for a batch size of 8 optimizer = dict(type='SGD', lr=0.0005, momentum=0.9, weight_decay=0.0001) optimizer_config = dict(grad_clip=None) # dict(grad_clip=dict(max_norm=35, norm_type=2)) # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=0.001, step=[10]) runner = dict( type='EpochBasedRunner', max_epochs=20)
[ 62, 8692, 62, 796, 685, 198, 220, 220, 220, 705, 40720, 1186, 259, 272, 316, 62, 81, 1120, 62, 69, 21999, 62, 16, 87, 62, 66, 25634, 13, 9078, 3256, 198, 220, 220, 220, 705, 40720, 40720, 62, 8692, 62, 14, 19608, 292, 1039, 14, ...
2.21097
237
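A hedged sketch of how an mmdetection-style config like the one above is typically loaded and inspected with mmcv; the file name is a placeholder:

# Hedged sketch: loading the config with mmcv. The path is a placeholder
# and must sit next to the _base_ files it inherits from.
from mmcv import Config

cfg = Config.fromfile('retinanet_r50_fpn_1x_hdr_minmax_glob_gamma.py')
print(cfg.optimizer.lr)        # 0.0005, tuned for a total batch size of 8
print(cfg.lr_config.step)      # [10] -> lr decay at epoch 10 of 20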