content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# generate grouped data and a bipartite social graph
import synthetic_data_generation as gen
import numpy as np
if __name__ == "__main__":
main()
| [
2,
7716,
32824,
1366,
290,
257,
14141,
433,
578,
1919,
4823,
198,
198,
11748,
18512,
62,
7890,
62,
20158,
355,
2429,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,... | 3.255319 | 47 |
from sql_alchemy import database
| [
6738,
44161,
62,
282,
26599,
1330,
6831,
628
] | 4.25 | 8 |
import argparse
import math
import random
import os
import sys
import numpy as np
import torch
from torch import nn, autograd, optim
from torch.nn import functional as F
from torch.utils import data
import torch.distributed as dist
from torchvision import transforms, utils
from tqdm import tqdm
from copy import deepcopy
import numpy
from metrics.lpips import LPIPS
from model import Generator, Extra
from model import Patch_Discriminator as Discriminator
from dataset import MultiResolutionDataset
from distributed import (
get_rank,
synchronize,
reduce_loss_dict,
reduce_sum,
get_world_size,
)
from losses import PatchLoss,ConstLoss
import clip
if __name__ == "__main__":
device = "cuda"
parser = argparse.ArgumentParser()
parser.add_argument("--data_path", type=str, required=True)
parser.add_argument("--iter", type=int, default=2001)
parser.add_argument("--save_freq", type=int, default=1000)
parser.add_argument("--img_freq", type=int, default=100)
parser.add_argument("--highp", type=int, default=1)
parser.add_argument("--ref_freq", type=int, default=4)
parser.add_argument("--feat_ind", type=int, default=3)
parser.add_argument("--batch", type=int, default=2)
parser.add_argument("--n_sample", type=int, default=4)
parser.add_argument("--size", type=int, default=1024)
parser.add_argument("--r1", type=float, default=10)
parser.add_argument("--d_reg_every", type=int, default=16)
parser.add_argument("--g_reg_every", type=int, default=4)
parser.add_argument("--mixing", type=float, default=0.9)
parser.add_argument("--ckpt", type=str, default=None)
parser.add_argument("--exp", type=str, default=None, required=True)
parser.add_argument("--lr", type=float, default=0.002)
parser.add_argument("--f_lr", type=float, default=0.01)
parser.add_argument("--channel_multiplier", type=int, default=2)
parser.add_argument("--local_rank", type=int, default=0)
parser.add_argument("--skip_init",action='store_true')
parser.add_argument("--init_iter", type=int, default=1001)
parser.add_argument("--lambda_optclip", type=float, default=1)
parser.add_argument("--lambda_optl2", type=float, default=0.01)
parser.add_argument("--lambda_optrec", type=float, default=1)
parser.add_argument("--lambda_patch", type=float, default=1)
parser.add_argument("--lambda_const", type=float, default=10)
parser.add_argument("--crop_size", type=int, default=128)
parser.add_argument("--num_crop", type=int, default=16)
parser.add_argument("--cars", action="store_true")
parser.add_argument("--nce_allbatch", action="store_true")
parser.add_argument("--tau", type=float, default=1.0)
args = parser.parse_args()
torch.manual_seed(1)
random.seed(1)
n_gpu = 1
args.distributed = n_gpu > 1
args.latent = 512
args.n_mlp = 8
args.start_iter = 0
generator = Generator(
args.size, args.latent, args.n_mlp, channel_multiplier=args.channel_multiplier
).to(device)
g_source = Generator(
args.size, args.latent, args.n_mlp, channel_multiplier=args.channel_multiplier
).to(device)
discriminator = Discriminator(
args.size, channel_multiplier=args.channel_multiplier
).to(device)
g_ema = Generator(
args.size, args.latent, args.n_mlp, channel_multiplier=args.channel_multiplier
).to(device)
extra = Extra().to(device)
clip_model, preprocess = clip.load("ViT-B/32", device=device)
g_ema.eval()
accumulate(g_ema, generator, 0)
g_reg_ratio = args.g_reg_every / (args.g_reg_every + 1)
d_reg_ratio = args.d_reg_every / (args.d_reg_every + 1)
g_optim = optim.Adam(
generator.parameters(),
lr=args.lr * g_reg_ratio,
betas=(0 ** g_reg_ratio, 0.99 ** g_reg_ratio),
)
d_optim = optim.Adam(
discriminator.parameters(),
lr=args.lr * d_reg_ratio,
betas=(0 ** d_reg_ratio, 0.99 ** d_reg_ratio),
)
e_optim = optim.Adam(
extra.parameters(),
lr=args.lr * d_reg_ratio,
betas=(0 ** d_reg_ratio, 0.99 ** d_reg_ratio),
)
if args.ckpt is not None:
print("load model:", args.ckpt)
ckpt = torch.load(args.ckpt, map_location=lambda storage, loc: storage)
ckpt_source = torch.load(args.ckpt, map_location=lambda storage, loc: storage)
try:
ckpt_name = os.path.basename(args.ckpt)
args.start_iter = int(os.path.splitext(ckpt_name)[0])
except ValueError:
pass
generator.load_state_dict(ckpt["g"], strict=False)
g_source.load_state_dict(ckpt_source["g"], strict=False)
g_ema.load_state_dict(ckpt["g_ema"], strict=False)
discriminator.load_state_dict(ckpt["d"])
if 'g_optim' in ckpt.keys():
g_optim.load_state_dict(ckpt["g_optim"])
if 'd_optim' in ckpt.keys():
d_optim.load_state_dict(ckpt["d_optim"])
if args.distributed:
geneator = nn.parallel.DataParallel(generator)
g_ema = nn.parallel.DataParallel(g_ema)
g_source = nn.parallel.DataParallel(g_source)
discriminator = nn.parallel.DataParallel(discriminator)
extra = nn.parallel.DataParallel(extra)
transform = transforms.Compose(
[
transforms.Resize([args.size,args.size]),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(
(0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
)
transform_or = transforms.Compose(
[
transforms.Resize([args.size,args.size]),
transforms.ToTensor(),
transforms.Normalize(
(0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
)
dataset = MultiResolutionDataset(args.data_path, transform, args.size)
dataset_or = MultiResolutionDataset(args.data_path, transform_or, args.size)
loader = data.DataLoader(
dataset,
batch_size=args.batch,
sampler=data_sampler(dataset, shuffle=True, distributed=False),
drop_last=True,
)
loader_or = data.DataLoader(
dataset_or,
batch_size=1,
sampler=data_sampler(dataset_or, shuffle=True, distributed=False),
drop_last=True,
)
train(args, loader,loader_or, generator, discriminator, extra, g_optim,
d_optim, e_optim, g_ema, device, g_source,clip_model) | [
11748,
1822,
29572,
198,
11748,
10688,
198,
11748,
4738,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
11,
1960,
519,
6335,
11,
6436,
198,
6738,
28034,
13,
... | 2.340548 | 2,772 |
import execnet
import cPickle as pickle
from collections import Counter
import numpy as np
from sklearn.cluster import MiniBatchKMeans
from scipy.spatial.distance import cdist, pdist
from sklearn import cluster, metrics
from time import time
if __name__ == '__channelexec__':
while 1:
X = pickle.load(open("/media/jan2015/tmp/X","r+"))
kMeansVar = MiniBatchKMeans(n_clusters=channel.receive()).fit(X)
channel.send(pickle.dumps(kMeansVar)) | [
11748,
2452,
3262,
198,
11748,
269,
31686,
293,
355,
2298,
293,
198,
6738,
17268,
1330,
15034,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
13,
565,
5819,
1330,
12558,
33,
963,
42,
5308,
504,
198,
6738,
629,
541,
88,
13... | 2.81875 | 160 |
# Copyright (c) 2017 Alex Socha
# http://www.alexsocha.com/pynode
from pynode.src import communicate
| [
2,
15069,
357,
66,
8,
2177,
4422,
1406,
11693,
201,
198,
2,
2638,
1378,
2503,
13,
1000,
87,
568,
11693,
13,
785,
14,
79,
2047,
1098,
201,
198,
201,
198,
6738,
279,
2047,
1098,
13,
10677,
1330,
10996,
201,
198
] | 2.65 | 40 |
import gzip
import jsonlines
import os
import shutil
def read_jsonl(file):
"""Read a JSON lines file into a list of JSON dicts.
Args:
file (file object): An existing file object to be read from.
The file can either be a json lines file (extension `.jsonl`)
or a gzip file (extension `.gz`). In the latter case the file
will be unzipped before being read from.
Returns:
list: A list of JSON dicts.
"""
filename, file_ext = os.path.splitext(file)
# unzip the file
if file_ext == '.gz':
jsonl_file = filename
with gzip.open(file, 'rb') as src, open(jsonl_file, 'wb') as dest:
shutil.copyfileobj(src, dest)
else:
jsonl_file = file
# read in the lines
json_lines = []
with jsonlines.open(jsonl_file, mode='r') as reader:
for json_line in reader:
json_lines.append(json_line)
# delete file
if file_ext == '.gz':
os.remove(jsonl_file)
return json_lines
| [
11748,
308,
13344,
198,
11748,
33918,
6615,
198,
11748,
28686,
198,
11748,
4423,
346,
628,
198,
4299,
1100,
62,
17752,
75,
7,
7753,
2599,
198,
220,
220,
220,
37227,
5569,
257,
19449,
3951,
2393,
656,
257,
1351,
286,
19449,
8633,
82,
1... | 2.350114 | 437 |
import socket
import threading
import time
import http.client
import requests
import os
count = 1
#while 1:
count = 0
while count < 32:
r = requests.get("http://127.0.0.1:1123/index")
filename = "./tmp/"+str(count)
with open(os.path.join(os.path.dirname(os.path.abspath("__file__")),filename),"wb") as f:
f.write(r.content)
count = count + 1
r.close()
print("finished") | [
11748,
17802,
198,
11748,
4704,
278,
198,
11748,
640,
198,
11748,
2638,
13,
16366,
198,
11748,
7007,
198,
11748,
28686,
198,
198,
9127,
796,
352,
198,
2,
4514,
352,
25,
198,
9127,
796,
657,
198,
4514,
954,
1279,
3933,
25,
198,
220,
... | 2.509434 | 159 |
sample = """Player 1:
9
2
6
3
1
Player 2:
5
8
4
7
10"""
starting_decks = """Player 1:
3
42
4
25
14
36
32
18
33
10
35
50
16
31
34
46
9
6
41
7
15
45
30
27
49
Player 2:
8
11
47
21
17
39
29
43
23
28
13
22
5
20
44
38
26
37
2
24
48
12
19
1
40""" | [
39873,
796,
37227,
14140,
352,
25,
198,
24,
198,
17,
198,
21,
198,
18,
198,
16,
198,
198,
14140,
362,
25,
198,
20,
198,
23,
198,
19,
198,
22,
198,
940,
37811,
198,
198,
38690,
62,
12501,
591,
796,
37227,
14140,
352,
25,
198,
18,... | 1.624161 | 149 |
import logging
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from pynYNAB.ObjClient import RootObjClient
from pynYNAB.connection import nYnabConnection
from pynYNAB.exceptions import NoBudgetNameException, BudgetNotFound, NoCredentialsException
from pynYNAB.schema import Base, Catalog, Budget
LOG = logging.getLogger(__name__)
| [
11748,
18931,
198,
198,
6738,
44161,
282,
26599,
1330,
2251,
62,
18392,
198,
6738,
44161,
282,
26599,
13,
579,
1330,
6246,
10297,
198,
198,
6738,
279,
2047,
56,
4535,
33,
13,
49201,
11792,
1330,
20410,
49201,
11792,
198,
6738,
279,
2047... | 3.306306 | 111 |
# flake8: noqa
from catalyst_rl.rl.exploration import *
| [
2,
781,
539,
23,
25,
645,
20402,
198,
6738,
31357,
62,
45895,
13,
45895,
13,
20676,
6944,
1330,
1635,
198
] | 2.8 | 20 |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from matplotlib import pyplot as plt
import tensorflow as tf
from tensorflow.keras.optimizers import RMSprop,Nadam,Adadelta,Adam
from tensorflow.keras.layers import BatchNormalization,LeakyReLU
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping
import seaborn as sns
import scipy.stats as stats
import sklearn
import os
import pickle
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import normalize
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
df = mydata = pd.read_csv("Data/cardio_train.csv", sep=";")
df.drop('id', inplace=True, axis=1)
df.head()
dfcol = df.columns
# duplicated_number = mydata.duplicated().sum()
# print(duplicated_number)
# #removing the duplicated values from the dataset
# duplicated = mydata[mydata.duplicated(keep=False)]
# #duplicated.head(2)
# mydata.drop_duplicates(inplace=True)
# duplicated_number2 = mydata.duplicated().sum()
# print(duplicated_number2)
# x = mydata.copy(deep=True)
# x.describe()
# s_list = ["age", "height", "weight", "ap_hi", "ap_lo"]
# def standartization(x):
# x_std = x.copy(deep=True)
# for column in s_list:
# x_std[column] = (x_std[column]-x_std[column].mean())/x_std[column].std()
# return x_std
# x_std=standartization(x)
# x_std.head()
# x_melted = pd.melt(frame=x_std, id_vars="cardio", value_vars=s_list, var_name="features", value_name="value", col_level=None)
# x_melted
# ap_list = ["ap_hi", "ap_lo"]
# boundary = pd.DataFrame(index=["lower_bound","upper_bound"]) # We created an empty dataframe
# for each in ap_list:
# Q1 = x[each].quantile(0.25)
# Q3 = x[each].quantile(0.75)
# IQR = Q3 - Q1
# lower_bound = Q1- 1.5*IQR
# upper_bound = Q3 + 1.5*IQR
# boundary[each] = [lower_bound, upper_bound ]
# boundary
# ap_hi_filter = (x["ap_hi"] > boundary["ap_hi"][1])
# ap_lo_filter = (x["ap_lo"] > boundary["ap_lo"][1])
# outlier_filter = (ap_hi_filter | ap_lo_filter)
# x_outliers = x[outlier_filter]
# x_outliers["cardio"].value_counts()
# out_filter = ((x["ap_hi"]>250) | (x["ap_lo"]>200) )
# print(x[out_filter]["cardio"].count())
# #count of outliers
# x = x[~out_filter]
# corr = x.corr()
# y = x["cardio"]
# x.drop("cardio", axis=1,inplace=True)
from sklearn import preprocessing
scaler=preprocessing.MinMaxScaler()
dfscale=scaler.fit_transform(df)
dfscale2=pd.DataFrame(dfscale, columns=dfcol)
dfscale2.head()
# x_train,x_test, y_train, y_test = train_test_split(x,y,test_size=0.2,random_state=42)
# x_train = normalize(x_train)
# x_test = normalize(x_test)
# x = normalize(x)
xdf=dfscale2.iloc[:,0:11]
#xdf["gender"]=np.where(xdf["gender"]==1,"0","1") #Cambiar el 2 por 1, el 1 por 0 (por orden)
#Aca vendria un posible drop de variables xdf=xdf.drop(["gender","gluc"], axis=1)
ydf=dfscale2.iloc[:,-1]
x_training, x_testing, y_training, y_testing = train_test_split(xdf, ydf, test_size = 0.2, random_state=123, stratify=ydf)
ran = RandomForestClassifier(n_estimators=100)
ran2 = ran.fit(x_training,y_training)
# import tensorflow as tf
# from tensorflow.keras.optimizers import RMSprop,Nadam,Adadelta,Adam
# from tensorflow.keras.layers import BatchNormalization,LeakyReLU
# from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping
# import numpy as np # linear algebra
# import pandas as pd # data processing
# import seaborn as sns # visualizations
# import matplotlib.pyplot as plt # visualizations
# from sklearn import preprocessing
# from sklearn.model_selection import train_test_split
# from tensorflow.keras import utils
# import os
# import pickle
# from keras.models import Sequential
# from keras.layers.core import Dense, Activation
# from keras.optimizers import SGD
# from keras.layers import Dropout
# from keras.constraints import maxnorm
# mydata = pd.read_csv("cardio_train.csv", sep=";")
# mydata.drop('id', inplace=True, axis=1)
# df = mydata
# dfcol=df.columns
# mydata.head()
# model = Sequential()
# model.add(Dense(25, input_dim=11, activation='softsign', kernel_constraint=maxnorm(2)))
# #model.add(Dropout(0))
# model.add(Dense(5, activation='softsign'))
# #model.add(Dropout(0))
# model.add(Dense(3, activation='softsign'))
# #model.add(Dropout(0))
# model.add(Dense(1, activation='sigmoid'))
# model.compile(loss = 'binary_crossentropy', optimizer='Nadam', metrics=['accuracy'])
# from sklearn import preprocessing
# scaler=preprocessing.MinMaxScaler()
# dfscale=scaler.fit_transform(df)
# dfscale2=pd.DataFrame(dfscale, columns=dfcol)
# dfscale2.head()
# xdf=dfscale2.iloc[:,0:11]
# #xdf["gender"]=np.where(xdf["gender"]==1,"0","1") #Cambiar el 2 por 1, el 1 por 0 (por orden)
# #Aca vendria un posible drop de variables xdf=xdf.drop(["gender","gluc"], axis=1)
# ydf=dfscale2.iloc[:,-1]
# x_training, x_testing, y_training, y_testing = train_test_split(xdf, ydf, test_size = 0.2, random_state=123, stratify=ydf)
# model2 = model.fit(x_training, y_training, epochs=50, batch_size=50, verbose=0)
# score = model.evaluate(x_training, y_training)
# print("\n Training Accuracy:", score[1])
# score = model.evaluate(x_testing, y_testing)
# print("\n Testing Accuracy:", score[1])
filename = 'fcardio.sav'
pickle.dump(ran2, open(filename, 'wb')) | [
11748,
299,
32152,
355,
45941,
1303,
14174,
37139,
198,
11748,
19798,
292,
355,
279,
67,
1303,
1366,
7587,
11,
44189,
2393,
314,
14,
46,
357,
68,
13,
70,
13,
279,
67,
13,
961,
62,
40664,
8,
198,
6738,
2603,
29487,
8019,
1330,
12972,... | 2.524829 | 2,195 |
if __name__ == "__main__":
k = 2
a = [0, -1, 2, 1]
print(angryProfessor(k,a)) | [
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
479,
796,
362,
198,
220,
220,
220,
257,
796,
685,
15,
11,
532,
16,
11,
362,
11,
352,
60,
198,
220,
220,
220,
3601,
7,
648,
563,
25031,
7,
74,
11... | 1.875 | 48 |
import importlib
# These are special sizes
LENGTH_PREFIXED_VAR_SIZE = -1
CODEC_CACHE = {}
| [
11748,
1330,
8019,
628,
198,
2,
2312,
389,
2041,
10620,
198,
43,
49494,
62,
47,
31688,
10426,
1961,
62,
53,
1503,
62,
33489,
796,
532,
16,
628,
198,
198,
34,
3727,
2943,
62,
34,
2246,
13909,
796,
23884,
628
] | 2.461538 | 39 |
import sqlite3
from ioscrack import crack
| [
11748,
44161,
578,
18,
198,
198,
6738,
1312,
418,
6098,
441,
1330,
8469,
628,
628,
628
] | 3 | 16 |
"""Users Serializers"""
#Django
from django.contrib.auth import authenticate, password_validation
# Django REST Framework
from rest_framework import serializers
from rest_framework.authtoken.models import Token
from rest_framework.validators import UniqueValidator
#Models
from handwritten.users.models import User
class UserModelSerializer(serializers.ModelSerializer):
"""User model serializer"""
class Meta():
"""Meta Class."""
model = User
fields = (
'username',
'first_name',
'last_name',
'email'
)
class UserSignUpSerializer(serializers.Serializer):
"""User Sign Up serializer"""
email = serializers.EmailField(
validators=[UniqueValidator(queryset=User.objects.all())]
)
username = serializers.CharField(
min_length=4,
max_length=20,
validators=[UniqueValidator(queryset=User.objects.all())]
)
#Password
password = serializers.CharField(min_length=8,max_length=64)
password_confirmation = serializers.CharField(min_length=8,max_length=64)
#Name
first_name = serializers.CharField(min_length=2,max_length=30)
last_name = serializers.CharField(min_length=2,max_length=30)
class UserLoginSerializer(serializers.Serializer):
""" User login Serializer"""
email = serializers.EmailField()
password = serializers.CharField(min_length=8, max_length=64)
def validate(self,data):
"""Check credentials"""
user = authenticate(username=data['email'],password=data['password'])
if not user:
raise serializers.ValidationError('Invalid credential')
self.context['user'] = user
return data
def create(self,data):
"""Generate or retrive new tocken"""
tocken, created = Token.objects.get_or_create(user=self.context['user'])
return self.context['user'], tocken.key | [
37811,
14490,
23283,
11341,
37811,
198,
2,
35,
73,
14208,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
8323,
5344,
11,
9206,
62,
12102,
341,
198,
2,
37770,
30617,
25161,
198,
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
... | 2.633242 | 728 |
class Vessel(object):
"""Dummy class to return pre-generated data
"""
def get_duty(self):
"""Dummy method, returns fake duty cycle
"""
return 50
def set_duty(self, duty):
"""Dummy method, will be used to set the output duty cycle
"""
pass
if __name__ == '__main__':
test = Vessel('dat')
for i in range(10):
print(float(test.read_data()))
| [
198,
4871,
44734,
7,
15252,
2599,
198,
220,
220,
220,
37227,
35,
13513,
1398,
284,
1441,
662,
12,
27568,
1366,
198,
220,
220,
220,
37227,
628,
628,
220,
220,
220,
825,
651,
62,
26278,
7,
944,
2599,
198,
220,
220,
220,
220,
220,
22... | 2.372222 | 180 |
"""
Exceptions for storage events.
"""
class SampleStorageError(Exception):
"""
Superclass of all storage related exceptions. Denotes a general storage error.
"""
class StorageInitError(SampleStorageError):
"""
Denotes an error during initialization of the storage system.
"""
class OwnerChangedError(SampleStorageError):
"""
Denotes that the owner designated by a save operation is not the same as the owner in the
database - e.g. the owner has changed since the ACLs were last pulled from the database.
This error generally denotes a race condition has occurred.
"""
| [
37811,
198,
3109,
11755,
329,
6143,
2995,
13,
198,
37811,
628,
198,
4871,
27565,
31425,
12331,
7,
16922,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
3115,
4871,
286,
477,
6143,
3519,
13269,
13,
5601,
6421,
257,
2276,
6143,
40... | 3.684524 | 168 |
import re
import sys
PASSWORD_RE = re.compile('(\d+)-(\d+) (\w): (\w+)')
if __name__ == '__main__':
correct_count = 0
for line in sys.stdin:
pos1, pos2, letter, password = PASSWORD_RE.match(line).groups()
pos1 = int(pos1) - 1
pos2 = int(pos2) - 1
if (letter == password[pos1]) ^ (letter == password[pos2]):
correct_count += 1
print(correct_count)
| [
11748,
302,
198,
11748,
25064,
628,
198,
47924,
54,
12532,
62,
2200,
796,
302,
13,
5589,
576,
10786,
38016,
67,
10,
13219,
38016,
67,
28988,
357,
59,
86,
2599,
357,
59,
86,
28988,
11537,
628,
198,
361,
11593,
3672,
834,
6624,
705,
8... | 2.170213 | 188 |
from typing import List, Tuple, Union
from tikzpy.drawing_objects.point import Point
from tikzpy.drawing_objects.drawing_object import DrawingObject
from tikzpy.utils.helpers import brackets
class PlotCoordinates(DrawingObject):
"""
A class to create plots in the tikz environment.
Attributes :
options (str) : String containing drawing options (e.g., "Blue")
plot_options (str) : String containing the plot options (e.g., "smooth cycle")
points (list) : A list of points to be drawn
"""
@property
@points.setter
@property
@property
| [
6738,
19720,
1330,
7343,
11,
309,
29291,
11,
4479,
198,
6738,
256,
1134,
89,
9078,
13,
19334,
278,
62,
48205,
13,
4122,
1330,
6252,
198,
6738,
256,
1134,
89,
9078,
13,
19334,
278,
62,
48205,
13,
19334,
278,
62,
15252,
1330,
40027,
1... | 2.940887 | 203 |
from django.http import HttpResponse
from django.template import loader
from django.shortcuts import render, redirect
| [
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
198,
6738,
42625,
14208,
13,
28243,
1330,
40213,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
18941,
628,
628,
628,
628,
198
] | 3.818182 | 33 |
import hashlib
import lxml.html
import os
import pickle
import requests
import sys
_ascii = ('01234567890123456789012345678901 '
'!"#$%&\'()*+,-./0123456789:;<=>?@'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`'
'abcdefghijklmnopqrstuvwxyz{|}~')
| [
11748,
12234,
8019,
198,
11748,
300,
19875,
13,
6494,
198,
11748,
28686,
198,
11748,
2298,
293,
198,
11748,
7007,
198,
11748,
25064,
628,
198,
62,
292,
979,
72,
796,
19203,
486,
1954,
2231,
3134,
4531,
486,
1954,
2231,
3134,
4531,
486,
... | 1.861111 | 144 |
import paddle.fluid as fluid
import paddle
import paddorch.cuda
import paddorch.nn
import os
import paddorch.nn.functional
from paddle.fluid import dygraph
import numpy as np
| [
11748,
39517,
13,
35522,
312,
355,
11711,
198,
11748,
39517,
198,
11748,
14098,
273,
354,
13,
66,
15339,
198,
11748,
14098,
273,
354,
13,
20471,
198,
11748,
28686,
198,
11748,
14098,
273,
354,
13,
20471,
13,
45124,
198,
6738,
39517,
13,... | 3.320755 | 53 |
import logging
from .ibindex import IbIndexQueryService
from .nasdaq import NasdaqIndexScraper, NasdaqCompany
from .google import StockDomain, GoogleFinanceQueryService
from .bloomberg import BloombergQueryService
from .avanza import AvanzaQueryService
from .ig import IGQueryService
from .yahoo import YahooQueryService
from stockbot.db import Base
from sqlalchemy import Column, String
LOGGER = logging.getLogger(__name__)
| [
11748,
18931,
198,
6738,
764,
571,
9630,
1330,
21089,
15732,
20746,
16177,
198,
6738,
764,
24716,
48539,
1330,
22767,
48539,
15732,
3351,
38545,
11,
22767,
48539,
39154,
198,
6738,
764,
13297,
1330,
10500,
43961,
11,
3012,
37,
14149,
20746,... | 3.9 | 110 |
from django.conf import settings
from django.db import models
class TimeStampedModel(models.Model):
""" Time Stamped Model """
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Voice(TimeStampedModel):
""" Voice Model to analyze and save pitches """
# 목소리 음역대 저장
max_pitch = models.CharField(max_length=10)
min_pitch = models.CharField(max_length=10, blank=True, null=True)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='voices')
class File(TimeStampedModel):
""" File Model to save voice files """
filename = models.FileField(upload_to="voices", blank=False, null=True)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='files')
class Song(TimeStampedModel):
""" Song Model to save songs with pitch and info """
title = models.CharField(max_length=200, blank=False, null=False)
max_pitch = models.CharField(max_length=5, blank=False, null=False) # 음역대
min_pitch = models.CharField(max_length=5, blank=True, null=True)
explanation = models.CharField(max_length=255, blank=True, null=True, default=None)
singer = models.ManyToManyField('Singer', related_name='songs')
genre = models.ManyToManyField('Genre', related_name='songs', help_text='Select a genre for this song')
def __str__(self):
"""String for representing the Model object."""
return self.title
class Genre(TimeStampedModel):
""" Genre Model """
name = models.CharField(max_length=200, help_text='Enter a song genre (e.g. Hip-Hop)')
def __str__(self):
"""String for representing the Model object."""
return str(self.pk) + '.' + self.name
class Singer(TimeStampedModel):
""" Singer Model """
name = models.CharField(max_length=100)
date_of_debut = models.DateField(null=True, blank=True)
def __str__(self):
"""String for representing the Model object."""
return str(self.pk) + '.' + self.name
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
628,
198,
4871,
3862,
1273,
13322,
17633,
7,
27530,
13,
17633,
2599,
198,
220,
220,
220,
37227,
3862,
520,
13322,
9104,
37227,
628,
220,
220,
220,... | 2.722955 | 758 |
import numpy as np
import pandas as pd
np.random.seed(1)
LENGTH = 500
A = np.random.rand(LENGTH)
A[np.random.choice(LENGTH, 20, replace = False)] = np.nan
B = np.random.randint(100, size = LENGTH)
C = A + np.random.normal(0, 0.2, LENGTH)
D = A + np.random.normal(0, 0.1, LENGTH)
E = np.random.rand(LENGTH)
E[np.random.choice(LENGTH, 480, replace = False)] = np.nan
F = B + np.random.normal(0, 10, LENGTH)
target = np.random.randint(2, size = LENGTH)
frame = pd.DataFrame({
'A': A,
'B': B,
'C': C,
'D': D,
'E': E,
'F': F,
})
frame['target'] = target
if __name__ == '__main__':
frame.to_csv('test_data.csv', index = False)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
37659,
13,
25120,
13,
28826,
7,
16,
8,
198,
198,
43,
49494,
796,
5323,
198,
198,
32,
796,
45941,
13,
25120,
13,
25192,
7,
43,
49494,
8,
198,
32,
58,
... | 2.2 | 300 |
xmark = '<:xmark:820320509211574284>'
tick = '<:tick:820320509564551178>'
voice_channel = '<:Voice_channels:820162682883014667> '
text_channel = '<:Text_Channel:820162682970832897>'
error = '<:error:820162683147911169>'
questionmark = '<:questionmark:820319249867866143>'
info = '<:info:820332723121684530>'
youtube = '<:yotube:820657499895103518>'
loading = '<a:loading:824225352573255680>'
number_emojis = {
1: "\u0031\ufe0f\u20e3",
2: "\u0032\ufe0f\u20e3",
3: "\u0033\ufe0f\u20e3",
4: "\u0034\ufe0f\u20e3",
5: "\u0035\ufe0f\u20e3",
6: "\u0036\ufe0f\u20e3",
7: "\u0037\ufe0f\u20e3",
8: "\u0038\ufe0f\u20e3",
9: "\u0039\ufe0f\u20e3"
}
x = "\U0001f1fd"
o = "\U0001f1f4"
switch_on ='<:switch_on:845865302571089930>'
switch_off ='<:switch_off:845865362193252372>'
def regional_indicator(c: str) -> str:
"""Returns a regional indicator emoji given a character."""
return chr(0x1F1E6 - ord("A") + ord(c.upper()))
| [
87,
4102,
796,
705,
27,
25,
87,
4102,
25,
41739,
19504,
1120,
5892,
1157,
46900,
30336,
29,
6,
198,
42298,
796,
705,
27,
25,
42298,
25,
41739,
19504,
1120,
50148,
30505,
1157,
3695,
29,
6,
198,
38888,
62,
17620,
796,
705,
27,
25,
... | 1.908549 | 503 |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Log to `Weights and Biases <https://wandb.ai/>`_."""
from __future__ import annotations
import atexit
import os
import pathlib
import re
import sys
import tempfile
import warnings
from typing import Any, Dict, List, Optional
from composer.core.state import State
from composer.loggers.logger import Logger, LogLevel
from composer.loggers.logger_destination import LoggerDestination
from composer.utils import dist
from composer.utils.import_helpers import MissingConditionalImportError
__all__ = ["WandBLogger"]
class WandBLogger(LoggerDestination):
"""Log to `Weights and Biases <https://wandb.ai/>`_.
Args:
project (str, optional): WandB project name.
group (str, optional): WandB group name.
name (str, optional): WandB run name.
If not specified, the :attr:`.State.run_name` will be used.
entity (str, optional): WandB entity name.
tags (List[str], optional): WandB tags.
log_artifacts (bool, optional): Whether to log
`artifacts <https://docs.wandb.ai/ref/python/artifact>`_ (Default: ``False``).
rank_zero_only (bool, optional): Whether to log only on the rank-zero process.
When logging `artifacts <https://docs.wandb.ai/ref/python/artifact>`_, it is
highly recommended to log on all ranks. Artifacts from ranks ≥1 will not be
stored, which may discard pertinent information. For example, when using
Deepspeed ZeRO, it would be impossible to restore from checkpoints without
artifacts from all ranks (default: ``False``).
init_kwargs (Dict[str, Any], optional): Any additional init kwargs
``wandb.init`` (see
`WandB documentation <https://docs.wandb.ai/ref/python/init>`_).
"""
| [
2,
15069,
33160,
5826,
18452,
5805,
29936,
263,
7035,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
198,
37811,
11187,
284,
4600,
1135,
2337,
290,
8436,
1386,
1279,
5450,
1378,
86,
392,
65,
13,
1872,... | 2.795796 | 666 |
import config
from controlClient import ControlClient
from recognizer import Recognizer
from voiceListener import VoiceListener
import logging
client=ControlClient(config.mqtt_broker_address,config.mqtt_broker_port,config.mqtt_voice_topic)
recognizer=Recognizer(config.model_directory)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
main() | [
11748,
4566,
198,
6738,
1630,
11792,
1330,
6779,
11792,
198,
6738,
3018,
7509,
1330,
31517,
7509,
198,
6738,
3809,
33252,
1330,
15282,
33252,
198,
11748,
18931,
628,
198,
16366,
28,
15988,
11792,
7,
11250,
13,
76,
80,
926,
62,
7957,
612... | 3.27027 | 111 |
from honeygrove.config import Config
from honeygrove.services.HTTPService import HTTPService
from twisted.internet import reactor
import requests
import threading
import unittest
| [
6738,
12498,
27333,
303,
13,
11250,
1330,
17056,
198,
6738,
12498,
27333,
303,
13,
30416,
13,
6535,
28820,
712,
501,
1330,
38288,
712,
501,
198,
198,
6738,
19074,
13,
37675,
1330,
21905,
198,
198,
11748,
7007,
198,
11748,
4704,
278,
198... | 3.956522 | 46 |
"""Parsing of CLI input (args)."""
import argparse
from ickafka.__version__ import VERSION
def get_args():
"""Parse args"""
parser = argparse.ArgumentParser(description="Consume from kafka")
parser.add_argument(
"-s", "--server", help="kafka broker ip or hostname", default="localhost"
)
parser.add_argument("-g", "--group", help="kafka consumer group", default=None)
parser.add_argument(
"-o",
"--offset",
help="which offset to start at. options: smallest, earliest, latest",
default="latest",
)
parser.add_argument("-t", "--topic", help="kafka topic name", required=True)
parser.add_argument("--capture", dest="capture", action="store_true")
parser.add_argument("--no-color", dest="no_color", action="store_true")
parser.add_argument(
"-v",
"--version",
action="version",
version=VERSION,
help="ickafka version",
default=None,
)
return parser.parse_args()
| [
37811,
47,
945,
278,
286,
43749,
5128,
357,
22046,
21387,
15931,
201,
198,
201,
198,
11748,
1822,
29572,
201,
198,
6738,
220,
624,
1878,
4914,
13,
834,
9641,
834,
1330,
44156,
2849,
201,
198,
201,
198,
201,
198,
4299,
651,
62,
22046,
... | 2.399072 | 431 |
import pytest
| [
11748,
12972,
9288,
628,
628,
198
] | 3 | 6 |
from construct import *
from ..common import debug_field, DirtyBits
# Wire format for a partial ship-status update: a dirty-bit mask, a varint
# count, then one raw byte per system (dumped via debug_field for inspection).
ShipStatus_Partial = Struct(
    "DirtyBits" / DirtyBits,
    "NumSystems" / VarInt,
    "Systems" / debug_field(Byte[this.NumSystems]),
)

# Full ship-status message.
# NOTE(review): the struct body is empty in this excerpt — confirm whether
# fields are defined elsewhere or still to be implemented.
ShipStatus_Full = Struct(
)
6738,
5678,
1330,
1635,
198,
198,
6738,
11485,
11321,
1330,
14257,
62,
3245,
11,
32052,
33,
896,
198,
198,
25586,
19580,
62,
7841,
498,
796,
32112,
7,
198,
220,
220,
220,
366,
35,
5893,
33,
896,
1,
1220,
32052,
33,
896,
11,
198,
2... | 2.644444 | 90 |
import multiprocessing, logging, sys, re, os, StringIO, threading, time, Queue, collections
from logging import Logger
class MultiProcessingLogHandler(logging.Handler):
    """taken from http://stackoverflow.com/questions/641420/how-should-i-log-while-using-multiprocessing-in-python
    added counting of log messages.
    """
    # NOTE(review): the handler implementation appears truncated in this excerpt;
    # only the docstring is visible here.
def initPool(queue, level):
    """
    This causes the logging module to be initialized with the necessary info
    in pool threads to work correctly.

    :param queue: shared queue that worker-process log records are forwarded onto
    :param level: logging level applied to the root logger in the worker process
    """
    # Attach a forwarding handler to the root logger ('' is the root logger name).
    # NOTE(review): MultiProcessingLogHandler's body is truncated in this excerpt —
    # confirm the (handler, queue, child=...) constructor signature.
    logging.getLogger('').addHandler(MultiProcessingLogHandler(logging.StreamHandler(), queue, child=True))
    logging.getLogger('').setLevel(level)
| [
11748,
18540,
305,
919,
278,
11,
18931,
11,
25064,
11,
302,
11,
28686,
11,
10903,
9399,
11,
4704,
278,
11,
640,
11,
4670,
518,
11,
17268,
198,
198,
6738,
18931,
1330,
5972,
1362,
198,
198,
4871,
15237,
18709,
278,
11187,
25060,
7,
6... | 3.151961 | 204 |
# Semantic-version components; bump these when cutting a release.
_MAJOR = u"0"
_MINOR = u"6"
# Revision carries a pre-release marker until the release is tagged.
_REVISION = u"1-unreleased"
# Short "MAJOR.MINOR" form.
VERSION_SHORT = u"{0}.{1}".format(_MAJOR, _MINOR)
# Full "MAJOR.MINOR.REVISION" version string.
VERSION = u"{0}.{1}.{2}".format(_MAJOR, _MINOR, _REVISION)
| [
62,
5673,
41,
1581,
796,
334,
1,
15,
1,
198,
62,
23678,
1581,
796,
334,
1,
21,
1,
198,
62,
2200,
29817,
2849,
796,
334,
1,
16,
12,
403,
30147,
1,
198,
198,
43717,
62,
9693,
9863,
796,
334,
1,
90,
15,
27422,
90,
16,
92,
1911,... | 1.886364 | 88 |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Display a variety of visual shapes whose attributes can be associated
with data columns from ``ColumnDataSources``.
All these glyphs share a minimal common interface through their base class
``Glyph``:
.. autoclass:: Glyph
:members:
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from ..core.has_props import abstract
from ..core.properties import Instance, List
from ..model import Model
from .graphics import Decoration
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'ConnectedXYGlyph',
'Glyph',
'XYGlyph',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
@abstract
class Glyph(Model):
    ''' Base class for all glyph models.
    '''
    # explicit __init__ to support Init signatures
    # Optional visual adornments (e.g. arrow heads) attached to this glyph.
    decorations = List(Instance(Decoration), default=[], help="""
    A collection of glyph decorations, e.g. arrow heads.
    Use ``GlyphRenderer.add_decoration()`` for easy setup for all glyphs
    of a glyph renderer. Use this property when finer control is needed.
    .. note::
        Decorations are only for aiding visual appearance of a glyph,
        but they don't participate in hit testing, etc.
    """)
@abstract
class XYGlyph(Glyph):
    ''' Base class of glyphs with `x` and `y` coordinate attributes.
    '''
    # explicit __init__ to support Init signatures
@abstract
class ConnectedXYGlyph(XYGlyph):
    ''' Base class of glyphs with `x` and `y` coordinate attributes and
    a connected topology.
    '''
    # explicit __init__ to support Init signatures
@abstract
class LineGlyph(Glyph):
    ''' Glyphs with line properties. Abstract marker base class; it adds no
    members of its own in this module.
    '''
    # explicit __init__ to support Init signatures
@abstract
class FillGlyph(Glyph):
    ''' Glyphs with fill properties. Abstract marker base class; it adds no
    members of its own in this module.
    '''
    # explicit __init__ to support Init signatures
@abstract
class TextGlyph(Glyph):
    ''' Glyphs with text properties. Abstract marker base class; it adds no
    members of its own in this module.
    '''
    # explicit __init__ to support Init signatures
@abstract
class HatchGlyph(Glyph):
    ''' Glyphs with Hatch properties. Abstract marker base class; it adds no
    members of its own in this module.
    '''
    # explicit __init__ to support Init signatures
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| [
2,
10097,
32501,
198,
2,
15069,
357,
66,
8,
2321,
532,
33160,
11,
1052,
330,
13533,
11,
3457,
1539,
290,
347,
2088,
71,
25767,
669,
13,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
383,
1336,
5964,
318,
287,
262,
2393,
38559,
... | 4.3309 | 822 |
# IMPORTS
import cv2
# Read the video file
cap = cv2.VideoCapture("./Data/cars.avi")

# Frame dimensions of the input video, needed to size the output writer.
frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

# FourCC codec identifying the output container format.
fourcc = cv2.VideoWriter_fourcc(*"XVID")
saved_frame = cv2.VideoWriter(
    "car_detection.avi", fourcc, 20.0, (frame_width, frame_height)
)

# Haar-cascade car detector.
model = cv2.CascadeClassifier("haarcascade_car.xml")

# Process frames until the video ends or Esc is pressed.
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        # Bug fix: cap.read() returns (False, None) at end of stream; without
        # this check cv2.cvtColor(None, ...) raises once the video is exhausted.
        break
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # detectMultiScale(image, scaleFactor=1.1, minNeighbors=2)
    cars = model.detectMultiScale(gray_frame, 1.1, 2)
    for x, y, w, h in cars:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    saved_frame.write(frame)
    cv2.imshow("frame", frame)
    if cv2.waitKey(1) & 0xFF == 27:  # 27 == Esc
        break

# Cleaning
cap.release()
saved_frame.release()
cv2.destroyAllWindows()
| [
2,
30023,
33002,
198,
11748,
269,
85,
17,
198,
198,
2,
4149,
262,
2008,
2393,
198,
11128,
796,
269,
85,
17,
13,
10798,
49630,
7,
1911,
14,
6601,
14,
37993,
13,
15820,
4943,
198,
198,
2,
4810,
3185,
2340,
286,
262,
2008,
5739,
198,... | 2.311436 | 411 |
import boto3
from botocore.exceptions import ClientError
from boto3.dynamodb.conditions import Key
import json
import os
import logging
import datetime
from dateutil import tz
from pprint import pprint
from lib.vpc import *
import logging
logger = logging.getLogger()
class AWSAccount(object):
    """Class to represent an AWS Account """

    def __init__(self, account_id, config=None):
        """ Create a new object representing the AWS account specified by account_id.

        Table names and cross-account role settings come from the environment
        (Lambda deployment) unless an explicit ``config`` dict is supplied.
        Raises AccountLookupError if the account is not in the account table.
        """
        # Execute any parent class init()
        super(AWSAccount, self).__init__()

        self.account_id = account_id

        if config is None:
            account_table_name = os.environ['ACCOUNT_TABLE']
            vpc_table_name = os.environ['VPC_TABLE']
            role_name = os.environ['ROLE_NAME']
            role_session_name = os.environ['ROLE_SESSION_NAME']
        else:
            try:
                account_table_name = config['account_table_name']
                vpc_table_name = config['vpc_table_name']
                role_name = config['role_name']
                role_session_name = config['role_session_name']
            except KeyError as e:
                logger.critical(f"AWSAccount passed a config that was missing a key: {e}")
                return(None)

        # Save these as attributes
        self.dynamodb = boto3.resource('dynamodb')
        self.account_table = self.dynamodb.Table(account_table_name)
        self.vpc_table = self.dynamodb.Table(vpc_table_name)

        self.cross_account_role_arn = "arn:aws:iam::{}:role/{}".format(self.account_id, role_name)
        self.default_session_name = role_session_name

        # Pull the cached account record and expose its fields as attributes.
        response = self.account_table.query(
            KeyConditionExpression=Key('account_id').eq(self.account_id),
            Select='ALL_ATTRIBUTES'
        )
        try:
            self.db_record = response['Items'][0]
            # Convert the response into instance attributes
            self.__dict__.update(self.db_record)
        except IndexError as e:
            raise AccountLookupError("ID {} not found".format(account_id))
        except Exception as e:
            logger.error("Got Other error: {}".format(e))

    def __str__(self):
        """when converted to a string, become the account_id"""
        return(self.account_id)

    def __repr__(self):
        """Create a useful string for this class if referenced"""
        return("<AWSAccount {} >".format(self.account_id))

    #
    # Cross Account Role Assumption Methods
    #
    def get_creds(self, session_name=None):
        """
        Request temporary credentials for the account. Returns a dict in the form of
        {
            creds['AccessKeyId'],
            creds['SecretAccessKey'],
            creds['SessionToken']
        }
        Which can be passed to a new boto3 client or resource.
        Takes an optional session_name which can be used by CloudTrail and IAM
        Raises AntiopeAssumeRoleError() if the role is not found or cannot be assumed.
        """
        client = boto3.client('sts')
        if session_name is None:
            session_name = self.default_session_name

        try:
            session = client.assume_role(RoleArn=self.cross_account_role_arn, RoleSessionName=session_name)
            self.creds = session['Credentials']  # Save for later
            return(session['Credentials'])
        except ClientError as e:
            raise AntiopeAssumeRoleError("Failed to assume role {} in account {} ({}): {}".format(
                self.cross_account_role_arn,
                self.account_name.encode('ascii', 'ignore'), self.account_id, e.response['Error']['Code']))

    def get_client(self, type, region=None, session_name=None):
        """
        Returns a boto3 client for the service "type" with credentials in the target account.
        Optionally you can specify the region for the client and the session_name for the AssumeRole.
        """
        # Reuse cached credentials from a previous assume-role call if present.
        if 'creds' not in self.__dict__:
            self.creds = self.get_creds(session_name=session_name)
        client = boto3.client(
            type,
            aws_access_key_id=self.creds['AccessKeyId'],
            aws_secret_access_key=self.creds['SecretAccessKey'],
            aws_session_token=self.creds['SessionToken'],
            region_name=region)
        return(client)

    def get_resource(self, type, region=None, session_name=None):
        """
        Returns a boto3 Resource for the service "type" with credentials in the target account.
        Optionally you can specify the region for the resource and the session_name for the AssumeRole.
        """
        if 'creds' not in self.__dict__:
            self.creds = self.get_creds(session_name=session_name)
        resource = boto3.resource(
            type,
            aws_access_key_id=self.creds['AccessKeyId'],
            aws_secret_access_key=self.creds['SecretAccessKey'],
            aws_session_token=self.creds['SessionToken'],
            region_name=region)
        return(resource)

    #
    # VPC Methods
    #
    def get_regions(self):
        """Return an array of the regions this account is active in. Ordered with us-east-1 in the front."""
        ec2 = self.get_client('ec2')
        response = ec2.describe_regions()
        output = ['us-east-1']
        for r in response['Regions']:
            # us-east-1 is already at the front of the list; skip the duplicate.
            if r['RegionName'] == "us-east-1":
                continue
            output.append(r['RegionName'])
        return(output)

    def get_vpc_ids(self):
        """Return a list of VPC ids for the account (as cached in the VPC Table)."""
        # TODO - Add support to filter by region
        output = []
        vpc_list = []
        vpc_table = self.vpc_table

        response = vpc_table.query(
            IndexName='account-index',
            Select='SPECIFIC_ATTRIBUTES',
            ProjectionExpression='vpc_id',
            Limit=123,
            ConsistentRead=False,
            KeyConditionExpression=Key('account_id').eq(self.account_id)
        )
        # DynamoDB pages results; keep querying while LastEvaluatedKey indicates
        # more items remain.
        while 'LastEvaluatedKey' in response:
            vpc_list = vpc_list + response['Items']
            response = vpc_table.query(
                IndexName='account-index',
                Select='SPECIFIC_ATTRIBUTES',
                ProjectionExpression='vpc_id',
                Limit=123,
                ConsistentRead=False,
                KeyConditionExpression=Key('account_id').eq(self.account_id),
                ExclusiveStartKey=response['LastEvaluatedKey']
            )
        vpc_list = vpc_list + response['Items']

        # Take the list of vpc_ids and return just the ids.
        for v in vpc_list:
            output.append(v['vpc_id'])
        return(output)

    def get_vpcs(self, region=None):
        """Return a list of VPCs for the account (as cached in the VPC Table). Optionally filter it by region"""
        output = []
        vpc_list = self.get_vpc_ids()
        for v in vpc_list:
            vpc = VPC(v, account=self)
            if region is None:
                output.append(vpc)
            else:
                if vpc.region == region:
                    output.append(vpc)
        return(output)

    def get_active_vpcs(self, region=None):
        """Return a list of active VPCs (one or more running instances) for the account. Optionally filter it by region"""
        output = []
        vpc_list = self.get_vpcs(region)
        for v in vpc_list:
            if v.is_active():
                output.append(v)
        # FIXME Filter out ones that haven't been updated in last 24 hrs
        return(output)

    #
    # Compliance Functions
    #
    def discover_cft_info_by_resource(self, PhysicalResourceId, region=None, VersionOutputKey='TemplateVersion'):
        """Jump into the account, and ask Cloudformation in that region about the details of a template"""
        output = {}
        try:
            if region is None:
                cfn_client = self.get_client('cloudformation')
            else:
                cfn_client = self.get_client('cloudformation', region=region)
        except AntiopeAssumeRoleError:
            logger.error("Unable to assume role looking for {} in {}".format(PhysicalResourceId, self.account_id))
            return(None)

        # Ask Cloudformation "who owns PhysicalResourceId?"
        try:
            stack = cfn_client.describe_stack_resources(PhysicalResourceId=PhysicalResourceId)
        except ClientError:
            # More error checking needed here.
            return(None)  # Nothing else to do. Go home and cry.

        for i in stack['StackResources']:
            if i['PhysicalResourceId'] == PhysicalResourceId:
                output['stack_name'] = i['StackName']
                output['Region'] = region
                break
        else:
            # describe_stack_resources() returned a stack, but the resource we
            # searched on wasn't in the resulting dataset.
            # Bug fix: the original referenced an undefined local `stack_name`
            # here, raising NameError instead of logging; derive the name from
            # the returned resources instead.
            found_stack_name = stack['StackResources'][0]['StackName'] if stack['StackResources'] else "unknown"
            logger.error("Found stack {} but resource not present {} in account {}".format(
                found_stack_name, PhysicalResourceId, self.account_id))
            return(None)

        # Time to get the stack version
        response = cfn_client.describe_stacks(StackName=output['stack_name'])
        stack = response['Stacks'][0]
        output['Stack'] = stack

        # Iterate down the outputs till we find the key VersionOutputKey. That is our version.
        output['template_version'] = False
        if 'Outputs' in stack:
            for o in stack['Outputs']:
                if o['OutputKey'] == VersionOutputKey:
                    output['template_version'] = o['OutputValue']
                    break
            else:
                output['template_version'] = "NotFound"

        # Return the stackname and template_version
        return(output)

    #
    # Database functions
    #
    def update_attribute(self, key, value):
        """
        Update a specific attribute in a specific table for this account.
        key is the column, value is the new value to set.
        Raises AccountUpdateError on failure.
        """
        logger.info(u"Adding key:{} value:{} to account {}".format(key, value, self))
        try:
            response = self.account_table.update_item(
                Key={
                    'account_id': self.account_id
                },
                UpdateExpression="set #k = :r",
                # Placeholder avoids clashes with DynamoDB reserved words.
                ExpressionAttributeNames={
                    '#k': key
                },
                ExpressionAttributeValues={
                    ':r': value,
                }
            )
        except ClientError as e:
            raise AccountUpdateError("Failed to update {} to {} in account table: {}".format(key, value, e))

    def get_attribute(self, key):
        """
        Fetches a attribute from the specificed table for the account.
        Raises AccountLookupError if the attribute cannot be retrieved.
        """
        logger.info(u"Getting key:{} from account_table for account {}".format(key, self))
        try:
            response = self.account_table.get_item(
                Key={
                    'account_id': self.account_id
                },
                AttributesToGet=[key]
            )
            return(response['Item'][key])
        except ClientError as e:
            raise AccountLookupError("Failed to get {} from {} in account table: {}".format(key, self, e))
        except KeyError as e:
            raise AccountLookupError("Failed to get {} from {} in account table: {}".format(key, self, e))

    def delete_attribute(self, key):
        """
        Delete a attribute from the specificed table for the account.
        Raises AccountLookupError if the removal fails.
        """
        logger.info(u"Deleting key:{} from account table for account {}".format(key, self))
        table = self.account_table
        try:
            response = table.update_item(
                Key={
                    'account_id': self.account_id
                },
                UpdateExpression="remove #k",
                ExpressionAttributeNames={
                    '#k': key
                },
            )
        except ClientError as e:
            raise AccountLookupError("Failed to get {} from {} in account table: {}".format(key, self, e))
        except KeyError as e:
            raise AccountLookupError("Failed to get {} from {} in account table: {}".format(key, self, e))
class AntiopeAssumeRoleError(Exception):
    """raised when the AssumeRole Fails"""
class AccountUpdateError(Exception):
    """raised when an update to DynamoDB Fails"""
class AccountLookupError(LookupError):
    """Raised when the Account requested is not in the database"""
| [
198,
11748,
275,
2069,
18,
198,
6738,
10214,
420,
382,
13,
1069,
11755,
1330,
20985,
12331,
198,
6738,
275,
2069,
18,
13,
67,
4989,
375,
65,
13,
17561,
1756,
1330,
7383,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
18931,
198,
11... | 2.234921 | 5,836 |
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import collections
import copy
# deadspots = 175.2 (full fov) / res / 2 * discretisation
# eg. 115x44 disc. 5 -> 175.2/115/2*5 = 3.81 deg
# 2nd July (93a92648a5e774c97e3368e3306782382f626b6d) - SR=1, rho=0.1, theta=5 deg
# Success/failure (1/0) per trial, keyed by image resolution then dead-spot size.
data = {
    '115x44':{
        '3.81': [1, 1, 1, 0, 1, 1, 1, 1, 1, 1],
        '7.62': [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
        '11.43': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    },
    '23x9':{
        '3.81': [0, 1, 1, 1, 1, 0, 1, 1, 1, 1],
        '7.62': [0, 0, 1, 0, 1, 1, 0, 0, 0, 1],
        '11.43': [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
    },
}
# Per-cell mean success rate and p*(1-p)/(n-1) error estimate for the error bars.
mean = {}
stdev = np.zeros((len(data.keys()),max([len(v) for k,v in data.items()])))
for i, (k,v) in enumerate(data.items()):
    mean[k] = {}
    for j, (k2, v2) in enumerate(v.items()):
        mean[k][k2] = np.mean(v2)
        stdev[i,j] = mean[k][k2] * (1-mean[k][k2]) / (len(v2)-1)
df = pd.DataFrame(mean)
matplotlib.style.use('ggplot')
# Plot rows in ascending dead-spot size; stdev rows reversed to match.
order = np.argsort([float(x) for x in df.index])
stdev = stdev[::-1,:]
(df*100).iloc[order].plot(kind='bar', yerr=100*stdev, capsize=3)
plt.xticks(rotation=0)
plt.ylabel('Proportion successful tests (%)')
plt.ylim((0,100))
plt.xlabel('"Dead spot", minimum detected orientation offset $(\circ)$')
plt.title('Artificially reduced offset resolution for larger images\nproduces performance similar to smaller images\n[Discrete correction, N=10]')
plt.tight_layout()
# deadspots = 175.2 (full fov) / res / 2 * discretisation
# eg. 115x44 disc. 5 -> 175.2/115/2*5 = 3.81 deg
# 2nd July (93a92648a5e774c97e3368e3306782382f626b6d) - SR=1, rho=0.1, theta=5 deg
# Second comparison: discrete vs continuous correction at 23x9 resolution.
data = {
    'discrete 23x9':{
        '3.81': [0, 1, 1, 1, 1, 0, 1, 1, 1, 1],
        '7.62': [0, 0, 1, 0, 1, 1, 0, 0, 0, 1],
        '11.43': [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
    },
    'continuous 23x9':{
        '3.81': [0, 1, 1, 1, 1],
        '7.62': [1, 1, 1, 1, 1],
        '11.43': [1, 1, 0, 1, 1],
        '19.04': [0, 1, 1, 0, 0],
        '26.66': [1, 0, 0, 1, 0],
        '38.09': [0, 0, 0, 0, 0],
    },
}
mean = {}
# stdev = np.zeros((len(data.keys()),max([len(v) for k,v in data.items()])))
for i, (k,v) in enumerate(data.items()):
    mean[k] = {}
    for j, (k2, v2) in enumerate(v.items()):
        mean[k][k2] = np.mean(v2)
        # stdev[i,j] = mean[k][k2] * (1-mean[k][k2]) / (len(v2)-1)
df = pd.DataFrame(mean)
order = np.argsort([float(x) for x in df.index])
# NOTE(review): the stdev recomputation above is commented out, so this line
# reverses the STALE stdev from the previous section; it is also unused because
# the plot call below passes no yerr — likely leftover dead code.
stdev = stdev[::-1,:]
(df*100).iloc[order].plot(kind='bar', capsize=3)
plt.xticks(rotation=0)
plt.ylabel('Proportion successful tests (%)')
plt.ylim((0,100))
plt.xlabel('"Dead spot", minimum detected orientation offset $(\circ)$')
plt.title('Continuous correction works at a lower offset resolution')
plt.tight_layout()
# Third comparison: success rate per reference-image weighting scheme.
data = collections.OrderedDict((
    ('normal', 0.4),
    ('middle', 0.8),
    ('depth', 0.6),
    ('both', 0.6),
    ('no correction', 0.2),
))
# NOTE(review): np.array(data.values()) only yields a numeric array on Python 2,
# where dict.values() returns a list; on Python 3 this needs list(data.values()).
stdev = np.array(data.values())
# Bernoulli error estimate p*(1-p)/(n-1) with n=5 trials per scheme.
stdev *= (1 - stdev) / 4.
df = pd.DataFrame(data.values(), index=data.keys())
(df*100).plot(kind='bar', yerr=100*stdev, capsize=3, legend=False)
plt.xticks(rotation=0)
plt.ylabel('Proportion successful tests (%)')
plt.ylim((0,100))
plt.xlabel('Reference image weighting type')
plt.title('Weighting reference images can improve performance')
plt.tight_layout()
plt.show()
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2603,
29487,
8019,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
17268,
198,
11748,
4866,
198,
198,
2,
2636,
2777,
1747,
796,
... | 2.097739 | 1,504 |
s = Solution()
data = [
['0.1', '0.1.0.0'],
['1.0.1', '1'],
['7.5.2.4', '7.3'],
['1.1', '1.01'],
["19.8.3.17.5.01.0.0.4.0.0.0.0.0.0.0.0.0.0.0.0.0.00.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.000000.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.000000",
"19.8.3.17.5.01.0.0.4.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0000.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.000000"]
]
for d in data:
print(s.compareVersion(*d))
| [
198,
82,
796,
28186,
3419,
198,
198,
7890,
796,
685,
198,
220,
220,
220,
37250,
15,
13,
16,
3256,
705,
15,
13,
16,
13,
15,
13,
15,
6,
4357,
198,
220,
220,
220,
37250,
16,
13,
15,
13,
16,
3256,
705,
16,
6,
4357,
198,
220,
220... | 1.130556 | 720 |
import json
import os
import shutil
import sys
import zipfile
from tempfile import TemporaryDirectory
from unittest import TestCase, main, skipIf
from cogent3.app.data_store import (
OVERWRITE,
DataStoreMember,
ReadOnlyDirectoryDataStore,
ReadOnlyTinyDbDataStore,
ReadOnlyZippedDataStore,
SingleReadDataStore,
WritableDirectoryDataStore,
WritableTinyDbDataStore,
WritableZippedDataStore,
load_record_from_json,
)
from cogent3.parse.fasta import MinimalFastaParser
__author__ = "Gavin Huttley"
__copyright__ = "Copyright 2007-2020, The Cogent Project"
__credits__ = ["Gavin Huttley"]
__license__ = "BSD-3"
__version__ = "2020.7.2a"
__maintainer__ = "Gavin Huttley"
__email__ = "Gavin.Huttley@anu.edu.au"
__status__ = "Alpha"
class TestFunctions(TestCase):
    """test support functions"""

    def test_load_record_from_json(self):
        """handle different types of input"""
        base = {"data": "blah", "identifier": "some.json", "completed": True}
        record = base.copy()
        nested = record.copy()
        nested["data"] = json.dumps(record)
        # The parser must accept a dict, a JSON string, and a dict whose
        # "data" field is itself serialised JSON.
        for candidate in (record, json.dumps(record), nested):
            if candidate == nested:
                want = json.loads(nested["data"])
            else:
                want = "blah"
            got_id, got_data, got_completed = load_record_from_json(candidate)
            self.assertEqual(got_id, "some.json")
            self.assertEqual(got_data, want)
            self.assertEqual(got_completed, True)
if __name__ == "__main__":
    main()  # unittest.main(): run this module's tests when executed directly
| [
11748,
33918,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
25064,
198,
11748,
19974,
7753,
198,
198,
6738,
20218,
7753,
1330,
46042,
43055,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
11,
1388,
11,
14267,
1532,
198,
198,
6738,
... | 2.461149 | 592 |
from AppLauncher.backend.models.accounts import Account as ModelsAccount
| [
6738,
2034,
46182,
2044,
13,
1891,
437,
13,
27530,
13,
23317,
82,
1330,
10781,
355,
32329,
30116,
201,
198
] | 3.894737 | 19 |
# Read N pairs of (coordinate x, category id i); collect the distinct ids.
N = int(input())
A = []
s2 = set()
for _ in range(N):
	x,i = map(int, input().split())
	A.append([x,i])
	s2.add(i)
# Compress the category ids to contiguous indices 0..cnt_id-1.
ID = dict()
cnt_id = 0
for i in sorted(list(s2)):
	ID[i]=cnt_id
	cnt_id+=1
B = []
for a,b in A:
	B.append([a, ID[b]])
B.sort()
# Sliding window over the coordinate-sorted points: extend the right end E,
# then shrink from the left S while every category is still covered, tracking
# the minimum coordinate span that contains all categories.
check = [0]*cnt_id
S=0
ans=1000000000
for E in range(len(B)):
	check[B[E][1]]+=1
	while 0 not in check:
		ans = min(ans, B[E][0]-B[S][0])
		check[B[S][1]]-=1
		S+=1
print(ans)
| [
45,
796,
493,
7,
15414,
28955,
198,
32,
796,
17635,
198,
82,
17,
796,
900,
3419,
198,
1640,
4808,
287,
2837,
7,
45,
2599,
198,
197,
87,
11,
72,
796,
3975,
7,
600,
11,
5128,
22446,
35312,
28955,
198,
197,
32,
13,
33295,
26933,
87... | 1.822511 | 231 |
# -*- coding: utf-8 -*-
# ********************************************************
# Author and developer: Aleksandr Suvorov
# --------------------------------------------------------
# Licensed: BSD 3-Clause License (see LICENSE for details)
# --------------------------------------------------------
# Url: https://github.com/smartlegion/
# --------------------------------------------------------
# Donate: https://smartlegion.github.io/donate
# --------------------------------------------------------
# Copyright © 2021 Aleksandr Suvorov
# ========================================================
"""Singleton"""
if __name__ == '__main__':
    main()  # NOTE(review): main() is defined outside this excerpt — confirm it exists
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
41906,
8412,
4557,
198,
2,
6434,
290,
8517,
25,
9300,
591,
46273,
1778,
20867,
709,
198,
2,
20368,
22369,
198,
2,
49962,
25,
347,
10305,
513,
12,
2601,
682,
13789,
... | 4.782609 | 138 |
"""
Behavioral pattern:
Chain of responsibility
"""
from abc import ABC, abstractmethod
# -----------------------------------------------------------------
# -----------------------------------------------------------------
# Demo: build a request and hand it to the first handler in the chain.
# NOTE(review): Client is defined outside this excerpt — confirm delegate()'s contract.
request = [3, 14, 34, 9]

c1 = Client()
c1.delegate(request)
| [
37811,
198,
220,
220,
220,
38483,
3912,
25,
198,
220,
220,
220,
220,
220,
220,
220,
21853,
286,
5798,
198,
37811,
198,
198,
6738,
450,
66,
1330,
9738,
11,
12531,
24396,
628,
198,
198,
2,
16529,
12,
628,
628,
198,
2,
16529,
12,
628... | 4.205479 | 73 |
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import User, Persona
# Register your models here.
# Expose the custom User model through Django's stock UserAdmin; Persona uses
# the default ModelAdmin.
admin.site.register(User, UserAdmin)
admin.site.register(Persona)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
28482,
1330,
11787,
46787,
198,
6738,
764,
27530,
1330,
11787,
11,
41581,
628,
198,
2,
17296,
534,
4981,
994,
13,
198,
28482,
13,
1... | 3.47541 | 61 |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 8 16:54:36 2011
@author: ProfMobius
@version: v1.0
"""
import sys
import logging
from optparse import OptionParser
from commands import Commands, CLIENT, SERVER, CalledProcessError
from mcp import recompile_side
if __name__ == '__main__':
    main()  # NOTE(review): main() is defined outside this excerpt — confirm it exists
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
19480,
2758,
220,
807,
1467,
25,
4051,
25,
2623,
2813,
198,
198,
31,
9800,
25,
4415,
44702,
3754,
198,
31,
9641,
25,
410,
16,
13,
15,
198,
37... | 2.923077 | 104 |
# Check whether x = 4 satisfies the linear equation 3*x - 2 == 10.
x = 4
left_hand_side = 3 * x - 2
result = left_hand_side == 10
print(result)
| [
87,
796,
604,
198,
20274,
796,
513,
9,
87,
532,
362,
6624,
838,
198,
4798,
7,
20274,
8,
198
] | 2.263158 | 19 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from morphine import features
from morphine.feature_extractor import FeatureExtractor
from morphine.basetagger import PartialTagger
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
6738,
43948,
1330,
3033,
198,
6738,
43948,
13,
30053,
62,
2302,
40450,
1330,
27018,
11627,
40450,
198,
6738,
4394... | 3.735849 | 53 |
from collections import OrderedDict
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.ticker import FormatStrFormatter
| [
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
198,
6738,
2603,
29487,
8019,
13,
83,
15799,
1330,
18980,
13290,
8479,
1436,
628
] | 3.666667 | 39 |
from neon.transforms.cost import Cost
| [
6738,
25988,
13,
7645,
23914,
13,
15805,
1330,
6446,
628,
198
] | 3.636364 | 11 |
# Generated by Django 3.2.6 on 2021-08-25 13:48
from django.db import migrations
import django.db.models.deletion
import paper_uploads.cloudinary.models.fields.image
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
21,
319,
33448,
12,
2919,
12,
1495,
1511,
25,
2780,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
198,
11748,... | 3 | 56 |
from nltk.tokenize import word_tokenize
from server.db import MongoClientContext
from operator import itemgetter
from server import server_app
from collections import defaultdict
from itertools import chain
import string
import tempfile
import os
import nltk
import logging
nltk.data.path.append('nltk_data')
class Similarities(object):
"""
Class for text similarities stuff
"""
    @staticmethod
    def logger():
        """
        Language-processing specific logger instance. Use this to log inside
        this module's similarity code.

        :return: Returns a logging.Logger('openews.language') instance.
        """
        return logging.getLogger('openews.language')
    @property
    def considerable_doc_property(self):
        """
        The document field used for training; this is the actual data taken
        from the MongoDB documents to parse and train on.

        :return: str
        """
        return 'title'
    @property
    def dictionary_file(self):
        """
        The filename used when serializing gensim.corpora.dictionary.Dictionary
        to disk (stored under the system temp directory).

        :return: str
        """
        return "openews.processors.dict"
    @property
    def dictionary(self):
        """
        The used Dictionary (populated by _create_dictionary).

        :return: gensim.corpora.dictionary.Dictionary
        """
        return self._dictionary
    @property
    def lsi_model(self):
        """
        The used LSI model (populated by _create_lsi_similarity_index).

        :return: gensim.models.lsimodel.LsiModel
        """
        return self._lsimodel
    @property
    def similarity_index(self):
        """
        The similarity index instance (populated by _create_lsi_similarity_index).

        :return: gensim.similarities.docsim.MatrixSimilarity
        """
        return self._sim_index
    @property
    def similarity_threshold(self):
        """
        The similarity threshold, read from the Flask app config.
        Anything above or equals to this value will be considered as similar document.

        :return: float
        """
        return server_app.config['SIMILARITY_THRESHOLD']
    @property
    def lsi_index_mapping(self):
        """
        A mapping between the LSI model index (key) and the documents
        (collection the document is in, document).

        :return: dict
        """
        return self._lsi_mapping
@staticmethod
def _create_resource_path(resource_file):
"""
Creates a absolute path to resource_file based on the given system's temp directory.
:param resource_file: str
:return: str
"""
return os.path.join(tempfile.gettempdir(), resource_file)
def _resource_exists(self, resource_file):
"""
Checks if resource_file exists in the given system's temp directory.
:param resource_file: str
:return: bool
"""
return os.path.isfile(self._create_resource_path(resource_file))
    def _run_transformers(self):
        """
        Runs all the transformer methods listed providing the MongoDB client context instance.
        """
        with MongoClientContext(self._mongo_connection_record) as client:
            # Order matters: the LSI similarity index is built from the dictionary.
            self._create_dictionary(client)
            self._create_lsi_similarity_index(client)
    def _create_dictionary(self, mongo_client):
        """
        Creates the gensim Dictionary (gensim.corpora.dictionary.Dictionary) or loads it if it already exists and sets
        the object's dictionary property.
        :param mongo_client: server.db.MongoClientContext
        """
        # Imported lazily so gensim is only required when training actually runs.
        from gensim.corpora.dictionary import Dictionary
        if self._resource_exists(self.dictionary_file):
            self.logger().debug(
                "Dictionary file found, loading it [%s]" % self._create_resource_path(self.dictionary_file))
            self._dictionary = Dictionary.load(self._create_resource_path(self.dictionary_file))
        else:
            self.logger().debug("Dictionary file not found, creating a new Dictionary file")
            self._dictionary = Dictionary()
            documents = []
            # Tokenize the considered field of every document in every scrapper collection.
            for doc in [di for d in mongo_client.scrappers_collections() for di in d.find()]:
                documents.append(self.tokenize_sentence(doc[self.considerable_doc_property]))
            self.logger().debug("Adding %d documents to dictionary (will skip existing ones)" % len(documents))
            self._dictionary.add_documents(documents)
            # Persist for reuse on the next run.
            self._dictionary.save(self._create_resource_path(self.dictionary_file))
    def _create_lsi_similarity_index(self, mongo_client):
        """
        Creates a Similarity index based on LSI model from the available dictionary. Sets the object's lsi_model and
        similarity_index object properties.
        """
        # Imported lazily so gensim is only required when training actually runs.
        from gensim.models import LsiModel
        from gensim.similarities import MatrixSimilarity

        self._lsi_mapping.clear()
        bow_corpus = []
        # Remember which (collection, document) each corpus position refers to,
        # so similarity hits can be mapped back to their source documents.
        for idx, tp in enumerate([(c, di) for c in mongo_client.scrappers_collections() for di in c.find()]):
            self._lsi_mapping[idx] = tp
            bow_corpus.append(self.sentence_to_bow(tp[1][self.considerable_doc_property]))

        self._lsimodel = LsiModel(bow_corpus, id2word=self.dictionary)
        self._sim_index = MatrixSimilarity(self._lsimodel[bow_corpus])
def calculate_similarities(self):
"""
Find / calculate similarities between documents in the index.
Returns a defaultdict with the key as the LSI index and the value is a list of tuples with the following values
(LSI model Index, similarity threshold - numpy.float32)
tuple
:return: defaultdict(list)
"""
similarities = defaultdict(list)
if not self.lsi_index_mapping:
return
for idx, tp in sorted(self.lsi_index_mapping.items(), key=itemgetter(0)):
sentence = tp[1][self.considerable_doc_property]
bow = self.sentence_to_bow(sentence)
latent_space_vector = self.lsi_model[bow]
sim_vector = self.similarity_index[latent_space_vector]
sorted_mapped_vector = list(sorted(enumerate(sim_vector), key=itemgetter(1)))
for sit in [v for v in sorted_mapped_vector if
v[0] != idx and v[1] >= self.similarity_threshold and tp[0].name !=
self.lsi_index_mapping[v[0]][0].name]:
if sit[0] not in similarities:
similarities[idx].append(sit)
for s in similarities.items():
main_sentence = self.lsi_index_mapping[s[0]][1][self.considerable_doc_property]
print("[%s] %s:" % (self.lsi_index_mapping[s[0]][0].name, main_sentence))
for sm in s[1]:
print("\t[%f][%s]: %s" % (sm[1], self._lsi_mapping[sm[0]][0].name,
self.lsi_index_mapping[sm[0]][1][self.considerable_doc_property]))
return similarities
    def store_similarities(self, update=False):
        """
        Stores the similarities to the database.

        NOTE: not implemented yet — a client context is opened but nothing is
        written.

        :param update: True to update existing, False to delete and add new items
        """
        with MongoClientContext(self._mongo_connection_record) as client:
            # TODO: persist the results of calculate_similarities(); stub only.
            pass
def tokenize_sentence(self, sentence):
"""
Tokenize a sentence (see 'tokenized_corpus_sentences' method on what tokenization in this context means).
:param sentence: str
:return: a list
"""
excluded = set(chain(self._stopwords, string.punctuation))
return [w.lower() for w in word_tokenize(sentence) if w.lower() not in excluded]
def sentence_to_bow(self, sentence):
"""
Transforms a string sentence to a VSM bag-of-words representation.
:param sentence: str
:return: list of tuples
"""
return self.dictionary.doc2bow(self.tokenize_sentence(sentence))
| [
6738,
299,
2528,
74,
13,
30001,
1096,
1330,
1573,
62,
30001,
1096,
198,
6738,
4382,
13,
9945,
1330,
42591,
11792,
21947,
198,
6738,
10088,
1330,
2378,
1136,
353,
198,
6738,
4382,
1330,
4382,
62,
1324,
198,
6738,
17268,
1330,
4277,
11600... | 2.399567 | 3,236 |
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from typing import Union
from botocore.paginate import Paginator
from datetime import datetime
from botocore.waiter import Waiter
from typing import List
| [
6738,
19720,
1330,
32233,
198,
6738,
10214,
420,
382,
13,
16366,
1330,
7308,
11792,
198,
6738,
19720,
1330,
360,
713,
198,
6738,
19720,
1330,
4479,
198,
6738,
10214,
420,
382,
13,
79,
363,
4559,
1330,
31525,
20900,
198,
6738,
4818,
8079... | 4.032787 | 61 |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import copy
import os
from time import perf_counter

import click
import imageio
import numpy as np
import PIL.Image
import PIL.ImageFilter
import torch
import torch.nn.functional as F

import clip
import dnnlib
import legacy
# Per-channel RGB normalization statistics (presumably CLIP's published
# preprocessing mean/std — TODO confirm), kept on the GPU so they can be
# applied directly to CUDA image batches.
image_mean = torch.tensor([0.48145466, 0.4578275, 0.40821073]).cuda()
image_std = torch.tensor([0.26862954, 0.26130258, 0.27577711]).cuda()
def bit_conversion_16_to_8(images: torch.Tensor):
    '''
    Rescale 16-bit pixel values into the 8-bit range [0, 255].

    The result is a float32 tensor (values divided by 256 and clamped), ready
    to be cast to uint8 by the caller.
    '''
    rescaled = images.to(torch.float32) / 256
    return rescaled.clamp(0, 255)
#----------------------------------------------------------------------------
def spherical_dist_loss(x: torch.Tensor, y: torch.Tensor):
    '''
    Spherical distance loss between x and y after unit-normalizing both along
    the last dimension.

    Original code by Katherine Crowson, copied from https://github.com/afiaka87/clip-guided-diffusion/blob/main/cgd/losses.py
    '''
    x_unit = F.normalize(x, dim=-1)
    y_unit = F.normalize(y, dim=-1)
    chord = (x_unit - y_unit).norm(dim=-1)
    return chord.div(2).arcsin().pow(2).mul(2)
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
@click.command()
@click.option('--network', 'network_pkl', help='Network pickle filename', required=True)
@click.option('--target-image', 'target_fname', help='Target image file to project to', required=False, metavar='FILE', default=None)
@click.option('--target-text', help='Target text to project to', required=False, default=None)
@click.option('--initial-latent', help='Initial latent', default=None)
@click.option('--lr', help='Learning rate', type=float, default=0.3, show_default=True)
@click.option('--num-steps', help='Number of optimization steps', type=int, default=300, show_default=True)
@click.option('--seed', help='Random seed', type=int, default=303, show_default=True)
@click.option('--save-video', help='Save an mp4 video of optimization progress', type=bool, default=True, show_default=True)
@click.option('--outdir', help='Where to save the output images', required=True, metavar='DIR')
@click.option('--use-cosine-dist', help='Use cosine distance when calculating the loss', type=bool, default=True, show_default=True)
@click.option('--use-spherical-dist', help='Use spherical distance when calculating the loss', type=bool, default=False, show_default=True)
@click.option('--16bit', 'is_16_bit', help='Set to true if the network is trained to output 16-bit images', type=bool, default=False, show_default=True)
@click.option('--use-w-only', help='Project into w space instead of w+ space', type=bool, default=False, show_default=True)
def run_projection(
    network_pkl: str,
    target_fname: str,
    target_text: str,
    initial_latent: str,
    lr: float,
    num_steps: int,
    seed: int,
    save_video: bool,
    outdir: str,
    use_cosine_dist: bool,
    use_spherical_dist: bool,
    is_16_bit: bool,
    use_w_only: bool,
):
    """Project given image to the latent space of pretrained network pickle using CLIP.

    Optimizes a latent vector so the generator output matches the target image
    and/or target text, then writes the final image, the W vector (.npz) and,
    optionally, an mp4 of the optimization progress to ``outdir``.

    Examples:

    \b
    python clip_search.py --outdir=out --target-text='An image of an apple.' \\
        --network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/ffhq.pkl
    """
    # Set seed value
    np.random.seed(seed)
    torch.manual_seed(seed)

    # Load networks.
    print(f'Loading networks from {network_pkl}...')
    device = torch.device('cuda')
    with dnnlib.util.open_url(network_pkl) as fp:
        G = legacy.load_network_pkl(fp)['G_ema'].requires_grad_(False).to(device) # type: ignore

    # Load target image: center-crop to a square, resize to the generator's
    # resolution, and move to the GPU as CHW uint8.
    target_image = None
    if target_fname:
        # NOTE(review): PIL.ImageFilter is not imported by `import PIL.Image`;
        # ensure `import PIL.ImageFilter` is present or this raises AttributeError.
        target_pil = PIL.Image.open(target_fname).convert('RGB').filter(PIL.ImageFilter.SHARPEN)
        w, h = target_pil.size
        s = min(w, h)
        target_pil = target_pil.crop(((w - s) // 2, (h - s) // 2, (w + s) // 2, (h + s) // 2))
        target_pil = target_pil.resize((G.img_resolution, G.img_resolution), PIL.Image.LANCZOS)
        target_uint8 = np.array(target_pil, dtype=np.uint8)
        target_image = torch.tensor(target_uint8.transpose([2, 0, 1]), device=device)

    # Tokenize the text prompt for CLIP, if given.
    if target_text:
        target_text = clip.tokenize(target_text).to(device)
        # target_text = torch.cat([clip.tokenize(target_text)]).to(device)

    # Optional warm start: take the first array stored in the .npz file.
    if initial_latent is not None:
        initial_latent = np.load(initial_latent)
        initial_latent = initial_latent[initial_latent.files[0]]

    # Optimize projection.
    start_time = perf_counter()
    # `project` is defined elsewhere in this file; returns one W per step.
    projected_w_steps = project(
        G,
        target_image=target_image,
        target_text=target_text,
        initial_latent=initial_latent,
        initial_learning_rate=lr,
        num_steps=num_steps,
        is_16_bit=is_16_bit,
        use_w_only=use_w_only,
        use_cosine_dist=use_cosine_dist,
        use_spherical_dist=use_spherical_dist,
        device=device,
        verbose=True
    )
    print (f'Elapsed: {(perf_counter()-start_time):.1f} s')

    # Save final projected frame and W vector.
    os.makedirs(outdir, exist_ok=True)
    if target_fname:
        target_pil.save(f'{outdir}/target.png')
    projected_w = projected_w_steps[-1]
    synth_image = G.synthesis(projected_w.unsqueeze(0), noise_mode='const')
    if is_16_bit:
        # Generator output in [-1, 1] -> 16-bit range, saved as mode 'I;16'.
        synth_image = (synth_image.permute(0, 2, 3, 1) * 32767.5 + 32767.5).clamp(0, 65535).to(torch.int32)
        synth_image = synth_image[0].cpu().numpy().astype(np.uint16)
        mode = 'I;16'
    else:
        # Generator output in [-1, 1] -> 8-bit RGB.
        synth_image = (synth_image + 1) * (255/2)
        synth_image = synth_image.permute(0, 2, 3, 1).clamp(0, 255).to(torch.uint8)[0].cpu().numpy()
        mode = 'RGB'
    PIL.Image.fromarray(synth_image, mode).save(f'{outdir}/proj.png')
    np.savez(f'{outdir}/projected_w.npz', w=projected_w.unsqueeze(0).cpu().numpy())

    # Render debug output: optional video and projected image and W vector.
    if save_video:
        video = imageio.get_writer(f'{outdir}/proj.mp4', mode='I', fps=10, codec='libx264', bitrate='16M')
        print (f'Saving optimization progress video "{outdir}/proj.mp4"')
        for projected_w in projected_w_steps:
            synth_image = G.synthesis(projected_w.unsqueeze(0), noise_mode='const')
            if is_16_bit:
                # Down-convert 16-bit frames to 8-bit and replicate the single
                # channel to RGB so the mp4 encoder accepts them.
                synth_image = (synth_image.permute(0, 2, 3, 1) * 32767.5 + 32767.5).clamp(0, 65535)
                synth_image = bit_conversion_16_to_8(synth_image)
                synth_image = synth_image[0].cpu().numpy().astype(np.uint8)
                synth_image = synth_image.repeat(3, axis=-1)
            else:
                synth_image = (synth_image + 1) * (255/2)
                synth_image = synth_image.permute(0, 2, 3, 1).clamp(0, 255).to(torch.uint8)[0].cpu().numpy()
            if target_fname:
                # Show target and current projection side by side.
                video.append_data(np.concatenate([target_uint8, synth_image], axis=1))
            else:
                video.append_data(synth_image)
        video.close()
#----------------------------------------------------------------------------
# Entry point: click parses sys.argv and supplies all parameters.
if __name__ == "__main__":
    run_projection() # pylint: disable=no-value-for-parameter
#---------------------------------------------------------------------------- | [
2,
15069,
357,
66,
8,
33448,
11,
15127,
23929,
44680,
6234,
13,
220,
1439,
2489,
10395,
13,
198,
2,
198,
2,
15127,
23929,
44680,
6234,
290,
663,
8240,
669,
12377,
477,
9028,
3119,
198,
2,
290,
20622,
2489,
287,
290,
284,
428,
3788,
... | 2.452449 | 3,165 |
# load every working model from the model collection into workspace
import sys
import os
import importlib
import pandas as pd
import numpy as np
import libsbml
from C import DIR_MODELS_REGROUPED, DIR_MODELS
def get_submodel(submodel_path: str,
                 model_info: pd.DataFrame):
    """Load one AMICI model module given its folder path relative to DIR_MODELS.

    Looks up the matching row of the model-info table to configure the
    simulation timepoints, and parses the corresponding SBML file, which is
    returned alongside the AMICI model for any post-processing of the AMICI
    results.

    :param submodel_path: relative path to the folder with the AMICI module
    :param model_info: table with one row per model ('amici_path[_final]',
        'start_time', 'end_time', 'n_timepoints', 'regrouped_path' columns)
    :return: tuple (amici_model, sbml_model)
    """
    # make the AMICI model module importable
    amici_model_path = os.path.join(DIR_MODELS, submodel_path)
    if os.path.abspath(amici_model_path) not in sys.path:
        sys.path.insert(0, os.path.abspath(amici_model_path))

    # import the module, get the model
    amici_model_name = amici_model_path.split('/')[-1]
    amici_model_module = importlib.import_module(amici_model_name)
    amici_model = amici_model_module.getModel()

    # get information about the model from the tsv table
    if 'amici_path_final' in model_info.keys():
        model_row = model_info.loc[model_info['amici_path_final'] == submodel_path]
    else:
        model_row = model_info.loc[model_info['amici_path'] == submodel_path]
    # fix: renamed local from `id`, which shadowed the builtin of the same name
    row_id = int(model_row.index.values)

    # set the timepoints according to the model info dataframe
    amici_model.setTimepoints(np.linspace(
        float(model_row.loc[row_id, 'start_time']),
        float(model_row.loc[row_id, 'end_time']),
        int(model_row.loc[row_id, 'n_timepoints'])
    ))

    # import the sbml model
    sbml_path = os.path.join(DIR_MODELS, model_row.loc[row_id, 'regrouped_path'])
    sbml_model = (libsbml.readSBML(sbml_path)).getModel()

    return amici_model, sbml_model
def get_submodel_list(model_name: str,
                      model_info: pd.DataFrame):
    """Load every accepted AMICI submodel belonging to one benchmark model.

    Also returns the matching SBML models, for any post-processing of the
    AMICI results.

    :param model_name: short id of the benchmark model
    :param model_info: model info table
    :return: tuple (list of AMICI models, list of SBML models)
    """
    # restrict the table to the accepted rows of this benchmark model
    rows = model_info.loc[model_info['short_id'] == model_name]
    rows = rows[rows['accepted']]

    amici_models = []
    sbml_models = []
    for path in rows['amici_path_final']:
        amici_model, sbml_model = get_submodel(path, model_info)
        amici_models.append(amici_model)
        sbml_models.append(sbml_model)

    return amici_models, sbml_models
def get_submodel_copasi(submodel_path: str,
                        model_info: pd.DataFrame):
    """Locate the COPASI file of a submodel and parse its SBML counterpart.

    Note: the COPASI file itself is not parsed here — only its absolute path
    is returned, alongside the parsed SBML model for any post-processing of
    the COPASI results.

    :param submodel_path: relative path (under DIR_MODELS) of the COPASI file;
        empty or NaN values yield (None, None)
    :param model_info: table with 'copasi_path[_final]' / 'amici_path[_final]'
        and 'regrouped_path' columns
    :return: tuple (copasi_file_path, sbml_model), or (None, None)
    """
    # no COPASI file recorded for this submodel
    if str(submodel_path) in ('', 'nan', 'NaN'):
        return None, None
    copasi_file = os.path.join(DIR_MODELS, submodel_path)

    # if the amici import did not work, we don't want to consider this model
    if 'amici_path_final' in model_info.keys():
        model_row = model_info.loc[model_info['copasi_path_final'] == submodel_path]
    elif 'amici_path' in model_info.keys():
        model_row = model_info.loc[model_info['copasi_path'] == submodel_path]
    else:
        return None, None
    # fix: renamed local from `id`, which shadowed the builtin of the same name
    row_id = int(model_row.index.values)

    # import the sbml model
    sbml_path = os.path.join(DIR_MODELS, model_row.loc[row_id, 'regrouped_path'])
    sbml_model = (libsbml.readSBML(sbml_path)).getModel()

    return copasi_file, sbml_model
def get_submodel_list_copasi(model_name: str,
                             model_info: pd.DataFrame):
    """Collect all accepted COPASI submodels of one benchmark model.

    Also returns the matching SBML models, for any post-processing of the
    COPASI results.

    :param model_name: short id of the benchmark model
    :param model_info: model info table
    :return: tuple (list of COPASI file paths, list of SBML models)
    """
    # restrict the table to the accepted rows of this benchmark model
    rows = model_info.loc[model_info['short_id'] == model_name]
    rows = rows[rows['accepted']]

    copasi_files = []
    sbml_models = []
    for path in rows['copasi_path_final']:
        copasi_file, sbml_model = get_submodel_copasi(path, model_info)
        # submodels without a usable COPASI file are skipped entirely
        if copasi_file is not None:
            copasi_files.append(copasi_file)
            sbml_models.append(sbml_model)

    return copasi_files, sbml_models
| [
2,
3440,
790,
1762,
2746,
422,
262,
2746,
4947,
656,
44573,
198,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
1330,
8019,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
9195,
36299,
4029,... | 2.589069 | 1,976 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.ml import feature, Pipeline
from pyspark import keyword_only, SparkContext
from pyspark.rdd import ignore_unicode_prefix
from pyspark.ml.linalg import _convert_to_vector
from pyspark.ml.param.shared import *
from pyspark.ml.util import JavaMLReadable, JavaMLWritable
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaParams, JavaTransformer, _jvm
from pyspark.ml.common import inherit_doc
# Public API of this module. Bug fix: 'WeekendFeaturizer' and
# 'GeohashFeaturizer' are defined below but were missing here, so
# `from ... import *` silently omitted them.
__all__ = ['LogTransformFeaturizer', 'PowerTransformFeaturizer',
           'MathFeaturizer', 'DayOfWeekFeaturizer', 'HourOfDayFeaturizer',
           'MonthOfYearFeaturizer', 'PartsOfDayFeaturizer',
           'WeekendFeaturizer',
           'AdditionFeaturizer', 'SubtractionFeaturizer',
           'MultiplicationFeaturizer', 'DivisionFeaturizer',
           'GroupByFeaturizer', 'GeohashFeaturizer']
@inherit_doc
class LogTransformFeaturizer(JavaTransformer, HasInputCol, HasOutputCol,
                             JavaMLReadable, JavaMLWritable):
    """
    Applies a logarithmic transformation to a numeric input column.
    """

    logType = Param(Params._dummy(), "logType",
                    "log type to be used. Options are 'natural' (natural log), "
                    "'log10' (log base 10), or 'log2' (log base 2).",
                    typeConverter=TypeConverters.toString)

    @keyword_only
    def __init__(self, inputCol=None, outputCol=None, logType="natural"):
        """
        __init__(self, inputCol=None, outputCol=None, logType="natural")
        """
        super(LogTransformFeaturizer, self).__init__()
        self._java_obj = self._new_java_obj(
            "com.adobe.platform.ml.feature.unary.numeric.LogTransformFeaturizer", self.uid)
        self._setDefault(logType="natural")
        self.setParams(**self._input_kwargs)

    @keyword_only
    def setParams(self, inputCol=None, outputCol=None, logType="natural"):
        """
        setParams(self, inputCol=None, outputCol=None, logType="natural")
        Sets params for this LogTransformFeaturizer.
        """
        return self._set(**self._input_kwargs)

    def setLogType(self, value):
        """Set the value of :py:attr:`logType`."""
        return self._set(logType=value)

    def getLogType(self):
        """Get the value of logType or its default value."""
        return self.getOrDefault(self.logType)
@inherit_doc
class PowerTransformFeaturizer(JavaTransformer, HasInputCol, HasOutputCol,
                               JavaMLReadable, JavaMLWritable):
    """
    Raises a numeric input column to a configurable integer power.
    """

    powerType = Param(Params._dummy(), "powerType",
                      "power type to be used. Any integer greater than 0. Default is power of 2",
                      typeConverter=TypeConverters.toInt)

    @keyword_only
    def __init__(self, inputCol=None, outputCol=None, powerType=2):
        """
        __init__(self, inputCol=None, outputCol=None, powerType=2)
        """
        super(PowerTransformFeaturizer, self).__init__()
        self._java_obj = self._new_java_obj(
            "com.adobe.platform.ml.feature.unary.numeric.PowerTransformFeaturizer", self.uid)
        self._setDefault(powerType=2)
        self.setParams(**self._input_kwargs)

    @keyword_only
    def setParams(self, inputCol=None, outputCol=None, powerType=2):
        """
        setParams(self, inputCol=None, outputCol=None, powerType=2)
        Sets params for this PowerTransformFeaturizer.
        """
        return self._set(**self._input_kwargs)

    def setPowerType(self, value):
        """Set the value of :py:attr:`powerType`."""
        return self._set(powerType=value)

    def getPowerType(self):
        """Get the value of powerType or its default value."""
        return self.getOrDefault(self.powerType)
@inherit_doc
class MathFeaturizer(JavaTransformer, HasInputCol, HasOutputCol,
                     JavaMLReadable, JavaMLWritable):
    """
    Applies a math function (default: sqrt) to a numeric input column.
    """

    mathFunction = Param(Params._dummy(), "mathFunction",
                         "math function to be used. Default is sqrt",
                         typeConverter=TypeConverters.toString)

    @keyword_only
    def __init__(self, inputCol=None, outputCol=None, mathFunction="sqrt"):
        """
        __init__(self, inputCol=None, outputCol=None, mathFunction="sqrt")
        """
        super(MathFeaturizer, self).__init__()
        self._java_obj = self._new_java_obj(
            "com.adobe.platform.ml.feature.unary.numeric.MathFeaturizer", self.uid)
        self._setDefault(mathFunction="sqrt")
        self.setParams(**self._input_kwargs)

    @keyword_only
    def setParams(self, inputCol=None, outputCol=None, mathFunction="sqrt"):
        """
        setParams(self, inputCol=None, outputCol=None, mathFunction="sqrt")
        Sets params for this MathFeaturizer.
        """
        return self._set(**self._input_kwargs)

    def setMathFunction(self, value):
        """Set the value of :py:attr:`mathFunction`."""
        return self._set(mathFunction=value)

    def getMathFunction(self):
        """Get the value of mathFunction or its default value."""
        return self.getOrDefault(self.mathFunction)
@inherit_doc
class DayOfWeekFeaturizer(JavaTransformer, HasInputCol, HasOutputCol,
                          JavaMLReadable, JavaMLWritable):
    """
    Extracts the day of the week from a date-time column.
    """

    format = Param(Params._dummy(), "format", "specify timestamp pattern. ",
                   typeConverter=TypeConverters.toString)
    timezone = Param(Params._dummy(), "timezone", "specify timezone. ",
                     typeConverter=TypeConverters.toString)

    @keyword_only
    def __init__(self, inputCol=None, outputCol=None, format="yyyy-MM-dd", timezone="UTC"):
        """
        __init__(self, inputCol=None, outputCol=None, format="yyyy-MM-dd", timezone="UTC")
        """
        super(DayOfWeekFeaturizer, self).__init__()
        self._java_obj = self._new_java_obj(
            "com.adobe.platform.ml.feature.unary.temporal.DayOfWeekFeaturizer", self.uid)
        self._setDefault(format="yyyy-MM-dd", timezone="UTC")
        self.setParams(**self._input_kwargs)

    @keyword_only
    def setParams(self, inputCol=None, outputCol=None, format="yyyy-MM-dd", timezone="UTC"):
        """
        setParams(self, inputCol=None, outputCol=None, format="yyyy-MM-dd", timezone="UTC")
        Sets params for this DayOfWeekFeaturizer.
        """
        return self._set(**self._input_kwargs)

    def setFormat(self, value):
        """Set the value of :py:attr:`format`."""
        return self._set(format=value)

    def getFormat(self):
        """Get the value of format or its default value."""
        return self.getOrDefault(self.format)

    def setTimezone(self, value):
        """Set the value of :py:attr:`timezone`."""
        return self._set(timezone=value)

    def getTimezone(self):
        """Get the value of timezone or its default value."""
        return self.getOrDefault(self.timezone)
@inherit_doc
class HourOfDayFeaturizer(JavaTransformer, HasInputCol, HasOutputCol,
                          JavaMLReadable, JavaMLWritable):
    """
    Extracts the hour of the day from a date-time column.
    """

    format = Param(Params._dummy(), "format", "specify timestamp pattern. ",
                   typeConverter=TypeConverters.toString)
    timezone = Param(Params._dummy(), "timezone", "specify timezone. ",
                     typeConverter=TypeConverters.toString)

    @keyword_only
    def __init__(self, inputCol=None, outputCol=None, format="yyyy-MM-dd HH:mm:ss", timezone="UTC"):
        """
        __init__(self, inputCol=None, outputCol=None, format="yyyy-MM-dd HH:mm:ss", timezone="UTC")
        """
        super(HourOfDayFeaturizer, self).__init__()
        self._java_obj = self._new_java_obj(
            "com.adobe.platform.ml.feature.unary.temporal.HourOfDayFeaturizer", self.uid)
        self._setDefault(format="yyyy-MM-dd HH:mm:ss", timezone="UTC")
        self.setParams(**self._input_kwargs)

    @keyword_only
    def setParams(self, inputCol=None, outputCol=None, format="yyyy-MM-dd HH:mm:ss", timezone="UTC"):
        """
        setParams(self, inputCol=None, outputCol=None, format="yyyy-MM-dd HH:mm:ss", timezone="UTC")
        Sets params for this HourOfDayFeaturizer.
        """
        return self._set(**self._input_kwargs)

    def setFormat(self, value):
        """Set the value of :py:attr:`format`."""
        return self._set(format=value)

    def getFormat(self):
        """Get the value of format or its default value."""
        return self.getOrDefault(self.format)

    def setTimezone(self, value):
        """Set the value of :py:attr:`timezone`."""
        return self._set(timezone=value)

    def getTimezone(self):
        """Get the value of timezone or its default value."""
        return self.getOrDefault(self.timezone)
@inherit_doc
class MonthOfYearFeaturizer(JavaTransformer, HasInputCol, HasOutputCol,
                            JavaMLReadable, JavaMLWritable):
    """
    Extracts the month of the year from a date-time column.
    """

    format = Param(Params._dummy(), "format", "specify timestamp pattern. ",
                   typeConverter=TypeConverters.toString)
    timezone = Param(Params._dummy(), "timezone", "specify timezone. ",
                     typeConverter=TypeConverters.toString)

    @keyword_only
    def __init__(self, inputCol=None, outputCol=None, format="yyyy-MM-dd", timezone="UTC"):
        """
        __init__(self, inputCol=None, outputCol=None, format="yyyy-MM-dd", timezone="UTC")
        """
        super(MonthOfYearFeaturizer, self).__init__()
        self._java_obj = self._new_java_obj(
            "com.adobe.platform.ml.feature.unary.temporal.MonthOfYearFeaturizer", self.uid)
        self._setDefault(format="yyyy-MM-dd", timezone="UTC")
        self.setParams(**self._input_kwargs)

    @keyword_only
    def setParams(self, inputCol=None, outputCol=None, format="yyyy-MM-dd", timezone="UTC"):
        """
        setParams(self, inputCol=None, outputCol=None, format="yyyy-MM-dd", timezone="UTC")
        Sets params for this MonthOfYearFeaturizer.
        """
        return self._set(**self._input_kwargs)

    def setFormat(self, value):
        """Set the value of :py:attr:`format`."""
        return self._set(format=value)

    def getFormat(self):
        """Get the value of format or its default value."""
        return self.getOrDefault(self.format)

    def setTimezone(self, value):
        """Set the value of :py:attr:`timezone`."""
        return self._set(timezone=value)

    def getTimezone(self):
        """Get the value of timezone or its default value."""
        return self.getOrDefault(self.timezone)
@inherit_doc
class PartsOfDayFeaturizer(JavaTransformer, HasInputCol, HasOutputCol,
                           JavaMLReadable, JavaMLWritable):
    """
    Maps a date-time column to a part-of-day bucket.
    """

    format = Param(Params._dummy(), "format", "specify timestamp pattern. ",
                   typeConverter=TypeConverters.toString)
    timezone = Param(Params._dummy(), "timezone", "specify timezone. ",
                     typeConverter=TypeConverters.toString)

    @keyword_only
    def __init__(self, inputCol=None, outputCol=None, format="yyyy-MM-dd HH:mm:ss", timezone="UTC"):
        """
        __init__(self, inputCol=None, outputCol=None, format="yyyy-MM-dd HH:mm:ss", timezone="UTC")
        """
        super(PartsOfDayFeaturizer, self).__init__()
        self._java_obj = self._new_java_obj(
            "com.adobe.platform.ml.feature.unary.temporal.PartsOfDayFeaturizer", self.uid)
        self._setDefault(format="yyyy-MM-dd HH:mm:ss", timezone="UTC")
        self.setParams(**self._input_kwargs)

    @keyword_only
    def setParams(self, inputCol=None, outputCol=None, format="yyyy-MM-dd HH:mm:ss", timezone="UTC"):
        """
        setParams(self, inputCol=None, outputCol=None, format="yyyy-MM-dd HH:mm:ss", timezone="UTC")
        Sets params for this PartsOfDayFeaturizer.
        """
        return self._set(**self._input_kwargs)

    def setFormat(self, value):
        """Set the value of :py:attr:`format`."""
        return self._set(format=value)

    def getFormat(self):
        """Get the value of format or its default value."""
        return self.getOrDefault(self.format)

    def setTimezone(self, value):
        """Set the value of :py:attr:`timezone`."""
        return self._set(timezone=value)

    def getTimezone(self):
        """Get the value of timezone or its default value."""
        return self.getOrDefault(self.timezone)
@inherit_doc
class WeekendFeaturizer(JavaTransformer, HasInputCol, HasOutputCol,
                        JavaMLReadable, JavaMLWritable):
    """
    Flags whether a date-time value falls on a weekend.
    """

    format = Param(Params._dummy(), "format", "specify timestamp pattern. ",
                   typeConverter=TypeConverters.toString)
    timezone = Param(Params._dummy(), "timezone", "specify timezone. ",
                     typeConverter=TypeConverters.toString)

    @keyword_only
    def __init__(self, inputCol=None, outputCol=None, format="yyyy-MM-dd", timezone="UTC"):
        """
        __init__(self, inputCol=None, outputCol=None, format="yyyy-MM-dd", timezone="UTC")
        """
        super(WeekendFeaturizer, self).__init__()
        self._java_obj = self._new_java_obj(
            "com.adobe.platform.ml.feature.unary.temporal.WeekendFeaturizer", self.uid)
        self._setDefault(format="yyyy-MM-dd", timezone="UTC")
        self.setParams(**self._input_kwargs)

    @keyword_only
    def setParams(self, inputCol=None, outputCol=None, format="yyyy-MM-dd", timezone="UTC"):
        """
        setParams(self, inputCol=None, outputCol=None, format="yyyy-MM-dd", timezone="UTC")
        Sets params for this WeekendFeaturizer.
        """
        return self._set(**self._input_kwargs)

    def setFormat(self, value):
        """Set the value of :py:attr:`format`."""
        return self._set(format=value)

    def getFormat(self):
        """Get the value of format or its default value."""
        return self.getOrDefault(self.format)

    def setTimezone(self, value):
        """Set the value of :py:attr:`timezone`."""
        return self._set(timezone=value)

    def getTimezone(self):
        """Get the value of timezone or its default value."""
        return self.getOrDefault(self.timezone)
@inherit_doc
class AdditionFeaturizer(JavaTransformer, HasInputCols, HasOutputCol,
                         JavaMLReadable, JavaMLWritable):
    """
    Element-wise sum of two numeric input columns.
    """

    @keyword_only
    def __init__(self, inputCols=None, outputCol=None):
        """
        __init__(self, inputCols=None, outputCol=None)
        """
        super(AdditionFeaturizer, self).__init__()
        self._java_obj = self._new_java_obj(
            "com.adobe.platform.ml.feature.binary.numeric.AdditionFeaturizer", self.uid)
        self.setParams(**self._input_kwargs)

    @keyword_only
    def setParams(self, inputCols=None, outputCol=None):
        """
        setParams(self, inputCols=None, outputCol=None)
        Sets params for this AdditionFeaturizer.
        """
        return self._set(**self._input_kwargs)
@inherit_doc
class SubtractionFeaturizer(JavaTransformer, HasInputCols, HasOutputCol,
                            JavaMLReadable, JavaMLWritable):
    """
    Element-wise difference of two numeric input columns.
    """

    @keyword_only
    def __init__(self, inputCols=None, outputCol=None):
        """
        __init__(self, inputCols=None, outputCol=None)
        """
        super(SubtractionFeaturizer, self).__init__()
        self._java_obj = self._new_java_obj(
            "com.adobe.platform.ml.feature.binary.numeric.SubtractionFeaturizer", self.uid)
        self.setParams(**self._input_kwargs)

    @keyword_only
    def setParams(self, inputCols=None, outputCol=None):
        """
        setParams(self, inputCols=None, outputCol=None)
        Sets params for this SubtractionFeaturizer.
        """
        return self._set(**self._input_kwargs)
@inherit_doc
class MultiplicationFeaturizer(JavaTransformer, HasInputCols, HasOutputCol,
                               JavaMLReadable, JavaMLWritable):
    """
    Element-wise product of two numeric input columns.
    """

    @keyword_only
    def __init__(self, inputCols=None, outputCol=None):
        """
        __init__(self, inputCols=None, outputCol=None)
        """
        super(MultiplicationFeaturizer, self).__init__()
        self._java_obj = self._new_java_obj(
            "com.adobe.platform.ml.feature.binary.numeric.MultiplicationFeaturizer", self.uid)
        self.setParams(**self._input_kwargs)

    @keyword_only
    def setParams(self, inputCols=None, outputCol=None):
        """
        setParams(self, inputCols=None, outputCol=None)
        Sets params for this MultiplicationFeaturizer.
        """
        return self._set(**self._input_kwargs)
@inherit_doc
class DivisionFeaturizer(JavaTransformer, HasInputCols, HasOutputCol,
                         JavaMLReadable, JavaMLWritable):
    """
    Element-wise quotient of two numeric input columns.
    """

    @keyword_only
    def __init__(self, inputCols=None, outputCol=None):
        """
        __init__(self, inputCols=None, outputCol=None)
        """
        super(DivisionFeaturizer, self).__init__()
        self._java_obj = self._new_java_obj(
            "com.adobe.platform.ml.feature.binary.numeric.DivisionFeaturizer", self.uid)
        self.setParams(**self._input_kwargs)

    @keyword_only
    def setParams(self, inputCols=None, outputCol=None):
        """
        setParams(self, inputCols=None, outputCol=None)
        Sets params for this DivisionFeaturizer.
        """
        return self._set(**self._input_kwargs)
@inherit_doc
class GroupByFeaturizer(JavaTransformer, HasInputCol, HasOutputCol,
                        JavaMLReadable, JavaMLWritable):
    """
    Aggregates a column per group (default aggregate: count).
    """

    aggregateType = Param(Params._dummy(), "aggregateType",
                          "aggregate type to be used. Default is count",
                          typeConverter=TypeConverters.toString)
    aggregateCol = Param(Params._dummy(), "aggregateCol", "aggregate column to be used. ",
                         typeConverter=TypeConverters.toString)

    @keyword_only
    def __init__(self, inputCol=None, outputCol=None, aggregateType="count", aggregateCol=None):
        """
        __init__(self, inputCol=None, outputCol=None, aggregateType="count", aggregateCol=None)
        """
        super(GroupByFeaturizer, self).__init__()
        self._java_obj = self._new_java_obj(
            "com.adobe.platform.ml.feature.group.GroupByFeaturizer", self.uid)
        self._setDefault(aggregateType="count")
        self.setParams(**self._input_kwargs)

    @keyword_only
    def setParams(self, inputCol=None, outputCol=None, aggregateType="count", aggregateCol=None):
        """
        setParams(self, inputCol=None, outputCol=None, aggregateType="count", aggregateCol=None)
        Sets params for this GroupByFeaturizer.
        """
        return self._set(**self._input_kwargs)

    def setAggregateType(self, value):
        """Set the value of :py:attr:`aggregateType`."""
        return self._set(aggregateType=value)

    def getAggregateType(self):
        """Get the value of aggregateType or its default value."""
        return self.getOrDefault(self.aggregateType)

    def setAggregateCol(self, value):
        """Set the value of :py:attr:`aggregateCol`."""
        return self._set(aggregateCol=value)

    def getAggregateCol(self):
        """Get the value of aggregateCol or its default value."""
        return self.getOrDefault(self.aggregateCol)
@inherit_doc
class GeohashFeaturizer(JavaTransformer, HasInputCols, HasOutputCol,
JavaMLReadable, JavaMLWritable):
"""
Perform Geohash Transformation of latitude and longitude
"""
precision = Param(Params._dummy(), "precision", "Precision level to be used. " +
"Default precision level is 5",
typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, inputCols=None, outputCol=None, precision=5):
"""
__init__(self, inputCols=None, outputCol=None, precision=5)
"""
super(GeohashFeaturizer, self).__init__()
self._java_obj = self._new_java_obj("com.adobe.platform.ml.feature.geo.GeohashFeaturizer",
self.uid)
self._setDefault(precision=5)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, inputCols=None, outputCol=None, precision=5):
"""
setParams(self, inputCols=None, outputCol=None, precision=5)
Sets params for this GeohashFeaturizer.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def setPrecision(self, value):
"""
Sets the value of :py:attr:`precision`.
"""
return self._set(precision=value)
def getPrecision(self):
"""
Gets the value of precision or its default value.
"""
return self.getOrDefault(self.precision)
| [
2,
198,
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
393,
517,
198,
2,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
9387,
351,
198,
2,
428,
670,
329,
3224,
1321,
5115,
6634,
9238,
13,
198,
2,
383,
... | 2.235367 | 10,541 |
from ..algorithm.parameters import params
from ..fitness.base_ff_classes.base_ff import base_ff
import editdistance # https://pypi.python.org/pypi/editdistance
import lzstring # https://pypi.python.org/pypi/lzstring/
import dtw # https://pypi.python.org/pypi/dtw
"""
This fitness function is for a sequence-match problem: we're given
an integer sequence target, say [0, 5, 0, 5, 0, 5], and we try to synthesize a
program (loops, if-statements, etc) which will *yield* that sequence,
one item at a time.
There are several components of the fitness:
1. concerning the program:
i. length of the program (shorter is better)
ii. compressibility of the program (non-compressible, ie DRY, is better)
2. concerning distance from the target:
i. dynamic time warping distance from the program's output to the target
(lower is better).
ii. Levenshtein distance from the program's output to the target
(lower is better).
"""
# available for use in synthesized programs
def succ(n, maxv=6):
"""
Available for use in synthesized programs.
:param n:
:param maxv:
:return:
"""
return min(n+1, maxv)
def pred(n, minv=0):
"""
Available for use in synthesized programs.
:param n:
:param minv:
:return:
"""
return max(n-1, minv)
def truncate(n, g):
"""
the program will yield one item at a time, potentially forever. We only
up to n items.
:param n:
:param g:
:return:
"""
for i in range(n):
yield next(g)
def dist(t0, x0):
"""
numerical difference, used as a component in DTW.
:param t0:
:param x0:
:return:
"""
return abs(t0 - x0)
def dtw_dist(s, t):
"""
Dynamic time warping distance between two sequences.
:param s:
:param t:
:return:
"""
s = list(map(int, s))
t = list(map(int, t))
d, M, C, path = dtw.dtw(s, t, dist)
return d
def lev_dist(s, t):
"""
Levenshtein distance between two sequences, normalised by length of the
target -- hence this is *asymmetric*, not really a distance. Don't
normalise by length of the longer, because it would encourage evolution
to create longer and longer sequences.
:param s:
:param t:
:return:
"""
return editdistance.eval(s, t) / len(s)
def compress(s):
"""
Convert to a string and compress. lzstring is a special-purpose compressor,
more suitable for short strings than typical compressors.
:param s:
:return:
"""
s = ''.join(map(str, s))
return lzstring.LZString().compress(s)
def compressibility(s):
"""
Compressability is in [0, 1]. It's high when the compressed string
is much shorter than the original.
:param s:
:return:
"""
return 1 - len(compress(s)) / len(s)
def proglen(s):
"""
Program length is measured in characters, but in order to keep the values
in a similar range to that of compressibility, DTW and Levenshtein, we
divide by 100. This is a bit arbitrary.
:param s: A string of a program phenotype.
:return: The length of the program divided by 100.
"""
return len(s) / 100.0
if __name__ == "__main__":
# TODO write some tests here
pass
| [
6738,
11485,
282,
42289,
13,
17143,
7307,
1330,
42287,
198,
6738,
11485,
69,
3659,
13,
8692,
62,
487,
62,
37724,
13,
8692,
62,
487,
1330,
2779,
62,
487,
198,
198,
11748,
4370,
30246,
220,
1303,
3740,
1378,
79,
4464,
72,
13,
29412,
1... | 2.568234 | 1,297 |
from enum import Enum
from math import radians, cos, sin
THETAS = []
for theta in range(1080):
theta = radians(float(theta/3))
xd_d = float(cos(theta))
yd_d = float(sin(theta))
THETAS.append((xd_d,yd_d))
LINES = {
"top-left" : 201,
"bottom-right" : 188,
"top-right" : 187,
"left-right" : 186,
"bottom-left" : 200,
"top-bottom" : 205
}
SETTINGS = [
{
"name" : "Control Scheme",
"yval" : 3,
"sel" : 0,
"desc" : "INPUT_SEL"
},
{
"name" : "Font",
"yval" : 12,
"sel" : 0,
"desc" : "FONT_SEL"
},
{
"name" : "Continue Playing [Esc]",
"yval" : 15,
"sel" : 0,
"desc" : "NO_SEL"
},
{
"name" : "Save and Quit",
"yval" : 17,
"sel" : 0,
"desc" : "NO_SEL"
}
]
INPUT_SEL = [
"789 REST: [5],[.] \n"\
"4@6 JUMP: [F] \n"\
"123",
"QWE REST: [5],[.] \n"\
"A@D JUMP: [F] \n"\
"ZXC",
"YKU REST: [5],[.] \n"\
"H@L JUMP: [F] \n"\
"BJN",
]
INPUT_CON = [
"Control Scheme: [C]\n"\
"Reset Game: [R]\n"\
"Quit Game: [ESC]\n"
]
INPUT_SEL_NAME = ["standard numpad", "laptop \"numpad\"", "vi-keys"]
walldraw = []
for x in range(0,16):
walldraw.append(x+256)
pitdraw = []
for x in range(0,8):
pitdraw.append(x+288)
FONT_FILE = ["uc-tiles-16x16.png"]
TRAPS = {
0 : {"name" : "Just the Pits"},
1 : {"name" : "Slip'n'Slide"},
2 : {"name" : "Fling Back"},
3 : {"name" : "Oh No!"}
}
TERRAIN = {
"wall": {
"block_m" : True,
"block_j" : True,
"block_s" : True,
"char" : 178,
"fg" : "wall-fg",
"bg" : "wall-bg",
"type" : "wall",
},
"solidwall": {
"block_m" : True,
"block_j" : True,
"block_s" : True,
"char" : 256,
"fg" : "wall-fg",
"bg" : "wall-bg",
"type" : "solidwall",
},
"floor" : {
"block_m" : False,
"block_j" : False,
"block_s" : False,
"char" : 273,
"fg" : "floor-fg",
"bg" : "floor-bg",
"type" : "floor",
},
"door" : {
"block_m" : False,
"block_j" : False,
"block_s" : False,
"char" : 273,
"fg" : "floor-fg",
"bg" : "floor-bg",
"type" : "door",
},
"stairs" : {
"block_m" : False,
"block_j" : False,
"block_s" : False,
"char" : 273,
"fg" : "stairs-fg",
"bg" : "stairs-bg",
"type" : "floor",
},
"pit" : {
"block_m" : True,
"block_j" : False,
"block_s" : False,
"char" : 352,
"fg" : "pit-fg",
"bg" : "pit-bg",
"type" : "pit",
}
} | [
6738,
33829,
1330,
2039,
388,
201,
198,
6738,
10688,
1330,
2511,
1547,
11,
8615,
11,
7813,
201,
198,
201,
198,
4221,
2767,
1921,
796,
17635,
201,
198,
201,
198,
1640,
262,
8326,
287,
2837,
7,
24045,
2599,
201,
198,
197,
1169,
8326,
... | 1.735983 | 1,409 |
# Copyright (c) 2019 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from kmip.core import enums
from kmip.core import exceptions
from kmip.core import objects
from kmip.core import primitives
from kmip.core import utils
from kmip.core.messages.payloads import base
class SetAttributeRequestPayload(base.RequestPayload):
"""
A request payload for the SetAttribute operation.
Attributes:
unique_identifier: The unique ID of the object on which attribute
deletion should be performed.
new_attribute: The attribute to set on the specified object.
"""
def __init__(self,
unique_identifier=None,
new_attribute=None):
"""
Construct a SetAttribute request payload.
Args:
unique_identifier (string): The unique ID of the object on which
the attribute should be set. Optional, defaults to
None.
new_attribute (struct): A NewAttribute object containing the new
attribute value to set on the specified object. Optional,
defaults to None. Required for read/write.
"""
super(SetAttributeRequestPayload, self).__init__()
self._unique_identifier = None
self._new_attribute = None
self.unique_identifier = unique_identifier
self.new_attribute = new_attribute
@property
@unique_identifier.setter
@property
@new_attribute.setter
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_2_0):
"""
Read the data encoding the SetAttribute request payload and decode
it into its constituent part.
Args:
input_buffer (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
Raises:
VersionNotSupported: Raised when a KMIP version is provided that
does not support the SetAttribute operation.
InvalidKmipEncoding: Raised if fields are missing from the
encoding.
"""
if kmip_version < enums.KMIPVersion.KMIP_2_0:
raise exceptions.VersionNotSupported(
"KMIP {} does not support the SetAttribute operation.".format(
kmip_version.value
)
)
super(SetAttributeRequestPayload, self).read(
input_buffer,
kmip_version=kmip_version
)
local_buffer = utils.BytearrayStream(input_buffer.read(self.length))
if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_buffer):
self._unique_identifier = primitives.TextString(
tag=enums.Tags.UNIQUE_IDENTIFIER
)
self._unique_identifier.read(
local_buffer,
kmip_version=kmip_version
)
else:
self._unique_identifier = None
if self.is_tag_next(enums.Tags.NEW_ATTRIBUTE, local_buffer):
self._new_attribute = objects.NewAttribute()
self._new_attribute.read(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidKmipEncoding(
"The SetAttribute request payload encoding is missing the new "
"attribute field."
)
self.is_oversized(local_buffer)
def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_2_0):
"""
Write the data encoding the SetAttribute request payload to a
stream.
Args:
output_buffer (stream): A data stream in which to encode object
data, supporting a write method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
Raises:
VersionNotSupported: Raised when a KMIP version is provided that
does not support the SetAttribute operation.
InvalidField: Raised if a required field is missing from the
payload object.
"""
if kmip_version < enums.KMIPVersion.KMIP_2_0:
raise exceptions.VersionNotSupported(
"KMIP {} does not support the SetAttribute operation.".format(
kmip_version.value
)
)
local_buffer = utils.BytearrayStream()
if self._unique_identifier:
self._unique_identifier.write(
local_buffer,
kmip_version=kmip_version
)
if self._new_attribute:
self._new_attribute.write(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidField(
"The SetAttribute request payload is missing the new "
"attribute field."
)
self.length = local_buffer.length()
super(SetAttributeRequestPayload, self).write(
output_buffer,
kmip_version=kmip_version
)
output_buffer.write(local_buffer.buffer)
class SetAttributeResponsePayload(base.ResponsePayload):
"""
A response payload for the SetAttribute operation.
Attributes:
unique_identifier: The unique ID of the object on which the attribute
was set.
"""
def __init__(self, unique_identifier=None):
"""
Construct a SetAttribute response payload.
Args:
unique_identifier (string): The unique ID of the object on
which the attribute was set. Defaults to None. Required for
read/write.
"""
super(SetAttributeResponsePayload, self).__init__()
self._unique_identifier = None
self.unique_identifier = unique_identifier
@property
@unique_identifier.setter
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_2_0):
"""
Read the data encoding the SetAttribute response payload and decode
it into its constituent parts.
Args:
input_buffer (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (enum): A KMIPVersion enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
Raises:
VersionNotSupported: Raised when a KMIP version is provided that
does not support the SetAttribute operation.
InvalidKmipEncoding: Raised if any required fields are missing
from the encoding.
"""
if kmip_version < enums.KMIPVersion.KMIP_2_0:
raise exceptions.VersionNotSupported(
"KMIP {} does not support the SetAttribute operation.".format(
kmip_version.value
)
)
super(SetAttributeResponsePayload, self).read(
input_buffer,
kmip_version=kmip_version
)
local_buffer = utils.BytearrayStream(input_buffer.read(self.length))
if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_buffer):
self._unique_identifier = primitives.TextString(
tag=enums.Tags.UNIQUE_IDENTIFIER
)
self._unique_identifier.read(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidKmipEncoding(
"The SetAttribute response payload encoding is missing the "
"unique identifier field."
)
self.is_oversized(local_buffer)
def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_2_0):
"""
Write the data encoding the SetAttribute response payload to a
buffer.
Args:
output_buffer (buffer): A data buffer in which to encode object
data, supporting a write method; usually a BytearrayStream
object.
kmip_version (enum): A KMIPVersion enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
Raises:
VersionNotSupported: Raised when a KMIP version is provided that
does not support the SetAttribute operation.
InvalidField: Raised if a required field is missing from the
payload object.
"""
if kmip_version < enums.KMIPVersion.KMIP_2_0:
raise exceptions.VersionNotSupported(
"KMIP {} does not support the SetAttribute operation.".format(
kmip_version.value
)
)
local_buffer = utils.BytearrayStream()
if self._unique_identifier:
self._unique_identifier.write(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidField(
"The SetAttribute response payload is missing the unique "
"identifier field."
)
self.length = local_buffer.length()
super(SetAttributeResponsePayload, self).write(
output_buffer,
kmip_version=kmip_version
)
output_buffer.write(local_buffer.buffer)
| [
2,
15069,
357,
66,
8,
13130,
383,
25824,
21183,
2059,
14,
4677,
18511,
23123,
18643,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
... | 2.259428 | 4,614 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-20 03:39
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
13,
16,
319,
2177,
12,
2713,
12,
1238,
7643,
25,
2670,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.736842 | 57 |
import os
import shutil
import uuid
from os.path import join, splitext
from typing import List
import mobi
import uvicorn
from fastapi import FastAPI, UploadFile, File, Form, HTTPException
import iscc
from iscc_cli.tika import detector, parser
from iscc_cli.const import SUPPORTED_MIME_TYPES, GMT
import iscc_service
from iscc_service.config import ALLOWED_ORIGINS, ISCC_STREAM
from iscc_service.conn import get_client
from iscc_service.models import (
Metadata,
Text,
ISCC,
MetaID,
ContentID,
DataID,
InstanceID,
StreamItem,
)
from iscc_service.tools import (
code_to_bits,
code_to_int,
stream_filter,
add_placeholders,
)
from pydantic import HttpUrl
from iscc_cli.lib import iscc_from_url
from iscc_cli.utils import iscc_split, get_title, mime_to_gmt, iscc_verify
from iscc_cli import APP_DIR, audio_id, video_id
from starlette.middleware.cors import CORSMiddleware
from starlette.status import (
HTTP_415_UNSUPPORTED_MEDIA_TYPE,
HTTP_422_UNPROCESSABLE_ENTITY,
HTTP_503_SERVICE_UNAVAILABLE,
HTTP_400_BAD_REQUEST,
)
app = FastAPI(
title="ISCC Web Service API",
version=iscc_service.__version__,
description="Microservice for creating ISCC Codes from Media Files.",
docs_url="/",
)
app.add_middleware(
CORSMiddleware,
allow_origins=ALLOWED_ORIGINS,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
@app.post(
"/generate/from_file",
response_model=ISCC,
response_model_exclude_unset=True,
tags=["generate"],
summary="Generate ISCC from File",
)
def from_file(
file: UploadFile = File(...), title: str = Form(""), extra: str = Form("")
):
"""Generate Full ISCC Code from Media File with optional explicit metadata."""
media_type = detector.from_buffer(file.file)
if media_type not in SUPPORTED_MIME_TYPES:
raise HTTPException(
HTTP_415_UNSUPPORTED_MEDIA_TYPE,
"Unsupported media type '{}'. Please request support at "
"https://github.com/iscc/iscc-service/issues.".format(media_type),
)
if media_type == "application/x-mobipocket-ebook":
file.file.seek(0)
tempdir, filepath = mobi.extract(file.file)
tika_result = parser.from_file(filepath)
shutil.rmtree(tempdir)
else:
file.file.seek(0)
tika_result = parser.from_buffer(file.file)
if not title:
title = get_title(tika_result, guess=True)
mid, norm_title, norm_extra = iscc.meta_id(title, extra)
gmt = mime_to_gmt(media_type)
if gmt == GMT.IMAGE:
file.file.seek(0)
cid = iscc.content_id_image(file.file)
elif gmt == GMT.TEXT:
text = tika_result["content"]
if not text:
raise HTTPException(HTTP_422_UNPROCESSABLE_ENTITY, "Could not extract text")
cid = iscc.content_id_text(tika_result["content"])
elif gmt == GMT.AUDIO:
file.file.seek(0)
features = audio_id.get_chroma_vector(file.file)
cid = audio_id.content_id_audio(features)
elif gmt == GMT.VIDEO:
file.file.seek(0)
_, ext = splitext(file.filename)
fn = "{}{}".format(uuid.uuid4(), ext)
tmp_path = join(APP_DIR, fn)
with open(tmp_path, "wb") as buffer:
shutil.copyfileobj(file.file, buffer)
features = video_id.get_frame_vectors(tmp_path)
cid = video_id.content_id_video(features)
os.remove(tmp_path)
file.file.seek(0)
did = iscc.data_id(file.file)
file.file.seek(0)
iid, tophash = iscc.instance_id(file.file)
if not norm_title:
iscc_code = "-".join((cid, did, iid))
else:
iscc_code = "-".join((mid, cid, did, iid))
components = iscc_split(iscc_code)
result = dict(
iscc=iscc_code,
tophash=tophash,
gmt=gmt,
bits=[code_to_bits(c) for c in components],
)
if norm_title:
result["title"] = title
result["title_trimmed"] = norm_title
if norm_extra:
result["extra"] = extra
result["extra_trimmed"] = norm_extra
file.file.close()
return result
@app.post(
"/generate/from_url",
response_model=ISCC,
tags=["generate"],
summary="Generate ISCC from URL",
)
def from_url(url: HttpUrl):
"""Generate Full ISCC from URL."""
result = iscc_from_url(url, guess=True)
result["title"] = result.pop("norm_title")
result["title_trimmed"] = result["title"]
components = iscc_split(result["iscc"])
result["bits"] = [code_to_bits(c) for c in components]
return result
@app.post(
"/generate/meta_id/",
response_model=MetaID,
response_model_exclude_unset=True,
tags=["generate"],
summary="Generate ISCC Meta-ID",
)
def meta_id(meta: Metadata):
"""Generate MetaID from 'title' and optional 'extra' metadata"""
extra = meta.extra or ""
mid, title_trimmed, extra_trimmed = iscc.meta_id(meta.title, extra)
result = {
"code": mid,
"bits": code_to_bits(mid),
"ident": code_to_int(mid),
"title": meta.title,
"title_trimmed": title_trimmed,
}
if extra:
result["extra"] = extra
result["extra_trimmed"] = extra_trimmed
return result
@app.post(
"/generate/content_id_text",
response_model=ContentID,
tags=["generate"],
summary="Generate ISCC Content-ID-Text",
)
def content_id_text(text: Text):
"""Generate ContentID-Text from 'text'"""
cid_t = iscc.content_id_text(text.text)
return {
"gmt": "text",
"bits": code_to_bits(cid_t),
"code": cid_t,
"ident": code_to_int(cid_t),
}
@app.post(
"/generate/data_id",
response_model=DataID,
tags=["generate"],
summary="Generate ISCC Data-ID",
)
def data_id(file: UploadFile = File(...)):
"""Generate Data-ID from raw binary data"""
did = iscc.data_id(file.file)
return {"code": did, "bits": code_to_bits(did), "ident": code_to_int(did)}
@app.post(
"/generate/instance_id",
response_model=InstanceID,
tags=["generate"],
summary="Generate ISCC Instance-ID",
)
def instance_id(file: UploadFile = File(...)):
"""Generate Instance-ID from raw binary data"""
iid, tophash = iscc.instance_id(file.file)
return {
"code": iid,
"bits": code_to_bits(iid),
"ident": code_to_int(iid),
"tophash": tophash,
}
@app.post(
"/generate/data_instance_id",
tags=["generate"],
summary="Generate ISCC Data-ID and Instance-ID",
)
def data_and_instance_id(file: UploadFile = File(...,)):
"""Generate Data-ID and Instance-ID from raw binary data"""
did = iscc.data_id(file.file)
file.file.seek(0)
iid, tophash = iscc.instance_id(file.file)
return {
"data_id": {"code": did, "bits": code_to_bits(did), "ident": code_to_int(did),},
"instance_id": {
"code": iid,
"bits": code_to_bits(iid),
"ident": code_to_int(iid),
"tophash": tophash,
},
}
@app.get(
"/lookup",
response_model=List[StreamItem],
tags=["lookup"],
summary="Lookup ISCC Codes",
)
def lookup(iscc: str):
"""Lookup an ISCC Code"""
client = get_client()
if client is None:
raise HTTPException(
HTTP_503_SERVICE_UNAVAILABLE, "ISCC lookup service not available"
)
try:
iscc_verify(iscc)
except ValueError as e:
raise HTTPException(HTTP_400_BAD_REQUEST, str(e))
components = iscc_split(iscc)
results = []
seen = set()
for component in components:
response = client.liststreamkeyitems(ISCC_STREAM, component, True, 100, 0, True)
for result in response:
txid = result.get("txid")
if txid is None or txid in seen:
continue
results.append(result)
seen.add(txid)
result = stream_filter.search(results)
cleaned = []
for entry in result:
keys = entry["keys"]
# Better be conservative until we have a similarity based index.
# So for now we only match if at least two components are identical.
matches = set(keys).intersection(set(components))
if not len(matches) >= 2:
continue
keys = add_placeholders(keys)
entry["bits"] = [code_to_bits(c) for c in keys]
while len(entry["bits"]) < 4:
entry["bits"].append("0" * 64)
cleaned.append(entry)
return cleaned
if __name__ == "__main__":
uvicorn.run("iscc_service.main:app", host="127.0.0.1", port=8000, reload=True)
| [
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
334,
27112,
198,
6738,
28686,
13,
6978,
1330,
4654,
11,
4328,
578,
742,
198,
6738,
19720,
1330,
7343,
198,
198,
11748,
7251,
72,
198,
11748,
334,
25531,
1211,
198,
6738,
3049,
15042,
1330... | 2.266386 | 3,799 |
Modpack_name = "My modpack" #appears in the about dialog
Modpack_author = "My name" #appears in the about dialog
Modpack_url = "http://example.com/" #link to this appears in the about dialog
Modpack_license_name = "Creative Commons 0 license" #"Your modpack is licensed under the --"
Modpack_license_url = "https://creativecommons.org/share-your-work/public-domain/cc0/" #license name links to this
Launcher_title = "Modpack Launcher" #name of launcher window
Updater_title = "Modpack Installer" #name of updater window
Launcher_folder_path = ".mymodpack" #this is relative to the user/home folder
Forge_version = "1.12.2-14.23.5.2855" #exact forge version names can be found on the forge website
Forge_version_name = "1.12.2-forge-14.23.5.2855" #the folder forge generates in the minecraft/versions folder, as well as the name it shows in the vanilla minecraft launcher. Find this one out yourself
Is_below_1_13 = 1 #set to 1 if running 1.12.2. the launcher will ONLY work with latest forge 1.12.2 releases
Min_Mem = "2560" #suggested memory allocation in the launcher
Source_URL = "https://pepfof.com/minecraft/" #url of the distributor folder
Server_Autoconnect = 1 #whether to autoconnect on launch to:
Server_IP = "your server ip" #the server on this ip
Server_port = "25565" #at this port
| [
198,
5841,
8002,
62,
3672,
796,
366,
3666,
953,
8002,
1,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
1303,
1324,
4127,
287,
262,
546,
17310,
198,
5841,
8002,
62,
9800,
796,
366,
3666,
1438,... | 2.15621 | 781 |
import os
import six
import shutil
import yaml
import click
import pickle
import pandas as pd
from pyfiglet import figlet_format, Figlet
from prettytable import PrettyTable
from cognito.table import Table
from datetime import datetime
from tqdm import tqdm, trange
def save_to(path, df, encoder):
"""
Save the encoded dataframe to csv file and
picle file.
:param path: The path
:type path: { type_description }
:param df: { parameter_description }
:type df: { type_description }
:param encoder: The encoder
:type encoder: { type_description }
"""
filename = os.path.basename(path)
if '.' in filename:
fname, ext = filename.split('.')
else:
fname = filename
path = os.path.dirname(path)
save_path = os.path.join(path, fname)
# make directory
try:
os.mkdir(save_path)
#filenames
pkl_file = os.path.join(save_path, 'encoder.pkl')
df_file = os.path.join(save_path, filename)
df.to_csv(df_file, index=False)
f = open(pkl_file,"wb")
pickle.dump(encoder, f)
f.close()
return df
except Exception as e:
click.echo(
click.style(
"Abort: The {} file already exists.", fg="red"
).format(os.path.join(save_path, filename)), err=True)
try:
import colorama
colorama.init()
except ImportError:
colorama = None
try:
from termcolor import colored
except ImportError:
colored = None
def read_yaml(filename):
''' take filename as parameter and convert yaml to ordereddict '''
return yaml.load(open(filename))
custom_fig = Figlet(font='slant')
click.echo(custom_fig.renderText('cognito'))
@click.group()
def cli():
'''
Generate ML consumable datasets using advanced data preprocessing
and data wrangling.
USAGE: \n
$ cognito transform --input filepath --out filepath
'''
@cli.command('reverse', short_help=": re-transform generated dataset")
def reverse():
""" Reverse transform generated Machine Learning friendly dataset """
pass
@cli.command('prepare', short_help=': transform given dataset')
@click.option('--mode', '-m', type=click.Choice(['prepare', 'decode', 'autoML', 'help', 'report'], \
case_sensitive=False), help="Set any mode such as `prepare`, `autoML`, `clean`", metavar='<path>')
@click.option('--inp', '-i', help="Input dataset file in following format .csv", required=True, metavar='<path>')
@click.option('--out', '-o', help="Select output desitnation", required=True, metavar='<path>')
def prepare(mode, inp, out):
""" Transform the given dataset file """
if mode == 'help':
# log("Cognito CLI", color="blue", figlet=True)
click.echo(custom_fig.renderText('cognito'))
if mode == 'prepare':
df = Table(inp)
response, encoder = df.generate()
click.echo(save_to(out, response, encoder))
if mode == 'autoML':
df = Table(inp)
click.echo(df.total_columns())
if mode == 'report':
df = Table(inp)
table = PrettyTable(['Features', 'Feature Type', 'Outliers', '% of outliers', 'Missing', '%of missing'])
for col in df.columns():
table.add_row([col, '', '', '', '', ''])
click.echo(table)
if mode == 'decode':
with trange(11) as t:
for i in t:
t.set_description('C(x) decoding %i' % i)
sleep(0.1)
click.echo('Completed decoding')
click.echo(get_all_files())
if __name__ == '__main__':
cli()
| [
11748,
28686,
198,
11748,
2237,
198,
11748,
4423,
346,
198,
11748,
331,
43695,
198,
11748,
3904,
198,
11748,
2298,
293,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
12972,
5647,
1616,
1330,
2336,
1616,
62,
18982,
11,
12138,
1616,
19... | 2.351488 | 1,579 |
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 12 12:51:11 2018
@author: admin
"""
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 11 15:12:22 2018
@author: admin
"""
from video_pixels import video
import numpy as np
import cv2
from matplotlib import pyplot as plt
cap = cv2.VideoCapture('2.avi')
hsv_original = cv2.cvtColor(original_image, cv2.COLOR_BGR2HSV)
roi = cv2.imread("person.jpg")
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
hue, saturation, value = cv2.split(hsv_roi)
# Histogram ROI
roi_hist = cv2.calcHist([hsv_roi], [0, 1], None, [180, 256], [0, 180, 0, 256])
mask = cv2.calcBackProject([hsv_original], [0, 1], roi_hist, [0, 180, 0, 256], 1)
# Filtering remove noise
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
mask = cv2.filter2D(mask, -1, kernel)
_, mask = cv2.threshold(mask, 100, 255, cv2.THRESH_BINARY)
mask = cv2.merge((mask, mask, mask))
result = cv2.bitwise_and(original_image, mask)
cv2.imshow("Mask", mask)
cv2.imshow("Original image", original_image)
cv2.imshow("Result", result)
cv2.imshow("Roi", roi)
cv2.waitKey(0)
cv2.destroyAllWindows() | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
3825,
2447,
1105,
1105,
25,
4349,
25,
1157,
2864,
201,
198,
201,
198,
31,
9800,
25,
13169,
201,
198,
37811,
201,
198,
201,
198,
2,
... | 2.131481 | 540 |
from nltk import corpus, stopwords, tokenize
output = tokenize.TextTilingTokenizer().tokenize(corpus.brown.raw()[0:10000])
output = [token for token in output if token not in stopwords]
| [
6738,
299,
2528,
74,
1330,
35789,
11,
2245,
10879,
11,
11241,
1096,
198,
198,
22915,
796,
11241,
1096,
13,
8206,
51,
4386,
30642,
7509,
22446,
30001,
1096,
7,
10215,
79,
385,
13,
33282,
13,
1831,
3419,
58,
15,
25,
49388,
12962,
198,
... | 3.186441 | 59 |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Standard python modules.
import logging
# Our modules.
import citest.service_testing.cli_agent as cli_agent
from citest.base.json_scrubber import JsonScrubber
class KubeCtlAgent(cli_agent.CliAgent):
"""Agent that uses kubectl program to interact with Kubernetes."""
def __init__(self, logger=None):
"""Construct instance.
Args:
logger: The logger to inject if other than the default.
"""
logger = logger or logging.getLogger(__name__)
super(KubeCtlAgent, self).__init__(
'kubectl', output_scrubber=JsonScrubber(), logger=logger)
@staticmethod
def build_kubectl_command_args(action, resource=None, args=None):
"""Build commandline for an action.
Args:
action: The operation we are going to perform on the resource.
resource: The kubectl resource we are going to operate on (if applicable).
args: The arguments following [gcloud_module, gce_type].
Returns:
list of complete command line arguments following implied 'kubectl'
"""
return [action] + ([resource] if resource else []) + (args if args else [])
def list_resources(self, context, kube_type, format='json', extra_args=None):
"""Obtain a list of references to all Kubernetes resources of a given type.
Args:
kube_type: The type of resource to list.
format: The kubectl --format type.
extra_args: Array of extra arguments for the list command
to tack onto command line, or None.
Returns:
cli.CliRunStatus with execution results.
"""
args = ['--output', format] + (extra_args or [])
args = context.eval(args)
cmdline = self.build_kubectl_command_args(
action='get', resource=kube_type, args=args)
return self.run(cmdline)
| [
2,
15069,
1584,
3012,
3457,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
... | 3.044099 | 771 |
# -*- coding: utf-8 -*-
# Copyright 2019 the HERA Project
# Licensed under the MIT License
'''Tests for io.py'''
import pytest
import numpy as np
import os
import warnings
import shutil
import copy
from collections import OrderedDict as odict
import pyuvdata
from pyuvdata import UVCal, UVData, UVFlag
from pyuvdata.utils import parse_polstr, parse_jpolstr
import glob
import sys
from .. import io
from ..io import HERACal, HERAData
from ..datacontainer import DataContainer
from ..utils import polnum2str, polstr2num, jnum2str, jstr2num
from ..data import DATA_PATH
from hera_qm.data import DATA_PATH as QM_DATA_PATH
@pytest.mark.filterwarnings("ignore:It seems that the latitude and longitude are in radians")
@pytest.mark.filterwarnings("ignore:The default for the `center` keyword has changed")
@pytest.mark.filterwarnings("ignore:Mean of empty slice")
@pytest.mark.filterwarnings("ignore:invalid value encountered in double_scalars")
@pytest.mark.filterwarnings("ignore:The default for the `center` keyword has changed")
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
13130,
262,
24906,
32,
4935,
198,
2,
49962,
739,
262,
17168,
13789,
198,
198,
7061,
6,
51,
3558,
329,
33245,
13,
9078,
7061,
6,
198,
198,
11748,
12972,
9288,... | 3.232919 | 322 |
"""
>>> from massweb.mass_requests.mass_request import MassRequest
>>> from massweb.targets.target import Target
>>>
>>> target_1 = Target(url=u"http://course.hyperiongray.com/vuln1", data={"password": "blh123"}, ttype="post")
>>> target_2 = Target(url=u"http://course.hyperiongray.com/vuln2/898538a7335fd8e6bac310f079ba3fd1/", data={"how": "I'm good thx"}, ttype="post")
>>> target_3 = Target(url=u"http://www.hyperiongray.com/", ttype="get")
>>> targets = [target_1, target_2, target_3]
>>> mr = MassRequest()
>>> mr.request_targets(targets)
>>> for r in mr.results:
... print r
...
(<massweb.targets.target.Target object at 0x15496d0>, <Response [200]>)
(<massweb.targets.target.Target object at 0x1549650>, <Response [200]>)
(<massweb.targets.target.Target object at 0x1549490>, <Response [200]>)
>>> for target, response in mr.results:
... print target, response.status_code
...
http://course.hyperiongray.com/vuln2/898538a7335fd8e6bac310f079ba3fd1/ 200
http://www.hyperiongray.com/ 200
http://course.hyperiongray.com/vuln1 200``
"""
from massweb.mass_requests.mass_request import MassRequest
from massweb.targets.target import Target
target_1 = Target(url=u"http://course.hyperiongray.com/vuln1", data={"password": "blh123"}, ttype="post")
target_2 = Target(url=u"http://course.hyperiongray.com/vuln2/898538a7335fd8e6bac310f079ba3fd1/", data={"how": "I'm good thx"}, ttype="post")
target_3 = Target(url=u"http://www.hyperiongray.com/", ttype="get")
targets = [target_1, target_2, target_3]
mr = MassRequest()
mr.request_targets(targets)
for result in mr.results:
print result
for target, response in mr.results:
print target, response.status_code
| [
37811,
198,
220,
220,
220,
13163,
422,
2347,
12384,
13,
22208,
62,
8897,
3558,
13,
22208,
62,
25927,
1330,
5674,
18453,
198,
220,
220,
220,
13163,
422,
2347,
12384,
13,
83,
853,
1039,
13,
16793,
1330,
12744,
198,
220,
220,
220,
13163,... | 2.476056 | 710 |
from Router import *
from PyQt4 import QtCore
| [
6738,
48538,
1330,
1635,
198,
6738,
9485,
48,
83,
19,
1330,
33734,
14055,
198
] | 3.285714 | 14 |
import pandas as pd
import sys
import json
import sqlite3
if __name__ == '__main__':
main(sys.argv)
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
25064,
198,
11748,
33918,
198,
11748,
44161,
578,
18,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
7,
17597,
13,
853,
85,
8,
198
] | 2.585366 | 41 |
# -*- coding: utf-8 -*-
"""spacy.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1PvWiuiOWi9TFTT6hZQJrobxF87dT5LBY
"""
!pip3 install spacy
import spacy
import time
nlp=spacy.load('en') # this will use nltk_data folder to load library
data="i am doing great sometimes then i use feet to do not know"
# applying NLP --means tkonizing itself
process_data=nlp(data)
for i in process_data:
print(i,"--->finding lemma->",i.lemma_)
time.sleep(2)
print("pos of word is: ",i.pos_)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
2777,
1590,
13,
541,
2047,
65,
198,
198,
38062,
4142,
7560,
416,
1623,
4820,
2870,
13,
198,
198,
20556,
2393,
318,
5140,
379,
198,
220,
220,
220,
3740,
1378,
40... | 2.552511 | 219 |
import aiohttp
import logging
import xml.etree.ElementTree as ET
from bs4 import BeautifulSoup
from .types import (
BCHydroAccount,
BCHydroInterval,
BCHydroRates,
BCHydroDailyElectricity,
BCHydroDailyUsage,
)
from .const import (
USER_AGENT,
URL_POST_LOGIN,
URL_GET_ACCOUNT_JSON,
URL_POST_CONSUMPTION_XML,
)
_LOGGER = logging.getLogger(__name__)
| [
11748,
257,
952,
4023,
198,
11748,
18931,
198,
11748,
35555,
13,
316,
631,
13,
20180,
27660,
355,
12152,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
198,
6738,
764,
19199,
1330,
357,
198,
220,
220,
220,
347,
3398,
5173,
305,
... | 2.297619 | 168 |
# 1. After getting the duration iterate through each second and get the frames
# at each second
# 2. Next step is to get how much ever frames you want in that second with the upper-
# bound of min(X,FPS)
# 3. Need to see whether to save the images /Frames and then randomly select min(X,FPS)
# from it.
#----------------------------------------------------------------------------------------------------------
from moviepy.video.io.VideoFileClip import VideoFileClip
import cv2
import os
vidcap = cv2.VideoCapture('Shakira.mp4')
parts = 15 #can make it Dynamic
count_sec = 0
total_time = int(VideoFileClip('Shakira.mp4').duration)
min_parts = min(parts, int(vidcap.get(cv2.CAP_PROP_FPS)))
step = int(1000/min_parts)
#while(count_sec < int(total_time)):
folder = 'frames'
global_count = 1
while count_sec < total_time :
inner_count = 0
while inner_count < parts:
number = (count_sec * 1000) + (inner_count + 1)*step
# Do stuff taking number into consideration.
vidcap.set(cv2.CAP_PROP_POS_MSEC, number)
success, image = vidcap.read()
if success:
name = 'frame_' + str(global_count) + '.jpg'
location = os.path.join(folder, name)
cv2.imwrite(location, image)
global_count += 1
inner_count += 1
count_sec += 1
#clip = VideoFileClip('Shakira.mp4')
| [
2,
352,
13,
220,
220,
220,
2293,
1972,
262,
9478,
11629,
378,
832,
1123,
1218,
290,
651,
262,
13431,
220,
198,
2,
220,
220,
220,
220,
220,
220,
379,
1123,
1218,
198,
2,
362,
13,
220,
220,
220,
7406,
2239,
318,
284,
651,
703,
881... | 2.63138 | 529 |
WORD_MARK = None
| [
54,
12532,
62,
44,
14175,
796,
6045,
198
] | 2.125 | 8 |
''' Runs unit tests for Util functionality '''
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
import unittest
from main.consolidate_uber_data import Pipeline
| [
7061,
6,
44743,
4326,
5254,
329,
7273,
346,
11244,
705,
7061,
198,
11748,
25064,
198,
11748,
28686,
198,
17597,
13,
6978,
13,
28463,
7,
15,
11,
28686,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
828,
... | 2.985075 | 67 |
from .dolt import (
Branch,
Commit,
Dolt,
DoltException,
DoltHubContext,
KeyPair,
Remote,
Status,
Table,
_execute,
)
from .types import BranchT, CommitT, DoltT, KeyPairT, RemoteT, StatusT, TableT
from .utils import (
CREATE,
FORCE_CREATE,
REPLACE,
UPDATE,
columns_to_rows,
detach_head,
read_columns,
read_columns_sql,
read_rows,
read_rows_sql,
set_dolt_path,
write_columns,
write_file,
write_rows,
)
| [
6738,
764,
67,
5978,
1330,
357,
198,
220,
220,
220,
20551,
11,
198,
220,
220,
220,
35910,
11,
198,
220,
220,
220,
360,
5978,
11,
198,
220,
220,
220,
360,
5978,
16922,
11,
198,
220,
220,
220,
360,
5978,
16066,
21947,
11,
198,
220,
... | 2.101695 | 236 |
'''
Created on May 10, 2021
@author: Fred
'''
from PIL import Image
from datetime import datetime
import random, os
class tdeck():
'''
classdocs
'''
def __init__(self, card_dir='./card_images/', invert=True, invert_chance=25):
'''
Args
----
card_dir : str
the directory in which the cards and other files are contained within.
invert : boolean
defaults to True. If it is True, checks to see if the directory
contains a file named 'card_meanings_inverted.txt'
If so, it will potentially invert cards as they are drawn.
invert_chance : int
defaults to 25. The chance a card will be inverted.
'''
self.card_dir = card_dir
self.card_names = open(card_dir+'card_names.txt', 'r').read().split('\n')
self.deck = list(range(0,len(self.card_names))) #the deck of cards, numbered from 0 to 77
random.shuffle(self.deck)
self.hand = []
if invert:
if 'card_meanings_inverted.txt' in os.listdir(card_dir):
self.invert = True
self.invert_chance = invert_chance
else:
self.invert = False
self.invert_chance = 0
else:
self.invert = False
self.invert_chance = 0
def draw(self):
'''Draws a card, and then adds that card to the hand.
We always know which card was drawn last, because it will always be
at self.hand[-1]
'''
drawn_card = int(self.deck.pop())
if self.invert: #do we potentially invert cards?
if random.randint(1,100) < self.invert_chance:
invert = 1
else:
invert = 0
else:
invert = 0
self.hand.append((drawn_card, invert))
return self.card_names[drawn_card]
class card_table():
'''
classdocs
'''
def __init__(self, owner, deck, spread='Single', invert=True):
'''
Args
----
owner : str
the owner of the table, used mostly for file naming purposes
deck : tdeck
a tdeck object. the tdeck class is defined above in this file.
spread : str
a string, representing the various allowed tarot spreads.
spread defaults to 'Single', representing a single card.
Allowed spreads are as follows:
Single - 1 card is drawn and shown.
Each of the above is created as an attribute of the table class.
Additionally, the following attributes are created using this information
when the class is instantiated:
draw_max : int
the maximum number of cards to be drawn, checked with len(self.deck.hand)
decided by the chosen spread.
table : Image
the baseline image, representing the table on which the cards
are placed, using PIL's Image class. The dimensions
of this image are decided by the spread.
cross_loc : tuple
a tuple of x,y coordinates for use on the image self.table
only used for the cross spread.
'''
valid_spread = {'Single' : 1,
'Draw' : 3,
'Seven' : 7,
'Cross' : 10
}
self.owner = str(owner)
self.deck = deck
if spread.title() in valid_spread:
self.spread = spread.title()
else:
self.spread='Single'
self.draw_max = valid_spread[self.spread]
self.table = self.construct_table()
self.cross_loc = (0,0)
if __name__ == '__main__':
deck = tdeck()
table = card_table('Tester', deck, 'Seven')
while table.draw_max > len(table.deck.hand):
table.next_step()
print(table.deck.get_name())
print(table.deck.get_desc())
table.next_step() | [
7061,
6,
201,
198,
41972,
319,
1737,
838,
11,
33448,
201,
198,
201,
198,
31,
9800,
25,
8559,
201,
198,
7061,
6,
201,
198,
6738,
350,
4146,
1330,
7412,
201,
198,
6738,
4818,
8079,
1330,
4818,
8079,
201,
198,
11748,
4738,
11,
28686,
... | 2.058853 | 2,022 |
import akutil as aku
import arkouda as ak
def expand(size, segs, vals):
""" Expand an array with values placed into the indicated segments.
Parameters
----------
size : ak.pdarray
The size of the array to be expanded
segs : ak.pdarray
The indices where the values should be placed
vals : ak.pdarray
The values to be placed in each segment
Returns
-------
pdarray
The expanded array.
"""
temp = ak.zeros(size, vals.dtype)
diffs = ak.concatenate((ak.array([vals[0]]), vals[1:]-vals[:-1]))
temp[segs] = diffs
return ak.cumsum(temp)
def invert_permutation(perm):
""" Find the inverse of a permutation array.
Parameters
----------
perm : ak.pdarray
The permutation array.
Returns
-------
ak.array
The inverse of the permutation array.
"""
# I think this suffers from overflow errors on large arrays.
#if perm.sum() != (perm.size * (perm.size -1)) / 2:
# raise ValueError("The indicated permutation is invalid.")
if ak.unique(perm).size != perm.size:
raise ValueError("The array is not a permutation.")
return ak.coargsort([perm, ak.arange(0, perm.size)])
| [
11748,
47594,
22602,
355,
257,
23063,
198,
11748,
610,
74,
2778,
64,
355,
47594,
198,
198,
4299,
4292,
7,
7857,
11,
384,
14542,
11,
410,
874,
2599,
198,
220,
220,
220,
37227,
49368,
281,
7177,
351,
3815,
4624,
656,
262,
8203,
17894,
... | 2.617521 | 468 |
"""
API Uptime Monitor
Author:
Kevin Xin
Amiteshk Sharma
Status:
Good,
Bad,
Incompatible,
Unknown
"""
import logging
from enum import Enum
import requests
# pylint:disable=import-error, ungrouped-imports
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning) # pylint:disable=no-member
# enums class to represent outcomes for cors check
# provide information on total APIs with CORS support
# takes in the total count of APIs provided
# used to increment the correct count
class DictQuery(dict):
"""
Extract the value from nested json based on path
"""
class API:
'''
An API corresponding to an es document
'''
def check_api_status(self):
'''
loop through each endpoint and extract parameter & example $ HTTP method information
'''
self._api_status = 'unknown'
self._cors_status = Cors.UNKNOWN.value
results = []
if not self.api_server:
return
for _endpoint, _endpoint_info in self.endpoints_info.items():
res = None
try:
res = self.test_endpoint(_endpoint, _endpoint_info)
except Exception as exception: # pylint: disable=broad-except
self._uptime_msg = _endpoint + ": " + type(exception).__name__
res = 'bad'
if res:
results.append(res)
if res == 'bad':
break
if not 'bad' in results:
if 'good' in results:
self._uptime_msg = 'Everything looks good!'
self._api_status = 'good'
else:
# msg will be populated during api call
self._api_status = 'unknown'
else:
# msg will be populated during api call
self._api_status = 'bad'
class Endpoint:
'''
An API Endpoint
'''
| [
37811,
198,
220,
220,
220,
7824,
471,
457,
524,
18289,
198,
220,
220,
220,
220,
198,
220,
220,
220,
6434,
25,
198,
220,
220,
220,
220,
220,
220,
220,
7939,
25426,
198,
220,
220,
220,
220,
220,
220,
220,
1703,
2737,
71,
74,
40196,
... | 2.23388 | 915 |
# -*- coding: utf-8 -*-
"""
Input arguments (Parameters) for Organizations resources RESTful API
-----------------------------------------------------------
"""
# from flask_marshmallow import base_fields
from flask_restx_patched import Parameters, PatchJSONParameters
from . import schemas
from .models import Organization
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
20560,
7159,
357,
48944,
8,
329,
41846,
4133,
30617,
913,
7824,
198,
43801,
6329,
198,
37811,
198,
198,
2,
422,
42903,
62,
76,
5406,
42725,
1330,
2779,
62,
... | 4.373333 | 75 |
from .unit import *
N = kg * m / s**2
J = N * m
W = J / s
Pa = N / m**2
# electricity
A = coulomb / s
V = W / A
ohm = V / A
T = kg * s**-2 / A
F = coulomb / V
gauss = 10**-4 * T
| [
6738,
764,
20850,
1330,
1635,
198,
198,
45,
796,
14211,
1635,
285,
1220,
264,
1174,
17,
198,
41,
796,
399,
1635,
285,
198,
54,
796,
449,
1220,
264,
198,
28875,
796,
399,
1220,
285,
1174,
17,
198,
198,
2,
8744,
198,
32,
796,
2284,
... | 2.011111 | 90 |
from .sessions import * | [
6738,
764,
82,
6202,
1330,
1635
] | 3.833333 | 6 |
from os import listdir, path
from client.util.HTMLUtil import HTMLUtil
from client.util.html.ButtonBuilder import ButtonBuilder
from client.util.html.ListBuilder import ListBuilder
from client.util.html.LinkBuider import LinkBuilder
valid_report_types = ['NLU', 'NLU_Timing', 'Refresh_DD', 'Markov_Chain']
| [
6738,
28686,
1330,
1351,
15908,
11,
3108,
198,
6738,
5456,
13,
22602,
13,
28656,
18274,
346,
1330,
11532,
18274,
346,
198,
6738,
5456,
13,
22602,
13,
6494,
13,
21864,
32875,
1330,
20969,
32875,
198,
6738,
5456,
13,
22602,
13,
6494,
13,
... | 3.322581 | 93 |
#
# Copyright (c) 2016-2019 Dickson S. Guedes.
#
# This module is free software; you can redistribute it and/or modify it under
# the [PostgreSQL License](http://www.opensource.org/licenses/postgresql).
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose, without fee, and without a written agreement is
# hereby granted, provided that the above copyright notice and this paragraph
# and the following two paragraphs appear in all copies.
#
# In no event shall Dickson S. Guedes be liable to any party for direct,
# indirect, special, incidental, or consequential damages, including lost
# profits, arising out of the use of this software and its documentation, even
# if Dickson S. Guedes has been advised of the possibility of such damage.
#
# Dickson S. Guedes specifically disclaims any warranties, including, but not
# limited to, the implied warranties of merchantability and fitness for a
# particular purpose. The software provided hereunder is on an "as is" basis,
# and Dickson S. Guedes has no obligations to provide maintenance, support,
# updates, enhancements, or modifications.
#
from multicorn import ForeignDataWrapper, TableDefinition, ColumnDefinition
from multicorn.utils import log_to_postgres, DEBUG
from faker import Faker
from functools import lru_cache
| [
2,
198,
2,
15069,
357,
66,
8,
1584,
12,
23344,
360,
46381,
311,
13,
402,
1739,
274,
13,
198,
2,
198,
2,
770,
8265,
318,
1479,
3788,
26,
345,
460,
17678,
4163,
340,
290,
14,
273,
13096,
340,
739,
198,
2,
262,
685,
6307,
47701,
... | 4.02719 | 331 |
# coding=utf-8
# *** WARNING: this file was generated by crd2pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'KogitoBuildSpecArgs',
'KogitoBuildSpecArtifactArgs',
'KogitoBuildSpecEnvArgs',
'KogitoBuildSpecEnvValueFromArgs',
'KogitoBuildSpecEnvValueFromConfigMapKeyRefArgs',
'KogitoBuildSpecEnvValueFromFieldRefArgs',
'KogitoBuildSpecEnvValueFromResourceFieldRefArgs',
'KogitoBuildSpecEnvValueFromResourceFieldRefDivisorArgs',
'KogitoBuildSpecEnvValueFromSecretKeyRefArgs',
'KogitoBuildSpecGitSourceArgs',
'KogitoBuildSpecResourcesArgs',
'KogitoBuildSpecResourcesLimitsArgs',
'KogitoBuildSpecResourcesRequestsArgs',
'KogitoBuildSpecWebHooksArgs',
'KogitoBuildStatusArgs',
'KogitoBuildStatusBuildsArgs',
'KogitoBuildStatusConditionsArgs',
'KogitoInfraSpecArgs',
'KogitoInfraSpecResourceArgs',
'KogitoInfraStatusArgs',
'KogitoInfraStatusConditionArgs',
'KogitoInfraStatusEnvArgs',
'KogitoInfraStatusEnvValueFromArgs',
'KogitoInfraStatusEnvValueFromConfigMapKeyRefArgs',
'KogitoInfraStatusEnvValueFromFieldRefArgs',
'KogitoInfraStatusEnvValueFromResourceFieldRefArgs',
'KogitoInfraStatusEnvValueFromResourceFieldRefDivisorArgs',
'KogitoInfraStatusEnvValueFromSecretKeyRefArgs',
'KogitoInfraStatusVolumesArgs',
'KogitoInfraStatusVolumesMountArgs',
'KogitoInfraStatusVolumesVolumeArgs',
'KogitoInfraStatusVolumesVolumeConfigMapArgs',
'KogitoInfraStatusVolumesVolumeConfigMapItemsArgs',
'KogitoInfraStatusVolumesVolumeSecretArgs',
'KogitoInfraStatusVolumesVolumeSecretItemsArgs',
'KogitoRuntimeSpecArgs',
'KogitoRuntimeSpecEnvArgs',
'KogitoRuntimeSpecEnvValueFromArgs',
'KogitoRuntimeSpecEnvValueFromConfigMapKeyRefArgs',
'KogitoRuntimeSpecEnvValueFromFieldRefArgs',
'KogitoRuntimeSpecEnvValueFromResourceFieldRefArgs',
'KogitoRuntimeSpecEnvValueFromResourceFieldRefDivisorArgs',
'KogitoRuntimeSpecEnvValueFromSecretKeyRefArgs',
'KogitoRuntimeSpecMonitoringArgs',
'KogitoRuntimeSpecResourcesArgs',
'KogitoRuntimeSpecResourcesLimitsArgs',
'KogitoRuntimeSpecResourcesRequestsArgs',
'KogitoRuntimeStatusArgs',
'KogitoRuntimeStatusCloudEventsArgs',
'KogitoRuntimeStatusCloudEventsConsumesArgs',
'KogitoRuntimeStatusCloudEventsProducesArgs',
'KogitoRuntimeStatusConditionsArgs',
'KogitoRuntimeStatusDeploymentConditionsArgs',
'KogitoSupportingServiceSpecArgs',
'KogitoSupportingServiceSpecEnvArgs',
'KogitoSupportingServiceSpecEnvValueFromArgs',
'KogitoSupportingServiceSpecEnvValueFromConfigMapKeyRefArgs',
'KogitoSupportingServiceSpecEnvValueFromFieldRefArgs',
'KogitoSupportingServiceSpecEnvValueFromResourceFieldRefArgs',
'KogitoSupportingServiceSpecEnvValueFromResourceFieldRefDivisorArgs',
'KogitoSupportingServiceSpecEnvValueFromSecretKeyRefArgs',
'KogitoSupportingServiceSpecMonitoringArgs',
'KogitoSupportingServiceSpecResourcesArgs',
'KogitoSupportingServiceSpecResourcesLimitsArgs',
'KogitoSupportingServiceSpecResourcesRequestsArgs',
'KogitoSupportingServiceStatusArgs',
'KogitoSupportingServiceStatusCloudEventsArgs',
'KogitoSupportingServiceStatusCloudEventsConsumesArgs',
'KogitoSupportingServiceStatusCloudEventsProducesArgs',
'KogitoSupportingServiceStatusConditionsArgs',
'KogitoSupportingServiceStatusDeploymentConditionsArgs',
]
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
17202,
39410,
25,
428,
2393,
373,
7560,
416,
1067,
67,
17,
79,
377,
12994,
13,
17202,
198,
2,
17202,
2141,
407,
4370,
416,
1021,
4556,
345,
821,
1728,
345,
760,
644,
345,
389,
1804,
0,
17202,
... | 2.578841 | 1,985 |
import sys
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import icubam
import icubam.predicu.data
import icubam.predicu.plot
data_source = ["bedcounts", "combined_bedcounts_public"]
# Logic of this code: the computation of quantities, even when seemingly simple,
# is performed separately from the plotting. The computing is done once, at the
# dept level, and the function returns simple arrays, to be plotted. These
# arrays are meant to be added, to the regional leel (hence, no eprcentage at
# this level) The computing at the regional level comes from that of the
# departmental level. Percentages have to be recomputed (not averaged, but
# averaged with ponderation, so, recomputed)
LEGEND_ARGS = {"frameon": True, "facecolor": "white", "framealpha": 0.8}
def slid_window_avg(a, wi):
""" a simple window-averaging function, centerd on the current point """
# TODO: replace with pandas rolling average. - rth
acopy = np.array(a).copy()
a_smoothed = np.zeros(acopy.shape)
wi_half = wi // 2
wi_other_half = wi - wi_half
for i in range(acopy.shape[0]):
aslice = acopy[
max(0, i - wi_half) : min(i + wi_other_half, acopy.shape[0])
]
a_smoothed[i] = np.mean(aslice, axis=0)
# a_smoothed[i] += np.sum(aslice,axis=0)/ (aslice).shape[0]
# print(aslice,a_smoothed[i] , acopy[i])
return a_smoothed
def compute_all_for_plots_by_dept(d, bc, dep):
"""
where all computation takes place.
is supposed to return stuff that make sense to add
i.e. no percentages, only numbers (to aggregate from dept to region, we jsut
sum numbers of dept)
some quantities are defined, then set to zero, because at present the dat ais
not loaded + we haven't thought about it too much but these are the kind of
quantitites that we COULD think of plotting (are interestig for Antoine)
"""
dep_data = d[d["department"] == dep]
dep_data = dep_data.sort_values(by="date")
zeros = dep_data["n_covid_deaths"] * 0.0
nicu_dep = (
bc[bc.department == dep].icu_name.unique().size
) ## number of ICUs in the dept.
wi = 3 # sliding window time average
## flux covid (hopital, rea)
flow_hopital_covid = slid_window_avg(
(
dep_data[
set(
[
"n_hospitalised_patients",
"n_hospital_death",
"n_hospital_healed",
]
)
].sum(axis=1)
)
.diff(1)
.fillna(0),
wi,
)
flow_reanima_covid = slid_window_avg(
(
dep_data[
set(
[
"n_covid_deaths",
"n_covid_healed",
"n_covid_transfered",
"n_covid_occ",
]
)
].sum(axis=1)
)
.diff(1)
.fillna(0),
wi,
)
## des donnees sont aussi disponible depuis une autre source, SPF :
# Nombre de nouveaux cas : (i.e, le FLUX)
# Nombre quotidien de personnes nouvellement hospitalisées pour COVID-19
# Nombre quotidien de nouvelles admissions en réanimation pour COVID-19
## flux non-covid (hopital, rea)
flow_hopital_n_cov = (
zeros # est-ce estimable a partir des data SOS medecin ?
)
## donnees SOS medecin:
# nbre_pass_corona -> passage aux urgences liés au corona (par age) (attention ! tout passage ne debouche pas sur une hospitalisation !)
# nbre_pass_tot -> passage aux urgences (total) (par age) (idem, attention !)
# nbre_hospit_corona -> envoi à l'hopital, lié au corona (par age) (interpretatin a verifier !!)
flow_reanima_n_cov = zeros # il nous manque les flux sortants (morts, rad) de la rea non-covid
##-> ca c'est introuvable, je pense
wi = 3 # sliding window time average
## lits covid (hopital, rea)
numberBed_hopital_covid_occup = slid_window_avg(
dep_data.n_hospitalised_patients, wi
)
numberBed_reanima_covid_occup = slid_window_avg(dep_data.n_covid_occ, wi)
numberBed_reanima_covid_total = slid_window_avg(
(dep_data.n_covid_occ + dep_data.n_covid_free), wi
)
## lits non-covid (hopital, rea)
numberBed_hopital_n_cov_occup = zeros # unknown
numberBed_reanima_n_cov_occup = slid_window_avg(dep_data.n_ncovid_occ, wi)
numberBed_reanima_n_cov_total = slid_window_avg(
(dep_data.n_ncovid_occ + dep_data.n_ncovid_free), wi
)
cdep = pd.DataFrame(
{
"date": dep_data.date,
"flow_hopital_covid": flow_hopital_covid,
"flow_reanima_covid": flow_reanima_covid,
"numberBed_hopital_covid_occup": numberBed_hopital_covid_occup,
"numberBed_reanima_covid_occup": numberBed_reanima_covid_occup,
"numberBed_reanima_covid_total": numberBed_reanima_covid_total,
"flow_hopital_n_cov": flow_hopital_n_cov,
"flow_reanima_n_cov": flow_reanima_n_cov,
"numberBed_hopital_n_cov_occup": numberBed_hopital_n_cov_occup,
"numberBed_reanima_n_cov_occup": numberBed_reanima_n_cov_occup,
"numberBed_reanima_n_cov_total": numberBed_reanima_n_cov_total,
"nicu_dep": nicu_dep,
}
)
return cdep
def plot_all_departments(d, bc, d_dep2reg):
"""this plots one figure per department for which we have data, of course."""
depCodesList = list(d_dep2reg.departmentCode.unique())
figs = {}
for dep_code in depCodesList:
dep_name = d_dep2reg[
d_dep2reg.departmentCode == dep_code
].departmentName.iloc[0]
if dep_name in d["department"].unique():
print("Tracé ok pour le département: ", dep_name)
cdep = compute_all_for_plots_by_dept(d, bc, dep_name)
figs[f"flux-lits-dept-{dep_name}"] = plot_one_dep(cdep, dep_name)
else:
print(
"Désolé, mais le département : ",
dep_name,
" n'est pas présent dans nos données.",
)
return figs
def plot_all_regions(d, bc, d_dep2reg):
"""plots the regional total
one plot pre region
sometimes there are few departements for which we have dat ain that region
this will be reflected in the number of ICUs, displayed in the title
"""
figs = {}
for reg_code in d_dep2reg.regionCode.dropna().unique():
reg_name = d_dep2reg[d_dep2reg.regionCode == reg_code].regionName.iloc[
0
]
dep_codes = list(
d_dep2reg[d_dep2reg.regionCode == reg_code].departmentCode
) ## getting the dep_code of the departments of this region
dep_counter = 0
print(
"\nAggrégation des données pour la région",
reg_name,
" , incluant les départements:",
)
for dep_code in dep_codes: ## going through this region's departments
dep_name = d_dep2reg[
d_dep2reg.departmentCode == dep_code
].departmentName.iloc[0]
if (
dep_code in d.department_code_icubam.unique()
): ## check if we have the data in the database(s)
print(dep_name)
cdep = compute_all_for_plots_by_dept(d, bc, dep_name)
cdep = cdep.set_index(
"date"
) ## to be able to add stuff (add all but date)
if dep_counter == 0:
cregion = cdep.copy() ## initialize with the first dept.
else:
cregion += cdep
dep_counter += 1
# else: ## this makes too much printing
# print("Désolé, mais le département : ", dep_name, " n'est pas présent dans nos données (icubam/bedcounts).")
if dep_counter == 0:
print(
"Désolé, mais la REGION : ",
reg_name,
" n'est pas présente (du tout) dans nos données (icubam/bedcounts).",
)
else:
cregion = cregion.reset_index()
figs[f"flux-lits-region-{reg_name}"] = plot_one_dep(
cregion, reg_name
)
# cregion = cregion.rename( columns={"nicu_dep": "nicu_reg"})
# plt.show()
return figs
if __name__ == "__main__":
api_key = sys.argv[1]
plot(api_key=api_key)
| [
11748,
25064,
198,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
11748,
14158,
549,
321,
198,
11748,
14158,
549,
321,
13,
28764,
291,
... | 2.028714 | 4,214 |
from datetime import datetime
import json
import os
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
33918,
198,
11748,
28686,
628
] | 4.076923 | 13 |
import requests
from bs4 import BeautifulSoup
import sys
username = "" # Fill it
passwd = "" # Fill it
session = requests.Session()
url = "https://www.codechef.com/login"
r = session.get(url, verify = False)
soup = BeautifulSoup(r.text, features = "lxml")
_csrf = soup.find("input", attrs = {"name": "csrfToken"})["value"]
formData = {
'name': username,
'pass': passwd,
'form_id': 'new_login_form',
'op': 'Login',
'csrfToken': _csrf
}
r = session.post(url, formData, verify = False)
# page = open("out.html", "w", encoding = "utf-8")
# page.write(r.text)
url = "https://www.codechef.com/submit/ACEBIT"
r = session.get(url, verify = False)
soup = BeautifulSoup(r.text, features = "lxml")
formToken = soup.find("input", attrs = {"name": "form_token", "id": "edit-problem-submission-form-token"})["value"]
codeFile = open("ACEBIT.cpp")
code = codeFile.read()
nullFile = open("null")
files = {'files[sourcefile]': nullFile}
formData = {
'language': '44', # 44 for C++14 (gcc 6.3.0)
'problem_code': 'ACEBIT',
'form_id': 'problem_submission',
'form_token': formToken,
"program": code
}
print(formData)
r = session.post(url, data = formData, files = files, verify = False)
page = open("out.html", "w", encoding = "utf-8")
page.write(r.text)
| [
11748,
7007,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
11748,
25064,
198,
198,
29460,
796,
13538,
1303,
27845,
340,
198,
6603,
16993,
796,
13538,
1303,
27845,
340,
198,
198,
29891,
796,
7007,
13,
36044,
3419,
198,
198,
6371,
... | 2.588115 | 488 |
from graphutil import Graph
if __name__ == "__main__":
test_arcs()
test_write()
test_connected_components()
test_bfs()
test_bfs_full()
test_substitute()
| [
6738,
4823,
22602,
1330,
29681,
628,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1332,
62,
5605,
82,
3419,
198,
220,
220,
220,
1332,
62,
13564,
3419,
198,
220,
220,
220,
1332,
62,
15236,
62... | 2.350649 | 77 |
import enum
__all__ = ("EventKind", "ALL_EVENTS")
ALL_EVENTS = (
EventKind.INSERT,
EventKind.UPDATE,
EventKind.DELETE
)
| [
11748,
33829,
628,
198,
834,
439,
834,
796,
5855,
9237,
35854,
1600,
366,
7036,
62,
20114,
15365,
4943,
628,
198,
198,
7036,
62,
20114,
15365,
796,
357,
198,
220,
220,
220,
8558,
35854,
13,
20913,
17395,
11,
198,
220,
220,
220,
8558,
... | 2.322034 | 59 |
from subprocess import check_output
from ast import literal_eval
from Segmentation.params import regions_file
from Segmentation.utilities import fix_json_fname
import numpy as np
def evaluation(fname_to_check, regions_fname=regions_file):
"""Evaluate results with neurofinder"""
fname_to_check = fix_json_fname(fname_to_check)
regions_fname = fix_json_fname(regions_fname)
# run command and get output
res = check_output(["neurofinder", "evaluate", regions_fname, fname_to_check], universal_newlines=True).rstrip()
res_dict = literal_eval(res)
return res_dict
def best_res(grid_search_results):
"""Finds the best result in the res list, produced by the grid search.
Based on the evaluate function."""
for k in grid_search_results[0]['evaluation'].keys(): # iterate over the each of the 5 keys
print(k, np.argmax((x['evaluation'][k] for x in grid_search_results)))
| [
6738,
850,
14681,
1330,
2198,
62,
22915,
201,
198,
6738,
6468,
1330,
18875,
62,
18206,
201,
198,
6738,
1001,
5154,
341,
13,
37266,
1330,
7652,
62,
7753,
201,
198,
6738,
1001,
5154,
341,
13,
315,
2410,
1330,
4259,
62,
17752,
62,
69,
... | 2.770588 | 340 |
from pyglet.gl import *
| [
6738,
12972,
70,
1616,
13,
4743,
1330,
1635,
628,
198
] | 2.6 | 10 |
from django.contrib import admin
from .models import Attachment, Message
@admin.register(Attachment)
@admin.register(Message)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
764,
27530,
1330,
3460,
15520,
11,
16000,
628,
198,
31,
28482,
13,
30238,
7,
8086,
15520,
8,
628,
198,
31,
28482,
13,
30238,
7,
12837,
8,
198
] | 3.447368 | 38 |
# Based on the original https://www.reddit.com/r/deepfakes/ code sample
import cv2 | [
2,
13403,
319,
262,
2656,
3740,
1378,
2503,
13,
10748,
13,
785,
14,
81,
14,
22089,
69,
1124,
14,
2438,
6291,
198,
198,
11748,
269,
85,
17
] | 3.074074 | 27 |
import json
import os
import tempfile
from unittest import skipIf
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.urls import reverse
from nautobot.extras.choices import WebhookHttpMethodChoices
from nautobot.extras.context_managers import web_request_context
from nautobot.extras.models import Webhook
from nautobot.utilities.testing.integration import SeleniumTestCase
from example_plugin.models import ExampleModel
@skipIf(
"example_plugin" not in settings.PLUGINS,
"example_plugin not in settings.PLUGINS",
)
class PluginWebhookTest(SeleniumTestCase):
"""
This test case proves that plugins can use the webhook functions when making changes on a model.
Because webhooks use celery a class variable is set to True called `requires_celery`. This starts
a celery instance in a separate thread.
"""
requires_celery = True
def update_headers(self, new_header):
"""
Update webhook additional headers with the name of the running test.
"""
headers = f"Test-Name: {new_header}"
self.webhook.additional_headers = headers
self.webhook.validated_save()
def test_plugin_webhook_create(self):
"""
Test that webhooks are correctly triggered by a plugin model create.
"""
self.clear_worker()
self.update_headers("test_plugin_webhook_create")
# Make change to model
with web_request_context(self.user):
ExampleModel.objects.create(name="foo", number=100)
self.wait_on_active_tasks()
self.assertTrue(os.path.exists(os.path.join(tempfile.gettempdir(), "test_plugin_webhook_create")))
os.remove(os.path.join(tempfile.gettempdir(), "test_plugin_webhook_create"))
def test_plugin_webhook_update(self):
"""
Test that webhooks are correctly triggered by a plugin model update.
"""
self.clear_worker()
self.update_headers("test_plugin_webhook_update")
obj = ExampleModel.objects.create(name="foo", number=100)
# Make change to model
with web_request_context(self.user):
obj.number = 200
obj.validated_save()
self.wait_on_active_tasks()
self.assertTrue(os.path.exists(os.path.join(tempfile.gettempdir(), "test_plugin_webhook_update")))
os.remove(os.path.join(tempfile.gettempdir(), "test_plugin_webhook_update"))
def test_plugin_webhook_delete(self):
"""
Test that webhooks are correctly triggered by a plugin model delete.
"""
self.clear_worker()
self.update_headers(os.path.join(tempfile.gettempdir(), "test_plugin_webhook_delete"))
obj = ExampleModel.objects.create(name="foo", number=100)
# Make change to model
with web_request_context(self.user):
obj.delete()
self.wait_on_active_tasks()
self.assertTrue(os.path.exists(os.path.join(tempfile.gettempdir(), "test_plugin_webhook_delete")))
os.remove(os.path.join(tempfile.gettempdir(), "test_plugin_webhook_delete"))
def test_plugin_webhook_with_body(self):
"""
Verify that webhook body_template is correctly used.
"""
self.clear_worker()
self.update_headers("test_plugin_webhook_with_body")
self.webhook.body_template = '{"message": "{{ event }}"}'
self.webhook.save()
# Make change to model
with web_request_context(self.user):
ExampleModel.objects.create(name="bar", number=100)
self.wait_on_active_tasks()
self.assertTrue(os.path.exists(os.path.join(tempfile.gettempdir(), "test_plugin_webhook_with_body")))
with open(os.path.join(tempfile.gettempdir(), "test_plugin_webhook_with_body"), "r") as f:
self.assertEqual(json.loads(f.read()), {"message": "created"})
os.remove(os.path.join(tempfile.gettempdir(), "test_plugin_webhook_with_body"))
class PluginDocumentationTest(SeleniumTestCase):
"""
Integration tests for ensuring plugin provided docs are supported.
"""
def test_object_edit_help_provided(self):
"""The ExampleModel object provides model documentation, this test ensures the help link is rendered."""
self.browser.visit(f'{self.live_server_url}{reverse("plugins:example_plugin:examplemodel_add")}')
self.assertTrue(self.browser.links.find_by_partial_href("example_plugin/docs/models/examplemodel.html"))
def test_object_edit_help_not_provided(self):
"""The AnotherExampleModel object doesn't provide model documentation, this test ensures no help link is provided."""
self.browser.visit(f'{self.live_server_url}{reverse("plugins:example_plugin:anotherexamplemodel_add")}')
self.assertFalse(self.browser.links.find_by_partial_href("example_plugin/docs/models/anotherexamplemodel.html"))
class PluginReturnUrlTestCase(SeleniumTestCase):
"""
Integration tests for reversing plugin return urls.
"""
def test_plugin_return_url(self):
"""This test ensures that plugins return url for new objects is the list view."""
self.browser.visit(f'{self.live_server_url}{reverse("plugins:example_plugin:examplemodel_add")}')
form = self.browser.find_by_tag("form")
# Check that the Cancel button is a link to the examplemodel_list view.
element = form.first.links.find_by_text("Cancel").first
self.assertEqual(
element["href"], f'{self.live_server_url}{reverse("plugins:example_plugin:examplemodel_list")}'
)
| [
11748,
33918,
198,
11748,
28686,
198,
11748,
20218,
7753,
198,
6738,
555,
715,
395,
1330,
14267,
1532,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
11299,
19199,
13,
27530,
1330,
14041,
... | 2.641243 | 2,124 |
# import the necessary packages
from moviepy.editor import VideoFileClip
import time
import os
# 프레임당 이미지로 저장
if __name__ == '__main__':
video_path = './fifth_season9/fifth_season_landmark.mp4'
save_dir = './fifth_season9'
out_name = 'landmark_img'
video_to_image(video_path, save_dir, out_name)
| [
2,
1330,
262,
3306,
10392,
198,
6738,
3807,
9078,
13,
35352,
1330,
7623,
8979,
2601,
541,
198,
11748,
640,
198,
11748,
28686,
198,
198,
2,
220,
169,
242,
226,
167,
254,
230,
168,
252,
226,
46695,
117,
23821,
251,
112,
167,
107,
116,... | 2.186207 | 145 |
#! /usr/bin/env python
"""This module acts as an interface for acting on git logs"""
from string import Template
from git_wrapper import exceptions
from git_wrapper.utils.decorators import reference_exists
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
1212,
8265,
6529,
355,
281,
7071,
329,
7205,
319,
17606,
17259,
37811,
198,
198,
6738,
4731,
1330,
37350,
198,
198,
6738,
17606,
62,
48553,
1330,
13269,
198,
6738,
17606,
62,
... | 3.87037 | 54 |
from src.data.download import download_database
from src.data.ranking_file import read_rankings
from src.data.rankings import get_and_save_rankings
from src.data.raw import save_names_from_database
from src.ranking.differences import calculate_ranking_difference_across_variants, save_ranking_differences
from src.ranking.uncertain import fill_and_filter_uncertain_values
from src.utils.filenames import get_ranking_filename
from src.ranking.fide import Regression, filter_rankings_by_fide
if __name__ == "__main__":
main()
| [
6738,
12351,
13,
7890,
13,
15002,
1330,
4321,
62,
48806,
198,
6738,
12351,
13,
7890,
13,
28405,
62,
7753,
1330,
1100,
62,
43027,
654,
198,
6738,
12351,
13,
7890,
13,
43027,
654,
1330,
651,
62,
392,
62,
21928,
62,
43027,
654,
198,
67... | 3.345912 | 159 |
import re
import psycopg2
from werkzeug.security import generate_password_hash, check_password_hash
from flask import Blueprint, request, jsonify, make_response
from app.api.v1.models.property_models import PropertyRecords
from app.api.v1.models.database import init_db
from app.api.v1.utils.validators import validate
from app.api.v1.utils.token import login_required
INIT_DB = init_db()
PROPERTY = Blueprint('property', __name__)
PROPERTY_RECORDS = PropertyRecords()
@PROPERTY.route('/property', methods=['POST'])
@login_required
def property_registration():
'''property registration'''
try:
data = request.get_json()
property_name = data["property_name"]
if not property_name.strip():
return jsonify({"error": "property name cannot be empty"}), 400
if not re.match(r"^[A-Za-z][a-zA-Z]", property_name):
return jsonify({"error": "input valid property name"}), 400
cur = INIT_DB.cursor()
cur.execute("""SELECT property_name FROM property WHERE property_name = '%s' """ %(property_name))
data = cur.fetchone()
print(data)
if data != None:
return jsonify({"message": "property already exists"}), 400
try:
return PROPERTY_RECORDS.register_property(property_name)
except (psycopg2.Error) as error:
return jsonify({"error":str(error)})
except KeyError:
return jsonify({"error": "a key is missing"}), 400
except Exception as e:
return jsonify({"error": str(e)}), 400
@PROPERTY.route('/property', methods=['GET'])
def view_all():
'''view all properties'''
return PROPERTY_RECORDS.view_properties()
@PROPERTY.route('/property/<int:property_id>', methods=['GET'])
def view_one(property_id):
'''view property by property id'''
return PROPERTY_RECORDS.view_property(property_id)
@PROPERTY.route('/property/<string:property_name>', methods=['GET'])
def view_one_by_name(property_name):
'''view property by property name'''
return PROPERTY_RECORDS.view_property_by_name(property_name) | [
11748,
302,
198,
11748,
17331,
22163,
70,
17,
198,
6738,
266,
9587,
2736,
1018,
13,
12961,
1330,
7716,
62,
28712,
62,
17831,
11,
2198,
62,
28712,
62,
17831,
198,
6738,
42903,
1330,
39932,
11,
2581,
11,
33918,
1958,
11,
787,
62,
26209,... | 2.621554 | 798 |
##############################
# #
# loxygenK/musical_typer #
# ゲームシステム #
# (c)2020 loxygenK #
# All rights reversed. #
# #
##############################
import re
import chardet
import pygame
import romkan
from lib import DrawingUtil, Romautil
class Screen:
"""
画面処理を簡単にするためのクラス。
このクラスのインスタンスは画面そのものも持つ
"""
big_font = pygame.font.Font("mplus-1m-medium.ttf", 72)
nihongo_font = pygame.font.Font("mplus-1m-medium.ttf", 48)
alphabet_font = pygame.font.Font("mplus-1m-medium.ttf", 32)
full_font = pygame.font.Font("mplus-1m-medium.ttf", 24)
rank_font = pygame.font.Font("mplus-1m-medium.ttf", 20)
system_font = pygame.font.Font("mplus-1m-medium.ttf", 16)
@property
def screen_size(self):
"""
スクリーンのサイズを返す。
:return: (横幅, 縦幅)
"""
return pygame.display.get_surface().get_size()
def print_str(self, x, y, font, text, color=(255, 255, 255)):
"""
ウィンドウに文字を描画する。
:param x: X座標
:param y: Y座標
:param font: 描画に使用するフォント
:param text: 描画する文字列
:param color: 描画する色
:return: なし
"""
DrawingUtil.print_str(self.screen, x, y, font, text, color)
def add_fg_effector(self, living_frame, section_name, draw_func, argument=None):
"""
前面エフェクターを追加する。
:param living_frame: 生存時間
:param section_name: エフェクターのセクション名
:param draw_func: 描画メソッド
:param argument: 描画メソッドに渡す引数
:return: なし
"""
# if draw_func.__name__ in self.fg_effector.keys() は遅い:/
try:
del self.effector[0][draw_func.__name__ + section_name]
except KeyError:
pass
self.effector[0].setdefault(draw_func.__name__ + section_name, [living_frame, 0, draw_func, argument])
def add_bg_effector(self, living_frame, section_name, draw_func, argument=None):
"""
背面エフェクターを追加する。
:param living_frame: 生存時間
:param section_name: エフェクターのセクション名
:param draw_func: 描画メソッド
:param argument: 描画メソッドに渡す引数
:return: なし
"""
# if draw_func.__name__ in self.fg_effector.keys() は遅い:/
try:
del self.effector[1][draw_func.__name__ + section_name]
except KeyError:
pass
self.effector[1].setdefault(draw_func.__name__ + section_name, [living_frame, 0, draw_func, argument])
def update_effector(self, mode: int):
"""
エフェクターを更新
:param mode: 0なら前面エフェクターを更新する
1なら背面エフェクターを更新する
:return: なし
"""
key_list = list(self.effector[mode].keys())
for k in key_list:
self.effector[mode][k][2](
self.effector[mode][k][1],
self.effector[mode][k][0],
self,
self.effector[mode][k][3]
)
self.effector[mode][k][1] += 1
if self.effector[mode][k][1] > self.effector[mode][k][0]:
del self.effector[mode][k]
@DeprecationWarning
def get_font_by_size(self, size):
"""
フォントをサイズから取得する。なんかそれなりに重いので使わんほうがいい
使うなら何回も呼び出すんじゃなくて変数に入れるとかしよう
:param size: サイズ
:return: フォント
"""
return pygame.font.Font("mplus-1m-medium.ttf", size)
class GameInfo:
"""
ゲームの情報を統合して管理する。
"""
ONE_CHAR_POINT = 10
PERFECT_POINT = 100
SECTION_PERFECT_POINT = 300
SPECIAL_POINT = 50
CLEAR_POINT = 50
MISS_POINT = -30
COULDNT_TYPE_POINT = -2
IDEAL_TYPE_SPEED = 3.0
# ----- プロパティ -----
# *** タイプ情報 ***
@property
def typed_kana(self):
"""
すでに打ったローマ字を取得する。
:return: すでに打ったローマ字
"""
typed_index = self.full_kana.rindex(self.target_kana)
if len(self.target_kana) > 0:
return self.full_kana[:typed_index]
else:
return self.full_kana
@property
def typed(self):
"""
打ったキーの合計。
:return: 打ったキーの合計
"""
return self.count + self.missed
@property
def sent_typed(self):
"""
文単位で打ったキーの数。
:return: 打ったキー数
"""
return self.sent_count + self.sent_miss
@property
def section_typed(self):
"""
セクション単位で打ったキーの数。
:return: 打ったキーの数
"""
return self.section_count + self.section_miss
@property
def all_typed(self):
"""
打ち終わったか(もともと歌詞がなかった場合はFalseを返す)
:return: 歌詞がなく、一文字以上打っている場合はTrue
"""
return self.completed and self.sent_typed > 0
@property
def is_ac(self):
"""
ACしたか
:return: GameInfo.all_typedを満たし、かつミス数が0で「ある」場合True
"""
return self.completed and self.sent_typed > 0 and self.sent_miss == 0
@property
def is_wa(self):
"""
WAだったか
:return: GameInfo.all_typedを満たし、かつミス数が0で「ない」場合True
"""
return self.completed and self.sent_typed > 0 and self.sent_miss > 0
@property
def has_to_prevent_miss(self):
"""
輪唱をまだタイプしていない場合など、特殊なケースにより
ミス判定をしてはいけない場合にTrueを返す。
:return: ミス判定をしてはいけない場合にTrue
"""
if self.full[:1] == "/" and self.sent_count == 0:
return True
return False
# ----- メソッド -----
# *** 現在の位置から情報を求める ***
def update_current_lyrincs(self):
"""
与えられた時間に置いて打つべき歌詞のデータを求める。
:return: データ, lyrincs_indexが変化したか
"""
# 歌詞がない場合は無条件に終了する
if len(self.score.score) == 0:
self.song_finished = True
return False
# 一番最後の歌詞かどうか
if self.song_finished:
# 一番最後からは変化しない
return False
else:
# 現在の歌詞がすでに終わっているか(次の歌詞の開始時間を過ぎているか)
if self.score.score[self.lyrincs_index + 1][0] > self.pos:
return False
# 次の歌詞を探す
# pos i
# ↓ |
# ---|//(i-1)/////|-----(i)-----|---
# └→ここが引っかかる
for i in range(self.lyrincs_index, len(self.score.score)):
if i < 0:
continue
# i番目の歌詞の開始時間がposを超えているか
if self.score.score[i][0] > self.pos:
# 歌詞が変わっているか
is_lidx_changes = i - 1 != self.lyrincs_index
if is_lidx_changes:
# 更新する
self.lyrincs_index = i - 1
# 歌詞が変わっているかどうかを返す
return is_lidx_changes
# ヒットしなかった(歌詞が終了した)
if not self.song_finished:
self.song_finished = True
return True
return False
def get_current_section(self):
"""
与えられた時間に置いて打つべき歌詞のデータを求める。
:return: データ, lyrincs_indexが変化したか
"""
if len(self.score.section) == 0:
self.section_finished = True
return False
if self.section_index > len(self.score.section) - 1:
return False
else:
if self.score.section[self.section_index + 1][0] > self.pos:
return False
for i in range(self.section_index, len(self.score.section)):
if i < 0:
continue
if self.score.section[i][0] >= self.pos:
is_lidx_changes = (i - 1) != self.section_index
if is_lidx_changes:
self.section_index = i - 1
return is_lidx_changes
self.section_finished = True
return False
def update_current_zone(self):
"""
与えられた時間が属するゾーンを求める。
:return: ゾーン名。ゾーンに属していない場合はNoneを返す
"""
if len(self.score.zone) == 0:
self.is_in_zone = False
return
if self.zone_index == len(self.score.zone) - 2:
if self.score.zone[self.zone_index + 1][0] > self.pos:
self.is_in_zone = True
return
else:
self.is_in_zone = False
return
else:
if self.score.zone[self.zone_index][0] <= self.pos < self.score.zone[self.zone_index + 1][0]:
return
for i in range(self.zone_index, len(self.score.zone)):
if i < 0:
continue
if self.score.zone[i][0] >= self.pos and self.score.zone[i][2] == "end":
if self.score.zone[i - 1][0] <= self.pos and self.score.zone[i - 1][2] == "start":
is_lidx_changes = (i - 1) != self.zone_index
if is_lidx_changes:
self.zone_index = i - 1
self.is_in_zone = True
return
else:
if self.zone_index != 0:
self.zone_index = 0
self.is_in_zone = False
return
self.is_in_zone = False
return
self.is_in_zone = False
return
# *** 残り時間情報 ***
def get_sentence_full_time(self):
"""
現在の歌詞が表示される時間を求める。
:return: 現在の歌詞時間。
"""
next_sentence_time = self.score.score[self.lyrincs_index + 1][0]
this_sentence_time = self.score.score[self.lyrincs_index][0]
return next_sentence_time - this_sentence_time
def get_sentence_elapsed_time(self):
"""
現在の歌詞が表示されてから経った時間を求める。
:return: 経った時間。
"""
next_sentence_time = self.score.score[self.lyrincs_index + 1][0]
return next_sentence_time - self.pos
def get_time_remain_ratio(self):
"""
0~1で、どのくらい時間が経ったかを求める。
:return: 経った時間を0~1で。
"""
return self.get_sentence_elapsed_time() / self.get_sentence_full_time()
# *** ミス率 ****
@staticmethod
def get_full_accuracy(self):
"""
全体での成功比率を求める。
成功回数+失敗回数が0の場合は、成功回数を返す。(つまり0になる)
:return: 成功比率(成功回数/(成功回数+失敗回数))
"""
return self.calc_accuracy(self.count, self.missed)
def get_sentence_accuracy(self):
"""
歌詞ごとの成功比率を求める。
成功回数+失敗回数が0の場合は、成功回数を返す。(つまり0になる)
:return: 成功比率(成功回数/(成功回数+失敗回数))
"""
return self.calc_accuracy(self.sent_count, self.sent_miss)
# *** 歌詞情報アップデート ***
def update_current_lyrics(self, full=None, kana=None):
"""
現在打つべき歌詞を設定する。kanaのローマ字変換結果が0文字だった場合は、self.completed はFalseになる。
:param full: 歌詞
:param kana: 歌詞のふりがな
:return: なし
"""
self.reset_sentence_condition()
if full is None:
full = self.score.score[self.lyrincs_index][1]
if kana is None:
kana = self.score.score[self.lyrincs_index][2]
self.full = full
self.target_kana = kana
self.full_kana = kana
self.target_roma = Romautil.hira2roma(self.target_kana)
if len(self.target_roma) == 0:
self.completed = True
def apply_TLE(self):
"""
TLE計算をする
:return: なし
"""
if len(self.target_roma) == 0:
return
if self.has_to_prevent_miss:
return
self.point += GameInfo.COULDNT_TYPE_POINT * len(self.target_roma)
self.standard_point += GameInfo.ONE_CHAR_POINT * len(self.target_roma) * 40
self.standard_point += GameInfo.CLEAR_POINT + GameInfo.PERFECT_POINT
self.missed += len(self.target_roma)
self.sent_miss += len(self.target_roma)
self.section_miss += len(self.target_roma)
def get_section_missrate(self):
"""
セクションごとの成功比率を求める。
成功回数+失敗回数が0の場合は、成功回数を返す。(つまり0になる)
:return: 成功比率(成功回数/(成功回数+失敗回数))
"""
return self.calc_accuracy(self.section_count, self.section_miss)
def reset_sentence_condition(self):
"""
歌詞ごとの進捗情報を消去する。
:return: なし
"""
self.sent_count = 0
self.sent_miss = 0
self.typed_roma = ""
self.completed = False
def reset_section_score(self):
"""
セクションごとの進捗情報を消去する。
:return: なし
"""
self.section_count = 0
self.section_miss = 0
def count_success(self):
"""
タイプ成功をカウントする。
"""
# スコア/理想スコアをカウントする
self.count += 1
self.sent_count += 1
self.section_count += 1
self.combo += 1
self.point += int(GameInfo.ONE_CHAR_POINT * 10 * self.get_key_per_second() * (self.combo / 10))
# self.point += int(10 * self.get_key_per_second())
self.standard_point += int(GameInfo.ONE_CHAR_POINT * GameInfo.IDEAL_TYPE_SPEED * 10 * (self.combo / 10))
# tech-zone ゾーン内にいるか
if self.is_in_zone and self.score.zone[self.zone_index] == "tech-zone":
self.point += self.SPECIAL_POINT
# 歌詞情報を更新する
self.typed_roma += self.target_roma[:1]
self.target_roma = self.target_roma[1:]
# 打つべきかなを取得する
self.target_kana = Romautil.get_not_halfway_hr(self.target_kana, self.target_roma)
# ひらがな一つのタイプが終了した?
if not Romautil.is_halfway(self.target_kana, self.target_roma):
# キータイプをカウントする
self.keytype_tick()
# これ以上打つ必要がないか
if len(self.target_roma) == 0:
# クリアポイントを付与
self.point += GameInfo.CLEAR_POINT
# ポイントを更新
self.standard_point += GameInfo.CLEAR_POINT + GameInfo.PERFECT_POINT
if self.sent_miss == 0:
self.point += GameInfo.PERFECT_POINT
self.completed = True
return int(GameInfo.ONE_CHAR_POINT * 10 * self.get_key_per_second() * (self.combo / 10))
def count_failure(self):
"""
失敗をカウントする。
:return: なし
"""
self.missed += 1
self.sent_miss += 1
self.section_miss += 1
self.point += GameInfo.MISS_POINT
self.combo = 0
def is_exactly_expected_key(self, code):
"""
タイプされたキーが正確に期待されているキーか確認する。
is_excepted_keyと違って、ローマ字表記の仕方の違いを許容しない。
ゲーム内での判定では、is_expected_keyを使おう
:param code: タイプされたキー
:return: 正しい場合はTrue
"""
if len(self.target_roma) == 0:
return False
# l? は x? でもOK
if self.target_roma[0] == "x":
return code == "x" or code == "l"
return self.target_roma[0] == code
def is_expected_key(self, code):
"""
タイプされたキーが期待されているキーか確認する。
:param code: タイプされたキー
:return: 正しい場合はTrue
"""
if len(self.target_roma) == 0:
return False
if not Romautil.is_halfway(self.target_kana, self.target_roma):
first_syllable = Romautil.get_first_syllable(self.target_kana)
kunrei = romkan.to_kunrei(first_syllable)
hepburn = romkan.to_hepburn(first_syllable)
optimized = Romautil.hira2roma(first_syllable)
if kunrei[0] == "x":
return self.is_exactly_expected_key(code)
if kunrei[0] == code:
print("Kunrei, approve.")
return True
elif hepburn[0] == code:
print("Hepburn, approve.")
self.target_roma = hepburn + self.target_roma[len(kunrei):]
return True
elif optimized[0] == code:
print("Optimized, approve.")
self.target_roma = optimized + self.target_roma[len(kunrei):]
return True
else:
print("kunrei nor hepburn, deny.")
return False
else:
return self.is_exactly_expected_key(code)
def get_rate(self, accuracy=-1, limit=False):
"""
達成率を計算する
:param accuracy: 計算に使用する達成率情報。省略すると全体の達成率を使用する
:param limit: 100%を超えないようにするか
:return: 達成率
"""
if accuracy == -1:
accuracy = self.get_full_accuracy()
standard = (self.standard_point + self.count * 45)
score = self.point * accuracy
if score <= 0:
return 0
if limit:
score = min(score, standard)
return score / standard
def calculate_rank(self, accuracy=-1):
"""
達成率からランクのIDを取得する
:param accuracy: 計算に使用する達成率。
:return: ランクのID
"""
rank_standard = [200, 150, 125, 100, 99.50, 99, 98, 97, 94, 90, 80, 60, 40, 20, 10, 0]
rate = self.get_rate(accuracy)
for i in range(0, len(rank_standard)):
if rank_standard[i] < rate * 100:
return i
return len(rank_standard) - 1
def keytype_tick(self):
"""
キータイプを記録する。
:return: なし
"""
if self.prev_time == 0:
self.prev_time = self.pos
return
self.key_log.append(self.pos - self.prev_time)
self.prev_time = self.pos
if len(self.key_log) > self.length:
del self.key_log[0]
def override_key_prev_pos(self, pos=-1):
"""
前回のキータイプ時間を指定した時間で上書きする。
:param pos: 上書きするキータイプ時間。省略すると現在の時間になる。
:return: なし
"""
self.prev_time = pos if pos != -1 else self.pos
def get_key_type_average(self):
"""
1つのキータイプに要する平均時間を求める
:return: キータイプ時間
"""
if len(self.key_log) == 0:
return 0
return sum(self.key_log) / len(self.key_log)
def get_key_per_second(self):
"""
一秒ごとにタイプするキーを求める。
:return: [key/sec]
"""
if len(self.key_log) == 0:
return 0
return 1 / self.get_key_type_average()
class SoundEffectConstants:
"""
効果音ファイルの集合体。
"""
success = pygame.mixer.Sound("ses/success.wav")
special_success = pygame.mixer.Sound("ses/special.wav")
failed = pygame.mixer.Sound("ses/failed.wav")
unneccesary = pygame.mixer.Sound("ses/unneccesary.wav")
gameover = pygame.mixer.Sound("ses/gameover.wav")
ac = pygame.mixer.Sound("ses/ac.wav")
wa = pygame.mixer.Sound("ses/wa.wav")
fast = pygame.mixer.Sound("ses/fast.wav")
tle = pygame.mixer.Sound("ses/tle.wav")
class Score:
"""
譜面データ。
"""
LOG_ERROR = 1
LOG_WARN = 2
def log_error(self, line, text, init=True):
"""
エラーログを記録し、データを削除する。
:param line: ログを出力するときの行。
:param text: ログ内容。
:param init: データを削除するか(デフォルト: True)
:return: なし
"""
self.log.append([Score.LOG_ERROR, line, text])
if init:
self.re_initialize_except_log()
def log_warn(self, line, text):
"""
警告ログを記録する。
:param line: ログを出力するときの行。
:param text: ログ内容。
:return: なし
"""
self.log.append([Score.LOG_WARN, line, text])
def re_initialize_except_log(self):
"""
ログ以外を再初期化する。
:return: なし
"""
self.properties = {}
self.score = []
self.zone = []
self.section = []
def read_score(self, file_name):
"""
ファイルから譜面データを読み込み、このインスタンスに値をセットする。
:param file_name: 譜面データの入ったファイル
:return: なし(このメソッドは破壊性である)
"""
# ----- [ 下準備 ] -----
# 便利なやつ
re_rect_bracket = re.compile(r"\[(.*)\]")
# エンコードを判別する
with open(file_name, mode="rb") as f:
detect_result = chardet.detect(f.read())
encoding = detect_result["encoding"]
# ファイルを読み込む
with open(file_name, mode="r", encoding=encoding) as f:
lines = f.readlines()
# ----- [ パース ] -----
current_minute = 0
current_time = 0
song = ""
phon = ""
is_in_song = False
for i in range(len(lines)):
line = lines[i].strip()
# ----- 処理対象行かの確認
# コメント
if line.startswith("#"):
continue
# 空行
if len(line) == 0:
continue
# カギカッコ
rect_blk_match = re_rect_bracket.match(line)
# ----- 曲外での処理
if not is_in_song:
# 曲に関するプロパティ
if line.startswith(":") and not is_in_song:
line = line[1:]
key, value = line.split()
set_val_to_dictionary(self.properties, key, value)
continue
if rect_blk_match is not None:
command = rect_blk_match[1]
# 曲開始コマンド?
if command == "start":
is_in_song = True
continue
# 上記の条件にヒットしない文字列は、
# 曲データの外では許可されない
self.log_error(i + 1, "Unknown text outside song section")
self.re_initialize_except_log()
break
# ----- 曲外での処理
# カギカッコで囲まれているか
if rect_blk_match is not None:
command = rect_blk_match[1]
# 間奏などで歌詞データがない
if command == "break":
self.score.append([current_time, "", ""])
continue
if command == "end":
self.score.append([current_time, ">>end<<", ""])
is_in_song = False
continue
# 歌詞のみ(キャプションなど)
if line.startswith(">>"):
self.score.append([current_time, line[2:], ""])
continue
# 分指定演算子
if line.startswith("|"):
line = line[1:]
current_minute = int(line)
continue
# 秒指定演算子
# 秒指定演算子で、現在時間の更新と一緒に歌詞データの書き込みも実施する
if line.startswith("*"):
line = line[1:]
# 歌詞データが提供されているのにも関わらず、ふりがなデータがない
if len(song) != 0:
if len(phon) == 0:
# これはダメで、エラーを吐く
self.log_error(i + 1, "No pronunciation data")
break
else:
self.score.append([current_time, song, phon])
# リセットする
song = ""
phon = ""
# 現在時間をセットする
current_time = 60 * current_minute + float(line)
continue
# セクション演算子
if line.startswith("@"):
line = line[1:]
self.section.append([current_time, line])
continue
# ゾーン演算子
if line.startswith("!"):
line = line[1:]
flag, zone_name = line.split()
# ゾーンが始まる
if flag == "start":
self.zone.append([current_time, zone_name, "start"])
continue
# ゾーンが終わる
elif flag == "end":
self.zone.append([current_time, zone_name, "end"])
continue
# ふりがなデータ
if line.startswith(":"):
phon += line[1:]
continue
# 特に何もなければそれは歌詞
song += line
# 読み込み終わり
self.score.insert(0, [0, "", ""])
# エラーは出ていないか
if len(list(filter(lambda x: x[0] == Score.LOG_ERROR, self.log))) == 0:
# wavは定義されているか
if "song_data" not in self.properties.keys():
# それはダメ
raise ScoreFormatError(0, "Song is not specified")
else:
# 読み込む
pygame.mixer.music.load(self.properties["song_data"])
else:
# エラーなので例外をスローする
raise ScoreFormatError(self.log[0][1], self.log[0][2])
def set_val_to_dictionary(dictionary, key, value):
"""
キーの有無に関わらず辞書にデータを書き込む。
キーが辞書に無かった場合は追加し、すでにある場合は更新する。
:param dictionary: 辞書
:param key: キー
:param value: 値
:return:
"""
if key in dictionary.keys():
dictionary[key] = value
else:
dictionary.setdefault(key, value)
| [
14468,
7804,
4242,
2235,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
1303,
198,
2,
220,
220,
300,
23536,
5235,
42,
14,
14664,
605... | 1.568284 | 15,399 |