content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from math import *
#Calculates Lowest Common Multiple
in1 = int(input("Insert 1st number: "))
in2 = int(input("Insert 2nd number: "))
print("LCM of %d and %d is"%(in1, in2), compute_lcm(in1, in2)) | [
6738,
10688,
1330,
1635,
198,
198,
2,
9771,
3129,
689,
7754,
395,
8070,
20401,
198,
198,
259,
16,
796,
493,
7,
15414,
7203,
44402,
352,
301,
1271,
25,
366,
4008,
198,
259,
17,
796,
493,
7,
15414,
7203,
44402,
362,
358,
1271,
25,
3... | 2.551282 | 78 |
from django.urls import path,include
from . import views
from .views import *
urlpatterns = [
path('',views.employee_form ),
path('list/',views.employee_list ),
path('safety_ohs_a9/',safety_ohs_a9,name='safety_ohs_a9'),
path('homepagef/',homepagef,name='homepagef'),
path('charityregisterf/',charityregisterf,name='charityregisterf'),
path('editschemef/',editschemef,name='editschemef'),
path('acceptf/',acceptf,name='acceptf'),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
11,
17256,
201,
198,
6738,
764,
1330,
5009,
201,
198,
6738,
764,
33571,
1330,
1635,
201,
198,
6371,
33279,
82,
796,
685,
201,
198,
220,
220,
220,
3108,
10786,
3256,
33571,
13,
7033,
1453,
... | 2.416244 | 197 |
#!/usr/bin/env python3
# -*- coding:UTF-8 -*-
#########################################################
# filename : generateReadme.py
# author : geekhall
# version : v1.0.0
# function : Gererate root README.md file.
#########################################################
import os
# Constant
ROOT_DIR = './'
# link address : https://github.com/geekhall/leetcode/tree/main/00003_LongestSubstring
LEETCODE_PRE_GITHUB = 'https://github.com/geekhall/algorithms/tree/main/leetcode/'
LEETCODE_PRE_GITEE='https://gitee.com/geekhall/algorithms/tree/main/leetcode/'
LINTCODE_PRE_GITHUB = 'https://github.com/geekhall/algorithms/tree/main/lintcode/'
LINTCODE_PRE_GITEE='https://gitee.com/geekhall/algorithms/tree/main/lintcode/'
CODEWARS_PRE_GITHUB = 'https://github.com/geekhall/algorithms/tree/main/codewars/'
CODEWARS_PRE_GITEE='https://gitee.com/geekhall/algorithms/tree/main/codewars/'
EXCLUDES = ['QuickSort', '.vscode', '.git', 'TempSource','00000_Template','00000_Template_Go','static']
# Generate quiz list from folder started by numbers
if __name__ == '__main__':
quizs = generate_quiz_list()
f = open('./problem-list.md', 'w', encoding='utf-8')
f.write('## Problem List\n')
f.write('\n')
f.write('### LeetCode\n')
f.write('\n')
f.write('| id |Name(Github)|Name(Gitee)|\n')
f.write('|----|----|----|\n')
for q in quizs:
if q.type == 1:
line = '|' + q.id + '|[' + q.name+']('+LEETCODE_PRE_GITHUB+q.id+'_'+q.name+')|[' + q.name+']('+LEETCODE_PRE_GITEE+q.id+'_'+q.name+')|' +'\n'
f.write(line)
f.write('\n')
f.write('### LintCode\n')
f.write('\n')
f.write('| id |Name(Github)|Name(Gitee)|\n')
f.write('|----|----|----|\n')
for q in quizs:
if q.type == 2:
line = '|' + q.id + '|[' + q.name+']('+LINTCODE_PRE_GITHUB+q.id+'_'+q.name+')|[' + q.name+']('+LINTCODE_PRE_GITEE+q.id+'_'+q.name+')|' +'\n'
f.write(line)
f.write('\n')
f.write('### codewars\n')
f.write('\n')
f.write('| id |Name(Github)|Name(Gitee)|\n')
f.write('|----|----|----|\n')
for q in quizs:
if q.type == 3:
line = '|' + q.id + '|[' + q.name+']('+CODEWARS_PRE_GITHUB+q.id+'_'+q.name+')|[' + q.name+']('+CODEWARS_PRE_GITEE+q.id+'_'+q.name+')|' +'\n'
f.write(line)
f.close() | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
48504,
12,
23,
532,
9,
12,
198,
29113,
14468,
7804,
2,
198,
2,
29472,
1058,
7716,
5569,
1326,
13,
9078,
198,
2,
1772,
220,
220,
1058,
27314,
18323,
... | 2.169307 | 1,010 |
import discord, asyncio, json
if __name__ == '__main__':
bot = PorkchopBot() | [
11748,
36446,
11,
30351,
952,
11,
33918,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
10214,
796,
44062,
354,
404,
20630,
3419
] | 2.857143 | 28 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Chengwei Luo (cluo@broadinstitute.org)'
__version__ = '0.1.0'
__date__ = 'Oct 2015'
"""
BOTA: Bacteria-Origin T-cell Antigen predictor
Copyright(c) 2015 Chengwei Luo (luo.chengwei@gatech.edu)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
https://bitbucket.org/luo-chengwei/BOTA
This script is part of the metaHGT package,
for help for BOTA, type:
python BOTA.py --help
"""
USAGE = \
"""Usage: %prog -c/--config <config_file> -o/--outdir <output directory> [options]
BOTA: Bacteria-Origin T-cell Antigen predictor
The configuration file format follows:
# this a comment line
hmmscan='hmmscan' # if alread in the PATH, then just leave it as 'hmmscan', if not, specify the path
hmmtop='hmmtop' # the same as hmmscan
psort='psort' # the same as hmmscan
[genome_name] # you need the squared bracket to contain whatever you want to call the genome
fna /path/to/genomics/fna/file # this is a compulsory field
gff /path/to/gff/file # optional. if not supplied, we will do prodigal protein-coding gene predictions
hmmtop /path/to/hmmtop/file # optional. if not supplied, we will do hmmtop calculation.
hmmscan /path/to/text-hmmscan/file # optional
psort /path/to/psort/file # optional
alleles list_of_alleles_separated_by_comma # Optional. you can also supply human or mouse to select all available alleles
# if you don't specify, default to all alleles.
gram # Optional. specify the organism is 'P', gram-positive, 'N', gram-negative, or 'A', achaea; if not specified, BOTA
will try to determine it.
Add --help to see a full list of required and optional
arguments to run BOTA
Additional information can also be found at:
https://bitbucket.org/luo-chengwei/bota/wiki
If you use BOTA in your work, please cite it as:
<BOTA citation TBD>
Copyright: Chengwei Luo, Broad Institute of MIT and Harvard, 2015
"""
import sys, os, re, glob, shutil, random
import _pickle as cPickle
from optparse import OptionParser, OptionGroup
from operator import itemgetter
from time import ctime, time
import multiprocessing as mp
from subprocess import call, PIPE, Popen
from itertools import groupby
import networkx
import numpy as np
from Bio import SeqIO
from Bio.Seq import Seq
import ptvsd
# KERAS init
from keras.models import Sequential, model_from_json, model_from_config
#from keras.layers.core import Dense, Dropout, Activation
#from keras.optimizers import SGD, Adam, RMSprop
#from keras.utils import np_utils
sys.path.append(os.path.basename(sys.argv[0])+'/algorithms/')
class GenomeProj:
"""The base class to hold a BOTA project's sub genome project information"""
class ProjInfo:
"""The base class to hold a BOTA project information"""
def __init__(self):
""" base init method """
self.prodigal = None
self.psort = None
self.hmmscan = None
self.hmmtop = None
self.hmmtop_arch = [None, None]
self.pfam = None
self.HLA_db = None
self.gram_db = None
self.pwm_db = None
self.genomes = {}
self.models={}
def init_from_config(self, config):
"""Initiate a BOTA project information for configuration file.
Parameters
----------
config: the path to the configuration file with format detailed in usage.
"""
# init the db files
script_path = os.path.dirname(os.path.realpath(__file__))
db_path = os.path.join(script_path, 'db')
if not os.path.exists(db_path):
sys.stderr.write('[FATAL] Cannot locate the db directory.\n')
exit(1)
self.hmmtop_arch=[os.path.join(db_path, 'hmmtop.arch'), os.path.join(db_path, 'hmmtop.psv')]
self.HLA_db=os.path.join(db_path, 'HLA.db')
self.gram_db=os.path.join(db_path, 'Gram.faa')
self.pwm_db = os.path.join(db_path, 'PWMatrix.pkl')
if not os.path.exists(self.hmmtop_arch[0]):
sys.stderr.write('[FATAL] Cannot locate HMMTOP arch file hmmtop.arch in db/.\n')
if not os.path.exists(self.hmmtop_arch[1]):
sys.stderr.write('[FATAL] Cannot locate HMMTOP parameters file hmmtop.psv in db/.\n')
if not os.path.exists(self.HLA_db):
sys.stderr.write('[FATAL] Cannot locate HLA allele DB in db/.\n')
if not os.path.exists(self.gram_db):
sys.stderr.write('[FATAL] Cannot locate the DB for Gram typing in db/.\n')
if not os.path.exists(self.pwm_db):
sys.stderr.write('[FATAL] Cannot locate the DB for PW matrix in db/.\n')
# check pfam, if not there, then download it
if self.pfam == None:
for ext in ['h3f', 'h3i', 'h3m', 'h3p']:
pfam_file = os.path.join(db_path, 'Pfam-A.hmm.%s' % ext)
if not os.path.exists(pfam_file):
sys.stderr.write('[FATAL] Cannot locate Pfam-A.hmm.%s\n' % ext)
exit(1)
self.pfam=os.path.join(db_path, 'Pfam-A.hmm')
# get all alleles
all_alleles = {'human':[], 'mouse':[]}
for line in open(self.HLA_db, 'r'):
cols = line[:-1].split(',')
if line[:3] == 'HLA': all_alleles['human'] += cols
else: all_alleles['mouse'] += cols
allele_set = set(all_alleles['human']+all_alleles['mouse'])
genome_id = ''
missing_execs = []
for line in open(config, 'r'):
if line[0] == '#': continue # the comment line
if re.search('blat|hmmscan|prodigal|hmmtop|psort|gram|alleles\=\'.+\'', line[:-1]) != None:
try:
k, v = re.search('(.+)\=\'(.+)\'', line.rstrip()).group(1, 2)
except:
sys.stderr.write('[FATAL] Error in parsing the config file genome file section.\n')
exit(1)
# print("v",v)
av = which(v)
if av != None:
if k == 'blat': self.blat = av
elif k == 'psort': self.psort = av
elif k == 'prodigal': self.prodigal = av
elif k == 'hmmscan': self.hmmscan=av
elif k == 'hmmtop': self.hmmtop=av
else: raise ValueError
else:
sys.stderr.write('[FATAL] Error in parsing config. %s path %s does not seem to work.\n' % (k, v))
#exit(1)
elif re.search('^\[.+\]', line.rstrip()) != None:
genome_id = re.search('^\[(.+)\]', line.rstrip()).group(1)
self.genomes[genome_id] = GenomeProj()
self.genomes[genome_id].name=genome_id
elif re.search('^gff|fna|psort|hmmtop|hmmscan|alleles|gram\t.+$', line.rstrip()) != None:
k, v = line.rstrip().split('\t')
if k != 'alleles' and not os.path.exists(v):
sys.stderr.write('[FATAL] cannot locate %s file for genome=\'%s\', you supplied %s.\n' % (k, genome_id, v))
exit(1)
elif k == 'fna': self.genomes[genome_id].fna=v
elif k == 'gff': self.genomes[genome_id].gff=v
elif k == 'hmmscan': self.genomes[genome_id].hmmscan=v
elif k == 'hmmtop': self.genomes[genome_id].hmmtop=v
elif k == 'psort': self.genomes[genome_id].psort=v
elif k == 'gram':
if v in list('APV'): self.genomes[genome_id].gram=v
else:
sys.stderr.write('[FATAL] for genome %s Gram stain type you supplied: %s. \
It has to be A, P, or N.\n' % (genome_id, v))
exit(1)
elif k == 'alleles':
alleles = v.split(',')
if len(alleles) == 1 and alleles[0] == 'human':
self.genomes[genome_id].alleles=all_alleles['human']
elif len(alleles) == 1 and alleles[0] == 'mouse':
self.genomes[genome_id].alleles=all_alleles['mouse']
elif len(alleles) == 1 and alleles[0] in allele_set:
self.genomes[genome_id].alleles=alleles
elif len(all_alleles) > 1:
for allele in all_alleles:
if allele not in allele_set:
sys.stderr.write('[FATAL] You supplied %s allele, not in the allele list.\n' % allele)
sys.stderr.write('This is a list of all alleles available:\n')
sys.stderr.write('[human]\n')
for allele in all_alleles['human']: sys.stderr.write(allele+'\n')
sys.stderr.write('[mouse]\n')
for allele in all_alleles['mouse']: sys.stderr.write(allele+'\n')
sys.stderr.write('\n')
exit(1)
else:
self.genomes[genome_id].alleles.append(allele)
else:
sys.stderr.write('[FATAL] Illegal keyword %s found. \
Specify files with \'gff\', \'fna\', \'hmmscan\', \'alleles\', \'gram\', or \'hmmtop\'.\n' % (k))
elif line[:-1] == '': pass
else:
sys.stderr.write('[FATAL] Error in parsing config, no keyword found.\n')
exit(1)
# refresh alleles, add models
model_path = os.path.join(script_path, 'models')
if not os.path.exists(model_path):
sys.stderr.write('[FATAL] Cannot locate the DNN model path.\n')
exit(1)
alleles_needed = set()
for genome_id in self.genomes:
if len(self.genomes[genome_id].alleles) == 0:
self.genomes[genome_id].alleles = list(allele_set)
for allele in self.genomes[genome_id].alleles:
alleles_needed.add(allele)
for allele in alleles_needed:
model_arch = os.path.join(model_path, '%s.model_arch.json' % allele)
model_weights = os.path.join(model_path, '%s.model_weights.h5' % allele)
if not os.path.exists(model_arch):
sys.stderr.write('[FATAL] Cannot locate the DL architecture for allele: %s\n' % (allele))
exit()
if not os.path.exists(model_weights):
sys.stderr.write('[FATAL] Cannot locate the DL weigths for allele: %s\n' % (allele))
exit()
self.models[allele] = [model_arch, model_weights]
return 0
def print_projInfo(self, stream=sys.stdout):
""" by default prints to sys.stdout;
can be redirected to other streams such as a file handle."""
stream.write('################ Project Configurations ##############\n')
stream.write('[3rd party programs]\n')
stream.write(' prodigal: %s\n' % self.prodigal)
stream.write(' hmmscan: %s\n' % self.hmmscan)
stream.write(' hmmtop: %s\n' % self.hmmtop)
stream.write(' psort: %s\n' % self.psort)
stream.write(' \n')
for genome_id in self.genomes:
stream.write('[%s]\n' % genome_id)
stream.write(' fna=%s\n' % self.genomes[genome_id].fna)
stream.write(' gff=%s\n' % self.genomes[genome_id].gff)
stream.write(' hmmtop=%s\n' % self.genomes[genome_id].hmmtop)
stream.write(' hmmscan=%s\n' % self.genomes[genome_id].hmmscan)
stream.write(' psort=%s\n' % self.genomes[genome_id].psort)
stream.write(' Gram=%s\n' % self.genomes[genome_id].gram)
stream.write(' [Alleles]\n')
for allele in self.genomes[genome_id].alleles:
stream.write(' %s\n' % allele)
stream.write('\n')
return 0
def which(program):
"""Tests if a program is executable or exists
Parameters
----------
program: the path to the executable
Returns
-------
abspath: the absolute path of the program if it is executable and the system can
locate it; otherwise None.
"""
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return os.path.abspath(program)
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return os.path.abspath(exe_file)
return None
def parse_config(config_file):
"""Parses the configuration file, returns the ProjInfo object
Parameters
----------
config_file: the configuration file formatted as detailed in usage
Returns
-------
proj_info: the initiated ProjInfo object.
"""
proj_info = ProjInfo()
proj_info.init_from_config(config_file)
return proj_info
if __name__ == '__main__': main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
220,
198,
198,
834,
9800,
834,
796,
705,
7376,
782,
42990,
25956,
357,
565,
20895,
31,
36654,
8625,
3678,
13,
2398,
33047,
... | 2.390635 | 4,933 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-26 21:49
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
940,
13,
16,
319,
1584,
12,
2931,
12,
2075,
2310,
25,
2920,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.818182 | 66 |
import collections
import math
import os
import shutil
import time
import zipfile
import d2lzh as d2l
from mxnet import autograd, gluon, init, nd
from mxnet.gluon import data as gdata, loss as gloss, model_zoo, nn
# 如果使用下载的Kaggle比赛的完整数据集,把demo变量改为False
demo = True
data_dir = '../data/kaggle_dog'
if demo:
zipfiles = ['train_valid_test_tiny.zip']
else:
zipfiles = ['train.zip', 'test.zip', 'labels.csv.zip']
for f in zipfiles:
with zipfile.ZipFile(data_dir + '/' + f, 'r') as z:
z.extractall(data_dir)
def reorg_dog_data(data_dir, label_file, train_dir, test_dir, input_dir,
valid_ratio):
"""读取训练数据标签、切分验证集并整理测试集。"""
# 读取训练数据标签
with open(os.path.join(data_dir, label_file), 'r') as f:
# 跳过文件头行(栏名称)
lines = f.readlines()[1:]
tokens = [l.rstrip().split(',') for l in lines]
idx_label = dict(((idx, label) for idx, label in tokens))
reorg_train_valid(data_dir, train_dir, input_dir, valid_ratio, idx_label)
# 整理测试集
d2l.mkdir_if_not_exist([data_dir, input_dir, 'test', 'unknown'])
for test_file in os.listdir(os.path.join(data_dir, test_dir)):
shutil.copy(os.path.join(data_dir, test_dir, test_file),
os.path.join(data_dir, input_dir, 'test', 'unknown'))
# 数据处理
if demo:
# 注意,此处使用小数据集并将批量大小相应设小。使用Kaggle比赛的完整数据集时可设批量大小
# 为较大整数
input_dir, batch_size = 'train_valid_test_tiny', 1
else:
label_file, train_dir, test_dir = 'labels.csv', 'train', 'test'
input_dir, batch_size, valid_ratio = 'train_valid_test', 128, 0.1
reorg_dog_data(data_dir, label_file, train_dir, test_dir, input_dir,
valid_ratio)
transform_train = gdata.vision.transforms.Compose([
# 随机对图像裁剪出面积为原图像面积0.08~1倍、且高和宽之比在3/4~4/3的图像,再放缩为高和
# 宽均为224像素的新图像
gdata.vision.transforms.RandomResizedCrop(224, scale=(0.08, 1.0),
ratio=(3.0 / 4.0, 4.0 / 3.0)),
gdata.vision.transforms.RandomFlipLeftRight(),
# 随机变化亮度、对比度和饱和度
gdata.vision.transforms.RandomColorJitter(brightness=0.4, contrast=0.4,
saturation=0.4),
# 随机加噪声
gdata.vision.transforms.RandomLighting(0.1),
gdata.vision.transforms.ToTensor(),
# 对图像的每个通道做标准化
gdata.vision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
transform_test = gdata.vision.transforms.Compose([
# todo 直觉上,还不如直接resize到224来的方便
# 也许是为了凸显图片的中心部分方便预测吧
gdata.vision.transforms.Resize(256),
# 将图像中央的高和宽均为224的正方形区域裁剪出来
gdata.vision.transforms.CenterCrop(224),
gdata.vision.transforms.ToTensor(),
gdata.vision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
# 读取数据集
train_ds = gdata.vision.ImageFolderDataset(
os.path.join(data_dir, input_dir, 'train'), flag=1)
valid_ds = gdata.vision.ImageFolderDataset(
os.path.join(data_dir, input_dir, 'valid'), flag=1)
train_valid_ds = gdata.vision.ImageFolderDataset(
os.path.join(data_dir, input_dir, 'train_valid'), flag=1)
test_ds = gdata.vision.ImageFolderDataset(
os.path.join(data_dir, input_dir, 'test'), flag=1)
train_iter = gdata.DataLoader(train_ds.transform_first(transform_train),
batch_size, shuffle=True, last_batch='keep')
valid_iter = gdata.DataLoader(valid_ds.transform_first(transform_test),
batch_size, shuffle=True, last_batch='keep')
train_valid_iter = gdata.DataLoader(train_valid_ds.transform_first(
transform_train), batch_size, shuffle=True, last_batch='keep')
test_iter = gdata.DataLoader(test_ds.transform_first(transform_test),
batch_size, shuffle=False, last_batch='keep')
loss = gloss.SoftmaxCrossEntropyLoss()
def train(net, train_iter, valid_iter, num_epochs, lr, wd, ctx, lr_period,
lr_decay):
"""训练神经网络"""
# 只训练自定义的小规模输出网络
trainer = gluon.Trainer(net.output_new.collect_params(), 'sgd',
{'learning_rate': lr, 'momentum': 0.9, 'wd': wd})
for epoch in range(num_epochs):
train_l_sum, n, start = 0.0, 0, time.time()
if epoch > 0 and epoch % lr_period == 0:
trainer.set_learning_rate(trainer.learning_rate * lr_decay)
for X, y in train_iter:
y = y.as_in_context(ctx)
output_features = net.features(X.as_in_context(ctx))
with autograd.record():
outputs = net.output_new(output_features)
l = loss(outputs, y).sum()
l.backward()
trainer.step(batch_size)
train_l_sum += l.asscalar()
n += y.size
time_s = "time %.2f sec" % (time.time() - start)
if valid_iter is not None:
valid_loss = evaluate_loss(valid_iter, net, ctx)
epoch_s = ("epoch %d, train loss %f, valid loss %f, "
% (epoch + 1, train_l_sum / n, valid_loss))
else:
epoch_s = ("epoch %d, train loss %f, "
% (epoch + 1, train_l_sum / n))
print(epoch_s + time_s + ', lr ' + str(trainer.learning_rate))
# ctx用于检测能否使用gpu
ctx, num_epochs, lr, wd = d2l.try_gpu(), 1, 0.01, 1e-4
lr_period, lr_decay, net = 10, 0.1, get_net(ctx)
net.hybridize()
train(net, train_iter, valid_iter, num_epochs, lr, wd, ctx, lr_period,
lr_decay)
net = get_net(ctx)
net.hybridize()
# 开始训练
train(net, train_valid_iter, None, num_epochs, lr, wd, ctx, lr_period,
lr_decay)
# 使用测试集对测试训练结果
preds = []
for data, label in test_iter:
output_features = net.features(data.as_in_context(ctx))
output = nd.softmax(net.output_new(output_features))
preds.extend(output.asnumpy())
ids = sorted(os.listdir(os.path.join(data_dir, input_dir, 'test/unknown')))
with open('submission.csv', 'w') as f:
f.write('id,' + ','.join(train_valid_ds.synsets) + '\n')
for i, output in zip(ids, preds):
f.write(i.split('.')[0] + ',' + ','.join(
[str(num) for num in output]) + '\n')
| [
11748,
17268,
198,
11748,
10688,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
640,
198,
11748,
19974,
7753,
198,
198,
11748,
288,
17,
75,
23548,
355,
288,
17,
75,
198,
6738,
285,
87,
3262,
1330,
1960,
519,
6335,
11,
1278,
84,... | 1.759409 | 3,454 |
# -*- coding: utf8 -*-
"""
Script to delete unused darks of each image.
For each experiment we acquire something like 30 images, from which only about
3 to 4 contain data, the rest of the images are darks.
We don't have to keep all the darks; this script deletes all but one at the
beginning and the two darks adjacent to the images with signal.
The detection of folders and other things are based on
DetectWhichImageIsRadiography.py
"""
import glob
import os
import time
import numpy
import resource
import functions
# Setup
ReallyRemove = True
# Increase limit of open files, according to http://is.gd/f50dCm
# Otherwise we cannot run the file for *all* folders at the same time
resource.setrlimit(resource.RLIMIT_NOFILE, (4096, 4096))
# Where shall we start?
RootFolder = ('/afs/psi.ch/project/EssentialMed/MasterArbeitBFH/' +
'XrayImages')
case = 2
if case == 1:
# Look through all folders
StartingFolder = RootFolder
elif case == 2:
# Look for images of only one scintillator
Scintillators = ('AppScinTech-HE', 'Pingseng', 'Hamamatsu', 'Toshiba')
ChosenScintillator = functions.AskUser(
'Which scintillator do you want to look at?', Scintillators)
StartingFolder = os.path.join(RootFolder, ChosenScintillator)
elif case == 3:
# Ask for what to do
Scintillators = ('AppScinTech-HE', 'Pingseng', 'Hamamatsu', 'Toshiba')
Sensors = ('AR0130', 'AR0132', 'MT9M001')
Lenses = ('Computar-11A', 'Framos-DSL219D-650-F2.0',
'Framos-DSL224D-650-F2.0', 'Framos-DSL311A-NIR-F2.8',
'Framos-DSL949A-NIR-F2.0', 'Lensation-CHR4020',
'Lensation-CHR6020', 'Lensation-CM6014N3', 'Lensation-CY0614',
'TIS-TBL-6C-3MP', '')
ChosenScintillator = functions.AskUser(
'Which scintillator do you want to look at?', Scintillators)
ChosenSensor = functions.AskUser(
'Which sensor do you want to look at?', Sensors)
ChosenLens = functions.AskUser(
'Which lens do you want to look at? ("empty" = "all")',
Lenses)
StartingFolder = os.path.join(RootFolder, ChosenScintillator,
ChosenSensor, ChosenLens)
# Look for all folders matching the naming convention
Experiment, ExperimentID = functions.get_experiment_list(StartingFolder)
print 'I found', len(Experiment), 'experiment IDs in', StartingFolder
# Get list of files in each folder, these are all the radiographies we acquired
# The length of this list is then obviously the number of radiographies
Radiographies = [sorted(glob.glob(os.path.join(Folder, '*.raw')))
for Folder in Experiment]
NumberOfRadiographies = [len(Radiographies[i])
for i in range(len(Experiment))]
AnalyisList = []
AnalyisList = range(len(Experiment))
# Go through each selected experiment
for Counter, SelectedExperiment in enumerate(AnalyisList):
# Inform the user and start logging
# See if TarToArchive.py was already run on this experiment
ArchivalLog = os.path.join(
os.path.dirname(Experiment[SelectedExperiment]),
ExperimentID[SelectedExperiment] + '.archive.log')
if not os.path.isfile(ArchivalLog):
ReallyRemove = False
print 'I could not find an archival log file for experiment', \
ExperimentID[SelectedExperiment], 'at', ArchivalLog
print 'I thus set "ReallyRemove" to false'
print
print 'Please archive the data for this Experiment with',\
'TarToArchive.py, then run this script again'
break
print 80 * '-'
# print str(Counter + 1) + '/' + str(len(AnalyisList)) + \
# ': Deleting darks experiment', ExperimentID[SelectedExperiment]
logfile = functions.myLogger(
os.path.dirname(Experiment[SelectedExperiment]),
ExperimentID[SelectedExperiment] + '.deletion.log')
# Go through the log file. Under the 'Details' section we specify if the
# image is a 'Dark' or was used ('Image'). Save 'Image's, adjacent 'Dark's
# and the second 'Dark', delete the rest.
AnalysisLogFile = os.path.join(
os.path.dirname(Experiment[SelectedExperiment]),
ExperimentID[SelectedExperiment] + '.analysis.log')
if not os.path.isfile(AnalysisLogFile):
print 'The analysis of experiment', \
ExperimentID[SelectedExperiment], 'has not been done yet'
print 'Run DetectWhichImageIsRadiography.py on', \
Experiment[SelectedExperiment]
break
Keepers = []
LogFileToRead = open(AnalysisLogFile, 'r')
for line in LogFileToRead:
if len(line.split('-->')) == 2:
FileNumber = int(line.split('/')[0])
if line.split('--> ')[1].strip() == 'Image':
# Keep 'Image' and the 'Dark' adjacent to the 'Image's
Keepers.append(FileNumber - 1)
Keepers.append(FileNumber)
Keepers.append(FileNumber + 1)
LogFileToRead.close()
print 'For Experiment', ExperimentID[SelectedExperiment], 'in folder', \
Experiment[SelectedExperiment][len(StartingFolder) + 1:]
# Always keep second image
Keepers.append(2)
Keepers = numpy.unique(Keepers)
print 'We keep', len(Keepers), 'images and delete', \
NumberOfRadiographies[SelectedExperiment] - len(Keepers), 'images'
if NumberOfRadiographies[SelectedExperiment] - len(Keepers) > 1:
# When we have as many files left as we have 'Keepers' we most probably
# have already done a deletion round, then the 'if' clause above
# evaluates to 'True'
logfile.info(
'Deletion log file for Experiment ID %s, deletion done on %s',
ExperimentID[SelectedExperiment],
time.strftime('%d.%m.%Y at %H:%M:%S'))
logfile.info('\nMade with "%s" at Revision %s',
os.path.basename(__file__), functions.get_git_hash())
logfile.info(80 * '-')
logfile.info('Grabbing Information from %s', AnalysisLogFile)
logfile.info(80 * '-')
logfile.info('In the folder %s we keep image',
Experiment[SelectedExperiment])
LogFileToRead = open(AnalysisLogFile, 'r')
for line in LogFileToRead:
if len(line.split('-->')) == 2:
FileNumber = int(line.split('/')[0]) - 1
if FileNumber in Keepers:
logfile.info('%s/%s | %s | with info "%s"',
str(FileNumber).rjust(2),
NumberOfRadiographies[SelectedExperiment],
os.path.basename(Radiographies[SelectedExperiment][FileNumber - 1]),
line.strip())
LogFileToRead.close()
logfile.info(80 * '-')
logfile.info('In the folder %s we delete image',
Experiment[SelectedExperiment])
LogFileToRead = open(AnalysisLogFile, 'r')
for line in LogFileToRead:
if len(line.split('-->')) == 2:
FileNumber = int(line.split('/')[0])
if FileNumber not in Keepers:
logfile.info('%s/%s | %s | with info "%s"',
str(FileNumber).rjust(2),
NumberOfRadiographies[SelectedExperiment],
os.path.basename(Radiographies[SelectedExperiment][FileNumber - 1]),
line.strip())
# Actually delete the image now
if ReallyRemove:
os.remove(
Radiographies[SelectedExperiment][FileNumber - 1])
LogFileToRead.close()
else:
print 'We have as many Keepers as radiographies in the folder.', \
'Either it does not make sense to delete any files or we', \
'already did delete them...'
if not ReallyRemove:
print '\nWe did not really remove anything, set "ReallyRemove" at', \
'the beginnig of the script to "True" or archive the files with', \
'"TarToArchive.py" first...'
logfile.info(80 * '-')
logfile.info('We did not really remove anything')
logfile.info('Set "ReallyRemove" on line 22 of the script to "True" '
'at the beginnig of the script to really delete the '
'superfluous files')
if ReallyRemove:
print
print 'Deletion of unnecessary darks of', StartingFolder, 'finished'
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
23,
532,
9,
12,
198,
198,
37811,
198,
7391,
284,
12233,
21958,
288,
5558,
286,
1123,
2939,
13,
198,
1890,
1123,
6306,
356,
12831,
1223,
588,
1542,
4263,
11,
422,
543,
691,
546,
198,
18,
284,
6... | 2.348953 | 3,628 |
# myTeam.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
from captureAgents import CaptureAgent
from captureAgents import AgentFactory
from game import Directions
import random, time, util
from util import nearestPoint
#################
# Team creation #
#################
def createTeam(firstIndex, secondIndex, isRed,
first = 'MasterAAgent', second = 'MasterDAgent'):
"""
This function should return a list of two agents that will form the
team, initialized using firstIndex and secondIndex as their agent
index numbers. isRed is True if the red team is being created, and
will be False if the blue team is being created.
As a potentially helpful development aid, this function can take
additional string-valued keyword arguments ("first" and "second" are
such arguments in the case of this function), which will come from
the --redOpts and --blueOpts command-line arguments to capture.py.
For the nightly contest, however, your team will be created without
any extra arguments, so you should make sure that the default
behavior is what you want for the nightly contest.
"""
# The following line is an example only; feel free to change it.
return [eval(first)(firstIndex), eval(second)(secondIndex)]
##########
# Agents #
##########
class AttackerAgentHelper(EvaluationBasedAgentHelper):
"Gera Carlo, o agente ofensivo."
def getFeatures(self, gameState, action):
"""
Get features used for state evaluation.
"""
features = util.Counter()
successor = self.getSuccessor(gameState, action)
# Compute score from successor state
features['successorScore'] = self.cap.getScore(successor)
# Compute remain food
features['targetFood'] = len(self.cap.getFood(gameState).asList())
# Compute distance to the nearest food
foodList = self.cap.getFood(successor).asList()
if len(foodList) > 0:
myPos = successor.getAgentState(self.index).getPosition()
minDistance = min([self.cap.getMazeDistance(myPos, food) for food in foodList])
features['distanceToFood'] = minDistance
# Compute the carrying dots
features['carryDot'] = successor.getAgentState(self.index).numCarrying
# Compute distance to closest ghost
myPos = successor.getAgentState(self.index).getPosition()
enemies = [successor.getAgentState(i) for i in self.cap.getOpponents(successor)]
inRange = filter(lambda x: not x.isPacman and x.getPosition() != None, enemies)
if len(inRange) > 0:
positions = [agent.getPosition() for agent in inRange]
closest = min(positions, key=lambda x: self.cap.getMazeDistance(myPos, x))
closestDist = self.cap.getMazeDistance(myPos, closest)
if closestDist <= 5:
features['distanceToGhost'] = closestDist
# Compute if is pacman
features['isPacman'] = 1 if successor.getAgentState(self.index).isPacman else 0
# Get the closest distance to the middle of the board.
features['distanceToMid'] = min([self.cap.distancer.getDistance(myPos, i)
for i in self.noWallSpots])
# Get whether there is a power pill we are chasing.
capsulesChasing = None
if self.cap.red:
capsulesChasing = gameState.getBlueCapsules()
else:
capsulesChasing = gameState.getRedCapsules()
# distance and minimum distance to the capsule.
capsulesChasingDistances = [self.cap.distancer.getDistance(myPos, capsule) for capsule in
capsulesChasing]
minCapsuleChasingDistance = min(capsulesChasingDistances) if len(capsulesChasingDistances) else 0
features['distoCapsule'] = minCapsuleChasingDistance
return features
def getWeights(self, gameState, action):
"""
Get weights for the features used in the evaluation.
"""
#If tha agent is locked, we will make him try and atack
if self.inactiveTime > 80:
return {'successorScore': 10, 'distanceToFood': -10, 'distanceToGhost': 50, 'carryDot': 50,
'isPacman': 0, 'targetFood': -1000, 'distanceToMid': 0, 'distoCapsule': -500}
# If opponent is scared, the agent should not care about distanceToGhost
successor = self.getSuccessor(gameState, action)
myPos = successor.getAgentState(self.index).getPosition()
enemies = [successor.getAgentState(i) for i in self.cap.getOpponents(successor)]
inRange = filter(lambda x: not x.isPacman and x.getPosition() != None, enemies)
# if len(inRange) > 0:
# positions = [agent.getPosition() for agent in inRange]
# closestPos = min(positions, key=lambda x: self.getMazeDistance(myPos, x))
# closestDist = self.getMazeDistance(myPos, closestPos)
# closest_enemies = filter(lambda x: x[0] == closestPos, zip(positions, inRange))
# for agent in closest_enemies:
# if agent[1].scaredTimer > 3:
# return {'successorScore': 2, 'distanceToFood': -500, 'distanceToGhost': 0,'carryDot': 50,
# 'isPacman': 0, 'targetFood': -1000, 'distanceToMid': 0,'distoCapsule':0}
# Weights normally used
scaredTimes = gameState.getAgentState(self.oppoents[0]).scaredTimer
if scaredTimes > 3:
return {'successorScore': 2, 'distanceToFood': -500, 'distanceToGhost': 0,'carryDot': 100,
'isPacman': 0, 'targetFood': -100, 'distanceToMid': -10,'distoCapsule':0}
elif self.retreat:
return {'successorScore': 10, 'distanceToFood': 0, 'distanceToGhost': 500, 'carryDot': 50,
'isPacman': -100, 'targetFood': 20, 'distanceToMid': -100,'distoCapsule':0}
else:
return {'successorScore': 10, 'distanceToFood': -500, 'distanceToGhost': 50, 'carryDot': 50,
'isPacman': 0,'targetFood': -1000, 'distanceToMid': 0,'distoCapsule':-500}
def randomSimulation(self, depth, gameState):
"""
Random simulate some actions for the agent. The actions other agents can take
are ignored, or, in other words, we consider their actions is always STOP.
The final state from the simulation is evaluated.
"""
new_state = gameState.deepCopy()
while depth > 0:
# Get valid actions
actions = new_state.getLegalActions(self.index)
# The agent should not stay put in the simulation
actions.remove(Directions.STOP)
current_direction = new_state.getAgentState(self.index).configuration.direction
# The agent should not use the reverse direction during simulation
reversed_direction = Directions.REVERSE[new_state.getAgentState(self.index).configuration.direction]
if reversed_direction in actions and len(actions) > 1:
actions.remove(reversed_direction)
# Randomly chooses a valid action
a = random.choice(actions)
# Compute new state and update depth
new_state = new_state.generateSuccessor(self.index, a)
depth -= 1
# Evaluate the final simulation state
return self.evaluate(new_state, Directions.STOP)
    def takeToEmptyAlley(self, gameState, action, depth):
        """
        Verify if an action takes the agent into an alley with no pacdots.

        Returns True when, within `depth` look-ahead moves and without
        reversing or stopping, every path from the resulting state dead-ends
        before any food is eaten; False as soon as food can be reached or
        the search budget runs out.
        """
        # Search budget exhausted: assume the alley is escapable.
        if depth == 0:
            return False
        # if self.retreat:
        #     return True
        # else:
        targetFood = len(self.cap.getFood(gameState).asList())
        new_state = gameState.generateSuccessor(self.index, action)
        new_targetFood = len(self.cap.getFood(new_state).asList())
        # The move eats a dot, so this is not an empty alley.
        if new_targetFood < targetFood:
            return False
        actions = new_state.getLegalActions(self.index)
        actions.remove(Directions.STOP)
        # Forbid turning back; an alley is only "empty" going forward.
        reversed_direction = Directions.REVERSE[new_state.getAgentState(self.index).configuration.direction]
        if reversed_direction in actions:
            actions.remove(reversed_direction)
        # Dead end with nothing eaten on the way in.
        if len(actions) == 0:
            return True
        # Every continuation must also lead to an empty alley.
        for a in actions:
            if not self.takeToEmptyAlley(new_state, a, depth - 1):
                return False
        return True
# Implemente este metodo para controlar o agente (1s max).
class DefenderAgentHelper():
    """Helper for the defensive agent ("Monte").

    Chooses patrol points near the food being defended: spots closer to a
    defended pacdot get a proportionally higher selection probability.
    Relies on attributes set up elsewhere: ``self.cap`` (capture agent with
    ``getFoodYouAreDefending``/``getMazeDistance``), ``self.noWallSpots``
    (candidate patrol positions) and ``self.patrolDict`` (position ->
    probability, filled by :meth:`distFoodToPatrol`).
    """
    def distFoodToPatrol(self, gameState):
        """
        Fill ``self.patrolDict`` with a selection probability per patrol
        spot: the inverse of the spot's maze distance to the nearest
        defended pacdot, normalized over all spots.
        """
        food = self.cap.getFoodYouAreDefending(gameState).asList()
        total = 0
        # Get the minimum distance from the food to our patrol points.
        for position in self.noWallSpots:
            # BUGFIX: was the *string* "+inf"; `dist < "+inf"` raises
            # TypeError on Python 3 (and only worked on Python 2 because
            # numbers compare less than strings). Use a real float infinity.
            closestFoodDist = float('inf')
            for foodPos in food:
                dist = self.cap.getMazeDistance(position, foodPos)
                if dist < closestFoodDist:
                    closestFoodDist = dist
            # We can't divide by 0 (spot sitting right on a pacdot).
            if closestFoodDist == 0:
                closestFoodDist = 1
            self.patrolDict[position] = 1.0 / float(closestFoodDist)
            total += self.patrolDict[position]
        # Normalize the values so they form a probability distribution.
        if total == 0:
            total = 1
        for x in self.patrolDict.keys():
            self.patrolDict[x] = float(self.patrolDict[x]) / float(total)
    def selectPatrolTarget(self):
        """
        Sample a patrol point according to ``self.patrolDict``.

        May return None if floating-point rounding leaves the cumulative
        probability just below the drawn random number.
        """
        rand = random.random()
        cumulative = 0.0  # renamed from `sum` to avoid shadowing the builtin
        for x in self.patrolDict.keys():
            cumulative += self.patrolDict[x]
            if rand < cumulative:
                return x
# Implemente este metodo para pre-processamento (15s max).
# Implemente este metodo para controlar o agente (1s max).
| [
2,
616,
15592,
13,
9078,
198,
2,
45337,
198,
2,
10483,
26426,
6188,
25,
220,
921,
389,
1479,
284,
779,
393,
9117,
777,
4493,
329,
198,
2,
9856,
4959,
2810,
326,
357,
16,
8,
345,
466,
407,
14983,
393,
7715,
198,
2,
8136,
11,
357,... | 2.854441 | 3,490 |
# Python 2 script (print statements, xrange, str.translate(None, ...)):
# merges consecutive time-sliced GraphML files for October 2012 into
# 3-slice windows and records the produced filenames in sys.argv[1].
import networkx as nx
import datetime
import time
# NOTE: this rebinds the name `datetime` from the module to the class.
from datetime import datetime, timedelta
import sys
# October
days = ['1','2','3','4','5','6','7','8','9','10','11', '12','13','14','15','16','17', '18','19','20','21','22','23','24','25','26','27','28','29','30','31']
startTime = '09:00'
endTime = '23:59'
# NOTE(review): generateTimeSlices is not defined or imported in this file;
# presumably provided elsewhere. Expected to yield (start, end) time pairs.
timeSlices = generateTimeSlices(startTime, endTime)
with open(sys.argv[1], 'w') as out_f:
    for day in days:
        # Walk the slices three at a time; each window spans time1..time4.
        for i in xrange(0,len(timeSlices), 3):
            # Strip the ':' so times can be embedded in filenames.
            time1 = timeSlices[i][0].translate(None, ':')
            time2 = timeSlices[i+1][0].translate(None, ':')
            time3 = timeSlices[i+2][0].translate(None, ':')
            time4 = timeSlices[i+2][1].translate(None, ':')
            # Hour of the window end, used to label the merged file.
            (timeT, discard) = timeSlices[i+2][1].strip().split(':')
            try:
                print "Reading Graph 1"
                inputgraph1 = nx.read_graphml("2012-10-" + day + "_" + time1 + "_2012-10-" + day +"_"+time2+".graphml")
                print "Reading Graph 2"
                inputgraph2 = nx.read_graphml("2012-10-" + day + "_" + time2 + "_2012-10-" + day +"_"+time3+".graphml")
                print "Combining Graphs"
                testGraph = nx.disjoint_union(inputgraph1,inputgraph2)
                print "Reading Graph 3"
                inputgraph3 = nx.read_graphml("2012-10-" + day + "_" + time3 + "_2012-10-" + day +"_"+time4+".graphml")
                print "Combining Graphs"
                testGraph2 = nx.disjoint_union(testGraph,inputgraph3)
                nx.write_graphml(testGraph2, "2012-10-" + day + "T"+ timeT + ".graphml")
                out_f.write("%s\n" % ("2012-10-" + day + "T"+ timeT + ".graphml"))
            # NOTE(review): bare except silently hides all errors (missing
            # files, parse failures, keyboard interrupts alike).
            except:
                print "Couldn't generate merged file for 2012-10-" + day
| [
11748,
3127,
87,
355,
299,
87,
198,
11748,
4818,
8079,
198,
11748,
640,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
11748,
25064,
198,
198,
2,
3267,
198,
12545,
796,
37250,
16,
41707,
17,
41707,
18,
41707,
19,
4170... | 2.261696 | 684 |
# Generated by Django 2.2 on 2020-04-02 09:24
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
319,
12131,
12,
3023,
12,
2999,
7769,
25,
1731,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.966667 | 30 |
# Competitive-programming style script: reads n and a list of n ints,
# accumulates their total, and adjusts it depending on where the first
# even value appears. Variable semantics below are inferred — TODO confirm
# against the original problem statement.
n=int(input())
x=list(map(int,input().split()))
# pos: 1-based index after the last leading 1; soma: adjustment; tot: sum.
pos=soma=tot=0
aux=True
for i,num in enumerate(x):
    tot+=num
    # First even number seen: record its position and the adjustment.
    if num%2==0 and aux:
        att = i+1
        soma+=((i*2)+1)-pos
        aux=False
    if num-1==0 and aux:pos=i+1
# for-else: runs after the loop completes (there is no break above).
else:
    if(soma>0):tot-=soma
    # No even number found: att falls back to n.
    else:
        att=n
        tot-=att
print(att,tot)
| [
77,
28,
600,
7,
15414,
28955,
198,
87,
28,
4868,
7,
8899,
7,
600,
11,
15414,
22446,
35312,
3419,
4008,
198,
1930,
28,
82,
6086,
28,
83,
313,
28,
15,
198,
14644,
28,
17821,
198,
1640,
1312,
11,
22510,
287,
27056,
378,
7,
87,
2599... | 1.72973 | 185 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
from six.moves import xrange
def generate_random_df(n_seqs=5,
max_seq_length=10,
unique_times=True,
starttimes_min=0,
starttimes_max=0):
"""Generates random dataframe for testing.
For every sequence:
1. generate a random seq_length from [1,`max_seq_length`]
2. generate the number of observations in the sequence from [1,seq_length]
3. randomly pick observation elapsed times from [1,`seq_length`]
4. randomly pick a starttime [0,`starttimes_max`]
5. Generate random data in the columns at these timesteps
This means that the only thing we know about a sequence is that it's at maximum `max_seq_length`
:param df: pandas dataframe with columns
* `id`: integer
* `t`: integer
* `dt`: integer mimmicking a global event time
* `t_ix`: integer contiguous user time count per id 0,1,2,..
* `t_elapsed`: integer the time from starttime per id ex 0,1,10,..
* `event`: 0 or 1
* `int_column`: random data
* `double_column`: dandom data
:param int unique_times: whether there id,elapsed_time has only one obs. Default true
:param int starttimes_min: integer to generate `dt` the absolute time
:param int starttimes_max: integer to generate `dt` the absolute time
:return df: A randomly generated dataframe.
"""
seq_lengths = np.random.randint(max_seq_length, size=n_seqs) + 1
id_list = []
t_list = []
dt_list = []
if starttimes_min < starttimes_max:
starttimes = np.sort(np.random.randint(
low=starttimes_min, high=starttimes_max, size=n_seqs))
else:
starttimes = np.zeros(n_seqs)
for s in xrange(n_seqs):
# Each sequence consists of n_obs in the range 0-seq_lengths[s]
n_obs = np.sort(np.random.choice(
seq_lengths[s], 1, replace=False)) + 1
# Each obs occurred at random times
t_elapsed = np.sort(np.random.choice(
seq_lengths[s], n_obs, replace=not unique_times))
# there's always an obs at the assigned first and last timestep for
# this seq
if seq_lengths[s] - 1 not in t_elapsed:
t_elapsed = np.append(t_elapsed, seq_lengths[s] - 1)
if 0 not in t_elapsed:
t_elapsed = np.append(t_elapsed, 0)
t_elapsed = np.sort(t_elapsed)
id_list.append(np.repeat(s, repeats=len(t_elapsed)))
dt_list.append(starttimes[s] + t_elapsed)
t_list.append(t_elapsed)
# unlist to one array
id_column = [item for sublist in id_list for item in sublist]
dt_column = [item for sublist in dt_list for item in sublist]
t_column = [item for sublist in t_list for item in sublist]
del id_list, dt_list, t_list
# do not assume row indicates event!
event_column = np.random.randint(2, size=len(t_column))
int_column = np.random.randint(low=-5, high=5, size=len(t_column)).astype(int)
double_column = np.random.uniform(high=1, low=0, size=len(t_column))
df = pd.DataFrame({'id': id_column,
'dt': dt_column,
't_elapsed': t_column,
'event': event_column,
'int_column': int_column,
'double_column': double_column
})
df['t_ix'] = df.groupby(['id'])['t_elapsed'].rank(
method='dense').astype(int) - 1
df = df[['id', 'dt', 't_ix', 't_elapsed',
'event', 'int_column', 'double_column']]
df = df.reset_index(drop=True)
return df
def generate_weibull(A, B, C, shape, discrete_time):
    """Draw sorted Weibull(A, B) event times and censor them at C.

    Inputs can be scalar or broadcastable to `shape`.

    :param A: generating scale parameter alpha.
    :param B: generating shape parameter beta.
    :param C: censoring time.
    :param shape: output shape for the underlying uniform draws.
    :param discrete_time: if truthy, floor event and censoring times onto
        a discrete time grid.
    :return: tuple `(W, Y, U)` where
        * `W`: actual (sorted) times-to-event
        * `Y`: censored times-to-event, `min(W, C)`
        * `U`: 1.0 where the event was observed (`W <= C`), else 0.0
    :rtype: tuple of ndarray
    """
    # Inverse-CDF sampling: -log(u) ~ Exp(1), then transform to Weibull.
    draws = np.random.uniform(0, 1, shape)
    W = np.sort(A * (-np.log(draws)) ** (1.0 / B))
    if discrete_time:
        C = np.floor(C)
        W = np.floor(W)
    Y = np.minimum(W, C)
    U = 1. * np.less_equal(W, C)
    return W, Y, U
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
2237... | 2.253976 | 1,949 |
import pandas as pd
from typing import Sequence
from visions.core.model.type import VisionsBaseType
from visions.core.model import TypeRelation
class visions_generic(VisionsBaseType):
"""**Generic** implementation of :class:`visions.core.models.VisionsBaseType`.
Examples:
>>> import numpy as np
>>> x = pd.Series(['a', 1, np.nan])
>>> x in visions_generic
True
"""
@classmethod
@classmethod
| [
11748,
19798,
292,
355,
279,
67,
198,
6738,
19720,
1330,
45835,
198,
198,
6738,
26096,
13,
7295,
13,
19849,
13,
4906,
1330,
569,
3279,
14881,
6030,
198,
6738,
26096,
13,
7295,
13,
19849,
1330,
5994,
6892,
341,
628,
198,
4871,
26096,
6... | 2.760736 | 163 |
import numpy as np
import pytest
from simple_nlp import *
@pytest.fixture
| [
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
198,
6738,
2829,
62,
21283,
79,
1330,
1635,
198,
198,
31,
9078,
9288,
13,
69,
9602,
198
] | 2.884615 | 26 |
from six.moves import cPickle
| [
6738,
2237,
13,
76,
5241,
1330,
269,
31686,
293,
628
] | 3.1 | 10 |
import unittest
from vcfremapper.samples import Sample, Samples
class TestSample(unittest.TestCase):
    ''' test Sample class
    '''
    # NOTE: Sample.set_format mutates class-level state, so each test sets
    # the FORMAT it needs before constructing Sample objects.
    def test_sample__init__(self):
        ''' check initialising a Sample
        '''
        Sample.set_format('GT:DP')
        samp = Sample('1/1:100')
        self.assertEqual(samp.keys(), ['GT', 'DP'])
        self.assertEqual(samp['GT'], '1/1')
        self.assertEqual(samp['DP'], '100')
        # check when all fields are blank
        samp = Sample('.')
        self.assertEqual(samp['GT'], '.')
        self.assertEqual(samp['DP'], '.')
        # check for an error if the sample and expected fields are different
        with self.assertRaises(ValueError):
            Sample('1/1')
        # check that Sample can have different keys
        Sample.set_format('GT')
        samp = Sample('1/1')
        self.assertEqual(samp.keys(), ['GT'])
    def test_sample__str__(self):
        ''' check conversion of Sample to str
        '''
        Sample.set_format('GT:DP')
        samp = Sample('1/1:100')
        self.assertEqual(str(samp), '1/1:100')
        # a fully-blank sample round-trips as per-field missing values
        samp = Sample('.')
        self.assertEqual(str(samp), '.:.')
    def test_sample__setitem__(self):
        ''' check setting Sample fields
        '''
        Sample.set_format('GT:DP')
        samp = Sample('1/1:100')
        samp['DP'] = 200
        self.assertEqual(samp['DP'], 200)
        self.assertEqual(str(samp), '1/1:200')
    def test_sample__del__(self):
        ''' check sample field deletion
        '''
        Sample.set_format('GT:DP')
        samp = Sample('1/1:100')
        self.assertEqual(samp.keys(), ['GT', 'DP'])
        # deleting an entry just gives a NA value
        del samp['DP']
        self.assertEqual(samp.keys(), ['GT', 'DP'])
        self.assertEqual(samp['DP'], '.')
self.assertEqual(samp['DP'], '.')
class TestSamples(unittest.TestCase):
    ''' test Samples class
    '''
    def test_samples__init__(self):
        ''' check initialising Samples
        '''
        samples = Samples('GT:DP', ['1/1:100', '0/1:.', '.'])
        # idx starts before the first sample (iteration state)
        self.assertEqual(samples.idx, -1)
        self.assertEqual(samples.samples, [Sample('1/1:100'), Sample('0/1:.'),
                                           Sample('.')])
    def test_samples__str__(self):
        ''' check conversion of Samples to str
        '''
        samples = Samples('GT:DP', ['1/1:100', '0/1:.', '.'])
        # str() re-emits the FORMAT column followed by each sample
        self.assertEqual(str(samples), 'GT:DP\t1/1:100\t0/1:.\t.:.')
    def test_samples__iter__(self):
        ''' check iterating through Samples
        '''
        samples = Samples('GT:DP', ['1/1:100', '0/1:.', '.'])
        matched = [Sample('1/1:100'), Sample('0/1:.'), Sample('.')]
        self.assertEqual(len(samples), len(matched))
        for x, y in zip(samples, matched):
            self.assertEqual(x, y)
| [
198,
11748,
555,
715,
395,
198,
198,
6738,
410,
12993,
2787,
11463,
13,
82,
12629,
1330,
27565,
11,
3409,
2374,
198,
198,
4871,
6208,
36674,
7,
403,
715,
395,
13,
14402,
20448,
2599,
198,
220,
220,
220,
705,
7061,
1332,
27565,
1398,
... | 1.953877 | 1,496 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import math as m
import matplotlib.pyplot as plt
def rotate_points_2D(points, angle):
    """Rotate 2-D points about the origin by `angle` radians.

    Each point (x, y) is treated as the complex number x + iy and
    multiplied by the unit rotor e^(i*angle) (counter-clockwise).

    :param points: iterable of (x, y) pairs.
    :param angle: rotation angle in radians.
    :return: list of [x, y] lists holding the rotated coordinates.
    """
    rotor = complex(m.cos(angle), m.sin(angle))
    rotated = [complex(px, py) * rotor for px, py in points]
    return [[w.real, w.imag] for w in rotated]
def main():
    """Animate a unit-ish box rotating from 0 to 88 degrees in 2-degree
    steps, drawing each intermediate orientation with matplotlib."""
    # Closed square outline (first point repeated to close the polygon).
    box = [
        [-1, -1],
        [1, -1],
        [1, 1],
        [-1, 1],
        [-1, -1],
    ]
    # `fig` is unused; kept because subplots returns a (figure, axes) pair.
    fig, axs = plt.subplots(1, 1)
    plt.xlim(-1.5, 1.5)
    plt.ylim(-1.5, 1.5)
    axs.set_aspect('equal')
    for angle in range(0, 90, 2):
        box_rotated = rotate_points_2D(box, m.radians(angle))
        x = [p[0] for p in box_rotated]
        y = [p[1] for p in box_rotated]
        axs.plot(x, y, marker='o', color='blue')
        # Brief pause so each frame is rendered (simple animation loop).
        plt.pause(0.1)
    plt.show()
if __name__ == '__main__':
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
10688,
355,
285,
198,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
628,
198,
4299,
2... | 2.004049 | 494 |
"""
Wrapper around existing BERT code to create a classification
estimator.
"""
| [
37811,
198,
36918,
2848,
1088,
4683,
347,
17395,
2438,
284,
2251,
257,
17923,
198,
395,
320,
1352,
13,
198,
37811,
198
] | 3.809524 | 21 |
# Copyright 2013 by Kamil Koziara. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
from Bio._py3k import StringIO
import unittest
from Bio.Ontology.IO.OboIO import OboIterator, OboWriter
from Bio.Ontology.Data import OntologyTerm
if __name__ == "__main__":
    # Run the test suite in this module with verbose per-test output.
    runner = unittest.TextTestRunner(verbosity = 2)
    unittest.main(testRunner=runner)
| [
2,
15069,
2211,
416,
12670,
346,
40772,
72,
3301,
13,
1439,
2489,
10395,
13,
198,
2,
770,
2438,
318,
636,
286,
262,
8436,
404,
7535,
6082,
290,
21825,
416,
663,
198,
2,
5964,
13,
220,
4222,
766,
262,
38559,
24290,
2393,
326,
815,
... | 3.228758 | 153 |
# encoding: utf-8
from .wscn_interface import * | [
2,
21004,
25,
3384,
69,
12,
23,
198,
198,
6738,
764,
86,
1416,
77,
62,
39994,
1330,
1635
] | 2.666667 | 18 |
import datetime
import pytest
import requests
from datavault_api_client import crawler
from datavault_api_client.data_structures import DiscoveredFileInfo
| [
11748,
4818,
8079,
198,
198,
11748,
12972,
9288,
198,
11748,
7007,
198,
198,
6738,
4818,
615,
1721,
62,
15042,
62,
16366,
1330,
27784,
1754,
198,
6738,
4818,
615,
1721,
62,
15042,
62,
16366,
13,
7890,
62,
7249,
942,
1330,
8444,
2557,
... | 3.4375 | 48 |
"""Sub module"""
| [
37811,
7004,
8265,
37811,
198
] | 3.4 | 5 |
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import tinctest
from mpp.gpdb.tests.storage.fts.fts_transitions.fts_transitions import FTSTestCase
class FtsTransitionsPart03(FTSTestCase):
''' State of FTS at different fault points
'''
    def test_primary_resync_postmaster_reset_with_faults(self):
        '''
        @data_provider pr_faults
        '''
        # Data-provided test: self.test_data[0] carries the scenario name,
        # self.test_data[1] the parameter tuple — presumably
        # (fault, fault_type, role, gpstate_role, gprecover); confirm
        # against the pr_faults provider.
        filerep_fault = self.test_data[1][0]
        fault_type = self.test_data[1][1]
        filerep_role = self.test_data[1][2]
        gpstate_role = self.test_data[1][3]
        gprecover = self.test_data[1][4]
        tinctest.logger.info("\n ===============================================")
        tinctest.logger.info("\n Starting New Test: %s " % self.test_data[0][1])
        tinctest.logger.info("\n ===============================================")
        # Delegate to the shared scenario implementation in FTSTestCase.
        self.primary_resync_postmaster_reset_with_faults(filerep_fault, fault_type, filerep_role, gpstate_role, gprecover)
@tinctest.dataProvider('pr_faults')
| [
37811,
198,
15269,
357,
34,
8,
5472,
12,
4626,
350,
452,
4997,
10442,
11,
3457,
13,
1439,
2489,
10395,
13,
198,
198,
1212,
1430,
290,
262,
19249,
5696,
389,
925,
1695,
739,
198,
1169,
2846,
286,
262,
739,
262,
24843,
13789,
11,
1062... | 2.850435 | 575 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#@Time : 19-5-24 下午2:29
#@Author: elgong_hdu
#@File : makeLabel.py
import os
# The 20 CIFAR-style class names used to build pairwise voting groups.
Class = ("apple", "orange", "bed", "bottle", "bus", "clock", "elephant", "keyboard", "motorcycle",
         "mushroom", "rocket", "rose", "tank", "television", "tiger", "train", "turtle",
         "tree", "woman", "worm")
# Voting threshold
# 20 * 19 = 380 ordered class pairs (groups)
union_group = []
for i in range(len(Class)):
    for j in range(len(Class)):
        # Skip self-pairs; every ordered pair "a_b" with a != b is a group.
        if i == j:
            continue
        union_group.append((Class[i] + "_" + Class[j]))
# 投票器 版本1
def vote_version1(cls = "apple", Threshold = 38, predict_path = "/home/elgong/GEL/one_shot/torch/pytorch-cifar-master/result_predict_unlabel.csv"):
    """
    Version 1: used when the pairwise predictions are hard 0/1 labels.
    :param cls: the class to filter images for
    :param Threshold: voting threshold (0~38, one vote per model pair)
    :param predict_path: CSV of per-pair predictions for unlabeled images
    :return: None (delegates evaluation to `acc`)
    """
    # Collect the pairwise models relevant to `cls`: "first" holds groups
    # where cls is the left class, "second" where it is the right class.
    class1 = {"first": [], "second": []}
    for group in union_group:
        if cls + "_" in group:
            class1["first"].append(group)
        elif "_" + cls in group:
            class1["second"].append(group)
    # Remember the images selected by the vote
    img_list = []
    with open(predict_path, "r") as df:
        for line in df:
            # Skip the header line
            if "apple_orange" in line:
                continue
            vote_count = 0
            line = line.strip().split(",")
            img_name = line[0]
            label = line[1:]
            # print(len(label))
            # Tally votes from the models where cls is the first class
            # (prediction 0 means the first class, hence 1 - label).
            # print(type(class1["first"]))
            for g1 in class1["first"]:
                vote_count += (1 - float(label[union_group.index(g1)]))
            # Tally votes from the models where cls is the second class
            for g2 in class1["second"]:
                vote_count += (float(label[union_group.index(g2)]))
            if vote_count >= Threshold:
                # pass
                img_list.append(img_name)
    # God's-eye check: verify the accuracy of the selection.
    # NOTE(review): `acc` is defined elsewhere in the project.
    acc(cls, img_list)
# for cls in Class:
# vote_version1(cls)
# 投票器 版本2
def vote_version2(cls = "apple", vote_Threshold = 0.9, cls_Threshold = 0.8, predict_path = "/home/elgong/GEL/one_shot/torch/pytorch-cifar-master/result_predict_unlabel.csv"):
    """
    Version 2: used when the pairwise predictions are probabilities.
    :param cls: the class to filter images for
    :param vote_Threshold: minimum average vote score (0~1) to keep an image
    :param cls_Threshold: minimum per-pair confidence for a vote to count
    :param predict_path: CSV of per-pair "p1/p2" scores for unlabeled images
    :return: None (delegates evaluation to `acc`)
    """
    # Collect the pairwise models relevant to `cls`: "first" holds groups
    # where cls is the left class, "second" where it is the right class.
    class1 = {"first": [], "second": []}
    for group in union_group:
        if cls + "_" in group:
            class1["first"].append(group)
        elif "_" + cls in group:
            class1["second"].append(group)
    # Remember the images selected by the vote
    img_list = []
    # NOTE: `df` here is a file handle, not a dataframe.
    with open(predict_path, "r") as df:
        for line in df:
            # Skip the header line
            if "apple_orange" in line:
                continue
            vote_count = 0
            line = line.strip().split(",")
            img_name = line[0]
            label = line[1:]
            # Probability voting: each entry is "score1/score2"; votes below
            # cls_Threshold confidence are zeroed out.
            for g1 in class1["first"]:
                score1, score2 = label[union_group.index(g1)].split("/")
                if max(float(score1), float(score2)) < cls_Threshold:
                    score1 = 0
                    score2 = 0
                # (float(float(...)) is redundant but kept as-is.)
                vote_count += float(score1) if float(score1) > float(float(score2)) else 0
            # Tally votes from the models where cls is the second class
            for g2 in class1["second"]:
                score3, score4 = label[union_group.index(g2)].split("/")
                if max(float(score3), float(score4)) < cls_Threshold:
                    score3 = 0
                    score4 = 0
                vote_count += float(score4) if float(score3) < float(float(score4)) else 0
            # Average the accumulated score over the 38 relevant pairs.
            vote_count = vote_count/38.0
            if vote_count >= vote_Threshold:
                # pass
                img_list.append(img_name)
    # God's-eye check: verify the accuracy of the selection.
    # NOTE(review): `acc` is defined elsewhere in the project.
    acc(cls, img_list)
for cls in Class:
vote_version2(cls, 0.86, 0.8) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
2,
31,
7575,
220,
1058,
678,
12,
20,
12,
1731,
220,
10310,
233,
39355,
230,
17,
25,
1959,
198,
2,
31,
13838,
25,
1288,... | 1.652632 | 2,280 |
# Adapted from https://github.com/HobbitLong/SupContrast/blob/master/losses.py
import torch
import torch.nn as nn
import torch.nn.functional as F
| [
2,
30019,
276,
422,
3740,
1378,
12567,
13,
785,
14,
39,
672,
2545,
14617,
14,
40784,
4264,
5685,
14,
2436,
672,
14,
9866,
14,
22462,
274,
13,
9078,
198,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
2803... | 2.96 | 50 |
#!/usr/bin/python
# This is used to serve as a library such that some functions do not need to be rewritten over and ove again
import sys, os
from random import randint
import numpy as np
import math
from klampt.math import so3,vectorops
# This part is for the operation on list
# This part of functions is used for file operation: read, write
# Some dynamics functions
# Related to the operation of the robot contact link
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
2,
770,
318,
973,
284,
4691,
355,
257,
5888,
884,
326,
617,
5499,
466,
407,
761,
284,
307,
30101,
625,
290,
31471,
757,
198,
11748,
25064,
11,
28686,
198,
6738,
4738,
1330,
43720,
600,... | 3.855856 | 111 |
from django.shortcuts import render
from .models import *
from .forms import *
import datetime
# Create your views here. | [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
764,
27530,
1330,
1635,
198,
6738,
764,
23914,
1330,
1635,
198,
11748,
4818,
8079,
198,
2,
13610,
534,
5009,
994,
13
] | 3.870968 | 31 |
###############################################################################
# Code from
# https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py
# Modified the original code so that it also loads images from the current
# directory as well as the subdirectories
###############################################################################
import torch.utils.data as data
import numpy as np
import os
import os.path
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]
EXTENSIONS = ['.npy']
| [
29113,
29113,
7804,
4242,
21017,
198,
2,
6127,
422,
198,
2,
3740,
1378,
12567,
13,
785,
14,
9078,
13165,
354,
14,
10178,
14,
2436,
672,
14,
9866,
14,
13165,
354,
10178,
14,
19608,
292,
1039,
14,
43551,
13,
9078,
198,
2,
40499,
262,
... | 3.502994 | 167 |
# Generated by Django 3.2.12 on 2022-03-10 13:08
import django.core.validators
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
1065,
319,
33160,
12,
3070,
12,
940,
1511,
25,
2919,
198,
198,
11748,
42625,
14208,
13,
7295,
13,
12102,
2024,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.97561 | 41 |
import glob
import os
import zipfile
import nibabel as nib
import numpy as np
import traceback
def convert_mri_masks(type='fast'):
    """
    Convert UK Biobank brain-MRI segmentation masks from NIfTI (.nii.gz)
    to NumPy (.npy), for every subject that already has a converted T1.

    :param type: can be "fast" or "first" — selects which segmentation
        (FSL FAST tissue seg vs FIRST subcortical seg) is converted.
        NOTE: the parameter name shadows the builtin `type`.
    """
    source_path = "/mnt/projects/ukbiobank/original/imaging/brain_mri/T1_structural_brain_mri/unzipped/"
    destination_path = "/mnt/projects/ukbiobank/derived/imaging/brain_mri/"
    # Subject ids are taken from the already-converted T1 .npy files.
    filename_list = sorted(glob.glob(destination_path + "T1/**/" + "*.npy", recursive=True))
    count = 0
    for filename in filename_list:
        # Subject id = basename without extension (shadows builtin `id`).
        id = os.path.basename(filename).split(".")[0]
        count += 1
        if type == 'fast':
            # NOTE(review): nibabel's get_data() is deprecated in newer
            # versions in favour of get_fdata() — confirm installed version.
            mask = nib.load(source_path + str(id) + '_20252_2_0/T1/T1_fast/T1_brain_seg.nii.gz').get_data()
            np.save(os.path.join(destination_path, "fast_masks", str(id) + ".npy"), mask)
        else:
            mask = nib.load(
                source_path + str(id) + '_20252_2_0/T1/T1_first/T1_first_all_fast_firstseg.nii.gz').get_data()
            np.save(os.path.join(destination_path, "first_masks", str(id) + ".npy"), mask)
        # Progress heartbeat every 100 subjects.
        if count % 100 == 0:
            print("Processed " + str(count) + " masks so far.")
if __name__ == "__main__":
    # convert_mri_unlabeled()
    convert_mri_masks()
| [
11748,
15095,
198,
11748,
28686,
198,
11748,
19974,
7753,
198,
198,
11748,
33272,
9608,
355,
33272,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12854,
1891,
628,
198,
198,
4299,
10385,
62,
76,
380,
62,
5356,
591,
7,
4906,
11639,
721... | 2.173684 | 570 |
import tensorflow as tf
from rfho.datasets import load_iris
import numpy as np
import rfho as rf
TRACK = 'TRACK'
def iris_logistic_regression(augment=0):
    """
    Build a simple linear (softmax-regression) model on the Iris dataset
    for testing purposes.

    :param augment: forwarded to rf.vectorize_model — presumably the number
        of auxiliary copies of the weight vector to append; confirm against
        rfho's documentation.
    :return: tuple (iris, x, y, model, model_w, model_y, error, accuracy)
        where `iris` is the dataset partitioned 30/30/40, `x`/`y` are input
        placeholders, `model_w`/`model_y` the vectorized weights and output,
        and `error`/`accuracy` the cross-entropy loss and accuracy tensors.
    """
    iris = load_iris(partitions_proportions=(.3,.3))
    x = tf.placeholder(tf.float32, name='x')
    y = tf.placeholder(tf.float32, name='y')
    # 4 input features -> 3 iris classes.
    model = rf.LinearModel(x, 4, 3)
    model_w, model_y = rf.vectorize_model(model.var_list, model.inp[-1], augment=augment)
    error = tf.reduce_mean(
        rf.cross_entropy_loss(model_y, y)
    )
    correct_prediction = tf.equal(tf.argmax(model_y, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    return iris, x, y, model, model_w, model_y, error, accuracy
| [
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
374,
69,
8873,
13,
19608,
292,
1039,
1330,
3440,
62,
29616,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
374,
69,
8873,
355,
374,
69,
198,
198,
5446,
8120,
796,
705,
5446,
8120,
6,
... | 2.318966 | 348 |
class dotnetDoubleList_t(object):
""" dotnetDoubleList_t(size: int) """
def FromStruct(self,doubleList):
""" FromStruct(self: dotnetDoubleList_t,doubleList: DoubleList) """
pass
def ToStruct(self,doubleList):
""" ToStruct(self: dotnetDoubleList_t,doubleList: DoubleList) """
pass
@staticmethod
def __new__(self,size):
"""
__new__[dotnetDoubleList_t]() -> dotnetDoubleList_t
__new__(cls: type,size: int)
"""
pass
aDoubleList=None
ClientId=None
IndexCurrentItem=None
NumberItems=None
NumberItemsInSet=None
| [
4871,
16605,
3262,
25628,
8053,
62,
83,
7,
15252,
2599,
201,
198,
37227,
16605,
3262,
25628,
8053,
62,
83,
7,
7857,
25,
493,
8,
37227,
201,
198,
825,
3574,
44909,
7,
944,
11,
23352,
8053,
2599,
201,
198,
220,
37227,
3574,
44909,
7,
... | 2.654028 | 211 |
# Buy and sell stocks once (max single-transaction profit demo).
# NOTE(review): buy_and_sell_stock_once is not defined or imported in this
# chunk — presumably defined elsewhere in the file.
prices = [310, 315, 275, 295, 260, 270, 290, 230, 255, 250]
print(buy_and_sell_stock_once(prices))
# Edge case: empty price list.
print(buy_and_sell_stock_once([]))
| [
2,
11763,
290,
25688,
336,
28194,
1752,
198,
198,
1050,
1063,
796,
685,
26717,
11,
32647,
11,
25829,
11,
34772,
11,
21148,
11,
20479,
11,
26481,
11,
18395,
11,
14280,
11,
8646,
60,
198,
4798,
7,
17846,
62,
392,
62,
7255,
62,
13578,
... | 2.476923 | 65 |
"""shrt URL Configuration
'/admin' for the built-in django admin interface
'/graphql' displays the graphiql interface provided by graphene
'/<str:tag>' redirects to the URL that matches the shorted tag
"""
from django.contrib import admin
from django.urls import path
from graphene_django.views import GraphQLView
from shrt.url.views import UrlView
def favicon(request):
    """Provides a transparent 16x16 favicon to suppress django errors.

    The icon bytes are embedded as base64 so no static file is needed;
    the request argument is unused (required by Django's view signature).
    """
    # Local imports keep this self-contained helper out of module scope.
    from textwrap import dedent
    from django.http import HttpResponse
    import base64
    icon = """\
    AAABAAEAEBACAAEAAQCwAAAAFgAAACgAAAAQAAAAIAAAAAEAAQAAAAAAgAAAAAAAAAAAAAAAAAAA
    AAAAAAAAAAAA////AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
    AAAAAAAAAAAAAAAAAAAAAAAAAAD//wAA//8AAP//AAD//wAA//8AAP//AAD//wAA//8AAP//AAD/
    /wAA//8AAP//AAD//wAA//8AAP//AAD//wAA"""
    # Strip the source indentation, then decode to the raw .ico bytes.
    icon = dedent(icon)
    icon = base64.b64decode(icon)
    return HttpResponse(icon, content_type="image/x-icon")
urlpatterns = [
    path('admin/', admin.site.urls),
    # GraphiQL-enabled GraphQL endpoint.
    path('graphql', GraphQLView.as_view(graphiql=True)),
    path('favicon.ico', favicon, name='favicon'),
    # Catch-all: any other path segment is treated as a shortened tag.
    path('<str:tag>', UrlView.as_view()),
]
| [
37811,
1477,
17034,
10289,
28373,
198,
198,
26488,
28482,
6,
329,
262,
3170,
12,
259,
42625,
14208,
13169,
7071,
198,
26488,
34960,
13976,
6,
11298,
262,
4823,
72,
13976,
7071,
2810,
416,
42463,
198,
26488,
27,
2536,
25,
12985,
29,
6,
... | 2.75 | 424 |
# This file is part of spot_motion_monitor.
#
# Developed for LSST System Integration, Test and Commissioning.
#
# See the LICENSE file at the top-level directory of this distribution
# for details of code ownership.
#
# Use of this source code is governed by a 3-clause BSD-style
# license that can be found in the LICENSE file.
from spot_motion_monitor.views import BaseConfigurationDialog, CentroidPlotConfigTab, PsdPlotConfigTab
__all__ = ['PlotConfigurationDialog']
class PlotConfigurationDialog(BaseConfigurationDialog):
    """Class that generates the dialog for handling plot configuration.
    Attributes
    ----------
    centroidPlotConfigTab : CentroidPlotConfigTab
        Instance of the centroid plot configuration tab.
    psdPlotConfigTab : PsdPlotConfigTab
        Instance of the Power Spectrum Distribution plot configuration tab.
    """
    def __init__(self, parent=None):
        """Initialize the class.
        Parameters
        ----------
        parent : None, optional
            Top-level widget.
        """
        super().__init__(parent)
        self.centroidPlotConfigTab = CentroidPlotConfigTab(self)
        self.psdPlotConfigTab = PsdPlotConfigTab(self)
        # Register both tabs in the dialog's tab widget under their names.
        self.tabWidget.addTab(self.centroidPlotConfigTab, self.centroidPlotConfigTab.name)
        self.tabWidget.addTab(self.psdPlotConfigTab, self.psdPlotConfigTab.name)
        # Propagate each tab's input-validity signal to the dialog handler.
        self.centroidPlotConfigTab.hasValidInput.connect(self.inputFromTabsValid)
        self.psdPlotConfigTab.hasValidInput.connect(self.inputFromTabsValid)
    def getPlotConfiguration(self):
        """Get the current plotting configuration from all the tabs.
        Returns
        -------
        dict, dict
            The current centroid and PSD plot configurations.
        """
        centroidConfig = self.centroidPlotConfigTab.getConfiguration()
        psdConfig = self.psdPlotConfigTab.getConfiguration()
        return centroidConfig, psdConfig
    def setPlotConfiguration(self, centroidConfig, psdConfig):
        """Set the current plotting configuration in the plot tab's widgets.
        Parameters
        ----------
        centroidConfig : dict
            The current set of Centroid plot configuration parameters.
        psdConfig : dict
            The current set of Power Spectrum Distribution plot configuration
            parameters.
        """
        self.centroidPlotConfigTab.setConfiguration(centroidConfig)
        self.psdPlotConfigTab.setConfiguration(psdConfig)
| [
2,
770,
2393,
318,
636,
286,
4136,
62,
38714,
62,
41143,
13,
198,
2,
198,
2,
6013,
276,
329,
30948,
2257,
4482,
38410,
11,
6208,
290,
4513,
278,
13,
198,
2,
198,
2,
4091,
262,
38559,
24290,
2393,
379,
262,
1353,
12,
5715,
8619,
... | 2.888112 | 858 |
from pathlib import Path
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from se_resnet import se_resnet20
from baseline import resnet20
from utils import Trainer
if __name__ == '__main__':
import argparse
p = argparse.ArgumentParser()
p.add_argument("--batchsize", type=int, default=64)
p.add_argument("--reduction", type=int, default=16)
p.add_argument("--baseline", action="store_true")
args = p.parse_args()
main(args.batchsize, args.baseline, args.reduction)
| [
6738,
3108,
8019,
1330,
10644,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
11748,
28034,
13,
40085,
355,
6436,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
6060,
17401,
198,
6738,
28034,
10178,
1330,
40522,
11,
31408,
198,
1... | 3.035714 | 196 |
# -*- coding: utf-8 -*-
import sys
import os
import pickle
import numpy as np
from MySQLWrapper import MySQLWrapper
from MultinominalModelNaiveBayes import MultinominalModelNaiveBayes
from DocCategoryDBUtil import *
# 指定idのDocCategoryTypeのカテゴリ情報を生成
# 実行main
if __name__ == '__main__':
argvs = sys.argv
cmd = None
category_type = "Genre"
input_data = np.array([])
is_exist_opt = False
for i in range(len(argvs)):
if i == 0:
continue
if is_exist_opt:
is_exist_opt = False
continue
if argvs[i] == "--learn":
cmd = "learn"
elif argvs[i] == "--predict":
cmd = "predict"
elif argvs[i] == "--category_type":
category_type = argvs[i+1]
is_exist_opt = True
else:
input_data = np.append(input_data, [(int)(argvs[i])])
pickle_path = create_pickle_path(category_type)
is_exist_pickle = os.path.exists(pickle_path)
if cmd == "learn" or is_exist_pickle == False:
learn(category_type)
if cmd == "predict":
model = create_model(category_type)
category_array_id = model.decide(input_data)
mysql = init_mysql()
category_type_id = get_category_type_id_from_db(mysql, category_type)
print(get_category_name_en_from_db(mysql, category_type_id, category_array_id))
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
2298,
293,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
33476,
36918,
2848,
1330,
33476,
36918,
2848,
198,
67... | 2.112633 | 657 |
from a10sdk.common.A10BaseClass import A10BaseClass
class SamplingEnable(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param counters1: {"enum": ["all", "total_req", "req_allowed", "req_denied", "bot_check_succ", "bot_check_fail", "form_consistency_succ", "form_consistency_fail", "form_csrf_tag_succ", "form_csrf_tag_fail", "url_check_succ", "url_check_fail", "url_check_learn", "buf_ovf_url_len_fail", "buf_ovf_cookie_len_fail", "buf_ovf_hdrs_len_fail", "buf_ovf_post_size_fail", "max_cookies_fail", "max_hdrs_fail", "http_method_check_succ", "http_method_check_fail", "http_check_succ", "http_check_fail", "referer_check_succ", "referer_check_fail", "referer_check_redirect", "uri_wlist_succ", "uri_wlist_fail", "uri_blist_succ", "uri_blist_fail", "post_form_check_succ", "post_form_check_sanitize", "post_form_check_reject", "ccn_mask_amex", "ccn_mask_diners", "ccn_mask_visa", "ccn_mask_mastercard", "ccn_mask_discover", "ccn_mask_jcb", "ssn_mask", "pcre_mask", "cookie_encrypt_succ", "cookie_encrypt_fail", "cookie_encrypt_limit_exceeded", "cookie_encrypt_skip_rcache", "cookie_decrypt_succ", "cookie_decrypt_fail", "sqlia_chk_url_succ", "sqlia_chk_url_sanitize", "sqlia_chk_url_reject", "sqlia_chk_post_succ", "sqlia_chk_post_sanitize", "sqlia_chk_post_reject", "xss_chk_cookie_succ", "xss_chk_cookie_sanitize", "xss_chk_cookie_reject", "xss_chk_url_succ", "xss_chk_url_sanitize", "xss_chk_url_reject", "xss_chk_post_succ", "xss_chk_post_sanitize", "xss_chk_post_reject", "resp_code_hidden", "resp_hdrs_filtered", "learn_updates", "num_drops", "num_resets", "form_non_ssl_reject", "form_non_post_reject", "sess_check_none", "sess_check_succ", "sess_check_fail", "soap_check_succ", "soap_check_failure", "wsdl_fail", "wsdl_succ", "xml_schema_fail", "xml_schema_succ", "xml_sqlia_chk_fail", "xml_sqlia_chk_succ", "xml_xss_chk_fail", "xml_xss_chk_succ", "json_check_failure", "json_check_succ", "xml_check_failure", "xml_check_succ", "buf_ovf_cookie_value_len_fail", "buf_ovf_cookies_len_fail", "buf_ovf_hdr_name_len_fail", "buf_ovf_hdr_value_len_fail", "buf_ovf_max_data_parse_fail", "buf_ovf_line_len_fail", 
"buf_ovf_parameter_name_len_fail", "buf_ovf_parameter_value_len_fail", "buf_ovf_parameter_total_len_fail", "buf_ovf_query_len_fail", "max_entities_fail", "max_parameters_fail", "buf_ovf_cookie_name_len_fail", "xml_limit_attr", "xml_limit_attr_name_len", "xml_limit_attr_value_len", "xml_limit_cdata_len", "xml_limit_elem", "xml_limit_elem_child", "xml_limit_elem_depth", "xml_limit_elem_name_len", "xml_limit_entity_exp", "xml_limit_entity_exp_depth", "xml_limit_namespace", "xml_limit_namespace_uri_len", "json_limit_array_value_count", "json_limit_depth", "json_limit_object_member_count", "json_limit_string", "form_non_masked_password", "form_non_ssl_password", "form_password_autocomplete", "redirect_wlist_succ", "redirect_wlist_fail", "redirect_wlist_learn", "form_set_no_cache", "resp_denied"], "type": "string", "description": "'all': all; 'total_req': Total Requests; 'req_allowed': Requests Allowed; 'req_denied': Requests Denied; 'bot_check_succ': Botnet Check Success; 'bot_check_fail': Botnet Check Failure; 'form_consistency_succ': Form Consistency Success; 'form_consistency_fail': Form Consistency Failure; 'form_csrf_tag_succ': Form CSRF tag Success; 'form_csrf_tag_fail': Form CSRF tag Failure; 'url_check_succ': URL Check Success; 'url_check_fail': URL Check Failure; 'url_check_learn': URL Check Learn; 'buf_ovf_url_len_fail': Buffer Overflow - URL Length Failure; 'buf_ovf_cookie_len_fail': Buffer Overflow - Cookie Length Failure; 'buf_ovf_hdrs_len_fail': Buffer Overflow - Headers length Failure; 'buf_ovf_post_size_fail': Buffer Overflow - Post size Failure; 'max_cookies_fail': Max Cookies Failure; 'max_hdrs_fail': Max Headers Failure; 'http_method_check_succ': Http Method Check Success; 'http_method_check_fail': Http Method Check Failure; 'http_check_succ': Http Check Success; 'http_check_fail': Http Check Failure; 'referer_check_succ': Referer Check Success; 'referer_check_fail': Referer Check Failure; 'referer_check_redirect': Referer Check Redirect; 
'uri_wlist_succ': URI White List Success; 'uri_wlist_fail': URI White List Failure; 'uri_blist_succ': URI Black List Success; 'uri_blist_fail': URI Black List Failure; 'post_form_check_succ': Post Form Check Success; 'post_form_check_sanitize': Post Form Check Sanitized; 'post_form_check_reject': Post Form Check Rejected; 'ccn_mask_amex': Credit Card Number Mask Amex; 'ccn_mask_diners': Credit Card Number Mask Diners; 'ccn_mask_visa': Credit Card Number Mask Visa; 'ccn_mask_mastercard': Credit Card Number Mask Mastercard; 'ccn_mask_discover': Credit Card Number Mask Discover; 'ccn_mask_jcb': Credit Card Number Mask Jcb; 'ssn_mask': Social Security Number Mask; 'pcre_mask': PCRE Mask; 'cookie_encrypt_succ': Cookie Encrypt Success; 'cookie_encrypt_fail': Cookie Encrypt Failure; 'cookie_encrypt_limit_exceeded': Cookie Encrypt Limit Exceeded; 'cookie_encrypt_skip_rcache': Cookie Encrypt Skip RCache; 'cookie_decrypt_succ': Cookie Decrypt Success; 'cookie_decrypt_fail': Cookie Decrypt Failure; 'sqlia_chk_url_succ': SQLIA Check URL Success; 'sqlia_chk_url_sanitize': SQLIA Check URL Sanitized; 'sqlia_chk_url_reject': SQLIA Check URL Rejected; 'sqlia_chk_post_succ': SQLIA Check Post Success; 'sqlia_chk_post_sanitize': SQLIA Check Post Sanitized; 'sqlia_chk_post_reject': SQLIA Check Post Rejected; 'xss_chk_cookie_succ': XSS Check Cookie Success; 'xss_chk_cookie_sanitize': XSS Check Cookie Sanitized; 'xss_chk_cookie_reject': XSS Check Cookie Rejected; 'xss_chk_url_succ': XSS Check URL Success; 'xss_chk_url_sanitize': XSS Check URL Sanitized; 'xss_chk_url_reject': XSS Check URL Rejected; 'xss_chk_post_succ': XSS Check Post Success; 'xss_chk_post_sanitize': XSS Check Post Sanitized; 'xss_chk_post_reject': XSS Check Post Rejected; 'resp_code_hidden': Response Code Hidden; 'resp_hdrs_filtered': Response Headers Filtered; 'learn_updates': Learning Updates; 'num_drops': Number Drops; 'num_resets': Number Resets; 'form_non_ssl_reject': Form Non SSL Rejected; 'form_non_post_reject': 
Form Non Post Rejected; 'sess_check_none': Session Check None; 'sess_check_succ': Session Check Success; 'sess_check_fail': Session Check Failure; 'soap_check_succ': Soap Check Success; 'soap_check_failure': Soap Check Failure; 'wsdl_fail': WSDL Failure; 'wsdl_succ': WSDL Success; 'xml_schema_fail': XML Schema Failure; 'xml_schema_succ': XML Schema Success; 'xml_sqlia_chk_fail': XML Sqlia Check Failure; 'xml_sqlia_chk_succ': XML Sqlia Check Success; 'xml_xss_chk_fail': XML XSS Check Failure; 'xml_xss_chk_succ': XML XSS Check Success; 'json_check_failure': JSON Check Failure; 'json_check_succ': JSON Check Success; 'xml_check_failure': XML Check Failure; 'xml_check_succ': XML Check Success; 'buf_ovf_cookie_value_len_fail': Buffer Overflow - Cookie Value Length Failure; 'buf_ovf_cookies_len_fail': Buffer Overflow - Cookies Length Failure; 'buf_ovf_hdr_name_len_fail': Buffer Overflow - Header Name Length Failure; 'buf_ovf_hdr_value_len_fail': Buffer Overflow - Header Value Length Failure; 'buf_ovf_max_data_parse_fail': Buffer Overflow - Max Data Parse Failure; 'buf_ovf_line_len_fail': Buffer Overflow - Line Length Failure; 'buf_ovf_parameter_name_len_fail': Buffer Overflow - HTML Parameter Name Length Failure; 'buf_ovf_parameter_value_len_fail': Buffer Overflow - HTML Parameter Value Length Failure; 'buf_ovf_parameter_total_len_fail': Buffer Overflow - HTML Parameter Total Length Failure; 'buf_ovf_query_len_fail': Buffer Overflow - Query Length Failure; 'max_entities_fail': Max Entities Failure; 'max_parameters_fail': Max Parameters Failure; 'buf_ovf_cookie_name_len_fail': Buffer Overflow - Cookie Name Length Failure; 'xml_limit_attr': XML Limit Attribue; 'xml_limit_attr_name_len': XML Limit Name Length; 'xml_limit_attr_value_len': XML Limit Value Length; 'xml_limit_cdata_len': XML Limit CData Length; 'xml_limit_elem': XML Limit Element; 'xml_limit_elem_child': XML Limit Element Child; 'xml_limit_elem_depth': XML Limit Element Depth; 'xml_limit_elem_name_len': XML 
Limit Element Name Length; 'xml_limit_entity_exp': XML Limit Entity Exp; 'xml_limit_entity_exp_depth': XML Limit Entity Exp Depth; 'xml_limit_namespace': XML Limit Namespace; 'xml_limit_namespace_uri_len': XML Limit Namespace URI Length; 'json_limit_array_value_count': JSON Limit Array Value Count; 'json_limit_depth': JSON Limit Depth; 'json_limit_object_member_count': JSON Limit Object Number Count; 'json_limit_string': JSON Limit String; 'form_non_masked_password': Form Non Masked Password; 'form_non_ssl_password': Form Non SSL Password; 'form_password_autocomplete': Form Password Autocomplete; 'redirect_wlist_succ': Redirect Whitelist Success; 'redirect_wlist_fail': Redirect Whitelist Failure; 'redirect_wlist_learn': Redirect Whitelist Learn; 'form_set_no_cache': Form Set No Cache; 'resp_denied': Responses Denied; ", "format": "enum"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
class Global(A10BaseClass):
""" :param sampling_enable: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"optional": true, "counters1": {"enum": ["all", "total_req", "req_allowed", "req_denied", "bot_check_succ", "bot_check_fail", "form_consistency_succ", "form_consistency_fail", "form_csrf_tag_succ", "form_csrf_tag_fail", "url_check_succ", "url_check_fail", "url_check_learn", "buf_ovf_url_len_fail", "buf_ovf_cookie_len_fail", "buf_ovf_hdrs_len_fail", "buf_ovf_post_size_fail", "max_cookies_fail", "max_hdrs_fail", "http_method_check_succ", "http_method_check_fail", "http_check_succ", "http_check_fail", "referer_check_succ", "referer_check_fail", "referer_check_redirect", "uri_wlist_succ", "uri_wlist_fail", "uri_blist_succ", "uri_blist_fail", "post_form_check_succ", "post_form_check_sanitize", "post_form_check_reject", "ccn_mask_amex", "ccn_mask_diners", "ccn_mask_visa", "ccn_mask_mastercard", "ccn_mask_discover", "ccn_mask_jcb", "ssn_mask", "pcre_mask", "cookie_encrypt_succ", "cookie_encrypt_fail", "cookie_encrypt_limit_exceeded", "cookie_encrypt_skip_rcache", "cookie_decrypt_succ", "cookie_decrypt_fail", "sqlia_chk_url_succ", "sqlia_chk_url_sanitize", "sqlia_chk_url_reject", "sqlia_chk_post_succ", "sqlia_chk_post_sanitize", "sqlia_chk_post_reject", "xss_chk_cookie_succ", "xss_chk_cookie_sanitize", "xss_chk_cookie_reject", "xss_chk_url_succ", "xss_chk_url_sanitize", "xss_chk_url_reject", "xss_chk_post_succ", "xss_chk_post_sanitize", "xss_chk_post_reject", "resp_code_hidden", "resp_hdrs_filtered", "learn_updates", "num_drops", "num_resets", "form_non_ssl_reject", "form_non_post_reject", "sess_check_none", "sess_check_succ", "sess_check_fail", "soap_check_succ", "soap_check_failure", "wsdl_fail", "wsdl_succ", "xml_schema_fail", "xml_schema_succ", "xml_sqlia_chk_fail", "xml_sqlia_chk_succ", "xml_xss_chk_fail", "xml_xss_chk_succ", "json_check_failure", "json_check_succ", "xml_check_failure", "xml_check_succ", 
"buf_ovf_cookie_value_len_fail", "buf_ovf_cookies_len_fail", "buf_ovf_hdr_name_len_fail", "buf_ovf_hdr_value_len_fail", "buf_ovf_max_data_parse_fail", "buf_ovf_line_len_fail", "buf_ovf_parameter_name_len_fail", "buf_ovf_parameter_value_len_fail", "buf_ovf_parameter_total_len_fail", "buf_ovf_query_len_fail", "max_entities_fail", "max_parameters_fail", "buf_ovf_cookie_name_len_fail", "xml_limit_attr", "xml_limit_attr_name_len", "xml_limit_attr_value_len", "xml_limit_cdata_len", "xml_limit_elem", "xml_limit_elem_child", "xml_limit_elem_depth", "xml_limit_elem_name_len", "xml_limit_entity_exp", "xml_limit_entity_exp_depth", "xml_limit_namespace", "xml_limit_namespace_uri_len", "json_limit_array_value_count", "json_limit_depth", "json_limit_object_member_count", "json_limit_string", "form_non_masked_password", "form_non_ssl_password", "form_password_autocomplete", "redirect_wlist_succ", "redirect_wlist_fail", "redirect_wlist_learn", "form_set_no_cache", "resp_denied"], "type": "string", "description": "'all': all; 'total_req': Total Requests; 'req_allowed': Requests Allowed; 'req_denied': Requests Denied; 'bot_check_succ': Botnet Check Success; 'bot_check_fail': Botnet Check Failure; 'form_consistency_succ': Form Consistency Success; 'form_consistency_fail': Form Consistency Failure; 'form_csrf_tag_succ': Form CSRF tag Success; 'form_csrf_tag_fail': Form CSRF tag Failure; 'url_check_succ': URL Check Success; 'url_check_fail': URL Check Failure; 'url_check_learn': URL Check Learn; 'buf_ovf_url_len_fail': Buffer Overflow - URL Length Failure; 'buf_ovf_cookie_len_fail': Buffer Overflow - Cookie Length Failure; 'buf_ovf_hdrs_len_fail': Buffer Overflow - Headers length Failure; 'buf_ovf_post_size_fail': Buffer Overflow - Post size Failure; 'max_cookies_fail': Max Cookies Failure; 'max_hdrs_fail': Max Headers Failure; 'http_method_check_succ': Http Method Check Success; 'http_method_check_fail': Http Method Check Failure; 'http_check_succ': Http Check Success; 
'http_check_fail': Http Check Failure; 'referer_check_succ': Referer Check Success; 'referer_check_fail': Referer Check Failure; 'referer_check_redirect': Referer Check Redirect; 'uri_wlist_succ': URI White List Success; 'uri_wlist_fail': URI White List Failure; 'uri_blist_succ': URI Black List Success; 'uri_blist_fail': URI Black List Failure; 'post_form_check_succ': Post Form Check Success; 'post_form_check_sanitize': Post Form Check Sanitized; 'post_form_check_reject': Post Form Check Rejected; 'ccn_mask_amex': Credit Card Number Mask Amex; 'ccn_mask_diners': Credit Card Number Mask Diners; 'ccn_mask_visa': Credit Card Number Mask Visa; 'ccn_mask_mastercard': Credit Card Number Mask Mastercard; 'ccn_mask_discover': Credit Card Number Mask Discover; 'ccn_mask_jcb': Credit Card Number Mask Jcb; 'ssn_mask': Social Security Number Mask; 'pcre_mask': PCRE Mask; 'cookie_encrypt_succ': Cookie Encrypt Success; 'cookie_encrypt_fail': Cookie Encrypt Failure; 'cookie_encrypt_limit_exceeded': Cookie Encrypt Limit Exceeded; 'cookie_encrypt_skip_rcache': Cookie Encrypt Skip RCache; 'cookie_decrypt_succ': Cookie Decrypt Success; 'cookie_decrypt_fail': Cookie Decrypt Failure; 'sqlia_chk_url_succ': SQLIA Check URL Success; 'sqlia_chk_url_sanitize': SQLIA Check URL Sanitized; 'sqlia_chk_url_reject': SQLIA Check URL Rejected; 'sqlia_chk_post_succ': SQLIA Check Post Success; 'sqlia_chk_post_sanitize': SQLIA Check Post Sanitized; 'sqlia_chk_post_reject': SQLIA Check Post Rejected; 'xss_chk_cookie_succ': XSS Check Cookie Success; 'xss_chk_cookie_sanitize': XSS Check Cookie Sanitized; 'xss_chk_cookie_reject': XSS Check Cookie Rejected; 'xss_chk_url_succ': XSS Check URL Success; 'xss_chk_url_sanitize': XSS Check URL Sanitized; 'xss_chk_url_reject': XSS Check URL Rejected; 'xss_chk_post_succ': XSS Check Post Success; 'xss_chk_post_sanitize': XSS Check Post Sanitized; 'xss_chk_post_reject': XSS Check Post Rejected; 'resp_code_hidden': Response Code Hidden; 'resp_hdrs_filtered': Response 
Headers Filtered; 'learn_updates': Learning Updates; 'num_drops': Number Drops; 'num_resets': Number Resets; 'form_non_ssl_reject': Form Non SSL Rejected; 'form_non_post_reject': Form Non Post Rejected; 'sess_check_none': Session Check None; 'sess_check_succ': Session Check Success; 'sess_check_fail': Session Check Failure; 'soap_check_succ': Soap Check Success; 'soap_check_failure': Soap Check Failure; 'wsdl_fail': WSDL Failure; 'wsdl_succ': WSDL Success; 'xml_schema_fail': XML Schema Failure; 'xml_schema_succ': XML Schema Success; 'xml_sqlia_chk_fail': XML Sqlia Check Failure; 'xml_sqlia_chk_succ': XML Sqlia Check Success; 'xml_xss_chk_fail': XML XSS Check Failure; 'xml_xss_chk_succ': XML XSS Check Success; 'json_check_failure': JSON Check Failure; 'json_check_succ': JSON Check Success; 'xml_check_failure': XML Check Failure; 'xml_check_succ': XML Check Success; 'buf_ovf_cookie_value_len_fail': Buffer Overflow - Cookie Value Length Failure; 'buf_ovf_cookies_len_fail': Buffer Overflow - Cookies Length Failure; 'buf_ovf_hdr_name_len_fail': Buffer Overflow - Header Name Length Failure; 'buf_ovf_hdr_value_len_fail': Buffer Overflow - Header Value Length Failure; 'buf_ovf_max_data_parse_fail': Buffer Overflow - Max Data Parse Failure; 'buf_ovf_line_len_fail': Buffer Overflow - Line Length Failure; 'buf_ovf_parameter_name_len_fail': Buffer Overflow - HTML Parameter Name Length Failure; 'buf_ovf_parameter_value_len_fail': Buffer Overflow - HTML Parameter Value Length Failure; 'buf_ovf_parameter_total_len_fail': Buffer Overflow - HTML Parameter Total Length Failure; 'buf_ovf_query_len_fail': Buffer Overflow - Query Length Failure; 'max_entities_fail': Max Entities Failure; 'max_parameters_fail': Max Parameters Failure; 'buf_ovf_cookie_name_len_fail': Buffer Overflow - Cookie Name Length Failure; 'xml_limit_attr': XML Limit Attribue; 'xml_limit_attr_name_len': XML Limit Name Length; 'xml_limit_attr_value_len': XML Limit Value Length; 'xml_limit_cdata_len': XML Limit CData 
Length; 'xml_limit_elem': XML Limit Element; 'xml_limit_elem_child': XML Limit Element Child; 'xml_limit_elem_depth': XML Limit Element Depth; 'xml_limit_elem_name_len': XML Limit Element Name Length; 'xml_limit_entity_exp': XML Limit Entity Exp; 'xml_limit_entity_exp_depth': XML Limit Entity Exp Depth; 'xml_limit_namespace': XML Limit Namespace; 'xml_limit_namespace_uri_len': XML Limit Namespace URI Length; 'json_limit_array_value_count': JSON Limit Array Value Count; 'json_limit_depth': JSON Limit Depth; 'json_limit_object_member_count': JSON Limit Object Number Count; 'json_limit_string': JSON Limit String; 'form_non_masked_password': Form Non Masked Password; 'form_non_ssl_password': Form Non SSL Password; 'form_password_autocomplete': Form Password Autocomplete; 'redirect_wlist_succ': Redirect Whitelist Success; 'redirect_wlist_fail': Redirect Whitelist Failure; 'redirect_wlist_learn': Redirect Whitelist Learn; 'form_set_no_cache': Form Set No Cache; 'resp_denied': Responses Denied; ", "format": "enum"}}}]}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
Class Description::
WAF global stats.
Class global supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/waf/global`.
"""
| [
6738,
257,
940,
21282,
74,
13,
11321,
13,
32,
940,
14881,
9487,
1330,
317,
940,
14881,
9487,
628,
198,
4871,
3409,
11347,
36695,
7,
32,
940,
14881,
9487,
2599,
198,
220,
220,
220,
220,
198,
220,
220,
220,
37227,
1212,
1398,
857,
407... | 2.82234 | 6,580 |
# solutions_UnixShell2.py
'''
Solutions for Volume 3, Lab 2: More on Unix Shell
Written Summer 2015
'''
# PROBLEM 1: Make count_files.py an executable script
'''
SHELL COMMANDS: (Executed in the Shell-Lab/Python directory)
$ which python
On the author's system, this was: /home/tanner/anaconda/bin/python
Open count_files.py and add the shebang at the first line of the file. One the
author's system, this was
#!/home/tanner/anaconda/bin/python
$ chmod ug+x count_files.py
'''
# PROBLEM 2:
'''
SHELL COMMANDS: (Executed in the Shell-Lab/Scripts directory
$ ./script1 &
$ ./script2 &
$ ./script3 &
$ jobs > log.txt
'''
import subprocess
import numpy as np
import scipy as sp
# PROBLEM 5: Secure copy with partner
'''
SHELL COMMAND:
(follow example boxes and just plug in system-specific information
'''
# PROBLEM 6: Download files in urls.txt
'''
SHELL COMMAND: (Execute in Shell-Lab/Documents)
wget -i urls.txt
'''
# PROBLEM 7: Format using awk
'''
$ awk ' BEGIN{FS = "\t"};{ print $7,$9 } ' < files.txt | sort -r > date-modified.txt
'''
| [
2,
8136,
62,
47000,
23248,
17,
13,
9078,
198,
7061,
6,
198,
50,
14191,
329,
14701,
513,
11,
3498,
362,
25,
3125,
319,
33501,
17537,
198,
25354,
10216,
1853,
198,
7061,
6,
198,
198,
2,
4810,
9864,
2538,
44,
352,
25,
6889,
954,
62,
... | 2.638821 | 407 |
import pytest
from sgkit import display_genotypes
from sgkit.display import truncate
from sgkit.testing import simulate_genotype_call_dataset
| [
11748,
12972,
9288,
198,
198,
6738,
264,
70,
15813,
1330,
3359,
62,
5235,
13567,
198,
6738,
264,
70,
15813,
13,
13812,
1330,
40122,
378,
198,
6738,
264,
70,
15813,
13,
33407,
1330,
29308,
62,
5235,
8690,
62,
13345,
62,
19608,
292,
316... | 3.23913 | 46 |
import sys
from xsdata.codegen.models import SIMPLE_TYPES
from xsdata.models.enums import DataType
from xsdata.models.enums import Namespace
from xsdata.models.enums import Tag
from xsdata.utils.namespaces import build_qname
from xsdata.utils.testing import AttrFactory
from xsdata.utils.testing import AttrTypeFactory
from xsdata.utils.testing import ClassFactory
from xsdata.utils.testing import ExtensionFactory
from xsdata.utils.testing import FactoryTestCase
| [
11748,
25064,
198,
198,
6738,
2124,
82,
7890,
13,
8189,
5235,
13,
27530,
1330,
23749,
16437,
62,
9936,
47,
1546,
198,
6738,
2124,
82,
7890,
13,
27530,
13,
268,
5700,
1330,
6060,
6030,
198,
6738,
2124,
82,
7890,
13,
27530,
13,
268,
5... | 3.376812 | 138 |
#creates just a miss section of knit (no tube)
import sys
sys.path.append('../knitout-frontend-py')
from library import knitout
from library import castonbindoff
# from crossover_full import *
# from library.crossover_half import *
# from library.crossover_full import *
# from seedKnit import*
from library.jersey import*
from library.tuckstuff import*
# from library.fairIsleStiffFxn import*
# from library.jerseyVariedStitches import*
from library.ribbing import*
import numpy as np
import math
k = knitout.Writer('1 2 3 4 5 6')
k.addHeader('Machine','kniterate')
c1='1'
c2='2'
c3='3'
c5='5'
k.ingripper(c1)
k.ingripper(c2)
k.ingripper(c3)
k.ingripper(c5)
width=30
k.stitchNumber(5)
castonbindoff.caston(k,width,[c1,c2,c3,c5])
#
for w in range(width):
k.xfer(('b',w),('f',w))
k.stitchNumber(4)
k.speedNumber(400)
k.rollerAdvance(400)
jerseyKnit(k,width,30,c3,'l')
k.stitchNumber(4)
k.speedNumber(400)
k.rollerAdvance(400)
jerseyKnit(k,width,30,c5,'l')
k.stitchNumber(4)
k.speedNumber(400)
k.rollerAdvance(400)
jerseyKnit(k,width,10,c2,'l')
# # # jerseyarray = [4,4,4,4,4,4,8,8,8,8,8,8,8,8,8,8,4,4,4,4,4,4,4]
# # # # k.rollerAdvance(300)
# # # k.speedNumber(300)
# # # jerseyStitches(k,jerseyarray,width,6,c3,'l')
# #
# #
# # k.stitchNumber(5)
# # k.speedNumber(300)
# # k.rollerAdvance(250)
# # fairArray=[1,1,1,1,2,2,2,2]
# # stiffFairIsle(k,fairArray,width,50,c2,c3,'l')
# # # k.rack(0)
# # # crossoverHalf(k,width,6,c3,'l')
#
#
# # k.rack(0)
# # crossoverFull(k,width,50,c3,'l')
# #
# # k.rack(0)
# # k.stitchNumber(6)
# # k.speedNumber(400)
# # k.rollerAdvance(400)
# # jerseyKnit(k,width,20,c3,'l')
#
for s in range(width):
k.drop(('f',s))
k.outgripper(c1)
k.outgripper(c2)
k.outgripper(c3)
k.outgripper(c5)
k.write('jerseySensor.k')
| [
2,
20123,
274,
655,
257,
2051,
2665,
286,
30495,
357,
3919,
12403,
8,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
10786,
40720,
47095,
448,
12,
8534,
437,
12,
9078,
11537,
198,
6738,
5888,
1330,
30495,
448,
198,
6738,
5888,
13... | 2.121248 | 833 |
#------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
#------------------------------------------------------------------------------
from datetime import date as pydate
from atom.api import Typed, ForwardTyped, observe
from enaml.core.declarative import d_
from .control import Control, ProxyControl
class ProxyBoundedDate(ProxyControl):
""" The abstract defintion of a proxy BoundedDate object.
"""
#: A reference to the BoundedDate declaration.
declaration = ForwardTyped(lambda: BoundedDate)
class BoundedDate(Control):
""" A base class for components which edit a Python datetime.date
object bounded between minimum and maximum values.
This class is not meant to be used directly.
"""
#: The minimum date available in the date edit. If the minimum value
#: is changed such that it becomes greater than the current value or
#: the maximum value, then those values will be adjusted. The default
#: value is September 14, 1752.
minimum = d_(Typed(pydate, args=(1752, 9, 14)))
#: The maximum date available in the date edit. If the maximum value
#: is changed such that it becomes smaller than the current value or
#: the minimum value, then those values will be adjusted. The default
#: value is December 31, 7999.
maximum = d_(Typed(pydate, args=(7999, 12, 31)))
#: The date in the control. This will be clipped to the supplied
#: maximum and minimum values. The default is date.today().
date = d_(Typed(pydate, factory=pydate.today))
#: A reference to the ProxyBoundedDate object.
proxy = Typed(ProxyBoundedDate)
#--------------------------------------------------------------------------
# Observers
#--------------------------------------------------------------------------
@observe('minimum', 'maximum', 'date')
def _update_proxy(self, change):
""" An observer which updates the proxy when the data changes.
"""
# The superclass implementation is sufficient.
super(BoundedDate, self)._update_proxy(change)
#--------------------------------------------------------------------------
# Post Setattr Handlers
#--------------------------------------------------------------------------
def _post_setattr_minimum(self, old, new):
""" Post setattr the minimum date.
If the new minimum is greater than the current value or the
maximum, those values are adjusted up.
"""
if new > self.maximum:
self.maximum = new
if new > self.date:
self.date = new
def _post_setattr_maximum(self, old, new):
""" Post setattr the maximum date.
If the new maximum is less than the current value or the
minimum, those values are adjusted down.
"""
if new < self.minimum:
self.minimum = new
if new < self.date:
self.date = new
#--------------------------------------------------------------------------
# Post Validate Handlers
#--------------------------------------------------------------------------
def _post_validate_date(self, old, new):
""" Post validate the date for the control.
If it lies outside of minimum and maximum bounds, it will be
clipped to the bounds.
"""
return max(self.minimum, min(new, self.maximum))
| [
2,
10097,
26171,
198,
2,
15069,
357,
66,
8,
2211,
11,
399,
14913,
291,
7712,
4816,
13,
198,
2,
198,
2,
4307,
6169,
739,
262,
2846,
286,
262,
40499,
347,
10305,
13789,
13,
198,
2,
198,
2,
383,
1336,
5964,
318,
287,
262,
2393,
385... | 3.39868 | 1,061 |
""" Definition and Scratch implementations for CSIL's instruction set """
from . import scratch
from .block_builder import makeBlockInput, makeBroadcastInput, makeListField, makeReporterInput, makeValueInput, makeVariableField, makeVariableInput
""" Base Classes """
""" Basic """
""" Memory Manipulation """
""" Arithmetic """
""" Boolean """
registry = InstructionRegistry([
Set,
Copy,
Load,
Store,
Add,
Sub,
Mul,
Div,
Gt,
Lt,
Eq
]) | [
37811,
30396,
290,
1446,
36722,
25504,
329,
9429,
4146,
338,
12064,
900,
37227,
198,
198,
6738,
764,
1330,
12692,
198,
6738,
764,
9967,
62,
38272,
1330,
787,
12235,
20560,
11,
787,
30507,
2701,
20560,
11,
787,
8053,
15878,
11,
787,
6207... | 2.963855 | 166 |
n = int(input())
masukan = list(map(int, input().split()))
angka = 0
n = int(input())
print()
for k in range (2,n+1) :
cek=True
for i in range(2,k) :
if k%i==0:
cek=False
break
if cek==True:
print(k) | [
77,
796,
493,
7,
15414,
28955,
201,
198,
5356,
2724,
272,
796,
1351,
7,
8899,
7,
600,
11,
5128,
22446,
35312,
3419,
4008,
201,
198,
648,
4914,
796,
657,
201,
198,
201,
198,
77,
796,
493,
7,
15414,
28955,
201,
198,
4798,
3419,
201,... | 1.738562 | 153 |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""External script for generating Cloud Endpoints related files.
The gen_discovery_doc subcommand takes a list of fully qualified ProtoRPC
service names and calls a cloud service which generates a discovery document in
REST or RPC style.
Example:
endpointscfg.py gen_discovery_doc -o . -f rest postservice.GreetingsV1
The gen_client_lib subcommand takes a discovery document and calls a cloud
service to generate a client library for a target language (currently just Java)
Example:
endpointscfg.py gen_client_lib java -o . greetings-v0.1.discovery
The get_client_lib subcommand does both of the above commands at once.
Example:
endpointscfg.py get_client_lib java -o . postservice.GreetingsV1
The gen_api_config command outputs an .api configuration file for a service.
Example:
endpointscfg.py gen_api_config -o . -a /path/to/app \
--hostname myhost.appspot.com postservice.GreetingsV1
"""
from __future__ import with_statement
from __future__ import print_function
import argparse
import collections
import contextlib
try:
import json
except ImportError:
import simplejson as json
import os
import re
import sys
import urllib
import urllib2
from endpoints import api_config
from protorpc import remote
import yaml
from google.appengine.tools.devappserver2 import stub_util
DISCOVERY_DOC_BASE = ('https://webapis-discovery.appspot.com/_ah/api/'
'discovery/v1/apis/generate/')
CLIENT_LIBRARY_BASE = 'https://google-api-client-libraries.appspot.com/generate'
_VISIBLE_COMMANDS = ('get_client_lib', 'get_discovery_doc')
class ServerRequestException(Exception):
  """Exception for problems with the request to a server."""

  def __init__(self, http_error):
    """Create a ServerRequestException from a given urllib2.HTTPError.

    Args:
      http_error: The HTTPError that the ServerRequestException will be
        based on.
    """
    error_details = None
    error_response = None
    # Try to extract structured detail strings from the response body.  The
    # body, when present, is expected to be JSON shaped like
    # {"error": {"errors": [{"message": ..., "debug_info": ...}, ...]}};
    # on any parse/shape failure we fall back to the raw response text below.
    if http_error.fp:
      try:
        error_response = http_error.fp.read()
        error_body = json.loads(error_response)
        error_details = ['%s: %s' % (detail['message'], detail['debug_info'])
                         for detail in error_body['error']['errors']]
      except (ValueError, TypeError, KeyError):
        pass
    if error_details:
      error_details_str = ', '.join(error_details)
      error_message = ('HTTP %s (%s) error when communicating with URL: %s. '
                       'Details: %s' % (http_error.code, http_error.reason,
                                        http_error.filename, error_details_str))
    else:
      # No parseable details: include the raw response (may be None).
      error_message = ('HTTP %s (%s) error when communicating with URL: %s. '
                       'Response: %s' % (http_error.code, http_error.reason,
                                         http_error.filename,
                                         error_response))
    super(ServerRequestException, self).__init__(error_message)
class _EndpointsParser(argparse.ArgumentParser):
  """Create a subclass of argparse.ArgumentParser for Endpoints."""

  def error(self, message):
    """Override superclass to support customized error message.

    Error message needs to be rewritten in order to display visible commands
    only, when invalid command is called by user. Otherwise, hidden commands
    will be displayed in stderr, which is not expected.

    Refer the following argparse python documentation for detailed method
    information:
      http://docs.python.org/2/library/argparse.html#exiting-methods

    Args:
      message: original error message that will be printed to stderr
    """
    # argparse's default "invalid choice" message lists every registered
    # subcommand, including the hidden gen_* ones; rewrite the trailing
    # "(choose from ...)" part so only _VISIBLE_COMMANDS are shown.
    subcommands_quoted = ', '.join(
        [repr(command) for command in _VISIBLE_COMMANDS])
    subcommands = ', '.join(_VISIBLE_COMMANDS)
    message = re.sub(
        r'(argument {%s}: invalid choice: .*) \(choose from (.*)\)$'
        % subcommands, r'\1 (choose from %s)' % subcommands_quoted, message)
    # Superclass error() prints usage plus the message and exits.
    super(_EndpointsParser, self).error(message)
def _WriteFile(output_path, name, content):
"""Write given content to a file in a given directory.
Args:
output_path: The directory to store the file in.
name: The name of the file to store the content in.
content: The content to write to the file.close
Returns:
The full path to the written file.
"""
path = os.path.join(output_path, name)
with open(path, 'wb') as f:
f.write(content)
return path
def GenApiConfig(service_class_names, config_string_generator=None,
                 hostname=None, application_path=None):
  """Write an API configuration for endpoints annotated ProtoRPC services.

  Args:
    service_class_names: A list of fully qualified ProtoRPC service classes.
    config_string_generator: A generator object that produces API config strings
      using its pretty_print_config_to_json method.
    hostname: A string hostname which will be used as the default version
      hostname. If no hostname is specificied in the @endpoints.api decorator,
      this value is the fallback.
    application_path: A string with the path to the AppEngine application.

  Raises:
    TypeError: If any service classes don't inherit from remote.Service.
    messages.DefinitionNotFoundError: If a service can't be found.

  Returns:
    A map from service names to a string containing the API configuration of the
    service in JSON format.
  """
  # Group services by (api name, api version): one config is emitted per API,
  # and an API may be implemented by several service classes.
  api_service_map = collections.OrderedDict()
  for service_class_name in service_class_names:
    module_name, base_service_class_name = service_class_name.rsplit('.', 1)
    # Dynamic import of the module that defines the service class.
    module = __import__(module_name, fromlist=base_service_class_name)
    service = getattr(module, base_service_class_name)
    if not isinstance(service, type) or not issubclass(service, remote.Service):
      raise TypeError('%s is not a ProtoRPC service' % service_class_name)
    services = api_service_map.setdefault(
        (service.api_info.name, service.api_info.version), [])
    services.append(service)
  app_yaml_hostname = _GetAppYamlHostname(application_path)
  service_map = collections.OrderedDict()
  config_string_generator = (
      config_string_generator or api_config.ApiConfigGenerator())
  # NOTE: iteritems() — this file targets Python 2.
  for api_info, services in api_service_map.iteritems():
    assert len(services) > 0, 'An API must have at least one ProtoRPC service'
    # Hostname precedence: @endpoints.api decorator, then the explicit
    # argument, then the name derived from app.yaml.
    hostname = services[0].api_info.hostname or hostname or app_yaml_hostname
    # Key is "<name>-<version>"; api_info is the (name, version) tuple.
    service_map['%s-%s' % api_info] = (
        config_string_generator.pretty_print_config_to_json(
            services, hostname=hostname))
  return service_map
def _GetAppYamlHostname(application_path, open_func=open):
"""Build the hostname for this app based on the name in app.yaml.
Args:
application_path: A string with the path to the AppEngine application. This
should be the directory containing the app.yaml file.
open_func: Function to call to open a file. Used to override the default
open function in unit tests.
Returns:
A hostname, usually in the form of "myapp.appspot.com", based on the
application name in the app.yaml file. If the file can't be found or
there's a problem building the name, this will return None.
"""
try:
app_yaml_file = open_func(os.path.join(application_path or '.', 'app.yaml'))
config = yaml.safe_load(app_yaml_file.read())
except IOError:
return None
application = config.get('application')
if not application:
return None
if ':' in application:
return None
tilde_index = application.rfind('~')
if tilde_index >= 0:
application = application[tilde_index + 1:]
if not application:
return None
return '%s.appspot.com' % application
def _FetchDiscoveryDoc(config, doc_format):
  """Fetch discovery documents generated from a cloud service.

  Args:
    config: An API config.
    doc_format: The requested format for the discovery doc. (rest|rpc)

  Raises:
    ServerRequestException: If fetching the generated discovery doc fails.

  Returns:
    A list of discovery doc strings.
  """
  # POST the API config to the public discovery-generation service; the
  # requested style (rest|rpc) selects the endpoint suffix.
  body = json.dumps({'config': config}, indent=2, sort_keys=True)
  request = urllib2.Request(DISCOVERY_DOC_BASE + doc_format, body)
  request.add_header('content-type', 'application/json')
  try:
    with contextlib.closing(urllib2.urlopen(request)) as response:
      return response.read()
  except urllib2.HTTPError as error:
    # Wrap for a friendlier message that includes any JSON error details.
    raise ServerRequestException(error)
def _GenDiscoveryDoc(service_class_names, doc_format,
                     output_path, hostname=None,
                     application_path=None):
  """Write discovery documents generated from a cloud service to file.

  Args:
    service_class_names: A list of fully qualified ProtoRPC service names.
    doc_format: The requested format for the discovery doc. (rest|rpc)
    output_path: The directory to output the discovery docs to.
    hostname: A string hostname which will be used as the default version
      hostname. If no hostname is specificied in the @endpoints.api decorator,
      this value is the fallback. Defaults to None.
    application_path: A string containing the path to the AppEngine app.

  Raises:
    ServerRequestException: If fetching the generated discovery doc fails.

  Returns:
    A list of discovery doc filenames.
  """
  output_files = []
  service_configs = GenApiConfig(service_class_names, hostname=hostname,
                                 application_path=application_path)
  # One "<name>-<version>.discovery" file per API.  (iteritems: Python 2.)
  for api_name_version, config in service_configs.iteritems():
    discovery_doc = _FetchDiscoveryDoc(config, doc_format)
    discovery_name = api_name_version + '.discovery'
    output_files.append(_WriteFile(output_path, discovery_name, discovery_doc))
  return output_files
def _GenClientLib(discovery_path, language, output_path, build_system):
  """Generate a client library from a discovery doc file via the cloud service.

  Args:
    discovery_path: Path to the discovery doc used to generate the client
      library.
    language: The client library language to generate. (java)
    output_path: The directory to output the client library zip to.
    build_system: The target build system for the client library language.

  Raises:
    IOError: If reading the discovery doc fails.
    ServerRequestException: If fetching the generated client library fails.

  Returns:
    The path to the zipped client library.
  """
  with open(discovery_path) as doc_file:
    doc_contents = doc_file.read()
  # Name the output zip after the discovery file: foo.discovery -> foo.zip.
  base_name = os.path.basename(discovery_path)
  zip_name = re.sub(r'\.discovery$', '.zip', base_name)
  return _GenClientLibFromContents(doc_contents, language, output_path,
                                   build_system, zip_name)
def _GenClientLibFromContents(discovery_doc, language, output_path,
                              build_system, client_name):
  """Write a client library from a discovery doc, using a cloud service to file.

  Args:
    discovery_doc: A string, the contents of the discovery doc used to
      generate the client library.
    language: A string, the client library language to generate. (java)
    output_path: A string, the directory to output the client library zip to.
    build_system: A string, the target build system for the client language.
    client_name: A string, the filename used to save the client lib.

  Raises:
    IOError: If reading the discovery doc fails.
    ServerRequestException: If fetching the generated client library fails.

  Returns:
    The path to the zipped client library.
  """
  # Form-encoded POST to the client-library generation service.
  # (Python 2 APIs: urllib.urlencode / urllib2.)
  body = urllib.urlencode({'lang': language, 'content': discovery_doc,
                           'layout': build_system})
  request = urllib2.Request(CLIENT_LIBRARY_BASE, body)
  try:
    with contextlib.closing(urllib2.urlopen(request)) as response:
      content = response.read()
      return _WriteFile(output_path, client_name, content)
  except urllib2.HTTPError as error:
    raise ServerRequestException(error)
def _GetClientLib(service_class_names, language, output_path, build_system,
                  hostname=None, application_path=None):
  """Fetch client libraries from a cloud service.

  Args:
    service_class_names: A list of fully qualified ProtoRPC service names.
    language: The client library language to generate. (java)
    output_path: The directory to output the discovery docs to.
    build_system: The target build system for the client library language.
    hostname: A string hostname which will be used as the default version
      hostname. If no hostname is specificied in the @endpoints.api decorator,
      this value is the fallback. Defaults to None.
    application_path: A string containing the path to the AppEngine app.

  Returns:
    A list of paths to client libraries.
  """
  client_libs = []
  service_configs = GenApiConfig(service_class_names, hostname=hostname,
                                 application_path=application_path)
  # Client libraries are always generated from REST-style discovery docs.
  # (iteritems: Python 2.)
  for api_name_version, config in service_configs.iteritems():
    discovery_doc = _FetchDiscoveryDoc(config, 'rest')
    client_name = api_name_version + '.zip'
    client_libs.append(
        _GenClientLibFromContents(discovery_doc, language, output_path,
                                  build_system, client_name))
  return client_libs
def _GenApiConfigCallback(args, api_func=GenApiConfig):
  """Generate an api file.

  Args:
    args: An argparse.Namespace object to extract parameters from.
    api_func: A function that generates and returns an API configuration
      for a list of services.
  """
  service_configs = api_func(args.service,
                             hostname=args.hostname,
                             application_path=args.application)
  # One "<name>-<version>.api" file per API.  (iteritems: Python 2.)
  for api_name_version, config in service_configs.iteritems():
    _WriteFile(args.output, api_name_version + '.api', config)
def _GetClientLibCallback(args, client_func=_GetClientLib):
  """Generate discovery docs and client libraries to files.

  Args:
    args: An argparse.Namespace object to extract parameters from.
    client_func: A function that generates client libraries and stores them to
      files, accepting a list of service names, a client library language,
      an output directory, a build system for the client library language, and
      a hostname.
  """
  generated = client_func(args.service, args.language, args.output,
                          args.build_system, hostname=args.hostname,
                          application_path=args.application)
  # Report every written library on stdout.
  for lib_path in generated:
    print('API client library written to %s' % lib_path)
def _GenDiscoveryDocCallback(args, discovery_func=_GenDiscoveryDoc):
  """Generate discovery docs to files.

  Args:
    args: An argparse.Namespace object to extract parameters from.
    discovery_func: A function that generates discovery docs and stores them to
      files, accepting a list of service names, a discovery doc format, and an
      output directory.
  """
  written = discovery_func(args.service, args.format, args.output,
                           hostname=args.hostname,
                           application_path=args.application)
  # Report every written document on stdout.
  for doc_path in written:
    print('API discovery document written to %s' % doc_path)
def _GenClientLibCallback(args, client_func=_GenClientLib):
  """Generate a client library to file.

  Args:
    args: An argparse.Namespace object to extract parameters from.
    client_func: A function that generates client libraries and stores them to
      files, accepting a path to a discovery doc, a client library language, an
      output directory, and a build system for the client library language.
  """
  # nargs=1 in the parser means discovery_doc arrives as a one-element list.
  written_path = client_func(args.discovery_doc[0], args.language, args.output,
                             args.build_system)
  print('API client library written to %s' % written_path)
def MakeParser(prog):
  """Create an argument parser.

  Args:
    prog: The name of the program to use when outputting help text.

  Returns:
    An argparse.ArgumentParser built to specification.
  """

  def AddStandardOptions(parser, *args):
    """Add common endpoints options to a parser.

    Args:
      parser: The parser to add options to.
      *args: A list of option names to add. Possible names are: application,
        format, output, language, service, and discovery_doc.
    """
    if 'application' in args:
      parser.add_argument('-a', '--application', default='.',
                          help='The path to the Python App Engine App')
    if 'format' in args:
      parser.add_argument('-f', '--format', default='rest',
                          choices=['rest', 'rpc'],
                          help='The requested API protocol type')
    if 'hostname' in args:
      help_text = ('Default application hostname, if none is specified '
                   'for API service.')
      parser.add_argument('--hostname', help=help_text)
    if 'output' in args:
      parser.add_argument('-o', '--output', default='.',
                          help='The directory to store output files')
    if 'language' in args:
      parser.add_argument('language',
                          help='The target output programming language')
    if 'service' in args:
      parser.add_argument('service', nargs='+',
                          help='Fully qualified service class name')
    if 'discovery_doc' in args:
      parser.add_argument('discovery_doc', nargs=1,
                          help='Path to the discovery document')
    if 'build_system' in args:
      parser.add_argument('-bs', '--build_system', default='default',
                          help='The target build system')

  parser = _EndpointsParser(prog=prog)
  # Only the commands in _VISIBLE_COMMANDS are advertised in help; the
  # metavar override keeps the hidden gen_* commands out of the usage line.
  subparsers = parser.add_subparsers(
      title='subcommands', metavar='{%s}' % ', '.join(_VISIBLE_COMMANDS))
  get_client_lib = subparsers.add_parser(
      'get_client_lib', help=('Generates discovery documents and client '
                              'libraries from service classes'))
  get_client_lib.set_defaults(callback=_GetClientLibCallback)
  AddStandardOptions(get_client_lib, 'application', 'hostname', 'output',
                     'language', 'service', 'build_system')
  get_discovery_doc = subparsers.add_parser(
      'get_discovery_doc',
      help='Generates discovery documents from service classes')
  get_discovery_doc.set_defaults(callback=_GenDiscoveryDocCallback)
  AddStandardOptions(get_discovery_doc, 'application', 'format', 'hostname',
                     'output', 'service')
  # Hidden commands below: registered without help text, so they work but
  # are not listed for the user.
  gen_api_config = subparsers.add_parser('gen_api_config')
  gen_api_config.set_defaults(callback=_GenApiConfigCallback)
  AddStandardOptions(gen_api_config, 'application', 'hostname', 'output',
                     'service')
  gen_discovery_doc = subparsers.add_parser('gen_discovery_doc')
  gen_discovery_doc.set_defaults(callback=_GenDiscoveryDocCallback)
  AddStandardOptions(gen_discovery_doc, 'application', 'format', 'hostname',
                     'output', 'service')
  gen_client_lib = subparsers.add_parser('gen_client_lib')
  gen_client_lib.set_defaults(callback=_GenClientLibCallback)
  AddStandardOptions(gen_client_lib, 'output', 'language', 'discovery_doc',
                     'build_system')
  return parser
if __name__ == '__main__':
  # NOTE(review): `main` is not defined anywhere in this portion of the file —
  # confirm a main(argv) entry point exists before running this script
  # directly, otherwise this raises NameError.
  main(sys.argv)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
15069,
4343,
3012,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
... | 2.837873 | 6,976 |
# -*- coding: utf-8 -*-
import settings
from flask import Flask
from views import router
# Application factory pattern is not used here: a single module-level Flask
# app with one blueprint of routes.
app = Flask(__name__)
app.register_blueprint(router)
if __name__ == '__main__':
    # Bind on all interfaces; the debug flag comes from project settings.
    app.run(host='0.0.0.0', debug=settings.API_DEBUG)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
6460,
198,
6738,
42903,
1330,
46947,
198,
6738,
5009,
1330,
20264,
198,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
198,
1324,
13,
30238,
62,
17585,
4798,
7,
4... | 2.616279 | 86 |
# -*- coding: utf-8 -*-
"""
Spyder Editor
This script is made to visualise data from era5 with cartopy
"""
# Imports
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import xarray as xr
import numpy as np
import matplotlib as mpl
# Load data
# NOTE(review): hard-coded cluster path — this script only runs where this
# mount exists.
path_era5 = '/work/FAC/FGSE/IDYST/tbeucler/default/meryam/data/era5/media/rasp'+\
    '/Elements/weather-benchmark'
ds = xr.open_dataset(path_era5+'/1.40625deg/2m_temperature/'+\
    '2m_temperature_1979_1.40625deg.nc')
time_test = np.datetime64('1979-12-01T00:00:00.000000000') #datetime64 format enables to pick a year, daytime, hour...
# Geographic coordinates
# NOTE(review): Casablanca is at latitude 33.57 N, longitude -7.59 E — the two
# values below look swapped relative to their names; confirm before plotting.
central_lon, central_lat = 33.5731104, -7.5898434 #coordinates of Casablanca
lat_min = 28
lat_max = 35
lon_min = -11
lon_max = -4
extent = [lon_min, lon_max, lat_min, lat_max]
norm = mpl.colors.Normalize(vmin=283.15, vmax=303.15) #sets a temperature range colorbar
# Visualise data
fig, ax = plt.subplots(subplot_kw={'projection': ccrs.PlateCarree(central_lon)})
ax.set_extent(extent)
ax.coastlines(resolution='50m')
# `map` shadows the builtin; it holds the object returned by xarray's imshow.
map = ds.sel(time=time_test)['t2m'].plot.imshow(cmap='coolwarm', norm=norm)
sub_set = ds.sel(time=time_test)['t2m']
# NOTE(review): 175–182 is a 0–360 longitude window, while the extent above
# uses [-11, -4] — confirm the dataset's longitude convention.
lon_indices = np.logical_and(ds.lon>=175, #gives the longitudes array's indices of the area around Casablanca
    ds.lon<=182)
lat_indices = np.logical_and(ds.lat>=28, #gives the latitudes array's indices of the area around Casablanca
    ds.lat<=38)
# NOTE(review): the isel() result is not assigned, so this line has no effect.
ds.isel({'lon':lon_indices,'lat':lat_indices}) #isolate Casablanca's data
plt.plot(central_lon, central_lat, 'ro')
# NOTE(review): `map` is the image object returned by imshow; it is unclear
# from this file that it has a grid() method — presumably ax.grid(...) was
# intended. Confirm before relying on this line.
map.grid(which='minor', color='k', linestyle='-', linewidth=10)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
4561,
88,
1082,
12058,
198,
198,
1212,
4226,
318,
925,
284,
5874,
786,
1366,
422,
6980,
20,
351,
6383,
11081,
198,
37811,
198,
198,
2,
1846,
3742,
220,
198... | 2.280537 | 745 |
import requests
| [
11748,
7007,
628
] | 5.666667 | 3 |
import prodigy
from prodigy.components.loaders import Audio, Video, JSONL, ImageServer
from plumcot_prodigy.forced_alignment import ForcedAlignment
from plumcot_prodigy.video import mkv_to_base64
from plumcot_prodigy.custom_loaders import *
from prodigy.components.preprocess import add_tokens, fetch_media
from prodigy.util import file_to_b64
from typing import Dict, List, Text
from pathlib import Path
import random
import os
import json
import spacy
import ast
def remove_video_before_db(examples: List[Dict]) -> List[Dict]:
    """Strip heavy media keys ("video", "options", "pictures") from examples
    before saving to the Prodigy database.

    The base64-encoded video excerpt and the picture/option payloads produced
    by the stream are large; persisting them would bloat the database, so
    they are dropped here (the docstring of the original promised removal of
    the pictures as well).

    Parameters
    ----------
    examples : list of dict
        Examples coming from the stream.

    Returns
    -------
    examples : list of dict
        The same list, mutated in place, with the media keys removed.
    """
    for eg in examples:
        # BUG FIX: the original re-tested "video" (already deleted) before
        # deleting "options", so "options" (and "pictures") were never
        # actually removed.
        for heavy_key in ("video", "options", "pictures"):
            if heavy_key in eg:
                del eg[heavy_key]
    return examples
def stream_char(episode, user_path):
    """
    Annotate not_available characters.

    Displays lines with "not_available" character in aligned file and
    displays pictures of all the characters of the current episode.

    Arguments : episode : episode to annotate (e.g TheWalkingDead.Season01.Episode01),
                user_path : path to Plumcot corpora
    Start prodigy : prodigy select_char select_characters <episode_name> <user_path> -F plumcot_prodigy/recipes.py

    Yields Prodigy tasks with a base64 video excerpt, the text of the
    sentence, and a choice box of candidate characters.
    """
    # path to shows directories
    DATA_PLUMCOT = user_path
    # NOTE(review): show/season/ep are parsed but never used below.
    show = episode.split('.')[0]
    season = episode.split('.')[1]
    ep = episode.split('.')[2]
    # load episodes list (a single episode wrapped in a list)
    episodes_list = [episode]
    for episode in episodes_list:
        print("\nCurrent episode", episode)
        # process serie or film: "Show.SeasonXX.EpisodeYY" vs "Film.Title"
        if len(episode.split('.')) == 3:
            series, _, _ = episode.split('.')
        elif len(episode.split('.')) == 2:
            series, _ = episode.split('.')
        # load mkv & aligned sentences
        mkv, aligned, sentences = load_files(series, episode, DATA_PLUMCOT)
        if mkv == "" and aligned == "":
            continue
        else:
            # credits for the current episode
            episode_characters = load_credits(episode, series, DATA_PLUMCOT)
            print("\nCHARACTERS\n")
            for idx, char in enumerate(episode_characters):
                print(idx+1, char)
            # load pictures for the characters of the current episode
            pictures = load_photo(episode_characters, series, DATA_PLUMCOT)
            # options to load in the choice box
            options = []
            for name, val in pictures.items():
                # display photo in options when a centroid picture exists
                if "centroid" in val:
                    options.append({"id":name, "image":file_to_b64(val)})
                else :
                    # display character's name when no picture
                    options.append({"id":name, "text": name})
            # extra selections for all@ and #unknown#
            options.append({"id":"all@","text": "all@"})
            options.append({"id":f"#unknown#{episode}","text":f"#unknown#{episode}"})
            # find all non-empty sentences whose speaker is not available
            sentences_choice_not_available = [(sentence, idx) for idx, sentence in enumerate(sentences) if sentence._.speaker == 'not_available' if str(sentence) != '']
            print("Sentences to annotate :", len(sentences_choice_not_available))
            for el in sentences_choice_not_available:
                sentence = el[0]
                sentence_id = el[1]
                # Grab the neighbouring sentences for video context.
                try :
                    if sentences.index(sentence) != 0:
                        left = sentences[sentences.index(sentence)-1]
                        right = sentences[sentences.index(sentence)+1]
                    # bug workaround: left index would be the last sentence in
                    # the list when the current sentence is at index 0
                    else:
                        left = " "
                        right = sentences[sentences.index(sentence)+1]
                except IndexError:
                    left = " "
                    right = " "
                # video window: pad with the neighbours when both exist
                if str(left) != " " and str(right) != " ":
                    start_time = left._.start_time
                    end_time= right._.end_time + 0.1
                else:
                    start_time = sentence._.start_time
                    end_time = sentence._.end_time +0.1
                speaker = sentence._.speaker
                # extract corresponding video excerpt (base64 for the web UI)
                video_excerpt = mkv_to_base64(mkv, start_time, end_time)
                yield {
                    "video": video_excerpt,
                    "speaker": f"{speaker}",
                    "text": f"{sentence}",
                    "pictures" : pictures,
                    "options" : options,
                    "start_time": f"{sentence._.start_time}",
                    "end_time": f"{sentence._.end_time}",
                    "sentence_id" : sentence_id,
                    "meta": {"start_extract": start_time, "end_extract": end_time,
                             "episode": episode, "mkv_path": mkv},
                }
@prodigy.recipe(
"select_char",
dataset=("The dataset to save to", "positional", None, str),
episode=("Episode to annotate (e.g : TheWalkingDead.Season01.Episode01", "positional", None, str),
user_path=("Path to Plumcot corpora", "positional", None, str),
)
| [
11748,
386,
41923,
198,
6738,
386,
41923,
13,
5589,
3906,
13,
2220,
364,
1330,
13491,
11,
7623,
11,
19449,
43,
11,
7412,
10697,
198,
6738,
22802,
25557,
62,
1676,
41923,
13,
12072,
62,
282,
16747,
1330,
40731,
2348,
16747,
198,
6738,
... | 1.983989 | 2,998 |
from typing import Optional
from TagScriptEngine import Interpreter, adapter
from TagScriptEngine.interface import Block
| [
6738,
19720,
1330,
32233,
198,
198,
6738,
17467,
7391,
13798,
1330,
4225,
3866,
353,
11,
21302,
198,
6738,
17467,
7391,
13798,
13,
39994,
1330,
9726,
628,
198
] | 4.592593 | 27 |
from typing import Callable, Iterable, Tuple
from numpy import zeros
import math
import torch
from torch.optim import Optimizer
| [
6738,
19720,
1330,
4889,
540,
11,
40806,
540,
11,
309,
29291,
198,
6738,
299,
32152,
1330,
1976,
27498,
198,
11748,
10688,
198,
198,
11748,
28034,
198,
6738,
28034,
13,
40085,
1330,
30011,
7509,
628
] | 3.823529 | 34 |
__author__ = 'gleicher'
"""
infrastructure for spacetime problems
at each time, there needs to be a STATE for the robot
these states are created by interpolating the KEYS
a state sequence is something that provides a state vector (for a robot)
at every frame in the movement. it might be stored as a keyvector
(or computed from a KeyVector). but it is very basic.
Note: states (and therefore keyvectors) give array access to the states
a KEYVECTOR is a big vector that concatenates all of the keys
in the future, it might be best if this isn't actually just a vector
it might be a list or a numpy array
it might have adnumbers or oovars in it
but its a big flat list of length n*m (keys * vars per state)
there are utility routines for handling them
to make things functional, its probably best if this is just a vector
however, we need to keep n and m around with it, so it cannot be
a key vector is designed so that it can act like an StateSequence
a KEYVARIABLES is a vector of the variables that are actually changing in the
spacetime problem
there is a trick that not everything in the keyvector will be allowed to change
(for example, endpoints to interpolate)
so we need to "expand" a set of KEYVARIABLES into a KEYVECTOR
the basic strategy is most likely to be:
1) build a KeyVector (like an initial configuration for a solver)
2) extract KeyVariables (a smaller vector subset)
3) inject KeyVariables into the initial KeyVector (to produce a new KeyVector)
4) use an interpolator, or the KeyVector directly as a state list
for now, injection / extraction creates new objects - which is wasteful because of the copying,
but gets around the statefulness problems. in the future, this can be fixed if there are
efficiency concerns
"""
import numpy as N
| [
834,
9800,
834,
796,
705,
70,
293,
291,
372,
6,
198,
198,
37811,
198,
10745,
6410,
329,
34752,
8079,
2761,
198,
198,
265,
1123,
640,
11,
612,
2476,
284,
307,
257,
35454,
329,
262,
9379,
198,
27218,
2585,
389,
2727,
416,
39555,
803,
... | 3.643287 | 499 |
from where_was_i import util
| [
6738,
810,
62,
9776,
62,
72,
1330,
7736,
628
] | 3.333333 | 9 |
# FUNCTION - COMPUTE I AND J GEOMETRIC INTEGRALS FOR SOURCE PANEL METHOD
# Written by: JoshTheEngineer
# YouTube : www.youtube.com/joshtheengineer
# Website : www.joshtheengineer.com
# Started : 02/03/19 - Transferred from MATLAB to Python
# - Works as expected
# : 04/28/20 - Fixed E value error handling
#
# PURPOSE
# - Compute the integral expression for constant strength source panels
# - Source panel strengths are constant, but can change from panel to panel
# - Geometric integral for panel-normal : I(ij)
# - Geometric integral for panel-tangential: J(ij)
#
# REFERENCES
# - [1]: Normal Geometric Integral SPM, I(ij)
# Link: https://www.youtube.com/watch?v=76vPudNET6U
# - [2]: Tangential Geometric Integral SPM, J(ij)
# Link: https://www.youtube.com/watch?v=JRHnOsueic8
#
# INPUTS
# - XC : X-coordinate of control points
# - YC : Y-coordinate of control points
# - XB : X-coordinate of boundary points
# - YB : Y-coordinate of boundary points
# - phi : Angle between positive X-axis and interior of panel
# - S : Length of panel
#
# OUTPUTS
# - I : Value of panel-normal integral (Eq. 3.163 in Anderson or Ref [1])
# - J : Value of panel-tangential integral (Eq. 3.165 in Anderson or Ref [2])
import numpy as np
import math as math
np.seterr('raise')
| [
2,
29397,
4177,
2849,
532,
24301,
37780,
314,
5357,
449,
22319,
2662,
2767,
41132,
17828,
7156,
49,
23333,
7473,
311,
31033,
40468,
3698,
337,
36252,
201,
198,
2,
22503,
416,
25,
8518,
464,
13798,
263,
201,
198,
2,
7444,
220,
220,
105... | 2.594697 | 528 |
import copy
import sys
import unittest
import pygsl._numobj as Numeric
from pygsl import Float
import pygsl
from pygsl import multiminimize
# Send everything printed on stdout to stderr so test output is interleaved
# with unittest's own (stderr-based) reporting.
sys.stdout = sys.stderr
if __name__ == '__main__':
    unittest.main()
| [
11748,
4866,
198,
11748,
25064,
198,
11748,
555,
715,
395,
198,
11748,
12972,
70,
6649,
13557,
22510,
26801,
355,
399,
39223,
198,
6738,
12972,
70,
6649,
1330,
48436,
198,
11748,
12972,
70,
6649,
198,
6738,
12972,
70,
6649,
1330,
43104,
... | 2.779221 | 77 |
from typing import Any, Dict, Optional
import faker
# Module-level Faker instance shared by the helpers in this file.
fake = faker.Factory.create()
| [
6738,
19720,
1330,
4377,
11,
360,
713,
11,
32233,
198,
198,
11748,
277,
3110,
198,
198,
30706,
796,
277,
3110,
13,
22810,
13,
17953,
3419,
628,
198
] | 3.185185 | 27 |
from tool.runners.python import SubmissionPy
import heapq
| [
6738,
2891,
13,
36740,
13,
29412,
1330,
42641,
20519,
198,
11748,
24575,
80,
628
] | 4.214286 | 14 |
## https://leetcode.com/submissions/detail/230651834/
## problem is to find the numbers between 1 and length of the
## array that aren't in the array. simple way to do that is to
## do the set difference between range(1, len(ar)+1) and the
## input numbers
## hits 98th percentile in terms of runtime, though only
## 14th percentile in memory usage | [
2235,
3740,
1378,
293,
316,
8189,
13,
785,
14,
7266,
8481,
14,
49170,
14,
19214,
2996,
1507,
2682,
14,
198,
198,
2235,
1917,
318,
284,
1064,
262,
3146,
1022,
352,
290,
4129,
286,
262,
220,
198,
2235,
7177,
326,
3588,
470,
287,
262,
... | 3.54 | 100 |
class SudokuSolver:
    """
    Solves sudoku puzzle. Save the puzzle in a file (space separated)
    and pass the file name as parameter.
    The missing digits has to be marked `0`

    Sample input (txt file containing sudoku):
        3 0 6 5 0 8 4 0 0
        ...
    Expects instances to carry:
        sudoku -- the 9x9 grid as a list of lists of ints (0 = empty)
        row    -- number of rows (9)
        col    -- number of columns (9)
    """

    def _is_valid(self, row, col, num):
        """Return True if `num` may be placed at (row, col).

        Sudoku rules require `num` to be absent from the row, the column AND
        the 3x3 sub-grid.  The original implementation skipped the sub-grid
        check (and swapped the row/col loop bounds, harmless only because the
        grid is square); both are fixed here.
        """
        # Check across the row (i indexes columns).
        for i in range(0, self.col):
            if self.sudoku[row][i] == num:
                return False
        # Check down the column (i indexes rows).
        for i in range(0, self.row):
            if self.sudoku[i][col] == num:
                return False
        # Check the 3x3 sub-grid containing (row, col).
        box_row = row - row % 3
        box_col = col - col % 3
        for r in range(box_row, box_row + 3):
            for c in range(box_col, box_col + 3):
                if self.sudoku[r][c] == num:
                    return False
        # No conflict found.
        return True
# NOTE(review): SudokuSolver defines no __init__ in this file, so passing a
# path here raises TypeError (object() takes no parameters) — confirm the
# loader exists elsewhere before running this module.
sudoku = SudokuSolver("Path to your sudoku problem here")
4871,
14818,
11601,
50,
14375,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
4294,
1158,
424,
67,
16044,
15027,
13,
12793,
262,
15027,
287,
257,
2393,
357,
13200,
11266,
8,
198,
220,
220,
220,
290,
1208,
262,
2393,
1438,
355,
1... | 2.071786 | 599 |
import sublime
from unittest import TestCase
class ViewTestCase(TestCase):
    """Base class providing basic functionality for testing against views.

    Taken from:
    https://github.com/randy3k/AlignTab/blob/master/tests/test_basic.py
    https://github.com/randy3k/AutoWrap/blob/master/tests/test_python.py
    """
| [
11748,
41674,
198,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
628,
198,
4871,
3582,
14402,
20448,
7,
14402,
20448,
2599,
198,
220,
220,
220,
37227,
15946,
2530,
4096,
11244,
329,
4856,
1028,
5009,
13,
628,
220,
220,
220,
30222,
422,
... | 2.836364 | 110 |
from sys import exit
from CassandraWrapper import CassandraWrapper
# Smoke test: exit 0 iff a CassandraWrapper can be constructed.
try:
    CassandraWrapper()
except Exception:
    # Catch only real errors: the original bare `except:` also swallowed
    # KeyboardInterrupt/SystemExit, hiding the reason for the failure.
    exit(1)
exit(0)
| [
6738,
25064,
1330,
8420,
198,
6738,
46750,
36918,
2848,
1330,
46750,
36918,
2848,
198,
198,
28311,
25,
198,
220,
220,
220,
46750,
36918,
2848,
3419,
198,
16341,
25,
198,
220,
220,
220,
8420,
7,
16,
8,
198,
198,
37023,
7,
15,
8,
198
... | 2.906977 | 43 |
from django.db import models
from djrest_wrapper.interfaces import BaseModel
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
2118,
62,
48553,
13,
3849,
32186,
1330,
7308,
17633,
628
] | 3.9 | 20 |
# coding: utf-8
# Read n (total), x (already obtained) and y (required percentage) from one line.
n, x, y = [int(i) for i in input().split()]
# First guess for "how many more are needed" so that (x + ans) / n >= y / 100.
ans = int((y/100*n)-x)
# int() truncates toward zero, so the guess can fall one short of the target
# ratio; bump it when the threshold is still not reached.
# NOTE(review): this is a float comparison — exact-threshold cases depend on
# binary floating-point rounding of y/100.
if (ans+x)/n < y/100:
    ans += 1
# Never report a negative count.
if ans > 0:
    print(ans)
else:
    print(0)
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
77,
11,
2124,
11,
331,
796,
685,
600,
7,
72,
8,
329,
1312,
287,
5128,
22446,
35312,
3419,
60,
198,
504,
796,
493,
19510,
88,
14,
3064,
9,
77,
13219,
87,
8,
198,
361,
357,
504,
10,
87,
206... | 1.906977 | 86 |
"""Generates a random date and uses that date to print information about
the corresponding Zodiac sign.
Zodiac date ranges come from here:
- http://astrostyle.com/zodiac-sign-dates/
Zodiac sign characterstics come from here:
- http://nuclear.ucdavis.edu/~rpicha/personal/astrology/
Astrology is absolute BS and is not a science, but it's consistent enough
to use for an example like this.
"""
import random
aries = """Adventurous and energetic
Pioneering and courageous
Enthusiastic and confident
Dynamic and quick-witted
Selfish and quick-tempered
Impulsive and impatient
Foolhardy and daredevil"""
taurus = """Patient and reliable
Warmhearted and loving
Persistent and determined
Placid and security loving
Jealous and possessive
Resentful and inflexible
Self-indulgent and greedy"""
gemini = """Adaptable and versatile
Communicative and witty
Intellectual and eloquent
Youthful and lively
Nervous and tense
Superficial and inconsistent
Cunning and inquisitive"""
cancer = """Emotional and loving
Intuitive and imaginative
Shrewd and cautious
Protective and sympathetic
Changeable and moody
Overemotional and touchy
Clinging and unable to let go"""
leo = """Generous and warmhearted
Creative and enthusiastic
Broad-minded and expansive
Faithful and loving
Pompous and patronizing
Bossy and interfering
Dogmatic and intolerant"""
virgo = """Modest and shy
Meticulous and reliable
Practical and diligent
Intelligent and analytical
Fussy and a worrier
Overcritical and harsh
Perfectionist and conservative"""
libra = """Diplomatic and urbane
Romantic and charming
Easygoing and sociable
Idealistic and peaceable
Indecisive and changeable
Gullible and easily influenced
Flirtatious and self-indulgent"""
scorpio = """Determined and forceful
Emotional and intuitive
Powerful and passionate
Exciting and magnetic
Jealous and resentful
Compulsive and obsessive
Secretive and obstinate"""
sagittarius = """Optimistic and freedom-loving
Jovial and good-humored
Honest and straightforward
Intellectual and philosophical
Blindly optimistic and careless
Irresponsible and superficial
Tactless and restless"""
capricorn = """Practical and prudent
Ambitious and disciplined
Patient and careful
Humorous and reserved
Pessimistic and fatalistic
Miserly and grudging"""
aquarius = """Friendly and humanitarian
Honest and loyal
Original and inventive
Independent and intellectual
Intractable and contrary
Perverse and unpredictable
Unemotional and detached"""
pisces = """Imaginative and sensitive
Compassionate and kind
Selfless and unworldly
Intuitive and sympathetic
Escapist and idealistic
Secretive and vague
Weak-willed and easily led"""
month = random.randint(1, 12)
if month == 2:
day = random.randint(1, 29)
elif (month == 4) or (month == 6) or (month == 9) or (month == 11):
day = random.randint(1, 30)
else:
day = random.randint(1, 31)
msg = f"{month}/{day} is for "
if ((month == 1) and (day <= 20)) or ((month == 12) and (day >= 23)):
print(msg + "Capricorn!\n")
print(capricorn)
elif ((month == 1) and (day >= 21)) or (month == 2) and (day <= 19):
print(msg + "Aquarius!\n")
print(aquarius)
elif ((month == 2) and (day >= 20)) or (month == 3) and (day <= 20):
print(msg + "Pisces!\n")
print(pisces)
elif ((month == 3) and (day >= 21)) or (month == 4) and (day <= 20):
print(msg + "Aries!\n")
print(aries)
elif ((month == 4) and (day >= 21)) or (month == 5) and (day <= 21):
print(msg + "Taurus!\n")
print(taurus)
elif ((month == 5) and (day >= 22)) or (month == 6) and (day <= 21):
print(msg + "Gemini!\n")
print(gemini)
elif ((month == 6) and (day >= 22)) or (month == 7) and (day <= 22):
print(msg + "Cancer!\n")
print(cancer)
elif ((month == 7) and (day >= 23)) or (month == 8) and (day <= 21):
print(msg + "Leo!\n")
print(leo)
elif ((month == 8) and (day >= 22)) or (month == 9) and (day <= 23):
print(msg + "Virgo!\n")
print(virgo)
elif ((month == 9) and (day >= 24)) or (month == 10) and (day <= 23):
print(msg + "Libra!\n")
print(libra)
elif ((month == 10) and (day >= 24)) or (month == 11) and (day <= 22):
print(msg + "Scorpio!\n")
print(scorpio)
elif ((month == 11) and (day >= 23)) or (month == 12) and (day <= 22):
print(msg + "Sagittarius!\n")
print(sagittarius)
| [
37811,
8645,
689,
257,
4738,
3128,
290,
3544,
326,
3128,
284,
3601,
1321,
546,
198,
1169,
11188,
1168,
40096,
1051,
13,
198,
198,
57,
40096,
3128,
16069,
1282,
422,
994,
25,
220,
198,
12,
2638,
1378,
459,
305,
7635,
13,
785,
14,
89,... | 2.978796 | 1,462 |
# Python - 3.6.0
test.describe('Example Tests')
tests = [
('asdfadsf', ['as', 'df', 'ad', 'sf']),
('asdfads', ['as', 'df', 'ad', 's_']),
('', []),
('x', ['x_'])
]
for inp, exp in tests:
test.assert_equals(solution(inp), exp)
| [
2,
11361,
532,
513,
13,
21,
13,
15,
198,
198,
9288,
13,
20147,
4892,
10786,
16281,
30307,
11537,
198,
198,
41989,
796,
685,
198,
220,
220,
220,
19203,
292,
7568,
5643,
69,
3256,
37250,
292,
3256,
705,
7568,
3256,
705,
324,
3256,
705... | 2.049587 | 121 |
__author__ = 'Sergey Lihobabin'
__version__ = '0.5.8'
default_app_config = 'protector.apps.ProtectorConfig'
| [
834,
9800,
834,
796,
705,
7089,
39608,
406,
4449,
672,
6014,
6,
198,
834,
9641,
834,
796,
705,
15,
13,
20,
13,
23,
6,
198,
12286,
62,
1324,
62,
11250,
796,
705,
11235,
9250,
13,
18211,
13,
19703,
9250,
16934,
6,
198
] | 2.571429 | 42 |
# -*- coding: utf-8 -*-
import scrapy
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
15881,
88,
628
] | 2.166667 | 18 |
from configparser import ConfigParser
from piradio.stations import Station
from piradio.utils import clamp
class Settings:
"""
The application settings.
"""
DEFAULT_VOLUME = 128
DEFAULT_STATION = 0
DEFAULT_STATIONS = [Station('ByteFM', 'http://www.byte.fm/stream/bytefm.m3u')]
@staticmethod
@classmethod
| [
6738,
4566,
48610,
1330,
17056,
46677,
198,
198,
6738,
12276,
324,
952,
13,
301,
602,
1330,
9327,
198,
6738,
12276,
324,
952,
13,
26791,
1330,
29405,
628,
198,
4871,
16163,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
383,
3586,... | 2.826446 | 121 |
from typing import Dict
from django.http import HttpResponseRedirect
from django.views import generic
from django.shortcuts import render, get_object_or_404
from ipware import get_client_ip
from pprint import pprint
from .forms import RecipeForm
from .models import Recipe, StepsOfRecipe, IngredientsOfStep, IngredientToBeAdded, StepToBeAdded, IngredientType, \
AmountType
# https://docs.djangoproject.com/en/3.0/intro/tutorial03/
# def index(request):
# latest_recipes = Recipe.objects.order_by('-pub_date')[:5]
# context = {
# 'latest_recipes': latest_recipes
# }
# https://docs.djangoproject.com/en/3.0/ref/request-response/
| [
6738,
19720,
1330,
360,
713,
198,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
7738,
1060,
198,
6738,
42625,
14208,
13,
33571,
1330,
14276,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
651,
62,
15252,
62,
27... | 2.887387 | 222 |
# -*- coding: utf-8 -*-
# Copyright: 2020, Diez B. Roggisch, Berlin . All rights reserved.
import os
from setuptools import setup, find_packages
# Meta information
version = open("VERSION").read().strip()
dirname = os.path.dirname(__file__)
author = open("AUTHOR").read().strip()
# Save version and author to __meta__.py
path = os.path.join(dirname, "src", "laptimer", "__meta__.py")
data = f"""# Automatically created. Please do not edit.
__version__ = u"{version}"
__author__ = u"{author}"
"""
with open(path, "wb") as outf:
outf.write(data.encode())
setup(
# Basic info
name="deets-fpv-laptimer",
version=version,
author=author,
author_email="deets@web.de",
url="https://github.com/deets/deets-fpv-laptimer",
description="A FPV drone racing laptimer",
#long_description=open("README.rst").read(),
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX",
"Programming Language :: Python",
],
# Packages and depencies
package_dir={"": "src"},
packages=find_packages("src"),
install_requires=[
"tornado",
"rx",
],
extras_require={
"dev": [
],
},
# Data files
package_data={
# "python_boilerplate": [
# "templates/*.*",
# "templates/license/*.*",
# "templates/docs/*.*",
# "templates/package/*.*"
# ],
},
# Scripts
entry_points={
"console_scripts": [
"deets-fpv-laptimer = laptimer:main",
"deets-fpv-recorder = laptimer:recorder_main",
],
},
# Other configurations
zip_safe=False,
platforms="any",
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
25,
12131,
11,
6733,
89,
347,
13,
8041,
70,
25308,
11,
11307,
764,
1439,
2489,
10395,
13,
198,
198,
11748,
28686,
198,
198,
6738,
900,
37623,
10141,
1330,
90... | 2.32166 | 771 |
from idom.sample import run_sample_app
from idom.server.utils import find_available_port
| [
6738,
4686,
296,
13,
39873,
1330,
1057,
62,
39873,
62,
1324,
198,
6738,
4686,
296,
13,
15388,
13,
26791,
1330,
1064,
62,
15182,
62,
634,
628
] | 3.461538 | 26 |
# setup.py for coverage.
"""Code coverage measurement for Python
Coverage.py measures code coverage, typically during test execution. It uses
the code analysis tools and tracing hooks provided in the Python standard
library to determine which lines are executable, and which have been executed.
Coverage.py runs on Pythons 2.3 through 3.1.
Documentation is at `nedbatchelder.com <%s>`_. Code repository and issue
tracker are at `bitbucket.org <http://bitbucket.org/ned/coveragepy>`_.
New in 3.2: Branch coverage!
"""
# This file is used unchanged under all versions of Python, 2.x and 3.x.
classifiers = """
Environment :: Console
Intended Audience :: Developers
License :: OSI Approved :: BSD License
Operating System :: OS Independent
Programming Language :: Python
Topic :: Software Development :: Quality Assurance
Topic :: Software Development :: Testing
"""
# Pull in the tools we need.
import sys
# Distribute is a new fork of setuptools. It's supported on Py3.x, so we use
# it there, but stick with classic setuptools on Py2.x until Distribute becomes
# more accepted.
if sys.hexversion > 0x03000000:
from distribute_setup import use_setuptools
else:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup
from distutils.core import Extension
# Get or massage our metadata.
from coverage import __url__, __version__
doclines = (__doc__ % __url__).split('\n')
classifier_list = [c for c in classifiers.split("\n") if c]
if 'a' in __version__:
devstat = "3 - Alpha"
elif 'b' in __version__:
devstat = "4 - Beta"
else:
devstat = "5 - Production/Stable"
classifier_list.append("Development Status :: " + devstat)
# Set it up!
setup(
name = 'coverage',
version = __version__,
packages = [
'coverage',
],
package_data = {
'coverage': [
'htmlfiles/*.*',
]
},
ext_modules = [
Extension("coverage.tracer", sources=["coverage/tracer.c"])
],
entry_points = {
'console_scripts': [
'coverage = coverage:main',
]
},
# We need to get HTML assets from our htmlfiles dir.
zip_safe = False,
author = 'Ned Batchelder',
author_email = 'ned@nedbatchelder.com',
description = doclines[0],
long_description = '\n'.join(doclines[2:]),
keywords = 'code coverage testing',
license = 'BSD',
classifiers = classifier_list,
url = __url__,
)
| [
2,
9058,
13,
9078,
329,
5197,
13,
198,
198,
37811,
10669,
5197,
15558,
329,
11361,
198,
198,
7222,
1857,
13,
9078,
5260,
2438,
5197,
11,
6032,
1141,
1332,
9706,
13,
632,
3544,
198,
1169,
2438,
3781,
4899,
290,
35328,
26569,
2810,
287,... | 2.810872 | 883 |
from django.http import HttpResponse
from django.utils.html import escape
class PopcornMixin(object):
"""Cool popups when mixed with FormViews"""
template_name = 'popcorn/popcorn.html'
def form_valid(self, form):
"""
Plug-in the pre_render hook plus the special popup response
that closes the popup.
"""
new_obj = form.save()
return HttpResponse("""
<script type="text/javascript">
opener.dismissAddAnotherPopup(window, "%s", "%s");
$('.selectpicker').selectpicker('render');
</script>""" % (escape(new_obj._get_pk_val()), escape(new_obj))) | [
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
198,
6738,
42625,
14208,
13,
26791,
13,
6494,
1330,
6654,
628,
198,
4871,
8099,
20772,
35608,
259,
7,
15252,
2599,
198,
220,
220,
220,
37227,
34530,
1461,
4739,
618,
7668,
351,
5178... | 2.387681 | 276 |
#!/usr/bin/env python
"""Tests for signer_ecdsa."""
import unittest
from ct.proto import client_pb2
from ct.crypto.signing import signer_ecdsa
PRIVATE_KEY_PEM = ("-----BEGIN EC PRIVATE KEY-----\n"
"MHcCAQEEIFLw4uhuCruGKjrS9MoNeXFbypqZe+Sgh+EL1gnRn1d4oAoGCCqGSM49\n"
"AwEHoUQDQgAEmXg8sUUzwBYaWrRb+V0IopzQ6o3UyEJ04r5ZrRXGdpYM8K+hB0pX\n"
"rGRLI0eeWz+3skXrS0IO83AhA3GpRL6s6w==\n"
"-----END EC PRIVATE KEY-----\n")
if __name__ == '__main__':
unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
51,
3558,
329,
1051,
263,
62,
721,
9310,
64,
526,
15931,
198,
198,
11748,
555,
715,
395,
198,
198,
6738,
269,
83,
13,
1676,
1462,
1330,
5456,
62,
40842,
17,
198,
198,
6... | 1.712766 | 282 |
from copy import deepcopy
| [
6738,
4866,
1330,
2769,
30073,
628
] | 4.5 | 6 |
import json
from pathlib import Path
from typing import Union
import pytest
from mockito import mock, unstub, verifyStubbedInvocationsAreUsed, when
from ...core.entities.mod import Mod
from ...core.entities.mod_loaders import ModLoaders
from ...core.entities.sites import Site, Sites
from ...core.entities.version_info import Stabilities, VersionInfo
from ...gateways.downloader import Downloader
from .modrinth_api import ModrinthApi
testdata_dir = Path(__file__).parent.joinpath("testdata").joinpath("modrinth_api")
search_result_file = testdata_dir.joinpath("search_fabric-api.json")
versions_result_file = testdata_dir.joinpath("versions_fabric-api.json")
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
site_id = "P7dR8mSH"
@pytest.mark.parametrize(
"name,mod,expected",
[
(
"Find site id by slug",
mod(),
(site_id, "fabric-api"),
),
(
"Find site id by id",
mod(site_id=None),
(site_id, "fabric-api"),
),
(
"Find site id from filename",
mod(id="invalid", site_slug=None, file="fabric-api-1.14.4-1.2.0+v191024.jar"),
(site_id, "fabric-api"),
),
(
"Site id not found",
mod(id="invalid", site_slug=None),
None,
),
],
)
| [
11748,
33918,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
19720,
1330,
4479,
198,
198,
11748,
12972,
9288,
198,
6738,
15290,
10094,
1330,
15290,
11,
15014,
549,
11,
11767,
1273,
549,
3077,
818,
18893,
602,
8491,
38052,
11,
618,
198,
... | 2.194888 | 626 |
#!/usr/bin/env python3
# https://leetcode.com/problems/integer-to-english-words/
import unittest
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
3740,
1378,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
41433,
12,
1462,
12,
39126,
12,
10879,
14,
198,
198,
11748,
555,
715,
395,
628,
198
] | 2.631579 | 38 |
import re
from collections import defaultdict
from enum import unique, Enum
from pathlib import Path
from typing import Union
from modelci.types.models.common import Task, Framework, Engine
from modelci.types.trtis_objects import DataType, ModelInputFormat
def parse_path(path: Path):
"""Obtain filename, task, framework and engine from saved path.
"""
if re.match(r'^.*?[!/]*/[A-Za-z]+-[A-Za-z]+/[A-Za-z_]+/\d+$', str(path.with_suffix(''))):
filename = path.name
architecture = path.parent.parent.parent.stem
task = Task[path.parent.name]
info = path.parent.parent.name.split('-')
framework = Framework[info[0]]
engine = Engine[info[1]]
version = int(Path(filename).stem)
return {
'architecture': architecture,
'task': task,
'framework': framework,
'engine': engine,
'version': version,
'filename': filename,
'base_dir': path.parent
}
else:
raise ValueError('Incorrect model path pattern')
def parse_path_plain(path: Union[str, Path]):
"""Obtain filename, task, framework and engine from saved path. Use plain object as return.
"""
path = Path(path)
if re.match(r'^.*?[!/]*/[A-Za-z]+-[A-Za-z]+/[A-Za-z_]+/\d+$', str(path.with_suffix(''))):
filename = path.name
architecture = path.parent.parent.parent.stem
task = path.parent.name
info = path.parent.parent.name.split('-')
framework = info[0]
engine = info[1]
version = Path(filename).stem
return {
'architecture': architecture,
'task': task,
'framework': framework,
'engine': engine,
'version': version,
'filename': filename,
'base_dir': path.parent
}
else:
raise ValueError('Incorrect model path pattern')
def generate_path(model_name: str, task: Task, framework: Framework, engine: Engine,
version: int):
"""Generate saved path from model
"""
model_name = str(model_name)
if not isinstance(task, Task):
raise ValueError(f'Expecting framework type to be `Task`, but got {type(task)}')
if not isinstance(framework, Framework):
raise ValueError(f'Expecting framework type to be `Framework`, but got {type(framework)}')
if not isinstance(engine, Engine):
raise ValueError(f'Expecting engine type to be `Engine`, but got {type(engine)}')
return Path.home() / f'.modelci/{model_name}/' \
f'{framework.name}-{engine.name}' \
f'/{task.name}/{str(version)}'
@unique
class TensorRTPlatform(Enum):
"""TensorRT platform type for model configuration
"""
TENSORRT_PLAN = 0
TENSORFLOW_GRAPHDEF = 1
TENSORFLOW_SAVEDMODEL = 2
CAFFE2_NETDEF = 3
ONNXRUNTIME_ONNX = 4
PYTORCH_LIBTORCH = 5
CUSTOM = 6
TensorRTModelInputFormat = ModelInputFormat
| [
11748,
302,
198,
6738,
17268,
1330,
4277,
11600,
198,
6738,
33829,
1330,
3748,
11,
2039,
388,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
19720,
1330,
4479,
198,
198,
6738,
2746,
979,
13,
19199,
13,
27530,
13,
11321,
1330,
15941,
11,... | 2.316679 | 1,301 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
5366,
13,
26791,
1330,
4818,
8079,
62,
26791,
355,
4818,
8079,
198,
6738,
5366,
13,
9945,
1330,
20613,
198,
6738,
5366,
13,
85,
17,
1330,
10011,
2611,
44,
4254,
... | 3.113208 | 53 |
import torch
import numpy as np
import dataclasses
from functools import partial
from math import ceil
@dataclasses.dataclass
| [
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4818,
330,
28958,
198,
6738,
1257,
310,
10141,
1330,
13027,
198,
6738,
10688,
1330,
2906,
346,
628,
198,
31,
19608,
330,
28958,
13,
19608,
330,
31172,
628,
628,
628,
628,
628... | 3.255814 | 43 |
#!/usr/bin/env python3
import os
import sys
is_virtual_environment = sys.prefix != sys.base_prefix
is_conda_environment = os.path.exists(os.path.join(sys.prefix, "conda-meta", "history"))
if not (is_virtual_environment or is_conda_environment):
sys.exit(
"Sorry, you are not using neither a virtual environment"
" nor a conda environment; aborting operation."
)
for name, state in [
("Python virtual environment {}", is_virtual_environment),
("Conda environment {}", is_conda_environment),
]:
print(name.format("detected." if state else "not detected."))
print(f"Executable: {sys.executable}")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
28686,
198,
11748,
25064,
198,
198,
271,
62,
32844,
62,
38986,
796,
25064,
13,
40290,
14512,
25064,
13,
8692,
62,
40290,
198,
271,
62,
66,
13533,
62,
38986,
796,
28686,
13,
... | 2.930556 | 216 |
from flask import Blueprint
from flask_restx import Api
from .superheroes import api as superheroes_api
blueprint = Blueprint("apiV2", __name__)
api = Api(
blueprint,
title="Superhero Api",
version="2.0",
description="v2 of the Superhero Api",
# All API metadatas
)
api.add_namespace(superheroes_api)
| [
6738,
42903,
1330,
39932,
198,
6738,
42903,
62,
2118,
87,
1330,
5949,
72,
198,
198,
6738,
764,
16668,
11718,
274,
1330,
40391,
355,
40896,
62,
15042,
628,
198,
17585,
4798,
796,
39932,
7203,
15042,
53,
17,
1600,
11593,
3672,
834,
8,
1... | 2.754237 | 118 |
import numpy as np
from numpy.testing import assert_allclose
from diffhod.distributions import NFW
from halotools.empirical_models import NFWProfile
def test_nfw_mass_cdf():
"""
Compares CDF values to halotools
"""
model = NFWProfile()
scaled_radius = np.logspace(-2, 0, 100)
for c in [5, 10, 20]:
distr = NFW(concentration=c, Rvir=1)
y = model.cumulative_mass_PDF(scaled_radius, conc=c)
y_tf = distr.cdf(scaled_radius)
assert_allclose(y, y_tf.numpy(), rtol=1e-4)
def test_nfw_mc_positions():
"""
Compares samples with halotools and analytic density
"""
model = NFWProfile()
scaled_radius = np.logspace(-2, 0, 100)
for c in [5, 10, 20]:
distr = NFW(concentration=c, Rvir=1)
samples = model.mc_generate_nfw_radial_positions(
num_pts=int(1e6), conc=c, halo_radius=1)
samples_tf = distr.sample(1e6)
h = np.histogram(samples, 32, density=True, range=[0.01, 1])
h_tf = np.histogram(samples_tf, 32, density=True, range=[0.01, 1])
x = 0.5 * (h[1][:-1] + h[1][1:])
p = distr.prob(x)
# Comparing histograms
assert_allclose(h[0], h_tf[0], rtol=5e-2)
# Comparing to prob
assert_allclose(h_tf[0], p, rtol=5e-2)
| [
11748,
299,
32152,
355,
45941,
198,
6738,
299,
32152,
13,
33407,
1330,
6818,
62,
439,
19836,
198,
198,
6738,
814,
2065,
13,
17080,
2455,
507,
1330,
399,
24160,
198,
6738,
10284,
313,
10141,
13,
368,
4063,
605,
62,
27530,
1330,
399,
24... | 2.259398 | 532 |
import random
from time import sleep
lista = [0,1,2,3,4,5]
aleatorio = random.choice(lista)
print(20*'=')
print(" JOGO DA ADIVINHAÇÃO")
print(20*'=')
escolha = int(input("Digite um numero de 0 a 5: "))
print('PROCESSANDO...')
sleep(2)
while escolha != aleatorio:
escolha = int(input("Voce errou. Tente novamente um numero de 0 a 5: "))
print('O numero era {} e você escolheu correto.'.format(aleatorio))
print(20*'=') | [
11748,
4738,
201,
198,
6738,
640,
1330,
3993,
201,
198,
4868,
64,
796,
685,
15,
11,
16,
11,
17,
11,
18,
11,
19,
11,
20,
60,
201,
198,
1000,
1352,
952,
796,
4738,
13,
25541,
7,
4868,
64,
8,
201,
198,
4798,
7,
1238,
9,
6,
28,
... | 2.237113 | 194 |
from textwrap import dedent
import jinja2
import pytest
from jinja2 import Markup
from dmcontent.content_loader import ContentManifest
from dmcontent.html import text_to_html, to_html, to_summary_list_rows, to_summary_list_row
@pytest.fixture
@pytest.mark.parametrize(
"question, expected",
[
("myLowercaseText", "Text to be capitalised"),
("myLowercaseTextarea", "Text to be capitalised<br>with a line break."),
(
"myLowercaseList",
"""<ul class="govuk-list govuk-list--bullet">\n <li>Line 1</li>\n <li>Line 2</li>\n</ul>""",
),
("myCheckboxWithOneItem", "Check 2"),
],
)
@pytest.mark.parametrize(
"question, expected",
[
(
"myLinkText",
"""<a href="https://www.gov.uk" class="govuk-link" rel="external">https://www.gov.uk</a>""",
),
(
"myLinkTextarea",
"""Here is a URL:<br><br>"""
"""<a href="https://www.gov.uk" class="govuk-link" rel="external">https://www.gov.uk</a>""",
),
(
"myLinkList",
dedent(
"""\
<ul class="govuk-list govuk-list--bullet">
<li><a href="https://www.gov.uk" class="govuk-link" rel="external">https://www.gov.uk</a></li>
<li><a href="https://www.gov.uk/" class="govuk-link" rel="external">https://www.gov.uk/</a></li>
</ul>"""
),
),
],
)
link_attributes = 'class="govuk-link" rel="external noreferrer noopener" target="_blank"'
@pytest.mark.parametrize(
"question, expected",
[
(
"myLinkText",
f'<a href="https://www.gov.uk" {link_attributes}>https://www.gov.uk</a>'
),
(
"myLinkTextarea",
"""Here is a URL:<br><br>"""
f'<a href="https://www.gov.uk" {link_attributes}>https://www.gov.uk</a>'
),
(
"myLinkList",
dedent(
f"""\
<ul class="govuk-list govuk-list--bullet">
<li><a href="https://www.gov.uk" {link_attributes}>https://www.gov.uk</a></li>
<li><a href="https://www.gov.uk/" {link_attributes}>https://www.gov.uk/</a></li>
</ul>"""
),
),
],
)
@pytest.mark.parametrize(
"question, expected",
(
("myUpload", """<a class="govuk-link" href="#">Upload Question</a>"""),
("myScriptInjection", "<script>do something bad</script>"),
(
"mySimpleList",
dedent(
"""\
<ul class="govuk-list govuk-list--bullet">
<li>Hello</li>
<li>World</li>
</ul>"""
),
),
),
)
@pytest.mark.parametrize("autoescape", (True, False))
@pytest.mark.parametrize("filter_empty", [True, False])
| [
6738,
2420,
37150,
1330,
4648,
298,
198,
198,
11748,
474,
259,
6592,
17,
198,
11748,
12972,
9288,
198,
6738,
474,
259,
6592,
17,
1330,
2940,
929,
198,
198,
6738,
288,
76,
11299,
13,
11299,
62,
29356,
1330,
14041,
5124,
8409,
198,
6738... | 1.93531 | 1,484 |
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from server.models.base_model_ import Model
from server import util
class Point(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, x: float=None, y: float=None, z: float=None, r: float=None, lat: float=None, lng: float=None): # noqa: E501
"""Point - a model defined in Swagger
:param x: The x of this Point. # noqa: E501
:type x: float
:param y: The y of this Point. # noqa: E501
:type y: float
:param z: The z of this Point. # noqa: E501
:type z: float
:param r: The r of this Point. # noqa: E501
:type r: float
:param lat: The lat of this Point. # noqa: E501
:type lat: float
:param lng: The lng of this Point. # noqa: E501
:type lng: float
"""
self.swagger_types = {
'x': float,
'y': float,
'z': float,
'r': float,
'lat': float,
'lng': float
}
self.attribute_map = {
'x': 'x',
'y': 'y',
'z': 'z',
'r': 'r',
'lat': 'lat',
'lng': 'lng'
}
self._x = x
self._y = y
self._z = z
self._r = r
self._lat = lat
self._lng = lng
@classmethod
def from_dict(cls, dikt) -> 'Point':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The Point of this Point. # noqa: E501
:rtype: Point
"""
return util.deserialize_model(dikt, cls)
@property
def x(self) -> float:
"""Gets the x of this Point.
:return: The x of this Point.
:rtype: float
"""
return self._x
@x.setter
def x(self, x: float):
"""Sets the x of this Point.
:param x: The x of this Point.
:type x: float
"""
self._x = x
@property
def y(self) -> float:
"""Gets the y of this Point.
:return: The y of this Point.
:rtype: float
"""
return self._y
@y.setter
def y(self, y: float):
"""Sets the y of this Point.
:param y: The y of this Point.
:type y: float
"""
self._y = y
@property
def z(self) -> float:
"""Gets the z of this Point.
:return: The z of this Point.
:rtype: float
"""
return self._z
@z.setter
def z(self, z: float):
"""Sets the z of this Point.
:param z: The z of this Point.
:type z: float
"""
self._z = z
@property
def r(self) -> float:
"""Gets the r of this Point.
:return: The r of this Point.
:rtype: float
"""
return self._r
@r.setter
def r(self, r: float):
"""Sets the r of this Point.
:param r: The r of this Point.
:type r: float
"""
self._r = r
@property
def lat(self) -> float:
"""Gets the lat of this Point.
:return: The lat of this Point.
:rtype: float
"""
return self._lat
@lat.setter
def lat(self, lat: float):
"""Sets the lat of this Point.
:param lat: The lat of this Point.
:type lat: float
"""
self._lat = lat
@property
def lng(self) -> float:
"""Gets the lng of this Point.
:return: The lng of this Point.
:rtype: float
"""
return self._lng
@lng.setter
def lng(self, lng: float):
"""Sets the lng of this Point.
:param lng: The lng of this Point.
:type lng: float
"""
self._lng = lng
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
4818,
8079,
1330,
3128,
11,
4818,
8079,
220,
1303,
645,
20402,
25,
376,
21844,
198,
198,
6738,
19720,
1330,
7343,
11,
360,
713,
220... | 1.987487 | 1,998 |
from typing import Final
class FileResetter:
"""
File resetter.
"""
__DEFAULT_RUNTIME_AND_VERIFICATION_FILE: Final = f"""
# Default file, will be overwritten while running
import dataclasses
@dataclasses.dataclass
class AppState:
INT_0: int = 0
INT_1: int = 0
INT_2: int = 0
INT_3: int = 0
FLOAT_0: float = 0.0
FLOAT_1: float = 0.0
FLOAT_2: float = 0.0
FLOAT_3: float = 0.0
BOOL_0: bool = False
BOOL_1: bool = False
BOOL_2: bool = False
BOOL_3: bool = False
STR_0: str = ""
STR_1: str = ""
STR_2: str = ""
STR_3: str = ""
@dataclasses.dataclass
class PhysicalState:
GA_1_1_1: float
GA_1_1_2: float
GA_1_1_3: bool
GA_1_1_4: bool
""".strip()
def reset_conditions_file(self):
"""
Resets the conditions file.
"""
file = f"""
# Default file, will be overwritten while running
from .runtime_file import PhysicalState
def check_conditions(state: PhysicalState) -> bool:
return True
""".strip()
with open(self.__conditions_file_path, "w+") as output_file:
output_file.write(file)
def reset_verification_file(self):
"""
Resets the verification file.
"""
with open(self.__verification_file_path, "w") as output_file:
output_file.write(self.__DEFAULT_RUNTIME_AND_VERIFICATION_FILE)
def reset_runtime_file(self):
"""
Resets the runtime file.
"""
with open(self.__runtime_file_path, "w") as output_file:
output_file.write(self.__DEFAULT_RUNTIME_AND_VERIFICATION_FILE)
| [
6738,
19720,
1330,
8125,
628,
198,
4871,
9220,
4965,
40088,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
9220,
13259,
353,
13,
198,
220,
220,
220,
37227,
628,
220,
220,
220,
11593,
7206,
38865,
62,
49,
4944,
34694,
62,
6981,
6... | 2.200809 | 742 |
# run all of them in one go with slurm
# # # EXAMPLE RUN SINGLE MODEL
if __name__ == '__main__':
import os, subprocess
script_names = [ 'model_diffs_metrics_epscor_se.py',\
'model_variability_metrics_epscor_se_DECADAL_multidomain.py', \
'model_variability_metrics_epscor_se_DECADAL_multidomain_cru.py' ]
slurm_path = '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/slurm_files'
os.chdir( slurm_path )
for script in script_names:
command = 'ipython /workspace/UA/malindgren/repos/downscale/snap_scripts/tabular_test/' + script
fn = os.path.join( slurm_path, '_'.join(['slurm_run', script.replace('.py', '')]) + '.slurm' )
_ = run_model( fn, command ) | [
2,
1057,
477,
286,
606,
287,
530,
467,
351,
40066,
76,
198,
198,
2,
1303,
1303,
7788,
2390,
16437,
32494,
311,
2751,
2538,
19164,
3698,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
197,
198,
197,
11748,
28686,
... | 2.359322 | 295 |
# pylint: skip-file
# flake8: noqa
'''
TODO:
- Online needs that the model have such method
def get_data_with_date(self, date, **kwargs):
"""
Will be called in online module
need to return the data that used to predict the label (score) of stocks at date.
:param
date: pd.Timestamp
predict date
:return:
data: the input data that used to predict the label (score) of stocks at predict date.
"""
raise NotImplementedError("get_data_with_date for this model is not implemented.")
'''
| [
2,
279,
2645,
600,
25,
14267,
12,
7753,
198,
2,
781,
539,
23,
25,
645,
20402,
198,
198,
7061,
6,
198,
51,
3727,
46,
25,
198,
198,
12,
7467,
2476,
326,
262,
2746,
423,
884,
2446,
198,
220,
220,
220,
825,
651,
62,
7890,
62,
4480... | 2.441667 | 240 |
import nltk
from nltk import word_tokenize,pos_tag,sent_tokenize
if __name__ == '__main__':
main()
| [
11748,
299,
2528,
74,
198,
6738,
299,
2528,
74,
1330,
1573,
62,
30001,
1096,
11,
1930,
62,
12985,
11,
34086,
62,
30001,
1096,
628,
198,
220,
220,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220... | 2.391304 | 46 |
#!/usr/bin/python2.7
##############################################################################
# Global settings
##############################################################################
# Describes all the garage doors being monitored
GARAGE_DOORS = [
# {
# 'pin': 16,
# 'name': "Garage Door 1",
# 'alerts': [
# {
# 'state': 'open',
# 'time': 120,
# 'recipients': [ 'sms:+11112223333', 'sms:+14445556666' ]
# },
# {
# 'state': 'open',
# 'time': 600,
# 'recipients': [ 'sms:+11112223333', 'sms:+14445556666' ]
# }
# ]
# },
{
'pin': 7,
'name': "Example Garage Door",
'alerts': [
# {
# 'state': 'open',
# 'time': 120,
# 'recipients': [ 'sms:+11112223333', 'email:someone@example.com', 'twitter_dm:twitter_user', 'pushbullet:access_token', 'gcm', 'tweet', 'ifttt:garage_door' ]
# },
# {
# 'state': 'open',
# 'time': 600,
# 'recipients': [ 'sms:+11112223333', 'email:someone@example.com', 'twitter_dm:twitter_user', 'pushbullet:access_token', 'gcm', 'tweet', 'ifttt:garage_door' ]
# }
]
}
]
# All messages will be logged to stdout and this file
LOG_FILENAME = "/var/log/pi_garage_alert.log"
##############################################################################
# Email settings
##############################################################################
SMTP_SERVER = 'localhost'
SMTP_PORT = 25
# Leave SMTP_USER/SMTP_PASS empty for an unauthenticated local relay —
# presumably; confirm against the mail-sending code.
SMTP_USER = ''
SMTP_PASS = ''
# Sender shown in the From: header of alert emails.
EMAIL_FROM = 'Garage Door <user@example.com>'
EMAIL_PRIORITY = '1'
# 1 High, 3 Normal, 5 Low
##############################################################################
# Cisco Spark settings
##############################################################################
# Obtain your access token from https://developer.ciscospark.com, click
# on your avatar at the top right corner.
SPARK_ACCESSTOKEN = "" #put your access token here between the quotes.
##############################################################################
# Twitter settings
##############################################################################
# Follow the instructions on http://talkfast.org/2010/05/31/twitter-from-the-command-line-in-python-using-oauth/
# to obtain the necessary keys
TWITTER_CONSUMER_KEY = ''
TWITTER_CONSUMER_SECRET = ''
TWITTER_ACCESS_KEY = ''
TWITTER_ACCESS_SECRET = ''
##############################################################################
# Twilio settings
##############################################################################
# Sign up for a Twilio account at https://www.twilio.com/
# then these will be listed at the top of your Twilio dashboard
TWILIO_ACCOUNT = ''
TWILIO_TOKEN = ''
# SMS will be sent from this phone number
# (placeholder below — presumably E.164 format like '+1...'; confirm with Twilio)
TWILIO_PHONE_NUMBER = '+11234567890'
##############################################################################
# Jabber settings
##############################################################################
# Jabber ID and password that status updates will be sent from
# Leave this blank to disable Jabber support
JABBER_ID = ''
JABBER_PASSWORD = ''
# Uncomment to override the default server specified in DNS SRV records
#JABBER_SERVER = 'talk.google.com'
#JABBER_PORT = 5222
# List of Jabber IDs allowed to perform queries
# (empty list presumably disables all queries — confirm in the handler code)
JABBER_AUTHORIZED_IDS = []
##############################################################################
# Google Cloud Messaging settings
##############################################################################
GCM_KEY = ''
GCM_TOPIC = ''
##############################################################################
# IFTTT Maker Channel settings
# Create an applet using the "Maker" channel, pick a event name,
# and use the event name as a recipient of one of the alerts,
# e.g. 'recipients': [ 'ifft:garage_event' ]
#
# Get the key by going to https://ifttt.com/services/maker/settings.
# The key is the part of the URL after https://maker.ifttt.com/use/.
# Do not include https://maker.ifttt.com/use/ in IFTTT_KEY.
##############################################################################
IFTTT_KEY = ''
##############################################################################
# Slack settings
# Send messages to a team slack channel
# e.g. 'recipients': [ 'slack:<your channel ID>']
# where <your channel ID> is the name or ID of the slack channel you want to
# send to
#
# To use this functionality you will need to create a bot user to do the posting
# For information on how to create the bot user and get your API token go to:
# https://api.slack.com/bot-users
#
# Note that the bot user must be added to the channel you want to post
# notifications in
##############################################################################
SLACK_BOT_TOKEN = ''
| [
2,
48443,
14629,
14,
8800,
14,
29412,
17,
13,
22,
198,
198,
29113,
29113,
7804,
4242,
2235,
198,
2,
8060,
6460,
198,
29113,
29113,
7804,
4242,
2235,
198,
198,
2,
39373,
22090,
477,
262,
15591,
8215,
852,
20738,
198,
38,
1503,
11879,
... | 3.077586 | 1,624 |
import requirements as rp
from great_expectations.data_context.util import file_relative_path
def _parse_requirements_set(filename):
    """Parse a pip requirements file into a set of 'name<op>version' pins.

    *filename* is resolved relative to this test module.  Each parsed line
    contributes one string built from the package name and its first spec
    tuple (e.g. 'numpy==1.21.0').
    """
    with open(file_relative_path(__file__, filename)) as req:
        return {f'{line.name}{"".join(line.specs[0])}' for line in rp.parse(req)}


def test_requirements_files():
    """requirements.txt should be a subset of requirements-dev.txt.

    Additionally checks that the requirements-dev-*.txt extras are pairwise
    disjoint, and that requirements.txt plus the extras together account for
    every pin in requirements-dev.txt.
    """
    requirements = _parse_requirements_set("../requirements.txt")
    requirements_dev = _parse_requirements_set("../requirements-dev.txt")
    # Order of the extras is irrelevant; they are only compared pairwise and
    # unioned below.
    extras = [
        _parse_requirements_set(f"../requirements-dev-{name}.txt")
        for name in ("util", "spark", "sqlalchemy", "test", "build")
    ]

    # Base requirements must all appear in the full dev requirements.
    assert requirements <= requirements_dev

    # No pin may appear in more than one dev-extra file.
    for i, left in enumerate(extras):
        for right in extras[i + 1:]:
            assert left.intersection(right) == set()

    # Every dev pin is covered by the base file or exactly one of the extras.
    assert requirements_dev - set().union(requirements, *extras) == set()
| [
11748,
5359,
355,
374,
79,
198,
198,
6738,
1049,
62,
1069,
806,
602,
13,
7890,
62,
22866,
13,
22602,
1330,
2393,
62,
43762,
62,
6978,
628,
198,
4299,
1332,
62,
8897,
18883,
62,
16624,
33529,
198,
220,
220,
220,
37227,
8897,
18883,
1... | 2.410714 | 1,176 |
import GPsim
import numpy as np
# Script entry point: run main() only when executed directly, not on import.
# NOTE(review): main() is not defined or imported in this module as shown —
# presumably provided elsewhere (e.g. via GPsim); confirm, otherwise this
# raises NameError at runtime.
if __name__ == '__main__':
	main()
11748,
402,
12016,
320,
198,
11748,
299,
32152,
355,
45941,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
197,
12417,
3419
] | 2.576923 | 26 |
from setuptools import setup, find_packages
from os import path
# Absolute path of the directory containing this setup.py (currently unused
# below — presumably kept for README/metadata lookups; confirm).
here = path.abspath(path.dirname(__file__))

# Trove classifiers describing the supported Python versions and audience.
CLASSIFIERS = [
    'Development Status :: 4 - Beta',
    'License :: OSI Approved :: MIT License',
    'Programming Language :: Python',
    'Programming Language :: Python :: 3 :: Only',
    'Programming Language :: Python :: 3.7',
    'Programming Language :: Python :: 3.8',
    'Operating System :: OS Independent',
    'Intended Audience :: Developers',
    'Intended Audience :: Science/Research',
    'Topic :: Utilities'
]

# Package metadata for the 'filefinder' distribution (src/ layout).
# NOTE(review): get_version() and get_long_description() are not defined or
# imported in this excerpt — presumably defined earlier in the file; confirm.
setup(name='filefinder',
      version=get_version('src/filefinder/__init__.py'),
      description="Find files using a simple syntax.",
      long_description=get_long_description('README.md'),
      long_description_content_type='text/markdown',
      keywords='find files filename regular expression regex xarray',
      classifiers=CLASSIFIERS,
      url='https://github.com/Descanonge/filefinder',
      project_urls={
          'Source': 'https://github.com/Descanonge/filefinder',
          'Documentation': 'https://filefinder.readthedocs.io'
      },
      author='Clément Haëck',
      author_email='clement.haeck@posteo.net',
      python_requires='>=3.7',
      package_dir={'': 'src'},
      packages=find_packages(where='src'),
      )
| [
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
6738,
28686,
1330,
3108,
198,
198,
1456,
796,
3108,
13,
397,
2777,
776,
7,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
4008,
628,
628,
198,
31631,
5064,
40,
4877,
796... | 2.709544 | 482 |
#addition a 2 number with a command line program
import sys
n=len(sys.argv)
print("total arguments passed:",n)
print("\nName of python script:",sys.argv[0])
print("\nArguments passed:" , end= "")
for i in range(1,n):
print(sys.argv[i], end = " ")
sum=0
for i in range(1,n):
sum+= int(sys.argv[i])
print("\n\nResult:" ,sum)
| [
2,
2860,
653,
257,
362,
1271,
351,
257,
3141,
1627,
1430,
201,
198,
201,
198,
11748,
25064,
201,
198,
77,
28,
11925,
7,
17597,
13,
853,
85,
8,
201,
198,
201,
198,
4798,
7203,
23350,
7159,
3804,
25,
1600,
77,
8,
201,
198,
201,
19... | 2.153846 | 169 |
"""
Example used in the readme. In this example a Bell state is made
"""
import sys
import os
from pprint import pprint
# so we need a relative position from this file path.
# TODO: Relative imports for intra-package imports are highly discouraged.
# http://stackoverflow.com/a/7506006
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
from qiskit import QuantumProgram, QISKitError, available_backends, register
try:
import Qconfig
register(Qconfig.APItoken, Qconfig.config["url"], verify=False,
hub=Qconfig.config["hub"],
group=Qconfig.config["group"],
project=Qconfig.config["project"])
except:
offline = True
print("""WARNING: There's no connection with IBMQuantumExperience servers.
cannot test I/O intesive tasks, will only test CPU intensive tasks
running the jobs in the local simulator""")
# Running this block before registering quietly returns a list of local-only simulators
#
print("The backends available for use are:")
backends = available_backends()
pprint(backends)
print("\n")
if 'CK_IBM_BACKEND' in os.environ:
backend = os.environ['CK_IBM_BACKEND']
if backend not in backends:
print("Your choice '%s' was not available, so picking a random one for you..." % backend)
backend = backends[0]
print("Picked '%s' backend!" % backend)
try:
# Create a QuantumProgram object instance.
Q_program = QuantumProgram()
# Create a Quantum Register called "qr" with 2 qubits.
qr = Q_program.create_quantum_register("qr", 2)
# Create a Classical Register called "cr" with 2 bits.
cr = Q_program.create_classical_register("cr", 2)
# Create a Quantum Circuit called "qc". involving the Quantum Register "qr"
# and the Classical Register "cr".
qc = Q_program.create_circuit("bell", [qr], [cr])
# Add the H gate in the Qubit 0, putting this qubit in superposition.
qc.h(qr[0])
# Add the CX gate on control qubit 0 and target qubit 1, putting
# the qubits in a Bell state
qc.cx(qr[0], qr[1])
# Add a Measure gate to see the state.
qc.measure(qr, cr)
# Compile and execute the Quantum Program in the local_qasm_simulator.
result = Q_program.execute(["bell"], backend=backend, shots=1024, seed=1)
# Show the results.
print(result)
print(result.get_data("bell"))
except QISKitError as ex:
print('There was an error in the circuit!. Error = {}'.format(ex))
| [
37811,
198,
16281,
973,
287,
262,
1100,
1326,
13,
554,
428,
1672,
257,
7459,
1181,
318,
925,
198,
37811,
198,
11748,
25064,
198,
11748,
28686,
198,
6738,
279,
4798,
1330,
279,
4798,
198,
2,
523,
356,
761,
257,
3585,
2292,
422,
428,
... | 2.79955 | 888 |
import os
import re
import sys
from os import path
# Make the repository root importable so the 'finetuner' package version can
# be read below.
sys.path.insert(0, path.abspath('..'))

# -- Project information -----------------------------------------------------
project = 'Finetuner'
# URL-safe slug: lowercase with runs of non-word chars collapsed to '-'.
slug = re.sub(r'\W+', '-', project.lower())
author = 'Jina AI'
copyright = 'Jina AI Limited. All rights reserved.'
source_suffix = ['.rst', '.md']
master_doc = 'index'
language = 'en'
repo_dir = '../'

# Resolve the documented version: prefer the JINA_VERSION env var, otherwise
# read __version__ out of the package's __init__.py.
try:
    if 'JINA_VERSION' not in os.environ:
        pkg_name = 'finetuner'
        libinfo_py = path.join(repo_dir, pkg_name, '__init__.py')
        libinfo_content = open(libinfo_py, 'r').readlines()
        version_line = [
            l.strip() for l in libinfo_content if l.startswith('__version__')
        ][0]
        # Executes a line of the package's own source (trusted input) to bind
        # __version__ in this module.
        exec(version_line)
    else:
        __version__ = os.environ['JINA_VERSION']
except FileNotFoundError:
    __version__ = '0.0.0'
version = __version__
release = __version__

# -- General configuration ---------------------------------------------------
templates_path = ['_templates']
exclude_patterns = [
    '_build',
    'Thumbs.db',
    '.DS_Store',
    'tests',
    'page_templates',
    '.github',
]
pygments_style = 'rainbow_dash'

# -- HTML output (furo theme) ------------------------------------------------
html_theme = 'furo'
base_url = '/'
html_baseurl = 'https://finetuner.jina.ai'
sitemap_url_scheme = '{link}'
sitemap_locales = [None]
sitemap_filename = "sitemap.xml"
html_theme_options = {
    'light_logo': 'logo-light.svg',
    'dark_logo': 'logo-dark.svg',
    "sidebar_hide_name": True,
    "light_css_variables": {
        "color-brand-primary": "#009191",
        "color-brand-content": "#009191",
    },
    "dark_css_variables": {
        "color-brand-primary": "#FBCB67",
        "color-brand-content": "#FBCB67",
    },
}
html_static_path = ['_static']
html_extra_path = ['html_extra']
html_css_files = [
    'main.css',
    'docbot.css',
    'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0-beta2/css/all.min.css',
]
html_js_files = []
htmlhelp_basename = slug
html_show_sourcelink = False
html_favicon = '_static/favicon.png'

# -- Non-HTML builders (LaTeX / man / texinfo / epub) ------------------------
latex_documents = [(master_doc, f'{slug}.tex', project, author, 'manual')]
man_pages = [(master_doc, slug, project, [author], 1)]
texinfo_documents = [
    (master_doc, slug, project, author, slug, project, 'Miscellaneous')
]
epub_title = project
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
extensions = [
    'sphinx.ext.autodoc',
    'sphinx_autodoc_typehints',
    'sphinx.ext.viewcode',
    'sphinx.ext.coverage',
    'sphinxcontrib.apidoc',
    'sphinxarg.ext',
    'sphinx_markdown_tables',
    'sphinx_copybutton',
    'sphinx_sitemap',
    'sphinx.ext.intersphinx',
    'sphinxext.opengraph',
    'notfound.extension',
    'myst_parser',
    'sphinx_design',
    'sphinx_inline_tabs',
    'sphinx_multiversion',
]
myst_enable_extensions = ['colon_fence', 'dollarmath']
myst_dmath_double_inline = True
# -- Custom 404 page
# sphinx-notfound-page
# https://github.com/readthedocs/sphinx-notfound-page
notfound_context = {
    'title': 'Page Not Found',
    'body': '''
<h1>Page Not Found</h1>
<p>Oops, we couldn't find that page. </p>
<p>You can try using the search box or check our menu on the left hand side of this page.</p>
<p>If neither of those options work, please create a Github issue ticket <a href="https://github.com/jina-ai/finetuner/">here</a>, and one of our team will respond.</p>
''',
}
notfound_no_urls_prefix = True

# -- sphinxcontrib-apidoc: auto-generate API pages from the package source ---
apidoc_module_dir = repo_dir
apidoc_output_dir = 'api'
apidoc_excluded_paths = ['tests', 'legacy', 'hub', 'toy*', 'setup.py']
apidoc_separate_modules = True
apidoc_extra_args = ['-t', 'template/']
autodoc_member_order = 'bysource'
# Heavy/optional deps are mocked so autodoc can import modules without them.
autodoc_mock_imports = [
    'argparse',
    'numpy',
    'np',
    'tensorflow',
    'torch',
    'scipy',
    'keras',
    'paddle',
]
autoclass_content = 'both'
set_type_checking_flag = False
html_last_updated_fmt = ''
nitpicky = True
nitpick_ignore = [('py:class', 'type')]
linkcheck_ignore = [
    # Avoid link check on local uri
    'http://0.0.0.0:*',
    'pods/encode.yml',
    'https://github.com/jina-ai/jina/commit/*',
    '.github/*',
    'extra-requirements.txt',
    # NOTE(review): missing comma below? the two adjacent string literals are
    # implicitly concatenated into the single entry
    # 'fastentrypoints.py../../101' — confirm intent.
    'fastentrypoints.py' '../../101',
    '../../102',
    'http://www.twinsun.com/tz/tz-link.htm', # Broken link from pytz library
    'https://urllib3.readthedocs.io/en/latest/contrib.html#google-app-engine', # Broken link from urllib3 library
    'https://linuxize.com/post/how-to-add-swap-space-on-ubuntu-20-04/',
    # This link works but gets 403 error on linkcheck
]
linkcheck_timeout = 20
linkcheck_retries = 2
linkcheck_anchors = False

# -- OpenGraph / social-card metadata ----------------------------------------
ogp_site_url = 'https://finetuner.jina.ai/'
ogp_image = 'https://finetuner.jina.ai/_static/banner.png'
ogp_use_first_image = False
ogp_description_length = 300
ogp_type = 'website'
ogp_site_name = f'Finetuner {os.environ.get("SPHINX_MULTIVERSION_VERSION", version)} Documentation'
ogp_custom_meta_tags = [
    '<meta name="twitter:card" content="summary_large_image">',
    '<meta name="twitter:site" content="@JinaAI_">',
    '<meta name="twitter:creator" content="@JinaAI_">',
    '<meta name="description" content="Finetuner allows one to finetune any deep neural network for better embedding on search tasks.">',
    '<meta property="og:description" content="Finetuner allows one to finetune any deep neural network for better embedding on search tasks.">',
    '''
    <!-- Global site tag (gtag.js) - Google Analytics -->
    <script async src="https://www.googletagmanager.com/gtag/js?id=G-1ESRNDCK35"></script>
    <script>
      window.dataLayer = window.dataLayer || [];
      function gtag(){dataLayer.push(arguments);}
      gtag('js', new Date());
      gtag('config', 'G-1ESRNDCK35');
    </script>
    <!-- Place this tag in your head or just before your close body tag. -->
    <script async defer src="https://buttons.github.io/buttons.js"></script>
    <script async defer src="https://cdn.jsdelivr.net/npm/qabot@0.2"></script>
    ''',
]
html_context = {
    'latest_finetuner_version': os.environ.get('LATEST_FINETUNER_VERSION', 'main')
}

# -- sphinx-multiversion: which tags/branches get built ----------------------
# NOTE(review): smv_config() is not defined or imported in this excerpt —
# presumably a helper defined earlier in the file; confirm.
smv_tag_whitelist = smv_config(os.environ.get('SMV_TAG_WHITELIST', 'v2.4.7'))
smv_branch_whitelist = smv_config(os.environ.get('SMV_BRANCH_WHITELIST', 'main'))
smv_remote_whitelist = None
| [
11748,
28686,
198,
11748,
302,
198,
11748,
25064,
198,
6738,
28686,
1330,
3108,
198,
198,
17597,
13,
6978,
13,
28463,
7,
15,
11,
3108,
13,
397,
2777,
776,
10786,
492,
6,
4008,
198,
198,
16302,
796,
705,
18467,
316,
38886,
6,
198,
66... | 2.418 | 2,500 |
# module2.py
| [
2,
8265,
17,
13,
9078,
198
] | 2.166667 | 6 |