content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
BASE_GRAPH_URL = 'https://graph.facebook.com'
BASE_AUTHORIZATION_URL = 'https://www.facebook.com'
GRAPH_VERSION_V2_5 = 'v2.5'
GRAPH_VERSION_V2_6 = 'v2.6'
GRAPH_VERSION_V2_7 = 'v2.7'
GRAPH_VERSION_V2_8 = 'v2.8'
GRAPH_VERSION_V2_9 = 'v2.9'
GRAPH_VERSION_V2_10 = 'v2.10'
GRAPH_VERSION_V2_11 = 'v2.11'
GRAPH_VERSION_V2_12 = 'v2.12'
ISO_8601_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S%z'
DEFAULT_GRAPH_VERSION = GRAPH_VERSION_V2_12
DEFAULT_REQUEST_TIMEOUT = 60
METHOD_DELETE = 'DELETE'
METHOD_GET = 'GET'
METHOD_POST = 'POST'
METHOD_PUT = 'PUT'
| [
33,
11159,
62,
10761,
31300,
62,
21886,
796,
705,
5450,
1378,
34960,
13,
19024,
13,
785,
6,
198,
33,
11159,
62,
32,
24318,
1581,
14887,
6234,
62,
21886,
796,
705,
5450,
1378,
2503,
13,
19024,
13,
785,
6,
198,
198,
10761,
31300,
62,
... | 1.927798 | 277 |
import warnings
warnings.filterwarnings("ignore")
import os
import json
import easydict
import time
import torch
from misc.dataloader import DataLoader
import torch.optim as optim
import misc.datasets as datasets
import ctrlfnet_model_dtp as ctrlf
from misc.h5_dataset import H5Dataset
from train_opts import parse_args
from evaluate_dtp import mAP
opt = parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.gpu)
torch.backends.cudnn.benchmark = False
opt.dtp_train = 1
if opt.h5:
trainset = H5Dataset(opt, split=0)
valset = H5Dataset(opt, split=1)
testset = H5Dataset(opt, split=2)
else:
if opt.dataset.find('iiit_hws') > -1:
trainset = datasets.SegmentedDataset(opt, 'train')
else:
trainset = datasets.Dataset(opt, 'train')
valset = datasets.Dataset(opt, 'val')
testset = datasets.Dataset(opt, 'test')
sampler=datasets.RandomSampler(trainset, opt.max_iters)
trainloader = DataLoader(trainset, batch_size=1, sampler=sampler, num_workers=opt.num_workers)
valloader = DataLoader(valset, batch_size=1, shuffle=False, num_workers=0)
testloader = DataLoader(testset, batch_size=1, shuffle=False, num_workers=0)
torch.set_default_tensor_type('torch.FloatTensor')
torch.manual_seed(opt.seed)
torch.cuda.manual_seed(opt.seed)
torch.cuda.device(opt.gpu)
# initialize the Ctrl-F-Net model object
model = ctrlf.CtrlFNet(opt)
show = not opt.quiet
if show:
print "number of parameters in ctrlfnet:", model.num_parameters()
model.load_weights(opt.weights)
model.cuda()
learning_rate = float(opt.learning_rate)
optimizer = optim.Adam(model.parameters(), learning_rate, (opt.beta1, opt.beta2),opt.epsilon, opt.weight_decay)
keys = ['e', 'eo', 'total_loss']
running_losses = {k:0.0 for k in keys}
it = 0
args = easydict.EasyDict()
args.nms_overlap = opt.query_nms_overlap
args.score_threshold = opt.score_threshold
args.num_queries = -1
args.score_nms_overlap = opt.score_nms_overlap
args.overlap_threshold = 0.5
args.gpu = True
args.use_external_proposals = int(opt.external_proposals)
args.max_proposals = opt.max_proposals
args.rpn_nms_thresh = opt.test_rpn_nms_thresh
args.num_workers = 6
args.numpy = False
trainlog = ''
start = time.time()
loss_history, mAPs = [], []
if opt.eval_first_iteration:
log, rf, rt = mAP(model, valloader, args, it)
trainlog += log
if show:
print(log)
best_score = (rt.mAP_qbe_50 + rt.mAP_qbs_50) / 2
mAPs.append((it, [rt.mAP_qbe_50, rt.mAP_qbs_50]))
else:
best_score = 0.0
if opt.weights:
opt.save_id += '_pretrained'
if not os.path.exists('checkpoints/ctrlfnet_mini/'):
os.makedirs('checkpoints/ctrlfnet_mini/')
oargs = ('ctrlfnet_mini', opt.embedding, opt.dataset, opt.fold, opt.save_id)
out_name = 'checkpoints/%s/%s_%s_fold%d_%s_best_val.pt' % oargs
for data in trainloader:
optimizer.zero_grad()
try:
losses = model.forward_backward(data, True)
except ValueError:
print 'value error on iteration', it
continue
optimizer.step()
# print statistics
running_losses = {k:v + losses[k] for k, v in running_losses.iteritems()}
if it % opt.print_every == opt.print_every - 1:
running_losses = {k:v / opt.print_every for k, v in running_losses.iteritems()}
loss_string = "[iter %5d] " % (it + 1)
for k, v in running_losses.iteritems():
loss_string += "%s: %.5f | " % (k , v)
trainlog += loss_string
if show:
print loss_string
loss_history.append((it, running_losses.values()))
running_losses = {k:0.0 for k, v in running_losses.iteritems()}
if it % opt.eval_every == opt.eval_every - 1:
log, rf, rt = mAP(model, valloader, args, it)
trainlog += log
if show:
print(log)
score = (rt.mAP_qbe_50 + rt.mAP_qbs_50) / 2
mAPs.append((it, [rt.mAP_qbe_50, rt.mAP_qbs_50]))
if score > best_score:
best_score = score
torch.save(model.state_dict(), out_name)
if show:
print 'saving ' + out_name
d = {}
d['opt'] = opt
d['loss_history'] = loss_history
d['map_history'] = mAPs
d['trainlog'] = trainlog
with open(out_name + '.json', 'w') as f:
json.dump(d, f)
if it % opt.reduce_lr_every == opt.reduce_lr_every - 1:
learning_rate /= 10.0
optimizer.param_groups[0]['lr'] = learning_rate
it += 1
if show:
if opt.val_dataset.find('iam') == -1:
model.load_weights(out_name)
log, _, rt = mAP(model, testloader, args, it)
print(log)
d = {}
d['opt'] = opt
d['loss_history'] = loss_history
d['map_history'] = mAPs
d['trainlog'] = trainlog
d['testlog'] = log
with open(out_name + '.json', 'w') as f:
json.dump(d, f)
duration = time.time() - start
print "training model took %0.2f hours" % (duration / 3600)
| [
11748,
14601,
198,
40539,
654,
13,
24455,
40539,
654,
7203,
46430,
4943,
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
2562,
11600,
198,
11748,
640,
198,
11748,
28034,
198,
6738,
12747,
13,
67,
10254,
1170,
263,
1330,
6060,
17401,
198... | 2.198325 | 2,269 |
from __future__ import division
from time import time
import matplotlib
from matplotlib import pyplot as plt
import numpy as np
from ctypes import *
# Linux users: change the filename below to './mandelbrot.so'
mandel_dll = CDLL('./mandelbrot.dll')
mandel_c = mandel_dll.launch_mandelbrot
mandel_c.argtypes = [POINTER(c_float), POINTER(c_float), c_int, c_float, c_int]
if __name__ == '__main__':
t1 = time()
mandel = mandelbrot(512,-2,2,256, 2)
t2 = time()
mandel_time = t2 - t1
print 'It took %s seconds to calculate the Mandelbrot graph.' % mandel_time
plt.figure(1)
plt.imshow(mandel, extent=(-2, 2, -2, 2))
plt.show()
| [
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
640,
1330,
640,
198,
11748,
2603,
29487,
8019,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
269,
19199,
1330,
1635,
198,
1... | 2.437956 | 274 |
import json
import igorCA
import os
import sys
import tempfile
import urllib
DEBUG=False
| [
11748,
33918,
198,
11748,
220,
36274,
8141,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
20218,
7753,
198,
11748,
2956,
297,
571,
198,
198,
30531,
28,
25101,
628,
220,
220,
220,
220,
198
] | 2.909091 | 33 |
#!/usr/bin/env python3
""" Annotate BLAST output """
import argparse
import pandas as pd
import os
from typing import NamedTuple, TextIO
class Args(NamedTuple):
""" Command-line arguments """
hits: TextIO
annotations: TextIO
outfile: TextIO
delimiter: str
pctid: float
# --------------------------------------------------
def get_args():
""" Get command-line arguments """
parser = argparse.ArgumentParser(
description='Annotate BLAST output',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-b',
'--blasthits',
metavar='FILE',
type=argparse.FileType('rt'),
help='BLAST -outfmt 6',
required=True)
parser.add_argument('-a',
'--annotations',
help='Annotations file',
metavar='FILE',
type=argparse.FileType('rt'),
required=True)
parser.add_argument('-o',
'--outfile',
help='Output file',
metavar='FILE',
type=argparse.FileType('wt'),
default='out.csv')
parser.add_argument('-d',
'--delimiter',
help='Output field delimiter',
metavar='DELIM',
type=str,
default='')
parser.add_argument('-p',
'--pctid',
help='Minimum percent identity',
metavar='PCTID',
type=float,
default=0.)
args = parser.parse_args()
return Args(hits=args.blasthits,
annotations=args.annotations,
outfile=args.outfile,
delimiter=args.delimiter or guess_delimiter(args.outfile.name),
pctid=args.pctid)
# --------------------------------------------------
def main():
""" Make a jazz noise here """
args = get_args()
annots = pd.read_csv(args.annotations, sep=',')
hits = pd.read_csv(args.hits,
sep=',',
names=[
'qseqid', 'sseqid', 'pident', 'length', 'mismatch',
'gapopen', 'qstart', 'qend', 'sstart', 'send',
'evalue', 'bitscore'
])
data = []
for _, hit in hits[hits['pident'] >= args.pctid].iterrows():
meta = annots[annots['seq_id'] == hit['qseqid']]
if not meta.empty:
for _, seq in meta.iterrows():
data.append({
'qseqid': hit['qseqid'],
'pident': hit['pident'],
'depth': seq['depth'],
'lat_lon': seq['lat_lon'],
})
df = pd.DataFrame.from_records(data=data)
df.to_csv(args.outfile, index=False, sep=args.delimiter)
print(f'Exported {len(data):,} to "{args.outfile.name}".')
# --------------------------------------------------
def guess_delimiter(filename: str) -> str:
""" Guess the field separator from the file extension """
ext = os.path.splitext(filename)[1]
return ',' if ext == '.csv' else '\t'
# --------------------------------------------------
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
1052,
1662,
378,
9878,
11262,
5072,
37227,
198,
198,
11748,
1822,
29572,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
28686,
198,
6738,
19720,
1330,
34441,
51,
29291,
11,... | 1.885051 | 1,853 |
"""
This file provides a wrapper for the convert objects and functions.
Users will not need to know the names of the specific objects they need to create.
"""
import os
from echopype.convert.azfp import ConvertAZFP
from echopype.convert.ek60 import ConvertEK60
| [
37811,
198,
1212,
2393,
3769,
257,
29908,
329,
262,
10385,
5563,
290,
5499,
13,
198,
14490,
481,
407,
761,
284,
760,
262,
3891,
286,
262,
2176,
5563,
484,
761,
284,
2251,
13,
198,
37811,
198,
11748,
28686,
198,
6738,
304,
354,
404,
... | 3.690141 | 71 |
import pathlib
def does_file_exist(path_to_file: str) -> bool:
"""
Performs a simple exists check on a given file path
"""
path = pathlib.Path(path_to_file)
return path.exists()
| [
11748,
3108,
8019,
628,
198,
4299,
857,
62,
7753,
62,
38476,
7,
6978,
62,
1462,
62,
7753,
25,
965,
8,
4613,
20512,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
2448,
23914,
257,
2829,
7160,
2198,
319,
257,
1813,
2393,
3108,
... | 2.631579 | 76 |
import os
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError
token=os.environ['token']
channel=os.environ['channel']
header=os.environ['header']
pre_message=os.environ['pre_message']
message=os.environ['message']
post_message=os.environ['post_message']
actor=os.environ['actor']
release=os.environ['release']
commit_id=os.environ['commit_id']
client = WebClient(token)
try:
# filepath="./tmp.txt"
# response = client.files_upload(channels='#random', file=filepath)
blocks=[
{
"type": "header",
"text": {
"type": "plain_text",
"text": header
}
},
{
"type": "divider"
},
{
"type": "section",
"text": {
"text": pre_message,
"type": "mrkdwn"
}
},
{
"type": "section",
"text": {
"text": message,
"type": "mrkdwn"
},
"fields": [
{
"type": "mrkdwn",
"text": "*Release*"
},
{
"type": "mrkdwn",
"text": "*Commit*"
},
{
"type": "plain_text",
"text": release
},
{
"type": "plain_text",
"text": commit_id
}
]
},
{
"type": "section",
"text": {
"text": post_message,
"type": "mrkdwn"
}
},
{
"type": "section",
"text": {
"text": actor,
"type": "mrkdwn"
}
},
]
response = client.chat_postMessage(channel=channel, blocks=blocks)
# assert response["message"]["text"] == "Hello world!"
except SlackApiError as e:
# You will get a SlackApiError if "ok" is False
assert e.response["ok"] is False
assert e.response["error"] # str like 'invalid_auth', 'channel_not_found'
print(f"Got an error: {e.response['error']}") | [
11748,
28686,
198,
6738,
30740,
62,
21282,
74,
1330,
5313,
11792,
198,
6738,
30740,
62,
21282,
74,
13,
48277,
1330,
36256,
32,
14415,
12331,
198,
198,
30001,
28,
418,
13,
268,
2268,
17816,
30001,
20520,
198,
17620,
28,
418,
13,
268,
2... | 1.712086 | 1,299 |
from odoo.tests.common import TransactionCase
| [
6738,
16298,
2238,
13,
41989,
13,
11321,
1330,
45389,
20448,
628
] | 4.272727 | 11 |
# -*- coding: utf-8 -*-
"""## Task-specific heads
Different heads that expect their inputs to be flattened.
"""
from collections import OrderedDict
from typing import List, Optional
import torch.nn as nn
import torch.nn.functional as F
from ophthalmology.layers import activations
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
2235,
15941,
12,
11423,
6665,
201,
198,
201,
198,
40341,
6665,
326,
1607,
511,
17311,
284,
307,
45096,
13,
201,
198,
37811,
201,
198,
201,
198,
6738,
17268,
... | 3.081633 | 98 |
import argparse
import numpy as np
import os
from Bio.PDB import PDBParser, Selection
parser = PDBParser()
# RADII for atoms in explicit case.
RADII = {}
RADII["N"] = "1.540000"
RADII["O"] = "1.400000"
RADII["C"] = "1.740000"
RADII["H"] = "1.200000"
RADII["S"] = "1.800000"
RADII["P"] = "1.800000"
RADII["Z"] = "1.39"
RADII["X"] = "0.770000" ## RADII of CB or CA in disembodied case.
# This polar hydrogen's names correspond to that of the program Reduce.
POLAR_HYDROGENS = {}
POLAR_HYDROGENS["ALA"] = ["H"]
POLAR_HYDROGENS["GLY"] = ["H"]
POLAR_HYDROGENS["SER"] = ["H", "HG"]
POLAR_HYDROGENS["THR"] = ["H", "HG1"]
POLAR_HYDROGENS["LEU"] = ["H"]
POLAR_HYDROGENS["ILE"] = ["H"]
POLAR_HYDROGENS["VAL"] = ["H"]
POLAR_HYDROGENS["ASN"] = ["H", "HD21", "HD22"]
POLAR_HYDROGENS["GLN"] = ["H", "HE21", "HE22"]
POLAR_HYDROGENS["ARG"] = ["H", "HH11", "HH12", "HH21", "HH22", "HE"]
POLAR_HYDROGENS["HIS"] = ["H", "HD1", "HE2"]
POLAR_HYDROGENS["TRP"] = ["H", "HE1"]
POLAR_HYDROGENS["PHE"] = ["H"]
POLAR_HYDROGENS["TYR"] = ["H", "HH"]
POLAR_HYDROGENS["GLU"] = ["H"]
POLAR_HYDROGENS["ASP"] = ["H"]
POLAR_HYDROGENS["LYS"] = ["H", "HZ1", "HZ2", "HZ3"]
POLAR_HYDROGENS["PRO"] = []
POLAR_HYDROGENS["CYS"] = ["H"]
POLAR_HYDROGENS["MET"] = ["H"]
HBOND_STD_DEV = np.pi / 3
# Dictionary from an acceptor atom to its directly bonded atom on which to
# compute the angle.
ACCEPTOR_ANGLES = {}
ACCEPTOR_ANGLES["O"] = "C"
ACCEPTOR_ANGLES["O1"] = "C"
ACCEPTOR_ANGLES["O2"] = "C"
ACCEPTOR_ANGLES["OXT"] = "C"
ACCEPTOR_ANGLES["OT1"] = "C"
ACCEPTOR_ANGLES["OT2"] = "C"
ACCEPTOR_ANGLES["OC1"] = "C"
ACCEPTOR_ANGLES["OC2"] = "C"
# Dictionary from acceptor atom to a third atom on which to compute the plane.
ACCEPTOR_PLANES = {}
ACCEPTOR_PLANES["O"] = "CA"
# Dictionary from an H atom to its donor atom.
DONOR_ATOMS = {}
DONOR_ATOMS["H"] = "N"
# Hydrogen bond information.
# ARG
# ARG NHX
# Angle: NH1, HH1X, point and NH2, HH2X, point 180 degrees.
# RADII from HH: RADII[H]
# ARG NE
# Angle: ~ 120 NE, HE, point, 180 degrees
DONOR_ATOMS["HH11"] = "NH1"
DONOR_ATOMS["HH12"] = "NH1"
DONOR_ATOMS["HH21"] = "NH2"
DONOR_ATOMS["HH22"] = "NH2"
DONOR_ATOMS["HE"] = "NE"
# ASN
# Angle ND2,HD2X: 180
# Plane: CG,ND2,OD1
# Angle CG-OD1-X: 120
DONOR_ATOMS["HD21"] = "ND2"
DONOR_ATOMS["HD22"] = "ND2"
# ASN Acceptor
ACCEPTOR_ANGLES["OD1"] = "CG"
ACCEPTOR_PLANES["OD1"] = "CB"
# ASP
# Plane: CB-CG-OD1
# Angle CG-ODX-point: 120
ACCEPTOR_ANGLES["OD2"] = "CG"
ACCEPTOR_PLANES["OD2"] = "CB"
# GLU
# PLANE: CD-OE1-OE2
# ANGLE: CD-OEX: 120
# GLN
# PLANE: CD-OE1-NE2
# Angle NE2,HE2X: 180
# ANGLE: CD-OE1: 120
DONOR_ATOMS["HE21"] = "NE2"
DONOR_ATOMS["HE22"] = "NE2"
ACCEPTOR_ANGLES["OE1"] = "CD"
ACCEPTOR_ANGLES["OE2"] = "CD"
ACCEPTOR_PLANES["OE1"] = "CG"
ACCEPTOR_PLANES["OE2"] = "CG"
# HIS Acceptors: ND1, NE2
# Plane ND1-CE1-NE2
# Angle: ND1-CE1 : 125.5
# Angle: NE2-CE1 : 125.5
ACCEPTOR_ANGLES["ND1"] = "CE1"
ACCEPTOR_ANGLES["NE2"] = "CE1"
ACCEPTOR_PLANES["ND1"] = "NE2"
ACCEPTOR_PLANES["NE2"] = "ND1"
# HIS Donors: ND1, NE2
# Angle ND1-HD1 : 180
# Angle NE2-HE2 : 180
DONOR_ATOMS["HD1"] = "ND1"
DONOR_ATOMS["HE2"] = "NE2"
# TRP Donor: NE1-HE1
# Angle NE1-HE1 : 180
DONOR_ATOMS["HE1"] = "NE1"
# LYS Donor NZ-HZX
# Angle NZ-HZX : 180
DONOR_ATOMS["HZ1"] = "NZ"
DONOR_ATOMS["HZ2"] = "NZ"
DONOR_ATOMS["HZ3"] = "NZ"
# TYR acceptor OH
# Plane: CE1-CZ-OH
# Angle: CZ-OH 120
ACCEPTOR_ANGLES["OH"] = "CZ"
ACCEPTOR_PLANES["OH"] = "CE1"
# TYR donor: OH-HH
# Angle: OH-HH 180
DONOR_ATOMS["HH"] = "OH"
ACCEPTOR_PLANES["OH"] = "CE1"
# SER acceptor:
# Angle CB-OG-X: 120
ACCEPTOR_ANGLES["OG"] = "CB"
# SER donor:
# Angle: OG-HG-X: 180
DONOR_ATOMS["HG"] = "OG"
# THR acceptor:
# Angle: CB-OG1-X: 120
ACCEPTOR_ANGLES["OG1"] = "CB"
# THR donor:
# Angle: OG1-HG1-X: 180
DONOR_ATOMS["HG1"] = "OG1"
def str2bool(v: str) -> bool:
"""Converts str to bool.
:param name: v - String element
:param type: str
:returns: boolean version of v
"""
v = v.lower()
if v == "true":
return True
elif v == "false":
return False
else:
raise argparse.ArgumentTypeError(f"Boolean value expected, got '{v}'.")
| [
11748,
1822,
29572,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
6738,
16024,
13,
5760,
33,
1330,
350,
11012,
46677,
11,
29538,
198,
198,
48610,
796,
350,
11012,
46677,
3419,
198,
198,
2,
33540,
3978,
329,
23235,
287,
795... | 2.053527 | 1,999 |
# Generated by Django 2.0.9 on 2020-07-24 07:52
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
24,
319,
12131,
12,
2998,
12,
1731,
8753,
25,
4309,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
import enum
| [
11748,
33829,
628,
198
] | 3.5 | 4 |
from PyQt5.QtWidgets import QApplication, QMainWindow, QAction, QFileDialog
import sys
from PyQt5.QtGui import QImage, QPainter, QPen
from PyQt5.QtCore import Qt, QPoint
if __name__ == "__main__":
app = QApplication(sys.argv)
window = Window()
window.show()
app.exec() | [
6738,
9485,
48,
83,
20,
13,
48,
83,
54,
312,
11407,
1330,
1195,
23416,
11,
1195,
13383,
27703,
11,
1195,
12502,
11,
1195,
8979,
44204,
198,
11748,
25064,
198,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
8205,
72,
1330,
1195,
5159,
11... | 2.483051 | 118 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import itertools
import pytest
import numpy as np
from numpy.testing import assert_almost_equal, assert_allclose
from astropy.convolution.convolve import convolve, convolve_fft
from astropy.convolution.kernels import (
Gaussian1DKernel, Gaussian2DKernel, Box1DKernel, Box2DKernel,
Trapezoid1DKernel, TrapezoidDisk2DKernel, MexicanHat1DKernel,
Tophat2DKernel, MexicanHat2DKernel, AiryDisk2DKernel, Ring2DKernel,
CustomKernel, Model1DKernel, Model2DKernel, Kernel1D, Kernel2D)
from astropy.convolution.utils import KernelSizeError
from astropy.modeling.models import Box2D, Gaussian1D, Gaussian2D
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning
try:
from scipy.ndimage import filters
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
WIDTHS_ODD = [3, 5, 7, 9]
WIDTHS_EVEN = [2, 4, 8, 16]
MODES = ['center', 'linear_interp', 'oversample', 'integrate']
KERNEL_TYPES = [Gaussian1DKernel, Gaussian2DKernel,
Box1DKernel, Box2DKernel,
Trapezoid1DKernel, TrapezoidDisk2DKernel,
MexicanHat1DKernel, Tophat2DKernel, AiryDisk2DKernel,
Ring2DKernel]
NUMS = [1, 1., np.float32(1.), np.float64(1.)]
# Test data
delta_pulse_1D = np.zeros(81)
delta_pulse_1D[40] = 1
delta_pulse_2D = np.zeros((81, 81))
delta_pulse_2D[40, 40] = 1
random_data_1D = np.random.rand(61)
random_data_2D = np.random.rand(61, 61)
class TestKernels:
"""
Test class for the built-in convolution kernels.
"""
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize(('width'), WIDTHS_ODD)
def test_scipy_filter_gaussian(self, width):
"""
Test GaussianKernel against SciPy ndimage gaussian filter.
"""
gauss_kernel_1D = Gaussian1DKernel(width)
gauss_kernel_1D.normalize()
gauss_kernel_2D = Gaussian2DKernel(width)
gauss_kernel_2D.normalize()
astropy_1D = convolve(delta_pulse_1D, gauss_kernel_1D, boundary='fill')
astropy_2D = convolve(delta_pulse_2D, gauss_kernel_2D, boundary='fill')
scipy_1D = filters.gaussian_filter(delta_pulse_1D, width)
scipy_2D = filters.gaussian_filter(delta_pulse_2D, width)
assert_almost_equal(astropy_1D, scipy_1D, decimal=12)
assert_almost_equal(astropy_2D, scipy_2D, decimal=12)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize(('width'), WIDTHS_ODD)
def test_scipy_filter_gaussian_laplace(self, width):
"""
Test MexicanHat kernels against SciPy ndimage gaussian laplace filters.
"""
mexican_kernel_1D = MexicanHat1DKernel(width)
mexican_kernel_2D = MexicanHat2DKernel(width)
astropy_1D = convolve(delta_pulse_1D, mexican_kernel_1D, boundary='fill', normalize_kernel=False)
astropy_2D = convolve(delta_pulse_2D, mexican_kernel_2D, boundary='fill', normalize_kernel=False)
with pytest.raises(Exception) as exc:
astropy_1D = convolve(delta_pulse_1D, mexican_kernel_1D, boundary='fill', normalize_kernel=True)
assert 'sum is close to zero' in exc.value.args[0]
with pytest.raises(Exception) as exc:
astropy_2D = convolve(delta_pulse_2D, mexican_kernel_2D, boundary='fill', normalize_kernel=True)
assert 'sum is close to zero' in exc.value.args[0]
# The Laplace of Gaussian filter is an inverted Mexican Hat
# filter.
scipy_1D = -filters.gaussian_laplace(delta_pulse_1D, width)
scipy_2D = -filters.gaussian_laplace(delta_pulse_2D, width)
# There is a slight deviation in the normalization. They differ by a
# factor of ~1.0000284132604045. The reason is not known.
assert_almost_equal(astropy_1D, scipy_1D, decimal=5)
assert_almost_equal(astropy_2D, scipy_2D, decimal=5)
@pytest.mark.parametrize(('kernel_type', 'width'), list(itertools.product(KERNEL_TYPES, WIDTHS_ODD)))
def test_delta_data(self, kernel_type, width):
"""
Test smoothing of an image with a single positive pixel
"""
if kernel_type == AiryDisk2DKernel and not HAS_SCIPY:
pytest.skip("Omitting AiryDisk2DKernel, which requires SciPy")
if not kernel_type == Ring2DKernel:
kernel = kernel_type(width)
else:
kernel = kernel_type(width, width * 0.2)
if kernel.dimension == 1:
c1 = convolve_fft(delta_pulse_1D, kernel, boundary='fill', normalize_kernel=False)
c2 = convolve(delta_pulse_1D, kernel, boundary='fill', normalize_kernel=False)
assert_almost_equal(c1, c2, decimal=12)
else:
c1 = convolve_fft(delta_pulse_2D, kernel, boundary='fill', normalize_kernel=False)
c2 = convolve(delta_pulse_2D, kernel, boundary='fill', normalize_kernel=False)
assert_almost_equal(c1, c2, decimal=12)
@pytest.mark.parametrize(('kernel_type', 'width'), list(itertools.product(KERNEL_TYPES, WIDTHS_ODD)))
def test_random_data(self, kernel_type, width):
"""
Test smoothing of an image made of random noise
"""
if kernel_type == AiryDisk2DKernel and not HAS_SCIPY:
pytest.skip("Omitting AiryDisk2DKernel, which requires SciPy")
if not kernel_type == Ring2DKernel:
kernel = kernel_type(width)
else:
kernel = kernel_type(width, width * 0.2)
if kernel.dimension == 1:
c1 = convolve_fft(random_data_1D, kernel, boundary='fill', normalize_kernel=False)
c2 = convolve(random_data_1D, kernel, boundary='fill', normalize_kernel=False)
assert_almost_equal(c1, c2, decimal=12)
else:
c1 = convolve_fft(random_data_2D, kernel, boundary='fill', normalize_kernel=False)
c2 = convolve(random_data_2D, kernel, boundary='fill', normalize_kernel=False)
assert_almost_equal(c1, c2, decimal=12)
@pytest.mark.parametrize(('width'), WIDTHS_ODD)
def test_uniform_smallkernel(self, width):
"""
Test smoothing of an image with a single positive pixel
Instead of using kernel class, uses a simple, small kernel
"""
kernel = np.ones([width, width])
c2 = convolve_fft(delta_pulse_2D, kernel, boundary='fill')
c1 = convolve(delta_pulse_2D, kernel, boundary='fill')
assert_almost_equal(c1, c2, decimal=12)
@pytest.mark.parametrize(('width'), WIDTHS_ODD)
def test_smallkernel_vs_Box2DKernel(self, width):
"""
Test smoothing of an image with a single positive pixel
"""
kernel1 = np.ones([width, width]) / width ** 2
kernel2 = Box2DKernel(width)
c2 = convolve_fft(delta_pulse_2D, kernel2, boundary='fill')
c1 = convolve_fft(delta_pulse_2D, kernel1, boundary='fill')
assert_almost_equal(c1, c2, decimal=12)
def test_convolve_1D_kernels(self):
"""
Check if convolving two kernels with each other works correctly.
"""
gauss_1 = Gaussian1DKernel(3)
gauss_2 = Gaussian1DKernel(4)
test_gauss_3 = Gaussian1DKernel(5)
with pytest.warns(AstropyUserWarning, match=r'Both array and kernel '
r'are Kernel instances'):
gauss_3 = convolve(gauss_1, gauss_2)
assert np.all(np.abs((gauss_3 - test_gauss_3).array) < 0.01)
def test_convolve_2D_kernels(self):
"""
Check if convolving two kernels with each other works correctly.
"""
gauss_1 = Gaussian2DKernel(3)
gauss_2 = Gaussian2DKernel(4)
test_gauss_3 = Gaussian2DKernel(5)
with pytest.warns(AstropyUserWarning, match=r'Both array and kernel '
r'are Kernel instances'):
gauss_3 = convolve(gauss_1, gauss_2)
assert np.all(np.abs((gauss_3 - test_gauss_3).array) < 0.01)
@pytest.mark.parametrize(('number'), NUMS)
def test_multiply_scalar(self, number):
"""
Check if multiplying a kernel with a scalar works correctly.
"""
gauss = Gaussian1DKernel(3)
gauss_new = number * gauss
assert_almost_equal(gauss_new.array, gauss.array * number, decimal=12)
@pytest.mark.parametrize(('number'), NUMS)
def test_multiply_scalar_type(self, number):
"""
Check if multiplying a kernel with a scalar works correctly.
"""
gauss = Gaussian1DKernel(3)
gauss_new = number * gauss
assert type(gauss_new) is Gaussian1DKernel
@pytest.mark.parametrize(('number'), NUMS)
def test_rmultiply_scalar_type(self, number):
"""
Check if multiplying a kernel with a scalar works correctly.
"""
gauss = Gaussian1DKernel(3)
gauss_new = gauss * number
assert type(gauss_new) is Gaussian1DKernel
def test_multiply_kernel1d(self):
"""Test that multiplying two 1D kernels raises an exception."""
gauss = Gaussian1DKernel(3)
with pytest.raises(Exception):
gauss * gauss
def test_multiply_kernel2d(self):
"""Test that multiplying two 2D kernels raises an exception."""
gauss = Gaussian2DKernel(3)
with pytest.raises(Exception):
gauss * gauss
def test_multiply_kernel1d_kernel2d(self):
"""
Test that multiplying a 1D kernel with a 2D kernel raises an
exception.
"""
with pytest.raises(Exception):
Gaussian1DKernel(3) * Gaussian2DKernel(3)
def test_add_kernel_scalar(self):
"""Test that adding a scalar to a kernel raises an exception."""
with pytest.raises(Exception):
Gaussian1DKernel(3) + 1
def test_model_1D_kernel(self):
"""
Check Model1DKernel against Gaussian1Dkernel
"""
stddev = 5.
gauss = Gaussian1D(1. / np.sqrt(2 * np.pi * stddev**2), 0, stddev)
model_gauss_kernel = Model1DKernel(gauss, x_size=21)
gauss_kernel = Gaussian1DKernel(stddev, x_size=21)
assert_almost_equal(model_gauss_kernel.array, gauss_kernel.array,
decimal=12)
def test_model_2D_kernel(self):
"""
Check Model2DKernel against Gaussian2Dkernel
"""
stddev = 5.
gauss = Gaussian2D(1. / (2 * np.pi * stddev**2), 0, 0, stddev, stddev)
model_gauss_kernel = Model2DKernel(gauss, x_size=21)
gauss_kernel = Gaussian2DKernel(stddev, x_size=21)
assert_almost_equal(model_gauss_kernel.array, gauss_kernel.array,
decimal=12)
def test_custom_1D_kernel(self):
"""
Check CustomKernel against Box1DKernel.
"""
# Define one dimensional array:
array = np.ones(5)
custom = CustomKernel(array)
custom.normalize()
box = Box1DKernel(5)
c2 = convolve(delta_pulse_1D, custom, boundary='fill')
c1 = convolve(delta_pulse_1D, box, boundary='fill')
assert_almost_equal(c1, c2, decimal=12)
def test_custom_2D_kernel(self):
"""
Check CustomKernel against Box2DKernel.
"""
# Define one dimensional array:
array = np.ones((5, 5))
custom = CustomKernel(array)
custom.normalize()
box = Box2DKernel(5)
c2 = convolve(delta_pulse_2D, custom, boundary='fill')
c1 = convolve(delta_pulse_2D, box, boundary='fill')
assert_almost_equal(c1, c2, decimal=12)
def test_custom_1D_kernel_list(self):
"""
Check if CustomKernel works with lists.
"""
custom = CustomKernel([1, 1, 1, 1, 1])
assert custom.is_bool is True
def test_custom_2D_kernel_list(self):
"""
Check if CustomKernel works with lists.
"""
custom = CustomKernel([[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
assert custom.is_bool is True
def test_custom_1D_kernel_zerosum(self):
"""
Check if CustomKernel works when the input array/list
sums to zero.
"""
array = [-2, -1, 0, 1, 2]
custom = CustomKernel(array)
with pytest.warns(AstropyUserWarning, match=r'kernel cannot be '
r'normalized because it sums to zero'):
custom.normalize()
assert custom.truncation == 0.
assert custom._kernel_sum == 0.
def test_custom_2D_kernel_zerosum(self):
"""
Check if CustomKernel works when the input array/list
sums to zero.
"""
array = [[0, -1, 0], [-1, 4, -1], [0, -1, 0]]
custom = CustomKernel(array)
with pytest.warns(AstropyUserWarning, match=r'kernel cannot be '
r'normalized because it sums to zero'):
custom.normalize()
assert custom.truncation == 0.
assert custom._kernel_sum == 0.
def test_custom_kernel_odd_error(self):
"""
Check if CustomKernel raises if the array size is odd.
"""
with pytest.raises(KernelSizeError):
CustomKernel([1, 1, 1, 1])
def test_add_1D_kernels(self):
"""
Check if adding of two 1D kernels works.
"""
box_1 = Box1DKernel(5)
box_2 = Box1DKernel(3)
box_3 = Box1DKernel(1)
box_sum_1 = box_1 + box_2 + box_3
box_sum_2 = box_2 + box_3 + box_1
box_sum_3 = box_3 + box_1 + box_2
ref = [1/5., 1/5. + 1/3., 1 + 1/3. + 1/5., 1/5. + 1/3., 1/5.]
assert_almost_equal(box_sum_1.array, ref, decimal=12)
assert_almost_equal(box_sum_2.array, ref, decimal=12)
assert_almost_equal(box_sum_3.array, ref, decimal=12)
# Assert that the kernels haven't changed
assert_almost_equal(box_1.array, [0.2, 0.2, 0.2, 0.2, 0.2], decimal=12)
assert_almost_equal(box_2.array, [1/3., 1/3., 1/3.], decimal=12)
assert_almost_equal(box_3.array, [1], decimal=12)
def test_add_2D_kernels(self):
"""
Check if adding of two 1D kernels works.
"""
box_1 = Box2DKernel(3)
box_2 = Box2DKernel(1)
box_sum_1 = box_1 + box_2
box_sum_2 = box_2 + box_1
ref = [[1 / 9., 1 / 9., 1 / 9.],
[1 / 9., 1 + 1 / 9., 1 / 9.],
[1 / 9., 1 / 9., 1 / 9.]]
ref_1 = [[1 / 9., 1 / 9., 1 / 9.],
[1 / 9., 1 / 9., 1 / 9.],
[1 / 9., 1 / 9., 1 / 9.]]
assert_almost_equal(box_2.array, [[1]], decimal=12)
assert_almost_equal(box_1.array, ref_1, decimal=12)
assert_almost_equal(box_sum_1.array, ref, decimal=12)
assert_almost_equal(box_sum_2.array, ref, decimal=12)
def test_Gaussian1DKernel_even_size(self):
"""
Check if even size for GaussianKernel works.
"""
gauss = Gaussian1DKernel(3, x_size=10)
assert gauss.array.size == 10
def test_Gaussian2DKernel_even_size(self):
"""
Check if even size for GaussianKernel works.
"""
gauss = Gaussian2DKernel(3, x_size=10, y_size=10)
assert gauss.array.shape == (10, 10)
# https://github.com/astropy/astropy/issues/3605
def test_normalize_peak(self):
"""
Check if normalize works with peak mode.
"""
custom = CustomKernel([1, 2, 3, 2, 1])
custom.normalize(mode='peak')
assert custom.array.max() == 1
def test_check_kernel_attributes(self):
"""
Check if kernel attributes are correct.
"""
box = Box2DKernel(5)
# Check truncation
assert box.truncation == 0
# Check model
assert isinstance(box.model, Box2D)
# Check center
assert box.center == [2, 2]
# Check normalization
box.normalize()
assert_almost_equal(box._kernel_sum, 1., decimal=12)
# Check separability
assert box.separable
@pytest.mark.parametrize(('kernel_type', 'mode'), list(itertools.product(KERNEL_TYPES, MODES)))
def test_discretize_modes(self, kernel_type, mode):
"""
Check if the different modes result in kernels that work with convolve.
Use only small kernel width, to make the test pass quickly.
"""
if kernel_type == AiryDisk2DKernel and not HAS_SCIPY:
pytest.skip("Omitting AiryDisk2DKernel, which requires SciPy")
if not kernel_type == Ring2DKernel:
kernel = kernel_type(3)
else:
kernel = kernel_type(3, 3 * 0.2)
if kernel.dimension == 1:
c1 = convolve_fft(delta_pulse_1D, kernel, boundary='fill', normalize_kernel=False)
c2 = convolve(delta_pulse_1D, kernel, boundary='fill', normalize_kernel=False)
assert_almost_equal(c1, c2, decimal=12)
else:
c1 = convolve_fft(delta_pulse_2D, kernel, boundary='fill', normalize_kernel=False)
c2 = convolve(delta_pulse_2D, kernel, boundary='fill', normalize_kernel=False)
assert_almost_equal(c1, c2, decimal=12)
@pytest.mark.parametrize(('width'), WIDTHS_EVEN)
def test_box_kernels_even_size(self, width):
"""
Check if BoxKernel work properly with even sizes.
"""
kernel_1D = Box1DKernel(width)
assert kernel_1D.shape[0] % 2 != 0
assert kernel_1D.array.sum() == 1.
kernel_2D = Box2DKernel(width)
assert np.all([_ % 2 != 0 for _ in kernel_2D.shape])
assert kernel_2D.array.sum() == 1.
def test_kernel_normalization(self):
"""
Test that repeated normalizations do not change the kernel [#3747].
"""
kernel = CustomKernel(np.ones(5))
kernel.normalize()
data = np.copy(kernel.array)
kernel.normalize()
assert_allclose(data, kernel.array)
kernel.normalize()
assert_allclose(data, kernel.array)
def test_kernel_normalization_mode(self):
"""
Test that an error is raised if mode is invalid.
"""
with pytest.raises(ValueError):
kernel = CustomKernel(np.ones(3))
kernel.normalize(mode='invalid')
def test_kernel1d_int_size(self):
"""
Test that an error is raised if ``Kernel1D`` ``x_size`` is not
an integer.
"""
with pytest.raises(TypeError):
Gaussian1DKernel(3, x_size=1.2)
def test_kernel2d_int_xsize(self):
"""
Test that an error is raised if ``Kernel2D`` ``x_size`` is not
an integer.
"""
with pytest.raises(TypeError):
Gaussian2DKernel(3, x_size=1.2)
def test_kernel2d_int_ysize(self):
"""
Test that an error is raised if ``Kernel2D`` ``y_size`` is not
an integer.
"""
with pytest.raises(TypeError):
Gaussian2DKernel(3, x_size=5, y_size=1.2)
def test_kernel1d_initialization(self):
"""
Test that an error is raised if an array or model is not
specified for ``Kernel1D``.
"""
with pytest.raises(TypeError):
Kernel1D()
def test_kernel2d_initialization(self):
"""
Test that an error is raised if an array or model is not
specified for ``Kernel2D``.
"""
with pytest.raises(TypeError):
Kernel2D()
| [
2,
49962,
739,
257,
513,
12,
565,
682,
347,
10305,
3918,
5964,
532,
766,
38559,
24290,
13,
81,
301,
198,
198,
11748,
340,
861,
10141,
198,
198,
11748,
12972,
9288,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
299,
32152,
13,
33407,
... | 2.141821 | 9,103 |
import contextlib
import itertools
import json
import os
import pathlib
import shutil
import sys
import typing
import datasets
import numpy
import ray.tune
import transformers
from . import metric
from . import tok
from . import train
class TailScheduler(ray.tune.schedulers.trial_scheduler.FIFOScheduler):
    ''' Trains a group of models in parallel. When there are more
    trials than resources, the trials that are the furthest ahead
    are paused to make room for those which have fallen the
    furthest behind.

    Since checkpointing is not free, the strategy is not aggressive;
    instead it attempts to keep progress even across all trials, so
    that early-stopping strategies can make the most educated
    decisions.
    '''
def default_grid(cost=8):
    ''' Yield the candidate models for triage during hp_search. '''
    # there is a tradeoff between model_size and max_len we can exploit
    # during model comparisons
    model_size = [ 'medium', 'small', 'mini', 'tiny' ]
    # at cost=8 this is [256, 512, 1024, 2048];
    # cost=2 is viable for cpu smoke tests.
    max_len = [ 1 << i << cost for i in range(4) ]
    vocab = [
        #'cc-html8K',           # bert/mini,N=512/sphtml8K eval_loss=5.300
        'cc-html8K,sd=t',       # bert/mini,N=512/sphtml8K eval_loss=4.408
        #'cc-html8K,sn=f',      # bert/mini,N=512/sphtml8K eval_loss=5.342
        #'cc-html8K,sn=f,sw=f', # bert/mini,N=512/sphtml8K eval_loss=5.437
        #'cc-html8K,sn=f,ws=t', # bert/mini,N=512/sphtml8K eval_loss=5.417
        #'cc-html8K,su=f',      # bert/mini,N=512/sphtml8K eval_loss=5.912
        #'cc-html8K,su=f,sn=f', # bert/mini,N=512/sphtml8K eval_loss=5.912
        #'cc-html8K,su=f,sw=f', # bert/mini,N=512/sphtml8K eval_loss=5.878
        #'cc-html8K,sw=f',      # bert/mini,N=512/sphtml8K eval_loss=5.208
        #'cc-html8K,sw=f,sd=t', # bert/mini,N=512/sphtml8K eval_loss=4.536
        #'cc-html8K,sw=f,ws=t', # bert/mini,N=512/sphtml8K eval_loss=5.319
        #'cc-html8K,sw=f,ws=t,sd=t', # bert/mini,N=512/sphtml8K eval_loss=4.349
        #'cc-html8K,ws=t',      # bert/mini,N=512/sphtml8K eval_loss=5.359
    ]
    model_type = [
        #'albert',      #tiny,N=2048/sphtml8K,sn=f eval_loss=5.670,
        'bert',         #tiny,N=2048/sphtml8K,sn=f eval_loss=5.677,
        #'big_bird',    #tiny,N=2048/sphtml8K,sn=f eval_loss=5.528,
        'convbert',     #tiny,N=2048/sphtml8K,sn=f eval_loss=3.203,
        #'deberta',     #tiny,N=2048/sphtml8K,sn=f eval_loss=5.681,
        #'deberta-v2',  #tiny,N=2048/sphtml8K,sn=f eval_loss=5.700,
        #'electra',     #tiny,N=2048/sphtml8K,sn=f eval_loss=5.690,
        #'layoutlm',    #tiny,N=2048/sphtml8K,sn=f eval_loss=5.673,
        'megatron-bert',#tiny,N=2048/sphtml8K,sn=f eval_loss=5.464,
        'roformer',     #tiny,N=2048/sphtml8K,sn=f eval_loss=3.759,
    ]
    # Sizes and sequence lengths are paired (bigger model => shorter sequence).
    size_and_len = list(zip(model_size, max_len))
    for vocab_name, (size, length), arch in itertools.product(vocab, size_and_len, model_type):
        yield train.pretrain(vocab=vocab_name, model_type=arch, model_size=size, max_len=length)
def default_hp_space():
    ''' Build the set of traditional hp_search params (not model settings). '''
    defaults = transformers.TrainingArguments
    hp_space = {}
    n_samples = 1
    # learning-rate sweep: enabled
    if True:
        hp_space['learning_rate'] = ray.tune.loguniform(
            defaults.learning_rate / 4,
            defaults.learning_rate * 4,
            base=4)
        n_samples *= 2
    # weight-decay sweep: currently disabled
    if False:
        hp_space['weight_decay'] = ray.tune.sample_from(lambda _: (
            numpy.random.gamma(0.125, 0.125)
        ))
        n_samples *= 2
    return n_samples, hp_space
| [
198,
11748,
4732,
8019,
198,
11748,
340,
861,
10141,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
3108,
8019,
198,
11748,
4423,
346,
198,
11748,
25064,
198,
11748,
19720,
198,
198,
11748,
40522,
198,
11748,
299,
32152,
198,
11748,
26... | 1.989401 | 1,887 |
import unittest
import numpy as np
from lib import Kernel
| [
11748,
555,
715,
395,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
9195,
1330,
32169,
628
] | 3.529412 | 17 |
from math import sqrt
# X, and Y variable names against [pylint]: C0104, but is the same as the stub, advise not to change this.
| [
6738,
10688,
1330,
19862,
17034,
198,
198,
2,
1395,
11,
290,
575,
7885,
3891,
1028,
685,
79,
2645,
600,
5974,
327,
486,
3023,
11,
475,
318,
262,
976,
355,
262,
17071,
11,
18595,
407,
284,
1487,
428,
13,
198
] | 3.333333 | 39 |
import torch
from torch import nn
from kme.models.base import FeatureNetBase
class RiskFeatNet1(FeatureNetBase):
    """
    Simple feature network for risk score datasets.

    NOTE(review): class body is empty in this chunk; behaviour
    presumably comes from ``FeatureNetBase`` -- confirm against
    kme.models.base.
    """
class SmallTabFeatNet1(FeatureNetBase):
    """
    Simple network for risk score datasets.

    NOTE(review): docstring duplicated from ``RiskFeatNet1`` -- the name
    suggests small tabular data; confirm intended purpose.
    """
class SmallTabFeatNet2(FeatureNetBase):
    """
    Simple network for risk score datasets.

    NOTE(review): docstring duplicated from ``RiskFeatNet1`` -- the name
    suggests a second small tabular variant; confirm intended purpose.
    """
class BaselineMLP1(FeatureNetBase):
    """
    Implementation of a classic MLP as a feat net, for benchmarking purposes.

    NOTE(review): class body is empty in this chunk; implementation
    presumably lives in ``FeatureNetBase`` -- confirm.
    """
| [
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
198,
6738,
479,
1326,
13,
27530,
13,
8692,
1330,
27018,
7934,
14881,
628,
198,
4871,
19602,
37,
4098,
7934,
16,
7,
38816,
7934,
14881,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,... | 3.091463 | 164 |
# Imports from 3rd party libraries
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
# Imports from this application
from app import app
# 2 column layout. 1st column width = 4/12
# https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout
# Left column: three sliders feeding the prediction callback.
# Fixes over the original: the third Slider was missing its closing paren,
# the second Slider had `step=` with no value (SyntaxError), min/max used an
# undefined `test` array, and the third id had a stray leading space that
# could never match the callback's Input('interest_due', ...).
column1 = dbc.Col(
    [
        dcc.Markdown('## Predictions', className='mb-5'),
        dcc.Markdown('#### Charges Due'),
        dcc.Slider(
            id='other_charges_due',
            # Bounds follow the mark range -- TODO confirm against the dataset;
            # the original referenced an undefined `test` array here.
            min=-100,
            max=22000,
            step=100,
            value=-100,
            marks={n: str(n) for n in range(-100, 22000, 1000)},
            className='mb-5',
        ),
        dcc.Markdown('#### Penalty'),
        dcc.Slider(
            id='penalty_due',
            min=-47,
            max=170000,
            step=100,  # original had no step value -- TODO confirm intended granularity
            value=-47,
            marks={n: str(n) for n in range(-47, 170000, 10000)},
            className='mb-5',
        ),
        dcc.Markdown('#### Interest Due'),
        dcc.Slider(
            id='interest_due',  # was ' interest_due'; must match the callback Input id
            min=4000,
            max=60000,
            step=4000,
            value=4000,
            marks={n: str(n) for n in range(4000, 60000, 10000)},
            className='mb-5',
        ),
    ],
    md=6,
)
# Right-hand column: empty placeholder -- presumably meant to hold the
# 'prediction-content' target of the callback below; confirm before use.
column2 = dbc.Col(
    [
    ]
)
import pandas as pd  # noqa: F401  (kept from original; not used yet)


# Fixes over the original: the Input list had mismatched brackets (the third
# Input sat outside the list), and the decorator had no function beneath it,
# which is a SyntaxError.
@app.callback(
    Output('prediction-content', 'children'),
    [Input('other_charges_due', 'value'),
     Input('penalty_due', 'value'),
     Input('interest_due', 'value')],
)
def predict(other_charges_due, penalty_due, interest_due):
    """Render prediction text for the three slider values.

    TODO(review): plug in the trained model -- the original file defined
    no callback body at all.
    """
    return f'Inputs: {other_charges_due}, {penalty_due}, {interest_due}'
layout = dbc.Row([column1, column2])
| [
2,
1846,
3742,
422,
513,
4372,
2151,
12782,
198,
11748,
14470,
198,
11748,
14470,
62,
18769,
26418,
62,
5589,
3906,
355,
288,
15630,
198,
11748,
14470,
62,
7295,
62,
5589,
3906,
355,
288,
535,
198,
11748,
14470,
62,
6494,
62,
5589,
39... | 1.975089 | 843 |
import logging
import os
import time
import torch
import torch.nn as nn
from utils.meter import AverageMeter
from utils.metrics import R1_mAP_eval
from torch.cuda import amp
import torchvision
import torch.nn.functional as F
import numpy as np
import math
import random
| [
11748,
18931,
198,
11748,
28686,
198,
11748,
640,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
6738,
3384,
4487,
13,
27231,
1330,
13475,
44,
2357,
198,
6738,
3384,
4487,
13,
4164,
10466,
1330,
371,
16,
62,
76,
2... | 3.4125 | 80 |
import threading
from bable_interface.models import Packet
from bable_interface.flatbuffers_functions import build_packet
from bable_interface.BaBLE.Payload import Payload
| [
11748,
4704,
278,
198,
6738,
275,
540,
62,
39994,
13,
27530,
1330,
6400,
316,
198,
6738,
275,
540,
62,
39994,
13,
38568,
36873,
364,
62,
12543,
2733,
1330,
1382,
62,
8002,
316,
198,
6738,
275,
540,
62,
39994,
13,
34458,
19146,
13,
1... | 3.55102 | 49 |
"""
Module containing data augmentation techniques.
- 3D Affine/DDF Transforms for moving and fixed images.
"""
from abc import abstractmethod
from typing import Dict
import tensorflow as tf
from deepreg.model.layer_util import (
gen_rand_affine_transform,
gen_rand_ddf,
get_reference_grid,
resample,
resize3d,
warp_grid,
)
from deepreg.registry import REGISTRY
class RandomTransformation3D(tf.keras.layers.Layer):
    """
    An interface for different types of random spatial transformation.

    Subclasses implement :meth:`gen_transform_params` (random parameter
    generation) and :meth:`transform` (applying the parameters to an
    image via the precomputed reference grid).
    """
    def __init__(
        self,
        moving_image_size: tuple,
        fixed_image_size: tuple,
        batch_size: int,
        name: str = "RandomTransformation3D",
        trainable: bool = False,
    ):
        """
        Abstract class for image transformation.

        :param moving_image_size: (m_dim1, m_dim2, m_dim3)
        :param fixed_image_size: (f_dim1, f_dim2, f_dim3)
        :param batch_size: size of mini-batch
        :param name: name of layer
        :param trainable: if this layer is trainable
        """
        super().__init__(trainable=trainable, name=name)
        self.moving_image_size = moving_image_size
        self.fixed_image_size = fixed_image_size
        self.batch_size = batch_size
        # Reference grids are built once here and reused on every call.
        self.moving_grid_ref = get_reference_grid(grid_size=moving_image_size)
        self.fixed_grid_ref = get_reference_grid(grid_size=fixed_image_size)
    @abstractmethod
    def gen_transform_params(self) -> (tf.Tensor, tf.Tensor):
        """
        Generates transformation parameters for moving and fixed image.

        :return: two tensors, (moving_params, fixed_params)
        """
    @staticmethod
    @abstractmethod
    def transform(
        image: tf.Tensor, grid_ref: tf.Tensor, params: tf.Tensor
    ) -> tf.Tensor:
        """
        Transforms the reference grid and then resample the image.

        :param image: shape = (batch, dim1, dim2, dim3)
        :param grid_ref: shape = (dim1, dim2, dim3, 3)
        :param params: parameters for transformation
        :return: shape = (batch, dim1, dim2, dim3)
        """
    def call(self, inputs: Dict[str, tf.Tensor], **kwargs) -> Dict[str, tf.Tensor]:
        """
        Creates random params for the input images and their labels,
        and transforms them by resampling from the transformed reference
        grids.  Labels (when present) reuse the same params as their image.

        :param inputs: a dict having multiple tensors
            if labeled:
                moving_image, shape = (batch, m_dim1, m_dim2, m_dim3)
                fixed_image, shape = (batch, f_dim1, f_dim2, f_dim3)
                moving_label, shape = (batch, m_dim1, m_dim2, m_dim3)
                fixed_label, shape = (batch, f_dim1, f_dim2, f_dim3)
                indices, shape = (batch, num_indices)
            else, unlabeled:
                moving_image, shape = (batch, m_dim1, m_dim2, m_dim3)
                fixed_image, shape = (batch, f_dim1, f_dim2, f_dim3)
                indices, shape = (batch, num_indices)
        :param kwargs: other arguments
        :return: dictionary with the same structure as inputs
        """
        moving_image = inputs["moving_image"]
        fixed_image = inputs["fixed_image"]
        indices = inputs["indices"]
        # Moving and fixed images get independent random transforms.
        moving_params, fixed_params = self.gen_transform_params()
        moving_image = self.transform(moving_image, self.moving_grid_ref, moving_params)
        fixed_image = self.transform(fixed_image, self.fixed_grid_ref, fixed_params)
        if "moving_label" not in inputs:  # unlabeled
            return dict(
                moving_image=moving_image, fixed_image=fixed_image, indices=indices
            )
        moving_label = inputs["moving_label"]
        fixed_label = inputs["fixed_label"]
        moving_label = self.transform(moving_label, self.moving_grid_ref, moving_params)
        fixed_label = self.transform(fixed_label, self.fixed_grid_ref, fixed_params)
        return dict(
            moving_image=moving_image,
            fixed_image=fixed_image,
            moving_label=moving_label,
            fixed_label=fixed_label,
            indices=indices,
        )
    def get_config(self) -> dict:
        """Return the config dictionary for recreating this class."""
        config = super().get_config()
        config["moving_image_size"] = self.moving_image_size
        config["fixed_image_size"] = self.fixed_image_size
        config["batch_size"] = self.batch_size
        return config
@REGISTRY.register_data_augmentation(name="affine")
class RandomAffineTransform3D(RandomTransformation3D):
    """Apply random affine transformation to moving/fixed images separately."""
    def __init__(
        self,
        moving_image_size: tuple,
        fixed_image_size: tuple,
        batch_size: int,
        scale: float = 0.1,
        name: str = "RandomAffineTransform3D",
        **kwargs,
    ):
        """
        Init.

        :param moving_image_size: (m_dim1, m_dim2, m_dim3)
        :param fixed_image_size: (f_dim1, f_dim2, f_dim3)
        :param batch_size: size of mini-batch
        :param scale: a positive float controlling the scale of transformation
        :param name: name of the layer
        :param kwargs: extra arguments
        """
        super().__init__(
            moving_image_size=moving_image_size,
            fixed_image_size=fixed_image_size,
            batch_size=batch_size,
            name=name,
            **kwargs,
        )
        self.scale = scale
    def get_config(self) -> dict:
        """Return the config dictionary for recreating this class."""
        config = super().get_config()
        config["scale"] = self.scale
        return config
    def gen_transform_params(self) -> (tf.Tensor, tf.Tensor):
        """
        Function that generates the random 3D transformation parameters
        for a batch of data for moving and fixed image.

        :return: a tuple of tensors, each has shape = (batch, 4, 3)
        """
        # Draw one batch of 2*batch_size transforms, then split it between
        # moving (first half) and fixed (second half).
        theta = gen_rand_affine_transform(
            batch_size=self.batch_size * 2, scale=self.scale
        )
        return theta[: self.batch_size], theta[self.batch_size :]
    @staticmethod
    def transform(
        image: tf.Tensor, grid_ref: tf.Tensor, params: tf.Tensor
    ) -> tf.Tensor:
        """
        Transforms the reference grid and then resample the image.

        :param image: shape = (batch, dim1, dim2, dim3)
        :param grid_ref: shape = (dim1, dim2, dim3, 3)
        :param params: shape = (batch, 4, 3)
        :return: shape = (batch, dim1, dim2, dim3)
        """
        return resample(vol=image, loc=warp_grid(grid_ref, params))
@REGISTRY.register_data_augmentation(name="ddf")
class RandomDDFTransform3D(RandomTransformation3D):
    """Apply random DDF transformation to moving/fixed images separately."""
    def __init__(
        self,
        moving_image_size: tuple,
        fixed_image_size: tuple,
        batch_size: int,
        field_strength: int = 1,
        low_res_size: tuple = (1, 1, 1),
        name: str = "RandomDDFTransform3D",
        **kwargs,
    ):
        """
        Creates a DDF transformation for data augmentation.

        To simulate smooth deformation fields, we interpolate from a low resolution
        field of size low_res_size using linear interpolation. The variance of the
        deformation field is drawn from a uniform variable
        between [0, field_strength].

        :param moving_image_size: tuple
        :param fixed_image_size: tuple
        :param batch_size: int
        :param field_strength: int = 1. It is used as the upper bound for the
            deformation field variance
        :param low_res_size: tuple = (1, 1, 1).
        :param name: name of layer
        :param kwargs: extra arguments
        """
        super().__init__(
            moving_image_size=moving_image_size,
            fixed_image_size=fixed_image_size,
            batch_size=batch_size,
            name=name,
            **kwargs,
        )
        # The low-resolution field must not exceed the image size in any dim.
        assert tuple(low_res_size) <= tuple(moving_image_size)
        assert tuple(low_res_size) <= tuple(fixed_image_size)
        self.field_strength = field_strength
        self.low_res_size = low_res_size
    def get_config(self) -> dict:
        """Return the config dictionary for recreating this class."""
        config = super().get_config()
        config["field_strength"] = self.field_strength
        config["low_res_size"] = self.low_res_size
        return config
    def gen_transform_params(self) -> (tf.Tensor, tf.Tensor):
        """
        Generates two random ddf fields for moving and fixed images.

        :return: tuple, one has shape = (batch, m_dim1, m_dim2, m_dim3, 3)
            another one has shape = (batch, f_dim1, f_dim2, f_dim3, 3)
        """
        kwargs = dict(
            batch_size=self.batch_size,
            field_strength=self.field_strength,
            low_res_size=self.low_res_size,
        )
        moving = gen_rand_ddf(image_size=self.moving_image_size, **kwargs)
        fixed = gen_rand_ddf(image_size=self.fixed_image_size, **kwargs)
        return moving, fixed
    @staticmethod
    def transform(
        image: tf.Tensor, grid_ref: tf.Tensor, params: tf.Tensor
    ) -> tf.Tensor:
        """
        Transforms the reference grid and then resample the image.

        :param image: shape = (batch, dim1, dim2, dim3)
        :param grid_ref: shape = (dim1, dim2, dim3, 3)
        :param params: DDF, shape = (batch, dim1, dim2, dim3, 3)
        :return: shape = (batch, dim1, dim2, dim3)
        """
        # Unlike the affine case, the DDF is added directly to the grid.
        return resample(vol=image, loc=grid_ref[None, ...] + params)
def resize_inputs(
    inputs: Dict[str, tf.Tensor], moving_image_size: tuple, fixed_image_size: tuple
) -> Dict[str, tf.Tensor]:
    """
    Resize the images (and labels, when present) of one sample.

    :param inputs: dict of tensors; always contains ``moving_image``,
        ``fixed_image`` and ``indices``; labeled samples additionally
        contain ``moving_label`` and ``fixed_label``.  Image/label
        spatial shapes are (None, None, None); indices is (num_indices,).
    :param moving_image_size: tuple, (m_dim1, m_dim2, m_dim3)
    :param fixed_image_size: tuple, (f_dim1, f_dim2, f_dim3)
    :return: dict with the same keys, where moving tensors are resized to
        moving_image_size and fixed tensors to fixed_image_size;
        ``indices`` is passed through unchanged.
    """
    resized = {
        "moving_image": resize3d(image=inputs["moving_image"], size=moving_image_size),
        "fixed_image": resize3d(image=inputs["fixed_image"], size=fixed_image_size),
    }
    if "moving_label" in inputs:  # labeled sample: labels get the same resize
        resized["moving_label"] = resize3d(
            image=inputs["moving_label"], size=moving_image_size
        )
        resized["fixed_label"] = resize3d(
            image=inputs["fixed_label"], size=fixed_image_size
        )
    resized["indices"] = inputs["indices"]
    return resized
| [
37811,
198,
26796,
7268,
1366,
16339,
14374,
7605,
13,
198,
220,
532,
513,
35,
6708,
500,
14,
35,
8068,
3602,
23914,
329,
3867,
290,
5969,
4263,
13,
198,
37811,
198,
198,
6738,
450,
66,
1330,
12531,
24396,
198,
6738,
19720,
1330,
360,... | 2.266875 | 5,126 |
#!/usr/bin/env python
import os
import sys
import rospy
import cv, cv2, cv_bridge
import numpy
from baxter_demos.msg import BlobInfo, BlobInfoArray
from sensor_msgs.msg import Image
import argparse
"""Script to test functionality of object_finder color segmentation
User needs to manually run object_finder and subscribe to topic name specified here
"""
global centroid, axis
centroid = None
axis = None
topic_name = "object_finder_test"
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
686,
2777,
88,
198,
11748,
269,
85,
11,
269,
85,
17,
11,
269,
85,
62,
9458,
198,
11748,
299,
32152,
198,
6738,
275,
40864,
62,
9536,... | 3.141935 | 155 |
from .sites import site
from django.utils.module_loading import autodiscover_modules
from .options import ModelAdmin
from django.contrib.admin import register
__all__ = ["site", 'register', 'autodiscover', 'ModelAdmin']
| [
6738,
764,
49315,
1330,
2524,
198,
6738,
42625,
14208,
13,
26791,
13,
21412,
62,
25138,
1330,
1960,
375,
29392,
62,
18170,
198,
6738,
764,
25811,
1330,
9104,
46787,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
28482,
1330,
7881,
628,
198... | 3.639344 | 61 |
import unittest
import orca
from setup.settings import *
from pandas.util.testing import *
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
198,
11748,
393,
6888,
198,
6738,
9058,
13,
33692,
1330,
1635,
198,
6738,
19798,
292,
13,
22602,
13,
33407,
1330,
1635,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,... | 2.877551 | 49 |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 7 13:26:06 2017
@author: nblago
"""
from __future__ import print_function
import datetime
from astropy.io import votable
import numpy as np
import os
import logging
import warnings
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.table import Table
from astroquery.vizier import Vizier
from astropy.coordinates import Angle
try:
# For Python 3.0 and later
from urllib.request import urlopen
from urllib.request import urlretrieve
from urllib import request
from urllib.request import HTTPError
except ImportError:
# Fall back to Python 2's urllib2
from urllib2 import urlopen
from urllib import urlretrieve
from urllib2 import HTTPError
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
30030,
1526,
220,
767,
1511,
25,
2075,
25,
3312,
2177,
198,
198,
31,
9800,
25,
299,
2436,
3839,
198,
37811,
198,
6738,
11593,
37443,
834,
1330,
... | 3 | 257 |
import cv2

# Open the default camera (index 0).
cap = cv2.VideoCapture(0)

w = round(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = round(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fourcc = cv2.VideoWriter_fourcc(*'DIVX')  # *'DIVX' == 'D','I','V','X'
out = cv2.VideoWriter('output.avi', fourcc, 30, (w, h))

while True:  # process camera frames: record and preview until ESC
    ret, frame = cap.read()
    if not ret:
        # Read failed (camera unplugged / end of stream): stop instead of
        # writing an invalid frame -- the original ignored `ret`.
        break
    out.write(frame)
    cv2.imshow('frame', frame)
    if cv2.waitKey(10) == 27:  # ESC key
        break

# Release camera/file handles and close the preview window; the original
# leaked both handles and never destroyed the window.
cap.release()
out.release()
cv2.destroyAllWindows()
| [
11748,
269,
85,
17,
198,
198,
11128,
796,
269,
85,
17,
13,
10798,
49630,
7,
15,
8,
198,
198,
86,
796,
2835,
7,
11128,
13,
1136,
7,
33967,
17,
13,
33177,
62,
4805,
3185,
62,
10913,
10067,
62,
54,
2389,
4221,
4008,
198,
71,
796,
... | 1.828947 | 228 |
"""
test_rosetta_flash.py
Copyright 2015 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import re
from w3af.plugins.tests.helper import PluginTest, PluginConfig, MockResponse
from w3af.core.data.parsers.doc.url import URL
CONFIG = {
'audit': (PluginConfig('rosetta_flash'),),
}
| [
37811,
198,
9288,
62,
305,
2617,
8326,
62,
34167,
13,
9078,
198,
198,
15269,
1853,
843,
411,
371,
666,
6679,
198,
198,
1212,
2393,
318,
636,
286,
266,
18,
1878,
11,
2638,
1378,
86,
18,
1878,
13,
2398,
14,
764,
198,
198,
86,
18,
... | 3.381481 | 270 |
from check_personal_identity_number_and_credit_card.check_Pesel import (
valid_pesel,
check_control_sum_pesel,
check_age,
pesel,
)
correct_pesel = ["53052486359", "94120679457", "01240316649"]
too_long_pesel = "5305248635923"
too_short_pesel = "530524863"
with_letters_pesel = "530524d6a59"
bad_pesel = "53052486339"
| [
6738,
2198,
62,
22682,
62,
738,
414,
62,
17618,
62,
392,
62,
43082,
62,
9517,
13,
9122,
62,
47,
274,
417,
1330,
357,
198,
220,
220,
220,
4938,
62,
12272,
417,
11,
198,
220,
220,
220,
2198,
62,
13716,
62,
16345,
62,
12272,
417,
1... | 2.218543 | 151 |
# Copyright (c) 2012 Stuart Walsh
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from pythonwrap import Client, Channel, NuhMask
import numerics
| [
2,
220,
15069,
357,
66,
8,
2321,
22559,
24104,
198,
2,
198,
2,
220,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
198,
2,
220,
16727,
257,
4866,
286,
428,
3788,
290,
3917,
10314,
198,
2,
220,
3696,
357,
... | 3.656347 | 323 |
import os, sys
import torch
import torch.nn as nn
from models import *
from config import *

args = parse_arguments()

# Refuse to run on a missing checkpoint path.
if not os.path.isfile(args.checkpoint):
    sys.exit("model does not exist")

print(f"converting {args.checkpoint}...")

# Load the pickled full model.  The original also constructed a fresh
# GeneratorDCGAN here, but torch.load immediately overwrote it, so that
# construction was dead code and has been removed.
model = torch.load(args.checkpoint)

# Re-save the state dict in the legacy (non-zipfile) serialization format,
# overwriting the checkpoint in place.
torch.save(model.state_dict(), args.checkpoint, _use_new_zipfile_serialization=False)
| [
11748,
28686,
11,
25064,
201,
198,
11748,
28034,
201,
198,
11748,
28034,
13,
20471,
355,
299,
77,
201,
198,
6738,
4981,
1330,
1635,
201,
198,
6738,
4566,
1330,
1635,
201,
198,
201,
198,
22046,
796,
21136,
62,
853,
2886,
3419,
201,
198... | 2.720238 | 168 |
import numpy as np
from nose.plugins.skip import SkipTest
import theano
from theano import tensor as T
from theano.sandbox import mkl
if not mkl.mkl_available:
raise SkipTest('Optional package MKL disabled')
if __name__ == '__main__':
x = T.ftensor4('x_4D')
run_test('forward', x)
run_test('backward', x)
x = T.fmatrix('x_2D')
run_test('forward', x)
run_test('backward', x)
| [
11748,
299,
32152,
355,
45941,
198,
6738,
9686,
13,
37390,
13,
48267,
1330,
32214,
14402,
198,
198,
11748,
262,
5733,
198,
6738,
262,
5733,
1330,
11192,
273,
355,
309,
198,
6738,
262,
5733,
13,
38142,
3524,
1330,
33480,
75,
198,
198,
... | 2.503067 | 163 |
# GENERATED BY KOMAND SDK - DO NOT EDIT
import insightconnect_plugin_runtime
import json
| [
2,
24700,
1137,
11617,
11050,
509,
2662,
6981,
26144,
532,
8410,
5626,
48483,
198,
11748,
11281,
8443,
62,
33803,
62,
43282,
198,
11748,
33918,
628,
198,
220,
220,
220,
220,
198,
220,
220,
220,
220,
628
] | 2.833333 | 36 |
from uberUtils import *
'''curl -F 'client_id=M5TNy7T9h9n8xUOQ_uWSHVLdGvfw7u6H' \
-F 'client_secret=w5P7E4Ld54d_ai3lJRhAgkU_9oWbKd9kkTJHfPgf' \
-F 'grant_type=authorization_code' \
-F 'redirect_uri=https://9bbc25ce.ngrok.io' \
-F 'code=fMiym9aqxIPA6aEIDgZcHRk0BBwVM4' \
https://login.uber.com/oauth/v2/token'''
| [
6738,
48110,
18274,
4487,
1330,
1635,
198,
198,
7061,
6,
66,
6371,
532,
37,
705,
16366,
62,
312,
28,
44,
20,
46559,
88,
22,
51,
24,
71,
24,
77,
23,
87,
52,
46,
48,
62,
84,
54,
9693,
47468,
67,
38,
85,
44482,
22,
84,
21,
39,
... | 1.678218 | 202 |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.admin.cdn import views
from openstack_dashboard.dashboards.cdn.cdn_monitor_report.views import ajax_view
# URL suffix regexes: tenant-level and tenant+domain-level detail pages.
TENANT = r'^(?P<tenant_id>[^/]+)/$'
DOMAIN = r'^(?P<tenant_id>[^/]+)/(?P<domain_id>[^/]+)/$'
# Route table for the CDN monitor report pages; the two json routes feed
# the ajax_view (an empty domain_id means "all domains of the tenant").
urlpatterns = patterns(
    '',
    url(r'^$', views.AllDomainDetail.as_view(), name='index'),
    url(DOMAIN, views.DomainDetailView.as_view(), name='domain_detail'),
    url(TENANT, views.TenantDetailView.as_view(), name='tenant_detail'),
    url(r'^(?P<tenant_id>[^/]+)/(?P<domain_id>[^/]+)/json$', ajax_view),
    url(r'^(?P<tenant_id>[^/]+)/json$', ajax_view, kwargs={'domain_id': ''}),
)
| [
2,
15069,
2321,
1578,
1829,
5070,
355,
7997,
416,
262,
198,
2,
22998,
286,
262,
2351,
15781,
261,
2306,
873,
290,
4687,
8694,
13,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
15069,
2321,
46915,
11,
3457,
13,
198,
2,
198,
2,
2... | 2.826923 | 520 |
"Reloading Callback Handlers Dynamically"
# reload callback handlers dynamically
from tkinter import *
import radactions # get initial callback handlers
from importlib import reload
Hello().mainloop()
'''
You can change this file any number of times while the rad
script’s GUI is active; each time you do so, you’ll change the behavior of the GUI when
a button press occurs.
'''
"""
There are other ways to change a GUI while it’s running.
hat appearances can be altered at any time by calling the widget config method,
and widgets can be added and deleted from a display dynamically with methods
such as pack_forget and pack (and their grid manager relatives).
"""
'''
Furthermore, passing a new command=action option setting to a widget’s config method might reset
a callback handler to a new action object on the fly; with enough support code, this
may be a viable alternative to the indirection scheme used earlier to make reloads more
effective in GUIs.
'''
| [
1,
6892,
1170,
278,
4889,
1891,
7157,
8116,
14970,
1146,
1,
198,
2,
18126,
23838,
32847,
32366,
198,
198,
6738,
256,
74,
3849,
1330,
1635,
198,
11748,
2511,
4658,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,... | 3.614286 | 280 |
"""
Generates partition function and fraction folded expressions for an NRC
capped homopolymer series, and generates a list of experiment filenames (melts) and a constructs list.
"""
from __future__ import division
import sympy as sp
import numpy as np
import json
import os
import time
start = time.time()
print("\nGenerating partition functions and fraction folded expressions...")
PATH = os.path.dirname(os.path.abspath(__file__))
proj_name = "cANK"
# Parameters for partition function calculation. Note these are sympy symbols.
RT = sp.Symbol("RT")
dGN = sp.Symbol("dGN")
dGR = sp.Symbol("dGR")
dGC = sp.Symbol("dGC")
mi = sp.Symbol("mi")
denat = sp.Symbol("denat")
Kn = sp.Symbol("Kn")
Kr = sp.Symbol("Kr")
Kc = sp.Symbol("Kc")
dGinter = sp.Symbol("dGinter")
W = sp.Symbol("W")
np.exp = sp.Function("np.exp")
with open(os.path.join(PATH, f"{proj_name}_constructs.json"), "r") as cons:
constructs = json.load(cons)
# define matricies and end vectors to be used to calculate partition functions
begin = sp.Matrix([[0, 1]])
N = sp.Matrix([[(Kn * W), 1], [Kn, 1]])
R = sp.Matrix([[(Kr * W), 1], [Kr, 1]])
C = sp.Matrix([[(Kc * W), 1], [Kc, 1]])
end = sp.Matrix([[1], [1]])
# Build dictionaries of partition functions, partial derivs with respect
# to K, and fraction folded.
q_dict = {}
dqdKn_dict = {}
dqdKr_dict = {}
dqdKc_dict = {}
frac_folded_dict = {}
# Number of repeats of each type. Seems like they should be floats, but
# I get an error in the matrix multiplication (q_dict) if they are declared to be.
for construct in constructs:
# Make partition function dictionary and expressions for fraction folded.
# Note, only one pf is generated per construct, even when there are multiple melts.
matrixlist = construct.split("_")
q_dict[construct + "_q"] = begin
for i in range(0, len(matrixlist)):
num_Ni = 0
num_Ri = 0
num_Ci = 0
if matrixlist[i] == "N":
num_Ni = 1
if matrixlist[i] == "R":
num_Ri = 1
if matrixlist[i] == "C":
num_Ci = 1
q_dict[construct + "_q"] = (
q_dict[construct + "_q"]
* np.linalg.matrix_power(N, num_Ni)
* np.linalg.matrix_power(R, num_Ri)
* np.linalg.matrix_power(C, num_Ci)
)
q_dict[construct + "_q"] = q_dict[construct + "_q"] * end
# Next two lines convert from sp.Matrix to np.array to something else.
# Not sure the logic here, but it works.
q_dict[construct + "_q"] = np.array(q_dict[construct + "_q"])
q_dict[construct + "_q"] = q_dict[construct + "_q"].item(0)
# Partial derivs wrt Kn dictionary.
dqdKn_dict[construct + "_dqdKn"] = sp.diff(q_dict[construct + "_q"], Kn)
# Partial derivs wrt Kr dictionary.
dqdKr_dict[construct + "_dqdKr"] = sp.diff(q_dict[construct + "_q"], Kr)
# Partial derivs wrt Kc dictionary.
dqdKc_dict[construct + "_dqdKc"] = sp.diff(q_dict[construct + "_q"], Kc)
# Fraction folded dictionary.
frac_folded_dict[construct + "_frac_folded"] = (
Kn / (q_dict[construct + "_q"]) * dqdKn_dict[construct + "_dqdKn"]
+ Kr / (q_dict[construct + "_q"]) * dqdKr_dict[construct + "_dqdKr"]
+ Kc / (q_dict[construct + "_q"]) * dqdKc_dict[construct + "_dqdKc"]
) / (len(matrixlist))
# The loop below replaces K's and W's the fraction folded terms in the
# dictionary with DGs, ms, and denaturant concentrations. The simplify line
# is really important for making compact expressions for fraction folded.
# This simplification greatly speeds up fitting. The last line
# converts from a sympy object to a string, to allow for json dump.
for construct in frac_folded_dict:
frac_folded_dict[construct] = frac_folded_dict[construct].subs(
{
Kn: (np.exp(-((dGN - (mi * denat)) / RT))),
Kr: (np.exp(-((dGR - (mi * denat)) / RT))),
Kc: (np.exp(-((dGC - (mi * denat)) / RT))),
W: (np.exp(-dGinter / RT)),
}
)
frac_folded_dict[construct] = sp.simplify(frac_folded_dict[construct])
frac_folded_dict[construct] = str(frac_folded_dict[construct])
with open(os.path.join(PATH, f"{proj_name}_frac_folded_dict.json"), "w") as f:
json.dump(frac_folded_dict, f)
# The code block below calculates the rank of the coefficient matrix
# and outputs it to the user.
num_constructs = len(constructs)
thermo_param_list = ['dGN','dGR','dGC','dGinter']
num_params = len(thermo_param_list)
coeff_matrix = np.zeros((num_constructs, num_params))
row = 0
for construct in constructs:
repeats_list = construct.split('_')
for repeat in repeats_list:
if repeat == 'N':
coeff_matrix[row, 0] = coeff_matrix[row, 0] + 1
elif repeat == 'R':
coeff_matrix[row, 1] = coeff_matrix[row, 1] + 1
else:
coeff_matrix[row, 2] = coeff_matrix[row, 2] + 1
coeff_matrix[row, 3] = len(repeats_list) - 1
row = row + 1
rank = np.linalg.matrix_rank(coeff_matrix)
if rank == num_params:
print("\nThe coefficeint matrix has full column rank (r=",rank,")") #leaves a space betw rank and ). Not sure why.
else:
print("\nThe coefficeint matrix has incomplete column rank (r=",rank,").")
print("You should revise your model or include the necessary constructs to obtain full rank.\n")
stop = time.time()
runtime = stop - start
print("\nThe elapsed time was " + str(runtime) + " sec")
| [
37811,
198,
8645,
689,
18398,
2163,
290,
13390,
24650,
14700,
329,
281,
399,
7397,
198,
66,
6320,
3488,
35894,
647,
2168,
11,
290,
18616,
257,
1351,
286,
6306,
1226,
268,
1047,
357,
17694,
912,
8,
290,
257,
34175,
1351,
13,
198,
37811... | 2.464706 | 2,210 |
# Render a full-sky map read from 'out.dat' onto a 2-D map projection
# using matplotlib + Basemap.
from numpy import genfromtxt, zeros
from math import pi
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
# Each row of out.dat appears to be (value, i-index, j-index) on an
# (N+1) x (N/2+1) longitude/latitude grid -- TODO confirm with the
# producer of out.dat.
map = genfromtxt('out.dat')  # NOTE(review): shadows the builtin `map`
N = 512  # grid resolution: N+1 longitude samples, N/2+1 latitude samples
projection = 'cyl' # 'cyl', 'moll', 'ortho'
save_as_png = False
save_as_svg = False
inside_map = zeros((int(N + 1), int(N / 2 + 1)))
x = zeros((int(N + 1), int(N / 2 + 1)))
y = zeros((int(N + 1), int(N / 2 + 1)))
# Longitude in [-pi, pi] and latitude in [-pi/2, pi/2] for each grid node.
for i in range(0, int(N + 1)):
    for j in range(0, int(N / 2 + 1)):
        x[i][j] = (2.0 * i - N) / N * pi
        y[i][j] = 2.0 * j / N * pi - pi / 2.0
# Scatter the file's values into the grid; file indices are 1-based.
for i in range(0, int(N + 1) * int(N / 2 + 1)):
    inside_map[int(map[i][1] - 1)][int(map[i][2] - 1)] = map[i][0]
rad = 180.0 / pi  # radians -> degrees (Basemap expects degrees with latlon=True)
fig = plt.figure(figsize=(8, 4))
fig.subplots_adjust(
    left=0.0, right=1.0, top=1.0, bottom=0.0, wspace=0.0, hspace=0.0)
ax = fig.add_axes([0.0, 0.0, 1.0, 1.0])
ax.axis('off')
cmb_map = Basemap(projection=projection, lon_0=0.0, lat_0=0.0, resolution='l')
cmb_map.contourf(
    x * rad, y * rad, inside_map, 512, cmap=plt.cm.jet, latlon=True)
if save_as_png:
    plt.savefig('out.png', dpi=300)
if save_as_svg:
    plt.savefig('out.svg')
plt.show()
| [
6738,
299,
32152,
1330,
2429,
6738,
14116,
11,
1976,
27498,
198,
198,
6738,
10688,
1330,
31028,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
285,
489,
62,
25981,
74,
896,
13,
12093,
368,
499,
1330,
6455,
36... | 1.991197 | 568 |
from mtrack.cores import CoreSolver, CoreBuilder, DB
from mtrack.preprocessing import g1_to_nml, Chunker, extract_maxima_candidates
from mtrack.mt_utils import read_config, check_overlap
from mtrack.graphs.g1_graph import G1
from solve import solve
import numpy as np
import h5py
import os
import multiprocessing, logging
import sys
import signal
import traceback
import functools
import shutil
logger = logging.getLogger(__name__)
def connect_candidates_alias(db,
name_db,
collection,
x_lim,
y_lim,
z_lim,
distance_threshold):
"""
Alias for instance methods that allows us
to call them in a pool
"""
db.connect_candidates(name_db,
collection,
x_lim,
y_lim,
z_lim,
distance_threshold)
| [
6738,
285,
11659,
13,
66,
2850,
1330,
7231,
50,
14375,
11,
7231,
32875,
11,
20137,
198,
6738,
285,
11659,
13,
3866,
36948,
1330,
308,
16,
62,
1462,
62,
77,
4029,
11,
609,
21705,
11,
7925,
62,
9806,
8083,
62,
46188,
37051,
198,
6738,... | 1.863139 | 548 |
# --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
try:
import cPickle as pickle
except ImportError:
import pickle
import os
import math
import os.path
from utils.timer import Timer
from utils.cython_nms import nms, nms_new
from utils.boxes_grid import get_boxes_grid
from utils.blob import im_list_to_blob
from model.config import cfg, get_output_dir
from model.bbox_transform import clip_boxes, bbox_transform_inv
SAVE_SINGLE_IMAGE = True
import matplotlib.pyplot as plt
# CLASSES = ('__background__', # always index 0
# 'header', 'footer', 'logo', 'total_amount', 'total_amount_text', 'row', 'name', 'price')
CLASSES = ('__background__', # always index 0
# 'table', 'figure', 'formula')
'table', 'figure')
# Colors are in BGR format
CLASSES_COLORS = {1: (0, 0, 255), 2: (0, 255, 0), 3: (255, 0, 0)}
def _get_image_blob(im):
  """Converts an image into a network input.
  Arguments:
    im (ndarray): a color image in BGR order
  Returns:
    blob (ndarray): a data blob holding an image pyramid
    im_scale_factors (list): list of image scales (relative to im) used
      in the image pyramid
  """
  # Work on a float32 copy so the caller's image is not mutated.
  im_orig = im.astype(np.float32, copy=True)
  # Subtract the per-channel pixel means configured for this model.
  im_orig -= cfg.PIXEL_MEANS
  im_shape = im_orig.shape
  im_size_min = np.min(im_shape[0:2])
  im_size_max = np.max(im_shape[0:2])
  processed_ims = []
  im_scale_factors = []
  # One pyramid level per configured test scale: resize so the short side
  # matches target_size, capped so the long side stays within MAX_SIZE.
  for target_size in cfg.TEST.SCALES:
    im_scale = float(target_size) / float(im_size_min)
    # Prevent the biggest axis from being more than MAX_SIZE
    if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
      im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
    im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
            interpolation=cv2.INTER_LINEAR)
    im_scale_factors.append(im_scale)
    processed_ims.append(im)
  # Create a blob to hold the input images
  blob = im_list_to_blob(processed_ims)
  return blob, np.array(im_scale_factors)
def _get_blobs(im):
  """Build the network input dict for an image.

  Only the 'data' entry (the image pyramid blob) is populated here; the
  per-level scale factors are returned alongside so detections can be
  mapped back to the original image later.
  """
  pyramid_blob, scale_factors = _get_image_blob(im)
  return {'data': pyramid_blob}, scale_factors
def _clip_boxes(boxes, im_shape):
"""Clip boxes to image boundaries."""
# x1 >= 0
boxes[:, 0::4] = np.maximum(boxes[:, 0::4], 0)
# y1 >= 0
boxes[:, 1::4] = np.maximum(boxes[:, 1::4], 0)
# x2 < im_shape[1]
boxes[:, 2::4] = np.minimum(boxes[:, 2::4], im_shape[1] - 1)
# y2 < im_shape[0]
boxes[:, 3::4] = np.minimum(boxes[:, 3::4], im_shape[0] - 1)
return boxes
def _rescale_boxes(boxes, inds, scales):
"""Rescale boxes according to image rescaling."""
for i in range(boxes.shape[0]):
boxes[i,:] = boxes[i,:] / scales[int(inds[i])]
return boxes
def apply_nms(all_boxes, thresh):
  """Apply non-maximum suppression to all predicted boxes output by the
  test_net method.

  Args:
    all_boxes: nested list indexed as [class][image]; each cell is either
      an empty list or an (N, 5) array of [x1, y1, x2, y2, score] rows.
    thresh: IoU threshold forwarded to nms().

  Returns:
    A nested list with the same [class][image] layout holding only the
    detections that survive degenerate-box filtering, the configured
    score threshold, and NMS.
  """
  num_classes = len(all_boxes)
  num_images = len(all_boxes[0])
  nms_boxes = [[[] for _ in range(num_images)] for _ in range(num_classes)]
  for cls_ind in range(num_classes):
    for im_ind in range(num_images):
      dets = all_boxes[cls_ind][im_ind]
      # BUG FIX: `dets == []` does an elementwise comparison when dets is
      # an ndarray and is never simply True, so empty cells were not
      # skipped reliably; test emptiness explicitly instead.
      if len(dets) == 0:
        continue
      x1 = dets[:, 0]
      y1 = dets[:, 1]
      x2 = dets[:, 2]
      y2 = dets[:, 3]
      scores = dets[:, 4]
      # Drop degenerate boxes (non-positive width/height) and low scores.
      inds = np.where((x2 > x1) & (y2 > y1) & (scores > cfg.TEST.DET_THRESHOLD))[0]
      dets = dets[inds,:]
      # BUG FIX: same ndarray-vs-[] comparison issue as above; previously
      # empty filtered arrays fell through into nms().
      if len(dets) == 0:
        continue
      keep = nms(dets, thresh)
      if len(keep) == 0:
        continue
      nms_boxes[cls_ind][im_ind] = dets[keep, :].copy()
  return nms_boxes
def vis_detections(im, class_name, dets, thresh=0.5):
  """Draw detected bounding boxes.

  Arguments:
    im (ndarray): image in BGR channel order (converted to RGB for display)
    class_name (str): label drawn next to each kept detection
    dets (ndarray): (N, 5) array of [x1, y1, x2, y2, score] rows
    thresh (float): minimum score a detection needs to be drawn
  """
  # Keep only detections whose score (last column) clears the threshold.
  inds = np.where(dets[:, -1] >= thresh)[0]
  if len(inds) == 0:
    return
  # BGR -> RGB so matplotlib shows the colors correctly.
  im = im[:, :, (2, 1, 0)]
  fig, ax = plt.subplots(figsize=(12, 12))
  ax.imshow(im, aspect='equal')
  for i in inds:
    bbox = dets[i, :4]
    score = dets[i, -1]
    # Outline the detection box ...
    ax.add_patch(
      plt.Rectangle((bbox[0], bbox[1]),
                    bbox[2] - bbox[0],
                    bbox[3] - bbox[1], fill=False,
                    edgecolor='red', linewidth=3.5)
    )
    # ... and annotate it with the class label and score.
    ax.text(bbox[0], bbox[1] - 2,
            '{:s} {:.3f}'.format(class_name, score),
            bbox=dict(facecolor='blue', alpha=0.5),
            fontsize=14, color='white')
  ax.set_title(('{} detections with '
                'p({} | box) >= {:.1f}').format(class_name, class_name,
                                                thresh),
               fontsize=14)
  plt.axis('off')
  plt.tight_layout()
  plt.draw()
| [
2,
20368,
22369,
198,
2,
309,
22854,
11125,
38996,
371,
12,
18474,
198,
2,
49962,
739,
383,
17168,
13789,
685,
3826,
38559,
24290,
329,
3307,
60,
198,
2,
22503,
416,
25426,
293,
72,
12555,
198,
2,
20368,
22369,
198,
6738,
11593,
37443... | 2.287523 | 2,132 |
import glob
from collections import Counter
from typing import List
import sys
import os
import json
from preprocessing.scraping import JSONS_DIR
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
| [
11748,
15095,
198,
6738,
17268,
1330,
15034,
198,
6738,
19720,
1330,
7343,
198,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
33918,
198,
198,
6738,
662,
36948,
13,
1416,
2416,
278,
1330,
19449,
50,
62,
34720,
198,
198,
17597,
13,
6... | 3.089744 | 78 |
from django.core.exceptions import ValidationError
from django.utils.deconstruct import deconstructible
# def validator_file_max_size_in_mb(max_size):
# def validate_image(fieldfile_obj):
# filesize = fieldfile_obj.file.size
# megabyte_limit = 5.0
# if filesize > megabyte_limit * 1024 * 1024:
# raise ValidationError("Max file size is %sMB" % str(max_size))
#
# return validate_image
@deconstructible
| [
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
3254,
24765,
12331,
198,
6738,
42625,
14208,
13,
26791,
13,
12501,
261,
7249,
1330,
37431,
7249,
856,
628,
198,
198,
2,
825,
4938,
1352,
62,
7753,
62,
9806,
62,
7857,
62,
259,
62,
... | 2.511111 | 180 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
# -*- coding: utf-8 -*-
import pytest
from rest_framework.status import HTTP_302_FOUND
import allure
from directory_tests_shared import URLs
from tests.smoke.cms_api_helpers import get_and_assert
pytestmark = [allure.suite("FAS redirects"), allure.feature("FAS redirects")]
@allure.issue("CMS-1834", "Links to legacy industry pages redirect to wrong place")
@allure.issue("ED-4152", "404s on old industry pages & contact-us page")
@pytest.mark.parametrize(
"old_url,to_new_endpoint",
[
(
URLs.FAS_INDUSTRIES_HEALTH.absolute,
URLs.FAS_INCOMING_REDIRECT.absolute_template.format(
endpoint="industries/health"
),
),
(
URLs.FAS_INDUSTRIES_TECH.absolute,
URLs.FAS_INCOMING_REDIRECT.absolute_template.format(
endpoint="industries/tech"
),
),
(
URLs.FAS_INDUSTRIES_CREATIVE.absolute,
URLs.FAS_INCOMING_REDIRECT.absolute_template.format(
endpoint="industries/creative"
),
),
],
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
12972,
9288,
198,
6738,
1334,
62,
30604,
13,
13376,
1330,
14626,
62,
22709,
62,
37,
15919,
198,
198,
11748,
477,
495,
198,
6738,
8619,
62,
41989,
62,
28710,
1330,... | 2.095969 | 521 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import math
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import numpy as np
import sys
TRANSFER_MB = 2 # Data transfered in megabytes
BOTTLENECK_LINK = 1.5 # Mbps
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
... | 2.85 | 140 |
from __future__ import (absolute_import, division, print_function)
# NOTE(review): because an import statement precedes it, the string below
# is a plain expression statement, not the module docstring.
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import os
from codecs import open
from setuptools import find_packages, setup
import versioneer
here = os.path.abspath(os.path.dirname(__file__))
# Dependencies.
with open('requirements.txt') as f:
    requirements = f.readlines()
install_requires = [t.strip() for t in requirements]
# Long description for PyPI comes straight from the README.
with open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    name='odm2api',
    # versioneer derives the version from the VCS tags.
    version=versioneer.get_version(),
    description='Python interface for the Observations Data Model 2 (ODM2)',
    long_description=long_description,
    url='https://github.com/ODM2/ODM2PythonAPI',
    author='ODM2 team-Stephanie Reeder',
    author_email='stephanie.reeder@usu.edu',
    maintainer='David Valentine',
    maintainer_email='david.valentine@gmail.com',
    license='BSD',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Scientific/Engineering'
    ],
    keywords='Observations Data Model ODM2',
    packages=find_packages(exclude=['samplefiles', 'setup', 'tests*', 'Forms']),
    install_requires=install_requires,
    # Optional database backends installed via extras, e.g. odm2api[mysql].
    extras_require={
        'mysql': ['pymysql'],
        'postgis': ['psycopg2'],
        'sqlite': ['pyspatialite >=3.0.0'],
    },
    cmdclass=versioneer.get_cmdclass(),
)
| [
6738,
11593,
37443,
834,
1330,
357,
48546,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
8,
198,
198,
37811,
32,
900,
37623,
10141,
1912,
9058,
8265,
13,
198,
198,
6214,
25,
198,
5450,
1378,
8002,
3039,
13,
29412,
13,
2398,
14,
268,
14,
... | 2.693816 | 663 |
# ----------------------------------------------------------------------
# Copyright (c) 2022
#
# See the LICENSE file for details
# see the AUTHORS file for authors
# ----------------------------------------------------------------------
#################################
## APPLICATION SPECIFIC WIDGETS #
#################################
#--------------------
# System wide imports
# -------------------
import os
import gettext
import tkinter as tk
from tkinter import ttk
# -------------------
# Third party imports
# -------------------
from pubsub import pub
# ---------------
# Twisted imports
# ---------------
from twisted.logger import Logger
#--------------
# local imports
# -------------
from zptess.utils import chop
from zptess.gui.widgets.contrib import ToolTip, LabelInput
from zptess.gui.widgets.validators import float_validator, ip_validator
# ----------------
# Module constants
# ----------------
# Support for internationalization
_ = gettext.gettext
NAMESPACE = 'gui'
# -----------------------
# Module global variables
# -----------------------
log = Logger(namespace=NAMESPACE)
# -----------------------
# Module global functions
# -----------------------
# -----------------------
# Module auxiliar classes
# -----------------------
| [
2,
16529,
23031,
198,
2,
15069,
357,
66,
8,
33160,
198,
2,
198,
2,
4091,
262,
38559,
24290,
2393,
329,
3307,
198,
2,
766,
262,
37195,
20673,
2393,
329,
7035,
198,
2,
16529,
23031,
198,
198,
29113,
2,
198,
2235,
39421,
6234,
28196,
... | 4.222222 | 306 |
"""ttLib.Chaining: Converting Chaining rules to TrueType."""
def lookup_type(self):
    """Mixin method: determine the GSUB/GPOS lookup type of a
    fontFeatures.Chaining object.

    Returns:
        int: 8 (GPOS chaining lookup) when the rule is in the "pos"
        stage, otherwise 6 (GSUB chaining lookup).
    """
    return 8 if self.stage == "pos" else 6
| [
37811,
926,
25835,
13,
1925,
1397,
25,
35602,
889,
609,
1397,
3173,
284,
6407,
6030,
526,
15931,
198,
198,
4299,
35847,
62,
4906,
7,
944,
2599,
198,
220,
220,
220,
37227,
35608,
259,
284,
5004,
262,
26681,
10526,
14,
16960,
2640,
3584... | 2.745283 | 106 |
import logging
from django import forms
from django.forms import ModelForm
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from .choices import ActionChoice
from .choices import StatusApproval
from .models import GreencheckIp
from .models import GreencheckIpApprove
from .models import GreencheckASN, GreencheckASNapprove
User = get_user_model()
logger = logging.getLogger(__name__)
class ChangeStateRequiredError(Exception):
    """Raised when the approval mixin is used but a subclass never set the
    ``changed`` attribute, which the approval workflow relies on."""
class ApprovalMixin:
    """
    Mixin holding the shared approval logic for the IP and ASN forms.

    Implements the 'admin hack' that records a submission as either a
    brand new approval request or an update to an existing one.
    """

    # Subclasses must point this at their concrete approval model.
    ApprovalModel = None

    def check_if_changed(self):
        """
        Return the form's ``changed`` flag, raising a helpful error when
        a subclass forgot to set it.
        """
        if hasattr(self, "changed"):
            return self.changed
        # Fail loudly rather than silently treating the form as unchanged.
        raise ChangeStateRequiredError(
            (
                "the 'changed' attribute needs to be set on a form for "
                "approval checking to work properly"
            )
        )

    def _save_approval(self):
        """
        For non-staff submitters, replace ``self.instance`` with an unsaved
        approval request (an IP range or an AS network) and queue it for
        review with the hosting provider. Staff submissions are left alone.

        The ``changed`` attribute must be set (normally by the formset)
        before save() is called; it tells us whether this is an update to
        an existing IP range / ASN or a totally new one.
        """
        changed = self.check_if_changed()
        if self.ApprovalModel is None:
            raise NotImplementedError("Approval model missing")
        model_name = self.ApprovalModel._meta.model_name

        if self.cleaned_data["is_staff"]:
            return

        hosting_provider = self.instance.hostingprovider
        if changed:
            action, status = ActionChoice.UPDATE, StatusApproval.UPDATE
        else:
            action, status = ActionChoice.NEW, StatusApproval.NEW
        common = dict(
            action=action,
            status=status,
            hostingprovider=hosting_provider,
        )
        if model_name == "greencheckasnapprove":
            self.instance = GreencheckASNapprove(asn=self.instance.asn, **common)
        else:
            self.instance = GreencheckIpApprove(
                ip_end=self.instance.ip_end,
                ip_start=self.instance.ip_start,
                **common
            )
        hosting_provider.mark_as_pending_review(self.instance)
class GreencheckIpForm(ModelForm, ApprovalMixin):
    """
    Form for a GreencheckIp record. When a non-staff user submits it,
    save() stores an unsaved approval record (GreencheckIpApprove)
    instead of a GreencheckIp record directly.
    """

    # Approval model consumed by ApprovalMixin._save_approval().
    ApprovalModel = GreencheckIpApprove

    # Hidden flag so _save_approval() can tell staff submissions (saved
    # directly) apart from non-staff ones (queued for review).
    is_staff = forms.BooleanField(
        label="user_is_staff", required=False, widget=forms.HiddenInput()
    )

    def save(self, commit=True):
        """
        If a non-staff user creates an ip, instead of saving
        the ip record directly, we save an approval record.
        Once the IP range approval request is approved, we create the IP.
        If a staff user saves, we create it directly.
        """
        self._save_approval()
        return super().save(commit=commit)
| [
11748,
18931,
198,
198,
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
23914,
1330,
9104,
8479,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849,
198,
6738,
42625,
14208,
13,
7295,
13,
1069... | 2.454731 | 1,469 |
>>> for n in (254, 255, 256, 257, -2+(1<<16), -1+(1<<16), 1<<16, 1+(1<<16), 0x200000, 0x1fffff ):
print('int: %7i bin: %26s vlq: %35s vlq->int: %7i' % (n, tobits(n,_pad=True), tovlq(n), toint(tovlq(n))))
int: 254 bin: 11111110 vlq: 00000001_11111110 vlq->int: 254
int: 255 bin: 11111111 vlq: 00000001_11111111 vlq->int: 255
int: 256 bin: 00000001_00000000 vlq: 00000010_10000000 vlq->int: 256
int: 257 bin: 00000001_00000001 vlq: 00000010_10000001 vlq->int: 257
int: 65534 bin: 11111111_11111110 vlq: 00000011_11111111_11111110 vlq->int: 65534
int: 65535 bin: 11111111_11111111 vlq: 00000011_11111111_11111111 vlq->int: 65535
int: 65536 bin: 00000001_00000000_00000000 vlq: 00000100_10000000_10000000 vlq->int: 65536
int: 65537 bin: 00000001_00000000_00000001 vlq: 00000100_10000000_10000001 vlq->int: 65537
int: 2097152 bin: 00100000_00000000_00000000 vlq: 00000001_10000000_10000000_10000000 vlq->int: 2097152
int: 2097151 bin: 00011111_11111111_11111111 vlq: 01111111_11111111_11111111 vlq->int: 2097151
>>> vlqsend(tovlq(0x200000))
Sent byte 0: 0x80
Sent byte 1: 0x80
Sent byte 2: 0x80
Sent byte 3: 0x01
>>> vlqsend(tovlq(0x1fffff))
Sent byte 0: 0xff
Sent byte 1: 0xff
Sent byte 2: 0x7f
>>>
| [
33409,
329,
299,
287,
357,
24970,
11,
14280,
11,
17759,
11,
36100,
11,
532,
17,
33747,
16,
16791,
1433,
828,
532,
16,
33747,
16,
16791,
1433,
828,
352,
16791,
1433,
11,
352,
33747,
16,
16791,
1433,
828,
657,
87,
33470,
11,
657,
87,
... | 1.883268 | 771 |
import myexman
import torch
import utils
import datautils
import os
from logger import Logger
import time
import numpy as np
from models import flows, distributions
import matplotlib.pyplot as plt
from algo.em import init_kmeans2plus_mu
import warnings
from sklearn.mixture import GaussianMixture
import torch.nn.functional as F
from tqdm import tqdm
import sys
parser = myexman.ExParser(file=os.path.basename(__file__))
parser.add_argument('--name', default='')
parser.add_argument('--verbose', default=0, type=int)
parser.add_argument('--save_dir', default='')
parser.add_argument('--test_mode', default='')
# Data
parser.add_argument('--data', default='mnist')
parser.add_argument('--num_examples', default=-1, type=int)
parser.add_argument('--data_seed', default=0, type=int)
parser.add_argument('--sup_sample_weight', default=-1, type=float)
# parser.add_argument('--aug', dest='aug', action='store_true')
# parser.add_argument('--no_aug', dest='aug', action='store_false')
# parser.set_defaults(aug=True)
# Optimization
parser.add_argument('--opt', default='adam')
parser.add_argument('--ssl_alg', default='em')
parser.add_argument('--lr', default=1e-3, type=float)
parser.add_argument('--epochs', default=100, type=int)
parser.add_argument('--train_bs', default=256, type=int)
parser.add_argument('--test_bs', default=512, type=int)
parser.add_argument('--lr_schedule', default='linear')
parser.add_argument('--lr_warmup', default=10, type=int)
parser.add_argument('--lr_gamma', default=0.5, type=float)
parser.add_argument('--lr_steps', type=int, nargs='*', default=[])
parser.add_argument('--log_each', default=1, type=int)
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--pretrained', default='')
parser.add_argument('--weight_decay', default=0., type=float)
parser.add_argument('--sup_ohe', dest='sup_ohe', action='store_true')
parser.add_argument('--no_sup_ohe', dest='sup_ohe', action='store_false')
parser.set_defaults(sup_ohe=True)
parser.add_argument('--clip_gn', default=100., type=float)
# Model
parser.add_argument('--model', default='flow')
parser.add_argument('--logits', dest='logits', action='store_true')
parser.add_argument('--no_logits', dest='logits', action='store_false')
parser.set_defaults(logits=True)
parser.add_argument('--conv', default='full')
parser.add_argument('--hh_factors', default=2, type=int)
parser.add_argument('--k', default=4, type=int)
parser.add_argument('--l', default=2, type=int)
parser.add_argument('--hid_dim', type=int, nargs='*', default=[])
# Prior
parser.add_argument('--ssl_model', default='cond-flow')
parser.add_argument('--ssl_dim', default=-1, type=int)
parser.add_argument('--ssl_l', default=2, type=int)
parser.add_argument('--ssl_k', default=4, type=int)
parser.add_argument('--ssl_hd', default=256, type=int)
parser.add_argument('--ssl_conv', default='full')
parser.add_argument('--ssl_hh', default=2, type=int)
parser.add_argument('--ssl_nclasses', default=10, type=int)
# SSL
parser.add_argument('--supervised', default=0, type=int)
parser.add_argument('--sup_weight', default=1., type=float)
parser.add_argument('--cl_weight', default=0, type=float)
args = parser.parse_args()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Load data
np.random.seed(args.data_seed)
torch.manual_seed(args.data_seed)
torch.cuda.manual_seed_all(args.data_seed)
trainloader, testloader, data_shape, bits = datautils.load_dataset(args.data, args.train_bs, args.test_bs,
seed=args.data_seed, shuffle=False)
# Seed for training process
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
# Create model
dim = int(np.prod(data_shape))
if args.ssl_dim == -1:
args.ssl_dim = dim
deep_prior = distributions.GaussianDiag(args.ssl_dim)
shallow_prior = distributions.GaussianDiag(dim - args.ssl_dim)
yprior = torch.distributions.Categorical(logits=torch.zeros((args.ssl_nclasses,)).to(device))
ssl_flow = flows.get_flow_cond(args.ssl_l, args.ssl_k, in_channels=args.ssl_dim, hid_dim=args.ssl_hd,
conv=args.ssl_conv, hh_factors=args.ssl_hh, num_cat=args.ssl_nclasses)
ssl_flow = torch.nn.DataParallel(ssl_flow.to(device))
prior = flows.DiscreteConditionalFlowPDF(ssl_flow, deep_prior, yprior, deep_dim=args.ssl_dim,
shallow_prior=shallow_prior)
flow = utils.create_flow(args, data_shape)
flow = torch.nn.DataParallel(flow.to(device))
model = flows.FlowPDF(flow, prior).to(device)
if args.pretrained != '':
model.load_state_dict(torch.load(os.path.join(args.pretrained, 'model.torch'), map_location=device))
# def get_embeddings(loader, model):
# zf, zh, labels = [], [], []
# for x, y in loader:
# x = x.to(device)
# print(model.log_prob(x).mean())
# z_ = flow(x)[1]
# zf.append(utils.tonp(z_).mean())
# # z_ = z_[:, -args.ssl_dim:, None, None]
# print(model.prior.log_prob(z_))
# print(torch.zeros((z_.shape[0],)).to(z_.device))
# print(model.prior.flow(z_, y))
# # print(x.device, z_.device)
# # print(torch.zeros((z_.shape[0],)).to(z_.device))
# # print(z_.shape)
# sys.exit(0)
# log_det_jac = torch.zeros((x.shape[0],)).to(x.device)
# ssl_flow.module.f(z_, y.to(device))
# zh.append(utils.tonp())
# labels.append(utils.tonp(y))
# return np.concatenate(zf), np.concatenate(zh), np.concatenate(labels)
y_test = np.array(testloader.dataset.targets)
y_train = np.array(trainloader.dataset.targets)
if args.test_mode == 'perm':
idxs = np.random.permutation(10000)[:5000]
testloader.dataset.data[idxs] = 255 - testloader.dataset.data[idxs]
testloader.dataset.targets[idxs] = 1 - testloader.dataset.targets[idxs]
elif args.test_mode == '':
pass
elif args.test_mode == 'inv':
testloader.dataset.data = 255 - testloader.dataset.data
testloader.dataset.targets = 1 - testloader.dataset.targets
else:
raise NotImplementedError
with torch.no_grad():
zf_train, zh_train, _ = get_embeddings(trainloader, model)
zf_test, zh_test, _ = get_embeddings(testloader, model)
np.save(os.path.join(args.save_dir, 'zf_train'), zf_train)
np.save(os.path.join(args.save_dir, 'zh_train'), zh_train)
np.save(os.path.join(args.save_dir, 'y_train'), y_train)
np.save(os.path.join(args.save_dir, 'zf_test'), zf_test)
np.save(os.path.join(args.save_dir, 'zh_test'), zh_test)
np.save(os.path.join(args.save_dir, 'y_test'), y_test)
| [
11748,
616,
1069,
805,
198,
11748,
28034,
198,
11748,
3384,
4487,
198,
11748,
1366,
26791,
198,
11748,
28686,
198,
6738,
49706,
1330,
5972,
1362,
198,
11748,
640,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
4981,
1330,
15623,
11,
24570... | 2.436458 | 2,699 |
#!/usr/bin/env python3
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from datetime import datetime, timedelta
from src.misc import check_n_mkdir, Logger
from collections import defaultdict
import time
import os
import re
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11321,
13,
1525,
1330,
2750,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
1... | 3.495327 | 107 |
from blockkit.attributes import TextAttr
from blockkit.generics import *
| [
6738,
2512,
15813,
13,
1078,
7657,
1330,
8255,
8086,
81,
198,
6738,
2512,
15813,
13,
8612,
873,
1330,
1635,
628,
198
] | 3.571429 | 21 |
from mock import patch
from nose.tools import assert_true, assert_false, assert_raises
from zabby.core.exceptions import WrongArgumentError
from zabby.core.six import b
from zabby.items.net import tcp
| [
6738,
15290,
1330,
8529,
198,
6738,
9686,
13,
31391,
1330,
6818,
62,
7942,
11,
6818,
62,
9562,
11,
6818,
62,
430,
2696,
198,
6738,
1976,
42457,
13,
7295,
13,
1069,
11755,
1330,
28843,
28100,
1713,
12331,
198,
198,
6738,
1976,
42457,
1... | 3.5 | 58 |
import pytest
from client.api import API
PROJECT_NAME = "test_acl"
USER_NAME = "test_artist"
PASSWORD = "123.456.AbCd"
ROLE_NAME = "test_artist_role"
@pytest.fixture
@pytest.mark.order(-1)
| [
11748,
12972,
9288,
198,
198,
6738,
5456,
13,
15042,
1330,
7824,
198,
198,
31190,
23680,
62,
20608,
796,
366,
9288,
62,
37779,
1,
198,
29904,
62,
20608,
796,
366,
9288,
62,
49016,
1,
198,
47924,
54,
12532,
796,
366,
10163,
13,
29228,
... | 2.385542 | 83 |
# -*- coding: utf-8 -*-
'''
process pdb file to calculate contact score and write to csv.
main function is prosess_save2csv()
usage: python3 RRCS_change.py file.pdb
input .pdb
output .csv
'''
import sys
# When every atom-pair distance exceeds 4.63 the pair is not recorded; its score should count as 0.
# a function to eliminate 0 score from calc_contact()
# pack contact score into csv file
def calc_contact_save2csv(pdbbase: 'pdb file'):
    """Compute residue-residue contact scores for *pdbbase* and dump the
    positive ones to '<pdbbase>.cscore.csv', one 'res_a,res_b,score' row each.
    """
    scores, _ = calc_contact(pdbbase)
    csv_path = pdbbase + '.cscore.csv'
    with open(csv_path, 'w') as handle:
        for res_a, partners in scores.items():
            for res_b, value in partners.items():
                # zero scores are omitted to keep the csv sparse
                if value > 0:
                    handle.write('%-12s,%s,%10.6f\n' % (res_a, res_b, value))
def calc_contact_save2json(pdbbase: 'pdb file'):
    """
    process pdb file to calculate contact score (zeros already removed)
    and write it to '<pdbbase>.cscore.json' as json
    """
    # calc_contact_no_zero already filters out zero scores, so the dict can
    # be serialized as-is
    contact, _ = calc_contact_no_zero(pdbbase)
    outf = pdbbase + '.cscore.json'
    dict2json(contact, outf)
def delta_rrcs(pdbbase1: 'pdb file', pdbbase2: 'pdb file'):
    """
    delta RRCS
    Calculate delta RRCS between two states of a receptor, e.g. active vs. inactive.

    Only residues that exist in both pdb_base1 and pdb_base2 (the intersection
    of their residue sets) are considered calculable; for calculable residues,
    every pair whose contact score is positive in either structure contributes
    a delta (a missing score counts as 0).

    output: (delta_RRCS: dict, (set_not_calculated_a, set_not_calculated_b))
    """
    contact_a, dict_coord_a = calc_contact(pdbbase1)
    contact_b, dict_coord_b = calc_contact(pdbbase2)
    # residues present in both structures are the only calculable ones
    set_ires_inter = set(dict_coord_a) & set(dict_coord_b)
    # residues unique to one structure -- reported back to the caller
    set_not_calculated_a = set(dict_coord_a) - set_ires_inter
    set_not_calculated_b = set(dict_coord_b) - set_ires_inter

    def _positive_pairs(contact):
        # Flatten {ires: {jres: score}} into {(ires, jres): score}, keeping
        # only positive scores (zero scores mean "no contact").
        return {(ires, jres): score
                for ires, jres_scores in contact.items()
                for jres, score in jres_scores.items()
                if score > 0}

    pairs_a = _positive_pairs(contact_a)
    pairs_b = _positive_pairs(contact_b)
    not_calculable = set_not_calculated_a | set_not_calculated_b
    delta_rrcs_dict = {}
    # Pairs seen in A first (delta = b - a, with b defaulting to 0), then the
    # pairs that exist only in B (delta = b - 0).  Dict lookups replace the
    # previous O(n^2) pairwise list scan while producing the same mapping.
    pairs_only_b = [pair for pair in pairs_b if pair not in pairs_a]
    for resi, resj in list(pairs_a) + pairs_only_b:
        if resi in not_calculable:
            # residue is not present in both structures: skip it
            continue
        score = pairs_b.get((resi, resj), 0) - pairs_a.get((resi, resj), 0)
        delta_rrcs_dict.setdefault(resi, {})[resj] = score
    return delta_rrcs_dict, (set_not_calculated_a, set_not_calculated_b)
def delta_rrcs_save2csv(pdbbase1: 'pdb file', pdbbase2: 'pdb file', outf_path='./delta.csv'):
    """Write the delta RRCS between two pdb files to a csv file.

    Each row is 'resi,resj,delta_score'.  Residues that could not be
    compared (present in only one structure) are printed to stdout.
    """
    delta_dict, skipped = delta_rrcs(pdbbase1, pdbbase2)
    with open(str(outf_path), 'w') as sink:
        for ires, partners in delta_dict.items():
            for jres, delta in partners.items():
                sink.write('%-12s,%s,%10.6f\n' % (ires, jres, delta))
    print('Brief report:')
    print('>>>>>>>>>>>>')
    print(skipped[0])
    print('>>>>>>>>>>>>')
    print(skipped[1])
    print('>>>>>>>>>>>>')
def delta_rrcs_save2json(pdbbase1: 'pdb file', pdbbase2: 'pdb file', outf_path='./delta.json'):
    """
    Write the delta RRCS between two pdb files to a json file.

    input: pdbbase1, pdbbase2, optional output path (default './delta.json')
    output: python dict [resi][resj] -> delta_score, serialized as json
    """
    import json
    dict_rrcs, not_calculated_res_list = delta_rrcs(pdbbase1, pdbbase2)
    outf_path = str(outf_path)
    with open(outf_path, 'w') as outf:
        json.dump(dict_rrcs, outf)
    print('Brief report from delta_rrcs_save2json:')
    # Bug fix: report the path actually written instead of the hard-coded
    # default './delta.json'.
    print('output file is %s' % outf_path)
    print('>>>>>>>>>>>>')
    print(not_calculated_res_list[0])
    print('>>>>>>>>>>>>')
    print(not_calculated_res_list[1])
    print('>>>>>>>>>>>>')
def dict2json(python_dict: dict, outf: str):
    """Serialize a python dict to a json file.

    input: dict object, output file path
    output: json file (nothing is written when the input is not a dict)
    """
    import json
    if not isinstance(python_dict, dict):
        return
    with open(str(outf), 'w') as sink:
        json.dump(python_dict, sink)
if __name__ == '__main__':
    # CLI entry point: expects the path to a .pdb file as the only argument
    fin = sys.argv[1]
    # writes the contact scores next to the input as '<fin>.cscore.json'
    calc_contact_save2json(fin)
    print('''
    Before using, if there is more than one chain in pdb file,
    please make sure what you want to calculate is in right order of chain number.
    ''')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
201,
198,
7061,
6,
201,
198,
14681,
279,
9945,
2393,
284,
15284,
2800,
4776,
290,
3551,
284,
269,
21370,
13,
201,
198,
220,
220,
220,
1388,
2163,
318,
10360,
408,... | 2.004173 | 4,313 |
# Begin
# node 1 text A.
# node 2 text B.
# End
| [
198,
2,
16623,
198,
198,
2,
10139,
352,
2420,
317,
13,
198,
2,
10139,
362,
2420,
347,
13,
198,
198,
2,
5268,
198
] | 2.217391 | 23 |
from BasicDS.ArrayQueue import ArrayQueue
# Smoke-test drive for ArrayQueue: enqueue 0..9, printing the queue after
# each operation, and dequeue once after every third enqueue (i = 2, 5, 8).
if __name__ == "__main__":
    queue = ArrayQueue()
    for i in range(10):
        queue.enqueue(i)
        print(queue)  # show queue state after each enqueue
        if i % 3 == 2:
            # every third element, also pop one from the front
            queue.dequeue()
            print(queue)
| [
6738,
14392,
5258,
13,
19182,
34991,
1330,
15690,
34991,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
16834,
796,
15690,
34991,
3419,
628,
220,
220,
220,
329,
1312,
287,
2837,
7,
940,
2599,
19... | 2.068376 | 117 |
import os, sys, numpy, torch, argparse, skimage, json, shutil
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.ticker import MaxNLocator
import matplotlib
if __name__ == '__main__':
main()
| [
11748,
28686,
11,
25064,
11,
299,
32152,
11,
28034,
11,
1822,
29572,
11,
1341,
9060,
11,
33918,
11,
4423,
346,
198,
6738,
2603,
29487,
8019,
13,
1891,
2412,
13,
1891,
437,
62,
9460,
1330,
11291,
6090,
11017,
46384,
355,
11291,
6090,
1... | 3.113636 | 88 |
"""
Root system data for type I
"""
from __future__ import absolute_import
#*****************************************************************************
# Copyright (C) 2008-2009 Nicolas M. Thiery <nthiery at users.sf.net>,
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#*****************************************************************************
from .cartan_type import CartanType_standard_finite, CartanType_simple
| [
37811,
198,
30016,
1080,
1366,
329,
2099,
314,
198,
37811,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
2,
17174,
17174,
4557,
35625,
198,
2,
220,
220,
220,
220,
220,
220,
15069,
357,
34,
8,
3648,
12,
10531,
29737,
337,
... | 3.705882 | 136 |
# coding=utf-8
###########################
# file: util.py
# date: 2021-7-16
# author: Sturmfy
# desc: utility
# version:
# 2021-7-16 init design
########################### | [
2,
19617,
28,
40477,
12,
23,
198,
198,
14468,
7804,
21017,
198,
2,
2393,
25,
7736,
13,
9078,
198,
2,
3128,
25,
33448,
12,
22,
12,
1433,
198,
2,
1772,
25,
26783,
76,
24928,
198,
2,
1715,
25,
10361,
198,
2,
2196,
25,
198,
2,
220... | 3.034483 | 58 |
#!/usr/bin/env python
# coding: utf-8
# # 5m - Df unification (10 calib. fn-s)
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
from os.path import join
import pickle
from copy import copy
# In[7]:
# MAIN
IDENT_TAG = "28_05_pre"
PATH_res = "data_1m_final_2805"
COLUMNS = ["model_name", "tag_name", "cgt_nr", "seed", "n_data", "binning", "n_bins", "c_hat_distance_p", "c_hat_distance_p_square", "c_hat_distance_p_debiased",
"c_hat_distance_p_square_debiased", "c_hat_distance_c", "c_hat_distance_c_square", "p_distance_c", "p_distance_c_square", "calibration_function"]
files_ECE = []
files_PW = []
files_KDE = []
files_BIP = [] # beta, iso, platt,
for file in os.listdir(PATH_res):
if file.endswith(".pkl") and not "_m_" in file:
if file.startswith("binning"):
files_ECE.append(file)
elif file.startswith("df_seed"):
if ("gt0_" in file) or ("gt1_" in file) or ("gt2_" in file) or ("gt3_" in file):
files_BIP.append(file)
else:
files_PW.append(file)
elif file.startswith("KDE"):
files_KDE.append(file)
print("ECE files:", len(files_ECE)) # cgt - 612*4, 44 missing? # TODO why? - 44 puudu
print("KDE files:", len(files_KDE)) # Right amount
print("PW files:", len(files_PW)) # PW_NN_mono + PW_NN_SWEEP # Mis siin puudu? 612*10 = 6120` rIGHT AMount
print("BIP files:", len(files_BIP)) # Right amount
print("Start prepping")
#files_ECE = []
if len(files_ECE) != 0:
prep_ECE(files_ECE, COLUMNS, PATH_res, IDENT_TAG)
print("ECE prepped")
if len(files_PW) != 0:
prep_PW(files_PW, COLUMNS, PATH_res, IDENT_TAG)
print("PW prepped")
if len(files_BIP) != 0:
prep_BIP(files_BIP, COLUMNS, PATH_res, IDENT_TAG)
print("BIP prepped")
if len(files_KDE) != 0:
prep_KDE(files_KDE, COLUMNS, PATH_res, IDENT_TAG)
print("KDE prepped")
# ### Put all together
res_dfs = []
if len(files_KDE) != 0:
with open("res_KDE_%s.pkl" % IDENT_TAG, "rb") as f:
res_KDE = pd.read_pickle(f)
res_dfs.append(res_KDE)
if len(files_PW) != 0:
with open("res_PW_%s.pkl" % IDENT_TAG, "rb") as f:
res_PW = pd.read_pickle(f)
res_dfs.append(res_PW)
if len(files_ECE) != 0:
with open("res_ECE_%s.pkl" % IDENT_TAG, "rb") as f:
res_ECE = pd.read_pickle(f)
res_dfs.append(res_ECE)
if len(files_BIP) != 0:
with open("res_BIP_%s.pkl" % IDENT_TAG, "rb") as f:
res_BIP = pd.read_pickle(f)
res_dfs.append(res_BIP)
# In[94]:
all_df = pd.concat(res_dfs)
all_df.reset_index(inplace=True, drop = True)
# Filter BIN NR from method name for CV rows
all_df["binning"] = all_df['binning'].str.replace('monotonic_eq_size_.+', "monotonic_eq_size")
for bin_nr in [5,10]:
for bin_type in ["width", "size"]:
all_df["binning"] = all_df['binning'].str.replace(f'eq_{bin_type}_([0-9]+)_{bin_nr}_trick', f"eq_{bin_type}_CV{bin_nr}tr")
all_df["binning"] = all_df['binning'].str.replace(f'eq_{bin_type}_([0-9]+)_{bin_nr}', f"eq_{bin_type}_CV{bin_nr}")
all_df["ECE_abs"] = np.abs(all_df["c_hat_distance_p_debiased"] - all_df["p_distance_c"])
all_df["ECE_square"] = np.abs(all_df["c_hat_distance_p_square_debiased"] - all_df["p_distance_c_square"])
# ## Save to pickle file
all_df.to_pickle("df_all_%s.pkl" % IDENT_TAG, protocol=4)
print("All data saved to %s" % ("df_all_%s.pkl" % IDENT_TAG))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
1303,
642,
76,
532,
360,
69,
49080,
357,
940,
27417,
13,
24714,
12,
82,
8,
198,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
... | 2.085106 | 1,645 |
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path
from . import views
# URL routes for the user- and group-administration views.
urlpatterns = [
    # user management
    path('add_user', views.UserCreateView.as_view(), name="add_user"),
    path('edit_user/<str:user_id>', views.UserEditView.as_view(), name="edit_user"),
    path('manage_users', views.UsersManageView.as_view(), name="manage_users"),
    # group management
    path('manage_groups', views.GroupListView.as_view(), name='manage_groups'),
    path('add_group', views.GroupCreateView.as_view(), name='add_group'),
    path('edit_group/<int:group_id>/', views.GroupEditView.as_view(), name='edit_group'),
    path('delete_group/<int:group_id>/', views.GroupDeleteView.as_view(), name='delete_group'),
    path('groups/', views.GroupsView.as_view(), name='groups'),
]
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
13,
12708,
1330,
9037,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
6738,
764,
1330,
5009,
628,
198,
6371,
33279,
82,
796,
68... | 2.769231 | 286 |
import _thread
import time
import socket
import copy
from mctypes import ParseVarInt, PackVarInt
from enums import Play, Login
from handlers import Handler
from senders import Sender
from helpers import calculate_yaw_and_pitch
| [
11748,
4808,
16663,
198,
11748,
640,
198,
11748,
17802,
198,
11748,
4866,
198,
6738,
285,
310,
9497,
1330,
2547,
325,
19852,
5317,
11,
6400,
19852,
5317,
198,
6738,
551,
5700,
1330,
3811,
11,
23093,
198,
6738,
32847,
1330,
32412,
198,
6... | 3.8 | 60 |
from flask import Flask, render_template, request, flash, redirect
import pickle
import numpy as np
app = Flask(__name__)
model = pickle.load(open("final_model.pkl", "rb"))
@app.route("/", methods = ['GET','POST'])
if __name__ == '__main__':
app.run(debug = True) | [
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
11,
2581,
11,
7644,
11,
18941,
201,
198,
11748,
2298,
293,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
201,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
201,
198,
201,
198,
19849... | 2.635514 | 107 |
import aiohttp
import asyncio
import time
start_time = time.time()
asyncio.run(main())
print("--- %s seconds ---" % (time.time() - start_time))
| [
11748,
257,
952,
4023,
198,
11748,
30351,
952,
198,
11748,
640,
628,
198,
9688,
62,
2435,
796,
640,
13,
2435,
3419,
628,
198,
292,
13361,
952,
13,
5143,
7,
12417,
28955,
198,
4798,
7203,
6329,
4064,
82,
4201,
11420,
1,
4064,
357,
24... | 2.846154 | 52 |
"""
Parla supports simple task parallelism.
.. testsetup::
T0 = None
code = None
from .cpu import cpu
"""
import logging
import threading
import inspect
from abc import abstractmethod, ABCMeta
from contextlib import asynccontextmanager, contextmanager, ExitStack
from typing import Awaitable, Collection, Iterable, Optional, Any, Union, List, FrozenSet, Dict
from parla.device import Device, Architecture, get_all_devices
from parla.task_runtime import TaskCompleted, TaskRunning, TaskAwaitTasks, TaskState, DeviceSetRequirements, Task, get_scheduler_context
from parla.utils import parse_index
try:
from parla import task_runtime, array
except ImportError as e:
# Ignore the exception if the stack includes the doc generator
if all("sphinx" not in f.filename for f in inspect.getouterframes(inspect.currentframe())):
raise
logger = logging.getLogger(__name__)
__all__ = [
"TaskID", "TaskSpace", "spawn", "get_current_devices", "tasks", "finish", "CompletedTaskSpace", "Task", "reserve_persistent_memory"
]
class TaskID:
"""The identity of a task.
This combines some ID value with the task object itself. The task
object is assigned by `spawn`. This can be used in place of the
task object in most places.
"""
_task: Optional[Task]
_id: Iterable[int]
def __init__(self, name, id: Iterable[int]):
""""""
self._name = name
self._id = id
self._task = None
@property
def task(self):
"""Get the `Task` associated with this ID.
:raises ValueError: if there is no such task.
"""
if not self._task:
raise ValueError("This task has not yet been spawned so it cannot be used.")
return self._task
@task.setter
@property
def id(self):
"""Get the ID object.
"""
return self._id
@property
def name(self):
"""Get the space name.
"""
return self._name
@property
def full_name(self):
"""Get the space name.
"""
return "_".join(str(i) for i in (self._name, *self._id))
class TaskSet(Awaitable, Collection, metaclass=ABCMeta):
"""
A collection of tasks.
"""
@property
@abstractmethod
@property
class tasks(TaskSet):
"""
An ad-hoc collection of tasks.
An instance is basically a reified dependency list as would be passed to `spawn`.
This object is awaitable and will block until all tasks are complete.
>>> await tasks(T1, T2)
>>> @spawn(None, tasks(T1, T2)) # Same as @spawn(None, [T1, T2])
>>> def f():
>>> pass
"""
@property
__slots__ = ("args",)
class TaskSpace(TaskSet):
"""A collection of tasks with IDs.
A `TaskSpace` can be indexed using any hashable values and any
number of "dimensions" (indicies). If a dimension is indexed with
numbers then that dimension can be sliced.
>>> T = TaskSpace()
... for i in range(10):
... @spawn(T[i], [T[0:i-1]])
... def t():
... code
This will produce a series of tasks where each depends on all previous tasks.
:note: `TaskSpace` does not support assignment to indicies.
"""
_data: Dict[int, TaskID]
@property
def __init__(self, name="", members=None):
"""Create an empty TaskSpace.
"""
self._name = name
self._data = members or {}
def __getitem__(self, index):
"""Get the `TaskID` associated with the provided indicies.
"""
if not isinstance(index, tuple):
index = (index,)
ret = []
parse_index((), index, lambda x, i: x + (i,),
lambda x: ret.append(self._data.setdefault(x, TaskID(self._name, x))))
# print(index, ret)
if len(ret) == 1:
return ret[0]
return ret
class CompletedTaskSpace(TaskSet):
"""
A task space that returns completed tasks instead of unused tasks.
This is useful as the base case for more complex collections of tasks.
"""
@property
# TODO (bozhi): We may need a centralized typing module to reduce types being imported everywhere.
PlacementSource = Union[Architecture, Device, Task, TaskID, Any]
# TODO (bozhi): We may need a `placement` module to hold these `get_placement_for_xxx` interfaces, which makes more sense than the `tasks` module here. Check imports when doing so.
_task_locals = _TaskLocals()
def _task_callback(task, body) -> TaskState:
    """
    A function which forwards to a python function in the appropriate device context.

    Returns the next `TaskState` for *task*: `TaskRunning` when the coroutine
    yielded further dependencies to wait on, `TaskCompleted` when it finished.
    """
    try:
        # NOTE(review): self-assignment is a no-op; presumably left over from
        # an earlier refactor -- confirm before removing.
        body = body
        if inspect.iscoroutinefunction(body):
            # Instantiate the coroutine object so it can be driven via send().
            logger.debug("Constructing coroutine task: %s", task.taskid)
            body = body()
        if inspect.iscoroutine(body):
            try:
                # The result of the task we previously awaited (if any) is fed
                # back into the coroutine as the value of its await/yield.
                in_value_task = getattr(task, "value_task", None)
                in_value = in_value_task and in_value_task.result
                logger.debug("Executing coroutine task: %s with input %s from %r", task.taskid,
                             in_value_task, in_value)
                new_task_info = body.send(in_value)
                task.value_task = None
                if not isinstance(new_task_info, TaskAwaitTasks):
                    raise TypeError("Parla coroutine tasks must yield a TaskAwaitTasks")
                dependencies = new_task_info.dependencies
                value_task = new_task_info.value_task
                if value_task:
                    assert isinstance(value_task, task_runtime.Task)
                    # Remember whose result to feed in on the next resume.
                    task.value_task = value_task
                # Reschedule this callback with the same (already started) coroutine.
                return TaskRunning(_task_callback, (body,), dependencies)
            except StopIteration as e:
                # Coroutine returned: its return value (if any) rides on e.args.
                result = None
                if e.args:
                    (result,) = e.args
                return TaskCompleted(result)
        else:
            # Plain function task: call it once and complete immediately.
            logger.debug("Executing function task: %s", task.taskid)
            result = body()
            return TaskCompleted(result)
    finally:
        logger.debug("Finished: %s", task.taskid)
    assert False  # every branch above returns; reaching here is a bug
def _make_cell(val):
"""
Create a new Python closure cell object.
You should not be using this. I shouldn't be either, but I don't know a way around Python's broken semantics.
"""
x = val
return closure.__closure__[0]
def spawn(taskid: Optional[TaskID] = None, dependencies = (), *,
memory: int = None,
vcus: float = None,
placement: Union[Collection[PlacementSource], Any, None] = None,
ndevices: int = 1,
tags: Collection[Any] = (),
data: Collection[Any] = None
):
"""
spawn(taskid: Optional[TaskID] = None, dependencies = (), *, memory: int = None, placement: Collection[Any] = None, ndevices: int = 1)
Execute the body of the function as a new task. The task may start
executing immediately, so it may execute in parallel with any
following code.
>>> @spawn(T1, [T0]) # Create task with ID T1 and dependency on T0
... def t():
... code
>>> @spawn(T1, [T0], placement=cpu)
... def t():
... code
:param taskid: the ID of the task in a `TaskSpace` or None if the task does not have an ID.
:param dependencies: any number of dependency arguments which may be `Tasks<Task>`, `TaskIDs<TaskID>`, or \
iterables of Tasks or TaskIDs.
:param memory: The amount of memory this task uses.
:param placement: A collection of values (`~parla.device.Architecture`, `~parla.device.Device`, or array data) which \
specify devices at which the task can be placed.
:param ndevices: The number of devices the task will use. If `ndevices` is greater than 1, the `memory` is divided \
evenly between the devices. In the task: `len(get_current_devices()) == ndevices<get_current_devices>`.
The declared task (`t` above) can be used as a dependency for later tasks (in place of the tasks ID).
This same value is stored into the task space used in `taskid`.
:see: :ref:`Fox's Algorithm` Example
"""
# :param vcus: The amount of compute power this task uses. It is specified in "Virtual Compute Units".
# TODO: Document tags argument
if not taskid:
taskid = TaskID("global_" + str(len(_task_locals.global_tasks)), (len(_task_locals.global_tasks),))
_task_locals.global_tasks += [taskid]
return decorator
# TODO (bozhi): Why not put it in task_runtime? Remember to update __all__ and clean up imports to do so.
def get_current_devices() -> List[Device]:
    """
    :return: The `devices<parla.device.Device>` assigned to the current task, as a
        fresh list.  It holds a single element unless `ndevices` was given when
        the task was `spawned<spawn>`.
    """
    return [*task_runtime.get_devices()]
@contextmanager
# TODO: Move this to parla.device and import it from there. It's generally useful.
@contextmanager
def reserve_persistent_memory(amount, device = None):
"""
:param amount: The number of bytes reserved in the scheduler from tasks for persitent data. \
This exists, not as any kind of enforced limit on allocation, but rather to let the scheduler \
have an accurate measure of memory occupancy on the GPU beyond just memory that's used \
only during a task's execution. It can be specified as an integer representing the nubmer of \
bytes, an ndarray (cupy or numpy), or a list of ndarrays.
:param device: The device object where memory is to be reserved. \
This must be supplied if amount is an integer \
and may be supplied for an array. In the case of a list or other iterable it must \
be supplied if any element of the list is not an array. This may be a list of \
devices if amount is a list of array objects.
"""
# TODO: This function should be split up into simpler subunits.
# How exactly that should be done isn't settled yet, but there's
# some discussion on this at
# https://github.com/ut-parla/Parla.py/pull/40#discussion_r608857593
# https://github.com/ut-parla/Parla.py/pull/40#discussion_r608853345
# TODO: reduce nesting by separating out the try/except idioms for
# checking if something supports the buffer protocol and checking
# whether or not something is iterable into separate functions.
# TODO: Generalize the naming/interface here to allow reserving
# resources other than memory.
from . import cpu
if isinstance(amount, int):
memsize = amount
elif hasattr(amount, '__cuda_array_interface__'):
import cupy
if not isinstance(amount, cupy.ndarray):
raise NotImplementedError("Currently only CuPy arrays are supported for making space reservations on the GPU.")
memsize = amount.nbytes
if device is None:
device = amount.device
else:
# Check if "amount" supports the buffer protocol.
# if it does, we're reserving memory on the CPU
# unless the user says otherwise. If it does not,
# then assume it's a list of amount parameters
# that need to be handled individually.
amount_must_be_iterable = False
try:
view = memoryview(amount)
except TypeError:
amount_must_be_iterable = True
else:
memsize = view.nbytes
if device is None:
device = cpu(0)
# Not a cpu array, so try handling amount as
# an iterable of things that each need to be processed.
if amount_must_be_iterable:
try:
iter(amount)
except TypeError as exc:
raise ValueError("Persistent memory spec is not an integer, array, or iterable object") from exc
if device is None:
with ExitStack() as stack:
for arr in amount:
inner_must_be_iterable = False
try:
arr.__cuda_array_interface__
except AttributeError as exc:
inner_must_be_iterable = True
else:
stack.enter_context(reserve_persistent_memory(arr))
if inner_must_be_iterable:
try:
iter(arr)
except TypeError as exc:
# TODO: Just use parla.array.get_memory(a).device instead of this manual mess.
raise ValueError("Implicit location specification only supported for GPU arrays.") from exc
else:
stack.enter_context(reserve_persistent_memory(arr))
yield
return
device_must_be_iterable = False
try:
device = _get_parla_device(device)
except ValueError:
device_must_be_iterable = True
if device_must_be_iterable:
with ExitStack() as stack:
# TODO: do we actually want to support this implicit zip?
for arr, dev in zip(amount, device):
stack.enter_context(reserve_persistent_memory(arr, dev))
yield
return
else:
with ExitStack() as stack:
for arr in amount:
stack.enter_context(reserve_persistent_memory(arr, device))
yield
return
assert False
if device is None:
raise ValueError("Device cannot be inferred.")
device = _get_parla_device(device)
if isinstance(device, cpu._CPUDevice):
raise ValueError("Reserving space for persistent data in main memory is not yet supported.")
with _reserve_persistent_memory(memsize, device):
yield
@asynccontextmanager
async def finish():
    """
    Run the body of the `with` block and then barrier on every task spawned
    inside it (and in this task).

    Tasks created *by* those tasks are not waited on: tasks may outlive their
    children, which differs from Cilk/OpenMP task semantics.

    >>> async with finish():
    ...     @spawn()
    ...     def task():
    ...         @spawn()
    ...         def subtask():
    ...             code
    ...         code
    ... # Here `task` is complete, but `subtask` may not be.
    """
    scope_tasks = []
    _task_locals.task_scopes.append(scope_tasks)
    try:
        yield
    finally:
        popped = _task_locals.task_scopes.pop()
        assert popped is scope_tasks
        await tasks(scope_tasks)
| [
37811,
198,
10044,
5031,
6971,
2829,
4876,
10730,
1042,
13,
198,
198,
492,
1332,
40406,
3712,
628,
220,
220,
220,
309,
15,
796,
6045,
198,
220,
220,
220,
2438,
796,
6045,
198,
220,
220,
220,
422,
764,
36166,
1330,
42804,
198,
198,
3... | 2.462987 | 6,052 |
import os
import logging
| [
11748,
28686,
198,
11748,
18931,
628,
198
] | 3.857143 | 7 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Package: mesxr.calibration
Module: plots
Author: Patrick VanMeter
Affiliation: Department of Physics, University of Wisconsin-Madison
Last Updated: November 2018
Description:
This module is used to generate common plots to analyse the ressults of the ME-SXR calibration procedure.
Usage:
TBD
"""
import numpy as np
import scipy as sp
import scipy.signal
import matplotlib.pyplot as plt
# Some colors to use for plotting
colors = ['xkcd:{0:}'.format(col) for col in ['red', 'green', 'blue', 'magenta', 'mustard yellow', 'burgundy',
'dark orange', 'steel blue', 'bluish purple']]
def s_curves_plot(det, x, y):
    """
    Description:
        Creates a plot of the calibration S-curves for the pixel at the selected coordinates. Responses are normalized so
        that the last trimbit has a response of 0 and the first trimbit a response of 1.
    Inputs:
        - det = (Pilatus_Detector) The calibrated PILATUS detector object, found in the mesxr.calibration.trimscan module.
        - x = (int) The global x coordinate for the pixel to plot.
        - y = (int) The global y coordinate for the pixel to plot.
    Returns:
        - fig = (pyplot figure) The figure containing the plot.
    """
    pixels = det.get_pixels()
    # assumes pixel.data is indexed per calibration element -- TODO confirm
    trimscan_data = pixels[x,y].data
    # Create the plot with the appropriate fine sizes for Jupyter notebooks
    # NOTE(review): figure number 1 is reused on every call, so repeated calls
    # draw into the same figure -- confirm this is intended.
    fig = plt.figure(1, figsize=(8,6), dpi=110)
    ax = fig.add_subplot(1,1,1)
    plt.tick_params(labelsize=14)
    # One S-curve (data + model + a0 marker) per calibration element
    for index, elem in enumerate(pixels[x,y].elements):
        # Scale for nicer plotting of the data: shift minimum to 0, then
        # normalize the maximum to 1
        bottom_adj = np.amin(trimscan_data[index])
        data_adj = trimscan_data[index] - bottom_adj
        scale_fact = np.amax(data_adj)
        data_scaled = data_adj / scale_fact
        # Poisson (sqrt-of-counts) uncertainty, scaled the same way
        sigma_scaled = np.sqrt(trimscan_data[index]) / scale_fact
        label = r'{0:}, $\chi^2 = {1:.1f}$'.format(elem, pixels[x,y].trimfit_chi2[index])
        ax.errorbar(pixels[x,y].trimbits[index], data_scaled, yerr=sigma_scaled, label=label,
                    capsize=1, ms=2, marker='o', linestyle=':', color=colors[index])
        # Scale and plot the model with the same shift/normalization as the data
        model_trims = np.linspace(0, 64, num=200)
        model_adj = pixels[x,y].s_curve_model(elem, model_trims) - bottom_adj
        model_scale = model_adj / scale_fact
        ax.plot(model_trims, model_scale, color=colors[index])
        # Plot the a0 points (fitted curve midpoint) as vertical dashed lines
        ax.axvline(x=pixels[x,y].trimfit_params[index, 0], color=colors[index], linestyle='--', linewidth=0.8)
    # Annotate the plot
    ax.legend(fontsize=12)
    ax.set_xlim([0,64])
    ax.set_ylim([0,1])
    ax.set_xlabel('Trimbit', fontsize=14)
    ax.set_ylabel('Response (norm.)', fontsize=14)
    # Add coordinates
    ax.text(0.5, 0.95, '(x,y) = {0:}'.format(pixels[x,y].coords), color='black',
            fontsize=16, horizontalalignment='center', verticalalignment='center', transform = ax.transAxes)
    return fig
def trimscan_curve(det, x, y, xlim=(2, 15)):
    """
    Description:
        Creates a plot of the mapping between trimbit setting and the resulting threshold energy for a calibrated Pilatus_Detector
        object. Also includes uncertainty bands based on Poisson statistics.
    Inputs:
        - det = (Pilatus_Detector) The calibrated PILATUS detector object, found in the mesxr.calibration.trimscan module.
        - x = (int) The global x coordinate for the pixel to plot.
        - y = (int) The global y coordinate for the pixel to plot.
        - xlim = (sequence of two numbers) Limits for the energy (x) axis, passed to set_xlim.
          The default is a tuple (was a mutable list) so the default object cannot be
          mutated across calls; callers may still pass a list.
    Returns:
        - fig = (pyplot figure) The figure containing the plot.
    """
    pixels = det.get_pixels()
    pixel = pixels[x, y]  # hoist the repeated pixel lookup
    # Create the plot with the appropriate fine sizes for Jupyter notebooks
    fig = plt.figure(1, figsize=(8,6), dpi=110)
    ax = fig.add_subplot(1,1,1)
    plt.tick_params(labelsize=14)
    # Plot the data with error bars (uncertainty of the fitted a0 parameter)
    ax.errorbar(pixel.energies, pixel.trimfit_params[:, 0], yerr=np.sqrt(pixel.trimfit_cov[:, 0, 0]),
                capsize=4, ms=4, marker='o', linestyle='none', color='xkcd:royal blue', label='Data')
    # Plot the fit
    en_model = np.linspace(xlim[0], xlim[1], num=200)
    trim_model = pixel.en_curve_model(en_model)
    ax.plot(en_model, trim_model, color='xkcd:orange', linewidth=1, label='Fit', zorder=100)
    # Plot the uncertainty band (+/- one sigma around the fit)
    model_sigma = pixel.en_curve_uncertainty(en_model)
    ax.plot(en_model, trim_model+model_sigma, color='xkcd:light blue', label=r'$1\sigma$', alpha=0.75)
    ax.plot(en_model, trim_model-model_sigma, color='xkcd:light blue', alpha=0.75)
    ax.fill_between(en_model, trim_model-model_sigma, trim_model+model_sigma, alpha=0.5, color='xkcd:light blue')
    ax.set_xlim(xlim)
    ax.set_ylim([0, det.num_trimbits])
    ax.set_xlabel(r'Threshold $E_c$ (keV)', fontsize=14)
    ax.set_ylabel('Trimbit', fontsize=14)
    ax.legend(loc='lower right', fontsize=14)
    # Annotate with the pixel coordinates and the energy-fit quality
    ax.text(0.165, 0.95, '(x,y) = {0:}'.format(pixel.coords), color='black',
            fontsize=16, horizontalalignment='center', verticalalignment='center', transform = ax.transAxes)
    ax.text(0.14, 0.88, r'$\chi^2 = {0:.2f}$'.format(pixel.enfit_chi2), color='black',
            fontsize=16, horizontalalignment='center', verticalalignment='center', transform = ax.transAxes)
    return fig
def trimscan_chi2_plot(det, element, chips, bins=500, plot_stdev=True, chi_range=(0,1000), cutoff=1000):
    """Histogram the trimbit-fit chi^2 values for one calibration element.
    Inputs:
        - det = (Pilatus_Detector) The calibrated PILATUS detector object.
        - element = (str) Calibration element whose trim fits are histogrammed.
        - chips = (iterable of int) Chip numbers to include.
        - bins = (int) Number of histogram bins.
        - plot_stdev = (bool) If True, also draw the +/- 1 sigma lines.
        - chi_range = (tuple of numbers) x-axis range passed to the histogram.
        - cutoff = (number) chi^2 values above this are dropped as outliers.
    Returns:
        - fig = (pyplot figure) The figure containing the histogram.
    """
    # Create the plot with the appropriate fine sizes for Jupyter notebooks
    fig = plt.figure(1, figsize=(8,6), dpi=110)
    ax = fig.add_subplot(1,1,1)
    plt.tick_params(labelsize=14)
    # Load in the chi^2 values for every requested chip
    chi2_set = []
    for det_chip in det.modules[0,0].chips.ravel():
        if det_chip.number not in chips:
            continue
        try:
            chi2_set.extend([pixel.trimfit_chi2[pixel.elements.index(element)] for pixel in det_chip.pixels.ravel() if element in pixel.elements])
        except (AttributeError, IndexError):
            # Chip has no usable trim-fit data for this element; skip it.
            # NOTE(review): originally a bare except -- narrowed to the
            # failures the attribute/index accesses above can produce.
            pass
    chi2_set = np.array(chi2_set)
    # Count and remove outliers. ~(x > cutoff) keeps NaNs (NaN > cutoff is
    # False), matching the nanmean/nanstd statistics below; this replaces an
    # O(n^2) index-membership filter with a boolean mask.
    keep_mask = ~(chi2_set > cutoff)
    num_removed = int(np.count_nonzero(~keep_mask))
    chi2_set = chi2_set[keep_mask]
    # Make the histogram
    hist = ax.hist(chi2_set, bins=bins, density=True, range=chi_range, color='xkcd:light blue')
    # Statistical info
    mean = np.nanmean(chi2_set)
    sigma = np.nanstd(chi2_set)
    ax.axvline(x=mean, color='xkcd:brick red', linestyle='dashed', label=r'$\chi^2$ mean')
    if plot_stdev:
        ax.axvline(x=mean+sigma, color='xkcd:red', label=r'$\chi^2$ stdev')
        ax.axvline(x=mean-sigma, color='xkcd:red')
    ax.set_xlabel(r'Trimbit fit $\chi^2$', fontsize=14)
    ax.set_ylabel('Probability density', fontsize=14)
    ax.legend(loc='upper right')
    # Add statistics labels
    ax.text(0.9, 0.8, element, color='black',
        fontsize=16, horizontalalignment='center', verticalalignment='center', transform = ax.transAxes)
    ax.text(0.88, 0.72, r'$\langle \chi^2 \rangle = {0:.1f}$'.format(mean), color='black',
        fontsize=16, horizontalalignment='center', verticalalignment='center', transform = ax.transAxes)
    if plot_stdev:
        ax.text(0.88, 0.64, r'$\sigma_{{\chi^2}} = {0:.1f}$'.format(sigma), color='black',
            fontsize=16, horizontalalignment='center', verticalalignment='center', transform = ax.transAxes)
    # Include outlier information for transparency
    ax.text(0.15, 0.95, '{0:} points removed'.format(num_removed), color='black',
        fontsize=12, horizontalalignment='center', verticalalignment='center', transform = ax.transAxes)
    ax.text(0.15, 0.90, r'cutoff $\chi^2 > {0:}$'.format(cutoff), color='black',
        fontsize=12, horizontalalignment='center', verticalalignment='center', transform = ax.transAxes)
    return fig
def energy_chi2_plot(det, chips, bins=500, plot_stdev=False, chi_range=(0,100), cutoff=100):
    """Histogram the energy-calibration fit chi^2 values for the detector.
    Inputs:
        - det = (Pilatus_Detector) The calibrated PILATUS detector object.
        - chips = (iterable of int) Chip numbers to include.
        - bins = (int) Number of histogram bins.
        - plot_stdev = (bool) If True, also draw the +/- 1 sigma lines.
        - chi_range = (tuple of numbers) x-axis range passed to the histogram.
        - cutoff = (number) chi^2 values above this are dropped as outliers.
    Returns:
        - fig = (pyplot figure) The figure containing the histogram.
    """
    # Create the plot with the appropriate fine sizes for Jupyter notebooks
    fig = plt.figure(1, figsize=(8,6), dpi=110)
    ax = fig.add_subplot(1,1,1)
    plt.tick_params(labelsize=14)
    # Load in the chi^2 values for every requested chip
    chi2_set = []
    for det_chip in det.modules[0,0].chips.ravel():
        if det_chip.number in chips:
            chi2_set.extend([pixel.enfit_chi2 for pixel in det_chip.pixels.ravel()])
    chi2_set = np.array(chi2_set)
    # Count and remove outliers. ~(x > cutoff) keeps NaNs (NaN > cutoff is
    # False), matching the nanmean/nanstd statistics below; this replaces an
    # O(n^2) index-membership filter with a boolean mask.
    keep_mask = ~(chi2_set > cutoff)
    num_removed = int(np.count_nonzero(~keep_mask))
    chi2_set = chi2_set[keep_mask]
    # Make the histogram
    hist = ax.hist(chi2_set, bins=bins, density=True, range=chi_range, color='xkcd:light blue')
    # Statistical info
    mean = np.nanmean(chi2_set)
    sigma = np.nanstd(chi2_set)
    ax.axvline(x=mean, color='xkcd:brick red', linestyle='dashed', label=r'$\chi^2$ mean')
    if plot_stdev:
        ax.axvline(x=mean+sigma, color='xkcd:red', label=r'$\chi^2$ stdev')
        ax.axvline(x=mean-sigma, color='xkcd:red')
    ax.set_xlabel(r'Energy fit $\chi^2$', fontsize=14)
    ax.set_ylabel('Probability density', fontsize=14)
    ax.legend(loc='upper right')
    # Add statistics labels
    ax.text(0.9, 0.8, r'$E_c$ fits', color='black',
        fontsize=16, horizontalalignment='center', verticalalignment='center', transform = ax.transAxes)
    ax.text(0.88, 0.72, r'$\langle \chi^2 \rangle = {0:.1f}$'.format(mean), color='black',
        fontsize=16, horizontalalignment='center', verticalalignment='center', transform = ax.transAxes)
    if plot_stdev:
        ax.text(0.88, 0.64, r'$\sigma_{{\chi^2}} = {0:.1f}$'.format(sigma), color='black',
            fontsize=16, horizontalalignment='center', verticalalignment='center', transform = ax.transAxes)
    # Include outlier information for transparency
    ax.text(0.15, 0.95, '{0:} points removed'.format(num_removed), color='black',
        fontsize=12, horizontalalignment='center', verticalalignment='center', transform = ax.transAxes)
    ax.text(0.15, 0.90, r'cutoff $\chi^2 > {0:}$'.format(cutoff), color='black',
        fontsize=12, horizontalalignment='center', verticalalignment='center', transform = ax.transAxes)
    return fig
def corner_plot(data, labels, ranges, bins=250, figsize=(8,6), plt_label='none'):
    """Draw a corner plot: 1D histograms on the diagonal, 2D histograms above.
    Inputs:
        - data = (list of sequences) One sequence of values per parameter.
        - labels = (list of str) Axis label for each parameter.
        - ranges = (list of tuples) (min, max) plot range for each parameter.
        - bins = (int) Number of bins for the diagonal histograms.
        - figsize = (tuple) Figure size passed to pyplot.
        - plt_label = (str) Annotation for the first panel; 'none' disables it.
    Returns:
        - fig = (pyplot figure) The corner plot figure.
    """
    fig = plt.figure(1, figsize=figsize, dpi=110)
    num_params = len(data)
    # Upper-triangular panel grid: column-major walk, row runs 0..col
    for col in range(num_params):
        for row in range(col+1):
            # add_subplot index is 1-based, row-major across the full grid
            index = row*num_params + (col+1)
            ax = fig.add_subplot(num_params, num_params, index)
            # Diagonal histograms
            if row == col:
                counts, edges, patches = ax.hist(data[col], bins=bins, range=ranges[col],
                                                color='xkcd:royal blue')
                # Enforce consistent spacing of grid lines
                delta_x = (ranges[col][1] - ranges[col][0])/5.
                max_counts = np.amax(counts)
                ticks_x = np.arange(ranges[col][0]+delta_x, ranges[col][1], delta_x)
                ticks_y = np.arange(0, 1.2*max_counts, max_counts/4.)
                ax.xaxis.set_ticks(ticks_x)
                ax.yaxis.set_ticks(ticks_y[0:-1])
                ax.set_ylabel('Counts')
                ax.set_xlabel(labels[row])
                ax.set_xlim(ranges[row])
                ax.grid(axis='x')
                # Add annotation to the first plot, if desired
                if row == 0 and plt_label != 'none':
                    ax.text(0.85, 0.85, plt_label, color='red',
                        fontsize=16, horizontalalignment='center', verticalalignment='center', transform = ax.transAxes,
                        bbox=dict(facecolor='none', edgecolor='black', fc='w', boxstyle='round'))
            # Off-diagonal 2D histograms
            else:
                hbins = [np.linspace(ranges[col][0], ranges[col][1], num=100),
                         np.linspace(ranges[row][0], ranges[row][1], num=100)]
                hist, xedges, yedges = np.histogram2d(data[col], data[row], bins=hbins)
                # Mask empty bins so they render as background, not color 0
                hist_masked = np.ma.masked_where(hist == 0, hist)
                pmesh = ax.pcolormesh(xedges, yedges, hist_masked.T, cmap='jet')
                ax.set_xlim(ranges[col])
                ax.set_ylim(ranges[row])
                ax.grid()
                # Get rid of overlapping tick marks and enforce consistent spacing
                delta_y = (ranges[row][1] - ranges[row][0])/5.
                delta_x = (ranges[col][1] - ranges[col][0])/5.
                ticks_y = np.arange(ranges[row][0]+delta_y, ranges[row][1], delta_y)
                ticks_x = np.arange(ranges[col][0]+delta_x, ranges[col][1], delta_x)
                ax.yaxis.set_ticks(ticks_y)
                ax.xaxis.set_ticks(ticks_x)
            # Only edge panels carry axis labels; interior ticks are blanked
            if row == 0:
                ax.xaxis.tick_top()
                ax.set_xlabel(labels[col])
                ax.xaxis.set_label_position('top')
            else:
                ax.xaxis.set_ticklabels([])
            if col == num_params-1:
                ax.yaxis.tick_right()
                ax.set_ylabel(labels[row])
                ax.yaxis.set_label_position('right')
            else:
                ax.yaxis.set_ticklabels([])
    # Remove spacing between plots
    plt.subplots_adjust(wspace=0, hspace=0)
    return fig
def trimscan_corner_plot(det, element, ranges, chips=range(16)):
    """Corner plot of the six trimbit-fit parameters for one element.
    Inputs:
        - det = (Pilatus_Detector) The calibrated PILATUS detector object.
        - element = (str) Calibration element whose fit parameters are shown.
        - ranges = (list of tuples) Axis range for each of the six parameters.
        - chips = (iterable of int) Chip numbers to include.
    Returns:
        - fig = (pyplot figure) The corner plot figure.
    """
    pixels = det.get_pixels()
    labels = [r'$a_0$', r'$a_1$', r'$a_2$', r'$a_3$', r'$a_4$', r'$a_5$']
    # One list of fit values per parameter, filled in a single pass over the
    # pixel grid instead of re-scanning the grid once per parameter.
    data = [[] for _ in labels]
    for x in range(pixels.shape[0]):
        for y in range(pixels.shape[1]):
            pixel = pixels[x, y]
            # Don't include data which is not used - this excludes all gap pixels
            if not (pixel.good_enfit and pixel.chip in chips):
                continue
            if element not in pixel.elements:
                continue
            elem_index = pixel.elements.index(element)
            if pixel.good_trimfits[elem_index]:
                for param_index in range(len(labels)):
                    data[param_index].append(pixel.trimfit_params[elem_index, param_index])
    # Make the plot using the general corner plot function
    fig = corner_plot(data, labels, ranges, figsize=(16,12), plt_label=element)
    return fig
def energy_corner_plot(det, ranges, chips=range(16)):
    """Corner plot of the three energy-calibration fit parameters.
    Inputs:
        - det = (Pilatus_Detector) The calibrated PILATUS detector object.
        - ranges = (list of tuples) Axis range for each of the three parameters.
        - chips = (iterable of int) Chip numbers to include.
    Returns:
        - fig = (pyplot figure) The corner plot figure.
    """
    pixels = det.get_pixels()
    labels = [r'$c_0$', r'$c_1$', r'$c_2$']
    # One list of fit values per parameter, filled in a single pass over the
    # pixel grid instead of re-scanning the grid once per parameter.
    data = [[] for _ in labels]
    for x in range(pixels.shape[0]):
        for y in range(pixels.shape[1]):
            pixel = pixels[x, y]
            # Don't include data which is not used - this excludes all gap pixels
            if pixel.good_enfit and pixel.chip in chips:
                for param_index in range(len(labels)):
                    data[param_index].append(pixel.enfit_params[param_index])
    # Make the plot using the general corner plot function
    fig = corner_plot(data, labels, ranges, figsize=(8,6))
    return fig
def uniform_treshold_trimbit_maps(det, threshold_set, figsize=(12,12)):
    """Image the trimbit each pixel needs to sit at each uniform threshold.
    Inputs:
        - det = (Pilatus_Detector) The calibrated PILATUS detector object.
        - threshold_set = (iterable of numbers) Thresholds in keV, one map each.
        - figsize = (tuple) Figure size passed to pyplot.
    Returns:
        - fig = (pyplot figure) Grid of trimbit maps, two per row, with a
          shared horizontal colorbar.
    """
    # Get the trimbits from the calibration data; bad pixels become NaN
    pixels = det.get_pixels()
    trimbit_maps = np.zeros([len(threshold_set), pixels.shape[0], pixels.shape[1]])
    for index, thresh in enumerate(threshold_set):
        for x in range(pixels.shape[0]):
            for y in range(pixels.shape[1]):
                if pixels[x,y].good_enfit:
                    trimbit_maps[index, x, y] = pixels[x, y].en_curve_model(thresh)
                else:
                    trimbit_maps[index, x, y] = np.nan
    # Create the plots
    fig = plt.figure(1, figsize=figsize, dpi=110)
    # Integer ceiling division: len/2 + len%2 is a float under Python 3 and
    # add_subplot requires integer grid arguments.
    num_col = (len(threshold_set) + 1) // 2
    for index, thresh in enumerate(threshold_set):
        ax = fig.add_subplot(num_col, 2, index+1)
        image = ax.imshow(trimbit_maps[index, :, :].T, vmin=0, vmax=63)
        # Format the plot to remove overlap
        # Only label the axes on the edge
        if index % 2 == 0:
            ax.set_ylabel('Y Index')
        else:
            ax.yaxis.set_ticklabels([])
        if index == 0 or index == 1:
            ax.set_xlabel('X Index')
            ax.xaxis.tick_top()
            ax.xaxis.set_label_position('top')
        else:
            ax.xaxis.set_ticklabels([])
        ax.text(0.85, 0.85, '{0:.0f} keV'.format(thresh), color='red',
            fontsize=14, horizontalalignment='center', verticalalignment='center', transform = ax.transAxes,
            bbox=dict(facecolor='none', edgecolor='black', fc='w', boxstyle='round'))
    plt.subplots_adjust(wspace=0.025, hspace=0.05)
    # Shared colorbar below the panels (last imshow handle is representative)
    cax = fig.add_axes([0.1, 0.05, 0.9, 0.02])
    fig.colorbar(image, cax=cax, orientation='horizontal', label=r'Trimbit settings')
    return fig
def uniform_treshold_trimbit_distributions(det, threshold_set, chips=range(16), figsize=(8, 6)):
    """Histogram the (continuous) trimbit each pixel needs for each threshold.
    Inputs:
        - det = (Pilatus_Detector) The calibrated PILATUS detector object.
        - threshold_set = (iterable of numbers) Thresholds in keV, one panel each.
        - chips = (iterable of int) Kept for interface compatibility.
          NOTE(review): the selection below only checks good_enfit, not chip
          membership -- confirm whether chips should filter pixels here.
        - figsize = (tuple) Figure size passed to pyplot.
    Returns:
        - fig = (pyplot figure) Grid of trimbit histograms, two per row.
    """
    # Create a figure to plot all the data
    fig = plt.figure(1, figsize=figsize, dpi=110)
    # Integer ceiling division: len/2 + len%2 is a float under Python 3 and
    # add_subplot requires integer grid arguments.
    num_col = (len(threshold_set) + 1) // 2
    # Don't include bad pixels or chip boundaries
    pixels = det.get_pixels()
    for index, thresh in enumerate(threshold_set):
        ax = fig.add_subplot(num_col, 2, index+1)
        trim_data = []
        for x in range(pixels.shape[0]):
            for y in range(pixels.shape[1]):
                if pixels[x,y].good_enfit:
                    # Clamp the model output to the valid trimbit range [0, 63]
                    tbit = pixels[x, y].en_curve_model(thresh)
                    if tbit < 0:
                        tbit = 0.
                    elif tbit > 63:
                        tbit = 63.
                    trim_data.append(tbit)
        trim_data = np.array(trim_data)
        # Make the histogram
        hist, bins, patches = ax.hist(trim_data, bins=250, range=[0,64], color='xkcd:light blue')
        # Include some basic statistics
        stdev = np.std(trim_data)
        mean = np.mean(trim_data)
        ax.axvline(x=mean, color='black', linestyle='dashed')
        ax.axvline(x=mean+stdev, color='green', linestyle='dashed')
        ax.axvline(x=mean-stdev, color='green', linestyle='dashed')
        # Position the text legibly
        if mean <=25:
            text_x_loc = mean + stdev + 2
        else:
            text_x_loc = mean - stdev - 20
        ax.text(text_x_loc, 0.55*max(hist), r'$\sigma =$ {:.2f}'.format(stdev), fontsize=10, color='green')
        ax.text(text_x_loc, 0.8*max(hist), r'$\langle t \rangle =$ {:.2f}'.format(mean), fontsize=10)
        # Put the threshold label somewhere it will not overlap with other text
        if mean < 44:
            ax.text(0.85, 0.8, '{0:.1f} keV'.format(thresh), horizontalalignment='center', verticalalignment='center',
                transform = ax.transAxes, fontsize=12, color='red',
                bbox=dict(facecolor='none', edgecolor='black', fc='w', boxstyle='round'))
        else:
            ax.text(0.15, 0.8, '{0:.1f} keV'.format(thresh), horizontalalignment='center', verticalalignment='center',
                transform = ax.transAxes, fontsize=12, color='red',
                bbox=dict(facecolor='none', edgecolor='black', fc='w', boxstyle='round'))
        max_counts = max(hist)
        ticks = np.arange(0, 1.2*max_counts, max_counts/4)
        ax.yaxis.set_ticks(ticks[0:-1])
        ax.xaxis.set_ticks(range(4,65,8))
        ax.set_xlim([0,63])
        ax.grid(axis='x')
        # Only label the axes on the edge
        if index % 2 == 0:
            ax.set_ylabel('Counts')
        else:
            ax.yaxis.tick_right()
            ax.yaxis.set_label_position('right')
        if index == len(threshold_set)-1 or index == len(threshold_set)-2:
            ax.set_xlabel(r'Requested Trimbit')
        else:
            ax.xaxis.set_ticklabels([])
    # Remove spacing between plots
    plt.subplots_adjust(wspace=0, hspace=0)
    return fig
def uniform_treshold_delta_E(det, threshold_set, chips=range(16), figsize=(8, 6), xrange=[-0.25,0.25]):
    """Histogram the threshold error introduced by rounding trimbits to ints.
    For each requested threshold, every good pixel's ideal (continuous)
    trimbit is rounded to a valid integer setting and mapped back to the
    threshold that setting actually produces; the difference is histogrammed
    and its spread estimated from a smoothed profile.
    Inputs:
        - det = (Pilatus_Detector) The calibrated PILATUS detector object.
        - threshold_set = (iterable of numbers) Thresholds in keV, one panel each.
        - chips = (iterable of int) Kept for interface compatibility.
          NOTE(review): the selection below only checks good_enfit, not chip
          membership -- confirm whether chips should filter pixels here.
        - figsize = (tuple) Figure size passed to pyplot.
        - xrange = (list of two numbers) Histogram range in keV.
    Returns:
        - fig = (pyplot figure) Grid of delta-E histograms.
        - width_set = (ndarray) Estimated spread (eV) for each threshold.
    """
    # Create a figure to plot all the data
    fig = plt.figure(1, figsize=figsize, dpi=110)
    # Integer ceiling division: len/2 + len%2 is a float under Python 3 and
    # add_subplot requires integer grid arguments.
    num_col = (len(threshold_set) + 1) // 2
    # Don't include bad pixels or chip boundaries
    pixels = det.get_pixels()
    # Keep up with threshold widths
    width_set = np.zeros(len(threshold_set))
    for index, thresh in enumerate(threshold_set):
        ax = fig.add_subplot(num_col, 2, index+1)
        thresh_data = []
        # Loop over pixels: find the trimbit, round it to a valid integer
        # setting, and map it back to the threshold it actually produces
        for x in range(pixels.shape[0]):
            for y in range(pixels.shape[1]):
                if pixels[x,y].good_enfit:
                    # Make sure that the result is within the valid range
                    tbit = pixels[x, y].trimbit_from_threshold(thresh)
                    if tbit < 0:
                        tbit = 0.
                    elif tbit > 63:
                        tbit = 63.
                    else:
                        tbit = np.round(tbit)
                    thresh_data.append( pixels[x,y].threshold_from_trimbit(tbit) )
        thresh_data = np.array(thresh_data)
        en_diff = thresh_data - thresh
        hist, bins, patches = ax.hist(en_diff, bins=200, range=xrange, color='xkcd:light blue')
        ax.text(0.85, 0.8, '{0:.1f} keV'.format(thresh), horizontalalignment='center', verticalalignment='center',
            transform = ax.transAxes, fontsize=12, color='red',
            bbox=dict(facecolor='none', edgecolor='black', fc='w', boxstyle='round'))
        # Smooth the data to determine the width of the spread:
        # half-maximum crossings on either side of zero give the width
        bin_w = (max(bins) - min(bins)) / (len(bins) - 1.)
        bin_points = np.arange(min(bins)+bin_w/2., max(bins)+bin_w/2., bin_w)
        hist_smooth = sp.signal.savgol_filter(hist, 11, 2)
        max_count = np.amax(hist_smooth)
        zero_index = np.argmin(np.abs(bin_points))
        half_index_l = np.argmin(np.abs(hist_smooth[:zero_index] - max_count/2.))
        half_index_r = zero_index + np.argmin(np.abs(hist_smooth[zero_index:] - max_count/2.))
        width_l = bin_points[zero_index] - bin_points[half_index_l]
        width_r = bin_points[half_index_r] - bin_points[zero_index]
        delta_eV = (width_r + width_l)*1000./2.
        width_set[index] = delta_eV
        ax.axvline(x=bin_points[half_index_l], color='red', linewidth=0.8)
        ax.axvline(x=bin_points[half_index_r], color='red', linewidth=0.8)
        ax.axvline(x=bin_points[zero_index], color='black', linestyle='dashed', linewidth=0.5)
        ax.text(0.5, 0.1, r'$\Delta E = $ {0:3.0f} eV'.format(delta_eV), horizontalalignment='center',
            verticalalignment='center', transform = ax.transAxes, fontsize=12, color='black')
        max_counts = max(hist)
        ticks = np.arange(0, 1.2*max_counts, max_counts/4)
        ax.yaxis.set_ticks(ticks[0:-1])
        ax.grid(axis='x')
        # Only label the axes on the edge
        if index % 2 == 0:
            ax.set_ylabel('Counts')
        else:
            ax.yaxis.tick_right()
            ax.yaxis.set_label_position('right')
        if index == len(threshold_set)-1 or index == len(threshold_set)-2:
            ax.set_xlabel(r'Threshold - Avg. (keV)')
        else:
            ax.xaxis.set_ticklabels([])
    # Remove spacing between plots
    plt.subplots_adjust(wspace=0, hspace=0)
    return fig, width_set
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
27813,
25,
18842,
87,
81,
13,
9948,
571,
1358,
198,
26796,
25,
21528,
198,
13838,
25,
9925,
6656,
44,
235... | 2.093828 | 11,617 |
#!/usr/bin/env python3

"""
BASICS

Admin functionalities
"""

from flask import Flask, request
import glob
import os
import time

import base_functions as bf
import remove_files as rmf

app = Flask(__name__)
GREYFISH_FOLDER = os.environ['greyfish_path']+"/sandbox/"


# NOTE(review): the two routes below had no decorated functions (a
# SyntaxError as written); minimal handlers were reconstructed from the
# comments. The bf/rmf helper modules may already provide this logic --
# confirm and delegate to them if so.

# Gets a list of all available users, comma-separated
# If there are no users, it will return an empty string: ''
@app.route('/grey/admin/users/usernames/all', methods=['POST'])
def all_usernames():
    """Return every user directory name under the sandbox, comma-separated."""
    user_dirs = glob.glob(GREYFISH_FOLDER + "*")
    return ','.join(os.path.basename(d.rstrip('/')) for d in user_dirs)


# Removes all files older than X seconds
@app.route('/grey/admin/purge/olderthan/<Xsec>', methods=['POST'])
def purge_older_than(Xsec):
    """Delete every sandbox file whose mtime is more than Xsec seconds old."""
    try:
        max_age = float(Xsec)
    except ValueError:
        return 'INVALID, Xsec must be a number'
    cutoff = time.time() - max_age
    removed = 0
    for dirpath, _, filenames in os.walk(GREYFISH_FOLDER):
        for fname in filenames:
            fpath = os.path.join(dirpath, fname)
            if os.path.getmtime(fpath) < cutoff:
                os.remove(fpath)
                removed += 1
    return 'Removed {0} files older than {1} seconds'.format(removed, Xsec)


if __name__ == '__main__':
    app.run()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
37811,
198,
33,
1921,
19505,
198,
198,
46787,
10345,
871,
198,
37811,
628,
198,
6738,
42903,
1330,
46947,
11,
2581,
198,
11748,
15095,
198,
11748,
28686,
198,
11748,
640,
198,
... | 2.824074 | 216 |
""" Setup file"""
| [
37811,
31122,
2393,
37811,
201,
198
] | 3.166667 | 6 |
from translationstring import TranslationStringFactory
from pyramid.config import Configurator
from c2cwsgiutils.health_check import HealthCheck
from pyramid.events import BeforeRender, NewRequest
import c2cgeoform
from pkg_resources import resource_filename
from c2c.template.config import config as configuration
from c2cgeoportal_admin.subscribers import add_renderer_globals, add_localizer
# Prepend this package's widget templates so they take precedence over the
# default c2cgeoform template lookup locations.
search_paths = (
    (resource_filename(__name__, 'templates/widgets'),)
    + c2cgeoform.default_search_paths
)
c2cgeoform.default_search_paths = search_paths

# Translation string factory for the "c2cgeoportal_admin" gettext domain.
_ = TranslationStringFactory('c2cgeoportal_admin')
def main(_, **settings):
    """
    This function returns a Pyramid WSGI application.

    The unused first argument is the PasteDeploy global configuration
    mapping; ``settings`` are the application settings from the ini file,
    merged below with the values from the ``app.cfg`` config file.
    """
    configuration.init(settings.get('app.cfg'))
    settings.update(configuration.get_config())

    config = Configurator(settings=settings)

    config.include('c2cwsgiutils.pyramid.includeme')
    config.include('c2cgeoportal_admin')

    # Imported inside the function, presumably to defer loading the
    # testing helpers until app creation -- verify before moving to top level
    from c2cgeoportal_commons.testing import (
        generate_mappers,
        get_engine,
        get_session_factory,
        get_tm_session,
    )

    # Initialize the dev dbsession
    settings = config.get_settings()
    settings['tm.manager_hook'] = 'pyramid_tm.explicit_manager'
    session_factory = get_session_factory(get_engine(settings))
    config.registry['dbsession_factory'] = session_factory

    # Make request.dbsession available for use in Pyramid
    config.add_request_method(
        # request.tm is the transaction manager used by pyramid_tm
        lambda request: get_tm_session(session_factory, request.tm),
        'dbsession',
        reify=True
    )

    # Renderer globals and localizer are injected on every render/request
    config.add_subscriber(add_renderer_globals, BeforeRender)
    config.add_subscriber(add_localizer, NewRequest)

    generate_mappers()

    # Register a liveness probe against the configured host
    health_check = HealthCheck(config)
    health_check.add_url_check('http://{}/'.format(settings['healthcheck_host']))

    return config.make_wsgi_app()
| [
6738,
11059,
8841,
1330,
33322,
10100,
22810,
198,
6738,
27944,
13,
11250,
1330,
17056,
333,
1352,
198,
6738,
269,
17,
66,
18504,
12397,
26791,
13,
13948,
62,
9122,
1330,
3893,
9787,
198,
6738,
27944,
13,
31534,
1330,
7413,
45819,
11,
9... | 2.795652 | 690 |
#!/usr/bin/env python
"""BaseballSerialParserCOM.py: Collects data from a Daktronics All Sport 5000 connected via port J2 to a
Daktronics All Sport CG connected to a computer on COM port (defined on line 56), then parses data to
a .csv readable by broadcasting programs. This file has only been tested using game code 5501 on a
Daktronics All Sport 5000 (Baseball - Standard).
"""
__author__ = "Collin Moore"
__copyright__ = "Copyright 2021, Bristol Tennessee City Schools"
__credits__ = "Collin Moore"
__license__ = "MIT"
__version__ = "1.1.2"
__maintainer__ = "Collin Moore"
__email__ = "moorec@btcs.org"
__status__ = "Release"
"""Notes for reading serial bits: pulling individual characters
from 'res' will return integer values assigned to the unicode
character. Characters used for logic checks in this script are:
32 = no data / blank
42 = *
46 = .
49 = 1
50 = 2
51 = 3
52 = 4
"""
#Function definitions
def intSuffixer(passedTens, passedOnes):
    """Return the digit(s) as a string with the correct English ordinal suffix.

    passedTens and passedOnes are the raw serial bytes (character code
    points) for the tens and ones digits; 32 (ASCII space) in the tens place
    means there is no tens digit. Non-digit ones bytes (e.g. a blank inning)
    are returned unchanged, matching the original behavior.
    """
    if passedTens == 32:
        number = chr(passedOnes)
    else:
        number = chr(passedTens) + chr(passedOnes)
    # 10th-19th always take "th" regardless of the ones digit (11th, 12th, 13th)
    if passedTens == 49:
        return number + "th"
    # Otherwise the suffix follows the ones digit: 1->st, 2->nd, 3->rd
    # (this also fixes 21st/22nd/23rd/31st/... which previously got "th")
    if passedOnes == 49:
        return number + "st"
    if passedOnes == 50:
        return number + "nd"
    if passedOnes == 51:
        return number + "rd"
    # Any other digit gets "th"; non-digit bytes get no suffix at all
    if passedOnes >= 52 or passedTens != 32:
        return number + "th"
    return number
def topBotFlipper(topBot):
    """Toggle the inning half between "TOP " and "BOT " (trailing spaces
    are part of the scoreboard strings); any other value passes through
    unchanged. Note this relies on the outs counter reaching 3 -- an
    errant scoreboard operator can desynchronize the inning half."""
    if topBot == "TOP ":
        return "BOT "
    if topBot == "BOT ":
        return "TOP "
    return topBot
#Set your COM Port name here:
COMPort = 'COM3'

#Import PySerial
import serial

#Open defined COM port (9600 baud) and reset input buffer
ser = serial.Serial(COMPort, 9600)
ser.reset_input_buffer()

#Set topBot to Top by default from program start, and logic to know if flipping process has completed
topBot = "TOP "
inningFlipped = False

while True:
    #read 169 bytes from serial input (one full scoreboard frame)
    res = ser.read(169)
    #encode characters to unicode for variables without functions
    #NOTE(review): byte offsets below match the All Sport 5000 game code
    #5501 frame layout -- confirm against the Daktronics protocol docs.
    homeScore = chr(res[3]) + chr(res[4])
    guestScore = chr(res[7]) + chr(res[8])
    homeHits = chr(res[29]) + chr(res[30])
    guestHits = chr(res[35]) + chr(res[36])
    homeErrors = chr(res[32])
    guestErrors = chr(res[38])
    homeFirst = chr(res[85])
    guestFirst = chr(res[109])
    homeSecond = chr(res[87])
    guestSecond = chr(res[111])
    homeThird = chr(res[89])
    guestThird = chr(res[113])
    homeFourth = chr(res[91])
    guestFourth = chr(res[115])
    homeFifth = chr(res[93])
    guestFifth = chr(res[117])
    homeSixth = chr(res[95])
    guestSixth = chr(res[119])
    homeSeventh = chr(res[97])
    guestSeventh = chr(res[121])
    homeEighth = chr(res[99])
    guestEighth = chr(res[123])
    homeNinth = chr(res[101])
    guestNinth = chr(res[125])
    homeTenth = chr(res[103])
    guestTenth = chr(res[127])
    ball = chr(res[48])
    strike = chr(res[49])
    out = chr(res[50])
    #Check if Outs have progressed to 3, and call topBotFlipper to swap inning segment
    if out == "3" and inningFlipped == False:
        topBot = topBotFlipper(topBot)
        inningFlipped = True
    #Reset inningFlipped once "out" is reset to "0"
    if out == "0" and inningFlipped:
        inningFlipped = False
    #Call functions and assign values
    quarterText = topBot + intSuffixer(res[9], res[10])
    #Saves formatted data to variable in CSV format.
    #"EOF" exists to mark end of file - potential empty columns at end were causing readability issues in vMix
    scoreboardData = (homeScore + "," + guestScore + "," + quarterText + "," + homeHits + "," + guestHits + ","
                      + homeErrors + "," + guestErrors + "," + guestFirst + "," + homeFirst + "," + guestSecond + ","
                      + homeSecond + "," + guestThird + "," + homeThird + "," + guestFourth + "," + homeFourth + ","
                      + guestFifth + "," + homeFifth + "," + guestSixth + "," + homeSixth + "," + guestSeventh + ","
                      + homeSeventh + "," + guestEighth + "," + homeEighth + "," + guestNinth + "," + homeNinth + ","
                      + guestTenth + "," + homeTenth + "," + ball + "-" + strike + "," + out + " Out," + "EOF")
    #create/overwrite CSV data file
    scoreboardDataFile = open("BaseballDataFile.csv", "w")
    #saves and closes CSV file
    scoreboardDataFile.write(scoreboardData)
    scoreboardDataFile.close()
    #Prints data sets for debugging. Comment out to run the script silently.
    print(res)
    print(scoreboardData)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
14881,
1894,
32634,
46677,
9858,
13,
9078,
25,
9745,
82,
1366,
422,
257,
11775,
83,
20844,
1439,
12771,
23336,
5884,
2884,
2493,
449,
17,
284,
257,
220,
198,
35,
461,
83,
... | 2.71202 | 1,639 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Packaging script for pylemon (setuptools)."""

# NOTE(review): the `imp` module is deprecated and removed in Python 3.12;
# switch to importlib if this must run on newer interpreters.
from imp import load_source
from setuptools import setup

# Load pylemon/__init__.py as a throwaway module to read __version__
# without importing the package (and its dependencies).
version = load_source("version", "pylemon/__init__.py")

setup(
    name="pylemon",
    version=version.__version__,
    license="MIT",
    description="python daemon to monitor specific directories and react on changes",
    author="Timo Furrer",
    author_email="tuxtimo@gmail.com",
    maintainer="Timo Furrer",
    maintainer_email="tuxtimo@gmail.com",
    platforms=["Linux"],
    url="http://github.com/timofurrer/pylemon",
    download_url="http://github.com/timofurrer/pylemon",
    install_requires=["pysingleton"],
    packages=["pylemon"],
    # Installs a `pylemon` console command pointing at pylemon.main:main
    entry_points={"console_scripts": ["pylemon = pylemon.main:main"]},
    package_data={"pylemon": ["*.md"]},
    classifiers=[
        "Development Status :: 4 - Beta",
        "Environment :: Console",
        "Intended Audience :: Developers",
        "Intended Audience :: Education",
        "Intended Audience :: Other Audience",
        "Natural Language :: English",
        "Operating System :: POSIX",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.2",
        "Programming Language :: Python :: Implementation"
    ]
)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
848,
1330,
3440,
62,
10459,
198,
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
9641,
796,
3440,
62,
10459,
7203,
... | 2.64486 | 535 |
#MenuTitle: Round All Kerns
# -*- coding: utf-8 -*-
__doc__="""
If there are any floating-point kern values in the font, this will round them to the nearest integer.
"""

font = Glyphs.font

# The original script only printed values (in a duplicated nested loop) and
# never rounded anything, contradicting the docstring above.
# font.kerning maps master id -> left key -> right key -> kern value.
# NOTE(review): structure and in-place writability assumed from the
# Glyphs.app scripting API -- confirm against the GSFont.kerning docs.
for masterId, leftPairs in font.kerning.items():
    for leftKey, rightPairs in leftPairs.items():
        for rightKey, value in rightPairs.items():
            if type(value) == float:
                # Round to the nearest integer and write it back
                font.kerning[masterId][leftKey][rightKey] = round(value)
2,
23381,
19160,
25,
10485,
1439,
49132,
82,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
834,
15390,
834,
2625,
15931,
198,
1532,
612,
389,
597,
12462,
12,
4122,
479,
1142,
3815,
287,
262,
10369,
11,
428,
4... | 2.660256 | 156 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities, _tables
__all__ = [
'IdentityPoolCognitoIdentityProviderArgs',
'IdentityPoolRoleAttachmentRoleMappingArgs',
'IdentityPoolRoleAttachmentRoleMappingMappingRuleArgs',
'ResourceServerScopeArgs',
'UserPoolAccountRecoverySettingArgs',
'UserPoolAccountRecoverySettingRecoveryMechanismArgs',
'UserPoolAdminCreateUserConfigArgs',
'UserPoolAdminCreateUserConfigInviteMessageTemplateArgs',
'UserPoolClientAnalyticsConfigurationArgs',
'UserPoolClientTokenValidityUnitsArgs',
'UserPoolDeviceConfigurationArgs',
'UserPoolEmailConfigurationArgs',
'UserPoolLambdaConfigArgs',
'UserPoolPasswordPolicyArgs',
'UserPoolSchemaArgs',
'UserPoolSchemaNumberAttributeConstraintsArgs',
'UserPoolSchemaStringAttributeConstraintsArgs',
'UserPoolSmsConfigurationArgs',
'UserPoolSoftwareTokenMfaConfigurationArgs',
'UserPoolUserPoolAddOnsArgs',
'UserPoolUsernameConfigurationArgs',
'UserPoolVerificationMessageTemplateArgs',
]
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
17202,
39410,
25,
428,
2393,
373,
7560,
416,
262,
21624,
12994,
24118,
687,
10290,
357,
27110,
5235,
8,
16984,
13,
17202,
198,
2,
17202,
2141,
407,
4370,
416,
1021,
4556,
345,
821,
1728,
345,
760... | 2.855769 | 624 |
import tornado.gen
import tornado.web
from core.requesthandler import RequestHandler
import core.util
| [
11748,
33718,
13,
5235,
198,
11748,
33718,
13,
12384,
198,
198,
6738,
4755,
13,
25927,
30281,
1330,
19390,
25060,
198,
11748,
4755,
13,
22602,
198
] | 4.12 | 25 |
# monotonic increasing stack
| [
2,
2144,
18970,
516,
2620,
8931,
198
] | 3.857143 | 7 |
"""Problem 24: Lexographic permutations.
Iteratively generate permutations"""
import unittest
def is_permutation(n, d):
    """Return True if the digits of *n* are a permutation of 0..d.

    A dropped leading zero is tolerated: a number with only d digits is
    padded on the left with one zero before the comparison.
    """
    digit_str = str(n)
    if not d <= len(digit_str) <= d + 1:
        return False
    padded = digit_str.zfill(d + 1)
    return sorted(int(c) for c in padded) == list(range(d + 1))
def next_perm(digits):
    """Return the next lexicographic permutation of *digits* as a new list.

    The input list is not modified. Returns False when *digits* is already
    the highest (fully descending) permutation.
    """
    # Rightmost position whose left neighbour is strictly smaller
    pivot_pos = -1
    for idx in range(len(digits) - 1, 0, -1):
        if digits[idx - 1] < digits[idx]:
            pivot_pos = idx
            break
    if pivot_pos == -1:
        return False
    pivot = digits[pivot_pos - 1]
    # Rightmost element of the tail that exceeds the pivot
    swap_pos = max(idx for idx in range(pivot_pos, len(digits)) if digits[idx] > pivot)
    # Swap pivot into the tail, then reverse the tail to minimize it
    tail = digits[pivot_pos:]
    tail[swap_pos - pivot_pos] = pivot
    return digits[:pivot_pos - 1] + [digits[swap_pos]] + tail[::-1]
if __name__ == "__main__":
print(solution())
unittest.main()
| [
37811,
40781,
1987,
25,
17210,
6826,
9943,
32855,
13,
198,
198,
29993,
9404,
7716,
9943,
32855,
37811,
198,
11748,
555,
715,
395,
198,
198,
4299,
318,
62,
16321,
7094,
7,
77,
11,
288,
2599,
198,
220,
220,
220,
37227,
7376,
4657,
284,
... | 2.072614 | 482 |
import os.path
from unittest import TestCase
import numpy as np
import pandas as pd
from ipycli.notebookmanager import NotebookManager
from IPython.utils.tempdir import TemporaryDirectory
if __name__ == '__main__':
    import nose
    # -vvs: verbose without capturing stdout; -x: stop on first failure;
    # --pdb / --pdb-failure: drop into the debugger on errors and failures.
    nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
                   exit=False)
| [
11748,
28686,
13,
6978,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
20966,
88,
44506,
13,
11295,
2070,
37153,
1330,
5740,
2070,
13511,
198,
... | 1.712329 | 292 |
import ftplib
import sys
import os
from lib.FTPUpload import FTPUpload
if __name__ == '__main__':
    try:
        # Upload everything described by the JSON config to the FTP server
        ftp_upload = FTPUpload(os.path.abspath('config/config.json'))
        ftp_upload.upload()
    except FileNotFoundError as error:
        # Missing config file or a file queued for upload
        sys.exit(f'File `{error.filename}` does not exist.')
    except ftplib.all_errors as e:
        # ftplib.all_errors is a tuple of every FTP-related exception type
        sys.exit(e)
11748,
10117,
489,
571,
198,
11748,
25064,
198,
11748,
28686,
198,
6738,
9195,
13,
9792,
5105,
7304,
1330,
19446,
5105,
7304,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1949,
25,
198,
220,
... | 2.337662 | 154 |
import unittest, time, sys, random, math, json
# Make the shared h2o test helpers importable from this directory
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_kmeans, h2o_import as h2i, h2o_common
import socket

print "Assumes you ran ../build_for_clone.py in this directory"
print "Using h2o-nodes.json. Also the sandbox dir"

# Free keys between iterations so the cloud's key-value store does not fill up
DELETE_KEYS_EACH_ITER = True
DO_KMEANS = True
# assumes the cloud was built with CDH3? maybe doesn't matter as long as the file is there
FROM_HDFS = 'CDH3'
DO_REAL = True

if __name__ == '__main__':
    h2o.unit_main()
| [
11748,
555,
715,
395,
11,
640,
11,
25064,
11,
4738,
11,
10688,
11,
33918,
198,
17597,
13,
6978,
13,
2302,
437,
7,
17816,
2637,
4032,
492,
41707,
40720,
492,
41707,
9078,
6,
12962,
198,
11748,
289,
17,
78,
11,
289,
17,
78,
62,
2875... | 2.485149 | 202 |
from scipy.optimize import fsolve
# apt-get install python-scipy
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# REFLEX TIMING
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#
# | R0u R0d R1u R1d T
# | +-----------+ +---------------------------------+ #
# | | | | | #
# --+---+-----------+---/ /---+---------------------------------+-----------#-----> t
# 0 <---dR0---> <-----------------DR1-----------> <---dr---->#
# #
#
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# VALVE TIMING
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#
# | G1u G1d G2u G2d T
# | +------+ +------+ TG1 TG2 #
# | | | | | #
# --+-------------------------+------+----+------+------------X------X------#-----> t
# 0 <--------------dG--------------> <----dt1---->#
# <-dVg--> <-dVg--> <-dt2->#
#
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# Drop vars
t5 = -50 # t5 | __
t0 = 0 # \ | / \
t1 = 50 # \|t0 / t3 \
t2 = 100 # -----+------/------+----------------> t
t3 = 150 # |\ t1 /t2 t4
t4 = 200 # | \__/
# Total duration
T = 3000
# Reflex vars
dR = 100 # delay between trigger off and actual flash
DR1 = 1000 # "long" exposuer
DR0 = 1000 # time for miror to stabilise
# Valve vars
dG = 320 # falling time of a drop (dG = sqrt((2 * 0.5m) / g(= 9.81))
dVg = 50 # valve aperture time for a drop
dt1 = t3 # state of drop G1 at shot
dt2 = t5 # state of drop G2 at shot
def reflex_eq(var):
    """Residuals of the reflex (camera/flash) timing chain for fsolve.

    var holds [R0u, R0d, R1u, R1d]; every residual is zero when the event
    times satisfy the reflex timing diagram at the top of this file.
    """
    R0u, R0d, R1u, R1d = var
    return [
        T - dR - R1d,      # second flash fires dR before total time T
        R1d - DR1 - R1u,   # "long" exposure window DR1
        R1u - DR0 - R0u,   # mirror stabilisation gap DR0
        R0u + dR - R0d,    # trigger-to-flash delay dR
    ]
def valve_eq(var):
    """Residuals of the valve (drop release) timing constraints.

    fsolve drives all four residuals to zero; ``var`` holds
    [G1u, G1d, G2u, G2d].  Relies on the module-level timing
    constants T, dG, dVg, dt1 and dt2.
    """
    g1_up, g1_down, g2_up, g2_down = var
    return [
        (T - dt1) - dG - g1_up,  # drop 1 released dG before its shot time TG1 = T - dt1
        (T - dt2) - dG - g2_up,  # drop 2 released dG before its shot time TG2 = T - dt2
        g1_up + dVg - g1_down,   # valve opening 1 lasts dVg
        g2_up + dVg - g2_down,   # valve opening 2 lasts dVg
    ]
# Solve reflex timing equations
reflex_eq_sol = fsolve(reflex_eq, [0, 0, 0, 0])
# Solve valve timing equations
valve_eq_sol = fsolve(valve_eq, [0, 0, 0, 0])
print "Reflex timing :"
print reflex_eq_sol
print "Valve timing :"
print valve_eq_sol
# make the ordered timetable
timetable = []
state_reflex = True
state_valve = True
i = 0
j = 0
R = 23
V = 24
# Merge the two (assumed time-ordered) event lists into one chronological
# timetable of (time, GPIO pin, new state) tuples, toggling the state of
# each pin at every one of its events.  This is a classic two-pointer merge:
# i walks reflex events, j walks valve events.
while True:
    # Both lists exhausted: merge complete.
    if i == len(reflex_eq_sol) and j == len(valve_eq_sol):
        break
    # Only reflex events remain.
    elif j == len(valve_eq_sol):
        timetable.append((reflex_eq_sol[i], R, state_reflex))
        state_reflex = not state_reflex
        i += 1
    # Only valve events remain.
    elif i == len(reflex_eq_sol):
        timetable.append((valve_eq_sol[j], V, state_valve))
        state_valve = not state_valve
        j += 1
    # Next reflex event precedes next valve event.
    elif reflex_eq_sol[i] < valve_eq_sol[j]:
        timetable.append((reflex_eq_sol[i], R, state_reflex))
        state_reflex = not state_reflex
        i += 1
    # Next valve event precedes (or ties with) next reflex event.
    else:
        timetable.append((valve_eq_sol[j], V, state_valve))
        state_valve = not state_valve
        j += 1
print timetable
import datetime
import time as time
current_milli_time = lambda: int(round(time.time() * 1000))
log_t = current_milli_time()
t = 0
tt = 0
while True:
if len(timetable) == 0:
break
dt, pin, state = timetable[0]
print (current_milli_time() - log_t), tt, dt, pin, state
time.sleep((dt-t)/1000.0)
tt += dt - t
t = dt
timetable.pop(0)
print 'done'
| [
6738,
629,
541,
88,
13,
40085,
1096,
1330,
43458,
6442,
201,
198,
2,
15409,
12,
1136,
2721,
21015,
12,
1416,
541,
88,
201,
198,
201,
198,
2,
43661,
43661,
43661,
43661,
43661,
43661,
43661,
43661,
43661,
43661,
24022,
25,
201,
198,
2,... | 1.879068 | 2,274 |
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
628,
198,
2,
13610,
534,
4981,
994,
13,
628
] | 3.551724 | 29 |
from funlib.show.neuroglancer import add_layer, ScalePyramid
import argparse
import daisy
import glob
import neuroglancer
import numpy as np
import os
import webbrowser
from swc_parser import _parse_swc
from pathlib import Path
import itertools
import random
import logging
ngid = itertools.count(start=1)
parser = argparse.ArgumentParser()
parser.add_argument(
"--file", "-f", type=str, action="append", help="The path to the container to show"
)
parser.add_argument(
"--datasets",
"-d",
type=str,
nargs="+",
action="append",
help="The datasets in the container to show",
)
parser.add_argument(
"--synapses",
"-s",
type=str,
action="append",
help="A numpy npz containing synapse annotations as stored by "
"synful.gunpowder.ExtractSynapses",
)
parser.add_argument(
"--time",
"-t",
type=int,
action="store",
dest="minutes",
default=0,
help="How long you want neuroglancer to stay available",
)
parser.add_argument(
"--output",
"-o",
type=str,
action="store",
dest="log",
default="",
help="Where to output url to",
)
args = parser.parse_args()
print("passed in arguments: {}".format(args))
minutes = args.minutes
print("showing neuroglancer for {} minutes".format(minutes))
if args.log != "":
logging.basicConfig(level=logging.INFO, filename=args.log)
else:
logging.basicConfig(level=logging.INFO)
neuroglancer.set_server_bind_address("0.0.0.0")
viewer = neuroglancer.Viewer()
swc_path = Path(
"/nrs/funke/mouselight-v2/2017-07-02",
"consensus-neurons-with-machine-centerpoints-labelled-as-swcs/G-002.swc",
)
swc_path = Path(
"/groups/mousebrainmicro/mousebrainmicro/cluster/2018-07-02/carver/augmented-with-skeleton-nodes-as-swcs/G-002.swc"
)
n5_path = Path(
"/nrs/funke/mouselight-v2/2018-07-02",
"consensus-neurons-with-machine-centerpoints-labelled-as-swcs-carved.n5/",
)
transform = Path("/nrs/mouselight/SAMPLES/2018-07-02/transform.txt")
# swc
neuron_graph = _parse_swc(swc_path)
origin, spacing = load_transform(transform)
voxel_size = spacing
voxel_size_rounded = np.array((10, 3, 3)[::-1])
nodes = []
edges = []
print(len(neuron_graph.nodes))
# Build neuroglancer annotations from the neuron graph: one line annotation
# per edge and one ellipsoid per edge's first endpoint.  Coordinates are
# converted from SWC (physical) space to voxel space.
# NOTE(review): swc_to_voxel_coords and the earlier load_transform are not
# defined or imported in this file as shown -- presumably provided by
# swc_parser or a stripped section; confirm before running.
for node_a, node_b in neuron_graph.edges:
    a = swc_to_voxel_coords(neuron_graph.nodes[node_a]["location"], origin, spacing)
    b = swc_to_voxel_coords(neuron_graph.nodes[node_b]["location"], origin, spacing)
    pos_u = a
    pos_v = b
    # Ellipsoid radii are given in physical units and scaled into voxels.
    nodes.append(
        neuroglancer.EllipsoidAnnotation(
            center=pos_u, radii=(3, 3, 3) / voxel_size, id=next(ngid)
        )
    )
    edges.append(
        neuroglancer.LineAnnotation(point_a=pos_u, point_b=pos_v, id=next(ngid))
    )
    # Cap the number of annotations to keep the viewer responsive.
    if len(nodes) > 10000:
        break
# Mark the last visited endpoint with a smaller ellipsoid.  pos_v is the
# loop variable from the final iteration (assumes the graph has >= 1 edge).
nodes.append(
    neuroglancer.EllipsoidAnnotation(
        center=pos_v, radii=(1, 1, 1) / voxel_size, id=next(ngid)
    )
)
a = daisy.open_ds(str(n5_path.absolute()), "volume")
with viewer.txn() as s:
add_layer(s, a, "volume", shader="rgb", c=[0, 0, 0])
with viewer.txn() as s:
s.layers["edges"] = neuroglancer.AnnotationLayer(
filter_by_segmentation=False, annotation_color="#add8e6", annotations=edges
)
s.layers["nodes"] = neuroglancer.AnnotationLayer(
filter_by_segmentation=False, annotation_color="#ff00ff", annotations=nodes
)
url = str(viewer)
logging.info(url)
import time
time.sleep(60 * minutes)
try:
if minutes < 1:
input("Press ENTER to exit:")
except:
pass
| [
6738,
1257,
8019,
13,
12860,
13,
710,
1434,
4743,
8250,
1330,
751,
62,
29289,
11,
21589,
20519,
20255,
198,
11748,
1822,
29572,
198,
11748,
12379,
13560,
198,
11748,
15095,
198,
11748,
7669,
4743,
8250,
198,
11748,
299,
32152,
355,
45941,... | 2.38092 | 1,457 |
from .mpi import MPINonRootEventLoop, MPIRootEventLoop
__all__ = [
"MPINonRootEventLoop",
"MPIRootEventLoop",
]
| [
6738,
764,
3149,
72,
1330,
4904,
1268,
261,
30016,
9237,
39516,
11,
4904,
4663,
1025,
9237,
39516,
198,
198,
834,
439,
834,
796,
685,
198,
220,
220,
220,
366,
7378,
1268,
261,
30016,
9237,
39516,
1600,
198,
220,
220,
220,
366,
7378,
... | 2.42 | 50 |
__author__ = "James Clark"
__copyright__ = "Copyright 2020, F.R.A.M.E Project"
__credits__ = ["James Clark"]
__version__ = "1.0"
# Import in necessary libraries
import threading
import time
import sqlForGui
import gui
from datetime import datetime, date
current_time_and_date = ''
# Function that runs the system timer
# When the system timer reaches 15, updates the SQL database with late and attended SQL queries
# Also calls the createAttendanceList function, takes the current module code and class time and date.
# Initializes isLate boolean.
isLate = False
# lateTimer that activates when late_timer reaches
# Function that gets the current time and date.
# Calls the getClassDate sql statement to check if the date/time matches a class.
# @params, current time and date, and the room number selected on start up. Default = Room_001
# Initializes global variable
classCheckOver = True
# Class check function that checks if a class is found.
# Get the current time and date, and recursively calls itself again.
# Function that starts both the class thread and the late thread.
# @param class_length this is the class length for the class found on the database.
| [
834,
9800,
834,
796,
366,
14731,
11264,
1,
198,
834,
22163,
4766,
834,
796,
366,
15269,
12131,
11,
376,
13,
49,
13,
32,
13,
44,
13,
36,
4935,
1,
198,
834,
66,
20696,
834,
796,
14631,
14731,
11264,
8973,
198,
834,
9641,
834,
796,
... | 3.878689 | 305 |
################################################################################
# _ ____ ___ _____ _ _ _ _ #
# / \ / ___|_ _| |_ _|__ ___ | | | _(_) |_ #
# / _ \| | | | | |/ _ \ / _ \| | |/ / | __| #
# / ___ \ |___ | | | | (_) | (_) | | <| | |_ #
# ____ /_/ \_\____|___|___|_|\___/ \___/|_|_|\_\_|\__| #
# / ___|___ __| | ___ / ___| __ _ _ __ ___ _ __ | | ___ ___ #
# | | / _ \ / _` |/ _ \ \___ \ / _` | '_ ` _ \| '_ \| |/ _ \/ __| #
# | |__| (_) | (_| | __/ ___) | (_| | | | | | | |_) | | __/\__ \ #
# \____\___/ \__,_|\___| |____/ \__,_|_| |_| |_| .__/|_|\___||___/ #
# |_| #
################################################################################
# #
# Copyright (c) 2015 Cisco Systems #
# All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT #
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #
# License for the specific language governing permissions and limitations #
# under the License. #
# #
################################################################################
"""
Simple demonstration application configuring a basic ACI fabric
"""
from acitoolkit.acisession import Session
from acitoolkit.acitoolkit import Credentials, Tenant, AppProfile, EPG
from acitoolkit.acitoolkit import Context, BridgeDomain, Contract, FilterEntry
def main():
    """Push a two-EPG demo configuration to the APIC.

    Builds a tenant containing two EPGs that share one VRF and one
    bridge domain, then wires a single tcp/3306 contract from the
    database EPG (provider) to the web EPG (consumer) and pushes the
    resulting JSON to the APIC.
    """
    description = ('Create 2 EPGs within the same Context and have'
                   '1 EPG provide a contract to the other EPG.')
    args = Credentials('apic', description).get()

    # Tenant and application profile scaffolding.
    demo_tenant = Tenant('aci-toolkit-demo')
    demo_app = AppProfile('my-demo-app', demo_tenant)

    # Two EPGs placed in the same VRF and the same bridge domain.
    frontend = EPG('web-frontend', demo_app)
    backend = EPG('database-backend', demo_app)
    vrf = Context('VRF-1', demo_tenant)
    bridge_domain = BridgeDomain('BD-1', demo_tenant)
    bridge_domain.add_context(vrf)
    frontend.add_bd(bridge_domain)
    backend.add_bd(bridge_domain)

    # Single-entry contract permitting tcp destination port 3306 (MySQL).
    mysql_contract = Contract('mysql-contract', demo_tenant)
    FilterEntry('entry1',
                applyToFrag='no',
                arpOpc='unspecified',
                dFromPort='3306',
                dToPort='3306',
                etherT='ip',
                prot='tcp',
                sFromPort='1',
                sToPort='65535',
                tcpRules='unspecified',
                parent=mysql_contract)

    # Database side provides the contract; web side consumes it.
    backend.provide(mysql_contract)
    frontend.consume(mysql_contract)

    # Authenticate against the APIC and push the configuration.
    session = Session(args.url, args.login, args.password)
    session.login()
    # Cleanup (uncomment the next line to delete the config)
    # demo_tenant.mark_as_deleted()
    resp = demo_tenant.push_to_apic(session)
    if resp.ok:
        # Echo what was sent, for inspection.
        print('Pushed the following JSON to the APIC')
        print('URL: ' + str(demo_tenant.get_url()))
        print('JSON: ' + str(demo_tenant.get_json()))
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
| [
29113,
29113,
14468,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
4808,
220,
220,
220,
220,
1427,
46444,
220,
220,
220,
29343,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
4808,
4808... | 1.897843 | 2,457 |
import logging
from pure_aionsq.settings import loggerName
_logger = logging.getLogger(loggerName)
_logger.setLevel(logging.DEBUG)
logger = _logger
| [
11748,
18931,
198,
198,
6738,
5899,
62,
64,
507,
80,
13,
33692,
1330,
49706,
5376,
628,
198,
62,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
6404,
1362,
5376,
8,
198,
62,
6404,
1362,
13,
2617,
4971,
7,
6404,
2667,
13,
30531,
... | 2.903846 | 52 |
import json
import os
from time import sleep
import paho.mqtt.client as mqtt
from nhc2_coco.const import MQTT_TOPIC_PUBLIC_AUTH_RSP, MQTT_PROTOCOL, MQTT_TRANSPORT, MQTT_TOPIC_PUBLIC_AUTH_CMD
class CoCoProfiles:
"""CoCoDiscover will collect a list of profiles on a NHC2
"""
| [
11748,
33918,
198,
11748,
28686,
198,
6738,
640,
1330,
3993,
198,
198,
11748,
279,
17108,
13,
76,
80,
926,
13,
16366,
355,
285,
80,
926,
198,
198,
6738,
299,
71,
66,
17,
62,
66,
25634,
13,
9979,
1330,
337,
48,
15751,
62,
35222,
21... | 2.375 | 120 |
# Copyright 2021 Hakan Kjellerstrand hakank@gmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A programming puzzle from Einav in OR-tools CP-SAT Solver.
From
'A programming puzzle from Einav'
http://gcanyon.wordpress.com/2009/10/28/a-programming-puzzle-from-einav/
'''
My friend Einav gave me this programming puzzle to work on. Given
this array of positive and negative numbers:
33 30 -10 -6 18 7 -11 -23 6
...
-25 4 16 30 33 -23 -4 4 -23
You can flip the sign of entire rows and columns, as many of them
as you like. The goal is to make all the rows and columns sum to positive
numbers (or zero), and then to find the solution (there are more than one)
that has the smallest overall sum. So for example, for this array:
33 30 -10
-16 19 9
-17 -12 -14
You could flip the sign for the bottom row to get this array:
33 30 -10
-16 19 9
17 12 14
Now all the rows and columns have positive sums, and the overall total is
108.
But you could instead flip the second and third columns, and the second
row, to get this array:
33 -30 10
16 19 9
-17 12 14
All the rows and columns still total positive, and the overall sum is just
66. So this solution is better (I don't know if it's the best)
A pure brute force solution would have to try over 30 billion solutions.
I wrote code to solve this in J. I'll post that separately.
'''
This is a port of my old CP model einav_puzzle.py
After a bit of tweaking with the exclusion of 0 in the domain
of row_signs and col_signs this model now work. See comments below.
This model was created by Hakan Kjellerstrand (hakank@gmail.com)
Also see my other OR-tools models: http://www.hakank.org/or_tools/
"""
from __future__ import print_function
from ortools.sat.python import cp_model as cp
import math, sys
# from cp_sat_utils import *
if __name__ == '__main__':
main()
| [
2,
15069,
33448,
24734,
272,
509,
73,
12368,
2536,
392,
387,
74,
962,
31,
14816,
13,
785,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
... | 3.269799 | 745 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A file-like object that decrypts the data it reads.
It reads the ciphertext from a given other file-like object, and decrypts it.
"""
import io
from typing import BinaryIO
from tink import core
from tink.cc.pybind import tink_bindings
from tink.streaming_aead import _file_object_adapter
class RawDecryptingStream(io.RawIOBase):
  """A file-like object which decrypts reads from an underlying object.

  It reads the ciphertext from the wrapped file-like object, and decrypts it.
  """

  def __init__(self, stream_aead: tink_bindings.StreamingAead,
               ciphertext_source: BinaryIO, associated_data: bytes, *,
               close_ciphertext_source: bool):
    """Create a new RawDecryptingStream.

    Args:
      stream_aead: C++ StreamingAead primitive from which a C++ DecryptingStream
        will be obtained.
      ciphertext_source: A readable file-like object from which ciphertext bytes
        will be read.
      associated_data: The associated data to use for decryption.
      close_ciphertext_source: Whether ciphertext_source should be closed when
        close() is called.

    Raises:
      ValueError: If ciphertext_source is not readable.
    """
    super().__init__()
    self._ciphertext_source = ciphertext_source
    self._close_ciphertext_source = close_ciphertext_source
    if not ciphertext_source.readable():
      raise ValueError('ciphertext_source must be readable')
    # Wrap the Python file-like object so the C++ layer can pull raw bytes
    # from it through a uniform adapter interface.
    cc_ciphertext_source = _file_object_adapter.FileObjectAdapter(
        ciphertext_source)
    self._input_stream_adapter = self._get_input_stream_adapter(
        stream_aead, associated_data, cc_ciphertext_source)

  @staticmethod
  @core.use_tink_errors
  def _get_input_stream_adapter(cc_primitive, aad, source):
    """Implemented as a separate method to ensure correct error transform."""
    return tink_bindings.new_cc_decrypting_stream(
        cc_primitive, aad, source)

  @core.use_tink_errors
  def _read_from_input_stream_adapter(self, size: int) -> bytes:
    """Implemented as a separate method to ensure correct error transform."""
    return self._input_stream_adapter.read(size)

  def read(self, size=-1) -> bytes:
    """Read and return up to size bytes, where size is an int.

    It blocks until at least one byte can be returned.

    Args:
      size: Maximum number of bytes to read. As a convenience, if size is
        unspecified or -1, all bytes until EOF are returned.

    Returns:
      Bytes read. If b'' is returned and size was not 0, this indicates EOF.

    Raises:
      TinkError if there was a permanent error.
    """
    if self.closed:  # pylint:disable=using-constant-test
      raise ValueError('read on closed file.')
    if size is None:
      size = -1
    if size < 0:
      # Negative size means "read to EOF"; delegate to RawIOBase.readall,
      # which repeatedly calls read() with a positive size.
      return self.readall()
    try:
      # _input_stream_adapter may return an empty string when there is currently
      # no data is available. In Python (in blocking mode), read is expected to
      # block until some data is available.
      # https://docs.python.org/3/library/io.html also mentions a
      # non-blocking mode, but according to https://bugs.python.org/issue13322
      # that mode is not properly implemented and not really used.
      while True:
        data = self._read_from_input_stream_adapter(size)
        if data:
          return data
    except tink_bindings.PythonTinkStreamFinishedException:
      # The underlying C++ stream signals end-of-stream via this exception;
      # translate it into the io convention of returning b''.
      return b''

  def readinto(self, b: bytearray) -> int:
    """Read bytes into a pre-allocated bytes-like object b.

    Args:
      b: Bytes-like object to which data will be read.

    Returns:
      Number of bytes read. It returns 0 if EOF is reached, and None if no data
      is available at the moment.

    Raises:
      TinkError if there was a permanent error.
    """
    data = self.read(len(b))
    n = len(data)
    b[:n] = data
    return n

  def close(self) -> None:
    """Close the stream. Has no effect on a closed stream."""
    if self.closed:  # pylint:disable=using-constant-test
      return
    if self._close_ciphertext_source:
      self._ciphertext_source.close()
    super().close()

  def readable(self) -> bool:
    """Return True if the stream can be read from."""
    return True
| [
2,
15069,
12131,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
733... | 2.910105 | 1,613 |
'''
Created on 14/9/2015
@author: victor
'''
import sys
import numpy
from nma_algo_char.data_retrieval import calculate_rmsds
if __name__ == '__main__':
current_coords = numpy.delete(numpy.loadtxt(sys.argv[1]),0,1)
proposal_coords = numpy.delete(numpy.loadtxt(sys.argv[2]),0,1)
anm_final_coords = numpy.delete(numpy.loadtxt(sys.argv[3]),0,1)
minim_final_coords = numpy.delete(numpy.loadtxt(sys.argv[4]),0,1)
executions = {
"curr_vs_prop":(current_coords, proposal_coords),
"curr_vs_anmf":(current_coords, anm_final_coords),
"curr_vs_minim":(current_coords, minim_final_coords),
"prop_vs_anmf":(proposal_coords, anm_final_coords),
"prop_vs_minim":(proposal_coords, minim_final_coords),
"anmf_vs_minim":(anm_final_coords, minim_final_coords)
}
for execution_label in executions:
print "Calculating ",execution_label
reference = executions[execution_label][0]
conf = executions[execution_label][1]
numpy.savetxt(execution_label, calculate_rmsds(reference, conf))
| [
7061,
6,
198,
41972,
319,
1478,
14,
24,
14,
4626,
198,
198,
31,
9800,
25,
2210,
273,
198,
7061,
6,
198,
11748,
25064,
198,
11748,
299,
32152,
198,
6738,
299,
2611,
62,
282,
2188,
62,
10641,
13,
7890,
62,
1186,
380,
18206,
1330,
15... | 2.017331 | 577 |
#
# @lc app=leetcode id=355 lang=python3
#
# [355] Design Twitter
#
import collections
import heapq
# @lc code=start
# Your Twitter object will be instantiated and called as such:
# obj = Twitter()
# obj.postTweet(1,5)
# param_2 = obj.getNewsFeed(1)
# obj.follow(1,2)
# obj.postTweet(2,6)
# param_2 = obj.getNewsFeed(1)
# print(param_2)
# obj.unfollow(followerId,followeeId)
# @lc code=end
| [
2,
198,
2,
2488,
44601,
598,
28,
293,
316,
8189,
4686,
28,
28567,
42392,
28,
29412,
18,
198,
2,
198,
2,
685,
28567,
60,
8495,
3009,
198,
2,
198,
11748,
17268,
198,
11748,
24575,
80,
198,
2,
2488,
44601,
2438,
28,
9688,
628,
198,
... | 2.551948 | 154 |
from __future__ import absolute_import
import json
import logging
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
11748,
33918,
198,
11748,
18931,
628
] | 4.25 | 16 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
#!/usr/bin/env python3
import json
import gzip
MAX = 1000
if __name__ == "__main__":
    # Generate MAX line-delimited JSON records into a gzipped test fixture.
    with gzip.open("./tests/testdata.ldj.gz", "wt") as out_stream:
        for idx in range(MAX):
            record = {"foo": idx,
                      "bar": MAX - idx,
                      "baz": "test{}".format(idx)}
            out_stream.write(json.dumps(record) + "\n")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
33918,
198,
11748,
308,
13344,
198,
198,
22921,
796,
8576,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
351,
308,
13344,
13,
9654,
... | 1.67757 | 214 |
from discord import Embed, DMChannel, File
import os
| [
6738,
36446,
1330,
13302,
276,
11,
14848,
29239,
11,
9220,
198,
11748,
28686,
628
] | 3.857143 | 14 |
import os
import urllib.request
import csv
csv_file_path = 'mathscinet.csv'

# Download the AMS list of annals/serials (CSV with "Full Title" and
# "Abbrev" columns, among others).
urllib.request.urlretrieve('https://mathscinet.ams.org/msnhtml/annser.csv', csv_file_path)

# Convert the CSV into "Full Title=Abbrev" lines, one journal per line.
# Fixes vs. the previous version: the csv.writer no longer shadows the
# output file handle, and line_count no longer double-counts the first
# data row (DictReader already consumes the header).
with open(csv_file_path, mode='r') as csv_file, open('journal_abbreviations_mathematics.txt', mode='w') as abbreviations_file:
    abbreviations_writer = csv.writer(abbreviations_file, delimiter='=', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    csv_reader = csv.DictReader(csv_file)
    line_count = 0
    for row in csv_reader:
        abbreviations_writer.writerow([row["Full Title"], row["Abbrev"]])
        line_count += 1
print(f'Processed {line_count} lines.')
| [
11748,
28686,
198,
11748,
2956,
297,
571,
13,
25927,
198,
11748,
269,
21370,
198,
198,
40664,
62,
7753,
62,
6978,
796,
705,
11018,
1416,
42504,
13,
40664,
6,
198,
333,
297,
571,
13,
25927,
13,
6371,
1186,
30227,
10786,
5450,
1378,
110... | 2.37464 | 347 |