content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
length = 28
dog = np.load('stabilization_small.npy')
num_plot = 0
if num_plot == 1:
for j in range(10):
img = dog[j,:].reshape((length, length)) # for printing numbers
# img = np.random.randint(0,2,size=(length, length))
imgplot = plt.imshow(img)
plt.savefig('example' + str(j) + '.png')
else:
small_plot = np.load('stabilization_small.npy')
img = small_plot[:112,:].T
imgplot = plt.imshow(img)
plt.title('Node Stabilization for Small Weights')
plt.xlabel('Sweep Number')
plt.ylabel('Node Number')
plt.savefig('stabilization_small.png')
plt.clf()
large_plot = np.load('stabilization_large.npy')
img = large_plot[:112, :].T
imgplot = plt.imshow(img)
plt.title('Node Stabilization for Large Weights')
plt.xlabel('Sweep Number')
plt.ylabel('Node Number')
plt.savefig('stabilization_large.png') | [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
2603,
29487,
8019,
13,
9060,
355,
29034,
9600,
198,
11748,
299,
32152,
355,
45941,
198,
198,
13664,
796,
2579,
198,
198,
9703,
796,
45941,
13,
2220,
10786,
301,
14991,... | 2.337321 | 418 |
from itertools import chain
from json import load
import numpy as np
if __name__ == "__main__":
samples = load(open("samples.json"))
rdfs = list(chain.from_iterable([s["rdf"] for s in samples]))
hal = np.sum([s["hal"] for s in samples])
total = len(rdfs)
exists = len([r for s, r, o, res in rdfs if res == "yes"])
doesnt = len([r for s, r, o, res in rdfs if res == "no"])
wrong = len([r for s, r, o, res in rdfs if res == "no-lex"])
wrong_reg = len([r for s, r, o, res in rdfs if res == "no-reg"])
print([(s,r,o) for s, r, o, res in rdfs if res == "no"])
print("rdfs", total, "hallucinations", hal, "exists", exists, "doesn't", doesnt, "wrong-lex", wrong, "wrong-reg", wrong_reg)
print("verify", exists, "+", doesnt, "+", wrong, "+", wrong_reg, "=", total)
| [
6738,
340,
861,
10141,
1330,
6333,
198,
6738,
33918,
1330,
3440,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
8405,
796,
3440,
7,
9654,
7203,
82,
12629... | 2.442424 | 330 |
from django.core import management
from django.core.management.base import BaseCommand
| [
6738,
42625,
14208,
13,
7295,
1330,
4542,
198,
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
628
] | 4.190476 | 21 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-07-05 22:01
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import re
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
940,
319,
2177,
12,
2998,
12,
2713,
2534,
25,
486,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,... | 2.873418 | 79 |
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
import torchvision.transforms as transforms
import torchvision.models as models
import argparse
import re
import os
from torchsummary import summary
# local imports
import model
from preprocess import mean, std, preprocess_input_function
from resnet_features import resnet18_features, resnet34_features, resnet50_features, resnet101_features, resnet152_features
from densenet_features import densenet121_features, densenet161_features, densenet169_features, densenet201_features
from vgg_features import vgg11_features, vgg11_bn_features, vgg13_features, vgg13_bn_features, vgg16_features, vgg16_bn_features,\
vgg19_features, vgg19_bn_features
from receptive_field import compute_proto_layer_rf_info_v2
if __name__ == "__main__":
# book keeping namings and code
from settings import base_architecture, img_size, prototype_shape, num_classes, \
prototype_activation_function, add_on_layers_type, experiment_run
base_architecture_type = re.match('^[a-z]*', base_architecture).group(0)
# load the data
from settings import train_dir, test_dir, train_push_dir, \
train_batch_size, test_batch_size, train_push_batch_size
normalize = transforms.Normalize(mean=mean,
std=std)
# construct the model
ppnet = model.construct_PPNet(base_architecture=base_architecture,
pretrained=True, img_size=img_size,
prototype_shape=prototype_shape,
num_classes=num_classes,
prototype_activation_function=prototype_activation_function,
add_on_layers_type=add_on_layers_type)
# if prototype_activation_function == 'linear':
# ppnet.set_last_layer_incorrect_connection(incorrect_strength=0)
ppnet_multi = torch.nn.DataParallel(ppnet)
class_specific = True
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # PyTorch v0.4.0
model = ppnet.to(device)
layer_filter_sizes, layer_strides, layer_paddings = model.features.conv_info()
n_out, j_out, r_out, start_out = model.proto_layer_rf_info # output size, receptive field jump of output layer, receptive field size of output layer, center of receptive field of output layer
print("ProtoPNet summary")
summary(model, (3, 224, 224))
print("base_architecture", base_architecture)
print("prototype_shape", model.prototype_shape)
print("num_prototypes", model.num_prototypes)
print("prototype_vectors", model.prototype_vectors.shape)
print("add_on_layers", model.add_on_layers)
print("img_size", model.img_size)
print("num_classes", model.num_classes)
print("prototype_activation_function", model.prototype_activation_function)
print("features", model.features)
print("prototype_shape", model.prototype_shape)
print("prototype_class_identity", model.prototype_class_identity.size())
print("last_layer.weight.data", model.last_layer.weight.data.size())
print("prototype_distances", model.prototype_distances) | [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
26791,
13,
19849,
62,
89,
2238,
355,
2746,
62,
89,
2238,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
11748,
28034,
10178,
13,
7645,
23914,
35... | 2.599362 | 1,253 |
# pylint:disable=missing-class-docstring,no-self-use
import os
import unittest
import angr
test_location = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'binaries', 'tests')
if __name__ == "__main__":
unittest.main()
| [
2,
279,
2645,
600,
25,
40223,
28,
45688,
12,
4871,
12,
15390,
8841,
11,
3919,
12,
944,
12,
1904,
198,
11748,
28686,
198,
11748,
555,
715,
395,
198,
198,
11748,
281,
2164,
628,
198,
9288,
62,
24886,
796,
28686,
13,
6978,
13,
22179,
... | 2.535354 | 99 |
from __future__ import absolute_import
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
def _crps_tf(y_true, y_pred, factor=0.05):
'''
core of (pseudo) CRPS loss
y_true: two-dimensional arrays
y_pred: two-dimensional arrays
factor: importance of std term
'''
# mean absolute error
mae = K.mean(tf.abs(y_pred - y_true))
dist = tf.math.reduce_std(y_pred)
return mae - factor*dist
def crps2d_tf(y_true, y_pred, factor=0.05):
'''
(Experimental)
An approximated continuous ranked probability score (CRPS) loss function:
CRPS = mean_abs_err - factor * std
* Note that the "real CRPS" = mean_abs_err - mean_pairwise_abs_diff
Replacing mean pairwise absolute difference by standard deviation offers
a complexity reduction from O(N^2) to O(N*logN)
** factor > 0.1 may yield negative loss values.
Compatible with high-level Keras training methods
Input
----------
y_true: training target with shape=(batch_num, x, y, 1)
y_pred: a forward pass with shape=(batch_num, x, y, 1)
factor: relative importance of standard deviation term.
'''
y_pred = tf.convert_to_tensor(y_pred)
y_true = tf.cast(y_true, y_pred.dtype)
y_pred = tf.squeeze(y_pred)
y_true = tf.squeeze(y_true)
batch_num = y_pred.shape.as_list()[0]
crps_out = 0
for i in range(batch_num):
crps_out += _crps_tf(y_true[i, ...], y_pred[i, ...], factor=factor)
return crps_out/batch_num
def _crps_np(y_true, y_pred, factor=0.05):
'''
Numpy version of _crps_tf
'''
# mean absolute error
mae = np.nanmean(np.abs(y_pred - y_true))
dist = np.nanstd(y_pred)
return mae - factor*dist
def crps2d_np(y_true, y_pred, factor=0.05):
'''
(Experimental)
Nunpy version of `crps2d_tf`.
Documentation refers to `crps2d_tf`.
'''
y_true = np.squeeze(y_true)
y_pred = np.squeeze(y_pred)
batch_num = len(y_pred)
crps_out = 0
for i in range(batch_num):
crps_out += _crps_np(y_true[i, ...], y_pred[i, ...], factor=factor)
return crps_out/batch_num
# ========================= #
# Dice loss and variants
def dice_coef(y_true, y_pred, const=K.epsilon()):
'''
Sørensen–Dice coefficient for 2-d samples.
Input
----------
y_true, y_pred: predicted outputs and targets.
const: a constant that smooths the loss gradient and reduces numerical instabilities.
'''
# flatten 2-d tensors
y_true_pos = tf.reshape(y_true, [-1])
y_pred_pos = tf.reshape(y_pred, [-1])
# get true pos (TP), false neg (FN), false pos (FP).
true_pos = tf.reduce_sum(y_true_pos * y_pred_pos)
false_neg = tf.reduce_sum(y_true_pos * (1-y_pred_pos))
false_pos = tf.reduce_sum((1-y_true_pos) * y_pred_pos)
# 2TP/(2TP+FP+FN) == 2TP/()
coef_val = (2.0 * true_pos + const)/(2.0 * true_pos + false_pos + false_neg)
return coef_val
def dice(y_true, y_pred, const=K.epsilon()):
'''
Sørensen–Dice Loss.
dice(y_true, y_pred, const=K.epsilon())
Input
----------
const: a constant that smooths the loss gradient and reduces numerical instabilities.
'''
# tf tensor casting
y_pred = tf.convert_to_tensor(y_pred)
y_true = tf.cast(y_true, y_pred.dtype)
# <--- squeeze-out length-1 dimensions.
y_pred = tf.squeeze(y_pred)
y_true = tf.squeeze(y_true)
loss_val = 1 - dice_coef(y_true, y_pred, const=const)
return loss_val
# ========================= #
# Tversky loss and variants
def tversky_coef(y_true, y_pred, alpha=0.5, const=K.epsilon()):
'''
Weighted Sørensen–Dice coefficient.
Input
----------
y_true, y_pred: predicted outputs and targets.
const: a constant that smooths the loss gradient and reduces numerical instabilities.
'''
# flatten 2-d tensors
y_true_pos = tf.reshape(y_true, [-1])
y_pred_pos = tf.reshape(y_pred, [-1])
# get true pos (TP), false neg (FN), false pos (FP).
true_pos = tf.reduce_sum(y_true_pos * y_pred_pos)
false_neg = tf.reduce_sum(y_true_pos * (1-y_pred_pos))
false_pos = tf.reduce_sum((1-y_true_pos) * y_pred_pos)
# TP/(TP + a*FN + b*FP); a+b = 1
coef_val = (true_pos + const)/(true_pos + alpha*false_neg + (1-alpha)*false_pos + const)
return coef_val
def tversky(y_true, y_pred, alpha=0.5, const=K.epsilon()):
'''
Tversky Loss.
tversky(y_true, y_pred, alpha=0.5, const=K.epsilon())
----------
Hashemi, S.R., Salehi, S.S.M., Erdogmus, D., Prabhu, S.P., Warfield, S.K. and Gholipour, A., 2018.
Tversky as a loss function for highly unbalanced image segmentation using 3d fully convolutional deep networks.
arXiv preprint arXiv:1803.11078.
Input
----------
alpha: tunable parameter within [0, 1]. Alpha handles imbalance classification cases.
const: a constant that smooths the loss gradient and reduces numerical instabilities.
'''
# tf tensor casting
y_pred = tf.convert_to_tensor(y_pred)
y_true = tf.cast(y_true, y_pred.dtype)
# <--- squeeze-out length-1 dimensions.
y_pred = tf.squeeze(y_pred)
y_true = tf.squeeze(y_true)
loss_val = 1 - tversky_coef(y_true, y_pred, alpha=alpha, const=const)
return loss_val
def focal_tversky(y_true, y_pred, alpha=0.5, gamma=4/3, const=K.epsilon()):
'''
Focal Tversky Loss (FTL)
focal_tversky(y_true, y_pred, alpha=0.5, gamma=4/3)
----------
Abraham, N. and Khan, N.M., 2019, April. A novel focal tversky loss function with improved
attention u-net for lesion segmentation. In 2019 IEEE 16th International Symposium on Biomedical Imaging
(ISBI 2019) (pp. 683-687). IEEE.
----------
Input
alpha: tunable parameter within [0, 1]. Alpha handles imbalance classification cases
gamma: tunable parameter within [1, 3].
const: a constant that smooths the loss gradient and reduces numerical instabilities.
'''
# tf tensor casting
y_pred = tf.convert_to_tensor(y_pred)
y_true = tf.cast(y_true, y_pred.dtype)
# <--- squeeze-out length-1 dimensions.
y_pred = tf.squeeze(y_pred)
y_true = tf.squeeze(y_true)
# (Tversky loss)**(1/gamma)
loss_val = tf.math.pow((1-tversky_coef(y_true, y_pred, alpha=alpha, const=const)), 1/gamma)
return loss_val
# ========================= #
def triplet_1d(y_true, y_pred, N, margin=5.0):
'''
(Experimental)
Semi-hard triplet loss with one-dimensional vectors of anchor, positive, and negative.
triplet_1d(y_true, y_pred, N, margin=5.0)
Input
----------
y_true: a dummy input, not used within this function. Appeared as a requirment of tf.keras.loss function format.
y_pred: a single pass of triplet training, with `shape=(batch_num, 3*embeded_vector_size)`.
i.e., `y_pred` is the ordered and concatenated anchor, positive, and negative embeddings.
N: Size (dimensions) of embedded vectors
margin: a positive number that prevents negative loss.
'''
# anchor sample pair separations.
Embd_anchor = y_pred[:, 0:N]
Embd_pos = y_pred[:, N:2*N]
Embd_neg = y_pred[:, 2*N:]
# squared distance measures
d_pos = tf.reduce_sum(tf.square(Embd_anchor - Embd_pos), 1)
d_neg = tf.reduce_sum(tf.square(Embd_anchor - Embd_neg), 1)
loss_val = tf.maximum(0., margin + d_pos - d_neg)
loss_val = tf.reduce_mean(loss_val)
return loss_val
| [
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
11192,
273,
11125,
13,
6122,
292,
13,
1891,
437,
355,
509,
198,
198,
4299,
4808,
6098,
... | 2.217146 | 3,546 |
from .ball_query import BallQuery
from .frustum import FrustumPointNetLoss
from .loss import KLLoss
from .pointnet import PointNetAModule, PointNetSAModule, PointNetFPModule
from .pvconv import PVConv
from .se import SE3d
from .shared_mlp import SharedMLP
from .voxelization import Voxelization
| [
6738,
764,
1894,
62,
22766,
1330,
6932,
20746,
198,
6738,
764,
8310,
436,
388,
1330,
1305,
436,
388,
12727,
7934,
43,
793,
198,
6738,
764,
22462,
1330,
509,
3069,
793,
198,
6738,
764,
4122,
3262,
1330,
6252,
7934,
2390,
375,
2261,
11,... | 3.172043 | 93 |
import gzip
import pickle
import os
if __name__ == '__main__':
# Load sample
with gzip.open("E:\\ECG_Data\\icentia11k\\00000_batched.pkl.gz", "rb") as file:
data = pickle.load(file)
print(data.shape)
# Load label
with gzip.open("E:\\ECG_Data\\icentia11k\\00000_batched_lbls.pkl.gz", "rb") as file:
lbls = pickle.load(file)
print(lbls)
classes = []
for index, file in enumerate(os.listdir("E:\\ECG_Data\\icentia11k")):
print(index)
if "lbls" in file:
with gzip.open(os.path.join("E:\\ECG_Data\\icentia11k", file), "rb") as file:
lbls = pickle.load(file)
for lbl in lbls:
classes.append(sum([label.shape[0] != 0 for label in lbl["rtype"]]))
if classes[-1] >= 3:
print("Stop")
if index > 1000:
exit(22) | [
11748,
308,
13344,
198,
11748,
2298,
293,
198,
11748,
28686,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1303,
8778,
6291,
198,
220,
220,
220,
351,
308,
13344,
13,
9654,
7203,
36,
25,
6852,
... | 1.942478 | 452 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import Tio
import asyncio
import unittest
site = Tio.Tio()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
309,
952,
198,
11748,
30351,
952,
198,
11748,
555,
715,
395,
198,
198,
15654,
796,
309,
952,
13,
51,
952,... | 2.276596 | 47 |
data2=pd.read_csv("Raw/pndictionary.csv",error_bad_lines=False,header=None)
data2=data2.drop([0,3,4,5],axis=1)
print(data2.head())
data2=data2.drop_duplicates(subset=[1],keep='first')
data2=data2.reset_index(drop=True)
l=[]
for i in range(len(data2)):
s=data2[2][i]
if s==' `value`' or s==" 'GN ara[times]'":
l.append("NULL")
continue
s=re.sub('\'',"",s)
s=re.sub(" ","",s)
s=s.split('|')
k=list()
for j in s:
if len(j)==2:
k.append(j)
s="|".join(k)
l.append(s)
data2[3]=l
data2=data2[data2[3]!="NULL"]
data2=data2.reset_index(drop=True)
data2=data2.drop([2],axis=1)
data2.rename(columns = {1:'Text',3:'NER'}, inplace = True)
data2.to_csv("pndictioanry_processed.csv") | [
7890,
17,
28,
30094,
13,
961,
62,
40664,
7203,
27369,
14,
79,
358,
14188,
13,
40664,
1600,
18224,
62,
14774,
62,
6615,
28,
25101,
11,
25677,
28,
14202,
8,
198,
7890,
17,
28,
7890,
17,
13,
14781,
26933,
15,
11,
18,
11,
19,
11,
20... | 1.910486 | 391 |
#!/usr/bin/env python3
import argparse
import xml.etree.ElementTree as ET
from dumb_round import dumb_round
VESTIGIAL_ATTRS = {
'Walker': ['fill', 'frame', 'r'],
'Sphere': ['r'],
'Hologram': ['r'],
'Solid': ['r'],
'WallSolid': ['angle', 'cx', 'cz', 'r'],
'FreeSolid': ['r'],
'Dome': [],
'Ramp': ['cx', 'cz', 'r'],
'TriPyramid': ['r'],
'Door': ['r'],
'Door2': ['r'],
'WallDoor': ['angle', 'cx', 'cz', 'r'],
'Area': ['fill', 'frame', 'angle'],
'Text': ['fill', 'frame', 'angle'],
'Field': ['r'],
'Goody': ['r'],
'Switch': ['r'],
'Guard': ['r'],
'GroundColor': ['frame', 'angle', 'cx', 'cz', 'r'],
'SkyColor': ['angle', 'cx', 'cz', 'r'],
'Incarnator': ['fill', 'frame', 'r'],
'Teleporter': ['r'],
'Pill': ['r'],
'Ball': ['r'],
'Goal': ['r'],
'Mine': ['r'],
'Parasite': ['r'],
'Ufo': ['r'],
'Sound': ['fill', 'frame', 'r'],
'Timer': ['fill', 'frame', 'angle', 'cx', 'cz', 'r'],
'Delay': ['fill', 'frame', 'angle', 'cx', 'cz', 'r'],
'Counter': ['fill', 'frame', 'angle', 'cx', 'cz', 'r'],
'And': ['fill', 'frame', 'angle', 'cx', 'cz', 'r'],
'Distributor': ['fill', 'frame', 'angle', 'cx', 'cz', 'r'],
'Base': ['fill', 'frame', 'angle', 'r'],
'YonBox': ['fill', 'frame'],
'YonSphere': ['fill', 'frame'],
}
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('file', type=open)
args = parser.parse_args()
last_wallHeight = 3 # This is the default value in rsrc/default.avarascript
did_output_wallHeight_zero = False
to_remove = []
root = ET.fromstring(args.file.read())
for child in root:
if child.tag == 'set':
if 'wallHeight' in child.attrib:
last_wallHeight = float(child.attrib['wallHeight'])
if last_wallHeight != 0 or did_output_wallHeight_zero:
if len(child.attrib) == 1:
to_remove.append(child)
else:
del child.attrib['wallHeight']
else:
did_output_wallHeight_zero = True
elif child.tag in ['Wall', 'WallSolid', 'FreeSolid', 'WallDoor', 'Field']:
if 'h' in child.attrib and child.attrib['h'] == '0':
child.attrib['h'] = dumb_round(last_wallHeight)
if child.tag in VESTIGIAL_ATTRS.keys():
child.attrib = {k: v for k, v in child.attrib.items()
if k not in VESTIGIAL_ATTRS[child.tag]}
for child in to_remove:
root.remove(child)
print(ET.tostring(root, encoding="unicode"))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
1822,
29572,
198,
11748,
35555,
13,
316,
631,
13,
20180,
27660,
355,
12152,
628,
198,
6738,
13526,
62,
744,
1330,
13526,
62,
744,
628,
198,
53,
6465,
3528,
12576,
62,
... | 2.04528 | 1,303 |
import re
import json
import string
import urllib.request
| [
11748,
302,
198,
11748,
33918,
198,
11748,
4731,
198,
11748,
2956,
297,
571,
13,
25927,
198
] | 3.625 | 16 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyNibabel(PythonPackage):
"""Access a multitude of neuroimaging data formats"""
homepage = "https://nipy.org/nibabel"
pypi = "nibabel/nibabel-3.2.1.tar.gz"
version('3.2.1', sha256='4d2ff9426b740011a1c916b54fc25da9348282e727eaa2ea163f42e00f1fc29e')
depends_on('python@3.6:', type=('build', 'run'))
depends_on('py-setuptools@30.3.0:', type=('build', 'run'))
depends_on('py-numpy@1.14:', type='run')
depends_on('py-packaging@14.3:', type='run')
| [
2,
15069,
2211,
12,
1238,
2481,
13914,
45036,
3549,
2351,
4765,
11,
11419,
290,
584,
198,
2,
1338,
441,
4935,
34152,
13,
4091,
262,
1353,
12,
5715,
27975,
38162,
9947,
2393,
329,
3307,
13,
198,
2,
198,
2,
30628,
55,
12,
34156,
12,
... | 2.412969 | 293 |
#################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). #
# You may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#################################################################################
from launch import LaunchDescription
from launch_ros.actions import Node
| [
29113,
29113,
14468,
2,
198,
2,
220,
220,
15069,
6186,
13,
785,
11,
3457,
13,
393,
663,
29116,
13,
1439,
6923,
33876,
13,
220,
220,
220,
220,
220,
220,
220,
220,
220,
1303,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
22... | 2.094703 | 623 |
from pybrain.rl.learners.directsearch.__init__ import *
from pybrain.rl.learners.valuebased.__init__ import *
from pybrain.rl.learners.modelbased.__init__ import *
| [
6738,
12972,
27825,
13,
45895,
13,
35720,
364,
13,
12942,
12947,
13,
834,
15003,
834,
1330,
1635,
198,
6738,
12972,
27825,
13,
45895,
13,
35720,
364,
13,
8367,
3106,
13,
834,
15003,
834,
1330,
1635,
198,
6738,
12972,
27825,
13,
45895,
... | 3.037037 | 54 |
from ..entities import entity_post_move, entity_rotated
from ..services import map
| [
6738,
11485,
298,
871,
1330,
9312,
62,
7353,
62,
21084,
11,
9312,
62,
10599,
515,
201,
198,
6738,
11485,
30416,
1330,
3975,
201
] | 3.652174 | 23 |
#!/usr/bin/env python3
"""
Writes a lexicon to disk with the relevant counts of each word.
Use with e.g. "find . -type f -iname "*.txt" -exec ./write_token_counts.py {} +"
"""
__author__ = "Todd Shore <errantlinguist+github@gmail.com>"
__copyright__ = "Copyright (C) 2018 Todd Shore"
__license__ = "Apache License, Version 2.0"
import argparse
import csv
import sys
from typing import MutableMapping
import nltk
if __name__ == "__main__":
__main(__create_argparser().parse_args())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
37811,
198,
20257,
274,
257,
31191,
4749,
284,
11898,
351,
262,
5981,
9853,
286,
1123,
1573,
13,
198,
198,
11041,
351,
304,
13,
70,
13,
366,
19796,
764,
532,
4906,
277,
532,... | 2.843931 | 173 |
#!/usr/bin/env python
#
# Builds archives for SDK releases.
#
# Usage:
# python tools/sdk.py <version>
#
import os
import shutil
import sys
import tempfile
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
10934,
82,
22415,
329,
26144,
10050,
13,
198,
2,
198,
2,
29566,
25,
198,
2,
21015,
4899,
14,
21282,
74,
13,
9078,
1279,
9641,
29,
198,
2,
198,
198,
11748,
28686,
198,
... | 2.69863 | 73 |
import sys
try:
from setuptools import setup
except ImportError:
sys.exit('ERROR: setuptools is required.\nTry using "pip install setuptools".')
# Use README.rst for the long description
with open('README.rst') as fh:
long_description = fh.read()
def get_package_version(verfile):
'''Scan the script for the version string'''
version = None
with open(verfile) as fh:
try:
version = [line.split('=')[1].strip().strip("'") for line in fh if \
line.startswith('__version__')][0]
except IndexError:
pass
return version
version = get_package_version('symbolator.py')
if version is None:
raise RuntimeError('Unable to find version string in file: {0}'.format(version_file))
setup(name='symbolator',
version=version,
author='Kevin Thibedeau',
author_email='kevin.thibedeau@gmail.com',
url='http://kevinpt.github.io/symbolator',
download_url='http://kevinpt.github.io/symbolator',
description='HDL symbol generator',
long_description=long_description,
platforms = ['Any'],
install_requires = ['hdlparse>=1.0.4'],
packages = ['nucanvas', 'nucanvas/color', 'symbolator_sphinx'],
py_modules = ['symbolator'],
entry_points = {
'console_scripts': ['symbolator = symbolator:main']
},
include_package_data = True,
use_2to3 = False,
keywords='HDL symbol',
license='MIT',
classifiers=['Development Status :: 5 - Production/Stable',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Topic :: Multimedia :: Graphics',
'Topic :: Software Development :: Documentation',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License'
]
)
| [
198,
11748,
25064,
198,
198,
28311,
25,
198,
220,
422,
900,
37623,
10141,
1330,
9058,
198,
16341,
17267,
12331,
25,
198,
197,
17597,
13,
37023,
10786,
24908,
25,
900,
37623,
10141,
318,
2672,
13,
59,
77,
23433,
1262,
366,
79,
541,
272... | 2.628447 | 689 |
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 7 13:01:33 2016
this is a low-cost way to compute a decent solution to the traveling salesman
problem using the christofides algorythm. It guarentees approximately 1.5 the global minimum
@author: Landen Blackburn
"""
import scipy.io as scipy
import numpy as np
from munkres import Munkres
from datetime import datetime
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D #don't listen to the compiler. This library is necessary for 3d graphing
#from methods import distance3D
import dubins
# fig=plt.figure()
# ax=fig.gca(projection='3d')
# ax.plot(xValues, yValues, thetaValues)
# print("Generating Pathway Plot")
# #plt.plot(xValues, yValues, thetaValues)
# plt.show()
#finds minimum spanning tree for point set
#finds minimum perfect matching set given minimum spanning tree and the redundant distance matrix
#remove duplicates by sorting each row, then sorting by column, then taking the even verticies
#this is the driving method that uses findCircuitRecursive to find Euler tour
#note:this method heavily modifies combinedPathwaySetCopy. Send a copy if you want the original preserved
#this is for testing the method. The actual method accepts an initial pointset with each row being an x,y,z value for a waypoint
#to test, simply select an amount of wayPoints and a random set will be generated
#wayPoints=101
#np.random.seed(seed=5)
#equalityBuster=np.array(range(wayPoints))/wayPoints
#initialPoints=np.vstack((np.random.randint(50, size=wayPoints),np.random.randint(50, size=wayPoints),equalityBuster+np.random.randint(10, size=wayPoints)))
#initialPoints=initialPoints.transpose()
#solution=Christofides(initialPoints)
##solution.finalDistance
##solution.finalPointSet
##solution.finalSolution
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
26223,
2365,
220,
767,
1511,
25,
486,
25,
2091,
1584,
198,
198,
5661,
318,
257,
1877,
12,
15805,
835,
284,
24061,
257,
7709,
4610,
284,
262,
11... | 3.141156 | 588 |
# Your OrderedStream object will be instantiated and called as such:
# obj = OrderedStream(n)
# param_1 = obj.insert(id,value)
| [
198,
198,
2,
3406,
14230,
1068,
12124,
2134,
481,
307,
9113,
12931,
290,
1444,
355,
884,
25,
198,
2,
26181,
796,
14230,
1068,
12124,
7,
77,
8,
198,
2,
5772,
62,
16,
796,
26181,
13,
28463,
7,
312,
11,
8367,
8,
198
] | 3.071429 | 42 |
#!/usr/bin/env python3
# SPDX-FileCopyrightText: Omar Sandoval <osandov@osandov.com>
# SPDX-License-Identifier: MIT
import argparse
import os
import os.path
import shutil
import subprocess
import sys
CHUNK_SIZE = 1024 * 1024 * 1024 # 1 GB
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
30628,
55,
12,
8979,
15269,
8206,
25,
24980,
3837,
8325,
1279,
418,
392,
709,
31,
418,
392,
709,
13,
785,
29,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
17168,
198,
... | 2.716981 | 106 |
import copy
import math
import os
import random
import time
from functools import wraps
import numpy as np
import torch
from gobigger.agents import BotAgent
from pygame import Vector2
def match_clones(last_clones, cur_clones, avg_move_x, avg_move_y):
"""
匹配同一玩家的所有clones
:param avg_move_y:
:param avg_move_x:
:param last_clones:
:param cur_clones:
:return:
"""
last_clones = last_clones.reshape((-1, last_clones.shape[-1]))
cur_clones = cur_clones.reshape((-1, cur_clones.shape[-1]))
assert last_clones.shape[0] <= 16 and cur_clones.shape[0] <= 16
# 按r排序
last_clones = sort_clone_by_r(last_clones, 2)
cur_clones = sort_clone_by_r(cur_clones, 2)
n_last = last_clones.shape[0]
n_cur = cur_clones.shape[0]
# x, y, r, team, player, last_idx, cur_idx
last_clones_plus = np.c_[last_clones, np.zeros(n_last), np.zeros(n_last)]
cur_clones_plus = np.c_[cur_clones, np.zeros(n_cur), np.zeros(n_cur)]
last_clones_plus[:, -2] = np.arange(0, n_last)
cur_clones_plus[:, -1] = np.arange(0, n_cur)
last_clones_plus[:, -1] = -1
cur_clones_plus[:, -2] = -1
adj_cur_clones = cur_clones.copy()
adj_cur_clones[:, 0] -= avg_move_x
adj_cur_clones[:, 1] -= avg_move_y
if n_cur == n_last:
if n_cur == 1:
last_clones_plus[0, -1] = 0
cur_clones_plus[0, -2] = 0
else:
for cur_idx in range(n_cur):
cl = adj_cur_clones[cur_idx]
dis = get_dis(cl[0], cl[1], last_clones_plus[:, 0], last_clones_plus[:, 1])
# sus_dis = dis[dis < (500 / 13) * 0.2 * 2]
# if sus_dis.shape[0] == 0:
# print("Error:n_last == n_cur Match No suspect cl")
# continue
# 按距离排序
idx = np.argmin(dis)
last_idx = int(last_clones_plus[idx, -2])
last_clones_plus[last_idx, -1] = cur_idx
cur_clones_plus[cur_idx, -2] = last_idx
elif n_cur > n_last:
return last_clones_plus, cur_clones_plus
else: # n_cur < n_last
for cur_idx in range(n_cur):
cl = adj_cur_clones[cur_idx]
dis = get_dis(cl[0], cl[1], last_clones_plus[:, 0], last_clones_plus[:, 1])
if dis.shape[0] == 0:
print("Error:n_cur < n_last Match No suspect cl")
continue
# 按距离排序
idx = np.argmin(dis)
last_idx = int(last_clones_plus[idx, -2])
last_clones_plus[last_idx, -1] = cur_idx
cur_clones_plus[cur_idx, -2] = last_idx
pass
return last_clones_plus, cur_clones_plus
def dis_point_2_linear(x0, y0, A, B, C):
"""
(x0,y0) Ax + By + C = 0
:param x0:
:param y0:
:param A:
:param B:
:param C:
:return:
"""
up = np.abs(A * x0 + B * y0 + C)
down = np.sqrt(A * A + B * B) + 1e-5
return up / down
def check_split_eat_by_enemy(my_cl, enemy_cl, player_clone_np, my_clones, my_merging_clones, friend_clones,
enemy_clones, thorns, split_num=1):
'''
:param my_cl:
:param enemy_cl:
:param player_clone_np:
:param my_clones:
:param my_merging_clones:
:param friend_clones:
:param enemy_clones:
:param thorns:
:return:
True:对我的cl有威胁
False:对我的cl没威胁
'''
n_my_clones = len(my_clones)
my_cl_pos = my_cl['position']
my_cl_r = my_cl['radius']
my_cl_v = my_cl_r * my_cl_r
team = int(my_clones[0]['team'])
my_player = int(my_clones[0]['player'])
n_can_split = 0
n_can_split_twice = 0
my_cl_idx = 0
my_total_v = 0.0
for idx, b in enumerate(my_clones):
if b['radius'] > 10.:
n_can_split += 1
elif b['radius'] > 20.:
n_can_split_twice += 1
if b['position'] == my_cl_pos:
my_cl_idx = idx
my_total_v += b['radius'] * b['radius']
n_aft_split = min(16, n_my_clones + n_can_split)
n_aft_two_split = min(16, n_my_clones + n_can_split_twice)
my_cl_can_split_once = my_cl_r > 10. and n_my_clones + my_cl_idx - 1 < 16
my_cl_can_split_twice = my_cl_r > 20. and my_cl_idx == 0 and n_aft_split + my_cl_idx - 1 < 16
enemy_cl_pos = enemy_cl['position']
enemy_cl_r = enemy_cl['radius']
enemy_cl_v = enemy_cl_r * enemy_cl_r
enemy_cl_name = int(enemy_cl['player'])
enemy_cl_team = int(enemy_cl['team'])
enemy_player_clone = player_clone_np[enemy_cl_name]
n_enemy_cl = enemy_player_clone.shape[0]
enemy_idx = 0
for j, temp_cl in enumerate(enemy_player_clone):
if int(enemy_cl_pos[0]) == int(temp_cl[0]) and int(enemy_cl_pos[1]) == int(temp_cl[1]):
enemy_idx = j
break
n_aft_split = min(16, n_my_clones + n_can_split)
my_to_enemy_dis = (my_cl_pos - enemy_cl_pos).length()
if my_to_enemy_dis == 0.0:
direction = Vector2(0.1, 0.1)
else:
direction = (my_cl_pos - enemy_cl_pos).normalize()
# collide bug
if my_to_enemy_dis < my_cl_r:
return True
# can split
split_danger = False
# eat other ball by the way
fake_ball_v_1 = enemy_cl_v / 2
fake_ball_r_1 = math.sqrt(fake_ball_v_1)
fake_ball_x_1 = enemy_cl_pos.x + direction.x * 2 * fake_ball_r_1
fake_ball_y_1 = enemy_cl_pos.y + direction.y * 2 * fake_ball_r_1
fake_ball_x_1 = np.clip(fake_ball_x_1, fake_ball_r_1, 1000. - fake_ball_r_1)
fake_ball_y_1 = np.clip(fake_ball_y_1, fake_ball_r_1, 1000. - fake_ball_r_1)
# danger of collide my cl
if my_clones:
for b in my_clones:
fake_dis = math.sqrt(math.pow(fake_ball_x_1 - b['position'].x, 2) + math.pow(
fake_ball_y_1 - b['position'].y, 2))
b_r = b['radius']
# collide and be eat
if fake_dis < b_r and fake_ball_r_1 < b_r:
# eat by other
split_danger = True
break
# danger of collide friend cl
if friend_clones and (not split_danger):
for b in friend_clones:
fake_dis = math.sqrt(math.pow(fake_ball_x_1 - b['position'].x, 2) + math.pow(
fake_ball_y_1 - b['position'].y, 2))
b_r = b['radius']
# collide and be eat
if fake_dis < b_r and fake_ball_r_1 < b_r:
# eat by other
split_danger = True
break
if split_danger:
return False
# safe collide
if not split_danger:
# eat friend
if friend_clones:
for b in friend_clones:
fake_dis = math.sqrt(math.pow(fake_ball_x_1 - b['position'].x, 2) + math.pow(
fake_ball_y_1 - b['position'].y, 2))
b_r = b['radius']
b_v = b_r * b_r
# collide and be eat
fake_ball_r_1 = math.sqrt(fake_ball_v_1)
if fake_dis < fake_ball_r_1 and fake_ball_r_1 > b_r:
fake_ball_v_1 += b_v
# eat my cl
if my_clones:
for b in my_clones:
fake_dis = math.sqrt(math.pow(fake_ball_x_1 - b['position'].x, 2) + math.pow(
fake_ball_y_1 - b['position'].y, 2))
b_r = b['radius']
b_v = b_r * b_r
# collide and eat enemy
fake_ball_r_1 = math.sqrt(fake_ball_v_1)
if fake_ball_r_1 > b_r and fake_dis < fake_ball_r_1:
fake_ball_v_1 += b_v
# eat thorns
if thorns:
temp_n = copy.deepcopy(n_aft_split)
for th in thorns:
th_r = th['radius']
th_v = th_r * th_r
if th_v > fake_ball_v_1:
continue
to_th_dis = math.sqrt(math.pow(fake_ball_x_1 - th['position'].x, 2) + math.pow(
fake_ball_y_1 - th['position'].y, 2))
if to_th_dis < math.sqrt(fake_ball_v_1):
split_n = min(16 - temp_n, 10)
if split_n > 0:
merge_v = fake_ball_v_1 + th_r * th_r
split_r = min(math.sqrt(merge_v / (split_n + 1)), 20)
split_v = split_r * split_r
middle_v = merge_v - split_v * split_n
fake_ball_v_1 = middle_v
temp_n = min(16, temp_n + split_n)
else:
fake_ball_v_1 += th_v
if split_num == 1:
fake_enemy = dict()
fake_enemy['position'] = Vector2(float(fake_ball_x_1), float(fake_ball_y_1))
fake_enemy['radius'] = fake_ball_r_1
fake_enemy['player'] = enemy_cl_name
fake_enemy['team'] = enemy_cl_team
# aft split be eat
team = int(my_clones[0]['team'])
my_player = int(my_clones[0]['player'])
enemy_clones_add_fake = copy.deepcopy(enemy_clones)
enemy_clones_add_fake.append(fake_enemy)
enemy_clones_add_fake.remove(enemy_cl)
for player_id in range(3 * team, 3 * team + 3):
player_clones = group_process_np_to_dict(player_clone_np[player_id])
if not player_clones:
continue
player_n = len(player_clones)
if player_id != my_player:
can_split = player_n < 16
else:
can_split = True
b = player_clones[0]
fake_dis = (fake_enemy['position'] - b['position']).length()
b['radius'] = b['radius'] / 1.414
b_r = b['radius']
b_v = b_r * b_r
# collide
if fake_ball_v_1 < b_v and fake_dis < b_r:
split_danger = True
break
# split once
elif fake_ball_v_1 < b_v / 3 and fake_dis < 2.1 * b_r + 15 and can_split:
eat_v = get_split_eat_enemy_volumn(b, fake_enemy, player_clone_np, player_clones, None, None,
enemy_clones_add_fake, thorns)
if eat_v > 0:
split_danger = True
break
if split_danger:
return False
else:
return True
# split twice
fake_ball_v_2 = fake_ball_v_1 / 2
fake_ball_r_2 = math.sqrt(fake_ball_v_2)
fake_ball_x_2 = fake_ball_x_1 + direction.x * 2 * fake_ball_r_2
fake_ball_y_2 = fake_ball_y_1 + direction.y * 2 * fake_ball_r_2
fake_ball_x_2 = np.clip(fake_ball_x_2, fake_ball_r_2, 1000. - fake_ball_r_2)
fake_ball_y_2 = np.clip(fake_ball_y_2, fake_ball_r_2, 1000. - fake_ball_r_2)
fake_ball_2 = dict()
fake_ball_2['position'] = Vector2(float(fake_ball_x_2), float(fake_ball_y_2))
fake_ball_2['player'] = enemy_cl_name
fake_ball_2['team'] = enemy_cl_team
for b in my_clones:
fake_dis = (b['position'] - fake_ball_2['position']).length()
b_r = b['radius']
b_v = b_r * b_r
# collide bigger
if b_r > fake_ball_r_2 and fake_dis < b_r:
split_danger = True
break
# collide eat smaller
elif (fake_ball_r_2 > fake_dis) and (fake_ball_r_2 > b_r):
fake_ball_v_2 += b_v
fake_ball_r_2 = math.sqrt(fake_ball_v_2)
for b in friend_clones:
fake_dis = (b['position'] - fake_ball_2['position']).length()
b_r = b['radius']
b_v = b_r * b_r
# collide bigger
if b_r > fake_ball_r_2 and fake_dis < b_r:
split_danger = True
break
# collide eat smaller
elif (fake_ball_r_2 > fake_dis) and (fake_ball_r_2 > b_r):
fake_ball_v_2 += b_v
fake_ball_2['radius'] = fake_ball_r_2
if split_danger:
return False
if split_num == 2:
enemy_clones_add_fake = copy.deepcopy(enemy_clones)
enemy_clones_add_fake.append(fake_ball_2)
enemy_clones_add_fake.remove(enemy_cl)
for player_id in range(3 * team, 3 * team + 3):
player_clones = group_process_np_to_dict(player_clone_np[player_id])
if not player_clones:
continue
player_n = len(player_clones)
if player_id != my_player:
can_split = player_n < 16
else:
can_split = True
b = player_clones[0]
fake_dis = (b['position'] - fake_ball_2['position']).length()
b['radius'] = b['radius'] / 1.414
b_r = b['radius']
b_v = b_r * b_r
# collide
if fake_ball_v_1 < b_v / 2 and fake_dis < b_r:
split_danger = True
break
# split once
elif fake_ball_v_1 < b_v / 3 and fake_dis < 2.1 * b_r + 15 and can_split:
eat_v = get_split_eat_enemy_volumn(b, fake_ball_2, player_clone_np, player_clones, None, None,
enemy_clones_add_fake,
thorns, 1, False)
if eat_v > 0:
split_danger = True
break
if split_danger:
return False
return True
# split three
fake_ball_v_3 = fake_ball_v_2 / 2
fake_ball_r_3 = math.sqrt(fake_ball_v_3)
fake_ball_x_3 = fake_ball_x_2 + direction.x * 2 * fake_ball_r_3
fake_ball_y_3 = fake_ball_y_2 + direction.y * 2 * fake_ball_r_3
fake_ball_x_3 = np.clip(fake_ball_x_3, fake_ball_r_3, 1000. - fake_ball_r_3)
fake_ball_y_3 = np.clip(fake_ball_y_3, fake_ball_r_3, 1000. - fake_ball_r_3)
fake_ball_3 = dict()
fake_ball_3['position'] = Vector2(float(fake_ball_x_3), float(fake_ball_y_3))
fake_ball_3['player'] = enemy_cl_name
fake_ball_3['team'] = enemy_cl_team
for b in my_clones:
fake_dis = (b['position'] - fake_ball_3['position']).length()
b_r = b['radius']
b_v = b_r * b_r
# collide bigger
if b_r > fake_ball_r_3 and fake_dis < b_r:
split_danger = True
break
# collide eat smaller
elif (fake_ball_r_3 > fake_dis) and (fake_ball_r_3 > b_r):
fake_ball_v_3 += b_v
if split_danger:
return False
fake_ball_r_3 = math.sqrt(fake_ball_v_3)
for b in friend_clones:
fake_dis = (b['position'] - fake_ball_3['position']).length()
b_r = b['radius']
b_v = b_r * b_r
# collide bigger
if b_r > fake_ball_r_3 and fake_dis < b_r:
split_danger = True
break
# collide eat smaller
elif (fake_ball_r_3 > fake_dis) and (fake_ball_r_3 > b_r):
fake_ball_v_3 += b_v
fake_ball_3['radius'] = fake_ball_r_3
enemy_clones_add_fake = copy.deepcopy(enemy_clones)
enemy_clones_add_fake.append(fake_ball_3)
enemy_clones_add_fake.remove(enemy_cl)
for player_id in range(3 * team, 3 * team + 3):
player_clones = group_process_np_to_dict(player_clone_np[player_id])
if not player_clones:
continue
player_n = len(player_clones)
if player_id != my_player:
can_split = player_n < 16
else:
can_split = True
b = player_clones[0]
fake_dis = (b['position'] - fake_ball_3['position']).length()
b['radius'] = b['radius'] / 1.414
b_r = b['radius']
b_v = b_r * b_r
# collide
if fake_ball_v_3 < b_v / 2 and fake_dis < b_r:
split_danger = True
break
# split once
elif fake_ball_v_3 < b_v / 3 and fake_dis < 2.1 * b_r + 15 and can_split:
eat_v = get_split_eat_enemy_volumn(b, fake_ball_3, player_clone_np, player_clones, None, None,
enemy_clones_add_fake,
thorns, 1, False)
if eat_v > 0:
split_danger = True
break
if split_danger:
return False
return True
| [
11748,
4866,
201,
198,
11748,
10688,
201,
198,
11748,
28686,
201,
198,
11748,
4738,
201,
198,
11748,
640,
201,
198,
6738,
1257,
310,
10141,
1330,
27521,
201,
198,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
28034,
201,
198... | 1.753059 | 9,480 |
from loguru import logger
from pathlib import Path
import sql_functions
if __name__ == "__main__":
LogPath = str(Path(__file__).resolve().parent) + "\\log\\file_{time}.log"
logger.add(LogPath, level='DEBUG')
sql_functions.main()
logger.info("Completed Successfully")
| [
6738,
2604,
14717,
1330,
49706,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
44161,
62,
12543,
2733,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
5972,
15235,
796,
965,
7,
15235,
7,
834,
77... | 2.89899 | 99 |
"""
Copyright:
2016 Fraunhofer Institute for Telecommunications, Heinrich-Hertz-Institut (HHI)
The copyright of this software source code is the property of HHI.
This software may be used and/or copied only with the written permission
of HHI and in accordance with the terms and conditions stipulated
in the agreement/contract under which the software has been supplied.
The software distributed under this license is distributed on an "AS IS" basis,
WITHOUT WARRANTY OF ANY KIND, either expressed or implied.
"""
## @package mincut_model
# Library of functions for the Markov Random Field inference model using the
# technique of minimum-s-t-cut.
import numpy as np
import scipy.ndimage
import cv2
from hhi_stmrftracking import imgutils
from hhi_stmrftracking import mvutils
from hhi_stmrftracking.ioutils import MV_MAT_TYPE
from mincut import mincut
# Predefined Constants
BBOX_BORDER = 5
ERODE_KERNEL = np.ones((6,6), np.uint8)
LAPLACE_CORREC = 1
SMALL_MASK_SIZE = 20
GAUSSIAN_SIGMA = 1
# Reset procedure used by evaluation notebook
## Given the previous mask and the coordinates of the backward (in time)
# projection of the current frame's motion vectors of the current frame, compute
# the temporal energy (or temporal continuity) of the current frame.
#
# @type prev_mask HxW-ndarray of type bool, where H is the height and W the
# width of the mask
# @param prev_mask The previous mask, which is used to predict the next one
# @type back_proj_grid HxWx2-ndarray of type float, where H is the height and W
# the width of the frame
# @param back_proj_grid A matrix, where each entry has the coordinates of this
# block projected back into the previous frame according
# to the respective motion vector.
# @return HxW-ndarray of type float, where H is the height and W the width of
# the mask
## Estimate the new mask based on the previous one and on the current
# observation (i.e. current frame).
#
# The function estimates the mask by maximizing the maximum-a-posteriori
# probability, through optimization of minimum s-t-cut.
#
# @type prev_mask HxW-ndarray of type bool, where H is the height and W the
# width of the mask
# @param prev_mask The previous mask, which is used to predict the next one
#
# @type mv_frame HxWx2-ndarray, where H is the height and W the width of the
# frame
# @param mv_frame The current frame composed of motion vectors
# @return HxW-ndarray of type bool, where H is the height and W the width of
# the mask
| [
37811,
198,
220,
15069,
25,
198,
220,
1584,
39313,
403,
71,
30288,
5136,
329,
48667,
11,
26431,
7527,
12,
39,
861,
89,
12,
6310,
270,
315,
357,
16768,
40,
8,
198,
220,
383,
6634,
286,
428,
3788,
2723,
2438,
318,
262,
3119,
286,
36... | 2.952747 | 910 |
# _*_ coding: utf-8 _*_
"""
Created by lr on 2019/08/29.
"""
__author__ = 'lr'
| [
2,
4808,
9,
62,
19617,
25,
3384,
69,
12,
23,
4808,
9,
62,
198,
37811,
198,
220,
15622,
416,
300,
81,
319,
13130,
14,
2919,
14,
1959,
13,
198,
37811,
198,
834,
9800,
834,
796,
705,
14050,
6,
628,
628,
628,
628
] | 2.095238 | 42 |
#
# (c) Peralta Informatics 2007
# $Id: DeployPostgreSqlModule.py 302 2008-01-21 09:36:01Z andrei $
#
import clr
import sys
import os
import shutil
clr.AddReference("System.Data")
clr.AddReference("System.Xml")
import System.Data
import System.Data.SqlClient
import System.Diagnostics
import System.Text
import System.Xml
from Pi.Deploy import DeployUtilities
from Pi.Deploy.Database.DeployDatabaseModule import DeployDatabaseModule
from Pi.Deploy.Database.DeployDatabaseConfiguration import DatabaseConfiguration
NpgsqlLocationAttributeName = 'NpgsqlLocation'
if __name__ == '__main__':
sys.exit(main())
| [
2,
198,
2,
357,
66,
8,
350,
1691,
8326,
554,
18982,
873,
4343,
198,
2,
720,
7390,
25,
34706,
6307,
16694,
50,
13976,
26796,
13,
9078,
32591,
3648,
12,
486,
12,
2481,
7769,
25,
2623,
25,
486,
57,
290,
260,
72,
720,
198,
2,
198,
... | 3.169231 | 195 |
import io
import json
import os
from . import base as base_module
from .base import KeyManager
from .exceptions import CGAPEnvKeyMissing, CGAPServerKeyMissing
def get_keydict_for_env(env=None):
"""
Gets the appropriate auth info for talking to a given beanstalk environment.
Args:
env: the name of a beanstalk environment
Returns:
Auth information as a dict with keys 'key', 'secret', and 'server'.
"""
keydicts = get_cgap_keydicts()
keydict = keydicts.get(env
# For testing, we sometimes bind base_module.DEFAULT_ENV so we must make sure
# to pick up the value of DEFAULT_ENV indirectly through that module
# rather than importing the variable directly, which would complicate mocking.
# -kmp 4-Sep-2020
or base_module.DEFAULT_ENV)
if not keydict:
raise CGAPEnvKeyMissing(env=env, keyfile=KeyManager.keydicts_filename())
return keydict
def get_keypair_for_env(env=None):
"""
Gets the appropriate auth info for talking to a given beanstalk environment.
Args:
env: the name of a beanstalk environment
Returns:
Auth information as a (key, secret) tuple.
"""
return keydict_to_keypair(get_keydict_for_env(env=env))
def get_keydict_for_server(server=None):
"""
Gets the appropriate auth info for talking to a given beanstalk environment.
Args:
server: the name of a server
Returns:
Auth information.
The auth is a keypair, though we might change this to include a JWT token in the the future.
"""
if server is None:
# The values of keydict_for_server(None) and keydict_for_env(None) should match,
# and since we don't know what server we're looking for anyway,
# let's just look it up the other way and be done...
return get_keydict_for_env()
keydicts = get_cgap_keydicts()
server_to_find = server.rstrip('/')
for keydict in keydicts.values():
if keydict['server'].rstrip('/') == server_to_find:
return keydict
raise CGAPServerKeyMissing(server=server, keyfile=KeyManager.keydicts_filename())
| [
11748,
33245,
198,
11748,
33918,
198,
11748,
28686,
198,
198,
6738,
764,
1330,
2779,
355,
2779,
62,
21412,
198,
6738,
764,
8692,
1330,
7383,
13511,
198,
6738,
764,
1069,
11755,
1330,
29925,
2969,
4834,
85,
9218,
43730,
11,
29925,
2969,
... | 2.539326 | 890 |
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from django.utils import timezone
from datetime import timedelta
| [
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
198,
6738,
42625,
14208,
13,
26791,
1330,
640,
11340,
198,
198,
6738,
4818,
8079,... | 3.644444 | 45 |
"""Definitions for common operations on images."""
from io import BytesIO
import numpy as np
from PIL import Image
def extract_patch(im, patch_size):
"""Extract a center cropped patch of size 'patch_size' (2-element tuple).
:param im: input image
:type im: PIL image object
:param patch_size: size of patch
:type patch_size: tuple, array, or similar
:return: center-cropped patch
:rtype: PIL image object
"""
left = (im.size[0] - patch_size[0]) // 2
top = (im.size[1] - patch_size[1]) / 2
right = left + patch_size[0]
bottom = top + patch_size[1]
return im.crop([left, top, right, bottom])
def make_small(im, max_dim: int = 224, resample_method=Image.NEAREST):
"""Make a small version of an image while maintaining aspect ratio.
:param im: input image
:type im: PIL image object
:param max_dim: maximum dimension of resized image (x or y)
:type max_dim: int
:param resample_method: method for resampling the image
:type resample_method: PIL resample method
:return: resized image
:rtype: PIL image object
"""
scale_factor = np.min(im.size) / float(max_dim) # Scale down to a small version
small_size = (
np.round(im.size[0] / scale_factor).astype("int64"),
np.round(im.size[1] / scale_factor).astype("int64"),
)
left = (small_size[0] - max_dim) // 2
right = left + max_dim
upper = (small_size[1] - max_dim) // 2
lower = upper + max_dim
return im.resize(small_size, resample=resample_method).crop(
[left, upper, right, lower]
)
def add_jpeg_compression(im, quality_level: int = 30):
"""Apply JPEG compression to an image with a given quality level.
:param im: input image
:type im: PIL image object
:param quality_level: JPEG qualit level, where: 0 < value <= 100
:type quality_level: int
:return: compressed image
:rtype: PIL image object
"""
buf = BytesIO()
im.save(buf, "JPEG", q=int(quality_level))
return Image.open(buf)
def add_rotation(im, ccw_rotation_degrees: int = 90):
"""Rotate an image CCW by `ccw_rotation_degrees` degrees.
:param im: input image
:type im: PIL image object
:param ccw_rotation_degrees: number of degrees to rotate counter-clockwise
:type ccw_rotation_degrees: int
:return: rotated image
:rtype: PIL image object
"""
return im.rotate(ccw_rotation_degrees, expand=True)
def add_stretching(im, w_percent_additional, h_percent_additional):
"""Stretch an image by the specified percentages.
:param im: input image
:type im: PIL image object
:param w_percent_additional: amount of width stretching to add (0 maintains the same size, 100 doubles the size)
:type w_percent_additional: int or float greater than 0
:param h_percent_additional: amount of height stretching to add (0 maintains the same size, 100 doubles the size)
:type h_percent_additional: int or float greater than 0
:return: stretched image
:rtype: PIL image object
"""
newsize = (
im.size[0] * int(1.0 + float(w_percent_additional) / 100),
im.size[1] * int(1.0 + float(h_percent_additional) / 100),
)
return im.resize(newsize, resample=Image.BILINEAR)
def add_poisson_noise(im, param: float = 1.0, rescale: bool = True):
"""Add Poisson noise to image, where (poisson noise * `param`) is the final noise function.
See http://kmdouglass.github.io/posts/modeling-noise-for-image-simulations for more info.
If `rescale` is set to True, the image will be rescaled after noise is added. Otherwise,
the noise will saturate.
:param im: input image
:type im: PIL image object
:param param: noise parameter
:type param: float
:param rescale: flag indicating whether or not to rescale the image after adding noise (maintaining original image extrema)
:type rescale: bool
:return: image with Poisson noise added
:rtype: PIL image object
"""
image_with_standard_noise = np.random.poisson(im)
noisy_image = np.array(im) + param * image_with_standard_noise
# Fix values beyond the saturation value or rescale
saturation_value = im.getextrema()[0][1]
if rescale:
noisy_image = noisy_image * (saturation_value / np.max(noisy_image))
else:
noisy_image[noisy_image > saturation_value] = saturation_value
return Image.fromarray(noisy_image.astype(np.uint8))
| [
37811,
7469,
50101,
329,
2219,
4560,
319,
4263,
526,
15931,
198,
6738,
33245,
1330,
2750,
4879,
9399,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
350,
4146,
1330,
7412,
628,
198,
4299,
7925,
62,
17147,
7,
320,
11,
8529,
62,
7857,
2... | 2.733415 | 1,628 |
import xarray as xr
from . import density
| [
11748,
2124,
18747,
355,
2124,
81,
198,
198,
6738,
764,
1330,
12109,
628
] | 3.384615 | 13 |
# flask_web/app.py
import os
from flask import Flask
app = Flask(__name__)
@app.route("/")
#@app.route('/')
#def hello_world:
# return 'Hey, we have Flask in a Docker container!'
#if __name == '__main__':
# app.run(debug=True, host='0.0.0.0')
| [
2,
42903,
62,
12384,
14,
1324,
13,
9078,
198,
11748,
28686,
198,
198,
6738,
42903,
1330,
46947,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
198,
198,
31,
1324,
13,
38629,
7203,
14,
4943,
198,
198,
2,
31,
1324,
13,
38629,
10786,
1... | 2.419048 | 105 |
import datetime
import time
import requests
from test_broker_base import TestBrokerBase
| [
11748,
4818,
8079,
198,
11748,
640,
198,
198,
11748,
7007,
198,
198,
6738,
1332,
62,
7957,
6122,
62,
8692,
1330,
6208,
15783,
6122,
14881,
628
] | 3.64 | 25 |
"""
Registries view permission classes
"""
from rest_framework.permissions import BasePermission, IsAdminUser, SAFE_METHODS
class IsAdminOrReadOnly(IsAdminUser):
"""
Allows read-only access to all users (including anonymous users) and write access to admin users only
"""
class IsGwellsAdmin(BasePermission):
"""
Grants permission to users with the is_gwells_admin flag (supplied by Keycloak)
"""
| [
37811,
198,
8081,
32995,
1570,
7170,
6097,
198,
37811,
198,
198,
6738,
1334,
62,
30604,
13,
525,
8481,
1330,
7308,
5990,
3411,
11,
1148,
46787,
12982,
11,
37630,
36,
62,
49273,
50,
628,
198,
4871,
1148,
46787,
5574,
5569,
10049,
7,
37... | 3.302326 | 129 |
# Generated by Django 3.1.5 on 2021-02-11 02:59
from django.db import migrations, models
import django.db.models.deletion
import uuid
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
20,
319,
33448,
12,
2999,
12,
1157,
7816,
25,
3270,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.833333 | 48 |
import copy
import types
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
from models.networks.assisting_layers.GateDecoratorLayers import GatedBatchNorm
from models.criterions.SNIP import SNIP
from utils.constants import SNIP_BATCH_ITERATIONS
from utils.data_utils import lookahead_type, lookahead_finished
import numpy as np
from utils.snip_utils import group_snip_forward_linear, group_snip_conv2d_forward
class John(SNIP):
"""
Adapted implementation of GraSP from the paper:
Picking Winning Tickets Before Training by Preserving Gradient Flow
https://arxiv.org/abs/2002.07376
from the authors' github:
https://github.com/alecwangcq/GraSP
"""
def handle_input(self, cutoff, indices, is_conv, length_nonzero, module, n_remaining, name, weight):
""" shrinks a input dimension """
module.update_input_dim(n_remaining)
length_nonzero = int(np.prod(weight.shape))
cutoff = 0
if is_conv:
weight.data = weight[:, indices, :, :]
try:
weight.grad.data = weight.grad.data[:, indices, :, :]
except AttributeError:
pass
if name in self.model.mask:
self.model.mask[name] = self.model.mask[name][:, indices, :, :]
else:
if ((indices.shape[0] % weight.shape[0]) == 0) and not (weight.shape[1] == indices.shape[0]):
ratio = weight.shape[1] // indices.shape[0]
module.update_input_dim(n_remaining * ratio)
new_indices = torch.repeat_interleave(indices, ratio)
weight.data = weight[:, new_indices]
if name in self.model.mask:
self.model.mask[name] = self.model.mask[name][:, new_indices]
try:
weight.grad.data = weight.grad.data[:, new_indices]
except AttributeError:
pass
else:
weight.data = weight[:, indices]
try:
weight.grad.data = weight.grad.data[:, indices]
except AttributeError:
pass
if name in self.model.mask:
self.model.mask[name] = self.model.mask[name][:, indices]
if self.model.is_tracking_weights:
raise NotImplementedError
return cutoff, length_nonzero
def handle_output(self, indices, is_conv, module, n_remaining, name, weight):
""" shrinks a output dimension """
module.update_output_dim(n_remaining)
self.handle_batch_norm(indices, n_remaining, name)
if is_conv:
weight.data = weight[indices, :, :, :]
try:
weight.grad.data = weight.grad.data[indices, :, :, :]
except AttributeError:
pass
if name in self.model.mask:
self.model.mask[name] = self.model.mask[name][indices, :, :, :]
else:
weight.data = weight[indices, :]
try:
weight.grad.data = weight.grad.data[indices, :]
except AttributeError:
pass
if name in self.model.mask:
self.model.mask[name] = self.model.mask[name][indices, :]
self.handle_bias(indices, name)
if self.model.is_tracking_weights:
raise NotImplementedError
def handle_bias(self, indices, name):
""" shrinks a bias """
bias = [val for key, val in self.model.named_parameters() if key == name.split("weight")[0] + "bias"][0]
bias.data = bias[indices]
try:
bias.grad.data = bias.grad.data[indices]
except AttributeError:
pass
def handle_batch_norm(self, indices, n_remaining, name):
""" shrinks a batchnorm layer """
batchnorm = [val for key, val in self.model.named_modules() if
key == name.split(".weight")[0][:-1] + str(int(name.split(".weight")[0][-1]) + 1)][0]
if isinstance(batchnorm, (nn.BatchNorm2d, nn.BatchNorm1d, GatedBatchNorm)):
batchnorm.num_features = n_remaining
from_size = len(batchnorm.bias.data)
batchnorm.bias.data = batchnorm.bias[indices]
batchnorm.weight.data = batchnorm.weight[indices]
try:
batchnorm.bias.grad.data = batchnorm.bias.grad[indices]
batchnorm.weight.grad.data = batchnorm.weight.grad[indices]
except TypeError:
pass
if hasattr(batchnorm, "gate"):
batchnorm.gate.data = batchnorm.gate.data[indices]
batchnorm.gate.grad.data = batchnorm.gate.grad.data[indices]
batchnorm.bn.num_features = n_remaining
for buffer in batchnorm.buffers():
if buffer.data.shape == indices.shape:
buffer.data = buffer.data[indices]
print(f"trimming nodes in layer {name} from {from_size} to {len(batchnorm.bias.data)}")
| [
11748,
4866,
198,
11748,
3858,
198,
11748,
28034,
198,
11748,
28034,
13,
2306,
519,
6335,
355,
1960,
519,
6335,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
6738,
4981,
13,
3262,
... | 2.091209 | 2,423 |
from __future__ import print_function
import Adafruit_LSM9DS0
import math
from itertools import permutations
import time
from tabulate import tabulate
# Create new LSM9DS0 instance
imu = Adafruit_LSM9DS0.LSM9DS0()
table = []
while True:
(mag_x, mag_y, mag_z) = imu.readMag()
(acc_x, acc_y, acc_z) = imu.readAccel()
# Normalising the accelerometer data
# Dividing variable (don't know why I use this)
acc_norm_div = math.sqrt(acc_x**2 + acc_y**2 + acc_z**2)
# Normalised values
acc_x_norm = acc_x / acc_norm_div
acc_y_norm = acc_y / acc_norm_div
# Calc pitch and roll using trig
pitch = math.asin(acc_x_norm)
roll = - math.asin(math.radians(acc_y_norm / math.cos(pitch)))
# Do some mathsy stuff to compensate for the tilt
mag_x_comp = mag_x * math.cos(pitch) + mag_z * math.sin(pitch)
mag_y_comp = mag_x * math.sin(roll) * math.sin(pitch) + mag_y * math.cos(roll) - mag_z * math.sin(roll) * math.cos(pitch)
# Calculate the angle in degrees
angle_deg = math.degrees(math.atan2(mag_y_comp, mag_x_comp))
# Work out as a bearing
if angle_deg < 0:
angle_deg += 360
print("Tilt accounted: deg:", angle_deg)
time.sleep(0.05)
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
1215,
1878,
4872,
62,
6561,
44,
24,
5258,
15,
198,
11748,
10688,
198,
6738,
340,
861,
10141,
1330,
9943,
32855,
198,
11748,
640,
198,
6738,
7400,
5039,
1330,
7400,
5039,
... | 2.486022 | 465 |
#!/usr/bin/env python3
import argparse
import urllib.request
import sys
import re
import csv
csv.register_dialect('tsv', delimiter='\t', quoting=csv.QUOTE_NONE)
lookup_taxonomy = { 'A': 'Archaea',
'B': 'Bacteria',
'E': 'Eukaryota',
'V': 'Virus',
'O': 'Other',
'X': 'Unknown' }
parser = argparse.ArgumentParser(description='Read speclist from uniprot and parse for species code to taxonomy')
parser.add_argument('-o','--output', nargs='?', type=argparse.FileType('w'),
default=sys.stdout,
help='output file name or else will write to stdout')
parser.add_argument('-i', '--input', type=argparse.FileType('r'), nargs='?',
help='species list file from uniprot already downloaded otherwise will open and download')
parser.add_argument('--url', required=False,
default='https://www.uniprot.org/docs/speclist.txt',help='URL to download from instead of local file')
args = parser.parse_args()
matchsp = re.compile(r'(\S+)\s+([A-Z])\s+(\d+):\s+N=(.+)')
# this code is stupidly duplicated until I can figure out best way to deal with the encoding diff for url vs local file
csvout = csv.writer(args.output,dialect="tsv")
if args.input:
for line in args.input:
m = matchsp.match(line)
if m:
csvout.writerow([m.group(1),lookup_taxonomy[m.group(2)],m.group(3),m.group(4)])
else:
with urllib.request.urlopen(args.url) as web:
for line in web:
m = matchsp.match(line.decode('utf-8'))
if m:
csvout.writerow([m.group(1),lookup_taxonomy[m.group(2)],m.group(3),m.group(4)])
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
1822,
29572,
198,
11748,
2956,
297,
571,
13,
25927,
198,
11748,
25064,
198,
11748,
302,
198,
11748,
269,
21370,
198,
40664,
13,
30238,
62,
38969,
478,
10786,
912,
85,
3... | 2.190355 | 788 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './prediction.ui'
#
# Created by: PyQt5 UI code generator 5.7.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
5178,
7822,
7560,
422,
3555,
334,
72,
2393,
705,
19571,
28764,
2867,
13,
9019,
6,
198,
2,
198,
2,
15622,
416,
25,
9485,
48,
83,
20,
12454,
2438,
17301,
642,
... | 2.837209 | 86 |
import re
chars = 0
mem = 0
for i in open('day8.txt'):
line = i.strip()
chars += len(line)
s = re.sub(r'(?:(\")|(\\))', repl, line, flags=re.I)
mem += len(s) + 2
print mem - chars
| [
11748,
302,
198,
198,
354,
945,
796,
657,
198,
11883,
796,
657,
198,
198,
1640,
1312,
287,
1280,
10786,
820,
23,
13,
14116,
6,
2599,
198,
1627,
796,
1312,
13,
36311,
3419,
198,
34534,
15853,
18896,
7,
1370,
8,
198,
264,
796,
302,
... | 2.268293 | 82 |
import numpy
from PIL import Image
import torch
img = Image.open("../images/computer.jpg")
img_np = numpy.array(img)/255.0
img_t = torch.from_numpy(img_np).float()
img_t = torch.mean(img_t, dim=2).unsqueeze(0).unsqueeze(1)
hog = HOGLayer()
y = hog(img_t)
y_np = y.detach().to("cpu").numpy()[0]
print(img_t.shape, y_np.shape)
y_np = (y_np + numpy.pi)/(2.0*numpy.pi)
img_y = Image.fromarray(y_np*255)
img_y.show()
| [
11748,
299,
32152,
198,
6738,
350,
4146,
1330,
7412,
198,
198,
11748,
28034,
628,
198,
198,
9600,
796,
7412,
13,
9654,
7203,
40720,
17566,
14,
33215,
13,
9479,
4943,
198,
198,
9600,
62,
37659,
796,
299,
32152,
13,
18747,
7,
9600,
2067... | 2.103448 | 203 |
import game.board as Board
import game.success as Success
import game.moves as Moves
import cursesio as Ui
import playerio as Ui_term
from curses import wrapper
import curses
import sys
| [
11748,
983,
13,
3526,
355,
5926,
198,
11748,
983,
13,
13138,
355,
16282,
198,
11748,
983,
13,
76,
5241,
355,
38213,
198,
11748,
43878,
952,
355,
471,
72,
198,
11748,
2137,
952,
355,
471,
72,
62,
4354,
198,
6738,
43878,
1330,
29908,
... | 3.78 | 50 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright (c) 2005-2006 Axelor SARL. (http://www.axelor.com)
import logging
import math
from datetime import timedelta
from werkzeug import url_encode
from odoo import api, fields, models
from odoo.exceptions import UserError, AccessError, ValidationError
from openerp.tools import float_compare
from odoo.tools.translate import _
_logger = logging.getLogger(__name__)
HOURS_PER_DAY = 8
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2142,
286,
10529,
2238,
13,
4091,
38559,
24290,
2393,
329,
1336,
6634,
290,
15665,
3307,
13,
198,
198,
2,
15069,
357,
66,
8,
5075,
12,
13330,
42575,
273,
47341,
43,... | 3.1 | 160 |
from django.apps import apps
from django.test import TestCase
from .apps import BlogConfig
# apps
# models
# widgets
# forms
# viewmixins
# views
| [
6738,
42625,
14208,
13,
18211,
1330,
6725,
198,
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
198,
6738,
764,
18211,
1330,
14001,
16934,
628,
198,
220,
220,
220,
1303,
6725,
628,
220,
220,
220,
1303,
4981,
628,
220,
220,
220,
... | 2.78125 | 64 |
#!/usr/bin/env python
from __future__ import division
from __future__ import print_function
from builtins import zip
import os.path
from itertools import groupby
from operator import itemgetter
import ninemlcatalog
import nest
import numpy as np
from nineml import units as un, Property
from nineml.user import Initial
import argparse
import matplotlib
from pyNN.utility import SimulationProgressBar
import sys
argv = sys.argv[1:]
from pype9.utils.mpi import is_mpi_master # @IgnorePep8
import pype9.utils.logging.handlers.sysout # @UnusedImport @IgnorePep8
if __name__ == '__main__':
run(sys.argv[1:])
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
3170,
1040,
1330,
19974,
198,
11748,
28686,
13,
6978,
198,
6738,
340,
861,
10141,
1330... | 3.049751 | 201 |
from astropy.io import fits as pyfits
import pyregion
import warnings
import numpy
| [
6738,
6468,
28338,
13,
952,
1330,
11414,
355,
12972,
21013,
198,
198,
11748,
12972,
36996,
198,
198,
11748,
14601,
198,
198,
11748,
299,
32152,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
198
] | 2.605263 | 38 |
'''
Created on Feb 20, 2017
@author: sarvi
@copyright: 2017 Cisco Inc. All rights reserved.
'''
# pylint: disable=locally-disabled, too-many-lines
from __future__ import print_function
import base64
import collections
import datetime
import hashlib
import logging.handlers
import os
import platform
import pprint
import pwd
import random
import stat
import subprocess
import sys
import threading
import time
import uuid
from configparser import SafeConfigParser, InterpolationMissingOptionError, InterpolationSyntaxError, ParsingError, Error
from io import StringIO
SIMPLE_FMT = '%(message)s'
SIMPLE_FMT_CORR_ID = '%(corr_id)s %(message)s'
VERBOSE_FMT_THREAD = '[%(asctime)s-%(thread)s] %(levelname)s/%(processName)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s'
VERBOSE_FMT_CORR_ID = '[%(asctime)s-%(corr_id)s] %(levelname)s/%(processName)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s'
VERBOSE_FMT_CORR_ID_TASK = '[%(asctime)s-%(corr_id)s] %(levelname)s/%(processName)s/%(task_name)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s'
log = logging.getLogger(__name__) # pylint: disable=locally-disabled, invalid-name
printlog = logging.getLogger('wisk.print') # pylint: disable=locally-disabled, invalid-name
ENV_VARIABLE_PREFIX = 'WISK_'
ENV_INIT_DONE = False
LOGGING_INIT_DONE = False
MONITOR_INTERVAL = 3
MONITOR_LOW = 20*1024
MONITOR_RESIZE = 50*1024
MIN_CONFIG = '''
[common]
logdir = /auto/wit-log/wisklogs/%(toolrootname)s
savelogs = all
fileloglevel = DEBUG
consoleloglevel = WARNING
log_iso8601_dates = False
'''
INSTALL_TYPE_SEARCH = ['%(config_dir)s/wisk_install_type.cfg']
CLIENT_CFG_SEARCH = ['%(config_dir)s/wisk_common.cfg',
'%(config_dir)s/wisk_%(Site)s.cfg']
INSTALL_PKG_ROOT = os.path.abspath(os.path.normpath(os.path.join(os.path.dirname(__file__), '../')))
INSTALLED = True if os.path.basename(INSTALL_PKG_ROOT) != 'src' else False
LOCAL_ONLY_CONFIGS = []
if not INSTALLED:
INSTALL_ROOT = os.path.abspath(os.path.join(INSTALL_PKG_ROOT, '../'))
INSTANCE_NAME = os.path.basename(INSTALL_ROOT)
CONFIG_DIR = os.path.abspath(os.path.join(INSTALL_PKG_ROOT, '../config'))
INSTALL_BIN_DIR = os.path.join(INSTALL_ROOT, 'scripts')
INSTALL_LIB_DIR = os.path.join(INSTALL_ROOT, 'src/lib')
LATEST_INSTALL_ROOT = INSTALL_ROOT
LATEST_BIN_DIR = INSTALL_BIN_DIR
LATEST_LIB_DIR = os.path.abspath(os.path.join(INSTALL_PKG_ROOT, '../src/lib'))
else:
INSTALL_ROOT = os.path.normpath(os.path.join(INSTALL_PKG_ROOT, '../'))
INSTANCE_NAME = None
while os.path.basename(INSTALL_ROOT) != 'lib':
INSTALL_ROOT = os.path.normpath(os.path.join(INSTALL_ROOT, '../'))
INSTALL_ROOT = os.path.normpath(os.path.join(INSTALL_ROOT, '../'))
CONFIG_DIR = os.path.abspath(os.path.join(INSTALL_ROOT, 'var/config'))
INSTALL_BIN_DIR = os.path.join(INSTALL_ROOT, 'bin')
INSTALL_LIB_DIR = os.path.join(INSTALL_ROOT, 'lib')
LATEST_INSTALL_ROOT = os.path.normpath(os.path.join(INSTALL_ROOT, '../current'))
LATEST_BIN_DIR = os.path.join(LATEST_INSTALL_ROOT, 'bin')
LATEST_LIB_DIR = os.path.join(LATEST_INSTALL_ROOT, 'lib')
HOST_UNAME = platform.uname()
UMASK = 0o22 # Set default umask to file permissions of 0644 and directory permissions of 0755
os.umask(UMASK)
ENVIRONMENT = {
'start_time': datetime.datetime.now().strftime("_%Y%m%d%H%M%S"),
'pid': str(os.getpid()),
'username': os.environ['SUDO_USER'] if pwd.getpwuid(os.getuid())[0] == 'root' and 'SUDO_USER' in os.environ else pwd.getpwuid(os.getuid())[0],
'installed': INSTALLED,
'instance_name': INSTANCE_NAME,
'install_pkg_root': INSTALL_PKG_ROOT,
'install_root': INSTALL_ROOT,
'config_dir': CONFIG_DIR,
# 'Site': 'local',
'OS': HOST_UNAME[0],
'OS-Version': 'X.XX',
'CPU': 'x86',
'Bits': '64',
'Host OS': HOST_UNAME[0],
'Host-osver': HOST_UNAME[2],
'Host Machine arch': HOST_UNAME[4],
'Host CPU family': HOST_UNAME[5],
'Host Name': HOST_UNAME[1].split('.')[0],
'UMASK': UMASK,
'log_iso8601_dates': False,
}
LUMENS_DATA = {
'dry_run': False,
'group_name': 'wisk',
'data_source': 'cli',
'submitter_id': ENVIRONMENT['username'],
# 'timestamp': None,
# 'uuid': 'valid UUID',
# 'state': 'SUCCESS/FAILURE/IN PROGRESS',
'metadata': {}
}
LOCAL = threading.local()
def get_current_correlation_id():
    '''Return the correlation id for the current thread.

    Prefers the id stashed on the thread-local, then the process-wide
    unique id, and finally falls back to a "<pid>.<thread-id>" string.
    '''
    thread_local_id = getattr(LOCAL, 'operation_id', None)
    process_id = ENVIRONMENT.get('uniqueid', None)
    return thread_local_id or process_id or '{}.{}'.format(os.getpid(), threading.get_ident())
class CorrelationIdFilter(logging.Filter):
    '''Logging filter attached to the root logger and handlers (see logging_setup).

    NOTE(review): currently inherits logging.Filter unchanged, so it filters
    nothing out; the %(corr_id)s field used by the verbose formats is
    presumably injected by a subclass or elsewhere -- confirm.
    '''
class MicroFormatter(logging.Formatter):
    """ Microsecond precision for CLIP """

    def formatTime(self, record, datefmt=None):
        """
        Override date format for microseconds

        With no explicit datefmt, appends sub-second precision to the
        timestamp: ",mmm" (milliseconds) when the 'log_iso8601_dates'
        flag is set, otherwise ".uuuuuu" (microseconds).
        """
        converted = self.converter(record.created)
        if datefmt:
            s = time.strftime(datefmt, converted)
        else:
            t = time.strftime("%Y-%m-%d %H:%M:%S", converted)
            if ENVIRONMENT['log_iso8601_dates']:
                # ISO-8601 style: comma separator, millisecond precision
                s = "%s,%03d" % (t, record.msecs)
            else:
                # record.msecs is fractional milliseconds; *1000 -> microseconds
                s = "%s.%06d" % (t, 1000 * record.msecs)
        return s
def env_siteinfo_update():
    '''Populate ENVIRONMENT/LUMENS_DATA with site info for this host.

    On Darwin a hard-coded site is used; elsewhere /router/bin/servinfo is
    queried and its "key: value" output parsed. WISK_-prefixed environment
    variables override ENVIRONMENT entries (prefix stripped). Returns the
    servinfo dict, or {} when the servinfo command fails.
    '''
    if ENVIRONMENT['OS'] == 'Darwin':
        # Macs have no servinfo tool; assume the San Jose site defaults.
        servinfo = {}
        servinfo['Site'] = 'sjc'
        servinfo['DC'] = os.environ.get('MY_DEFAULT_DC', 'sjc5c').lower()
        servinfo['OS-Version'] = '6.20'
    else:
        try:
            out = subprocess.check_output(['/router/bin/servinfo'])
        except subprocess.CalledProcessError as ex:
            log.error('Could not get servinfo for client: %s', ex)
            return {}
        log.debug(out)
        out = out.strip()
        # Each line is "key: value"; assumes values contain no ':' -- TODO confirm
        out = [k.decode("utf-8").split(':') for k in out.splitlines()]
        servinfo = {k.strip(): v.strip() for k, v in out}
    # servinfo = {k: v for k, v in servinfo.items() if k not in ENVIRONMENT}
    ENVIRONMENT.update(servinfo)
    LUMENS_DATA['metadata'].update(servinfo)
    # WISK_FOO=bar in the process environment becomes ENVIRONMENT['FOO'] = 'bar'
    env_update = {k.replace(ENV_VARIABLE_PREFIX, ''): v for k, v in os.environ.items() if k.startswith(ENV_VARIABLE_PREFIX)}
    ENVIRONMENT.update(env_update)
    # Keep only the short host name (strip any domain suffix)
    ENVIRONMENT['Host Name'] = ENVIRONMENT['Host Name'].split('.')[0]
    return servinfo
def get_unique_id():
    '''Generate a short, mostly-unique id string for a client invocation.

    Combines a 0.1-millisecond timestamp with a 16-bit random value,
    hex-encodes both, base64-encodes the hex text using 'A'/'B' as the
    altchars, and drops the final two characters (the usual '==' padding).
    '''
    stamp = hex(int(time.time() * 10000))[2:]
    nonce = hex(random.randrange(0, 0xFFFF))[2:]
    encoded = base64.b64encode((stamp + nonce).encode(), b'AB')
    return encoded[:-2].decode('utf-8')
def config_read(cfg_search, doclientcfg=False): # pylint: disable=locally-disabled, too-many-statements, too-many-branches, too-many-locals
    '''Read configuration files in a certain order of precedence.

    Seeds the parser with computed defaults (paths, identity, tool names),
    then layers files on top: built-in minimum config, the tool-specific
    search path, install-type config, then client- or server-specific files,
    and finally an optional WISK_CFG override (dev installs only).

    Args:
        cfg_search: iterable of config path templates, %-expanded against
            ENVIRONMENT.
        doclientcfg: load per-user client config; forced True for any user
            other than 'flxclone'.

    Returns:
        (found, not_found): lists/sets of config files read and missing.

    Side effects: stores the parser and derived values in ENVIRONMENT,
    assigns the per-run uniqueid, and exits on fatal config errors.
    '''
    config = SafeConfigParser()
    # Consider using initial values as defaults, so are accessible outside common
    # config = SafeConfigParser(ENVIRONMENT)
    ENVIRONMENT['config'] = config
    config.add_section('common')
    config.set('common', 'home_dir', os.path.expanduser('~/'))
    config.set('common', 'installed', str(INSTALLED))
    config.set('common', 'install_pkg_root', INSTALL_PKG_ROOT)
    config.set('common', 'install_root', INSTALL_ROOT)
    config.set('common', 'config_dir', CONFIG_DIR)
    config.set('common', 'instance_name', str(INSTANCE_NAME))
    config.set('common', 'username', ENVIRONMENT['username'])
    config.set('common', 'hostname', ENVIRONMENT['Host Name'])
    config.set('common', 'toolname', ENVIRONMENT['toolname'])
    toolrootname = ENVIRONMENT['toolrootname']
    config.set('common', 'toolrootname', toolrootname)
    config.set('common', 'buildtag', os.environ.get('BUILD_TAG', ''))
    config.set('common', 'buildid', os.environ.get('BUILD_ID', ''))
    config.set('common', 'pid', '%d' % os.getpid())
    config.set('common', 'worker_id', '0')
    config.set('common', 'site', ENVIRONMENT['Site'])
    config.set('common', 'datacenter', ENVIRONMENT['DC'])
    config.add_section('wisk')
    config.set('wisk', 'monitor_interval', str(MONITOR_INTERVAL))
    config.set('wisk', 'monitor_low', str(MONITOR_LOW))
    config.set('wisk', 'monitor_resize', str(MONITOR_RESIZE))
    # Exceptions
    if toolrootname in ['uwsgi']:
        config.set('common', 'widsuffix', '{worker_id}')
    else:
        config.set('common', 'widsuffix', '')
    if ENVIRONMENT['username'] != 'flxclone':
        doclientcfg = True
    ENVIRONMENT['doclientcfg'] = doclientcfg
    if doclientcfg:
        # ENVIRONMENT['uniqueid'] = '%s' % hex(int(time.time() * 10000))[2:]
        ENVIRONMENT['uniqueid'] = get_unique_id()
        LUMENS_DATA['metadata']['uniqueid'] = ENVIRONMENT['uniqueid']
        LOCAL.operation_id = ENVIRONMENT['uniqueid']
        config.set('common', 'uniqueid', ENVIRONMENT['uniqueid'])
        config.set('common', 'log_root', '%(client_log_root)s')
        ENVIRONMENT['logfile'] = '%(logdir)s/%(toolname)s_%(username)s_%(hostname)s_%(uniqueid)s.log'
        config.set('common', 'logfilename', ENVIRONMENT['logfile'])
    else:
        config.set('common', 'uniqueid', '')
        config.set('common', 'log_root', '%(server_log_root)s')
        if toolrootname in ['uwsgi']:
            ENVIRONMENT['logfile'] = '%(logdir)s/%(toolname)s_%(hostname)s_%(widsuffix)s.log'
        else:
            ENVIRONMENT['logfile'] = '%(logdir)s/%(toolname)s_%(hostname)s.log'
        config.set('common', 'logfilename', ENVIRONMENT['logfile'])
    if not ENVIRONMENT['installed']:
        config.set('common', 'logdir', '%(install_root)s/logs/%(toolrootname)s')
    # Read system defaults
    cfgfiles = list(cfg_search)
    found = []
    try:
        # Read the minimum configuration.
        # read_string() replaces readfp(), which was deprecated and removed
        # in Python 3.12.
        config.read_string(MIN_CONFIG)
        # read the tool specific list of config files
        cfgfiles = [os.path.expanduser(p) % ENVIRONMENT for p in cfgfiles]
        found.extend(config.read(cfgfiles))
        # Search for install_type config files
        installtypecfg = [os.path.expanduser(p) % ENVIRONMENT for p in INSTALL_TYPE_SEARCH]
        foundinstalltype = config.read(installtypecfg)
        if not foundinstalltype:
            sys.exit('Error: install_type config files not found: %s' % (installtypecfg))
        found.extend(foundinstalltype)
        cfgfiles.extend(installtypecfg)
        if doclientcfg:
            clientcfg = [os.path.join(get_tool_dir(), 'wisk.cfg')]
            found.extend(config.read(clientcfg))
            cfgfiles.extend(clientcfg)
        else:
            servercfgfiles = [os.path.join(get_tool_dir(), 'wisk_server.cfg')]
            if config.get('common', 'install_type', vars={'install_type': None}) != 'local':
                # BUGFIX: a third positional argument to get() was the
                # Python 2 'raw' flag; on Python 3 the extra arguments are
                # keyword-only and the old call raised TypeError.
                servercfgfiles.append(os.path.join(get_tool_dir(), 'wisk_server_%s.cfg' % config.get('common', 'install_type')))
            servercfgfiles = [os.path.expanduser(i) for i in servercfgfiles]
            cfgfiles.extend(servercfgfiles)
            found.extend(config.read(servercfgfiles))
            if config.get('common', 'install_type', vars={'install_type': None}) == 'local':
                # NOTE(review): '%(instance_name)s' below is never %-expanded,
                # so the literal filename is searched -- confirm intent.
                localcfgfiles = [os.path.join(get_tool_dir(), 'wisk_local.cfg'),
                                 os.path.join(get_tool_dir(), 'wisk_local_%(instance_name)s.cfg')]
                localcfgfiles = [os.path.expanduser(i) for i in localcfgfiles]
                cfgfiles.extend(localcfgfiles)
                found.extend(config.read(localcfgfiles))
        # read the config files specified in WISK_CFG environment variable, used for trouble shooting
        # BUGFIX: the variable name contained a stray backtick ('WISK`_CFG').
        env_cfg = None if INSTALLED else os.environ.get('WISK_CFG', None)
        if env_cfg is not None:
            cfgfiles.append(env_cfg)
            found.extend(config.read([env_cfg]))
        # Temp code: Remove after CLIP configured
        try:
            ENVIRONMENT['log_iso8601_dates'] = config.getboolean('common', 'log_iso8601_dates')
        except Error:
            ENVIRONMENT['log_iso8601_dates'] = False
    except (ParsingError, OSError) as ex:
        sys.exit('Error reading/parsing Confg files %s : %s' % (cfgfiles, ex))
    ENVIRONMENT['install_type'] = config.get('common', 'install_type', vars={'install_type': None})
    not_found = set(cfgfiles) - set(found)
    ENVIRONMENT['cfg_found'] = found
    ENVIRONMENT['cfg_notfound'] = not_found
    return found, not_found
def config_dict(section='common', options=None):
    """Safely return options from a config section as a dictionary.

    Returns {} when no config has been loaded yet or the section is
    missing. When *options* is given, only those keys (if present) are
    included.
    """
    config = ENVIRONMENT.get('config', None)
    if not config or not config.has_section(section):
        return {}
    if options is None:
        return dict(config.items(section))
    return {key: config.get(section, key) for key in options if config.has_option(section, key)}
def loglevelint2str(llevel):
    '''Translate a numeric log level to its lowercase name ('notset' if unknown).'''
    if llevel == logging.DEBUG:
        return 'debug'
    if llevel == logging.INFO:
        return 'info'
    if llevel == logging.WARNING:
        return 'warning'
    if llevel == logging.ERROR:
        return 'error'
    if llevel == logging.CRITICAL:
        return 'critical'
    return 'notset'
def loglevelstr2int(llevel):
    '''Translate a log level name to its numeric value (NOTSET if unknown).

    Accepts both 'warn' and 'warning' for logging.WARNING.
    '''
    mapping = dict(
        debug=logging.DEBUG,
        info=logging.INFO,
        warn=logging.WARNING,
        warning=logging.WARNING,
        error=logging.ERROR,
        critical=logging.CRITICAL,
    )
    return mapping.get(llevel, logging.NOTSET)
def logverbosity2str(verbosity):
    '''Map a -v count to a log level name; counts above 3 clamp to DEBUG.'''
    names = ('ERROR', 'WARNING', 'INFO', 'DEBUG')
    return names[min(verbosity, 3)]
def loglevel(verbosity):
    '''Apply a -v verbosity count to the console log handler's level.'''
    level_by_count = {0: logging.ERROR, 1: logging.WARNING, 2: logging.INFO}
    llevel = level_by_count.get(verbosity, logging.DEBUG)
    ENVIRONMENT['consoleloghandler'].setLevel(llevel)
# Pick the fastest available way to grab a caller's stack frame (mirrors the
# equivalent logic in the stdlib logging module).
if hasattr(sys, '_getframe'):
    def currentframe():
        ''' Return Frame '''
        # noinspection PyProtectedMember
        # Depth 3: skip this call, its wrapper, and the wrapper's caller.
        return sys._getframe(3) # pylint: disable=locally-disabled, protected-access
else:
    def currentframe():
        """Return the frame object for the caller's stack frame."""
        # Fallback for interpreters without sys._getframe: raise, then walk
        # one frame back from the traceback.
        try:
            raise Exception
        except Exception: # pylint: disable=locally-disabled, broad-except
            return sys.exc_info()[2].tb_frame.f_back
# _SRCFILE = os.path.normcase(currentframe.__code__.co_filename) # pylint: disable=locally-disabled, no-member
#
#
# def findcaller():
# """
# Find the stack frame of the caller so that we can note the source
# file name, line number and function name.
# """
# f = currentframe()
# # On some versions of IronPython, currentframe() returns None if
# # IronPython isn't run with -X:Frames.
# if f is not None:
# f = f.f_back
# rv = "(unknown file)", 0, "(unknown function)"
# while hasattr(f, "f_code"):
# code = f.f_code
# filename = os.path.normcase(code.co_filename)
# if filename != _SRCFILE:
# f = f.f_back
# continue
# if f.f_back is None or not hasattr(f.f_back, "f_code"):
# rv = (code.co_filename, f.f_lineno, '*%s*' % code.co_name)
# else:
# f = f.f_back
# code = f.f_code
# rv = (code.co_filename, f.f_lineno, code.co_name)
# break
# return rv
#
#
# def genemitmethod(console, origemit):
# ''' generate emit method for handlers '''
#
# def emitmethod(self, record):
# ''' emit method for handlers '''
# try:
# thr = self.threads.setdefault(record.thread, dict(isprint=False, levelno=None, record=None))
# tisprint = thr.get('isprint')
# tlevelno = thr.get('levelno')
# trecord = thr.get('record')
# isprint = getattr(record, 'isprint', False)
# if tlevelno != record.levelno or tisprint != isprint:
# trecord = thr.get('record')
# if trecord:
# origemit(self, trecord)
# thr['record'] = None
# thr['isprint'] = isprint
# thr['levelno'] = record.levelno
# if not isprint:
# return origemit(self, record)
# if console:
# return
# trecord = thr.get('record')
# if trecord is not None:
# trecord.msg += record.msg
# else:
# thr['record'] = record
# record.pathname, record.lineno, record.funcName = findcaller()
# if record.msg.endswith('\n'):
# thr['record'].msg = thr['record'].msg[:-1]
# origemit(self, thr['record'])
# thr['record'] = None
# except (KeyboardInterrupt, SystemExit):
# raise
# except Exception: # pylint: disable=locally-disabled, broad-except
# self.handleError(record)
# return emitmethod
#
from logging import StreamHandler
# class StreamHandler(logging.StreamHandler):
# ''' Stream Handler '''
# threads = {}
# emit = genemitmethod(console=True, origemit=logging.StreamHandler.emit)
#
# def __init__(self, *args, **kwargs):
# if isinstance(kwargs.setdefault('stream', sys.stdout), OutputRedirector):
# kwargs['stream'] = kwargs['stream'].filep
#
# super(StreamHandler, self).__init__(*args, **kwargs)
#
#
from logging import FileHandler
# class FileHandler(logging.FileHandler):
# ''' File Handler '''
# threads = {}
# emit = genemitmethod(console=False, origemit=logging.FileHandler.emit)
#
#
from logging.handlers import RotatingFileHandler
# class RotatingFileHandler(logging.handlers.RotatingFileHandler):
# ''' Rotating Filehandler '''
# threads = {}
# emit = genemitmethod(console=False, origemit=logging.handlers.RotatingFileHandler.emit)
#
#
# class OutputRedirector(object):
# """ Wrapper to redirect stdout or stderr """
#
# def __init__(self, filep, logmethod):
# ''' Output Redirector init '''
# self.filep = filep
# self.logmethod = logmethod
#
# def write(self, s):
# ''' Write '''
# self.logmethod(s, extra={'isprint': True})
# self.filep.write(s)
#
# def origwrite(self, s):
# ''' Write data to stream '''
# self.filep.write(s)
#
# def writelines(self, lines):
# ''' Writelines '''
# self.logmethod('\n'.join(lines), extra={'isprint': True})
# self.filep.writelines(lines)
#
# def origwritelines(self, lines):
# ''' Write data to stream '''
# self.filep.writelines(lines)
#
# def flush(self):
# ''' Flush '''
# self.filep.flush()
#
# def isatty(self, *args, **kwargs): # pylint: disable=locally-disabled, unused-argument, no-self-use
# ''' isatty is False when in redirection '''
# return False
def logging_setup(verbosity, corridfilter=None, onlyerrorlogs=False): # pylint: disable=locally-disabled, too-many-statements, too-many-branches
    '''Logging Setup.

    Idempotent (guarded by LOGGING_INIT_DONE). Forces UTC timestamps,
    creates the log directory, installs a console handler whose level is
    derived from config and the -v count, and -- for clients and certain
    server tools -- a file handler writing to the configured logfile.

    Args:
        verbosity: -v count (clamped to 4).
        corridfilter: optional pre-built CorrelationIdFilter to reuse.
        onlyerrorlogs: force 'savelogs = error' in the config.
    '''
    global LOGGING_INIT_DONE # pylint: disable=locally-disabled, global-statement
    if LOGGING_INIT_DONE:
        return
    LOGGING_INIT_DONE = True
    config = ENVIRONMENT['config']
    if onlyerrorlogs:
        config.set('common', 'savelogs', 'error')
    # All logging is done in UTC for CLIP
    os.environ['TZ'] = 'UTC'
    time.tzset()
    # create file handler which logs with log level specified in config
    ENVIRONMENT['logfile'] = config.get('common', 'logfilename')
    if not os.path.exists(os.path.dirname(ENVIRONMENT['logfile'])):
        dname = os.path.dirname(ENVIRONMENT['logfile'])
        try:
            os.makedirs(dname)
            # Open up the log directory so other users can read/write logs.
            os.chmod(dname,
                     os.stat(dname).st_mode |
                     stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH | stat.S_IWOTH)
        except OSError as e:
            sys.exit('Error creating log directory: %s' % e)
    logger = logging.getLogger()
    logger.setLevel(logging.NOTSET)
    verbosity = verbosity if verbosity <= 4 else 4
    # Map -v count to a numeric level: 1 -> 40 (ERROR) ... 4 -> 10 (DEBUG).
    clidebuglevel = (5 - verbosity) * 10
    ENVIRONMENT['verbosity'] = verbosity
    # create console handler with a default log level from config and increased by verbosity
    consoleloglevel = loglevelstr2int(config.get('common', 'consoleloglevel').lower())
    if clidebuglevel < consoleloglevel:
        consoleloglevel = clidebuglevel
    ENVIRONMENT['consoleloglevel'] = consoleloglevel
    doclientcfg = ENVIRONMENT.get('doclientcfg', False)
    toolname = ENVIRONMENT.get('toolname', '')
    if not corridfilter:
        corridfilter = CorrelationIdFilter()
        logging.getLogger('').addFilter(corridfilter)
    if logger.handlers:
        # Reuse the first pre-existing handler as the console handler.
        ENVIRONMENT['consoleloghandler'] = logger.handlers[0]
    else:
        if toolname.startswith('eventlistener'):
            # Event listeners talk to supervisord on stdout; log to stderr.
            ENVIRONMENT['consoleloghandler'] = StreamHandler(stream=sys.stderr)
        else:
            ENVIRONMENT['consoleloghandler'] = StreamHandler()
        ENVIRONMENT['consoleloghandler'].addFilter(corridfilter)
    ENVIRONMENT['consoleloghandler'].setLevel(consoleloglevel)
    if doclientcfg:
        if verbosity >= 4:
            cformat = MicroFormatter(VERBOSE_FMT_CORR_ID)
        else:
            cformat = MicroFormatter(SIMPLE_FMT)
    else:
        cformat = MicroFormatter(VERBOSE_FMT_CORR_ID) # Server
    ENVIRONMENT['consoleloghandler'].setFormatter(cformat)
    logger.addHandler(ENVIRONMENT['consoleloghandler'])
    if doclientcfg:
        fileloglevel = config.get('common', 'fileloglevel').lower()
        ENVIRONMENT['fileloglevel'] = logging.DEBUG if fileloglevel == 'debug' else logging.INFO
        try:
            ENVIRONMENT['fileloghandler'] = FileHandler(ENVIRONMENT['logfile'])
        except OSError as e:
            sys.exit('Error setting up file logging handler: %s, %s' % (ENVIRONMENT['logfile'], e))
        ENVIRONMENT['fileloghandler'].addFilter(corridfilter)
        logger.addHandler(ENVIRONMENT['fileloghandler'])
        ENVIRONMENT['fileloghandler'].setLevel(ENVIRONMENT['fileloglevel'])
        fformat = MicroFormatter(VERBOSE_FMT_CORR_ID)
        ENVIRONMENT['fileloghandler'].setFormatter(fformat)
        # sys.stdout = OutputRedirector(sys.stdout, printlog.info)
        # sys.stderr = OutputRedirector(sys.stderr, printlog.error)
    elif toolname.startswith('eventlistener') or toolname.startswith('celery-flower'):
        # sys.stderr = OutputRedirector(sys.stderr, printlog.info)
        handler = FileHandler(ENVIRONMENT['logfile'])
        handler.addFilter(corridfilter)
        handler.setLevel(logging.DEBUG)
        handler.setFormatter(MicroFormatter(VERBOSE_FMT_CORR_ID))
        logger.addHandler(handler)
        ENVIRONMENT['fileloghandler'] = handler
        ENVIRONMENT['fileloglevel'] = logging.DEBUG
    loglevel(verbosity)
    log.debug('Incoming Environment: %s', pprint.pformat(dict(os.environ), indent=4))
    log.debug('Command line: "%s"', '" "'.join(sys.argv))
    log.debug('Workspace- ID: %s', ENVIRONMENT['workspace_id'])
    log.debug('Config files read from search path: %s', ENVIRONMENT['cfg_found'])
    log.debug('Config files not found in search path: %s', ENVIRONMENT['cfg_notfound'])
    log.debug('Environment: %s', pprint.pformat(ENVIRONMENT, indent=4))
def gettoolname(programname, subcommands=0):
    '''Build "prog-sub1-..." from argv.

    Scans the non-dash argv entries for the first one containing
    *programname*, then joins up to *subcommands* following positional
    arguments onto it with '-'. Falls back to *programname* alone.
    '''
    positional = [arg for arg in sys.argv if not arg.startswith('-')]
    for idx, arg in enumerate(positional):
        if programname not in arg:
            continue
        subs = positional[idx + 1:idx + subcommands + 1]
        return '-'.join([programname] + subs)
    return programname
def env_init(toolname, cfgsearch, doclientcfg=False, **kwargs):
    '''Initialize the environment: platform info, configuration, logging prereqs.

    Idempotent (guarded by ENV_INIT_DONE); only the first call does the work.

    Args:
        toolname: full tool name (e.g. 'wisk-server'); its first '-'-separated
            component becomes the tool root name.
        cfgsearch: config path templates forwarded to config_read().
        doclientcfg: True to load per-user client configuration.
        **kwargs: extra key/value pairs merged into the global ENVIRONMENT.

    Returns:
        True once initialization has been performed.

    Exits the process when the host OS major version is below CEL 6.
    '''
    global ENV_INIT_DONE # pylint: disable=locally-disabled, global-statement
    if not ENV_INIT_DONE:
        ENV_INIT_DONE = True
        ENVIRONMENT['toolname'] = toolname
        ENVIRONMENT['toolrootname'] = toolname.split('-')[0]
        ENVIRONMENT.update(kwargs)
        env_siteinfo_update()
        ENVIRONMENT['workspace_path'] = workspace_path()
        ENVIRONMENT['workspace_id'] = workspace_id()
        ENVIRONMENT['workspace_guid'] = workspace_guid()
        config_read(cfgsearch, doclientcfg)
        # Only the major version matters. BUGFIX: the original two-name
        # unpack of split('.') raised ValueError for versions with more or
        # fewer than one dot (e.g. '6.20.1' or '7').
        celmajor = ENVIRONMENT['OS-Version'].strip().split('.', 1)[0]
        if int(celmajor) < 6:
            sys.exit('ERROR: Tooling requires CEL 6 or above')
    return ENV_INIT_DONE
def exit_clean(err):
    '''Cleanup and save logs if error or needed before exiting.

    Links (or symlinks) the run's logfile into the shared log directory when
    the run failed or 'savelogs = all' is configured; otherwise deletes the
    logfile. Returns *err* unchanged (None is normalized to 0) so callers
    can pass the result straight to sys.exit().
    '''
    if err is None:
        err = 0
    try:
        logfile = ENVIRONMENT['logfile']
        config = ENVIRONMENT['config']
        tlogdir = os.path.expanduser(config.get('common', 'logdir'))
        savelogs = config.get('common', 'savelogs').lower()
    except (InterpolationMissingOptionError, InterpolationSyntaxError) as ex:
        log.info(ex)
        return err
    except KeyError as ex:
        # Logging/config never initialized; nothing to clean up.
        return err
    if err != 0 or savelogs == 'all':
        if not os.path.exists(logfile):
            log.error('Log file does not exist: %s', logfile)
            return err
        logfilename = os.path.basename(logfile)
        tlogfilename = list(os.path.splitext(logfilename))
        if err:
            # Failed runs get an '_error' suffix on both file and directory.
            tlogfilename.insert(-1, '_error')
            tlogdir0 = tlogdir
            tlogdir = '%s_error' % tlogdir
            if not os.path.exists(tlogdir):
                # Don't create if does not exist, would be wrong ID
                log.error('Error Log dir does not exist: %s', tlogdir)
                tlogdir = tlogdir0
        tlogfile = os.path.join(tlogdir, ''.join(tlogfilename))
        # On failure, print the log location to the console; otherwise log it.
        logmsg = log.info if err == 0 else print
        if logfile != tlogfile:
            try:
                # Try to create hardlink
                os.link(logfile, tlogfile)
            except OSError as e:
                log.warning('Error with hard link of %s to %s: %s', logfile, tlogfile, e)
                try:
                    # Try to create symlink
                    os.symlink(logfile, tlogfile)
                except OSError as e:
                    log.error('Error creating symlink of %s to %s: %s', logfile, tlogfile, e)
                    return err
        try:
            # Make the saved log world-readable/writable.
            os.chmod(tlogfile,
                     os.stat(tlogfile).st_mode |
                     stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH | stat.S_IWOTH)
        except OSError as e:
            log.error('Error updating permissions on log files: %s', e)
        logmsg('Detailed Logs at %s' % tlogfile)
    else:
        log.debug('Save Logs on Error is Enabled. Removing success Logfile: %s', logfile)
        if os.path.exists(logfile):
            os.remove(logfile)
    return err
def get_relversion(client):
    '''Get the release version of the current software instance.

    Cached in ENVIRONMENT['rel-version']. Installed instances use the
    install directory's basename; development checkouts ask `git describe`
    for the latest matching tag.

    Args:
        client: True for the client tag prefix (WISK_CLIENT), False for
            the server prefix (WISK_SERVER).
    '''
    if 'rel-version' in ENVIRONMENT:
        return ENVIRONMENT['rel-version']
    if INSTALLED:
        ENVIRONMENT['rel-version'] = os.path.basename(os.path.realpath(INSTALL_ROOT))
        return ENVIRONMENT['rel-version']
    try:
        prefix = 'WISK_CLIENT' if client else 'WISK_SERVER'
        # NOTE(review): '[--9A-Z]' is a glob character range '-'..'9' plus
        # 'A'-'Z' -- presumably intended to match version digits/letters;
        # confirm against the actual tag naming scheme.
        ENVIRONMENT['rel-version'] = subprocess.check_output(
            ['git', 'describe', '--tags', '--match', '%s_[--9A-Z]*' % prefix, '--always', '--abbrev=4', 'HEAD'],
            stderr=subprocess.STDOUT).decode('utf-8').strip().replace('_', '.')
    except subprocess.CalledProcessError as ex:
        log.debug('Could not get servinfo for client: %s', ex)
        ENVIRONMENT['rel-version'] = 'unknown-development-version'
    return ENVIRONMENT['rel-version']
def get_reldatetime():
    '''Return (and cache) the release date of the current software install.

    Uses the mtime of the resolved install root, formatted with time.ctime().
    '''
    if 'rel-datetime' not in ENVIRONMENT:
        install_path = os.path.realpath(INSTALL_ROOT)
        ENVIRONMENT['rel-datetime'] = time.ctime(os.path.getmtime(install_path))
    return ENVIRONMENT['rel-datetime']
def get_verbosity(default=None):
    '''Get verbosity from the command line (cached in ENVIRONMENT).

    Understands separated forms ('-v 2'), '=' forms ('--verbosity=2'),
    repeated-v forms ('-vvv'), and attached forms ('-v2'). Returns
    *default* when no verbosity flag is present.
    '''
    if 'verbosity' in ENVIRONMENT:
        return ENVIRONMENT['verbosity']
    for v in ['-v', '-verbose', '-verbosity', '--verbose', '--verbosity']:
        if v in sys.argv:
            # Exact flag match: take the following argv entry as the count,
            # defaulting to 1 when it is missing or not an integer.
            i = sys.argv.index(v)
            try:
                return ENVIRONMENT.setdefault('verbosity', int(sys.argv[i + 1]))
            except (ValueError, IndexError):
                return ENVIRONMENT.setdefault('verbosity', 1)
        for i, o in enumerate(sys.argv):
            if o.startswith(v):
                if '=' in o:
                    # '--verbosity=3' style
                    _, rv = o.split('=')
                    return ENVIRONMENT.setdefault('verbosity', int(rv))
                elif o.startswith('-vv'):
                    # '-vvv' style: count the v's
                    return ENVIRONMENT.setdefault('verbosity', int(len(o[1:])))
                else:
                    # '-v3' style: strip the flag prefix, parse the rest
                    rv = o.replace(v, '')
                    return ENVIRONMENT.setdefault('verbosity', int(rv))
    return ENVIRONMENT.setdefault('verbosity', default)
def workspace_path():
    '''Walk up from the CWD looking for a directory containing ".git".

    Returns the workspace root, None when the filesystem root is reached
    without finding one, or '/router/bin' when a directory cannot be listed.
    '''
    candidate = os.path.normpath(os.getcwd())
    try:
        while True:
            if '.git' in os.listdir(candidate):
                return candidate
            candidate = os.path.split(candidate)[0]
            if candidate == '/':
                return None
    except OSError:
        return '/router/bin'
def workspace_id():
    '''Workspace id unique to the user, host, and workspace path.

    Path slashes become dots, and any resulting '_.' sequence collapses
    to '_'.
    '''
    raw = '{}_{}_{}'.format(ENVIRONMENT['username'],
                            ENVIRONMENT['Host Name'],
                            ENVIRONMENT['workspace_path'])
    return raw.replace('/', '.').replace('_.', '_')
def workspace_guid():
    '''Generate a stable workspace guid usable as an Oracle user name.

    The hashed identity deliberately excludes the calling user (host +
    workspace path only) so DB cleanup works no matter who runs it; the
    install owner's login is prefixed to make the guid identifiable.
    '''
    identity = '{}_{}'.format(ENVIRONMENT['Host Name'], ENVIRONMENT['workspace_path'])
    owner = pwd.getpwuid(os.stat(INSTALL_ROOT).st_uid).pw_name
    digest = hashlib.sha1(identity.encode('utf-8')).hexdigest()
    return '{}_{}'.format(owner, digest[:10])
def get_tool_dir():
    '''Return the per-user tool directory ($TOOL_DIR, defaulting to ~/.wisk).'''
    default = os.path.join(os.path.expanduser('~'), '.wisk')
    return os.environ.get('TOOL_DIR', default)
def get_correlation_header():
    """Build the X-Operation-ID HTTP header for outgoing requests.

    Uses the per-run unique id when one was established; otherwise falls
    back to the workspace guid and logs an error.
    """
    operation_id = ENVIRONMENT.get('uniqueid', '')
    if not operation_id:
        operation_id = workspace_guid()
        log.error('Missing uniqueid - using workspace_guid: %s', operation_id)
    log.info('Setting HTTP_X_OPERATION_ID header: %s', operation_id)
    return {'X-Operation-ID': operation_id}
def get_team_from_local():
    '''Return the default team from the user config (~/.wisk/wisk.cfg), or None.

    has_option() is checked first, so get() cannot raise NoOptionError.
    '''
    config = ENVIRONMENT['config']
    if config.has_option('common', 'team'):
        # BUGFIX: the original passed '' as a third positional argument --
        # that was the Python 2 ConfigParser 'raw' flag; on Python 3 the
        # extra arguments to get() are keyword-only, so the call raised
        # TypeError whenever the option existed.
        return config.get('common', 'team')
    return None
def cliprint(obj, cindent=4, indent=4):
    '''Render dicts, lists and scalars as an indented string for CLI display.

    Args:
        obj: mapping, list, or scalar value to render.
        cindent: current indent (spaces) for this nesting level.
        indent: extra spaces added for each nested mapping level.

    Returns:
        The formatted string; dict entries each begin on a new line with
        aligned keys, lists are wrapped in brackets, and scalars are
        prefixed with a single space.
    '''
    if isinstance(obj, dict):
        # OrderedDict is a dict subclass, so one isinstance check covers both.
        if not obj:
            # BUGFIX: max() over an empty key list raised ValueError.
            return ''
        width = max(len(key) for key in obj) + 1
        rendered = ''
        for key in sorted(obj):
            rendered = rendered + '\n{}{:<{}} '.format(' ' * cindent, key + ':', width) + cliprint(obj[key], cindent + indent)
        return rendered
    elif isinstance(obj, list):
        rendered = '['
        sep = ''
        for item in obj:
            rendered = rendered + sep + '{0}'.format(' ' * cindent) + cliprint(item, cindent)
            sep = ','
        return rendered + ']'
    else:
        text = ' %s' % obj
        # Keep continuation lines of multi-line scalars aligned.
        return text.replace('\n', '\n' + ' ' * cindent)
| [
7061,
6,
198,
41972,
319,
3158,
1160,
11,
2177,
198,
198,
31,
9800,
25,
29008,
8903,
198,
198,
31,
22163,
4766,
25,
220,
2177,
28289,
3457,
13,
1439,
2489,
10395,
13,
198,
7061,
6,
198,
2,
279,
2645,
600,
25,
15560,
28,
17946,
453... | 2.231636 | 14,471 |
#!/usr/bin/env python
# python 2.x
import sys
from oauth2client.service_account import ServiceAccountCredentials
print_access_token() | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
21015,
362,
13,
87,
198,
198,
11748,
25064,
198,
6738,
267,
18439,
17,
16366,
13,
15271,
62,
23317,
1330,
4809,
30116,
34,
445,
14817,
198,
198,
4798,
62,
15526,
62,
30001,
3... | 3.162791 | 43 |
from copy import copy
import functools
import os
import sys
# Interfaces
# Derived classes
# Text
# Reader
# Tokenizer
# array of sentences -> array of sentences
# ["a b c", "d e f"] -> [["a", "b", "c"], ["d", "e", "f"]]
# Permutator
# Sorter
# [["clouds", "are", "White"] , ["are", "White", "Clouds"], ["White", "clouds", "are"]]
# Presenter
if __name__ == '__main__':
    # CLI contract: input text file, stop-words file, 'true'/'false' for
    # descending sort, and the output file path (4 required arguments).
    if len(sys.argv) < 5:
        raise Exception('Specify one more param, the text file path')
    '''
    Corrida 1: Added params START
    '''
    params = {
        'input_file_path': sys.argv[1],
        'stop_words_file_path': sys.argv[2],
        'descending': sys.argv[3],
        'output_file_path': sys.argv[4]
    }
    with open(params.get('input_file_path'), 'r') as f:
        input_text = f.read()
    # Stop words are stored one per line.
    with open(params.get('stop_words_file_path'), 'r') as f:
        stop_words = f.read().split('\n')
    # Only the literal string 'true' enables descending order.
    if params.get('descending') == 'true':
        descending_order = True
    else:
        descending_order = False
    '''
    Corrida 1: Added params END
    '''
    # SentenceReader is defined earlier in this module (not visible in this
    # excerpt); it drives the read -> tokenize -> permute -> sort -> present
    # filter chain described by the section comments above.
    reader = SentenceReader(
        input_text,
        {
            'stop_words': stop_words,
            'descending_order': descending_order,
            'output_file_path': params.get('output_file_path')
        }
    )
    reader.execute_filter()
6738,
4866,
1330,
4866,
198,
198,
11748,
1257,
310,
10141,
198,
11748,
28686,
198,
11748,
25064,
198,
198,
2,
4225,
32186,
198,
220,
220,
220,
220,
198,
198,
2,
9626,
1572,
6097,
198,
198,
2,
8255,
198,
198,
2,
25342,
198,
198,
2,
... | 2.185065 | 616 |
from django.shortcuts import render, redirect
from . forms import EmployeeForm
from . models import Employee
# Create your views here.
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
18941,
198,
6738,
764,
5107,
1330,
36824,
8479,
198,
6738,
764,
4981,
1330,
36824,
198,
198,
2,
13610,
534,
5009,
994,
13,
628,
198
] | 4.181818 | 33 |
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import math
from django import template
register = template.Library()
# when this module is imported via {% load pagination_tags %}, it is
# imported as django.templatetags.pagination_tags, which prevents a
# relative import (..conf) to rapidsms from working here. in fact, that
# would import django.conf, so it will appear to be working, but per-app
# settings won't work! PAGINATOR_ defaults are in the ..settings module.
from rapidsms.conf import settings
@register.inclusion_tag("rapidsms/templatetags/paginator.html")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
43907,
25,
257,
72,
40379,
28,
19,
39747,
28,
19,
2123,
1509,
28,
19,
628,
198,
11748,
10688,
198,
6738,
42625,
14208,
1330,
11055,
198,
30238,
796,
11055,
13,
23377,
3419,
198,
1... | 3.227778 | 180 |
import os
import json
from termcolor import colored
from utils import getInfoAboutArchivesToScrape
from PaperScraper.utils.command_line import printProgressBar
from DatasetIndexing.lib.arxiv_scraper import ArXivScraper
from DatasetIndexing.lib.sciencedirect_scraper import ScienceDirectScraper
if __name__ == '__main__':
    # Hard-coded sample record in the PaperScraper metadata schema, used as
    # a smoke test for the extraction pipeline below.
    paper = {
        "Title": "BomJi at SemEval-2018 Task 10: Combining Vector-, Pattern- and Graph-based Information to Identify Discriminative Attributes",
        "Abstract": "This paper describes BomJi, a supervised system for capturing discriminative attributes in word pairs (e.g. yellow as discriminative for banana over watermelon). The system relies on an XGB classifier trained on carefully engineered graph-, pattern- and word embedding based features. It participated in the SemEval- 2018 Task 10 on Capturing Discriminative Attributes, achieving an F1 score of 0:73 and ranking 2nd out of 26 participant systems.",
        "Authors": [
            "Enrico Santus",
            "Chris Biemann",
            "Emmanuele Chersoni"
        ],
        "Date": "2018-04-30T14:58:22Z",
        "DOI": [],
        "Category": [
            "cs.CL"
        ],
        "Link": "http://arxiv.org/pdf/1804.11251v1.pdf",
        "Archive": "arXiv",
        "Prediction": "Dataset Detected"}
    # ExtractInfoFromPaper is defined elsewhere in this module (not visible
    # in this excerpt).
    test = ExtractInfoFromPaper(paper)
    test.extract()
| [
11748,
28686,
198,
11748,
33918,
198,
6738,
3381,
8043,
1330,
16396,
198,
198,
6738,
3384,
4487,
1330,
651,
12360,
8585,
19895,
1083,
2514,
3351,
13484,
198,
6738,
14962,
3351,
38545,
13,
26791,
13,
21812,
62,
1370,
1330,
3601,
32577,
103... | 2.719368 | 506 |
import manage_data
import task_api
| [
11748,
6687,
62,
7890,
198,
11748,
4876,
62,
15042,
198
] | 3.5 | 10 |
import argparse
from asyncio import get_event_loop
from asyncio import sleep as asleep
from asyncio import TimeoutError
from errno import ECONNRESET
from functools import lru_cache
from functools import partial
from urllib.parse import urljoin
from aioconsole.stream import aprint
from aiohttp import ClientOSError
from aiohttp import ClientPayloadError
from aiohttp import ClientSession
from aiohttp import ClientTimeout
from aiohttp import InvalidURL
from aiohttp import TCPConnector
from lxml.etree import ParserError
from lxml.etree import XMLSyntaxError
from lxml.html import fromstring
from lxml.html import HTMLParser
from utils.cache import LRU
from utils.config import cache_limit
from utils.config import http_cookies
from utils.config import timeout_interval
from utils.config import tracker_url
CONTENT_PATH = "descendant-or-self::tr[contains(@class, 'hl-tr')]"
NAME_PATH = "string(descendant::td[contains(@class, 'tLeft')]/"\
"descendant::a[contains(@class, 'tLink')])"
LINK_PATH = "string(descendant::td[contains(@class, 'small')]/"\
"a[@title='Download' or contains(@class, 'tr-dl')]/@href)"
COOLDOWN = timeout_interval() * 10
HTTP_EXCEPTIONS = (
ClientOSError,
ClientPayloadError,
InvalidURL,
OSError,
TimeoutError,
)
@lru_cache(maxsize=1)
if __name__ == '__main__':
exit(main())
| [
11748,
1822,
29572,
198,
6738,
30351,
952,
1330,
651,
62,
15596,
62,
26268,
198,
6738,
30351,
952,
1330,
3993,
355,
16039,
198,
6738,
30351,
952,
1330,
3862,
448,
12331,
198,
6738,
11454,
3919,
1330,
412,
10943,
45,
19535,
2767,
198,
67... | 2.943107 | 457 |
# Django command helper to apply django related fields patch programattically
# Will cd into 1st argument and run patch
import os
import sys
from subprocess import Popen, PIPE
# Path to the patch shipped with this project, resolved relative to the
# current working directory (bin/django-related-fields.patch).
patch_file = os.path.join(
    os.path.abspath('.'), 'bin', 'django-related-fields.patch')
# Run `patch -p1` inside the target checkout (first CLI argument) and feed
# the patch text on stdin.
# NOTE(review): on Python 3 this Popen needs text=True (or bytes input) for
# communicate() to accept a str -- confirm the target interpreter.
process = Popen(['patch', '-p1'], stdin=PIPE, shell=False, cwd=sys.argv[1])
# Use a context manager so the patch file handle is closed deterministically
# (the previous bare open() leaked it until garbage collection).
with open(patch_file) as fh:
    process.communicate(fh.read())
| [
2,
37770,
3141,
31904,
284,
4174,
42625,
14208,
3519,
7032,
8529,
1430,
1078,
1146,
198,
2,
2561,
22927,
656,
352,
301,
4578,
290,
1057,
8529,
198,
11748,
28686,
198,
11748,
25064,
198,
6738,
850,
14681,
1330,
8099,
268,
11,
350,
4061,
... | 2.932331 | 133 |
import FWCore.ParameterSet.Config as cms
import FastSimulation.Tracking.TrackCandidateProducer_cfi
# Clone the FastSim track-candidate producer for electrons: seeds come from
# the "electronMergedSeeds" collection; require at least 5 crossed tracker
# layers and enable cleaning of overlapping candidates.
electronCkfTrackCandidates = FastSimulation.Tracking.TrackCandidateProducer_cfi.trackCandidateProducer.clone(
    src = cms.InputTag("electronMergedSeeds"),
    MinNumberOfCrossedLayers = 5,
    OverlapCleaning = True
)
| [
11748,
48849,
14055,
13,
36301,
7248,
13,
16934,
355,
269,
907,
198,
198,
11748,
12549,
8890,
1741,
13,
2898,
5430,
13,
24802,
41572,
20540,
11547,
2189,
62,
66,
12463,
198,
9509,
1313,
34,
74,
69,
24802,
41572,
37051,
796,
12549,
8890,... | 2.892857 | 112 |
from py2p import mesh
import asyncio, argparse, time, sys
# Event loop shared by the reader coroutine and main().
loop = asyncio.get_event_loop()
parser = argparse.ArgumentParser()
parser.add_argument("-b", "--bootstrap", type=int, help="whether this node in boostrap node")
parser.add_argument("-p", "--myPort", type=int, help="my port")
args = parser.parse_args()
myPort = args.myPort
# Listen on all interfaces at the requested port.
sock = mesh.MeshSocket('0.0.0.0', myPort)
# Non-bootstrap nodes (-b 0) join the mesh via the well-known node on :1000,
# then wait a moment for the routing table to populate.
if args.bootstrap == 0:
    sock.connect('127.0.0.1', 1000)
    time.sleep(1)
    print(sock.routing_table)
bootstrapID = ""
sock.debug_level = 5
# async def readHandler():
count = 0
try:
    # readHandler() and main() are coroutines defined elsewhere in this
    # script (not visible in this excerpt); run them concurrently.
    future = [readHandler(), main()]
    loop.run_until_complete(asyncio.gather(*future))
except KeyboardInterrupt:
    pass
finally:
    print("ending program...")
    loop.close()
6738,
12972,
17,
79,
1330,
19609,
198,
11748,
30351,
952,
11,
1822,
29572,
11,
640,
11,
25064,
198,
198,
26268,
796,
30351,
952,
13,
1136,
62,
15596,
62,
26268,
3419,
198,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
3419,
19... | 2.652482 | 282 |
import numpy as np
import torch.nn as nn
# Reference: https://github.com/imalic3/python-word-error-rate
| [
11748,
299,
32152,
355,
45941,
201,
198,
11748,
28034,
13,
20471,
355,
299,
77,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
2,
20984,
25,
3740,
1378,
12567,
13,
785,
14,
4402,
291,
18,
14,
29412,
12,
4775... | 2.326923 | 52 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
######################################################################
#
# Model automatically generated by modelGenerator
#
######################################################################
# ----------------------------------------------------------------------
# Network Structure
# ----------------------------------------------------------------------
# conv layer 0: conv | input -1 output 4 kernel 2 post relu
# conv layer 1: pool | kernel 2 post None
# conv layer 2: conv | input 4 output 5 kernel 2 post relu
# conv layer 3: pool | kernel 2 post None
# fc layer 0: fc | input -1 output 84 post relu
# fc layer 1: fc | input 84 output 1 post None
# ----------------------------------------------------------------------
| [
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
6738,
28034,
13,
2306,
519,
6335,
1330,
35748,
198,
198,
29113,
29113,
4242,
2235,
198,
2,
198,
2,
9104,
6338,
7560... | 4.281553 | 206 |
from dataclasses import dataclass
from typing import Any, Optional
from squall.params import Body
@dataclass
@dataclass
| [
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
19720,
1330,
4377,
11,
32233,
198,
198,
6738,
2809,
439,
13,
37266,
1330,
12290,
628,
198,
31,
19608,
330,
31172,
628,
198,
31,
19608,
330,
31172,
198
] | 3.378378 | 37 |
# -*- coding: utf-8 -*-
"""A Data and Model Zoo for Single-Cell Genomics."""
from ._settings import settings
import sfaira.consts
import sfaira.data
import sfaira.models
import sfaira.train
import sfaira.ui
import sfaira.versions
from ._version import get_versions
# Resolve the package version from versioneer metadata, then delete the
# helper so it does not leak into the public namespace.
__version__ = get_versions()['version']
del get_versions
# Package authorship metadata, rendered as comma-separated strings.
__maintainer__ = ', '.join([
    "Leander Dony",
    "David S. Fischer"
])
__author__ = ', '.join([
    "Leander Dony",
    "David S. Fischer",
    "Lukas Heumos"
])
__email__ = ', '.join([
    "leander.dony@helmholtz-munich.de",
    "david.fischer@helmholtz-munich.de",
    "lukas.heumos@helmholtz-munich.de"
])
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
32,
6060,
290,
9104,
21980,
329,
14206,
12,
28780,
5215,
31994,
526,
15931,
198,
198,
6738,
47540,
33692,
1330,
6460,
198,
11748,
264,
22043,
64,
13,
1102,
6448,
... | 2.517787 | 253 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-04-19 21:49
# @Author : Mayandev
# @Site : https://github.com/Mayandev/
# @File : serializers.py
# @Software: PyCharm
from rest_framework import serializers
from .models import Movie, Genre
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2488,
7575,
220,
220,
220,
1058,
13130,
12,
3023,
12,
1129,
2310,
25,
2920,
198,
2,
2488,
13838,
220,
1058,
1737,
... | 2.514286 | 105 |
# -*- coding: utf-8 -*-
from github.accounts.github_account import GithubAccount
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
33084,
13,
23317,
82,
13,
12567,
62,
23317,
1330,
38994,
30116,
628
] | 3.037037 | 27 |
from core.templatetags import register
from django.utils.safestring import mark_safe
import datetime
@register.filter
def addstr(arg1, arg2):
    """Template filter: concatenate arg1 & arg2 after coercing both to str."""
    parts = (str(arg1), str(arg2))
    return ''.join(parts)
@register.filter
def _not(arg1):
    """Boolean not: negate the truthiness of arg1."""
    return not arg1
@register.filter
def _or(arg1, arg2):
    """Boolean or: arg1 when truthy, otherwise arg2."""
    if arg1:
        return arg1
    return arg2
@register.filter
def _and(arg1, arg2):
    """Boolean and: arg1 when falsy, otherwise arg2."""
    if not arg1:
        return arg1
    return arg2
@register.filter
def _equals(arg1, arg2):
    """Equality test: True when arg1 == arg2.

    The previous body (``arg1 and arg2``) was a copy-paste of ``_and`` and
    never compared anything; its docstring ("Boolean and") was wrong too.
    """
    return arg1 == arg2
@register.filter
def _plus(arg1, arg2):
    """Integer addition: int(arg1) + int(arg2), returned as a string."""
    total = int(arg1) + int(arg2)
    return str(total)
@register.filter
def _aslist(arg1):
    """Split the string form of arg1 on commas into a list."""
    return str(arg1).split(',')
@register.filter
def _get(arg1, arg2):
    """Look up key arg2 on mapping arg1; a falsy arg1 acts as an empty dict."""
    mapping = arg1 or {}
    return mapping.get(arg2)
@register.filter
| [
6738,
4755,
13,
11498,
489,
265,
316,
3775,
1330,
7881,
198,
6738,
42625,
14208,
13,
26791,
13,
49585,
395,
1806,
1330,
1317,
62,
21230,
198,
11748,
4818,
8079,
628,
198,
31,
30238,
13,
24455,
198,
4299,
751,
2536,
7,
853,
16,
11,
1... | 2.550562 | 356 |
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import time
from aiida_fleur.calculation.fleur import FleurCalculation as FleurCalc
from aiida_fleur_ad.util.extract_corelevels import extract_corelevels
from aiida.orm import load_node
from aiida.orm import Computer
from aiida.plugins import Code, DataFactory, CalculationFactory
from pprint import pprint
from lxml.etree import XMLSyntaxError, XPathEvalError
from lxml import etree, objectify
from aiida import load_dbenv, is_dbenv_loaded
from six.moves import range
# Make sure an AiiDA database environment is loaded before any ORM access.
if not is_dbenv_loaded():
    load_dbenv()
StructureData = DataFactory('structure')
ParameterData = DataFactory('parameter')
KpointsData = DataFactory('array.kpoints')
FleurInpCalc = CalculationFactory('fleur.inpgen')
start_time = time.time()
##
# calculation to extract from:
# either from list given here or system argument
calcs_pks = [4743] # 4436]
# calcs_pks = [1464, 1462, 1399, 1403]#, 1059]#, 1414
####
'''
if not calcs_pks:
    try:
        for arg in sys.argv[1:]:
            calc_t = arg
            calcs_pks.append(int(calc_t))
    except:
        pass
    #####
'''
# check if calculation pks belong to successful fleur calculations
for pk in calcs_pks:
    calc = load_node(pk)
    if (not isinstance(calc, FleurCalc)):
        raise ValueError("Calculation with pk {} must be a FleurCalculation".format(pk))
    if calc.get_state() != 'FINISHED':
        raise ValueError("Calculation with pk {} must be in state FINISHED".format(pk))
parser_info = {'parser_warnings': [], 'unparsed': []}
# call
test_outxmlfiles = ['./test_outxml/outBeCr.xml', './test_outxml/out.xml', './test_outxml/outCuF.xml',
                    './test_outxml/outFe.xml', './test_outxml/outHg.xml', './test_outxml/outO.xml']
outxmlfile = test_outxmlfiles[0]
corelevels, atomtypes = extract_corelevels(outxmlfile)
# print corelevels
for i in range(0, len(corelevels[0][1]['corestates'])):
    print(corelevels[0][1]['corestates'][i]['energy'])
print(calcs_pks)
for calc in calcs_pks:
    # get out.xml file of calculation
    # NOTE(review): `pk` is stale from the validation loop above, so every
    # iteration fetches the same node -- presumably load_node(calc) was meant.
    outxml = load_node(pk).out.retrieved.folder.get_abs_path('path/out.xml')
    # NOTE(review): `outxml` retrieved above is never used; this re-parses the
    # local test file -- presumably extract_corelevels(outxml) was meant.
    corelevels, atypes = extract_corelevels(outxmlfile)
    #print('corelevels {}'.format(corelevels))
    pprint(corelevels)
    pprint(atypes)
    for i in range(0, len(corelevels[1][0]['corestates'])):
        # print corelevels[3][1]['corestates'][i]['energy']
        print(corelevels[1][0]['corestates'][i]['energy'])
print(("--- %s seconds ---" % (time.time() - start_time)))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
640,
198,
6738,
257,
72,
3755,
62,
... | 2.536489 | 1,014 |
_.error_on_external_run # unused attribute (/home/lschmelzeisen/Repositories/wallcrop/noxfile.py:20)
_.reuse_existing_virtualenvs # unused attribute (/home/lschmelzeisen/Repositories/wallcrop/noxfile.py:21)
_.stop_on_first_error # unused attribute (/home/lschmelzeisen/Repositories/wallcrop/noxfile.py:22)
test # unused function (/home/lschmelzeisen/Repositories/wallcrop/noxfile.py:25)
Config # unused class (/home/lschmelzeisen/Repositories/wallcrop/src/wallcrop/_cli.py:34)
search_path # unused variable (/home/lschmelzeisen/Repositories/wallcrop/src/wallcrop/_cli.py:35)
Config # unused class (/home/lschmelzeisen/Repositories/wallcrop/src/wallcrop/_cli.py:39)
version # unused variable (/home/lschmelzeisen/Repositories/wallcrop/src/wallcrop/_cli.py:41)
description # unused variable (/home/lschmelzeisen/Repositories/wallcrop/src/wallcrop/_cli.py:42)
| [
44807,
18224,
62,
261,
62,
22615,
62,
5143,
220,
1303,
21958,
11688,
50247,
11195,
14,
7278,
354,
17694,
2736,
13254,
14,
6207,
35061,
14,
11930,
31476,
14,
35420,
7753,
13,
9078,
25,
1238,
8,
198,
44807,
260,
1904,
62,
25687,
62,
328... | 2.925676 | 296 |
# -*- coding: utf-8 -*-
import os
from shutil import rmtree
from tempfile import mkdtemp
import sh
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
28686,
198,
6738,
4423,
346,
1330,
374,
16762,
631,
198,
6738,
20218,
7753,
1330,
33480,
67,
29510,
198,
198,
11748,
427,
628,
198
] | 2.684211 | 38 |
__version__ = '0.1.1'
from .config import config
| [
834,
9641,
834,
796,
705,
15,
13,
16,
13,
16,
6,
198,
198,
6738,
764,
11250,
1330,
4566,
198
] | 2.631579 | 19 |
__doc__ = """Encode a depths matrix and a tnf matrix to latent representation.
Creates a variational autoencoder in PyTorch and tries to represent the depths
and tnf in the latent space under gaussian noise.
Usage:
>>> vae = VAE(nsamples=6)
>>> dataloader, mask = make_dataloader(depths, tnf)
>>> vae.trainmodel(dataloader)
>>> latent = vae.encode(dataloader) # Encode to latent representation
>>> latent.shape
(183882, 32)
"""
__cmd_doc__ = """Encode depths and TNF using a VAE to latent representation"""
import numpy as _np
import torch as _torch
_torch.manual_seed(0)
from math import log as _log
from torch import nn as _nn
from torch.optim import Adam as _Adam
from torch.nn.functional import softmax as _softmax
from torch.utils.data import DataLoader as _DataLoader
from torch.utils.data.dataset import TensorDataset as _TensorDataset
import vamb.vambtools as _vambtools
if _torch.__version__ < '0.4':
raise ImportError('PyTorch version must be 0.4 or newer')
def make_dataloader(rpkm, tnf, batchsize=256, destroy=False, cuda=False):
    """Create a DataLoader and a contig mask from RPKM and TNF.
    The dataloader is an object feeding minibatches of contigs to the VAE.
    The data are normalized versions of the input datasets, with zero-contigs,
    i.e. contigs where a row in either TNF or RPKM are all zeros, removed.
    The mask is a boolean mask designating which contigs have been kept.
    Inputs:
        rpkm: RPKM matrix (N_contigs x N_samples)
        tnf: TNF matrix (N_contigs x N_TNF)
        batchsize: Starting size of minibatches for dataloader
        destroy: Mutate rpkm and tnf array in-place instead of making a copy.
        cuda: Pagelock memory of dataloader (use when using GPU acceleration)
    Outputs:
        DataLoader: An object feeding data to the VAE
        mask: A boolean mask of which contigs are kept
    """
    # Validate inputs up front: both matrices must be float32 ndarrays with
    # one row per contig.
    if not isinstance(rpkm, _np.ndarray) or not isinstance(tnf, _np.ndarray):
        raise ValueError('TNF and RPKM must be Numpy arrays')
    if batchsize < 1:
        raise ValueError('Minimum batchsize of 1, not {}'.format(batchsize))
    if len(rpkm) != len(tnf):
        raise ValueError('Lengths of RPKM and TNF must be the same')
    if not (rpkm.dtype == tnf.dtype == _np.float32):
        raise ValueError('TNF and RPKM must be Numpy arrays of dtype float32')
    # Keep only contigs with a nonzero TNF row.
    mask = tnf.sum(axis=1) != 0
    # If multiple samples, also include nonzero depth as requirement for accept
    # of sequences
    if rpkm.shape[1] > 1:
        depthssum = rpkm.sum(axis=1)
        mask &= depthssum != 0
        depthssum = depthssum[mask]
    if mask.sum() < batchsize:
        raise ValueError('Fewer sequences left after filtering than the batch size.')
    if destroy:
        # In-place masking avoids a second copy of the (potentially huge)
        # input arrays, at the cost of clobbering the caller's data.
        rpkm = _vambtools.numpy_inplace_maskarray(rpkm, mask)
        tnf = _vambtools.numpy_inplace_maskarray(tnf, mask)
    else:
        # The astype operation does not copy due to "copy=False", but the masking
        # operation does.
        rpkm = rpkm[mask].astype(_np.float32, copy=False)
        tnf = tnf[mask].astype(_np.float32, copy=False)
    # If multiple samples, normalize to sum to 1, else zscore normalize
    if rpkm.shape[1] > 1:
        rpkm /= depthssum.reshape((-1, 1))
    else:
        _vambtools.zscore(rpkm, axis=0, inplace=True)
    # Normalize arrays and create the Tensors (the tensors share the underlying memory)
    # of the Numpy arrays
    _vambtools.zscore(tnf, axis=0, inplace=True)
    depthstensor = _torch.from_numpy(rpkm)
    tnftensor = _torch.from_numpy(tnf)
    # Create dataloader
    n_workers = 4 if cuda else 1
    dataset = _TensorDataset(depthstensor, tnftensor)
    dataloader = _DataLoader(dataset=dataset, batch_size=batchsize, drop_last=True,
                             shuffle=True, num_workers=n_workers, pin_memory=cuda)
    return dataloader, mask
class VAE(_nn.Module):
"""Variational autoencoder, subclass of torch.nn.Module.
Instantiate with:
nsamples: Number of samples in abundance matrix
nhiddens: List of n_neurons in the hidden layers [None=Auto]
nlatent: Number of neurons in the latent layer [32]
alpha: Approximate starting TNF/(CE+TNF) ratio in loss. [None = Auto]
beta: Multiply KLD by the inverse of this value [200]
dropout: Probability of dropout on forward pass [0.2]
cuda: Use CUDA (GPU accelerated training) [False]
vae.trainmodel(dataloader, nepochs batchsteps, lrate, logfile, modelfile)
Trains the model, returning None
vae.encode(self, data_loader):
Encodes the data in the data loader and returns the encoded matrix.
If alpha or dropout is None and there is only one sample, they are set to
0.99 and 0.0, respectively
"""
# sample with gaussian noise
def encode(self, data_loader):
"""Encode a data loader to a latent representation with VAE
Input: data_loader: As generated by train_vae
Output: A (n_contigs x n_latent) Numpy array of latent repr.
"""
self.eval()
new_data_loader = _DataLoader(dataset=data_loader.dataset,
batch_size=data_loader.batch_size,
shuffle=False,
drop_last=False,
num_workers=1,
pin_memory=data_loader.pin_memory)
depths_array, tnf_array = data_loader.dataset.tensors
length = len(depths_array)
# We make a Numpy array instead of a Torch array because, if we create
# a Torch array, then convert it to Numpy, Numpy will believe it doesn't
# own the memory block, and array resizes will not be permitted.
latent = _np.empty((length, self.nlatent), dtype=_np.float32)
row = 0
with _torch.no_grad():
for depths, tnf in new_data_loader:
# Move input to GPU if requested
if self.usecuda:
depths = depths.cuda()
tnf = tnf.cuda()
# Evaluate
out_depths, out_tnf, mu, logsigma = self(depths, tnf)
if self.usecuda:
mu = mu.cpu()
latent[row: row + len(mu)] = mu
row += len(mu)
assert row == length
return latent
def save(self, filehandle):
"""Saves the VAE to a path or binary opened file. Load with VAE.load
Input: Path or binary opened filehandle
Output: None
"""
state = {'nsamples': self.nsamples,
'alpha': self.alpha,
'beta': self.beta,
'dropout': self.dropout,
'nhiddens': self.nhiddens,
'nlatent': self.nlatent,
'state': self.state_dict(),
}
_torch.save(state, filehandle)
@classmethod
def load(cls, path, cuda=False, evaluate=True):
"""Instantiates a VAE from a model file.
Inputs:
path: Path to model file as created by functions VAE.save or
VAE.trainmodel.
cuda: If network should work on GPU [False]
evaluate: Return network in evaluation mode [True]
Output: VAE with weights and parameters matching the saved network.
"""
# Forcably load to CPU even if model was saves as GPU model
dictionary = _torch.load(path, map_location=lambda storage, loc: storage)
nsamples = dictionary['nsamples']
alpha = dictionary['alpha']
beta = dictionary['beta']
dropout = dictionary['dropout']
nhiddens = dictionary['nhiddens']
nlatent = dictionary['nlatent']
state = dictionary['state']
vae = cls(nsamples, nhiddens, nlatent, alpha, beta, dropout, cuda)
vae.load_state_dict(state)
if cuda:
vae.cuda()
if evaluate:
vae.eval()
return vae
    def trainmodel(self, dataloader, nepochs=500, lrate=1e-3,
                   batchsteps=[25, 75, 150, 300], logfile=None, modelfile=None):
        """Train the autoencoder from depths array and tnf array.
        Inputs:
            dataloader: DataLoader made by make_dataloader
            nepochs: Train for this many epochs before encoding [500]
            lrate: Starting learning rate for the optimizer [0.001]
            batchsteps: None or double batchsize at these epochs [25, 75, 150, 300]
            logfile: Print status updates to this file if not None [None]
            modelfile: Save models to this file if not None [None]
        Output: None
        """
        # NOTE: the mutable default for batchsteps is safe here because it is
        # copied via list(batchsteps) below and never mutated.
        # NOTE(review): lrate == 0 passes this check despite the message
        # saying "must be positive" -- confirm whether 0 is intended.
        if lrate < 0:
            raise ValueError('Learning rate must be positive, not {}'.format(lrate))
        if nepochs < 1:
            raise ValueError('Minimum 1 epoch, not {}'.format(nepochs))
        if batchsteps is None:
            batchsteps_set = set()
        else:
            # First collect to list in order to allow all element types, then check that
            # they are integers
            batchsteps = list(batchsteps)
            if not all(isinstance(i, int) for i in batchsteps):
                raise ValueError('All elements of batchsteps must be integers')
            if max(batchsteps, default=0) >= nepochs:
                raise ValueError('Max batchsteps must not equal or exceed nepochs')
            # Each batchstep doubles the batch size; the final size must still
            # fit inside the dataset (drop_last would otherwise starve training).
            last_batchsize = dataloader.batch_size * 2**len(batchsteps)
            if len(dataloader.dataset) < last_batchsize:
                raise ValueError('Last batch size exceeds dataset length')
            batchsteps_set = set(batchsteps)
        # Get number of features
        ncontigs, nsamples = dataloader.dataset.tensors[0].shape
        optimizer = _Adam(self.parameters(), lr=lrate)
        if logfile is not None:
            print('\tNetwork properties:', file=logfile)
            print('\tCUDA:', self.usecuda, file=logfile)
            print('\tAlpha:', self.alpha, file=logfile)
            print('\tBeta:', self.beta, file=logfile)
            print('\tDropout:', self.dropout, file=logfile)
            print('\tN hidden:', ', '.join(map(str, self.nhiddens)), file=logfile)
            print('\tN latent:', self.nlatent, file=logfile)
            print('\n\tTraining properties:', file=logfile)
            print('\tN epochs:', nepochs, file=logfile)
            print('\tStarting batch size:', dataloader.batch_size, file=logfile)
            batchsteps_string = ', '.join(map(str, sorted(batchsteps))) if batchsteps_set else "None"
            print('\tBatchsteps:', batchsteps_string, file=logfile)
            print('\tLearning rate:', lrate, file=logfile)
            print('\tN sequences:', ncontigs, file=logfile)
            print('\tN samples:', nsamples, file=logfile, end='\n\n')
        # Train
        # self.trainepoch is defined elsewhere in this class (not visible in
        # this excerpt); it may return a new dataloader when batchsize doubles.
        for epoch in range(nepochs):
            dataloader = self.trainepoch(dataloader, epoch, optimizer, batchsteps_set, logfile)
        # Save weights - Lord forgive me, for I have sinned when catching all exceptions
        if modelfile is not None:
            try:
                self.save(modelfile)
            except:
                pass
        return None
| [
834,
15390,
834,
796,
37227,
4834,
8189,
257,
21593,
17593,
290,
257,
256,
77,
69,
17593,
284,
41270,
10552,
13,
198,
198,
16719,
274,
257,
5553,
864,
1960,
6571,
66,
12342,
287,
9485,
15884,
354,
290,
8404,
284,
2380,
262,
21593,
198... | 2.284931 | 4,924 |
--- evdev/genecodes.py.orig 2020-11-02 00:44:50 UTC
+++ evdev/genecodes.py
@@ -20,7 +20,7 @@ if sys.argv[1:]:
#-----------------------------------------------------------------------------
-macro_regex = r'#define +((?:KEY|ABS|REL|SW|MSC|LED|BTN|REP|SND|ID|EV|BUS|SYN|FF|UI_FF|INPUT_PROP)_\w+)'
+macro_regex = r'#define(?: |\t)+((?:KEY|ABS|REL|SW|MSC|LED|BTN|REP|SND|ID|EV|BUS|SYN|FF|UI_FF|INPUT_PROP)_\w+)'
macro_regex = re.compile(macro_regex)
uname = list(os.uname()); del uname[1]
| [
6329,
819,
7959,
14,
5235,
721,
4147,
13,
9078,
13,
11612,
197,
42334,
12,
1157,
12,
2999,
3571,
25,
2598,
25,
1120,
18119,
198,
45340,
819,
7959,
14,
5235,
721,
4147,
13,
9078,
198,
12404,
532,
1238,
11,
22,
1343,
1238,
11,
22,
2... | 2.157205 | 229 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This is a real time example how to implement DTLN tf light model with
sounddevice. The script is based on the "wire.py" example of the sounddevice
toolbox. If the command line shows "input underflow", restart the script.
If there are still a lot of dropouts, increase the latency.
First call:
$ python real_time_dtln_audio.py --list-devices
to get your audio devices. In the next step call
$ python real_time_dtln_audio.py -i in_device_idx -o out_device_idx
For .whl files of the tf light runtime go to:
https://www.tensorflow.org/lite/guide/python
Author: Nils L. Westhausen (nils.westhausen@uol.de)
Version: 01.07.2020
This code is licensed under the terms of the MIT-license.
"""
import numpy as np
import sounddevice as sd
import tflite_runtime.interpreter as tflite
import argparse
def int_or_str(text):
    """Argparse helper: coerce to int when possible, else keep the string.

    sounddevice accepts either a numeric device ID or a name substring.
    """
    try:
        value = int(text)
    except ValueError:
        return text
    return value
# First pass: handle --list-devices alone so it works without the other
# (otherwise required) arguments.
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument(
    '-l', '--list-devices', action='store_true',
    help='show list of audio devices and exit')
args, remaining = parser.parse_known_args()
if args.list_devices:
    print(sd.query_devices())
    parser.exit(0)
# Second pass: full argument parser, inheriting the first as parent.
parser = argparse.ArgumentParser(
    description=__doc__,
    formatter_class=argparse.RawDescriptionHelpFormatter,
    parents=[parser])
parser.add_argument(
    '-i', '--input-device', type=int_or_str,
    help='input device (numeric ID or substring)')
parser.add_argument(
    '-o', '--output-device', type=int_or_str,
    help='output device (numeric ID or substring)')
parser.add_argument(
    '-n', '--no-denoise', action='store_true',
    help='turn off denoise, pass-through')
parser.add_argument(
    '-t', '--threads', type=int, default=1,
    help='number of threads for tflite interpreters')
parser.add_argument('--latency', type=float, help='latency in seconds', default=0.2)
args = parser.parse_args(remaining)
# set some parameters
block_len_ms = 32
block_shift_ms = 8
fs_target = 16000
# create the interpreters
# The DTLN model is split into two TF-Lite stages that are run in sequence.
interpreter_1 = tflite.Interpreter(model_path='./pretrained_model/model_quant_1.tflite', num_threads=args.threads)
interpreter_1.allocate_tensors()
interpreter_2 = tflite.Interpreter(model_path='./pretrained_model/model_quant_2.tflite', num_threads=args.threads)
interpreter_2.allocate_tensors()
# Get input and output tensors.
input_details_1 = interpreter_1.get_input_details()
output_details_1 = interpreter_1.get_output_details()
input_details_2 = interpreter_2.get_input_details()
output_details_2 = interpreter_2.get_output_details()
# create states for the lstms
states_1 = np.zeros(input_details_1[1]['shape']).astype('float32')
states_2 = np.zeros(input_details_2[1]['shape']).astype('float32')
# calculate shift and length
block_shift = int(np.round(fs_target * (block_shift_ms / 1000)))
block_len = int(np.round(fs_target * (block_len_ms / 1000)))
# create buffer
in_buffer = np.zeros((block_len)).astype('float32')
out_buffer = np.zeros((block_len)).astype('float32')
try:
    # `callback` (the per-block denoising routine) is defined elsewhere in
    # this script (not visible in this excerpt). Stream runs 6-channel input
    # to 1-channel output at 16 kHz, one callback per block_shift samples.
    with sd.Stream(device=(args.input_device, args.output_device),
                   samplerate=fs_target, blocksize=block_shift,
                   dtype=np.float32, latency=args.latency,
                   channels=(6, 1), callback=callback):
        print('#' * 80)
        print('press Return to quit')
        print('#' * 80)
        input()
except KeyboardInterrupt:
    parser.exit('')
except Exception as e:
    parser.exit(type(e).__name__ + ': ' + str(e))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
1212,
318,
257,
1103,
640,
1672,
703,
284,
3494,
360,
14990,
45,
48700,
1657,
2746,
351,
220,
198,
23... | 2.657332 | 1,357 |
import numpy as np
# Exercise script: numpy random arrays, dtype casting, and axis-wise
# statistics on a fixed-seed 4x4 integer matrix.
print("---------Exercicios propostos 2---------")
#Definindo a seed
#np.random.seed(5)
#Exercicio 1
print("------------------------Exercicio 1------------------------")
arr1 = np.random.randn(10)
print("Array 1:",arr1)
arr2 = arr1 * 100
print("Array 2:",arr2)
arr3 = arr2.astype(int)
print("Array 3:",arr3)
#Exercicio 2
print("------------------------Exercicio 2------------------------")
np.random.seed(10)
matriz = np.random.randint(1,50,(4,4))
print("Matriz:",matriz)
#Exercicio 3
print("------------------------Exercicio 3------------------------")
print("Media das linhas")
# NOTE(review): mean(axis=0) averages down the rows, i.e. these are COLUMN
# means; the "Linha"/"Coluna" labels appear swapped with the axis=1 block
# below -- confirm intent.
print("Linha 1:",matriz.mean(axis=0)[0])
print("Linha 2:",matriz.mean(axis=0)[1])
print("Linha 3:",matriz.mean(axis=0)[2])
print("Linha 4:",matriz.mean(axis=0)[3])
print("Media das colunas")
print("Coluna 1:",matriz.mean(axis=1)[0])
print("Coluna 2:",matriz.mean(axis=1)[1])
print("Coluna 3:",matriz.mean(axis=1)[2])
print("Coluna 4:",matriz.mean(axis=1)[3])
#Exercicio 4
print("------------------------Exercicio 4------------------------")
print("Numeros unicos:",np.unique(matriz, return_counts=True))
# NOTE(review): this prints a boolean mask over the unique values, not the
# repeated values themselves; those would be vals[counts > 1].
print("Numeros que aparecem duas vezes",np.unique(matriz, return_counts=True)[1]>1)
| [
11748,
299,
32152,
355,
45941,
198,
198,
4798,
7203,
45537,
3109,
2798,
291,
4267,
2632,
455,
418,
362,
45537,
4943,
198,
198,
2,
7469,
259,
521,
78,
257,
9403,
198,
2,
37659,
13,
25120,
13,
28826,
7,
20,
8,
198,
198,
2,
3109,
279... | 2.698198 | 444 |
#!/usr/bin/env python3
import glob
import imp
import os
import sys
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
15095,
198,
11748,
848,
198,
11748,
28686,
198,
11748,
25064,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198
] | 2.609756 | 41 |
#!/usr/bin/env python2
"""
SYNOPSIS
Prepare the TCGA BRCA Clinical data into a format that is suitable for input
into the Hotspots 'Mutation_Counts' pipeline.
NOTES
(1) Data source:
- Supplementary Table 1 of the Cell 2015 Paper:
http://www.nature.com/nature/journal/v490/n7418/full/nature11412.html
- Corresponding Cell 2015 data deposited on cBioPortal:
http://www.cbioportal.org/study?id=brca_tcga_pub2015
(2) Note on consistency between the 'Exome' column (Tumor_Sample_Barcode) of
the SI_Table and 'SAMPLE_ID' column of cBioPortal Sample-level Clinical
data file:
- Checked that 'Exome' value always start with '{Patient_ID}-01'
except for TCGA-BH-A1ES --> TCGA-BH-A1ES-06A-12D-A243-09
- Checked that 'SAMPLE_ID' value always equal'{Patient_ID}-01'.
- In the discrepancy case ('TCGA-BH-A1ES-06A-12D-A243-09'), manually
inspected and confirmed that the Histology_Type and Receptor Status
values of this sample reported in the two sources agrees:
Histology_Type: IDC
ER_Status: Positive
PR_Status: Positive
HER2_Status: Negative
EXAMPLES
./prep_tcga_clinical.py \
--in_cbioportal_patient_2015 ../public/tcga_brca/raw/brca_tcga_pub2015/brca_tcga_pub2015/data_bcr_clinical_data_patient.txt \
--in_cbioportal_sample_2015 ../public/tcga_brca/raw/brca_tcga_pub2015/brca_tcga_pub2015/data_bcr_clinical_data_sample.txt \
--in_si_table_cell_2015 ../public/clinical/raw/tcga_brca/Ciriello_Cell_2015.Table_S1.xlsx \
--out_clinical ../public/clinical/tcga.clinical_data.tsv
AUTHOR
Parin Sripakdeevong <parin@jimmy.harvard.edu> (April-2017)
"""
import sys
import os
import time
import copy
import argparse
import numpy as np
import pandas as pd
pd.set_option('display.precision', 2)
pd.set_option('display.width', 1000)
pd.set_option('display.max_columns', 20)
pd.set_option('display.max_rows', 2000)
OUT_COLS = ['Tumor_Sample_Barcode', 'Center', 'ER_Status', 'PR_Status', 'HER2_Status', 'Biopsy_Site_Type', 'Histology_Type', 'Gender']
RECEPTOR_STATUS_COLS = ['ER_Status', 'PR_Status', 'HER2_Status']
RECEPTOR_STATUSES = ['Positive', 'Negative', 'Unknown']
HISTOLOGY_TYPES = ['Invasive_Ductal_Carcinoma', 'Invasive_Lobular_Carcinoma', 'Mixed_Ductal_and_Lobular_Carcinoma',
'Other_Invasive_Breast_Carcinoma', 'Other_Breast_Cancer', 'Unknown']
GENDER_TYPES = ['Female', 'Male', 'Unknown']
def merge_clinical_df(cli_patient_df, cli_sample_df, cli_si_table_df):
"""Perform outer join on the three input dfs using 'Patient_ID' as the join
key.
Note
----
(1) Will ensure that the set of 'Patient_ID' found in the three input DFs
are the same. So will obtain same result if switch to using INNER JOIN.
"""
# Check that there is no overlap columns between the three input dfs (except
# for the 'Patient_ID' join key)
colnames_so_far = set()
for colname in list(cli_patient_df) + list(cli_sample_df) + list(cli_si_table_df):
if colname != 'Patient_ID':
if colname in colnames_so_far:
raise Exception("Found duplicated colname (%s)." % colname)
colnames_so_far.add(colname)
assert set(cli_patient_df['Patient_ID'].unique()) == set(cli_sample_df['Patient_ID'].unique())
assert set(cli_patient_df['Patient_ID'].unique()) == set(cli_si_table_df['Patient_ID'].unique())
# Perform outer joins
tmp_df = pd.merge(cli_patient_df, cli_sample_df,
how="outer", # OUTER JOIN
on="Patient_ID",
sort=False,
indicator='indicator_column1')
df = pd.merge(tmp_df, cli_si_table_df,
how="outer", # OUTER JOIN
on="Patient_ID",
sort=False,
indicator='indicator_column2')
# Ensure that is is no 'orphan' Patient_ID that is found in only some of the
# input DFs.
assert set(df['indicator_column1'].unique()) == set(['both'])
assert set(df['indicator_column2'].unique()) == set(['both'])
# Ensure that there is no missing data in any of the columns.
assert not df.isnull().values.any()
# Consistency checks
# Ensure that there is no unexpected receptor_status values.
for colname in RECEPTOR_STATUS_COLS:
assert set(df[colname].unique()) == set(RECEPTOR_STATUSES)
# Ensure that there is no unexpected 'Histology_Type' values.
assert set(df['Histology_Type'].unique()) <= set(HISTOLOGY_TYPES)
# Ensure that there is no unexpected 'Gender' values.
assert set(df['Gender'].unique()) <= set(GENDER_TYPES)
# Ensure that there are no duplicated 'Patient_ID' values
assert not df.duplicated(subset=['Patient_ID']).any()
# Ensure that there are no duplicated 'Tumor_Sample_Barcode' values
assert not df.duplicated(subset=['Tumor_Sample_Barcode']).any()
return df
def import_si_table_cell_2015_cli_data(infile):
"""Import the TCGA-BRCA clinical data from Supplemental Table 1 of the 2015
Cell paper
Notes
-----
- Extract the following data in standardized format:
(1) Patient_ID (e.g. 'TCGA-A2-A0T2')
(2) Tumor_Sample_Barcode (e.g. 'TCGA-A2-A0T2-01A-11W-A097-09')
(3) Histology_Type (see HISTOLOGY_TYPES list)
(4) ER_Status_SI_Table_NOT_USED (see RECEPTOR_STATUSES list)
(5) PR_Status_SI_Table_NOT_USED (see RECEPTOR_STATUSES list)
(6) HER2_Status_SI_Table_NOT_USED (see RECEPTOR_STATUSES list)
"""
df = pd.read_excel(infile, sheetname="Suppl. Table 1", skiprows=2)
# Select all the require columns + implicitly check that all required columns exist
required_columns = ['Case.ID', 'Exome', 'Final Pathology', 'ER IHC', 'PR IHC', 'HER2 IHC']
df = df[required_columns]
df.rename(columns={'Case.ID': 'Patient_ID'}, inplace=True)
df.rename(columns={'Exome': 'Tumor_Sample_Barcode'}, inplace=True)
df.rename(columns={'Final Pathology': 'Histology_Type'}, inplace=True)
df.rename(columns={'ER IHC': 'ER_Status_SI_Table_NOT_USED'}, inplace=True)
df.rename(columns={'PR IHC': 'PR_Status_SI_Table_NOT_USED'}, inplace=True)
df.rename(columns={'HER2 IHC': 'HER2_Status_SI_Table_NOT_USED'}, inplace=True)
# Standardize the values in the 'Histology_Type' column
df['Histology_Type'].replace('IDC', 'Invasive_Ductal_Carcinoma', inplace=True)
df['Histology_Type'].replace('ILC', 'Invasive_Lobular_Carcinoma', inplace=True)
df['Histology_Type'].replace('Mixed.IDC.ILC', 'Mixed_Ductal_and_Lobular_Carcinoma', inplace=True)
df['Histology_Type'].replace('Other', 'Other_Invasive_Breast_Carcinoma', inplace=True)
# Fill missing ER/PR/HER2 status values
for colname in RECEPTOR_STATUS_COLS:
colname = colname + '_SI_Table_NOT_USED'
df[[colname]] = df[[colname]].fillna(value='Unknown')
# Map of possible missing/equivocal results to 'Unknown'
for colname in RECEPTOR_STATUS_COLS:
colname = colname + '_SI_Table_NOT_USED'
df[colname].replace('[Not Evaluated]', 'Unknown', inplace=True)
df[colname].replace('[Not Available]', 'Unknown', inplace=True)
df[colname].replace('Equivocal', 'Unknown', inplace=True)
df[colname].replace('Indeterminate', 'Unknown', inplace=True)
df[colname].replace('#N/A', 'Unknown', inplace=True)
# Ensure that there is no missing data in any of the columns.
assert not df.isnull().values.any()
# Ensure that there are no duplicated 'Patient_ID' values
assert not df.duplicated(subset=['Patient_ID']).any()
# Ensure that there are no duplicated 'Tumor_Sample_Barcode' values
assert not df.duplicated(subset=['Tumor_Sample_Barcode']).any()
# Check that for all rows, Tumor_Sample_Barcode has Patient_ID + '-01' as its prefix.
for index, row in df.iterrows():
if row['Patient_ID'] != 'TCGA-BH-A1ES':
# See header documentation for rationale of this check.
assert row['Tumor_Sample_Barcode'].startswith(row['Patient_ID'] + '-01')
else:
assert row['Tumor_Sample_Barcode'].startswith(row['Patient_ID'] + '-06')
return df
def import_cbioportal_sample_data(infile):
"""Import the Sample-level Clinical Data for the 2015 Cell TCGA dataset
(downloaded from cBioPortal).
Notes
-----
- Extract the following data in standardized format:
(1) Patient_ID (e.g. 'TCGA-LQ-A4E4')
(2) ER_Status (see RECEPTOR_STATUSES list)
(3) PR_Status (see RECEPTOR_STATUSES list)
(4) HER2_Status (see RECEPTOR_STATUSES list)
"""
df = pd.read_table(infile, sep="\t", dtype=str, comment="#", header=0)
remove_duplicate_rows(df)
# Check that for all rows, 'SAMPLE_ID is always equal to PATIENT_ID + '-01'
for index, row in df.iterrows():
assert row['SAMPLE_ID'].startswith(row['PATIENT_ID'] + '-01')
# Select all the require columns + implicitly check that all required columns exist
required_columns = ['PATIENT_ID', 'SAMPLE_ID', 'ER_STATUS_BY_IHC', 'PR_STATUS_BY_IHC', 'IHC_HER2', 'HER2_FISH_STATUS']
df = df[required_columns]
df.rename(columns={'PATIENT_ID': 'Patient_ID'}, inplace=True)
df.rename(columns={'SAMPLE_ID': 'Sample_ID'}, inplace=True)
df.rename(columns={'ER_STATUS_BY_IHC': 'ER_Status'}, inplace=True)
df.rename(columns={'PR_STATUS_BY_IHC': 'PR_Status'}, inplace=True)
df.rename(columns={'IHC_HER2': 'HER2_IHC'}, inplace=True)
df.rename(columns={'HER2_FISH_STATUS': 'HER2_FISH'}, inplace=True)
df = infer_HER2_status(df)
df = df.drop(['HER2_IHC', 'HER2_FISH'], 1)
# Map of possible missing/equivocal results to 'Unknown'
for colname in RECEPTOR_STATUS_COLS:
df[colname].replace('[Not Available]', 'Unknown', inplace=True)
df[colname].replace('Indeterminate', 'Unknown', inplace=True)
# Ensure that there is no missing data in any of the columns.
assert not df.isnull().values.any()
# Ensure that there are no duplicated 'Patient_ID' values
assert not df.duplicated(subset=['Patient_ID']).any()
return df
def remove_duplicate_rows(df):
"""There appear to be 13 'completely' duplicate rows in the cBioPortal
sample-level clinical data file. Remove these duplicate rows from
the dataframe."""
exclude_indices_keep_false = df.index[df.duplicated(keep=False)]
exclude_indices_keep_first = df.index[df.duplicated(keep='first')]
assert len(exclude_indices_keep_false) == 26
assert len(exclude_indices_keep_first) == 13
print "##"
print "## WARNING: Keep only the first instance of the following duplicated",
print "rows from Sample-level Clinical Data File (cBioPortal; Cell 2015 TCGA-BRCA):"""
print "##"
print df[df.index.isin(exclude_indices_keep_false)][['PATIENT_ID', 'SAMPLE_ID', 'OTHER_SAMPLE_ID']]
df.drop(exclude_indices_keep_first, inplace=True)
return df
def infer_HER2_status(df):
"""Infer the sample's HER2_Status from 'HER2_IHC' and 'HER2_FISH' data
Notes
-----
(1) Here is counts of 'HER2_IHC' and 'HER2_FISH' for the Cell 2015 dataset
found in the Sample-level Clinical data downloaded from cBioPortal
(April 05th, 2017).
Command: df.groupby(['HER2_IHC','HER2_FISH']).size()
(A) Post-Standardize Value:
HER2_IHC HER2_FISH Counts Inferred_HER2_Status
-----------------------------------------------------------
Equivocal Equivocal 3 --> Unknown
Negative 109 --> Negative
Positive 16 --> Positive
Unknown 12 --> Unknown
Negative Negative 93 --> Negative
Positive 2 --> Positive [Note: Inconsistent!]
Unknown 330 --> Negative
Positive Equivocal 2 --> Positive
Negative 7 --> Positive [Note: Inconsistent!]
Positive 34 --> Positive
Unknown 78 --> Positive
Unknown Negative 42 --> Negative
Positive 9 --> Positive
Unknown 93 --> Unknown
"""
for colname in ['HER2_IHC', 'HER2_FISH']:
df[colname].replace('[Not Available]', 'Unknown', inplace=True)
df[colname].replace('Indeterminate', 'Unknown', inplace=True)
assert set(df[colname].unique()) == set(['Positive', 'Negative', 'Equivocal', 'Unknown'])
df['HER2_Status'] = "Unknown"
df.loc[(df.HER2_IHC=='Negative'), 'HER2_Status'] = 'Negative'
df.loc[(df.HER2_FISH=='Negative'), 'HER2_Status'] = 'Negative'
df.loc[(df.HER2_IHC=='Positive'), 'HER2_Status'] = 'Positive' # Overide the Negative
df.loc[(df.HER2_FISH=='Positive'), 'HER2_Status'] = 'Positive' # Overide the Negative
return df
def import_cbioportal_patient_data(infile):
"""Import the Patient-level Clinical Data for the 2015 Cell TCGA dataset
(downloaded from cBioPortal).
Notes
-----
- Extract the following data in standardized format:
(1) Patient_ID (e.g. 'TCGA-A2-A0T2')
(2) Histology_Type_cBioPortal_NOT_USED (see HISTOLOGY_TYPES list)
(3) Gender (see GENDER_TYPES list)
"""
df = pd.read_table(infile, sep="\t", dtype=str, comment="#", header=0)
# Select all the require columns + implicitly check that all required columns exist
required_columns = ['PATIENT_ID', 'HISTOLOGICAL_DIAGNOSIS', 'GENDER']
df = df[required_columns]
# Standardize the column names
df.rename(columns={'PATIENT_ID': 'Patient_ID'}, inplace=True)
df.rename(columns={'HISTOLOGICAL_DIAGNOSIS': 'Histology_Type_cBioPortal_NOT_USED'}, inplace=True)
df.rename(columns={'GENDER': 'Gender'}, inplace=True)
colname = 'Histology_Type_cBioPortal_NOT_USED'
# Standardize the values in the 'Histology_Type' column
df[colname].replace('Infiltrating Ductal Carcinoma', 'Invasive_Ductal_Carcinoma', inplace=True)
df[colname].replace('Infiltrating Lobular Carcinoma', 'Invasive_Lobular_Carcinoma', inplace=True)
df[colname].replace('Mixed Histology (please specify)', 'Mixed_Ductal_and_Lobular_Carcinoma', inplace=True)
df[colname].replace('Infiltrating Carcinoma NOS', 'Other_Invasive_Breast_Carcinoma', inplace=True)
df[colname].replace('Medullary Carcinoma', 'Other_Invasive_Breast_Carcinoma', inplace=True)
df[colname].replace('Mucinous Carcinoma', 'Other_Invasive_Breast_Carcinoma', inplace=True)
df[colname].replace('Metaplastic Carcinoma', 'Other_Breast_Cancer', inplace=True)
df[colname].replace('Other, specify', 'Unknown', inplace=True)
df[colname].replace('[Not Available]', 'Unknown', inplace=True)
# Standardize the values in the 'Gender' column
df['Gender'] = df['Gender'].fillna(value='Unknown')
df['Gender'].replace('FEMALE', 'Female', inplace=True)
df['Gender'].replace('MALE', 'Male', inplace=True)
# Ensure that there is no missing data in any of the columns.
assert not df.isnull().values.any()
# Ensure that there are no duplicated 'Patient_ID' values
assert not df.duplicated(subset=['Patient_ID']).any()
return df
def import_maf_sample_barcodes(infile):
"""Import a set of tumor sample barcodes from the MAF file.
"""
df = pd.read_table(infile, sep="\t", dtype=str, comment="#", header = 0)
sample_barcodes = list(df['Tumor_Sample_Barcode'].unique())
return sample_barcodes
if __name__ == '__main__':
print "## Enter %s (%s).\n##" % (os.path.basename(__file__), time.asctime())
start_time = time.time()
parser = argparse.ArgumentParser()
parser.add_argument("--in_cbioportal_patient_2015", action="store", required=True,
metavar='FILE',
help="Path to the input Patient-Level Clinical Data of the Cell 2015 paper (from cBioportal).")
parser.add_argument("--in_cbioportal_sample_2015", action="store", required=True,
metavar='FILE',
help="Path to the input Sample-Level Clinical Data of the Cell 2015 paper (from cBioportal).")
parser.add_argument("--in_si_table_cell_2015", action="store", required=True,
metavar='FILE',
help="Path to the input Clinical Data of the Cell 2015 paper (from the SI Table).")
parser.add_argument("--out_clinical", action="store", required=True,
metavar='FILE',
help="Path to output Clinical Data.")
options = parser.parse_args()
print "##", "-" * 50
print "## Specified Options:"
print "## in_cbioportal_patient_2015: ", repr(options.in_cbioportal_patient_2015)
print "## in_cbioportal_sample_2015: ", repr(options.in_cbioportal_sample_2015)
print "## in_si_table_cell_2015: ", repr(options.in_si_table_cell_2015)
print "## out_clinical: ", repr(options.out_clinical)
print "##", "-" * 50
main(options)
print "##"
print "## Exit %s" % os.path.basename(__file__),
print '(%s | total_time = %.3f secs).' % (time.asctime(), time.time() - start_time)
sys.exit(0)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
198,
198,
37811,
198,
23060,
45,
30737,
1797,
198,
220,
220,
220,
43426,
262,
17283,
9273,
347,
7397,
32,
21234,
1366,
656,
257,
5794,
326,
318,
11080,
329,
5128,
198,
220,
220,
220,
... | 2.310215 | 7,714 |
# -*- coding: utf-8 -*-
from datetime import datetime
import scrapy
from bs4 import BeautifulSoup
class UkDatabaseSpider(scrapy.Spider):
"""Collect information about people."""
name = "uk_database_posts"
"""str: The name of the spider."""
allowed_domains = ["theukdatabase.com"]
"""list: Allowed domains to crawl."""
start_urls = ["https://theukdatabase.com/2018/01/02/daniel-fisher-evesham/"] # Insert an article.
"""list: The URLs to start crawling from."""
def parse(self, response):
"""Generator for parsing a page.
Args:
response: Web page.
Yields:
dict: The scraped information.
class: scrapy.Request for the next page.
"""
post_title = self.__get_post_title(response)
post_text = self.__get_post_text(response)
post_pub_date: datetime = self.__get_post_pub_date(response)
post_images_src: list = self.__get_post_images(response)
if post_title or post_text or post_images_src:
yield {
"TITLE": post_title,
"TEXT": post_text,
"POST_PUB_DATE": post_pub_date,
"IMAGES": post_images_src,
}
# Continues with crawling the 'previous page'.
next_page_to_scrape = response.xpath(
"//nav[@id = 'nav-below']/span[@class = 'nav-previous']/a/@href").extract_first()
if next_page_to_scrape:
yield scrapy.Request(
response.urljoin(next_page_to_scrape),
callback=self.parse
)
@staticmethod
def __get_post_title(response) -> str:
"""Get the post title.
Args:
response: Web page.
Returns:
str: The post title.
"""
try:
return response.xpath("//header[@class = 'post-title']/h1/text()").extract_first()
except Exception as e:
print("Exception:", e)
@staticmethod
def __get_post_pub_date(response) -> datetime:
"""Get the post published date.
Args:
response: Web page.
Returns:
datetime: The post published date.
"""
try:
post_date_html: str = response.xpath("//p[@class = 'post-date']").extract_first()
if post_date_html:
post_date_soup = BeautifulSoup(post_date_html)
date: str = post_date_soup.find("strong").contents[0]
# day_of_the_week: str = post_date_soup.find("em").contents[0]
month_and_year: str = post_date_soup.find("span").contents[0]
date_str = "{} {}".format(date, month_and_year)
post_date = datetime.strptime(date_str, "%d %b %Y")
return post_date
except Exception as e:
print("Exception:", e)
@staticmethod
def __get_post_text(response) -> str:
"""Get the post text.
Args:
response: Web page.
Returns:
str: The post text.
"""
try:
post_html: str = response.xpath("//div[@class = 'post-entry']").extract_first()
if post_html:
advertisements_html: str = response \
.xpath("//div[@class = 'post-entry']//div[@class = 'wpcnt']") \
.extract_first()
unnecessary_scripts_html: str = response \
.xpath("//div[@class = 'post-entry']//div[@id = 'atatags-335202795']") \
.extract_first()
related_posts_html: str = response \
.xpath("//div[@class = 'post-entry']//div[@id = 'jp-post-flair']") \
.extract_first()
# Removes the advertisements, scripts and related posts text.
if advertisements_html:
post_html = post_html.replace(advertisements_html, "")
if unnecessary_scripts_html:
post_html = post_html.replace(unnecessary_scripts_html, "")
if related_posts_html:
post_html = post_html.replace(related_posts_html, "")
post_text_soup = BeautifulSoup(post_html)
post_text_list: list = post_text_soup.findAll(text=True) # Takes the text from the html code.
post_text_list_cleared: list = list(filter("\n".__ne__, post_text_list)) # Remove the "\n".
post_text = " ".join(post_text_list_cleared)
return post_text
except Exception as e:
print("Exception:", e)
@staticmethod
def __get_post_images(response) -> list:
"""Get the post images.
Args:
response: Web page.
Returns:
list: The images from the post.
"""
try:
return response.xpath("//div[@class = 'post-entry']//img/@src").extract()
except Exception as e:
print("Exception:", e)
@staticmethod
def __get_post_summary(response) -> list:
"""Get the post summary.
Args:
response: Web page.
Note:
Not all of the posts have summary.
Returns:
list: List of strings from the post (the summary).
"""
try:
return response.xpath("//div[@class = 'post-entry']/h3[@style = "
"'text-align:center;']/strong/span/text()").extract()
except Exception as e:
print("Exception:", e)
@staticmethod
def __get_event_date(response) -> str:
"""Get the post month and year of the event.
Args:
response: Web page.
Note:
Not all of the posts have event.
Example:
September 2017
Returns:
str: The event date.
"""
try:
return response.xpath("//div[@class = 'post-entry']/h3[@style = "
"'text-align:center;']/strong/span/text()").extract_first()
except Exception as e:
print("Exception:", e)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
11748,
15881,
88,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
628,
198,
4871,
5065,
38105,
41294,
7,
1416,
2416,
88,
... | 2.013536 | 3,029 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Validates table stored on the LocalFileSystem.
#
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.test_dimensions import create_single_exec_option_dimension
| [
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
2,
393,
517,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
198,
2,
9387,
351,
428,
670,
329,
3224,
1321,
198,
2,
5115,
6634,
9238,
13,
220,
383,
7054,... | 3.955285 | 246 |
import numpy as np
input_data = np.array([2,3])
weights = {
'node_0':np.array([1,1]),
'node_1':np.array([-1,1]),
'output':np.array([2,-1])
}
node_0_value = (input_data * weights['node_0']).sum()
node_1_value = (input_data * weights['node_1']).sum()
hidden_layer_values = np.array([node_0_value, node_1_value])
print(hidden_layer_values)
output = (hidden_layer_values * weights['output']).sum()
print(output)
| [
11748,
299,
32152,
355,
45941,
220,
198,
198,
15414,
62,
7890,
796,
45941,
13,
18747,
26933,
17,
11,
18,
12962,
198,
198,
43775,
796,
1391,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
705,
17440,
62,
15,
10354,
37659,
13,
1874... | 2.207729 | 207 |
from baseDatabaseManager import BaseDatabaseManager | [
6738,
2779,
38105,
13511,
1330,
7308,
38105,
13511
] | 6.375 | 8 |
from scrapy import cmdline
cmdline.execute("scrapy crawl xtzx -o result.csv".split()) | [
6738,
15881,
88,
1330,
23991,
1370,
198,
28758,
1370,
13,
41049,
7203,
1416,
2416,
88,
27318,
220,
742,
42592,
532,
78,
1255,
13,
40664,
1911,
35312,
28955
] | 3.148148 | 27 |
from distutils.core import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name = 'treeviz',
packages = ['treeviz'],
version = '1.1',
license='MIT',
description = 'Print tree in bash manner',
author = 'Chen Tsu Pei',
author_email = 'a5560648@gmail.com',
url = 'https://github.com/tsupei/treeviz',
download_url="https://github.com/tsupei/treeviz/archive/v1.1.tar.gz",
long_description=long_description,
long_description_content_type="text/markdown",
keywords = ['tree', 'treeviz'],
classifiers = [],
)
| [
6738,
1233,
26791,
13,
7295,
1330,
9058,
198,
198,
4480,
1280,
7203,
15675,
11682,
13,
9132,
1600,
366,
81,
4943,
355,
277,
71,
25,
198,
220,
220,
220,
890,
62,
11213,
796,
277,
71,
13,
961,
3419,
198,
198,
40406,
7,
198,
220,
143... | 2.663507 | 211 |
__author__ = 'MatrixRev'
import json
import codecs
import glob
import os
import sys
#path = u"C://Users//MatrixRev//Desktop//library_5//"
path="C://Users//MatrixRev//Desktop//mainLib//" # input file
outFile='C:/Users/MatrixRev/Desktop/books/output/mainsubjects.txt' #output file
pathNew = u"C://Users//MatrixRev//Desktop//newOut"
counter=0
subjects_list=[]
for root,dir,files in os.walk(path):
for file in files:
counter=counter+1
print(counter,root,dir,file)
if len(file)>0 :
if file.endswith(".json"):
with codecs.open(os.path.join(root, file), "rb",encoding="UTF-8") as fd:
json_data = json.load(fd)
select_num=json_data['isbn']
select_title=json_data['title']
select_data =json_data['subjects']
select_subTitle=json_data['subtitle']
select_Authors=json_data['authors']
select_Comments=json_data['comments']
n = len(json_data['subjects'])
print(n)
newFileName=file.replace('.json','.txt')
newdir=os.path.join(pathNew)
os.chdir(newdir)
with codecs.open(os.path.join(newdir,newFileName),'w',encoding='utf-8')as tf:
for l in list(select_data):
print(l,file=tf)
#for i in list(select_title):
print(select_title,file=tf)
for i in select_Comments:
print(i,file=tf)
for i in select_subTitle:
print(i,file=tf)
for i in range(0,len(select_Authors)):
print(select_Authors[i],file=tf)
# fd.write(n,"\n",select_title,"\n","subjects","\n")
#outfile.write(select_title)
# print("book Title : ",select_title,"\n")
# print("subjects is:")
for i in range(n-0):
print(select_data[i])
subjects_list.append(select_data[i]+" "+"***Title:"+" "+select_title+" "+"***link"+" "+root+"//"+file)
#fd.writelines(['%s\n'for sub in asubjects])
# for s in sub:
# fd.write("".join(s)+'\n')
f=len(subjects_list)
print(f)
#fd.close()
with codecs.open(outFile,'w',encoding='utf-8')as fh:
for sub in subjects_list:
if len(sub)>0:
#sub=sub.replace('-','')
sub=sub.lower()
sub=sub.strip('\n')
# print(sub,file='subject.txt')
fh.write(sub)
fh.write("\n")
| [
834,
9800,
834,
796,
705,
46912,
18009,
6,
198,
11748,
33918,
198,
11748,
40481,
82,
198,
11748,
15095,
198,
11748,
28686,
198,
11748,
25064,
198,
198,
2,
6978,
796,
334,
1,
34,
1378,
14490,
1003,
46912,
18009,
1003,
36881,
1003,
32016,... | 1.719928 | 1,671 |
# python ./data/compress.py $TORCH_HOME/ILSVRC2012/ $TORCH_HOME/ILSVRC2012-TAR tar
# python ./data/compress.py $TORCH_HOME/ILSVRC2012/ $TORCH_HOME/ILSVRC2012-ZIP zip
import os, sys
from pathlib import Path
if __name__ == '__main__':
assert len(sys.argv) == 4, 'invalid argv : {:}'.format(sys.argv)
source, destination = Path(sys.argv[1]), Path(sys.argv[2])
main(source, destination, sys.argv[3])
| [
2,
21015,
24457,
7890,
14,
5589,
601,
13,
9078,
720,
32961,
3398,
62,
39069,
14,
45484,
53,
7397,
6999,
14,
720,
32961,
3398,
62,
39069,
14,
45484,
53,
7397,
6999,
12,
51,
1503,
13422,
198,
2,
21015,
24457,
7890,
14,
5589,
601,
13,
... | 2.506173 | 162 |
# -*- coding: utf-8 -*-
# Copyright 2021 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
Test plugin file for netaddr tests: ipv6_ipv4_mapped
"""
from __future__ import absolute_import, division, print_function
from ansible_collections.ansible.utils.plugins.plugin_utils.base.ipaddress_utils import (
ip_address,
_need_ipaddress,
_validate_args,
)
__metaclass__ = type
DOCUMENTATION = """
name: ipv6_ipv4_mapped
author: Priyam Sahoo (@priyamsahoo)
version_added: "2.2.0"
short_description: Test if something appears to be a mapped IPv6 to IPv4 mapped address
description:
- This plugin checks if the provided value is a valid IPv4-mapped IPv6 address
options:
ip:
description:
- A string that represents the value against which the test is going to be performed
- For example:
- "::FFFF:10.1.1.1"
- "::AAAA:10.1.1.1"
- "helloworld"
type: str
required: True
notes:
"""
EXAMPLES = r"""
#### Simple examples
- name: Check if ::FFFF:10.1.1.1 is a valid IPv4-mapped IPv6 address
ansible.builtin.set_fact:
data: "{{ '::FFFF:10.1.1.1' is ansible.utils.ipv6_ipv4_mapped }}"
# TASK [Check if ::FFFF:10.1.1.1 is a valid IPv4-mapped IPv6 address] *************
# ok: [localhost] => {
# "ansible_facts": {
# "data": true
# },
# "changed": false
# }
- name: Check if ::AAAA:10.1.1.1 is not a valid IPv4-mapped IPv6 address
ansible.builtin.set_fact:
data: "{{ '::AAAA:10.1.1.1' is not ansible.utils.ipv6_ipv4_mapped }}"
# TASK [Check if ::AAAA:10.1.1.1 is not a valid IPv4-mapped IPv6 address] ******************
# ok: [localhost] => {
# "ansible_facts": {
# "data": true
# },
# "changed": false
# }
- name: Check if helloworld is not a valid IPv4-mapped IPv6 address
ansible.builtin.set_fact:
data: "{{ 'helloworld' is not ansible.utils.ipv6_ipv4_mapped }}"
# TASK [Check if helloworld is not a valid IPv4-mapped IPv6 address] ***********************
# ok: [localhost] => {
# "ansible_facts": {
# "data": true
# },
# "changed": false
# }
"""
RETURN = """
data:
description:
- If jinja test satisfies plugin expression C(true)
- If jinja test does not satisfy plugin expression C(false)
"""
@_need_ipaddress
def _ipv6_ipv4_mapped(ip):
""" Test if something appears to be a mapped IPv6 to IPv4 mapped address """
params = {"ip": ip}
_validate_args("ipv6_ipv4_mapped", DOCUMENTATION, params)
try:
if ip_address(ip).ipv4_mapped is None:
return False
return True
except Exception:
return False
class TestModule(object):
""" network jinja test"""
test_map = {"ipv6_ipv4_mapped": _ipv6_ipv4_mapped}
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
33448,
2297,
10983,
198,
2,
22961,
3611,
5094,
13789,
410,
18,
13,
15,
10,
198,
2,
357,
3826,
27975,
45761,
393,
3740,
1378,
2503,
13,
41791,
13,
2398,
14,
... | 2.326877 | 1,239 |
import json
import logging
import os
import subprocess
import sys
from pathlib import Path
import pytest
from deepdiff import DeepDiff
TEST_ASSETS_ROOT = (Path(__file__).parent.parent / "assets").resolve()
NEURO_EXTRAS_ROOT = Path(__file__).parent.parent.parent / "neuro_extras"
LOGGER = logging.getLogger(__name__)
@pytest.mark.skipif(sys.platform != "darwin", reason="Need sh to test this test.")
| [
11748,
33918,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
850,
14681,
198,
11748,
25064,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
12972,
9288,
198,
6738,
2769,
26069,
1330,
10766,
28813,
628,
198,
51,
6465,
62,
10705,
32... | 2.992593 | 135 |
from setuptools import setup
setup(
name='ktorrent',
version='0.4.7',
description='Fetches and parses data from Kickass Torrents.',
long_description=open('README.rst').read(),
license='MIT',
author='Udit Vasu',
author_email='admin@codenirvana.in',
url='https://github.com/codenirvana/kTorrent',
packages=['ktorrent'],
install_requires=[
"beautifulsoup4==4.4.1",
"requests==2.8.1"
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Internet',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
],
test_suite='nose.collector',
tests_require=['nose']
)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
74,
13165,
1156,
3256,
198,
220,
220,
220,
2196,
11639,
15,
13,
19,
13,
22,
3256,
198,
220,
220,
220,
6764,
11639,
37,
316,
2052,
290,
13544,
... | 2.539526 | 506 |
from torch.utils.data.dataset import Dataset
import os
import json
from PIL import Image
from torchvision import transforms
import logging
import numpy as np
import torch
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
SOS_token = 0
EOS_token = 1
| [
6738,
28034,
13,
26791,
13,
7890,
13,
19608,
292,
316,
1330,
16092,
292,
316,
198,
11748,
28686,
198,
11748,
33918,
198,
6738,
350,
4146,
1330,
7412,
198,
6738,
28034,
10178,
1330,
31408,
198,
11748,
18931,
198,
11748,
299,
32152,
355,
... | 3.114943 | 87 |
# Copyright 2013 Canonical Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Tests for create_volume TaskFlow """
import mock
from cinder import context
from cinder import exception
from cinder import test
from cinder.tests.unit import fake_consistencygroup
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit.image import fake as fake_image
from cinder.tests.unit.keymgr import mock_key_mgr
from cinder.tests.unit.volume.flows import fake_volume_api
from cinder.volume.flows.api import create_volume
from cinder.volume.flows.manager import create_volume as create_volume_manager
| [
2,
15069,
2211,
19507,
605,
12052,
13,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
... | 3.472464 | 345 |
import pytest
from trafficgenerator.tgn_utils import ApiType
@pytest.fixture(scope='class', autouse=True)
| [
198,
11748,
12972,
9288,
198,
198,
6738,
4979,
8612,
1352,
13,
83,
4593,
62,
26791,
1330,
5949,
72,
6030,
628,
198,
198,
31,
9078,
9288,
13,
69,
9602,
7,
29982,
11639,
4871,
3256,
1960,
1076,
28,
17821,
8,
628
] | 2.871795 | 39 |
import graphlab as gl
import re
import random
from copy import copy
import os
import graphlab.aggregate as agg
import array
import numpy as np
import sys
# Run this script in the same directory as the
train_path = "image-sframes/train-%d/"
valid_path = "image-sframes/validation-%d/"
X_data = gl.SFrame("image-sframes/train/")
# The classes were already balanced by create_image_sframe, so we
# don't need to balance them below.
if not (os.path.exists(train_path % 0) and os.path.exists(valid_path % 0)):
print "Skipping class 0; already present. If error, remove these directories and restart."
save_as_train_and_test(X_data, train_path % 0, valid_path % 0)
################################################################################
# Now do the other splitting parts
for mi in [1,2,3,4]:
if os.path.exists(train_path % mi) and os.path.exists(valid_path % mi):
print "Skipping class %d; already present. If error, remove these directories and restart." % mi
continue
print "Running class %d" % mi
X_data["class"] = (X_data["level"] >= mi)
X_data_local = copy(X_data)
n_class_0 = (X_data["class"] == 0).sum()
n_class_1 = (X_data["class"] == 1).sum()
if n_class_0 < n_class_1:
num_to_sample = n_class_1 - n_class_0
# Oversample the ones on the border
level_to_sample = mi - 1
class_to_sample = 0
else:
num_to_sample = n_class_0 - n_class_1
# Oversample the ones on the border
level_to_sample = mi
class_to_sample = 1
X_data_lvl = X_data[X_data["level"] == level_to_sample]
# Do one extra of the closest class to slightly oversample the hard examples.
n = min(X_data_lvl.num_rows(), num_to_sample)
X_data_local = X_data_local.append(X_data_lvl[:n])
num_to_sample -= n
if num_to_sample > 0:
X_data_class = X_data[X_data["class"] == class_to_sample]
while num_to_sample > 0:
n = min(X_data_class.num_rows(), num_to_sample)
X_data_local = X_data_local.append(X_data_class[:n])
num_to_sample -= n
# Sort the rows
X_data_local["_random_"] = np.random.uniform(size = X_data_local.num_rows())
X_data_local = X_data_local.sort("_random_")
del X_data_local["_random_"]
save_as_train_and_test(X_data_local, train_path % mi, valid_path % mi)
| [
11748,
4823,
23912,
355,
1278,
198,
11748,
302,
198,
11748,
4738,
198,
6738,
4866,
1330,
4866,
198,
11748,
28686,
198,
11748,
4823,
23912,
13,
9460,
49373,
355,
4194,
198,
11748,
7177,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
25064... | 2.464066 | 974 |
from flask import Flask, request, abort, jsonify, Response, json
from marketwatch import marketwatch as MW
from error import Error
app = Flask(__name__)
app.config['JSON_SORT_KEYS'] = False
@app.route('/stock')
@app.route('/stock/financial')
if(__name__ == "__main__"):
app.run(debug=True) | [
6738,
42903,
1330,
46947,
11,
2581,
11,
15614,
11,
33918,
1958,
11,
18261,
11,
33918,
198,
6738,
1910,
8340,
1330,
1910,
8340,
355,
29961,
198,
6738,
4049,
1330,
13047,
198,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
198,
1324,
13,
... | 2.911765 | 102 |
from setuptools import setup
name = 'pyDiamondsBackground'
version = '1.1'
release = '1.1.0'
setup(
name=name,
author='Marco Muellner',
author_email='muellnermarco@gmail.com',
version='1.1.2',
packages=['pyDiamondsBackground','pyDiamondsBackground/models'],
licencse = 'MIT',
description='An extension to pyDiamonds, intended for fitting pyDiamondsBackground signals of red giants',
long_description=open('README.rst').read(),
url='https://github.com/muma7490/PyDIAMONDS-Background',
install_requires=[
'numpy',
'pyDiamonds',
'sphinx'
]
) | [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
3672,
796,
705,
9078,
47710,
82,
21756,
6,
198,
9641,
796,
705,
16,
13,
16,
6,
198,
20979,
796,
705,
16,
13,
16,
13,
15,
6,
628,
198,
40406,
7,
198,
220,
220,
220,
1438,
28,
3672,
... | 2.526971 | 241 |
from main.dto.ContactDetailDto import ContactDetailDto
from main.repository.impl.ContactDetailRepositoryImpl import ContactDetailRepositoryImpl
from main.service.ContactDetailService import ContactDetailService
| [
6738,
1388,
13,
67,
1462,
13,
17829,
11242,
603,
35,
1462,
1330,
14039,
11242,
603,
35,
1462,
198,
6738,
1388,
13,
260,
1930,
37765,
13,
23928,
13,
17829,
11242,
603,
6207,
13264,
29710,
1330,
14039,
11242,
603,
6207,
13264,
29710,
198,... | 3.785714 | 56 |
from .vault import Vault, EntityClassId, ClientProperties, ServerProperties, SearchOperation, PropertySearch, SearchRule, SysAclBeh, connect
from .vault import generate_service_urls, VaultServices | [
6738,
764,
85,
1721,
1330,
23450,
11,
20885,
9487,
7390,
11,
20985,
2964,
18200,
11,
9652,
2964,
18200,
11,
11140,
32180,
11,
14161,
18243,
11,
11140,
31929,
11,
311,
893,
32,
565,
25267,
11,
2018,
198,
6738,
764,
85,
1721,
1330,
7716... | 3.92 | 50 |
import numpy as np
import pandas as pd
from mluem import LogisticRegression as LogReg
if __name__ == "__main__":
X, y = load_data()
main(X, y)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
285,
2290,
368,
1330,
5972,
2569,
8081,
2234,
355,
5972,
8081,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
220,
220,
198,
220,
220,... | 2.397059 | 68 |
from copy import deepcopy
from typing import Any, Dict
import optuna
import yaml
from optuna.integration import PyTorchLightningPruningCallback
from pytorch_lightning import Trainer, seed_everything
from cloud.dataset import CloudDataModule
from cloud.model import Cloud
if __name__ == "__main__":
with open("config.yml", "r") as f:
cfg = yaml.safe_load(f)
pruner: optuna.pruners.BasePruner = optuna.pruners.MedianPruner()
study = optuna.create_study(direction="maximize", pruner=pruner)
study.optimize(lambda x: objective(x, cfg), n_trials=100)
print("Number of finished trials: {}".format(len(study.trials)))
print("Best trial:")
trial = study.best_trial
print(" Value: {}".format(trial.value))
print(" Params: ")
for key, value in trial.params.items():
print(" {}: {}".format(key, value))
| [
6738,
4866,
1330,
2769,
30073,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
198,
198,
11748,
2172,
9613,
198,
11748,
331,
43695,
198,
6738,
2172,
9613,
13,
18908,
1358,
1330,
9485,
15884,
354,
15047,
768,
47,
5143,
278,
47258,
198,
6738,... | 2.734177 | 316 |
import Adafruit_BBIO.GPIO as GPIO
# Dictionary with the PIN number associated to each room
house = {
"KITCHEN": "P9_12",
"BATHROOM": "P9_15",
"LIVINGROOM": "P9_42"
}
def get_status(pin_number):
"""Verify the status of the GPIO pin with number equal to `pin_number`.
Parameters
----------
pin_number : str
The PIN to check the status.
Returns
-------
str
String with information if the GPIO pin is ON or OFF.
"""
if GPIO.input(pin_number):
return "ON"
return "OFF"
def compose_status():
"""Compose a string with the status of all the rooms (i.e. if they are turned on or off).
Returns
-------
str
String with information if the room's LED is turned on or off.
"""
return "KITCHEN {}, BATHROOM {}, LIVING ROOM {}".format(
get_status(house["KITCHEN"]),
get_status(house["BATHROOM"]),
get_status(house["LIVINGROOM"])
)
def decode_command(command):
"""Decode command from the user and call the appropriate function.
After receiving the command from the user, this function will try to decode
to the available sentences supported by the application. If there is a match,
this function will call the appropriate function to perform the requested
action.
Parameters
----------
command : bytes
The command spoken by the user.
Returns
-------
bool
True if the command could be performed and False otherwise.
"""
command = command.decode().lower()
if command == "kitchen on":
turn_on("KITCHEN")
elif command == "kitchen off":
turn_off("KITCHEN")
elif command == "bathroom on":
turn_on("BATHROOM")
elif command == "bathroom off":
turn_off("BATHROOM")
elif command == "living room on":
turn_on("LIVINGROOM")
elif command == "living room off":
turn_off("LIVINGROOM")
elif command == "all on":
turn_all_on()
elif command == "all off":
turn_all_off()
else:
print("Could not decode command.")
return False
return True
def init_leds():
"""Set all GPIO pins associated to the rooms as output."""
for room in house:
print("Component from {} configured, mode out.".format(room))
GPIO.setup(house[room], GPIO.OUT)
def turn_on(room):
"""Turn on the LED of a specific room.
Parameters
----------
room : str
Name of the room to turn on the LED.
"""
print("Turning on {}".format(room))
GPIO.output(house[room], GPIO.HIGH)
def turn_off(room):
"""Turn off the LED of a specific room.
Parameters
----------
room : str
Name of the room to turn off the LED.
"""
print("Turning off {}".format(room))
GPIO.output(house[room], GPIO.LOW)
def turn_all_on():
"""Turn on the LEDs of all the rooms."""
for room in house:
turn_on(room)
def turn_all_off():
"""Turn off the LEDs of all the rooms."""
for room in house:
turn_off(room)
| [
11748,
1215,
1878,
4872,
62,
33,
3483,
46,
13,
16960,
9399,
355,
50143,
198,
198,
2,
28261,
351,
262,
34279,
1271,
3917,
284,
1123,
2119,
198,
4803,
796,
1391,
198,
197,
1,
42,
31949,
1677,
1298,
366,
47,
24,
62,
1065,
1600,
198,
... | 2.916933 | 939 |
# Copyright 2018 BLEMUNDSBURY AI LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from math import ceil
from typing import List, Tuple, Dict, Optional
import json
import numpy as np
from hashlib import sha256
from functools import partial
from bisect import bisect_right
from cape_responder.responder_settings import NUM_WORKERS_PER_REQUEST
from cape_document_manager.document_store import SearchResult, DocumentStore
from cape_document_manager.annotation_store import AnnotationStore
from cape_machine_reader.cape_machine_reader_core import MachineReader, MachineReaderConfiguration
from cape_document_qa import cape_docqa_machine_reader
from cape_api_helpers.exceptions import UserException
from cape_api_helpers.text_responses import ERROR_INVALID_THRESHOLD
from cape_responder.task_manager import connect
THRESHOLD_MAP = {
'savedreply': {
'VERYHIGH': 0.7,
'HIGH': 0.5,
'MEDIUM': 0.25,
'LOW': 0.15,
'VERYLOW': 0.0
},
'document': {
'VERYHIGH': 0.0,
'HIGH': 0.,
'MEDIUM': 0.0,
'LOW': 0.0,
'VERYLOW': 0.0
}
}
SPEED_OR_ACCURACY_CHUNKS_MAP = {'speed': 0.25, 'balanced': 1, 'accuracy': 4, 'total': -1}
MACHINE_READER_MODEL_TYPE_TO_USE = 'CAPE_DOCUMENT_QA'
| [
2,
15069,
2864,
347,
2538,
44,
4944,
5258,
38926,
56,
9552,
40880,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
... | 2.826367 | 622 |
import pandas as pd
| [
11748,
19798,
292,
355,
279,
67,
628
] | 3 | 7 |
# spark-submit --jars googlegenomics-spark-examples-assembly-1.0.jar \
# --driver-class-path googlegenomics-spark-examples-assembly-1.0.jar \
# src/main/python/variants_pca.py --client-secrets client_secrets.json
import json
import numpy
import operator
import sys
import pyspark
from pyspark import serializers
import pyspark.conf
from pyspark.mllib import common as mllib_common
from pyspark.mllib import linalg
import pyspark.rdd
conf = pyspark.conf.SparkConf()
sc = pyspark.SparkContext(conf=conf)
def prepare_call_data(py_rdd, py_id_to_index):
"""Return an RDD[Seq[int]] from the RDD[(VariantKey, Variant)].
Args:
py_rdd: An RDD of (VariantKey, Variant) of all Variants matching the
search criteria.
py_id_to_index: A dictionary of string to int, giving the indices of
callset names in ``py_rdd``.
Returns:
An RDD[Seq[int]] in the same order of the input RDD, each entry is a
list of indices of variant calls.
"""
# Obtain all samples that have at least one matching call.
samples_with_variant = (py_rdd.
map(lambda v: v.get('calls', [])).
map(lambda calls: [c for c in calls if any(c['genotype'])]).
filter(lambda calls: len(calls) > 0)
)
# Obtain the callset name from the samples.
callset_names = (samples_with_variant.
map(lambda callset: [c['callSetId'] for c in callset])
)
# Convert all names (strings) to indices (ints).
sc = pyspark.SparkContext._active_spark_context
broadcast_index_map = sc.broadcast(py_id_to_index)
call_rdd = callset_names.map(
lambda callset: [broadcast_index_map.value[c] for c in callset]
)
return call_rdd
def calculate_similarity_matrix(call_rdd, matrix_size):
"""Return an RDD[(int, int), int] where each entry is similarity value of
call ``x``, with respect to call ``y``.
Args:
call_rdd: An RDD[Seq[int]] as returned by ``prepare_call_data``.
matrix_size: The size (N) of the N x N matrix.
Returns:
An RDD[(x, y), sim_value] where each entry is similarity value of call
``x`` with respect to call ``y``.
"""
sim_matrix = (call_rdd.
mapPartitions(sum_similarity).
reduceByKey(operator.add)
)
return sim_matrix
def center_matrix(sim_matrix, row_count):
"""Center the rows and columns of a similarity matrix.
Args:
sim_matrix: A similarity matrix as returned by
``calculate_similarity_matrix``.
row_count: The size (N) of the N x N matrix.
Returns:
An RDD[int, (int, float)] representing centered rows. The first int is
the row index, the (int, float) tuple is the column index, and the
centered value.
"""
# Row-by-row (row major) RDD. Each row is a list of (column, value).
entries = (sim_matrix.
map(lambda ((y, x), v): (y, (x, float(v)))).
groupByKey().
sortByKey(True).
cache()
)
row_sums = entries.map(lambda (y, xvs): sum(v for (x, v) in xvs)).collect()
matrix_sum = sum(row_sums)
matrix_mean = float(matrix_sum) / row_count / row_count
sc = pyspark.SparkContext._active_spark_context
broadcast_row_sums = sc.broadcast(row_sums)
return entries.map(center_rows)
def perform_pca(matrix, row_count, nr_principal_components=2):
"""Return principal components of the input matrix.
This function uses MLlib's ``RowMatrix`` to compute principal components.
Args:
matrix: An RDD[int, (int, float)] representing a sparse matrix. This
is returned by ``center_matrix`` but it is not required to center
the matrix first.
row_count: The size (N) of the N x N ``matrix``.
nr_principal_components: Number of components we want to obtain. This
value must be less than or equal to the number of rows in the input
square matrix.
Returns:
An array of ``nr_principal_components`` columns, and same number of rows
as the input ``matrix``. This array is a ``numpy`` array.
"""
py_rdd = matrix.map(lambda row: linalg.Vectors.sparse(row_count, row))
sc = pyspark.SparkContext._active_spark_context
java_rdd = mllib_common._py2java(sc, py_rdd)
scala_rdd = java_rdd.rdd()
sc = pyspark.SparkContext._active_spark_context
row_matrix = (sc._jvm.org.apache.spark.mllib.linalg.distributed.
RowMatrix(scala_rdd)
)
pca = row_matrix.computePrincipalComponents(nr_principal_components)
pca = mllib_common._java2py(sc, pca)
return pca.toArray()
pca(sys.argv[1:])
| [
2,
9009,
12,
46002,
1377,
73,
945,
467,
519,
1455,
268,
31994,
12,
2777,
668,
12,
1069,
12629,
12,
41873,
12,
16,
13,
15,
13,
9491,
3467,
198,
2,
220,
220,
220,
220,
1377,
26230,
12,
4871,
12,
6978,
467,
519,
1455,
268,
31994,
1... | 2.436975 | 1,904 |