content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
#
# BSD 3-Clause License
#
# Copyright (c) 2017-2018, plures
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Functions for generating test cases.
import sys
from itertools import accumulate, count, product
from collections import namedtuple
from random import randrange
from ndtypes import ndt, ApplySpec
from _testbuffer import get_sizeof_void_p
SIZEOF_PTR = get_sizeof_void_p()
Mem = namedtuple("Mem", "itemsize align")
# ======================================================================
# Check contiguous fixed dimensions
# ======================================================================
def c_datasize(t):
"""Check the datasize of contiguous arrays."""
datasize = t.itemsize
for v in t.shape:
datasize *= v
return datasize
# ======================================================================
# Check fixed dimensions with arbitary strides
# ======================================================================
def verify_datasize(t):
"""Verify the datasize of fixed dimensions with arbitrary strides."""
if t.itemsize == 0:
return t.datasize == 0
if t.datasize % t.itemsize:
return False
if t.ndim <= 0:
return t.ndim == 0 and not t.shape and not t.strides
if any(v < 0 for v in t.shape):
return False
if any(v % t.itemsize for v in t.strides):
return False
if 0 in t.shape:
return t.datasize == 0
imin = sum(t.strides[j]*(t.shape[j]-1) for j in range(t.ndim)
if t.strides[j] <= 0)
imax = sum(t.strides[j]*(t.shape[j]-1) for j in range(t.ndim)
if t.strides[j] > 0)
return t.datasize == (abs(imin) + imax + t.itemsize)
# ======================================================================
# Typed values
# ======================================================================
DTYPE_TEST_CASES = [
# Tuples
("()", Mem(itemsize=0, align=1)),
("(complex128)", Mem(itemsize=16, align=8)),
("(int8, int64)", Mem(itemsize=16, align=8)),
("(int8, int64, pack=1)", Mem(itemsize=9, align=1)),
("(int8, int64, pack=2)", Mem(itemsize=10, align=2)),
("(int8, int64, pack=4)", Mem(itemsize=12, align=4)),
("(int8, int64, pack=8)", Mem(itemsize=16, align=8)),
("(int8, int64, pack=16)", Mem(itemsize=32, align=16)),
("(int8, int64, align=1)", Mem(itemsize=16, align=8)),
("(int8, int64, align=2)", Mem(itemsize=16, align=8)),
("(int8, int64, align=4)", Mem(itemsize=16, align=8)),
("(int8, int64, align=8)", Mem(itemsize=16, align=8)),
("(int8, int64, align=16)", Mem(itemsize=16, align=16)),
("(int8 |align=1|, int64)", Mem(itemsize=16, align=8)),
("(int8 |align=2|, int64)", Mem(itemsize=16, align=8)),
("(int8 |align=4|, int64)", Mem(itemsize=16, align=8)),
("(int8 |align=8|, int64)", Mem(itemsize=16, align=8)),
("(int8 |align=16|, int64)", Mem(itemsize=16, align=16)),
("(uint16, (complex64))", Mem(itemsize=12, align=4)),
("(uint16, (complex64), pack=1)", Mem(itemsize=10, align=1)),
("(uint16, (complex64), pack=2)", Mem(itemsize=10, align=2)),
("(uint16, (complex64), pack=4)", Mem(itemsize=12, align=4)),
("(uint16, (complex64), pack=8)", Mem(itemsize=16, align=8)),
("(uint16, (complex64), align=1)", Mem(itemsize=12, align=4)),
("(uint16, (complex64), align=2)", Mem(itemsize=12, align=4)),
("(uint16, (complex64), align=4)", Mem(itemsize=12, align=4)),
("(uint16, (complex64), align=8)", Mem(itemsize=16, align=8)),
# References to tuples
("&(uint16, (complex64), align=1)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("(uint16, &(complex64), pack=1)", Mem(itemsize=2+SIZEOF_PTR, align=1)),
# Constructor containing references to tuples
("Some(&(uint16, (complex64), align=1))", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("Some((uint16, &(complex64), pack=1))", Mem(itemsize=2+SIZEOF_PTR, align=1)),
# Optional tuples
("?(uint16, (complex64), align=1)", Mem(itemsize=12, align=4)),
("(uint16, ?(complex64), align=1)", Mem(itemsize=12, align=4)),
("?(uint16, ?(complex64), align=1)", Mem(itemsize=12, align=4)),
("?(uint16, (complex64), align=2)", Mem(itemsize=12, align=4)),
("(uint16, ?(complex64), align=4)", Mem(itemsize=12, align=4)),
("?(uint16, ?(complex64), align=8)", Mem(itemsize=16, align=8)),
# References to optional tuples or tuples with optional subtrees
("&?(uint16, (complex64), align=1)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("&(uint16, ?(complex64), align=1)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
# Constructor containing optional tuples or tuples with optional subtrees
("Some(?(uint16, (complex64), align=1))", Mem(itemsize=12, align=4)),
("Some((uint16, ?(complex64), align=1))", Mem(itemsize=12, align=4)),
# Records
("{}", Mem(itemsize=0, align=1)),
("{x: complex128}", Mem(itemsize=16, align=8)),
("{x: int8, y: int64}", Mem(itemsize=16, align=8)),
("{x: int8, y: int64, pack=1}", Mem(itemsize=9, align=1)),
("{x: int8, y: int64, pack=2}", Mem(itemsize=10, align=2)),
("{x: int8, y: int64, pack=4}", Mem(itemsize=12, align=4)),
("{x: int8, y: int64, pack=8}", Mem(itemsize=16, align=8)),
("{x: int8, y: int64, pack=16}", Mem(itemsize=32, align=16)),
("{x: uint16, y: {z: complex128}}", Mem(itemsize=24, align=8)),
("{x: uint16, y: {z: complex128, align=16}}", Mem(itemsize=32, align=16)),
("{x: uint16, y: {z: complex128}, align=16}", Mem(itemsize=32, align=16)),
# Primitive types
("bool", Mem(itemsize=1, align=1)),
("int8", Mem(itemsize=1, align=1)),
("int16", Mem(itemsize=2, align=2)),
("int32", Mem(itemsize=4, align=4)),
("int64", Mem(itemsize=8, align=8)),
("uint8", Mem(itemsize=1, align=1)),
("uint16", Mem(itemsize=2, align=2)),
("uint32", Mem(itemsize=4, align=4)),
("uint64", Mem(itemsize=8, align=8)),
("float32", Mem(itemsize=4, align=4)),
("float64", Mem(itemsize=8, align=8)),
("complex64", Mem(itemsize=8, align=4)),
("complex128", Mem(itemsize=16, align=8)),
# Primitive optional types
("?bool", Mem(itemsize=1, align=1)),
("?int8", Mem(itemsize=1, align=1)),
("?int16", Mem(itemsize=2, align=2)),
("?int32", Mem(itemsize=4, align=4)),
("?int64", Mem(itemsize=8, align=8)),
("?uint8", Mem(itemsize=1, align=1)),
("?uint16", Mem(itemsize=2, align=2)),
("?uint32", Mem(itemsize=4, align=4)),
("?uint64", Mem(itemsize=8, align=8)),
("?float32", Mem(itemsize=4, align=4)),
("?float64", Mem(itemsize=8, align=8)),
("?complex64", Mem(itemsize=8, align=4)),
("?complex128", Mem(itemsize=16, align=8)),
# References
("&bool", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("&int8", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("&int16", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("&int32", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("&int64", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("ref(uint8)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("ref(uint16)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("ref(uint32)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("ref(uint64)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("ref(float32)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("ref(float64)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("ref(complex64)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("ref(complex128)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
# Optional references
("?&bool", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("?&int8", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("?&int16", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("?&int32", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("?&int64", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("?ref(uint8)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("?ref(uint16)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("?ref(uint32)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("?ref(uint64)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("?ref(float32)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("?ref(float64)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("?ref(complex64)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("?ref(complex128)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
# References to optional types
("&?bool", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("&?int8", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("&?int16", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("&?int32", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("&?int64", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("ref(?uint8)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("ref(?uint16)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("ref(?uint32)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("ref(?uint64)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("ref(?float32)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("ref(?float64)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("ref(?complex64)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("ref(?complex128)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
# Constructors
("Some(bool)", Mem(itemsize=1, align=1)),
("Some(int8)", Mem(itemsize=1, align=1)),
("Some(int16)", Mem(itemsize=2, align=2)),
("Some(int32)", Mem(itemsize=4, align=4)),
("Some(int64)", Mem(itemsize=8, align=8)),
("Some(uint8)", Mem(itemsize=1, align=1)),
("Some(uint16)", Mem(itemsize=2, align=2)),
("Some(uint32)", Mem(itemsize=4, align=4)),
("Some(uint64)", Mem(itemsize=8, align=8)),
("Some(float32)", Mem(itemsize=4, align=4)),
("Some(float64)", Mem(itemsize=8, align=8)),
("Some(complex64)", Mem(itemsize=8, align=4)),
("Some(complex128)", Mem(itemsize=16, align=8)),
# Optional constructors
("?Some(bool)", Mem(itemsize=1, align=1)),
("?Some(int8)", Mem(itemsize=1, align=1)),
("?Some(int16)", Mem(itemsize=2, align=2)),
("?Some(int32)", Mem(itemsize=4, align=4)),
("?Some(int64)", Mem(itemsize=8, align=8)),
("?Some(uint8)", Mem(itemsize=1, align=1)),
("?Some(uint16)", Mem(itemsize=2, align=2)),
("?Some(uint32)", Mem(itemsize=4, align=4)),
("?Some(uint64)", Mem(itemsize=8, align=8)),
("?Some(float32)", Mem(itemsize=4, align=4)),
("?Some(float64)", Mem(itemsize=8, align=8)),
("?Some(complex64)", Mem(itemsize=8, align=4)),
("?Some(complex128)", Mem(itemsize=16, align=8)),
# Constructors containing optional types
("Some(?bool)", Mem(itemsize=1, align=1)),
("Some(?int8)", Mem(itemsize=1, align=1)),
("Some(?int16)", Mem(itemsize=2, align=2)),
("Some(?int32)", Mem(itemsize=4, align=4)),
("Some(?int64)", Mem(itemsize=8, align=8)),
("Some(?uint8)", Mem(itemsize=1, align=1)),
("Some(?uint16)", Mem(itemsize=2, align=2)),
("Some(?uint32)", Mem(itemsize=4, align=4)),
("Some(?uint64)", Mem(itemsize=8, align=8)),
("Some(?float32)", Mem(itemsize=4, align=4)),
("Some(?float64)", Mem(itemsize=8, align=8)),
("Some(?complex64)", Mem(itemsize=8, align=4)),
("Some(?complex128)", Mem(itemsize=16, align=8)),
]
# ======================================================================
# Broadcasting
# ======================================================================
BROADCAST_TEST_CASES = [
dict(sig=ndt("uint8 -> float64"),
args=[ndt("uint8")],
out=None,
spec= ApplySpec(
flags = 'C|Fortran|Strided|Xnd',
outer_dims = 0,
nin = 1,
nout = 1,
nargs = 2,
types = [ndt("uint8"), ndt("float64")])),
dict(sig=ndt("... * uint8 -> ... * float64"),
args=[ndt("2 * uint8")],
out=None,
spec=ApplySpec(
flags = 'OptZ|OptC|OptS|C|Fortran|Strided|Xnd',
outer_dims = 1,
nin = 1,
nout = 1,
nargs = 2,
types = [ndt("2 * uint8"), ndt("2 * float64")])),
dict(sig=ndt("F[... * uint8] -> F[... * float64]"),
args=[ndt("!2 * 3 * uint8")],
out=None,
spec=ApplySpec(
flags = 'OptS|C|Fortran|Strided|Xnd',
outer_dims = 2,
nin = 1,
nout = 1,
nargs = 2,
types = [ndt("!2 * 3 * uint8"), ndt("!2 * 3 * float64")])),
dict(sig=ndt("... * uint8 -> ... * float64"),
args=[ndt("fixed(shape=2, step=10) * uint8")],
out=None,
spec=ApplySpec(
flags = 'OptS|C|Fortran|Strided|Xnd',
outer_dims = 1,
nin = 1,
nout = 1,
nargs = 2,
types = [ndt("fixed(shape=2, step=10) * uint8"), ndt("2 * float64")])),
dict(sig=ndt("... * N * uint8 -> ... * N * float64"),
args=[ndt("fixed(shape=2, step=10) * uint8")],
out=None,
spec=ApplySpec(
flags = 'Strided|Xnd',
outer_dims = 0,
nin = 1,
nout = 1,
nargs = 2,
types = [ndt("fixed(shape=2, step=10) * uint8"), ndt("2 * float64")])),
dict(sig=ndt("... * N * uint8 -> ... * N * float64"),
args=[ndt("2 * 3 * uint8")],
out=None,
spec=ApplySpec(
flags = 'OptZ|OptC|OptS|C|Fortran|Strided|Xnd' ,
outer_dims = 1,
nin = 1,
nout = 1,
nargs = 2,
types = [ndt("2 * 3 * uint8"), ndt("2 * 3 * float64")])),
dict(sig=ndt("... * N * M * uint8 -> ... * N * M * float64"),
args=[ndt("2 * 3 * uint8")],
out=None,
spec=ApplySpec(
flags = 'C|Strided|Xnd',
outer_dims = 0,
nin = 1,
nout = 1,
nargs = 2,
types = [ndt("2 * 3 * uint8"), ndt("2 * 3 * float64")])),
dict(sig=ndt("var... * float64 -> var... * float64"),
args=[ndt("var(offsets=[0,2]) * var(offsets=[0,4,11]) * float64")],
out=None,
spec=ApplySpec(
flags = 'Xnd',
outer_dims = 2,
nin = 1,
nout = 1,
nargs = 2,
types = [ndt("var(offsets=[0,2]) * var(offsets=[0,4,11]) * float64"),
ndt("var(offsets=[0,2]) * var(offsets=[0,4,11]) * float64")])),
]
| [
2,
198,
2,
347,
10305,
513,
12,
2601,
682,
13789,
198,
2,
198,
2,
15069,
357,
66,
8,
2177,
12,
7908,
11,
458,
942,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
351,... | 2.22079 | 7,215 |
# coding=utf-8
from openerp import models, fields, api
from ..controllers import client
| [
2,
19617,
28,
40477,
12,
23,
198,
198,
6738,
21996,
79,
1330,
4981,
11,
7032,
11,
40391,
198,
6738,
11485,
3642,
36667,
1330,
5456,
628,
198
] | 3.5 | 26 |
# Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Transformer speech recognition model (pytorch)."""
from argparse import Namespace
from distutils.util import strtobool
import logging
import math
import json
import numpy as np
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import numpy
import torch
from espnet.nets.asr_interface import ASRInterface
from espnet.nets.ctc_prefix_score import CTCPrefixScore
from espnet.nets.e2e_asr_common import end_detect
from espnet.nets.e2e_asr_common import ErrorCalculator
from espnet.nets.pytorch_backend.ctc import CTC
from espnet.nets.pytorch_backend.e2e_asr import CTC_LOSS_THRESHOLD
from espnet.nets.pytorch_backend.e2e_asr import Reporter
from espnet.nets.pytorch_backend.nets_utils import get_subsample
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask
from espnet.nets.pytorch_backend.nets_utils import th_accuracy
from espnet.nets.pytorch_backend.rnn.decoders import CTC_SCORING_RATIO
from espnet.nets.pytorch_backend.transformer.add_sos_eos import add_sos_eos
from espnet.nets.pytorch_backend.transformer.attention import MultiHeadedAttention
from espnet.nets.pytorch_backend.transformer.decoder import Decoder
from espnet.nets.pytorch_backend.transformer.encoder import Encoder
from espnet.nets.pytorch_backend.transformer.initializer import initialize
from espnet.nets.pytorch_backend.transformer.label_smoothing_loss import (
LabelSmoothingLoss, # noqa: H301
)
from espnet.nets.pytorch_backend.transformer.mask import subsequent_mask
from espnet.nets.pytorch_backend.transformer.mask import target_mask
from espnet.nets.pytorch_backend.transformer.plot import PlotAttentionReport
from espnet.nets.scorers.ctc import CTCPrefixScorer
supported_rnns = {
'lstm': nn.LSTM,
'rnn': nn.RNN,
'gru': nn.GRU
}
supported_rnns_inv = dict((v, k) for k, v in supported_rnns.items())
# Wang et al 2016 - Lookahead Convolution Layer for Unidirectional Recurrent Neural Networks
# input shape - sequence, batch, feature - TxNxH
# output shape - same as input
class E2E(ASRInterface, torch.nn.Module):
"""E2E module.
:param int idim: dimension of inputs
:param int odim: dimension of outputs
:param Namespace args: argument Namespace containing options
"""
@staticmethod
def add_arguments(parser):
"""Add arguments."""
group = parser.add_argument_group("deepspeech model setting")
group.add_argument(
"--deepspeech2-rnn-hidden-size", default=768, type=int, help="Number of hidden dimension"
)
group.add_argument(
"--deepspeech2-nb-layers", default=5, type=int, help=""
)
group.add_argument(
"--deepspeech2-rnn-type", default="nn.LSTM", type=str, help=""
)
group.add_argument(
"--deepspeech2-context", default=20, type=int, help=""
)
group.add_argument(
"--deepspeech2-bidirectional", default=True, type=bool, help=""
)
group.add_argument(
"--deepspeech2-init",
type=str,
default="pytorch",
choices=[
"pytorch",
"xavier_uniform",
"xavier_normal",
"kaiming_uniform",
"kaiming_normal",
],
help="how to initialize deepspeech parameters",
)
group.add_argument(
"--dropout-rate",
default=0.0,
type=float,
help="Dropout rate for the encoder",
)
return parser
# @property
# def attention_plot_class(self):
# """Return PlotAttentionReport."""
# return PlotAttentionReport
def __init__(self, idim, odim, args, ignore_id=-1):
"""Construct an E2E object.
:param int idim: dimension of inputs
:param int odim: dimension of outputs
:param Namespace args: argument Namespace containing options
"""
super(E2E, self).__init__()
self.args = args
self.hidden_size = self.args.deepspeech2_rnn_hidden_size # 768
self.hidden_layers = self.args.deepspeech2_nb_layers # 5
self.rnn_type = eval(self.args.deepspeech2_rnn_type) # nn.LSTM
# self.audio_conf = self.config.feature
self.context = self.args.deepspeech2_context # 20
# with open(self.config.data.label_dir, 'r') as f:
# labels = json.load(f)
self.labels = args.char_list
self.bidirectional = self.args.deepspeech2_bidirectional
self.subsample = get_subsample(args, mode="asr", arch="transformer")
# sample_rate = self.audio_conf.sample_rate # 8000
# window_size = self.audio_conf.window_size / 1000.0 # 0.02 => 0.025
self.idim = idim
self.num_classes = odim
self.conv = MaskConv(nn.Sequential(
nn.Conv2d(1, 32, kernel_size=(41, 11), stride=(2, 1), padding=(20, 5)),
nn.BatchNorm2d(32),
nn.Hardtanh(0, 20, inplace=True),
nn.Conv2d(32, 32, kernel_size=(21, 11), stride=(2, 1), padding=(10, 5)),
nn.BatchNorm2d(32),
nn.Hardtanh(0, 20, inplace=True)
))
# Based on above convolutions and spectrogram size using conv formula (W - F + 2P)/ S+1
# rnn_input_size = int(math.floor((sample_rate * window_size) / 2) + 1)
rnn_input_size = self.idim
rnn_input_size = int(math.floor(rnn_input_size + 2 * 20 - 41) / 2 + 1)
rnn_input_size = int(math.floor(rnn_input_size + 2 * 10 - 21) / 2 + 1)
rnn_input_size *= 32
rnns = []
# print('rnn_input_size', rnn_input_size)
rnn = BatchRNN(input_size=rnn_input_size, hidden_size=self.hidden_size, rnn_type=self.rnn_type,
bidirectional=self.bidirectional, batch_norm=False)
rnns.append(('0', rnn))
for x in range(self.hidden_layers - 1):
rnn = BatchRNN(input_size=self.hidden_size, hidden_size=self.hidden_size, rnn_type=self.rnn_type,
bidirectional=self.bidirectional)
rnns.append(('%d' % (x + 1), rnn))
self.rnns = nn.Sequential(OrderedDict(rnns))
self.lookahead = nn.Sequential(
# consider adding batch norm?
Lookahead(self.hidden_size, context=self.context),
nn.Hardtanh(0, 20, inplace=True)
) if not self.bidirectional else None
fully_connected = nn.Sequential(
nn.BatchNorm1d(self.hidden_size),
nn.Linear(self.hidden_size, self.num_classes, bias=False),
)
self.fc = nn.Sequential(
SequenceWise(fully_connected),
)
self.inference_softmax = InferenceBatchSoftmax()
if args.report_cer or args.report_wer:
self.error_calculator = ErrorCalculator(
args.char_list,
args.sym_space,
args.sym_blank,
args.report_cer,
args.report_wer,
)
else:
self.error_calculator = None
self.ctc = CTC(
odim, None, args.dropout_rate,
ctc_type=args.ctc_type, reduce=False,
ctc_lo=self.fc,
)
self.reset_parameters(args)
self.rnnlm = None
self.reporter = Reporter()
self.sos = odim - 1
self.eos = odim - 1
self.ignore_id = ignore_id
def reset_parameters(self, args):
"""Initialize parameters."""
# initialize parameters
initialize(self, args.deepspeech2_init)
def get_seq_lens(self, input_length):
"""
Given a 1D Tensor or Variable containing integer sequence lengths, return a 1D tensor or variable
containing the size sequences that will be output by the network.
:param input_length: 1D Tensor
:return: 1D Tensor scaled by model
"""
seq_len = input_length
for m in self.conv.modules():
if type(m) == nn.modules.conv.Conv2d:
seq_len = ((seq_len + 2 * m.padding[1] - m.dilation[1] * (m.kernel_size[1] - 1) - 1) // m.stride[1] + 1)
return seq_len.int()
def forward(self, x, lengths, trns):
'''
:param torch.Tensor x: batch of padded source sequences (B, Tmax, idim)
'''
x = x.transpose(1,2).unsqueeze(1) # (B, 1, idim, Tmax)
# logging.warning(f'{x.size()} {lengths}')
# logging.warning(f'DeepSpeech2 [x size] {x.size()}')
# lengths = lengths.cpu().int()
seq_len = self.get_seq_lens(lengths)
# logging.warning(f'data type{ type(lengths) } {type(seq_len)} {lengths} {seq_len}')
# print('output_lengths', output_lengths, x.size())
x, _ = self.conv(x, seq_len.int())
# logging.warning(f'DeepSpeech2 [CONV x size] {x.size()}')
sizes = x.size()
x = x.view(sizes[0], sizes[1] * sizes[2], sizes[3]) # Collapse feature dimension
x = x.transpose(1, 2).transpose(0, 1).contiguous() # TxNxH
# logging.warning(f't n h {x.size()}')
for rnn in self.rnns:
x = rnn(x, seq_len.int())
# if not self.bidirectional: # no need for lookahead layer in bidirectional
# x = self.lookahead(x)
x = x.transpose(0, 1)
# target_lengths = trns.new([len(y[y != self.PAD_token]) for y in trns])
# self.ctc(log_probs, hs_len, ys_pad)
# logging.warning(f'Deepspeech [Size] { x.size() } {seq_len.size()} {trns.size()} {trns}')
loss_ctc_nonreduce = self.ctc(x, seq_len, trns,)
loss_ctc_nonreduce[torch.isinf(loss_ctc_nonreduce)] = 0
loss_ctc_nonreduce[torch.isnan(loss_ctc_nonreduce)] = 0
loss_ctc = loss_ctc_nonreduce[loss_ctc_nonreduce!=0].mean() if any(loss_ctc_nonreduce!=0) else 0
self.loss_ctc_nonreduce = loss_ctc_nonreduce
# if self.error_calculator is not None:
# ys_hat = self.ctc.argmax(hs_pad.view(batch_size, -1, self.adim)).data
# cer_ctc = self.error_calculator(ys_hat.cpu(), ys_pad.cpu(), is_ctc=True)
# else:
cer_ctc = None
if not self.training and self.error_calculator is not None:
ys_hat = self.ctc.argmax(x).data
cer_ctc = self.error_calculator(ys_hat.cpu(), ys_pad.cpu(), is_ctc=True)
if not self.training:
self.ctc.softmax(x)
# loss = self.ctc_loss(log_probs, trns, output_lengths, target_lengths)
# loss = loss.div(target_lengths.float())
self.loss = loss_ctc
loss_data = float(self.loss)
if loss_data < CTC_LOSS_THRESHOLD and not math.isnan(loss_data):
self.reporter.report(
loss_data, loss_att=None, acc=None, cer_ctc=cer_ctc, cer=None, wer=None, mtl_loss=loss_data
)
# loss_att, acc, cer_ctc, cer, wer, mtl_loss
else:
logging.warning("loss (=%f) is not correct", loss_data)
return self.loss | [
2,
15069,
13130,
911,
328,
39548,
9375,
5350,
198,
2,
220,
24843,
362,
13,
15,
220,
357,
4023,
1378,
2503,
13,
43073,
13,
2398,
14,
677,
4541,
14,
43,
2149,
24290,
12,
17,
13,
15,
8,
198,
198,
37811,
8291,
16354,
4046,
9465,
2746,... | 2.134799 | 5,230 |
"""Centrality based source detection methods."""
from collections import Counter
from typing import Dict, Optional, Union
from networkx import Graph
from rpasdt.algorithm.centralities import (
compute_centrality,
compute_unbiased_centrality,
)
from rpasdt.algorithm.models import (
CentralityBasedSourceDetectionConfig,
CentralityCommunityBasedSourceDetectionConfig,
MultipleCentralityBasedSourceDetectionConfig,
UnbiasedCentralityBasedSourceDetectionConfig,
UnbiasedCentralityCommunityBasedSourceDetectionConfig,
)
from rpasdt.algorithm.source_detectors.common import (
CommunityBasedSourceDetector,
SourceDetector,
)
| [
37811,
30645,
414,
1912,
2723,
13326,
5050,
526,
15931,
198,
6738,
17268,
1330,
15034,
198,
6738,
19720,
1330,
360,
713,
11,
32233,
11,
4479,
198,
198,
6738,
3127,
87,
1330,
29681,
198,
198,
6738,
374,
44429,
28664,
13,
282,
42289,
13,
... | 3.484211 | 190 |
# Python script for use Moodle API.
# In this case the code allows to get enrrolled users in a iMOOC course
# Required libraries for use this code
# - Python JSON library
# - Python Requests library http://docs.python-requests.org/en/latest/
# The required parameters are:
# - The course ID -> variable courseid
# - The admin (or manager) token for access to Moodle services -> variable wstoken
import requests, json
parameters = {'wsfunction': core_enrol_get_enrolled_users', 'courseid':'id', 'moodlewsrestformat':'json', 'wstoken':'xxxxxx'}
url = "http://gridlab.upm.es/imooc/"
response = requests.get(url, params=parameters)
if response.status_code == 200:
results = response.json()
for result in results:
print result
else:
print "Error code %s" % response.status_code
| [
2,
11361,
4226,
329,
779,
25723,
293,
7824,
13,
198,
2,
554,
428,
1339,
262,
2438,
3578,
284,
651,
551,
81,
8375,
2985,
287,
257,
1312,
11770,
4503,
1781,
198,
198,
2,
20906,
12782,
329,
779,
428,
2438,
198,
2,
220,
220,
220,
532,... | 3.060377 | 265 |
for _ in range(int(input())):
x=int(input())
for i in range(x):
if i==0 or i==x-1: print('#'*x)
else: print('#'+'J'*(x-2)+'#')
print() | [
1640,
4808,
287,
2837,
7,
600,
7,
15414,
28955,
2599,
198,
2124,
28,
600,
7,
15414,
28955,
198,
329,
1312,
287,
2837,
7,
87,
2599,
198,
220,
611,
1312,
855,
15,
393,
1312,
855,
87,
12,
16,
25,
3601,
10786,
2,
6,
9,
87,
8,
198,... | 2.043478 | 69 |
#!/usr/bin/env python3
from __future__ import print_function
from __future__ import absolute_import
import phue
import traceback
import sys
from constants import HUE_ROOM, HUE_BRIDGE_IP
if sys.version_info < (3, 0):
input = raw_input # pylint: disable=E0602
if __name__ == '__main__':
bridge = phue.Bridge(HUE_BRIDGE_IP)
bridge.connect()
bridge = BridgeWrapper(bridge)
listen_on_stdin(bridge)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
11748,
872,
518,
198,
11748,
12854,
1891,
198,
11748,
25064,
198,
... | 2.64375 | 160 |
import os
import shutil
import unittest
from xbrr.edinet.client.document_client import DocumentClient
from tests.utils import delay
| [
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
555,
715,
395,
198,
6738,
2124,
1671,
81,
13,
27152,
316,
13,
16366,
13,
22897,
62,
16366,
1330,
16854,
11792,
198,
6738,
5254,
13,
26791,
1330,
5711,
628
] | 3.694444 | 36 |
from django.conf import settings
from django.contrib.auth.models import (
AbstractBaseUser, BaseUserManager, PermissionsMixin)
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
357,
198,
220,
220,
220,
27741,
14881,
12982,
11,
7308,
12982,
13511,
11,
2448,
8481,
35608,
259,
8,
198,
6738,
42625,
14208,... | 3.352381 | 105 |
expr = x**2 + x**4
replacements = {x**2: y}
expr = expr.xreplace(replacements) | [
31937,
796,
2124,
1174,
17,
1343,
2124,
1174,
19,
198,
35666,
28613,
796,
1391,
87,
1174,
17,
25,
331,
92,
198,
31937,
796,
44052,
13,
87,
33491,
7,
35666,
28613,
8
] | 2.516129 | 31 |
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from urllib import urlencode
import random, math, os, base64
import requests
from . import utils
from .forms import BPMPlaylistForm
# Spotify API keys
CLIENT_ID= '4df0271d6b1f4768a5bd929a13091e8b'
CLIENT_SECRET = os.environ.get('BPMPLAYLISTS_CLIENT_SECRET')
REDIRECT_URI = '/callback'
STATE_KEY = 'spotify_auth_state' | [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
11,
367,
29281,
31077,
7738,
1060,
198,
6738,
2956,
297,
571,
1330,
2956,
11925,
8189,
198,
11748,
4738,
11,
10688,
11,
28686,
... | 2.778523 | 149 |
'''OpenGL extension EXT.convolution
Overview (from the spec)
This extension defines 1 and 2 dimensional convolution operations
at a fixed location in the pixel transfer process. Thus pixel drawing,
reading, and copying, as well as texture image definition, are all
candidates for convolution. The convolution kernels are themselves
treated as 1 and 2 dimensional images, which can be loaded from
application memory or from the framebuffer.
This extension is designed to accommodate 3D convolution, but the
API is left for a future extension.
The official definition of this extension is available here:
http://oss.sgi.com/projects/ogl-sample/registry/EXT/convolution.txt
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_EXT_convolution'
GL_CONVOLUTION_1D_EXT = constant.Constant( 'GL_CONVOLUTION_1D_EXT', 0x8010 )
glget.addGLGetConstant( GL_CONVOLUTION_1D_EXT, (1,) )
GL_CONVOLUTION_2D_EXT = constant.Constant( 'GL_CONVOLUTION_2D_EXT', 0x8011 )
glget.addGLGetConstant( GL_CONVOLUTION_2D_EXT, (1,) )
GL_SEPARABLE_2D_EXT = constant.Constant( 'GL_SEPARABLE_2D_EXT', 0x8012 )
glget.addGLGetConstant( GL_SEPARABLE_2D_EXT, (1,) )
GL_CONVOLUTION_BORDER_MODE_EXT = constant.Constant( 'GL_CONVOLUTION_BORDER_MODE_EXT', 0x8013 )
GL_CONVOLUTION_FILTER_SCALE_EXT = constant.Constant( 'GL_CONVOLUTION_FILTER_SCALE_EXT', 0x8014 )
GL_CONVOLUTION_FILTER_BIAS_EXT = constant.Constant( 'GL_CONVOLUTION_FILTER_BIAS_EXT', 0x8015 )
GL_REDUCE_EXT = constant.Constant( 'GL_REDUCE_EXT', 0x8016 )
GL_CONVOLUTION_FORMAT_EXT = constant.Constant( 'GL_CONVOLUTION_FORMAT_EXT', 0x8017 )
GL_CONVOLUTION_WIDTH_EXT = constant.Constant( 'GL_CONVOLUTION_WIDTH_EXT', 0x8018 )
GL_CONVOLUTION_HEIGHT_EXT = constant.Constant( 'GL_CONVOLUTION_HEIGHT_EXT', 0x8019 )
GL_MAX_CONVOLUTION_WIDTH_EXT = constant.Constant( 'GL_MAX_CONVOLUTION_WIDTH_EXT', 0x801A )
GL_MAX_CONVOLUTION_HEIGHT_EXT = constant.Constant( 'GL_MAX_CONVOLUTION_HEIGHT_EXT', 0x801B )
GL_POST_CONVOLUTION_RED_SCALE_EXT = constant.Constant( 'GL_POST_CONVOLUTION_RED_SCALE_EXT', 0x801C )
glget.addGLGetConstant( GL_POST_CONVOLUTION_RED_SCALE_EXT, (1,) )
GL_POST_CONVOLUTION_GREEN_SCALE_EXT = constant.Constant( 'GL_POST_CONVOLUTION_GREEN_SCALE_EXT', 0x801D )
glget.addGLGetConstant( GL_POST_CONVOLUTION_GREEN_SCALE_EXT, (1,) )
GL_POST_CONVOLUTION_BLUE_SCALE_EXT = constant.Constant( 'GL_POST_CONVOLUTION_BLUE_SCALE_EXT', 0x801E )
glget.addGLGetConstant( GL_POST_CONVOLUTION_BLUE_SCALE_EXT, (1,) )
GL_POST_CONVOLUTION_ALPHA_SCALE_EXT = constant.Constant( 'GL_POST_CONVOLUTION_ALPHA_SCALE_EXT', 0x801F )
glget.addGLGetConstant( GL_POST_CONVOLUTION_ALPHA_SCALE_EXT, (1,) )
GL_POST_CONVOLUTION_RED_BIAS_EXT = constant.Constant( 'GL_POST_CONVOLUTION_RED_BIAS_EXT', 0x8020 )
glget.addGLGetConstant( GL_POST_CONVOLUTION_RED_BIAS_EXT, (1,) )
GL_POST_CONVOLUTION_GREEN_BIAS_EXT = constant.Constant( 'GL_POST_CONVOLUTION_GREEN_BIAS_EXT', 0x8021 )
glget.addGLGetConstant( GL_POST_CONVOLUTION_GREEN_BIAS_EXT, (1,) )
GL_POST_CONVOLUTION_BLUE_BIAS_EXT = constant.Constant( 'GL_POST_CONVOLUTION_BLUE_BIAS_EXT', 0x8022 )
glget.addGLGetConstant( GL_POST_CONVOLUTION_BLUE_BIAS_EXT, (1,) )
GL_POST_CONVOLUTION_ALPHA_BIAS_EXT = constant.Constant( 'GL_POST_CONVOLUTION_ALPHA_BIAS_EXT', 0x8023 )
glget.addGLGetConstant( GL_POST_CONVOLUTION_ALPHA_BIAS_EXT, (1,) )
glConvolutionFilter1DEXT = platform.createExtensionFunction(
'glConvolutionFilter1DEXT', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLenum, constants.GLsizei, constants.GLenum, constants.GLenum, ctypes.c_void_p,),
doc = 'glConvolutionFilter1DEXT( GLenum(target), GLenum(internalformat), GLsizei(width), GLenum(format), GLenum(type), c_void_p(image) ) -> None',
argNames = ('target', 'internalformat', 'width', 'format', 'type', 'image',),
)
glConvolutionFilter2DEXT = platform.createExtensionFunction(
'glConvolutionFilter2DEXT', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLenum, constants.GLsizei, constants.GLsizei, constants.GLenum, constants.GLenum, ctypes.c_void_p,),
doc = 'glConvolutionFilter2DEXT( GLenum(target), GLenum(internalformat), GLsizei(width), GLsizei(height), GLenum(format), GLenum(type), c_void_p(image) ) -> None',
argNames = ('target', 'internalformat', 'width', 'height', 'format', 'type', 'image',),
)
glConvolutionParameterfEXT = platform.createExtensionFunction(
'glConvolutionParameterfEXT', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLenum, constants.GLfloat,),
doc = 'glConvolutionParameterfEXT( GLenum(target), GLenum(pname), GLfloat(params) ) -> None',
argNames = ('target', 'pname', 'params',),
)
glConvolutionParameterfvEXT = platform.createExtensionFunction(
'glConvolutionParameterfvEXT', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLenum, arrays.GLfloatArray,),
doc = 'glConvolutionParameterfvEXT( GLenum(target), GLenum(pname), GLfloatArray(params) ) -> None',
argNames = ('target', 'pname', 'params',),
)
glConvolutionParameteriEXT = platform.createExtensionFunction(
'glConvolutionParameteriEXT', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLenum, constants.GLint,),
doc = 'glConvolutionParameteriEXT( GLenum(target), GLenum(pname), GLint(params) ) -> None',
argNames = ('target', 'pname', 'params',),
)
glConvolutionParameterivEXT = platform.createExtensionFunction(
'glConvolutionParameterivEXT', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLenum, arrays.GLintArray,),
doc = 'glConvolutionParameterivEXT( GLenum(target), GLenum(pname), GLintArray(params) ) -> None',
argNames = ('target', 'pname', 'params',),
)
glCopyConvolutionFilter1DEXT = platform.createExtensionFunction(
'glCopyConvolutionFilter1DEXT', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLenum, constants.GLint, constants.GLint, constants.GLsizei,),
doc = 'glCopyConvolutionFilter1DEXT( GLenum(target), GLenum(internalformat), GLint(x), GLint(y), GLsizei(width) ) -> None',
argNames = ('target', 'internalformat', 'x', 'y', 'width',),
)
glCopyConvolutionFilter2DEXT = platform.createExtensionFunction(
'glCopyConvolutionFilter2DEXT', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLenum, constants.GLint, constants.GLint, constants.GLsizei, constants.GLsizei,),
doc = 'glCopyConvolutionFilter2DEXT( GLenum(target), GLenum(internalformat), GLint(x), GLint(y), GLsizei(width), GLsizei(height) ) -> None',
argNames = ('target', 'internalformat', 'x', 'y', 'width', 'height',),
)
glGetConvolutionFilterEXT = platform.createExtensionFunction(
'glGetConvolutionFilterEXT', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLenum, constants.GLenum, ctypes.c_void_p,),
doc = 'glGetConvolutionFilterEXT( GLenum(target), GLenum(format), GLenum(type), c_void_p(image) ) -> None',
argNames = ('target', 'format', 'type', 'image',),
)
glGetConvolutionParameterfvEXT = platform.createExtensionFunction(
'glGetConvolutionParameterfvEXT', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLenum, arrays.GLfloatArray,),
doc = 'glGetConvolutionParameterfvEXT( GLenum(target), GLenum(pname), GLfloatArray(params) ) -> None',
argNames = ('target', 'pname', 'params',),
)
glGetConvolutionParameterivEXT = platform.createExtensionFunction(
'glGetConvolutionParameterivEXT', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLenum, arrays.GLintArray,),
doc = 'glGetConvolutionParameterivEXT( GLenum(target), GLenum(pname), GLintArray(params) ) -> None',
argNames = ('target', 'pname', 'params',),
)
glGetSeparableFilterEXT = platform.createExtensionFunction(
'glGetSeparableFilterEXT', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLenum, constants.GLenum, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p,),
doc = 'glGetSeparableFilterEXT( GLenum(target), GLenum(format), GLenum(type), c_void_p(row), c_void_p(column), c_void_p(span) ) -> None',
argNames = ('target', 'format', 'type', 'row', 'column', 'span',),
)
glSeparableFilter2DEXT = platform.createExtensionFunction(
'glSeparableFilter2DEXT', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLenum, constants.GLsizei, constants.GLsizei, constants.GLenum, constants.GLenum, ctypes.c_void_p, ctypes.c_void_p,),
doc = 'glSeparableFilter2DEXT( GLenum(target), GLenum(internalformat), GLsizei(width), GLsizei(height), GLenum(format), GLenum(type), c_void_p(row), c_void_p(column) ) -> None',
argNames = ('target', 'internalformat', 'width', 'height', 'format', 'type', 'row', 'column',),
)
def glInitConvolutionEXT():
'''Return boolean indicating whether this extension is available'''
return extensions.hasGLExtension( EXTENSION_NAME )
| [
7061,
6,
11505,
8763,
7552,
27489,
13,
42946,
2122,
198,
198,
29064,
357,
6738,
262,
1020,
8,
198,
197,
198,
197,
1212,
7552,
15738,
352,
290,
362,
38517,
3063,
2122,
4560,
198,
197,
265,
257,
5969,
4067,
287,
262,
17465,
4351,
1429,
... | 2.735794 | 3,414 |
num = int(input())
value = range(1,num+1)
print("----")
mu_function()
| [
22510,
796,
493,
7,
15414,
28955,
198,
8367,
796,
2837,
7,
16,
11,
22510,
10,
16,
8,
198,
4798,
7203,
650,
4943,
628,
198,
30300,
62,
8818,
3419,
197,
197,
197,
198
] | 2.34375 | 32 |
from TheSphinx.tests import *
| [
6738,
383,
50,
746,
28413,
13,
41989,
1330,
1635,
198
] | 3 | 10 |
import sys,json,boto3
from botocore.exceptions import ClientError
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization,hashes
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography import x509
from cryptography.x509.oid import NameOID
def get_or_create(thing_name: str, role_arn: str, region: str):
"""This method first checks for the existence of an existing certificate in Secrets Manager,
based on the Thing name provided. If this Thing name matches the name of an existing
CloudFormation stack, then the stack is queried to identify if it specifies a SecretId.
This is the ARN of an AWS Secrets Manager secret, which would contain the plain text output
of a certificate signing request and a private key.
"""
role = boto3.client("sts").assume_role(RoleArn=role_arn, RoleSessionName="ThingStack-{s}".format(s=thing_name))
session = boto3.Session(
aws_access_key_id=role["Credentials"]["AccessKeyId"],
aws_secret_access_key=role["Credentials"]["SecretAccessKey"],
aws_session_token=role["Credentials"]["SessionToken"],
region_name=region
)
cloudformation = session.client("cloudformation")
secretsmanager = session.client("secretsmanager")
try:
stacks = cloudformation.describe_stacks(StackName=thing_name)
outputs = [x["Outputs"] for x in stacks["Stacks"] if x["StackStatus"] in ["CREATE_COMPLETE","UPDATE_COMPLETE"]]
if len(outputs) > 1:
sys.exit("Too many matching Stacks ({l})".format(l=len(outputs)))
print("Certificate found for {tn}, so using existing certificate.".format(tn=thing_name))
secretId = [x["OutputValue"] for x in outputs[0] if x["OutputKey"] == "SecretId"][0]
secret = secretsmanager.get_secret_value(SecretId=secretId)
secretJson = json.loads(secret["SecretString"])
KEY_TEXT = secretJson["privateKey"]
CSR_TEXT = secretJson["csr"]
except ClientError:
print("No certificate found for {tn}, so creating a new certificate.".format(tn=thing_name))
key = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
csr = x509.CertificateSigningRequestBuilder().subject_name(
x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, "AWS IoT Certificate")])
).sign(key, hashes.SHA256(), default_backend())
KEY_TEXT = key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption()
).decode("UTF-8")
CSR_TEXT = csr.public_bytes(serialization.Encoding.PEM).decode("UTF-8")
return KEY_TEXT, CSR_TEXT | [
11748,
25064,
11,
17752,
11,
65,
2069,
18,
198,
6738,
10214,
420,
382,
13,
1069,
11755,
1330,
20985,
12331,
198,
6738,
45898,
13,
71,
1031,
6759,
13,
1891,
2412,
1330,
4277,
62,
1891,
437,
198,
6738,
45898,
13,
71,
1031,
6759,
13,
1... | 2.748054 | 1,028 |
import tensorflow as tf
# Default hyperparameters:
hparams = tf.contrib.training.HParams(
# Comma-separated list of cleaners to run on text prior to training and eval. For non-English
# text, you may want to use "basic_cleaners" or "transliteration_cleaners" See TRAINING_DATA.md.
cleaners='korean_cleaners',
# Audio:
num_mels=80,
num_freq=1025,
sample_rate=21000,
frame_length_ms=50,
frame_shift_ms=12.5,
preemphasis=0.97,
min_level_db=-100,
ref_level_db=20,
# Encoder:
embed_depth=256,
encoder_conv_filter=512,
encoder_conv_kernel=5,
encoder_stack_size=3,
encoder_lstm_hidden_dim=256,
#Global Style Token
num_gst=15,
style_embed_depth=256,
ref_filters=[32, 32, 64, 64, 128, 128],
ref_depth=128,
style_att_type='mlp_attention',
style_att_dim=128,
gst_index=3,
gst_scale=0.3,
use_gst=True,
#Attention
attention_depth=256,
attention_filters = 32,
attention_kernel = (31, ),
attention_dim = 128,
synthesis_constraint = False,
synthesis_constraint_type = 'window',
attention_win_size = 7,
attention_type = 'mon_bah',
cumulative_weights = True,
num_heads=4,
# Model:
model='tacotron',
outputs_per_step=2,
prenet_depths=[256, 128],
encoder_depth=256,
postnet_depth=256,
reg_weight = 1e-6,
decoder_depth=256,
RNN_type='LSTM_zoneout',
tacotron_zoneout_rate=0.1,
# Training:
batch_size=32,
adam_beta1=0.9,
adam_beta2=0.999,
initial_learning_rate=0.002,
decay_learning_rate=True,
use_cmudict=False, # Use CMUDict during training to learn pronunciation of ARPAbet phonemes
# Eval:
max_iters=1000,
griffin_lim_iters=60,
power=1.5, # Power to raise magnitudes to prior to Griffin-Lim
)
| [
11748,
11192,
273,
11125,
355,
48700,
198,
198,
2,
15161,
8718,
17143,
7307,
25,
198,
71,
37266,
796,
48700,
13,
3642,
822,
13,
34409,
13,
39,
10044,
4105,
7,
198,
220,
220,
220,
1303,
1520,
64,
12,
25512,
515,
1351,
286,
50102,
284... | 2.260223 | 807 |
"""Fast servo implementation."""
from mpf.core.utility_functions import Util
from mpf.platforms.interfaces.servo_platform_interface import ServoPlatformInterface
class FastServo(ServoPlatformInterface):
"""A servo in the FAST platform."""
__slots__ = ["number", "net_connection"]
def __init__(self, number, net_connection):
"""Initialise servo."""
self.number = number
self.net_connection = net_connection
def go_to_position(self, position):
"""Set a servo position."""
if position < 0 or position > 1:
raise AssertionError("Position has to be between 0 and 1")
# convert from [0,1] to [0, 255]
position_numeric = int(position * 255)
cmd = 'XO:{},{}'.format(
self.number,
Util.int_to_hex_string(position_numeric))
self.net_connection.send(cmd)
@classmethod
def set_speed_limit(cls, speed_limit):
"""Todo emulate speed parameter."""
pass
@classmethod
def set_acceleration_limit(cls, acceleration_limit):
"""Todo emulate acceleration parameter."""
pass
| [
37811,
22968,
1113,
78,
7822,
526,
15931,
198,
6738,
29034,
69,
13,
7295,
13,
315,
879,
62,
12543,
2733,
1330,
7273,
346,
198,
6738,
29034,
69,
13,
24254,
82,
13,
3849,
32186,
13,
3168,
78,
62,
24254,
62,
39994,
1330,
3116,
78,
3714... | 2.514412 | 451 |
# Copyright (c) MNELAB developers
#
# License: BSD (3-clause)
from .readers import read_raw
from .writers import write_raw, writers
| [
2,
15069,
357,
66,
8,
29060,
3698,
6242,
6505,
198,
2,
198,
2,
13789,
25,
347,
10305,
357,
18,
12,
565,
682,
8,
198,
198,
6738,
764,
961,
364,
1330,
1100,
62,
1831,
198,
6738,
764,
34422,
1330,
3551,
62,
1831,
11,
8786,
198
] | 3.022727 | 44 |
import dash
import dash_table
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from tools.serial.tools.list_ports import comports
from app import app
from datetime import date as dt
import select_port
import edit_deployment
import download_page
import sqlite3
import database
from datetime import datetime, timedelta
from pandas import DataFrame
deploy_top_disabled = [
html.Img(id='JHU_Logo',
src=app.get_asset_url('JHU-Logo1.svg'),
style={'float': 'left', 'margin-top': '1vh', 'height': '7vh'}),
html.Img(id='Label',
src=app.get_asset_url('Label-Logo-Gray.svg'),
style={'margin-left': '5vw', 'height': '10vh'}),
html.Img(id='Deploy',
src=app.get_asset_url('Deploy-Logo-Blue.svg'),
style={'margin-left': '10vw', 'height': '10vh'}),
html.Img(id='Data',
src=app.get_asset_url('Data-Logo-Gray.svg'),
style={'margin-left': '10vw', 'height': '10vh'}),
html.Img(id='Share',
src=app.get_asset_url('Share-Logo-Gray.svg'),
style={'margin-left': '10vw', 'height': '10vh'}),
html.Img(id='EHON1',
src=app.get_asset_url('EHON-Logo.svg'),
style={'margin-left': '0vw', 'height': '4vh',
'width': '36vh', 'float': 'right', 'margin-top': '3vh', 'margin-right': '3vw'})
]
layout = html.Div([
html.Div(id='top-section',
children=html.Div(id='Logos',
children=deploy_top_disabled,
style={'height': '10vh',
'overflow': 'hidden'})
),
dcc.ConfirmDialog(
id='delete-node-confirm',
message='Danger danger! Are you sure you want to continue?',
),
# html.Hr(style={'margin-top': '1vh', 'margin-bottom': '0vh'}),
select_port.deploy_select_port,
edit_deployment.layout,
download_page.layout,
html.Div(style={'display': 'none'}, id='deployment-storage'),
], id="top")
current_ports = {}
# Select Port Dropdown
@app.callback(Output('port-dropdown', 'options'),
[Input('interval-component', 'n_intervals'),
Input('port-dropdown', 'value')])
# Place holder function for illustrative purpose only. Will need to ad
# just link to database later on.
@app.callback(Output('select-deployment-dropdown', 'options'),
[Input('select-deployment-dropdown', 'value'),
Input('port-dropdown', 'value')])
@app.callback(Output('deployment-preview-table', 'children'),
Input('select-deployment-dropdown', 'value'))
@app.callback([Output('deploy-select-port-and-deployment-content', 'style'),
Output('create-or-edit-deployment', 'style'),
Output('download-page', 'style'),
Output('deployment-storage', 'children')],
[Input('connect-button', 'n_clicks'),
Input('edit-selected-deployment', 'n_clicks'),
Input('create-selected-deployment', 'n_clicks')],
State('select-deployment-dropdown', 'value'))
@app.callback([Output('deployment-name', 'value'),
Output('my-date-picker-single', 'date'),
Output('download-interval-dropdown', 'value'),
Output('deployment-on-off-button', 'n_clicks')
],
[Input('deployment-storage', 'children')])
@app.callback([Output('deployment-detail-table', 'children')],
[Input('deployment-storage', 'children')])
@app.callback([Output('datatable-interactivity', 'style_data_conditional'),
Output('sensor-table', 'children')],
[Input('datatable-interactivity', "selected_rows"),
Input('datatable-interactivity', "derived_virtual_data")])
@app.callback([Output('delete-node-confirm', 'displayed'),
Output('delete-node-confirm', 'message'),
Output('hidden-element', "children")],
[Input('datatable-interactivity', 'data_previous')],
[State('datatable-interactivity', 'data')])
@app.callback(Output('delete-node-confirm', 'displayed'),
Input('delete-node-confirm', 'submit_n_clicks'),
State("hidden-element", "children"))
@app.callback(
Output('datatable-sensor', 'data'),
Input('editing-rows-button', 'n_clicks'),
State('datatable-sensor', 'data'),
State('datatable-sensor', 'columns'))
| [
11748,
14470,
198,
11748,
14470,
62,
11487,
198,
11748,
14470,
62,
7295,
62,
5589,
3906,
355,
288,
535,
198,
11748,
14470,
62,
6494,
62,
5589,
3906,
355,
27711,
198,
6738,
14470,
13,
45841,
3976,
1330,
23412,
11,
25235,
11,
1812,
198,
... | 2.191715 | 2,076 |
import re
import glob, os
import spacy
import xml.etree.ElementTree as ET
os.chdir("xml_datasets/")
sentences=[]
nlp=spacy.load('el_unnamed')
for file in glob.glob("*.xml"):
tree = ET.parse('{}'.format(file))
root = tree.getroot()
begin=[]
end=[]
for child in root:
if (child.tag=='{http:///uima/cas.ecore}Sofa'):
txt=child.attrib['sofaString']
if (child.tag=='{http:///gr/ilsp/types.ecore}Sentence'):
begin.append(int(child.attrib['begin']))
end.append(int(child.attrib['end']))
for i in range(len(begin)):
tmp_sentence=txt[begin[i]:end[i]]
doc=nlp(tmp_sentence)
flag=False
for j in doc:
if (j.tag_=='PROPN'):
flag=True
if (flag==True):
sentences.append(tmp_sentence)
for x in sentences:
print(x)
| [
11748,
302,
198,
11748,
15095,
11,
28686,
198,
11748,
599,
1590,
198,
11748,
35555,
13,
316,
631,
13,
20180,
27660,
355,
12152,
198,
418,
13,
354,
15908,
7203,
19875,
62,
19608,
292,
1039,
14,
4943,
198,
34086,
3007,
28,
21737,
198,
2... | 2.21365 | 337 |
"""
Test operators in deep-learning.
Using PyTorch.
====================================
**Author**: `Size Zheng`
"""
import sys
sys.path.append('../../../')
import tvm
import torch
import numpy as np
import copy
from flextensor.nn import *
from flextensor.utils import test_allclose
import pyimpl
if __name__ == "__main__":
print("Test begins...")
test()
print("Done.") | [
37811,
198,
14402,
12879,
287,
2769,
12,
40684,
13,
198,
12814,
9485,
15884,
354,
13,
198,
198,
10052,
1421,
198,
1174,
13838,
1174,
25,
4600,
10699,
44583,
63,
198,
37811,
198,
11748,
25064,
220,
198,
17597,
13,
6978,
13,
33295,
10786,... | 3.092308 | 130 |
import tensorflow as tf
import numpy as np
import argparse
import os
batch_size = 10
files, input_layer, output_layer = [None]*3
if __name__ == "__main__":
args=get_arguments()
print "Extracting Features"
io = build_prepro_graph(args.inception_path)
forward_pass(io, args.data_path)
print "done"
| [
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
198,
43501,
62,
7857,
796,
838,
198,
16624,
11,
5128,
62,
29289,
11,
5072,
62,
29289,
796,
685,
14202,
60,
9,
1... | 2.639344 | 122 |
# Copyright (C) 2019-2020 Intel Corporation
#
# SPDX-License-Identifier: MIT
import logging as log
import os
import os.path as osp
from datumaro.components.extractor import DatasetItem, SourceExtractor, Importer
from datumaro.components.converter import Converter
from datumaro.util.image import Image
| [
198,
2,
15069,
357,
34,
8,
13130,
12,
42334,
8180,
10501,
198,
2,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
17168,
198,
198,
11748,
18931,
355,
2604,
198,
11748,
28686,
198,
11748,
28686,
13,
6978,
355,
267,
2777,
198,
198,... | 3.315217 | 92 |
import logging
log = logging.getLogger()
log.setLevel(logging.CRITICAL)
| [
11748,
18931,
198,
198,
6404,
796,
18931,
13,
1136,
11187,
1362,
3419,
198,
6404,
13,
2617,
4971,
7,
6404,
2667,
13,
9419,
2043,
20151,
8,
198
] | 2.807692 | 26 |
from PIL import Image
#max value for luminance is ~277 (rounded down), min is 0 #print(255*0.299+255*0.7152+255*0.0722) #max value for luminance is 277.032
chars = ["@","$","#","H","E","W","g","h","f","e","c","n","<","+","=","~","-","^","*","'"] #chars in order of perceived brightness: left to right - dark to bright
mult = 1.5 #has to be greater than 1 #inversely proportional to the size of the image output
img = "cat.jpg"
textImg = open((img + ".txt"), "w")
image = Image.open(img)
imageWidth, imageHeight = image.size
imgDat = image.load()
for y in range(0, round(imageHeight/mult)):
for x in range(0, round(imageWidth/mult)):
luminance = (100/277.032) * (imgDat[x*mult,y*mult][0]*0.2126 + imgDat[x*mult,y*mult][1]*0.7152 + imgDat[x*mult,y*mult][2]*0.0722) #imgDat[x,y][0, 1, or 2 for R, G, or B vals]
textImg.write(chars[int(round(luminance)/5)])
textImg.write("\n")
textImg.close()
| [
6738,
350,
4146,
1330,
7412,
201,
198,
201,
198,
2,
9806,
1988,
329,
29763,
590,
318,
5299,
27019,
357,
39262,
866,
828,
949,
318,
657,
1303,
4798,
7,
13381,
9,
15,
13,
22579,
10,
13381,
9,
15,
13,
22,
17827,
10,
13381,
9,
15,
1... | 2.354271 | 398 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 12 21:17:42 2018
@author: hendrawahyu
"""
import os, glob, csv
import argparse
# =============================================================================
# Read images files using rawpy library
# param: index - index number of image files to open
# dir - directory name (string)
# default -> datafiles
# ext - extension of files -> default: dng
# demosaic - boolean False to use BAYER only whereas True is to
# process the image up to rawpy.postprocess()
# output: output_image - pre / post processed image
# raw_color - bayer color sequence
# Example: img = read_image()[2] -> will open default first image on
# datafiles folder with file ext 'dng'and
# implement postprocess image
# =============================================================================
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', default='datafiles', type=str, help='Path to image folder')
parser.add_argument('--ext', default='dng', type=str, help='Image Extension')
parser.add_argument('--save', default = False, type=bool, help='create csv of file list')
args = parser.parse_args()
list_dir(args.file)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
2892,
3158,
1105,
2310,
25,
1558,
25,
3682,
2864,
198,
198,
31,
9800,
25,
339,
358,
1831,... | 2.606884 | 552 |
from .element import Element
from .prop import ValidProp, IntProp
from .func import get_word | [
6738,
764,
30854,
1330,
11703,
198,
6738,
764,
22930,
1330,
48951,
24331,
11,
2558,
24331,
198,
6738,
764,
20786,
1330,
651,
62,
4775
] | 4 | 23 |
import logging
from timeit import default_timer as timer
logger = logging.getLogger(__name__)
class Timer:
"""Simple timer focused on practical use.
Args:
label (str): label of the timer
at_enter (bool): whether it should be also displayed when entering the context.
Defaults to False.
report (func): function to use for reporting. Defaults to logger.info
"""
| [
11748,
18931,
198,
6738,
640,
270,
1330,
4277,
62,
45016,
355,
19781,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
628,
198,
4871,
5045,
263,
25,
198,
220,
220,
220,
37227,
26437,
19781,
5670,
319,
... | 2.942857 | 140 |
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ..utils import InvWarp
| [
2,
47044,
46,
12,
35353,
1137,
11617,
416,
4899,
14,
42116,
431,
6359,
13,
9078,
532,
8410,
5626,
48483,
198,
6738,
11485,
26791,
1330,
10001,
54,
5117,
628
] | 2.964286 | 28 |
import blockformer_core as bc
import blockformer_init as bi
window = bc.Window(1400,500,300,300,60,"Blockformer")
#initialize variables
#Landscape(self,color,x,y,width=20,height=20)
landscape = bc.Landscape(window,(0,255,0),0,0,window.width,100)
window.background.add(landscape.drawable_sprite)
sprite = bc.SmartSprite(window,0,100,20,20,10)
window.sprites.add(sprite.drawable_sprite)
window.run()
| [
11748,
2512,
16354,
62,
7295,
355,
47125,
198,
11748,
2512,
16354,
62,
15003,
355,
3182,
198,
198,
17497,
796,
47125,
13,
27703,
7,
1415,
405,
11,
4059,
11,
6200,
11,
6200,
11,
1899,
553,
12235,
16354,
4943,
198,
198,
2,
36733,
1096,
... | 2.583333 | 156 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-10-31 15:31
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
13,
1433,
319,
2864,
12,
940,
12,
3132,
1315,
25,
3132,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.753623 | 69 |
"""
:codeauthor: Rahul Handay <rahulha@saltstack.com>
"""
import pytest
import salt.states.win_dns_client as win_dns_client
from tests.support.mock import MagicMock, patch
@pytest.fixture
def test_dns_exists():
"""
Test to configure the DNS server list in the specified interface
"""
ret = {"name": "salt", "changes": {}, "result": False, "comment": ""}
with patch.dict(win_dns_client.__opts__, {"test": False}):
ret.update(
{
"changes": {
"Servers Added": [],
"Servers Removed": [],
"Servers Reordered": [],
},
"comment": "servers entry is not a list !",
}
)
assert win_dns_client.dns_exists("salt") == ret
mock = MagicMock(return_value=[2, "salt"])
with patch.dict(
win_dns_client.__salt__, {"win_dns_client.get_dns_servers": mock}
):
ret.update(
{
"changes": {},
"comment": repr([2, "salt"]) + " are already configured",
"result": True,
}
)
assert win_dns_client.dns_exists("salt", [2, "salt"]) == ret
mock = MagicMock(side_effect=[False, True, True])
with patch.dict(win_dns_client.__salt__, {"win_dns_client.add_dns": mock}):
ret.update(
{
"comment": "Failed to add 1 as DNS server number 1",
"result": False,
}
)
assert win_dns_client.dns_exists("salt", [1, "salt"]) == ret
mock = MagicMock(return_value=False)
with patch.dict(
win_dns_client.__salt__, {"win_dns_client.rm_dns": mock}
):
ret.update(
{
"changes": {
"Servers Added": ["a"],
"Servers Removed": [],
"Servers Reordered": [],
},
"comment": "Failed to remove 2 from DNS server list",
}
)
assert win_dns_client.dns_exists("salt", ["a"], "a", 1) == ret
ret.update({"comment": "DNS Servers have been updated", "result": True})
assert win_dns_client.dns_exists("salt", ["a"]) == ret
def test_dns_dhcp():
"""
Test to configure the DNS server list from DHCP Server
"""
ret = {"name": "salt", "changes": {}, "result": True, "comment": ""}
mock = MagicMock(side_effect=["dhcp", "salt", "salt"])
with patch.dict(win_dns_client.__salt__, {"win_dns_client.get_dns_config": mock}):
ret.update(
{
"comment": "Local Area Connection already configured"
" with DNS from DHCP"
}
)
assert win_dns_client.dns_dhcp("salt") == ret
with patch.dict(win_dns_client.__opts__, {"test": True}):
ret.update(
{
"comment": "",
"result": None,
"changes": {"dns": "configured from DHCP"},
}
)
assert win_dns_client.dns_dhcp("salt") == ret
with patch.dict(win_dns_client.__opts__, {"test": False}):
mock = MagicMock(return_value=True)
with patch.dict(win_dns_client.__salt__, {"win_dns_client.dns_dhcp": mock}):
ret.update({"result": True})
assert win_dns_client.dns_dhcp("salt") == ret
def test_primary_suffix():
"""
Test to configure the global primary DNS suffix of a DHCP client.
"""
ret = {"name": "salt", "changes": {}, "result": False, "comment": ""}
ret.update({"comment": "'updates' must be a boolean value"})
assert win_dns_client.primary_suffix("salt", updates="a") == ret
mock = MagicMock(
side_effect=[
{"vdata": "a"},
{"vdata": False},
{"vdata": "b"},
{"vdata": False},
]
)
with patch.dict(win_dns_client.__utils__, {"reg.read_value": mock}):
ret.update({"comment": "No changes needed", "result": True})
assert win_dns_client.primary_suffix("salt", "a") == ret
mock = MagicMock(return_value=True)
with patch.dict(win_dns_client.__utils__, {"reg.set_value": mock}):
ret.update(
{
"changes": {"new": {"suffix": "a"}, "old": {"suffix": "b"}},
"comment": "Updated primary DNS suffix (a)",
}
)
assert win_dns_client.primary_suffix("salt", "a") == ret
| [
37811,
198,
220,
220,
220,
1058,
8189,
9800,
25,
48543,
7157,
323,
1279,
11392,
377,
3099,
31,
82,
2501,
25558,
13,
785,
29,
198,
37811,
198,
198,
11748,
12972,
9288,
198,
11748,
8268,
13,
27219,
13,
5404,
62,
67,
5907,
62,
16366,
3... | 1.841185 | 2,632 |
# Append header file search paths to specified Environment Object
# env: Environment object
# paths: array of absolute paths
| [
198,
2,
2034,
437,
13639,
2393,
2989,
13532,
284,
7368,
9344,
9515,
198,
2,
17365,
25,
220,
220,
220,
220,
9344,
2134,
198,
2,
13532,
25,
220,
220,
7177,
286,
4112,
13532,
198
] | 4 | 33 |
# Generated by Django 3.1.7 on 2021-03-17 06:07
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
22,
319,
33448,
12,
3070,
12,
1558,
9130,
25,
2998,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import keyboard
from random import randint
from time import sleep
if __name__ == "__main__":
xmastree = ChristmasTree()
try:
while True:
xmastree.draw()
if keyboard.is_pressed('q'):
raise KeyboardInterrupt
except KeyboardInterrupt:
pass
sys.exit(0)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
25064,
198,
11748,
10586,
198,
6738,
4738,
1330,
43720,
600,
198,
6738,
640,
1330,
3993,
628,
198,
361,... | 2.239766 | 171 |
from django.conf.urls import url
from django_info_system.generic_views import url_helper as u
from person import views
# Matches a numeric person id at the start of the URL path, e.g. "42/".
PERSON_REGEXP = r'^(?P<id>[0-9]+)/'

urlpatterns = [
    # Person list; an optional lowercase tab segment (3+ letters) selects a tab.
    u(r'^$', views.PersonList),
    u(r'^(?P<tab>[a-z][a-z][a-z]+)$', views.PersonList,
      url_name_suffix="_tab"),
    # Detail and edit views for a single person, keyed by numeric id.
    u(PERSON_REGEXP + '$', views.PersonView),
    u(PERSON_REGEXP + '/edit/$', views.PersonManageEdit),
]
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
198,
6738,
42625,
14208,
62,
10951,
62,
10057,
13,
41357,
62,
33571,
1330,
19016,
62,
2978,
525,
355,
334,
198,
198,
6738,
1048,
1330,
5009,
198,
198,
47,
29086,
62,
31553... | 2.226519 | 181 |
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 05 16:29:11 2015
@author: Celso
Objective: General functions to create a mindmap file
"""
#import pickle
# Module-level accumulators for per-IPC-level descriptions
# (presumably filled by LoadDescs below -- confirm).
listdesc3, listdesc4, listdesc7, listdesc11 = [], [], [], []
# End CalcSizeIpc1
# End CalcSizeIpc3
# End CalcSizeIpc4
# End CalcSizeIpc7
# End CalcSizeIpc11
# end LoadDescs
# end nodecolor
# end Ipc1Text
# end Ipc3Text
# end Ipc4Text
# end Ipc7Text
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
7031,
8621,
8870,
1467,
25,
1959,
25,
1157,
1853,
198,
198,
31,
9800,
25,
15248,
568,
198,
198,
10267,
425,
25,
3611,
25439,
507,
284,
2251,
25... | 2.385965 | 171 |
from bitarray import bitarray
from bitarray.util import ba2int
from bitarray.util import int2ba
import random
| [
6738,
1643,
18747,
1330,
1643,
18747,
198,
6738,
1643,
18747,
13,
22602,
1330,
26605,
17,
600,
198,
6738,
1643,
18747,
13,
22602,
1330,
493,
17,
7012,
198,
11748,
4738,
198
] | 3.666667 | 30 |
import os
import matplotlib.pyplot as plt
import numpy as np
import math
import torch
import gotex.vgg as vgg
import wget
import ssl
# Disable HTTPS certificate verification process-wide (presumably so the wget
# downloads below succeed behind broken cert chains).
# NOTE(review): this weakens TLS for the entire process -- confirm intentional.
ssl._create_default_https_context = ssl._create_unverified_context
def ReadImg(imagePath):
    '''
    Read an image file into a torch tensor with 3 channels and 0-255 range.
    The returned tensor has dimension [nrow, ncol, nchannel].
    '''
    img = torch.tensor(plt.imread(imagePath))
    # Images decoded as floats in [0, 1] are rescaled to the 0-255 range.
    if torch.max(img) <= 1:
        img *= 255
    # Grayscale input: add a channel axis and replicate it three times.
    if len(img.shape) < 3:
        img = img.unsqueeze(2)
        img = torch.cat((img, img, img), 2)
    # Keep only the first three channels (drops e.g. an alpha channel).
    if img.shape[2] > 3:
        img = img[:, :, :3]
    return img
def ShowImg(tensImg):
    '''
    Display a tensor image with matplotlib and return the AxesImage.
    tensImg dimension should be [nrow, ncol, nchannel].
    '''
    # Convert to a float array in [0, 1], the range imshow expects.
    arr = np.clip(tensImg.data.cpu().numpy() / 255, 0, 1)
    return plt.imshow(arr)
def SaveImg(saveName, tensImg):
    '''
    Save a tensor image to the file saveName.
    tensImg dimension should be [nrow, ncol, nchannel].
    '''
    arr = np.clip(tensImg.cpu().numpy() / 255, 0, 1)
    if arr.shape[2] < 3:
        # Collapse single-channel data to 2-D so imsave writes grayscale.
        arr = arr[:, :, 0]
    plt.imsave(saveName, arr)
    return
def PreProc(tensImg):
    '''
    Pre-process an image so it can be fed to the VGG network.
    input: tensImg of dimension [nrow, ncol, nchannel], channels in RGB order
    output: tensor of dimension [1, nchannel, nrow, ncol], channels in BGR order
    '''
    # Reverse the channel axis: RGB -> BGR.
    bgr = tensImg[..., [2, 1, 0]]
    # Subtract the (integer) VGG per-channel mean.
    mean = torch.tensor([104, 117, 124], device=tensImg.device).view(1, 1, 3)
    centered = bgr - mean
    # Move channels first and add a batch axis.
    return centered.permute(2, 0, 1).unsqueeze(0)
def PostProc(batchImg):
    '''
    Undo PreProc so an image can be displayed or saved.
    input: batchImg of dimension [1, nchannel, nrow, ncol], channels in BGR order
    output: tensor of dimension [nrow, ncol, nchannel], channels in RGB order
    '''
    # Drop the batch axis and move channels last.
    hwc = batchImg.squeeze(0).permute(1, 2, 0)
    # Add back the VGG per-channel mean.
    mean = torch.tensor([104, 117, 124], device=batchImg.device).view(1, 1, 3)
    restored = hwc + mean
    # Reverse the channel axis: BGR -> RGB.
    return restored[..., [2, 1, 0]]
| [
11748,
28686,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
198,
11748,
28034,
198,
11748,
1392,
1069,
13,
85,
1130,
355,
410,
1130,
198,
11748,
266,
1136,
198,
1174... | 2.297782 | 947 |
##############################################################################
#
# Copyright (c) 2008 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""A foundation for history-free RelStorage tests"""
from relstorage.tests.RecoveryStorage import BasicRecoveryStorage
from relstorage.tests.RecoveryStorage import UndoableRecoveryStorage
from relstorage.tests.reltestbase import GenericRelStorageTests
from relstorage.tests.reltestbase import RelStorageTestBase
from ZODB.FileStorage import FileStorage
from ZODB.serialize import referencesf
from ZODB.tests.ConflictResolution import PCounter
from ZODB.tests.PackableStorage import dumps
from ZODB.tests.PackableStorage import pdumps
from ZODB.tests.PackableStorage import Root
from ZODB.tests.PackableStorage import ZERO
from ZODB.tests.StorageTestBase import zodb_pickle
from ZODB.tests.StorageTestBase import zodb_unpickle
import cPickle
import time
| [
29113,
29113,
7804,
4242,
2235,
198,
2,
198,
2,
15069,
357,
66,
8,
3648,
1168,
3008,
5693,
290,
25767,
669,
13,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
770,
3788,
318,
2426,
284,
262,
8617,
286,
262,
1168,
3008,
5094,
13789... | 3.816438 | 365 |
## importing
import csv
import argparse
## arguments
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--spanlen", type=int, help="The number of nucleotides to extract")
parser.add_argument("-i", "--infile", help="Input file containing SNV data")
parser.add_argument("-s", "--sample", help="The name of the sample (will be applied to any output files)")
args = parser.parse_args()

span = args.spanlen
sample = args.sample
infile = args.infile

## get snv spans; get chr:start-end
# Output files live in the working directory and are named after the sample.
n = 0
tmp1 = "./"+str(sample)+".span_data_file.txt"
tmp2 = "./"+str(sample)+".span_file.txt"
with open(tmp1, "w", newline= "") as tmp_file:
    fieldnames = ["Chr#","SNV_Pos","START", "END", "SpanSeq"]
    tmp_writer = csv.DictWriter(tmp_file, fieldnames= fieldnames, delimiter = "\t")
    with open(infile, "r", newline= "") as snv_file:
        in_filereader = csv.DictReader(snv_file, delimiter = "\t")
        tmp_writer.writeheader()
        with open(tmp2, "w", newline= "") as outfile:
            # The plain span list intentionally carries no header row.
            out_writer = csv.DictWriter(outfile, fieldnames= ["Span"])
            for row in in_filereader:
                chr_no = row["Region"]
                snv_pos = int(row["Position"])
                n += 1
                # Window of `span` nucleotides on each side of the SNV position.
                span_start = snv_pos - span
                span_end = snv_pos + span
                tmp_writer.writerow({"Chr#": ("chr"+chr_no), "SpanSeq": (str(chr_no)+":"+str(span_start)+"-"+str(span_end)), "SNV_Pos": snv_pos, "START": span_start, "END": span_end})
                out_writer.writerow({"Span": (str(chr_no)+":"+str(span_start)+"-"+str(span_end))})
print("Analysed", n, "positions.")
# Fix: removed the redundant tmp_file/snv_file/outfile.close() calls that
# followed the with-blocks -- the context managers already closed all three.
## done, hopefully
| [
2235,
33332,
198,
11748,
269,
21370,
198,
11748,
1822,
29572,
198,
198,
2235,
7159,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
3419,
198,
48610,
13,
2860,
62,
49140,
7203,
12,
77,
1600,
366,
438,
12626,
11925,
1600,
2099,
28,... | 2.232099 | 810 |
import logging
from typing import List
from kubails.utils.service_helpers import call_command, get_command_output
# Module-level logger, namespaced to this module per the logging convention.
logger = logging.getLogger(__name__)
| [
11748,
18931,
198,
6738,
19720,
1330,
7343,
198,
6738,
479,
549,
1768,
13,
26791,
13,
15271,
62,
16794,
364,
1330,
869,
62,
21812,
11,
651,
62,
21812,
62,
22915,
628,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
... | 3.422222 | 45 |
import torch
from .Module import Module
| [
11748,
28034,
198,
6738,
764,
26796,
1330,
19937,
628
] | 4.555556 | 9 |
"""Module for migrating data from the .xml files in the Stack Overflow data
dump to a SQL database.
.. module:: migrate_data
:platform: Linux
:synopsis: Data migration functions.
.. moduleauthor:: Simon Larsén <slarse@kth.se>, Li Ling <liling@kth.se>
"""
from typing import Iterable, Callable, Set
from xml.etree import ElementTree
from functools import partial
from maya import MayaDT
from analyzer import LOGGER
from analyzer.database import Base, commit_all_separately, batch_commit, Post, PostType, Comment
from analyzer.dbops import query_ids_by_model, EXTRACT_FIRSTS_FROM_QUERY
from analyzer.util import sanitize_post, sanitize_comment, yield_batches
# Number of models handed to the database per bulk commit.
BATCH_SIZE = 1000


def fill_database(questions_xml: str = None,
                  answers_xml: str = None,
                  comments_xml: str = None,
                  creation_date_start: MayaDT = None):
    """Fill the database with posts and comments. Text is sanitized first.

    Each argument names an xml dump file; only the migration steps whose
    path is not None are executed, in question/answer/comment order.
    creation_date_start, when given, limits migration to rows created on
    or after that timestamp.

    NOTE(review): the _migrate_*_from_xml_to_db helpers are defined
    elsewhere in this module (not visible here) -- presumably thin
    wrappers around _xml_to_database; confirm.
    """
    if questions_xml is not None:
        _migrate_questions_from_xml_to_db(questions_xml, creation_date_start)
    if answers_xml is not None:
        _migrate_answers_from_xml_to_db(answers_xml, creation_date_start)
    if comments_xml is not None:
        _migrate_comments_from_xml_to_db(comments_xml, creation_date_start)
def _xml_to_database(xml_path: str,
                     model_function: Callable[[ElementTree.Element], Base],
                     creation_date_start,
                     post_ids: Set[int] = None):
    """Parse an xml file and add its rows to the database in batches.

    post_ids only matters for answers and comments (it is ignored for
    questions): a row is persisted only when its post_id/parent_id is
    contained in the set.
    """
    rows = _get_rows_from_xml(xml_path, creation_date_start)
    count = 0
    for batch in yield_batches(rows, BATCH_SIZE):
        # Build models for this batch, dropping rows the converter rejected.
        model_batch = []
        for elem in batch:
            model = model_function(elem, post_ids)
            if model is not None:
                model_batch.append(model)
        committed = len(model_batch)
        if not batch_commit(model_batch):
            # Bulk commit failed: retry one model at a time and count only
            # the ones that actually made it in.
            committed = commit_all_separately(model_batch)
        count += committed
    LOGGER.info(f"Added: {count}")
def _get_rows_from_xml(filepath: str, creation_date_start: MayaDT):
    """Incrementally parse an xml dump and yield its <row> elements.

    Rows created before creation_date_start are skipped (all rows are
    yielded when it is None). Parsing is incremental and the root element
    is cleared after each row so memory stays bounded on very large dumps.

    Fix: removed the dead `month` bookkeeping -- it was assigned and
    compared but never used for anything observable. The docstring also
    wrongly claimed this only handled the comments file; it is used for
    every dump file.
    """
    parser = iter(ElementTree.iterparse(filepath, events=['start', 'end']))
    # Grab the root element so completed rows can be freed as we go.
    _, root = next(parser)
    for event, elem in parser:
        if event == 'end' and elem.tag == 'row':
            cd = MayaDT.from_rfc3339(elem.attrib['CreationDate'])
            if creation_date_start is None or creation_date_start <= cd:
                yield elem
            # Free already-processed children; the consumer has finished
            # with the yielded element by the time we resume here.
            root.clear()
def _post_xml_row_to_model(elem,
                           question_ids: Set[int] = None,
                           target_post_type: PostType = PostType.QUESTION):
    """Convert an xml row from the Posts.xml file to a Post model.

    Text is sanitized before conversion. question_ids only matters when
    target_post_type is PostType.ANSWER: an answer is converted only if
    its parent_id is contained in question_ids.

    Returns None for rows that are filtered out or fail sanitization.
    """
    try:
        post_type = PostType(int(elem.attrib['PostTypeId']))
    except ValueError:
        # Row is neither a question nor an answer.
        return None
    # Early exits: wrong type, or an answer whose question is not wanted.
    if target_post_type != post_type:
        return None
    if target_post_type == PostType.ANSWER and int(
            elem.attrib['ParentId']) not in question_ids:
        return None
    try:
        sanitized = sanitize_post(elem.attrib['Body'])
    except ValueError:
        LOGGER.error(
            f"Sanitization failed for Post with Id={elem.attrib['Id']}")
        return None
    date = MayaDT.from_rfc3339(elem.attrib['CreationDate']).date
    if post_type == PostType.ANSWER:
        # Answers carry a parent link instead of title/tags.
        title, tags = None, None
        parent_id = elem.attrib['ParentId']
    else:
        title = elem.attrib['Title']
        tags = elem.attrib['Tags']
        parent_id = None
    return Post(
        id=elem.attrib['Id'],
        creation_date=date,
        post_type_id=post_type.value,
        title=title,
        text=sanitized,
        tags=tags,
        parent_id=parent_id)
def _comment_xml_row_to_model(elem, post_ids: Set[int]):
    """Convert an xml row from the Comments.xml file to a Comment model.

    Text is sanitized before conversion. Returns None when the comment's
    post is not in post_ids or when sanitization fails.
    """
    post_id = int(elem.attrib['PostId'])
    if post_id not in post_ids:
        return None
    try:
        sanitized = sanitize_comment(elem.attrib['Text'])
    except Exception as e:
        # Comment sanitization can fail in many ways; log and drop the row.
        LOGGER.error(
            f"Sanitization failed for Comment with Id={elem.attrib['Id']}\n"
            f"{type(e).__name__}\n{str(e)}")
        return None
    return Comment(
        id=elem.attrib['Id'],
        creation_date=MayaDT.from_rfc3339(elem.attrib['CreationDate']).date,
        text=sanitized,
        post_id=post_id)
| [
37811,
26796,
329,
45879,
1366,
422,
262,
764,
19875,
3696,
287,
262,
23881,
3827,
11125,
1366,
198,
39455,
284,
257,
16363,
6831,
13,
198,
198,
492,
8265,
3712,
32492,
62,
7890,
198,
220,
220,
220,
1058,
24254,
25,
7020,
198,
220,
22... | 2.362602 | 2,198 |
'''
Brent Waters (Pairing-based)
| From: "Ciphertext-Policy Attribute-Based Encryption: An Expressive, Efficient, and Provably Secure Realization", Appendix C.
| Published in: 2008
| Available from: http://eprint.iacr.org/2008/290.pdf
| Notes: Security Assumption: parallel q-DBDHE. The sole disadvantage of this scheme is the high number of pairings
| that must be computed during the decryption process (2 + N) for N attributes mathing in the key.
* type: ciphertext-policy attribute-based encryption (public key)
* setting: Pairing
:Authors: J Ayo Akinyele
:Date: 11/2010
'''
from charm.toolbox.pairinggroup import PairingGroup,ZR,G1,G2,GT,pair
from charm.toolbox.secretutil import SecretUtil
from charm.toolbox.ABEnc import ABEnc
from openpyxl import Workbook
from charm.core.engine.util import serializeDict,objectToBytes
# Module-level debug switch; set True to enable verbose diagnostic output.
debug = False
# Get the elliptic curve with the bilinear mapping feature needed.
| [
7061,
6,
198,
33,
1156,
21827,
357,
47,
958,
278,
12,
3106,
8,
198,
220,
198,
91,
3574,
25,
366,
34,
10803,
5239,
12,
36727,
3460,
4163,
12,
15001,
14711,
13168,
25,
1052,
5518,
3314,
11,
412,
5632,
11,
290,
7518,
1346,
26707,
641... | 2.948171 | 328 |
#!/usr/bin/env python3
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Since this module is beyond QA responsibility we will not fix docstrings here
# pylint: disable=missing-function-docstring, missing-class-docstring
"""Unit-like status API tests"""
import json
import unittest
import requests
import websocket
from tests.base.test_api import ApiTestCase
if __name__ == '__main__':
    # failfast stops the run on the first failure; verbosity=2 prints each
    # test name as it executes.
    unittest.main(failfast=True, verbosity=2)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
92... | 3.545455 | 264 |
'''
// Layout of fields in a DDR4 SPD.
// Individual field descriptions are taken directly from the JEDEC spec.
struct Ddr4Fields {
// Number of Serial PD Bytes Written / SPD Device Size / CRC Coverage
uint8_t bytes_used_bytes_total_crc_coverage;
// SPD Revision
uint8_t spd_revision;
// Key Byte / DRAM Device Type
uint8_t dram_device_type;
// Key Byte / Module Type
uint8_t module_type;
// SDRAM Density and Banks
uint8_t sdram_density_and_banks;
// SDRAM Addressing
uint8_t sdram_addressing;
// SDRAM Package Type
uint8_t sdram_package_type;
// SDRAM Optional Features
uint8_t sdram_optional;
// SDRAM Thermal and Refresh Options
uint8_t sdram_thermal_and_refresh;
// Other SDRam Optional Features
uint8_t other_sdram_optional;
// Reserved
uint8_t reserved_0;
// Module Nominal Voltage, VDD
uint8_t vdd;
// Module Organization
uint8_t ranks_width;
// Module Memory Bus Width
uint8_t memory_bus_width;
// Module Thermal Sensor
uint8_t thermal_sensor;
// Extended Module Type
uint8_t extended_module_type;
// Reserved
uint8_t reserved_1;
// Timebases
uint8_t timebases;
// SDRAM Minimum Cycle Time (tCKAVGmin)
uint8_t t_ckavg_min;
// SDRAM Maximum Cycle Time (tCKAVGmax)
uint8_t t_ckavg_max;
// CAS Latencies Supported, First Byte
uint8_t cas_first;
// CAS Latencies Supported, Second Byte
uint8_t cas_second;
// CAS Latencies Supported, Third Byte
uint8_t cas_third;
// CAS Latencies Supported, Fourth Byte
uint8_t cas_fourth;
// Minimum CAS Latency Time (tAAmin)
uint8_t t_aa_min;
// Minimum RAS to CAS Delay Time (tRCDmin)
uint8_t t_rcd_min;
// Minimum Row Precharge Delay Time (tRPmin)
uint8_t t_rp_min;
// Upper Nibbles for tRASmin and tRCmin
uint8_t t_rasmin_t_rcmin_upper_nibbles;
// Minimum Active to Precharge Delay Time (tRASmin), Least Significant Byte
uint8_t t_ras_min_lsb;
// Minimum Active to Active/Refresh Delay Time (tRCmin), Least
// Significant Byte
uint8_t t_rc_min_lsb;
// Minimum Refresh Recovery Delay Time (tRFC1min), Least Significant Byte
uint8_t t_rfc1_min_lsb;
// Minimum Refresh Recovery Delay Time (tRFC1min), Most Significant Byte
uint8_t t_rfc1_min_msb;
// Minimum Refresh Recovery Delay Time (tRFC2min), Least Significant Byte
uint8_t t_rfc2_min_lsb;
// Minimum Refresh Recovery Delay Time (tRFC2min), Most Significant Byte
uint8_t t_rfc2_min_msb;
// Minimum Refresh Recovery Delay Time (tRFC4min), Least Significant Byte
uint8_t t_rfc4_min_lsb;
// Minimum Refresh Recovery Delay Time (tRFC4min), Most Significant Byte
uint8_t t_rfc4_min_msb;
// Minimum Four Activate Window Time (tFAWmin), Most Significant Nibble
uint8_t t_faw_min_ms_nibble;
// Minimum Four Activate Window Time (tFAWmin), Least Significant Byte
uint8_t t_faw_min_lsb;
// Minimum Activate to Activate Delay Time (tRRD_Smin), different bank group
uint8_t t_rrd_smin_diff_bank;
// Minimum Activate to Activate Delay Time (tRRD_Lmin), same bank group
uint8_t t_rrd_smin_same_bank;
// Minimum CAS to CAS Delay Time (tCCD_Lmin), same bank group
uint8_t t_ccd_lmin_same_bank;
// Reserved
uint8_t reserved_2[19];
// Connector to SDRAM Bit Mapping
uint8_t connector_to_sdram[18];
// Reserved
uint8_t reserved_3[39];
// Fine Offset for Minimum CAS to CAS Delay Time (tCCD_Lmin), same bank
// group
uint8_t fine_t_ccd_lmin_same_bank;
// Fine Offset for Minimum Activate to Activate Delay Time (tRRD_Lmin), same
// bank group
uint8_t fine_t_rrd_lmin_same_bank;
// Fine Offset for Minimum Activate to Activate Delay Time (tRRD_Smin),
// different bank group
uint8_t fine_t_rrd_smin_diff_bank;
// Fine Offset for Minimum Activate to Activate/Refresh Delay Time (tRCmin)
uint8_t fine_t_rc_min;
// Fine Offset for Minimum Row Precharge Delay Time (tRPmin)
uint8_t fine_t_rp_min;
// Fine Offset for Minimum RAS to CAS Delay Time (tRCDmin)
uint8_t fine_t_rcd_min;
// Fine Offset for Minimum CAS Latency Time (tAAmin)
uint8_t fine_t_aa_min;
// Fine Offset for SDRAM Maximum Cycle Time (tCKAVGmax)
uint8_t fine_t_ckavg_max;
// Fine Offset for SDRAM Minimum Cycle Time (tCKAVGmin)
uint8_t fine_t_ckavg_min;
// CRC for Base Configuration Section, Least Significant Byte
uint8_t crc_base_config_lsb;
// CRC for Base Configuration Section, Most Significant Byte
uint8_t crc_base_config_msb;
// Module-Specific Section: Bytes 60-116
uint8_t module_specific_section[128];
// Reserved
uint8_t reserved_4[64];
// Module Manufacturer ID Code, Least Significant Byte
uint8_t module_manufacturer_id_cont_bytes;
// Module Manufacturer ID Code, Most Significant Byte
uint8_t module_manufacturer_id_index;
// Module Manufacturing Location
uint8_t manufacturing_location;
// Module Manufacturing Date
uint8_t manufacturing_year; // BCD
uint8_t manufacturing_week; // BCD
// Module Serial Number
uint8_t serial_number[4];
// Module Part Number
uint8_t part_number[20];
// Module Revision Code
uint8_t revision_code;
// DRAM Manufacturer ID Code, Least Significant Byte
uint8_t dram_manufacturer_id_cont_bytes;
// DRAM Manufacturer ID Code, Most Significant Byte
uint8_t dram_manufacturer_id_index;
// DRAM Stepping
uint8_t dram_stepping;
// Manufacturer's Specific Data
uint8_t manufacturer_data[29];
// Reserved
uint8_t reserved_5[2];
// Open for Customer Use
uint8_t customer_data[128];
}
'''
from enum import Enum
from acpi_validation_tool import utils
| [
7061,
6,
201,
198,
1003,
47639,
286,
7032,
287,
257,
30085,
19,
30628,
13,
201,
198,
220,
3373,
18629,
2214,
16969,
389,
2077,
3264,
422,
262,
449,
1961,
2943,
1020,
13,
201,
198,
220,
2878,
360,
7109,
19,
15878,
82,
1391,
201,
198,... | 2.500838 | 2,386 |
from celery.schedules import crontab
from celery.task.base import periodic_task
from corehq.apps.callcenter.indicator_sets import CallCenterIndicators
from corehq.apps.callcenter.utils import get_call_center_domains, is_midnight_for_domain, get_call_center_cases
from celery.utils.log import get_task_logger
logger = get_task_logger(__name__)


@periodic_task(run_every=crontab(minute='*/15'), queue='background_queue')
def calculate_indicators():
    """
    Although this task runs every 15 minutes it only re-calculates the
    indicators for a domain if we're within 15 minutes after midnight in
    the domain's timezone.
    """
    # Collect domains that are just past one of their midnights and have
    # fixtures enabled. A domain is appended once per qualifying midnight,
    # preserving the duplicate semantics of the original comprehension.
    domains = []
    for domain in get_call_center_domains():
        for midnight in domain.midnights():
            if is_midnight_for_domain(midnight, error_margin=20) and domain.use_fixtures:
                domains.append(domain)
    logger.info("Calculating callcenter indicators for domains:\n{}".format(domains))
    for domain in domains:
        all_cases = get_call_center_cases(domain.name, domain.cc_case_type)
        # override_cache=True presumably forces a fresh computation once per
        # day per domain -- confirm against CallCenterIndicators.
        CallCenterIndicators(
            domain.name,
            domain.default_timezone,
            domain.cc_case_type,
            user=None,
            override_cases=all_cases,
            override_cache=True
        ).get_data()
| [
6738,
18725,
1924,
13,
1416,
704,
5028,
1330,
1067,
756,
397,
198,
6738,
18725,
1924,
13,
35943,
13,
8692,
1330,
27458,
62,
35943,
198,
6738,
4755,
71,
80,
13,
18211,
13,
13345,
16159,
13,
521,
26407,
62,
28709,
1330,
4889,
23656,
549... | 2.568359 | 512 |
#!/usr/bin/env python
# Copyright (C) 2011
# Brett Alistair Kromkamp - brettkromkamp@gmail.com
# Copyright (C) 2012-2017
# Xiaming Chen - chenxm35@gmail.com
# and other contributors.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from warnings import warn, simplefilter
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
15069,
357,
34,
8,
2813,
198,
2,
18726,
978,
396,
958,
509,
398,
40899,
532,
1449,
926,
74,
398,
40899,
31,
14816,
13,
785,
198,
2,
15069,
357,
34,
8,
2321,
12,
5539,
198,
2,
... | 3.61435 | 223 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-05-06 10:37
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
13,
1959,
319,
12131,
12,
2713,
12,
3312,
838,
25,
2718,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,... | 2.728571 | 70 |
from project.fish.base_fish import BaseFish
| [
6738,
1628,
13,
11084,
13,
8692,
62,
11084,
1330,
7308,
39428,
201,
198,
201,
198
] | 3.133333 | 15 |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
class attrdict(dict):
'''
Use dict key as attribute if available
'''
@classmethod
@classmethod
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
2603,
29487,
8019,
1330,
12067,
628,
198,
4871,
708,
4372,
713,
7,
11600,
2599,
628,
220,
220,
220,
705,
7061,
198,
220,
220,
220,... | 2.77027 | 74 |
from setting.Ini import Ini
from _python.designpattern.Singleton import Singleton
import pathlib
from setting.YamlMeta import YamlMeta
import yaml
#class Config(metaclass=Singleton):
| [
6738,
4634,
13,
818,
72,
1330,
554,
72,
220,
198,
6738,
4808,
29412,
13,
26124,
33279,
13,
29974,
10565,
1330,
5573,
10565,
198,
11748,
3108,
8019,
198,
6738,
4634,
13,
56,
43695,
48526,
1330,
14063,
75,
48526,
198,
11748,
331,
43695,
... | 3.407407 | 54 |
import json
import logging
import os
import apache_beam as beam
from xml.etree import ElementTree
from apache_beam.coders import coders
logger = logging.getLogger(__name__)
def run_pipeline(config):
    """
    Execute the SOTorrent pipeline in Google Cloud.

    For every (table_name, input_path) pair in config.input_paths this
    builds one Beam pipeline that reads the XML dump, keeps only <row>
    elements, converts each into a dict of its attributes, and writes the
    result both to a BigQuery table and to a single-shard JSONL file under
    config.pipeline['output_dir'].

    :param config: object exposing input_paths, pipeline settings,
        BigQuery schemas and per-table Beam pipeline options
    :return: None
    """
    input_paths = config.input_paths
    output_dir = config.pipeline['output_dir']
    logger.info(f"Writing output of pipeline to '{output_dir}'")
    for table_name, input_path in input_paths.items():
        logger.info(f"Reading and converting XML file for table '{table_name}' from '{input_path}'...")
        # One pipeline per table; leaving the with-block runs it to completion.
        with beam.Pipeline(options=config.get_pipeline_options(table_name)) as p:
            dict_elements = (p
                             | "Read XML file" >> beam.io.ReadFromText(input_path)
                             | "Ignore non-row elements" >> beam.Filter(filter_rows)
                             | "Convert XML attributes to dict elements" >> beam.Map(xml_attributes_to_dict))
            bigquery_dataset = config.pipeline['bigquery_dataset']
            logger.info(f"Writing data into BigQuery dataset '{bigquery_dataset}'")
            # WRITE_EMPTY makes the load fail if the target table already holds data.
            (dict_elements | "Write data into BigQuery table" >> beam.io.WriteToBigQuery(
                f'{bigquery_dataset}.{table_name}',
                schema=config.bigquery_schemas_with_fields[table_name],
                write_disposition=beam.io.BigQueryDisposition.WRITE_EMPTY,
                create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED))
            file_name_without_extension = os.path.join(output_dir, table_name)
            logger.info(f"Writing data to JSONL file '{file_name_without_extension}.jsonl'")
            (dict_elements | "Writing data to JSONL file" >> WriteToJson(file_name_without_extension, num_shards=1))
    logger.info(f"Pipeline finished.")
def filter_rows(input_str):
    """
    Keep only lines that open a <row> XML element.

    :param input_str: a line that may hold a <row> element (root elements
        such as <posts> are rejected)
    :return: True when the line, ignoring leading whitespace, starts '<row'
    """
    stripped = input_str.lstrip()
    return stripped.startswith('<row')
def xml_attributes_to_dict(xml_str):
    """
    Parse a single XML <row> element and return its attribute map.

    :param xml_str: string containing one XML element
    :return: dict mapping attribute names to their string values
    """
    element = ElementTree.fromstring(xml_str)
    return element.attrib
class JsonSink(beam.io.FileBasedSink):
    """
    An Apache Beam sink for writing JSON files.

    Records are written one step behind: each incoming value is buffered in
    self.previous_row and flushed (with a trailing comma) only when the next
    value arrives, so close() can emit the final record without a comma and
    produce valid JSON. When self.write_jsonl is true, no enclosing list
    brackets or commas are written (one JSON object per line).

    NOTE(review): self.write_jsonl and self.previous_row are read but never
    initialized in this class -- presumably set by the (not shown)
    constructor or by WriteToJson; confirm. Also, close() indexes
    self.previous_row[file_handle] unconditionally, which looks like it
    would fail on a shard that received no records -- confirm.

    See also: https://stackoverflow.com/a/43185539
    """
    def open(self, temp_path):
        """
        Open JSON file and initialize it with an opening square bracket, i.e. a JSON list.
        """
        file_handle = super(JsonSink, self).open(temp_path)
        if not self.write_jsonl:
            # JSONL mode has no enclosing list; plain JSON opens one here.
            file_handle.write(self.coder.encode('[\n'))
        return file_handle
    def write_record(self, file_handle, value):
        """
        Converts a single record to an encoded JSON and writes it terminated by a comma.
        """
        # write previous encoded value and store current value (to be able to handle the last value differently)
        if self.previous_row.get(file_handle, None) is not None:
            file_handle.write(self.coder.encode(json.dumps(self.previous_row[file_handle])))
            if not self.write_jsonl:
                file_handle.write(self.coder.encode(','))
            file_handle.write(self.coder.encode('\n'))
        self.previous_row[file_handle] = value
    def write_encoded_record(self, file_handle, encoded_value):
        """Writes a single encoded record to the file handle returned by ``open()``.

        Not supported by this sink; records go through write_record instead.
        """
        raise NotImplementedError
    def close(self, file_handle):
        """
        Add closing square bracket to finalize the JSON list and close the file handle
        """
        if file_handle is not None:
            # write last row without a comma
            file_handle.write(self.coder.encode(json.dumps(self.previous_row[file_handle])))
            if not self.write_jsonl:
                # close JSON list
                file_handle.write(self.coder.encode('\n]\n'))
            # close file handle
            file_handle.close()
class WriteToJson(beam.PTransform):
"""
A PTransform writing to a JsonSink.
"""
| [
11748,
33918,
198,
11748,
18931,
198,
11748,
28686,
198,
198,
11748,
2471,
4891,
62,
40045,
355,
15584,
198,
198,
6738,
35555,
13,
316,
631,
1330,
11703,
27660,
198,
6738,
2471,
4891,
62,
40045,
13,
19815,
364,
1330,
14873,
364,
198,
19... | 2.376868 | 1,807 |
from __future__ import print_function
def cut_rod(price, length):
    """ Naive recursive cut rod.

    price[i] is the price of a piece of length i + 1; returns the maximum
    income obtainable from a rod of the given length.
    """
    if length == 0:
        return 0
    best = float("-Inf")
    for first in range(length):
        # Sell a first piece of length first + 1, then cut the remainder optimally.
        candidate = price[first] + cut_rod(price, length - first - 1)
        if candidate > best:
            best = candidate
    return best
def memoized_cut_rod(price, length):
    """ Top-down cut rod with memoized incomes. """
    # Seed the cache: income 0 for length 0, "unknown" (-Inf) everywhere else.
    cache = [0] + [float("-Inf")] * length
    return memoized_cut_rod_aux(price, length, cache)
def memoized_cut_rod_aux(price, length, incomelst):
    """ Recursive helper: look up or compute the best income for `length`.

    incomelst caches known incomes; unknown entries hold -Inf.
    """
    if incomelst[length] >= 0:
        # Already solved (or the trivial zero-length case).
        return incomelst[length]
    best = float("-Inf")
    for first in range(length):
        best = max(best,
                   price[first] +
                   memoized_cut_rod_aux(price, length - first - 1, incomelst))
    incomelst[length] = best
    return best
def bottom_up_cut_rod(price, length):
    """ Iterative (bottom-up) cut rod; returns the best income for `length`. """
    # best[k] holds the optimal income for a rod of length k.
    best = [0] + [float("-Inf")] * length
    for sub_len in range(1, length + 1):
        # Optimal income is the best over all possible first-cut sizes.
        best[sub_len] = max(price[first] + best[sub_len - first - 1]
                            for first in range(sub_len))
    return best[length]
def ext_bottom_up_cut_rod(price, length):
    """ Bottom-up cut rod that also records the optimal first-cut size.

    Returns (incomelst, cutlst): incomelst[k] is the best income for a rod
    of length k, cutlst[k] is the size of the first piece to cut from it.
    """
    incomelst = [0] + [float("-Inf")] * length
    cutlst = [0] * (length + 1)
    for sub_len in range(1, length + 1):
        best = float("-Inf")
        for first in range(sub_len):
            candidate = price[first] + incomelst[sub_len - first - 1]
            if best < candidate:
                best = candidate
                # Remember which first-cut size achieved this income.
                cutlst[sub_len] = first + 1
        incomelst[sub_len] = best
    return incomelst, cutlst
if __name__ == '__main__':
    # Fix 1: `if __name__ in '__main__'` was a substring test; use equality.
    # price for each length; index i holds the price of a rod of length i + 1
    # length:1 2 3 4 5   6   7   8   9   10
    PRICE = [1, 5, 8, 9, 10, 17, 17, 20, 25, 30]
    # rod length to analyse
    ROD = 7
    print('simple cut rod :', cut_rod(PRICE, ROD))
    print('memoized cut rod :', memoized_cut_rod(PRICE, ROD))
    print('bottom up cut rod:', bottom_up_cut_rod(PRICE, ROD))
    # Fix 2: the original called print_cut_rod, which is not defined anywhere
    # and raised a NameError. Reconstruct the optimal cut sizes from the
    # extended bottom-up solution instead.
    _, cuts = ext_bottom_up_cut_rod(PRICE, ROD)
    pieces = []
    remaining = ROD
    while remaining > 0:
        pieces.append(cuts[remaining])
        remaining -= cuts[remaining]
    print('optimal cutting of the rod:', pieces)
| [
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
4299,
2005,
62,
14892,
7,
20888,
11,
4129,
2599,
198,
220,
220,
220,
37227,
24043,
2005,
15299,
11862,
198,
220,
220,
220,
220,
220,
220,
220,
1441,
1988,
286,
5415,
3739,
... | 2.44603 | 1,121 |
import re
import json
import flask
_URIPATH_REGEX = re.compile(r'http[s]?://[^/]+/(.*)')
| [
11748,
302,
198,
11748,
33918,
198,
11748,
42903,
628,
198,
62,
4261,
4061,
12599,
62,
31553,
6369,
796,
302,
13,
5589,
576,
7,
81,
6,
4023,
58,
82,
60,
30,
1378,
58,
61,
14,
48688,
29006,
15885,
8,
11537,
198
] | 2.275 | 40 |
# Generated by Django 3.2.6 on 2022-01-08 17:35
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
21,
319,
33160,
12,
486,
12,
2919,
1596,
25,
2327,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
# Generated by Django 2.2.4 on 2019-08-21 15:41
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
19,
319,
13130,
12,
2919,
12,
2481,
1315,
25,
3901,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
def _attr_or(obj, name, default):
    """Return obj.<name>, or `default` when the lookup raises AttributeError."""
    try:
        return getattr(obj, name)
    except AttributeError:
        return default


def create_tag(item, tag="a"):
    """Wrap an HTML tag with data-attributes around an item's text.

    Missing attributes on `item` fall back to placeholder strings so a
    partially populated object still renders. (Refactor: the four
    copy-pasted try/except blocks are collapsed into the _attr_or helper.)

    :param item: object exposing text, language, id and get_absolute_url()
    :param tag: HTML tag name, defaults to an anchor
    :return: the rendered HTML string

    NOTE(review): attribute values and text are interpolated without HTML
    escaping -- confirm `item` fields are trusted, or escape them upstream.
    """
    item_text = _attr_or(item, "text", "no text provided")
    item_lang = _attr_or(item, "language", "no lang provided")
    item_id = _attr_or(item, "id", "no ID")
    try:
        # get_absolute_url may be absent entirely, hence the guard around the call.
        item_url = item.get_absolute_url()
    except AttributeError:
        item_url = "#"
    return "<{} data-lang='{}' data-id='{}' href='{}'>{}</{}>".format(
        tag, item_lang, item_id, item_url, item_text, tag
    )
| [
4299,
2251,
62,
12985,
7,
9186,
11,
7621,
2625,
64,
1,
2599,
198,
220,
220,
220,
37227,
617,
2163,
284,
7917,
381,
11532,
12,
12985,
351,
1366,
12,
1078,
7657,
1088,
257,
4731,
37811,
198,
220,
220,
220,
1949,
25,
198,
220,
220,
2... | 2.221854 | 302 |
#!/usr/bin/env python
from loginsightwebhookdemo import app, parse, callapi
from flask import request, json
import logging
__author__ = "Steve Flanders"
__license__ = "Apache v2"
__verion__ = "1.0"
# Parameters
JENKINSURL = 'https://wh.jandi.com/connect-api/webhook/15292345/a76ad35760d264ff84ddc964e35efa2f'
# Only required if not passed
#JENKINSJOBNAME = ''
#JENKINSTOKEN = ''
# Route without <ALERTID> are for LI, with are for vROps
@app.route("/endpoint/jenkins", methods=['POST'])
@app.route("/endpoint/jenkins/<ALERTID>", methods=['POST','PUT'])
@app.route("/endpoint/jenkins/<JOBNAME>/<TOKEN>", methods=['POST'])
@app.route("/endpoint/jenkins/<JOBNAME>/<TOKEN>/<ALERTID>", methods=['POST','PUT'])
def jenkins(ALERTID=None, JOBNAME=None, TOKEN=None):
"""
If called, run a Jenkins job without parameters -- request results are discarded.
Requires `JENKINSURL defined in the form `https://jenkins.domain.com`.
If `JOBNAME` and `TOKEN` are not passed then the must be defined
For more information, see https://wiki.jenkins-ci.org/display/JENKINS/Remote+access+API
"""
if not JENKINSURL or (not JENKINSJOBNAME and not JOBNAME) or (not JENKINSTOKEN and not TOKEN):
return ("Parameters must be set, please edit the shim!", 500, None)
# We need to make the Jenkins URL
#if TOKEN:
# URL = JENKINSURL + "/job/" + JOBNAME + "/build?token=" + TOKEN
#else:
# URL = JENKINSURL + "/job/" + JENKINSJOBNAME + "/build?token=" + JENKINSTOKEN
# No need to parse the request as we just want to run a job
#a = parse(request)
#payload = {
# "body": a['info'],
# "title": a['AlertName'],
# "type": "link",
# "url": a['url'],
#}
URL = JENKINSURL
payload = {
"body" : "[[PizzaHouse]](http://url_to_text) You have a new Pizza order.",
"connectColor" : "#FAC11B",
"connectInfo" : [{
"title" : "Topping",
"description" : "Pepperoni"
},
{
"title": "Location",
"description": "Empire State Building, 5th Ave, New York",
"imageUrl": "http://url_to_text"
}]
}
headers = {'Accept': 'application/vnd.tosslab.jandi-v2+json' , 'Content-Type': 'application/json'}
if headers:
return callapi(URL, 'post', json.dumps(payload), headers)
else:
return callapi(URL, 'post', json.dumps(payload))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
2604,
1040,
432,
12384,
25480,
9536,
78,
1330,
598,
11,
21136,
11,
869,
15042,
198,
6738,
42903,
1330,
2581,
11,
33918,
198,
11748,
18931,
628,
198,
834,
9800,
834,
796,
366... | 2.264253 | 1,105 |
# -*- coding: utf-8 -*-
"""
@Time : 2020/3/11 16:28
@Author : 半纸梁
@File : urls.py
"""
from django.urls import path
from BDUser import views
app_name = "bd"
urlpatterns = [
path("register/", views.register, name="register")
] | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
31,
7575,
220,
220,
220,
1058,
12131,
14,
18,
14,
1157,
1467,
25,
2078,
198,
31,
13838,
220,
1058,
10263,
235,
232,
163,
118,
116,
162,
95,
223,
198,
31,... | 2.192661 | 109 |
"""
Exercício Python 095: Aprimore o desafio 93 (https://github.com/ItanuRomero/PythonStudyPrograms/blob/master/ProgramsToRead/ExampleDictionaryFunctions003.py) para que ele funcione com vários jogadores, incluindo um sistema de visualização de detalhes do aproveitamento de cada jogador.
"""
jogador = dict()
lista_jogadores = list()
while True:
jogador['nome'] = str(input('Nome do jogador: ')).capitalize()
jogador['partidas'] = int(input('Quantidade de partidas: '))
jogador['gol por partida'] = list()
for contador in range(1, jogador['partidas'] + 1):
gol_partida = int(input(f'Quantos gols na partida {contador}: '))
jogador['gol por partida'].append(gol_partida)
if contador == 1:
jogador['total de gols'] = gol_partida
else:
jogador['total de gols'] += gol_partida
jogador['aproveitamento'] = jogador['total de gols'] / jogador['partidas']
lista_jogadores.append(jogador.copy())
while True:
resposta = str(input('Continuar? [s/n] ')).strip().lower()[0]
if resposta in 'sn':
break
print('ERRO, digite novamente: ')
if resposta == 'n':
break
print(f'{"FICHAS":-^40}')
print(f'{"No. "}{"NOME":<10}{"PARTIDAS":>10}{"TOTAL DE GOLS":>15}')
for index, dicionario in enumerate(lista_jogadores):
print(f'{index:^4}{dicionario["nome"]:<10}{dicionario["partidas"]:>10}'
f'{dicionario["total de gols"]:>15}')
print(f'{" MAIS DETALHES ":=^40}')
while True:
while True:
busca_aproveitamento = int(input('Digite o numero do jogador: '))
if busca_aproveitamento <= len(lista_jogadores) or busca_aproveitamento == 999:
break
print('Nao encontramos o jogador desse numero, digite novamente.\n'
'(digite 999 para parar)')
if busca_aproveitamento == 999:
break
print('Aqui esta:')
for jogo, gols in enumerate(lista_jogadores[busca_aproveitamento]['gol por partida']):
print(f'No jogo {jogo + 1}, '
f'{lista_jogadores[busca_aproveitamento]["nome"]} fez {gols} gols.')
print()
print(f'\n{"ENCERRANDO":-^40}')
| [
37811,
198,
3109,
2798,
8836,
66,
952,
11361,
657,
3865,
25,
2758,
9401,
267,
748,
1878,
952,
10261,
357,
5450,
1378,
12567,
13,
785,
14,
1026,
42357,
22834,
3529,
14,
37906,
39841,
15167,
82,
14,
2436,
672,
14,
9866,
14,
15167,
82,
... | 2.167677 | 990 |
#!/usr/bin/python3
import numpy as np
import torch
from torch import Tensor
from torch.utils.data import Dataset, DataLoader
import pickle
import os
import librosa
from feature_extraction import LFCC
from torch.utils.data.dataloader import default_collate
lfcc = LFCC(320, 160, 512, 16000, 20, with_energy=False)
wavform = torch.Tensor(np.expand_dims([0]*3200, axis=0))
lfcc_silence = lfcc(wavform)
silence_pad_value = lfcc_silence[:,0,:].unsqueeze(0)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
6738,
28034,
1330,
309,
22854,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
16092,
292,
316,
11,
6060,
17401,
198,
11748,
2298,
... | 2.668605 | 172 |
from imdbpie import Imdb
from bs4 import BeautifulSoup
from termcolor import colored
import requests
imdb = Imdb(cache=True)
| [
6738,
545,
9945,
21749,
1330,
1846,
9945,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
6738,
3381,
8043,
1330,
16396,
198,
11748,
7007,
198,
320,
9945,
796,
1846,
9945,
7,
23870,
28,
17821,
8,
628
] | 3.405405 | 37 |
from serif.model.relation_model import RelationModel
| [
6738,
1055,
361,
13,
19849,
13,
49501,
62,
19849,
1330,
4718,
341,
17633,
628
] | 3.857143 | 14 |
#
""" Implementation based on 'pkg_resources' from 'setuptools'
"""
import copy
import enum
import pkg_resources
INDENTATION = 2
def main(user_selection, is_reverse, is_flat):
""" Main function """
#
preselection = _make_preselection(user_selection, is_reverse)
(distributions, selection) = _discover_distributions(
preselection,
is_reverse,
is_flat,
)
#
for requirement_key in sorted(selection):
requirement = selection[requirement_key]
if is_flat:
if is_reverse:
_display_reverse_flat(distributions, requirement)
else:
_display_forward_flat(distributions, requirement)
else:
if is_reverse:
_display_reverse_tree(distributions, requirement, [])
else:
_display_forward_tree(distributions, requirement, [])
# EOF
| [
2,
198,
198,
37811,
46333,
1912,
319,
705,
35339,
62,
37540,
6,
422,
705,
2617,
37623,
10141,
6,
198,
37811,
198,
198,
11748,
4866,
198,
11748,
33829,
198,
198,
11748,
279,
10025,
62,
37540,
198,
198,
12115,
3525,
6234,
796,
362,
628,... | 2.272506 | 411 |
from __future__ import annotations
import base64
import logging
import pickle
import threading
from pathlib import Path
from typing import TYPE_CHECKING, Any
import click
from flask import Flask, current_app
from rich.box import SIMPLE_HEAVY
from rich.table import Table
from textual.app import App
from textual.widget import Widget
import pandas as pd
from traffic import config
from traffic.data import ModeS_Decoder, aircraft
if TYPE_CHECKING:
from traffic.core.structure import Airport
app = Flask(__name__)
@app.route("/")
@app.route("/traffic")
@click.command()
@click.argument("source")
@click.option(
"-r",
"--reference",
"initial_reference",
help="Reference position (airport code)",
)
@click.option(
"-f",
"--filename",
default="~/ADSB_EHS_RAW_%Y%m%d.csv",
show_default=True,
help="Filename pattern describing where to dump raw data",
)
@click.option(
"--host",
"serve_host",
show_default=True,
default="127.0.0.1",
help="host address where to serve decoded information",
)
@click.option(
"--port",
"serve_port",
show_default=True,
default=5050,
type=int,
help="port to serve decoded information",
)
@click.option(
"--tui",
is_flag=True,
show_default=True,
default=False,
help="Display aircraft table in text user interface mode",
)
@click.option("-v", "--verbose", count=True, help="Verbosity level")
if __name__ == "__main__":
main()
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
11748,
2779,
2414,
198,
11748,
18931,
198,
11748,
2298,
293,
198,
11748,
4704,
278,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
19720,
1330,
41876,
62,
50084,
2751,
11,
4377,
198,
198,
... | 2.765038 | 532 |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
chrome_driver_path = "C:\Development\chromedriver_win32\chromedriver.exe"
options = webdriver.ChromeOptions()
options.add_experimental_option('excludeSwitches', ['enable-logging'])
driver = webdriver.Chrome(executable_path=chrome_driver_path, options=options)
driver.get("https://en.wikipedia.org/wiki/Main_Page")
article_count = driver.find_element_by_css_selector("#articlecount a")
# article_count.click()
all_portals = driver.find_element_by_link_text("All portals")
# all_portals.click()
search = driver.find_element_by_name("search")
search.send_keys("Python")
search.send_keys(Keys.ENTER)
| [
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11321,
13,
13083,
1330,
26363,
198,
198,
46659,
62,
26230,
62,
6978,
796,
366,
34,
7479,
41206,
59,
28663,
276,
38291,
62,
5404,
2624,
59,
2... | 3.044843 | 223 |
import pytest
from pywps import Service
from pywps.tests import assert_response_success
import requests
from .common import TESTDATA, client_for
from hummingbird.processes.wps_cfchecker import CFChecker
@pytest.mark.skip("cfchecker not installed")
@pytest.mark.skipif(
requests.head(TESTDATA['noaa_nc_1']).ok is False,
reason="website unavailable")
@pytest.mark.online
| [
11748,
12972,
9288,
198,
6738,
12972,
86,
862,
1330,
4809,
198,
6738,
12972,
86,
862,
13,
41989,
1330,
6818,
62,
26209,
62,
13138,
198,
198,
11748,
7007,
198,
198,
6738,
764,
11321,
1330,
43001,
26947,
11,
5456,
62,
1640,
198,
6738,
4... | 3 | 127 |
from django.core.cache import get_cache
| [
6738,
42625,
14208,
13,
7295,
13,
23870,
1330,
651,
62,
23870,
628,
628,
198
] | 3.142857 | 14 |
import requests
from urllib import parse
import pandas as pd
dashboard_url = ''
output_path = ''
cert = '' # VPN cert if needed
tags = [] # tags to filter among anns
api_key = ''
server = ''
endpoint_path = "/api/annotations/"
url = dashboard_url
input_dict = parse.parse_qs(parse.urlsplit(url).query)
dashboard_uid = parse.urlsplit(url).path.split('/')[2]
dashboardId = get_dashboard_id(dashboard_uid)
from_p = input_dict['from'][0]
to_p = input_dict['to'][0]
endpoint = f'{server}{endpoint_path}?orgId=1&from={from_p}&to={to_p}&tags={tags[0]}&dashboardId={dashboardId}'
r = requests.get(endpoint, auth=BearerAuth(api_key), verify=cert)
ann = r.json()
| [
11748,
7007,
198,
6738,
2956,
297,
571,
1330,
21136,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
42460,
3526,
62,
6371,
796,
10148,
198,
22915,
62,
6978,
796,
10148,
198,
22583,
796,
10148,
220,
1303,
21669,
5051,
611,
2622,
198,
... | 2.603922 | 255 |
# KNN
# Created by JKChang
# 27/01/2020, 15:53
# Tag:
# Description:
import operator
import matplotlib.pyplot as plt
from numpy import *
group, labels = createDataSet()
drawGraph(group, labels)
print(classify([0.5, 0.5], group, labels, 2))
| [
2,
509,
6144,
198,
2,
15622,
416,
449,
42,
1925,
648,
198,
2,
2681,
14,
486,
14,
42334,
11,
1315,
25,
4310,
198,
2,
17467,
25,
198,
2,
12489,
25,
220,
198,
198,
11748,
10088,
198,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
... | 2.638298 | 94 |
BTC_MAGIC_NUMBERS = {
"main": 0xD9B4BEF9,
"testnet": 0xDAB5BFFA,
"testnet3": 0x0709110B,
"regtest": 0xDAB5BFFA,
"namecoin": 0xFEB4BEF9
}
# The length of everything in the header minus the checksum
BTC_HEADER_MINUS_CHECKSUM = 20
BTC_HDR_COMMON_OFF = 24 # type: int
BTC_BLOCK_HDR_SIZE = 80
BTC_SHORT_NONCE_SIZE = 8
# Length of a sha256 hash
BTC_SHA_HASH_LEN = 32
BTC_IP_ADDR_PORT_SIZE = 18
BTC_COMPACT_BLOCK_SHORT_ID_LEN = 6
BTC_VARINT_MIN_SIZE = 3
# The services that we provide
# 1: can ask for full blocks.
# 0x20: Node that is compatible with the hard fork.
BTC_CASH_SERVICE_BIT = 0x20 # Bitcoin cash service bit
BTC_NODE_SERVICES = 1
BTC_CASH_SERVICES = 33
BTC_OBJTYPE_TX = 1
BTC_OBJTYPE_BLOCK = 2
BTC_OBJTYPE_FILTERED_BLOCK = 3
BTC_HELLO_MESSAGES = [b"version", b"verack"]
# Indicator byte compressing bitcoin blocks to indicate short id
BTC_SHORT_ID_INDICATOR = 0xFF
BTC_SHORT_ID_INDICATOR_AS_BYTEARRAY = bytearray([BTC_SHORT_ID_INDICATOR])
BTC_SHORT_ID_INDICATOR_LENGTH = 1
TX_VERSION_LEN = 4
TX_SEGWIT_FLAG_LEN = 2
TX_LOCK_TIME_LEN = 4
TX_SEGWIT_FLAG_VALUE = 1
NODE_WITNESS_SERVICE_FLAG = (1 << 3)
BTC_VARINT_SHORT_INDICATOR = 0xFD
BTC_VARINT_SHORT_INDICATOR_AS_BYTEARRAY = bytearray([BTC_VARINT_SHORT_INDICATOR])
BTC_VARINT_INT_INDICATOR = 0xFE
BTC_VARINT_INT_INDICATOR_AS_BYTEARRAY = bytearray([BTC_VARINT_INT_INDICATOR])
BTC_VARINT_LONG_INDICATOR = 0xFF
BTC_VARINT_LONG_INDICATOR_AS_BYTEARRAY = bytearray([BTC_VARINT_LONG_INDICATOR])
BTC_COMPACT_BLOCK_RECOVERY_TIMEOUT_S = 10
BTC_COMPACT_BLOCK_DECOMPRESS_MIN_TX_COUNT = 10000
BTC_DEFAULT_BLOCK_SIZE = 621000
BTC_MINIMAL_SUB_TASK_TX_COUNT = 2500
| [
35964,
62,
45820,
2149,
62,
41359,
33,
4877,
796,
1391,
198,
220,
220,
220,
366,
12417,
1298,
657,
87,
35,
24,
33,
19,
12473,
37,
24,
11,
198,
220,
220,
220,
366,
9288,
3262,
1298,
657,
87,
5631,
33,
20,
33,
5777,
32,
11,
198,
... | 2.168212 | 755 |
INT, FLOAT, STR = int, float, str
| [
12394,
11,
9977,
46,
1404,
11,
19269,
796,
493,
11,
12178,
11,
965,
628,
628,
628
] | 2.4375 | 16 |
from flask_script import Manager
from schedule import frontend, api
from werkzeug.wsgi import DispatcherMiddleware
from werkzeug.serving import run_simple
from schedule.core import db
manager = Manager(frontend.create_app())
@manager.command
@manager.command
@manager.command
if __name__ == '__main__':
manager.run(default_command='runserver')
| [
6738,
42903,
62,
12048,
1330,
9142,
198,
6738,
7269,
1330,
2166,
437,
11,
40391,
198,
6738,
266,
9587,
2736,
1018,
13,
18504,
12397,
1330,
3167,
8071,
2044,
34621,
1574,
198,
6738,
266,
9587,
2736,
1018,
13,
31293,
1330,
1057,
62,
36439... | 3.327103 | 107 |
#!/usr/bin/env python
# Copyright 2016 Attic Labs, Inc. All rights reserved.
# Licensed under the Apache License, version 2.0:
# http://www.apache.org/licenses/LICENSE-2.0
import argparse, os, subprocess, sys
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
15069,
1584,
3460,
291,
23500,
11,
3457,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
24843,
13789,
11,
2196,
362,
13,
15,
25,
198,
2,
2638,
1378,
2503,
13,
43073,
1... | 2.885057 | 87 |
from conftest import get_mysql_database
from synch.factory import get_writer
| [
6738,
369,
701,
395,
1330,
651,
62,
28744,
13976,
62,
48806,
198,
6738,
6171,
354,
13,
69,
9548,
1330,
651,
62,
16002,
628,
628
] | 3.333333 | 24 |
#! /usr/bin/env python
# --- import -------------------------------------------------------------------------------------
import os
from setuptools import setup, find_packages
# --- define -------------------------------------------------------------------------------------
here = os.path.abspath(os.path.dirname(__file__))
extra_files = []
extra_files.append(os.path.join(here, "CONTRIBUTORS"))
extra_files.append(os.path.join(here, "LICENSE"))
extra_files.append(os.path.join(here, "README.md"))
extra_files.append(os.path.join(here, "research_kit", "VERSION"))
# --- setup --------------------------------------------------------------------------------------
with open(os.path.join(here, "requirements.txt")) as f:
required = f.read().splitlines()
with open(os.path.join(here, "research_kit", "VERSION")) as version_file:
version = version_file.read().strip()
setup(
name="research_kit",
version=version,
packages=find_packages(),
package_data={"": extra_files},
install_requires=required,
author="Darien Morrow",
author_email="darienmorrow@gmail.com",
license="MIT",
url="https://github.com/darienmorrow/research_kit",
keywords="photophysics spectroscopy science",
entry_points={
"console_scripts": [
"dir_PL_work=research_kit.__main__:read_plot_save",
"dir_hl3=research_kit.__main__:dir_hl3",
]
},
classifiers=[
"Development Status :: 1 - Planning",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
],
)
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
628,
198,
2,
11420,
1330,
16529,
19351,
12,
628,
198,
11748,
28686,
198,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
628,
198,
2,
11420,
8160,
16529,
19351,
12,
628,
... | 2.946515 | 617 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import scipy.fftpack as osp_fft # TODO use scipy.fft once scipy>=1.4.0 is used
from jax import lax, numpy as jnp
from jax._src.util import canonicalize_axis
from jax._src.numpy.util import _wraps
# Implementation based on
# John Makhoul: A Fast Cosine Transform in One and Two Dimensions (1980)
@_wraps(osp_fft.dct)
@_wraps(osp_fft.dctn)
| [
2,
15069,
33448,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
733... | 3.234694 | 294 |
# Import packages
import csv # Read in case parameters and write out solutions
import numpy as np
#-----------------------------------------------------------------
with open('casefile.csv',newline='') as casefile: # load cases
casereader = csv.DictReader(casefile)
i = 0
caselist = {}
for row in casereader:
caselist[i] = row
i += 1
caselist[0]['dx']
base = case_param(caselist[0])
print('Inlet:',base.x0)
print('Spacing:',base.dx)
base_mesh = mesh(base) # base mesh object
Nx = base_mesh.Nx # too much text for commonly used variable
# Cursory Check
print('Inlet and node spacing:',base_mesh.x[0:5])
print('Number of elements:',Nx)
print('Outlet:',base_mesh.x[Nx-1])
base_mesh.output('base_mesh.dat') # Output mesh to file for full confirmation
fl1 = fluid(base_mesh,base.fl)
pm1 = por_med(base_mesh,base.pm)
print('Original pressure (0):',fl1.p[0:4])
fl1.p_lin(base_mesh)
print('Linear pressure:',fl1.p[0:4])
print('Original Velocity (correct):',fl1.u[0:4])
fl1.u = np.zeros(base_mesh.Nx)
fl1.darcyv(base_mesh,pm1)
print('Darcy Velocity:',fl1.u[0:4]) | [
2,
17267,
10392,
198,
11748,
269,
21370,
1303,
4149,
287,
1339,
10007,
290,
3551,
503,
8136,
198,
11748,
299,
32152,
355,
45941,
198,
2,
10097,
12,
198,
198,
4480,
1280,
10786,
7442,
7753,
13,
40664,
3256,
3605,
1370,
28,
7061,
8,
355... | 2.468889 | 450 |
# Copyright 2022 The Google AI Language Team Authors and
# The HuggingFace Inc. team.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from typing import Dict, List, Optional
from nemo.core.classes import Dataset
from nemo.core.neural_types import NeuralType, StringLabel, StringType
__all__ = ['PTuneTextClassificationDataset', 'token_wrapper']
| [
2,
15069,
33160,
383,
3012,
9552,
15417,
4816,
46665,
290,
198,
2,
383,
12905,
2667,
32388,
3457,
13,
1074,
13,
198,
2,
15069,
357,
66,
8,
12131,
11,
15127,
23929,
44680,
6234,
13,
220,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
... | 3.695652 | 253 |
import re
from typing import List
import string
from qanta import qlogging
from nltk import word_tokenize
from sklearn.model_selection import train_test_split
from qanta.datasets.abstract import TrainingData
log = qlogging.get(__name__)
ftp_patterns = {
"\n",
", for 10 points,",
", for ten points,",
"--for 10 points--",
"for 10 points, ",
"for 10 points--",
"for ten points, ",
"for 10 points ",
"for ten points ",
", ftp," "ftp,",
"ftp",
}
patterns = ftp_patterns | set(string.punctuation)
regex_pattern = "|".join([re.escape(p) for p in patterns])
regex_pattern += r"|\[.*?\]|\(.*?\)"
def clean_question(question: str):
"""
Remove pronunciation guides and other formatting extras
:param question:
:return:
"""
return re.sub(regex_pattern, "", question.strip().lower())
def preprocess_dataset(
data: TrainingData,
train_size=0.9,
test_size=0.1,
vocab=None,
class_to_i=None,
i_to_class=None,
create_runs=False,
full_question=False,
):
"""
This function does primarily text preprocessing on the dataset. It will return x_train and x_test as a list of
examples where each word is a tokenized word list (not padded). y_train and y_test is a list of indices coresponding
to the class labels that are associated with i_to_class and class_to_i. vocab consists of any word which occurred
in the training set.
TODO: Implement an option for maximum vocab size which takes the most frequently occurring words only.
:param data:
:param train_size:
:param vocab:
:param class_to_i:
:param i_to_class:
:param create_runs:
:param full_question:
:return:
"""
if full_question and create_runs:
raise ValueError(
"The options create_runs={} and full_question={} are not compatible".format(
create_runs, full_question
)
)
if train_size + test_size > 1:
raise ValueError(
f"Train + test must sum to 1 or less: train={train_size} test={test_size} sum={train_size + test_size}"
)
classes = set(data[1])
if class_to_i is None or i_to_class is None:
class_to_i = {}
i_to_class = []
for i, ans_class in enumerate(classes):
class_to_i[ans_class] = i
i_to_class.append(ans_class)
x_train = []
y_train = []
x_test = []
y_test = []
if vocab is None:
vocab = set()
question_runs_with_answer = list(zip(data[0], data[1]))
if train_size != 1:
train, test = train_test_split(
question_runs_with_answer, train_size=train_size, test_size=test_size
)
else:
train = question_runs_with_answer
test = []
for q, ans in train:
q_text = []
for sentence in q:
t_question = tokenize_question(sentence)
if create_runs or full_question:
q_text.extend(t_question)
else:
q_text = t_question
if len(t_question) > 0:
for w in t_question:
vocab.add(w)
if create_runs:
x_train.append(list(q_text))
elif not full_question:
x_train.append(q_text)
if not full_question:
y_train.append(class_to_i[ans])
if full_question:
x_train.append(q_text)
y_train.append(class_to_i[ans])
for q, ans in test:
q_text = []
for sentence in q:
t_question = tokenize_question(sentence)
if create_runs or full_question:
q_text.extend(t_question)
if not full_question:
x_test.append(list(q_text))
else:
q_text = t_question
x_test.append(q_text)
if not full_question:
y_test.append(class_to_i[ans])
if full_question:
x_test.append(q_text)
y_test.append(class_to_i[ans])
return (x_train, y_train, x_test, y_test, vocab, class_to_i, i_to_class)
| [
11748,
302,
198,
6738,
19720,
1330,
7343,
198,
11748,
4731,
198,
198,
6738,
10662,
4910,
1330,
10662,
6404,
2667,
198,
6738,
299,
2528,
74,
1330,
1573,
62,
30001,
1096,
198,
6738,
1341,
35720,
13,
19849,
62,
49283,
1330,
4512,
62,
9288,... | 2.117081 | 1,973 |
from PySide2 import QtWidgets
from PySide2.QtCore import Qt
from PySide2.QtGui import QPixmap
from node_launcher.gui.assets import asset_access
| [
6738,
9485,
24819,
17,
1330,
33734,
54,
312,
11407,
198,
6738,
9485,
24819,
17,
13,
48,
83,
14055,
1330,
33734,
198,
6738,
9485,
24819,
17,
13,
48,
83,
8205,
72,
1330,
1195,
47,
844,
8899,
198,
198,
6738,
10139,
62,
38722,
2044,
13,... | 2.862745 | 51 |
import matplotlib.pyplot as plt
# plt.ion()
# 在使用matplotlib的过程中,常常会需要画很多图,但是好像并不能同时展示许多图。这是因为python可视化库matplotlib的显示模式默认为阻塞(block)模式。
# 什么是阻塞模式那?我的理解就是在plt.show()之后,程序会暂停到那儿,并不会继续执行下去。
# 如果需要继续执行程序,就要关闭图片。那如何展示动态图或多个窗口呢?
# 这就要使用plt.ion()这个函数,使matplotlib的显示模式转换为交互(interactive)模式。即使在脚本中遇到plt.show(),代码还是会继续执行。
# 在交互模式下:
#
# plt.plot(x)或plt.imshow(x)是直接出图像,不需要plt.show()
# 如果在脚本中使用ion()命令开启了交互模式,没有使用ioff()关闭的话,则图像会一闪而过,并不会常留。要想防止这种情况,需要在plt.show()之前加上ioff()命令。
# 在阻塞模式下:
#
# 打开一个窗口以后必须关掉才能打开下一个新的窗口。这种情况下,默认是不能像Matlab一样同时开很多窗口进行对比的。
# plt.plot(x)或plt.imshow(x)是直接出图像,需要plt.show()后才能显示图像
plt.ion()
plt.plot([1.6, 2.7])
plt.title("interactive test")
plt.xlabel("index")
# plt.show()
ax = plt.gca()
ax.plot([3.1, 2.2])
plt.draw()
plt.ioff()
plt.plot([1.6, 2.7])
plt.show()
| [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
2,
458,
83,
13,
295,
3419,
198,
2,
10263,
250,
101,
45635,
18796,
101,
6759,
29487,
8019,
21410,
32573,
229,
163,
101,
233,
40792,
171,
120,
234,
30585,
116,
30585,
... | 0.824973 | 937 |
# -*- coding: utf-8 -*-
#
# QEMU documentation build configuration file for the 'specs' manual.
#
# This includes the top level conf file and then makes any necessary tweaks.
import sys
import os
qemu_docdir = os.path.abspath("..")
parent_config = os.path.join(qemu_docdir, "conf.py")
exec(compile(open(parent_config, "rb").read(), parent_config, 'exec'))
# This slightly misuses the 'description', but is the best way to get
# the manual title to appear in the sidebar.
html_theme_options['description'] = \
u'System Emulation Guest Hardware Specifications'
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
1195,
3620,
52,
10314,
1382,
8398,
2393,
329,
262,
705,
4125,
6359,
6,
10107,
13,
198,
2,
198,
2,
770,
3407,
262,
1353,
1241,
1013,
2393,
290,
788,
1838,
... | 3.19209 | 177 |
# coding: utf-8
"""
Implements various interpreters and modders for FEFF calculations.
"""
import os
from pymatgen.io.feff.sets import FEFFDictSet
from custodian.ansible.actions import FileActions, DictActions
from custodian.ansible.interpreter import Modder
class FeffModder(Modder):
"""
A Modder for FeffInput sets
"""
def __init__(self, actions=None, strict=True, feffinp=None):
"""
Args:
actions ([Action]): A sequence of supported actions. See
actions ([Action]): A sequence of supported actions. See
:mod:`custodian.ansible.actions`. Default is None,
which means DictActions and FileActions are supported.
strict (bool): Indicating whether to use strict mode. In non-strict
mode, unsupported actions are simply ignored without any
errors raised. In strict mode, if an unsupported action is
supplied, a ValueError is raised. Defaults to True.
feffinp (FEFFInput): A FeffInput object from the current directory.
Initialized automatically if not passed (but passing it will
avoid having to reparse the directory).
"""
self.feffinp = feffinp or FEFFDictSet.from_directory(".")
self.feffinp = self.feffinp.all_input()
actions = actions or [FileActions, DictActions]
super().__init__(actions, strict)
def apply_actions(self, actions):
"""
Applies a list of actions to the FEFF Input Set and rewrites modified
files.
Args:
actions [dict]: A list of actions of the form {'file': filename,
'action': moddermodification} or {'dict': feffinput_key,
'action': moddermodification}
"""
modified = []
for a in actions:
if "dict" in a:
k = a["dict"]
modified.append(k)
self.feffinp[k] = self.modify_object(a["action"], self.feffinp[k])
elif "file" in a:
self.modify(a["action"], a["file"])
else:
raise ValueError("Unrecognized format: {}".format(a))
if modified:
feff = self.feffinp
feff_input = "\n\n".join(
str(feff[k])
for k in ["HEADER", "PARAMETERS", "POTENTIALS", "ATOMS"]
if k in feff
)
for k, v in feff.items():
with open(os.path.join(".", k), "w") as f:
f.write(str(v))
with open(os.path.join(".", "feff.inp"), "w") as f:
f.write(feff_input)
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
3546,
1154,
902,
2972,
16795,
1010,
290,
953,
67,
364,
329,
18630,
5777,
16765,
13,
198,
37811,
198,
198,
11748,
28686,
198,
198,
6738,
279,
4948,
265,
5235,
13,
952,
13,
5036,
... | 2.109005 | 1,266 |
import json
from peewee import *
db = SqliteDatabase('assets.db')
class Base(Model):
"""
Base Model
"""
class User(Base):
"""
User Model
Attributes
---------
id: int
Primary key of the user
name: String
Name of user
"""
id = IntegerField(primary_key=True)
name = TextField()
class Job(Base):
"""
Job Model
Attributes
---------
id: int
Primary key of the Job posting
title: String
Title of job
company: String
Name of Company
"""
id = IntegerField(primary_key=True)
title = TextField()
company = TextField()
class Tag(Base):
"""
Tag Model
Attributes
---------
tag: Char
character that describes the tag
"""
tag = CharField()
class UserTags(Base):
"""
Tag Many To Many Table with Users
Attributes
---------
tag: Foreign key
linked to Tag Table
user_id: Foreign key
linked to User Table
"""
tag = ForeignKeyField(Tag)
user_id = ForeignKeyField(User)
class JobTags(Base):
"""
Tag Many To Many Table with Jobs Table
Attributes
---------
tag: Foreign key
linked to Tag Table
job_id: Foreign key
linked to Job Table
"""
tag = ForeignKeyField(Tag)
job_id = ForeignKeyField(Job)
class Assets:
    """
    A class to represent all the assets (users, jobs and their tag matches).

    Attributes
    ---------
    _db: SqliteDatabase
        database used.
    _users: List
        list of user ids currently loaded
    _jobs: List
        list of job ids currently loaded
    _matches: List
        user/job match rows produced by find_tag_match()

    Methods
    ---------
    update_users(users_json=None)
        Get User data.
    update_jobs(jobs_json=None)
        Get Job data.
    """

    def __init__(self):
        # BUG FIX: the loader methods below append to these attributes, but
        # they were never initialized anywhere in the class, so the first
        # call would raise AttributeError.
        # NOTE(review): _db is documented above but never assigned in this
        # class -- presumably the module-level database handle; confirm.
        self._users = []
        self._jobs = []
        self._matches = []

    # BUG FIX: the original stacked two @property decorators on this method.
    # A property cannot take arguments, so update_users was uncallable as a
    # method; both decorators are removed.
    def update_users(self, users_json=None):
        """
        Load user data from a JSON file into the User/Tag/UserTags tables.

        :param users_json: path of file that contains user data,
            None falls back to 'data/users.json'
        :type users_json: str
        :return: None
        """
        file_name = users_json if users_json is not None else 'data/users.json'
        with open(file_name) as users_file:
            user_data = json.load(users_file)
        for user in user_data:
            user_tags = user['tags']
            user_id = user['id']
            user_name = user['name']
            self._users.append(user_id)
            u, created = User.get_or_create(id=user_id, name=user_name)
            for tag in user_tags:
                t, created = Tag.get_or_create(tag=tag)
                UserTags.get_or_create(tag=t, user_id=u)

    def print_users(self):
        """
        Print Users stored in the database.
        """
        users_query = User.select()
        for user in users_query.dicts():
            print(str(user))

    def update_jobs(self, jobs_json=None):
        """
        Load job data from a JSON file into the Job/Tag/JobTags tables.

        :param jobs_json: path of file that contains job data,
            None falls back to 'data/jobs.json'
        :type jobs_json: str
        :return: None
        """
        file_name = jobs_json if jobs_json is not None else 'data/jobs.json'
        with open(file_name) as jobs_file:
            job_data = json.load(jobs_file)
        for job in job_data:
            job_id = job['id']
            job_name = job['title']
            job_company = job['company']
            job_tags = job['tags']
            self._jobs.append(job_id)
            j, created = Job.get_or_create(id=job_id, title=job_name, company=job_company)
            for tag in job_tags:
                t, created = Tag.get_or_create(tag=tag)
                JobTags.get_or_create(tag=t, job_id=j)

    def print_jobs(self):
        """
        Print Jobs stored in the database.
        """
        jobs_query = Job.select()
        for job in jobs_query.dicts():
            print(str(job))

    def find_tag_match(self):
        """
        Print every user/job pairing that shares at least two tags.

        :return: None
        """
        # Sub-query: per job id, the comma-joined list of its tag names.
        tags_query = (Tag
                      .select(JobTags.job_id, fn.GROUP_CONCAT(Tag.tag).alias("tags"))
                      .join(JobTags, on=(Tag.id == JobTags.tag_id))
                      .group_by(JobTags.job_id)
                      .order_by(JobTags.job_id))
        # Main query: the user id and the matching job posting's characteristics.
        query = (User
                 .select(User.id.alias("userID"), Job.id, Job.title, Job.company, tags_query.c.tags)
                 .join(UserTags, JOIN.LEFT_OUTER, on=(UserTags.user_id == User.id))  # joining the user's tags with user
                 .join(JobTags, on=(JobTags.tag_id == UserTags.tag_id))
                 .join(Job, on=(JobTags.job_id == Job.id))
                 .join(tags_query, on=(tags_query.c.job_id == Job.id))  # Link previous query (tags_query) with current
                 .group_by(User.id, Job.id)  # Grouping duplicate entries
                 .having(fn.count(JobTags.tag_id) >= 2))  # Constraint for the "jobs that match at least 2 tags"
        # Convert query to dictionaries and report each match.
        for job in query.dicts():
            self._matches.append(job)
            # Convert the tag string into an array.
            tags = job['tags'].split(',')
            # String description for the job.
            job_string = "'id': '{job_id}', 'title': '{title}', 'company': '{company}', 'tags': {tags}" \
                .format(job_id=job['id'], title=job['title'], company=job['company'], tags=tags)
            print("User " + str(job['userID']) + ' matched to {' + job_string + '}')
| [
11748,
33918,
198,
6738,
613,
413,
1453,
1330,
1635,
198,
198,
9945,
796,
311,
13976,
578,
38105,
10786,
19668,
13,
9945,
11537,
628,
198,
4871,
7308,
7,
17633,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
7308,
9104,
198,
220... | 2.119303 | 2,699 |
from swaps.model.market.candlestick import Candlestick
from swaps.model.market.candlestick_event import CandlestickEvent
from swaps.model.market.candlestick_req import CandlestickReq
from swaps.model.market.last_trade_bestquote import LastTradeAndBestQuote
from swaps.model.market.pricedepth import PriceDepth
from swaps.model.market.pricedepth_event import PriceDepthEvent
from swaps.model.market.pricedepth_req import PriceDepthReq
from swaps.model.market.pricedepth_bbo import PriceDepthBbo
from swaps.model.market.pricedepth_bbo_event import PriceDepthBboEvent
from swaps.model.market.market_detail_merged import MarketDetailMerged
from swaps.model.market.market_detail import MarketDetail
from swaps.model.market.market_detail_event import MarketDetailEvent
from swaps.model.market.market_detail_req import MarketDetailReq
from swaps.model.market.trade import Trade
from swaps.model.market.trade_detail import TradeDetail
from swaps.model.market.trade_detail_event import TradeDetailEvent
from swaps.model.market.trade_detail_req import TradeDetailReq
from swaps.model.market.market_ticker import MarketTicker
from swaps.model.market.depth_entry import DepthEntry
from swaps.model.market.mbp import Mbp
from swaps.model.market.mbp_increase_event import MbpIncreaseEvent
from swaps.model.market.mbp_full_event import MbpFullEvent
from swaps.model.market.mbp_req import MbpReq
| [
6738,
43997,
13,
19849,
13,
10728,
13,
46188,
32712,
624,
1330,
15518,
32712,
624,
198,
6738,
43997,
13,
19849,
13,
10728,
13,
46188,
32712,
624,
62,
15596,
1330,
15518,
32712,
624,
9237,
198,
6738,
43997,
13,
19849,
13,
10728,
13,
4618... | 3.483627 | 397 |
import math

# Read an angle in degrees and report its sine, cosine and tangent.
an = float(input("Digite um angulo: "))
seno = math.sin(math.radians(an))
cos = math.cos(math.radians(an))
# BUG FIX: the original computed math.tan(math.tan(an)) -- the tangent of the
# tangent of the raw degree value -- instead of converting degrees to radians
# first, as the sine and cosine lines above do.
tan = math.tan(math.radians(an))
print("O seno de {} é,{:.2f}".format(an, seno))
print("O cosseno de {} é,{:.2f}".format(an, cos))
print("A tangente de {} é,{:.2f}".format(an, tan))
11748,
10688,
198,
198,
272,
796,
12178,
7,
15414,
7203,
19511,
578,
23781,
3550,
43348,
25,
366,
4008,
198,
6248,
78,
796,
10688,
13,
31369,
7,
11018,
13,
6335,
1547,
7,
272,
4008,
198,
6966,
796,
10688,
13,
6966,
7,
11018,
13,
633... | 2.267176 | 131 |
"""
Checks that primitive values are not used in an
iterating/mapping context.
"""
# pylint: disable=missing-docstring,invalid-name,too-few-public-methods,no-init,no-self-use,import-error,unused-argument,bad-mcs-method-argument,wrong-import-position,no-else-return
from __future__ import print_function
# primitives
numbers = [1, 2, 3]
for i in numbers:
pass
for i in iter(numbers):
pass
for i in "123":
pass
for i in u"123":
pass
for i in b"123":
pass
for i in bytearray(b"123"):
pass
for i in set(numbers):
pass
for i in frozenset(numbers):
pass
for i in dict(a=1, b=2):
pass
# comprehensions
for i in [x for x in range(10)]:
pass
for i in {x for x in range(1, 100, 2)}:
pass
for i in {x: 10 - x for x in range(10)}:
pass
# generators
for i in powers_of_two():
pass
for i in powers_of_two: # [not-an-iterable]
pass
# check for custom iterators
class C(object):
"old-style iterator"
for i in C():
print(i)
test(*A()) # [not-an-iterable]
test(*B())
test(*B) # [not-an-iterable]
for i in A(): # [not-an-iterable]
pass
for i in B():
pass
for i in B: # [not-an-iterable]
pass
for i in range: # [not-an-iterable]
pass
# check that primitive non-iterable types are caught
for i in True: # [not-an-iterable]
pass
for i in None: # [not-an-iterable]
pass
for i in 8.5: # [not-an-iterable]
pass
for i in 10: # [not-an-iterable]
pass
# skip uninferable instances
from some_missing_module import Iterable
m = MyClass()
for i in m:
print(i)
# skip checks if statement is inside mixin/base/abstract class
# class is not named as abstract
# but still is deduceably abstract
| [
37811,
198,
7376,
4657,
326,
20049,
3815,
389,
407,
973,
287,
281,
198,
2676,
803,
14,
76,
5912,
4732,
13,
198,
37811,
198,
2,
279,
2645,
600,
25,
15560,
28,
45688,
12,
15390,
8841,
11,
259,
12102,
12,
3672,
11,
18820,
12,
32146,
... | 2.431429 | 700 |
import os
import numpy as np
import pandas as pd
#postive_300 = pd.read_pickle(r'300_pos_exs.pkl')
#postive_300 = pd.read_pickle(r'63_1a2o_neg_exs.pkl')
#postive_300 = pd.read_pickle(r'1000_decoy_exs.pkl')
#postive_300 = pd.read_pickle(r'1000_pos_exs.pkl')
#print("postive samples:", len(postive_300))
def run(workdir, out_path, in_file, isDecoy=False):
    '''
    Convert a pickled list of protein tensors into per-protein .npy files.

    :param workdir: base directory; in_file is read from it and out_path
        is created under it
    :param out_path: sub-directory (relative to workdir) for the .npy output
    :param in_file: pickle file containing a sequence of protein arrays,
        each of shape (channels, H, W) -- TODO confirm channel layout
    :param isDecoy: when True, files are suffixed "_0.npy" (negative label)
        instead of "_1.npy" (positive label)

    TO DO: The bad_sample must be related to some bugs. Need to be fixed.
    '''
    os.makedirs(workdir + out_path, exist_ok=True)
    postive_300 = pd.read_pickle(workdir + in_file)
    # bad_sample =[126,291,343,345,346,373,383,385,398,580,600,625,
    #             672,793,984]
    bads = []
    tag = '_1.npy'
    if isDecoy:
        tag = '_0.npy'
    for i in range(len(postive_300)):
        one_protein = postive_300[i]
        print("protein shape:", one_protein.shape)
        # if i in bad_sample:
        #     continue
        try:
            # First 4 channels hold the distance maps; reformat them and fill
            # the diagonal before re-attaching the remaining channels.
            dist = one_protein[0:4, :, :]
            dist_new = reformat_image(dist.copy())
            dist_new = fill_diagonal_distance_map(dist_new)
            print("min, max", dist_new.min(), dist_new.max())
            print("dist_new:", dist_new.shape)
            check_one_distance_map(dist_new, 3)
            # combine distance and residue information
            dist_new = np.concatenate([dist_new, one_protein[4:, :, :]], axis=0)
            print("final dist_new:", dist_new.shape)
            check_one_distance_map(dist_new, 13)
            protein_name = "bindcore_" + str(i)
            np.save(workdir + out_path + protein_name + tag, dist_new)
        # BUG FIX: was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception while keeping the
        # deliberate best-effort behaviour of recording the failed index.
        except Exception:
            bads.append(i)
    # Print out the bads for debug purpose
    print('Bads: {}'.format(bads))
    return
| [
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
2,
7353,
425,
62,
6200,
796,
279,
67,
13,
961,
62,
27729,
293,
7,
81,
6,
6200,
62,
1930,
62,
1069,
82,
13,
79,
41582,
11537,
198,
... | 1.941935 | 930 |
__author__ = 'flipajs'
| [
834,
9800,
834,
796,
705,
2704,
541,
1228,
82,
6,
198
] | 2.090909 | 11 |
import subprocess
import config
import argparse
import itertools

# Location of the spla checkout and its build directory; the "/" joining
# shows config.DEPS behaves like a pathlib.Path.
SPLA_PATH = config.DEPS / "spla"
SPLA_BUILD = SPLA_PATH / "build"
# Build/benchmark targets provided by the spla project.
SPLA_TARGETS = ["spla_bfs", "spla_sssp", "spla_tc", "spla_data"]

if __name__ == '__main__':
    # NOTE(review): main() is not defined in this excerpt -- presumably
    # defined elsewhere in the original file; confirm before running.
    exit(main())
| [
11748,
850,
14681,
198,
11748,
4566,
198,
11748,
1822,
29572,
198,
11748,
340,
861,
10141,
198,
198,
4303,
13534,
62,
34219,
796,
4566,
13,
7206,
3705,
1220,
366,
22018,
64,
1,
198,
4303,
13534,
62,
19499,
26761,
796,
6226,
13534,
62,
... | 2.401961 | 102 |
"""
Functions for type inference.
"""
# pylint: disable=unused-argument
from typing import TYPE_CHECKING, List, Optional, Type, Union, cast
from sqloxide import parse_sql
from datajunction.models.column import Column
from datajunction.sql.functions import function_registry
from datajunction.sql.parse import find_nodes_by_key
from datajunction.typing import ColumnType, Expression, Function, Identifier, Value
if TYPE_CHECKING:
from datajunction.models.node import Node
class Wildcard:  # pylint: disable=too-few-public-methods
    """
    Sentinel type representing the star (``*``) in a SQL projection.
    """
def infer_columns(sql: str, parents: List["Node"]) -> List[Column]:
    """
    Infer the output schema of a SQL expression, given its parent nodes.
    """
    tree = parse_sql(sql, dialect="ansi")

    # Use the first projection. We actually want to check that all the
    # projections produce the same columns, and raise an error if not.
    projection = next(find_nodes_by_key(tree, "projection"))

    inferred = []
    for item in projection:
        label: Optional[str] = None
        if "UnnamedExpr" in item:
            expr = item["UnnamedExpr"]
        elif "ExprWithAlias" in item:
            label = item["ExprWithAlias"]["alias"]["value"]
            expr = item["ExprWithAlias"]["expr"]
        else:
            raise NotImplementedError(f"Unable to handle expression: {item}")
        inferred.append(get_column_from_expression(parents, expr, label))

    # Give anonymous columns sequential placeholder names (_col0, _col1, ...).
    anonymous = 0
    for column in inferred:
        if column.name is None:
            column.name = f"_col{anonymous}"
            anonymous += 1

    return inferred
def evaluate_identifier(parents: List["Node"], identifier: Identifier) -> Column:
    """
    Resolve an "Identifier" node to the single parent column it names.

    Raises if zero or more than one parent exposes a column of that name.
    """
    value = identifier["value"]
    candidates = []
    for node in parents:
        # At most one column per parent can contribute a candidate.
        found = next((col for col in node.columns if col.name == value), None)
        if found is not None:
            candidates.append(found)
    if len(candidates) != 1:
        raise Exception(f'Unable to determine origin of column "{value}"')
    return candidates[0]
def evaluate_compound_identifier(
    parents: List["Node"],
    compound_identifier: List[Identifier],
) -> Column:
    """
    Resolve a "CompoundIdentifier" node (``parent.column``) to its column.
    """
    # Last part is the column name; everything before it names the parent.
    name = compound_identifier[-1]["value"]
    parent_name = ".".join(part["value"] for part in compound_identifier[:-1])

    parent = next((node for node in parents if node.name == parent_name), None)
    if not parent:
        raise Exception(
            f'Unable to determine origin of column "{parent_name}.{name}"',
        )

    match = next((col for col in parent.columns if col.name == name), None)
    if match is None:
        raise Exception(f'Unable to find column "{name}" in node "{parent.name}"')
    return match
def evaluate_function(
    parents: List["Node"],
    function: Function,
    alias: Optional[str] = None,
) -> Column:
    """
    Evaluate a "Function" node and infer the type of its result from the
    function registry.
    """
    name = ".".join(part["value"] for part in function["name"])

    args: List[Expression] = []
    for arg in function["args"]:
        unnamed = arg["Unnamed"]
        if isinstance(unnamed, dict) and "Expr" in unnamed:
            args.append(unnamed["Expr"])
        else:
            args.append(cast(Expression, unnamed))

    evaluated = [evaluate_expression(parents, argument) for argument in args]
    type_ = function_registry[name].infer_type(*evaluated)
    return Column(name=alias, type=type_)
def evaluate_value(
    value: Value,
    alias: Optional[str] = None,
) -> Union[int, float, str]:
    """
    Evaluate a "Value" node into the corresponding Python literal.
    """
    if "Number" in value:
        number = value["Number"][0]
        # Integers first; anything that fails int() is treated as a float.
        try:
            return int(number)
        except ValueError:
            return float(number)
    if "SingleQuotedString" in value:
        return value["SingleQuotedString"]
    raise NotImplementedError(f"Unable to handle value: {value}")
def evaluate_expression(
    parents: List["Node"],
    expression: Expression,
    alias: Optional[str] = None,
) -> Union[Column, int, float, str, Type[Wildcard]]:
    """
    Evaluate a single projection expression, dispatching on its node kind.
    """
    if expression == "Wildcard":
        return Wildcard

    # Dispatch table: (node key, handler for the node's payload).
    handlers = (
        ("Identifier", lambda node: evaluate_identifier(parents, node)),
        ("CompoundIdentifier", lambda node: evaluate_compound_identifier(parents, node)),
        ("Function", lambda node: evaluate_function(parents, node, alias)),
        ("Value", lambda node: evaluate_value(node, alias)),
    )
    for key, handler in handlers:
        if key in expression:
            return handler(expression[key])

    raise NotImplementedError(f"Unable to evaluate expression: {expression}")
def get_column_from_expression(
    parents: List["Node"],
    expression: Expression,
    alias: Optional[str] = None,
) -> Column:
    """
    Build a Column from a projection expression, mapping Python literal
    types to column types.
    """
    value = evaluate_expression(parents, expression, alias)
    if isinstance(value, Column):
        return value

    # Order matters: bool is a subclass of int and maps to INT, and ints
    # must be tested before floats.
    literal_types = (
        (int, ColumnType.INT),
        (float, ColumnType.FLOAT),
        (str, ColumnType.STR),
    )
    for python_type, column_type in literal_types:
        if isinstance(value, python_type):
            return Column(name=alias, type=column_type)

    raise Exception(f"Invalid expression for column: {expression}")
| [
37811,
198,
24629,
2733,
329,
2099,
32278,
13,
198,
37811,
198,
198,
2,
279,
2645,
600,
25,
15560,
28,
403,
1484,
12,
49140,
198,
198,
6738,
19720,
1330,
41876,
62,
50084,
2751,
11,
7343,
11,
32233,
11,
5994,
11,
4479,
11,
3350,
198... | 2.672983 | 2,058 |
from django.conf.urls import include, url
from django.contrib import admin
import hello_world.urls
# URL routing table: /admin/ is handled by the Django admin site, every
# other path is delegated to the hello_world app's URLconf.
# NOTE(review): url() comes from the pre-Django-2.0 API (django.conf.urls);
# confirm the target Django version before modernizing to path()/re_path().
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
    url(r'^', include(hello_world.urls))
]
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
2291,
11,
19016,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
11748,
23748,
62,
6894,
13,
6371,
82,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
19016,
... | 2.5875 | 80 |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import difflib
| [
2,
15069,
357,
66,
8,
13130,
12,
25579,
11,
3203,
11,
3457,
13,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
770,
2723,
2438,
318,
11971,
739,
262,
5964,
1043,
287,
262,
198,
2,
38559,
24290,
2393,
287,
262,
6808,
8619,
286,
4... | 3.732143 | 56 |
"""
Filename: test_localint.py
Tests for localint.py
"""
import numpy as np
from numpy.testing import assert_array_equal, assert_equal
from quantecon.game_theory import LocalInteraction
class TestLocalInteraction:
    """Tests for the methods of LocalInteraction."""

    def setUp(self):
        """Build the LocalInteraction fixture shared by the tests."""
        payoffs = np.asarray([[4, 0], [2, 3]])
        adjacency = np.asarray([[0, 1, 3],
                                [2, 0, 1],
                                [3, 2, 0]])
        self.li = LocalInteraction(payoffs, adjacency)
if __name__ == '__main__':
    import sys
    import nose

    # Run this module's tests with nose, forwarding any CLI arguments and
    # forcing verbose, un-captured output.
    argv = sys.argv[:]
    argv.append('--verbose')
    argv.append('--nocapture')
    nose.main(argv=argv, defaultTest=__file__)
| [
37811,
198,
35063,
25,
1332,
62,
12001,
600,
13,
9078,
198,
198,
51,
3558,
329,
1957,
600,
13,
9078,
198,
198,
37811,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
299,
32152,
13,
33407,
1330,
6818,
62,
18747,
62,
40496,
11,
6... | 2.155556 | 360 |
__author__ = 'gsanroma'
import argparse
from fnmatch import fnmatch
import os
import subprocess
import sys
import csv
from scheduler import Launcher, check_file_repeat
from shutil import rmtree
# NOTE(review): this is a Python 2 script (print statements below).
# Build the CLI; --est_dir may be given multiple times (action='append'),
# and every argument value arrives as a one-element list (nargs=1).
parser = argparse.ArgumentParser(description='Computes Dice score of estimated segmentations w.r.t. ground truth segmentations.\n'
                                             'Average per-label Dice score and average per-subject Dice score are stored in \n'
                                             'label_dice.csv and subj_dice.csv in est_dir directory, respectively')
parser.add_argument("--est_dir", type=str, nargs=1, action='append', required=True, help="Directory of estimated segmentations")
parser.add_argument("--est_suffix", type=str, nargs=1, required=True, help="Suffix of estimated segmentation files")
parser.add_argument("--gtr_dir", type=str, nargs=1, required=True, help="Directory of ground-truth segmentations")
parser.add_argument("--gtr_suffix", type=str, nargs=1, required=True, help="Suffix of ground truth segmentation files")

args = parser.parse_args()

# macOS is assumed to be a local run; anything else is treated as an
# HPC/SGE cluster where jobs are queued and waited on.
if sys.platform == 'darwin':
    is_hpc = False
else:
    is_hpc = True

for est_dir in args.est_dir:
    #
    # Retrieve estimated files
    files_list = os.listdir(est_dir[0])
    est_files = [f for f in files_list if fnmatch(f, '*' + args.est_suffix[0])]
    est_names = [f.split(args.est_suffix[0])[0] for f in est_files]
    assert est_files, "No estimated segmentation found"
    #
    # Retrieve ground truth files (one per estimated file, matched by name)
    gtr_files = [f + args.gtr_suffix[0] for f in est_names]
    assert not False in [os.path.exists(os.path.join(args.gtr_dir[0], f)) for f in gtr_files], "Some ground-truth segmentations not found"

    Nimg = len(est_files)

    # temp directory for per-image job scripts and CSV outputs
    tmp_dir = os.path.join(est_dir[0], 'tmp')
    if os.path.exists(tmp_dir):
        rmtree(tmp_dir)
    os.makedirs(tmp_dir)

    # ANTs tooling: ImageMath computes the Dice scores; the wait script
    # blocks until the listed SGE job ids finish.
    imagemath_path = os.path.join(os.environ['ANTSPATH'],'ImageMath')
    wait_jobs = [os.path.join(os.environ['ANTSSCRIPTS'], "waitForSGEQJobs.pl"), '0', '10']

    out_paths = []

    # Launch one ImageMath DiceAndMinDistSum job per image pair.
    for i_img in range(Nimg):
        est_path = os.path.join(est_dir[0], est_files[i_img])
        gtr_path = os.path.join(args.gtr_dir[0], gtr_files[i_img])
        out_path = os.path.join(tmp_dir, est_names[i_img])
        out_paths += [out_path]

        cmdline = "{} 3 {} DiceAndMinDistSum {} {}\n".format(imagemath_path, out_path, est_path, gtr_path)

        qsub_launcher = Launcher(cmdline)
        print "Launching Dice evaluation job for labels {}".format(est_names[i_img])
        qsub_launcher.name = est_names[i_img]
        qsub_launcher.folder = tmp_dir
        qsub_launcher.queue = 'short.q'
        job_id = qsub_launcher.run()

        if is_hpc:
            wait_jobs += [job_id]

    # On the cluster, block until every launched job has finished before
    # reading their CSV output.
    if is_hpc:
        print "Waiting for Dice evaluation jobs to finish..."
        subprocess.call(wait_jobs)

    print "Dice evaluation finished."

    subj_dices = dict([])
    label_dices = dict([])
    for out_path in out_paths:
        # Read per-label Dice
        check_file_repeat(out_path + '.csv')
        f = open(out_path + '.csv', 'r')
        reader = csv.reader(f)
        count = 0
        dice = 0.
        for row in reader:
            count += 1
            if count == 1:
                continue
            dice += float(row[1])
            # Accumulate the per-label average across all subjects; the
            # try/except initializes a label's entry on first sight.
            try:
                label_dices[row[0].split('_')[1]] += float(row[1]) / len(out_paths)
            except:
                label_dices[row[0].split('_')[1]] = float(row[1]) / len(out_paths)
        f.close()
        # Per-subject average Dice over all labels (count-1 data rows).
        subj_dices[os.path.basename(out_path)] = dice/(count-1)

    subj_dice_file = "subj_dice.csv"
    label_dice_file = "label_dice.csv"
    with open(os.path.join(est_dir[0], subj_dice_file), 'w') as csvfile:
        writer = csv.DictWriter(csvfile, subj_dices.keys())
        writer.writeheader()
        writer.writerow(subj_dices)
    with open(os.path.join(est_dir[0], label_dice_file), 'w') as csvfile:
        writer = csv.DictWriter(csvfile, label_dices.keys())
        writer.writeheader()
        writer.writerow(label_dices)

    rmtree(tmp_dir)
| [
834,
9800,
834,
796,
705,
70,
12807,
42902,
6,
198,
198,
11748,
1822,
29572,
198,
6738,
24714,
15699,
1330,
24714,
15699,
198,
11748,
28686,
198,
11748,
850,
14681,
198,
11748,
25064,
198,
11748,
269,
21370,
198,
6738,
6038,
18173,
1330,
... | 2.238587 | 1,840 |
#!/usr/bin/env python
# -*- coding: utf8 -*-
from numpy import *
import json
import sys

# NOTE(review): ParseInput is not defined in this excerpt and is not an
# explicit import -- it may be shadowed in via the star import above or
# defined elsewhere in the original file; confirm.
ParseInput(sys.argv)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
23,
532,
9,
12,
198,
6738,
299,
32152,
1330,
1635,
198,
11748,
33918,
198,
11748,
25064,
198,
198,
10044,
325,
20560,
7,
17597,
13,
853,
85,
8,
... | 2.444444 | 45 |
# dataframe: a data-frame implementation using method piping
#
# Copyright (C) 2016 Simon Dirmeier
#
# This file is part of dataframe.
#
# dataframe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# dataframe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with dataframe. If not, see <http://www.gnu.org/licenses/>.
#
#
# @author = 'Simon Dirmeier'
# @email = 'mail@simon-dirmeier.net'
import copy
import dataframe
from ._dataframe_abstract import ADataFrame
from ._dataframe_grouping import DataFrameGrouping
from ._check import is_callable, is_none, has_elements, is_disjoint
from ._piping_exception import PipingException
__DISJOINT_SETS_ERROR__ = "Cannot aggregate grouping variable(s)!"
class GroupedDataFrame(ADataFrame):
"""
The base GroupedDataFrame class.
Subsets a DataFrame object into several groups given several columns.
"""
def __str__(self):
"""
ToString method for GroupedDataFrame.
:return: returns the string representation
:rtype: str
"""
return self.__grouping.__str__()
@property
def colnames(self):
"""
Getter for the column names of the DataFrame.
:return: returns column names
:rtype: list(str)
"""
return self.__grouping.ungroup().colnames
@property
def groups(self):
"""
Getter for all groups.
:return: returns the groups
:rtype: list(DataFrameGroup)
"""
return self.__grouping.groups
def ungroup(self):
"""
Undo the grouping and return the DataFrame.
:return: returns the original DataFrame
:rtype: DataFrame
"""
return self.__grouping.ungroup()
def subset(self, *args):
"""
Subset only some of the columns of the DataFrame.
:param args: list of column names of the object that should be subsetted
:type args: tuple
:return: returns DataFrame with only the columns you selected
:rtype: DataFrame
"""
args = list(args)
args.extend([x for x in
self.__grouping.grouping_colnames if x not in args])
return GroupedDataFrame(self.__grouping.ungroup().subset(*args),
*self.__grouping.grouping_colnames)
def group(self, *args):
"""
Group the DataFrame into row-subsets.
:param args: list of column names taht should be used for grouping
:type args: tuple
:return: returns a dataframe that has grouping information
:rtype: GroupedDataFrame
"""
args = list(args)
args.extend([x for x in
self.__grouping.grouping_colnames if x not in args])
return GroupedDataFrame(self.__grouping.ungroup(), *args)
def modify(self, clazz, new_col, *args):
"""
Modify some columns (i.e. apply a function)
and add the result to the table.
:param clazz: name of a class that extends class Callable
:type clazz: class
:param new_col: name of the new column
:type new_col: str
:param args: list of column names of the object that
function should be applied to
:type args: tuple
:return: returns a new GroupedDataFrame object with the modified
values, i.e. the new column of values
:rtype: GroupedDataFrame
"""
if is_callable(clazz) \
and not is_none(new_col) \
and has_elements(*args) \
and is_disjoint(self.__grouping.grouping_colnames,
args,
__DISJOINT_SETS_ERROR__):
return self.__do_modify(clazz, new_col, *args)
def aggregate(self, clazz, new_col, *args):
"""
Aggregate the rows of each group into a single value.
:param clazz: name of a class that extends class Callable
:type clazz: class
:param new_col: name of the new column
:type new_col: str
:param args: list of column names of the object that
function should be applied to
:type args: varargs
:return: returns a new dataframe object with the aggregated value
:rtype: DataFrame
"""
if is_callable(clazz) \
and not is_none(new_col) \
and has_elements(*args) \
and is_disjoint(self.__grouping.grouping_colnames,
args,
__DISJOINT_SETS_ERROR__):
return self.__do_aggregate(clazz, new_col, *args)
| [
2,
1366,
14535,
25,
257,
1366,
12,
14535,
7822,
1262,
2446,
48426,
198,
2,
198,
2,
15069,
357,
34,
8,
1584,
11288,
36202,
49468,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
1366,
14535,
13,
198,
2,
198,
2,
1366,
14535,
318,
1479,
... | 2.390956 | 2,123 |