content stringlengths 1 1.05M | input_ids listlengths 1 883k | ratio_char_token float64 1 22.9 | token_count int64 1 883k |
|---|---|---|---|
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'countGroups' function below.
#
# The function is expected to return an INTEGER.
# The function accepts STRING_ARRAY related as parameter.
#
def convertMatrixToGraph(mat):
"""
Accept the input which is an adjacency matrix and return a Graph, which is an adjacency list
"""
n = len(mat)
g = Graph(n)
for i in range(n):
for j in range(n):
if j > i and mat[i][j] == '1':
g.addEdge(i,j)
return g
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
related_count = int(input().strip())
related = []
for _ in range(related_count):
related_item = input()
related.append(related_item)
result = countGroups(related)
fptr.write(str(result) + '\n')
fptr.close()
| [
2,
48443,
8800,
14,
29412,
18,
198,
198,
11748,
10688,
198,
11748,
28686,
198,
11748,
4738,
198,
11748,
302,
198,
11748,
25064,
628,
198,
198,
2,
198,
2,
13248,
262,
705,
9127,
38,
14459,
6,
2163,
2174,
13,
198,
2,
198,
2,
383,
21... | 2.428571 | 364 |
import logging.config
import util.logger_init
import numpy as np
import tensorflow as tf
from sklearn.metrics import confusion_matrix
from util.tensorboard_util import plot_confusion_matrix, plot_to_image
from tensorflow.python.keras.callbacks_v1 import TensorBoard
from keras import backend as K
# Define TensorBoard callback child class
| [
11748,
18931,
13,
11250,
198,
11748,
7736,
13,
6404,
1362,
62,
15003,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
1341,
35720,
13,
4164,
10466,
1330,
10802,
62,
6759,
8609,
198,
6738,
7736,
... | 3.39604 | 101 |
from .BaseRequest import BaseRequest
| [
6738,
764,
14881,
18453,
1330,
7308,
18453,
628
] | 4.75 | 8 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ray
import click
import inspect
import numpy as np
import cython_examples as cyth
def run_func(func, *args, **kwargs):
"""Helper function for running examples"""
ray.init()
func = ray.remote(func)
# NOTE: kwargs not allowed for now
result = ray.get(func.remote(*args))
# Inspect the stack to get calling example
caller = inspect.stack()[1][3]
print("%s: %s" % (caller, str(result)))
return result
if __name__ == "__main__":
cli()
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
26842,
198,
11748,
3904,
198,
11748,
10104,
198,
198,
11748,
299,
32152,
355,
... | 2.908654 | 208 |
from DataGenerator import *
from Encoder import *
import pandas as pd
from keras.models import Model
from keras.layers import Dense, Activation, Flatten, Input, Dropout, MaxPooling1D, Convolution1D
from keras.layers import LSTM, Lambda, merge, Masking
from keras.layers import Embedding, TimeDistributed
from keras.layers.normalization import BatchNormalization
from keras.optimizers import SGD
from keras.utils import np_utils
import numpy as np
import tensorflow as tf
import re
from keras import backend as K
import keras.callbacks
import sys
import os
import time
import matplotlib.pyplot as plt
import pickle
# record history of training
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='attempts to discern data types looking at columns holistically.')
parser.add_argument('--cp', dest='checkpoint',
help='checkpoint to load')
parser.add_argument('--config', dest='execution_config',
help='execution configuration to load. contains max_cells, and encoder config.')
parser.add_argument('--train', dest='should_train', action="store_true",
default="True", help='run training')
parser.add_argument('--no_train', dest='should_train', action="store_false",
default="True", help='do not run training')
parser.set_defaults(should_train=True)
parser.add_argument('--data_count', dest='data_count', action="store", type=int,
default=100, help='number of data rows to create')
parser.add_argument('--data_cols', dest='data_cols', action="store", type=int,
default=10, help='number of data cols to create')
parser.add_argument('--nullpct', dest='null_pct', action="store", type=float,
default=0, help='percent of Nulls to put in each column')
parser.add_argument('--nb_epoch', dest='nb_epoch', action="store", type=int,
default=5, help='number of epochs')
parser.add_argument('--try_reuse_data', dest='try_reuse_data', action="store_true",
default="True", help='loads existing data if the dimensions have been stored')
parser.add_argument('--force_new_data', dest='try_reuse_data', action="store_false",
default="True", help='forces the creation of new data, even if the dimensions have been stored')
parser.add_argument('--batch_size', dest='batch_size', action="store", type=int,
default=64, help='batch size for training')
args = parser.parse_args()
main(args.checkpoint, args.data_count, args.data_cols, args.should_train,
args.nb_epoch, args.null_pct, args.try_reuse_data, args.batch_size, args.execution_config)
| [
6738,
6060,
8645,
1352,
1330,
1635,
198,
6738,
14711,
12342,
1330,
1635,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
41927,
292,
13,
27530,
1330,
9104,
198,
6738,
41927,
292,
13,
75,
6962,
1330,
360,
1072,
11,
13144,
341,
11,
161... | 2.620818 | 1,076 |
"""This module implements methods for solving scalar valued functions.
"""
__all__ = ["DiscreteMinimizer", "ScalarMethod", "ScalarMinimizer", "ScalarSolverException"]
from desdeo_tools.solver.ScalarSolver import DiscreteMinimizer, ScalarMethod, ScalarMinimizer, ScalarSolverException
| [
37811,
1212,
8265,
23986,
5050,
329,
18120,
16578,
283,
17560,
5499,
13,
198,
198,
37811,
198,
198,
834,
439,
834,
796,
14631,
15642,
8374,
9452,
320,
7509,
1600,
366,
3351,
282,
283,
17410,
1600,
366,
3351,
282,
283,
9452,
320,
7509,
... | 3.188889 | 90 |
from hawc_hal.maptree.map_tree import map_tree_factory
from hawc_hal.response import hawc_response_factory
import os
from conftest import check_map_trees, check_responses
| [
6738,
23185,
66,
62,
14201,
13,
76,
2373,
631,
13,
8899,
62,
21048,
1330,
3975,
62,
21048,
62,
69,
9548,
198,
6738,
23185,
66,
62,
14201,
13,
26209,
1330,
23185,
66,
62,
26209,
62,
69,
9548,
198,
11748,
28686,
198,
6738,
369,
701,
... | 2.966102 | 59 |
from __future__ import absolute_import
from io import BytesIO
import zstd
from .base import BaseCompressor, BaseDecompressor
from ..protocol import CompressionMethod, CompressionMethodByte
from ..reader import read_binary_uint32
from ..writer import write_binary_uint32, write_binary_uint8
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
33245,
1330,
2750,
4879,
9399,
198,
198,
11748,
1976,
19282,
198,
198,
6738,
764,
8692,
1330,
7308,
7293,
44292,
11,
7308,
10707,
3361,
44292,
198,
6738,
11485,
11235,
4668,
1330,... | 3.721519 | 79 |
import requests
import json
import os
API_TOKEN = os.environ.get("TMDB_API_TOKEN", "")
| [
11748,
7007,
198,
11748,
33918,
198,
11748,
28686,
628,
198,
17614,
62,
10468,
43959,
796,
28686,
13,
268,
2268,
13,
1136,
7203,
15972,
11012,
62,
17614,
62,
10468,
43959,
1600,
366,
4943,
198,
220,
220,
220,
220,
220,
198
] | 2.435897 | 39 |
# -*- coding: utf-8 -*-
from openerp.osv import fields, osv
from openerp import models, fields, api, _ | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
21996,
79,
13,
418,
85,
1330,
7032,
11,
267,
21370,
198,
6738,
21996,
79,
1330,
4981,
11,
7032,
11,
40391,
11,
4808
] | 2.710526 | 38 |
# -*- coding: utf-8 -*-
#
# Copyright Spyder Project Contributors
# Licensed under the terms of the MIT License
#
"""
Tests for environ.py
"""
# Standard library imports
import os
# Test library imports
import pytest
# Third party imports
from qtpy.QtCore import QTimer
# Local imports
from spyder.utils.test import close_message_box
def test_environ(environ_dialog, qtbot):
"""Test the environment variables dialog."""
environ_dialog.show()
assert environ_dialog
if __name__ == "__main__":
pytest.main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
220,
23688,
1082,
4935,
25767,
669,
198,
2,
49962,
739,
262,
2846,
286,
262,
17168,
13789,
198,
2,
198,
198,
37811,
198,
51,
3558,
329,
551,
2268,
... | 2.955556 | 180 |
import math
main()
| [
11748,
10688,
198,
12417,
3419,
198
] | 3.166667 | 6 |
import unittest
import json
from server import server
from models.abc import db
from repositories import ChannelRepository, GitlabProvider
from unittest.mock import MagicMock, Mock
# from flask import make_response
# from flask.json import jsonify
from util import test_client
| [
11748,
555,
715,
395,
198,
11748,
33918,
198,
6738,
4382,
1330,
4382,
198,
6738,
4981,
13,
39305,
1330,
20613,
198,
6738,
38072,
1330,
11102,
6207,
13264,
11,
15151,
23912,
29495,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
6139,
44,
... | 3.985714 | 70 |
"""
This is an example cog that shows how you would make use of Lavalink.py.
This example cog requires that you have python 3.6 or higher due to the
f-strings.
"""
import math
import re
import discord
import lavalink
from discord.ext import commands
from discord.ext import menus
from .utils import checks
from typing import List
import asyncio
import logging
url_rx = re.compile('https?:\\/\\/(?:www\\.)?.+') # noqa: W605
def can_stop():
return commands.check(predicate)
| [
37811,
201,
198,
1212,
318,
281,
1672,
43072,
326,
2523,
703,
345,
561,
787,
779,
286,
406,
9226,
676,
13,
9078,
13,
201,
198,
1212,
1672,
43072,
4433,
326,
345,
423,
21015,
513,
13,
21,
393,
2440,
2233,
284,
262,
201,
198,
69,
12... | 2.988166 | 169 |
import numpy as np
import sys
import os
sys.path.append('utils/')
from config import *
from utils import *
sys.path.append(pycaffe_dir)
import time
import pdb
import random
import pickle as pkl
import caffe
from multiprocessing import Pool
from threading import Thread
import random
import h5py
import itertools
import math
import re
glove_dim = 300
glove_path = 'data/glove.6B.%dd.txt' %glove_dim
#glove_path = 'data/glove_debug_path.txt' #for debugging
if glove_path == 'data/glove_debug_path.txt':
print "continue?"
pdb.set_trace()
possible_segments = [(0,0), (1,1), (2,2), (3,3), (4,4), (5,5)]
for i in itertools.combinations(range(6), 2):
possible_segments.append(i)
length_prep_word = 40
length_prep_character = 250
vocab_file = 'data/vocab_glove_complete.txt'
#Methods for extracting visual features
def feature_process_base(start, end, features):
return np.mean(features[start:end+1,:], axis = 0)
def feature_process_norm(start, end, features):
base_feature = np.mean(features[start:end+1,:], axis = 0)
return base_feature/(np.linalg.norm(base_feature) + 0.00001)
def feature_process_context(start, end, features):
feature_dim = features.shape[1]
full_feature = np.zeros((feature_dim*2,))
if np.sum(features[5,:]) > 0:
full_feature[:feature_dim] = feature_process_norm(0,6, features)
else:
full_feature[:feature_dim] = feature_process_norm(0,5, features)
full_feature[feature_dim:feature_dim*2] = feature_process_norm(start, end, features)
return full_feature
feature_process_dict = {'feature_process_base': feature_process_base,
'feature_process_norm': feature_process_norm,
'feature_process_context': feature_process_context,
}
feature_process_dict = {'feature_process_base': feature_process_base,
'feature_process_norm': feature_process_norm,
'feature_process_context': feature_process_context,
}
language_feature_process_dict = {'zero_language': zero_language_vector,
'recurrent_embedding': recurrent_embedding}
| [
11748,
299,
32152,
355,
45941,
198,
11748,
25064,
198,
11748,
28686,
198,
17597,
13,
6978,
13,
33295,
10786,
26791,
14,
11537,
198,
6738,
4566,
1330,
1635,
198,
6738,
3384,
4487,
1330,
1635,
198,
17597,
13,
6978,
13,
33295,
7,
9078,
66,... | 2.47254 | 874 |
import math
import os
from collections import Counter
from fractions import Fraction
import numpy as np
from nltk import ngrams
from nltk.translate.bleu_score import SmoothingFunction
from .utils import get_ngrams, Threader
def corpus_bleu(references,
hypothesis,
reference_max_counts,
ref_lens,
weights=(0.25, 0.25, 0.25, 0.25),
smoothing_function=None,
auto_reweight=False,
):
"""
Calculate a single corpus-level BLEU score (aka. system-level BLEU) for all
the hypotheses and their respective references.
Instead of averaging the sentence level BLEU scores (i.e. marco-average
precision), the original BLEU metric (Papineni et al. 2002) accounts for
the micro-average precision (i.e. summing the numerators and denominators
for each hypothesis-reference(s) pairs before the division).
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'military', 'always',
... 'obeys', 'the', 'commands', 'of', 'the', 'party']
>>> ref1a = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'military', 'will', 'forever',
... 'heed', 'Party', 'commands']
>>> ref1b = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'military', 'forces', 'always',
... 'being', 'under', 'the', 'command', 'of', 'the', 'Party']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'army', 'always', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'party']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> corpus_bleu(list_of_references, hypotheses) # doctest: +ELLIPSIS
0.5920...
The example below show that corpus_bleu() is different from averaging
sentence_bleu() for hypotheses
>>> score1 = sentence_bleu([ref1a, ref1b, ref1c], hyp1)
>>> score2 = sentence_bleu([ref2a], hyp2)
>>> (score1 + score2) / 2 # doctest: +ELLIPSIS
0.6223...
:param list_of_references: a corpus of lists of reference sentences, w.r.t. hypotheses
:type list_of_references: list(list(list(str)))
:param hypotheses: a list of hypothesis sentences
:type hypotheses: list(list(str))
:param weights: weights for unigrams, bigrams, trigrams and so on
:type weights: list(float)
:param smoothing_function:
:type smoothing_function: SmoothingFunction
:param auto_reweight: Option to re-normalize the weights uniformly.
:type auto_reweight: bool
:return: The corpus-level BLEU score.
:rtype: float
"""
# Before proceeding to compute BLEU, perform sanity checks.
p_numerators = Counter() # Key = ngram order, and value = no. of ngram matches.
p_denominators = Counter() # Key = ngram order, and value = no. of ngram in ref.
hyp_lengths, ref_lengths = 0, 0
# Iterate through each hypothesis and their corresponding references.
# For each order of ngram, calculate the numerator and
# denominator for the corpus-level modified precision.
for i, _ in enumerate(weights, start=1):
p_i = modified_precision(reference_max_counts, hypothesis, i)
p_numerators[i] += p_i.numerator
p_denominators[i] += p_i.denominator
# Calculate the hypothesis length and the closest reference length.
# Adds them to the corpus-level hypothesis and reference counts.
hyp_len = len(hypothesis)
hyp_lengths += hyp_len
ref_lengths += closest_ref_length(ref_lens, hyp_len)
# Calculate corpus-level brevity penalty.
bp = brevity_penalty(ref_lengths, hyp_lengths)
# Uniformly re-weighting based on maximum hypothesis lengths if largest
# order of n-grams < 4 and weights is set at default.
if auto_reweight:
if hyp_lengths < 4 and weights == (0.25, 0.25, 0.25, 0.25):
weights = (1 / hyp_lengths,) * hyp_lengths
# Collects the various precision values for the different ngram orders.
p_n = [
Fraction(p_numerators[i], p_denominators[i], _normalize=False)
for i, _ in enumerate(weights, start=1)
]
# Returns 0 if there's no matching n-grams
# We only need to check for p_numerators[1] == 0, since if there's
# no unigrams, there won't be any higher order ngrams.
if p_numerators[1] == 0:
return 0
# If there's no smoothing, set use method0 from SmoothinFunction class.
if not smoothing_function:
smoothing_function = SmoothingFunction().method0
# Smoothen the modified precision.
# Note: smoothing_function() may convert values into floats;
# it tries to retain the Fraction object as much as the
# smoothing method allows.
p_n = smoothing_function(
p_n, references=references, hypothesis=hypothesis, hyp_len=hyp_lengths
)
s = (w_i * math.log(p_i) for w_i, p_i in zip(weights, p_n))
s = bp * math.exp(math.fsum(s))
return s
def modified_precision(reference_max_counts, hypothesis, n):
"""
Calculate modified ngram precision.
The normal precision method may lead to some wrong translations with
high-precision, e.g., the translation, in which a word of reference
repeats several times, has very high precision.
This function only returns the Fraction object that contains the numerator
and denominator necessary to calculate the corpus-level precision.
To calculate the modified precision for a single pair of hypothesis and
references, cast the Fraction object into a float.
The famous "the the the ... " example shows that you can get BLEU precision
by duplicating high frequency words.
>>> reference1 = 'the cat is on the mat'.split()
>>> reference2 = 'there is a cat on the mat'.split()
>>> hypothesis1 = 'the the the the the the the'.split()
>>> references = [reference1, reference2]
>>> float(modified_precision(references, hypothesis1, n=1)) # doctest: +ELLIPSIS
0.2857...
In the modified n-gram precision, a reference word will be considered
exhausted after a matching hypothesis word is identified, e.g.
>>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'military', 'will',
... 'forever', 'heed', 'Party', 'commands']
>>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'military', 'forces', 'always',
... 'being', 'under', 'the', 'command', 'of', 'the',
... 'Party']
>>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'army', 'always', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'party']
>>> hypothesis = 'of the'.split()
>>> references = [reference1, reference2, reference3]
>>> float(modified_precision(references, hypothesis, n=1))
1.0
>>> float(modified_precision(references, hypothesis, n=2))
1.0
An example of a normal machine translation hypothesis:
>>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'military', 'always',
... 'obeys', 'the', 'commands', 'of', 'the', 'party']
>>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops',
... 'forever', 'hearing', 'the', 'activity', 'guidebook',
... 'that', 'party', 'direct']
>>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'military', 'will',
... 'forever', 'heed', 'Party', 'commands']
>>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'military', 'forces', 'always',
... 'being', 'under', 'the', 'command', 'of', 'the',
... 'Party']
>>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'army', 'always', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'party']
>>> references = [reference1, reference2, reference3]
>>> float(modified_precision(references, hypothesis1, n=1)) # doctest: +ELLIPSIS
0.9444...
>>> float(modified_precision(references, hypothesis2, n=1)) # doctest: +ELLIPSIS
0.5714...
>>> float(modified_precision(references, hypothesis1, n=2)) # doctest: +ELLIPSIS
0.5882352941176471
>>> float(modified_precision(references, hypothesis2, n=2)) # doctest: +ELLIPSIS
0.07692...
:param references: A list of reference translations.
:type references: list(list(str))
:param hypothesis: A hypothesis translation.
:type hypothesis: list(str)
:param n: The ngram order.
:type n: int
:return: BLEU's modified precision for the nth order ngram.
:rtype: Fraction
"""
# Extracts all ngrams in hypothesis
# Set an empty Counter if hypothesis is empty.
counts = Counter(ngrams(hypothesis, n)) if len(hypothesis) >= n else Counter()
# Extract a union of references' counts.
# max_counts = reduce(or_, [Counter(ngrams(ref, n)) for ref in references])
max_counts = reference_max_counts[n - 1]
# Assigns the intersection between hypothesis and references' counts.
clipped_counts = {
ngram: min(count, max_counts.get(ngram, 0)) for ngram, count in counts.items()
}
numerator = sum(clipped_counts.values())
# Ensures that denominator is minimum 1 to avoid ZeroDivisionError.
# Usually this happens when the ngram order is > len(reference).
denominator = max(1, sum(counts.values()))
return Fraction(numerator, denominator, _normalize=False)
def closest_ref_length(ref_lens, hyp_len):
"""
This function finds the reference that is the closest length to the
hypothesis. The closest reference length is referred to as *r* variable
from the brevity penalty formula in Papineni et. al. (2002)
:param references: A list of reference translations.
:type references: list(list(str))
:param hyp_len: The length of the hypothesis.
:type hyp_len: int
:return: The length of the reference that's closest to the hypothesis.
:rtype: int
"""
closest_ref_len = min(
ref_lens, key=lambda ref_len: (abs(ref_len - hyp_len), ref_len)
)
return closest_ref_len
def brevity_penalty(closest_ref_len, hyp_len):
"""
Calculate brevity penalty.
As the modified n-gram precision still has the problem from the short
length sentence, brevity penalty is used to modify the overall BLEU
score according to length.
An example from the paper. There are three references with length 12, 15
and 17. And a concise hypothesis of the length 12. The brevity penalty is 1.
>>> reference1 = list('aaaaaaaaaaaa') # i.e. ['a'] * 12
>>> reference2 = list('aaaaaaaaaaaaaaa') # i.e. ['a'] * 15
>>> reference3 = list('aaaaaaaaaaaaaaaaa') # i.e. ['a'] * 17
>>> hypothesis = list('aaaaaaaaaaaa') # i.e. ['a'] * 12
>>> references = [reference1, reference2, reference3]
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len)
1.0
In case a hypothesis translation is shorter than the references, penalty is
applied.
>>> references = [['a'] * 28, ['a'] * 28]
>>> hypothesis = ['a'] * 12
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len)
0.2635971381157267
The length of the closest reference is used to compute the penalty. If the
length of a hypothesis is 12, and the reference lengths are 13 and 2, the
penalty is applied because the hypothesis length (12) is less then the
closest reference length (13).
>>> references = [['a'] * 13, ['a'] * 2]
>>> hypothesis = ['a'] * 12
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len) # doctest: +ELLIPSIS
0.9200...
The brevity penalty doesn't depend on reference order. More importantly,
when two reference sentences are at the same distance, the shortest
reference sentence length is used.
>>> references = [['a'] * 13, ['a'] * 11]
>>> hypothesis = ['a'] * 12
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> bp1 = brevity_penalty(closest_ref_len, hyp_len)
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(reversed(references), hyp_len)
>>> bp2 = brevity_penalty(closest_ref_len, hyp_len)
>>> bp1 == bp2 == 1
True
A test example from mteval-v13a.pl (starting from the line 705):
>>> references = [['a'] * 11, ['a'] * 8]
>>> hypothesis = ['a'] * 7
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len) # doctest: +ELLIPSIS
0.8668...
>>> references = [['a'] * 11, ['a'] * 8, ['a'] * 6, ['a'] * 7]
>>> hypothesis = ['a'] * 7
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len)
1.0
:param hyp_len: The length of the hypothesis for a single sentence OR the
sum of all the hypotheses' lengths for a corpus
:type hyp_len: int
:param closest_ref_len: The length of the closest reference for a single
hypothesis OR the sum of all the closest references for every hypotheses.
:type closest_ref_len: int
:return: BLEU's brevity penalty.
:rtype: float
"""
if hyp_len > closest_ref_len:
return 1
# If hypothesis is empty, brevity penalty = 0 should result in BLEU = 0.0
elif hyp_len == 0:
return 0
else:
return math.exp(1 - closest_ref_len / hyp_len)
| [
11748,
10688,
198,
11748,
28686,
198,
6738,
17268,
1330,
15034,
198,
6738,
49876,
1330,
376,
7861,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
299,
2528,
74,
1330,
299,
4546,
82,
198,
6738,
299,
2528,
74,
13,
7645,
17660,
13,
... | 2.528247 | 5,859 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import os
import re
from setuptools import setup, find_packages
# classifiers = """\
# Development Status :: 4 - Beta
# Programming Language :: Python
# Programming Language :: Python :: 3
# Programming Language :: Python :: 3.4
# Programming Language :: Python :: 3.5
# Programming Language :: Python :: 3.6
# Programming Language :: Python :: 3.7
# Programming Language :: Python :: 3.8
# """
install_requires = get_requirements('requirements.txt')
packages = find_packages()
setup(
name='bwtools',
author='George Spracklin',
author_email='@mit.edu',
version=get_version(),
license='MIT',
description='tools for bigwigs',
long_description=get_long_description(),
long_description_content_type='text/markdown',
keywords=['genomics', 'bioinformatics', 'Hi-C', 'analysis', 'cooler'],
url='https://github.com/gspracklin/bwtools',
zip_safe=False,
# classifiers=[s.strip() for s in classifiers.split('\n') if s],
packages=packages,
install_requires=install_requires,
entry_points={
'console_scripts': [
'bwtools = bwtools.cli:cli',
]
}
) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
33245,
198,
11748,
28686,
198,
11748,
302,
198,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
1... | 2.700441 | 454 |
from .trainer.models import MultiTaskTagger
from .trainer.utils import load_dictionaries,Config
from .trainer.tasks.multitask_tagging import MultiTaskTaggingModule
from fairseq.data.data_utils import collate_tokens
from attacut import tokenize
| [
6738,
764,
2213,
10613,
13,
27530,
1330,
15237,
25714,
51,
7928,
198,
6738,
764,
2213,
10613,
13,
26791,
1330,
3440,
62,
67,
2867,
3166,
11,
16934,
198,
6738,
764,
2213,
10613,
13,
83,
6791,
13,
16680,
270,
2093,
62,
12985,
2667,
1330... | 3.310811 | 74 |
from . import usuario
from . import equipo_cambio
from . import equipo_computo
from . import sucursales
from . import depto
from . import usuario
| [
6738,
764,
1330,
514,
84,
4982,
198,
6738,
764,
1330,
25725,
78,
62,
66,
4131,
952,
198,
6738,
764,
1330,
25725,
78,
62,
785,
1996,
78,
198,
6738,
764,
1330,
6522,
1834,
2040,
198,
6738,
764,
1330,
390,
457,
78,
198,
6738,
764,
13... | 3.041667 | 48 |
from pyglet.window import Window as PygletWindow
from .controllers import ComponentContainerController
from .models.container import ComponentContainerModel
from .views import OrthoViewport
| [
6738,
12972,
70,
1616,
13,
17497,
1330,
26580,
355,
9485,
70,
1616,
27703,
198,
198,
6738,
764,
3642,
36667,
1330,
35100,
29869,
22130,
198,
6738,
764,
27530,
13,
34924,
1330,
35100,
29869,
17633,
198,
6738,
764,
33571,
1330,
47664,
78,
... | 4.465116 | 43 |
from pyconductor import load_test_values, calculate_conductance
if __name__ == "__main__":
conductance_calc()
| [
6738,
12972,
17561,
33029,
1330,
3440,
62,
9288,
62,
27160,
11,
15284,
62,
36495,
590,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
3189,
590,
62,
9948,
66,
3419,
198
] | 3.052632 | 38 |
from .Fragment import Fragment
from utils.flags import *
from utils.CustomLogging import CustomLogging
#from python_compiler.engines.utils.types import get_python_type_str, ANY
DEFAULT_ASSIGN_OPERATOR = "="
ASSIGN_OPERATORS = {
"=":"=",
"+=":"+=",
"-=":"-=",
"*=":"*=",
"/=":"/=",
"//=":"//=",
"%=":"%=",
"**=":"**=",
"&=":"&=",
"|=":"|=",
"^=":"^=",
">>=":">>=",
"<<=":"<<=",
}
| [
6738,
764,
42974,
434,
1330,
24229,
434,
198,
6738,
3384,
4487,
13,
33152,
1330,
1635,
198,
6738,
3384,
4487,
13,
15022,
11187,
2667,
1330,
8562,
11187,
2667,
198,
2,
6738,
21015,
62,
5589,
5329,
13,
1516,
1127,
13,
26791,
13,
19199,
... | 2.052133 | 211 |
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
if not os.path.exists(sys.argv[1]):
raise Exception()
open(sys.argv[2], 'w').close()
| [
2,
15069,
357,
66,
8,
2321,
3012,
3457,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
198,
2,
1043,
287,
262,
38559,
24290,
2393,
13,
198,
198,
1174... | 3.022727 | 88 |
# -*- coding: utf-8 -*-
#
# Licensed under the terms of the Qwt License
# Copyright (c) 2002 Uwe Rathmann, for the original C++ code
# Copyright (c) 2015 Pierre Raybaut, for the Python translation/optimization
# (see LICENSE file for more details)
"""
QwtAbstractScaleDraw
--------------------
.. autoclass:: QwtAbstractScaleDraw
:members:
QwtScaleDraw
------------
.. autoclass:: QwtScaleDraw
:members:
"""
from qwt.scale_div import QwtScaleDiv
from qwt.scale_map import QwtScaleMap
from qwt.text import QwtText
from qwt._math import qwtRadians
from qtpy.QtGui import QPalette, QFontMetrics, QTransform
from qtpy.QtCore import Qt, qFuzzyCompare, QLocale, QRectF, QPointF, QRect, QPoint
from math import ceil
import numpy as np
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
49962,
739,
262,
2846,
286,
262,
1195,
46569,
13789,
198,
2,
15069,
357,
66,
8,
6244,
471,
732,
26494,
9038,
11,
329,
262,
2656,
327,
4880,
2438,
198,
2,
... | 2.960317 | 252 |
from fuzzconfig import FuzzConfig
import nonrouting
import fuzzloops
import re
cfgs = [
FuzzConfig(job="SYSCONFIG40", device="LIFCL-40", sv="../shared/empty_40.v",
tiles=["CIB_R0C75:EFB_0", "CIB_R0C72:BANKREF0", "CIB_R0C77:EFB_1_OSC", "CIB_R0C79:EFB_2",
"CIB_R0C81:I2C_EFB_3", "CIB_R0C85:PMU", "CIB_R0C87:MIB_CNR_32_FAFD", "CIB_R1C87:IREF_P33", "CIB_R2C87:POR"]),
FuzzConfig(job="SYSCONFIG17", device="LIFCL-17", sv="../shared/empty_17.v",
tiles=["CIB_R1C75:IREF_15K", "CIB_R0C75:PPT_QOUT_15K", "CIB_R0C74:PVTCAL33_15K", "CIB_R0C73:POR_15K",
"CIB_R0C72:I2C_15K", "CIB_R0C71:OSC_15K", "CIB_R0C70:PMU_15K", "CIB_R0C66:EFB_15K"])
]
if __name__ == "__main__":
main()
| [
6738,
26080,
11250,
1330,
376,
4715,
16934,
198,
11748,
1729,
81,
13660,
198,
11748,
26080,
5439,
2840,
198,
11748,
302,
198,
198,
12993,
14542,
796,
685,
198,
220,
220,
220,
376,
4715,
16934,
7,
21858,
2625,
23060,
6173,
1340,
16254,
1... | 1.706024 | 415 |
import argparse
import collections
import os
import random
import json
from copy import deepcopy
import ConfigSpace
import numpy as np
# from tabular_benchmarks import FCNetProteinStructureBenchmark, FCNetSliceLocalizationBenchmark,\
# FCNetNavalPropulsionBenchmark, FCNetParkinsonsTelemonitoringBenchmark
from tabular_benchmarks import NASCifar10A, NASCifar10B | [
11748,
1822,
29572,
198,
11748,
17268,
198,
11748,
28686,
198,
11748,
4738,
198,
11748,
33918,
198,
6738,
4866,
1330,
2769,
30073,
198,
198,
11748,
17056,
14106,
198,
11748,
299,
32152,
355,
45941,
628,
198,
2,
422,
7400,
934,
62,
26968,
... | 3.439252 | 107 |
from uuid import UUID
import os
import pytest
from notifications_utils.base64_uuid import base64_to_uuid, uuid_to_base64, base64_to_bytes, bytes_to_base64
def test_base64_converter_to_url_raises_validation_error():
with pytest.raises(Exception):
uuid_to_base64(object())
| [
6738,
334,
27112,
1330,
471,
27586,
198,
11748,
28686,
198,
198,
11748,
12972,
9288,
198,
6738,
19605,
62,
26791,
13,
8692,
2414,
62,
12303,
312,
1330,
2779,
2414,
62,
1462,
62,
12303,
312,
11,
334,
27112,
62,
1462,
62,
8692,
2414,
11... | 2.566372 | 113 |
"""The devolo_home_control integration."""
from __future__ import annotations
import asyncio
from functools import partial
from types import MappingProxyType
from typing import Any
from devolo_home_control_api.exceptions.gateway import GatewayOfflineError
from devolo_home_control_api.homecontrol import HomeControl
from devolo_home_control_api.mydevolo import Mydevolo
from homeassistant.components import zeroconf
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME, EVENT_HOMEASSISTANT_STOP
from homeassistant.core import Event, HomeAssistant
from homeassistant.exceptions import ConfigEntryAuthFailed, ConfigEntryNotReady
from .const import (
CONF_MYDEVOLO,
DEFAULT_MYDEVOLO,
DOMAIN,
GATEWAY_SERIAL_PATTERN,
PLATFORMS,
)
def configure_mydevolo(conf: dict[str, Any] | MappingProxyType[str, Any]) -> Mydevolo:
"""Configure mydevolo."""
mydevolo = Mydevolo()
mydevolo.user = conf[CONF_USERNAME]
mydevolo.password = conf[CONF_PASSWORD]
mydevolo.url = conf.get(CONF_MYDEVOLO, DEFAULT_MYDEVOLO)
return mydevolo
| [
37811,
464,
1614,
14057,
62,
11195,
62,
13716,
11812,
526,
15931,
198,
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
11748,
30351,
952,
198,
6738,
1257,
310,
10141,
1330,
13027,
198,
6738,
3858,
1330,
337,
5912,
44148,
6030,
198,
6738,
... | 2.932115 | 383 |
## -------------------------------------------------------- ##
# Trab 1 IA 2019-2
#
# Rafael Belmock Pedruzzi
#
# hillClimbing.py: implements the hill climbing metaheuristic for the bag problem
#
# Python version: 3.7.4
## -------------------------------------------------------- ##
import bagProblem as bp
from time import time
# Returns True and the valid state with the biggest value, or False if no state is valid:
# Hill Climbing:
# T = 19 # bag size
# OBJs = [(1,3), (4,6), (5,7)] # object list (v,t)
# print(hill_Climbing(T,OBJs))
| [
2235,
20368,
22369,
22492,
198,
2,
220,
220,
833,
397,
352,
35229,
13130,
12,
17,
198,
2,
198,
2,
220,
220,
31918,
3944,
76,
735,
13457,
622,
46218,
198,
2,
198,
2,
220,
220,
12788,
34,
2475,
4623,
13,
9078,
25,
23986,
262,
12788,... | 3.313253 | 166 |
"""ssb-pseudonymization - Data pseudonymization functions used by SSB"""
__version__ = '0.0.2'
__author__ = 'Statistics Norway (ssb.no)'
__all__ = []
| [
37811,
824,
65,
12,
7752,
463,
5177,
1634,
532,
6060,
48129,
1634,
5499,
973,
416,
6723,
33,
37811,
198,
198,
834,
9641,
834,
796,
705,
15,
13,
15,
13,
17,
6,
198,
834,
9800,
834,
796,
705,
48346,
15238,
357,
824,
65,
13,
3919,
... | 2.903846 | 52 |
"""
Keepkey
*******
"""
from ..errors import (
DEVICE_NOT_INITIALIZED,
DeviceNotReadyError,
common_err_msgs,
handle_errors,
)
from .trezorlib import protobuf as p
from .trezorlib.transport import (
hid,
udp,
webusb,
)
from .trezor import TrezorClient, HID_IDS, WEBUSB_IDS
from .trezorlib.messages import (
DebugLinkState,
Features,
HDNodeType,
ResetDevice,
)
from typing import (
Any,
Dict,
List,
Optional,
)
py_enumerate = enumerate # Need to use the enumerate built-in but there's another function already named that
KEEPKEY_HID_IDS = {(0x2B24, 0x0001)}
KEEPKEY_WEBUSB_IDS = {(0x2B24, 0x0002)}
KEEPKEY_SIMULATOR_PATH = '127.0.0.1:11044'
HID_IDS.update(KEEPKEY_HID_IDS)
WEBUSB_IDS.update(KEEPKEY_WEBUSB_IDS)
def enumerate(password: str = "") -> List[Dict[str, Any]]:
results = []
devs = hid.HidTransport.enumerate(usb_ids=KEEPKEY_HID_IDS)
devs.extend(webusb.WebUsbTransport.enumerate(usb_ids=KEEPKEY_WEBUSB_IDS))
devs.extend(udp.UdpTransport.enumerate(KEEPKEY_SIMULATOR_PATH))
for dev in devs:
d_data: Dict[str, Any] = {}
d_data['type'] = 'keepkey'
d_data['model'] = 'keepkey'
d_data['path'] = dev.get_path()
client = None
with handle_errors(common_err_msgs["enumerate"], d_data):
client = KeepkeyClient(d_data['path'], password)
try:
client.client.refresh_features()
except TypeError:
continue
if 'keepkey' not in client.client.features.vendor:
continue
d_data['label'] = client.client.features.label
if d_data['path'].startswith('udp:'):
d_data['model'] += '_simulator'
d_data['needs_pin_sent'] = client.client.features.pin_protection and not client.client.features.unlocked
d_data['needs_passphrase_sent'] = client.client.features.passphrase_protection # always need the passphrase sent for Keepkey if it has passphrase protection enabled
if d_data['needs_pin_sent']:
raise DeviceNotReadyError('Keepkey is locked. Unlock by using \'promptpin\' and then \'sendpin\'.')
if d_data['needs_passphrase_sent'] and not password:
raise DeviceNotReadyError("Passphrase needs to be specified before the fingerprint information can be retrieved")
if client.client.features.initialized:
d_data['fingerprint'] = client.get_master_fingerprint().hex()
d_data['needs_passphrase_sent'] = False # Passphrase is always needed for the above to have worked, so it's already sent
else:
d_data['error'] = 'Not initialized'
d_data['code'] = DEVICE_NOT_INITIALIZED
if client:
client.close()
results.append(d_data)
return results
| [
37811,
198,
15597,
2539,
198,
2466,
8162,
198,
37811,
198,
198,
6738,
11485,
48277,
1330,
357,
198,
220,
220,
220,
5550,
27389,
62,
11929,
62,
1268,
2043,
12576,
14887,
1961,
11,
198,
220,
220,
220,
16232,
3673,
35474,
12331,
11,
198,
... | 2.265882 | 1,275 |
# -*- coding: utf-8 -*-
"""K-medoids clustering"""
# Authors: Timo Erkkil <timo.erkkila@gmail.com>
# Antti Lehmussola <antti.lehmussola@gmail.com>
# Kornel Kieczewski <kornel.mail@gmail.com>
# Zane Dufour <zane.dufour@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
from sklearn.base import BaseEstimator, ClusterMixin, TransformerMixin
from sklearn.metrics.pairwise import (
pairwise_distances,
pairwise_distances_argmin,
)
from sklearn.utils import check_array, check_random_state
from sklearn.utils.extmath import stable_cumsum
from sklearn.utils.validation import check_is_fitted
from sklearn.exceptions import ConvergenceWarning
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
42,
12,
1150,
10994,
32966,
1586,
37811,
198,
198,
2,
46665,
25,
5045,
78,
5256,
28747,
346,
1279,
16514,
78,
13,
9587,
74,
10102,
31,
14816,
13,
785,
29,
198,
... | 2.626415 | 265 |
from logic import *
| [
6738,
9156,
1330,
1635,
628,
198
] | 3.666667 | 6 |
problem_type = "segmentation"
dataset_name = "synthia_rand_cityscapes"
dataset_name2 = None
perc_mb2 = None
model_name = "resnetFCN"
freeze_layers_from = None
show_model = False
load_imageNet = True
load_pretrained = False
weights_file = "weights.hdf5"
train_model = True
test_model = True
pred_model = False
debug = True
debug_images_train = 50
debug_images_valid = 50
debug_images_test = 50
debug_n_epochs = 2
batch_size_train = 2
batch_size_valid = 2
batch_size_test = 2
crop_size_train = (512, 512)
crop_size_valid = None
crop_size_test = None
resize_train = None
resize_valid = None
resize_test = None
shuffle_train = True
shuffle_valid = False
shuffle_test = False
seed_train = 1924
seed_valid = 1924
seed_test = 1924
optimizer = "rmsprop"
learning_rate = 0.0001
weight_decay = 0.0
n_epochs = 1000
save_results_enabled = True
save_results_nsamples = 5
save_results_batch_size = 5
save_results_n_legend_rows = 1
earlyStopping_enabled = True
earlyStopping_monitor = "val_jaccard"
earlyStopping_mode = "max"
earlyStopping_patience = 50
earlyStopping_verbose = 0
checkpoint_enabled = True
checkpoint_monitor = "val_jaccard"
checkpoint_mode = "max"
checkpoint_save_best_only = True
checkpoint_save_weights_only = True
checkpoint_verbose = 0
plotHist_enabled = True
plotHist_verbose = 0
LRScheduler_enabled = True
LRScheduler_batch_epoch = "batch"
LRScheduler_type = "poly"
LRScheduler_M = 75000
LRScheduler_decay = 0.1
LRScheduler_S = 10000
LRScheduler_power = 0.9
TensorBoard_enabled = True
TensorBoard_histogram_freq = 1
TensorBoard_write_graph = True
TensorBoard_write_images = False
TensorBoard_logs_folder = None
norm_imageNet_preprocess = True
norm_fit_dataset = False
norm_rescale = 1
norm_featurewise_center = False
norm_featurewise_std_normalization = False
norm_samplewise_center = False
norm_samplewise_std_normalization = False
norm_gcn = False
norm_zca_whitening = False
cb_weights_method = None
da_rotation_range = 0
da_width_shift_range = 0.0
da_height_shift_range = 0.0
da_shear_range = 0.0
da_zoom_range = 0.5
da_channel_shift_range = 0.0
da_fill_mode = "constant"
da_cval = 0.0
da_horizontal_flip = True
da_vertical_flip = False
da_spline_warp = False
da_warp_sigma = 10
da_warp_grid_size = 3
da_save_to_dir = False | [
45573,
62,
4906,
796,
366,
325,
5154,
341,
1,
198,
19608,
292,
316,
62,
3672,
796,
366,
28869,
31079,
62,
25192,
62,
19205,
1416,
7916,
1,
198,
19608,
292,
316,
62,
3672,
17,
796,
6045,
198,
525,
66,
62,
2022,
17,
796,
6045,
198,
... | 2.685096 | 832 |
from bs4 import BeautifulSoup
from optimizers.AdvancedJSOptimizer import AdvancedJSOptimizer
from optimizers.CSSOptimizer import CSSOptimizer | [
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
198,
6738,
6436,
11341,
13,
28809,
41,
15821,
457,
320,
7509,
1330,
13435,
41,
15821,
457,
320,
7509,
198,
6738,
6436,
11341,
13,
49155,
27871,
320,
7509,
1330,
17391,
27871,
320,
7509
] | 3.463415 | 41 |
'''Provide interface for game.'''
from typing import Any, Dict, List, Optional, Union
import flask
from flask import Blueprint, url_for
from flask_login import current_user, login_required
from flask_wtf import FlaskForm
from flask_sse import sse
from werkzeug.wrappers import Response
from wtforms import IntegerField, SubmitField
from wtforms.validators import DataRequired, NumberRange
# from spades import exceptions
from spades.game import GameState
from spades.game.models.player import Player
main = Blueprint('main', __name__)
mock_names: List[str] = ['john']
__game: GameState = GameState()
def get_player() -> Optional[Player]:
player = __game.get_player_by_username(current_user.username)
if not player:
__game.add_player(Player(current_user.username))
player = __game.get_player_by_username(current_user.username)
return player
def get_turns(players: List[Player]) -> List[Dict[str, Any]]:
player_turns: List[Dict[str, Any]] = []
for n, player in enumerate(players):
inst = {
'username': player.username,
'active': is_active(n)
}
if player.username == current_user.username:
inst['hand'] = player.hand.to_json # type: ignore
else:
inst['card_count'] = len(player.hand) # type: ignore
player_turns.append(inst)
print('player turns', player_turns)
return player_turns
| [
7061,
6,
15946,
485,
7071,
329,
983,
2637,
7061,
198,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
11,
7343,
11,
32233,
11,
4479,
198,
198,
11748,
42903,
198,
6738,
42903,
1330,
39932,
11,
19016,
62,
1640,
198,
6738,
42903,
62,
38235,
... | 2.748077 | 520 |
{
"targets": [
{
"target_name": "cclust",
"sources": [ "./src/heatmap_clustering_js_module.cpp" ],
'dependencies': ['bonsaiclust']
},
{
'target_name': 'bonsaiclust',
'type': 'static_library',
'sources': [ 'src/cluster.c' ],
'cflags': ['-fPIC', '-I', '-pedantic', '-Wall']
}
]
}
| [
90,
198,
220,
366,
83,
853,
1039,
1298,
685,
198,
220,
220,
220,
1391,
198,
220,
220,
220,
220,
220,
366,
16793,
62,
3672,
1298,
366,
535,
38878,
1600,
198,
220,
220,
220,
220,
220,
366,
82,
2203,
1298,
685,
366,
19571,
10677,
14,... | 1.971098 | 173 |
# Copyright (c) 2019 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .actuator import Actuator
from .axis import Axis
from .box import Box
from .child import Child
from .collision import Collision
from .color import Color
from .cylinder import Cylinder
from .dynamics import Dynamics
from .gazebo import Gazebo
from .geometry import Geometry
from .hardware_interface import HardwareInterface
from .inertia import Inertia
from .inertial import Inertial
from .joint import Joint
from .limit import Limit
from .link import Link
from .mass import Mass
from .material import Material
from .mechanical_reduction import MechanicalReduction
from .mesh import Mesh
from .mimic import Mimic
from .origin import Origin
from .parent import Parent
from .robot import Robot
from .safety_controller import SafetyController
from .sphere import Sphere
from .texture import Texture
from .transmission import Transmission
from .type import Type
from .visual import Visual
def get_all_urdf_element_classes():
"""Get list of all URDF element classes."""
import sys
import inspect
from ..types import XMLBase
output = list()
current_module = sys.modules[__name__]
for name, obj in inspect.getmembers(current_module):
if inspect.isclass(obj):
if issubclass(obj, XMLBase) and obj._TYPE == 'urdf':
output.append(obj)
return output
def create_urdf_element(tag, *args):
"""URDF element factory.
> *Input arguments*
* `tag` (*type:* `str`): Name of the URDF element.
* `args`: Extra arguments for URDF element constructor.
> *Returns*
URDF element if `tag` refers to a valid URDF element.
`None`, otherwise.
"""
import sys
import inspect
from ..types import XMLBase
current_module = sys.modules[__name__]
for name, obj in inspect.getmembers(current_module):
if inspect.isclass(obj):
if issubclass(obj, XMLBase):
if tag == obj._NAME and obj._TYPE == 'urdf':
return obj(*args)
return None
def create_urdf_type(tag):
"""Return handle of the URDF element type.
> *Input arguments*
* `tag` (*type:* `str`): Name of the URDF element.
> *Returns*
URDF element type if `tag` is valid, `None` otherwise`.
"""
import sys
import inspect
from ..types import XMLBase
current_module = sys.modules[__name__]
for name, obj in inspect.getmembers(current_module):
if inspect.isclass(obj):
if issubclass(obj, XMLBase):
if tag == obj._NAME and obj._TYPE == 'urdf':
return obj
return None
def is_urdf_element(obj):
"""Test if XML element is an URDF element."""
from ..types import XMLBase
return obj.__class__ in XMLBase.__subclasses__() and \
obj._TYPE == 'urdf'
__all__ = [
'get_all_urdf_element_classes',
'create_urdf_element',
'create_urdf_type',
'is_urdf_element',
'Actuator',
'Axis',
'Box',
'Child',
'Collision',
'Color',
'Cylinder',
'Dynamics',
'Gazebo',
'Geometry',
'HardwareInterface',
'Inertia',
'Inertial',
'Joint',
'Limit',
'Link',
'Mass',
'Material',
'MechanicalReduction',
'Mesh',
'Mimic',
'Origin',
'Parent',
'Robot',
'SafetyController',
'Sphere',
'Texture',
'Transmission',
'Type',
'Visual'
]
| [
2,
15069,
357,
66,
8,
13130,
532,
383,
23652,
1523,
16588,
329,
21347,
1765,
78,
7035,
198,
2,
1114,
1321,
319,
262,
11756,
6634,
4870,
766,
262,
28536,
2393,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
... | 2.775412 | 1,456 |
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import NoSuchElementException
| [
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11284,
13,
17077,
1330,
5313,
32103,
21321,
198,
6738,
384,
11925,
1505,
13,
11321,
13,
1069,
11755,
1330,
1400,
16678,
20180,
16922,
198
] | 3.870968 | 31 |
"""Custom topology example
Two directly connected switches plus a host for each switch:
host --- switch --- switch --- host
Adding the 'topos' dict with a key/value pair to generate our newly defined
topology enables one to pass in '--topo=mytopo' from the command line.
"""
from mininet.topo import Topo
topos = { 'vpls': ( lambda: MyTopo() ) }
| [
37811,
15022,
1353,
1435,
1672,
198,
198,
7571,
3264,
5884,
18225,
5556,
257,
2583,
329,
1123,
5078,
25,
628,
220,
220,
2583,
11420,
5078,
11420,
5078,
11420,
2583,
198,
198,
32901,
262,
705,
4852,
418,
6,
8633,
351,
257,
1994,
14,
83... | 3.367925 | 106 |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Module houses `FeatherDispatcher` class, that is used for reading `.feather` files."""
from modin.engines.base.io.column_stores.column_store_dispatcher import (
ColumnStoreDispatcher,
)
| [
2,
49962,
284,
3401,
259,
7712,
4816,
739,
530,
393,
517,
18920,
5964,
11704,
13,
198,
2,
4091,
262,
28536,
2393,
9387,
351,
428,
670,
329,
3224,
1321,
5115,
198,
2,
6634,
9238,
13,
220,
383,
3401,
259,
7712,
4816,
16625,
428,
2393,... | 3.959514 | 247 |
import pytest
from npRNN.tree_utils import Node, NodeTree
| [
11748,
12972,
9288,
198,
198,
6738,
45941,
49,
6144,
13,
21048,
62,
26791,
1330,
19081,
11,
19081,
27660,
198
] | 3.105263 | 19 |
"""A script is a series of operations."""
import json
import os
from .ops import create
def load_script(f):
"""Load and parse the script given.
Args:
f (:class:`file` or :class:`str`): Open file object or filename.
Returns:
:class:`Script`: The parsed script object.
"""
if isinstance(f, (str, os.PathLike)):
f = open(f)
with f:
return parse(f.read())
parse = Script
| [
37811,
32,
4226,
318,
257,
2168,
286,
4560,
526,
15931,
198,
198,
11748,
33918,
198,
11748,
28686,
198,
198,
6738,
764,
2840,
1330,
2251,
628,
198,
198,
4299,
3440,
62,
12048,
7,
69,
2599,
198,
220,
220,
220,
37227,
8912,
290,
21136,
... | 2.48 | 175 |
from plasTeX import Command, Environment, sourceChildren
from plasTeX.Base.LaTeX import Math
from plasTeX.Base.TeX.Primitives import BoxCommand
# mhchem package - mostly handled by mathjax
# Overrive boxcommands inside MathJaX to avoid extra <script type="math/tex">
| [
6738,
458,
292,
49568,
1330,
9455,
11,
9344,
11,
2723,
26829,
198,
6738,
458,
292,
49568,
13,
14881,
13,
14772,
49568,
1330,
16320,
198,
6738,
458,
292,
49568,
13,
14881,
13,
49568,
13,
23828,
20288,
1330,
8315,
21575,
198,
2,
285,
71... | 3.573333 | 75 |
"""
Totally untested file. Will be removed in subsequent commits
"""
import tensorflow as tf
import matplotlib.image as mpimg
import numpy as np
from math import ceil, floor
import os
IMAGE_SIZE = 720
from math import ceil, floor
# Produce each image at scaling of 90%, 75% and 60% of original image.
X_imgs = os.listdir("/home/pallab/gestures-cnn/images/resized/")
scaled_imgs = central_scale_images(X_imgs, [0.90, 0.75, 0.60])
translated_imgs = translate_images(X_imgs)
rotated_imgs = rotate_images(X_imgs)
flipped_images = flip_images(X_imgs)
| [
37811,
198,
51,
38908,
1418,
7287,
2393,
13,
2561,
307,
4615,
287,
8840,
23463,
198,
37811,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
2603,
29487,
8019,
13,
9060,
355,
29034,
9600,
198,
11748,
299,
32152,
355,
45941,
198,
6... | 2.76 | 200 |
#!/usr/bin/env python3
# encoding: utf-8
#
# (C) 2012-2016 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
"""\
Link To The Past - a backup tool
Hash functions and commands.
"""
import hashlib
import zlib
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
SUPPORTED_HASHES = {
'NONE': NoHash,
'CRC32': CRC32,
'MD5': hashlib.md5,
'SHA-256': hashlib.sha256,
'SHA-512': hashlib.sha512,
}
def get_factory(name):
"""\
Get an object for calculating a hash.
>>> f = get_factory('SHA-256')
>>> h = f()
>>> h.update(b'Hello World')
>>> h.hexdigest()
'a591a6d40bf420404a011733cfb7b190d62c65bf0bcda32b57b277d9ad9f146e'
"""
if name is None:
name = 'NONE'
return SUPPORTED_HASHES[name.upper()]
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
import doctest
doctest.testmod()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
21004,
25,
3384,
69,
12,
23,
198,
2,
198,
2,
357,
34,
8,
2321,
12,
5304,
5180,
12060,
354,
20259,
1279,
565,
494,
354,
20259,
31,
70,
36802,
13,
3262,
29,
198,
2,
198,
2... | 2.094421 | 466 |
#!/usr/bin/env python3.8
table="".maketrans("0123456789","\N{Devanagari digit zero}\N{Devanagari digit one}"
"\N{Devanagari digit two}\N{Devanagari digit three}"
"\N{Devanagari digit four}\N{Devanagari digit five}"
"\N{Devanagari digit six}\N{Devanagari digit seven}"
"\N{Devanagari digit eight}\N{Devanagari digit nine}")
print("0123456789".translate(table)) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
13,
23,
198,
198,
11487,
2625,
1911,
76,
461,
21879,
504,
7203,
486,
1954,
2231,
3134,
4531,
2430,
59,
45,
90,
13603,
272,
363,
2743,
16839,
6632,
32239,
45,
90,
13603,
272,
363,
2743... | 2.482759 | 145 |
import logging
import warnings
import pyopencl
import pyopencl.array
logger = logging.getLogger(__name__)
gpu_initialized = False
gpu_ctx = None
gpu_queue = None
def avoid_apple_cpu(ctx):
"""
The Apple CPU OpenCL implementation is awful. Instead, we should just use
PoCL.
"""
if ctx.devices[0].platform.name == "Apple" and "CPU" in ctx.devices[0].name:
platforms = pyopencl.get_platforms()
platform_idx = None
for i, p in enumerate(platforms):
if p.name != "Apple":
platform_idx = i
else:
apple_platform_idx = i
if platform_idx is not None:
warnings.warn(
"The OpenCL context created used the Apple CPU"
" implementation which is not supported. Trying again"
f" with a different platform: {p.name}"
)
return pyopencl.create_some_context(answers=[str(platform_idx)])
# If no other platforms were found, let's try to
# find a non-CPU device like an Iris Pro.
platform_idx = apple_platform_idx
device_idx = None
for i, d in enumerate(platforms[platform_idx].get_devices()):
if "CPU" in d.name:
continue
device_idx = i
break
if device_idx is not None:
warnings.warn(
"The OpenCL context created used the Apple CPU"
" implementation which is not supported. Trying again"
f" with a different device: {d.name}"
)
return pyopencl.create_some_context(
answers=[str(platform_idx), str(device_idx)]
)
raise NotImplementedError(
"cutde does not support the Apple CPU OpenCL implementation and no other"
" platform or device was found. Please consult the cutde README"
)
return ctx
cluda_preamble = """
// taken from pyopencl._cluda
#define LOCAL_BARRIER barrier(CLK_LOCAL_MEM_FENCE)
// 'static' helps to avoid the "no previous prototype for function" warning
#if __OPENCL_VERSION__ >= 120
#define WITHIN_KERNEL static
#else
#define WITHIN_KERNEL
#endif
#define KERNEL __kernel
#define GLOBAL_MEM __global
#define LOCAL_MEM __local
#define LOCAL_MEM_DYNAMIC __local
#define LOCAL_MEM_ARG __local
#define CONSTANT __constant
// INLINE is already defined in Beignet driver
#ifndef INLINE
#define INLINE inline
#endif
#define SIZE_T size_t
#define VSIZE_T size_t
// used to align fields in structures
#define ALIGN(bytes) __attribute__ ((aligned(bytes)))
#if defined(cl_khr_fp64)
#pragma OPENCL EXTENSION cl_khr_fp64: enable
#elif defined(cl_amd_fp64)
#pragma OPENCL EXTENSION cl_amd_fp64: enable
#endif
"""
| [
11748,
18931,
198,
11748,
14601,
198,
198,
11748,
12972,
9654,
565,
198,
11748,
12972,
9654,
565,
13,
18747,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
198,
198,
46999,
62,
17532,
796,
10352,
198,
4... | 2.323232 | 1,188 |
import unittest
import numpy as np
from astroNN.lamost import wavelength_solution, pseudo_continuum
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
6468,
305,
6144,
13,
2543,
455,
1330,
28400,
62,
82,
2122,
11,
24543,
62,
18487,
13814,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,... | 2.849057 | 53 |
from django.http import HttpResponse
from rest_framework.decorators import api_view
from rest_framework.decorators import parser_classes
from rest_framework.parsers import JSONParser
import numpy as np
import json
import os
from .utils.spectrogram_utils import SpectrogramUtils
from .utils.feature_extraction_utils import FeatureExtractionUtils
from .utils.classification_utils import ClassificationUtils
from .utils.file_utils import FileUtils
from .utils.dir_utils import DirUtils
from .constants.headers import headers_data, headers_clusters, headers_clusters_no_display
file_utils = FileUtils()
dir_utils = DirUtils()
| [
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
198,
6738,
1334,
62,
30604,
13,
12501,
273,
2024,
1330,
40391,
62,
1177,
198,
6738,
1334,
62,
30604,
13,
12501,
273,
2024,
1330,
30751,
62,
37724,
198,
6738,
1334,
62,
30604,
13,
... | 3.609195 | 174 |
# Modules
import os
import csv
#Set up path for file
csvpath=os.path.join("..", "Resources", "election_data.csv" )
#print(csvpath)
total_votes=0
#total_profit=0
#previous_value=0
#current_value=0
#list_changes=[]
print("Election Results")
print("---------------------")
#Open the csv file
with open(csvpath, newline='') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',')
#print(csvreader)
#Read the header row
csv_header=next(csvreader)
#print(f"CSV Header: {csv_header}")
#Read each row of data after the header
for row in csvreader:
total_votes=total_votes+1
current_value=int(row[0])
#total_profit=total_profit+1
#current_value=int(row[1])
#monthly_diff=current_value-previous_value
#list_changes.append(monthly_diff)
#list_changes.remove("867884")
#previous_value=current_value
#avg_monthly_diff=sum[list_changes]
# Calculate the average of the changes in Profit/Lossess over the entire period
# Determine the greateest increase in profits (date and amount) over the entire period
# Determine the greaterst decrease in losses (datea and amount) ove the entire period
print("Total Votes: " + str(total_votes))
print("---------------------")
#print("Total: $"+str(total_profit))
print("---------------------")
#print("Average Change: $" +str(total_profit))
print("---------------------")
#print(row)
| [
2,
3401,
5028,
198,
11748,
28686,
198,
11748,
269,
21370,
198,
198,
2,
7248,
510,
3108,
329,
2393,
198,
40664,
6978,
28,
418,
13,
6978,
13,
22179,
7203,
492,
1600,
366,
33236,
1600,
366,
14300,
62,
7890,
13,
40664,
1,
1267,
198,
2,
... | 2.707457 | 523 |
"""
Extension to the logging package to support buildlogger.
"""
# Alias the built-in logging.Logger class for type checking arguments. Those interested in
# constructing a new Logger instance should use the loggers.new_logger() function instead.
from logging import Logger
from . import config
from . import buildlogger
from . import flush
from . import loggers
| [
37811,
198,
11627,
3004,
284,
262,
18931,
5301,
284,
1104,
1382,
6404,
1362,
13,
198,
37811,
628,
198,
198,
2,
978,
4448,
262,
3170,
12,
259,
18931,
13,
11187,
1362,
1398,
329,
2099,
10627,
7159,
13,
5845,
4609,
287,
198,
2,
30580,
... | 3.98913 | 92 |
# -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
13130,
1766,
956,
414,
3457,
13,
628,
628
] | 2.416667 | 24 |
from utils.utils import *
lines = get_input(__file__)
lines_as_nums = lines_to_nums(lines)
print("part1:", part1(lines_as_nums))
print("part2:", part2())
| [
6738,
3384,
4487,
13,
26791,
1330,
1635,
198,
198,
6615,
796,
651,
62,
15414,
7,
834,
7753,
834,
8,
198,
6615,
62,
292,
62,
77,
5700,
796,
3951,
62,
1462,
62,
77,
5700,
7,
6615,
8,
628,
628,
198,
4798,
7203,
3911,
16,
25,
1600,
... | 2.409091 | 66 |
import datetime
import os
import sys
from google.cloud import firestore
from peewee import *
sys.path.append(os.getcwd())
home_dir = os.getenv('HOME')
db_file_path = os.getcwd() + '/../../data/news_rider.db'
print("Reading database from {}".format(db_file_path))
old_db = SqliteDatabase(db_file_path)
db = firestore.Client()
posts_ref = db.collection('posts')
if __name__ == '__main__':
for news_item in NewsItem.select():
if not exists_in_database(news_item.NewsUrl):
save_data(news_item.NewsUrl, news_item.NewsTitle, news_item.TimeStamp)
| [
11748,
4818,
8079,
198,
11748,
28686,
198,
11748,
25064,
198,
198,
6738,
23645,
13,
17721,
1330,
2046,
8095,
198,
6738,
613,
413,
1453,
1330,
1635,
198,
198,
17597,
13,
6978,
13,
33295,
7,
418,
13,
1136,
66,
16993,
28955,
198,
198,
11... | 2.613636 | 220 |
# Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from tensorforce.core.baselines import NetworkBaseline
| [
2,
15069,
2177,
19594,
13,
952,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
137... | 4.23 | 200 |
# coding=utf-8
# Filename: h5tree.py
"""
Print the ROOT file structure.
Usage:
rtree FILE
rtree (-h | --help)
rtree --version
Options:
FILE Input file.
-h --help Show this screen.
"""
from __future__ import division, absolute_import, print_function
from km3pipe.io.root import open_rfile
__author__ = "Moritz Lotze"
__copyright__ = "Copyright 2016, Moritz Lotze and the KM3NeT collaboration."
__credits__ = []
__license__ = "MIT"
__maintainer__ = "Moritz Lotze"
__email__ = "mlotze@km3net.de"
__status__ = "Development"
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
7066,
12453,
25,
289,
20,
21048,
13,
9078,
198,
37811,
198,
18557,
262,
15107,
2394,
2393,
4645,
13,
198,
198,
28350,
25,
198,
220,
220,
220,
374,
21048,
45811,
198,
220,
220,
220,
374,
21048,
... | 2.658654 | 208 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from psychopy.visual import Window, TextStim
from psychopy.core import wait, Clock, quit
from psychopy.event import clearEvents, waitKeys, Mouse
from psychopy.gui import Dlg
from time import gmtime, strftime
from codecs import open
from random import shuffle, choice, randint
from copy import deepcopy
from psychopy.iohub import launchHubServer
from numpy import mean, std
from datetime import datetime
from itertools import permutations
import random
## for testing
testing = False # True for testing, False for real recording
###
main_ddline = 1 # sec
isi_set = (500, 800, 1100)
instruction_color = '#111111' #formerly = #9999FF
############ MAIN ITEMS - paste from JS
probe_crime_list_1 = ' Ausgeben als : Tim Koch\n\n Nachricht an Deckname : Blaue Jacke\n\n Aktion : Operation Kuh\n\n Objekt : Regen Akte\n\n Inhalt des Objektes : Helikopter Plne\n\n Adresse : Hai Strae'
probe_crime_list_2 = ' Ausgeben als : Paul Nowak\n\n Nachricht an Deckname : Weies Shirt\n\n Aktion : Operation Fichte\n\n Objekt : Eulen Akte\n\n Inhalt des Objektes : Messing Plne\n\n Adresse : Lwen Strae'
crime_list_1 = ["Tim Koch", "Blaue Jacke", "Operation Kuh", "Regen Akte", "Helikopter Plne", "Hai Strae"]
crime_list_2 = ["Paul Nowak", "Weies Shirt","Operation Fichte","Eulen Akte","Messing Plne","Lwen Strae"]
dummy_list_numbers = [0, 1, 2, 3, 4, 5]
training_recall_item = {0 : 'Ausgeben als', 1 : 'Nachricht an Deckname', 2 : 'Aktion', 3 : 'Objekt', 4 : 'Inhalt des Objektes', 5 : 'Adresse'}
rounds = 1  # number of experiment rounds (blocks) to run
# Development vs. real-recording settings: shorter instruction waits and an
# ordinary quit key while testing; the real run uses the key name
# 'notallowed' — presumably unobtainable so participants cannot abort
# (NOTE(review): confirm intent).
if testing:
    escape_key = 'escape'
    instr_wait = 0.1
else:
    escape_key = 'notallowed'
    instr_wait = 0.5
# EXECUTE all main functions here
# from https://github.com/luosch/similar_text
def similar_str(str1, str2):
    """Locate the longest common substring of ``str1`` and ``str2``.

    Returns a tuple ``(length, start1, start2)``: the length of the longest
    run of characters shared by both strings, plus its starting offsets in
    each string. ``(0, 0, 0)`` when nothing matches.
    """
    best_len = best_i = best_j = 0
    n, m = len(str1), len(str2)
    for i in range(n):
        for j in range(m):
            run = 0
            # Extend the candidate match while the characters keep agreeing.
            while i + run < n and j + run < m and str1[i + run] == str2[j + run]:
                run += 1
            if run > best_len:
                best_len, best_i, best_j = run, i, j
    return best_len, best_i, best_j


def similar_char(str1, str2):
    """Total number of characters matched between ``str1`` and ``str2``.

    Implements the PHP ``similar_text`` scheme: take the longest common
    substring, then recursively match the pieces to its left and to its
    right in both strings.
    """
    length, left, right = similar_str(str1, str2)
    matched = length
    if length != 0:
        # Recurse into the unmatched prefixes (only when both are non-empty)...
        if left and right:
            matched += similar_char(str1[:left], str2[:right])
        # ...and into the unmatched suffixes.
        if left + length < len(str1) and right + length < len(str2):
            matched += similar_char(str1[left + length:], str2[right + length:])
    return matched
def similar_text(str1, str2):
    """
    Return an int in [0, 100] which stands for the match level of the two
    strings (the PHP ``similar_text`` percentage).

    :raises TypeError: if either argument is not a string.

    .. note:: The both-empty case returns ``0.0`` (a float) — kept as-is for
       backward compatibility with existing callers.
    """
    # ``unicode`` exists only on Python 2; referencing it unguarded made this
    # function raise NameError (instead of TypeError) on Python 3.
    try:
        string_types = (str, unicode)  # noqa: F821 -- Python 2 only
    except NameError:
        string_types = (str,)  # Python 3: every str is unicode
    if not isinstance(str1, string_types):
        raise TypeError("must be str or unicode")
    elif not isinstance(str2, string_types):
        raise TypeError("must be str or unicode")
    elif len(str1) == 0 and len(str2) == 0:
        return 0.0
    else:
        # Factor 200 = 2 * 100: matched chars counted against both lengths.
        return int(similar_char(str1, str2) * 200.0 / (len(str1) + len(str2)))
# EXECUTE
execute()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
11,
28000,
1098,
62,
17201,
874,
19... | 2.443478 | 1,380 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_asm_policy_import
short_description: Manage BIG-IP ASM policy imports
description:
- Manage BIG-IP ASM policies policy imports.
version_added: 2.8
options:
name:
description:
- The ASM policy to create or override.
type: str
required: True
inline:
description:
- When specified the ASM policy is created from a provided string.
- Content needs to be provided in a valid XML format otherwise the operation will fail.
type: str
source:
description:
- Full path to a policy file to be imported into the BIG-IP ASM.
- Policy files exported from newer versions of BIG-IP cannot be imported into older
versions of BIG-IP. The opposite, however, is true; you can import older into
newer.
- The file format can be binary of XML.
type: path
force:
description:
- When set to C(yes) any existing policy with the same name will be overwritten by the new import.
- Works for both inline and file imports, if the policy does not exist this setting is ignored.
default: no
type: bool
partition:
description:
- Device partition to create policy on.
type: str
default: Common
extends_documentation_fragment: f5
author:
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Import ASM policy
bigip_asm_policy_import:
name: new_asm_policy
file: /root/asm_policy.xml
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Import ASM policy inline
bigip_asm_policy_import:
name: foo-policy4
inline: <xml>content</xml>
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Override existing ASM policy
bigip_asm_policy:
name: new_asm_policy
file: /root/asm_policy_new.xml
force: yes
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
'''
RETURN = r'''
source:
description: Local path to an ASM policy file.
returned: changed
type: str
sample: /root/some_policy.xml
inline:
description: Contents of policy as an inline string
returned: changed
type: str
sample: <xml>foobar contents</xml>
name:
description: Name of the ASM policy to be created/overwritten
returned: changed
type: str
sample: Asm_APP1_Transparent
force:
description: Set when overwriting an existing policy
returned: changed
type: bool
sample: yes
'''
import os
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.icontrol import upload_file
from library.module_utils.network.f5.icontrol import module_provisioned
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.icontrol import upload_file
from ansible.module_utils.network.f5.icontrol import module_provisioned
def main():
    """Module entry point: build the AnsibleModule and run the manager."""
    arg_spec = ArgumentSpec()
    ansible_module = AnsibleModule(
        argument_spec=arg_spec.argument_spec,
        supports_check_mode=arg_spec.supports_check_mode,
        mutually_exclusive=arg_spec.mutually_exclusive,
    )

    try:
        results = ModuleManager(module=ansible_module).exec_module()
    except F5ModuleError as err:
        # Report the failure through Ansible's JSON protocol.
        ansible_module.fail_json(msg=str(err))
    else:
        ansible_module.exit_json(**results)


if __name__ == '__main__':
    main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
25,
357,
66,
8,
2864,
11,
376,
20,
27862,
3457,
13,
198,
2,
22961,
3611,
5094,
13789,
410,
18,
13,
15,... | 2.785714 | 1,680 |
__author__ = 'Riccardo Frigerio'
'''
Oggetto HOST
Attributi:
- mac_address: indirizzo MAC
- port: porta a cui e' collegato
- dpid: switch a cui e' collegato
'''
| [
834,
9800,
834,
796,
705,
49,
291,
9517,
78,
1305,
8254,
952,
6,
198,
198,
7061,
6,
198,
46,
1130,
316,
1462,
367,
10892,
198,
8086,
2455,
72,
25,
198,
12,
8352,
62,
21975,
25,
773,
343,
6457,
78,
20582,
198,
12,
2493,
25,
2493,... | 2.314286 | 70 |
from tweepy import OAuthHandler, Stream, API
from tweepy.streaming import StreamListener
import json
import logging
import pymongo
import config
client = pymongo.MongoClient(host='mongo_container', port=27018)
db = client.tweets_db
auth = OAuthHandler(config.CONSUMER_API_KEY, config.CONSUMER_API_SECRET)
auth.set_access_token(config.ACCESS_TOKEN, config.ACCESS_TOKEN_SECRET)
api = API(auth, wait_on_rate_limit=True)
user = api.me()
logging.critical("connection established with user: " + user.name)
# # Function for Twitter authentication
# def authenticate():
# auth = OAuthHandler(config.CONSUMER_API_KEY, config.CONSUMER_API_SECRET)
# auth.set_access_token(config.ACCESS_TOKEN, config.ACCESS_TOKEN_SECRET)
# return auth
# Function for streaming tweets
# Driver function
if __name__ == '__main__':
while True:
stream_tweets(5, warning_log)
time.sleep(30) | [
6738,
4184,
538,
88,
1330,
440,
30515,
25060,
11,
13860,
11,
7824,
198,
6738,
4184,
538,
88,
13,
5532,
278,
1330,
13860,
33252,
198,
11748,
33918,
198,
11748,
18931,
198,
11748,
279,
4948,
25162,
198,
11748,
4566,
628,
198,
16366,
796,
... | 2.754601 | 326 |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from assets.models import Item
from catalog.models import Inventory
CONNECTION_TYPES = (
(1, "Ethernet 1Gb"),
(2, "Ethernet 100Mb"),
(3, "WIFI"),
(4, "Optic Fiber"),
(5, "USB"),
(6, "HDMI"),
(7, "Telephone"),
)
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
334,
1136,
5239,
62,
75,
12582,
355,
4808,
198,
198,
6738,
6798,
13,
27530,
1330,
9097,
198,
6738,
18388,
13,
27530,
1330,
35772,
198,
198,
... | 2.366906 | 139 |
# L1_mpu.py
# Author: Roy Kruemcke (roanoake)
# 30 NOV 2021
# Allows for the interfacing to the MPU9250 using the smbus2 i2c module
# Written for use with Raspberry Pi 4 Model B
import smbus2
import numpy as np
import data
import time
# Initialize Register Data
CONFIG = 0x1A
USER_CTRL = 0x6A
PWR_MGMT_1, PWR_MGMT_2 = 0x6B, 0x6C
GYRO_CONFIG = 0x1B
G_OFFSET = 0x13
GYRO_OUT = 0x43
ACCEL_CONFIG = 0x1C
ACCEL_CONFIG_2 = 0x1D
A_OFFSET = 0x77
ACCEL_OUT = 0x3B
TEMP_OUT = 0x41
# Initialize Scales
MAX_VAL = 2**16
ACCL_SCALE_2G=MAX_VAL/(2*2) # +-2G
ACCL_SCALE_4G=MAX_VAL/(4*2) # +-4G
ACCL_SCALE_8G=MAX_VAL/(8*2) # +-8G
ACCL_SCALE_16G=MAX_VAL/(16*2) # +-16G
GYRO_SCALE_250DG=MAX_VAL/(250*2) # +-250 deg/s
GYRO_SCALE_500DG=MAX_VAL/(500*2) # +-500 deg/s
GYRO_SCALE_1000DG=MAX_VAL/(1000*2) # +-1000 deg/s
GYRO_SCALE_2000DG=MAX_VAL/(2000*2) # +-2000 deg/s
# Open I2C bus
bus=smbus2.SMBus(1)
mpu = 0x68 # Default address for MPU
def getAccelScale():
    """
    Read ACCEL_CONFIG over I2C and return the scaling factor (counts per g)
    matching the currently selected full-scale range.
    """
    cfg = bus.read_byte_data(mpu, ACCEL_CONFIG)
    fs_sel = (cfg & 0x18) >> 3  # Bits 4:3 hold the full-scale selector
    # fs_sel is a 2-bit field, so it is always 0..3 and this lookup is total.
    return (ACCL_SCALE_2G, ACCL_SCALE_4G, ACCL_SCALE_8G, ACCL_SCALE_16G)[fs_sel]
def setAccelScale(newScale:int):
    """
    Set the accelerometer full-scale range by rewriting bits 4:3 of
    ACCEL_CONFIG, then read the register back to verify the change.

    :param newScale: integer 0-3 selecting the full-scale range
        (0=+-2g, 1=+-4g, 2=+-8g, 3=+-16g, per the selector bit field).
    :returns: True if the read-back confirms the new scale; False on bad
        input or when the device did not take the update.
    """
    # Reject selector values outside the 2-bit field.
    if not(0<=newScale<=3):
        print(">> ERROR: attempted to set ACCEL_SCALE to an improper value")
        return False
    # Read-modify-write so the other ACCEL_CONFIG bits are preserved.
    acnfg=bus.read_byte_data(mpu,ACCEL_CONFIG) # Current ACCEL_CONFIG byte
    acnfg &= ~0x18 # Clear the old full-scale bits (4:3)
    acnfg |= (newScale << 3) # Insert the new selector
    bus.write_byte_data(mpu,ACCEL_CONFIG,acnfg) # Commit to the device
    time.sleep(0.01) # Give the MPU 10 ms to latch the change
    # Verify by reading the register back and isolating bits 4:3.
    tmp=bus.read_byte_data(mpu,ACCEL_CONFIG) # Read ACCEL_CONFIG
    tmp=(tmp & 0x18) >> 3 # Isolate scale
    if tmp==newScale: # Read-back matches: update succeeded
        return True
    else: # Device still reports the old scale
        print("> Warning: ACCEL_SCALE did not update")
        return False
print(readAccelerometer())
| [
2,
406,
16,
62,
3149,
84,
13,
9078,
198,
2,
6434,
25,
9817,
33909,
368,
66,
365,
357,
305,
5733,
539,
8,
198,
2,
1542,
8005,
53,
33448,
198,
2,
40402,
329,
262,
9556,
4092,
284,
262,
4904,
52,
5892,
1120,
1262,
262,
895,
10885,
... | 2.077733 | 1,235 |
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_EXT_paletted_texture'
GL_COLOR_INDEX12_EXT=_C('GL_COLOR_INDEX12_EXT',0x80E6)
GL_COLOR_INDEX16_EXT=_C('GL_COLOR_INDEX16_EXT',0x80E7)
GL_COLOR_INDEX1_EXT=_C('GL_COLOR_INDEX1_EXT',0x80E2)
GL_COLOR_INDEX2_EXT=_C('GL_COLOR_INDEX2_EXT',0x80E3)
GL_COLOR_INDEX4_EXT=_C('GL_COLOR_INDEX4_EXT',0x80E4)
GL_COLOR_INDEX8_EXT=_C('GL_COLOR_INDEX8_EXT',0x80E5)
GL_TEXTURE_INDEX_SIZE_EXT=_C('GL_TEXTURE_INDEX_SIZE_EXT',0x80ED)
| [
7061,
6,
16541,
519,
877,
515,
416,
35555,
62,
8612,
378,
4226,
11,
466,
407,
4370,
0,
7061,
6,
201,
198,
6738,
30672,
1330,
3859,
355,
4808,
79,
11,
26515,
201,
198,
2,
6127,
5270,
3544,
428,
201,
198,
6738,
30672,
13,
1831,
13,
... | 2.252199 | 341 |
from rest_framework.parsers import JSONParser, FileUploadParser
from rest_framework.views import APIView
from ..models import City
from ..models import Country
from ..models import University
from ..models import Faculty
from ..models import Program
from rest_framework.decorators import api_view, permission_classes
from rest_framework.response import Response
from rest_framework import permissions
from rest_framework.decorators import parser_classes
from django.utils import timezone
try:
from django.utils import simplejson as json
except ImportError:
import json
| [
6738,
1334,
62,
30604,
13,
79,
945,
364,
1330,
19449,
46677,
11,
9220,
41592,
46677,
198,
6738,
1334,
62,
30604,
13,
33571,
1330,
3486,
3824,
769,
198,
198,
6738,
11485,
27530,
1330,
2254,
198,
6738,
11485,
27530,
1330,
12946,
198,
6738... | 4.077465 | 142 |
"""
Write a function with a list of ints as a parameter. /
Return True if any two nums sum to 0. /
>>> add_to_zero([]) /
False /
>>> add_to_zero([1]) /
False /
>>> add_to_zero([1, 2, 3]) /
False /
>>> add_to_zero([1, 2, 3, -2]) /
True /
"""
| [
37811,
198,
16594,
257,
2163,
351,
257,
1351,
286,
493,
82,
355,
257,
5772,
353,
13,
1220,
198,
13615,
6407,
611,
597,
734,
997,
82,
2160,
284,
657,
13,
1220,
198,
198,
33409,
751,
62,
1462,
62,
22570,
26933,
12962,
1220,
198,
25101... | 2.368932 | 103 |
"""
ASGI config for op_trans project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
from op_trans.websocket import websocket_application
from op_trans.redis_cli import RedisCli
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'op_trans.settings')
django_application = get_asgi_application()
| [
37811,
198,
1921,
18878,
4566,
329,
1034,
62,
7645,
1628,
13,
198,
198,
1026,
32142,
262,
7054,
18878,
869,
540,
355,
257,
8265,
12,
5715,
7885,
3706,
7559,
31438,
15506,
13,
198,
198,
1890,
517,
1321,
319,
428,
2393,
11,
766,
198,
... | 2.964072 | 167 |
# -*- coding: utf-8 -*-
"""API routes config for notifai_recruitment project.
REST framework adds support for automatic URL routing to Django, and provides simple, quick and consistent
way of wiring view logic to a set of URLs.
For more information on this file, see
https://www.django-rest-framework.org/api-guide/routers/
"""
from rest_framework import routers
from textify.api.views import NoteViewSet
router = routers.DefaultRouter()
router.register(r'notes', NoteViewSet)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
17614,
11926,
4566,
329,
407,
361,
1872,
62,
8344,
4872,
434,
1628,
13,
198,
198,
49,
6465,
9355,
6673,
1104,
329,
11353,
10289,
28166,
284,
37770,
11,
290,
3769,... | 3.41844 | 141 |
# File to ingest an equities bundle for zipline
# Import libraries
import pandas as pd
import numpy as np | [
2,
9220,
284,
26151,
281,
1602,
871,
18537,
329,
1976,
24705,
500,
198,
198,
2,
17267,
12782,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941
] | 3.533333 | 30 |
import torch
import torch.nn as nn | [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77
] | 3.4 | 10 |
# -*- coding: utf-8 -*-
# This file is part of Tryton & Nereid. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
from math import ceil
from sql import Select, Column
from sql.functions import Function
from sql.aggregate import Count
from werkzeug.utils import cached_property
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
770,
2393,
318,
636,
286,
9993,
1122,
1222,
399,
567,
312,
13,
383,
27975,
38162,
9947,
2393,
379,
262,
1353,
1241,
286,
198,
2,
428,
16099,
4909,
262,
1336,
6634,
... | 3.648936 | 94 |
from flask import Flask, request, send_file, render_template, url_for
import pytube
import logging
import sys
import os
from hello import timed_delete
from threading import Timer
timed_delete()
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
app = Flask(__name__)
| [
6738,
42903,
1330,
46947,
11,
2581,
11,
3758,
62,
7753,
11,
8543,
62,
28243,
11,
19016,
62,
1640,
198,
11748,
12972,
29302,
198,
11748,
18931,
198,
11748,
25064,
198,
11748,
28686,
198,
6738,
23748,
1330,
28805,
62,
33678,
198,
6738,
47... | 3.325301 | 83 |
#!/usr/bin/env python3
#
# base.py
"""
Base functionality.
"""
#
# Copyright (c) 2020 Dominic Davis-Foster <dominic@davis-foster.co.uk>
#
# Based on cyberpandas
# https://github.com/ContinuumIO/cyberpandas
# Copyright (c) 2018, Anaconda, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# _isstringslice based on awkward-array
# https://github.com/scikit-hep/awkward-array
# Copyright (c) 2018-2019, Jim Pivarski
# Licensed under the BSD 3-Clause License
#
# stdlib
from abc import abstractmethod
from numbers import Real
from typing import Dict, Iterable, List, Optional, Sequence, SupportsFloat, Tuple, Type, TypeVar, Union, overload
# 3rd party
import numpy # type: ignore
from domdf_python_tools.doctools import prettify_docstrings
from pandas.core.arrays import ExtensionArray # type: ignore
from pandas.core.dtypes.base import ExtensionDtype # type: ignore
from pandas.core.dtypes.generic import ABCExtensionArray # type: ignore
from typing_extensions import Literal, Protocol
__all__ = ["NumPyBackedExtensionArrayMixin"]
def setitem(self, indexer, value):
    """Set ``value`` at ``indexer`` in place and return ``self``.

    Kept separate from ``__setitem__`` deliberately: ``__setitem__`` must
    return ``None``, while this helper returns the array so it can be used
    in a fluent / pipeline style.
    """
    self[indexer] = value
    return self
def copy(self, deep: bool = False) -> ABCExtensionArray:
    """
    Return a copy of the array.

    :param deep: accepted for pandas API compatibility.
        NOTE(review): ``deep`` is currently ignored — the backing NumPy
        buffer is always copied regardless; confirm this is intended.
    :return: a new instance of the same type backed by ``self.data.copy()``.
    :rtype: ABCExtensionArray
    """
    return type(self)(self.data.copy())
def tolist(self) -> List:
    """
    Convert the array to a plain Python list.

    Uses ``numpy.ndarray.tolist``, so elements come back as native Python
    scalars rather than NumPy scalar types.
    """
    return self.data.tolist()
def argsort(
    self,
    ascending: bool = True,
    kind: Union[Literal["quicksort"], Literal["mergesort"], Literal["heapsort"]] = "quicksort",
    *args,
    **kwargs,
) -> numpy.ndarray:
    r"""
    Return the indices that would sort this array.

    :param ascending: Whether the indices should result in an ascending
        or descending sort.
    :param kind: {'quicksort', 'mergesort', 'heapsort'}, optional
        Sorting algorithm, passed through to :meth:`numpy.ndarray.argsort`.

    Extra positional/keyword arguments are accepted for pandas API
    compatibility and ignored.

    :return: Array of indices that sort ``self``.

    .. note:: The previous implementation silently ignored ``ascending``;
        it is now honoured by reversing the ascending result. As a
        consequence NaN values (which numpy places last when ascending)
        end up *first* when ``ascending=False``.

    .. seealso::

        :class:`numpy.argsort`: Sorting implementation used internally.
    """
    indices = self.data.argsort(kind=kind)
    if not ascending:
        # Reverse for a descending sort.
        indices = indices[::-1]
    return indices
_A = TypeVar("_A")
def isna(self):
    """Boolean mask of missing entries.

    When the sentinel ``na_value`` is NaN the mask is computed with
    ``numpy.isnan`` (NaN never compares equal to itself); otherwise a plain
    elementwise equality comparison against the sentinel is used.
    """
    sentinel = self.na_value
    if numpy.isnan(sentinel):
        return numpy.isnan(self.data)
    return self.data == sentinel
# From https://github.com/scikit-hep/awkward-array/blob/2bbdb68d7a4fff2eeaed81eb76195e59232e8c13/awkward/array/base.py#L611
def append(self, value) -> None:
    """
    Append ``value`` in place to this BaseArray.

    ``value`` is first coerced through ``self._parser`` (defined by the
    concrete subclass), and its backing ``.data`` is concatenated onto
    ``self.data``.

    .. note:: ``numpy.append`` copies the whole buffer, so each call is
       O(n) — avoid calling this in a tight loop.

    :param value: the value(s) to append; anything ``self._parser`` accepts.
    """
    self.data = numpy.append(self.data, self._parser(value).data)
class _SupportsIndex(Protocol):
_F = TypeVar("_F", bound="UserFloat")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
198,
2,
220,
2779,
13,
9078,
198,
37811,
198,
14881,
11244,
13,
198,
37811,
198,
2,
198,
2,
220,
15069,
357,
66,
8,
12131,
36401,
7802,
12,
37,
6197,
1279,
3438,
47277,
31,
... | 2.968472 | 1,459 |
from django.contrib import admin
from .models import Agent
# Register your models here.
admin.site.register(Agent) | [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
764,
27530,
1330,
15906,
198,
198,
2,
17296,
534,
4981,
994,
13,
198,
198,
28482,
13,
15654,
13,
30238,
7,
36772,
8
] | 3.625 | 32 |
#!/usr/bin/env python
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# A library for reading Microsoft's OLE Compound Document format
# Copyright (c) 2014 Dave Hughes <dave@waveform.org.uk>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
native_str = str
str = type('')
import struct as st
# Magic identifier at the start of the file
COMPOUND_MAGIC = b'\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1'
FREE_SECTOR = 0xFFFFFFFF # denotes an unallocated (free) sector
END_OF_CHAIN = 0xFFFFFFFE # denotes the end of a stream chain
NORMAL_FAT_SECTOR = 0xFFFFFFFD # denotes a sector used for the regular FAT
MASTER_FAT_SECTOR = 0xFFFFFFFC # denotes a sector used for the master FAT
MAX_NORMAL_SECTOR = 0xFFFFFFFA # the maximum sector in a file
MAX_REG_SID = 0xFFFFFFFA # maximum directory entry ID
NO_STREAM = 0xFFFFFFFF # unallocated directory entry
DIR_INVALID = 0 # unknown/empty(?) storage type
DIR_STORAGE = 1 # element is a storage (dir) object
DIR_STREAM = 2 # element is a stream (file) object
DIR_LOCKBYTES = 3 # element is an ILockBytes object
DIR_PROPERTY = 4 # element is an IPropertyStorage object
DIR_ROOT = 5 # element is the root storage object
FILENAME_ENCODING = 'latin-1'
COMPOUND_HEADER = st.Struct(native_str(''.join((
native_str('<'), # little-endian format
native_str('8s'), # magic string
native_str('16s'), # file UUID (unused)
native_str('H'), # file header major version
native_str('H'), # file header minor version
native_str('H'), # byte order mark
native_str('H'), # sector size (actual size is 2**sector_size)
native_str('H'), # mini sector size (actual size is 2**short_sector_size)
native_str('6s'), # unused
native_str('L'), # directory chain sector count
native_str('L'), # normal-FAT sector count
native_str('L'), # ID of first sector of the normal-FAT
native_str('L'), # transaction signature (unused)
native_str('L'), # minimum size of a normal stream
native_str('L'), # ID of first sector of the mini-FAT
native_str('L'), # mini-FAT sector count
native_str('L'), # ID of first sector of the master-FAT
native_str('L'), # master-FAT sector count
))))
DIR_HEADER = st.Struct(native_str(''.join((
native_str('<'), # little-endian format
native_str('64s'), # NULL-terminated filename in UTF-16 little-endian encoding
native_str('H'), # length of filename in bytes (why?!)
native_str('B'), # dir-entry type
native_str('B'), # red (0) or black (1) entry
native_str('L'), # ID of left-sibling node
native_str('L'), # ID of right-sibling node
native_str('L'), # ID of children's root node
native_str('16s'), # dir-entry UUID (unused)
native_str('L'), # user flags (unused)
native_str('Q'), # creation timestamp
native_str('Q'), # modification timestamp
native_str('L'), # start sector of stream
native_str('L'), # low 32-bits of stream size
native_str('L'), # high 32-bits of stream size
))))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
43907,
25,
900,
2123,
1509,
28,
19,
39747,
28,
19,
2393,
12685,
7656,
28,
40477,
12,
23,
25,
198,
2,
198,
2,
317,
5888,
329,
3555,
5413,
338,
440,
2538,
3082,
633,
16854,
5794,
... | 2.841785 | 1,479 |
"""
view predication for point cloud,
Run valid_one_point_cloud first
"""
import torch
import numpy as np
import sys
import os
import pptk
# ------ Configurations ------
# path to pth file
pth_file = "../tmp/scene0015_00_vh_clean_2.pth.Random.100"
show_gt = False # show groundtruth or not; groudtruth draw first, i.e., on back
# --- end of configurations ---
CLASS_LABELS = ['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf',
'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink',
'bathtub', 'otherfurniture']
# CLASS_COLOR = [
# [138, 43, 226], [0, 128, 128], [0, 255, 0], [0, 0, 255], [255, 255, 0],
# [0, 255, 255], [255, 0, 255], [192, 192, 192], [128, 128, 128], [128, 0, 0],
# [128, 128, 0], [0, 128, 0], [128, 0, 128], [255, 0, 0], [0, 0, 128],
# [34, 139, 34], [64, 224, 208], [0, 0, 0], [75, 0, 130], [205, 133, 63]
# ]
# Fixed ScanNet colour palette: NYU-40 class id -> RGB triple (0-255 floats).
# (The original had a duplicated "X = X = {" assignment; dropped here.)
SCANNET_COLOR_MAP = {
    0: (0., 0., 0.),
    1: (174., 199., 232.),
    2: (152., 223., 138.),
    3: (31., 119., 180.),
    4: (255., 187., 120.),
    5: (188., 189., 34.),
    6: (140., 86., 75.),
    7: (255., 152., 150.),
    8: (214., 39., 40.),
    9: (197., 176., 213.),
    10: (148., 103., 189.),
    11: (196., 156., 148.),
    12: (23., 190., 207.),
    14: (247., 182., 210.),
    15: (66., 188., 102.),
    16: (219., 219., 141.),
    17: (140., 57., 197.),
    18: (202., 185., 52.),
    19: (51., 176., 203.),
    20: (200., 54., 131.),
    21: (92., 193., 61.),
    22: (78., 71., 183.),
    23: (172., 114., 82.),
    24: (255., 127., 14.),
    25: (91., 163., 138.),
    26: (153., 98., 156.),
    27: (140., 153., 101.),
    28: (158., 218., 229.),
    29: (100., 125., 154.),
    30: (178., 127., 135.),
    32: (146., 111., 194.),
    33: (44., 160., 44.),
    34: (112., 128., 144.),
    35: (96., 207., 209.),
    36: (227., 119., 194.),
    37: (213., 92., 176.),
    38: (94., 106., 211.),
    39: (82., 84., 163.),
    40: (100., 85., 144.),
}
# The 20 NYU-40 ids evaluated by the ScanNet benchmark — presumably aligned
# one-to-one with CLASS_LABELS above (confirm ordering).
VALID_CLASS_IDS = [
    1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39
]
# Per-valid-class colours normalised to [0, 1] for the point-cloud viewer.
CLASS_COLOR = np.array([SCANNET_COLOR_MAP[class_id] for class_id in VALID_CLASS_IDS]) / 255.0
if __name__ == "__main__":
    # Script entry point: render the predictions stored in `pth_file`.
    # NOTE(review): show_predication_result is not defined in this chunk —
    # it is expected to exist elsewhere in the module; confirm before use.
    show_predication_result(pth_file, show_gt)
| [
37811,
198,
220,
220,
220,
1570,
2747,
3299,
329,
966,
6279,
11,
198,
220,
220,
220,
5660,
4938,
62,
505,
62,
4122,
62,
17721,
717,
198,
37811,
198,
198,
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
25064,
198,
11748,... | 2.18315 | 1,092 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
11,
15720,
602,
628
] | 2.891892 | 37 |
import datetime
from django.conf import settings
from django.db import models
from django.utils import translation
import tower
from babel import Locale, numbers
from jingo import env
from jinja2.filters import do_dictsort
from tower import ugettext as _
import amo
from amo.fields import DecimalCharField
from amo.helpers import absolutify, urlparams
from amo.utils import get_locale_from_lang, send_mail, send_mail_jinja
| [
11748,
4818,
8079,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
26791,
1330,
11059,
198,
198,
11748,
10580,
198,
6738,
9289,
417,
1330,
15181,
1000,
11,
... | 3.317829 | 129 |
from kol.request.GenericRequest import GenericRequest
| [
6738,
479,
349,
13,
25927,
13,
46189,
18453,
1330,
42044,
18453,
198
] | 4.5 | 12 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of pandas-learn
# https://github.com/RichLewis42/pandas-learn
#
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT
# Copyright (c) 2015, Rich Lewis <rl403@cam.ac.uk>
"""
pdlearn.adaptor.methods
~~~~~~~~~~~~~~~~~~~~~~~
Module implementing methods for pdlearn classes.
"""
import pandas as pd
def feature_property(name):
"""
Create a method adapting a parent class' property to return a pandas frame.
"""
# pylint: disable=C0111
return method
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
19798,
292,
12,
35720,
198,
2,
3740,
1378,
12567,
13,
785,
14,
14868,
40330,... | 2.857143 | 196 |
# Nested loops: the inner loop finishes all of its iterations before the
# outer loop advances by one — here, one full row of symbols per outer pass.
rows = int(input("How many rows?: "))  # grid height
columns = int(input("How many columns?: "))  # grid width
symbol = input("Enter a symbol to use: ")  # text printed in every cell
#symbol = int(input("Enter a symbol to use: "))
for i in range(rows):
    for j in range(columns):
        print(symbol, end="")  # stay on the same line while filling the row
    print()  # row complete: emit the newline
2,
28376,
23607,
796,
383,
366,
5083,
9052,
1,
481,
5461,
477,
286,
340,
338,
34820,
878,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
12848,
530,
24415,
286,
262,
366,
39605,
9052,
1,
198,
... | 2.869565 | 138 |
"""
.. module:: uwsgi
:platform: Any
:synopsis: Reads UWSGI stats
.. moduleauthor:: Colin Alston <colin@imcol.in>
"""
import json
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from zope.interface import implementer
from twisted.internet import defer, reactor
from twisted.internet.protocol import ClientCreator, Protocol
from duct.interfaces import IDuctSource
from duct.objects import Source
| [
37811,
198,
492,
8265,
3712,
334,
18504,
12397,
198,
220,
220,
1058,
24254,
25,
4377,
198,
220,
220,
1058,
28869,
24608,
25,
4149,
82,
471,
19416,
18878,
9756,
198,
198,
492,
8265,
9800,
3712,
18373,
978,
3743,
1279,
4033,
259,
31,
32... | 3.360902 | 133 |
import datetime
import importlib
import json
import logging
import math
import mimetypes
import os
import re
import sys
import uuid
import requests
from urllib.parse import urljoin
from wsgiref.util import FileWrapper
from xml.dom import minidom, Node
from django.conf import settings
from django.core.files.storage import get_storage_class
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.core.validators import ValidationError
from django.db import IntegrityError
from django.http import HttpResponse, Http404
from django.http import HttpResponseNotFound, StreamingHttpResponse
from django.utils import timezone
from rest_framework import exceptions
from .tags import XFORM_ID_STRING, VERSION
PENDING = 0
SUCCESSFUL = 1
FAILED = 2
EXTERNAL_EXPORT_TYPES = ['xls']
EXPORT_EXT = {
'csv': 'csv',
'csvzip': 'csv_zip',
'kml': 'kml',
'savzip': 'sav_zip',
'uuid': 'external',
'xls': 'xls',
'xlsx': 'xls',
'zip': 'zip',
}
def generate_content_disposition_header(name, extension, show_date=True):
if name is None:
return 'attachment;'
if show_date:
name = "%s-%s" % (name, timezone.now().strftime("%Y-%m-%d-%H-%M-%S"))
return 'attachment; filename=%s.%s' % (name, extension)
def _get_all_attributes(node):
"""
Go through an XML document returning all the attributes we see.
"""
if hasattr(node, "hasAttributes") and node.hasAttributes():
for key in node.attributes.keys():
yield key, node.getAttribute(key)
for child in node.childNodes:
for pair in _get_all_attributes(child):
yield pair
def set_uuid(obj):
"""
Only give an object a new UUID if it does not have one.
"""
if not obj.uuid:
obj.uuid = uuid.uuid4().hex
def get_numeric_fields(xform):
"""List of numeric field names for specified xform"""
return _get_fields_of_type(xform, ['decimal', 'integer'])
def response_with_mimetype_and_name(mimetype,
name,
extension=None,
show_date=True,
file_path=None,
use_local_filesystem=False,
full_mime=False):
if extension is None:
extension = mimetype
if not full_mime:
mimetype = "application/%s" % mimetype
if file_path:
try:
if isinstance(file_path, InMemoryUploadedFile):
response = StreamingHttpResponse(
file_path, content_type=mimetype)
response['Content-Length'] = file_path.size
elif not use_local_filesystem:
default_storage = get_storage_class()()
wrapper = FileWrapper(default_storage.open(file_path))
response = StreamingHttpResponse(
wrapper, content_type=mimetype)
response['Content-Length'] = default_storage.size(file_path)
else:
wrapper = FileWrapper(open(file_path))
response = StreamingHttpResponse(
wrapper, content_type=mimetype)
response['Content-Length'] = os.path.getsize(file_path)
except IOError:
response = HttpResponseNotFound(
"The requested file could not be found.")
else:
response = HttpResponse(content_type=mimetype)
response['Content-Disposition'] = generate_content_disposition_header(
name, extension, show_date)
return response
def _get_export_type(export_type):
if export_type in list(EXPORT_EXT):
export_type = EXPORT_EXT[export_type]
else:
raise exceptions.ParseError(
"'%(export_type)s' format not known or not implemented!" %
{'export_type': export_type})
return export_type
def get_file_extension(content_type):
return mimetypes.guess_extension(content_type)[1:]
def get_media_file_response(metadata, username=None):
"""
Returns a HTTP response for media files.
HttpResponse 200 if it represents a file on disk.
HttpResponseRedirect 302 incase the metadata represents a url.
HttpResponseNotFound 404 if the metadata file cannot be found.
"""
if metadata.data_type == 'media' and metadata.data_file:
file_path = metadata.data_file.name
filename, extension = os.path.splitext(file_path.split('/')[-1])
extension = extension.strip('.')
dfs = get_storage_class()()
if dfs.exists(file_path):
return response_with_mimetype_and_name(
metadata.data_file_type,
filename,
extension=extension,
show_date=False,
file_path=file_path,
full_mime=True)
elif metadata.data_type == 'url' and not metadata.data_file:
url = requests.Request(
'GET', metadata.data_value, params={
'username': username
}
).prepare().url
try:
data_file = metadata.get_file(url)
except Exception:
raise Http404
return response_with_mimetype_and_name(
mimetype=data_file.content_type,
name=data_file.name,
extension=get_file_extension(data_file.content_type),
show_date=False,
file_path=data_file,
use_local_filesystem=False,
full_mime=True
)
return HttpResponseNotFound()
def report_exception(*args, **kwargs):
# dummy
return
def publish_form(callback):
"""
Calls the callback function to publish a XLSForm and returns appropriate
message depending on exception throw during publishing of a XLSForm.
"""
try:
return callback()
# except (PyXFormError, XLSFormError) as e:
# return {'type': 'alert-error', 'text': str(e)}
except IntegrityError as e:
return {
'type': 'alert-error',
'text': 'Form with this id or SMS-keyword already exists.',
}
# except ProcessTimedOut as e:
# # catch timeout errors
# return {
# 'type': 'alert-error',
# 'text': 'Form validation timeout, please try again.',
# }
except (MemoryError, OSError) as e:
return {
'type': 'alert-error',
'text': (
'An error occurred while publishing the form. '
'Please try again.'
),
}
except (AttributeError, Exception, ValidationError) as e:
report_exception("Form publishing exception: {}".format(e), str(e),
sys.exc_info())
return {'type': 'alert-error', 'text': str(e)}
def _get_tag_or_element_type_xpath(xform, tag):
elems = xform.get_survey_elements_of_type(tag)
return elems[0].get_abbreviated_xpath() if elems else tag
def calculate_duration(start_time, end_time):
"""
This function calculates duration when given start and end times.
An empty string is returned if either of the time formats does
not match '_format' format else, the duration is returned
"""
_format = "%Y-%m-%dT%H:%M:%S"
try:
_start = datetime.datetime.strptime(start_time[:19], _format)
_end = datetime.datetime.strptime(end_time[:19], _format)
except (TypeError, ValueError):
return ''
duration = (_end - _start).total_seconds()
return duration
def handle_enketo_error(response):
"""Handle enketo error response."""
try:
data = json.loads(response.content)
except ValueError:
pass
if response.status_code == 502:
raise EnketoError(
u"Sorry, we cannot load your form right now. Please try "
"again later.")
raise EnketoError()
else:
if 'message' in data:
raise EnketoError(data['message'])
raise EnketoError(response.text)
def get_form_url(
request, protocol='http', preview=False, # xform_pk=None
):
"""
Return a form list url endpoint to be used to make a request to Enketo.
For example, it will return https://example.com and Enketo will know to
look for the form list at https://example.com/formList. If a username is
provided then Enketo will request the form list from
https://example.com/[username]/formList. Same applies for preview if
preview is True and also to a single form when xform_pk is provided.
"""
http_host = request.META.get('HTTP_HOST', 'dev.monitora.sisicmbio.icmbio.gov.br')
url = '%s://%s' % (protocol, http_host)
if preview:
url = '%s/preview' % url
return "{}/xform".format(url)
| [
11748,
4818,
8079,
198,
11748,
1330,
8019,
198,
11748,
33918,
198,
11748,
18931,
198,
11748,
10688,
198,
11748,
17007,
2963,
12272,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
25064,
198,
11748,
334,
27112,
198,
198,
11748,
7007,
198,
... | 2.285084 | 3,855 |
import datetime
from pymongo import MongoClient
import pymongo
import pprint
try:
db = MongoClient("mongodb://localhost:27017")["hkust"]
f=0.05
try:
print("Querying Documents...")
listOfCourseWithWaitingListSize = db.course.aggregate([
{ "$unwind": "$sections" },
# { "$project": { "newProduct": {"$multiply": [f, "$sections.enrol"]}, "satisfied": satisfied} },
# { "$project": { "compareResult": {"$gte": ["$sections.wait", "$newProduct"]}, "match_ts" : "$sections.recordTime"} },
{"$match": #filter timeslot
{"$and":[
# {"compareResult": "true"},
# {"satisfied" : "Yes"},
#{"sections.sectionId": {"$ne": null}},
#{"sections.sectionId": {"$exists": true}},
# {"sections.sectionId": {"$regex": '^L'}},
{"sections.recordTime": {"$gte": datetime.datetime.strptime("2018-01-26T14:00Z", "%Y-%m-%dT%H:%MZ")}},
{"sections.recordTime": {"$lte": datetime.datetime.strptime("2018-02-01T11:30Z", "%Y-%m-%dT%H:%MZ")}}
]
}
},
{ "$project":
{"code": 1,
"title": 1,
"credits": 1,
"sections":1,
# "description":1,
"satisfied":{"$gte":["$sections.wait",{"$multiply":["$sections.enrol",float(f)]}]},
"lecSatisfied":{
"$cond":[{
"$and":[
{
"$gte":["$sections.wait",{"$multiply":["$sections.enrol",float(f)]}]
},
{
"$eq":[{"$substr": ["$sections.sectionId",0,1]},"L"]
}
]
},1,0]
}
},
},
{
"$sort": {"sections.sectionId": 1 }
},
{
"$group":{
"_id":{ "code": "$code", "recordTime":"$sections.recordTime"},
"code": {"$last": "$code"},
"title": {"$last": "$title"},
"credits": {"$last": "$credits"},
"recordTime":{"$last": "$sections.recordTime"},
"sections":{
"$push": {
"sectionId":"$sections.sectionId",
"dateAndTime":"$sections.offerings.dateAndTime",
"quota":"$sections.quota",
"enrol":"$sections.enrol",
"avail": { "$subtract": [ "$sections.quota", "$sections.enrol"] } ,
"wait":"$sections.wait",
"satisfied":"$satisfied",
}
},
"lecSatisfiedCount":{"$sum":"$lecSatisfied"}
}
},
{ "$match": {"lecSatisfiedCount": {"$gt":0}}
},
{
"$sort": {"recordTime": 1 }
},
{
"$group":{
"_id":{ "code": "$code"},
"code": {"$last": "$code"},
"title": {"$last": "$title"},
"credits": {"$last": "$credits"},
"recordTime":{"$last": "$recordTime"},
"sections":{"$last": "$sections"},
"lecSatisfiedCount":{"$last": "$lecSatisfiedCount"}
}
},
{
"$project":{
"_id":0,
"code": 1,
"title":1,
"credits": 1,
"recordTime":1,
"sections":1
}
}
]
)
# pprint.pprint(listOfCourseWithWaitingListSize)
recordNo = 0
for oneCourse in listOfCourseWithWaitingListSize:
recordNo = recordNo + 1
print("Record {:d}:".format(recordNo))
pprint.pprint(oneCourse)
# print("code: {:s}\ntitle: {:s}\ncredits: {:0.2f}\nquota: {:d}\nenrol: {:d}\navail: {:d}\nwait: {:d}".format(oneCourse["code"], oneCourse["title"], oneCourse["credits"],oneCourse["sections"][0]["quota"],oneCourse["sections"][0]["enrol"],oneCourse["sections"][0]["avail"],oneCourse["sections"][0]["wait"]))
# for oneSection in oneCourse["sections"]:
# print("sections: {:s}, Date & Time: {:s}".format(oneSection["sectionId"],' '.join(oneSection["dateAndTime"])))
# print("description: {:s}".format(oneCourse["description"]))
#pprint(" Record {:d}: (sid={:s}, sname={:s}, byear={:d})".format(recordNo, oneStudent["sid"], oneStudent["sname"], oneStudent["byear"]))
#print("Record {:d}: (course={:s})".format(recordNo, oneCourse))
except pymongo.errors.ConnectionFailure as error:
print("Document Querying Failed! Error Message: \"{}\"".format(error))
#return outputCourseDetails(courseCode, lectureSection, satisfied)
except pymongo.errors.ConnectionFailure as error:
print("Document Insertion Failed! Error Message: \"{}\"".format(error))
import numpy
import time
from keras.models import Sequential
from keras.layers import Dense
from keras.models import model_from_json
import numpy
#Model 1
# model 2:
# model 3:
# model 4:
# model 5:
| [
11748,
4818,
8079,
198,
6738,
279,
4948,
25162,
1330,
42591,
11792,
198,
11748,
279,
4948,
25162,
198,
11748,
279,
4798,
198,
28311,
25,
198,
197,
9945,
796,
42591,
11792,
7203,
31059,
375,
65,
1378,
36750,
25,
1983,
29326,
4943,
14692,
... | 2.226913 | 1,895 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
11,
15720,
602,
628
] | 2.891892 | 37 |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
from investing_algorithm_framework.core.models.order_status import OrderStatus
from investing_algorithm_framework.core.models.order_type import OrderType
from investing_algorithm_framework.core.models.order_side import OrderSide
from investing_algorithm_framework.core.models.time_unit import TimeUnit
from investing_algorithm_framework.core.models.order import Order
from investing_algorithm_framework.core.models.portfolio import Portfolio
from investing_algorithm_framework.core.models.position import Position
__all__ = [
"db",
"Portfolio",
"Position",
'Order',
"OrderType",
'OrderSide',
"TimeUnit",
"create_all_tables",
"initialize_db",
"OrderStatus"
]
| [
6738,
42903,
1330,
46947,
198,
6738,
42903,
62,
25410,
282,
26599,
1330,
16363,
2348,
26599,
198,
198,
9945,
796,
16363,
2348,
26599,
3419,
628,
198,
198,
6738,
14771,
62,
282,
42289,
62,
30604,
13,
7295,
13,
27530,
13,
2875,
62,
13376,... | 3.217213 | 244 |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from .fileservice import FileService
from .models import (
Share,
ShareProperties,
File,
FileProperties,
Directory,
DirectoryProperties,
FileRange,
ContentSettings,
CopyProperties,
SharePermissions,
FilePermissions,
DeleteSnapshot,
)
| [
2,
16529,
45537,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
4091,
13789,
13,
14116,
287,
262,
1628,
6808,
329,
198,
2,
5964,
1321,
13,
198,
2,
16529,
35937,
198,
6... | 4 | 148 |
from db.table import Table
from db.study import Study
from db.series import Series
from pypika.pseudocolumns import PseudoColumn
| [
6738,
20613,
13,
11487,
1330,
8655,
198,
6738,
20613,
13,
44517,
1330,
12481,
198,
6738,
20613,
13,
25076,
1330,
7171,
198,
6738,
279,
4464,
9232,
13,
7752,
463,
4668,
4182,
82,
1330,
49693,
12003,
39470,
628
] | 3.611111 | 36 |
#!/usr/bin/env python
import sys
import json
if len(sys.argv) != 4:
print "Usage:", __file__, "<segment> <text> <json>"
print " e.g.:", __file__, "data/dev/segmetns data/dev/text trans.json"
sys.exit(1)
segment_filename = sys.argv[1]
text_filename = sys.argv[2]
output_filename = sys.argv[3]
start_time = {}
end_time = {}
utt2chn = {}
utt2id = {}
with open(segment_filename) as segmentfile:
for line in segmentfile:
fields = line.split()
utt = fields[0]
start_time[utt] = float(fields[2]);
end_time[utt] = float(fields[3]);
id, chn = fields[1].split("_", 1)
utt2chn[utt] = chn
utt2id[utt] = id
data = {}
with open(text_filename) as textfile:
for line in textfile:
utt, text = line.split(" ", 1)
chn = utt2chn[utt]
if chn not in data:
data[chn] = {
'EmpID1': utt2id[utt],
'transcript': []
}
start = sec2str(start_time[utt])
end = sec2str(end_time[utt])
utt_info = {
'start': start,
'end': end,
'usable': True,
'speaker': 'OFFICER',
'utterance': text.strip()
}
data[chn]['transcript'].append(utt_info)
with open(output_filename, 'w') as outfile:
json.dump(data, outfile)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
25064,
198,
11748,
33918,
628,
198,
361,
18896,
7,
17597,
13,
853,
85,
8,
14512,
604,
25,
198,
220,
3601,
366,
28350,
25,
1600,
11593,
7753,
834,
11,
33490,
325,
5154,
29... | 2.147569 | 576 |
import sys
from string import ascii_lowercase as alphabet
test_cases = open(sys.argv[1], "r")
words = set([test.strip() for test in test_cases])
test_cases.close()
print generate_network(words, "hello")
| [
11748,
25064,
198,
6738,
4731,
1330,
355,
979,
72,
62,
21037,
7442,
355,
24830,
628,
628,
198,
9288,
62,
33964,
796,
1280,
7,
17597,
13,
853,
85,
58,
16,
4357,
366,
81,
4943,
198,
10879,
796,
900,
26933,
9288,
13,
36311,
3419,
329,
... | 3.044118 | 68 |
from sqlalchemy import Column, Integer
from sqlalchemy import ForeignKey
from sqlalchemy.orm import declarative_base
from .base import Base
| [
6738,
44161,
282,
26599,
1330,
29201,
11,
34142,
198,
6738,
44161,
282,
26599,
1330,
8708,
9218,
198,
6738,
44161,
282,
26599,
13,
579,
1330,
2377,
283,
876,
62,
8692,
198,
198,
6738,
764,
8692,
1330,
7308,
628
] | 3.837838 | 37 |
from sudoku.constants import SIZE, BOX_SIZE
from sudoku import Sudoku
| [
6738,
424,
67,
11601,
13,
9979,
1187,
1330,
311,
35400,
11,
45216,
62,
33489,
198,
6738,
424,
67,
11601,
1330,
14818,
11601,
628
] | 3.086957 | 23 |
#!/usr/bin/python3
print('Hello world') | [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
4798,
10786,
15496,
995,
11537
] | 2.666667 | 15 |
from algoritmia.problems.binpacking.nextfitbinpacker import NextFitBinPacker
| [
6738,
435,
7053,
270,
20730,
13,
1676,
22143,
13,
8800,
41291,
13,
19545,
11147,
8800,
8002,
263,
1330,
7406,
31805,
33,
259,
47,
10735,
201
] | 3.08 | 25 |
https://leetcode.com/problems/word-search/description/ | [
5450,
1378,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
4775,
12,
12947,
14,
11213,
14
] | 3.176471 | 17 |
#!/usr/bin/env python
from __future__ import generators
import tables, cPickle, time
#################################################################################
def make_col(row_type, row_name, row_item, str_len):
'''for strings it will always make at least 80 char or twice mac char size'''
set_len=80
if str_len:
if 2*str_len>set_len:
set_len=2*str_len
row_type[row_name]=tables.Col("CharType", set_len)
else:
type_matrix={
int: tables.Col("Int32", 1),
float: tables.Col("Float32", 4), #Col("Int16", 1)
}
row_type[row_name]=type_matrix[type(row_item)]
########################
if __name__=='__main__':
a=raw_input('enter y to write out test file to test.hdf')
if a.strip()=='y':
print 'writing'
write_small('test.hdf')
print 'reading'
read_small('test.hdf')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
11593,
37443,
834,
1330,
27298,
198,
198,
11748,
8893,
11,
269,
31686,
293,
11,
640,
198,
29113,
29113,
14468,
2,
628,
628,
198,
4299,
787,
62,
4033,
7,
808,
62,
4906,
11,... | 2.351282 | 390 |
"""
jinja2content.py
----------------
DONT EDIT THIS FILE
Pelican plugin that processes Markdown files as jinja templates.
"""
from jinja2 import Environment, FileSystemLoader, ChoiceLoader
import os
from pelican import signals
from pelican.readers import MarkdownReader, HTMLReader, RstReader
from pelican.utils import pelican_open
from tempfile import NamedTemporaryFile
def add_reader(readers):
for Reader in [JinjaMarkdownReader, JinjaRstReader, JinjaHTMLReader]:
for ext in Reader.file_extensions:
readers.reader_classes[ext] = Reader
| [
37811,
198,
18594,
6592,
17,
11299,
13,
9078,
198,
1783,
198,
198,
35,
35830,
48483,
12680,
45811,
198,
198,
47,
417,
7490,
13877,
326,
7767,
2940,
2902,
3696,
355,
474,
259,
6592,
24019,
13,
198,
198,
37811,
198,
198,
6738,
474,
259,... | 3.220339 | 177 |
import json
from tqdm import tqdm
from utils import *
from alexnet import AlexNet
def get_video_feature(net, folder_name, resize=(224, 224)):
"""
:param folder_name: video_0000
:param resize:(224,224)
:return: 1410
['061_foam_brick', 'green_basketball', 'salt_cylinder', 'shiny_toy_gun', 'stanley_screwdriver',
'strawberry', 'toothpaste_box', 'toy_elephant', 'whiteboard_spray', 'yellow_block']
4[]
"""
class_feature = classify(net, folder_name, resize)
images, is_moved = video_loader(folder_name)
move_feature = collide_detection_blocks(images, is_moved)
#feature = np.concatenate([class_feature, move_feature])
return class_feature, move_feature
#if __name__ == '__main__':
#net = AlexNet()
#net.load_state_dict(torch.load('./alexnet.pt'))
# idx_to_class = ['061_foam_brick', 'green_basketball', 'salt_cylinder', 'shiny_toy_gun', 'stanley_screwdriver',
# 'strawberry', 'toothpaste_box', 'toy_elephant', 'whiteboard_spray', 'yellow_block']
# classes = classify(net, './dataset/task2/test/0/video_0006')
#import json
#import os
#label = dict()
#path='./dataset/train'
#for folder in os.listdir(path):
#for sample in os.listdir(os.path.join(path, folder)):
#images, is_moved = video_loader(os.path.join(path, folder, sample))
#move_feature = collide_detection_blocks(images, is_moved)
#label[folder + '/' + sample] = move_feature
#with open('./dataset/train.json', 'w') as f:
#json.dump(label,f) | [
11748,
33918,
201,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
201,
198,
6738,
3384,
4487,
1330,
1635,
201,
198,
6738,
257,
2588,
3262,
1330,
4422,
7934,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
429... | 2.23416 | 726 |
import serial
with serial.Serial("/dev/ttyUSB0", 115200) as ser:
while 1:
for i in range(5):
n = ser.read()[0]
print("{:x}".format(n))
print("--------")
| [
11748,
11389,
628,
198,
4480,
11389,
13,
32634,
7203,
14,
7959,
14,
42852,
27155,
15,
1600,
12279,
2167,
8,
355,
1055,
25,
198,
220,
220,
220,
981,
352,
25,
198,
220,
220,
220,
220,
220,
220,
220,
329,
1312,
287,
2837,
7,
20,
2599... | 1.980198 | 101 |
# -*- coding: utf-8 -*-
#
# michael a.g. avzis
# orthologue
# (c) 1998-2021 all rights reserved
#
# end of file
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
285,
40302,
257,
13,
70,
13,
1196,
89,
271,
198,
2,
29617,
39795,
198,
2,
357,
66,
8,
7795,
12,
1238,
2481,
477,
2489,
10395,
198,
2,
628,
198,
198,
2... | 2.211538 | 52 |