content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from distutils.core import setup
import versioneer
version = versioneer.get_version()
setup(name='papers-cli',
version=version,
cmdclass = versioneer.get_cmdclass(),
author='Mahe Perrette',
author_email='mahe.perrette@gmail.com',
description='utilities to keep your PDF library organized',
url='https://github.com/perrette/papers',
download_url=f'https://github.com/perrette/papers/archive/{version}.tar.gz',
packages=['papers'],
scripts=['scripts/papers'],
license = "MIT",
requires = ["bibtexparser","crossrefapi","rapidfuzz", "unidecode", "scholarly", "six"],
)
| [
6738,
1233,
26791,
13,
7295,
1330,
9058,
198,
11748,
2196,
28153,
198,
198,
9641,
796,
2196,
28153,
13,
1136,
62,
9641,
3419,
198,
198,
40406,
7,
3672,
11639,
40491,
12,
44506,
3256,
198,
220,
220,
220,
220,
220,
2196,
28,
9641,
11,
... | 2.651452 | 241 |
from pygears.core.gear import alternative, gear
from pygears.typing import Queue, Tuple, typeof
from pygears.common.shred import shred
from pygears.common.ccat import ccat
from pygears.util.utils import quiter_async
from pygears import module
@gear(enablement=b'len(din) == 2')
@alternative(cart)
@gear
# TODO: Lowest eot for each uncart output needs to be shortened to 1 data using
# flattening
@gear
@gear(enablement=b'len(din) == 2')
@alternative(cart_sync)
@gear
@gear
| [
6738,
12972,
70,
4127,
13,
7295,
13,
31763,
1330,
5559,
11,
7733,
198,
6738,
12972,
70,
4127,
13,
774,
13886,
1330,
4670,
518,
11,
309,
29291,
11,
2099,
1659,
198,
6738,
12972,
70,
4127,
13,
11321,
13,
1477,
445,
1330,
21163,
198,
6... | 2.864706 | 170 |
import pandas as pd
import requests
import io
import numpy as np
#Goal: go through daily reports of JHU to get data for
# Texas, Travis, Harris, Dallas
baseurl = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/'
#Start from March 1
#March 1 to March 21: jh only reported texas
#March 22 onwards, jh reported county level
results = {}
#JHU changed the formats several times
maxday0 = 9
for i in range(maxday0):
strbase = '03-0'+str(i+1)+'-2020.csv'
url = baseurl + strbase
df = pd.read_csv(url)
result = df[df['Province/State'].str.contains(', TX', na=False)].Confirmed.sum(axis=0)
print(result)
results[strbase] = result
#x = sdfsdfsfd
maxday1 = 21
for i in range(maxday0,maxday1):
strbase = '03-'
if i+1 < 10:
strbase += '0'+str(i+1)
else:
strbase += str(i+1)
strbase += '-2020.csv'
url = baseurl+strbase
print(url)
df = pd.read_csv(url)
result = df[df['Province/State']=='Texas'].Confirmed.to_numpy()
if len(result) > 0:
results[strbase] = np.ndarray.item(result)
# print(np.size(result))
maxday2 = 31
for i in range(maxday1, maxday2):
strbase = '03-'+str(i+1) + '-2020.csv'
url = baseurl + strbase
print(url)
df = pd.read_csv(url)
result = df[df['Province_State'] == 'Texas'].Confirmed.sum(axis=0)
results[strbase] = result
maxday2 = 30
for i in range(0, maxday2):
strbase = '04-'
if i+1 < 10:
strbase += '0'+str(i+1)
else:
strbase += str(i+1)
strbase += '-2020.csv'
url = baseurl + strbase
print(url)
df = pd.read_csv(url)
result = df[df['Province_State'] == 'Texas'].Confirmed.sum(axis=0)
results[strbase] = result
maxday2 = 29
for i in range(0, maxday2):
strbase = '05-'
if i+1 < 10:
strbase += '0'+str(i+1)
else:
strbase += str(i+1)
strbase += '-2020.csv'
url = baseurl + strbase
print(url)
df = pd.read_csv(url)
result = df[df['Province_State'] == 'Texas'].Confirmed.sum(axis=0)
results[strbase] = result
print(results)
| [
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
7007,
198,
11748,
33245,
198,
11748,
299,
32152,
355,
45941,
198,
198,
2,
49045,
25,
467,
832,
4445,
3136,
286,
449,
39,
52,
284,
651,
1366,
329,
198,
2,
3936,
11,
19804,
11,
10026,
... | 1.980672 | 1,190 |
from typing import Callable
from torchvision import transforms as tf
from . import TRANSFORM_REGISTRY
__all__ = ['ManyTimes', 'Twice']
@TRANSFORM_REGISTRY.register
class IdentityAndManyTimes:
"""
This class changes an image to a normalized tensor image and a series of augmented image.
Args:
transform: A list of image augmentation.
norm: A list of image normalization.
n: The times that the transform perform.
"""
@TRANSFORM_REGISTRY.register
class ManyTimes:
"""
This class transfers an image to a series of augmented images.
Args:
transform: The transform for augmentation and normalization of images.
n: The times that the transform performs.
Returns:
The tuple of augmented images.
"""
def __call__(self, inp) -> tuple:
"""
Call of this class.
Args:
inp: something importance.
"""
return (*(self.transform(inp) for _ in range(self.n)),)
@TRANSFORM_REGISTRY.register
def Twice(transform: Callable) -> ManyTimes:
"""
The easy call method of ManyTimes(transform, 2).
Args:
transform: The transform for augmentation and normalization of images.
Returns:
The class of ManyTimes(transform, 2).
"""
return ManyTimes(transform, 2)
| [
6738,
19720,
1330,
4889,
540,
198,
6738,
28034,
10178,
1330,
31408,
355,
48700,
198,
6738,
764,
1330,
44069,
21389,
62,
31553,
1797,
40405,
628,
198,
834,
439,
834,
796,
37250,
7085,
28595,
3256,
705,
5080,
501,
20520,
628,
198,
31,
544... | 2.741273 | 487 |
from src.protocol import anonymous_transmission
if __name__ == "__main__":
main()
| [
6738,
12351,
13,
11235,
4668,
1330,
11614,
62,
7645,
3411,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1388,
3419,
198
] | 2.966667 | 30 |
# -*- coding: utf-8 -*-
from collections.abc import Mapping
import re
import pytest
from scipy import sparse
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from sklearn.utils import IS_PYPY
from sklearn.utils._testing import (
assert_almost_equal,
fails_if_pypy,
assert_allclose_dense_sparse,
skip_if_32bit,
)
from collections import defaultdict
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
@pytest.mark.parametrize("Vectorizer", (CountVectorizer, HashingVectorizer))
# TODO: Remove in 1.2 when get_feature_names is removed.
@pytest.mark.filterwarnings("ignore::FutureWarning:sklearn")
@pytest.mark.parametrize("get_names", ["get_feature_names", "get_feature_names_out"])
def test_countvectorizer_custom_token_pattern(get_names):
"""Check `get_feature_names()` when a custom token pattern is passed.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/12971
"""
corpus = [
"This is the 1st document in my corpus.",
"This document is the 2nd sample.",
"And this is the 3rd one.",
"Is this the 4th document?",
]
token_pattern = r"[0-9]{1,3}(?:st|nd|rd|th)\s\b(\w{2,})\b"
vectorizer = CountVectorizer(token_pattern=token_pattern)
vectorizer.fit_transform(corpus)
expected = ["document", "one", "sample"]
feature_names_out = getattr(vectorizer, get_names)()
assert_array_equal(feature_names_out, expected)
def test_countvectorizer_custom_token_pattern_with_several_group():
"""Check that we raise an error if token pattern capture several groups.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/12971
"""
corpus = [
"This is the 1st document in my corpus.",
"This document is the 2nd sample.",
"And this is the 3rd one.",
"Is this the 4th document?",
]
token_pattern = r"([0-9]{1,3}(?:st|nd|rd|th))\s\b(\w{2,})\b"
err_msg = "More than 1 capturing group in token pattern"
vectorizer = CountVectorizer(token_pattern=token_pattern)
with pytest.raises(ValueError, match=err_msg):
vectorizer.fit(corpus)
def test_tf_transformer_feature_names_out():
"""Check get_feature_names_out for TfidfTransformer"""
X = [[1, 1, 1], [1, 1, 0], [1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm="l2").fit(X)
feature_names_in = ["a", "c", "b"]
feature_names_out = tr.get_feature_names_out(feature_names_in)
assert_array_equal(feature_names_in, feature_names_out)
@fails_if_pypy
# TODO: Remove in 1.2 when get_feature_names is removed.
@pytest.mark.filterwarnings("ignore::FutureWarning:sklearn")
@pytest.mark.parametrize("get_names", ["get_feature_names", "get_feature_names_out"])
@pytest.mark.parametrize("Vectorizer", (CountVectorizer, TfidfVectorizer))
# TODO: Remove in 1.2 when get_feature_names is removed.
@pytest.mark.filterwarnings("ignore::FutureWarning:sklearn")
@pytest.mark.parametrize("get_names", ["get_feature_names", "get_feature_names_out"])
@pytest.mark.parametrize(
"params, err_type, message",
(
({"max_df": 2.0}, ValueError, "max_df == 2.0, must be <= 1.0."),
({"min_df": 1.5}, ValueError, "min_df == 1.5, must be <= 1.0."),
({"max_df": -2}, ValueError, "max_df == -2, must be >= 0."),
({"min_df": -10}, ValueError, "min_df == -10, must be >= 0."),
({"min_df": 3, "max_df": 2.0}, ValueError, "max_df == 2.0, must be <= 1.0."),
({"min_df": 1.5, "max_df": 50}, ValueError, "min_df == 1.5, must be <= 1.0."),
({"max_features": -10}, ValueError, "max_features == -10, must be >= 0."),
(
{"max_features": 3.5},
TypeError,
"max_features must be an instance of <class 'numbers.Integral'>, not <class"
" 'float'>",
),
),
)
# TODO: Remove in 1.2 when get_feature_names is removed.
@pytest.mark.filterwarnings("ignore::FutureWarning:sklearn")
@pytest.mark.parametrize("get_names", ["get_feature_names", "get_feature_names_out"])
@fails_if_pypy
@pytest.mark.parametrize("Vectorizer", (CountVectorizer, TfidfVectorizer))
@fails_if_pypy
@pytest.mark.parametrize(
"factory",
[
CountVectorizer.build_analyzer,
CountVectorizer.build_preprocessor,
CountVectorizer.build_tokenizer,
],
)
def test_pickling_built_processors(factory):
"""Tokenizers cannot be pickled
https://github.com/scikit-learn/scikit-learn/issues/12833
"""
vec = CountVectorizer()
function = factory(vec)
text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
roundtripped_function = pickle.loads(pickle.dumps(function))
expected = function(text)
result = roundtripped_function(text)
assert result == expected
# TODO: Remove in 1.2 when get_feature_names is removed.
@pytest.mark.filterwarnings("ignore::FutureWarning:sklearn")
@pytest.mark.parametrize("get_names", ["get_feature_names", "get_feature_names_out"])
# TODO: Remove in 1.2 when get_feature_names is removed.
@pytest.mark.filterwarnings("ignore::FutureWarning:sklearn")
@pytest.mark.parametrize("get_names", ["get_feature_names", "get_feature_names_out"])
@fails_if_pypy
@pytest.mark.parametrize(
"Vectorizer", (CountVectorizer, TfidfVectorizer, HashingVectorizer)
)
@pytest.mark.parametrize("X_dtype", [np.float32, np.float64])
@pytest.mark.parametrize(
"vectorizer_dtype, output_dtype, warning_expected",
[
(np.int32, np.float64, True),
(np.int64, np.float64, True),
(np.float32, np.float32, False),
(np.float64, np.float64, False),
],
)
@pytest.mark.parametrize(
"vec",
[
HashingVectorizer(ngram_range=(2, 1)),
CountVectorizer(ngram_range=(2, 1)),
TfidfVectorizer(ngram_range=(2, 1)),
],
)
@fails_if_pypy
@skip_if_32bit
def test_countvectorizer_sort_features_64bit_sparse_indices():
"""
Check that CountVectorizer._sort_features preserves the dtype of its sparse
feature matrix.
This test is skipped on 32bit platforms, see:
https://github.com/scikit-learn/scikit-learn/pull/11295
for more details.
"""
X = sparse.csr_matrix((5, 5), dtype=np.int64)
# force indices and indptr to int64.
INDICES_DTYPE = np.int64
X.indices = X.indices.astype(INDICES_DTYPE)
X.indptr = X.indptr.astype(INDICES_DTYPE)
vocabulary = {"scikit-learn": 0, "is": 1, "great!": 2}
Xs = CountVectorizer()._sort_features(X, vocabulary)
assert INDICES_DTYPE == Xs.indices.dtype
@fails_if_pypy
@pytest.mark.parametrize(
"Estimator", [CountVectorizer, TfidfVectorizer, HashingVectorizer]
)
@pytest.mark.parametrize(
"Estimator", [CountVectorizer, TfidfVectorizer, HashingVectorizer]
)
@pytest.mark.parametrize(
"input_type, err_type, err_msg",
[
("filename", FileNotFoundError, ""),
("file", AttributeError, "'str' object has no attribute 'read'"),
],
)
@pytest.mark.parametrize(
"Estimator",
[
CountVectorizer,
TfidfVectorizer,
pytest.param(HashingVectorizer, marks=fails_if_pypy),
],
)
@pytest.mark.parametrize(
"analyzer", [lambda doc: open(doc, "r"), lambda doc: doc.read()]
)
@pytest.mark.parametrize("input_type", ["file", "filename"])
@pytest.mark.parametrize(
"Estimator", [CountVectorizer, TfidfVectorizer, HashingVectorizer]
)
@pytest.mark.parametrize(
"Vectorizer", [CountVectorizer, HashingVectorizer, TfidfVectorizer]
)
@pytest.mark.parametrize(
"stop_words, tokenizer, preprocessor, ngram_range, token_pattern,"
"analyzer, unused_name, ovrd_name, ovrd_msg",
[
(
["you've", "you'll"],
None,
None,
(1, 1),
None,
"char",
"'stop_words'",
"'analyzer'",
"!= 'word'",
),
(
None,
lambda s: s.split(),
None,
(1, 1),
None,
"char",
"'tokenizer'",
"'analyzer'",
"!= 'word'",
),
(
None,
lambda s: s.split(),
None,
(1, 1),
r"\w+",
"word",
"'token_pattern'",
"'tokenizer'",
"is not None",
),
(
None,
None,
lambda s: s.upper(),
(1, 1),
r"\w+",
lambda s: s.upper(),
"'preprocessor'",
"'analyzer'",
"is callable",
),
(
None,
None,
None,
(1, 2),
None,
lambda s: s.upper(),
"'ngram_range'",
"'analyzer'",
"is callable",
),
(
None,
None,
None,
(1, 1),
r"\w+",
"char",
"'token_pattern'",
"'analyzer'",
"!= 'word'",
),
],
)
@pytest.mark.parametrize(
"Vectorizer, X",
(
(HashingVectorizer, [{"foo": 1, "bar": 2}, {"foo": 3, "baz": 1}]),
(CountVectorizer, JUNK_FOOD_DOCS),
),
)
# TODO: Remove in 1.2 when get_feature_names is removed
@fails_if_pypy
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
17268,
13,
39305,
1330,
337,
5912,
198,
11748,
302,
198,
198,
11748,
12972,
9288,
198,
6738,
629,
541,
88,
1330,
29877,
198,
198,
6738,
1341,
35720,
13,
30053,
62,... | 2.261734 | 4,730 |
"""
__author__ = HackPrinceton 2017 Best Team
__description__ = Initializes files for extraction module
"""
| [
37811,
198,
834,
9800,
834,
796,
18281,
42904,
18483,
2177,
6705,
4816,
198,
834,
11213,
834,
796,
20768,
4340,
3696,
329,
22236,
8265,
198,
37811,
198
] | 4.153846 | 26 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._bch_expansion import bch_expand
from ._channel_state import (amplitude_damping_channel, dephasing_channel,
depolarizing_channel)
from ._commutators import anticommutator, commutator, double_commutator
from ._grid import Grid
from ._lcu_util import (lambda_norm,
preprocess_lcu_coefficients_for_reversible_sampling)
from ._operator_utils import (chemist_ordered, count_qubits,
eigenspectrum, fourier_transform,
freeze_orbitals, get_file_path,
hermitian_conjugated, inline_sum,
inverse_fourier_transform,
is_hermitian, is_identity,
normal_ordered, prune_unused_indices,
reorder, up_then_down,
load_operator, save_operator)
from ._rdm_mapping_functions import (kronecker_delta,
map_two_pdm_to_two_hole_dm,
map_two_pdm_to_one_pdm,
map_one_pdm_to_one_hole_dm,
map_one_hole_dm_to_one_pdm,
map_two_pdm_to_particle_hole_dm,
map_two_hole_dm_to_two_pdm,
map_two_hole_dm_to_one_hole_dm,
map_particle_hole_dm_to_one_pdm,
map_particle_hole_dm_to_two_pdm)
from ._slater_determinants import (gaussian_state_preparation_circuit,
slater_determinant_preparation_circuit)
from ._special_operators import (majorana_operator, number_operator,
s_minus_operator, s_plus_operator,
s_squared_operator,
sx_operator, sy_operator, sz_operator,
up_index, down_index)
from ._testing_utils import (random_antisymmetric_matrix,
random_diagonal_coulomb_hamiltonian,
random_hermitian_matrix,
random_interaction_operator,
random_quadratic_hamiltonian,
random_unitary_matrix)
from ._trotter_error import error_bound, error_operator
from ._trotter_exp_to_qgates import (pauli_exp_to_qasm,
trotterize_exp_qubop_to_qasm,
trotter_operator_grouping)
from ._unitary_cc import (uccsd_convert_amplitude_format,
uccsd_generator,
uccsd_singlet_generator,
uccsd_singlet_get_packed_amplitudes,
uccsd_singlet_paramsize)
# Imports out of alphabetical order to avoid circular dependency.
from ._jellium_hf_state import hartree_fock_state_jellium
from ._low_depth_trotter_error import (
low_depth_second_order_trotter_error_bound,
low_depth_second_order_trotter_error_operator)
from ._sparse_tools import (boson_ladder_sparse,
boson_operator_sparse,
expectation,
expectation_computational_basis_state,
get_density_matrix,
get_gap,
get_ground_state,
get_linear_qubit_operator_diagonal,
inner_product,
jordan_wigner_sparse,
jw_configuration_state,
jw_hartree_fock_state,
jw_get_gaussian_state,
jw_get_ground_state_at_particle_number,
jw_number_restrict_operator,
jw_number_restrict_state,
jw_slater_determinant,
jw_sz_restrict_operator,
jw_sz_restrict_state,
qubit_operator_sparse,
sparse_eigenspectrum,
variance)
from ._davidson import Davidson, DavidsonOptions, QubitDavidson, SparseDavidson
from ._linear_qubit_operator import (
LinearQubitOperator,
LinearQubitOperatorOptions,
ParallelLinearQubitOperator,
generate_linear_qubit_operator,
)
from ._pubchem import geometry_from_pubchem
| [
2,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
220,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
220,
220,
921,
743,
7330,
257,
4866,... | 1.766381 | 2,915 |
import base64
import json
import struct
from collections import abc
from enum import Enum
from os import environ as env
from time import sleep, time
from typing import cast, Any, Dict, Iterable, Iterator, List, Optional, \
Tuple, TypeVar, Union
import pyarrow as pa
from pyarrow.lib import ArrowKeyError, RecordBatch, Schema, Table
import pyarrow.flight as flight
# Known job types supported by the Java plugin.
_JOB_BULK_IMPORT = "import.bulk"
_JOB_CYPHER = "cypher.read"
_JOB_GDS_READ = "gds.read" # TODO: rename
_JOB_GDS_WRITE_NODES = "gds.write.nodes"
_JOB_GDS_WRITE_RELS = "gds.write.relationships"
_JOB_KHOP = "khop"
_JOB_STATUS = "job.status"
_JOB_INFO_VERSION = "info.version"
_JOB_INFO_STATUS = "info.jobs"
# These defaults should stay in sync with those in the Java plugin.
# See org.neo4j.arrow.Neo4jDefaults for reference.
_ID = 'ID'
_LABELS = 'LABELS'
_START_ID = 'START_ID'
_END_ID = 'END_ID'
_TYPE = 'TYPE'
_DEFAULT_HOST = env.get('NEO4J_ARROW_HOST', 'localhost')
_DEFAULT_PORT = int(env.get('NEO4J_ARROW_PORT', '9999'))
pa.enable_signal_handlers(True)
TableLike = TypeVar('TableLike', bound=Union[RecordBatch, Table])
class JobStatus(Enum):
"""Represents the state of a server-side job."""
UNKNOWN = "UNKNOWN"
INITIALIZING = "INITIALIZING"
PENDING = "PENDING"
COMPLETE = "COMPLETE"
ERROR = "ERROR"
PRODUCING = "PRODUCING"
@classmethod
def _coerce_ticket(maybe_ticket: Union[bytes, flight.Ticket]) -> flight.Ticket:
"""
Coerce the given value into a Flight Ticket.
:param maybe_ticket: possible Ticket
:return: a Ticket
"""
ticket: flight.Ticket
if type(maybe_ticket) is flight.Ticket:
ticket = maybe_ticket
else:
ticket = flight.Ticket.deserialize(cast(bytes, maybe_ticket))
return ticket
def _coerce_table(data: Union[Dict[Any, Any],
TableLike,
flight.FlightStreamChunk]) -> Table:
"""
Coerce a TableLike value into a PyArrow Table.
:param data: coercible value
:return: a PyArrow Table
"""
if type(data) is dict:
return Table.from_pydict(data)
elif type(data) is RecordBatch:
return Table.from_batches([data])
elif type(data) is Table:
return data
elif type(data) is flight.FlightStreamChunk:
# TODO: this is a pretty wasteful wrapping
return Table.from_batches([data.data])
# yolo
return pa.table(data=data)
class Neo4jArrow:
"""
A client for interacting with a remote Neo4j Arrow service. Useful for
working with large datasets, retrieving bulk data, and async batch jobs!
"""
# TODO: rename camelCase args to snake case
_client: flight.FlightClient
_location: flight.Location
_options: flight.FlightCallOptions
def __init__(self, user: str, password: str,
location: Tuple[str, int] = (_DEFAULT_HOST, _DEFAULT_PORT),
tls: bool = False, verify_tls: bool = True):
"""
Create a new Neo4jArrow client. Note: the client connects
:param user: Neo4j user to authenticate as
:param password: password for user
:param location: tuple of host, port (optional)
:param tls: use TLS?
:param verify_tls: verify server identity in x.509 certificate?
"""
token = base64.b64encode(f'{user}:{password}'.encode('utf8'))
self._options = flight.FlightCallOptions(headers=[
(b'authorization', b'Basic ' + token)
])
host, port = location
if tls:
self._location = flight.Location.for_grpc_tls(host, port)
else:
self._location = flight.Location.for_grpc_tcp(host, port)
self._client = flight.FlightClient(self._location,
disable_server_verification=(not verify_tls))
def list_actions(self) -> List[flight.Action]:
"""
List all actions available on the server.
:return: list of all available Actions
"""
return list(self._client.list_actions(self._options))
def list_flights(self) -> List[flight.FlightInfo]:
"""
List all known/existing Flights on the server.
:return: list of Flights
"""
return list(self._client.list_flights(None, self._options))
def info(self) -> Dict[str, Any]:
"""
Get info on the Neo4j Arrow server
:return: metadata describing Neo4j Arrow server (e.g. version)
"""
result = self._client.do_action(
(_JOB_INFO_VERSION, b''), self._options)
obj = json.loads(next(result).body.to_pybytes())
if type(obj) is dict:
return obj
raise RuntimeError("server returned unexpected data format")
def _submit(self, action: Union[Tuple[str, bytes],
flight.Action]) -> flight.Ticket:
"""Attempt to ticket the given action/job"""
results = self._client.do_action(action, self._options)
return flight.Ticket.deserialize((next(results).body.to_pybytes()))
def cypher(self, cypher: str, database: str = 'neo4j',
params: Optional[Dict[str, Any]] = None) -> flight.Ticket:
"""Submit a Cypher job with optional parameters. Returns a ticket."""
cypher_bytes = cypher.encode('utf8')
db_bytes = database.encode('utf8')
params_bytes = json.dumps(params or {}).encode('utf8')
# Our CypherMessage format is simple:
# - 16 bit unsigned length of the cypher byte string
# - the cypher byte string payload
# - 16 bit unsigned length of the database byte string
# - the database byte string payload
# - 16 bit unsigned length of the param json payload
# - the param json byte string payload
fmt = f"!H{len(cypher_bytes)}sH{len(db_bytes)}sH{len(params_bytes)}s"
buffer = struct.pack(fmt,
len(cypher_bytes), cypher_bytes,
len(db_bytes), db_bytes,
len(params_bytes), params_bytes)
return self._submit((_JOB_CYPHER, buffer))
def gds_nodes(self, graph: str, database: str = 'neo4j',
properties: Optional[List[str]] = None,
filters: Optional[List[str]] = None,
node_id: str = '',
extra: Optional[Dict[str, Any]] = None) -> flight.Ticket:
"""Submit a GDS job for streaming Node properties. Returns a ticket."""
params = {
'db': database,
'graph': graph,
'type': 'node',
'node_id': node_id,
'properties': properties or [],
'filters': filters or [],
}
params.update(extra or {})
params_bytes = json.dumps(params).encode('utf8')
return self._submit((_JOB_GDS_READ, params_bytes))
def gds_write_nodes(self, graph: str, database: str = 'neo4j',
id_field: str = _ID,
labels_field: str = _LABELS) -> flight.Ticket:
"""Submit a GDS Write Job for creating Nodes and Node Properties."""
params = {
'db': database,
'graph': graph,
'idField': id_field,
'labelsField': labels_field,
}
params_bytes = json.dumps(params).encode('utf8')
return self._submit((_JOB_GDS_WRITE_NODES, params_bytes))
def gds_write_relationships(self, graph: str, database: str = 'neo4j',
source_field: str = _START_ID,
target_field: str = _END_ID,
type_field: str = _TYPE) -> flight.Ticket:
"""Submit a GDS Write Job for creating Rels and Rel Properties."""
params = {
'db': database,
'graph': graph,
'source_field': source_field,
'target_field': target_field,
'type_field': type_field,
}
params_bytes = json.dumps(params).encode('utf8')
return self._submit((_JOB_GDS_WRITE_RELS, params_bytes))
def gds_relationships(self, graph: str, database: str = 'neo4j',
properties: Optional[List[str]] = None,
filters: Optional[List[str]] = None,
node_id: Optional[str] = None,
extra: Optional[Dict[str, Any]] = None) -> flight.Ticket:
"""
Submit a GDS job for retrieving Relationship properties.
:param graph: name of the GDS graph
:param database: name of the underlying Neo4j database
:param properties: relationship properties to retrieve
:param filters: relationship type filter
:param node_id: property to use as an alternative node id (default is
to use the internal opaque id)
:param extra: additional custom message parameters
:return: new Ticket
"""
params = {
'db': database,
'graph': graph,
'type': 'relationship',
'node_id': node_id or '',
'properties': properties or [],
'filters': filters or [],
}
params.update(extra or {})
params_bytes = json.dumps(params).encode('utf8')
return self._submit((_JOB_GDS_READ, params_bytes))
def khop(self, graph: str, database: str = 'neo4j',
node_id: Optional[str] = None, rel_property: str = '_type_',
extra: Optional[Dict[str, Any]] = None) -> pa.flight.Ticket:
"""
**Experimental** K-Hop Job support
:param graph: gds graph to analyze
:param database: underlying neo4j database
:param node_id: optional property to use as a logical node id
:param rel_property: special relationship property used to encode
orientation of the edge
:param extra: any extra k/v pairs for the KhopMessage
:return: ticket to a new KHop job
"""
params = {
'db': database,
'graph': graph,
'node_id': node_id or '',
'type': 'khop',
'properties': [rel_property],
'filters': [],
}
params.update(extra or {})
params_bytes = json.dumps(params).encode('utf8')
return self._submit((_JOB_GDS_READ, params_bytes))
def status(self, ticket: Union[bytes, flight.Ticket]) -> JobStatus:
"""
Inspect the status a server-side Job associated with a given Ticket.
:param ticket: Optional Ticket for filtering Jobs
:return: list of tuples of Job ID (a string) and Job Status
"""
body = _coerce_ticket(ticket).serialize()
action = (_JOB_STATUS, body)
results = self._client.do_action(action, self._options)
status = next(results).body.to_pybytes().decode('utf8')
return JobStatus.from_str(status)
def wait_for_job(self, ticket: Union[bytes, pa.flight.Ticket],
desired: JobStatus = JobStatus.PRODUCING,
must_exist: bool = True,
timeout: Optional[int] = None) -> bool:
"""Block until a given job (specified by a ticket) reaches a status."""
start = time()
timeout = timeout or (1 << 25) # well beyond someone's patience
while time() - start < timeout:
try:
current = self.status(ticket)
if current == desired:
return True
except ArrowKeyError:
if must_exist:
print(f'no job found for ticket {ticket!r}')
return False
sleep(1) # TODO: is 1s too fast? too slow? just right?
return False
def stream(self, ticket: Union[bytes, flight.Ticket],
timeout: Optional[int] = None) -> flight.FlightStreamReader:
"""
Read the stream associated with the given ticket.
:param ticket: ticket to an active Read Job
:param timeout: timeout to wait for stream to start producing
:return: new FlightStreamReader for consuming the results
"""
ticket = _coerce_ticket(ticket)
self.wait_for_job(ticket, timeout=timeout)
return self._client.do_get(ticket, self._options)
def put(self, ticket: Union[bytes, flight.Ticket],
data: Union[Dict[Any, Any], TableLike, Iterable[TableLike],
Iterator[TableLike], flight.FlightStreamReader],
schema: Optional[Schema] = None,
metadata: Optional[Dict[Any, Any]] = None) -> Tuple[int, int]:
"""
Send data to the server for the corresponding Flight.
:param ticket: a Ticket to a Flight stream
:param data: the data to stream to the server
:param metadata: optional metadata to append to the stream's Schema
:return: number of rows sent, number of bytes sent
"""
ticket = _coerce_ticket(ticket)
if isinstance(data, flight.FlightStreamReader):
# XXX must come first as it's also an instance of Iterable!
return self.put_stream_from_reader(ticket, data, schema, metadata)
elif isinstance(data, (abc.Iterable, abc.Iterator)):
return self.put_stream_batches(ticket, data, schema, metadata)
return self.put_stream(ticket, data, metadata)
def put_stream(self, ticket: Union[bytes, flight.Ticket],
data: Union[Dict[Any, Any], TableLike],
metadata: Optional[Dict[Any, Any]] = None) -> Tuple[int, int]:
"""
Write a stream to the server
:param ticket: ticket for the associated Flight
:param data: Table or convertible table
:param metadata: optional metadata to include in the Table Schema
:return: number of rows and number of bytes transmitted
"""
table = _coerce_table(data)
ticket = _coerce_ticket(ticket)
if metadata:
schema = table.schema.with_metadata(metadata)
table = table.replace_schema_metadata(schema.metadata)
try:
descriptor = flight.FlightDescriptor.for_command(
ticket.serialize())
writer, _ = self._client.do_put(descriptor, table.schema,
self._options)
# TODO: configurable or auto-chosen chunksize
writer.write_table(table, max_chunksize=8192)
writer.close()
# TODO: server should be telling us what the results were.
# We shouldn't assume all data was accepted.
return table.num_rows, table.nbytes
except Exception as e:
print(f"put_stream error: {e}")
return 0, 0
def put_stream_batches(self, ticket: flight.Ticket,
batches: Union[Iterable[TableLike],
Iterator[TableLike]],
schema: Optional[Schema] = None,
metadata: Optional[Dict[Any, Any]] = None) \
-> Tuple[int, int]:
"""
Write a stream using a batch producer.
:param ticket: ticket for the Flight
:param batches: a RecordBatchStream producing the input data
:param schema: optional overriding Schema for the stream
:param metadata: optional metadata to append to the Schema
:return: number of rows and number of bytes transmitted
"""
descriptor = flight.FlightDescriptor.for_command(ticket.serialize())
batches = iter(batches)
# peek and get our schema, updating with any overrides desired
batch = next(batches)
table = _coerce_table(batch)
schema = schema or table.schema
if metadata:
schema = schema.with_metadata(metadata)
writer, _ = self._client.do_put(descriptor, schema, self._options)
try:
writer.write_table(table)
rows, nbytes = len(batch), batch.nbytes
for batch in batches:
writer.write_table(_coerce_table(batch))
nbytes += batch.nbytes
rows += len(batch)
finally:
writer.close()
print(f"wrote {rows:,} rows, {round(nbytes / (1 << 20), 2):,} MiB")
return rows, nbytes
def put_stream_from_reader(self, ticket: flight.Ticket,
reader: flight.FlightStreamReader,
schema: Optional[Schema] = None,
metadata: Optional[Dict[Any, Any]] = None) \
-> Tuple[int, int]:
"""
Relay an existing Arrow Flight stream provided by the given reader.
:param ticket:
:param reader:
:param schema:
:param metadata:
:return:
"""
descriptor = flight.FlightDescriptor.for_command(ticket.serialize())
chunk_stream = iter(reader)
table = _coerce_table(next(chunk_stream))
schema = schema or table.schema
if metadata:
schema = schema.with_metadata(metadata)
writer, _ = self._client.do_put(descriptor, schema, self._options)
try:
writer.write_table(table)
rows, nbytes = len(table), table.nbytes
for chunk in chunk_stream:
table = _coerce_table(chunk)
writer.write_table(table)
nbytes += table.nbytes
rows += len(table)
finally:
writer.close()
print(f"wrote {rows:,} rows, {round(nbytes / (1 << 20), 2):,} MiB")
return rows, nbytes
| [
11748,
2779,
2414,
198,
11748,
33918,
198,
11748,
2878,
198,
6738,
17268,
1330,
450,
66,
198,
6738,
33829,
1330,
2039,
388,
198,
6738,
28686,
1330,
551,
2268,
355,
17365,
198,
6738,
640,
1330,
3993,
11,
640,
198,
6738,
19720,
1330,
3350... | 2.231233 | 7,966 |
print(type(5))
my_dict = {}
print(type(my_dict))
my_list = []
print(type(my_list)) | [
4798,
7,
4906,
7,
20,
4008,
198,
198,
1820,
62,
11600,
796,
23884,
198,
198,
4798,
7,
4906,
7,
1820,
62,
11600,
4008,
198,
198,
1820,
62,
4868,
796,
17635,
198,
198,
4798,
7,
4906,
7,
1820,
62,
4868,
4008
] | 2.15 | 40 |
import json
import datetime
import numpy as np
import unittest
import great_expectations as ge
if __name__ == "__main__":
unittest.main() | [
11748,
33918,
198,
11748,
4818,
8079,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
555,
715,
395,
198,
198,
11748,
1049,
62,
1069,
806,
602,
355,
4903,
628,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
129... | 2.865385 | 52 |
# -*- coding: utf-8 -*-
"""
@author: Zheng Fang
"""
from numba import cuda
import numpy as np
n = int(2e4) # this is not to exceed 10^7
# supply data
data = np.random.normal(size=n, loc=0, scale=1).astype('float64')
# define convenience function
#======================================================================
for _ in range(5):
timer()
"""
%timeit -r 50 -n 10 timer()
"""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
31,
9800,
25,
44583,
24468,
201,
198,
37811,
201,
198,
201,
198,
201,
198,
6738,
997,
7012,
1330,
269,
15339,
201,
198,
11748,
299,
32152,
355,
4... | 2.604938 | 162 |
"""Collections of objects and collection helper functions."""
__all__ = [
'BiDict',
'DictBuilder',
'DictView',
'LoadingDict',
'LruCache',
'NamedTuple',
'SingletonMeta',
'Symbols',
'Trie',
'collect',
'collect_pairs',
'group',
'is_ordered',
'unique',
]
import operator
from collections import (
Mapping,
MutableMapping,
OrderedDict,
UserDict,
)
from garage.assertions import ASSERT
def is_ordered(lst, key=None, strict=False):
"""True if input list is (strictly) ordered."""
if key is None:
key = lambda item: item
cmp = operator.lt if strict else operator.le
return all(cmp(key(x0), key(x1)) for x0, x1 in zip(lst, lst[1:]))
def unique(iterable, key=None):
"""Return unique elements of an iterable."""
if key:
odict = OrderedDict()
for element in iterable:
odict.setdefault(key(element), element)
return list(odict.values())
else:
return list(OrderedDict.fromkeys(iterable))
def collect(iterable, key=None, value=None):
"""Collect elements by key, preserving order."""
if key is None:
key = lambda element: element
if value is None:
value = lambda element: element
odict = OrderedDict()
for element in iterable:
odict.setdefault(key(element), []).append(value(element))
return odict
def collect_pairs(iterable):
"""Collect pairs, preserving order."""
return collect(
iterable, key=lambda pair: pair[0], value=lambda pair: pair[1])
def group(iterable, key=None):
"""Group elements by key, preserving order."""
return list(collect(iterable, key=key).values())
class DictView(Mapping):
"""Read-only view of a dict-like object."""
class BiDict(MutableMapping):
"""Bidirectional dict."""
class DictBuilder:
"""A fluent-style builder of dict object."""
# It does not support nested if-block at the moment
# Setter methods
class NamedTupleMeta(type):
"""This is similar to typing.NamedTupleMeta but supports base
classes (so that you may use mixin pattern).
Note that, to adhere to Liskov Substitution Principle, you cannot
inherit from multiple subclass of NamedTuple.
"""
@staticmethod
def make_new(class_name, field_names):
"""Make a __new__ method for the new class."""
if not field_names:
args = ''
elif len(field_names) == 1:
# `(x)` is the same as `x` and you need the extra comma.
args = '{},'.format(field_names[0])
else:
args = ', '.join(field_names)
code = (
'def __new__(cls, {args}):\n'
' """Create new instance of {class_name}({args})."""\n'
' return tuple.__new__(cls, ({args}))\n'
.format(class_name=class_name, args=args)
)
variables = {'__name__': class_name}
exec(code, variables)
return variables['__new__']
@staticmethod
def make_repr(class_name, field_names):
"""Make a __repr__ method for the new class."""
field_formats = ('%s=%%r' % name for name in field_names)
repr_format = '%s(%s)' % (class_name, ', '.join(field_formats))
def __repr__(self):
"""Return a nicely formatted representation string"""
return repr_format % self
return __repr__
class SingletonMeta(type):
"""Metaclass to create singleton types."""
class Symbols:
"""Read-only namespace."""
| [
37811,
5216,
26448,
286,
5563,
290,
4947,
31904,
5499,
526,
15931,
198,
198,
834,
439,
834,
796,
685,
198,
220,
220,
220,
705,
23286,
35,
713,
3256,
198,
220,
220,
220,
705,
35,
713,
32875,
3256,
198,
220,
220,
220,
705,
35,
713,
... | 2.43565 | 1,453 |
"""
Inverse chi-square distribution
-------------------------------
The probability density function for the inverse chi-square
distribution is
f(x, nu) = 2**(-nu/2) / Gamma(nu/2) * x**(-nu/2 - 1) * exp(-1/(2*x))
See the Wikipedia article `"Inverse-chi-squared distribution"
<https://en.wikipedia.org/wiki/Inverse-chi-squared_distribution>`_
for more information. The functions here implement the first
definition given in the wikipedia article. That is, if X has the
chi-square distribution with nu degrees of freedom, then 1/X has the
inverse chi-square distribution with nu degrees of freedom.
"""
import re
import mpmath
# module docstring substitution
_math_expression = r"""
.. math::
f(x, \\nu) = \\frac{2^{-\\nu/2}}{\\Gamma(\\nu/2)}
x^{-\\nu/2 - 1} e^{-1/(2x)}
"""
_docstring_re_subs = [
(r' f\(x,.*$', _math_expression, 0, re.MULTILINE),
(' nu ', r' :math:`\\nu` ', 0, 0),
]
__all__ = ['pdf', 'logpdf', 'cdf', 'sf', 'mean', 'mode', 'variance']
def pdf(x, nu):
"""
PDF for the inverse chi-square distribution.
"""
_validate_nu(nu)
if x <= 0:
return mpmath.mp.zero
with mpmath.extradps(5):
x = mpmath.mpf(x)
nu = mpmath.mpf(nu)
hnu = nu/2
p = (mpmath.power(2, -hnu) * x**(-hnu - 1) * mpmath.exp(-1/(2*x))
/ mpmath.gamma(hnu))
return p
def logpdf(x, nu):
"""
Logarithm of the PDF for the inverse chi-square distribution.
"""
_validate_nu(nu)
if x <= 0:
return mpmath.ninf
with mpmath.extradps(5):
x = mpmath.mpf(x)
nu = mpmath.mpf(nu)
hnu = nu/2
logp = (-hnu*mpmath.log(2) + (-hnu - 1)*mpmath.log(x) - 1/(2*x)
- mpmath.loggamma(hnu))
return logp
def cdf(x, nu):
"""
CDF for the inverse chi-square distribution.
"""
_validate_nu(nu)
if x <= 0:
return mpmath.mp.zero
with mpmath.extradps(5):
x = mpmath.mpf(x)
nu = mpmath.mpf(nu)
c = mpmath.gammainc(nu/2, a=1/(2*x), b=mpmath.inf, regularized=True)
return c
def sf(x, nu):
"""
Survival function for the inverse chi-square distribution.
"""
_validate_nu(nu)
if x <= 0:
return mpmath.mp.one
with mpmath.extradps(5):
x = mpmath.mpf(x)
nu = mpmath.mpf(nu)
s = mpmath.gammainc(nu/2, a=0, b=1/(2*x), regularized=True)
return s
def mean(nu):
"""
Mean of the inverse chi-square distribution.
For nu > 2, the mean is 1/(nu - 2).
"""
_validate_nu(nu)
with mpmath.extradps(5):
nu = mpmath.mpf(nu)
return mpmath.mp.one / (nu - 2) if nu > 2 else mpmath.nan
mean._docstring_re_subs = [
(r' *1.*2\)$',
'\n'.join([r'.. math::',
r' \\frac{1}{\\nu - 2}',
r'']),
0, re.MULTILINE),
(r'1/\(nu - 2\)', r':math:`1/(\\nu - 2)`', 0, 0),
('nu > 2', r':math:`\\nu > 2`', 0, 0),
]
def mode(nu):
"""
Mode of the inverse chi-square distribution.
The mode is max(k - 2, 0).
"""
_validate_nu(nu)
with mpmath.extradps(5):
nu = mpmath.mpf(nu)
return 1 / (nu + 2)
def variance(nu):
"""
Variance of the inverse chi-square distribution.
For nu > 4, the variance is
2 / ((nu - 2)**2 (nu - 4))
"""
_validate_nu(nu)
with mpmath.extradps(5):
nu = mpmath.mpf(nu)
return 2/(nu - 2)**2 / (nu - 4) if nu > 4 else mpmath.nan
variance._docstring_re_subs = [
(r' *2.*4\)\)$',
'\n'.join([r'.. math::',
r' \\frac{2}{(\\nu - 2)^2 (\\nu - 4)}',
r'']),
0, re.MULTILINE),
('nu > 4', r':math:`\\nu > 4`', 0, 0),
]
| [
37811,
198,
818,
4399,
33166,
12,
23415,
6082,
198,
1783,
24305,
198,
198,
464,
12867,
12109,
2163,
329,
262,
34062,
33166,
12,
23415,
198,
17080,
3890,
318,
628,
220,
220,
220,
277,
7,
87,
11,
14364,
8,
796,
362,
1174,
32590,
28803,
... | 1.998932 | 1,873 |
#!/usr/bin/python
import psutil
import subprocess
import simplejson
import time
import random
import multiprocessing as mp
procs_id = 0
procs = {}
procs_data = []
url_num = 0
# Define an output queue
output = mp.Queue()
MAX_THREAD_NUM = 500
#proxy_url='10.0.0.204:80'
proxy_url=''
urls = [
'http://drbd.linbit.com/home/what-is-drbd/',
'http://drbd.linbit.com/home/what-is-ha/',
'http://en.wikipedia.org/wiki/Main_Page',
'http://en.wikipedia.org/wiki/Walden%E2%80%93Wallkill_Rail_Trail',
'http://en.wikipedia.org/wiki/New_York_metropolitan_area',
'http://www.citrix.com/products.html',
'http://www.citrix.co.jp/products.html?posit=glnav',
'http://www.citrix.co.jp/products/gotowebinar/overview.html'
]
#Get http access time for particular url with/without proxy
# Runs command silently
#Main function
if __name__ == '__main__':
#warmup for ATS
print ("warmup start....")
for url in urls:
getInfoForCurl(url,proxy_url)
print ("test start....")
# Setup a list of processes that we want to run
print "add it into thead queue...."
processes = [mp.Process(target=accesswithOutput, args=(proxy_url,)) for x in range(MAX_THREAD_NUM)]
#processes = [mp.Process(target=accesswithOutput, args=('',)) for x in range(MAX_THREAD_NUM)]
# Run processes
print "thread start..."
for p in processes:
p.start()
# Exit the completed processes
for p in processes:
p.join()
print "thread exit!"
# Get process results from the output queue
results = [output.get() for p in processes]
time_sum=0
for result in results:
time_sum =time_sum + result[2]
print(time_sum)
# for url in urls:
# info= getInfoForCurl(url,proxy_url)
# print (info)
# info= getInfoForCurl(url)
# print (info)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
11748,
26692,
22602,
198,
11748,
850,
14681,
198,
11748,
2829,
17752,
198,
11748,
640,
198,
11748,
4738,
198,
11748,
18540,
305,
919,
278,
355,
29034,
198,
198,
1676,
6359,
62,
312,
796,
657,
... | 2.439633 | 762 |
from bs4 import BeautifulSoup as soup
import os
output_filename = "output_files/slideshow/app_info/app_info.csv"
output_file = open(output_filename, "a")
headers = "title, subtitle, publisher, country, year, last_month_downloads, last_month_revenue\n"
output_file.write(headers)
for filename in os.listdir(os.getcwd() + "/input_files/slideshow/app_info"):
if filename.endswith(".html"):
with open(os.path.join(os.getcwd() + "/input_files/slideshow/app_info", filename), 'r') as f:
file_content = f.read()
page_soup = soup(file_content, 'html.parser')
title = clean_value(page_soup.find("span", {"class": "app-name-wrapper"}))
sub_title = clean_value(page_soup.find("h3", {"class": "subtitle-text"}))
last_month_downloads = clean_value(page_soup.find("span", {"class": "downloads"}))
last_month_revenue = clean_value(page_soup.find("span", {"class": "revenue"}))
about_items = page_soup.find("table", {"class": "about-app-table"}).find_all("tr")
for item in about_items:
if item.find("td", {"class": "name"}).text.strip() == "Support URL:":
publisher = clean_value(item.find("td", {"class": "value"}))
if item.find("td", {"class": "name"}).text.strip() == "Most Popular Country:":
country = clean_value(item.find("td", {"class": "value"}))
if item.find("td", {"class": "name"}).text.strip() == "Country Release Date:":
release_date = clean_value(item.find("td", {"class": "value"}))
output_file.write(title + ", " + sub_title + ", " + publisher + ", " + country + ", " + release_date + ", " + last_month_downloads + ", " + last_month_revenue + "\n")
output_file.close()
| [
6738,
275,
82,
19,
1330,
23762,
50,
10486,
355,
17141,
198,
11748,
28686,
628,
198,
22915,
62,
34345,
796,
366,
22915,
62,
16624,
14,
6649,
42286,
14,
1324,
62,
10951,
14,
1324,
62,
10951,
13,
40664,
1,
198,
22915,
62,
7753,
796,
12... | 2.370224 | 759 |
from linty.linty import lint_text | [
6738,
300,
600,
88,
13,
75,
600,
88,
1330,
300,
600,
62,
5239
] | 2.538462 | 13 |
# coding: utf-8
__version__ = '0.1.0'
import logging
import math
import struct
import time
from typing import Union
from netaddr import IPSet
MMDBType = Union[dict, list, str, bytes, int, bool]
logger = logging.getLogger(__name__)
METADATA_MAGIC = b'\xab\xcd\xefMaxMind.com'
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
834,
9641,
834,
796,
705,
15,
13,
16,
13,
15,
6,
198,
198,
11748,
18931,
198,
11748,
10688,
198,
11748,
2878,
198,
11748,
640,
198,
6738,
19720,
1330,
4479,
198,
198,
6738,
2010,
29851,
1330,
3... | 2.623853 | 109 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=too-many-lines
import argparse
import base64
import socket
import os
from knack.util import CLIError
from knack.log import get_logger
from azure.cli.core.commands.validators import \
(validate_tags, get_default_location_from_resource_group)
from azure.cli.core.commands.template_create import get_folded_parameter_validator
from azure.cli.core.commands.client_factory import get_subscription_id, get_mgmt_service_client
from azure.cli.core.commands.validators import validate_parameter_set
from azure.cli.core.profiles import ResourceType
logger = get_logger(__name__)
# pylint: disable=inconsistent-return-statements
def validate_ip_tags(cmd, namespace):
''' Extracts multiple space-separated tags in TYPE=VALUE format '''
IpTag = cmd.get_models('IpTag')
if namespace.ip_tags and IpTag:
ip_tags = []
for item in namespace.ip_tags:
tag_type, tag_value = item.split('=', 1)
ip_tags.append(IpTag(ip_tag_type=tag_type, tag=tag_value))
namespace.ip_tags = ip_tags
def get_public_ip_validator(has_type_field=False, allow_none=False, allow_new=False,
default_none=False):
""" Retrieves a validator for public IP address. Accepting all defaults will perform a check
for an existing name or ID with no ARM-required -type parameter. """
from msrestazure.tools import is_valid_resource_id, resource_id
return complex_validator_with_type if has_type_field else simple_validator
# COMMAND NAMESPACE VALIDATORS
# pylint: disable=too-few-public-methods
| [
2,
16529,
1783,
10541,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
4091,
13789,
13,
14116,
287,
262,
1628,
6808,
329,
5964,
1321,
13,
198,
2,
16529,
1783,
10541,
198,... | 3.034848 | 660 |
import os
from dotenv import load_dotenv
load_dotenv()
TOKEN = os.getenv("TOKEN")
COMMAND_PREFIX = os.getenv("PREFIX")
OWNER = os.getenv("OWNER")
ANNOUNCEMENTS_CHANNEL = os.getenv("ANNOUNCEMENTS")
SECURITY_CHANNEL = os.getenv("SECURITY")
SWEAR_WORDS_LIST = os.getenv("BANNEDWORDS").split(",")
| [
11748,
28686,
198,
198,
6738,
16605,
24330,
1330,
3440,
62,
26518,
24330,
198,
198,
2220,
62,
26518,
24330,
3419,
198,
198,
10468,
43959,
796,
28686,
13,
1136,
24330,
7203,
10468,
43959,
4943,
198,
9858,
44,
6981,
62,
47,
31688,
10426,
... | 2.3125 | 128 |
#! /usr/bin/env python
"""run and initiate nltk.download('all') """
import nltk
# setup or argparse
PERMISSION = input("Would you like to continue and install all nltk dependanies? [Y/n] ")
if PERMISSION == 'Y':
try:
nltk.download('all')
COMPLETE = """We have completed the initial setup for ntlk download.
You can now run bigramft.py"""
print('\n', COMPLETE, '\n')
except Exception as error:
print('There was an error: ', error)
else:
EXIT_MSG = """No worries we can have some bigram fun later when your ready to setup.
Never rush quality!"""
print(EXIT_MSG)
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
5143,
290,
22118,
299,
2528,
74,
13,
15002,
10786,
439,
11537,
37227,
198,
198,
11748,
299,
2528,
74,
198,
198,
2,
9058,
393,
1822,
29572,
198,
198,
18973,
44,
40373,
7... | 2.604167 | 240 |
from django.contrib import admin
from employee.models import Department, Employee, Room
admin.site.register(Department)
admin.site.register(Employee)
admin.site.register(Room)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
6538,
13,
27530,
1330,
2732,
11,
36824,
11,
10096,
198,
198,
28482,
13,
15654,
13,
30238,
7,
36261,
8,
198,
28482,
13,
15654,
13,
30238,
7,
29733,
1453,
8,
198,
28482,
... | 3.56 | 50 |
import os
from moviepy.editor import VideoFileClip, concatenate_videoclips, CompositeVideoClip, TextClip
import random
import numpy as np
import time
from video_facial_landmarks_minmax import calculate_distance
from face_embedding import calculate_euclidean_distance
import cv2
import subprocess
ONE_FRAME_SEC = 0.03336666666666588 # 29.97002997002997fps의 역수! 한 프레임당 시간을 계싼해서 프레임 id만 알면 현재 시간 알수 있도록 함# 0.03336666666666588??
EYE_MIN_DIFF = 65 # 두 영상의 눈 크기 차이가 거리 이상이면, crossfade 전환 하지 않는다.
TOTAL_MIN_DIFF = 200 # 두 영상의 눈 거리가 이 이상이면 전환 자체를 시도하지 않는다(엉뚱한데 옮겨가는거 피하기)
ROTATE_MAX = 7 # 각 도 차이가 이 값 이상이면, crossfade 전환하지 않는다.
WINDOW_TIME = 10 # WINDOW_TIME 초 안에서 최소 거리를 찾는다. 얼굴이 겹치는 부분이 없다면, WINDOW_TIME 만큼 자르고 radom으로 다음 영상을 재생한다.
PADDED_TIME = 3 # 최소 시간으로 영상을 자른 뒤 PADDED_TIME 만큼은 얼굴 거리를 계산하지 않는다.
# TRANSITION INFO
ZOOM_FRAME = 20 # 얼굴 확대하는 FRAME 수
CROSS_FRAME = 4 # CrossFade FRAME 수
ONE_ZOOM = 1.2 # 회전 확대 후 검은 비율을 줄이기 위해서 확대하는 비율
AGAIN_ZOOM = 1.15 # 영상이 확대가 불가능(영상 최대 크기 넘어감)할 때 한번 더 확대할 수 있는 비율. 한번 더 확대하고도 범위가 넘어가면, 그냥 아무 효과없이 전환한다.
PANELTY = 100
print('hyper parameter')
print(ONE_FRAME_SEC, EYE_MIN_DIFF, ROTATE_MAX, WINDOW_TIME, PADDED_TIME, ZOOM_FRAME, CROSS_FRAME, ONE_ZOOM, AGAIN_ZOOM)
TEST = False
TEST_TIME = 30
# Moving = 더 작은 쪽에서 하는 것!
# Rotate 할 때 빈 자리 메꾸기 위해서 기본적으로 ONE_ZOOM 만큼 확대하기!
# 이건 사이즈가 안맞아서 한번 더 확대 했을때 다른 쪽 영상을 처리하는 Class
# ForceZoom = 더 큰쪽에서 하는 것!!
start_time = time.time()
use_face_panelty = True # FacePanelty를 사용하면 Panelty값이 기본적으로 들어가니까 자연스러운 전환을 위해서는 역치값을 높여아 함
if use_face_panelty==True:
EYE_MIN_DIFF += PANELTY
TOTAL_MIN_DIFF += PANELTY
crosscut(videos_path="./video", option="norandom", use_face_panelty = False)
end_time = time.time()
print(end_time - start_time, 'total Generation time')
| [
11748,
28686,
198,
6738,
3807,
9078,
13,
35352,
1330,
7623,
8979,
2601,
541,
11,
1673,
36686,
378,
62,
85,
485,
38679,
2419,
11,
49355,
10798,
2601,
541,
11,
8255,
2601,
541,
198,
11748,
4738,
198,
11748,
299,
32152,
355,
45941,
198,
... | 1.103995 | 1,577 |
#!/usr/bin/env python
from __future__ import print_function
from collections import namedtuple
from common import config
from common.response import text_response, populate_html, redirect
import os
import cgi
import hashlib
import MySQLdb
import Cookie
try:
process_input()
except:
cgi.print_exception()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
17268,
1330,
3706,
83,
29291,
198,
198,
6738,
2219,
1330,
4566,
198,
6738,
2219,
13,
26209,
1330,
2420,
62,
26209,
11,
48... | 3.393617 | 94 |
from gi.repository import Gtk
| [
6738,
308,
72,
13,
260,
1930,
37765,
1330,
402,
30488,
628
] | 2.818182 | 11 |
#! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# License: 3-clause BSD
import sys
import os
import platform
import shutil
# We need to import setuptools before because it monkey-patches distutils
import setuptools # noqa
from distutils.command.clean import clean as Clean
from distutils.command.sdist import sdist
import traceback
import importlib
try:
import builtins
except ImportError:
# Python 2 compat: just to be able to declare that Python >=3.7 is needed.
import __builtin__ as builtins
# This is a bit (!) hackish: we are setting a global variable so that the
# main sklearn __init__ can detect if it is being loaded by the setup
# routine, to avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to
# recursively build the compiled extensions in sub-packages is based on the
# Python import machinery.
builtins.__SKLEARN_SETUP__ = True
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = 'amueller@ais.uni-bonn.de'
URL = 'http://scikit-learn.org'
DOWNLOAD_URL = 'https://pypi.org/project/scikit-learn/#files'
LICENSE = 'new BSD'
PROJECT_URLS = {
'Bug Tracker': 'https://github.com/scikit-learn/scikit-learn/issues',
'Documentation': 'https://scikit-learn.org/stable/documentation.html',
'Source Code': 'https://github.com/scikit-learn/scikit-learn'
}
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
import sklearn._min_dependencies as min_deps # noqa
from sklearn.externals._packaging.version import parse as parse_version # noqa
VERSION = sklearn.__version__
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = {
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
}
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
extras_require={
key: min_deps.tag_to_packages[key] for
key in ['examples', 'docs', 'tests', 'benchmark']
},
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
cmdclass = {'clean': CleanCommand, 'sdist': sdist}
# Custom build_ext command to set OpenMP compile flags depending on os and
# compiler. Also makes it possible to set the parallelism level via
# and environment variable (useful for the wheel building CI).
# build_ext has to be imported after setuptools
try:
from numpy.distutils.command.build_ext import build_ext # noqa
cmdclass['build_ext'] = build_ext_subclass
except ImportError:
# Numpy should not be a dependency just to be able to introspect
# that python 3.7 is required.
pass
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URL of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = {'fetch_artifacts', 'upload_all'}
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
def check_package_status(package, min_version):
"""
Returns a dictionary containing a boolean specifying whether given package
is up-to-date, along with the version string (empty string if
not installed).
"""
package_status = {}
try:
module = importlib.import_module(package)
package_version = module.__version__
package_status['up_to_date'] = parse_version(
package_version) >= parse_version(min_version)
package_status['version'] = package_version
except ImportError:
traceback.print_exc()
package_status['up_to_date'] = False
package_status['version'] = ""
req_str = "scikit-learn requires {} >= {}.\n".format(
package, min_version)
instructions = ("Installation instructions are available on the "
"scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
if package_status['up_to_date'] is False:
if package_status['version']:
raise ImportError("Your installation of {} "
"{} is out-of-date.\n{}{}"
.format(package, package_status['version'],
req_str, instructions))
else:
raise ImportError("{} is not "
"installed.\n{}{}"
.format(package, req_str, instructions))
if __name__ == "__main__":
setup_package()
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
15069,
357,
34,
8,
4343,
12,
10531,
327,
1798,
1758,
559,
3271,
1279,
66,
1798,
1758,
31,
14816,
13,
785,
29,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 2.724961 | 1,927 |
import textwrap
from functools import partial
import click
import tqdm
from wasabi import Printer, MESSAGES
from wasabi.util import ICONS
msg = Printer()
success = partial(msg.text, color=MESSAGES.GOOD, icon=MESSAGES.GOOD)
info = partial(msg.text, color=MESSAGES.INFO, icon=MESSAGES.INFO)
error = partial(msg.text, color=MESSAGES.FAIL, icon=MESSAGES.FAIL)
warning = partial(msg.text, color=MESSAGES.WARN, icon=MESSAGES.WARN)
error_box = partial(message_box, bg="red", icon=MESSAGES.FAIL)
info_box = partial(message_box, bg="blue", icon=MESSAGES.INFO)
warning_box = partial(message_box, bg="yellow", icon=MESSAGES.WARN)
success_box = partial(message_box, bg="green", icon=MESSAGES.GOOD)
| [
11748,
2420,
37150,
198,
6738,
1257,
310,
10141,
1330,
13027,
198,
198,
11748,
3904,
198,
11748,
256,
80,
36020,
198,
6738,
373,
17914,
1330,
1736,
3849,
11,
337,
1546,
4090,
48075,
198,
6738,
373,
17914,
13,
22602,
1330,
314,
10943,
50... | 2.599251 | 267 |
import sqlite3
import json
import uuid | [
11748,
44161,
578,
18,
198,
11748,
33918,
198,
11748,
334,
27112
] | 3.454545 | 11 |
s = 'ORnPBPMgArCaCaCaSiThCaCaSiThCaCaPBSiRnFArRnFArCaCaSiThCaCaSiThCaCaCaCaCaCaSiRnFYFArSiRnMgArCaSiRnPTiTiBFYPBFArSiRnCaSiRnTiRnFArSiAlArPTiBPTiRnCaSiAlArCaPTiTiBPMgYFArPTiRnFArSiRnCaCaFArRnCaFArCaSiRnSiRnMgArFYCaSiRnMgArCaCaSiThPRnFArPBCaSiRnMgArCaCaSiThCaSiRnTiMgArFArSiThSiThCaCaSiRnMgArCaCaSiRnFArTiBPTiRnCaSiAlArCaPTiRnFArPBPBCaCaSiThCaPBSiThPRnFArSiThCaSiThCaSiThCaPTiBSiRnFYFArCaCaPRnFArPBCaCaPBSiRnTiRnFArCaPRnFArSiRnCaCaCaSiThCaRnCaFArYCaSiRnFArBCaCaCaSiThFArPBFArCaSiRnFArRnCaCaCaFArSiRnFArTiRnPMgArF'
cmds = """Al => ThF
Al => ThRnFAr
B => BCa
B => TiB
B => TiRnFAr
Ca => CaCa
Ca => PB
Ca => PRnFAr
Ca => SiRnFYFAr
Ca => SiRnMgAr
Ca => SiTh
F => CaF
F => PMg
F => SiAl
H => CRnAlAr
H => CRnFYFYFAr
H => CRnFYMgAr
H => CRnMgYFAr
H => HCa
H => NRnFYFAr
H => NRnMgAr
H => NTh
H => OB
H => ORnFAr
Mg => BF
Mg => TiMg
N => CRnFAr
N => HSi
O => CRnFYFAr
O => CRnMgAr
O => HP
O => NRnFAr
O => OTi
P => CaP
P => PTi
P => SiRnFAr
Si => CaSi
Th => ThCa
Ti => BP
Ti => TiTi
e => HF
e => NAl
e => OMg"""
import re, copy
t = [n.split('=>') for n in cmds.replace(' ','').split('\n')]
conv = dict()
for name, value in t:
if name not in conv:
conv[name] = [value]
else:
conv[name].append(value)
final = set()
for name in conv:
index = [m.start() for m in list(re.finditer(name, s))]
for b in conv[name]:
for i in index:
final.add(s[:i] + b + s[i+len(name):])
print len(final) | [
82,
796,
705,
1581,
77,
49079,
5868,
70,
3163,
24334,
24334,
24334,
42801,
817,
24334,
24334,
42801,
817,
24334,
24334,
47,
4462,
72,
49,
77,
37,
3163,
49,
77,
37,
3163,
24334,
24334,
42801,
817,
24334,
24334,
42801,
817,
24334,
24334,
... | 1.824377 | 763 |
import urllib.parse, urllib.request, json
CompareDocs("""
All human beings are born free and equal in dignity and rights.
They are endowed with reason and conscience and should act towards
one another in a spirit of brotherhood.
Everyone is entitled to all the rights and freedoms set forth in
this Declaration, without distinction of any kind, such as race,
colour, sex, language, religion, political or other opinion,
national or social origin, property, birth or other status.
Furthermore, no distinction shall be made on the basis of the
political, jurisdictional or international status of the country
or territory to which a person belongs, whether it be independent,
trust, non-self-governing or under any other limitation of
sovereignty.""",
"en",
"""
Alle Menschen sind frei und gleich an W\u00fcrde und Rechten geboren.
Sie sind mit Vernunft und Gewissen begabt und sollen einander im
Geist der Br\u00fcderlichkeit begegnen.
Jeder hat Anspruch auf die in dieser Erkl\u00e4rung verk\u00fcndeten Rechte
und Freiheiten ohne irgendeinen Unterschied, etwa nach Rasse,
Hautfarbe, Geschlecht, Sprache, Religion, politischer oder sonstiger
\u00dcberzeugung, nationaler oder sozialer Herkunft, Verm\u00f6gen, Geburt
oder sonstigem Stand.
Des weiteren darf kein Unterschied gemacht werden auf Grund der
politischen, rechtlichen oder internationalen Stellung des Landes
oder Gebiets, dem eine Person angeh\u00f6rt, gleichg\u00fcltig ob dieses
unabh\u00e4ngig ist, unter Treuhandschaft steht, keine Selbstregierung
besitzt oder sonst in seiner Souver\u00e4nit\u00e4t eingeschr\u00e4nkt ist.
""",
"de")
| [
11748,
2956,
297,
571,
13,
29572,
11,
2956,
297,
571,
13,
25927,
11,
33918,
198,
198,
41488,
23579,
82,
7203,
15931,
198,
220,
220,
220,
1439,
1692,
9791,
389,
4642,
1479,
290,
4961,
287,
16247,
290,
2489,
13,
220,
198,
220,
220,
22... | 2.754358 | 631 |
from . import FixtureTest
| [
6738,
764,
1330,
376,
9602,
14402,
628
] | 3.857143 | 7 |
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import random
from typing import Optional, Tuple
from shared.insn_yaml import Insn, InsnsFile
from shared.lsu_desc import LSUDesc
from shared.operand import ImmOperandType, RegOperandType
from ..program import ProgInsn, Program
from ..model import Model
from ..snippet import Snippet
from ..snippet_gen import SnippetGen
class StraightLineInsn(SnippetGen):
'''A super-simple snippet consisting of a single instruction'''
def fill_insn(self, insn: Insn, model: Model) -> Optional[ProgInsn]:
'''Try to fill out an instruction
This might fail if, for example, the model doesn't have enough
registers with architectural values. In that case, return None.
'''
# If this is not an LSU operation, or it is an LSU operation that
# operates on CSR/WSRs, we can pick operands independently.
if insn.lsu is None:
# For each operand, pick a value that's allowed by the model (i.e.
# one that won't trigger any undefined behaviour)
op_vals = []
for operand in insn.operands:
op_val = model.pick_operand_value(operand.op_type)
if op_val is None:
return None
op_vals.append(op_val)
assert len(op_vals) == len(insn.operands)
return ProgInsn(insn, op_vals, None)
# If this is an LSU operation, then the target address is given by the
# sum of one or more operands. For each of these operands with a
# register type, we are going to need to look in the model to figure
# out the list of different known values we can give it. At the moment,
# we only support the case when there is at most one non-register
# operand, which must be an immediate. Grab that operand's name too.
lsu_imm_op = None
lsu_reg_ops = []
lsu_reg_types = set()
imm_op_min = 0
imm_op_max = 0
for tgt_op_name in insn.lsu.target:
tgt_op = insn.name_to_operand[tgt_op_name]
if isinstance(tgt_op.op_type, ImmOperandType):
if lsu_imm_op is not None:
raise RuntimeError('Multiple immediate operands '
'contribute to target for instruction '
'{!r}. Not currently supported.'
.format(insn.mnemonic))
lsu_imm_op = tgt_op_name
imm_op_range = tgt_op.op_type.get_op_val_range(model.pc)
if imm_op_range is None:
assert tgt_op.op_type.width is None
raise RuntimeError('The {!r} immediate operand for the '
'{!r} instruction contributes to its '
'LSU target but has no width.'
.format(tgt_op_name, insn.mnemonic))
imm_op_min, imm_op_max = imm_op_range
continue
if isinstance(tgt_op.op_type, RegOperandType):
reg_type = tgt_op.op_type.reg_type
lsu_reg_ops.append((tgt_op_name, reg_type))
lsu_reg_types.add(reg_type)
continue
raise RuntimeError('Unknown operand type for {!r} operand of '
'{!r} instruction: {}.'
.format(tgt_op_name, insn.mnemonic,
type(tgt_op.op_type).__name__))
# We have a list of register operands, together with their types. Get a
# list of registers with known values for each register type we've seen.
known_regs_by_type = {rtype: model.regs_with_known_vals(rtype)
for rtype in lsu_reg_types}
# And turn that into a dict keyed by operand name
op_to_known_regs = {op_name: known_regs_by_type[op_type]
for op_name, op_type in lsu_reg_ops}
# Ask the model to try to find a target we can use. If this is a load
# or a CSR operation, it will have to be an address that already has an
# architectural value. If a store, it can be any address in range.
lsu_type_to_info = {
'mem-load': ('dmem', True),
'mem-store': ('dmem', False),
'csr': ('csr', True),
'wsr': ('wsr', True)
}
assert set(lsu_type_to_info.keys()) == set(LSUDesc.TYPES)
mem_type, loads_value = lsu_type_to_info[insn.lsu.lsu_type]
tgt = model.pick_lsu_target(mem_type,
loads_value,
op_to_known_regs,
imm_op_min,
imm_op_max,
insn.lsu.idx_width)
if tgt is None:
return None
addr, imm_val, reg_indices = tgt
assert imm_op_min <= imm_val <= imm_op_max
enc_vals = []
for operand in insn.operands:
# Is this the immediate? If the immediate operand is signed then
# note that imm_op_min < 0 and we might have that imm_val < 0.
# However, we store everything as an enc_val, so we have to
# "re-encode" here.
if operand.name == lsu_imm_op:
enc_val = operand.op_type.op_val_to_enc_val(imm_val, model.pc)
assert enc_val is not None
enc_vals.append(enc_val)
continue
# Or is it a register operand contributing to the target address?
reg_val = reg_indices.get(operand.name)
if reg_val is not None:
enc_vals.append(reg_val)
continue
# Otherwise it's some other operand. Pick any old value.
val = model.pick_operand_value(operand.op_type)
if val is None:
return None
enc_vals.append(val)
assert len(enc_vals) == len(insn.operands)
return ProgInsn(insn, enc_vals, (mem_type, addr))
| [
2,
15069,
1877,
49,
37719,
20420,
13,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
11,
766,
38559,
24290,
329,
3307,
13,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
198,
11748,
... | 1.998407 | 3,139 |
# from django.http import request
# import pytest
# from django.contrib.auth.models import AnonymousUser
# from django.http.response import Http404, HttpResponse
# from django.shortcuts import get_object_or_404
# from django.test import RequestFactory
# from shortit.shortener.models import ShortUrl
# from shortit.shortener.views import short_url_redirect_view
# from shortit.shortener.tests.factories import UrlFactory
# pytestmark = pytest.mark.django_db
# class TestShortUrlRedirectVieww:
# def test_get_redirect_url(self, short_url: ShortUrl, rf: RequestFactory):
# request = rf.get("/fake-url")
# view = short_url_redirect_view(request)
# obj = get_object_or_404(short_url, shortcode=shortcode)
# = short_url
# view.request = request
# assert HttpResponse == f"{obj.url}/"
| [
2,
422,
42625,
14208,
13,
4023,
1330,
2581,
198,
2,
1330,
12972,
9288,
198,
2,
422,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
19200,
12982,
198,
2,
422,
42625,
14208,
13,
4023,
13,
26209,
1330,
367,
29281,
26429,
11,
... | 2.639752 | 322 |
# -*- coding: utf-8 -*-
#==============================================================================
# Copyright: Hybrid Labs
# Licence: See LICENSE
#==============================================================================
"""
Python to Javascript compiler
"""
from .compiler import js_compile, runtime, JSCode
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
2,
23926,
25609,
855,
201,
198,
2,
15069,
25,
220,
220,
220,
29481,
23500,
201,
198,
2,
10483,
594,
25,
220,
220,
220,
220,
220,
4091,
38559,
24290,
201,
198,
2... | 4.059524 | 84 |
from conans.model.ref import ConanFileReference, PackageReference
| [
6738,
369,
504,
13,
19849,
13,
5420,
1330,
31634,
8979,
26687,
11,
15717,
26687,
628
] | 4.466667 | 15 |
import multiprocessing
pool = None
| [
11748,
18540,
305,
919,
278,
198,
198,
7742,
796,
6045,
628,
198
] | 3.166667 | 12 |
# Copyright 2015 Florian Ludwig
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, with_statement
import sys
import contextlib
import functools
import inspect
from tornado import stack_context, gen
NOT_PROVIDED = object()
SCOPE_CHAIN = None
class Scope(dict):
"""
"""
def get(self, key, default=NOT_PROVIDED, scopes=None):
"""
:param str key:
:param default:
:param list[Scope] scopes:
:param str prefix:
:return: :raise IndexError:
"""
if scopes is None:
if SCOPE_CHAIN:
scopes = list(reversed(SCOPE_CHAIN))
else:
scopes = [self]
if key == 'scope':
return self
for i, scope in enumerate(scopes):
if key in scope:
return scope[key]
elif key in scope._provider:
scope[key] = scope._provider[key]()
del scope._provider[key]
return scope[key]
elif key in scope._subscopes:
return SubScopeView(key, scopes)
if default is not NOT_PROVIDED:
return default
msg = 'No value for "{}" stored and no default given'.format(key)
raise IndexError(msg)
@contextlib.contextmanager
| [
2,
15069,
1853,
4432,
666,
44476,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
921,
743,
7330,
1... | 2.4753 | 749 |
gen = genPrimes()
print gen.next()
print gen.next()
print gen.next()
print gen.next() | [
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
198,
5235,
796,
2429,
6836,
999,
3419,
198,
4798,
2429,
13,
19545,
3419,
198,
4798,
2429,
13,
19545,
3419,
198,
4798,
2429,
13,
19545,
3419,
198,
4798,
2429,
13,
19545,
341... | 2.27907 | 43 |
# Generated by Django 3.1.2 on 2020-11-15 11:40
import django.contrib.auth.models
import django.contrib.postgres.fields.jsonb
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
17,
319,
12131,
12,
1157,
12,
1314,
1367,
25,
1821,
198,
198,
11748,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
198,
11748,
42625,
14208,
13,
3642,
822,
13,
7353,
34239,
13,
25747,
... | 3.012821 | 78 |
import fire
from pi_camera_capture.app import main
if __name__ == "__main__":
cli()
| [
11748,
2046,
198,
6738,
31028,
62,
25695,
62,
27144,
495,
13,
1324,
1330,
1388,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
537,
72,
3419,
198
] | 2.6 | 35 |
# Word list for hangman
words = ["aback", "abaft", "abandoned", "abashed", "aberrant", "abhorrent", "abiding", "abject", "ablaze", "able",
"abnormal", "aboard", "aboriginal", "abortive", "abounding", "abrasive", "abrupt", "absent", "absorbed",
"absorbing", "abstracted", "absurd", "abundant", "abusive", "accept", "acceptable", "accessible", "accidental",
"account", "accurate", "achiever", "acid", "acidic", "acoustic", "acoustics", "acrid", "act", "action",
"activity", "actor", "actually", "ad hoc", "adamant", "adaptable", "add", "addicted", "addition", "adhesive",
"adjoining", "adjustment", "admire", "admit", "adorable", "adventurous", "advertisement", "advice", "advise",
"afford", "afraid", "aftermath", "afternoon", "afterthought", "aggressive", "agonizing", "agree", "agreeable",
"agreement", "ahead", "air", "airplane", "airport", "ajar", "alarm", "alcoholic", "alert", "alike", "alive",
"alleged", "allow", "alluring", "aloof", "amazing", "ambiguous", "ambitious", "amount", "amuck", "amuse",
"amused", "amusement", "amusing", "analyze", "ancient", "anger", "angle", "angry", "animal", "animated",
"announce", "annoy", "annoyed", "annoying", "answer", "ants", "anxious", "apathetic", "apologise", "apparatus",
"apparel", "appear", "applaud", "appliance", "appreciate", "approval", "approve", "aquatic", "arch", "argue",
"argument", "arithmetic", "arm", "army", "aromatic", "arrange", "arrest", "arrive", "arrogant", "art",
"ashamed", "ask", "aspiring", "assorted", "astonishing", "attach", "attack", "attempt", "attend", "attract",
"attraction", "attractive", "aunt", "auspicious", "authority", "automatic", "available", "average", "avoid",
"awake", "aware", "awesome", "awful", "axiomatic", "babies", "baby", "back", "bad", "badge", "bag", "bait",
"bake", "balance", "ball", "ban", "bang", "barbarous", "bare", "base", "baseball", "bashful", "basin", "basket",
"basketball", "bat", "bath", "bathe", "battle", "bawdy", "bead", "beam", "bear", "beautiful", "bed", "bedroom",
"beds", "bee", "beef", "befitting", "beg", "beginner", "behave", "behavior", "belief", "believe", "bell",
"belligerent", "bells", "belong", "beneficial", "bent", "berry", "berserk", "best", "better", "bewildered",
"big", "bike", "bikes", "billowy", "bird", "birds", "birth", "birthday", "bit", "bite", "bite-sized", "bitter",
"bizarre", "black", "black-and-white", "blade", "bleach", "bless", "blind", "blink", "blood", "bloody", "blot",
"blow", "blue", "blue-eyed", "blush", "blushing", "board", "boast", "boat", "boil", "boiling", "bolt", "bomb",
"bone", "book", "books", "boorish", "boot", "border", "bore", "bored", "boring", "borrow", "bottle", "bounce",
"bouncy", "boundary", "boundless", "bow", "box", "boy", "brainy", "brake", "branch", "brash", "brass", "brave",
"brawny", "breakable", "breath", "breathe", "breezy", "brick", "bridge", "brief", "bright", "broad", "broken",
"brother", "brown", "bruise", "brush", "bubble", "bucket", "building", "bulb", "bump", "bumpy", "burly", "burn",
"burst", "bury", "bushes", "business", "bustling", "busy", "butter", "button", "buzz", "cabbage", "cable",
"cactus", "cagey", "cake", "cakes", "calculate", "calculating", "calculator", "calendar", "call", "callous",
"calm", "camera", "camp", "can", "cannon", "canvas", "cap", "capable", "capricious", "caption", "car", "card",
"care", "careful", "careless", "caring", "carpenter", "carriage", "carry", "cars", "cart", "carve", "cast",
"cat", "cats", "cattle", "cause", "cautious", "cave", "ceaseless", "celery", "cellar", "cemetery", "cent",
"certain", "chalk", "challenge", "chance", "change", "changeable", "channel", "charge", "charming", "chase",
"cheap", "cheat", "check", "cheer", "cheerful", "cheese", "chemical", "cherries", "cherry", "chess", "chew",
"chicken", "chickens", "chief", "childlike", "children", "chilly", "chin", "chivalrous", "choke", "chop",
"chubby", "chunky", "church", "circle", "claim", "clam", "clammy", "clap", "class", "classy", "clean", "clear",
"clever", "clip", "cloistered", "close", "closed", "cloth", "cloudy", "clover", "club", "clumsy", "cluttered",
"coach", "coal", "coast", "coat", "cobweb", "coherent", "coil", "cold", "collar", "collect", "color",
"colorful", "colossal", "colour", "comb", "combative", "comfortable", "command", "committee", "common",
"communicate", "company", "compare", "comparison", "compete", "competition", "complain", "complete", "complex",
"concentrate", "concern", "concerned", "condemned", "condition", "confess", "confuse", "confused", "connect",
"connection", "conscious", "consider", "consist", "contain", "continue", "control", "cooing", "cook", "cool",
"cooperative", "coordinated", "copper", "copy", "corn", "correct", "cough", "count", "country", "courageous",
"cover", "cow", "cowardly", "cows", "crabby", "crack", "cracker", "crash", "crate", "craven", "crawl", "crayon",
"crazy", "cream", "creator", "creature", "credit", "creepy", "crib", "crime", "crook", "crooked", "cross",
"crow", "crowd", "crowded", "crown", "cruel", "crush", "cry", "cub", "cuddly", "cultured", "cumbersome", "cup",
"cure", "curious", "curl", "curly", "current", "curtain", "curve", "curved", "curvy", "cushion", "cut", "cute",
"cycle", "cynical", "dad", "daffy", "daily", "dam", "damage", "damaged", "damaging", "damp", "dance",
"dangerous", "dapper", "dare", "dark", "dashing", "daughter", "day", "dazzling", "dead", "deadpan", "deafening",
"dear", "death", "debonair", "debt", "decay", "deceive", "decide", "decision", "decisive", "decorate",
"decorous", "deep", "deeply", "deer", "defeated", "defective", "defiant", "degree", "delay", "delicate",
"delicious", "delight", "delightful", "delirious", "deliver", "demonic", "depend", "dependent", "depressed",
"deranged", "describe", "descriptive", "desert", "deserted", "deserve", "design", "desire", "desk", "destroy",
"destruction", "detail", "detailed", "detect", "determined", "develop", "development", "devilish", "didactic",
"different", "difficult", "digestion", "diligent", "dime", "dinner", "dinosaurs", "direction", "direful",
"dirt", "dirty", "disagree", "disagreeable", "disappear", "disapprove", "disarm", "disastrous", "discover",
"discovery", "discreet", "discussion", "disgusted", "disgusting", "disillusioned", "dislike", "dispensable",
"distance", "distinct", "distribution", "disturbed", "divergent", "divide", "division", "dizzy", "dock",
"doctor", "dog", "dogs", "doll", "dolls", "domineering", "donkey", "door", "double", "doubt", "doubtful",
"downtown", "drab", "draconian", "drag", "drain", "dramatic", "drawer", "dream", "dreary", "dress", "drink",
"drip", "driving", "drop", "drown", "drum", "drunk", "dry", "duck", "ducks", "dull", "dust", "dusty", "dynamic",
"dysfunctional", "eager", "ear", "early", "earn", "earsplitting", "earth", "earthquake", "earthy", "easy",
"eatable", "economic", "edge", "educate", "educated", "education", "effect", "efficacious", "efficient", "egg",
"eggnog", "eggs", "eight", "elastic", "elated", "elbow", "elderly", "electric", "elegant", "elfin", "elite",
"embarrass", "embarrassed", "eminent", "employ", "empty", "enchanted", "enchanting", "encourage", "encouraging",
"end", "endurable", "energetic", "engine", "enjoy", "enormous", "enter", "entertain", "entertaining",
"enthusiastic", "envious", "equable", "equal", "erect", "erratic", "error", "escape", "ethereal", "evanescent",
"evasive", "even", "event", "examine", "example", "excellent", "exchange", "excite", "excited", "exciting",
"exclusive", "excuse", "exercise", "exist", "existence", "exotic", "expand", "expansion", "expect", "expensive",
"experience", "expert", "explain", "explode", "extend", "extra-large", "extra-small", "exuberant", "exultant",
"eye", "eyes", "fabulous", "face", "fact", "fade", "faded", "fail", "faint", "fair", "fairies", "faithful",
"fall", "fallacious", "false", "familiar", "famous", "fanatical", "fancy", "fang", "fantastic", "far",
"far-flung", "farm", "fascinated", "fast", "fasten", "fat", "faulty", "fax", "fear", "fearful", "fearless",
"feeble", "feeling", "feigned", "female", "fence", "fertile", "festive", "fetch", "few", "field", "fierce",
"file", "fill", "film", "filthy", "fine", "finger", "finicky", "fire", "fireman", "first", "fish", "fit",
"five", "fix", "fixed", "flag", "flagrant", "flaky", "flame", "flap", "flash", "flashy", "flat", "flavor",
"flawless", "flesh", "flight", "flimsy", "flippant", "float", "flock", "flood", "floor", "flow", "flower",
"flowers", "flowery", "fluffy", "fluttering", "fly", "foamy", "fog", "fold", "follow", "food", "fool",
"foolish", "foot", "force", "foregoing", "forgetful", "fork", "form", "fortunate", "found", "four", "fowl",
"fragile", "frail", "frame", "frantic", "free", "freezing", "frequent", "fresh", "fretful", "friction",
"friend", "friendly", "friends", "frighten", "frightened", "frightening", "frog", "frogs", "front", "fruit",
"fry", "fuel", "full", "fumbling", "functional", "funny", "furniture", "furry", "furtive", "future",
"futuristic", "fuzzy", "gabby", "gainful", "gamy", "gaping", "garrulous", "gate", "gather", "gaudy", "gaze",
"geese", "general", "gentle", "ghost", "giant", "giants", "giddy", "gifted", "gigantic", "giraffe", "girl",
"girls", "glamorous", "glass", "gleaming", "glib", "glistening", "glorious", "glossy", "glove", "glow", "glue",
"godly", "gold", "good", "goofy", "gorgeous", "government", "governor", "grab", "graceful", "grade", "grain",
"grandfather", "grandiose", "grandmother", "grape", "grass", "grate", "grateful", "gratis", "gray", "grease",
"greasy", "great", "greedy", "green", "greet", "grey", "grieving", "grin", "grip", "groan", "groovy",
"grotesque", "grouchy", "ground", "group", "growth", "grubby", "gruesome", "grumpy", "guarantee", "guard",
"guarded", "guess", "guide", "guiltless", "guitar", "gullible", "gun", "gusty", "guttural", "habitual", "hair",
"haircut", "half", "hall", "hallowed", "halting", "hammer", "hand", "handle", "hands", "handsome", "handsomely",
"handy", "hang", "hanging", "hapless", "happen", "happy", "harass", "harbor", "hard", "hard-to-find", "harm",
"harmonious", "harmony", "harsh", "hat", "hate", "hateful", "haunt", "head", "heady", "heal", "health",
"healthy", "heap", "heartbreaking", "heat", "heavenly", "heavy", "hellish", "help", "helpful", "helpless",
"hesitant", "hideous", "high", "high-pitched", "highfalutin", "hilarious", "hill", "hissing", "historical",
"history", "hobbies", "hole", "holiday", "holistic", "hollow", "home", "homeless", "homely", "honey",
"honorable", "hook", "hop", "hope", "horn", "horrible", "horse", "horses", "hose", "hospitable", "hospital",
"hot", "hour", "house", "houses", "hover", "hug", "huge", "hulking", "hum", "humdrum", "humor", "humorous",
"hungry", "hunt", "hurried", "hurry", "hurt", "hushed", "husky", "hydrant", "hypnotic", "hysterical", "ice",
"icicle", "icky", "icy", "idea", "identify", "idiotic", "ignorant", "ignore", "ill", "ill-fated",
"ill-informed", "illegal", "illustrious", "imaginary", "imagine", "immense", "imminent", "impartial",
"imperfect", "impolite", "important", "imported", "impossible", "impress", "improve", "impulse", "incandescent",
"include", "income", "incompetent", "inconclusive", "increase", "incredible", "industrious", "industry",
"inexpensive", "infamous", "influence", "inform", "inject", "injure", "ink", "innate", "innocent",
"inquisitive", "insect", "insidious", "instinctive", "instruct", "instrument", "insurance", "intelligent",
"intend", "interest", "interesting", "interfere", "internal", "interrupt", "introduce", "invent", "invention",
"invincible", "invite", "irate", "iron", "irritate", "irritating", "island", "itch", "itchy", "jaded", "jagged",
"jail", "jam", "jar", "jazzy", "jealous", "jeans", "jelly", "jellyfish", "jewel", "jittery", "jobless", "jog",
"join", "joke", "jolly", "joyous", "judge", "judicious", "juggle", "juice", "juicy", "jumbled", "jump", "jumpy",
"juvenile", "kaput", "keen", "kettle", "key", "kick", "kill", "kind", "kindhearted", "kindly", "kiss",
"kittens", "kitty", "knee", "kneel", "knife", "knit", "knock", "knot", "knotty", "knowing", "knowledge",
"knowledgeable", "known", "label", "labored", "laborer", "lace", "lackadaisical", "lacking", "ladybug", "lake",
"lame", "lamentable", "lamp", "land", "language", "languid", "large", "last", "late", "laugh", "laughable",
"launch", "lavish", "lazy", "lean", "learn", "learned", "leather", "left", "leg", "legal", "legs", "lethal",
"letter", "letters", "lettuce", "level", "lewd", "library", "license", "lick", "lie", "light", "lighten",
"like", "likeable", "limit", "limping", "line", "linen", "lip", "liquid", "list", "listen", "literate",
"little", "live", "lively", "living", "load", "loaf", "lock", "locket", "lonely", "long", "long-term",
"longing", "look", "loose", "lopsided", "loss", "loud", "loutish", "love", "lovely", "loving", "low", "lowly",
"lucky", "ludicrous", "lumber", "lumpy", "lunch", "lunchroom", "lush", "luxuriant", "lying", "lyrical",
"macabre", "machine", "macho", "maddening", "madly", "magenta", "magic", "magical", "magnificent", "maid",
"mailbox", "majestic", "makeshift", "male", "malicious", "mammoth", "man", "manage", "maniacal", "many",
"marble", "march", "mark", "marked", "market", "married", "marry", "marvelous", "mask", "mass", "massive",
"match", "mate", "material", "materialistic", "matter", "mature", "meal", "mean", "measly", "measure", "meat",
"meaty", "meddle", "medical", "meek", "meeting", "mellow", "melodic", "melt", "melted", "memorize", "memory",
"men", "mend", "merciful", "mere", "mess up", "messy", "metal", "mice", "middle", "mighty", "military", "milk",
"milky", "mind", "mindless", "mine", "miniature", "minister", "minor", "mint", "minute", "miscreant", "miss",
"mist", "misty", "mitten", "mix", "mixed", "moan", "moaning", "modern", "moldy", "mom", "momentous", "money",
"monkey", "month", "moon", "moor", "morning", "mother", "motion", "motionless", "mountain", "mountainous",
"mourn", "mouth", "move", "muddle", "muddled", "mug", "multiply", "mundane", "murder", "murky", "muscle",
"mushy", "mute", "mysterious", "nail", "naive", "name", "nappy", "narrow", "nasty", "nation", "natural",
"naughty", "nauseating", "near", "neat", "nebulous", "necessary", "neck", "need", "needle", "needless", "needy",
"neighborly", "nerve", "nervous", "nest", "new", "next", "nice", "nifty", "night", "nimble", "nine", "nippy",
"nod", "noise", "noiseless", "noisy", "nonchalant", "nondescript", "nonstop", "normal", "north", "nose",
"nostalgic", "nosy", "note", "notebook", "notice", "noxious", "null", "number", "numberless", "numerous", "nut",
"nutritious", "nutty", "oafish", "oatmeal", "obedient", "obeisant", "obese", "obey", "object", "obnoxious",
"obscene", "obsequious", "observant", "observation", "observe", "obsolete", "obtain", "obtainable", "occur",
"ocean", "oceanic", "odd", "offbeat", "offend", "offer", "office", "oil", "old", "old-fashioned", "omniscient",
"one", "onerous", "open", "opposite", "optimal", "orange", "oranges", "order", "ordinary", "organic",
"ossified", "outgoing", "outrageous", "outstanding", "oval", "oven", "overconfident", "overflow", "overjoyed",
"overrated", "overt", "overwrought", "owe", "own", "pack", "paddle", "page", "pail", "painful", "painstaking",
"paint", "pale", "paltry", "pan", "pancake", "panicky", "panoramic", "paper", "parallel", "parcel", "parched",
"park", "parsimonious", "part", "partner", "party", "pass", "passenger", "past", "paste", "pastoral", "pat",
"pathetic", "pause", "payment", "peace", "peaceful", "pear", "peck", "pedal", "peel", "peep", "pen", "pencil",
"penitent", "perfect", "perform", "periodic", "permissible", "permit", "perpetual", "person", "pest", "pet",
"petite", "pets", "phobic", "phone", "physical", "picayune", "pick", "pickle", "picture", "pie", "pies", "pig",
"pigs", "pin", "pinch", "pine", "pink", "pipe", "piquant", "pizzas", "place", "placid", "plain", "plan",
"plane", "planes", "plant", "plantation", "plants", "plastic", "plate", "plausible", "play", "playground",
"pleasant", "please", "pleasure", "plot", "plough", "plucky", "plug", "pocket", "point", "pointless", "poised",
"poison", "poke", "polish", "polite", "political", "pollution", "poor", "pop", "popcorn", "porter", "position",
"possess", "possessive", "possible", "post", "pot", "potato", "pour", "powder", "power", "powerful", "practice",
"pray", "preach", "precede", "precious", "prefer", "premium", "prepare", "present", "preserve", "press",
"pretend", "pretty", "prevent", "previous", "price", "pricey", "prick", "prickly", "print", "private",
"probable", "produce", "productive", "profit", "profuse", "program", "promise", "property", "prose", "protect",
"protective", "protest", "proud", "provide", "psychedelic", "psychotic", "public", "puffy", "pull", "pump",
"pumped", "punch", "puncture", "punish", "punishment", "puny", "purple", "purpose", "purring", "push", "pushy",
"puzzled", "puzzling", "quack", "quaint", "quarrelsome", "quarter", "quartz", "queen", "question",
"questionable", "queue", "quick", "quickest", "quicksand", "quiet", "quill", "quilt", "quince", "quirky",
"quiver", "quixotic", "quizzical", "rabbit", "rabbits", "rabid", "race", "racial", "radiate", "ragged", "rail",
"railway", "rain", "rainstorm", "rainy", "raise", "rake", "rambunctious", "rampant", "range", "rapid", "rare",
"raspy", "rat", "rate", "ratty", "ray", "reach", "reaction", "reading", "ready", "real", "realize", "reason",
"rebel", "receipt", "receive", "receptive", "recess", "recognise", "recondite", "record", "red", "reduce",
"redundant", "reflect", "reflective", "refuse", "regret", "regular", "reign", "reject", "rejoice", "relation",
"relax", "release", "relieved", "religion", "rely", "remain", "remarkable", "remember", "remind", "reminiscent",
"remove", "repair", "repeat", "replace", "reply", "report", "representative", "reproduce", "repulsive",
"request", "rescue", "resolute", "resonant", "respect", "responsible", "rest", "retire", "return", "reward",
"rhetorical", "rhyme", "rhythm", "rice", "rich", "riddle", "rifle", "right", "righteous", "rightful", "rigid",
"ring", "rings", "rinse", "ripe", "risk", "ritzy", "river", "road", "roasted", "rob", "robin", "robust", "rock",
"rod", "roll", "romantic", "roof", "room", "roomy", "root", "rose", "rot", "rotten", "rough", "round", "route",
"royal", "rub", "ruddy", "rude", "ruin", "rule", "run", "rural", "rush", "rustic", "ruthless", "sable", "sack",
"sad", "safe", "sail", "salt", "salty", "same", "sand", "sassy", "satisfy", "satisfying", "save", "savory",
"saw", "scale", "scandalous", "scarce", "scare", "scarecrow", "scared", "scarf", "scary", "scatter",
"scattered", "scene", "scent", "school", "science", "scientific", "scintillating", "scissors", "scold",
"scorch", "scrape", "scratch", "scrawny", "scream", "screeching", "screw", "scribble", "scrub", "sea", "seal",
"search", "seashore", "seat", "second", "second-hand", "secret", "secretary", "secretive", "sedate", "seed",
"seemly", "selection", "selective", "self", "selfish", "sense", "separate", "serious", "servant", "serve",
"settle", "shade", "shaggy", "shake", "shaky", "shallow", "shame", "shape", "share", "sharp", "shave", "sheep",
"sheet", "shelf", "shelter", "shiny", "ship", "shirt", "shiver", "shivering", "shock", "shocking", "shoe",
"shoes", "shop", "short", "show", "shrill", "shrug", "shut", "shy", "sick", "side", "sidewalk", "sigh", "sign",
"signal", "silent", "silk", "silky", "silly", "silver", "simple", "simplistic", "sin", "sincere", "sink", "sip",
"sister", "sisters", "six", "size", "skate", "ski", "skillful", "skin", "skinny", "skip", "skirt", "sky",
"slap", "slave", "sleep", "sleepy", "sleet", "slim", "slimy", "slip", "slippery", "slope", "sloppy", "slow",
"small", "smart", "smash", "smell", "smelly", "smile", "smiling", "smoggy", "smoke", "smooth", "snail",
"snails", "snake", "snakes", "snatch", "sneaky", "sneeze", "sniff", "snobbish", "snore", "snotty", "snow",
"soak", "soap", "society", "sock", "soda", "sofa", "soft", "soggy", "solid", "somber", "son", "song", "songs",
"soothe", "sophisticated", "sordid", "sore", "sort", "sound", "soup", "sour", "space", "spade", "spare",
"spark", "sparkle", "sparkling", "special", "spectacular", "spell", "spicy", "spiders", "spiffy", "spiky",
"spill", "spiritual", "spiteful", "splendid", "spoil", "sponge", "spooky", "spoon", "spot", "spotless",
"spotted", "spotty", "spray", "spring", "sprout", "spurious", "spy", "squalid", "square", "squash", "squeak",
"squeal", "squealing", "squeamish", "squeeze", "squirrel", "stage", "stain", "staking", "stale", "stamp",
"standing", "star", "stare", "start", "statement", "station", "statuesque", "stay", "steadfast", "steady",
"steam", "steel", "steep", "steer", "stem", "step", "stereotyped", "stew", "stick", "sticks", "sticky", "stiff",
"stimulating", "stingy", "stir", "stitch", "stocking", "stomach", "stone", "stop", "store", "stormy", "story",
"stove", "straight", "strange", "stranger", "strap", "straw", "stream", "street", "strengthen", "stretch",
"string", "strip", "striped", "stroke", "strong", "structure", "stuff", "stupendous", "stupid", "sturdy",
"subdued", "subsequent", "substance", "substantial", "subtract", "succeed", "successful", "succinct", "suck",
"sudden", "suffer", "sugar", "suggest", "suggestion", "suit", "sulky", "summer", "sun", "super", "superb",
"superficial", "supply", "support", "suppose", "supreme", "surprise", "surround", "suspect", "suspend",
"swanky", "sweater", "sweet", "sweltering", "swift", "swim", "swing", "switch", "symptomatic", "synonymous",
"system", "table", "taboo", "tacit", "tacky", "tail", "talented", "talk", "tall", "tame", "tan", "tangible",
"tangy", "tank", "tap", "tart", "taste", "tasteful", "tasteless", "tasty", "tawdry", "tax", "teaching", "team",
"tearful", "tease", "tedious", "teeny", "teeny-tiny", "teeth", "telephone", "telling", "temper", "temporary",
"tempt", "ten", "tendency", "tender", "tense", "tent", "tenuous", "terrible", "terrific", "terrify",
"territory", "test", "tested", "testy", "texture", "thank", "thankful", "thaw", "theory", "therapeutic",
"thick", "thin", "thing", "things", "thinkable", "third", "thirsty", "thought", "thoughtful", "thoughtless",
"thread", "threatening", "three", "thrill", "throat", "throne", "thumb", "thunder", "thundering", "tick",
"ticket", "tickle", "tidy", "tie", "tiger", "tight", "tightfisted", "time", "tin", "tiny", "tip", "tire",
"tired", "tiresome", "title", "toad", "toe", "toes", "tomatoes", "tongue", "tooth", "toothbrush", "toothpaste",
"toothsome", "top", "torpid", "touch", "tough", "tour", "tow", "towering", "town", "toy", "toys", "trace",
"trade", "trail", "train", "trains", "tramp", "tranquil", "transport", "trap", "trashy", "travel", "tray",
"treat", "treatment", "tree", "trees", "tremble", "tremendous", "trick", "tricky", "trip", "trite", "trot",
"trouble", "troubled", "trousers", "truck", "trucks", "truculent", "true", "trust", "truthful", "try", "tub",
"tug", "tumble", "turkey", "turn", "twig", "twist", "two", "type", "typical", "ubiquitous", "ugliest", "ugly",
"ultra", "umbrella", "unable", "unaccountable", "unadvised", "unarmed", "unbecoming", "unbiased", "uncle",
"uncovered", "understood", "underwear", "undesirable", "undress", "unequal", "unequaled", "uneven", "unfasten",
"unhealthy", "uninterested", "unique", "unit", "unite", "unkempt", "unknown", "unlock", "unnatural", "unpack",
"unruly", "unsightly", "unsuitable", "untidy", "unused", "unusual", "unwieldy", "unwritten", "upbeat", "uppity",
"upset", "uptight", "use", "used", "useful", "useless", "utopian", "utter", "uttermost", "vacation", "vacuous",
"vagabond", "vague", "valuable", "value", "van", "vanish", "various", "vase", "vast", "vegetable", "veil",
"vein", "vengeful", "venomous", "verdant", "verse", "versed", "vessel", "vest", "victorious", "view",
"vigorous", "violent", "violet", "visit", "visitor", "vivacious", "voice", "voiceless", "volatile", "volcano",
"volleyball", "voracious", "voyage", "vulgar", "wacky", "waggish", "wail", "wait", "waiting", "wakeful", "walk",
"wall", "wander", "wandering", "want", "wanting", "war", "warlike", "warm", "warn", "wary", "wash", "waste",
"wasteful", "watch", "water", "watery", "wave", "waves", "wax", "way", "weak", "wealth", "wealthy", "weary",
"weather", "week", "weigh", "weight", "welcome", "well-groomed", "well-made", "well-off", "well-to-do", "wet",
"wheel", "whimsical", "whine", "whip", "whirl", "whisper", "whispering", "whistle", "white", "whole",
"wholesale", "wicked", "wide", "wide-eyed", "wiggly", "wild", "wilderness", "willing", "wind", "window",
"windy", "wine", "wing", "wink", "winter", "wipe", "wire", "wiry", "wise", "wish", "wistful", "witty", "wobble",
"woebegone", "woman", "womanly", "women", "wonder", "wonderful", "wood", "wooden", "wool", "woozy", "word",
"work", "workable", "worm", "worried", "worry", "worthless", "wound", "wrap", "wrathful", "wreck", "wren",
"wrench", "wrestle", "wretched", "wriggle", "wrist", "writer", "writing", "wrong", "wry", "x-ray", "yak", "yam",
"yard", "yarn", "yawn", "year", "yell", "yellow", "yielding", "yoke", "young", "youthful", "yummy", "zany",
"zealous", "zebra", "zephyr", "zesty", "zinc", "zip", "zipper", "zippy", "zonked", "zoo", "zoom"]
| [
2,
9678,
1351,
329,
8181,
805,
201,
198,
10879,
796,
14631,
397,
441,
1600,
366,
397,
14940,
1600,
366,
397,
5063,
276,
1600,
366,
397,
5263,
1600,
366,
27359,
5250,
1600,
366,
397,
17899,
1156,
1600,
366,
43056,
1600,
366,
397,
752,
... | 2.365854 | 11,521 |
import os
train_src="../dynet_nmt/data/train.de-en.de.wmixerprep"
train_tgt="../dynet_nmt/data/train.de-en.en.wmixerprep"
dev_src="../dynet_nmt/data/valid.de-en.de"
dev_tgt="../dynet_nmt/data/valid.de-en.en"
test_src="../dynet_nmt/data/test.de-en.de"
test_tgt="../dynet_nmt/data/test.de-en.en"
for temp in [0.6, 0.8]: # 0.75, 0.80, 0.85, 0.90, 0.95, 1.0
job_name = 'iwslt14.raml.512enc.corrupt_ngram.t%.3f' % temp
train_log = 'train.' + job_name + '.log'
model_name = 'model.' + job_name
job_file = 'scripts/train.%s.sh' % job_name
decode_file = job_name + '.test.en'
with open(job_file, 'w') as f:
f.write("""#!/bin/sh
python nmt.py \
--cuda \
--mode raml_train \
--vocab iwslt.vocab.bin \
--save_to models/{model_name} \
--valid_niter 15400 \
--valid_metric ppl \
--beam_size 5 \
--batch_size 10 \
--sample_size 10 \
--hidden_size 256 \
--embed_size 256 \
--uniform_init 0.1 \
--dropout 0.2 \
--clip_grad 5.0 \
--lr_decay 0.5 \
--temp {temp} \
--raml_sample_file samples.corrupt_ngram.bleu_score.txt \
--train_src {train_src} \
--train_tgt {train_tgt} \
--dev_src {dev_src} \
--dev_tgt {dev_tgt} 2>logs/{train_log}
python nmt.py \
--cuda \
--mode test \
--load_model models/{model_name}.bin \
--beam_size 5 \
--decode_max_time_step 100 \
--save_to_file decode/{decode_file} \
--test_src {test_src} \
--test_tgt {test_tgt}
echo "test result" >> logs/{train_log}
perl multi-bleu.perl {test_tgt} < decode/{decode_file} >> logs/{train_log}
""".format(model_name=model_name, temp=temp,
train_src=train_src, train_tgt=train_tgt,
dev_src=dev_src, dev_tgt=dev_tgt,
test_src=test_src, test_tgt=test_tgt,
train_log=train_log, decode_file=decode_file))
os.system('bash submit_job.sh %s' % job_file)
| [
11748,
28686,
198,
198,
27432,
62,
10677,
2625,
40720,
67,
2047,
316,
62,
77,
16762,
14,
7890,
14,
27432,
13,
2934,
12,
268,
13,
2934,
13,
86,
19816,
263,
46012,
1,
198,
27432,
62,
83,
13655,
2625,
40720,
67,
2047,
316,
62,
77,
16... | 2.039173 | 919 |
import numpy as np
def calculate_probs(predicted_classes, num_classes):
'''
This function is to calculate the probabilities for each class given the softmax output
:param predicted_classes: matrix num_datapoints X num_ensembles (or dropout_iterations)
:param num_classes:
:return: For each datapoint it returns a vector with 10 elements, corresponding to the prob of each class
'''
probs = np.mean(predicted_classes,axis = 1)
return probs | [
11748,
299,
32152,
355,
45941,
628,
198,
4299,
15284,
62,
1676,
1443,
7,
28764,
5722,
62,
37724,
11,
997,
62,
37724,
2599,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
770,
2163,
318,
284,
15284,
262,
39522,
329,
1123,
1398,
1... | 3.189189 | 148 |
from __future__ import unicode_literals
from django.db import models
import datetime as dt
from django.contrib.auth.mixins import LoginRequiredMixin
from django.dispatch import receiver
from django.db.models.signals import (post_save,pre_save,)
# from PIL import Image
from django.core.files import File
from django.dispatch import receiver
from django.contrib.auth.models import User
from cloudinary.models import CloudinaryField
from phonenumber_field.modelfields import PhoneNumberField
import numpy as np
from django.db.models import Avg, Max, Min
# Create your models here.
post_save.connect(create_profile, sender = User)
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
11748,
4818,
8079,
355,
288,
83,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
19816,
1040,
1330,
23093,
37374,
356... | 3.489011 | 182 |
import torchvision
from torch import nn
def deeplabv3_resnet50_features(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on Coco
"""
model = torchvision.models.segmentation.deeplabv3_resnet50(pretrained=pretrained)
model.classifier._modules = {k: model.classifier._modules[k] for k in list(model.classifier._modules.keys())[:-1]}
return DeeplabV3_features(model, [3, 4, 6, 3], **kwargs)
| [
11748,
28034,
10178,
198,
6738,
28034,
1330,
299,
77,
628,
198,
198,
4299,
390,
68,
489,
397,
85,
18,
62,
411,
3262,
1120,
62,
40890,
7,
5310,
13363,
28,
25101,
11,
12429,
46265,
22046,
2599,
198,
220,
220,
220,
37227,
42316,
82,
25... | 2.663102 | 187 |
import abc
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional
from globus_sdk.response import GlobusHTTPResponse
class Paginator(Iterable[GlobusHTTPResponse], metaclass=abc.ABCMeta):
"""
Base class for all paginators.
This guarantees is that they have generator methods named ``pages`` and ``items``.
Iterating on a Paginator is equivalent to iterating on its ``pages``.
:param method: A bound method of an SDK client, used to generate a paginated variant
:type method: callable
:param items_key: The key to use within pages of results to get an array of items
:type items_key: str
:param client_args: Arguments to the underlying method which are passed when the
paginator is instantiated. i.e. given ``client.paginated.foo(a, b, c=1)``, this
will be ``(a, b)``. The paginator will pass these arguments to each call of the
bound method as it pages.
:type client_args: tuple
:param client_kwargs: Keyword arguments to the underlying method, like
``client_args`` above. ``client.paginated.foo(a, b, c=1)`` will pass this as
``{"c": 1}``. As with ``client_args``, it's passed to each paginated call.
:type client_kwargs: dict
"""
@abc.abstractmethod
def pages(self) -> Iterator[GlobusHTTPResponse]:
"""``pages()`` yields GlobusHTTPResponse objects, each one representing a page
of results."""
def items(self) -> Iterator:
"""
``items()`` of a paginator is a generator which yields each item in each page of
results.
``items()`` may raise a ``ValueError`` if the paginator was constructed without
identifying a key for use within each page of results. This may be the case for
paginators whose pages are not primarily an array of data.
"""
if self.items_key is None:
raise ValueError(
"Cannot provide items() iteration on a paginator where 'items_key' "
"is not set."
)
for page in self.pages():
yield from page[self.items_key]
| [
11748,
450,
66,
198,
6738,
19720,
1330,
4377,
11,
4889,
540,
11,
360,
713,
11,
40806,
540,
11,
40806,
1352,
11,
7343,
11,
32233,
198,
198,
6738,
15095,
385,
62,
21282,
74,
13,
26209,
1330,
40713,
385,
6535,
51,
4805,
9774,
2591,
628... | 2.755208 | 768 |
# -*- coding: utf-8 -*-
"""Top-level package for pysurf."""
__author__ = """Maximilian F.S.J. Menger, Johannes Ehrmaier"""
__email__ = 'menger.maximilian@gmail.com'
__version__ = '0.1.0'
#
import os
#
from colt import PluginLoader
from .spp.spp import SurfacePointProvider
from .spp import AbinitioBase, Model, Interpolator
# load plugins
base = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
core_plugins = os.path.join(base, "core_plugins")
user_plugins = os.path.join(base, "plugins")
# load core plugins
PluginLoader(core_plugins, ignorefile='plugins.ini')
# load user plugins
PluginLoader(user_plugins, ignorefile='plugins.ini')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
9126,
12,
5715,
5301,
329,
279,
893,
333,
69,
526,
15931,
198,
198,
834,
9800,
834,
796,
37227,
11518,
26641,
666,
376,
13,
50,
13,
41,
13,
6065,
1362,
1... | 2.793991 | 233 |
"""
This holds functionality to get commands, and parse commands
"""
from quick_netmiko import QuickNetmiko
from pyats_genie_command_parse import GenieCommandParse
def command_parse(python_dict, fifo_queue, thread_lock): # pylint: disable=inconsistent-return-statements
"""Function to get and parse commands from devices
:type python_dict: Dict
:param python_dict: A dictionary of connection data
:type fifo_queue: queue.Queue Object
:param fifo_queue: The FIFO queue
:type thread_lock: threading.Lock Object
:param thread_lock: The thread lock
:rtype: None
:returns: None, but it does put a item in the fifo_queue
"""
with thread_lock:
allowed_device_types = {'ios', 'iosxe', 'iosxr', 'nxos'}
if python_dict.get('device_type') not in allowed_device_types:
return None
command = python_dict.get('command')
netmiko_obj = QuickNetmiko(python_dict.get('device_ip_name'), python_dict.get('device_type'),
python_dict.get('username'), python_dict.get('password'))
command_result = netmiko_obj.send_commands(command)
genie_parse_obj = GenieCommandParse(python_dict.get('device_type'))
parse_result = genie_parse_obj.parse_string(command, command_result)
fifo_queue.put((parse_result, command_result))
| [
37811,
198,
1212,
6622,
11244,
284,
651,
9729,
11,
290,
21136,
9729,
198,
198,
37811,
198,
6738,
2068,
62,
3262,
76,
12125,
1330,
12029,
7934,
76,
12125,
198,
6738,
12972,
1381,
62,
5235,
494,
62,
21812,
62,
29572,
1330,
49405,
21575,
... | 2.636364 | 517 |
import math
import torch
from torch import nn
from torch.nn import Parameter, Linear
from modules.commons.layers import LayerNorm, Embedding
from utils.nn.seq_utils import get_incremental_state, set_incremental_state, softmax, make_positions
import torch.nn.functional as F
DEFAULT_MAX_SOURCE_POSITIONS = 2000
DEFAULT_MAX_TARGET_POSITIONS = 2000
class SinusoidalPositionalEmbedding(nn.Module):
"""This module produces sinusoidal positional embeddings of any length.
Padding symbols are ignored.
"""
@staticmethod
def get_embedding(num_embeddings, embedding_dim, padding_idx=None):
"""Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
"""
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
if embedding_dim % 2 == 1:
# zero pad
emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
if padding_idx is not None:
emb[padding_idx, :] = 0
return emb
def forward(self, input, incremental_state=None, timestep=None, positions=None, **kwargs):
"""Input is expected to be of size [bsz x seqlen]."""
bsz, seq_len = input.shape[:2]
max_pos = self.padding_idx + 1 + seq_len
if self.weights is None or max_pos > self.weights.size(0):
# recompute/expand embeddings if needed
self.weights = SinusoidalPositionalEmbedding.get_embedding(
max_pos,
self.embedding_dim,
self.padding_idx,
)
self.weights = self.weights.to(self._float_tensor)
if incremental_state is not None:
# positions is the same for every token when decoding a single step
pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len
return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)
positions = make_positions(input, self.padding_idx) if positions is None else positions
return self.weights.index_select(0, positions.view(-1)).view(bsz, seq_len, -1).detach()
def max_positions(self):
"""Maximum number of supported positions."""
return int(1e5) # an arbitrary large number
| [
11748,
10688,
198,
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
198,
6738,
28034,
13,
20471,
1330,
25139,
2357,
11,
44800,
198,
6738,
13103,
13,
9503,
684,
13,
75,
6962,
1330,
34398,
35393,
11,
13302,
6048,
278,
198,
6738,
3384,
4487,... | 2.413346 | 1,079 |
import numpy as np
from geometer import (
Point,
Line,
Plane,
PointCollection,
LineCollection,
PlaneCollection,
join,
meet,
is_perpendicular,
translation,
rotation,
)
| [
11748,
299,
32152,
355,
45941,
198,
6738,
4903,
15635,
1330,
357,
198,
220,
220,
220,
6252,
11,
198,
220,
220,
220,
6910,
11,
198,
220,
220,
220,
36829,
11,
198,
220,
220,
220,
6252,
36307,
11,
198,
220,
220,
220,
6910,
36307,
11,
... | 2.41573 | 89 |
from logging import getLogger
MPIO_BUS_DRIVER_INSTANCE_ID = u"Root\\MPIO\\0000".lower()
logger = getLogger(__name__)
| [
6738,
18931,
1330,
651,
11187,
1362,
198,
198,
7378,
9399,
62,
45346,
62,
7707,
38757,
62,
38604,
19240,
62,
2389,
796,
334,
1,
30016,
6852,
7378,
9399,
6852,
2388,
1911,
21037,
3419,
198,
6404,
1362,
796,
651,
11187,
1362,
7,
834,
36... | 2.595745 | 47 |
# extra processing after fmriprep, for all tasks
import os, json
import sys, glob
import re
import numpy as np
import pandas as pd
from utils import * #import script to use relevante functions
# define participant number and open json parameter file
if len(sys.argv)<2:
raise NameError('Please add subject number (ex:01) '
'as 1st argument in the command line!')
else:
sj = str(sys.argv[1]).zfill(2) #fill subject number with 0 in case user forgets
with open('analysis_params.json','r') as json_file:
analysis_params = json.load(json_file)
# define paths and list of files
filepath = glob.glob(os.path.join(analysis_params['fmriprep_dir'],'sub-{sj}'.format(sj=sj),'*','func/*'))
tasks = ['prf']#['fn','prf','soma','rlb','rli','rs']
for t,cond in enumerate(tasks):
# list of functional files
filename = [run for run in filepath if 'task-'+tasks[t] in run and 'fsaverage' in run and run.endswith('.func.gii')]
filename.sort()
# list of confounds
confounds = [run for run in filepath if 'task-'+tasks[t] in run and run.endswith('_desc-confounds_regressors.tsv')]
confounds.sort()
if not filename: # if list empty
print('Subject %s has no files for %s' %(sj,cond))
else:
TR = analysis_params["TR"]
# set output path for processed files
outpath = os.path.join(analysis_params['post_fmriprep_outdir'],tasks[t],'sub-{sj}'.format(sj=sj))
if not os.path.exists(outpath): # check if path to save median run exist
os.makedirs(outpath)
# make loop for length of filenames
for _,file in enumerate(filename):
# define hemisphere to plot
hemi='left' if '_hemi-L' in file else 'right'
# plot all steps as sanity check
#plot_tSNR(file,hemi,os.path.join(outpath,'tSNR'),mesh='fsaverage')
if cond in ('prf'): # if pRF we cut out first 7TRs from "raw file" to make further analysis better
file = crop_gii(file,analysis_params['crop_pRF_TR'],outpath)
# high pass filter all runs (savgoy-golay)
filt_gii,filt_gii_pth = highpass_gii(file,analysis_params['sg_filt_polyorder'],analysis_params['sg_filt_deriv'],
analysis_params['sg_filt_window_length'],outpath)
#plot_tSNR(filt_gii_pth,hemi,os.path.join(outpath,'tSNR'),mesh='fsaverage')
if cond in ('prf','fn','soma'): # don't clean confounds for prf or fn.. doenst help retino maps(?)
clean_gii = filt_gii
clean_gii_pth = filt_gii_pth
else: #regress out confounds from data (not doing pca)
# to get run number, hence making sure that subtracting right confounds
run_str = '_run-'
run_num = os.path.split(file)[-1][os.path.split(file)[-1].index(run_str)+len(run_str):][0:2]
# confound for that run
conf = [tsv for _,tsv in enumerate(confounds) if run_str+run_num in os.path.split(tsv)[-1]][0]
# first sg filter them
filt_conf = highpass_pca_confounds(conf,analysis_params['nuisance_columns'],analysis_params['sg_filt_polyorder'],analysis_params['sg_filt_deriv'],
analysis_params['sg_filt_window_length'],TR,outpath)
# clean the counfounds from data
clean_gii, clean_gii_pth = clean_confounds(filt_gii_pth,filt_conf,outpath)
# do PSC
psc_data,psc_data_pth = psc_gii(clean_gii_pth,outpath, method='median')
#plot_tSNR(psc_data_pth,hemi,os.path.join(outpath,'tSNR'),mesh='fsaverage')
# smooth it
smt_file, smt_pth = smooth_gii(psc_data_pth,outpath,fwhm=analysis_params['smooth_fwhm'])
#plot_tSNR(smt_pth,hemi,os.path.join(outpath,'tSNR'),mesh='fsaverage')
| [
198,
2,
3131,
7587,
706,
277,
76,
380,
46012,
11,
329,
477,
8861,
198,
198,
11748,
28686,
11,
33918,
198,
11748,
25064,
11,
15095,
198,
11748,
302,
220,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
... | 2.071247 | 1,965 |
from django.contrib import admin
from .models import Profile, FriendRequest
admin.site.register(Profile)
admin.site.register(FriendRequest)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
764,
27530,
1330,
13118,
11,
9182,
18453,
198,
198,
28482,
13,
15654,
13,
30238,
7,
37046,
8,
198,
28482,
13,
15654,
13,
30238,
7,
23331,
18453,
8,
198
] | 3.710526 | 38 |
# Copyright 2021 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dimod
from dwave.system import DWaveSampler, EmbeddingComposite
# 1. Define sampler
sampler = EmbeddingComposite(DWaveSampler(solver={'topology__type': 'chimera'}))
# 2. Define problem: anti-ferromagnetic chain
# E = a*b + b*c + c*a
bqm = dimod.BQM({}, {'ab': 1, 'bc': 1, 'ca': 1}, 0, 'SPIN')
# 3. Submit problem and parameters to the solver
sampleset = sampler.sample(bqm, num_reads=10)
# 4. Evaluate the solution
print(sampleset) | [
2,
15069,
33448,
360,
12,
39709,
11998,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
1... | 3.080597 | 335 |
#encode:utf-8
from __future__ import unicode_literals
import re
import os
from wapiti import Model
from genius.trie import TrieTree
from genius.word import Word
here = os.path.abspath(os.path.dirname(__file__))
library_path = os.path.join(here, 'library')
| [
2,
268,
8189,
25,
40477,
12,
23,
198,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
302,
198,
11748,
28686,
198,
6738,
266,
499,
8846,
1330,
9104,
198,
6738,
15632,
13,
83,
5034,
1330,
309,
5034,
... | 2.888889 | 90 |
import urllib.request
import urllib.parse
def search(parsmeters)
data = urllib.parse.urlencode(parameters)
print(data)
request_ = urllib.request.Request(url='http://www.baidu.com/s?'+data
,method="GET")
response = urllib.request.urlopen(request_)
print(response.url)
HTML=response.read().decode()
print(HTML)
with open("/home/ubuntu/Desktop/lj2.txt",mode='w') as f:
f.write(HTML)
def main():
pars={
"wd":"胡旺是个好人"
} | [
11748,
2956,
297,
571,
13,
25927,
198,
11748,
2956,
297,
571,
13,
29572,
198,
198,
4299,
2989,
7,
79,
945,
4164,
364,
8,
198,
198,
7890,
796,
220,
2956,
297,
571,
13,
29572,
13,
6371,
268,
8189,
7,
17143,
7307,
8,
198,
4798,
7,
... | 2.315789 | 190 |
"""
Copyright 2017-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from streamalert.classifier.clients import FirehoseClient
from streamalert.shared.utils import get_database_name, get_data_file_format
from streamalert.shared.alert import Alert
from streamalert.shared.athena import AthenaClient
from streamalert.shared.config import firehose_alerts_bucket, firehose_data_bucket
from streamalert.shared.logger import get_logger
from streamalert_cli.athena import helpers
from streamalert_cli.helpers import continue_prompt, record_to_schema
from streamalert_cli.utils import (
CLICommand,
generate_subparser,
set_parser_epilog,
UniqueSetAction
)
LOGGER = get_logger(__name__)
CREATE_TABLE_STATEMENT = ('CREATE EXTERNAL TABLE {table_name} ({schema}) '
'PARTITIONED BY (dt string) '
'{file_format} '
'LOCATION \'s3://{bucket}/{table_name}/\'')
STORE_FORMAT_JSON = ('ROW FORMAT SERDE \'org.openx.data.jsonserde.JsonSerDe\' '
'WITH SERDEPROPERTIES (\'ignore.malformed.json\' = \'true\')')
STORE_FORMAT_PARQUET = 'STORED AS PARQUET'
def get_athena_client(config):
"""Get an athena client using the current config settings
Args:
config (CLIConfig): Loaded StreamAlert config
Returns:
AthenaClient: instantiated client for performing athena actions
"""
prefix = config['global']['account']['prefix']
athena_config = config['lambda']['athena_partition_refresh_config']
db_name = get_database_name(config)
# Get the S3 bucket to store Athena query results
results_bucket = athena_config.get(
'results_bucket',
's3://{}-streamalert-athena-results'.format(prefix)
)
return AthenaClient(
db_name,
results_bucket,
'streamalert_cli',
region=config['global']['account']['region']
)
def rebuild_partitions(table, bucket, config):
"""Rebuild an Athena table's partitions
Steps:
- Get the list of current partitions
- Destroy existing table
- Re-create tables
- Re-create partitions
Args:
table (str): The name of the table being rebuilt
bucket (str): The s3 bucket to be used as the location for Athena data
table_type (str): The type of table being refreshed
Types of 'data' and 'alert' are accepted, but only 'data' is implemented
config (CLIConfig): Loaded StreamAlert config
Returns:
bool: False if errors occurred, True otherwise
"""
sanitized_table_name = FirehoseClient.firehose_log_name(table)
athena_client = get_athena_client(config)
# Get the current set of partitions
partitions = athena_client.get_table_partitions(sanitized_table_name)
if not partitions:
LOGGER.info('No partitions to rebuild for %s, nothing to do', sanitized_table_name)
return False
# Drop the table
LOGGER.info('Dropping table %s', sanitized_table_name)
if not athena_client.drop_table(sanitized_table_name):
return False
LOGGER.info('Creating table %s', sanitized_table_name)
# Re-create the table with previous partitions
if not create_table(table, bucket, config):
return False
new_partitions_statements = helpers.add_partition_statements(
partitions, bucket, sanitized_table_name)
LOGGER.info('Creating total %d new partitions for %s', len(partitions), sanitized_table_name)
for idx, statement in enumerate(new_partitions_statements):
success = athena_client.run_query(query=statement)
LOGGER.info('Rebuilt partitions part %d', idx+1)
if not success:
LOGGER.error('Error re-creating new partitions for %s', sanitized_table_name)
write_partitions_statements(new_partitions_statements, sanitized_table_name)
return False
LOGGER.info('Successfully rebuilt all partitions for %s', sanitized_table_name)
return True
def write_partitions_statements(statements, sanitized_table_name):
"""Write partitions statements to a file if re-creating new partitions failed"""
file_name = 'partitions_{}.txt'.format(sanitized_table_name)
LOGGER.error(
'Rebuild partitions failed, writing to local file with name %s',
file_name
)
with open(file_name, 'w') as partition_file:
partition_file.write(statements)
def drop_all_tables(config):
"""Drop all 'streamalert' Athena tables
Used when cleaning up an existing deployment
Args:
config (CLIConfig): Loaded StreamAlert config
Returns:
bool: False if errors occurred, True otherwise
"""
if not continue_prompt(message='Are you sure you want to drop all Athena tables?'):
return False
athena_client = get_athena_client(config)
if not athena_client.drop_all_tables():
LOGGER.error('Failed to drop one or more tables from database: %s', athena_client.database)
return False
LOGGER.info('Successfully dropped all tables from database: %s', athena_client.database)
return True
def _construct_create_table_statement(schema, table_name, bucket, file_format='parquet'):
"""Convert a dictionary based Athena schema to a Hive DDL statement
Args:
schema (dict): The sanitized Athena schema
table_name (str): The name of the Athena table to create
bucket (str): The S3 bucket containing the data
Returns:
str: The Hive DDL CREATE TABLE expression
"""
# Construct the main Athena Schema
schema_statement = []
for key_name in sorted(schema.keys()):
key_type = schema[key_name]
if isinstance(key_type, str):
schema_statement.append('{0} {1}'.format(key_name, key_type))
# Account for nested structs
elif isinstance(key_type, dict):
struct_schema = ', '.join(
'{0}:{1}'.format(sub_key, key_type[sub_key])
for sub_key in sorted(key_type.keys())
)
schema_statement.append('{0} struct<{1}>'.format(key_name, struct_schema))
return CREATE_TABLE_STATEMENT.format(
table_name=table_name,
schema=', '.join(schema_statement),
file_format=STORE_FORMAT_PARQUET if file_format == 'parquet' else STORE_FORMAT_JSON,
bucket=bucket)
def create_table(table, bucket, config, schema_override=None):
"""Create a 'streamalert' Athena table
Args:
table (str): The name of the table being rebuilt
bucket (str): The s3 bucket to be used as the location for Athena data
table_type (str): The type of table being refreshed
config (CLIConfig): Loaded StreamAlert config
schema_override (set): An optional set of key=value pairs to be used for
overriding the configured column_name=value_type.
Returns:
bool: False if errors occurred, True otherwise
"""
enabled_logs = FirehoseClient.load_enabled_log_sources(
config['global']['infrastructure']['firehose'],
config['logs']
)
# Convert special characters in schema name to underscores
sanitized_table_name = FirehoseClient.firehose_log_name(table)
# Check that the log type is enabled via Firehose
if sanitized_table_name != 'alerts' and sanitized_table_name not in enabled_logs:
LOGGER.error('Table name %s missing from configuration or '
'is not enabled.', sanitized_table_name)
return False
athena_client = get_athena_client(config)
config_data_bucket = firehose_data_bucket(config)
if not config_data_bucket:
LOGGER.error('The \'firehose\' module is not enabled in global.json')
return False
# Check if the table exists
if athena_client.check_table_exists(sanitized_table_name):
LOGGER.info('The \'%s\' table already exists.', sanitized_table_name)
return True
if table == 'alerts':
# get a fake alert so we can get the keys needed and their types
alert = Alert('temp_rule_name', {}, {})
output = alert.output_dict()
schema = record_to_schema(output)
athena_schema = helpers.logs_schema_to_athena_schema(schema)
# Use the bucket if supplied, otherwise use the default alerts bucket
bucket = bucket or firehose_alerts_bucket(config)
query = _construct_create_table_statement(
schema=athena_schema,
table_name=table,
bucket=bucket,
file_format=get_data_file_format(config)
)
else: # all other tables are log types
# Use the bucket if supplied, otherwise use the default data bucket
bucket = bucket or config_data_bucket
log_info = config['logs'][table.replace('_', ':', 1)]
schema = dict(log_info['schema'])
sanitized_schema = FirehoseClient.sanitize_keys(schema)
athena_schema = helpers.logs_schema_to_athena_schema(sanitized_schema)
# Add envelope keys to Athena Schema
configuration_options = log_info.get('configuration')
if configuration_options:
envelope_keys = configuration_options.get('envelope_keys')
if envelope_keys:
sanitized_envelope_key_schema = FirehoseClient.sanitize_keys(envelope_keys)
# Note: this key is wrapped in backticks to be Hive compliant
athena_schema['`streamalert:envelope_keys`'] = helpers.logs_schema_to_athena_schema(
sanitized_envelope_key_schema)
# Handle Schema overrides
# This is useful when an Athena schema needs to differ from the normal log schema
if schema_override:
for override in schema_override:
column_name, column_type = override.split('=')
# Columns are escaped to avoid Hive issues with special characters
column_name = '`{}`'.format(column_name)
if column_name in athena_schema:
athena_schema[column_name] = column_type
LOGGER.info('Applied schema override: %s:%s', column_name, column_type)
else:
LOGGER.error(
'Schema override column %s not found in Athena Schema, skipping',
column_name
)
query = _construct_create_table_statement(
schema=athena_schema,
table_name=sanitized_table_name,
bucket=bucket,
file_format=get_data_file_format(config)
)
success = athena_client.run_query(query=query)
if not success:
LOGGER.error('The %s table could not be created', sanitized_table_name)
return False
# Update the CLI config
if table != 'alerts' and bucket != config_data_bucket:
# Only add buckets to the config if they are not one of the default/configured buckets
# Ensure 'buckets' exists in the config (since it is not required)
config['lambda']['athena_partition_refresh_config']['buckets'] = (
config['lambda']['athena_partition_refresh_config'].get('buckets', {})
)
if bucket not in config['lambda']['athena_partition_refresh_config']['buckets']:
config['lambda']['athena_partition_refresh_config']['buckets'][bucket] = 'data'
config.write()
LOGGER.info('The %s table was successfully created!', sanitized_table_name)
return True
def create_log_tables(config):
"""Create all tables needed for historical search
Args:
config (CLIConfig): Loaded StreamAlert config
Returns:
bool: False if errors occurred, True otherwise
"""
if not config['global']['infrastructure'].get('firehose', {}).get('enabled'):
return True
firehose_config = config['global']['infrastructure']['firehose']
firehose_s3_bucket_suffix = firehose_config.get('s3_bucket_suffix', 'streamalert-data')
firehose_s3_bucket_name = '{}-{}'.format(config['global']['account']['prefix'],
firehose_s3_bucket_suffix)
enabled_logs = FirehoseClient.load_enabled_log_sources(
config['global']['infrastructure']['firehose'],
config['logs']
)
for log_stream_name in enabled_logs:
if not create_table(log_stream_name, firehose_s3_bucket_name, config):
return False
return True
| [
37811,
198,
15269,
2177,
12,
25579,
35079,
11,
3457,
13,
198,
198,
26656,
15385,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
5832,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,... | 2.526521 | 5,128 |
"""
Actions for Ruby dependency resolution with Bundler
"""
import logging
from aws_lambda_builders.actions import BaseAction, Purpose, ActionFailedError
from .bundler import BundlerExecutionError
LOG = logging.getLogger(__name__)
class RubyBundlerInstallAction(BaseAction):
"""
A Lambda Builder Action which runs bundle install in order to build a full Gemfile.lock
"""
NAME = 'RubyBundle'
DESCRIPTION = "Resolving dependencies using Bundler"
PURPOSE = Purpose.RESOLVE_DEPENDENCIES
class RubyBundlerVendorAction(BaseAction):
"""
A Lambda Builder Action which vendors dependencies to the vendor/bundle directory.
"""
NAME = 'RubyBundleDeployment'
DESCRIPTION = "Package dependencies for deployment."
PURPOSE = Purpose.RESOLVE_DEPENDENCIES
| [
37811,
198,
32,
2733,
329,
10888,
20203,
6323,
351,
13319,
1754,
198,
37811,
198,
198,
11748,
18931,
198,
198,
6738,
3253,
82,
62,
50033,
62,
50034,
13,
4658,
1330,
7308,
12502,
11,
32039,
11,
7561,
37,
6255,
12331,
198,
6738,
764,
65... | 3.298755 | 241 |
# -*- coding:utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import math
def network_mask_from_cidr_mask(cidr_mask):
'''Calcula a máscara de uma rede a partir do número do bloco do endereço.
@param cidr_mask: Valor do bloco do endereço.
@return: Tuple com o octeto 1, 2, 3, 4 da máscara: (oct1,oct2,oct3,oct4).
'''
address = 0xFFFFFFFF
address = address << (32 - cidr_mask)
return ((address >> 24) & 0xFF, (address >> 16) & 0xFF, (address >> 8) & 0xFF, (address >> 0) & 0xFF)
def is_subnetwork(network_address_01, network_address_02):
'''Verifica se o endereço network_address_01 é sub-rede do endereço network_address_02.
@param network_address_01: Uma tuple com os octetos do endereço, formato: (oct1, oct2, oct3, oct5)
@param network_address_02: Uma tuple com os octetos do endereço e o bloco, formato: (oct1, oct2, oct3, oct5, bloco)
@return: True se network_address_01 é sub-rede de network_address_02. False caso contrário.
'''
if network_address_01 is None or network_address_02 is None:
return False
if len(network_address_01) < 4 or len(network_address_02) != 5:
return False
network_mask_02 = network_mask_from_cidr_mask(network_address_02[4])
return network_address_02[0:4] == _applyNetmask(network_address_01, network_mask_02)
def is_valid_ip(address):
"""Verifica se address é um endereço ip válido."""
if address is None:
return address
pattern = r"\b(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b"
return re.match(pattern, address)
#=========================================================================
# Function to calculate num_hosts by prefix:
#
# IPV4:
# 2^(32-p) = num_hosts
# IPV6:
# 2^(128-p) = num_hosts
#
# where 'p' is, for example, 24, 32 (x.x.x.x/32)...
#
# so, to calculate prefix by number of hosts:
#
# IPV4:
# 32 - logarithm(num_hosts, 2) = p
# IPV6:
# 128 - logarithm(num_hosts, 2) = p
#
# where 'num_hosts' is the number of hosts expected
#=========================================================================
MAX_IPV4_HOSTS = 4294967296
MAX_IPV6_HOSTS = 340282366920938463463374607431768211456
if __name__ == '__main__':
print get_prefix_IPV4(17)
print get_prefix_IPV4(33)
print get_prefix_IPV4(255)
# IPV4
#=========================================================================
# /0 : 4294967296 /11 : 2097152 /22 : 1024
# /1 : 2147483648 /12 : 1048576 /23 : 512
# /2 : 1073741824 /13 : 524288 /24 : 256
# /3 : 536870912 /14 : 262144 /25 : 128
# /4 : 268435456 /15 : 131072 /26 : 64
# /5 : 134217728 /16 : 65536 /27 : 32
# /6 : 67108864 /17 : 32768 /28 : 16
# /7 : 33554432 /18 : 16384 /29 : 8
# /8 : 16777216 /19 : 8192 /30 : 4
# /9 : 8388608 /20 : 4096 /31 : 2
# /10 : 4194304 /21 : 2048 /32 : 1
#=========================================================================
# IPV6
#=========================================================================
# /0 : 340282366920938463463374607431768211456 /11 : 166153499473114484112975882535043072 /22 : 81129638414606681695789005144064
# /1 : 170141183460469231731687303715884105728 /12 : 83076749736557242056487941267521536 /23 : 40564819207303340847894502572032
# /2 : 85070591730234615865843651857942052864 /13 : 41538374868278621028243970633760768 /24 : 20282409603651670423947251286016
# /3 : 42535295865117307932921825928971026432 /14 : 20769187434139310514121985316880384 /25 : 10141204801825835211973625643008
# /4 : 21267647932558653966460912964485513216 /15 : 10384593717069655257060992658440192 /26 : 5070602400912917605986812821504
# /5 : 10633823966279326983230456482242756608 /16 : 5192296858534827628530496329220096 /27 : 2535301200456458802993406410752
# /6 : 5316911983139663491615228241121378304 /17 : 2596148429267413814265248164610048 /28 : 1267650600228229401496703205376
# /7 : 2658455991569831745807614120560689152 /18 : 1298074214633706907132624082305024 /29 : 633825300114114700748351602688
# /8 : 1329227995784915872903807060280344576 /19 : 649037107316853453566312041152512 /30 : 316912650057057350374175801344
# /9 : 664613997892457936451903530140172288 /20 : 324518553658426726783156020576256 /31 : 158456325028528675187087900672
# /10 : 332306998946228968225951765070086144 /21 : 162259276829213363391578010288128 /32 : 79228162514264337593543950336
#
# /33 : 39614081257132168796771975168 /44 : 19342813113834066795298816 /55 : 9444732965739290427392
# /34 : 19807040628566084398385987584 /45 : 9671406556917033397649408 /56 : 4722366482869645213696
# /35 : 9903520314283042199192993792 /46 : 4835703278458516698824704 /57 : 2361183241434822606848
# /36 : 4951760157141521099596496896 /47 : 2417851639229258349412352 /58 : 1180591620717411303424
# /37 : 2475880078570760549798248448 /48 : 1208925819614629174706176 /59 : 590295810358705651712
# /38 : 1237940039285380274899124224 /49 : 604462909807314587353088 /60 : 295147905179352825856
# /39 : 618970019642690137449562112 /50 : 302231454903657293676544 /61 : 147573952589676412928
# /40 : 309485009821345068724781056 /51 : 151115727451828646838272 /62 : 73786976294838206464
# /41 : 154742504910672534362390528 /52 : 75557863725914323419136 /63 : 36893488147419103232
# /42 : 77371252455336267181195264 /53 : 37778931862957161709568 /64 : 18446744073709551616
# /43 : 38685626227668133590597632 /54 : 18889465931478580854784 /65 : 9223372036854775808
#
# /66 : 4611686018427387904 /77 : 2251799813685248 /88 : 1099511627776 /99 : 536870912
# /67 : 2305843009213693952 /78 : 1125899906842624 /89 : 549755813888 /100 : 268435456
# /68 : 1152921504606846976 /79 : 562949953421312 /90 : 274877906944 /101 : 134217728
# /69 : 576460752303423488 /80 : 281474976710656 /91 : 137438953472 /102 : 67108864
# /70 : 288230376151711744 /81 : 140737488355328 /92 : 68719476736 /103 : 33554432
# /71 : 144115188075855872 /82 : 70368744177664 /93 : 34359738368 /104 : 16777216
# /72 : 72057594037927936 /83 : 35184372088832 /94 : 17179869184 /105 : 8388608
# /73 : 36028797018963968 /84 : 17592186044416 /95 : 8589934592 /106 : 4194304
# /74 : 18014398509481984 /85 : 8796093022208 /96 : 4294967296 /107 : 2097152
# /75 : 9007199254740992 /86 : 4398046511104 /97 : 2147483648 /108 : 1048576
# /76 : 4503599627370496 /87 : 2199023255552 /98 : 1073741824 /109 : 524288
#
# /110 : 262144 /122 : 64
# /111 : 131072 /123 : 32
# /112 : 65536 /124 : 16
# /113 : 32768 /125 : 8
# /114 : 16384 /126 : 4
# /115 : 8192 /127 : 2
# /116 : 4096 /128 : 1
# /117 : 2048
# /118 : 1024
# /119 : 512
# /120 : 256
# /121 : 128
#=========================================================================
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
201,
198,
198,
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
393,
517,
198,
2,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
9387,
351,
198,
2,
... | 2.084668 | 4,122 |
from random import randint
n =int(input("n = "))
A =[]
B =[]
for i in range (n):
A.append(randint(1,20))
B.append(randint(1,20))
print(A,B)
dist=0
for i in range (n):
if A[i] != B[i]:
dist+=1
print(dist)
| [
6738,
4738,
1330,
43720,
600,
198,
77,
796,
600,
7,
15414,
7203,
77,
796,
366,
4008,
198,
32,
796,
21737,
198,
33,
796,
21737,
198,
1640,
1312,
287,
2837,
357,
77,
2599,
198,
220,
220,
220,
317,
13,
33295,
7,
25192,
600,
7,
16,
... | 2 | 112 |
import platform
# method copied from STL, not original work of author
| [
11748,
3859,
198,
198,
2,
2446,
18984,
422,
37269,
11,
407,
2656,
670,
286,
1772,
198
] | 4.4375 | 16 |
# Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
An OpenFlow 1.0 L2 learning switch implementation.
"""
import collections
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER, set_ev_cls
from ryu.lib.packet import arp, ether_types, ethernet, icmp, ipv4, packet, tcp
from ryu.ofproto import inet, ofproto_v1_0, ofproto_v1_0_parser
IpPort = collections.namedtuple('IpPort', 'ip port')
ether_type_names = {
ether_types.ETH_TYPE_IP: "IPv4",
ether_types.ETH_TYPE_IPV6: "IPv6",
ether_types.ETH_TYPE_LLDP: "LLDP",
ether_types.ETH_TYPE_ARP: "ARP"
}
arp_opcode_names = {arp.ARP_REPLY: "Reply", arp.ARP_REQUEST: "Request"}
ip_proto_names = {
inet.IPPROTO_ICMP: "ICMP",
inet.IPPROTO_ICMPV6: "ICMPv6",
inet.IPPROTO_TCP: "TCP",
inet.IPPROTO_UDP: "UDP"
}
| [
2,
15069,
357,
34,
8,
2813,
399,
3974,
261,
21821,
290,
44735,
10501,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846... | 2.759223 | 515 |
# Este exemplo carrega a base Wine da UCI, treina uma Arvore de decisao usando
# holdout e outra usando validacao cruzada com 10 pastas.
# Importa bibliotecas necessarias
import numpy as np
import urllib
from sklearn import tree
from sklearn import model_selection
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.externals.six import StringIO
from sklearn.tree import export_graphviz
from IPython.display import Image
from IPython.display import display
import pydotplus
#from sklearn.model_selection import StratifiedShuffleSplit
# Carrega uma base de dados do UCI
# Exemplo carrega a base Wine
url = "http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data"
raw_data = urllib.request.urlopen(url)
# Carrega arquivo como uma matriz
dataset = np.loadtxt(raw_data, delimiter=",")
# Imprime quantide de instancias e atributos da base
print("Instancias e atributos")
print(dataset.shape)
# Coloca em X os 13 atributos de entrada e em y as classes
# Observe que na base Wine a classe eh primeiro atributo
X = dataset[:,1:13]
y = dataset[:,0]
# EXEMPLO USANDO HOLDOUT
# Holdout -> dividindo a base em treinamento (70%) e teste (30%), estratificada
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=.3, random_state=42, stratify=y)
# declara o classificador
clfa = tree.DecisionTreeClassifier(criterion='entropy')
# treina o classificador
clfa = clfa.fit(X_train, y_train)
# testa usando a base de testes
predicted=clfa.predict(X_test)
# calcula a acuracia na base de teste (taxa de acerto)
score=clfa.score(X_test, y_test)
# calcula a matriz de confusao
matrix = confusion_matrix(y_test, predicted)
# apresenta os resultados
print("\nResultados baseados em Holdout 70/30")
print("Taxa de acerto = %.2f " % score)
print("Matriz de confusao:")
print(matrix)
# EXEMPLO USANDO VALIDACAO CRUZADA
clfb = tree.DecisionTreeClassifier(criterion='entropy')
folds=10
result = model_selection.cross_val_score(clfb, X, y, cv=folds)
print("\nResultados baseados em Validacao Cruzada")
print("Qtde folds: %d:" % folds)
print("Taxa de Acerto: %.2f" % result.mean())
print("Desvio padrao: %.2f" % result.std())
# matriz de confusão da validacao cruzada
Z = model_selection.cross_val_predict(clfb, X, y, cv=folds)
cm=confusion_matrix(y, Z)
print("Matriz de confusao:")
print(cm)
#imprime a arvore gerada
print("\nArvore gerada no experimento baseado em Holdout")
dot_data = StringIO()
export_graphviz(clfa, out_file=dot_data,
filled=True, rounded=True,
special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
im=Image(graph.create_png())
display(im)
| [
2,
412,
4169,
21433,
78,
1097,
2301,
64,
257,
2779,
20447,
12379,
14417,
40,
11,
2054,
1437,
334,
2611,
943,
85,
382,
390,
875,
271,
5488,
514,
25440,
220,
198,
2,
1745,
448,
304,
503,
430,
514,
25440,
4938,
330,
5488,
4630,
89,
4... | 2.636893 | 1,030 |
# Copyright 2018-2022 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the Snapshot operation."""
from pennylane import Snapshot
def test_decomposition():
"""Test the decomposition of the Snapshot operation."""
assert Snapshot.compute_decomposition() == []
assert Snapshot().decomposition() == []
def test_label_method():
"""Test the label method for the Snapshot operation."""
assert Snapshot().label() == "|S|"
assert Snapshot("my_label").label() == "|S|"
| [
2,
15069,
2864,
12,
1238,
1828,
47482,
324,
84,
29082,
21852,
3457,
13,
198,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
... | 3.570934 | 289 |
from aiogram import types
from app.modules.base.templates import choice_student_text, choice_teacher_text
from app.modules.schedule.consts import query_type
| [
6738,
257,
72,
21857,
1330,
3858,
198,
198,
6738,
598,
13,
18170,
13,
8692,
13,
11498,
17041,
1330,
3572,
62,
50139,
62,
5239,
11,
3572,
62,
660,
3493,
62,
5239,
198,
6738,
598,
13,
18170,
13,
15952,
5950,
13,
1102,
6448,
1330,
1240... | 3.456522 | 46 |
import json
from airone.lib.acl import ACLType
from airone.lib.test import AironeViewTest
from airone.lib.types import AttrTypeStr
from airone.lib.types import AttrTypeArrStr, AttrTypeArrObj
from airone.lib.types import AttrTypeValue
from django.urls import reverse
from entity.models import Entity, EntityAttr
from entry.models import Entry, AttributeValue
from entry import tasks as entry_tasks
from entity import tasks as entity_tasks
from unittest.mock import patch
from unittest.mock import Mock
class ComplexViewTest(AironeViewTest):
"""
This has complex tests that combine multiple requests across the inter-applicational
"""
@patch('entry.tasks.create_entry_attrs.delay', Mock(side_effect=entry_tasks.create_entry_attrs))
@patch('entry.tasks.edit_entry_attrs.delay', Mock(side_effect=entry_tasks.edit_entry_attrs))
@patch('entity.tasks.create_entity.delay', Mock(side_effect=entity_tasks.create_entity))
@patch('entity.tasks.edit_entity.delay', Mock(side_effect=entity_tasks.edit_entity))
def test_add_attr_after_creating_entry(self):
"""
This test executes followings
- create a new Entity(entity) with an EntityAttr(attr)
- create a new Entry for entity
- update entity to append new EntityAttrs(arr-str, arr-obj)
Then, this checks following
- created additional Attributes which are corresponding to the added EntityAttrs
automatically for accessing show page.
- enable to edit entry correctly because #152 is fixed
"""
user = self.admin_login()
# create an Entity
params = {
'name': 'entity',
'note': '',
'is_toplevel': False,
'attrs': [
{'name': 'attr', 'type': str(AttrTypeStr), 'is_delete_in_chain': True,
'is_mandatory': False, 'row_index': '1'},
],
}
resp = self.client.post(reverse('entity:do_create'),
json.dumps(params),
'application/json')
self.assertEqual(resp.status_code, 200)
# get created objects
entity = Entity.objects.get(name='entity')
attr = entity.attrs.get(name='attr')
# create an Entry for the created entity
params = {
'entry_name': 'entry',
'attrs': [
{'id': str(attr.id), 'type': str(AttrTypeStr),
'value': [{'data': 'attr-value', 'index': 0}], 'referral_key': []},
],
}
resp = self.client.post(reverse('entry:do_create', args=[entity.id]),
json.dumps(params),
'application/json')
self.assertEqual(resp.status_code, 200)
# get created entry object
entry = Entry.objects.get(name='entry')
refer_entity = Entity.objects.create(name='E0', note='', created_user=user)
# edit entity to append a new Array attributes
params = {
'name': 'entity',
'note': '',
'is_toplevel': False,
'attrs': [{
'id': str(attr.id),
'name': attr.name,
'type': str(attr.type),
'is_mandatory': attr.is_mandatory,
'is_delete_in_chain': False,
'row_index': '1',
}, {
'name': 'arr-str',
'type': str(AttrTypeArrStr),
'is_mandatory': True,
'is_delete_in_chain': False,
'row_index': '2',
}, {
'name': 'arr-obj',
'type': str(AttrTypeArrObj),
'ref_ids': [refer_entity.id],
'is_mandatory': True,
'is_delete_in_chain': False,
'row_index': '3',
}],
}
resp = self.client.post(reverse('entity:do_edit', args=[entity.id]),
json.dumps(params),
'application/json')
self.assertEqual(resp.status_code, 200)
# Checks that the Attributes associated to the added EntityAttrs are not created
self.assertEqual(entity.attrs.count(), 3)
self.assertEqual(entry.attrs.count(), 1)
resp = self.client.get(reverse('entry:show', args=[entry.id]))
self.assertEqual(resp.status_code, 200)
# Checks that the new Attibutes is created in the show processing
self.assertEqual(entity.attrs.count(), 3)
self.assertEqual(entry.attrs.count(), entity.attrs.count())
attr_str = entry.attrs.get(name=attr.name)
attr_arr_str = entry.attrs.get(name='arr-str')
attr_arr_obj = entry.attrs.get(name='arr-obj')
refer_entry = Entry.objects.create(name='e0', schema=refer_entity, created_user=user)
attr_str_value_count = attr_str.values.count()
attr_arr_str_value_count = attr_arr_str.values.count()
attr_arr_obj_value_count = attr_arr_obj.values.count()
self.assertEqual(attr_str_value_count, 1)
self.assertEqual(attr_arr_str_value_count, 1)
self.assertEqual(attr_arr_obj_value_count, 1)
# edit to add values to the new attributes
params = {
'entry_name': entry.name,
'attrs': [
{
'id': str(attr_str.id),
'type': str(attr.type),
'value': [{'data': 'hoge', 'index': 0}],
'referral_key': []
},
{
'id': str(attr_arr_str.id),
'type': str(AttrTypeArrStr),
'value': [
{'data': 'foo', 'index': 0},
{'data': 'bar', 'index': 1},
],
'referral_key': []
},
{
'id': str(attr_arr_obj.id),
'type': str(AttrTypeArrObj),
'value': [{'data': refer_entry.id, 'index': 0}],
'referral_key': []},
],
}
resp = self.client.post(reverse('entry:do_edit', args=[entry.id]),
json.dumps(params),
'application/json')
self.assertEqual(resp.status_code, 200)
# check updated values structure and count of AttributeValues
self.assertEqual(attr_str.values.count(), attr_str_value_count + 1)
self.assertEqual(attr_arr_str.values.count(), attr_arr_str_value_count + 1)
self.assertEqual(attr_arr_obj.values.count(), attr_arr_obj_value_count + 1)
value_arr_str = attr_arr_str.values.last()
self.assertEqual(value_arr_str.data_array.count(), 2)
value_arr_obj = attr_arr_obj.values.last()
self.assertEqual(value_arr_obj.data_array.count(), 1)
@patch('entity.tasks.create_entity.delay', Mock(side_effect=entity_tasks.create_entity))
@patch('entry.tasks.create_entry_attrs.delay', Mock(side_effect=entry_tasks.create_entry_attrs))
def test_inherite_attribute_acl(self):
"""
This test executes followings
- create a new Entity(entity) with an EntityAttr(attr)
- change ACL of attr to be private by admin user
- create a new Entry(entry1) from entity by admin user
- switch the user to guest
- create a new Entry(entry2) from entity by guest user
Then, this checks following
- The Entry(entry1) whcih is created by the admin user has one Attribute
- The Entry(entry2) whcih is created by the guest user has no Attribute
"""
user = self.admin_login()
# create an Entity
params = {
'name': 'entity',
'note': '',
'is_toplevel': False,
'attrs': [
{'name': 'attr', 'type': str(AttrTypeStr),
'is_delete_in_chain': False, 'is_mandatory': False, 'row_index': '1'},
],
}
resp = self.client.post(reverse('entity:do_create'),
json.dumps(params),
'application/json')
self.assertEqual(resp.status_code, 200)
self.assertEqual(EntityAttr.objects.count(), 1)
# set acl of attr
entityattr = EntityAttr.objects.get(name='attr')
params = {
'object_id': str(entityattr.id),
'object_type': str(entityattr.objtype),
'acl': [
{
'member_id': str(user.id),
'member_type': 'user',
'value': str(ACLType.Full.id)
}
],
'default_permission': str(ACLType.Nothing.id),
}
resp = self.client.post(reverse('acl:set'), json.dumps(params), 'application/json')
self.assertEqual(resp.status_code, 200)
self.assertEqual(Entity.objects.count(), 1)
self.assertFalse(EntityAttr.objects.get(name='attr').is_public)
# create Entity by admin
entity = Entity.objects.get(name='entity')
params = {
'entry_name': 'entry1',
'attrs': [
{'id': str(entityattr.id), 'type': str(entityattr.objtype),
'value': [{'data': 'attr-value', 'index': 0}], 'referral_key': []},
],
}
resp = self.client.post(reverse('entry:do_create', args=[entity.id]),
json.dumps(params),
'application/json')
self.assertEqual(resp.status_code, 200)
self.assertEqual(Entry.objects.count(), 1)
self.assertEqual(Entry.objects.get(name='entry1').attrs.count(), 1)
# switch to guest user
self.guest_login()
entity = Entity.objects.get(name='entity')
params = {
'entry_name': 'entry2',
'attrs': [
{'id': str(entityattr.id), 'type': str(entityattr.objtype),
'value': [{'data': 'attr-value', 'index': 0}], 'referral_key': []},
],
}
resp = self.client.post(reverse('entry:do_create', args=[entity.id]),
json.dumps(params),
'application/json')
self.assertEqual(resp.status_code, 200)
self.assertEqual(Entry.objects.count(), 2)
self.assertEqual(Entry.objects.get(name='entry2').attrs.count(), 0)
@patch('entity.tasks.edit_entity.delay', Mock(side_effect=entity_tasks.edit_entity))
| [
11748,
33918,
198,
198,
6738,
1633,
505,
13,
8019,
13,
37779,
1330,
17382,
6030,
198,
6738,
1633,
505,
13,
8019,
13,
9288,
1330,
3701,
505,
7680,
14402,
198,
6738,
1633,
505,
13,
8019,
13,
19199,
1330,
3460,
81,
6030,
13290,
198,
6738... | 2.003211 | 5,295 |
from django.shortcuts import render
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.authentication import TokenAuthentication
from rest_framework.authtoken.serializers import AuthTokenSerializer
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.permissions import AllowAny
from . import serializers
from . import models
from . permissions import IsAdminUser, IsLoggedInUserOrAdmin
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
198,
6738,
1334,
62,
30604,
1330,
5009,
1039,
220,
198,
6738,
1334,
62,
30604,
13,
26209,
1330,
18261,
198,
198,
6738,
1334,
62,
30604,
13,
41299,
3299,
1330,
29130,
47649,
3299,
19... | 4.168142 | 113 |
from matplotlib import pyplot as plt
axes_pars = {'axes.labelpad': 5,
'axes.titlepad': 5,
'axes.titlesize': 'small',
'axes.grid': False,
'axes.xmargin': 0,
'axes.ymargin': 0}
plt.rcParams.update(axes_pars) | [
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
198,
198,
897,
274,
62,
79,
945,
796,
1391,
6,
897,
274,
13,
18242,
15636,
10354,
642,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
705,
897,
274,
... | 1.732484 | 157 |
import config as C
from servers import app
if __name__ == '__main__':
app_run = app.init()
app_run.run(host='0.0.0.0', port=C.API_PORT) | [
11748,
4566,
355,
327,
198,
6738,
9597,
1330,
598,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
598,
62,
5143,
796,
598,
13,
15003,
3419,
198,
220,
220,
220,
598,
62,
5143,
13,
5143,
7,
4... | 2.377049 | 61 |
import tensorflow as tf
import numpy as np
from cossmo.output_networks import BalancedOutputNetwork, RaggedOutputNetwork
| [
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
269,
793,
5908,
13,
22915,
62,
3262,
5225,
1330,
38984,
26410,
26245,
11,
371,
14655,
26410,
26245,
628,
198
] | 3.727273 | 33 |
import os
import json
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import string
from collections import defaultdict
import re
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
analyser = SentimentIntensityAnalyzer()
if __name__ == '__main__':
os.remove("tweets1234.json")
os.system('twitterscraper #GOOGL --limit 100 -bd 2018-01-10 -ed 2018-09-20 --output=tweets1234.json')
punctuation = list(string.punctuation)
stop = stopwords.words('english') + punctuation + ['rt', 'via']
with open('tweets1234.json', 'r') as f:
line = f.read() # read only the first tweet/line
total = list()
sentiment = 0.0
pos = 0.0
neg = 0.0
tweet = json.loads(line) # load it as Python dict
type(tweet)
for key in tweet:
#print("\n")
#print("\n Tweet : ")
terms_stop = [term for term in word_tokenize(key['text']) if term not in stop] #Using Nltk to tokenize
total.extend(terms_stop)
for key in total:
if(len(key) < 3):
total.remove(key)
for i in range(len(total)):
total[i] = total[i].lower()
with open('bulltest.json','r') as temp:
bull = json.load(temp)
print(bull)
with open('beartest.json', 'r') as temp:
bear = json.load(temp)
print(bear)
f.close()
sentpos = 0.0
sentneg = 0.0
freq = leaders(total)
for key1 in freq:
#t1 = list(key) #convert tuple to list for comparing
for key2 in bull:
if(key1[0].lower() == key2[0].lower()):
sentpos = sentpos + (key2[1] * key1[1])
for key3 in bear:
if(key1[0].lower() == key3[0].lower()):
sentneg = sentneg - (key3[1] * key1[1])
print("\n\n")
# print(freq)
print(sentpos)
print(sentneg)
print(sentpos+sentneg)
| [
11748,
28686,
198,
11748,
33918,
198,
6738,
299,
2528,
74,
13,
30001,
1096,
1330,
1573,
62,
30001,
1096,
198,
6738,
299,
2528,
74,
13,
10215,
79,
385,
1330,
2245,
10879,
198,
11748,
4731,
198,
6738,
17268,
1330,
4277,
11600,
198,
11748,... | 2.143646 | 905 |
import os, errno
import pandas as pd
first_file = str(input('First Country: '))
second_file = str(input('Second Country: '))
file_path = str(input('Path:(year/month) '))
find_files(first_file, second_file, file_path)
| [
11748,
28686,
11,
11454,
3919,
201,
198,
11748,
19798,
292,
355,
279,
67,
201,
198,
201,
198,
201,
198,
201,
198,
11085,
62,
7753,
796,
965,
7,
15414,
10786,
5962,
12946,
25,
705,
4008,
201,
198,
12227,
62,
7753,
796,
965,
7,
15414,... | 2.595506 | 89 |
import pygame
from board import Grid
from player import Player, Stats
from enum import Enum, auto
import os
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (400,100)
surface = pygame.display.set_mode((1200, 900))
pygame.display.set_caption('Minesweeper')
state = States.running
player = Player()
grid = Grid(player)
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == pygame.MOUSEBUTTONDOWN and state == States.running:
if pygame.mouse.get_pressed()[0]: # check for the left mouse button
pos = pygame.mouse.get_pos()
grid.click(pos[0], pos[1])
elif pygame.mouse.get_pressed()[2]:
pos = pygame.mouse.get_pos()
grid.mark_mine(pos[0]//30, pos[1]//30)
if grid.check_if_win():
state = States.win
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE and (state == States.game_over or state == States.win):
grid.reload()
state = States.running
if event.key == pygame.K_b:
grid.show_mines()
surface.fill((0,0,0))
if player.get_health() == 0:
state = States.game_over
if state == States.game_over:
Stats.draw(surface, 'Game over!', (970, 350))
Stats.draw(surface, 'Press Space to restart', (920, 400))
elif state == States.win:
Stats.draw(surface, 'You win!', (1000, 350))
Stats.draw(surface, 'Press Space to restart', (920, 400))
grid.draw(surface)
Stats.draw(surface, 'Lives remaining', (950, 100))
Stats.draw(surface, str(player.get_health()), (1020, 200))
Stats.draw(surface, 'RMB to mark mine', (950, 550))
Stats.draw(surface, 'press b to show mines', (920, 650))
pygame.display.flip()
| [
11748,
12972,
6057,
198,
6738,
3096,
1330,
24846,
198,
6738,
2137,
1330,
7853,
11,
20595,
198,
6738,
33829,
1330,
2039,
388,
11,
8295,
198,
198,
11748,
28686,
198,
418,
13,
268,
2268,
17816,
10305,
43,
62,
42937,
62,
28929,
3913,
62,
... | 2.245238 | 840 |
"""grouping module"""
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import *
import itertools
import numpy as np
from numpy_indexed.index import as_index
import numpy_indexed as npi
__author__ = "Eelco Hoogendoorn"
__license__ = "LGPL"
__email__ = "hoogendoorn.eelco@gmail.com"
class GroupBy(object):
"""
GroupBy class
contains an index of keys, and extends the index functionality with grouping-specific functionality
"""
def __init__(self, keys, axis=0):
"""
Parameters
----------
keys : indexable object
sequence of keys to group by
axis : int, optional
axis to regard as the key-sequence, in case keys is multi-dimensional
See Also
--------
numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
"""
self.index = as_index(keys, axis)
#forward interesting/'public' index properties
@property
def unique(self):
"""unique keys"""
return self.index.unique
@property
def count(self):
"""count of each unique key"""
return self.index.count
@property
def inverse(self):
"""mapping such that unique[inverse]==keys"""
return self.index.inverse
@property
def groups(self):
"""int, number of groups formed by the keys"""
return self.index.groups
#some different methods of chopping up a set of values by key
def split_iterable_as_iterable(self, values):
"""Group iterable into iterables, in the order of the keys
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
iterable of items in values
Notes
-----
Memory consumption depends on the amount of sorting required
Worst case, if index.sorter[-1] = 0, we need to consume the entire value iterable,
before we can start yielding any output
But to the extent that the keys are already sorted, the grouping is lazy
"""
values = iter(enumerate(values))
cache = dict()
s = iter(self.index.sorter)
for c in self.count:
yield (get_value(i) for i in itertools.islice(s, int(c)))
def split_iterable_as_unordered_iterable(self, values):
"""Group iterable into iterables, without regard for the ordering of self.index.unique
key-group tuples are yielded as soon as they are complete
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
tuple of key, and a list of corresponding items in values
Notes
-----
This approach is lazy, insofar as grouped values are close in their iterable
"""
from collections import defaultdict
cache = defaultdict(list)
count = self.count
unique = self.unique
key = (lambda i: unique[i]) if isinstance(unique, np.ndarray) else (lambda i: tuple(c[i] for c in unique))
for i,v in zip(self.inverse, values):
cache[i].append(v)
if len(cache[i]) == count[i]:
yield key(i), cache.pop(i)
def split_sequence_as_iterable(self, values):
"""Group sequence into iterables
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
iterable of items in values
Notes
-----
This is the preferred method if values has random access, but we dont want it completely in memory.
Like a big memory mapped file, for instance
"""
print(self.count)
s = iter(self.index.sorter)
for c in self.count:
yield (values[i] for i in itertools.islice(s, int(c)))
def split_array_as_array(self, values):
"""Group ndarray into ndarray by means of reshaping
Parameters
----------
values : ndarray_like, [index.size, ...]
Returns
-------
ndarray, [groups, group_size, ...]
values grouped by key
Raises
------
AssertionError
This operation is only possible if index.uniform==True
"""
if not self.index.uniform:
raise ValueError("Array can only be split as array if all groups have the same size")
values = np.asarray(values)
values = values[self.index.sorter]
return values.reshape(self.groups, -1, *values.shape[1:])
def split_array_as_list(self, values):
"""Group values as a list of arrays, or a jagged-array
Parameters
----------
values : ndarray, [keys, ...]
Returns
-------
list of length self.groups of ndarray, [key_count, ...]
"""
values = np.asarray(values)
values = values[self.index.sorter]
return np.split(values, self.index.slices[1:-1], axis=0)
def split(self, values):
"""some sensible defaults"""
try:
return self.split_array_as_array(values)
except:
# FIXME: change to iter in python 3?
return self.split_array_as_list(values)
def __call__(self, values):
"""not sure how i feel about this. explicit is better than implict?"""
return self.unique, self.split(values)
# ufunc based reduction methods. should they return unique keys by default?
    def reduce(self, values, operator=np.add, axis=0, dtype=None):
        """Reduce the values over identical key groups, using the given ufunc
        reduction is over the first axis, which should have elements corresponding to the keys
        all other axes are treated independently for the sake of this reduction
        Parameters
        ----------
        values : ndarray, [keys, ...]
            values to perform reduction over
        operator : numpy.ufunc
            a numpy ufunc, such as np.add or np.sum
        axis : int, optional
            the axis to reduce over
        dtype : output dtype
        Returns
        -------
        ndarray, [groups, ...]
            values reduced by operator over the key-groups
        """
        # arrange the values contiguously by group, so each group occupies one
        # segment along `axis`
        values = np.take(values, self.index.sorter, axis=axis)
        # reduceat reduces each segment [start[i], start[i+1]) in one C-level pass
        return operator.reduceat(values, self.index.start, axis=axis, dtype=dtype)
def sum(self, values, axis=0, dtype=None):
"""compute the sum over each group
Parameters
----------
values : array_like, [keys, ...]
values to sum per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype)
def prod(self, values, axis=0, dtype=None):
"""compute the product over each group
Parameters
----------
values : array_like, [keys, ...]
values to multiply per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype, operator=np.multiply)
    def mean(self, values, axis=0, weights=None, dtype=None):
        """compute the mean over each group
        Parameters
        ----------
        values : array_like, [keys, ...]
            values to take average of per group
        axis : int, optional
            alternative reduction axis for values
        weights : ndarray, [keys, ...], optional
            weight to use for each value
        dtype : output dtype
        Returns
        -------
        unique: ndarray, [groups]
            unique keys
        reduced : ndarray, [groups, ...]
            value array, reduced over groups
        """
        values = np.asarray(values)
        if weights is None:
            # unweighted mean: per-group sum divided by group size
            result = self.reduce(values, axis=axis, dtype=dtype)
            # reshape count so it broadcasts along the reduction axis
            shape = [1] * values.ndim
            shape[axis] = self.groups
            weights = self.count.reshape(shape)
        else:
            # weighted mean: sum(w*v) / sum(w), both reduced per group
            weights = np.asarray(weights)
            result = self.reduce(values * weights, axis=axis, dtype=dtype)
            weights = self.reduce(weights, axis=axis, dtype=dtype)
        return self.unique, result / weights
    def var(self, values, axis=0, weights=None, dtype=None):
        """compute the variance over each group
        Parameters
        ----------
        values : array_like, [keys, ...]
            values to take variance of per group
        axis : int, optional
            alternative reduction axis for values
        Returns
        -------
        unique: ndarray, [groups]
            unique keys
        reduced : ndarray, [groups, ...]
            value array, reduced over groups
        """
        values = np.asarray(values)
        unique, mean = self.mean(values, axis, weights, dtype)
        # deviation of every value from its own group's mean
        err = values - mean.take(self.inverse, axis)
        if weights is None:
            # unweighted: divide summed squared error by group size
            shape = [1] * values.ndim
            shape[axis] = self.groups
            group_weights = self.count.reshape(shape)
            var = self.reduce(err ** 2, axis=axis, dtype=dtype)
        else:
            # weighted: sum(w * err^2) / sum(w)
            weights = np.asarray(weights)
            group_weights = self.reduce(weights, axis=axis, dtype=dtype)
            var = self.reduce(weights * err ** 2, axis=axis, dtype=dtype)
        return unique, var / group_weights
def std(self, values, axis=0, weights=None, dtype=None):
"""standard deviation over each group
Parameters
----------
values : array_like, [keys, ...]
values to take standard deviation of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
unique, var = self.var(values, axis, weights, dtype)
return unique, np.sqrt(var)
    def median(self, values, axis=0, average=True):
        """compute the median value over each group.
        Parameters
        ----------
        values : array_like, [keys, ...]
            values to compute the median of per group
        axis : int, optional
            alternative reduction axis for values
        average : bool, optional
            when average is true, the average of the two central values is taken for groups with an even key-count
        Returns
        -------
        unique: ndarray, [groups]
            unique keys
        reduced : ndarray, [groups, ...]
            value array, reduced over groups
        """
        # hi/lo pick the upper/lower middle position of each sorted group;
        # for odd group sizes they coincide
        mid_2 = self.index.start + self.index.stop
        hi = (mid_2 ) // 2
        lo = (mid_2 - 1) // 2
        #need this indirection for lex-index compatibility
        sorted_group_rank_per_key = self.index.sorted_group_rank_per_key
        # NOTE(review): `median1d` is referenced below but not defined in this
        # chunk — presumably an inner helper (sorting each group and selecting
        # lo/hi, honoring `average`) is missing here; confirm against upstream.
        values = np.asarray(values)
        if values.ndim>1: #is trying to skip apply_along_axis somewhat premature optimization?
            values = np.apply_along_axis(median1d, axis, values)
        else:
            values = median1d(values)
        return self.unique, values
    def mode(self, values, weights=None):
        """compute the mode within each group.
        Parameters
        ----------
        values : array_like, [keys, ...]
            values to compute the mode of per group
        weights : array_like, [keys], float, optional
            optional weight associated with each entry in values
        Returns
        -------
        unique: ndarray, [groups]
            unique keys
        reduced : ndarray, [groups, ...]
            value array, reduced over groups
        """
        # (group-rank, value) acts as a composite key; npi is presumably the
        # package's own namespace (self-import) — confirm at module level
        if weights is None:
            # count occurrences of each value within each group
            unique, weights = npi.count((self.index.sorted_group_rank_per_key, values))
        else:
            # accumulate the weight of each value within each group
            unique, weights = npi.group_by((self.index.sorted_group_rank_per_key, values)).sum(weights)
        # per group, select the value with the highest count/weight
        # (note: `bin` shadows the builtin of the same name)
        x, bin = npi.group_by(unique[0]).argmax(weights)
        return x, unique[1][bin]
def min(self, values, axis=0):
"""return the minimum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take minimum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.minimum, axis)
def max(self, values, axis=0):
"""return the maximum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take maximum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.maximum, axis)
def first(self, values, axis=0):
"""return values at first occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the first value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.start], axis)
def last(self, values, axis=0):
"""return values at last occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the last value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.stop-1], axis)
def any(self, values, axis=0):
"""compute if any item evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], np.bool
value array, reduced over groups
"""
values = np.asarray(values)
if not values.dtype == np.bool:
values = values != 0
return self.unique, self.reduce(values, axis=axis) > 0
def all(self, values, axis=0):
"""compute if all items evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], np.bool
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, operator=np.multiply) != 0
def argmin(self, values):
"""return the index into values corresponding to the minimum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmin of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmin : ndarray, [groups]
index into value array, representing the argmin per group
"""
keys, minima = self.min(values)
minima = minima[self.inverse]
# select the first occurence of the minimum in each group
index = as_index((self.inverse, values == minima))
return keys, index.sorter[index.start[-self.groups:]]
def argmax(self, values):
"""return the index into values corresponding to the maximum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmax of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmax : ndarray, [groups]
index into value array, representing the argmax per group
"""
keys, maxima = self.max(values)
maxima = maxima[self.inverse]
# select the first occurence of the maximum in each group
index = as_index((self.inverse, values == maxima))
return keys, index.sorter[index.start[-self.groups:]]
#implement iter interface? could simply do zip( group_by(keys)(values)), no?
def group_by(keys, values=None, reduction=None, axis=0):
    """Construct a grouping object on the given keys, optionally performing
    the given reduction on the given values.

    Parameters
    ----------
    keys : indexable object
        keys to group by
    values : array_like, optional
        sequence of values, of the same length as keys;
        grouped and split by key, or reduced by key if `reduction` is given
    reduction : lambda, optional
        reduction function to apply to the values in each group
    axis : int, optional
        axis to regard as the key-sequence, in case keys is multi-dimensional

    Returns
    -------
    iterable
        if values is None, a GroupBy object of the given keys object;
        if reduction is None, a tuple of unique keys and grouped values;
        else, a list of (unique key, reduced values) tuples

    See Also
    --------
    numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
    """
    grouping = GroupBy(keys, axis)
    if values is None:
        return grouping
    grouped = grouping.split(values)
    if reduction is None:
        return grouping.unique, grouped
    return [(key, reduction(group)) for key, group in zip(grouping.unique, grouped)]
__all__ = ['group_by']
| [
37811,
8094,
278,
8265,
37811,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
11,
28000,
1098,
62,
17201,
874,
198,
6738,
3170,
1040,
1330,
1635,
198,
198,
11748,
340,
861,
10141,
198,
198,
11748,
2... | 2.400198 | 8,076 |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2021-2022 Valory AG
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Module with methods for ipfs registry."""
import json
import logging
import os
from pathlib import Path
from typing import Dict, List, Optional, Union
import jsonschema
from aea_cli_ipfs.exceptions import HashNotProvided
from aea_cli_ipfs.ipfs_utils import DownloadError, IPFSTool, NodeError
from aea.cli.registry.settings import DEFAULT_IPFS_URL
from aea.cli.utils.config import get_ipfs_node_multiaddr
from aea.configurations.base import PublicId
_default_logger = logging.getLogger(__name__)
LocalRegistry = Dict[str, Dict[str, str]]
LOCAL_REGISTRY_PATH = os.path.join(
os.path.expanduser("~"), ".aea", "local_registry.json"
)
LOCAL_REGISTRY_DEFAULT: LocalRegistry = {
"protocols": {},
"skills": {},
"connections": {},
"contracts": {},
"agents": {},
}
LOCAL_REGISTRY_SCHEMA = {
"type": "object",
"properties": {
"protocols": {
"type": "object",
"propertyNames": {"pattern": r"^[a-z][a-z0-9_]+\/[a-z_0-9]+:\d\.\d\.\d$"},
},
"skills": {"type": "object"},
"connections": {"type": "object"},
"contracts": {"type": "object"},
"agents": {"type": "object"},
},
"required": ["protocols", "skills", "connections", "contracts", "agents"],
}
def validate_registry(registry_data: LocalRegistry) -> None:
    """
    Validate local registry data against LOCAL_REGISTRY_SCHEMA.

    :param registry_data: json like object containing registry data.
    :raises ValueError: if the data does not conform to the schema.
    """
    try:
        jsonschema.validate(registry_data, schema=LOCAL_REGISTRY_SCHEMA)
    except jsonschema.ValidationError as e:
        _default_logger.debug("Registry Not Valid")
        # chain the original validation error so the schema failure detail
        # survives in the traceback
        raise ValueError(str(e)) from e
def write_local_registry(
    registry_data: LocalRegistry, registry_path: str = LOCAL_REGISTRY_PATH
) -> None:
    """
    Write registry data to file.

    Validates the data first, so an invalid registry is never persisted.

    :param registry_data: json like object containing registry data.
    :param registry_path: local registry path.
    """
    validate_registry(registry_data)
    with open(registry_path, mode="w+", encoding="utf-8") as registry_file:
        json.dump(registry_data, registry_file)
def load_local_registry(registry_path: str = LOCAL_REGISTRY_PATH) -> LocalRegistry:
    """Returns local registry data, seeding an empty registry on first use."""
    registry_file = Path(registry_path)
    if not registry_file.is_file():
        # first run: persist the default (empty) registry and return it
        write_local_registry(LOCAL_REGISTRY_DEFAULT)
        return LOCAL_REGISTRY_DEFAULT
    with open(registry_file, mode="r", encoding="utf-8") as fp:
        data = json.load(fp)
    validate_registry(data)
    return data
def get_ipfs_hash_from_public_id(
    item_type: str,
    public_id: PublicId,
    registry_path: str = LOCAL_REGISTRY_PATH,
) -> Optional[str]:
    """Get IPFS hash from local registry.

    :param item_type: package type; looked up under the "<item_type>s" section.
    :param public_id: public id of the package; "latest" resolves to the
        highest registered version with the same author/name prefix.
    :param registry_path: local registry path.
    :return: the registered hash, or None when nothing matches.
    """
    registry_data = load_local_registry(registry_path=registry_path)
    if public_id.package_version.is_latest:
        # collect all registered versions sharing this author/name prefix
        package_versions: List[PublicId] = [
            PublicId.from_str(_public_id)
            for _public_id in registry_data.get(f"{item_type}s", {}).keys()
            if public_id.same_prefix(PublicId.from_str(_public_id))
        ]
        # newest version first
        package_versions = list(
            reversed(sorted(package_versions, key=lambda x: x.package_version))
        )
        if len(package_versions) == 0:
            return None
        public_id, *_ = package_versions
    return registry_data.get(f"{item_type}s", {}).get(str(public_id), None)
def register_item_to_local_registry(
    item_type: str,
    public_id: Union[str, PublicId],
    package_hash: str,
    registry_path: str = LOCAL_REGISTRY_PATH,
) -> None:
    """
    Add PublicId to hash mapping in the local registry.

    :param item_type: item type.
    :param public_id: public id of package.
    :param package_hash: hash of package.
    :param registry_path: local registry path.
    """
    registry = load_local_registry(registry_path=registry_path)
    # items live under the pluralized section, e.g. "skill" -> "skills"
    section = registry[f"{item_type}s"]
    section[str(public_id)] = str(package_hash)
    write_local_registry(registry, registry_path)
def fetch_ipfs(
    item_type: str,
    public_id: PublicId,
    dest: str,
    remote: bool = True,
) -> Optional[Path]:
    """Fetch a package from IPFS node.

    :param item_type: package type, used for local hash lookup.
    :param public_id: public id of the package; may carry an explicit hash.
    :param dest: destination path of the downloaded package.
    :param remote: node selection flag (see NOTE below).
    :return: absolute path of the downloaded package.
    :raises HashNotProvided: when no hash can be determined.
    """
    # NOTE(review): `remote=True` uses the configured multiaddr while the
    # non-remote branch uses DEFAULT_IPFS_URL — confirm this mapping is not
    # inverted.
    if remote:
        ipfs_tool = IPFSTool(get_ipfs_node_multiaddr())
    else:
        ipfs_tool = IPFSTool(addr=DEFAULT_IPFS_URL)
    try:
        package_hash = public_id.hash
    except ValueError:
        # no hash embedded in the public id; only the local registry can
        # resolve one
        package_hash = (
            None if remote else get_ipfs_hash_from_public_id(item_type, public_id)
        )
    if package_hash is None:
        raise HashNotProvided(f"Please provide hash; Public id {public_id}.")
    try:
        ipfs_tool.check_ipfs_node_running()
    except NodeError: # pragma: nocover
        if not remote:
            # start a local daemon on demand
            ipfs_tool.daemon.start()
        else:
            raise Exception(f"Cannot connect to node with addr: {ipfs_tool.addr}")
    try:
        # download into the parent directory of `dest`
        *_download_dir, _ = os.path.split(dest)
        download_dir = os.path.sep.join(_download_dir)
        ipfs_tool.download(package_hash, download_dir)
        package_path = Path(dest).absolute()
        ipfs_tool.daemon.stop()
        return package_path
    except DownloadError as e: # pragma: nocover
        # stop the daemon before surfacing the failure
        ipfs_tool.daemon.stop()
        raise Exception(str(e)) from e
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
16529,
26171,
198,
2,
198,
2,
220,
220,
15069,
33448,
12,
1238,
1828,
3254,
652,
13077,
198,
2,
220,
220,
15069,
2864,
12,
23344,
376,
7569,
13,
20185,
15302,
198,
... | 2.530756 | 2,406 |
# TODO: Turn this into a more general class which can subscribe and unsubscribe from
# TODO: anything, with a context manager interface.
from library.domain.events import subscribe, unsubscribe, DomainEvent
| [
198,
198,
2,
16926,
46,
25,
6756,
428,
656,
257,
517,
2276,
1398,
543,
460,
12383,
290,
32793,
12522,
422,
198,
2,
16926,
46,
25,
1997,
11,
351,
257,
4732,
4706,
7071,
13,
198,
6738,
5888,
13,
27830,
13,
31534,
1330,
12383,
11,
32... | 4.285714 | 49 |
import os
from typing import Optional
| [
11748,
28686,
198,
6738,
19720,
1330,
32233,
628
] | 4.875 | 8 |
import pandas as pd
from sklearn import preprocessing
import nltk
nltk.download('punkt')
dataset_structure = None
TIMESTAMP_FEATURES = {
"timestamp": True,
"day_of_week": True,
"day_of_month": True,
"month": True,
"hour": True,
"minute": True,
"year": True
}
def preprocess(filename, train=True):
    """Run the full preprocessing pipeline over a tsv dataset.

    Args:
        filename ([string]): [filename with dataset as tsv]
        train (bool): when True the train structure (with tweet_id/device
            columns) is used, otherwise the test structure.

    Returns:
        [dataframe]: [dataset after preprocess]
    """
    train_structure = [
        {"name": "tweet_id", "func": empty_func},
        {"name": "user_handle", "func": dummy_encoder},
        {"name": "text", "func": text_preprocess},
        {"name": "timestamp", "func": timestamp_preprocess},
        {"name": "device", "func": label_encoder},
    ]
    test_structure = [
        {"name": "user_handle", "func": dummy_encoder},
        {"name": "text", "func": text_preprocess},
        {"name": "timestamp", "func": timestamp_preprocess},
    ]
    structure = train_structure if train else test_structure
    column_names = [column["name"] for column in structure]
    ds = load_data(filename, column_names)
    # NOTE(review): thresh=0 keeps every row, so this dropna is a no-op —
    # confirm whether a positive threshold was intended.
    ds.dropna(thresh=0, inplace=True)
    for position, column in enumerate(structure):
        ds = column["func"](ds, position, column["name"])
    ds.reset_index(drop=True, inplace=True)
    return ds
def load_data(filename, column_names):
    """Load a tab-separated dataset into a dataframe.

    Args:
        filename ([string]): [filename]
        column_names (list): names to assign to the columns

    Returns:
        [dataframe]: [raw dataset]
    """
    # read_csv with a tab separator is equivalent to read_table
    return pd.read_csv(filename, sep="\t", names=column_names)
def dummy_encoder(ds, column, name):
    """One-hot encode a dataframe column.

    Args:
        ds ([dataframe]): dataset
        column ([integer]): column index (unused, kept for pipeline signature)
        name ([string]): column name

    Returns:
        [dataframe]: dataset with `name` replaced by its dummy columns
    """
    encoded = pd.get_dummies(ds[name], prefix=name)
    without_original = ds.drop(columns=[name])
    return pd.concat([without_original, encoded], axis=1)
def text_preprocess(ds, column, name):
    """This function preprocess the text in the dataset
    Args:
        ds ([dataframe]): dataset
        column ([integer]): column index
        name ([string]): column name
    Returns:
        [dataframe]: dataset after transformation
    """
    text = ds[name]
    # lowercase everything before tokenization
    text = text.str.lower()
    # NOTE(review): remove_whitespace / remove_punct are not defined in this
    # chunk — presumably module-level helpers; confirm they exist.
    text = text.apply(remove_whitespace)
    # split each document into word tokens (requires the nltk 'punkt' data
    # downloaded at module import)
    text = text.apply(lambda X: nltk.word_tokenize(X))
    text = text.apply(lambda X: remove_punct(X))
    ds[name] = text
    return ds
def timestamp_preprocess(ds, column, name):
    """Expand the timestamp column into the features enabled in TIMESTAMP_FEATURES.

    Args:
        ds ([dataframe]): dataset
        column ([integer]): column index (unused, kept for pipeline signature)
        name ([string]): column name

    Returns:
        [dataframe]: dataset with one extra column per enabled feature
    """
    ts = pd.to_datetime(ds[name])
    # one extractor per supported feature name
    extractors = {
        "timestamp": lambda t: t,
        "day_of_week": lambda t: t.apply(lambda x: x.day_of_week),
        "day_of_month": lambda t: t.apply(lambda x: x.day),
        "month": lambda t: t.apply(lambda x: x.month),
        "hour": lambda t: t.apply(lambda x: x.hour),
        "minute": lambda t: t.apply(lambda x: x.minute),
        "year": lambda t: t.apply(lambda x: x.year),
    }
    for feature, enabled in TIMESTAMP_FEATURES.items():
        # a feature is active unless its setting is None; unknown names are
        # silently skipped, as in the original if/elif chain
        if enabled is not None and feature in extractors:
            ds[feature] = extractors[feature](ts)
    return ds
def label_encoder(ds, column, name):
    """Keep only android/iphone rows and integer-encode the label column.

    Args:
        ds ([dataframe]): dataset
        column ([integer]): column index (unused, kept for pipeline signature)
        name ([string]): column name

    Returns:
        [dataframe]: dataset after transformation
    """
    allowed_labels = ["android", "iphone"]
    filtered = ds[ds[name].isin(allowed_labels)]
    encoder = preprocessing.LabelEncoder()
    encoder.fit(filtered[name])
    filtered[name] = encoder.transform(filtered[name])
    # NOTE(review): LabelEncoder sorts its classes, so 'android' -> 0 and
    # 'iphone' -> 1; the original comment claimed the reverse — confirm which
    # mapping downstream code expects.
    return filtered
| [
11748,
19798,
292,
355,
279,
67,
198,
6738,
1341,
35720,
1330,
662,
36948,
198,
11748,
299,
2528,
74,
198,
198,
77,
2528,
74,
13,
15002,
10786,
30354,
83,
11537,
198,
198,
19608,
292,
316,
62,
301,
5620,
796,
6045,
198,
198,
51,
395... | 2.307576 | 1,980 |
# Generated by Django 2.2.11 on 2020-08-13 09:21
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
1157,
319,
12131,
12,
2919,
12,
1485,
7769,
25,
2481,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.875 | 32 |
#! /usr/bin/env python
#
# Copyright (c) 2011 SEOmoz
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''A robot exclusion protocol parser. Because I could not find a good one.'''
__maintainer__ = 'Dan Lecocq'
__copyright__ = '2011-2014 SEOmoz'
__license__ = 'SEOmoz'
__version__ = '0.3.0'
__author__ = 'Dan Lecocq'
__status__ = 'Development'
__email__ = 'dan@moz.com'
#####################################################
# All things logging
#####################################################
import logging
logger = logging.getLogger('reppy')
# emit bare messages (no level/name prefix) on a stream handler
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(message)s'))
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
# quiet by default; callers can lower the level to surface debug output
logger.setLevel(logging.ERROR)
#####################################################
# A couple utilities
#####################################################
import sys
import re
import time
import email.utils
try:
from urllib import parse as urlparse
except ImportError:
# Python 2
import urlparse
# Python 3 removed the `long` type; alias it so py2-style code below keeps working
if sys.version_info[0] == 3:
    long = int
#####################################################
# Import our exceptions at the global level
#####################################################
from .exceptions import ServerError, ReppyException
class Utility(object):
    '''Utility methods for robots.txt handling: url normalization, user-agent
    shortening, HTTP date parsing and cache-freshness (ttl) extraction.'''

    @staticmethod
    def hostname(url):
        '''Return a normalized, canonicalized version of the url's hostname'''
        return urlparse.urlparse(url).netloc

    @staticmethod
    def roboturl(url):
        '''Return a normalized uri to the robots.txt'''
        parsed = urlparse.urlparse(url)
        return '%s://%s/robots.txt' % (parsed.scheme, parsed.netloc)

    @staticmethod
    def short_user_agent(strng):
        '''Return a default user agent string to match, based on strng. For
        example, for 'MyUserAgent/1.0', it will generate 'MyUserAgent' '''
        index = strng.find('/')
        if index == -1:
            return strng
        return strng[0:index]

    @staticmethod
    def parse_time(strng):
        '''Parse an HTTP-style (i.e. email-style) time into a timestamp.

        Raises ValueError if the string cannot be parsed.'''
        v = email.utils.parsedate_tz(strng)
        if v is None:
            # Reject bad data
            raise ValueError("Invalid time.")
        if v[9] is None:
            # Default time zone is GMT/UTC
            v = list(v)
            v[9] = 0
            v = tuple(v)
        return email.utils.mktime_tz(v)

    @staticmethod
    def get_ttl(headers, default):
        '''Extract the correct ttl from the provided headers, or default.

        Cache-Control takes precedence over Expires (RFC 2616 Sec. 13.2.4):
        no-store / must-revalidate / bare no-cache mean a ttl of 0, and
        s-maxage overrides max-age. Otherwise the Expires header, relative
        to the Date header (or to now), is used.'''
        # Now, we'll determine the expiration
        ttl = None
        if headers.get('cache-control') is not None:
            for directive in headers['cache-control'].split(','):
                tokens = directive.lower().partition('=')
                t_name = tokens[0].strip()
                t_value = tokens[2].strip()
                # If we're not allowed to cache, then expires is now
                if t_name in ('no-store', 'must-revalidate'):
                    return 0
                elif t_name == 'no-cache' and t_value == '':
                    # Only honor no-cache if there is no =value after it
                    return 0
                elif t_name == 's-maxage':
                    try:
                        # s-maxage overrides max-age, so return immediately.
                        # int() handles arbitrarily large values on both
                        # python 2 and 3, so the module-level `long` shim is
                        # not needed here.
                        return int(t_value)
                    except ValueError:
                        # Couldn't parse s-maxage as an integer
                        continue
                elif t_name == 'max-age':
                    try:
                        ttl = int(t_value)
                    except ValueError:
                        # Couldn't parse max-age as an integer
                        continue
            # We should honor cache-control first, so if we found anything at
            # all, we should return that
            if ttl is not None:
                return ttl
        # Otherwise, we should use the expires tag
        expires = headers.get('expires')
        date = headers.get('date')
        if expires:
            if date is None:
                base = time.time()
            else:
                try:
                    base = Utility.parse_time(date)
                except ValueError:
                    base = time.time()
            try:
                return Utility.parse_time(expires) - base
            except ValueError:
                pass
        return ttl or default
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
15069,
357,
66,
8,
2813,
30850,
5908,
89,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
198,
2,
257,
4866,
286,
428,
... | 2.428817 | 2,325 |
from flask import Flask
from flask_iniconfig import INIConfig
from flask_sqlalchemy import SQLAlchemy
from ConfigParser import SafeConfigParser, NoSectionError
# Build the Flask app and load database settings from the ini file.
# NOTE: this file is Python 2 (print statements below).
app = Flask(__name__)
parser = SafeConfigParser()
parser.read('../gateConfigs.ini')
app.config['Testing'] = True
# NOTE(review): DEBUG enabled and SECRET_KEY hard-coded — not safe for
# production; confirm these are development-only settings.
app.config['DEBUG'] = True
app.config['WTF_CSRF_ENABLED'] = True
app.config['SECRET_KEY'] = "super-generic-string"
#app.config['SERVER_NAME'] = parser.get('Flask', 'SERVER_NAME')
#print parser.get('Flask', 'SERVER_NAME')
app.config['SQLALCHEMY_DATABASE_URI'] = parser.get('PostgresConfigs', 'URL')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS']=False
# NOTE(review): duplicate assignment of SQLALCHEMY_DATABASE_URI (same value)
app.config['SQLALCHEMY_DATABASE_URI']=parser.get('PostgresConfigs', 'URL')
#SERVER_NAME = "127.0.0.1:3000"
print parser.get('PostgresConfigs', 'URL')
db = SQLAlchemy(app)
| [
6738,
42903,
1330,
46947,
198,
6738,
42903,
62,
259,
4749,
5647,
1330,
3268,
2149,
261,
5647,
198,
6738,
42903,
62,
25410,
282,
26599,
1330,
16363,
2348,
26599,
198,
198,
6738,
17056,
46677,
1330,
19978,
16934,
46677,
11,
1400,
16375,
123... | 2.752577 | 291 |
from django.conf.urls import patterns, url
from talks.users.views import (manage_collections, list_public_collections, browse_public_collections, view_collection, add_collection, edit_collection, delete_collection, my_talks)
# Route table for user collections ("lists") and personal talks.
# NOTE(review): the delete route below has no trailing '$' anchor, so it also
# matches any suffix — confirm this is intended.
urlpatterns = patterns('',
    url(r'^lists$', manage_collections, name='manage-lists'),
    url(r'^mytalks$', my_talks, name='my-talks'),
    url(r'^lists/public$', list_public_collections, name='view-public-lists'),
    url(r'^lists/browse-public$', browse_public_collections, name='list-public-lists'),
    url(r'^lists/new$', add_collection, name='add-list'),
    url(r'^lists/id/(?P<collection_slug>[^/]+)/$', view_collection, name='view-list'),
    url(r'^lists/id/(?P<collection_slug>[^/]+)/edit$', edit_collection, name='edit-list'),
    url(r'^lists/id/(?P<collection_slug>[^/]+)/delete', delete_collection, name='delete-list'),
)
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
7572,
11,
19016,
198,
198,
6738,
6130,
13,
18417,
13,
33571,
1330,
357,
805,
496,
62,
4033,
26448,
11,
1351,
62,
11377,
62,
4033,
26448,
11,
25675,
62,
11377,
62,
4033,
26448,
11,
1... | 2.695652 | 322 |
"""
Serialize data to/from JSON
"""
import datetime
from StringIO import StringIO
from django.core.serializers.python import Serializer as PythonSerializer
from django.core.serializers.python import Deserializer as PythonDeserializer
from django.utils import datetime_safe
from django.utils import simplejson
try:
import decimal
except ImportError:
from django.utils import _decimal as decimal # Python 2.3 fallback
class Serializer(PythonSerializer):
    """
    Convert a queryset to JSON.
    """
    # False: this serializer may be selected through the public
    # serialization framework (unlike the internal python serializer)
    internal_use_only = False
def Deserializer(stream_or_string, **options):
    """
    Deserialize a stream or string of JSON data.
    """
    # wrap a raw string so simplejson can read it like a file
    stream = StringIO(stream_or_string) if isinstance(stream_or_string, basestring) else stream_or_string
    for deserialized in PythonDeserializer(simplejson.load(stream)):
        yield deserialized
class DjangoJSONEncoder(simplejson.JSONEncoder):
    """
    JSONEncoder subclass that knows how to encode date/time and decimal types.
    """
    # ISO-8601-style formats used when serializing dates and times
    DATE_FORMAT = "%Y-%m-%d"
    TIME_FORMAT = "%H:%M:%S"
    # NOTE(review): no `default()` override is visible in this chunk — confirm
    # the date/time/decimal handling exists elsewhere.

# Older, deprecated class name (for backwards compatibility purposes).
DateTimeAwareJSONEncoder = DjangoJSONEncoder
| [
37811,
198,
32634,
1096,
1366,
284,
14,
6738,
19449,
198,
37811,
198,
198,
11748,
4818,
8079,
198,
6738,
10903,
9399,
1330,
10903,
9399,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
46911,
11341,
13,
29412,
1330,
23283,
7509,
355,
11361,
... | 2.957921 | 404 |
#!/usr/bin/env python
# Copyright 2018 Nils Bore (nbore@kth.se)
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import rospy
import random
import copy
import math
import os
import csv
import tf
from visualization_msgs.msg import Marker, InteractiveMarkerControl
from interactive_markers.interactive_marker_server import *
from interactive_markers.menu_handler import *
from geometry_msgs.msg import Pose
from geometry_msgs.msg import Point
from sensor_msgs.msg import NavSatFix
from geodesy import utm
## Initialize the right-click menu
# Add Vertex callback
# Add Vertex callback
# Add Vertex callback
# Delete Vertex callback
# This part draws the line strips between the points
if __name__ == "__main__":
rospy.init_node('mission_planner', anonymous=True)
mission_planner = MissionPlanner()
rospy.spin()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
15069,
2864,
399,
4487,
45409,
357,
46803,
382,
31,
74,
400,
13,
325,
8,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
351,
393,
1231,
17613,
... | 3.607372 | 624 |
import requests
from st2actions.runners.pythonrunner import Action
| [
11748,
7007,
198,
6738,
336,
17,
4658,
13,
36740,
13,
29412,
16737,
1330,
7561,
628
] | 4.533333 | 15 |
import copy
from vyper import (
ast as vy_ast,
)
from vyper.exceptions import (
StructureException,
TypeMismatch,
VariableDeclarationException,
)
from vyper.parser.context import (
Context,
)
from vyper.parser.expr import (
Expr,
)
from vyper.parser.memory_allocator import (
MemoryAllocator,
)
from vyper.types.types import (
BaseType,
ByteArrayType,
)
from vyper.utils import (
SizeLimits,
is_instances,
)
| [
11748,
4866,
198,
198,
6738,
410,
88,
525,
1330,
357,
198,
220,
220,
220,
6468,
355,
410,
88,
62,
459,
11,
198,
8,
198,
6738,
410,
88,
525,
13,
1069,
11755,
1330,
357,
198,
220,
220,
220,
32522,
16922,
11,
198,
220,
220,
220,
59... | 2.522222 | 180 |
#!/usr/bin/python
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
"""
A helper script to copy SIP related files.
"""
from __future__ import print_function
import os
import shutil
import sys
import plistlib
def get_version():
'''Obtain system version info from the disk version plist'''
SYSTEM_VERSION = ('/System/Library/CoreServices/SystemVersion.plist')
try:
sys_ver = plistlib.readPlist(SYSTEM_VERSION)
except:
sys.stderr.write("ERROR: Unable to read SystemVersion.plist")
sys.exit(1)
return sys_ver
def main():
'''Main method for copying files for git references'''
ver = get_version()
directory = '{}_{}'.format(ver.get('ProductUserVisibleVersion'),
ver.get('ProductBuildVersion'))
if os.path.exists(directory):
sys.stderr.write("ERROR: Directory '{}' exists. "
"Exiting...".format(directory))
sys.exit(1)
else:
os.makedirs(directory)
# Copy the launchd rootless file
LAUNCHD_FILE_NAME = 'com.apple.xpc.launchd.rootless.plist'
LAUNCHD_FILE = os.path.join('/System/Library/Sandbox/', LAUNCHD_FILE_NAME)
shutil.copyfile(LAUNCHD_FILE, os.path.join(directory, LAUNCHD_FILE_NAME))
# Copy the rootless conf file
CONF_FILE_NAME = 'rootless.conf'
CONF_FILE = os.path.join('/System/Library/Sandbox/', CONF_FILE_NAME)
shutil.copyfile(CONF_FILE, os.path.join(directory, CONF_FILE_NAME))
print("SUCESSFUL: Copy complete...")
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
43907,
25,
7400,
11338,
28,
23,
4292,
8658,
6482,
10394,
28,
19,
2705,
8658,
11338,
28,
19,
198,
37811,
198,
32,
31904,
4226,
284,
4866,
311,
4061,
3519,
3696,
13,
198,
37811,
198,
198,
... | 2.444094 | 635 |
#!/usr/bin/python
# coding: utf-8
#
# HeapViewer - by @danigargu
#
import os
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
2,
198,
2,
679,
499,
7680,
263,
532,
416,
2488,
25604,
328,
853,
84,
198,
2,
198,
198,
11748,
28686,
628,
198
] | 2.105263 | 38 |
import numpy as np
from sklearn.datasets import load_svmlight_file as lsf
from autosklearn.pipeline.components.classification import add_classifier
from autosklearn.data import competition_data_manager as askdata
import autosklearn.automl as autosk
from component import DeepFeedNet
aad_dataset_dir = '../datasets/dataset_243/'
automl_dataset_dir = '/data/aad/automl_data/openml/293_acc/293_acc_'
libsvm_dataset = '../datasets/covtype.libsvm.binary'
# Also one need to size of features
X_list = askdata.sparse_file_to_sparse_list(automl_dataset_dir + 'train.data')
X_train = askdata.sparse_list_to_csr_sparse(X_list, nbr_features=54)
y_train = np.loadtxt(automl_dataset_dir + 'train.solution')
#X, y = lsf(libsvm_dataset, n_features=54)
#train_size = int(X.shape[0] * 0.9)
#X_train = X[:train_size]
#y_train = y[:train_size] - 1
add_classifier(DeepFeedNet.DeepFeedNet)
# Create model
modl = autosk.AutoML(time_left_for_this_task=1800, seed=20, per_run_time_limit=180,
ensemble_nbest=1, ensemble_size=1,
ml_memory_limit=2048, resampling_strategy='holdout',
tmp_dir='tmp/sparse_tmp', output_dir='tmp/sparse_out',
delete_tmp_folder_after_terminate=False,
initial_configurations_via_metalearning=None,
include_preprocessors=['no_preprocessing'],
include_estimators=['DeepFeedNet'])
modl.fit(X_train, y_train)
# Also one need to size of features
X_test_list = askdata.sparse_file_to_sparse_list(automl_dataset_dir + 'test.data')
X_test = askdata.sparse_list_to_csr_sparse(X_list, nbr_features=54)
y_test = np.loadtxt(automl_dataset_dir + 'test.solution')
#X_test = X[train_size:]
#y_test = y[train_size:] - 1
# Only predict before getting scorin'
y_pred = modl.predict(X_test)
tot_score = modl.score(X_test, y_test)
print(tot_score)
# Comparison
accuracy = np.count_nonzero(y_test == y_pred)
print(float(accuracy) / X_test.shape[0])
| [
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
13,
19608,
292,
1039,
1330,
3440,
62,
82,
14761,
2971,
62,
7753,
355,
300,
28202,
198,
6738,
44619,
74,
35720,
13,
79,
541,
4470,
13,
5589,
3906,
13,
4871,
2649,
1330,
751,
62,
... | 2.240135 | 887 |
import tensorflow as tf
def parse_mot_record(serialized_example):
"""Parse serialized example of TfRecord and extract dictionary of all the information
"""
features = tf.io.parse_single_example(
serialized_example,
features={
'video_name': tf.io.FixedLenFeature([], tf.string),
'height': tf.io.FixedLenFeature([], tf.int64),
'width': tf.io.FixedLenFeature([], tf.int64),
'person_id': tf.io.VarLenFeature(tf.int64),
'xmin': tf.io.VarLenFeature(tf.int64),
'xmax': tf.io.VarLenFeature(tf.int64),
'ymin': tf.io.VarLenFeature(tf.int64),
'ymax': tf.io.VarLenFeature(tf.int64),
'mark': tf.io.VarLenFeature(tf.int64),
'label': tf.io.VarLenFeature(tf.int64),
'visibility_ratio': tf.io.VarLenFeature(tf.float32),
'image_name': tf.io.FixedLenFeature([], tf.string),
'image_jpeg': tf.io.FixedLenFeature([], tf.string),
'is_ignore': tf.io.VarLenFeature(tf.int64),
})
height = tf.cast(features['height'], tf.int32)
width = tf.cast(features['width'], tf.int32)
image_name = tf.cast(features['image_name'], tf.string)
video_name = tf.cast(features['video_name'], tf.string)
image = tf.image.decode_jpeg(features['image_jpeg'], channels=3, dct_method='INTEGER_ACCURATE')
image_shape = tf.stack([height, width, 3])
image = tf.cast(tf.reshape(image, image_shape), tf.uint8)
image_info = {
'image_name': image_name, 'video_name': video_name, 'height': height, 'width': width,
'xmin': tf.sparse.to_dense(features['xmin'], default_value=0),
'xmax': tf.sparse.to_dense(features['xmax'], default_value=0),
'ymin': tf.sparse.to_dense(features['ymin'], default_value=0),
'ymax': tf.sparse.to_dense(features['ymax'], default_value=0),
'person_id': tf.sparse.to_dense(features['person_id'], default_value=0),
'label': tf.sparse.to_dense(features['label'], default_value=0),
'is_ignore': tf.sparse.to_dense(features['is_ignore'], default_value=0),
}
return [image, image_info]
| [
11748,
11192,
273,
11125,
355,
48700,
628,
198,
4299,
21136,
62,
27926,
62,
22105,
7,
46911,
1143,
62,
20688,
2599,
198,
220,
220,
220,
37227,
10044,
325,
11389,
1143,
1672,
286,
309,
69,
23739,
290,
7925,
22155,
286,
477,
262,
1321,
... | 2.200815 | 981 |
# Copyright 2020 Erik Härkönen. All rights reserved.
# This file is licensed to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from pathlib import Path
import requests
import pickle
import sys
import numpy as np
# Reimplementation of StyleGAN in PyTorch
# Source: https://github.com/lernapparat/lernapparat/blob/master/style_gan/pytorch_style_gan.ipynb
class MyLinear(nn.Module):
"""Linear layer with equalized learning rate and custom learning rate multiplier."""
class MyConv2d(nn.Module):
"""Conv layer with equalized learning rate and custom learning rate multiplier."""
class NoiseLayer(nn.Module):
"""adds noise. noise is per pixel (constant over channels) with per-channel weight"""
class LayerEpilogue(nn.Module):
"""Things to do at the end of each layer."""
# From: https://github.com/lernapparat/lernapparat/releases/download/v2019-02-01/ | [
2,
15069,
12131,
22722,
367,
11033,
81,
74,
9101,
38572,
13,
1439,
2489,
10395,
13,
198,
2,
770,
2393,
318,
11971,
284,
345,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
... | 3.492857 | 420 |
from unittest import TestCase
import boto3
from moto import mock_ec2
from altimeter.aws.resource.ec2.vpc import VPCResourceSpec
from altimeter.aws.scan.aws_accessor import AWSAccessor
| [
6738,
555,
715,
395,
1330,
6208,
20448,
198,
198,
11748,
275,
2069,
18,
198,
6738,
285,
2069,
1330,
15290,
62,
721,
17,
198,
198,
6738,
5988,
16912,
13,
8356,
13,
31092,
13,
721,
17,
13,
85,
14751,
1330,
569,
5662,
26198,
22882,
198... | 3.065574 | 61 |
import aiohttp
apiUrl = "https://api2.hiveos.farm/api/v2"
| [
11748,
257,
952,
4023,
198,
198,
15042,
28165,
796,
366,
5450,
1378,
15042,
17,
13,
71,
425,
418,
13,
43323,
14,
15042,
14,
85,
17,
1,
628
] | 2.222222 | 27 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.CreditPayRefuseVO import CreditPayRefuseVO
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
33918,
198,
198,
6738,
435,
541,
323,
13,
64,
404,
13,
15042,
13,
26209,
13,
2348,
541,
323,
31077,
1330,
978,... | 2.684932 | 73 |
#!/usr/bin/env python
"""
See LICENSE.txt file for copyright and license details.
"""
"""
A plot that shows when break even is reached.
"""
import numpy as np
import matplotlib.pyplot as plt
from decimal import Decimal
import sys
x_array = []
y_array = []
def load_data():
"""
Load data
"""
var_data = open(sys.argv[1].strip(), 'r').read()
var_data_array = var_data.split('\n')
i = 0
for line in var_data_array:
i += 1
# skip the last 2 lines of the output
if (len(line)>1) and (i<len(var_data_array) - 2):
x_array.append(abs(float(line.strip().split(' ')[0].strip())))
y_array.append(i)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
220,
220,
220,
4091,
38559,
24290,
13,
14116,
2393,
329,
6634,
290,
5964,
3307,
13,
198,
37811,
628,
198,
37811,
198,
220,
220,
220,
317,
7110,
326,
2523,
618,
2270,
772,
... | 2.332192 | 292 |
from moai.nn.utils.instantiate import instantiate
from moai.nn.utils.itertools import repeat
__all__ = [
"instantiate",
"repeat",
] | [
6738,
6941,
1872,
13,
20471,
13,
26791,
13,
8625,
415,
9386,
1330,
9113,
9386,
198,
6738,
6941,
1872,
13,
20471,
13,
26791,
13,
270,
861,
10141,
1330,
9585,
198,
198,
834,
439,
834,
796,
685,
198,
220,
220,
220,
366,
8625,
415,
9386... | 2.641509 | 53 |
#
# @lc app=leetcode id=938 lang=python
#
# [938] Range Sum of BST
#
# https://leetcode.com/problems/range-sum-of-bst/description/
#
# algorithms
# Easy (78.13%)
# Likes: 448
# Dislikes: 85
# Total Accepted: 83.4K
# Total Submissions: 106.7K
# Testcase Example: '[10,5,15,3,7,null,18]\n7\n15'
#
# Given the root node of a binary search tree, return the sum of values of all
# nodes with value between L and R (inclusive).
#
# The binary search tree is guaranteed to have unique values.
#
#
#
#
# Example 1:
#
#
# Input: root = [10,5,15,3,7,null,18], L = 7, R = 15
# Output: 32
#
#
#
# Example 2:
#
#
# Input: root = [10,5,15,3,7,13,18,1,null,6], L = 6, R = 10
# Output: 23
#
#
#
#
# Note:
#
#
# The number of nodes in the tree is at most 10000.
# The final answer is guaranteed to be less than 2^31.
#
#
#
#
# Definition for a binary tree node.
| [
2,
198,
2,
2488,
44601,
598,
28,
293,
316,
8189,
4686,
28,
24,
2548,
42392,
28,
29412,
198,
2,
198,
2,
685,
24,
2548,
60,
13667,
5060,
286,
44992,
198,
2,
198,
2,
3740,
1378,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
9521,
... | 2.345845 | 373 |
# Databricks notebook source
dbutils.notebook.run("notebook_workflow", 0, {'action':'landing_load','job':'ytb_videoCat','mode':'dbfs','tbl':'ytb_videoCat'})
# COMMAND ----------
dbutils.notebook.run("notebook_workflow", 0, {'action':'staging_load_videoCat','job':'ytb_videoCat','mode':'dbfs','tbl':'ytb_videoCat'})
# COMMAND ----------
dbutils.notebook.run("notebook_workflow", 0, {'action':'int_load_videoCat','job':'ytb_videoCat','mode':'dbfs','tbl':'ytb_videoCat'})
# COMMAND ----------
| [
2,
16092,
397,
23706,
20922,
2723,
198,
67,
4360,
4487,
13,
11295,
2070,
13,
5143,
7203,
11295,
2070,
62,
1818,
11125,
1600,
657,
11,
1391,
6,
2673,
10354,
6,
1044,
278,
62,
2220,
41707,
21858,
10354,
6,
20760,
65,
62,
15588,
21979,
... | 2.507538 | 199 |
#!/usr/bin/env python3
import sys
nums = list(sorted([0] + [int(line.rstrip()) for line in sys.stdin]))
nums.append(nums[-1] + 3)
dp = [1]
while len(dp) < len(nums):
cur = len(dp)
i = cur-1
cum = 0
while i >= 0 and nums[cur]-nums[i] <= 3:
cum += dp[i]
i -= 1
dp.append(cum)
print(dp[-1])
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
25064,
198,
198,
77,
5700,
796,
1351,
7,
82,
9741,
26933,
15,
60,
1343,
685,
600,
7,
1370,
13,
81,
36311,
28955,
329,
1627,
287,
25064,
13,
19282,
259,
60,
4008,
19... | 1.912791 | 172 |
#!/usr/bin/env python3
import pandas as pd
import numpy as np
import scipy.stats as ss
import fire
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
stream_handler = logging.StreamHandler() # messages show up in terminal
formatter = logging.Formatter('%(asctime)s %(levelname)s :: %(message)s') # format the message for the terminal output
stream_handler.setFormatter(formatter) # add formatter to the stream handler
stream_handler.setLevel(logging.INFO)
logger.addHandler(stream_handler)
class ABTest(object):
"""
A simple AB Test for two proportions or averages.
"""
def __t_test(self, col1, col2, ci=True):
"""
Two-sample (Independent Samples) T-test (two-tailed)
Input:
col1: pandas.Series
col2: pandas.Series
Return
t_test_statistic: T test statistic
p_value: P-value for hypothesis test
ci_lower: Confidence Interval Lower limit
ci_upper: Confidence Interval Upper limit
"""
assert type(self.df[col1]) == pd.core.series.Series, "Col1 Should be pandas.Series"
assert type(self.df[col2]) == pd.core.series.Series, "Col1 Should be pandas.Series"
logging.info("Two-sample (Independent Samples) T-test (two-tailed) method running!")
# Means
mean1, mean2 = self.df[col1].mean(), self.df[col2].mean()
# Calculate Standard error
std1, std2 = self.df[col1].std(), self.df[col2].std()
se1 = std1 / np.sqrt(self.df[col1].shape[0])
se2 = std2 / np.sqrt(self.df[col2].shape[0])
standard_error_for_difference_between_means = np.sqrt(se1**2 + se2**2)
mean_diff = abs(mean1 - mean2)
t_test_statistic = np.round((mean_diff / standard_error_for_difference_between_means),3)
degrees_of_freedom = self.df[[col1, col2]].shape[0] - 2
p_value = np.round((1 - ss.t.cdf(abs(t_test_statistic), degrees_of_freedom)) * 2, 3) # two-tailed
# CONFIDENCE INTERVAL
if ci:
t_cl = ss.t.ppf(self.__b, df=degrees_of_freedom) # t value for confidence interval
ci_lower = mean_diff - t_cl * standard_error_for_difference_between_means
ci_upper = mean_diff + t_cl * standard_error_for_difference_between_means
return t_test_statistic, p_value, np.round((ci_lower, ci_upper), 3)
else:
return t_test_statistic, p_value
def __z_test(self, col1, col2, ci=True):
"""
Z-test for two proportions
Input:
col1: pandas.Series
col2: pandas.Series
Return
z_test_statistic: z test statistic
p_value: P-value for hypothesis test
ci_lower: Confidence Interval Lower limit
ci_upper: Confidence Interval Upper limit
"""
assert type(self.df[col1]) == pd.core.series.Series, "Col1 Should be pandas.Series"
assert type(self.df[col2]) == pd.core.series.Series, "Col1 Should be pandas.Series"
logging.info("Z-test for two proportions method running!")
prop_a, n_a = self.df[col1].value_counts(normalize=True)[1], len(self.df[col1])
prop_b, n_b = self.df[col2].value_counts(normalize=True)[1], len(self.df[col2])
prop_a, prop_b, n_a, n_b = float(prop_a), float(prop_b), float(n_a), float(n_b)
# Standard error of two proportions
se1 = np.sqrt((prop_a*(1-prop_a))/n_a)
se2 = np.sqrt((prop_b*(1-prop_b))/n_b)
standard_error_for_difference_between_proportions = np.sqrt(se1**2 + se2**2)
prop_diff = abs(prop_b - prop_a)
z_test_statistic = np.round((prop_diff / standard_error_for_difference_between_proportions),3)
pvalue = np.round((ss.norm.pdf(abs(z_test_statistic)) * 2),3) # two-tailed
# CONFIDENCE INTERVAL
if ci:
z_cl = ss.norm.ppf(self.__b)
ci_lower = prop_diff - z_cl * standard_error_for_difference_between_proportions
ci_upper = prop_diff + z_cl * standard_error_for_difference_between_proportions
return z_test_statistic, pvalue, np.round((ci_lower, ci_upper), 3)
else:
return z_test_statistic, pvalue
def run(self, method: str, data: pd.DataFrame, col1: str, col2: str) -> list:
"""
Run:
python3 ab_test.py run --method=props --data=ab_test_prop.csv --col1=websiteA --col2=websiteB
python3 ab_test.py run --method=avgs --data=ab_test_avg.csv --col1=websiteA --col2=websiteB
"""
try:
self.df = data
except (ValueError, TypeError):
pass
try:
self.df = pd.read_csv(data, delimiter=',')
except (KeyError, ValueError):
#print('Delimeter maybe wrong')
pass
if method=='avgs':
return self.__t_test(col1, col2)
elif method=='props':
return self.__z_test(col1, col2)
else:
raise ValueError("Should not come here.")
# TESTS
import unittest
if __name__ == '__main__':
fire.Fire(ABTest) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
13,
34242,
355,
37786,
198,
11748,
2046,
198,
11748,
18931,
628,
198,
6404,
1362... | 2.1542 | 2,393 |